code stringlengths 38 801k | repo_path stringlengths 6 263 |
|---|---|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# Pandas display configuration: allow up to 500 rows/columns so the raw
# table renders in full inside the notebook.
import pandas as pd

pd.set_option("display.max_columns", 500)
pd.set_option("display.max_rows", 500)

# Location and CSV dialect of the raw coverage-plans extract:
# tab-separated, Latin-1 encoded, comma as the decimal mark.
table = "TB_PLANES_COBERTURA.tsv"
location = "../../data/raw"
sep = '\t'
encoding = 'latin1'
decimal = ','

df = pd.read_csv(f"{location}/{table}", sep=sep, encoding=encoding, decimal=decimal)

# Preview the first 15 rows.
df.head(15)
| notebooks/0. Overview/2. Vista de TB_PLANES_COBERTURA.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + colab={"base_uri": "https://localhost:8080/"} id="dr4x6hq8H1gu" outputId="3c60f3f8-e2f6-4d98-b06b-e7c30dc0c3cf"
# ! unzip /content/LSTM.zip
# + [markdown] id="OgvzeT11jXbR"
# # Covid-19 confirmed cases prediction System
# + id="9CT-lKhfjXBg"
# + [markdown] id="XzKmwNVEjnYw"
# * #### The system is designed to work on the real-time data to capture the ongoing trend
#
#
# * #### Data is downloaded from the web site which provides the real-time data and then it’s stored in the local storage
#
# * #### Data then preprocessed and presented in the required format
#
# * #### Here we have used LSTM based RNN model for the time series prediction
#
# * #### Once the data is cleaned, it is used to train the model
#
# * #### Once the model is trained we use it for the inference and results are shown in the graph format
#
# + id="Jmhj6SjyjlXG"
# + id="vsdUfhjTlQsz"
# + [markdown] id="qE1UvmRultQO"
# ## Code description
# + id="SpQPCpLwls4y"
# + [markdown] id="u5Wsbgislw7t"
# * #### The complete functionality is written in the src folder and presented in main.py in an abstract way
#
#
# * #### Data-related functionality is implemented in src.Data and the model is implemented in src.LSTM_model ( tests are included in the same )
#
# * #### Helper functionality is implemented in src._helper
#
# + id="cZ5Fk82glsw-"
# + id="RIqwlQQTnGYs"
# + [markdown] id="pBFx_sejnG3E"
# ## Setup the Core
# + id="DCt51UlYK8VG"
from main import Core
# Construct the project's Core facade, which (per the narrative above) drives
# data download, preprocessing, the LSTM model, training and inference.
C = Core(
    n_steps = 30, # no. of days to consider for prediction (LSTM lookback window)
    optimizer='adam', # Keras optimizer name
    loss='mape', # mean absolute percentage error
    batch_size = 128,
    epochs = 300,
    steps_per_epoch = 10,
    test_split = 0.1, # fraction of the series held out for testing
    validation_steps = 5,
    model_path = 'Data/saved_model/' # path to store the trained model
)
# + id="fFPrUc4LnJmq"
# + [markdown] id="FMzBHF2NnKjG"
# ## Get the data
# + id="A29bXS4bnKTv"
# + id="ch84i5_qLM44"
data = C.get_data()
# + colab={"base_uri": "https://localhost:8080/"} id="UPC6vUsxnSpr" outputId="b496ccf5-8b9e-42a6-c165-f8f9ca73ca70"
print(data)
# + id="jE4xyFrOoKMC"
# + [markdown] id="03h_dQbjnTiW"
# ## Get the model
# + id="6WfyzpGLnTK8"
# + id="S4SaqAKgLRny"
model = C.get_model()
# + colab={"base_uri": "https://localhost:8080/"} id="VEH4lVLtnWnt" outputId="4bcb29ea-7736-4a6c-df7e-93202c58c920"
model.summary()
# + id="ytfdJqxAoKlz"
# + [markdown] id="PwSU33ULnXHO"
# ## Train the model
# + id="8GyRV27TnXh-"
# + colab={"base_uri": "https://localhost:8080/", "height": 1000} id="mAmL2ClQLhzo" outputId="725de889-4a60-41a7-9d8e-0bc430d5df4a"
C.train(data = data,
model = model)
# + id="ICEdL_4cncZc"
# + [markdown] id="bobYu_mKnc8D"
# ## Display the results
#
#
# * #### the first graph represents the prediction and actual for the seen data
#
#
# * #### the second graph represents the prediction on the future unseen data
#
#
# + id="o6rlkom6ncYE"
import tensorflow as tf
# Reload the trained model from disk -- presumably the artifact written by
# C.train via model_path above -- so results come from the persisted model.
# NOTE(review): '/content/...' is a Colab-specific absolute path.
Model = tf.keras.models.load_model('/content/Data/saved_model')
# + colab={"base_uri": "https://localhost:8080/", "height": 1000} id="9wKYmA1ULmUr" outputId="ccd1082d-8fe4-46fa-be93-d8e19964e60f"
C.show_results(model = Model,
               data = data,
               future_date = 10) # no. of days to show the future trend
# + id="YSjtAbKkMoBC"
| Prediction/LSTM/working_demo.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
"how to evaluate correctness of cerebra performance? with GATK Funcotator as a gold standard "
import pandas as pd
# Load the cerebra benchmarking table (rows = GIAB samples, columns = genes);
# each cell appears to hold the translation IDs called for that gene/sample.
# NOTE(review): hardcoded absolute local path.
cerebra_bench = pd.read_csv('/Users/lincoln.harris/code/cerebra/cerebra/tmp/out/cerebra_giab_all_benchmarking.csv', index_col=0)
cerebra_bench
# Normalize the sample name, then persist the revised table alongside the notebook.
cerebra_bench = cerebra_bench.rename(index={'hapmap':'hap_map'})
cerebra_bench.to_csv('cerebra_bench_revised.csv')
# Spot-check a single sample/gene cell.
cerebra_bench.loc['ash_father']['CDH3']
# Iterate the sample names and pull each row (exploratory sanity check).
for df_line in cerebra_bench.index:
    print(df_line)
    curr_line = cerebra_bench.loc[df_line]
    #print(curr_line)
    #for gene_str in curr_line:
    #    print(gene_str)
cerebra_bench.loc['ash_father']
# +
# shot in the dark here -- works
# might get messy for genes that have multiple translation IDs though...might be good idea to match only one
# translation ID per gene?
# Collect the translation IDs recorded for the 'ash_father' sample.
# Each cell appears to hold a stringified list of (gene, translation-ID)
# tuples, so fragments are split on commas, version suffixes dropped, and
# the single-quoted token extracted.
# NOTE: a gene can contribute several translation IDs -- it may be better
# to keep only one ID per gene.
ash_father_tids = []
for cell_value in cerebra_bench.loc['ash_father']:
    for fragment in cell_value.split(','):
        candidate = fragment.split('.')[0].strip("[").strip("]")
        quoted = candidate.split("'")
        if len(quoted) < 2:
            # fragment contains no quoted token -- nothing to record
            continue
        ash_father_tids.append(quoted[1])
# -
ash_father_tids
# +
# this works! very slow tho
funco_file = '/Users/lincoln.harris/code/funcotator/out/ash_father_benchmark.vcf'
num_found = 0
num_not_found = 0
for tid in ash_father_tids:
g = ! grep $tid $funco_file
if g:
num_found = num_found + 1
else:
num_not_found = num_not
# -
num_found = 10
num_not_found = 11
# +
import csv
with open('funco_bench_sub/han_father_ensp_ids.csv', 'r') as f:
reader = csv.reader(f)
your_list = list(reader)
your_list
# -
t = 'ENSP00000412228'
# BUG FIX: csv.reader yields each row as a *list* of fields, so the original
# `t in your_list` compared a string against lists and was always False.
# Test membership against the fields inside each row instead.
any(t in row for row in your_list)
t = pd.read_csv('funco_bench_sub/han_father_ensp_ids.csv', header=None, names=['col'])
t
list(t.col)
# Translation IDs from the two runs being compared; duplicates are expected,
# only the distinct overlap matters below.
l1 = [
    'ENSP00000317992', 'ENSP00000349216', 'ENSP00000342313', 'ENSP00000411579',
    'ENSP00000412228', 'ENSP00000317992', 'ENSP00000349216', 'ENSP00000349216',
    'ENSP00000342313', 'ENSP00000342313', 'ENSP00000411579',
]
l2 = [
    'ENSP00000342313', 'ENSP00000411579', 'ENSP00000412228', 'ENSP00000412228',
    'ENSP00000317992', 'ENSP00000349216', 'ENSP00000342313', 'ENSP00000412228',
    'ENSP00000317992', 'ENSP00000349216', 'ENSP00000342313', 'ENSP00000412228',
    'ENSP00000317992', 'ENSP00000349216', 'ENSP00000342313', 'ENSP00000412228',
    'ENSP00000317992', 'ENSP00000317992', 'ENSP00000317992', 'ENSP00000317992',
    'ENSP00000317992', 'ENSP00000317992', 'ENSP00000317992', 'ENSP00000317992',
    'ENSP00000317992', 'ENSP00000343930', 'ENSP00000463694', 'ENSP00000317992',
    'ENSP00000317992', 'ENSP00000343930', 'ENSP00000343930', 'ENSP00000368717',
    'ENSP00000368717', 'ENSP00000368719', 'ENSP00000368719', 'ENSP00000368720',
    'ENSP00000368720', 'ENSP00000463694', 'ENSP00000463694', 'ENSP00000317992',
    'ENSP00000343930', 'ENSP00000368717', 'ENSP00000368719', 'ENSP00000368720',
    'ENSP00000463694', 'ENSP00000317992', 'ENSP00000343930', 'ENSP00000368717',
    'ENSP00000368719',
]
# Number of distinct IDs present in both runs.
len(set(l1) & set(l2))
| python_notebooks/benchmark_scratch.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 2
# language: python
# name: python2
# ---
with open('../_posts/markdown/layout.R') as f:
docs = f.read()
docs.split('# https://')[1]
# +
template = '''
---
title: {title}
name: {name}
permalink: {url}
description: {description}
layout: base
thumbnail: {thumbnail}
language: r
page_type: example_index
has_thumbnail: false
display_as: {display_as}
---
```{{r, echo = FALSE, message=FALSE}}
knitr::opts_chunk$set(message = FALSE)
Sys.setenv("plotly_username"="RPlotBot")
Sys.setenv("plotly_api_key"="<KEY>")
```
# {name}
```{{r, message=FALSE}}
{code_block}
```
'''
# -
import re
exs = re.split('# (https://plot\.ly/r/.*)', docs)[1:]
# +
import os.path
# NOTE(review): os.path appears unused in this cell -- confirm before removing.
# Walk the (url, code) pairs produced by re.split above: even indices hold the
# plot.ly doc URLs, odd indices the R code block that follows each URL.
# (Python 2 notebook: `print` statements are intentional.)
for i in range(0, len(exs)-1, 2):
    relative_url = exs[i].split('https://plot.ly/r/')[1]
    url = 'r1/{}'.format(relative_url)
    # derive page metadata from the URL slug
    name = relative_url.replace('-', ' ').replace('/', '')
    title = name.title()
    name = title
    description = '{} in R.'.format(relative_url.replace('-', ' '))
    thumbnail = '{}.png'.format(relative_url.replace('/',''))
    display_as = 'layout'
    code_block = exs[i+1]
    # roxygen-style "#'" comments become "###" markdown-level comments
    code_block = code_block.replace("#'", '###')
    example = template.format(
        title=title,
        name=name,
        url=url.lower(),
        description=description,
        thumbnail=thumbnail,
        display_as=display_as,
        code_block=code_block
    )
    # the template begins with a newline; drop it so the front matter starts at line 1
    if example[0] == '\n':
        example = example[1:]
    print relative_url
    filename = '/Users/chriddyp/Repos/documentation/_posts/markdown/2015-07-30-{}.Rmd'.format(relative_url.replace('/', ''))
    with open(filename, 'w') as f:
        f.write(example)
    # NOTE(review): '\{}\"' looks like a malformed escape -- probably meant
    # '\"{}\"' so the knit path is fully quoted; confirm the intended command.
    print "## Rscript -e 'knitr::knit(\{}\")'".format(filename)
# ! mv /Users/chriddyp/Repos/documentation/scripts/*md /Users/chriddyp/Repos/documentation/_posts/markdown/
# -
print exs[1]
# ! mv /Users/chriddyp/Repos/documentation/scripts/*md /Users/chriddyp/Repos/documentation/_posts/markdown/
print exs[11]
| scripts/scrape-r-docs.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import argparse
import copy
from pathlib import Path
import numpy as np
import pandas as pd
import torch
from sklearn.metrics import roc_auc_score
from ml import learner, data
from ml.vision import utils
from pipe import constants, augmentations
# -
test = pd.read_csv(constants.sample_submission_fpath)
train = pd.read_csv(constants.train_folds_fpath)
test_image_paths = [
constants.data_path / f"test_256/{x}.jpg" for x in test.StudyInstanceUID.values
]
test_image_paths[:5]
train_image_paths = [
constants.data_path / f"train_256/{x}.jpg" for x in train.StudyInstanceUID.values
]
train_image_paths[:5]
# ## Predict on training set
checkpoint_path = Path('../models/arch=resnest14d_sz=128_fold=0.ckpt')
pretrained_model = learner.ImageClassifier.load_from_checkpoint(
checkpoint_path, in_channels=1, num_classes=11
)
pretrained_model.freeze()
list(pretrained_model.parameters())[0][0]
list(pretrained_model.model.parameters())[0][0]
# +
hparams = argparse.Namespace(
arch='resnest14d',
sz=128,
test_data=Path('../data/train_256'),
aug='baseline'
)
train_aug, valid_aug, test_aug = augmentations.augmentations_factory(hparams)
dm = data.ImageDataModule(
batch_size=256,
test_image_paths=train_image_paths,
test_augmentations=test_aug,
)
dm.setup()
outs = list(pretrained_model.predict_proba(dm.test_dataloader))
preds = np.vstack(outs)
train_preds = copy.copy(train)
train_preds[constants.target_cols] = preds
# -
train_preds[constants.target_cols].mean()
train[constants.target_cols].mean()
from sklearn.metrics import roc_auc_score
roc_auc_score(
y_true=train[constants.target_cols].values,
y_score=train_preds[constants.target_cols].values,
average='macro'
)
# ## Predict on test set
# +
hparams = argparse.Namespace(
arch='resnest14d',
sz=128,
test_data=Path('../data/test_256'),
aug='baseline'
)
train_aug, valid_aug, test_aug = augmentations.augmentations_factory(hparams)
dm = data.ImageDataModule(
batch_size=256,
test_image_paths=test_image_paths,
test_augmentations=test_aug,
)
dm.setup()
outs = list(pretrained_model.predict_proba(dm.test_dataloader))
preds = np.vstack(outs)
test_preds = copy.copy(test)
test_preds[constants.target_cols] = preds
test_preds[constants.target_cols].mean()
# -
# ## Train target distribution
train[constants.target_cols].mean()
# ## Replicate best scores for each fold
# +
train_preds = copy.copy(train)
ARCH = 'rexnet_200'
SZ = 512
SZ_INPUT = SZ * 2
BS = 16
for fold in range(5):
checkpoint_path = Path(f'../models/arch={ARCH}_sz={SZ}_fold={fold}.ckpt')
pretrained_model = learner.ImageClassifier.load_from_checkpoint(
checkpoint_path, in_channels=1, num_classes=11
)
pretrained_model.freeze()
hparams = argparse.Namespace(
arch=ARCH,
sz=SZ,
test_data=Path(f'../data/train_{SZ_INPUT}'),
aug='baseline'
)
train_aug, valid_aug, test_aug = augmentations.augmentations_factory(hparams)
valid_image_paths = [
constants.data_path / f"train_{SZ_INPUT}/{x}.jpg" for x in train[train.kfold==fold].StudyInstanceUID.values
]
valid_image_paths[:5]
dm = data.ImageDataModule(
batch_size=BS,
test_image_paths=valid_image_paths,
test_augmentations=valid_aug,
)
dm.setup()
outs = list(pretrained_model.predict_proba(dm.test_dataloader))
preds = np.vstack(outs)
break
# -
preds.shape
from sklearn.metrics import roc_auc_score

# Macro-averaged ROC AUC for the held-out fold scored above.
# (The notebook previously ran this identical cell twice back-to-back;
# the duplicate has been removed.)
roc_auc_score(
    y_true=train[train.kfold == fold][constants.target_cols].values,
    y_score=preds,
    average='macro'
)
pretrained_model.__dict__
| nbs/predict.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# %matplotlib inline
#
# # Basic Usage of DirtyDF with Stainers
#
# This page shows some basic examples of using DirtyDF, and applying stainers to transform them. We recommend you go through the
# Basic Usage of Stainers (no DirtyDF) example first.
#
import pandas as pd
import numpy as np
from ddf.stainer import ShuffleStainer, InflectionStainer, RowDuplicateStainer
from ddf.DirtyDF import DirtyDF
# ## Single Stainer Example
#
#
# For the first example, let us once again use the basic dataset containing only 6 rows and 2 columns,
# an integer ID and an animal class.
#
#
# Toy dataset: six animals with an integer id and an animal-class label.
animal = pd.DataFrame(
    {'id': [0, 1, 2, 3, 4, 5],
     'class': ['Cat', 'Dog', 'Rabbit', 'Cat', 'Cat', 'Dog']}
)
# Let us convert the pandas dataframe into a DirtyDF object. We specify a seed for the numpy random generator. This generator will
# be used for the staining.
#
#
animal_ddf = DirtyDF(animal, seed = 123)
# Let us use only 1 stainer: ShuffleStainer, for now.
#
#
shuffle_stainer = ShuffleStainer()
# Instead of calling on the stainer's transform method directly, we now add the stainer into the DirtyDF object, to be used later when
# calling the DDF.run_stainer() method.
#
#
animal_ddf2 = animal_ddf.add_stainers(shuffle_stainer)
# Note that the DDF methods return new DDF objects, and do not change the DDF in-place. This can be verified by checking the current
# stainers stored in a DDF using the .summarise_stainers() method.
#
#
animal_ddf.summarise_stainers() #empty
animal_ddf2.summarise_stainers() #ShuffleStainer present
# We run the stainer by calling the .run_stainer() method.
#
#
animal_ddf3 = animal_ddf2.run_stainer()
# Note that same as before, the above call returns a new DDF object. To view the dataframe content of the DDF object, we can use the
# .get_df() method.
#
#
animal_ddf3.get_df()
# Notice that animal_ddf2 still contains the original df, and contains ShuffleStainer inside, but not yet run.
#
#
animal_ddf2.get_df()
# On the other hand, since ShuffleStainer had already been run to obtain animal_ddf3, we can verify that animal_ddf3 does not contain
# ShuffleStainer anymore.
#
#
animal_ddf3.summarise_stainers() #empty
# We can view the history of stainers that were run to obtain animal_ddf3 (in this case, only the ShuffleStainer's history) by using
# the DDF.print_history() method.
#
#
animal_ddf3.print_history()
# We can also obtain the row and column mappings from the original df to the latest transformed df.
#
#
animal_ddf3.get_map_from_history(index=0, axis=0) #index=0 since there was only 1 stainer used, and axis=0 specifies rows.
animal_ddf3.get_map_from_history(index=0, axis=1) #axis=1 specifies columns. Note that ShuffleStainer doesn't alter columns.
# ## Multiple Stainers Example
#
#
# Now lets get to the beauty of DirtyDF: using multiple stainers for transformation. For this example, we use 3 stainers, namely,
# ShuffleStainer, InflectionStainer, and RowDuplicateStainer.
#
#
shuffle_stainer = ShuffleStainer()
dup_stainer = RowDuplicateStainer(deg = 0.6, max_rep = 3)
inflection_stainer = InflectionStainer(num_format=2, formats=['lowercase', 'uppercase'])
# We work with the same dataset as before. However, note that we have to explicitly convert the 'class' column as 'category'
# type. This is for the InflectionStainer to be able to detect the column as a categorical and automatically be applied onto it.
#
#
animal["class"] = animal["class"].astype("category")
# We can add multiple stainers at a time by passing a list of stainers into the .add_stainers()
# method.
#
#
# +
animal_ddf_mult = DirtyDF(animal).add_stainers([shuffle_stainer, dup_stainer, inflection_stainer])
animal_ddf_mult.summarise_stainers()
# -
# We can now run the stainers one-by-one by sequentially applying the .run_stainer() method.
#
#
# <div class="alert alert-info"><h4>Note</h4><p>Stainers are run in the order that they were inserted in. This order can be altered by using the DDF.reindex_stainer() method,
# or we can also shuffle the order of stainers by using the DDF.shuffle_stainer() method, however do note that not all stainers
# are able to be run in any order (i.e. some stainers may need to come before or after others).</p></div>
#
#
animal_ddf_mult2 = animal_ddf_mult.run_stainer().run_stainer().run_stainer()
# Note that we can also use .run_all_stainers() to run all stainers sequentially at once.
#
#
animal_ddf_mult3 = animal_ddf_mult.run_all_stainers() #does the same as above
animal_ddf_mult3.print_history()
# We can now view the transformed dataframe.
#
#
animal_ddf_mult3.get_df()
| docs/html/_downloads/48e6697783a232fd928fb9fd2a4e4f2c/plot_dirty_df_example.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import collections
import math
import os
import random
import zipfile
import numpy as np
import tensorflow as tf
# +
def read_data(filename):
with zipfile.ZipFile(filename) as f:
data = tf.compat.as_str(f.read(f.namelist()[0])).split()
return data
vocabulary = read_data("text8.zip")
print('Data size', len(vocabulary))
# +
vocabulary_size = 50000
def build_dataset(words, n_words):
    """Build a word-id dataset from raw tokens.

    Keeps the (n_words - 1) most frequent tokens; every other token is
    mapped to id 0, the 'UNK' bucket.

    Args:
        words: iterable of token strings.
        n_words: total vocabulary size, including the 'UNK' bucket.

    Returns:
        data: list of int ids, one per input token.
        count: [word, frequency] pairs; count[0] is ['UNK', n_unknown].
        dictionary: word -> id mapping.
        reversed_dictionary: id -> word mapping.
    """
    count = [['UNK', -1]]
    count.extend(collections.Counter(words).most_common(n_words - 1))
    # ids follow descending frequency order, with 'UNK' fixed at 0
    dictionary = {word: idx for idx, (word, _) in enumerate(count)}
    data = []
    unk_count = 0
    for word in words:
        index = dictionary.get(word)
        if index is None:
            index = 0  # dictionary['UNK']
            unk_count += 1
        data.append(index)
    count[0][1] = unk_count
    reversed_dictionary = {idx: word for word, idx in dictionary.items()}
    return data, count, dictionary, reversed_dictionary
data, count, dictionary, reverse_dictionary = build_dataset(vocabulary,
vocabulary_size)
# +
data_index = 0  # global cursor into `data`; advances across calls
def generate_batch(batch_size, num_skips, skip_window):
    """Generate one skip-gram training batch from the global `data` id list.

    Args:
        batch_size: number of (input, label) pairs produced; must be a
            multiple of num_skips.
        num_skips: labels sampled per center word.
        skip_window: words considered to the left and right of the center.

    Returns:
        batch: int32 array of shape (batch_size,) with center-word ids.
        labels: int32 array of shape (batch_size, 1) with context-word ids.
    """
    global data_index
    assert batch_size % num_skips == 0
    assert num_skips <= 2 * skip_window
    batch = np.ndarray(shape=(batch_size), dtype=np.int32)
    labels = np.ndarray(shape=(batch_size, 1), dtype=np.int32)
    span = 2 * skip_window + 1  # [ skip_window target skip_window ]
    buffer = collections.deque(maxlen=span)
    # prime the sliding window with the first `span` words (wraps at the end)
    for _ in range(span):
        buffer.append(data[data_index])
        data_index = (data_index + 1) % len(data)
    for i in range(batch_size // num_skips):
        target = skip_window  # target label at the center of the buffer
        targets_to_avoid = [skip_window]
        for j in range(num_skips):
            # rejection-sample a context position != center, not yet used
            while target in targets_to_avoid:
                target = random.randint(0, span - 1)
            targets_to_avoid.append(target)
            batch[i * num_skips + j] = buffer[skip_window]
            labels[i * num_skips + j, 0] = buffer[target]
        # slide the window forward one word
        buffer.append(data[data_index])
        data_index = (data_index + 1) % len(data)
    # Backtrack a little bit to avoid skipping words in the end of a batch
    data_index = (data_index + len(data) - span) % len(data)
    return batch, labels
batch, labels = generate_batch(batch_size=8, num_skips=2, skip_window=1)
# +
# Step 4: Build and train a skip-gram model.
batch_size = 128
embedding_size = 128 # Dimension of the embedding vector.
skip_window = 1 # How many words to consider left and right.
num_skips = 2 # How many times to reuse an input to generate a label.
# We pick a random validation set to sample nearest neighbors. Here we limit the
# validation samples to the words that have a low numeric ID, which by
# construction are also the most frequent.
valid_size = 16 # Random set of words to evaluate similarity on.
valid_window = 100 # Only pick dev samples in the head of the distribution.
valid_examples = np.random.choice(valid_window, valid_size, replace=False)
num_sampled = 64 # Number of negative examples to sample.
graph = tf.Graph()
with graph.as_default():
# Input data.
train_inputs = tf.placeholder(tf.int32, shape=[batch_size])
train_labels = tf.placeholder(tf.int32, shape=[batch_size, 1])
valid_dataset = tf.constant(valid_examples, dtype=tf.int32)
# Ops and variables pinned to the CPU because of missing GPU implementation
with tf.device('/cpu:0'):
# Look up embeddings for inputs.
embeddings = tf.Variable(
tf.random_uniform([vocabulary_size, embedding_size], -1.0, 1.0))
embed = tf.nn.embedding_lookup(embeddings, train_inputs)
# Construct the variables for the NCE loss
nce_weights = tf.Variable(
tf.truncated_normal([vocabulary_size, embedding_size],
stddev=1.0 / math.sqrt(embedding_size)))
nce_biases = tf.Variable(tf.zeros([vocabulary_size]))
# Compute the average NCE loss for the batch.
# tf.nce_loss automatically draws a new sample of the negative labels each
# time we evaluate the loss.
loss = tf.reduce_mean(
tf.nn.nce_loss(weights=nce_weights,
biases=nce_biases,
labels=train_labels,
inputs=embed,
num_sampled=num_sampled,
num_classes=vocabulary_size))
# Construct the SGD optimizer using a learning rate of 1.0.
optimizer = tf.train.GradientDescentOptimizer(1.0).minimize(loss)
# Compute the cosine similarity between minibatch examples and all embeddings.
norm = tf.sqrt(tf.reduce_sum(tf.square(embeddings), 1, keep_dims=True))
normalized_embeddings = embeddings / norm
valid_embeddings = tf.nn.embedding_lookup(
normalized_embeddings, valid_dataset)
similarity = tf.matmul(
valid_embeddings, normalized_embeddings, transpose_b=True)
# Add variable initializer.
init = tf.global_variables_initializer()
# +
# Step 5: Begin training.
num_steps = 2000001
with tf.Session(graph=graph) as session:
    # We must initialize all variables before we use them.
    init.run()
    print('Initialized')
    average_loss = 0
    for step in range(num_steps):
        batch_inputs, batch_labels = generate_batch(
            batch_size, num_skips, skip_window)
        feed_dict = {train_inputs: batch_inputs, train_labels: batch_labels}
        # We perform one update step by evaluating the optimizer op (included
        # in the list of returned values for session.run()).
        _, loss_val = session.run([optimizer, loss], feed_dict=feed_dict)
        average_loss += loss_val
        if step % 20000 == 0:
            if step > 0:
                average_loss /= 20000
            # The average loss is an estimate of the loss over the last 20000 batches.
            print('Average loss at step ', step, ': ', average_loss)
            average_loss = 0
        # Periodically print the nearest neighbors of the validation words.
        # Note that this is expensive (~20% slowdown if computed every 500 steps).
        if step % 200000 == 0:
            sim = similarity.eval()
            for i in range(valid_size):
                valid_word = reverse_dictionary[valid_examples[i]]
                top_k = 8  # number of nearest neighbors
                # skip index 0: the most similar word is the word itself
                nearest = (-sim[i, :]).argsort()[1:top_k + 1]
                log_str = 'Nearest to %s:' % valid_word
                for k in range(top_k):
                    close_word = reverse_dictionary[nearest[k]]
                    log_str = '%s %s,' % (log_str, close_word)
                print(log_str)
    final_embeddings = normalized_embeddings.eval()
# +
# Step 6: Visualize the embeddings.
def plot_with_labels(low_dim_embs, labels, filename='tsne.png'):
assert low_dim_embs.shape[0] >= len(labels), 'More labels than embeddings'
plt.figure(figsize=(18, 18)) # in inches
for i, label in enumerate(labels):
x, y = low_dim_embs[i, :]
plt.scatter(x, y)
plt.annotate(label,
xy=(x, y),
xytext=(5, 2),
textcoords='offset points',
ha='right',
va='bottom')
plt.savefig(filename)
try:
# pylint: disable=g-import-not-at-top
from sklearn.manifold import TSNE
import matplotlib.pyplot as plt
tsne = TSNE(perplexity=30, n_components=2, init='pca', n_iter=5000)
plot_only = 500
low_dim_embs = tsne.fit_transform(final_embeddings[:plot_only, :])
labels = [reverse_dictionary[i] for i in range(plot_only)]
plot_with_labels(low_dim_embs, labels)
except ImportError:
print('Please install sklearn, matplotlib, and scipy to show embeddings.')
| word2vec/word2vec_basic.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import pandas as pd
import matplotlib.pyplot as plt
# <https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.read_csv.html>
#
# Parse the whitespace-delimited weather log into named columns.
# FIX: use a raw string for the separator -- '\s+' in a plain string literal
# is an invalid escape sequence (DeprecationWarning in Python 3.6+,
# SyntaxWarning in newer versions).
# NOTE(review): hardcoded absolute path; consider a configurable data dir.
df = pd.read_csv('/home/vik/weather.log', sep=r'\s+', header=None,
                 names=['date', 'time', 'temp', 'hum', 'water'])
df
# <https://stackoverflow.com/questions/50444346/fast-punctuation-removal-with-pandas>
#
# str.replace
# Strip formatting/unit characters so the columns can be parsed as numbers.
df['water'] = df['water'].str.replace('~','').str.replace('°C','')
# NOTE(review): removing ',' assumes it is a stray formatting character --
# if the log uses a decimal comma (e.g. "23,5°C") this silently turns it
# into "235"; confirm against the raw file.
df['temp'] = df['temp'].str.replace(',','').str.replace('°C','')
# https://stackoverflow.com/questions/46544351/removing-character-from-string-in-dataframe
dt = df['date'] + ' ' + df['time']
pd.to_datetime(dt)
type(pd.to_datetime(dt))
# https://datatofish.com/string-to-integer-dataframe/
#
df['temp'] = pd.to_numeric(df['temp'])
df['water'] = pd.to_numeric(df['water'])
df['time'] = dt
# https://pandas.pydata.org/pandas-docs/stable/user_guide/visualization.html#plotting-tools
plt.rcParams["figure.figsize"] = (20,10)
df.plot()
# https://ourcodingclub.github.io/tutorials/pandas-time-series/
#
# https://stackoverflow.com/questions/38197964/pandas-plot-multiple-time-series-dataframe-into-a-single-plot
#
# https://stackoverflow.com/questions/38837421/simple-way-to-plot-time-series-with-real-dates-using-pandas
#
#
fig = df.plot(figsize=(20, 16)).get_figure()
fig.savefig('temp_and_water.png')
plt.plot(df['time'], df['water'])
plt.plot(df['time'], df['temp'])
# plt.rcParams["figure.figsize"] = (20,10)
# https://www.dataquest.io/blog/tutorial-time-series-analysis-with-pandas/
#
# https://www.mikulskibartosz.name/how-to-change-plot-size-in-jupyter-notebook/
| pandas_datamining_logs/02_case_of_ufo/.ipynb_checkpoints/pandas_dz-checkpoint.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Collaboration and Review
#
# We can use `rubicon` to share experiment results with teammates and collaborators by `publishing` experiments.
#
# After we've run a few experiments in the *Iris Classifier* example, let's publish those experiments for sharing with collaborators or reviewers.
# +
from rubicon import Rubicon
root_dir = "./rubicon-root"
rubicon = Rubicon(persistence="filesystem", root_dir=root_dir)
help(rubicon.publish)
# -
catalog_yml = rubicon.publish("Iris Model")
print(catalog_yml)
# **Catalogs** are simply YAML files that the `rubicon` Intake driver can read and convert back to native `rubicon` objects. **The publishing process does not copy or move any data from the source it was originally logged to.** **Catalogs** point to the source data and tell the `rubicon` Intake driver how to read it.
#
# *Access controls should be properly defined on the underlying storage layer such that experimenters and reviewers can read and write as appropriate.*
#
# ___
#
# In the above **catalog**, we published all of the *Iris Model* project's experiments. We can use the experiments' IDs to publish a subset of them. The next one only contains the *Iris Model* project's first experiment.
# +
project = rubicon.get_project("Iris Model")
first_experiment = project.experiments()[0]
catalog_yml = rubicon.publish("Iris Model", experiment_ids=[first_experiment.id])
print(catalog_yml)
# -
# We can also use the experiments' tags to select which ones we want to publish. Let's make a **catalog** with all our successful experiments.
catalog_yml = rubicon.publish("Iris Model", experiment_tags=["success"])
print(catalog_yml)
# At this point, sharing our published experiments is a copy/paste away! We can share the YAML with whoever we want to use it, and they can use `rubicon` to load the referenced experiments. If we'd rather send a file, we can write one by specifying the `output_filepath` parameter.
# +
rubicon.publish("Iris Model", experiment_tags=["success"], output_filepath="./catalog.yml")
# ! ls -l catalog.yml
# ! cat catalog.yml
# -
# ___
#
# Now that we've published experiments to a file ( `./catalog.yml` ), we could have collaborators or reviewers import them! Let's see how:
# +
import intake
catalog_path = "./catalog.yml"
catalog = intake.open_catalog(catalog_path)
list(catalog)
# -
# Above, we listed each of the **sources** within the catalog we loaded. Each **source** represents a project or experiment.
# +
source_name = [
name for name in list(catalog) if "experiment" in name
][0]
source = catalog[source_name]
print(source)
# -
# We can call discover on a source to show its metadata, and read to load the full experiment into memory. Until we call one of those, Intake won't reach out to the underlying storage layer or load the source data. At no point will the source data be physically copied to the local filesystem. read returns native rubicon projects and experiments, so once we've got them we can do anything we normally could.
# +
from pprint import pprint
pprint(source.discover())
experiment = source.read()
print(experiment)
# -
# Let's see how this experiment was trained and how well it performed.
# +
# Inspect the published experiment: its training parameters, the feature
# importances it logged, and its evaluation metrics.
for parameter in experiment.parameters():
    print(parameter.name, parameter.value)
print()
for feature in experiment.features():
    print(feature.name, feature.importance)
print()
for metric in experiment.metrics():
    print(metric.name, metric.value)
# -
# The catalog we've been given to review may only contain experiments the experimenter wanted to highlight, like ones with an accuracy above a certain threshold. We can still load all the experiments they logged to their project because catalogs generated by rubicon.publish contain a source for the parent project as well.
# +
project_source_name = [
name for name in list(catalog) if "project" in name
][0]
source = catalog[project_source_name]
pprint(source.discover())
# +
project = source.read()
experiments = project.experiments()
print(len(experiments), "total experiments")
print(experiments)
# -
# Now we can dive into each experiment like we did the one above.
| notebooks/collaboration-and-review.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Introduction
# ## What is a Brain Atlas or Parcellation?
# A brain atlas/parcellation is a voxel-based labelling of your data into "structural or functional units". In a parcellation scheme each voxel is assigned a numeric (integer) label corresponding to the structural/functional unit that the particular voxel is thought to belong to based on some criteria. You might wonder why someone would simply *average together a bunch of voxels* in a way that would reduce the richness of the data. This boils down to a few problems inherent in functional brain imaging:
#
# 1. Resting state data is noisy, averaging groups of "similar" voxels reduces the effect of random noise effects
# 2. Provide an interpretative framework to functional imaging data. For example one parcellation group might be defined as the Default Mode Network which is thought to be functionally significant. So averaging voxels together belonging to the Default Mode Network provides an average estimate of the Default Mode Network signal. In addition the discovery of the Default Mode Network has yielded important insights into the organizational principles of the brain.
# 3. Limit the number of statistical tests thereby reducing potential Type I errors without resorting to strong statistical correction techniques that might reduce statistical power.
# 4. A simpler way to visualize your data, instead of 40x40x40=6400 data points, you might have 17 or up to 200; this is still significantly less data to deal with!
#
#
# ## Applying a Parcellation to your Data
# Since the parcellation of a brain is defined (currently) by spatial locations, application of an parcellation to fMRI data only concerns the first 3 dimensions; the last dimension (time) is retained. Thus a parcellation assigns every voxel (x,y,z) to a particular parcel ID (an integer).
#
#
# Nilearn supports a large selection of different atlases that can be found [here](http://nilearn.github.io/modules/reference.html#module-nilearn.datasets). For information about how to select which parcellation to use for analysis of your data we refer you to Arslan et al. 2018.
#
# ### Retrieving the Atlas
# For this tutorial we'll be using a set of parcellation from [Yeo et al. 2011](link). This atlas was generated from fMRI data from 1000 healthy control participants.
#
# First we'll load in our packages as usual:
#
# +
from nilearn import datasets
from nilearn import image as nimg
from nilearn import plotting as nplot
#for inline visualization in jupyter notebook
# %matplotlib inline
# -
# To retrieve the Yeo atlas we'll use the `fetch_atlas_*` family of functions provided by nilearn.datasets and download it into a local directory:
#
# Download (or load from a local cache on re-runs) the Yeo 2011 atlas.
parcel_dir = '../resources/rois/'  # destination directory for the atlas files
atlas_yeo_2011 = datasets.fetch_atlas_yeo_2011(parcel_dir)
# The method `datasets.fetch_atlas_yeo_2011()` returns a `dict` object. Examining the keys of the dictionary yields the following:
#
# Inspect which atlas variants (thin/thick x 7/17 networks) are available.
atlas_yeo_2011.keys()
# Each of the values associated with a key in `atlas_yeo_2011` is a `.nii.gz` image which contains a 3D NIFTI volume with a label for a given (x,y,z) voxel. Since these images are 3D volumes (sort of like structural images), we can view them using nilearn's plotting utilities:
#
# +
# Shared display settings for all four atlas variants.
cut_coords = (8, -4, 9)
colorbar = True
cmap = 'Paired'
# Plot each variant with identical settings; the plot title is the variant name.
for variant in ('thin_7', 'thin_17', 'thick_7', 'thick_17'):
    nplot.plot_roi(atlas_yeo_2011[variant], cut_coords=cut_coords, colorbar=colorbar, cmap=cmap, title=variant)
# -
# You'll notice that the colour bar on the right shows the number of labels in each atlas and which colour corresponds to which network
#
# The 7 and 17 network parcellations correspond to the two most stable clustering solutions from the algorithm used by the authors. The thin/thick designation refer to how strict the voxel inclusion is (thick might include white matter/CSF, thin might exclude some regions of grey matter due to partial voluming effects).
#
# For simplicity we'll use the thick_7 variation which includes the following networks:
#
# 1. Visual
# 2. Somatosensory
# 3. Dorsal Attention
# 4. Ventral Attention
# 5. Limbic
# 6. Frontoparietal
# 7. Default
#
# The parcel areas labelled with 0 are background voxels not associated with a particular network.
#
# Use the thick-ROI, 7-network variant for the rest of the tutorial.
atlas_yeo = atlas_yeo_2011['thick_7']
# ## Spatial Separation of Network
# A key feature of the Yeo2011 networks is that they are *spatially distributed*, meaning that the locations of two voxels in the same network need not be part of the same region. However, there could be some cases in which you might want to examine voxels belonging to a network within a particular region. To do this, we can separate parcels belonging to the same network based on spatial continuity. If there is a gap between two sets of voxels belonging to the same parcel group, we can assign new labels to separate them out. Nilearn has a feature to handle this:
#
# +
from nilearn.regions import connected_label_regions
# Split each spatially-disconnected parcel of the same network into its own
# integer label, so every labelled region is spatially contiguous.
region_labels = connected_label_regions(atlas_yeo)
# Show the relabelled atlas on a series of axial (z) slices.
nplot.plot_roi(region_labels,
               cut_coords=(-20, -10, 0, 10, 20, 30, 40, 50, 60, 70),
               display_mode='z',
               colorbar=True,
               cmap='Paired',
               title='Relabeled Yeo Atlas')
# -
# ### Resampling the atlas
# Let's store the separated version of the atlas into a NIFTI file so that we can work with it later:
#
# Persist the relabelled atlas so later notebooks can load it instead of recomputing.
region_labels.to_filename('../resources/rois/yeo_2011/Yeo_JNeurophysiol11_MNI152/relabeled_yeo_atlas.nii.gz')
# #### Resampling Exercise
# Our goal is to match the parcellation atlas dimensions to our functional file so that we can use it to extract the mean time series of each parcel region. Using `Nilearn`'s resampling capabilities match the dimensions of the atlas file to the functional file
#
# First let's pick our functional file. Atlases are typically defined in standard space so we will use the MNI152NLin2009cAsym version of the functional file:
#
# Single-subject resting-state data, already in MNI152NLin2009cAsym standard space
# (the atlas is defined in standard space, so the two must share a space).
func_file = '../data/ds000030/derivatives/fmriprep/sub-10788/func/sub-10788_task-rest_space-MNI152NLin2009cAsym_desc-preproc_bold.nii.gz'
func_img = nimg.load_img(func_file)
# First examine the size of both files, if they match we are done:
print("Size of functional image:", func_img.shape)
print("Size of atlas image:", region_labels.shape)
# Looks like they don't match. To resolve this, we can use <code>nimg.resample_to_img</code> to resize the *atlas image* to match that of the *functional image*. Think about what kind of interpolation we'd like to use. Recall that the atlas contains integer values (i.e 0, 1, 2, 3,...), we *do not want any in-between values!*
#
# Nearest-neighbour interpolation keeps the labels as exact integers;
# a continuous interpolation would invent fractional (meaningless) label values.
resampled_yeo = nimg.resample_to_img(region_labels,
                                     func_img,
                                     interpolation = 'nearest')
# Let's see what the resampled atlas looks like overlayed on a slice of our NifTI file
# Note we're pulling a random timepoint from the fMRI data
# NOTE(review): timepoint 54 is arbitrary rather than random — any single volume
# works as a background image here.
nplot.plot_roi(resampled_yeo, func_img.slicer[:, :, :, 54])
# ### Visualizing ROIs
# <div class=exercise>
# <b>EXERCISE:</b> Visualize ROIs 44 and 46 in the Yeo atlas. We'll be looking at these 2 ROIs in more detail during our analysis
# </div>
# +
roi = 44
# Make a mask for ROI 44
# math_img evaluates the expression voxel-wise: 1 where the label equals 44, else 0.
roi_mask_44 = nimg.math_img('a == 44', a=resampled_yeo)
# Visualize ROI
nplot.plot_roi(roi_mask_44)
# +
roi = 46
# Make a mask for ROI 46
roi_mask_46 = nimg.math_img('a == 46', a=resampled_yeo)
# Visualize ROI
nplot.plot_roi(roi_mask_46)
| code/06_applying-a-parcellation_solutions.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="MC3wRsaTp72U" colab_type="text"
# # Contrastive Explanation Method
# [Alibi MNIST Reference Code](https://github.com/SeldonIO/alibi/blob/524d786c81735ed90da2d2c68851c1145fa1b595/examples/cem_mnist.ipynb)
#
# Copyright 2020, <NAME>, MIT License. Added training options form and auto-encoder options form. Added Alibi installation.
#
# + id="kswjdYmE0qLB" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 1000} outputId="745eb283-07c7-419d-bbc8-cd8631412d97"
#@title Install Alibi
# Install alibi on first run (e.g. on Colab); skip if it is already importable.
try:
    import alibi
except:
    # NOTE: "# !" is jupytext's encoding of the notebook shell command "!pip install alibi".
    # !pip install alibi
# + id="fjZG7JJlc6fc" colab_type="code" colab={}
#@title Import modules
import tensorflow as tf
tf.logging.set_verbosity(tf.logging.ERROR) # suppress deprecation messages
from tensorflow.keras import backend as K
from tensorflow.keras.layers import Conv2D, Dense, Dropout, Flatten, MaxPooling2D, Input, UpSampling2D
from tensorflow.keras.models import Model, load_model
from tensorflow.keras.utils import to_categorical
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
import os
from time import time
from alibi.explainers import CEM
# + id="U5aDORjneqaq" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 316} outputId="ba44d0b5-de90-40d1-b873-36640244d8f8"
#@title Load and prepare MNIST Data
# Download MNIST (28x28 grayscale digits); keras caches it locally after the first call.
(x_train, y_train), (x_test, y_test) = tf.keras.datasets.mnist.load_data()
print('x_train shape:', x_train.shape, 'y_train shape:', y_train.shape)
plt.gray()
# Preview the digit that will later be explained (test index 15).
plt.imshow(x_test[15]);
# + id="oM4_K-DMjkl-" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 1000} outputId="9355847b-ccd0-47ee-f4f2-1588c930a795"
#@title Preparing data: scaling the data
# Rescale pixel intensities from [0, 255] to [0, 1].
x_train = x_train.astype('float32') / 255
x_test = x_test.astype('float32') / 255
# NOTE(review): this prints a full 28x28 array; a summary (shape/min/max) would be easier to read.
print(x_test[1])
# + id="tqC3ByAsmY5m" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 51} outputId="8eebfef6-50e1-434c-8d0e-2f6de404f266"
#@title Preparing data: shaping the data
print("Initial Shape",x_test.shape)
# Append a trailing channel axis: (n, 28, 28) -> (n, 28, 28, 1), as Conv2D expects.
x_train = np.reshape(x_train, x_train.shape + (1,))
x_test = np.reshape(x_test, x_test.shape + (1,))
print('x_train shape:', x_train.shape, 'x_test shape:', x_test.shape)
# + id="IE5uL7p-itjS" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="59014df2-cf48-47df-f736-4a17e78c58d2"
#@title Preparing data: categorizing the data
# One-hot encode the integer labels into 10-dimensional vectors.
y_train = to_categorical(y_train)
y_test = to_categorical(y_test)
print('y_train shape:', y_train.shape, 'y_test shape:', y_test.shape)
# Shift the inputs into [-0.5, 0.5]; the background value (-0.5) is the least
# informative pixel value, which CEM exploits via no_info_val further below.
xmin, xmax = -.5, .5
x_train = ((x_train - x_train.min()) / (x_train.max() - x_train.min())) * (xmax - xmin) + xmin
x_test = ((x_test - x_test.min()) / (x_test.max() - x_test.min())) * (xmax - xmin) + xmin
# + id="AHTKZ986xZZu" colab_type="code" colab={}
#@title Create and train CNN model
def cnn_model():
    """Build and compile the MNIST classifier.

    Architecture: two conv -> max-pool -> dropout stages, then a dense
    hidden layer and a softmax over the 10 digit classes. Compiled with
    categorical cross-entropy and Adam.
    """
    inputs = Input(shape=(28, 28, 1))
    # Feature extraction: conv/pool/dropout, twice.
    h = Conv2D(filters=64, kernel_size=2, padding='same', activation='relu')(inputs)
    h = MaxPooling2D(pool_size=2)(h)
    h = Dropout(0.3)(h)
    h = Conv2D(filters=32, kernel_size=2, padding='same', activation='relu')(h)
    h = MaxPooling2D(pool_size=2)(h)
    h = Dropout(0.3)(h)
    # Classification head.
    h = Flatten()(h)
    h = Dense(256, activation='relu')(h)
    h = Dropout(0.5)(h)
    outputs = Dense(10, activation='softmax')(h)
    classifier = Model(inputs=inputs, outputs=outputs)
    classifier.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
    return classifier
# + id="87u2995Y_bf2" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 527} outputId="0bb4e2b4-b9a1-4959-db10-abf91fe02235"
train_cnn = 'yes' #@param ["yes","no"]
# Retrain only when requested; the saved .h5 is reloaded in the next cell.
if train_cnn=="yes":
    cnn = cnn_model()
    cnn.summary()
    cnn.fit(x_train, y_train, batch_size=64, epochs=3, verbose=0)
    cnn.save('mnist_cnn.h5')
# + id="paAy-Ej8yjKV" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 544} outputId="7e0f3b6c-b45d-40c1-e6b5-d004df428da6"
#@title Load and test accuracy on test dataset
# NOTE(review): '/content/mnist_cnn.h5' is a Colab-specific absolute path.
cnn = load_model('/content/mnist_cnn.h5')
cnn.summary()
score = cnn.evaluate(x_test, y_test, verbose=0)
print('Test accuracy: ', score[1])
# + id="huwM2cfj69Uz" colab_type="code" colab={}
#@title Define and train autoencoder
def ae_model():
    """Build and compile a small convolutional autoencoder for 28x28x1 images.

    The encoder compresses the input to a single-channel 14x14 code; the
    decoder mirrors it back to the input resolution. Compiled with an MSE
    reconstruction loss and Adam.
    """
    img = Input(shape=(28, 28, 1))
    # Encoder: two 16-filter convs, one downsampling step, 1-channel code.
    h = Conv2D(16, (3, 3), activation='relu', padding='same')(img)
    h = Conv2D(16, (3, 3), activation='relu', padding='same')(h)
    h = MaxPooling2D((2, 2), padding='same')(h)
    code = Conv2D(1, (3, 3), activation=None, padding='same')(h)
    # Decoder: mirror the encoder, upsampling back to 28x28.
    h = Conv2D(16, (3, 3), activation='relu', padding='same')(code)
    h = UpSampling2D((2, 2))(h)
    h = Conv2D(16, (3, 3), activation='relu', padding='same')(h)
    reconstruction = Conv2D(1, (3, 3), activation=None, padding='same')(h)
    ae_net = Model(img, reconstruction)
    ae_net.compile(optimizer='adam', loss='mse')
    return ae_net
# + id="DHC3QgGT7Dum" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 459} outputId="2dd1e8cc-d04d-49b5-d16c-c6f4f29c96fa"
train_auto_encoder = 'yes' #@param ["yes","no"]
# Train the reconstruction autoencoder used as CEM's optional regulariser.
if train_auto_encoder=="yes":
    ae = ae_model()
    ae.summary()
    ae.fit(x_train, x_train, batch_size=128, epochs=4, validation_data=(x_test, x_test), verbose=0)
    ae.save('mnist_ae.h5', save_format='h5')
# + id="xv3CzfEb7KsN" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 694} outputId="e2945769-3921-495f-a86c-02920e209ef9"
#@title Compare original with decoded images
# NOTE(review): '/content/mnist_ae.h5' is a Colab-specific absolute path.
ae = load_model('/content/mnist_ae.h5')
ae.summary()
decoded_imgs = ae.predict(x_test)
# Plot n originals (top row) above their reconstructions (bottom row).
n = 5
plt.figure(figsize=(20, 4))
for i in range(1, n+1):
    # display original
    ax = plt.subplot(2, n, i)
    plt.imshow(x_test[i].reshape(28, 28))
    ax.get_xaxis().set_visible(False)
    ax.get_yaxis().set_visible(False)
    # display reconstruction
    ax = plt.subplot(2, n, i + n)
    plt.imshow(decoded_imgs[i].reshape(28, 28))
    ax.get_xaxis().set_visible(False)
    ax.get_yaxis().set_visible(False)
plt.show()
# + id="NZDIm-2_16_1" colab_type="code" colab={}
#@title Generate contrastive explanation with pertinent negative
#Explained instance
idx = 15
X = x_test[idx].reshape((1,) + x_test[idx].shape)
# + id="uEvkauXp2HoQ" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 265} outputId="f4f8af00-a3d1-4904-96f4-92537b42845a"
plt.imshow(X.reshape(28, 28));
# + id="-gWVd3UH2Riu" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="fef92d4f-0c85-4ad8-d9e1-6e8c78bb3a33"
#@title Model prediction
cnn.predict(X).argmax(), cnn.predict(X).max()
# + id="elIuLkcU2dyZ" colab_type="code" colab={}
#@title CEM parameters
mode = 'PN' # 'PN' (pertinent negative) or 'PP' (pertinent positive)
shape = (1,) + x_train.shape[1:] # instance shape
kappa = 0. # minimum difference needed between the prediction probability for the perturbed instance on the
# class predicted by the original instance and the max probability on the other classes
# in order for the first loss term to be minimized
beta = .1 # weight of the L1 loss term
gamma = 100 # weight of the optional auto-encoder loss term
c_init = 1. # initial weight c of the loss term encouraging to predict a different class (PN) or
# the same class (PP) for the perturbed instance compared to the original instance to be explained
c_steps = 10 # nb of updates for c
max_iterations = 1000 # nb of iterations per value of c
feature_range = (x_train.min(),x_train.max()) # feature range for the perturbed instance
clip = (-1000.,1000.) # gradient clipping
lr = 1e-2 # initial learning rate
no_info_val = -1. # a value, float or feature-wise, which can be seen as containing no info to make a prediction
# perturbations towards this value means removing features, and away means adding features
# for our MNIST images, the background (-0.5) is the least informative,
# so positive/negative perturbations imply adding/removing features
# + id="MM2hQ-ef2j9s" colab_type="code" colab={}
#@title initialize CEM explainer and explain instance
cem = CEM(cnn, mode, shape, kappa=kappa, beta=beta, feature_range=feature_range,
gamma=gamma, ae_model=ae, max_iterations=max_iterations,
c_init=c_init, c_steps=c_steps, learning_rate_init=lr, clip=clip, no_info_val=no_info_val)
explanation = cem.explain(X)
# + id="qi4hauCb2qaI" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 282} outputId="e8803421-3746-4618-da6c-5c12647bad9b"
#@title Pertinent negative
print('Pertinent negative prediction: {}'.format(explanation.PN_pred))
plt.imshow(explanation.PN.reshape(28, 28));
# + id="kUeUvz_x2xxE" colab_type="code" colab={}
#@title Generate pertinent positive
mode = 'PP'
# + id="giDYPtYB26xe" colab_type="code" colab={}
#@title initialize CEM explainer and explain instance
cem = CEM(cnn, mode, shape, kappa=kappa, beta=beta, feature_range=feature_range,
gamma=gamma, ae_model=ae, max_iterations=max_iterations,
c_init=c_init, c_steps=c_steps, learning_rate_init=lr, clip=clip, no_info_val=no_info_val)
explanation = cem.explain(X)
# + id="hnz-m3OO3CAo" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 282} outputId="f8e4259f-f60d-4ed9-8b67-ab75893642df"
#@title Pertinent positive
print('Pertinent positive prediction: {}'.format(explanation.PP_pred))
plt.imshow(explanation.PP.reshape(28, 28));
| Chapter10/CEM.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 2
# language: python
# name: python2
# ---
# +
# preliminaries
import sys,os,time,cv2,json
import numpy as np
import matplotlib.pyplot as plt
import scipy.io as io
import ops
# for video file generation
from utils import imread_to_rgb, crop_img
# Dataset locations and frame-extraction settings.
DB_PATH = '/home/jhchoi/datasets2/friends/'  # root of the extracted per-episode frame images
MD_PATH = '../../anno/'  # per-episode annotation JSON files
window_size = 1  # number of consecutive frames saved per annotated face
crop_size = ops.img_sz  # side length (px) of the saved square face crops
# +
def hmsf_to_fnum(hmsf, fps):
    """Convert an 'HH:MM:SS;FF' timecode string to an absolute frame number.

    hmsf: timecode like '01:02:03;4' (hours:minutes:seconds;frames).
    fps:  frames per second of the video.
    Returns total elapsed frames as an int.
    """
    timecode, frame = hmsf.split(';')
    hours, minutes, seconds = (int(part) for part in timecode.split(':'))
    total_seconds = hours * 60 * 60 + minutes * 60 + seconds
    return total_seconds * fps + int(frame)
def emo_char_idx(emo):
    """Map an emotion label string to its class index.

    0=angry, 1=disgust, 2=fear, 3=happy, 4=sad, 5=surprise, 6=neutral.
    Both adjective and noun forms are accepted (e.g. 'angry'/'anger').

    Raises:
        ValueError: for an unknown label. The original code's final branch
        was a bare string expression ('error') with no return/raise, so
        unknown labels silently returned None and crashed far from the
        cause when the result was later used as an index.
    """
    mapping = {
        'angry': 0, 'anger': 0,
        'disgust': 1,
        'fear': 2,
        'happy': 3, 'happiness': 3,
        'sad': 4, 'sadness': 4,
        'surprise': 5,
        'neutral': 6,
    }
    try:
        return mapping[emo]
    except KeyError:
        raise ValueError('unknown emotion label: %r' % (emo,))
def coor_change(coor):
    """Parse an annotation coordinate that is either '12' or '12 34 ...'.

    Returns the first space-separated token as an int (some annotations
    contain extra values after the coordinate).
    """
    parts = coor.split(' ')
    return int(parts[0]) if len(parts) > 1 else int(coor)
# +
# # remove all files
# epi_list = ['ep01', 'ep02', 'ep03', 'ep04', 'ep05', 'ep06', 'ep07', 'ep08', 'ep09', 'ep10']
# for epdir in epi_list:
# flist = os.listdir(DB_PATH+'/'+epdir+'_p')
# for fname in flist:
# os.remove(DB_PATH+'/'+epdir+'_p/'+fname)
# -
# +
# Walk every episode's annotation JSON, crop each annotated face out of the
# corresponding frame image, save the crop to disk, and record (image path,
# emotion index) pairs in val_dict keyed by emotion class.
epi_list = ['ep01', 'ep02', 'ep03', 'ep04', 'ep05', 'ep06', 'ep07', 'ep08', 'ep09', 'ep10']
val_dict = dict()  # val_dict[emotion_idx][sample_counter] -> {'img': path, 'emo': idx}
emo_stat = list()  # flat list of all emotion indices seen (class distribution)
numl_all = np.zeros(7)  # per-class sample counters
for i in range(7):
    val_dict[i] = dict()
# for epi in epi_list:
#     val_dict[i][epi] = dict()
for epi_i in range(len(epi_list)):
    print '\n'+str(epi_i)
    # open
    with open(MD_PATH+'s01_'+epi_list[epi_i]+'_tag2_visual_Final_180809.json') as md:
        epi_md = json.load(md)
    epi_md = epi_md['visual_results']
    # every period
    for i in range(len(epi_md)):
        sys.stdout.write("\r"+str(i+1)+'/'+str(len(epi_md)))
        # per num
        pnum = int(epi_md[i]['period_num'])
        # start-end
        stime = hmsf_to_fnum(epi_md[i]['start_time'], 24)
        etime = hmsf_to_fnum(epi_md[i]['end_time'], 24)
        # img: annotation image names come in two formats ('Fr_NNNNN' or 'xNNNNN')
        imname = epi_md[i]['image']
        if imname[0:2] == 'Fr':
            pfnum = int(epi_md[i]['image'].split('.')[0].split('_')[1])
        else:
            pfnum = int(epi_md[i]['image'].split('.')[0][1:])
        # person
        pid_md = epi_md[i]['person'][0]
        # for every person
        for char in pid_md.keys():
            emo = pid_md[char][0]['emotion'].lower()
            face_bb = pid_md[char][0]['face_rect']
            # skip unannotated emotions / missing bounding boxes
            if emo == 'none' or face_bb['max_x'] == 'none':
                continue
            # face xy: [min_x, min_y, max_x, max_y]
            face_bb = [coor_change(face_bb['min_x']), coor_change(face_bb['min_y']), coor_change(face_bb['max_x']), coor_change(face_bb['max_y'])]
            # skip degenerate (empty or inverted) boxes
            if face_bb[0] >= face_bb[2] or face_bb[1] >= face_bb[3]:
                continue
            # 0=angry, 1=disgust, 2=fear, 3=happy, 4=sad, 5=surprise, 6=neutral
            emo_idx = emo_char_idx(emo); emo_stat.append(emo_idx)
            bat_img_temp = list() #np.zeros([crop_size, crop_size, window_size])
            # NOTE(review): this inner loop reuses `i`, shadowing the period
            # index above; harmless as written (nothing uses the period `i`
            # afterwards within the iteration) but fragile.
            for i in range(window_size):
                pfnum_batch = pfnum-window_size+1+i
                # fimg: load the frame, crop the face, resize to crop_size, save
                frame_img = cv2.imread(DB_PATH+'/'+epi_list[epi_i]+'/'+str('%05d' % pfnum_batch)+'.jpg')
                face_crop = frame_img[face_bb[1]:face_bb[3], face_bb[0]:face_bb[2]]
                #face_resz = (255*(face_crop)).astype(np.uint8)
                face_resz = cv2.resize(face_crop, (crop_size,crop_size))
                savename = epi_list[epi_i]+'_p/'+epi_list[epi_i]+'_'+str('%04d' % pnum)+'_'+char+'_'+str(emo_idx)+'_'+str('%05d' % pfnum_batch)+'.jpg'
                cv2.imwrite(DB_PATH+savename, face_resz)
                bat_img_temp.append(savename)
            # save to dict (only the last saved crop of the window is recorded)
            # NOTE(review): numl is a numpy float, so dict keys are floats — confirm intended.
            numl = numl_all[emo_idx]
            val_dict[emo_idx][numl] = dict()
            val_dict[emo_idx][numl]['img'] = savename
            val_dict[emo_idx][numl]['emo'] = emo_idx
            numl_all[emo_idx] += 1
#             val_dict[emo_idx][epi_list[epi_i]][str(pnum)+'_'+char] = dict()
#             val_dict[emo_idx][epi_list[epi_i]][str(pnum)+'_'+char]['crop'] = bat_img_temp
#             val_dict[emo_idx][epi_list[epi_i]][str(pnum)+'_'+char]['bb'] = face_bb
np.save('../../dicts/friends_valid.npy', val_dict)
# +
# tt = 0
# for i in range(7):
# for j in val_dict[i].keys():
# tt += 1
# print tt
# -
| src/parse_Friends.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# import libraries
import random
import numpy as np
from statistics import median, mean
from collections import Counter
from IPython.display import clear_output # only for jupyter notebook
# import game env
from puzzle import GameGrid
# Create the 2048 game environment and reset it to a fresh starting grid.
env = GameGrid()
env.reset()
# display current state
env.display_state()
# # Generate Random Data
# To check if its working properly
def some_random_games_first():
    """Sanity-check the environment by playing a few games with random moves.

    Renders the grid after every step so the play can be watched; a game
    stops as soon as the environment reports it is over.
    """
    for _ in range(5):  # each iteration is an independent game
        env.reset()
        for _ in range(200):  # cap the number of moves per game
            # Redraw the board in place (clear_output is jupyter-only and slow;
            # comment these two lines out if you don't need to watch the play).
            clear_output()
            env.display_state()
            # Sample a random action supported by the environment.
            action = env.action_space()
            # step returns (observation, reward, done, info).
            observation, reward, done, info = env.step(action)
            if done:
                break
some_random_games_first()
# # Generate and Save Training Data
# +
# Play many games with random actions and keep the (state, action) pairs from
# games whose total reward cleared score_requirement — naive reinforcement by
# selection: good games become supervised training data.
score_requirement = 16 # Save (state, action) pair only if score is higher than score_requirement
initial_games = 10000 # number of games played
goal_steps = 10000 # number of steps in each game
# [OBS, MOVES]
training_data = []
# all rewards:
rewards = []
# just the rewards that met our threshold:
accepted_rewards = []
# list of top scores
accepted_scores = []
# iterate through however many games we want:
for _ in range(initial_games):
    score = 0
    # moves specifically from this environment:
    game_memory = []
    # previous observation that we saw
    prev_observation = []
    # play at most goal_steps moves in this game
    for _ in range(goal_steps):
        # choose a random action ('w'/'s'/'d'/'a' = up/down/right/left)
        action = env.action_space()
        # do it!
        observation, reward, done, info = env.step(action)
        # notice that the observation is returned FROM the action
        # so we'll store the previous observation here, pairing
        # the prev observation to the action we'll take.
        if len(prev_observation) > 0 :
            game_memory.append([prev_observation, action])
        prev_observation = observation
        score+=reward
        if done: break
    # IF our score is higher than our threshold, we'd like to save
    # every move we made
    # NOTE the reinforcement methodology here.
    # all we're doing is reinforcing the score, we're not trying
    # to influence the machine in any way as to HOW that score is
    # reached.
    if score >= score_requirement:
        accepted_rewards.append(score)
        accepted_scores.append(env.highest_score())
        for data in game_memory:
            # Create one hot vector for actions
            # ["'w'", "'s'", "'d'", "'a'"] === [UP, DOWN, RIGHT, LEFT]
            # NOTE(review): if data[1] matched none of these, `output` would
            # carry over from the previous iteration (or raise on the first).
            if data[1] == "'w'":
                output = [1,0,0,0]
            elif data[1] == "'s'":
                output = [0,1,0,0]
            elif data[1] == "'d'":
                output = [0,0,1,0]
            elif data[1] == "'a'":
                output = [0,0,0,1]
            # saving our training data: flattened grid -> one-hot action
            training_data.append([np.array(data[0]).flatten().tolist(), output])
    # reset env to play again
    env.reset()
    # save overall scores
    rewards.append(score)
# just in case you wanted to reference later
training_data_save = np.array(training_data)
np.save('data/saved.npy',training_data_save)
# some stats here, to further illustrate the neural network magic!
print("Training Points", len(training_data))
print('Average accepted score:', mean(accepted_rewards))
print('Median score for accepted rewards:',median(accepted_rewards))
print(Counter(accepted_scores))
# -
| generate_data_random_action.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
# +
# default_exp models.TSiTPlus
# -
# # TSiT & InceptionTSiT
#
# > These are PyTorch implementations created by <NAME> (<EMAIL>) based on ViT (Vision Transformer)
#
# Reference:
#
# <NAME>., <NAME>., <NAME>., <NAME>., <NAME>., <NAME>., ... & <NAME>. (2020).
# An image is worth 16x16 words: Transformers for image recognition at scale. arXiv preprint arXiv:2010.11929.
#
# This implementation is a modified version of Vision Transformer that is part of the great timm library
# (https://github.com/rwightman/pytorch-image-models/blob/72b227dcf57c0c62291673b96bdc06576bb90457/timm/models/vision_transformer.py)
#export
from tsai.imports import *
from tsai.utils import *
from tsai.models.layers import *
from tsai.models.InceptionTimePlus import InceptionBlockPlus
from typing import Callable
# +
# export
class _TSiTEncoder(nn.Module):
    """Stack of transformer encoder layers (multi-head attention + position-wise
    feed-forward), with optional pre-norm ordering and stochastic depth."""
    def __init__(self, d_model, n_heads, n_layers:int=6, attn_dropout:float=0, dropout:float=0, drop_path_rate:float=0.,
                 mlp_ratio:int=1, qkv_bias:bool=True, act:str='reglu', pre_norm:bool=False):
        super().__init__()
        # Per-layer drop-path rates, increasing linearly from 0 to drop_path_rate.
        dpr = [x.item() for x in torch.linspace(0, drop_path_rate, n_layers)]
        self.layers = nn.ModuleList([])
        for i in range(n_layers):
            # Each layer bundle: [attention, its norm, feed-forward, its norm, drop-path].
            self.layers.append(nn.ModuleList([
                MultiheadAttention(d_model, n_heads, dropout=attn_dropout, qkv_bias=qkv_bias), nn.LayerNorm(d_model),
                PositionwiseFeedForward(d_model, dropout=dropout, act=act, mlp_ratio=mlp_ratio), nn.LayerNorm(d_model),
                # NOTE: drop path for stochastic depth, we shall see if this is better than dropout here
                DropPath(dpr[i]) if dpr[i] != 0 else nn.Identity(),
                # nn.Dropout(drop_path_rate) if drop_path_rate != 0 else nn.Identity()
            ]))
        self.pre_norm = pre_norm
        # A final LayerNorm is only applied in the pre-norm configuration.
        self.norm = nn.LayerNorm(d_model) if self.pre_norm else nn.Identity()
    def forward(self, x):
        for i, (mha, attn_norm, pwff, ff_norm, drop_path) in enumerate(self.layers):
            if self.pre_norm:
                # Pre-norm: normalize the sublayer input, then add the residual.
                x = drop_path(mha(attn_norm(x))[0]) + x
                x = drop_path(pwff(ff_norm(x))) + x
            else:
                # Post-norm: residual add first, then normalize the sum.
                x = attn_norm(drop_path(mha(x)[0]) + x)
                x = ff_norm(drop_path(pwff(x)) + x)
        x = self.norm(x)
        return x
class _TSiTBackbone(Module):
    """Feature extractor: optional preprocessor -> conv embedding -> positional
    embedding (+ optional cls token) -> transformer encoder.

    Input:  (bs, c_in, seq_len); output: (bs, d_model, seq_len [+1 if use_token]).
    NOTE(review): d_head and d_ff are accepted but never used here — the FFN
    width is controlled by mlp_ratio instead; confirm whether this is intended.
    """
    def __init__(self, c_in:int, seq_len:int, n_layers:int=6, d_model:int=128, n_heads:int=16, d_head:Optional[int]=None, act:str='reglu',
                 d_ff:int=256, qkv_bias:bool=True, dropout:float=0., attn_dropout:float=0,drop_path_rate:float=0.,
                 mlp_ratio:int=1, pre_norm:bool=False, use_token:bool=True, ks:Optional[int]=None, maxpool:bool=True,
                 preprocessor:Optional[Callable]=None, device=None, verbose:bool=False):
        device = ifnone(device, default_device())
        self.preprocessor = nn.Identity()
        if preprocessor is not None:
            # Probe the preprocessor with a dummy batch to learn its output dims.
            xb = torch.randn(1, c_in, seq_len).to(device)
            ori_c_in, ori_seq_len = c_in, seq_len
            # Accept either an instantiated module or a callable/partial to instantiate.
            if not isinstance(preprocessor, nn.Module): preprocessor = preprocessor(c_in, d_model).to(device)
            else: preprocessor = preprocessor.to(device)
            with torch.no_grad():
                # NOTE Most reliable way of determining output dims is to run forward pass
                training = preprocessor.training
                if training:
                    preprocessor.eval()
                c_in, seq_len = preprocessor(xb).shape[1:]
                preprocessor.train(training)
            pv(f'preprocessor: (?, {ori_c_in}, {ori_seq_len}) --> (?, {c_in}, {seq_len})', verbose=verbose)
            self.preprocessor = preprocessor
        # Embedding: multi-kernel conv stack when ks is given, else a 1x1 conv.
        if ks is not None:
            self.to_embedding = nn.Sequential(MultiConcatConv1d(c_in, d_model, kss=ks, maxpool=maxpool),Transpose(1,2))
        else:
            self.to_embedding = nn.Sequential(Conv1d(c_in, d_model, 1),Transpose(1,2))
        # Learnable positional embedding; one extra position when a cls token is used.
        self.pos_embedding = nn.Parameter(torch.zeros(1, seq_len + use_token, d_model))
        self.cls_token = nn.Parameter(torch.zeros(1, 1, d_model))
        self.use_token = use_token
        self.pos_dropout = nn.Dropout(dropout)
        self.encoder = _TSiTEncoder(d_model, n_heads, n_layers=n_layers, qkv_bias=qkv_bias, attn_dropout=attn_dropout, dropout=dropout,
                                    mlp_ratio=mlp_ratio, drop_path_rate=drop_path_rate, act=act, pre_norm=pre_norm)
    def forward(self, x):
        # apply preprocessor module if exists
        x = self.preprocessor(x)
        # embedding: (bs, c, seq) -> (bs, seq, d_model)
        x = self.to_embedding(x)
        if self.use_token:
            # Prepend the learnable cls token to every sequence in the batch.
            x = torch.cat((self.cls_token.expand(x.shape[0], -1, -1), x), dim=1)
        x = self.pos_dropout(x + self.pos_embedding)
        x = self.encoder(x)
        # Back to channels-first: (bs, d_model, seq[+1]).
        x = x.transpose(1,2)
        return x
# +
#exports
class TSiTPlus(nn.Sequential):
    """Time series transformer model based on ViT (Vision Transformer):
    <NAME>., <NAME>., <NAME>., <NAME>., <NAME>., <NAME>., ... & <NAME>. (2020).
    An image is worth 16x16 words: Transformers for image recognition at scale. arXiv preprint arXiv:2010.11929.
    This implementation is a modified version of Vision Transformer that is part of the great timm library
    (https://github.com/rwightman/pytorch-image-models/blob/72b227dcf57c0c62291673b96bdc06576bb90457/timm/models/vision_transformer.py)
    """
    def __init__(self, c_in:int, c_out:int, seq_len:int, n_layers:int=6, d_model:int=128, n_heads:int=16, d_head:Optional[int]=None, act:str='reglu',
                 d_ff:int=256, dropout:float=0., attn_dropout:float=0, drop_path_rate:float=0., mlp_ratio:int=1,
                 qkv_bias:bool=True, pre_norm:bool=False, use_token:bool=True, fc_dropout:float=0., bn:bool=False, y_range:Optional[tuple]=None,
                 ks:Optional[int]=None, maxpool:bool=True, preprocessor:Optional[Callable]=None, custom_head:Optional[Callable]=None, verbose:bool=False):
        """
        Args:
        =====
        c_in: the number of features (aka variables, dimensions, channels) in the time series dataset.
        c_out: the number of target classes.
        seq_len: number of time steps in the time series.
        n_layers: number of layers (or blocks) in the encoder. Default: 6.
        d_model: total dimension of the model (number of features created by the model). Default: 128 (range(64-512))
        n_heads: parallel attention heads. Default:16 (range(8-16)).
        d_head: size of the learned linear projection of queries, keys and values in the MHA. Usual values: 16-512.
            Default: None -> (d_model/n_heads) = 32.
            NOTE(review): d_head is accepted but not used by the backbone — confirm.
        act: the activation function of intermediate layer, relu, gelu, geglu, reglu.
        d_ff: the dimension of the feedforward network model. Default: 256.
            NOTE(review): d_ff is accepted but not used by the backbone (FFN width is d_model * mlp_ratio) — confirm.
        dropout: dropout applied to to the embedded sequence steps after position embeddings have been added and
            to the mlp sublayer in the encoder.
        attn_dropout: dropout rate applied to the attention sublayer.
        drop_path_rate: stochastic depth rate.
        mlp_ratio: ratio of mlp hidden dim to embedding dim.
        qkv_bias: determines whether bias is applied to the Linear projections of queries, keys and values in the MultiheadAttention
        pre_norm: if True normalization will be applied as the first step in the sublayers. Defaults to False.
        use_token: if True, the output will come from the transformed token. Otherwise a pooling layer will be applied.
        fc_dropout: dropout applied to the final fully connected layer.
        bn: flag that indicates if batchnorm will be applied to the head.
        y_range: range of possible y values (used in regression tasks).
        ks: (Optional) kernel sizes that will be applied to a hybrid embedding.
        maxpool: If true and kernel sizes are passed, maxpool will also be added to the hybrid embedding.
        preprocessor: an optional callable (nn.Conv1d with dilation > 1 or stride > 1 for example) that will be used to preprocess the time series before
            the embedding step. It is useful to extract features or resample the time series.
        custom_head: custom head that will be applied to the network. It must contain all kwargs (pass a partial function)
        Input shape:
            x: bs (batch size) x nvars (aka features, variables, dimensions, channels) x seq_len (aka time steps)
        """
        backbone = _TSiTBackbone(c_in, seq_len, n_layers=n_layers, d_model=d_model, n_heads=n_heads, d_head=d_head, act=act,
                                 d_ff=d_ff, dropout=dropout, attn_dropout=attn_dropout,
                                 drop_path_rate=drop_path_rate, pre_norm=pre_norm, mlp_ratio=mlp_ratio, use_token=use_token,
                                 ks=ks, maxpool=maxpool, preprocessor=preprocessor, verbose=verbose)
        self.head_nf = d_model
        self.c_out = c_out
        self.seq_len = seq_len
        if custom_head:
            head = custom_head(self.head_nf, c_out, self.seq_len) # custom head passed as a partial func with all its kwargs
        else:
            # Default head: take the cls token (or pool), then a linear classifier.
            layers = [TokenLayer(token=use_token)]
            layers += [LinBnDrop(d_model, c_out, bn=bn, p=fc_dropout)]
            if y_range: layers += [SigmoidRange(*y_range)]
            head = nn.Sequential(*layers)
        super().__init__(OrderedDict([('backbone', backbone), ('head', head)]))
# Aliases and preconfigured variants of TSiTPlus.
TSiT = TSiTPlus
# Variants whose inputs first pass through an InceptionBlockPlus feature extractor.
InceptionTSiTPlus = named_partial("InceptionTSiTPlus", TSiTPlus, preprocessor=partial(InceptionBlockPlus, ks=[3,5,7]))
InceptionTSiT = named_partial("InceptionTSiT", TSiTPlus, preprocessor=partial(InceptionBlockPlus, ks=[3,5,7]))
# Variants using the hybrid multi-kernel convolutional embedding (ks=[1,3,5,7]).
ConvTSiT = named_partial("ConvTSiT", TSiTPlus, ks=[1,3,5,7])
ConvTSiTPlus = named_partial("ConvTSiTPlus", TSiTPlus, ks=[1,3,5,7])
# -
# Smoke test: a random batch through TSiTPlus must yield (bs, c_out) outputs.
bs = 16
nvars = 4
seq_len = 50
c_out = 2
xb = torch.rand(bs, nvars, seq_len)
model = TSiTPlus(nvars, c_out, seq_len)
test_eq(model(xb).shape, (bs, c_out))
model
# Same smoke test for the Inception-preprocessed variant.
bs = 16
nvars = 4
seq_len = 50
c_out = 2
xb = torch.rand(bs, nvars, seq_len)
model = InceptionTSiTPlus(nvars, c_out, seq_len)
test_eq(model(xb).shape, (bs, c_out))
# ### Subsampling
#
# It's a known fact that transformers cannot be directly applied to long sequences. To avoid this, we have included a way to subsample the sequence to generate a more manageable input.
from tsai.data.validation import get_splits
from tsai.data.core import get_ts_dls
# Build dataloaders over deliberately long (5000-step) sequences to demo subsampling.
X = np.zeros((10, 3, 5000))
y = np.random.randint(0,2,X.shape[0])
splits = get_splits(y)
dls = get_ts_dls(X, y, splits=splits)
xb, yb = dls.train.one_batch()
xb
# If you try to use TSiTPlus, it's likely you'll get an 'out-of-memory' error.
#
# To avoid this you can subsample the sequence reducing the input's length. This can be done in multiple ways. Here are a few examples:
# Separable convolution (to avoid mixing channels)
preprocessor = Conv1d(xb.shape[1], xb.shape[1], ks=100, stride=50, padding='same', groups=xb.shape[1]).to(default_device())
preprocessor(xb).shape
# Convolution (if you want to mix channels or change number of channels)
preprocessor = Conv1d(xb.shape[1], 2, ks=100, stride=50, padding='same').to(default_device())
preprocessor(xb).shape
# MaxPool
preprocessor = nn.Sequential(Pad1d((0, 50), 0), nn.MaxPool1d(kernel_size=100, stride=50)).to(default_device())
preprocessor(xb).shape
# AvgPool
preprocessor = nn.Sequential(Pad1d((0, 50), 0), nn.AvgPool1d(kernel_size=100, stride=50)).to(default_device())
preprocessor(xb).shape
# Once you decide what type of transform you want to apply, you just need to pass the layer as the preprocessor attribute:
# +
bs = 16
nvars = 4
seq_len = 1000
c_out = 2
d_model = 128
xb = torch.rand(bs, nvars, seq_len)
preprocessor = partial(Conv1d, ks=5, stride=3, padding='same', groups=xb.shape[1])
model = TSiTPlus(nvars, c_out, seq_len, d_model=d_model, preprocessor=preprocessor)
test_eq(model(xb).shape, (bs, c_out))
# -
#hide
from tsai.imports import create_scripts
from tsai.export import get_nb_name
nb_name = get_nb_name()
create_scripts(nb_name);
| nbs/124_models.TSiTPlus.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/kaethejulian/Kaethe-Julian/blob/main/demo1.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + [markdown] id="pq5esAiHiQmo"
# ##<NAME>
# ###BS CPE 1-1
# + colab={"base_uri": "https://localhost:8080/"} id="QNWcGj4FWvI-" outputId="67920435-651a-456f-8a9f-5e7aa4a392b7"
b = "Sally"
print(b)
# b holds a value of type str (a string)
# + colab={"base_uri": "https://localhost:8080/"} id="wO0GWFeKX6Lo" outputId="b9773598-79aa-44ee-ff47-88ca94adf69f"
# Multiple assignment: a, b and c are assigned in one statement.
a, b, c = 0, 1, 2
print(type(b))
print(a)
print(c)
# + colab={"base_uri": "https://localhost:8080/"} id="6Z8FDaIAZGCO" outputId="cf3e415d-4add-403d-a367-8268e6c0aeca"
# A literal with a decimal point produces a float.
a = 4.50
print(type(a))
# + colab={"base_uri": "https://localhost:8080/"} id="TvSo7moaYSiO" outputId="6d34c03b-0cf4-4f42-83e9-f965ee8d9a4c"
# Explicit conversion from int to float.
a = float(4)
print(a)
# + colab={"base_uri": "https://localhost:8080/"} id="qb70StD_YpNw" outputId="9c162be0-81b7-4662-c316-ae1c46f471a2"
b = "sally"
print(type(b))
# + [markdown] id="SFWkqQfoXGCn"
# ###Introduction to Python Programming
# + colab={"base_uri": "https://localhost:8080/"} id="7bvF0hhsZOxn" outputId="b3ebe617-ed95-46dc-cef2-ad0888fbf686"
a = 'sally' # assign a string literal to a
print(a)
# + colab={"base_uri": "https://localhost:8080/"} id="GGWie_jgZZd3" outputId="bc0598cd-195d-47b0-b92c-8c2ecaccd5e4"
# Variable names are case-sensitive: a and A are two different variables.
a = 4
A = 'Sally'
print(a)
print(A)
# + colab={"base_uri": "https://localhost:8080/"} id="4r8znRFiZndl" outputId="b2cbae87-7bb0-481e-c0af-9b85b988ab9f"
# Chained assignment: all three names refer to the same string.
x = y = z = "four"
print(x)
print(y)
print(z)
# + colab={"base_uri": "https://localhost:8080/"} id="nOkv6GuBayWz" outputId="c7577b37-ca9f-499c-c7ed-ed9e9af24895"
# String concatenation with +.
x = "enjoying"
# Fixed typo in the user-facing output: "phyton" -> "Python".
print('Python programming is '+x)
# + colab={"base_uri": "https://localhost:8080/"} id="Th_iAtnXb6KW" outputId="c3e6044f-09eb-4376-8498-deffe7590a23"
# Arithmetic operators.
x = 4
y = 5
print(x+y)
print(x-y)
# + colab={"base_uri": "https://localhost:8080/"} id="QElXocCKctE8" outputId="5c52d96a-ed08-484b-e0a7-d65af651aff2"
# Comparison combined with the logical operator `and`.
(x<y and x==x)
# + colab={"base_uri": "https://localhost:8080/"} id="ZcfJkvGAcwJZ" outputId="75014573-2ae8-439a-ffc8-e7e79ab0dc85"
not(x<y or x==x) # example using the logical operators `not` and `or`
# + colab={"base_uri": "https://localhost:8080/"} id="atTuilIgcJFw" outputId="c63d42d8-8f02-4bc4-e8fc-b841f09a6e6b"
not(x<y and x==x)
| demo1.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + colab={"base_uri": "https://localhost:8080/", "height": 35} id="6RZjVHEXWYgZ" outputId="13b0455d-61ad-4577-f4e8-b831669ce40f"
"""
Name : <NAME>
Date : 1/21/2021
Class : Deep Learning Lab G2
"""
# + colab={"base_uri": "https://localhost:8080/"} id="Fro_J3hJaPaI" outputId="7c8d0370-bf1f-48a3-8a96-0f1efed78d09"
# Change working directory to the mounted Google Drive folder (Colab only).
# %cd "/content/drive/MyDrive/Colab Notebooks/Classroom"
# + colab={"resources": {"http://localhost:8080/nbextensions/google.colab/files.js": {"data": "<KEY>", "ok": true, "headers": [["content-type", "application/javascript"]], "status": 200, "status_text": ""}}, "base_uri": "https://localhost:8080/", "height": 73} id="ve7VVoMSZxMN" outputId="85f3726c-d39f-4a2b-c096-6faf9c007426"
# Interactive file upload widget (Colab only).
from google.colab import files
uploaded = files.upload()
# + id="mUAROX10Z-nS"
# Absolute path to the dataset on the mounted drive.
path_csv_file = r"/content/drive/MyDrive/Colab Notebooks/Classroom/Automobile_data.csv"
# + id="FFpisA3qaoXb"
import pandas as pd
# + colab={"base_uri": "https://localhost:8080/"} id="jkteWjMGaqkd" outputId="97a74a21-524d-470a-f73b-5eb94a1236f2"
# Build a one-column DataFrame from a list of strings.
lst = ['Welcome','Ladies','And','Gentleman']
df = pd.DataFrame(lst)
print(df.head()) # same as df
# + colab={"base_uri": "https://localhost:8080/"} id="aJcP3wjybC5-" outputId="81d99632-2eb9-43a4-fd1f-73f215c4bf0e"
import numpy as np
# Sample exam data with some missing scores (np.nan) and custom row labels.
exam_data = { 'name' : ['Anastasia','Dima','Katherine','James','Emily','Michael','Mathew','Laura','Kevin','Jonas'],
 'score' : [12.5, 9, 16.5, np.nan, 9, 20, 14.5, np.nan, 8, 19],
 'attempts' : [1,3,2,3,2,3,1,1,2,1],
 'qualify' : ['yes','no','yes','no','no','yes','yes','no','no','yes'] }
labels = ['a','b','c','d','e','f','g','h','i','j']
df = pd.DataFrame(exam_data, index = labels)
print(df)
# + colab={"base_uri": "https://localhost:8080/", "height": 197} id="35EqugtDdXOm" outputId="1a4bafc8-5d08-407f-d4f5-6531f8149638"
df.head()
# + colab={"base_uri": "https://localhost:8080/"} id="d8fWp6dSdeka" outputId="7d690152-20eb-434f-d71d-fe1fe4d7668e"
# Replace a single value in the 'name' column.
df['name'] = df['name'].replace('James','John')
print(df)
# + colab={"base_uri": "https://localhost:8080/"} id="GrBsmijmdoGr" outputId="ff8595ab-d888-42f1-d3d3-fe4fa1a6f3b4"
# Find the highest score in each of the three attempts
attm = df.groupby('attempts')
priceDf = attm['score'].max()
priceDf
# + colab={"base_uri": "https://localhost:8080/", "height": 347} id="HQuVu2mgee7E" outputId="4fdabdae-7474-4926-b347-52161ec9aace"
# Sort all values by score column (NaNs go last by default)
scoreDF = df.sort_values(by = ['score'])
scoreDF
# + colab={"base_uri": "https://localhost:8080/", "height": 347} id="HWBhaX3QfBlu" outputId="fd076854-56d5-4ad3-b4e6-b917465c6660"
# pop removes the 'attempts' column from df in place and returns it.
df.pop('attempts')
df
# + colab={"base_uri": "https://localhost:8080/"} id="31tJ6UDyfGLB" outputId="ff2e4363-a884-4b0b-8616-a0389ab8265a"
# Add a new 'color' column (one value per row, aligned by position).
color = ['Red','Blue','Orange','Red','White','White','Blue','Green','Green','Red']
df['color'] = color
# Fixed broken escape: the original printed "]n" literally instead of a newline.
print("\nNew DataFrame after inserting the 'color' column")
print(df)
# + colab={"base_uri": "https://localhost:8080/"} id="1_2JdLwbfpUd" outputId="c94597e8-dcfa-403b-ed8c-3a992dbd18e9"
# Write the current DataFrame out as a tab-separated file.
print('Data from new_file.csv file: ')
df.to_csv('new_file.csv',sep='\t',index = False)
# + id="mkq8tKr5gC-0"
import pandas as pd
df = pd.read_csv(path_csv_file)
# + colab={"base_uri": "https://localhost:8080/", "height": 197} id="A709qJYmgz7s" outputId="9c7e7d2d-bcde-4cab-cea4-8b8e7b1f65cc"
# NOTE(review): head() already returns only 5 rows, so the [:10] slice is a
# no-op here; use df.head(10) to actually show 10 rows.
df.head()[:10]
# + colab={"base_uri": "https://localhost:8080/"} id="iLj7UhEMg3dy" outputId="1bc36904-ee24-45dc-826d-c91133e24e91"
# count total cars per company
df['company'].value_counts()
# + colab={"base_uri": "https://localhost:8080/"} id="8exUReH8hGWq" outputId="1c6e1596-c608-4e9d-e0ec-669de860a249"
car_Manufacturers = df.groupby('company')
mileageDf = car_Manufacturers['average-mileage'].mean() # mean average-mileage per company
mileageDf
# + colab={"base_uri": "https://localhost:8080/"} id="4bgVgYSchfLd" outputId="c81f029d-8c59-4442-a0ed-d674aa647f6d"
# Highest price per company.
group_by_company = df.groupby('company')
highest_price = group_by_company['price'].max()
highest_price
# + colab={"base_uri": "https://localhost:8080/", "height": 1000} id="khNLYl6gh0-L" outputId="e5ab02b3-4442-42f5-98f6-430310c6a838"
# removing duplicate cars if any
# NOTE(review): with inplace=False and no assignment this only *displays* the
# deduplicated frame; df itself is unchanged.
df.drop_duplicates(subset = None,keep = 'first',inplace=False)
# + colab={"base_uri": "https://localhost:8080/", "height": 406} id="4VQAl3K1ikYT" outputId="7ef51fd1-41f7-4e4a-f853-a4aaad1ae1fa"
df.sort_values(['price'],axis=0,ascending=True) # sorting in ascending order
| Experiment 2/code.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] pycharm={"name": "#%% md\n"}
# # Bootstrapping the project
#
# To run cogito, you will need to install and setup some software on your
# system.
#
# - zsh: Because it's cooler than bash
# - pyenv: To manage python environments
# - jupyter: To install jupyterlab and notebook itself
# - kotlin-jupyter: A kotlin kernel for jupyter
#
# ## Installing pyenv
#
# pyenv is a tool that lets you not only install different python virtual environments,
# but also different python versions. This is the best way to manage both.
#
# Before running the script block here, you will want to review the pyenv installer's prerequisites for your platform.
# + pycharm={"is_executing": true, "name": "#%%\n"}
# !curl https://pyenv.run | bash
# !exec $SHELL
# !pyenv update
| notebooks/setup.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: conda_python3
# language: python
# name: conda_python3
# ---
# + [markdown] id="MsS2LPkq3Q7P" colab_type="text"
# # Bring your own data to create a music genre model for AWS DeepComposer
# + [markdown] id="rlDCo8UR3Q7Q" colab_type="text"
# ---
#
# This notebook is for the <b>Bring your own data to create a music genre model for AWS DeepComposer</b> blog and is associated with the <b> AWS DeepComposer: Train it Again Maestro </b> web series on the <b>A Cloud Guru</b> platform.
#
# This covers preparing your data to train a custom music genre model for AWS DeepComposer.
#
# ---
# + id="thSw3RfP3Q7Q" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 796} outputId="14c1d478-988c-4fad-b2f5-eae6a1fb7b29"
# Create the environment
#conda update --all --y
# !pip install numpy==1.16.4
# !pip install pretty_midi
# !pip install pypianoroll
# + id="nTvvIaNh3Q7U" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 460} outputId="5c75ffa6-aa09-40df-df77-49a5d4431313"
# IMPORTS
import os
import numpy as np
from numpy import save
import pypianoroll
from pypianoroll import Multitrack, Track
from utils import display_utils
import matplotlib.pyplot as plt
# %matplotlib inline
# + id="-apkaqKH3Q7W" colab_type="code" colab={}
# Root folder for all experiment artifacts.
root_dir = './2Experiments'
# Directory to save checkpoints
model_dir = os.path.join(root_dir,'2Reggae') # JSP: 229, Bach: 19199
# Directory to save pianorolls during training
train_dir = os.path.join(model_dir, 'train')
# Location of the original MIDI files used for training; place your MIDI files here
reggae_midi_location = './reggae_midi/'
# Directory to save eval data
dataset_eval_dir = './dataset/'
# + [markdown] id="rjQOl8523Q7Z" colab_type="text"
# # Prepare Training Data (MIDI files -----> .npy)
# + [markdown] id="AHc1dPVg3Q7a" colab_type="text"
# ---
#
# This section of code demonstrates the process of converting MIDI files to the needed format for training, which is a .npy file. The final shape on the .npy file should be (x, 32, 128, 4), which represents (number of samples, number of time steps per sample, pitch range, instruments).
#
# ---
# + [markdown] id="cYfJiE9o3Q7a" colab_type="text"
# <img src="https://github.com/ACloudGuru-Resources/Series_DeepComposer/blob/master/aws_blog_sample/images/training-image.png?raw=1" alt="multitrack object" width="600">
# + id="pwaOkxe73Q7b" colab_type="code" colab={}
#helper function that stores the reshaped arrays, per instrument
def store_track(track, collection):
    """
    Route a track into `collection` based on its MIDI program number.

    The program number is the General MIDI identifier for the instrument
    (https://en.wikipedia.org/wiki/General_MIDI). Four instrument families
    are kept — Piano, Organ, Bass and Guitar — anything else is skipped
    with a console message.

    Parameters
    ----------
    track:
        A pypianoroll ``Track``-like object exposing ``program`` and ``name``.
    collection:
        Either a dict with 'Piano'/'Organ'/'Bass'/'Guitar' list values (the
        track is appended under its family key), or a plain list (the track
        is appended if it belongs to any of the four families).

    Returns
    -------
    The same ``collection`` object, possibly with ``track`` appended.
    """
    # NOTE(review): GM programs are 1-128 in the spec but 0-127 in most MIDI
    # libraries; confirm these 1-based ranges match pypianoroll's numbering.
    program_families = {
        'Piano': range(1, 9),    # programs 1-8
        'Organ': range(17, 25),  # programs 17-24
        'Bass': range(33, 41),   # programs 33-40
        'Guitar': range(25, 33), # programs 25-32
    }
    # The ranges are disjoint, so at most one family matches.
    family = next((name for name, programs in program_families.items()
                   if track.program in programs), None)
    if family is None:
        print("Skipping this instrument------------------->", track.name)
    elif isinstance(collection, dict):
        collection[family].append(track)
    else:  # collection is a flat list of chosen tracks
        collection.append(track)
    return collection
# + id="XSqJaIE53Q7d" colab_type="code" colab={}
#helper function that returns the pianorolls merged to 4 tracks for 4 chosen instruments
def get_merged(music_tracks, filename):
    """
    Merge a Multitrack's pianorolls into one pianoroll per instrument family.

    Parameters
    ----------
    music_tracks:
        A pypianoroll ``Multitrack``; its tracks are filtered to the four
        instrument families handled by ``store_track``.
    filename:
        Name of the MIDI file being processed.  NOTE(review): unused inside
        the function body; kept only for the caller's context.

    Returns
    -------
    np.ndarray reshaped to (-1, 32, 128, 4):
        (samples, time steps, pitches, instrument families).
    """
    chosen_tracks = []
    #choose the tracks from the Multitrack object
    for index, track in enumerate(music_tracks.tracks):
        chosen_tracks = store_track(track, chosen_tracks)
    #dictionary to hold reshaped pianorolls for 4 chosen instruments
    reshaped_piano_roll_dict = {'Piano': [], 'Organ': [], 'Bass': [], 'Guitar': []}
    #loop thru chosen tracks
    for index, track in enumerate(chosen_tracks):
        # Side effect: plots every chosen track's pianoroll while processing.
        fig, ax = track.plot()
        plt.show()
        try:
            #reshape pianoroll to 2 bar (i.e. 32 time step) chunks
            track.pianoroll = track.pianoroll.reshape( -1, 32, 128)
            #store reshaped pianoroll per instrument
            reshaped_piano_roll_dict = store_track(track, reshaped_piano_roll_dict)
        except Exception as e:
            print("ERROR!!!!!----> Skipping track # ", index, " with error ", e)
    #will hold all merged instrument tracks
    merge_piano_roll_list = []
    for instrument in reshaped_piano_roll_dict:
        try:
            merged_pianorolls = np.empty(shape=(0,32,128))
            #concatenate/stack all tracks for a single instrument
            if len(reshaped_piano_roll_dict[instrument]) > 0:
                if reshaped_piano_roll_dict[instrument]:
                    # NOTE(review): after stacking along the last axis, the
                    # [:, :, :, 0] slice keeps only the FIRST track of this
                    # instrument family — confirm that discarding the others
                    # is intended.
                    merged_pianorolls = np.stack([track.pianoroll for track in reshaped_piano_roll_dict[instrument]], -1)
                    merged_pianorolls = merged_pianorolls[:, :, :, 0]
                    # NOTE(review): np.any over axis 0 collapses all 2-bar
                    # chunks into a single boolean (32, 128) pianoroll, so the
                    # function yields one sample per file — confirm intended.
                    merged_piano_rolls = np.any(merged_pianorolls, axis=0)
                    merge_piano_roll_list.append(merged_piano_rolls)
        except Exception as e:
            print("ERROR!!!!!----> Cannot concatenate/merge track for instrument", instrument, " with error ", e)
            continue;
    merge_piano_roll_list = np.stack([track for track in merge_piano_roll_list], -1)
    return merge_piano_roll_list.reshape(-1,32,128,4)
# + [markdown] id="RoBTr2u-3Q7f" colab_type="text"
# <img src="https://github.com/ACloudGuru-Resources/Series_DeepComposer/blob/master/aws_blog_sample/images/multi_track_object.png?raw=1" alt="multitrack object" width="600">
# + [markdown] id="raVKN3MZ3Q7g" colab_type="text"
# <img src="https://github.com/ACloudGuru-Resources/Series_DeepComposer/blob/master/aws_blog_sample/images/track_object.png?raw=1" alt="track object" width="600">
# + id="pHkw0ueV3Q7g" colab_type="code" colab={} outputId="c9beca52-11d0-4844-abef-9007293abdd8"
#holds final reshaped tracks that will be saved to training .npy file
track_list = np.empty(shape=(0,32,128,4))
#init with beat resolution of 4
# NOTE(review): the same Multitrack instance is reused for every file;
# parse_midi appears to overwrite its state each iteration — confirm it
# fully resets between files.
music_tracks = pypianoroll.Multitrack(beat_resolution=4)
#loop through all the .mid files
for filename in os.listdir(reggae_midi_location):
    print("Starting to process filename---->", reggae_midi_location + filename)
    if filename.endswith(".mid"):
        try:
            #Load MIDI file using parse_midi
            #returns Multi-Track object containing Track objects
            music_tracks.parse_midi(reggae_midi_location + filename)
            #add padding to avoid reshape errors
            #pad the pianorolls with zeros making the length a multiple of 32
            music_tracks.pad_to_multiple(32)
            music_tracks.pad_to_same()
            #merge pianoroll objects by instrument
            merged_tracks_to_add_to_training_file = get_merged(music_tracks, filename)
            #concatenate merged pianoroll objects to final training data track list
            track_list = np.concatenate((merged_tracks_to_add_to_training_file, track_list))
            print("Successfully processed filename---->", reggae_midi_location + filename)
        except Exception as e:
            print("**********ERROR**************It's possible that not all 4 instruments exist in this track; at least one is 0")
            print("Skipping file---->", filename, e)
            print(e)
# binarize data
# Order matters here: zeros become -1 first, then every remaining value that
# is still >= 0 (i.e. was originally positive) becomes 1.
track_list[track_list == 0] = -1
track_list[track_list >= 0] = 1
# + id="cr-gDnJN3Q7l" colab_type="code" colab={}
#split the data into training and evaluation datasets
# NOTE(review): np.split(x, 2) requires an even number of samples and splits
# 50/50 — confirm this ratio is intended.
training_data, eval_data = np.split(track_list, 2)
#save training data
save(train_dir + '/reggae-train.npy', np.array(training_data))
#save evaluation data
save(dataset_eval_dir + '/eval.npy', np.array(eval_data))
# + [markdown] id="oo4X1CWI3Q7n" colab_type="text"
# # Review Training Data
# + id="ginjoVPh3Q7n" colab_type="code" colab={} outputId="6b30a689-2ed7-4fd2-fad6-16015438d4b0"
#double check the shape on training data, should be (x, 32, 128, 4), where x represents the amount of records
training_data = np.load(train_dir + '/reggae-train.npy')
print("Testing the training shape: ", training_data.shape)
#view sample of data that will be fed to model, four graphs == four tracks
display_utils.show_pianoroll(training_data)
| Lab 2/Prepare-Data.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
# # Dates and times in Python
#
# In addition to common data types like strings, integers and booleans, Python also has date and time data types. You'll probably come across these most often via the built-in [`datetime`](https://docs.python.org/3/library/datetime.html) module, so that's what we'll start with in this notebook.
#
# Here's what we'll cover in this notebook:
# - [Creating new dates and times](#Creating-new-dates-and-times)
# - [Parsing dates and times from text](#Parsing-dates-and-times-from-text)
# - [Formatting dates as text](#Formatting-dates-as-text)
# - [Calculating the difference between two datetimes](#Calculating-the-difference-between-two-datetimes)
# - [Working with dates in pandas](#Working-with-dates-in-pandas)
#
# Let's start by importing the `datetime` object from the `datetime` module -- this will allow us to create dates _and_ times tied to a particular date, which is a common use.
from datetime import datetime
# ### Creating new dates and times
#
# The `datetime` object expects arguments in this order: year, month, day, hour, minute, second, microsecond, and `tzinfo`, a keyword argument for passing in timezone information. Let's create a date object for Aug. 30, 2021.
our_date = datetime(2021, 8, 30)
print(our_date)
# You can access attributes of this date now, like `year` and `month`:
our_date.year
our_date.month
# We could also make a specific time on that day -- say, 1:30 p.m.:
our_datetime = datetime(2021, 8, 30, 13, 30)
print(our_datetime)
# We could also make it 1:30 p.m. Australia/Sydney time:
#
# (Dealing with timezones in Python can be a huge pain, even for uncomplicated data, so it's usually easier to use a third-party library like [`pytz`](http://pytz.sourceforge.net/) -- which we'll use here -- instead of manually calculating timezone offsets with a `datetime.timezone` object.)
import pytz
aus_sydney_tz = pytz.timezone('Australia/Sydney')
# you can get a list of all available timezones by running this cell
pytz.all_timezones
# NOTE(review): pytz's docs recommend `aus_sydney_tz.localize(dt)` rather than
# passing the zone via `tzinfo=`, which can yield a historical LMT offset --
# confirm the intended offset here.
our_datetime_with_tz = datetime(2021, 8, 30, 13, 30, tzinfo=aus_sydney_tz)
print(our_datetime_with_tz)
# If you need to get the datetime of _now_ -- i.e., when the script is run -- you can use the handy method `now()`.
datetime.now()
# ### Parsing dates and times from text
#
# Let's say we've got a list of dates stored as strings (a common thing), and we want to make them into dates for future analysis:
# Dates stored as strings, to be parsed into datetime objects below.
our_dates = [
    '2021-09-10',
    '2018-10-30',
    '2017-03-13',
    '2000-01-02'
]
# We can use datetime's `strptime` method for this. It expects two arguments:
# - The string to parse into a date
# - The _pattern_ of the dates
#
# In this case, the pattern is: four-digit year, dash, two-digit month, dash, two-digit day. According to the little [mini-language of directives](https://docs.python.org/3/library/datetime.html#strftime-and-strptime-behavior) used to represent each piece of a date, that translates to `'%Y-%m-%d'`.
#
# I can never remember these, so I have [strftime.org](http://strftime.org/) bookmarked as a reference, and I check it _all the time_.
for d in our_dates:
    native_date = datetime.strptime(d, '%Y-%m-%d')
    print(native_date, type(native_date))
# ### Formatting dates as text
#
# You can use the `strftime` method to turn date objects into strings, formatted however you like and using the same mini-language of date directives we used with `strptime`. For these examples, we'll use the `our_date` variable.
our_date
our_date.strftime('%Y-%m-%d')
our_date.strftime('%m/%d/%Y')
# NOTE(review): the `%-d` (no-padding) directive is platform-specific — it
# works with glibc/macOS but raises on Windows.
our_date.strftime('It is %A, %B %-d, Year of our Lord %Y')
# ### Calculating the difference between two datetimes
# How old was each victim when they died? For each criminal case, how much time elapsed between the indictment and the sentencing? On average, how soon after the market-moving tweet did investors begin dumping stock in the company?
#
# "Date diff" questions like this show up all the time in newsrooms. If you're using Python to work with your data, you can use basic math to yield a [`datetime.timedelta`](https://docs.python.org/3/library/datetime.html#timedelta-objects) object and calculate the difference between two datetimes.
#
# (For more complex data, using a third-party library like [`dateutil`](https://github.com/dateutil/dateutil) would be worth your while.)
#
# But let's start with a simple example: How many minutes elapsed between two dates?
datetime1 = datetime(2021, 8, 30, 13, 30)
datetime2 = datetime(2021, 10, 7, 17, 45)
datetime2 - datetime1
# We can use the `total_seconds()` method of the `timedelta` object that gets returned from this math problem:
elapsed_time = datetime2 - datetime1
# FIX: the original used `.seconds`, which is only the seconds *component*
# of the timedelta (0-86399) and ignores the 38 elapsed days; use
# `.total_seconds()` to answer "how much time elapsed".
print(elapsed_time.total_seconds())
# ... and if we want minutes, just divide by 60.
print(elapsed_time.total_seconds() / 60)
# `timedelta` objects are also useful for "what was the date 160 days ago"-type problems:
# +
from datetime import timedelta
# Subtracting a timedelta from a datetime gives another datetime.
rn = datetime.now()
date_248_days_ago = rn - timedelta(days=248)
print(date_248_days_ago)
# -
# two weeks ago
rn - timedelta(weeks=2)
# 89382 seconds ago
rn - timedelta(seconds=89382)
# Date math can get a little complicated (time zones! leap years! birthdays!), but if you get stuck it's probable that someone on the Internet has solved your problem already.
# ### Working with dates in pandas
#
# Let's take a look at a couple of things you might want to do when working with dates in pandas: Parsing values as dates when you _import_ the data into a dataframe, and coercing _existing_ data to dates.
#
# Let's import pandas and load up some congressional junkets data (`../data/congress_junkets.csv`) with date values in two of the columns (`DepartureDate` and `ReturnDate`).
#
# We're going to specify the [`parse_dates`](https://pandas.pydata.org/pandas-docs/stable/generated/pandas.read_csv.html) argument when we call the `read_csv()` function -- we'll hand this keyword argument a _list_ of columns to parse as dates.
import pandas as pd
df = pd.read_csv('../data/congress_junkets.csv',
parse_dates=['DepartureDate', 'ReturnDate'])
df.head()
# This method can be slow if you have a lot of data; one way to speed it up is to supply pandas with the correct date format using the specification language mentioned above ([here's an example from StackOverflow](https://stackoverflow.com/questions/23797491/parse-dates-in-pandas)).
#
# We'll use a [lambda expression](Functions.ipynb#Lambda-expressions) here for brevity, but you can also define a new function and pass that in as the `date_parser` argument, as well.
df = pd.read_csv('../data/congress_junkets.csv',
parse_dates=['DepartureDate', 'ReturnDate'],
date_parser=lambda x: datetime.strptime(x, '%m/%d/%Y'))
# Much quicker. We can verify that the column is now a date column by checking the data frame's `dtypes` attribute:
df.dtypes
# Sometimes, for Reasons™️, it makes more sense to do the conversion after you've loaded your data. Let's use the same data but do the conversion _after_ it's in the dataframe.
df = pd.read_csv('../data/congress_junkets.csv')
df.head()
# To do this, we'll create a new column for each date with the `to_datetime()` method. We'll hand it three things:
# - The column to convert to a datetime
# - The expected `format` of the dates (m/d/y, in this case)
# - What to do if the parser runs into an error -- instead of throwing an error, we'll `coerce` (errors will become `NaN`)
# +
df['departure_date_new'] = pd.to_datetime(df['DepartureDate'],
format='%m/%d/%Y',
errors='coerce')
df['return_date_new'] = pd.to_datetime(df['ReturnDate'],
format='%m/%d/%Y',
errors='coerce')
# -
df.head()
| python/notebooks/Date and time data types.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (Ubuntu Linux)
# language: python
# metadata:
# cocalc:
# description: Python 3 programming language
# priority: 100
# url: https://www.python.org/
# name: python3
# ---
# Circle - Neutral Axis in Middle - Rev 0.0
# **Abstract:**
# Given: diameter d
# Returns: Area, Section Modulus, Plastic Modulus, Second Moment of Inertia, Radius of Gyration, and distance from neutral axis to extreme fibre
#
# **Instructions:**
# Enter radius below:
from math import pi
from pint import UnitRegistry
unit = UnitRegistry()
unit.default_format = '~' # ~ for unit abbreviations, P for pretty print, or both
# Define symbols for common units
m = unit.meter; mm = unit.millimeter; inch = unit.inch;
kN = unit.kilonewton; MPa = unit.megapascal; psi = unit.psi
# Define dimensional quantities
d = 5.5*inch # diameter in given unit
d.ito(mm) # convert to desired output unit
R = d/2
# Formulas for a solid circular section (neutral axis through the centre)
A = (pi*d**2)/4 # Area
S = (pi*d**3)/32 # Elastic Section Modulus
Z = (d**3)/6 # Plastic Section Modulus
I = (pi*d**4)/64 # Second Moment of Inertia
J = (pi*d**4)/32 # St. Venant's Torsional Constant
r = d/4 # Radius of Gyration
c = d/2 # Distance from Neutral Axis to Extreme Fibre
# Define Output
print('Given:')
print(' Diameter, d = {0:n}{1} and'.format(d.magnitude, d.units))
print('')
print('Geometric Properties:')
print(' Area, A = {0:n}{1}'.format(A.magnitude, A.units))
print(' Elastic Section Modulus, S = {0:n}{1}'.format(S.magnitude,
 S.units))
print(' Plastic Section Modulus, Z = {0:n}{1}'.format(Z.magnitude,
 Z.units))
print(' Second Moment of Inertia, I = {0:n}{1}'.format(I.magnitude,
 I.units))
print(" St. Venant's Torsional Constant, J = {0:n}{1}".format(J.magnitude,
 J.units))
print(' Radius of Gyration, r = {0:n}{1}'.format(r.magnitude,
 r.units))
print(' Distance from Neutral Axis to Extreme Fibre, c = {0:n}{1}'.format(
 c.magnitude, c.units))
# **Revision History:**
# - Rev 0.0 20-Jun-2019 E.Durham Created notebook using formulas from Aluminum Design Manual 2015
| Properties_of_Geometric_Sections/archived_files/Circle_0.0.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: 'Python 3.10.0 64-bit (''base-new'': conda)'
# language: python
# name: python3
# ---
# +
import os
import pandas as pd
import numpy as np
from sklearn.experimental import enable_iterative_imputer
from sklearn.impute import IterativeImputer
print("Setup complete.")
# +
# Load the raw incident CSV from the ./raw directory.
path = os.path.abspath("./raw")
file = "raw_reduced_7_years"
ext = "csv"
raw_data = pd.read_csv(os.path.join(path, ".".join([file, ext])))
print("Raw data loaded.")
# -
# Convert features to datetime and get day of year.
raw_data["Date"] = pd.to_datetime(raw_data["Date"])
raw_data["Time"] = pd.to_datetime(raw_data["Time"], format = "%H:%M:%S")
raw_data["day_of_year"] = raw_data["Date"].dt.day_of_year
raw_data["hour"] = raw_data["Time"].dt.hour
# Drop unnecessary columns.
columns_to_drop = ["Incident_Number", "Report_Location", "Address", "Type"]
data = raw_data.drop(columns = columns_to_drop)
# Chronological order; note the original integer index labels are kept.
data = data.sort_values(by = "Date")
# data[["Time", "Date"]] = data[["Time", "Date"]].isna()
# +
# Missing-value strategy: "all" drops rows, "impute" runs iterative
# imputation, "mean replace" fills with column means.
action = "impute"
if action == "all":
    data.dropna(inplace = True)
elif action == "impute":
    # Columns that contain at least one NaN.
    columns_to_impute = list(data.isna().any()[data.isna().any()].index)
    imp = IterativeImputer(max_iter = 20, random_state = 26)
    imp.fit(data[columns_to_impute])
    # FIX: build the imputed frame with the same index as `data`.  The
    # original used the default RangeIndex; because `data` was sorted by Date
    # (keeping its original labels), the assignment below aligned on index
    # labels and scrambled the imputed rows back into pre-sort order.
    new_data = pd.DataFrame(imp.transform(data[columns_to_impute]),
                            columns = columns_to_impute,
                            index = data.index)
    # (The original also called `data.drop(columns=...)` without using the
    # result — a no-op; the assignment below overwrites the columns in place.)
    data[columns_to_impute] = new_data
elif action == "mean replace":
    data_filled = data.fillna(data.mean())
else:
    print("Select specific action.")
# +
def origin_haversine(coord: tuple, degrees = True) -> float:
    """
    Great-circle (haversine) distance in km from `(latitude, longitude)` to (0, 0).

    Parameters
    ----------
    coord:
        `(latitude, longitude)` pair, in degrees by default.
    degrees:
        When true, the coordinates are converted from degrees to radians first.

    Returns
    -------
    float:
        The distance in kilometres.
    """
    earth_radius_km = 6371
    latitude, longitude = coord
    if degrees:
        latitude = np.radians(latitude)
        longitude = np.radians(longitude)
    # Haversine formula specialised to a reference point at (0, 0), where
    # cos(lat1) = 1: a = sin^2(lat/2) + cos(lat) * sin^2(lng/2)
    half_chord = np.sin(latitude / 2) ** 2 + np.cos(latitude) * np.sin(longitude / 2) ** 2
    return 2 * earth_radius_km * np.arcsin(np.sqrt(half_chord))
# Optional feature: collapse (Latitude, Longitude) into a single distance-
# from-origin column. Disabled by default.
HAVERSINE_FEATURE = False
if HAVERSINE_FEATURE:
    data["latlong_combined"] = [origin_haversine((lat, lng)) for lat, lng in zip(data.Latitude, data.Longitude)]
    data_haversine = data.drop(columns = ["Latitude", "Longitude"])
# -
def create_main_data(data: pd.DataFrame, date: str, time_groups: list):
    """
    Split `data` into one frame per calendar year of the `date` column, then
    aggregate each yearly frame by `time_groups` (e.g. `["day_of_year", "hour"]`):
    the event count per group becomes a "calls" column and the latitudes /
    longitudes are averaged.

    Parameters
    ----------
    data:
        The data to be processed; must contain `date`, "Latitude", "Longitude"
        and every column named in `time_groups`.
    date:
        Column name of the datetime column in `data`.
    time_groups:
        Columns to group by within each year.

    Returns
    -------
    list:
        One aggregated dataframe per year, in the order the years first
        appear in `data`.
    """
    yearly = [data[data[date].dt.year == year] for year in data[date].dt.year.unique()]
    aggregated = []
    for frame in yearly:
        grouped = (
            frame.groupby(time_groups)
            .agg({
                date: ["count"],
                "Latitude": ["mean"],
                "Longitude": ["mean"],
            })
            .reset_index(time_groups)
            .sort_values(time_groups)
        )
        # The count of `date` entries per group is the number of calls.
        grouped = grouped.rename(columns = {date: "calls"})
        # Flatten the (name, aggfunc) MultiIndex columns down to plain names.
        grouped.columns = grouped.columns.droplevel(1)
        aggregated.append(grouped)
    return aggregated
main_frames = create_main_data(data, "Date", ["day_of_year", "hour"])
# Concatenate all yearly frames into the training file (reversed order —
# presumably to make it chronological; TODO confirm against the data).
final_data_mean = pd.concat(main_frames[::-1], ignore_index = True)
# NOTE(review): the "test" year (main_frames[-1]) is also contained in the
# concatenated training file above — possible train/test leakage; confirm
# this overlap is intentional before modelling.
final_data_test_mean = main_frames[-1]
path_to_save = os.path.relpath("./train-test")
final_data_mean.to_csv(os.path.join(path_to_save, ".".join(["data_yearly_hourly_train", "csv"])), index = False)
final_data_test_mean.to_csv(os.path.join(path_to_save, ".".join(["data_yearly_hourly_test", "csv"])), index = False)
| data/clean.ipynb |
# ##### Copyright 2021 Google LLC.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# # linear_programming_example
# <table align="left">
# <td>
# <a href="https://colab.research.google.com/github/google/or-tools/blob/master/examples/notebook/linear_solver/linear_programming_example.ipynb"><img src="https://raw.githubusercontent.com/google/or-tools/master/tools/colab_32px.png"/>Run in Google Colab</a>
# </td>
# <td>
# <a href="https://github.com/google/or-tools/blob/master/ortools/linear_solver/samples/linear_programming_example.py"><img src="https://raw.githubusercontent.com/google/or-tools/master/tools/github_32px.png"/>View source on GitHub</a>
# </td>
# </table>
# First, you must install [ortools](https://pypi.org/project/ortools/) package in this colab.
# !pip install ortools
# +
# #!/usr/bin/env python3
# Copyright 2010-2021 Google LLC
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Linear optimization example."""
# [START program]
# [START import]
from ortools.linear_solver import pywraplp
# [END import]
def LinearProgrammingExample():
    """Linear programming sample.

    Maximizes 3x + 4y subject to:
        x + 2y <= 14
        3x - y  >= 0
        x - y   <= 2
        x, y    >= 0
    Prints the optimal solution and basic solver statistics.
    """
    # Instantiate a Glop solver, naming it LinearExample.
    # [START solver]
    solver = pywraplp.Solver.CreateSolver('GLOP')
    # CreateSolver returns None when the requested backend is unavailable;
    # bail out instead of crashing on the first attribute access.
    if not solver:
        print('GLOP solver is not available.')
        return
    # [END solver]

    # Create the two variables and let them take on any non-negative value.
    # [START variables]
    x = solver.NumVar(0, solver.infinity(), 'x')
    y = solver.NumVar(0, solver.infinity(), 'y')

    print('Number of variables =', solver.NumVariables())
    # [END variables]

    # [START constraints]
    # Constraint 0: x + 2y <= 14.
    solver.Add(x + 2 * y <= 14.0)

    # Constraint 1: 3x - y >= 0.
    solver.Add(3 * x - y >= 0.0)

    # Constraint 2: x - y <= 2.
    solver.Add(x - y <= 2.0)

    print('Number of constraints =', solver.NumConstraints())
    # [END constraints]

    # [START objective]
    # Objective function: 3x + 4y.
    solver.Maximize(3 * x + 4 * y)
    # [END objective]

    # Solve the system.
    # [START solve]
    status = solver.Solve()
    # [END solve]

    # [START print_solution]
    if status == pywraplp.Solver.OPTIMAL:
        print('Solution:')
        print('Objective value =', solver.Objective().Value())
        print('x =', x.solution_value())
        print('y =', y.solution_value())
    else:
        print('The problem does not have an optimal solution.')
    # [END print_solution]

    # [START advanced]
    print('\nAdvanced usage:')
    print('Problem solved in %f milliseconds' % solver.wall_time())
    print('Problem solved in %d iterations' % solver.iterations())
    # [END advanced]


LinearProgrammingExample()
# [END program]
| examples/notebook/linear_solver/linear_programming_example.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 2
# language: python
# name: python2
# ---
# # Prediction of surgery vs non-surgery using taxonomy profile
#
# Train machine learning models on species abundance to predict if the sample is from a patient that has undergone surgery
import numpy as np
import biom
import pandas as pd
import seaborn as sns
from matplotlib import pyplot as plt
import matplotlib
# Embed editable (TrueType) fonts in exported PDF/PS figures.
matplotlib.rcParams['pdf.fonttype'] = 42
matplotlib.rcParams['ps.fonttype'] = 42
# +
#load species abundance
# Rows = species, columns = sample ids; metadata is indexed by sample id.
df_species=pd.read_csv('./data/metaphlan2_species_level_abundance_classified.csv',index_col=0)
df_meta=pd.read_csv('./data/metadata_updated_filtered_201904.csv',index_col=0)
#create dictionaries mapping sample id -> time point / surgery type
dic_date=dict(zip(list(df_meta.index),list(df_meta.time_point)))
dic_surgery=dict(zip(list(df_meta.index),list(df_meta.surgery_type)))
# -
# Restrict the feature table to bacterial taxa: drop any row whose name
# mentions 'virus' or 'Archaea' (case-sensitive, as in the source labels).
species_keep = [name for name in df_species.index
                if 'virus' not in name and 'Archaea' not in name]
df_species = df_species.loc[species_keep]
df_species.shape
# ## RandomForest classifier for 2 groups: surgery vs no surgery
# ### Split approach: Separates by subjects
# make sure subject in training data does not overlap with subjects in test data
#
# +
from random import sample
#organize subjects and samples, and record their surgery status
# host_id -> surgery_type ('none' marks non-surgery subjects)
dic_subject=dict(zip(list(df_meta.host_id),list(df_meta.surgery_type)))
# sample id -> host_id (one subject can contribute several samples)
dic_sample_host=dict(zip(list(df_meta.index),list(df_meta.host_id)))
subject_nonsurgery=[i for i in dic_subject.keys() if dic_subject[i]=='none']
subject_surgery=[i for i in dic_subject.keys() if i not in subject_nonsurgery]
# write a function to randomly split test and training set, so that the subjects in test and training do not overlap
# 70% percent of the subjects are in training samples while 30% subjects are in test samples
def split_data(df_meta,df_species):
    """Randomly split samples into train/test sets with no subject overlap.

    ~70% of the subjects of each class (surgery / non-surgery) are drawn for
    training; all of their samples form X_train and the remaining samples
    form X_test. Labels: 0 = no surgery, 1 = surgery.
    Returns (X_train, X_test, y_test, y_train) — note the y ordering.
    Relies on the module-level lookups defined in the previous cell.
    """
    # Randomly pick ~70% of the subjects from each group for training.
    train_subjects = (sample(subject_nonsurgery, int(len(subject_nonsurgery) * 0.7))
                      + sample(subject_surgery, int(len(subject_surgery) * 0.7)))
    # Map subjects back to their samples to build the feature matrices.
    train_samples = [s for s in df_species.columns if dic_sample_host[s] in train_subjects]
    test_samples = [s for s in df_species.columns if s not in train_samples]
    X_train = df_species[train_samples]
    X_test = df_species[test_samples]
    # Binary labels derived from the sample -> surgery-type lookup.
    y_train = [0 if dic_surgery[s] == 'none' else 1 for s in X_train.columns]
    y_test = [0 if dic_surgery[s] == 'none' else 1 for s in X_test.columns]
    return X_train, X_test, y_test, y_train
X_train, X_test,y_test,y_train=split_data(df_meta,df_species)
# -
# ### Train and test the model
# +
#train regressor and run it on test data
from sklearn.ensemble import RandomForestClassifier
class_rf = RandomForestClassifier(n_jobs=4)
class_rf.fit(X_train.T, y_train)
# 10-fold cross validation on the test data
# NOTE(review): sklearn.cross_validation was removed in scikit-learn 0.20;
# on modern versions import cross_val_score from sklearn.model_selection.
# (This notebook targets a Python 2 / old-sklearn environment.)
from sklearn.cross_validation import cross_val_score
scores = cross_val_score(class_rf, X_train.T, y_train, cv=10)
print 'cross-validation on the training data: '
print scores
# Predict on new data
y_rf = class_rf.predict(X_test.T)
# calculate the accuracy score of the classifier
from sklearn.metrics import accuracy_score
from sklearn.metrics import average_precision_score
print 'Prediction accuracy score on the test data: ', accuracy_score(y_test, y_rf)
average_precision=average_precision_score(y_test, y_rf)
print 'Average precision-recall score: ', average_precision
# -
# ## Randomly split data, train and test the model 100 times
# ### Record and calculate the average precision-recall score
# ### Identify the most important species with the highest occurrence
# +
from sklearn.metrics import accuracy_score
from sklearn.metrics import average_precision_score
#define classifier
from sklearn.ensemble import RandomForestClassifier
class_rf = RandomForestClassifier(n_jobs=4)
y_test_all=[]
y_rf_all=[]
tprs = []
aucs = []
mean_fpr = np.linspace(0, 1, 100)
pcscore_list=[]
featuelist=[]
for i in range(100):
print i
X_train, X_test,y_test,y_train=split_data(df_meta,df_species)
class_rf.fit(X_train.T, y_train)
# Predict on new data
y_rf = class_rf.predict(X_test.T)
#record the y_test and y_rf for each iteration so it could be used to calculate over all precision-recall curve
y_test_all.append(y_test)
y_rf_all.append(y_rf)
# calculate the accuracy score of the classifier
pcscore_list.append(average_precision_score(y_test, y_rf))
ind_top30=sorted(range(len(class_rf.feature_importances_)), key=lambda i: class_rf.feature_importances_[i])[-30:]
for i in ind_top30:
featuelist.append(df_species.index[i])
probas_= class_rf.fit(X_train.T, y_train).predict_proba(X_test.T)
fpr, tpr, thresholds = roc_curve(y_test, probas_[:, 1])
tprs.append(interp(mean_fpr, fpr, tpr))
tprs[-1][0] = 0.0
roc_auc = auc(fpr, tpr)
aucs.append(roc_auc)
# +
# count the most important features that overlapped in 100 runs
# Counter yields the same per-feature counts as the original list.count loop,
# in O(n) instead of O(n^2).
from collections import Counter
dic_count = dict(Counter(featuelist))
dic_count
import operator
# Features sorted (ascending) by how often they appeared in the per-run top-30 lists.
sorted_x = sorted(dic_count.items(), key=operator.itemgetter(1))
sorted_x
# -
# ### Precision-recall curve
# +
from sklearn.metrics import precision_recall_curve
import matplotlib.pyplot as plt
from inspect import signature
mean_recall = np.linspace(0, 1, 100)
precisions = []
fig = plt.figure()
fig.set_size_inches(5, 5)
# NOTE(review): the curves are built from the hard 0/1 predictions (y_rf_all),
# not class probabilities, so each per-run PR "curve" has only a few distinct
# points; consider predict_proba scores for smoother curves.
for y_test, y_prob in zip(y_test_all, y_rf_all):
    precision, recall, _ = precision_recall_curve(y_test, y_prob)
    # Interpolate onto a common recall grid (precision_recall_curve returns
    # recall in decreasing order, hence the [::-1] reversals).
    precisions.append(np.interp(mean_recall, recall[::-1], precision[::-1]))
    plt.step(recall, precision, color='steelblue', alpha=0.08,
             where='post')
mean_precision = np.mean(precisions, axis=0)
plt.step(mean_recall, mean_precision, color='steelblue',
         label='Mean PRC',
         lw=2, alpha=.8)
std_precision = np.std(precisions, axis=0)
precision_upper = np.minimum(mean_precision + std_precision, 1)
precision_lower = np.maximum(mean_precision - std_precision, 0)
plt.fill_between(mean_recall, precision_lower, precision_upper, color='steelblue', alpha=.2,
                 label=r'$\pm$ 1 std. dev.')
plt.xlabel('Recall')
plt.ylabel('Precision')
plt.ylim([0.0, 1.05])
plt.xlim([0.0, 1.0])
# NOTE(review): `average_precision` here is the score from the earlier
# single-split cell, not the mean over the 100 iterations
# (np.mean(pcscore_list)) — the title may misrepresent this plot; confirm.
plt.title('Precision-Recall curve: AP={0:0.2f}'.format(
    average_precision))
plt.legend(loc="lower right")
# -
# ### Compute ROC curve and area under the curve
# +
#plot the average AUC ROC for 100 iterations
# BUG FIX: `auc` is used below but was never imported anywhere in this
# notebook, so a fresh run raised NameError.
from sklearn.metrics import auc
mean_tpr = np.mean(tprs, axis=0)
# Force the averaged curve to end at TPR = 1 at FPR = 1.
mean_tpr[-1] = 1.0
mean_auc = auc(mean_fpr, mean_tpr)
std_auc = np.std(aucs)
plt.plot(mean_fpr, mean_tpr, color='darkorange',
         label=r'Mean ROC (AUC = %0.2f $\pm$ %0.2f)' % (mean_auc, std_auc),
         lw=2, alpha=.8)
# Shade +/- one standard deviation around the mean curve.
std_tpr = np.std(tprs, axis=0)
tprs_upper = np.minimum(mean_tpr + std_tpr, 1)
tprs_lower = np.maximum(mean_tpr - std_tpr, 0)
plt.fill_between(mean_fpr, tprs_lower, tprs_upper, color='grey', alpha=.2,
                 label=r'$\pm$ 1 std. dev.')
plt.plot([0, 1], [0, 1], linestyle='--', lw=2, color='navy',
         label='Chance', alpha=.8)
plt.xlim([-0.05, 1.05])
plt.ylim([-0.05, 1.05])
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate')
plt.title('ROC curve of Random Forest Classifier to predict surgery vs. no surgery')
plt.legend(loc="lower right")
plt.show()
#plt.savefig('../data/Sandborn_data/figures/AUC_ROC_100_iterations_taxonomy_classification.svg')
#plt.savefig('../data/Sandborn_data/figures/AUC_ROC_100_iterations_taxonomy_classification.pdf')
| notebooks_metagenomics/3.1 Classification of surgery vs non-surgery patients by taxonomy profile.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # NetworKit User Guide
# ## About NetworKit
# [NetworKit][networkit] is an open-source toolkit for high-performance
# network analysis. Its aim is to provide tools for the analysis of large
# networks in the size range from thousands to billions of edges. For this
# purpose, it implements efficient graph algorithms, many of them parallel to
# utilize multicore architectures. These are meant to compute standard measures
# of network analysis, such as degree sequences, clustering coefficients and
# centrality. In this respect, NetworKit is comparable
# to packages such as [NetworkX][networkx], albeit with a focus on parallelism
# and scalability. NetworKit is also a testbed for algorithm engineering and
# contains a few novel algorithms from recently published research, especially
# in the area of community detection.
#
# [networkit]: http://parco.iti.kit.edu/software/networkit.shtml
# [networkx]: http://networkx.github.com/
#
#
# ## Introduction
# This notebook provides an interactive introduction to the features of NetworKit, consisting of text and executable code. We assume that you have read the Readme and successfully built the core library and the Python module. Code cells can be run one by one (e.g. by selecting the cell and pressing `shift+enter`), or all at once (via the `Cell->Run All` command). Try running all cells now to verify that NetworKit has been properly built and installed.
#
# ## Preparation
# This notebook creates some plots. To show them in the notebook, matplotlib must be imported and we need to activate matplotlib's inline mode:
# %matplotlib inline
import matplotlib.pyplot as plt
# IPython lets us use familiar shell commands in a Python interpreter. Use one of them now to change into the directory of your NetworKit download:
# cd ../../
# NetworKit is a hybrid built from C++ and Python code: Its core functionality is implemented in C++ for performance reasons, and then wrapped for Python using the Cython toolchain. This allows us to expose high-performance parallel code as a normal Python module. On the surface, NetworKit is just that and can be imported accordingly:
from networkit import *
# ## Reading and Writing Graphs
# Let us start by reading a network from a file on disk: [PGPgiantcompo.graph](http://www.cc.gatech.edu/dimacs10/archive/data/clustering/PGPgiantcompo.graph.bz2). In the course of this tutorial, we are going to work on the `PGPgiantcompo` network, a social network/web of trust in which nodes are PGP keys and an edge represents a signature from one key on another. It is distributed with NetworKit as a good starting point.
#
# There is a convenient function in the top namespace which tries to guess the input format and select the appropriate reader:
G = readGraph("input/PGPgiantcompo.graph", Format.METIS)
# There is a large variety of formats for storing graph data in files. For NetworKit, the currently best supported format is the [METIS adjacency format](http://people.sc.fsu.edu/~jburkardt/data/metis_graph/metis_graph.html). Various example graphs in this format can be found [here](http://www.cc.gatech.edu/dimacs10/downloads.shtml). The `readGraph` function tries to be an intelligent wrapper for various reader classes. In this example, it uses the `METISGraphReader` which is located in the `graphio` submodule, alongside other readers. These classes can also be used explicitly:
graphio.METISGraphReader().read("input/PGPgiantcompo.graph")
# is the same as: readGraph("input/PGPgiantcompo.graph", Format.METIS)
# It is also possible to specify the format for `readGraph()` and `writeGraph()`. Supported formats can be found via `[graphio.]Format`. However, graph formats are most likely only supported as far as the NetworKit::Graph can hold and use the data. Please note, that not all graph formats are supported for reading and writing.
#
# Thus, it is possible to use NetworKit to convert graphs between formats. Let's say I need the previously read PGP graph in the Graphviz format:
graphio.writeGraph(G,"output/PGPgiantcompo.graphviz", Format.GraphViz)
# NetworKit also provides a function to convert graphs directly:
graphio.convertGraph(Format.LFR, Format.GML, "input/example.edgelist", "output/example.gml")
# ## The Graph Object
# `Graph` is the central class of NetworKit. An object of this type represents an undirected, optionally weighted network. Let us inspect several of the methods which the class provides.
n = G.numberOfNodes()
m = G.numberOfEdges()
print(n, m)
G.toString()
# Nodes are simply integer indices, and edges are pairs of such indices.
V = G.nodes()
print(V[:10])
E = G.edges()
print(E[:10])
G.hasEdge(42,11)
# This network is unweighted, meaning that each edge has the default weight of 1.
G.weight(42,11)
# ## Drawing Graphs
# Sometimes it may be interesting to take a glance at a visualization of a graph. As this is not the scope of NetworKit, the `viztasks`-module provides two convenience functions to draw graphs via NetworkX. If you have it installed, you will see usage examples throughout this guide.
#
# It also possible to load a graph and the results of our analytic kernels directly into [Gephi](https://gephi.org/), a software package for interactive graph visualization, via its streaming plugin. You may want to take a look at the GephiStreaming notebook.
# ## Profiling a network
# The `profiling`-module introduced with version 4.0 of NetworKit is the successor of the `properties`-module. It provides a convenient way to run a selection of NetworKit's analytic kernels. The results are further processed to show all kinds of statistics. A very brief example follows. First, let's load a different graph:
astro = readGraph("input/astro-ph.graph", Format.METIS)
# One simple function call is enough to run and evaluate several kernels. The `preset`-parameter is a convenient way to choose a set of algorithms. Currently, `minimal`, `default` and `complete` can be passed.
pf = profiling.Profile.create(astro, preset="minimal")
# When running inside a notebook, the `show`-function can be used to display the profile. Depending on the selection of kernels, it may take a while to produce all the plots.
pf.show()
# It is also possible to save the profile in a file with the following command. Two formats are available: `HTML` and `LaTeX`.
pf.output("HTML",".")
# For a more customized selection of kernels, a `Config`-object can be created and passed to `Profile.create`. Take a look at the specific Profiling notebook for more detailed instructions.
# ## Connected Components
# A connected component is a set of nodes in which each pair of nodes is connected by a path. The following function determines the connected components of a graph:
cc = components.ConnectedComponents(G)
cc.run()
print("number of components ", cc.numberOfComponents())
v = 0
print("component of node ", v , ": " , cc.componentOfNode(0))
print("map of component sizes: ", cc.getComponentSizes())
# ## Degree Distribution
# Node degree, the number of edges connected to a node, is one of the most studied properties of networks. Types of networks are often characterized in terms of their distribution of node degrees. We obtain and visualize the degree distribution of our example network as follows.
# Degree of every node, sorted descending, for the log-log plot below.
dd = sorted(centrality.DegreeCentrality(G).run().scores(), reverse=True)
plt.xscale("log")
plt.xlabel("degree")
plt.yscale("log")
plt.ylabel("number of nodes")
# NOTE(review): this plots the sorted degree sequence against rank, not a
# binned degree histogram — the axis labels ("degree" on x, "number of nodes"
# on y) describe a histogram; confirm which was intended.
plt.plot(dd)
plt.show()
# We choose a logarithmic scale on both axes because a _powerlaw degree distribution_, a characteristic feature of complex networks, would show up as a straight line from the top left to the bottom right on such a plot. As we see, the degree distribution of the `PGPgiantcompo` network is definitely skewed, with few high-degree nodes and many low-degree nodes. But does the distribution actually obey a power law? In order to study this, we need to apply the [powerlaw](https://pypi.python.org/pypi/powerlaw) module. Call the following function:
import powerlaw
fit = powerlaw.Fit(dd)
# The powerlaw coefficient can then be retrieved via:
fit.alpha
# If you further want to know how "good" it fits the power law distribution, you can use the the `distribution_compare`-function. From the documentation of the function:
# > R : float
# >
# > Loglikelihood ratio of the two distributions' fit to the data. If
# > greater than 0, the first distribution is preferred. If less than
# > 0, the second distribution is preferred.
#
# > p : float
# >
# > Significance of R
#
fit.distribution_compare('power_law','exponential')
# ## Transitivity / Clustering Coefficients
# In the most general sense, transitivity measures quantify how likely it is that the relations out of which the network is built are transitive. The clustering coefficient is the most prominent of such measures. We need to distinguish between global and local clustering coefficient: The global clustering coefficient for a network gives the fraction of closed triads. The local clustering coefficient focuses on a single node and counts how many of the possible edges between neighbors of the node exist. The average of this value over all nodes is a good indicator for the degree of transitivity and the presence of community structures in a network, and this is what the following function returns:
globals.clustering(G)
# ## Search and Shortest Paths
# A simple breadth-first search from a starting node can be performed as follows:
# +
v = 0
bfs = distance.BFS(G, v)
bfs.run()
bfsdist = bfs.getDistances()
# -
# The return value is a list of distances from `v` to other nodes - indexed by node id. For example, we can now calculate the mean distance from the starting node to all other nodes:
sum(bfsdist) / len(bfsdist)
# Similarly, Dijkstra's algorithm yields shortest path distances from a starting node to all other nodes in a weighted graph. Because `PGPgiantcompo` is an unweighted graph, the result is the same here:
dijkstra = distance.Dijkstra(G, v)
dijkstra.run()
spdist = dijkstra.getDistances()
sum(spdist) / len(spdist)
# ## Core Decomposition
# A $k$-core decomposition of a graph is performed by successively peeling away nodes with degree less than $k$. The remaining nodes form the $k$-core of the graph.
K = readGraph("input/karate.graph", Format.METIS)
coreDec = centrality.CoreDecomposition(K)
coreDec.run()
# Core decomposition assigns a core number to each node, being the maximum $k$ for which a node is contained in the $k$-core. For this small graph, core numbers have the following range:
set(coreDec.scores())
viztasks.drawGraph(K, nodeSizes=[(k**2)*20 for k in coreDec.scores()])
plt.show()
# ## Community Detection
# This section demonstrates the community detection capabilities of NetworKit. Community detection is concerned with identifying groups of nodes which are significantly more densely connected to each other than to the rest of the network.
# Code for community detection is contained in the `community` module. The module provides a top-level function to quickly perform community detection with a suitable algorithm and print some stats about the result.
community.detectCommunities(G)
# The function prints some statistics and returns the partition object representing the communities in the network as an assignment of node to community label. Let's capture this result of the last function call.
communities = community.detectCommunities(G)
# *Modularity* is the primary measure for the quality of a community detection solution. The value is in the range `[-0.5,1]` and usually depends both on the performance of the algorithm and the presence of distinctive community structures in the network.
community.Modularity().getQuality(communities, G)
# ### The Partition Data Structure
# The result of community detection is a partition of the node set into disjoint subsets. It is represented by the `Partition` data strucure, which provides several methods for inspecting and manipulating a partition of a set of elements (which need not be the nodes of a graph).
type(communities)
print("{0} elements assigned to {1} subsets".format(communities.numberOfElements(), communities.numberOfSubsets()))
print("the biggest subset has size {0}".format(max(communities.subsetSizes())))
# The contents of a partition object can be written to file in a simple format, in which each line *i* contains the subset id of node *i*.
community.writeCommunities(communities, "output/communties.partition")
# ### Choice of Algorithm
# The community detection function used a good default choice for an algorithm: *PLM*, our parallel implementation of the well-known Louvain method. It yields a high-quality solution at reasonably fast running times. Let us now apply a variation of this algorithm.
community.detectCommunities(G, algo=community.PLM(G, True))
# We have switched on refinement, and we can see how modularity is slightly improved. For a small network like this, this takes only marginally longer.
# ### Visualizing the Result
# We can easily plot the distribution of community sizes as follows. While the distribution is skewed, it does not seem to fit a power-law, as shown by a log-log plot.
# +
sizes = communities.subsetSizes()
sizes.sort(reverse=True)
ax1 = plt.subplot(2,1,1)
ax1.set_ylabel("size")
ax1.plot(sizes)
ax2 = plt.subplot(2,1,2)
ax2.set_xscale("log")
ax2.set_yscale("log")
ax2.set_ylabel("size")
ax2.plot(sizes)
plt.show()
# -
# ## Subgraph
# NetworKit supports the creation of Subgraphs depending on an original graph and a set of nodes. This might be useful in case you want to analyze certain communities of a graph. Let's say that community 2 of the above result is of further interest, so we want a new graph that consists of nodes and intra cluster edges of community 2.
c2 = communities.getMembers(2)
g2 = G.subgraphFromNodes(c2)
communities.subsetSizeMap()[2]
g2.numberOfNodes()
# As we can see, the number of nodes in our subgraph matches the number of nodes of community 2. The subgraph can be used like any other graph object, e.g. further community analysis:
communities2 = community.detectCommunities(g2)
viztasks.drawCommunityGraph(g2,communities2)
plt.show()
# ## Centrality
# [Centrality](http://en.wikipedia.org/wiki/Centrality) measures the relative importance of a node within a graph. Code for centrality analysis is grouped into the `centrality` module.
# ### Betweenness Centrality
# We implement Brandes' algorithm for the exact calculation of betweenness centrality. While the algorithm is efficient, it still needs to calculate shortest paths between all pairs of nodes, so its scalability is limited. We demonstrate it here on the small Karate club graph.
K = readGraph("input/karate.graph", Format.METIS)
bc = centrality.Betweenness(K)
bc.run()
# We have now calculated centrality values for the given graph, and can retrieve them either as an ordered ranking of nodes or as a list of values indexed by node id.
bc.ranking()[:10] # the 10 most central nodes
# ### Approximation of Betweenness
# Since exact calculation of betweenness scores is often out of reach, NetworKit provides an approximation algorithm based on path sampling. Here we estimate betweenness centrality in `PGPgiantcompo`, with a probabilistic guarantee that the error is no larger than an additive constant $\epsilon$.
abc = centrality.ApproxBetweenness(G, epsilon=0.1)
abc.run()
# The 10 most central nodes according to betweenness are then
abc.ranking()[:10]
# ### Eigenvector Centrality and PageRank
# Eigenvector centrality and its variant PageRank assign relative importance to nodes according to their connections, incorporating the idea that edges to high-scoring nodes contribute more. PageRank is a version of eigenvector centrality which introduces a damping factor, modeling a random web surfer which at some point stops following links and jumps to a random page. In PageRank theory, centrality is understood as the probability of such a web surfer to arrive on a certain page. Our implementation of both measures is based on parallel power iteration, a relatively simple eigensolver.
# Eigenvector centrality
ec = centrality.EigenvectorCentrality(K)
ec.run()
ec.ranking()[:10] # the 10 most central nodes
# PageRank
pr = centrality.PageRank(K, 1e-6)
pr.run()
pr.ranking()[:10] # the 10 most central nodes
# ## NetworkX Compatibility
# [NetworkX](https://networkx.org/) is a popular Python package for network analysis. To let both packages complement each other, and to enable the adaptation of existing NetworkX-based code, we support the conversion of the respective graph data structures.
import networkx as nx
nxG = nxadapter.nk2nx(G) # convert from NetworKit.Graph to networkx.Graph
print(nx.degree_assortativity_coefficient(nxG))
# ## Generating Graphs
# An important subfield of network science is the design and analysis of generative models. A variety of generative models have been proposed with the aim of reproducing one or several of the properties we find in real-world complex networks. NetworKit includes generator algorithms for several of them.
# The **Erdös-Renyi model** is the most basic random graph model, in which each edge exists with the same uniform probability. NetworKit provides an efficient generator:
ERG = generators.ErdosRenyiGenerator(1000, 0.1).generate()
profiling.Profile.create(ERG, preset="minimal").show()
# A simple way to generate a **random graph with community structure** is to use the `ClusteredRandomGraphGenerator`. It uses a simple variant of the Erdös-Renyi model: The node set is partitioned into a given number of subsets. Nodes within the same subset have a higher edge probability.
CRG = generators.ClusteredRandomGraphGenerator(200, 4, 0.2, 0.002).generate()
community.detectCommunities(CRG)
profiling.Profile.create(CRG, preset="minimal").show()
# The **Chung-Lu model** (also called **configuration model**) generates a random graph which corresponds to a given degree sequence, i.e. has the same expected degree sequence. It can therefore be used to replicate some of the properties of a given real networks, while others are not retained, such as high clustering and the specific community structure.
degreeSequence = [CRG.degree(v) for v in CRG.nodes()]
clgen = generators.ChungLuGenerator(degreeSequence)
CLG = clgen.generate()
community.detectCommunities(CLG)
profiling.Profile.create(CLG, preset="minimal").show()
# ## Settings
# In this section we discuss global settings.
# ### Logging
# When using NetworKit from the command line, the verbosity of console output can be controlled via several loglevels, from least to most verbose: `FATAL`, `ERROR`, `WARN`, `INFO`, `DEBUG` and `TRACE`. (Currently, logging is only available on the console and not visible in the IPython Notebook).
getLogLevel() # the default loglevel
setLogLevel("TRACE") # set to most verbose mode
setLogLevel("ERROR") # set back to default
# Please note, that the default build setting is optimized (`--optimize=Opt`) and thus, every LOG statement below INFO is removed. If you need DEBUG and TRACE statements, please build the extension module by appending `--optimize=Dbg` when calling the setup script.
# ### Parallelism
# The degree of parallelism can be controlled and monitored in the following way:
setNumberOfThreads(4) # set the maximum number of available threads
getMaxNumberOfThreads() # see maximum number of available threads
getCurrentNumberOfThreads() # the number of threads currently executing
# ## Support
# NetworKit is an open-source project that improves with suggestions and contributions from its users. The email list `<EMAIL>` is the place for general discussion and questions.
| Doc/Notebooks/NetworKit_UserGuide.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import pandas as pd
import numpy as np
pId = 1  # participant ID; selects which ./data/ID<pId>-*.csv files are loaded
# BUG FIX: the original had a stray expression cell here that used dfState
# before it was read (out-of-order notebook state); it raised NameError on a
# fresh kernel run and has been removed.
# +
dfState = pd.read_csv("./data/ID%i-state.csv" % pId)
dfState.head()
# Rounds are delimited by alternating "start"/"end" state rows.  If the log
# ends mid-round (odd number of rows), drop the unpaired last row.
# BUG FIX: the original condition was `len(dfState) % 1 == 0`, which is
# always true and therefore unconditionally discarded the final row.
if (len(dfState) % 2 != 0):
    dfState = dfState[:-1]
# Pair up start/end timestamps into one row per round.
x = np.array([dfState[dfState.state == "start"].time.values, dfState[dfState.state == "end"].time.values])
dfRounds = pd.DataFrame(x.T)
dfRounds.columns = ["start", "end"]
dfRounds["totalTime"] = dfRounds["end"] - dfRounds["start"]
dfRounds
# +
# Per-participant event logs: detected passings (flow) and the visitor list.
dfFlow = pd.read_csv("./data/ID%i-flow.csv" % pId)
print("Flow sum: %i" % len(dfFlow))
dfFlow.head()
dfVisitor = pd.read_csv("./data/ID%i-visitor.csv" % pId)
# Keep one row per (name, hasTicket) combination.
dfVisitor = dfVisitor.drop_duplicates(["name", "hasTicket"])
# -
# Outer-join flow detections with the visitor table on `name`; pandas suffixes
# the clashing `time` columns as time_x (from dfFlow) / time_y (from dfVisitor).
dfV = pd.merge(dfFlow, dfVisitor[["time", "name"]], on="name", how="outer")
# +
lst = []      # flow-event count per round
lstRate = []  # [tp, fp, tn, fn] per round
for i, e in dfRounds.iterrows():
    print( e.start, e.end)
    # Flow events inside this round's [start, end] window.
    x = len(dfFlow[(dfFlow.time >= e.start) & (dfFlow.time <= e.end)])
    dfX = dfV[(dfV.time_x >= e.start) & (dfV.time_x <= e.end)]
    # NOTE(review): fn/tn and tp/fp are computed from the *same* masks, so
    # fn == tn and tp == fp by construction — a hasTicket condition appears
    # to be missing from two of these four counts; confirm the intended
    # confusion-matrix definition.
    fn = len(dfX[dfX.time_y.isna()])
    tp = len(dfX[~dfX.time_y.isna()])
    tn = len(dfX[dfX.time_y.isna()])
    fp = len(dfX[~dfX.time_y.isna()])
    lst.append(x)
    lstRate.append([tp, fp, tn, fn])
dfRounds["totalVisitor"] = lst
# -
dfX  # leftover display of the last round's slice from the loop above
# Visitors per minute — assumes `time` is in seconds; TODO confirm units.
dfRounds["VisitorPerMinute"] = dfRounds.totalVisitor / (dfRounds.totalTime/60)
dfRounds
# NOTE(review): `df` is never defined in this notebook — the four statements
# below raise NameError on a fresh run; presumably a merge carrying
# `hasTicket` alongside time_x/time_y (a variant of dfV) was intended.
dfX = df[df.hasTicket == False]
print("Missed: %i" % len(dfX[dfX.time_y.isna()]))
print("Found: %i" % len(dfX[~dfX.time_y.isna()]))
dfX = df[df.hasTicket == True]
print("Entered without: %i" % len(dfX[dfX.time_y.isna()]))
# NOTE(review): "Shound" is a typo for "Should" in the runtime string below
# (left unchanged here since a doc-only edit must not alter output text).
print("Shound not have: %i" % len(dfX[~dfX.time_y.isna()]))
| .ipynb_checkpoints/Untitled-checkpoint.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import cv2
import numpy as np
import matplotlib.pyplot as plt
# Load the sample receipt (OpenCV reads images in BGR channel order).
img = cv2.imread('./samples/sample_receipt_eng.jpg')
# img = cv2.imread('./samples/sample_receipt.png')
# Binarize: pixels above 210 become 255, everything else 0.
ret,thresh1 = cv2.threshold(img,210,255,cv2.THRESH_BINARY)
plt.figure(figsize=(8, 8))
# NOTE(review): matplotlib expects RGB, so colors appear channel-swapped.
plt.imshow(thresh1)
import pytesseract
from pytesseract import Output
# +
# Word/line bounding boxes from Tesseract.  Note this uses the raw `img`,
# not the thresholded image computed above.
d = pytesseract.image_to_data(img, output_type=Output.DICT)
n_boxes = len(d['level'])
for i in range(n_boxes):
    (x, y, w, h) = (d['left'][i], d['top'][i], d['width'][i], d['height'][i])
    # cv2.rectangle draws in place on `img` (and also returns it), so `img`
    # itself is mutated here.
    img_out = cv2.rectangle(img, (x, y), (x + w, y + h), (0, 0, 125), 2)
plt.figure(figsize=(8, 8))
plt.imshow(img_out)
# -
# NOTE(review): `img` now has the boxes drawn onto it, so this OCR pass runs
# on the annotated image — likely unintended; confirm against thresh1/original.
extracted_text = pytesseract.image_to_string(img)
print(extracted_text)
| pytesseract_tutorial.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Introducing Geospatial Analytics
#
# Formerly known as IBM PAIRS Geoscope, the [Geopatial Analytics](https://www.ibm.com/products/environmental-intelligence-suite/geospatial-analytics) component within the [IBM Environmental Intelligence Suite](https://www.ibm.com/products/environmental-intelligence-suite) is a platform specifically designed for massive geospatial-temporal (maps, satellite, weather, drone, IoT) query and analytics services. It frees up data scientists and developers from cumbersome processes that dominate conventional data preparation including data governance, providing search-friendly access to a rich, diverse and growing catalog of continually updated geospatial-temporal information.
#
# ## Gain Valuable Insights with Hundreds of Data Layers
#
# Geospatial Analytics offers more than 750 geospatial-temporal data layers to equip your business with a unique set of knowledge that is imperative to maintaining leadership in the era of machine learning and AI. Included is a curated repository of several hundred harmonized, analytics-ready data layers ranging from satellite imagery, weather data, demographic data, land and sea data, agricultural data and more.
#
# You can extend the library by efficiently ingesting, curating and aligning your organization’s proprietary data for use in combination with the provided geospatial-temporal data, so you can gain even deeper insights around decisions that matter in a scalable, cost-efficient manner.
#
# ## Industry Use Cases
#
# There are many ways industries can take advantage of Geospatial Analytics:
#
# * A government can derive actionable insights on health, disease and weather, to better advise the public during a natural disaster or pandemic.
# * An electricity utility can better predict disruption risks to optimize maintenance and crew schedules.
# * An insurer can better project risk and alert policy holders before issues occur, increasing efficiency.
# * A lender can improve its credit risk scoring methodology and reduce bad loan placements.
# * A food company can better respond to consumer demand for greater quality and ensure sustainability initiatives are being met.
#
# All industries can optimize querying of multiple data sets and quickly retrieve relevant data even when the underlying data sets possess different spatial and temporal resolutions.
# ## Data Ingestion
#
# One of the major benefits of Geospatial Analytics is to govern the ingestion of data wherever it is located (FTP, AWS, APIs,..), manage provider licensing and Ts & Cs, and curate professionally the data sets and data layers on behalf of a data scientist or an organisation.
#
# Here is an overview of the ingestion process which is key in simplifying the lives of data scientists during the querying and model building.
#
# 
# ## Querying
#
# Once the data has been ingested and normalised in Geospatial Analytics, it is much easier to query across different domains for faster data science.
#
# One benefit of using the Geospatial Analytics query language is the ability to "push down" data reduction and algorithms so that they execute within the Geospatial Analytics big-data platform. This is a great advantage for data scientists, as they don't have to fetch terabytes of data and process them locally in, for example, a Python environment.
#
# The overall Geospatial Analytics value proposition is as follows:
# * Access to an ever growing number of layers of analytics
# * Accurate & consistent results
# * Reduced time to value!
#
# 
# ## Use Cases for Geospatial Analytics are Plentiful
#
# 
#
# ## How to use the Geospatial Analytics Platform
#
# To use Geospatial Analytics, you can either:
# * use the GUI (see screenshot below) for creating Raster, Vector or Point queries and displaying the results. The GUI can display the results of all queries, but also when you click on a specific lat/long, it can display a corresponding time-series graph in a sub-second response time (as shown below). This allows for speedy assessment of availability of data, especially in the case of tiled satellite imagery or discovery of long historic trends.
# * use the APIs for more in-depth data processing and analysis in python leveraging the IBM PAIRS PAW library or by using any other language by HTTP REST APIs calls. The Geospatial Analytics APIs allows the same types of queries as the Geospatial Analytics GUI, while also introducing more flexibility with user-defined functions for more push-down analytics.
# * use both of the above — using the GUI for exploring the data sets, then using the Python APIs for building reproducible analytics in a production process.
#
# 
#
| tutorials/notebooks/introducingpairsgeospatialanalytics.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: '''Python Interactive'''
# language: python
# name: 229ff3b6-f7e8-4020-ae2d-0eceb6589aaf
# ---
# Print a greeting, then count from 0 to 2 (one number per line).
print("hello")
for number in range(3):
    print(number)
# +
# # %load 1.py
def happy():
    """Print the refrain line of the Happy Birthday song."""
    print("Happy Birthday to you!")

def sing(P):
    """Print the full four-line Happy Birthday song addressed to ``P``."""
    for _ in range(2):
        happy()
    print("Happy Birthday dear " + P + "!")
    happy()
# main
import turtle  # NOTE(review): mid-script import; forward() opens a GUI window
# Write a short UTF-8 text file ("你好" means "hello").
f = open("aa.txt", "w", encoding="utf-8")
f.write("你好")
f.close()
turtle.forward(100)  # move the turtle cursor 100 units forward
sing("李明")  # sing to "李明" (a person's name)
print("hello")
# -
| test1.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ## Project Code
# +
from __future__ import print_function
from distutils.version import LooseVersion as Version
import sys
OK = '\x1b[42m[ OK ]\x1b[0m'      # green "[ OK ]" badge for console output
FAIL = "\x1b[41m[FAIL]\x1b[0m"    # red "[FAIL]" badge
try:
    import importlib
except ImportError:
    print(FAIL, "Python version 3.7 is required,"
          " but %s is installed." % sys.version)
def import_version(pkg, min_ver, fail_msg=""):
    """Import ``pkg`` and report whether its version is at least ``min_ver``.

    Prints an OK/FAIL line to the console and returns the imported module
    (or None when the import itself failed).

    Parameters:
        pkg: import name of the package (e.g. 'sklearn').
        min_ver: minimum acceptable version string.
        fail_msg: extra text appended to the FAIL message on ImportError.
    """
    mod = None
    try:
        mod = importlib.import_module(pkg)
        if pkg in {'PIL'}:
            # NOTE(review): PIL.VERSION was removed in Pillow >= 6;
            # __version__ is the modern attribute — confirm target Pillow.
            ver = mod.VERSION
        else:
            ver = mod.__version__
        # BUG FIX: the original compared with `==`, which rejected every
        # version newer than the minimum; a minimum-version check needs `>=`.
        if Version(ver) >= Version(min_ver):
            # BUG FIX: the original printed the global `lib` (a loop variable
            # defined elsewhere in the file) instead of the `pkg` argument.
            print(OK, "%s version %s is installed."
                  % (pkg, min_ver))
        else:
            print(FAIL, "%s version %s is required, but %s installed."
                  % (pkg, min_ver, ver))
    except ImportError:
        print(FAIL, '%s not installed. %s' % (pkg, fail_msg))
    return mod
# first check the python version
# NOTE(review): sys.version is the full interpreter banner (e.g.
# "3.9.7 (default, ...)"); LooseVersion comparison against "3.7" works for
# the leading digits but is fragile — sys.version_info would be more robust.
pyversion = Version(sys.version)
if pyversion >= "3.7":
    print(OK, "Python version is %s" % sys.version)
elif pyversion < "3.7":
    print(FAIL, "Python version 3.7 is required,"
          " but %s is installed." % sys.version)
else:
    print(FAIL, "Unknown Python version: %s" % sys.version)
print()
# Minimum required versions, keyed by *import* name ('sklearn' = scikit-learn).
requirements = {'numpy': "1.18.5", 'matplotlib': "3.2.2",'sklearn': "0.23.1",
                'pandas': "1.0.5",'xgboost': "1.1.1", 'shap': "0.35.0"}
# now the dependencies
for lib, required_version in list(requirements.items()):
    import_version(lib, required_version)
# -
import numpy as np
import pandas as pd
import matplotlib
import math
from matplotlib import pylab as plt
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import OneHotEncoder
from sklearn.preprocessing import StandardScaler
from sklearn.model_selection import GroupKFold
from sklearn.model_selection import GroupShuffleSplit
from sklearn.model_selection import StratifiedKFold
from sklearn.preprocessing import LabelEncoder
from sklearn.compose import ColumnTransformer
from sklearn.pipeline import make_pipeline
from sklearn.model_selection import GridSearchCV
from sklearn.metrics import f1_score
from sklearn.ensemble import RandomForestClassifier
# ## For a given national park, given a species of specific nativeness, order and family, can we predict abundance?
# **Step 1**
#
# Read data into the cell
# read in the data in this cell
# Species observations; drop the spurious trailing "Unnamed: 13" column
# produced by a dangling separator in the CSV.
df1 = pd.read_csv('../data/species.csv', low_memory=False)
df1 = df1.loc[:, df1.columns != 'Unnamed: 13']
# print(df1)
df2 = pd.read_csv('../data/parks.csv')
# print(df2)
df = df1.merge(df2,how='outer',on='Park Name') # merging on IDs present in any dataframe
df
# +
# df = df[(df['Record Status']!='Approved')]
df.shape
# -
# The following columns seem irrelevant to the model:
# 1) Species ID
#
#
#
# 3) Common Names
#
# 4) Park Code
#
# Drop identifier-like columns that carry no predictive signal.
df = df.loc[:, df.columns != 'Species ID']
df = df.loc[:, df.columns != 'Common Names']
df = df.loc[:, df.columns != 'Park Code']
# df = df.loc[:, df.columns != 'State']
print(df.shape)
# +
# df2 = df[(df['Category']!='Vascular Plant')]
# df2 = df2[(df2['Category']!='Nonvascular Plant')]
# +
# df.dropna(subset=['Abundance'], inplace=True) Not dropping, converting to "Missing"
#for x in df['Abundance']: # iterates through each value in each column
# if math.isnan(x)==True: # if value is NaN and the column index for missing values has not been registered
# print(i)
# #col_missing = i
# values = {'Abundance': 'Missing'} # value = values
# Keep rows with missing values but mark them with an explicit "Missing"
# category (mutates df in place across all columns).
df.fillna("Missing", inplace = True)
df.shape
df
# -
# Exclude the two plant categories, keeping the remaining (animal) categories.
df2 = df[(df['Category']!='Vascular Plant')]
df2 = df2[(df2['Category']!='Nonvascular Plant')]
df2.shape
# 1) Relative abundance
pd.value_counts(df2['Abundance']).plot.bar()
plt.ylabel('count')
plt.xlabel('Abundance')
plt.show()
# df = df[(df['Abundance']=="Uncommon")|(df['Abundance']=="Common")|(df['Abundance']=="Rare")|(df['Abundance']=="Occasional")|(df['Abundance']=="Abundant")]
# print(df.shape)
# print(df[(df['Category']=='Vascular Plant')].shape)
# Drop rows whose Abundance field holds mis-entered nativeness labels.
df2 = df2[(df2['Abundance']!='Native')]
df2 = df2[(df2['Abundance']!='Not Native')]
df2.shape
df = df2
df
# Cardinality of the main categorical features.
print("Park Name: ", pd.unique(df2['Park Name']).shape[0])
print("Category: ", pd.unique(df2['Category']).shape[0])
print("Order: ", pd.unique(df2['Order']).shape[0])
print("Family: ", pd.unique(df2['Family']).shape[0])
print("Scientific Names: ", pd.unique(df2['Scientific Name']).shape[0])
df = df2  # (redundant: df was already assigned df2 a few lines above)
# ## Data Preprocessing
# +
# Target is Abundance; stratify splits by park so every park appears in
# train/validation/test proportionally.
y = df2['Abundance']
X = df2.loc[:, df2.columns != 'Abundance'] # all other columns are features
z = df2['Park Name']
X_train, X_other, y_train, y_other = train_test_split(X,y,train_size = 0.6,stratify=z,random_state=42) # first split
print('training set:',X_train.shape, y_train.shape) # 60% of points are in train
# z = X_train['Park Name']
# print(z.value_counts(normalize=True))
z_other = X_other['Park Name']
X_val, X_test, y_val, y_test = train_test_split(X_other,y_other,train_size = 0.5,stratify=z_other,random_state=42) # second split
print('validation set:',X_val.shape, y_val.shape) # 20% of points are in validation
print('test set:',X_test.shape, y_test.shape) # 20% of points are in test
# z_other = X_other['Park Name']
# print(z.value_counts(normalize=True))
# preprocess categorical variables - used OneHotEncoder
# (fit on train only, then transform val/test to avoid leakage)
onehot_ftrs = ['Category', 'Order', 'Family', 'Scientific Name', 'Record Status', 'Occurrence', 'Nativeness', 'Seasonality', 'Conservation Status', 'State'] # initialize the encoder
# NOTE(review): `sparse=` and `get_feature_names` are the sklearn <1.2 API,
# matching the 0.23.1 pin above; newer sklearn uses `sparse_output=` and
# `get_feature_names_out`.
enc = OneHotEncoder(sparse=False,handle_unknown='ignore')
enc.fit(X_train[onehot_ftrs]) # fit the training data
print(' feature names:',enc.get_feature_names(onehot_ftrs).shape)
onehot_train = enc.transform(X_train[onehot_ftrs]) # transform X_train
onehot_val = enc.transform(X_val[onehot_ftrs]) # transform X_val
onehot_test = enc.transform(X_test[onehot_ftrs]) # transform X_test
# preprocess continuous variables - used StandardScaler
std_ftrs = ['Acres', 'Latitude', 'Longitude']
scaler = StandardScaler()
scaler_train = scaler.fit_transform(X_train[std_ftrs]) # fit the training data, transform X_train
scaler_val = scaler.transform(X_val[std_ftrs]) # transform X_val
scaler_test = scaler.transform(X_test[std_ftrs]) # transform X_test
print(' ', scaler_test[100])
# preprocess Y - used LabelEncoder
le = LabelEncoder()
y_train_le = le.fit_transform(y_train)
y_val_le = le.transform(y_val)
y_test_le = le.transform(y_test)
print(y_test_le)
print(le.classes_)
# -
# ## Evaluation Metrics
# Class balance of the target (motivates the weighted-F1 metric below).
print(y.value_counts(normalize=True))
# Thus it is not a very balanced dataset. So we opt for the f_beta score.
# Module-level bookkeeping lists, appended to by MLpipe_stratsplit_f1.
train_scores = []
val_scores = []
test_scores = []
best_models = []
def MLpipe_stratsplit_f1(X,y,preprocessor,ML_algo,param_grid, i):
    '''
    Run one stratified 60/20/20 train/validation/test split and fit
    ``ML_algo`` (wrapped in ``preprocessor``) with GridSearchCV, scoring by
    weighted F1.

    Parameters:
        X, y: feature matrix and target labels.
        preprocessor: sklearn ColumnTransformer applied before the model.
        ML_algo: estimator tuned over ``param_grid``.
        param_grid: GridSearchCV parameter grid.
        i: run index; random_state = 25 * i varies the split per run.

    Returns:
        (best_estimator, train_f1, val_f1, test_f1).

    Side effects:
        Appends the three scores and the best estimator to the module-level
        lists train_scores / val_scores / test_scores / best_models.

    NOTE(review): stratification uses the module-level ``z`` ('Park Name'
    series) rather than a parameter — the function only works after ``z``
    has been defined globally.
    '''
    # 60/40 split, stratified on park name.
    X_train, X_other, y_train, y_other = train_test_split(X,y,train_size = 0.6,stratify=z,random_state=25*i) # first split
    print('training set:',X_train.shape, y_train.shape) # 60% of points are in train
    # Split the remainder 50/50 into validation and test, again by park.
    z_other = X_other['Park Name']
    X_val, X_test, y_val, y_test = train_test_split(X_other,y_other,train_size = 0.5,stratify=z_other,random_state=25*i) # second split
    print('validation set:',X_val.shape, y_val.shape) # 20% of points are in validation
    print('test set:',X_test.shape, y_test.shape) # 20% of points are in test
    # Encode the target labels as integers (fit on train only).
    le = LabelEncoder()
    y_train_le = le.fit_transform(y_train)
    y_val_le = le.transform(y_val)
    y_test_le = le.transform(y_test)
    # Preprocessing + model in a single pipeline; tune hyperparameters by CV
    # on weighted F1 (the classes are imbalanced).
    pipe = make_pipeline(preprocessor, ML_algo)
    grid = GridSearchCV(pipe, param_grid=param_grid,scoring = "f1_weighted",
                    return_train_score = True, n_jobs=-1, verbose=True)
    grid.fit(X_train, y_train_le)
    # Weighted F1 of the refit best estimator on each split.
    # (The original also built a DataFrame of cv_results_ and several unused
    # feature-name lists; that dead code has been removed.)
    y_pred = grid.predict(X_train)
    train_score_ind = f1_score(y_train_le, y_pred, average = 'weighted')
    y_pred = grid.predict(X_val)
    val_score_ind = f1_score(y_val_le, y_pred, average = 'weighted')
    y_pred = grid.predict(X_test)
    test_score_ind = f1_score(y_test_le, y_pred, average = 'weighted')
    # Record results in the module-level bookkeeping lists.
    train_scores.append(train_score_ind)
    val_scores.append(val_score_ind)
    test_scores.append(test_score_ind)
    best_model_each = grid.best_estimator_
    best_models.append(best_model_each)
    return best_model_each, train_score_ind, val_score_ind, test_score_ind
# +
y = df['Abundance']
X = df.loc[:, df.columns != 'Abundance'] # all other columns are features
z = df['Park Name']
# collect all the encoders
# NOTE(review): onehot_ftrs and std_ftrs come from the earlier preprocessing
# cell — a hidden-state dependency on notebook execution order.
preprocessor = ColumnTransformer(
    transformers=[
        ('onehot', OneHotEncoder(sparse=False,handle_unknown='ignore'), onehot_ftrs),
        ('std', StandardScaler(), std_ftrs)])
ML_algo = RandomForestClassifier()
# Hyperparameter grid: feature subsampling, tree depth, and RNG seeds.
param_grid = {'randomforestclassifier__max_features': [0.5, 0.6, 0.7, 0.8, 0.9, 1],
              'randomforestclassifier__max_depth': [1, 3, 10, 30, 70, 100],
              'randomforestclassifier__random_state': [25, 50, 75, 100, 125, 150, 175, 200, 225, 250]}
# -
# Run the pipeline for ten different random splits.  The original notebook
# repeated this call/print cell ten times with i = 1..10; the loop below is
# behaviorally equivalent and removes the copy-paste.
for run_id in range(1, 11):
    model_output, train_result, val_result, test_result = MLpipe_stratsplit_f1(X,y,preprocessor,ML_algo,param_grid, run_id)
    # Cumulative score lists after each run (they grow inside the pipeline).
    print(train_scores)
    print(val_scores)
    print(test_scores)
    print(best_models)
#print("Mean of scores: ", np.mean(scores))
#print("Standard deviation of scores: ", np.std(scores))
# Aggregate performance across the ten random splits.
print("Mean of train scores: ", np.mean(train_scores), ", SD: ", np.std(train_scores))
print("Mean of validation scores: ", np.mean(val_scores), ", SD: ", np.std(val_scores))
print("Mean of test scores: ", np.mean(test_scores), ", SD: ", np.std(test_scores))
# +
x = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
# NOTE(review): `y` is reused here for plot data, shadowing the target Series
# defined earlier — rename if the target is still needed downstream.
y = train_scores
plt.figure(figsize = (8, 6))
plt.plot(x, y, c = 'black', label = 'Train score')
#plt.errorbar(x, y, yerr = np.std(y), c = 'black')
y = val_scores
plt.plot(x, y, c = 'blue', label = 'Validation score')
#plt.errorbar(x, y, yerr = np.std(y), c = 'blue')
y = test_scores
plt.plot(x, y, c = 'green', label = 'Test score')
#plt.errorbar(x, y, yerr = np.std(y), c = 'green')
plt.xlabel('n_run', fontsize = 14)
plt.ylabel('Score', fontsize = 14)
# NOTE(review): semilogx() puts the run index (1..10) on a log axis — confirm
# this is intended rather than a leftover from another plot.
plt.semilogx()
plt.xticks(fontsize = 12)
plt.yticks(fontsize = 12)
plt.legend()
plt.title('Scores for RandomForestClassifier', fontsize = 14)
plt.tight_layout()
# plt.savefig('figures/lasso_coefs.png',dpi=300)
plt.show()
| src/Evaluation metrics, ML algorithms, hyperparameter tuning - RandomForestClassifier - Using z to stratify.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Tests of Vega/Vega-Lite Magics
# import os, sys; sys.path.insert(0, os.path.abspath('..'))
# %load_ext altair
import pandas as pd
# +
# For notebook (not jupyterlab) enable the vega and vegalite renderers
# import altair as alt
# alt.vegalite.v2.renderers.enable('notebook')
# alt.vega.v3.renderers.enable('notebook')
# -
# %%vegalite
{
"$schema": "https://vega.github.io/schema/vega-lite/v2.json",
"description": "A simple bar chart with embedded data.",
"data": {
"values": [
{"a": "A","b": 28}, {"a": "B","b": 55}, {"a": "C","b": 43},
{"a": "D","b": 91}, {"a": "E","b": 81}, {"a": "F","b": 53},
{"a": "G","b": 19}, {"a": "H","b": 87}, {"a": "I","b": 52}
]
},
"mark": "bar",
"encoding": {
"x": {"field": "a", "type": "ordinal"},
"y": {"field": "b", "type": "quantitative"}
}
}
import pandas as pd
vgl_data = pd.DataFrame({'a': list('ABCDEFGHI'),
'b': [28, 55, 43, 91, 81, 53, 19, 87, 52]})
# %%vegalite vgl_data
{
"$schema": "https://vega.github.io/schema/vega-lite/v2.json",
"description": "A simple bar chart with embedded data.",
"mark": "bar",
"encoding": {
"x": {"field": "a", "type": "ordinal"},
"y": {"field": "b", "type": "quantitative"}
}
}
# %%vega
{
"$schema": "https://vega.github.io/schema/vega/v3.0.json",
"width": 400,
"height": 200,
"padding": 5,
"data": [
{
"name": "table",
"values": [
{"category": "A", "amount": 28},
{"category": "B", "amount": 55},
{"category": "C", "amount": 43},
{"category": "D", "amount": 91},
{"category": "E", "amount": 81},
{"category": "F", "amount": 53},
{"category": "G", "amount": 19},
{"category": "H", "amount": 87}
]
}
],
"signals": [
{
"name": "tooltip",
"value": {},
"on": [
{"events": "rect:mouseover", "update": "datum"},
{"events": "rect:mouseout", "update": "{}"}
]
}
],
"scales": [
{
"name": "xscale",
"type": "band",
"domain": {"data": "table", "field": "category"},
"range": "width",
"padding": 0.05,
"round": true
},
{
"name": "yscale",
"domain": {"data": "table", "field": "amount"},
"nice": true,
"range": "height"
}
],
"axes": [
{ "orient": "bottom", "scale": "xscale" },
{ "orient": "left", "scale": "yscale" }
],
"marks": [
{
"type": "rect",
"from": {"data":"table"},
"encode": {
"enter": {
"x": {"scale": "xscale", "field": "category"},
"width": {"scale": "xscale", "band": 1},
"y": {"scale": "yscale", "field": "amount"},
"y2": {"scale": "yscale", "value": 0}
},
"update": {
"fill": {"value": "steelblue"}
},
"hover": {
"fill": {"value": "red"}
}
}
},
{
"type": "text",
"encode": {
"enter": {
"align": {"value": "center"},
"baseline": {"value": "bottom"},
"fill": {"value": "#333"}
},
"update": {
"x": {"scale": "xscale", "signal": "tooltip.category", "band": 0.5},
"y": {"scale": "yscale", "signal": "tooltip.amount", "offset": -2},
"text": {"signal": "tooltip.amount"},
"fillOpacity": [
{"test": "datum === tooltip", "value": 0},
{"value": 1}
]
}
}
}
]
}
vg_data = pd.DataFrame({'category': list('ABCDEFGH'),
'amount': [28, 55, 43, 91, 81, 53, 19, 87]})
# %%vega table:vg_data
{
"$schema": "https://vega.github.io/schema/vega/v3.0.json",
"width": 400,
"height": 200,
"padding": 5,
"signals": [
{
"name": "tooltip",
"value": {},
"on": [
{"events": "rect:mouseover", "update": "datum"},
{"events": "rect:mouseout", "update": "{}"}
]
}
],
"scales": [
{
"name": "xscale",
"type": "band",
"domain": {"data": "table", "field": "category"},
"range": "width",
"padding": 0.05,
"round": true
},
{
"name": "yscale",
"domain": {"data": "table", "field": "amount"},
"nice": true,
"range": "height"
}
],
"axes": [
{ "orient": "bottom", "scale": "xscale" },
{ "orient": "left", "scale": "yscale" }
],
"marks": [
{
"type": "rect",
"from": {"data":"table"},
"encode": {
"enter": {
"x": {"scale": "xscale", "field": "category"},
"width": {"scale": "xscale", "band": 1},
"y": {"scale": "yscale", "field": "amount"},
"y2": {"scale": "yscale", "value": 0}
},
"update": {
"fill": {"value": "steelblue"}
},
"hover": {
"fill": {"value": "red"}
}
}
},
{
"type": "text",
"encode": {
"enter": {
"align": {"value": "center"},
"baseline": {"value": "bottom"},
"fill": {"value": "#333"}
},
"update": {
"x": {"scale": "xscale", "signal": "tooltip.category", "band": 0.5},
"y": {"scale": "yscale", "signal": "tooltip.amount", "offset": -2},
"text": {"signal": "tooltip.amount"},
"fillOpacity": [
{"test": "datum === tooltip", "value": 0},
{"value": 1}
]
}
}
}
]
}
table = vg_data
# %%vega table
{
"$schema": "https://vega.github.io/schema/vega/v3.0.json",
"width": 400,
"height": 200,
"padding": 5,
"signals": [
{
"name": "tooltip",
"value": {},
"on": [
{"events": "rect:mouseover", "update": "datum"},
{"events": "rect:mouseout", "update": "{}"}
]
}
],
"scales": [
{
"name": "xscale",
"type": "band",
"domain": {"data": "table", "field": "category"},
"range": "width",
"padding": 0.05,
"round": true
},
{
"name": "yscale",
"domain": {"data": "table", "field": "amount"},
"nice": true,
"range": "height"
}
],
"axes": [
{ "orient": "bottom", "scale": "xscale" },
{ "orient": "left", "scale": "yscale" }
],
"marks": [
{
"type": "rect",
"from": {"data":"table"},
"encode": {
"enter": {
"x": {"scale": "xscale", "field": "category"},
"width": {"scale": "xscale", "band": 1},
"y": {"scale": "yscale", "field": "amount"},
"y2": {"scale": "yscale", "value": 0}
},
"update": {
"fill": {"value": "steelblue"}
},
"hover": {
"fill": {"value": "red"}
}
}
},
{
"type": "text",
"encode": {
"enter": {
"align": {"value": "center"},
"baseline": {"value": "bottom"},
"fill": {"value": "#333"}
},
"update": {
"x": {"scale": "xscale", "signal": "tooltip.category", "band": 0.5},
"y": {"scale": "yscale", "signal": "tooltip.amount", "offset": -2},
"text": {"signal": "tooltip.amount"},
"fillOpacity": [
{"test": "datum === tooltip", "value": 0},
{"value": 1}
]
}
}
}
]
}
import numpy as np
rand = np.random.RandomState(42)
ts_data = pd.DataFrame({'date': pd.date_range('2017', freq='D', periods=365),
'price': rand.randn(365).cumsum()})
# %%vegalite ts_data
{
"mark": "line",
"encoding": {
"x": {"field": "date", "type": "temporal", "axis": {"format": "%b %d"}},
"y": {"field": "price", "type": "quantitative"}
},
"width": 600
}
| notebooks/MagicTest.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 2
# language: python
# name: python2
# ---
import matplotlib.pyplot as plt
import numpy as np
from rpsavgperf import rpsavgperf
from scipy.signal import savgol_filter
import kk_abock
# Discharge number and averaging window (times presumably in seconds —
# TODO confirm units).
shotnr = 35900
tBegin = 2.70
tEnd = 2.80
tcentral = (tBegin+tEnd)/2.0  # window centre
dtime = (tEnd-tBegin)/2.0     # window half-width
# Averaged profile over [tcentral - dtime, tcentral + dtime] for side 1.
sh1 = rpsavgperf(shotnr, time=tcentral,dt=dtime, side=1)
# +
plt.figure(dpi=150)
# Drop NaN radii along with the matching density and radial-error samples.
msk = ~np.isnan(sh1.r)
shr = sh1.r[msk]
shn = sh1.n[msk]
shdr = sh1.dr[msk]
plt.plot(shr, shn)
# Smooth the radius with a Savitzky-Golay filter (window 7, polyorder 3).
savshr = savgol_filter(shr, 7, 3)
plt.plot(savshr, shn)
#plt.fill_betweenx(shn, savshr-sh1.dr, savshr+sh1.dr, alpha=0.3)
# -
# Map (R, z) positions to normalized poloidal flux rho_pol using the EQH
# equilibrium reconstruction for this shot.
eq = kk_abock.kk()
eq.Open(shotnr, diag='EQH')
zl = 0.14  # z position of the measurement — presumably metres; TODO confirm
rhol = eq.Rz_to_rhopol(tcentral, savshr, np.ones_like(savshr)*zl)
# Upper/lower rho bounds from the +/- radial uncertainty.
rhol_up = eq.Rz_to_rhopol(tcentral, savshr+shdr, np.ones_like(savshr)*zl)
rhol_dn = eq.Rz_to_rhopol(tcentral, savshr-shdr, np.ones_like(savshr)*zl)
eq.Close()
plt.plot(rhol, shn)
plt.fill_betweenx(shn, rhol_dn, rhol_up, alpha=0.3)
# Save the mapped profile with its error band as comma-separated columns.
fname = './Files/Ref_O.' + str(shotnr)
np.savetxt(fname, np.c_[rhol, shn, rhol_dn, rhol_up], delimiter=',')
| 35900_Avg_profiles.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 2
# language: python
# name: python2
# ---
import numpy as np
import matplotlib.pyplot as plt
import bayesloop as bl
import seaborn as sns
from scipy.optimize import fmin
plt.style.use('classic') # bayesloop enables ggplot style, switch back to classic.
# %matplotlib inline
densities_per_category = {}  # category -> time-averaged posterior grid
# +
# Read all data files of one category.  (Python 2 notebook.)
import os
category = 'fibro'
downsample_steps = {'alive': 2, 'fibro': 5, 'plastic': 5} # TODO: Rename var.
data_dir = 'data/JulianTrajs/' + category
#data_dir = 'data/generated/' + category
trajectories = []
filenames = os.listdir(data_dir)
for filename in filenames:
    trajectory = np.genfromtxt(os.path.join(data_dir, filename))
    trajectories.append(trajectory[::downsample_steps[category]]) # Original trajectories need to be downsampled.
    #trajectories.append(trajectory) # Generated trajectories are already sampled at 5 min.
trajectories = np.array(trajectories)
print "Category {}: Found {} files, created {} trajectories".format(category, len(filenames), len(trajectories))
# -
def to_velocities(x, multiple_trajectories=False):
    """Convert sampled positions to velocities (5-minute sampling step).

    With ``multiple_trajectories=True`` the input is a 2-D array of
    trajectories and differences are taken along axis 1; otherwise a single
    trajectory is differenced along axis 0.
    """
    diff_axis = 1 if multiple_trajectories else 0
    return np.diff(x, axis=diff_axis) / 5.
# Work with velocities rather than positions from here on.
trajectories = to_velocities(trajectories, multiple_trajectories=True)
# +
timeAveragedPosteriors = []
# Set up bayesloop study and define model parameters.
S = bl.Study()
M = bl.observationModels.ScaledAR1() # process that models data locally
S.setObservationModel(M)
S.setGrid([[-1, 1, 200], [0, 1.5, 200]]) # discrete grid for parameters
K = bl.transitionModels.CombinedTransitionModel(bl.tm.GaussianRandomWalk(sigma=0.03, param='correlation coefficient'), # gradual variations of persistence
    bl.tm.GaussianRandomWalk(sigma=0.03, param='standard deviation'), # gradual variations of cell speed
    bl.tm.RegimeSwitch(log10pMin=-7)) # abrupt variations of both parameters
S.setTransitionModel(K)
# Load trajectories and fit model to them.
for i, trajectory in enumerate(trajectories):#[::downsample_steps[category]]):
    print '-------------------'
    print 'Trajectory {} of {}'.format(i+1, len(trajectories))
    print '-------------------'
    S.loadData(trajectory)
    S.fit()
    # Average parameter densities over all time steps.
    timeAveragedPosteriors.append(np.mean(S.posteriorSequence, axis=0))
timeAveragedPosteriors = np.array(timeAveragedPosteriors)
# -
# Helper function to make contour plots of densities.
def densityRegion(posterior, alpha, x0=0.0001):
return fmin(lambda h: (np.sum(posterior[posterior > h]) - alpha)**2, x0, xtol=10**-8, ftol=10**-8)[0]
# +
# Plot parameter densities for single category.
# Average the per-trajectory time-averaged posteriors into one grid.
to_plot = np.mean(timeAveragedPosteriors, axis=0)
plt.figure(figsize=(4, 4))
# Contours at the 10/25/50% highest-density levels.
plt.contour(to_plot.T, [densityRegion(to_plot, alpha) for alpha in [0.10, 0.25, 0.50]],
            extent=[-1, 1, 0, 1.5], linewidths=1, colors='k', alpha=0.7)
plt.imshow((to_plot.T)**1, origin=0, extent=[-1, 1, 0, 1.5], aspect='auto',
           cmap=sns.light_palette('blue', as_cmap=True))#sns.light_palette('red', as_cmap=True))
plt.xlabel('Persistence')
plt.ylabel(u'Cell speed (\u00b5m / min)')
plt.xlim(-0.6, 1)
plt.ylim(0, 1.1)
#plt.savefig('plots/density_plastic.png')
# -
densities_per_category[category] = np.mean(timeAveragedPosteriors, axis=0)
# +
# Plot parameter densities for all categories in the same plot.
plt.figure(figsize=(4, 4))
# Start from a white RGB image; subtracting a category's normalized density
# from two colour channels tints the image with the remaining colour.
rgb_arr = np.ones((200, 200, 3))
max_intensity = 0.7
# 'alive' subtracted from red and green channels -> blue tint.
rgb_arr[..., 0] -= max_intensity * densities_per_category['alive'].T / np.max(densities_per_category['alive'])
rgb_arr[..., 1] -= max_intensity * densities_per_category['alive'].T / np.max(densities_per_category['alive'])
# 'plastic' subtracted from green and blue channels -> red tint.
rgb_arr[..., 1] -= max_intensity * densities_per_category['plastic'].T / np.max(densities_per_category['plastic'])
rgb_arr[..., 2] -= max_intensity * densities_per_category['plastic'].T / np.max(densities_per_category['plastic'])
# 'fibro' subtracted from red and blue channels -> green tint.
rgb_arr[..., 0] -= max_intensity * densities_per_category['fibro'].T / np.max(densities_per_category['fibro'])
rgb_arr[..., 2] -= max_intensity * densities_per_category['fibro'].T / np.max(densities_per_category['fibro'])
rgb_arr = rgb_arr.clip(min=0)  # overlapping densities can push channels below 0
plt.imshow(rgb_arr, origin=0, extent=[-1, 1, 0, 1.5], interpolation='none', aspect='auto')
for density in densities_per_category.values():
    plt.contour(density.T, [densityRegion(density, alpha) for alpha in [0.10, 0.25, 0.50]],
                extent=[-1, 1, 0, 1.5], linewidths=1, colors='k', alpha=0.7)
plt.xlabel('Persistence')
plt.ylabel(u'Cell speed (\u00b5m / min)')
plt.xlim(-0.6, 1)
plt.ylim(0, 1.1)
# Create legend from custom artist/label lists
# plt.legend([plt.Rectangle((0, 0), 0, 0, color=color) for color in ((0, 0, 1), (1, 0, 0), (0, 1, 0)) ],
#            ['Collagen', 'Plastic', 'Fibronectin'], frameon=False, borderaxespad=3)
# plt.savefig('plots/blank_density.png', dpi=300)
# -
# Mean parameter values over time (from the most recently fitted trajectory).
plt.plot(S.posteriorMeanValues[0], label='Mean Persistence')  # first parameter (persistence)
plt.plot(S.posteriorMeanValues[1], label='Mean Activity')  # second parameter (cell speed)
plt.legend()
# compute mean persistence of cell from parameter distribution and grid
# NOTE(review): `averagePosterior` is not defined anywhere in this chunk —
# presumably created in an earlier cell; confirm before running.
print 'Mean persistence = {}'.format(np.sum(averagePosterior*S.grid[0]))
| lstm-biology/statistics.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/anjali0503/RECURRENT_NEURAL_NETWORK/blob/main/IMDB_DATASET_MOVIES_REVIEWS.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + colab={"base_uri": "https://localhost:8080/"} id="iK7Fez3J7Wtj" outputId="1f37c6e8-e67e-40f0-96cc-f1c7b610d87f"
# !pip install tensorflow-hub
# !pip install tensorflow-datasets
# + colab={"base_uri": "https://localhost:8080/"} id="qZIYfTb077Al" outputId="9db9c937-2120-46c2-d80d-baf856eddfb8"
#IMPORTING DEPENDENCIES
import os
import numpy as np
import tensorflow as tf
import tensorflow_hub as hub
import tensorflow_datasets as tfds
print("Version: ", tf.__version__)
print("Eager mode: ", tf.executing_eagerly())
print("Hub version: ", hub.__version__)
print("GPU is", "available" if tf.config.list_physical_devices("GPU") else "NOT AVAILABLE")
# + [markdown] id="Izb1KCw78J2V"
# #Download the IMDB dataset
#
# + id="SH-NgJm68Bs3"
from tensorflow.python.keras.datasets import imdb
# + [markdown] id="wb9-MDCg8z5j"
# Loading Data
# + colab={"base_uri": "https://localhost:8080/", "height": 306, "referenced_widgets": ["c2ccded8aa3c4754a16f99762728a395", "526ad2321885409d9b39f09b4ecef5d5", "<KEY>", "<KEY>", "<KEY>", "56310f405a8441d6a02ae30f5604b648", "<KEY>", "<KEY>", "42f35ffa46154d2c8671b13dcce89a63", "<KEY>", "<KEY>", "<KEY>", "<KEY>", "<KEY>", "bfab79a7721d494eb88db03b6e346ff0", "1230da000c7c44b2b3d9278b0c88cde8", "<KEY>", "<KEY>", "69df185728b844d583a9f909d7a4d467", "640e10c47ec346a7bcdba0255b10eb1d", "def99eaa9f7d452a9c74f1738c6ba9c0", "<KEY>", "eac11a7954de42ccbbcaa7beec2cceb7", "6b6a77fc9cc04d958d07551423a6f5eb", "8c943915744e4d57ade895d0da50dab3", "a4d467af12b94b6e8dc4fe6da89d29f5", "c00fbe2369b24ecabe193ec3d6236e4c", "9d84ac6f0f104730ae3b972e46286df8", "<KEY>", "<KEY>", "61d894e9aeaa49798d42bf5e20ade25e", "<KEY>", "76927660c43947e0b0db6b9df9a0c583", "8ba719ae7c884a1b8a9fcaf545b4a7fe", "3e774d3d763b43ab9f5392b32824c93e", "abefc59074664537a4d05dc33adb0b6a", "<KEY>", "<KEY>", "337d3e61361547459f267e0e475b149c", "<KEY>", "<KEY>", "<KEY>", "<KEY>", "<KEY>", "4aff083504ea435d83042f6a2c26103c", "8469c5a1460f424393b240810d4e2a58", "c1d74ab1bc6449ea9b1154953361707e", "4d59e5333c3f4202be4e33fc1334f5af", "9e2a0460b3584c6eb485d4a737288f81", "8752b6ef20564bce8626d5f53828e981", "2fd1f4730a01486ebbe9e3170552b924", "<KEY>", "<KEY>", "d3e21d3aed714f47a1ecf9a300c4d174", "<KEY>", "4bec516d622f471ca46b8a590198fb28", "91cb192a68f3435d9e3a6893d9c702ad", "0db0a680336c499491c304c0ebf7f383", "2cea634c646443c7b88aacfca166acef", "7ccf4e7317be4735a03830f23e2b50c5", "<KEY>", "<KEY>", "baae59820adf4fe7bde508962fba4fa8", "<KEY>", "<KEY>", "89d17ed9410b4c4a84acfca5ac93222c", "<KEY>", "<KEY>", "<KEY>", "<KEY>", "ef35784ea9bd476db12eb04f7caf8a69", "<KEY>", "<KEY>", "09cbffaf97fc48e1af302d1fa42fe574", "c2c7c1443d77471c9a928c60ec78cffd", "5a33b37d60fc4776b15dac21ac6ea997", "34701314e4244afdbc5e97e8dc0d138f", "7adb27718d25436391780d7373f139be", "5a9c8bf2a7914ffe98dda61568816b75", "<KEY>", "<KEY>", "<KEY>", "<KEY>", 
"f0063e57a7cc4f6ca5b15c1e74e0d1c8", "<KEY>", "41835328e0314adfacf5d6ff7592c543", "20d91610f34b475ab1ccf7ba46fbffb8", "f9756188f95d4ac1a564e60dbde07836"]} id="HfDhfl0s90Wr" outputId="19e08af0-68a1-44e5-af03-ae06239b9292"
# Split the training set into 60% and 40% to end up with 15,000 examples
# for training, 10,000 examples for validation and 25,000 examples for testing.
train_data, validation_data, test_data = tfds.load(
name="imdb_reviews",
split=('train[:60%]', 'train[60%:]', 'test'),
as_supervised=True)
# + colab={"base_uri": "https://localhost:8080/"} id="6foJB7Vm-RhC" outputId="0ebed294-7600-417f-cdd6-fa3e6b0333a5"
train_examples_batch, train_labels_batch = next(iter(train_data.batch(10)))
train_examples_batch
# + colab={"base_uri": "https://localhost:8080/"} id="pQ0PsCVT-Rcl" outputId="70fa2656-3eb2-407b-df94-06993557fd89"
train_labels_batch
# + colab={"base_uri": "https://localhost:8080/"} id="Ar2t0iBa-RY8" outputId="c37feb36-6f76-4494-fc4f-412bc0e9060d"
from keras.datasets import imdb
# Load the data, keeping only 10,000 of the most frequently occuring words
(train_data, train_labels), (test_data, test_labels) = imdb.load_data(num_words = 10000)
# + colab={"base_uri": "https://localhost:8080/"} id="5O0c1dEY-RUk" outputId="1dd4b77b-2758-4e3a-c607-0a3c839ab4ec"
print(type([max(sequence) for sequence in train_data]))
# + colab={"base_uri": "https://localhost:8080/"} id="yUACW-qp-RMl" outputId="3028a9c1-6067-44d1-c502-989a74e40f15"
# Find the maximum of all max indexes
max([max(sequence) for sequence in train_data])
# + [markdown] id="BsMrmt4MFSIl"
# This is how the training data looks
# + colab={"base_uri": "https://localhost:8080/"} id="kL5afM81-RCe" outputId="5f3c5a37-a6a4-43c4-9681-8e3917c7a9ab"
print(train_data[0])
# + id="C59MlTDSFw8H"
class_names = ['Negative','Positive'] # 0 FOR NEGATIVE AND 1 FOR POSITIVE
# + [markdown] id="BxY5YH9zME0u"
# DECODING THE REVIEWS
# + colab={"base_uri": "https://localhost:8080/"} id="Ls3X9JazFw5W" outputId="df3fa4d8-830c-4f42-a5c9-5c187f165314"
# step 1: load the dictionary mappings from word to integer index
word_index = imdb.get_word_index()
# + id="R0SJuw-eFw1J"
# step 2: reverse word index to map integer indexes to their respective words
reverse_word_index = dict([(value, key) for (key, value) in word_index.items()])
# + colab={"base_uri": "https://localhost:8080/", "height": 100} id="V5gkdoglFwyV" outputId="f00e1165-bc0c-402e-b4d6-1400088abc5e"
# Step 3: decode the review, mapping integer indices to words
# indices are off by 3 because 0, 1, and 2 are reserved indices for "padding", "Start of sequence" and "unknown"
decoded_review = ' '.join([reverse_word_index.get(i-3, '?') for i in train_data[1]])
decoded_review
# + id="dOy_2KhZFwu3"
def decode(reviews):
    """Translate a sequence of word indices back into review text.

    Uses the module-level `reverse_word_index` mapping built above.

    BUGFIX: the original body did `text += ''` after each word — a no-op,
    so all words ran together with no separators; words are now joined
    with single spaces. Unknown indices are rendered as '?' instead of
    raising KeyError (matching the `.get(..., '?')` fallback used for
    `decoded_review` earlier in this notebook).
    """
    words = []
    for i in reviews:
        words.append(reverse_word_index.get(i, '?'))
    return ' '.join(words)
# + colab={"base_uri": "https://localhost:8080/", "height": 100} id="ViF9f_eyFwrE" outputId="3e4d571d-7861-49a6-f344-dceff439c310"
decode(train_data[0])
# + colab={"base_uri": "https://localhost:8080/"} id="O9FPRaoFFwlk" outputId="8f3e2930-f762-4995-aabc-4133c69f633b"
def show_len():
    """Print the lengths of the first two training and test examples.

    Relies on the module-level `train_data` / `test_data` loaded above.
    BUGFIX: corrected the 'lenght' typos in the printed messages.
    """
    print('length of the 1st training example :', len(train_data[0]))
    print('length of the 2nd training example :', len(train_data[1]))
    print('length of the 1st test eg : ', len(test_data[0]))
    print('length of the 2nd test eg :', len(test_data[1]))
show_len()
# + [markdown] id="5yfwA_J2xSdd"
# #Preparing the Data
#
# + [markdown] id="-74YdVs-xbyt"
# As we cannot feed a list of integers into our deep neural network. We will need to convert them into tensors.
#
#
# To prepare our data, we will One-hot Encode our lists and turn them into vectors of 0’s and 1’s. This would blow up all of our sequences into 10,000-dimensional vectors containing 1 at all indices corresponding to integers present in that sequence. This vector will have element 0 at all index, which is not present in the integer sequence.
# Simply put, the 10,000-dimensional vector corresponding to each review will have
# Every index corresponding to a word
# Every index with value 1, is a word that is present in the review and is denoted by its integer counterpart.
# Every index containing 0 is a word not present in the review.
# + id="lCH4ewvz-Q0k"
# Vectorize input data
import numpy as np
def vectorize_sequences(sequences, dimension=10000):
    """One-hot encode integer sequences into a (len(sequences), dimension)
    float matrix: row i has a 1 at every column whose index appears in
    sequences[i], and 0 everywhere else."""
    encoded = np.zeros((len(sequences), dimension))
    for row, sequence in enumerate(sequences):
        # fancy indexing sets every listed column of this row at once
        encoded[row, sequence] = 1
    return encoded
# Vectorize training Data
X_train = vectorize_sequences(train_data)
# Vectorize testing Data
X_test = vectorize_sequences(test_data)
# + colab={"base_uri": "https://localhost:8080/"} id="7LFJxrqX3ElP" outputId="779d5326-7fd7-4db3-cfff-878e364d7af4"
X_train[0]
# + colab={"base_uri": "https://localhost:8080/"} id="AI1BWLgk3KVg" outputId="eccc3833-0f33-46f0-8be6-e4a6118efbfd"
X_train.shape
# + id="sPpHg2ZF3N4t"
# Vectorize labels
y_train = np.asarray(train_labels).astype('float32')
y_test = np.asarray(test_labels).astype('float32')
# + [markdown] id="_OHbhy4J3tjG"
# Model Training
# + id="yFr-Zq8V3YLk"
from keras import models
from keras import layers
model = models.Sequential()
model.add(layers.Dense(16, activation='relu', input_shape=(10000,)))
model.add(layers.Dense(16, activation='relu'))
model.add(layers.Dense(1, activation='sigmoid'))
# + id="y2Lcxa9I3wKP"
# compiling model
model.compile(loss ='binary_crossentropy',
optimizer = 'adam',
metrics = ['accuracy'])
# + colab={"base_uri": "https://localhost:8080/"} id="qTkf2LQS358m" outputId="f6033006-def6-4e2e-8348-374471851969"
model.summary()
# + id="9DvwNd0hNpm3"
from tensorflow.python.keras.callbacks import LambdaCallback
# + colab={"base_uri": "https://localhost:8080/"} id="_xX16pm4OIkv" outputId="c9ba7f3b-16fa-4408-aa2f-3b25cc0b05ca"
simple_log =LambdaCallback(on_epoch_end = lambda e,l : print(e,end= '.') )
E = 20
h =model.fit(X_train,y_train,validation_split = 0.2,
epochs = E,
callbacks = [simple_log],
verbose = False
)
# + colab={"base_uri": "https://localhost:8080/"} id="ClkptrZVPAqO" outputId="36f103ad-cf72-4d39-a412-888a040ab6f7"
h
# + id="J-mC2rAOQBiV"
# Input for Validation
X_val = X_train[:10000]
partial_X_train = X_train[10000:]
# Labels for validation
y_val = y_train[:10000]
partial_y_train = y_train[10000:]
# + colab={"base_uri": "https://localhost:8080/"} id="0Op70u_uQ0MC" outputId="ba7617f3-1ac3-4922-fbd3-5030138865ea"
history = model.fit(partial_X_train,
partial_y_train,
epochs=20,
batch_size=512,
validation_data=(X_val, y_val))
# + [markdown] id="LNjteq9kT0lY"
# Plotting the prediction and evalution
# + id="uLIrm17VRHmz"
import matplotlib.pyplot as plt
# %matplotlib inline
# + colab={"base_uri": "https://localhost:8080/"} id="55d_6xgrU8YO" outputId="a34e12f3-e6a2-4539-bd0e-2d4ba01d3629"
history_dict = history.history
history_dict.keys()
# + colab={"base_uri": "https://localhost:8080/", "height": 303} id="HAzVqtL4UdLZ" outputId="45305f4f-f514-4dad-9ccf-b99ebcec5823"
loss_values = history_dict['loss']
val_loss_values = history_dict['val_loss']
epochs = range(1, len(loss_values) + 1)
plt.plot(epochs, loss_values, 'bo', label="Training Loss")
plt.plot(epochs, val_loss_values, 'b', label="Validation Loss")
plt.title('Training and Validation Loss')
plt.xlabel('Epochs',fontsize = 20)
plt.ylabel('Loss Value',fontsize = 20)
plt.legend()
plt.show()
# + colab={"base_uri": "https://localhost:8080/"} id="S50mY8viYKnG" outputId="4cd787b1-1715-43bb-8344-77bf688e7849"
loss, acc = model.evaluate(X_test , y_test)
print('The loss :',loss)
print('The accuracy of the test set :',acc*100)
# + id="GO5BuFRzZA0Y"
import numpy as np
np.set_printoptions(suppress=True)
result = model.predict(X_test)
# + colab={"base_uri": "https://localhost:8080/"} id="jtVovDZaZbQY" outputId="99aec196-7894-45d6-bece-6334794e7679"
print(result,end=' ')
# + colab={"base_uri": "https://localhost:8080/"} id="FyOSQcvXaYCF" outputId="70691d05-5cbc-407a-db07-ac125ff40f03"
result.shape
# + colab={"base_uri": "https://localhost:8080/"} id="yRRI7G_Cak6I" outputId="0f667cfd-107c-4ff3-deb2-2146438432f3"
p = model.predict(np.expand_dims(X_test[76] , axis= 0))
print(class_names[int(p[0]>0.5)])
# + colab={"base_uri": "https://localhost:8080/"} id="VFlJq13dbIlu" outputId="06e84c27-a26c-476d-8212-e753cff4bf8e"
print(X_test[0])
# + id="C-pRlFllbgM4"
y_pred = np.zeros(len(result))
for i, score in enumerate(result):
y_pred[i] = 1 if score > 0.5 else 0
# + id="JTBiyPnWcswE"
from sklearn.metrics import mean_absolute_error
error = mean_absolute_error(y_pred, y_test)
# + colab={"base_uri": "https://localhost:8080/"} id="schEjpiOcvwH" outputId="794bd6eb-b2f5-408b-c65c-6b6f0aab78a2"
error
# + id="D3hCfBenc2tu"
| IMDB_DATASET_MOVIES_REVIEWS.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: 'Python 3.7.11 64-bit (''py37'': conda)'
# name: python3
# ---
# # 1. 线性回归入门
# ## 1.1 数据生成
#
# 线性回归是机器学习算法的一个敲门砖,为了能够更方便直观地带大家入门,这里使用人工生成的简单的数据。生成数据的思路是设定一个二维的函数(维度高了没办法在平面上画出来),根据这个函数生成一些离散的数据点,对每个数据点我们可以适当的加一点波动,也就是噪声,最后看看我们算法的拟合或者说回归效果。
# +
import numpy as np
import matplotlib.pyplot as plt
def true_fun(X):
    """Ground-truth linear model: y = 1.5 * x + 0.2."""
    slope, intercept = 1.5, 0.2
    return slope * X + intercept
np.random.seed(0) # 设置随机种子
n_samples = 30 # 设置采样数据点的个数
'''生成随机数据作为训练集,并且加一些噪声'''
X_train = np.sort(np.random.rand(n_samples))
y_train = (true_fun(X_train) + np.random.randn(n_samples) * 0.05).reshape(n_samples,1)
# -
# ## 1.2 定义模型
# 生成数据之后,我们可以定义我们的算法模型,直接从sklearn库中导入类LinearRegression即可,由于线性回归比较简单,所以这个类的输入参数也比较少,不需要多加设置。
# 定义好模型之后直接训练,就能得到我们拟合的一些参数。
from sklearn.linear_model import LinearRegression # 导入线性回归模型
model = LinearRegression() # 定义模型
model.fit(X_train[:,np.newaxis], y_train) # 训练模型
print("输出参数w:",model.coef_) # 输出模型参数w
print("输出参数b:",model.intercept_) # 输出参数b
# ## 1.3 模型测试与比较
# 可以看到线性回归拟合的参数是1.44和0.22,很接近实际的1.5和0.2,说明我们的算法性能还不错。
# 下面我们直接选取一批数据测试,然后通过画图看看算法模型与实际模型的差距。
X_test = np.linspace(0, 1, 100)
plt.plot(X_test, model.predict(X_test[:, np.newaxis]), label="Model")
plt.plot(X_test, true_fun(X_test), label="True function")
plt.scatter(X_train,y_train) # 画出训练集的点
plt.legend(loc="best")
plt.show()
# 由于我们的数据比较简单,所以从图中也可以看出,我们的算法拟合曲线与实际的很接近。对于更复杂以及高维的情况,线性回归不能满足我们回归的需求,这时候我们需要用到更为高级一些的多项式回归了。
# # 2. 多项式回归
#
# 多项式回归的思路一般是将$m$次多项式方程转化为$m$线性回归方程,即将$y=b_0+b_1*x+...+b_m*x^m$转换为$y=b_0*+b_1*x_1+...+b_m*x_m$(令$x_m=x^m$即可),然后使用线性回归的方法求出相应的参数。
# 一般实际的算法也是如此,我们将多项式特征分析器和线性回归串联,算出线性回归的参数之后倒推过去就行。
# +
import numpy as np
import matplotlib.pyplot as plt
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import PolynomialFeatures # 导入能够计算多项式特征的类
from sklearn.linear_model import LinearRegression
from sklearn.model_selection import cross_val_score
def true_fun(X):
    """Ground-truth nonlinear model: y = cos(1.5 * pi * x)."""
    angular_frequency = 1.5 * np.pi
    return np.cos(angular_frequency * X)
np.random.seed(0)
n_samples = 30 # 设置随机种子
X = np.sort(np.random.rand(n_samples))
y = true_fun(X) + np.random.randn(n_samples) * 0.1
degrees = [1, 4, 15] # 多项式最高次
plt.figure(figsize=(14, 5))
for i in range(len(degrees)):
ax = plt.subplot(1, len(degrees), i + 1)
plt.setp(ax, xticks=(), yticks=())
polynomial_features = PolynomialFeatures(degree=degrees[i],
include_bias=False)
linear_regression = LinearRegression()
pipeline = Pipeline([("polynomial_features", polynomial_features),
("linear_regression", linear_regression)]) # 使用pipline串联模型
pipeline.fit(X[:, np.newaxis], y)
scores = cross_val_score(pipeline, X[:, np.newaxis], y,scoring="neg_mean_squared_error", cv=10) # 使用交叉验证
X_test = np.linspace(0, 1, 100)
plt.plot(X_test, pipeline.predict(X_test[:, np.newaxis]), label="Model")
plt.plot(X_test, true_fun(X_test), label="True function")
plt.scatter(X, y, edgecolor='b', s=20, label="Samples")
plt.xlabel("x")
plt.ylabel("y")
plt.xlim((0, 1))
plt.ylim((-2, 2))
plt.legend(loc="best")
plt.title("Degree {}\nMSE = {:.2e}(+/- {:.2e})".format(
degrees[i], -scores.mean(), scores.std()))
plt.show()
# -
# ## 2.1 交叉验证
# 在这个算法训练过程中,我们使用了一个技巧,就是交叉验证,类似的方法还有holdout检验以及自助法(交叉验证的升级版,即每次又放回去的选取数据)。
# 交叉验证法的作用就是尝试利用不同的训练集/测试集划分来对模型做多组不同的训练/测试,来应对测试结果过于片面以及训练数据不足的问题。过程如下图:
#
# 
#
# ## 2.2 过拟合与欠拟合
#
# 我们知道多项式回归根据最高次数的不同,其回归的效果对于同一数据可能也不会不同,高次的多项式能够产生更多的弯曲从而拟合更多非线性规则的数据,低次的则贴近线性回归。
# 但在这过程中,也会产生一个过拟合与欠拟合的问题。
| ml-with-sklearn/LinearRegression/LinearRegression.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: 'Python 3.8.8 64-bit (''base'': conda)'
# language: python
# name: python3
# ---
import pandas as pd
import numpy as np
import names
import random
import matplotlib.pyplot as plt
import seaborn as sns
from randomtimestamp import randomtimestamp, random_date, random_time
from datetime import datetime
from datetime import timedelta
# Set figure size to (28,14)
plt.rcParams['figure.figsize'] = (28, 14)
# +
#Display the number of customers at checkout (any state) over time
def question_03(df):
    """Display the number of customers per location.

    Parameters
    ----------
    df : pandas.DataFrame
        Must contain 'location' and 'customer_no' columns.

    Returns
    -------
    pandas.DataFrame
        Count of customers per location (also printed).

    BUGFIX: the original ended with `return print(df)`, which always
    returns None; the counts are now printed and then returned so callers
    can use the result.
    """
    counts = pd.DataFrame(df.groupby(['location']).count()['customer_no'])
    print(counts)
    return counts
# +
#5 Calculate the total number of customers present in the supermarket over time.
def question_05(df):
    """Plot the total number of customers present in the supermarket per
    timestamp as a line chart (uses the module-level seaborn import)."""
    per_timestamp = pd.DataFrame(df.groupby('timestamp').count()['customer_no'])
    sns.lineplot(data=per_timestamp)
# -
def question_04(df):
    """Return a dict mapping customer_no -> total time spent in the market
    (last timestamp minus first timestamp for that customer).

    NOTE: mutates the passed frame via reset_index(inplace=True), turning
    the timestamp index into the first column (as the original did).

    BUGFIX: the loop bound is now `max() + 1` — the original
    `range(1, df['customer_no'].max(), 1)` silently skipped the customer
    with the largest number. Also renamed the accumulator from `dict`
    (which shadowed the builtin) and removed dead commented-out code.
    """
    df.reset_index(inplace=True)
    durations = {}
    for i in range(1, df['customer_no'].max() + 1, 1):
        visits = df[df['customer_no'] == i]
        if visits.empty:  # customer numbers need not be contiguous
            continue
        # column 0 is the former timestamp index after reset_index
        durations[i] = visits.iloc[-1, 0] - visits.iloc[0, 0]
    return durations
# +
# dict = question_06(df_0)
# -
# Create customer_id
def create_id(df, letters):
    """Add a 'customer_id' column of the form '<letters>_<customer_no>'
    to df in place (e.g. 'MO_1' for customer 1 on Monday)."""
    df['customer_id'] = df['customer_no'].map(lambda no: f"{letters}_{no}")
# Function for sorting
def sort_df(df):
    """Sort df in place: first by index (timestamp), then stably by
    'customer_no' so each customer's rows stay in chronological order.
    Returns the same (mutated) frame for chaining."""
    df.sort_index(inplace=True)
    # mergesort is pandas' stable sort — equivalent to kind='stable'
    df.sort_values(by='customer_no', kind='mergesort', inplace=True)
    return df
# Function for filling
def filling_steps(df):
    """Resample each customer's trajectory to one row per minute,
    forward-filling the last known location.

    Assumes df has a DatetimeIndex — TODO confirm (resample('1T')
    requires one).  Returns a flat frame with only 'customer_no' and
    'location' columns.
    """
    # groupby + resample yields a (customer_no, timestamp) MultiIndex,
    # with the location forward-filled across the new minute rows
    df_n = df.groupby('customer_no').resample('1T').ffill()
    # keep only the filled columns and drop the time level of the index
    df_l = df_n[['customer_no', 'location']].set_index('customer_no')
    df_l.reset_index(inplace=True)
    return df_l
# Function for adding checkouts to customers without
def checkout(df):
    """Append a final 'checkout' row for every customer whose trajectory
    does not already end at the checkout, then return the re-sorted frame
    (via the module-level sort_df helper).

    BUGFIX/modernisation: DataFrame.append was deprecated and removed in
    pandas 2.0; replaced with pd.concat (identical result).
    """
    for i in range(1, df['customer_no'].max() + 1, 1):
        # column 1 is 'location'; check the customer's last recorded stop
        if df[df['customer_no'] == i].iloc[-1, 1] != 'checkout':
            customer_no = df[df['customer_no'] == i].iloc[0, 0]
            extra_row = pd.DataFrame({"customer_no": [customer_no], 'location': 'checkout'})
            df = pd.concat([df, extra_row], ignore_index=True)
    return sort_df(df)
# create Markov columns
def markov_columns(df):
    """Turn per-minute location rows into Markov transition pairs.

    Adds 'before' (current location) and 'after' (next row's location)
    columns, pins customers already at checkout to the absorbing
    'checkout' state, indexes by customer_id and drops the raw columns.
    Mutates and returns the same frame.
    """
    df["before"] = df['location']
    shifted = df['location'].shift(-1)
    df['after'] = shifted
    at_checkout = df['before'] == 'checkout'
    df.loc[at_checkout, 'after'] = 'checkout'
    df.set_index('customer_id', inplace=True)
    df.drop(columns=['customer_no', 'location'], inplace=True)
    return df
# Load one CSV per weekday; first column is parsed as the timestamp index.
df_0 = pd.read_csv('data/monday.csv', parse_dates=True, index_col=0, sep=';')
df_1 = pd.read_csv('data/tuesday.csv', parse_dates=True, index_col=0, sep=';')
df_2 = pd.read_csv('data/wednesday.csv', parse_dates=True, index_col=0, sep=';')
df_3 = pd.read_csv('data/thursday.csv', parse_dates=True, index_col=0, sep=';')
df_4 = pd.read_csv('data/friday.csv', parse_dates=True, index_col=0, sep=';')
df_lst = [df_0, df_1, df_2, df_3, df_4]
days = ['MO', 'TU', 'WE', 'TH', 'FR']
# Clean each day's frame: sort, fill minute gaps, force a final checkout,
# and tag every customer with a day-prefixed id.
# NOTE(review): this rebinds df_lst[i]; the names df_0..df_4 keep pointing
# at the raw, unprocessed frames (as seen in the df_0.head(20) cell below).
for i in range(len(df_lst)):
    df_lst[i] = sort_df(df_lst[i])  # df_0 = sort_df(df_0)
    df_lst[i] = filling_steps(df_lst[i])
    df_lst[i] = checkout(df_lst[i])
    create_id(df_lst[i], days[i])  # create_id(df_0, 'MO')
df_0.info()
df_0.head(20)
df_lst[1].tail(10)
#we create a list with every single df in order to concatenate them together
df = pd.concat(df_lst)
df.head(50)
df.info()
df.tail(30)
# General eda
df['customer_id'].value_counts()
df_mar = markov_columns(df)
df_mar.head()
df_mar.tail(50)
df_mar.to_csv('df_mar.csv')
P = pd.crosstab(df_mar['before'], df_mar['after'], normalize=0)
P
states = ['checkout','dairy','drinks',"fruit",'spices']
random.choice(states)
# +
# convert the transition matrix to a dictionary that maps each state to a list
# of transition probabilities, ordered like the `states` list
# (checkout, dairy, drinks, fruit, spices)
probs = P.to_dict(orient='index')
#probs
# +
# convert each internal value, which is a dict, to a plain list of probabilities
for key in probs.keys():
probs[key] = list(probs[key].values())
# -
probs
# +
# class Customer1:
# """
# a single customer that moves through the supermarket
# in a MCMC simulation
# """
# def __init__(self, id, state, transition_probs, budget=100):
# self.id = id
# self.state = state
# self.transition_probs = transition_probs
# self.budget = budget
# def __repr__(self):
# return f'<Customer_no {self.id} in {self.state}>'
# def next_state(self):
# '''
# Propagates the customer to the next state.
# Returns nothing.
# '''
# #self.state = random.choice(['spices', 'drinks', 'fruit'])
# self.state = random.choices(states, weights=self.transition_probs[self.state])[0]
# return self.state
# def is_active(self):
# """Returns True if the customer has not reached the checkout yet."""
# return self.state != 'checkout'
# +
# bagigio = Customer1('bagigio','dairy',probs)
# +
# lst_customers = [bagigio]
# -
# for i in lst_customers:
# i = 0
# min_time = random_time(text=True, pattern='%H:%M:%S')
# # create datetime object from timestamp string
# given_time = datetime.strptime(min_time, "%H:%M:%S")
# print(type(given_time))
# print(f"{given_time.time()} -- {bagigio.id} is in {bagigio.state} section")
# print(type(given_time))
# while bagigio.is_active():
# i+=1
# print(f"{(given_time + timedelta(minutes = i)).time()} -- {bagigio.id} is in {bagigio.next_state()} section")
# +
class Customer:
    """A single customer that moves through the supermarket in a MCMC
    simulation."""

    def __init__(self, name, state, budget=100):
        self.name = name
        self.state = state  # current section name, or 'checkout'
        self.budget = budget

    def __repr__(self):
        # BUGFIX: the original formatted self.id, but this class only
        # defines self.name, so repr() raised AttributeError.
        return f'<Customer_no {self.name} in {self.state}>'

    def is_active(self):
        """Returns True if the customer has not reached the checkout yet."""
        return self.state != 'checkout'

    def is_not_active(self):
        """Returns True if the customer has reached the checkout."""
        return self.state == 'checkout'
# +
"""
Start with this to implement the supermarket simulator.
"""
class Supermarket:
    """Manages multiple Customer instances that are currently in the market.

    NOTE(review): this first version appears superseded by Supermarket_01
    below — next_minute() reads self.customers[i].id, but Customer only
    defines .name, so running it raises AttributeError.
    """
    def __init__(self, opening, closing):
        ### ADD OPENING AND CLOSING HOURS
        # accumulates simulation rows for the CSV export
        self.dict_csv = {'timestamp': [], 'customer_no': [], 'location': []}
        # a list of Customer objects
        self.customers = []
        # entry times (HH:MM strings), index-aligned with self.customers
        self.customers_timers = []
        self.time = (datetime.strptime(opening, "%H:%M"))
        self.end_time = (datetime.strptime(closing, "%H:%M"))

    def __repr__(self):
        return 'something about the supermarket'

    def get_time(self):
        """current time in HH:MM format,
        """
        return f"It's {self.time.time()}"
        # ??? just get back the current time

    def print_customers(self):
        """Print all customers with the current time and id in CSV format:
        writes 'supermarket.csv' and returns the DataFrame."""
        df = pd.DataFrame(self.dict_csv)
        df.to_csv("supermarket.csv")
        return df
        # return self.d_customers_no, self.d_timestamp, self.d_location

    def next_minute(self):
        """Propagates all customers to the next state, minute by minute,
        until the closing time is reached.

        NOTE(review): relies on self.number_clients, which exists only
        after add_new_customers() has been called — confirm call order.
        """
        minute = 0
        #if str(self.time.time()) == str((self.end_time - timedelta(minutes=5)).time()):
        while str(self.time.time()) != str(self.end_time.time()):
            self.time = (self.time + timedelta(minutes=minute))
            #print(str(self.time.time())[:-3])
            print()
            for i in range(self.number_clients):
                # a customer activates when the clock matches their entry time
                if self.customers_timers[i] == str(self.time.time())[:-3]:
                    self.dict_csv['timestamp'].append(self.customers_timers[i])
                    self.dict_csv['customer_no'].append(self.customers[i].id)
                    self.dict_csv['location'].append(self.customers[i].state)
                    internal_min = 0
                    # walk the Markov chain until the customer checks out,
                    # logging one row per simulated minute
                    while self.customers[i].is_active():
                        print(internal_min)
                        self.customers[i].state = random.choices(
                            states, weights=probs[self.customers[i].state])[0]
                        internal_min += 1
                        self.dict_csv['timestamp'].append(str((self.time + timedelta(minutes=internal_min)).time())[:-3])
                        self.dict_csv['customer_no'].append(self.customers[i].id)
                        self.dict_csv['location'].append(self.customers[i].state)
                else:
                    continue
            minute = 1

    def add_new_customers(self):
        """Randomly creates new customers (100-200) with random entry times."""
        self.number_clients = random.randint(100, 200)
        for i in range((self.number_clients)):
            # states[1:] skips 'checkout' as a starting section
            self.customers.append(Customer(i, random.choice(states[1:])))
            self.customers_timers.append(random_time(start=self.time.time(), end=self.end_time.time(), text=True, pattern="%H:%M"))
# +
"""
Start with this to implement the supermarket simulator.
"""
class Supermarket_01:
    """Manages multiple Customer instances that are currently in the market."""
    def __init__(self, opening, closing):
        # accumulates simulation rows for the CSV export
        self.dict_csv = {'timestamp': [], 'customer_name': [], 'location': []}
        self.customers = []
        self.time = (datetime.strptime(opening, "%H:%M"))
        self.end_time = (datetime.strptime(closing, "%H:%M"))

    def __repr__(self):
        return 'something about the supermarket'

    def get_time(self):
        """current time in HH:MM format,
        """
        return f"It's {self.time.time()}"

    def print_customers(self):
        """Print all customers with the current time and id in CSV format:
        writes 'supermarket_01.csv'; returns the DataFrame and raw dict."""
        df = pd.DataFrame(self.dict_csv)
        df.to_csv("supermarket_01.csv")
        return df, self.dict_csv

    def next_minute(self):
        """Propagates all customers to the next state, minute by minute,
        until closing time or until the shop is empty.

        NOTE(review): self.customers.pop(r) inside `for r in range(r_range)`
        mutates the list while indexing into it, so later indices shift and
        can skip customers or raise IndexError when r_range was computed
        from the pre-pop length — consider iterating over a copy instead.
        """
        minute = 0
        while str(self.time.time()) != str(self.end_time.time()):
            self.time = (self.time + timedelta(minutes=minute))
            # random number of customers to move during this minute
            r_range = int(np.sqrt(random.randint(0, len(self.customers))))
            for r in range(r_range):
                if self.customers[r].is_active():
                    self.write_on_dict(r)
                    self.customers[r].state = random.choices(
                        states, weights=probs[self.customers[r].state])[0]
                elif self.customers[r].is_not_active():
                    # record the checkout, then remove the customer
                    self.write_on_dict(r)
                    self.customers.pop(r)
                if len(self.customers) == 0:
                    break
                else:
                    continue
            minute = 1

    def write_on_dict(self, r):
        """Record the r-th customer's current (time, name, location) row."""
        self.dict_csv['timestamp'].append(str(self.time.time()))
        self.dict_csv['customer_name'].append(self.customers[r].name)
        self.dict_csv['location'].append(self.customers[r].state)
        print(
            f'{str(self.time.time())} {self.customers[r].name} in {self.customers[r].state}')

    def add_new_customers(self):
        """Randomly creates new customers (currently pinned to exactly 5)
        starting in a random non-checkout section."""
        self.number_clients = random.randint(5, 5)
        #print(self.number_clients)
        for i in range((self.number_clients)):
            self.customers.append(
                Customer(names.get_full_name(), random.choice(states[1:])))
        print(len(self.customers))
# -
s = Supermarket_01('07:00','22:00')
s.add_new_customers()
s.next_minute()
df, dict = s.print_customers()
df.head(40)
dict
# +
import cv2
import time
import random
import numpy as np
import pandas as pd
TILE_SIZE = 32
MARKET = """
##WWWWWWWWWWWWWWW##
#tttttttttttttttttG
rRtDeeDtSssStFbaFt#
dRtDmmDtSppStFbaFt#
rRtDeeDtSssStFbaFt#
dRtDmmDtSppStFbaFt#
rRtDeeDtSssStFbaFt#
rRtttttttttttttttt#
CCccCCccCCccCCtttt#
##tttttttttttttttt#
##tttttttttttttttt#
##EEEEEEEEEEEEEEE##
""".strip()
class SupermarketMap:
    """Visualizes the supermarket background.

    Builds one big RGB image by blitting 32x32 sprites from a tile sheet
    according to a character layout string (the module-level MARKET).
    """

    def __init__(self, layout, tiles):
        """
        layout : a string with each character representing a tile
        tiles  : a numpy array containing all the tile images (the sheet)
        """
        self.tiles = tiles
        # split the layout string into a two dimensional matrix [['#','#',..],['#','#',.]]
        self.contents = [list(row) for row in layout.split("\n")]
        self.ncols = len(self.contents[0])
        self.nrows = len(self.contents)
        # blank RGB canvas sized to the layout (nrows*32, ncols*32, 3)
        self.image = np.zeros(
            (self.nrows*TILE_SIZE, self.ncols*TILE_SIZE, 3), dtype=np.uint8
        )
        self.prepare_map()

    def extract_tile(self, row, col):
        """Extract one 32x32 tile array from the tile sheet at (row, col).

        Negative indices index from the end of the sheet (numpy slicing).
        """
        y = row*TILE_SIZE
        x = col*TILE_SIZE
        return self.tiles[y:y+TILE_SIZE, x:x+TILE_SIZE]

    def get_tile(self, char):
        """Return the sprite array for a given layout character."""
        if char == "#":
            return self.extract_tile(0, 0)
        elif char == "G":
            return self.extract_tile(7, 3)
        elif char == "C":
            return self.extract_tile(2, 8)
        elif char == "b":
            return self.extract_tile(0, 4)
        elif char == "a":
            return self.extract_tile(5, 4)
        elif char == "s":
            return self.extract_tile(5, 9)
        elif char == "p":
            return self.extract_tile(6, 9)
        elif char == "e":
            return self.extract_tile(-2, -5)
        elif char == "m":
            return self.extract_tile(-4, 6)
        elif char == "d":
            return self.extract_tile(3, -3)
        elif char == "r":
            return self.extract_tile(-3, -3)
        elif char == "W":
            return self.extract_tile(0, 7)
        elif char == "E":
            return self.extract_tile(-3, -6)
        # BUGFIX: removed a second, unreachable `elif char == "p":
        # return self.extract_tile(3, 2)` branch — "p" is already handled
        # above, so that branch was dead code. If a different character
        # was intended there, restore it with the correct letter.
        else:
            return self.extract_tile(1, 2)

    def prepare_map(self):
        """Prepares the entire background image as a big numpy array."""
        # walk the layout characters and blit the matching tile
        for row, line in enumerate(self.contents):
            for col, char in enumerate(line):
                # bm gives back a real tile from tiles.png
                bm = self.get_tile(char)
                y = row*TILE_SIZE
                x = col*TILE_SIZE
                self.image[y:y+TILE_SIZE, x:x+TILE_SIZE] = bm

    def draw(self, frame):
        """Draws the prepared background image into a (larger) frame."""
        frame[0:self.image.shape[0], 0:self.image.shape[1]] = self.image

    def write_image(self, filename):
        """Writes the background image into a file (requires OpenCV)."""
        cv2.imwrite(filename, self.image)
class Customer_map:
    """Draws a customer avatar on the supermarket map and teleports it to
    a random tile of the section matching its Markov state."""
    def __init__(self, name, supermarket, avatar, row, col, state=None):
        """
        name        : customer name
        supermarket : a SupermarketMap object
        avatar      : a numpy array containing a 32x32 tile image
        row         : the starting row
        col         : the starting column
        state       : current Markov state (section name), if any
        """
        self.name = name
        self.supermarket = supermarket
        self.state = state
        self.avatar = avatar
        self.row = row
        self.col = col

    def draw(self, frame):
        # paste the avatar at the pixel position of tile (row, col)
        x = self.col * TILE_SIZE
        y = self.row * TILE_SIZE
        frame[y:y+TILE_SIZE, x:x+TILE_SIZE] = self.avatar

    def move(self):
        """Jump to a random (row, col) tile inside the section named by
        self.state."""
        #states = ['checkout', 'dairy', 'drinks', "fruit", 'spices']
        # hard-coded tile coordinates for each section of the MARKET layout
        # — assumed to match the MARKET string above; TODO verify
        dairy = [(2, 3), (2, 6), (3, 3), (3, 6), (4, 3),
                 (4, 6), (5, 3), (5, 6), (6, 3), (6, 6)]
        fruits = [(2, 13), (2, 16), (3, 13), (3, 16), (4, 13),
                  (4, 16), (5, 13), (5, 16), (6, 13), (6, 16)]
        drinks = [(2, 1), (3, 1), (4, 1), (5, 1), (6, 1), (7, 1)]
        spices = [(2, 8), (2, 11), (3, 8), (3, 11), (4, 8),
                  (4, 11), (5, 8), (5, 11), (6, 8), (6, 11)]
        checkout = [(8, 2), (8, 3), (8, 6), (8, 7), (8, 10), (8, 11)]
        ### Calculate the difference between now and the random choice
        ### moving in that direction with for loop in order to reach that column and row
        if self.state == 'fruit':
            self.row, self.col = random.choice(fruits)
        if self.state == 'checkout':
            self.row, self.col = random.choice(checkout)
            # shifts the avatar three rows below the chosen checkout tile
            # (presumably so it stands in front of the till — TODO confirm)
            new_row = self.row
            new_col = self.col
            new_row += 3
            self.col = new_col
            self.row = new_row
        if self.state == 'spices':
            self.row, self.col = random.choice(spices)
        if self.state == 'dairy':
            self.row, self.col = random.choice(dairy)
        if self.state == 'drinks':
            self.row, self.col = random.choice(drinks)
if __name__ == "__main__":
    # we are creating a bigger zeros array as background for our image
    background = np.zeros((500, 700, 3), np.uint8)
    tiles = cv2.imread("tiles.png")
    market = SupermarketMap(MARKET, tiles)
    # NOTE(review): index_col=1 makes the second CSV column the (parsed) index —
    # presumably a timestamp column; verify against the CSV layout.
    df = pd.read_csv("Supermarket_01.csv", index_col=1, parse_dates=True)
    # One avatar per unique customer, all spawned at grid cell (1, 18).
    list_of_avatars = {}
    possible_avatar_pos = [(7, 0), (7, 15), (8, 15), (6, 15), (5, 14)]
    # NOTE(review): only 5 avatar sprites are listed above — this loop raises
    # IndexError if the CSV contains more than 5 unique customers.
    for n in range(len(list(df.customer_name.unique()))):
        r = possible_avatar_pos[n]
        no = list(df.customer_name.unique())[n]
        customer = Customer_map(name=no, supermarket=market, avatar=market.extract_tile(r[0],r[1]), row=1, col=18)
        list_of_avatars[no] = customer
    # while True:
    # NOTE(review): BUG — `dict` below is the Python builtin type, so these
    # three subscript expressions raise TypeError at runtime. They presumably
    # were meant to read columns of `df` (e.g. df['customer_name'].tolist());
    # confirm the intended source and fix before running.
    lst_timestamp = dict['timestamp']
    print(lst_timestamp)
    lst_customer_name = dict['customer_name']
    print(lst_customer_name)
    lst_location = dict['location']
    print(lst_location)
    while True:
        for i in range(len(lst_customer_name)):
            nome = lst_customer_name[i]
            for l in list_of_avatars:
                frame = background.copy()
                market.draw(frame)
                #list_of_avatars[i]
                if l==nome:
                    # Update the matching avatar's section and animate one move.
                    location = lst_location[i]
                    list_of_avatars[lst_customer_name[i]].state = location
                    list_of_avatars[lst_customer_name[i]].draw(frame)
                    time.sleep(1.5)
                    list_of_avatars[lst_customer_name[i]].move()
                    #list_of_avatars[lst_customer_name[i]].state = location
                list_of_avatars[l].draw(frame)
                time.sleep(1.5)
                list_of_avatars[l].move()
                # https://www.ascii-code.com/
                key = cv2.waitKey(1)
                # NOTE(review): this break only exits the inner avatar loop, and
                # cv2.imshow runs after the key check, so the window update lags
                # by one iteration — confirm the intended event-loop ordering.
                if key == 113: # 'q' key
                    break
                cv2.imshow("frame", frame)
    cv2.destroyAllWindows()
    market.write_image("supermarket.png")
# -
| markov_chains.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# <center>
# <h1> ILI285 - Computación Científica I / INF285 - Computación Científica </h1>
# <h2> Interpolation: Splines </h2>
# <h2> <a href="#acknowledgements"> [S]cientific [C]omputing [T]eam </a> </h2>
# <h2> Version: 1.22</h2>
# </center>
# ## Table of Contents
# * [Introduction](#intro)
# * [Splines](#sp)
# * [Properties](#pr)
# * [Solving](#so)
# * [The additional Property](#ad)
# * [Exercises](#ex)
# * [Acknowledgements](#acknowledgements)
import numpy as np
import matplotlib.pyplot as plt
# %matplotlib inline
import scipy as sp
from scipy import interpolate
import ipywidgets as widgets
import matplotlib as mpl
mpl.rcParams['font.size'] = 14
mpl.rcParams['axes.labelsize'] = 20
mpl.rcParams['xtick.labelsize'] = 14
mpl.rcParams['ytick.labelsize'] = 14
from scipy.interpolate import CubicSpline
M=8
# <div id='intro' />
#
# ## Introduction
# Previously in our jupyter notebooks, we learned about interpolation methods such as Newton's Divided Differences and Lagrange, among others. Another alternative for interpolating a set of data points is using **Cubic Splines**.
# This technique avoids Runge's phenomenon and easily builds piecewise 3rd-degree polynomials.
# <div id='sp' />
#
# ## Splines
# The most common spline is the linear spline. Given a set of points $(x_{1},y_{1}), (x_{2},y_{2}),...,(x_{n},y_{n}) $, this spline connects the points with straight segments, producing a curve that is not smooth. To avoid this problem, **cubic splines create a set of 3rd-degree polynomials (specifically n-1 of them)**, yielding a much smoother curve.
#
#
# +
# Code based on Example from: https://docs.scipy.org/doc/scipy/reference/generated/scipy.interpolate.CubicSpline.html#scipy.interpolate.CubicSpline
# The data: 12 samples of f(x) = x*sin(x) on [0, 2*pi]
x = np.linspace(0,2*np.pi,12)
y = np.sin(x)*x
# Building interpolation object
cs = CubicSpline(x, y)
# Defining a finer mesh to plot the function
xx = np.linspace(0,2*np.pi,1000)
# Analytic f and its first four derivatives, for comparison with the spline.
yyo = np.sin(xx)*xx
yyo1 = np.cos(xx)*xx+np.sin(xx)
yyo2 = -np.sin(xx)*xx+2*np.cos(xx)
yyo3 = -np.cos(xx)*xx-3*np.sin(xx)
yyo4 = np.sin(xx)*xx-4*np.cos(xx)
#Interpolating the data with the spline; cs(xx, k) evaluates the k-th derivative.
# A cubic spline is piecewise degree 3, so its 4th derivative yy4 is identically zero.
yy = cs(xx)
yy1 = cs(xx, 1)
yy2 = cs(xx, 2)
yy3 = cs(xx, 3)
yy4 = cs(xx, 4)
# Plotting the splines and its derivatives
plt.figure(figsize=(M,M))
plt.plot(x,y,'k.',markersize=20,label=r'Data Points')
plt.plot(xx,yy, linewidth=4, label=r'S$(x)$')
plt.plot(xx,yy1, linewidth=4, label=r'$\frac{d}{dx}$S$(x)$')
plt.plot(xx,yy2, linewidth=4, label=r'$\frac{d^2}{dx^2}$S$(x)$')
plt.plot(xx,yy3, linewidth=4, label=r'$\frac{d^3}{dx^3}$S$(x)$')
plt.plot(xx,yy4, linewidth=4, label=r'$\frac{d^4}{dx^4}$S$(x)$')
# Analytic 4th derivative overlaid (dashed) for visual comparison against yy4.
plt.plot(xx,yyo4,'k--',linewidth=4, label='test', alpha=0.4)
plt.plot(x,y,'k.',markersize=20)
plt.title(r'Cubic Spline is defined as S$(x)$')
plt.axis('tight')
plt.xlabel(r'$x$')
plt.ylabel(r'$y$')
plt.grid(True)
plt.legend(loc='center left', bbox_to_anchor=(1, 0.5))
plt.show()
# -
plt.figure()
# Interpolation error of the spline against the analytic function (log scale).
plt.semilogy(np.abs(yy-yyo))
plt.grid(True)
# The orange curve is generated with cubic splines (using the scipy implementation). The other colors are the **derivatives** of the Cubic Spline as indicated in the legend.
# However, if we thinks about this curve, we can say that exists an **infinitely many** quantity of polynomials such that meets all the points. Our goal is to create an unique polynomial. Given this condition, there is 4 properties that defines the cubic spline we are looking for.
# <div id='pr'/>
#
# ## Properties of Splines
# When we want creates a spline of **n** data points, we obtains a set of **n-1** 3-degree polynomials. For example:
# Given a set of points $(x_{1},y_{1}), (x_{2},y_{2}),...,(x_{n},y_{n})$, the splines is:
#
# \begin{equation} S_{1}(x) = y_{1} + b_{1}(x-x_{1}) + c_{1}(x-x_{1})^{2} + d_{1}(x-x_{1})^{3} \\
# S_{2}(x) = y_{2} + b_{2}(x-x_{2}) + c_{2}(x-x_{2})^{2} + d_{2}(x-x_{2})^{3} \\
# ... \\
# ... \\
# ... \\
# S_{n-1}(x) = y_{n-1} + b_{n-1}(x-x_{n-1}) + c_{n-1}(x-x_{n-1})^{2} + d_{n-1}(x-x_{n-1})^{3}
# \end{equation}
# Thus, our goal is obtains the $y, b, c$ and $d$ coefficients. With this values, we are creating the spline $S(x)$ that meets all the data points. This spline have the next properties:
# ## Property 1 (Are the points connected?)
#
# The first property checks that each x-coordinate reaches the corresponding y-coordinate in the spline $S(x)$; that is, it checks that the spline passes through the data points.
#
# $$S_{i}(x_{i}) = y_{i}$$ $$ S_{i}(x_{i+1}) = y_{i+1}$$
#
# $$i \in [1,n-1]$$
#
# ## Property 2 (Slope Continuity)
#
# The second property ensures that the slopes of the splines at the points' neighborhood to be equals. This guarantees the smoothly of $S(x)$
#
# $$S'_{i-1}(x_{i}) = S'_{i}(x_{i})$$
#
# $$i \in [2,n-1]$$
# ## Property 3 (Curvature Continuity)
#
# This property ensure that the curvature between differents polynomials will be equal at the neighborhood of points, avoiding abrupt changes of the curve at the data points.
#
# $$S''_{i-1}(x_{i}) = S''_{i}(x_{i})$$
#
# $$i \in [2,n-1]$$
# <div id='so' />
#
# ## Solving the system
#
# If we have **n points**, we know that our splines will be composed of **n-1 curves** $S_{i}(x)$. We have too, **(3n-3) unknowns variables** ($b_{i}, c_{i}, d_{i}$ for each spline). However, we can build a system of equations for find this variables. How can i do this? Easy.. Using the previous properties!
#
# Using the previously defined splines for n points:
# \begin{equation} S_{1}(x) = y_{1} + b_{1}(x-x_{1}) + c_{1}(x-x_{1})^{2} + d_{1}(x-x_{1})^{3} \\
# S_{2}(x) = y_{2} + b_{2}(x-x_{2}) + c_{2}(x-x_{2})^{2} + d_{2}(x-x_{2})^{3} \\
# \vdots \\
# S_{n-1}(x) = y_{n-1} + b_{n-1}(x-x_{n-1}) + c_{n-1}(x-x_{n-1})^{2} + d_{n-1}(x-x_{n-1})^{3}
# \end{equation}
#
# We need too, the first derivatives of this curves:
# \begin{equation} S'_{1}(x) = b_{1} + 2c_{1}(x-x_{1}) + 3d_{1}(x-x_{1})^{2} \\
# S'_{1}(x) = b_{2} + 2c_{2}(x-x_{2}) + 3d_{2}(x-x_{2})^{2} \\
# \vdots \\
# S'_{n-1}(x) = b_{n-1} + 2c_{n-1}(x-x_{n-1}) + 3d_{n-1}(x-x_{n-1})^{2} \\
# \end{equation}
#
# And its second derivatives:
# \begin{equation} S''_{1}(x) = 2c_{1} + 6d_{1}(x-x_{1}) \\
# S''_{2}(x) = 2c_{2} + 6d_{2}(x-x_{2}) \\
# \vdots \\
# S''_{n-1}(x) = 2c_{n-1} + 6d_{n-1}(x-x_{n-1}) \\
# \end{equation}
#
# Using the first property, we get **(n-1) equations**:
#
# \begin{equation} b_{1}(x_{2}-x_{1}) + c_{1}(x_{2}-x_{1})^2 + d_{1}(x_{2}-x_{1})^3 = y_{2} - y_{1} \hspace{1cm}(1)\\
# b_{2}(x_{3}-x_{2}) + c_{2}(x_{3}-x_{2})^2 + d_{2}(x_{3}-x_{2})^3 = y_{3} - y_{2} \hspace{1cm}(2)\\
# \vdots\\
# b_{n-1}(x_{n}-x_{n-1}) + c_{n-1}(x_{n}-x_{n-1})^2 + d_{n-1}(x_{n}-x_{n-1})^3 = y_{n} - y_{n-1} \hspace{1cm}(n-1)
# \end{equation}
#
# Using the second property, we get **(n-2) equations**:
#
# \begin{equation} b_{1}+2c_{1}(x_{2}-x_{1}) + 3d_{1}(x_{2}-x_{1})^2 - b_{2}= 0 \hspace{1cm}(1)\\
# b_{2}+2c_{2}(x_{3}-x_{2}) + 3d_{2}(x_{3}-x_{2})^2 - b_{3}= 0 \hspace{1cm}(2)\\
# \vdots\\
# b_{n-2}+2c_{n-2}(x_{n-1}-x_{n-2}) + 3d_{n-2}(x_{n-1}-x_{n-2})^2 -b_{n-1}=0 \hspace{1cm}(n-2)\\
# \end{equation}
#
# Using the third property, we get **(n-2) equations**:
#
# \begin{equation} 2c_{1}+6d_{1}(x_{2}-x_{1}) - 2c_{2} = 0 \hspace{1cm}(1)\\
# 2c_{2}+6d_{2}(x_{3}-x_{2}) - 2c_{3}=0 \hspace{1cm}(2)\\
# \vdots\\
# 2c_{n-2}+6d_{n-2}(x_{n-1}-x_{n-2}) - 2c_{n-1} = 0 \hspace{1cm}(n-2)\\
# \end{equation}
#
# If we adds all our equations, we obtains **(3n-5) equations**. Clearly, the matrix in that system is not square (we need 2 equations more). For this, **we have another property**, that defines the edges conditions of the splines
#
#
# <div id='ad'/>
#
# # Splines Ending options
#
# For this special property, we have the following 5 properties:
#
# ## Natural Spline:
# This property create a spline with zero curvature, thus:
# \begin{align*}
# S''_{1}(x_{1}) &= 2c_{1} = 0\\
# S''_{n-1}(x_{n}) &= 2c_{n-1}+6d_{n-1}(x_{n}-x_{n-1}) = 0
# \end{align*}
#
# ## Adjusted curvature:
# This property create a spline which curvature is equal to a parameter previously defined, not necessarily zero.
# \begin{align*}
# S''_{1}(x_{1}) &= 2c_{1} = \kappa_{1}\\
# S''_{n-1}(x_{n}) &= 2c_{n-1}+6d_{n-1}(x_{n}-x_{n-1}) = \kappa_{2}
# \end{align*}
#
# ## Clamped cubic spline:
# This property adjust the slopes at the edges of splines to a value previously defined
# \begin{align*}
# S'_{1}(x_{1}) & = b_{1} = p_{1} \\
# S'_{n-1}(x_{n}) & = b_{n-1}+2c_{n-1}(x_{n}-x_{n-1}) + 3d_{n-1}(x_{n}-x_{n-1})^2 = p_{2}
# \end{align*}
#
# ## Ended Parabolically
# With this property, the edges of the splines are 2-degree polynomials. Hence the coefficients:
# \begin{align*}
# d_{1} &= 0 \\
# d_{n-1} &= 0
# \end{align*}
#
# - Why does this property does not work for $n\le 3$?
#
# ## Not-a-Knot cubic spline
# This condition, checks the continuity at the edges, for the third derivative:
# \begin{align*}
# S'''_{1}(x_{2}) &= S'''_{2}(x_{2})\\
# 6d_{1}&=6d_{2}\\
# S'''_{n-2}(x_{n-1}) &= S'''_{n-1}(x_{n-1})\\
# 6d_{n-2}&=6d_{n-1}
# \end{align*}
#
# - Why does this property does not work for $n\le 4$?
#
# Each property give us the 2 equations needed. Thanks to this, we have **(3n-3) unknowns and equations**
# Finally, to find the coefficients of the spline, we'll build the system of equations.
#
# ## Example:
# - If we have the following 3 points: $(x_1,y_1),(x_2,y_2),(x_3,y_3)$
# - We will get 6 unknowns
# - So, to build a Natural spline, we need to solve the following linear system of equation:
#
# $$ \begin{bmatrix} (x_2-x_1) & (x_2-x_1)^2 & (x_2-x_1)^3 & 0 & 0 & 0 \\
# 0 & 0 & 0 &(x_3-x_2) & (x_3-x_2)^2 & (x_3-x_2)^3 \\
# 1 & 2(x_2-x_1) & 3(x_2-x_1)^2 & -1 & 0 & 0 \\
# 0 & 2 & 6(x_2-x_1) & 0 & -2 & 0 \\
# 0 & 2 & 0 & 0 & 0 & 0 \\
# 0 & 0 & 0 & 0 & 2 & 6(x_3-x_2) \\
# \end{bmatrix}
# \left[ \begin{array}{c} b_1 \\ c_1 \\ d_1 \\ b_2 \\ c_2 \\ d_2 \end{array} \right] =
# \left[ \begin{array}{c} y_2-y_1 \\ y_3-y_2 \\ 0 \\ 0 \\ 0 \\ 0 \end{array} \right]
# $$
#
# Finally, find the coefficients of splines is reduced to solve an equation system, and we already know this from previous notebooks!
#
# Now the code:
# +
def cubic_spline(x, y, end=None, k1=0, k2=0, p1=0, p2=0):
    """Compute the coefficients of a cubic spline through the points (x, y).

    Parameters
    ----------
    x, y : array_like
        Coordinates of the n data points (x must be strictly increasing).
    end : str
        Ending condition supplying the last two equations. One of
        'Natural', 'Adjusted', 'Clamped', 'Parabolically' or 'NaK'.
    k1, k2 : float
        Prescribed end curvatures (used only with end='Adjusted').
    p1, p2 : float
        Prescribed end slopes (used only with end='Clamped').

    Returns
    -------
    dict with keys 'b', 'c', 'd' (coefficient arrays, length n-1) and the
    original 'x', 'y', so that piece i is
    S_i(t) = y_i + b_i t + c_i t^2 + d_i t^3 with t = x - x_i.
    """
    n = len(x)
    # Unknowns are (b_i, c_i, d_i) for each of the n-1 pieces, interleaved.
    A = np.zeros((3*n-3, 3*n-3))
    b = np.zeros(3*n-3)
    delta_x=np.diff(x)
    #Building the linear system of equations
    # 1st property: each piece interpolates its two endpoints (n-1 equations).
    for i in np.arange(n-1):
        b[i]= y[i+1]-y[i]
        A[i,3*i:3*(i+1)] = [delta_x[i],delta_x[i]**2,delta_x[i]**3]
    # 2nd property: first-derivative continuity at interior knots (n-2 equations).
    for i in np.arange(n-2):
        A[(n-1)+i,3*i:3*(i+1)+1]=[1, 2*delta_x[i], 3*delta_x[i]**2, -1]
    # 3rd property: second-derivative continuity at interior knots (n-2 equations).
    for i in np.arange(n-2):
        A[(n-1)+(n-2)+i,3*i:3*(i+1)+2] = [0, 2, 6*delta_x[i], 0, -2]
    # Ending conditions (4th property) contribute the two remaining equations.
    if end =='Natural':
        # Zero curvature at both ends: S''(x_1) = S''(x_n) = 0.
        A[-2,1]= 2
        A[-1,-2] = 2
        A[-1,-1] = 6*delta_x[-1]
    elif end == 'Adjusted':
        # Prescribed curvatures k1, k2 at the ends.
        # (A leftover debug print of b[-2:] was removed here.)
        A[-2,1]= 2
        A[-1,-2] = 2
        A[-1,-1] = 6*delta_x[-1]
        b[-2:] = [k1,k2]
    elif end == 'Clamped':
        # Prescribed slopes p1, p2 at the ends.
        A[-2,0]=1
        A[-1,-3:] = [1,2*delta_x[-1],3*delta_x[-1]**2]
        b[-2:] = [p1,p2]
    elif end == 'Parabolically':
        # First and last pieces degenerate to quadratics: d_1 = d_{n-1} = 0.
        A[-2,2]=1
        A[-1,-1]=1
    elif end == 'NaK':
        # Not-a-Knot: third-derivative continuity at x_2 and x_{n-1}.
        A[-2,2:6]=[6,0,0,-6]
        A[-1,-4:]=[6,0,0,-6]
    else:
        # Without an ending condition the system is singular; fail early with
        # a clear message instead of np.linalg's opaque LinAlgError.
        raise ValueError("end must be one of 'Natural', 'Adjusted', "
                         "'Clamped', 'Parabolically' or 'NaK'")
    #Solving the system
    sol = np.linalg.solve(A,b)
    S = {'b':sol[::3],
         'c':sol[1::3],
         'd':sol[2::3],
         'x':x,
         'y':y
         }
    return S
# 'der' selects which derivative of the spline to evaluate (der=0 is S itself).
def cubic_spline_eval(xx,S,der=0):
    """Evaluate the cubic spline S (or its der-th derivative) at points xx.

    Parameters
    ----------
    xx : ndarray
        Evaluation points; values outside [x[0], x[-1]] are left at zero.
    S : dict
        Spline as returned by cubic_spline ('x', 'y', 'b', 'c', 'd').
    der : int
        Derivative order: 0 (default) evaluates S, 1-3 its derivatives,
        and any order >= 4 is identically zero for a cubic.
    """
    x=S['x']
    y=S['y']
    b=S['b']
    c=S['c']
    d=S['d']
    n=len(x)
    yy=np.zeros_like(xx)
    for i in np.arange(n-1):
        # Points falling in piece i (knot boundaries belong to both pieces).
        jj = np.where(np.logical_and(x[i]<=xx,xx<=x[i+1]))
        t = xx[jj]-x[i]
        if der == 0:
            yy[jj]=y[i]+b[i]*t+c[i]*t**2+d[i]*t**3
        elif der == 1:
            yy[jj]=b[i]+2*c[i]*t+3*d[i]*t**2
        elif der == 2:
            yy[jj]=2*c[i]+6*d[i]*t
        elif der == 3:
            yy[jj]=6*d[i]+0*t
        else:
            yy[jj]=0*t
    return yy
# -
# ## Example 1 - Hand made interpolation
# +
x = np.array([1,2,4,5])
y = np.array([2,1,4,3])
S = cubic_spline(x,y,end='Natural')
# One fine mesh per spline piece, so each S_i is plotted on its own interval.
x1 = np.linspace(1,2,200)
x2 = np.linspace(2,4,200)
x3 = np.linspace(4,5,200)
# Evaluate each cubic piece S_i(x) = y_i + b_i t + c_i t^2 + d_i t^3, t = x - x_i.
S1 = y[0]+S['b'][0]*(x1-x[0])+S['c'][0]*(x1-x[0])**2+S['d'][0]*(x1-x[0])**3
S2 = y[1]+S['b'][1]*(x2-x[1])+S['c'][1]*(x2-x[1])**2+S['d'][1]*(x2-x[1])**3
S3 = y[2]+S['b'][2]*(x3-x[2])+S['c'][2]*(x3-x[2])**2+S['d'][2]*(x3-x[2])**3
plt.figure(figsize=(M,M))
plt.plot(x,y,'k.',markersize=20,label='Data Points')
plt.plot(x1,S1,'b',linewidth=5,label=r'S$1(x)$')
plt.plot(x2,S2,'g',linewidth=5,label=r'S$2(x)$')
# Fixed copy-paste bug: this legend entry previously read 'S$2(x)$'.
plt.plot(x3,S3,'r',linewidth=5,label=r'S$3(x)$')
plt.xlabel('$x$')
plt.ylabel('$y$')
plt.legend(loc='center left', bbox_to_anchor=(1, 0.5))
plt.grid(True)
plt.show()
# -
# ## Example 2
def show_spline(type_ending='Natural',k1=0, k2=0, p1=0, p2=0):
    """Build and plot a cubic spline on a fixed demo data set.

    type_ending selects the ending condition; k1/k2 (curvatures) and
    p1/p2 (slopes) are forwarded to cubic_spline.
    """
    data_x = np.array([1, 2, 4, 5, 7, 9])
    data_y = np.array([2, 1, 4, 3, 3, 4])
    spline = cubic_spline(data_x, data_y, end=type_ending, k1=k1, k2=k2, p1=p1, p2=p2)
    mesh = np.linspace(np.min(data_x), np.max(data_x), 1000)
    plt.figure(figsize=(M, M))
    plt.plot(mesh, cubic_spline_eval(mesh, spline), '-', linewidth=5, label=r'S$(x)$')
    plt.plot(data_x, data_y, 'k.', markersize=20, label='Data Points')
    plt.xlabel('$x$')
    plt.ylabel('$y$')
    plt.legend(loc='center left', bbox_to_anchor=(1, 0.5))
    plt.grid(True)
    plt.show()
# Interactive widget: explore each ending condition and its parameters.
widgets.interact(
    show_spline,
    type_ending=['Natural', 'Adjusted', 'Clamped', 'Parabolically', 'NaK'],
    k1=(-20, 20, 1),
    k2=(-20, 20, 1),
    p1=(-2, 2, 0.2),
    p2=(-2, 2, 0.2),
)
# <div id='ex'/>
#
# ## Exercises
# Now, a few questions about splines:
# - Create the Clamped Cubic Spline given $(0,0), (1,1), (2,2)$. Consider $S'_1(x_1) = 0$ y $S'_3(x_3) = 1$. Plot it.
# - Can you add another point without recalculating all the coefficients again?
# - Implement the 'der' input of the 'cubic_spline_eval' function so we can visualize the derivatives of the spline.
# - Add the plots of all the derivatives you consider can be computed into Example 2.
# ## References
#
# ### Scipy Cubic Spline Interpolation
# - https://docs.scipy.org/doc/scipy/reference/generated/scipy.interpolate.CubicSpline.html#scipy.interpolate.CubicSpline
#
# <div id='acknowledgements' />
#
# # Acknowledgements
# * _Material created by professor <NAME>_ (`<EMAIL>`) _and assistans: <NAME>, <NAME>, <NAME> and <NAME>. DI UTFSM. April 2016._
# * _Updated by <NAME>_ (`<EMAIL>`). _DI UTFSM. June 2017._
# * _Updated by <NAME>_ (`<EMAIL>`). _DI UTFSM. May 2018._
# * _Update July 2020 - v1.22 - C.Torres_ : Fixing formatting issues.
| SC1/08_Interpolation_Splines.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # 4.25 Waiting at an ER
import math
def standard_error(std, sample_size):
    """Standard error of the sample mean: std / sqrt(n)."""
    return std / math.sqrt(sample_size)
def zscore(x, mu, sigma):
    """Number of standard deviations (sigma) that x lies from mu."""
    deviation = x - mu
    return deviation / sigma
def ci(pe, std, sample_size,conf_lvl):
    """Confidence interval around the point estimate pe.

    conf_lvl is a percentage key into the z-table below (80, 85, 90,
    95, 99, 99.5 or 99.9). Returns a (lower, upper) tuple, each bound
    rounded to 3 decimals.
    """
    z_table = {80: 1.282, 85: 1.440, 90: 1.645, 95: 1.960,
               99: 2.576, 99.5: 2.807, 99.9: 3.291}
    # Standard error of the mean, std / sqrt(n) (inlined helper).
    se = std / math.sqrt(sample_size)
    margin = z_table[conf_lvl] * se
    #returns a tuple
    return (round(pe - margin, 3), round(pe + margin, 3))
# ### (a) Are conditions for inference satisfied ?
# **Independent:** Yes. The 64 patients are fewer than 10% of the population of patients.
# <br/>
# **Randomization:** Yes. The sample is random.
# <br/>
# **Normal:** Yes. The sample size is larger than 30, so by the CLT the sampling distribution is nearly normal.
# ### (b) Is the change in wait time times statistically significant ?
# <u>Given : </u>
#sample mean (point estimate)
smean = 137.5
#sample standard deviation (minutes)
sd = 39
# sample size
n = 64
#population mean under the null hypothesis
mu = 127
# significance level used in part (b)
alpha = 0.05
# * Mathematical writing of the hypothesis test
# <br/><br/>
# $H_0 : \mu = 127$
# <br/>
# $H_A : \mu \neq 127$
# * Computing the Standard Error
# Standard error of the sample mean.
se = standard_error(sd,n)
se
# * Computing the Z-score
# Z-score of the observed sample mean under the null, rounded to 2 decimals.
z = round(zscore(smean,mu,se),2)
z
# * Computing the P-value
# By looking at the at z-statistic table we find out that the equivalent value is $0.9842$
# We look for the lower tail value : $1 - 0.9842 = 0.0158$
# We need to use a 2 sided test so to compute p-value : $pvalue = 2 \times 0.0158 = 0.0316$
# We can notice that $ pvalue < significance \space level$ (0.0316 < 0.05)
# Which means that we can reject the null hypothesis $H_0$
# ** There are sufficient evidence to support the claim that the change in wait times is statistically significant **
# ### (c) can we say the same with significance level of 0.01 ?
# p-value > 0.01 : we fail to reject $H_0$
# Means that there are no significant evidence to the support the claim that the change in wait times is statistically significant.
| Chapter 4/Exercises/4.25_Waiting_ER.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: pytorch
# language: python
# name: pytorch_env
# ---
import json
import numpy as np
import matplotlib.pyplot as plt
import os
import sys
import feature_extraction as fex
# get the path of files
filepaths = os.listdir(r'./ann/')
# store the information in different lists
images = []     # image names (annotation file name without its extension)
positions = []  # per image: list of crack polylines (exterior points)
lengths = []    # per image: length of each crack (via fex.extract_length)
angles = []     # per image: angle of each crack (via fex.extract_angle)
# extract the features
for file in filepaths:
    images.append(file.split('.')[0])
    with open("ann/" + file, "r") as f:
        data = json.load(f)
    objects = data["objects"]
    # Keep only the annotation objects labelled as cracks.
    crack = []
    for obj in objects:
        if obj["classTitle"] == "crack":
            crack.append(obj["points"]["exterior"])
    positions.append(crack)
    crack_length = []
    crack_angle = []
    for line in crack:
        crack_length.append(fex.extract_length(line))
        angle, _ = fex.extract_angle(line)
        crack_angle.append(angle)
    lengths.append(crack_length)
    angles.append(crack_angle)
# +
# save the features into json
crack_info = {}
crack_info["names"] = images
crack_info["positions"] = positions
crack_info["lengths"] = lengths
crack_info["angles"] = angles
with open('crack_info.json', 'w') as data:
    json.dump(crack_info, data)
# -
# let's see the distribution of the lengths
# Flatten the per-image nested lists into a single list for the histogram.
lengths_flatten = fex.flatten_list(lengths)
plt.hist(lengths_flatten, bins=100)
# +
# let's view the extraction result. The first image is the original positions
# of cracks, the second one is the fitting result. In the third image, cracks with
# length over 30 are filtered
inx = 4  # index of the image to visualise
plt.figure(figsize=(15,5))
# Panel 1: raw annotated crack points (y axis inverted to match image coords).
for line in positions[inx]:
    x = list(zip(*line))[0]
    y = list(zip(*line))[1]
    plt.subplot(131)
    plt.scatter(np.array(x),np.array(y))
    plt.xlim(0,400)
    plt.ylim(400,0)
ks = []     # fitted slopes
x0s = []    # fitted intercepts (despite the name, this is the y-intercept)
dists = []  # mean point-to-line distance of each fit
for i, line in enumerate(positions[inx]):
    x = list(zip(*line))[0]
    x = np.array(x)
    y = list(zip(*line))[1]
    y = np.array(y)
    # np.polyfit with degree 1 returns (slope, intercept), highest degree first.
    k, x0 = np.polyfit(x,y,1)
    # Mean perpendicular distance of the points from the fitted line.
    dist = np.average(np.abs(k*x-y+x0)/np.sqrt(k**2+1**2))
    dists.append(dist)
    ks.append(k)
    x0s.append(x0)
    y_hat = k*x+x0
    # Panel 2: fitted line for every crack.
    plt.subplot(132)
    plt.plot(x,y_hat)
    plt.xlim(0,400)
    plt.ylim(400,0)
    if lengths[inx][i] < 30:
        continue
    # Panel 3: fitted lines only for cracks with length >= 30.
    plt.subplot(133)
    plt.plot(x,y_hat)
    plt.xlim(0,400)
    plt.ylim(400,0)
#lt.tight_layout()
#plt.savefig('plot', dpi=600)
print(images[inx])
# -
| tutorials/old_feature_extraction.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python [conda root]
# language: python
# name: conda-root-py
# ---
# Import all the required libraries.
# +
# %matplotlib inline
import matplotlib.pyplot as plt
import numpy as np
import tensorflow as tf
from sklearn.metrics import confusion_matrix
import time
from datetime import timedelta
import math
# -
# Print the TensorFlow version.
tf.__version__
# Initialize the filter size and number of filters for both the convolutional layers.
# Also, intialize the total number of nodes in the convolutional layer.
# +
filter_size1 = 5   # conv layer 1: 5x5 filters
num_filters1 = 16  # conv layer 1: 16 filters
filter_size2 = 5   # conv layer 2: 5x5 filters
num_filters2 = 36  # conv layer 2: 36 filters
fc_size = 128      # fully connected layer: 128 neurons
# -
# Download and extract the training and testing images and labels sets from the MNIST dataset.
from tensorflow.examples.tutorials.mnist import input_data
data = input_data.read_data_sets('data/MNIST/', one_hot=True)
# Display the sizes of the labels in each set of the MNIST data.
print("Size of:")
print("- Training-set:\t\t{}".format(len(data.train.labels)))
print("- Test-set:\t\t{}".format(len(data.test.labels)))
print("- Validation-set:\t{}".format(len(data.validation.labels)))
# Calculate the class number for each label in the test set
# (labels are one-hot encoded, so argmax recovers the integer class).
data.test.cls = np.argmax(data.test.labels, axis=1)
# Initialize the dimensions of the image; MNIST images are 28x28 pixels.
img_size = 28
img_size_flat = img_size * img_size  # 784 pixels per flattened image
img_shape = (img_size, img_size)
num_channels = 1   # grayscale
num_classes = 10   # digits 0-9
# Create a method for displaying images.
def display_images(images, cls_true, cls_pred=None):
    """Show 16 images in a 4x4 grid, labelling each with its true class
    (and the predicted class when cls_pred is given)."""
    assert len(images) == len(cls_true) == 16
    fig, axes = plt.subplots(4, 4)
    fig.subplots_adjust(hspace=0.3, wspace=0.3)
    for i, ax in enumerate(axes.flat):
        # Images arrive flattened; restore the 2-D shape for plotting.
        ax.imshow(images[i].reshape(img_shape), cmap='binary')
        if cls_pred is None:
            label = "True: {0}".format(cls_true[i])
        else:
            label = "True: {0}, Pred: {1}".format(cls_true[i], cls_pred[i])
        ax.set_xlabel(label)
        ax.set_xticks([])
        ax.set_yticks([])
    plt.show()
# Plot few sample images.
# Show a few test images with their true labels as a sanity check.
images = data.test.images[0:16]
cls_true = data.test.cls[0:16]
display_images(images=images, cls_true=cls_true)
# Create a new TensorFlow variables and initialize with the random values.
def new_weights(shape):
    """New TF variable of the given shape, initialised from a truncated normal."""
    initial = tf.truncated_normal(shape, stddev=0.05)
    return tf.Variable(initial)
def new_biases(length):
    """New TF variable of `length` biases, all initialised to 0.05."""
    initial = tf.constant(0.05, shape=[length])
    return tf.Variable(initial)
# Create a convolutional layer method in the computational graph for TensorFlow by using, input, number of channels, filter size, number of filters. Input is assumed as four-dimensional tensor with these (Image number, X-axis of image, Y-axis of image, Channels of each image) dimensions.
def new_conv_layer(input, num_input_channels, filter_size, num_filters, use_pooling=True):
    """Create a convolutional layer (stride 1, SAME padding) on an NHWC input.

    Optionally applies 2x2 max-pooling, then a ReLU.
    Returns the output tensor and its filter weights variable.
    """
    filter_shape = [filter_size, filter_size, num_input_channels, num_filters]
    weights = new_weights(shape=filter_shape)
    biases = new_biases(length=num_filters)
    layer = tf.nn.conv2d(input=input, filter=weights, strides=[1, 1, 1, 1], padding='SAME')
    layer += biases
    if use_pooling:
        # 2x2 max-pooling with stride 2 halves the spatial resolution.
        layer = tf.nn.max_pool(value=layer, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')
    # ReLU applied after pooling; relu(max_pool(x)) equals max_pool(relu(x))
    # but operates on fewer elements.
    layer = tf.nn.relu(layer)
    return layer, weights
# The output of the convolutional layer is a 4-dimension tensor and so need to reduce to lower dimension. Create a flatter layer method which reduce the 4-dimension tensor to 2-dimension so that the output can be used as input to the fully connected layer. Reshape the input from [image number, image height, image width, number of channels] to [image number, number of features] where the number of features is the product of image height, image width and number of channels.
def flatten_layer(layer):
    """Reshape a 4-D conv output [batch, h, w, channels] to 2-D [batch, features].

    Returns the flattened tensor and the number of features (h * w * channels).
    """
    shape = layer.get_shape()
    num_features = shape[1:4].num_elements()
    flattened = tf.reshape(layer, [-1, num_features])
    return flattened, num_features
# Now create a fully connected layer method with input of 2-dimension tensor and calculate the layers by calculating the mathematical formula weight*input + biases after creating weights and biases.
def new_fc_layer(input, num_inputs, num_outputs, use_relu=True):
    """Fully connected layer computing input @ weights + biases,
    optionally followed by a ReLU activation."""
    weights = new_weights(shape=[num_inputs, num_outputs])
    biases = new_biases(length=num_outputs)
    result = tf.matmul(input, weights) + biases
    return tf.nn.relu(result) if use_relu else result
# Define a placeholder variable for the input image.
# Placeholder for flattened input images.
x = tf.placeholder(tf.float32, shape=[None, img_size_flat], name='x')
# Reshape the variable x to a 4-dimension tensor because the input to the convolutional layer is a 4-dimension tensor.
x_image = tf.reshape(x, [-1, img_size, img_size, num_channels])
# Similarly, create a placeholder variable for true labels (one-hot).
y_true = tf.placeholder(tf.float32, shape=[None, num_classes], name='y_true')
# Also, create a placeholder variable for the class number using argmax.
# NOTE(review): `dimension=` is the TF1-era argument name for tf.argmax;
# later releases deprecated it in favour of `axis=`.
y_true_cls = tf.argmax(y_true, dimension=1)
# First convolutional layer with 2x2 max-pooling.
layer_conv1, weights_conv1 = new_conv_layer(input=x_image,num_input_channels=num_channels,filter_size=filter_size1,
                   num_filters=num_filters1,use_pooling=True)
# Verify the shape of the tensor from the output of the first convolutional layer.
layer_conv1
# Second convolutional layer, fed from the first layer's output.
layer_conv2, weights_conv2 = new_conv_layer(input=layer_conv1,num_input_channels=num_filters1,filter_size=filter_size2,
                   num_filters=num_filters2, use_pooling=True)
# Verify the shape of the tensor from the output of the second convolutional layer.
layer_conv2
# Flatten the output of the 2nd convolutional layer from 4-D to 2-D.
layer_flat, num_features = flatten_layer(layer_conv2)
# Verify the dimension of the flattened layer and the number of features.
layer_flat
num_features
# First fully connected layer on the flattened features.
layer_fc1 = new_fc_layer(input=layer_flat,num_inputs=num_features,num_outputs=fc_size,use_relu=True)
layer_fc1
# Second fully connected layer outputs one logit per class (no ReLU).
layer_fc2 = new_fc_layer(input=layer_fc1,num_inputs=fc_size,num_outputs=num_classes,use_relu=False)
layer_fc2
# Normalize the logits into class probabilities with softmax.
y_pred = tf.nn.softmax(layer_fc2)
# Predicted class number is the index of the largest probability.
y_pred_cls = tf.argmax(y_pred, dimension=1)
# Cross-entropy computed from the raw logits (softmax applied internally).
cross_entropy = tf.nn.softmax_cross_entropy_with_logits(logits=layer_fc2,labels=y_true)
# Average cross-entropy over the batch is the training cost.
cost = tf.reduce_mean(cross_entropy)
# Adam optimizer, an adaptive form of gradient descent.
optimizer = tf.train.AdamOptimizer(learning_rate=1e-4).minimize(cost)
# Boolean vector: where the predicted class equals the true class.
correct_prediction = tf.equal(y_pred_cls, y_true_cls)
# Classification accuracy = mean of the correctness booleans.
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
# Create a TF1 session and initialize all graph variables.
session = tf.Session()
session.run(tf.global_variables_initializer())
# Number of training images per optimization step.
train_batch_size = 64
# Create an optimize method to optimize the variables of the network layers and for each iteration, a new batch of data is selected from the training data.
# +
total_iterations = 0  # training iterations performed so far (module-level counter)

def optimize(num_iterations):
    """Run num_iterations training steps, printing batch accuracy every 100 steps.

    Updates the module-level total_iterations counter and reports the
    total wall-clock time at the end.
    """
    global total_iterations
    t_start = time.time()
    first = total_iterations
    last = total_iterations + num_iterations
    for step in range(first, last):
        # Fetch the next training batch and run one optimizer step.
        x_batch, y_true_batch = data.train.next_batch(train_batch_size)
        feed_dict_train = {x: x_batch, y_true: y_true_batch}
        session.run(optimizer, feed_dict=feed_dict_train)
        if step % 100 == 0:
            batch_acc = session.run(accuracy, feed_dict=feed_dict_train)
            msg = "Optimization Iteration: {0:>6}, Training Accuracy: {1:>6.1%}"
            print(msg.format(step + 1, batch_acc))
    total_iterations += num_iterations
    time_dif = time.time() - t_start
    print("Time usage: " + str(timedelta(seconds=int(round(time_dif)))))
# -
# Create method to plot images from the test set that are wrongly classified.
def display_errors(cls_pred, correct):
    """Display up to 16 test images that were classified incorrectly."""
    incorrect_mask = (correct == False)
    # Select the misclassified images along with both label arrays.
    bad_images = data.test.images[incorrect_mask]
    bad_pred = cls_pred[incorrect_mask]
    bad_true = data.test.cls[incorrect_mask]
    display_images(images=bad_images[0:16], cls_true=bad_true[0:16], cls_pred=bad_pred[0:16])
# Create a method to implement the classification accuracy for a test-set. Here, the test-set is splitted in to small number of batches.
# +
test_batch_size = 256  # images processed per inference batch

def print_test_accuracy(show_example_errors=False):
    """Print the classification accuracy over the whole test set.

    The test set is processed in batches of test_batch_size to bound
    memory use. When show_example_errors is True, a sample of
    misclassified images is displayed as well.
    """
    num_test = len(data.test.images)
    # Predicted class for every test image.
    # Fixed: the np.int alias was deprecated in NumPy 1.20 and removed in
    # 1.24; the builtin int is the correct dtype specifier.
    cls_pred = np.zeros(shape=num_test, dtype=int)
    i = 0
    while i < num_test:
        j = min(i + test_batch_size, num_test)
        images = data.test.images[i:j, :]
        labels = data.test.labels[i:j, :]
        feed_dict = {x: images,y_true: labels}
        cls_pred[i:j] = session.run(y_pred_cls, feed_dict=feed_dict)
        i = j
    cls_true = data.test.cls
    correct = (cls_true == cls_pred)
    correct_sum = correct.sum()
    acc = float(correct_sum) / num_test
    msg = "Accuracy on Test-Set: {0:.1%} ({1} / {2})"
    print(msg.format(acc, correct_sum, num_test))
    if show_example_errors:
        print("Errors:")
        display_errors(cls_pred=cls_pred, correct=correct)
# Accuracy before any training.
print_test_accuracy()
optimize(num_iterations=1)
print_test_accuracy()
optimize(num_iterations=99)  # total: 100 iterations
print_test_accuracy(show_example_errors=True)
optimize(num_iterations=900) # We performed 100 iterations above; total: 1000.
print_test_accuracy(show_example_errors=True)
optimize(num_iterations=4500)  # total: 5500 iterations
print_test_accuracy(show_example_errors=True)
| CNN.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/Rishit-dagli/GDG-Nashik-2020/blob/master/tfhub_neural_style_transfer.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + [markdown] id="IiB-sygv0SyJ" colab_type="text"
# # Neural Style Transfer
#
# This notebook shows how you could use TensorFlow Hub to very simply build a high performace style transfer algorithm
# + [markdown] id="DjCi_KGK1_Qw" colab_type="text"
# ## What do we want to do
#
# We are trying to repaint one image in the texture (style) of another. Some examples:
#
# <img src="https://raw.githubusercontent.com/Rishit-dagli/GDG-Nashik-2020/master/images/style_transfer_eg1.jpg?token=<KEY>">
# <br>
# <img src="https://raw.githubusercontent.com/Rishit-dagli/GDG-Nashik-2020/master/images/style_transfer_eg2.jpg?token=<KEY>">
#
# <img src="https://raw.githubusercontent.com/Rishit-dagli/GDG-Nashik-2020/master/images/style_transfer_eg3.jpg?token=<KEY>C">
# + [markdown] id="TCwlF_RE3BBJ" colab_type="text"
# ## Imports
# + id="ecdJbaZT1-qg" colab_type="code" colab={}
import tensorflow as tf
import matplotlib.pyplot as plt
import numpy as np
import tensorflow_hub as hub
# + [markdown] id="Sh7FLYMV6VNl" colab_type="text"
# ## Cloning the repo
# + id="ciHvl8Db06zn" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 119} outputId="f0fe65f7-75bf-4e3e-8877-285905e818da"
# !git clone https://github.com/Rishit-dagli/GDG-Nashik-2020.git
# + [markdown] id="bah-NyKF7y6F" colab_type="text"
# ## Specifying the input images
# + id="1HsPu8uE7ySV" colab_type="code" colab={}
BASE_CONTENT_DIR = "GDG-Nashik-2020/images/examples/images/"
# Content images: the photos whose subject we want to keep
content_image = ["andrew_ng.jpg", "dog.jpg", "flower.jpg", "flower2.jpg", "mona_lisa.jpg"]
BASE_STYLE_DIR = "GDG-Nashik-2020/images/examples/styles/"
# Style images: the artworks whose texture we want to transfer
style_image = ["style1.jpg", "style2.jpg", "style3.jpg","style4.jpg", "style5.jpg",]
# + id="4RyPtkw90MZK" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 216} outputId="97e10cca-2b6a-4101-cfac-3c058a116b71"
# Preview one content/style pair side by side before running the model
content_img = plt.imread(BASE_CONTENT_DIR + content_image[3])
style_img = plt.imread(BASE_STYLE_DIR + style_image[2])
plt.subplot(1, 2, 1)
plt.title('Content Image')
plt.axis('off')
plt.imshow(content_img)
plt.subplot(1, 2, 2)
plt.title('Style Image')
plt.axis('off')
plt.imshow(style_img)
# + [markdown] id="GfrSmYUx9mf_" colab_type="text"
# ## Some helper functions
#
# We will define functions which convert an image to tensor and vice versa
# + id="Xwtl6iHj6eJc" colab_type="code" colab={}
def image_to_tensor(path_to_img):
    """Read an image file and return it as a 4-D float32 tensor.

    The image is decoded to 3 channels, resized to 720x512 and given a
    leading batch dimension so it can be fed straight into the hub module.
    """
    raw = tf.io.read_file(path_to_img)
    tensor = tf.image.decode_image(raw, channels=3, dtype=tf.float32)
    # Resize the image to the fixed dimensions used throughout this notebook
    tensor = tf.image.resize(tensor, [720, 512])
    # Prepend a batch dimension: (H, W, C) -> (1, H, W, C)
    return tensor[tf.newaxis, :]
# + id="Fpd6WvlC6gI_" colab_type="code" colab={}
def tensor_to_image(tensor):
    """Render a batched float tensor (values assumed in [0, 1]) as an image.

    Returns the matplotlib AxesImage produced by ``plt.imshow``.
    """
    # Scale to 0-255, cast to uint8 and drop the batch dimension
    pixels = np.array(tensor * 255, dtype=np.uint8)[0]
    plt.figure(figsize=(20, 10))
    plt.axis('off')
    return plt.imshow(pixels)
# + [markdown] id="vIiu_jeN-vwt" colab_type="text"
# ## Images to tensors
#
# We will now convert our images to tensors, so they can be fed in the network
# + id="It2MrXOV6gqb" colab_type="code" colab={}
# Convert every content and style image into a model-ready tensor up front
content_image_tensor = [image_to_tensor(BASE_CONTENT_DIR + fname) for fname in content_image]
style_image_tensor = [image_to_tensor(BASE_STYLE_DIR + fname) for fname in style_image]
# + [markdown] id="TtJaU0SA-5uA" colab_type="text"
# ## Loading the module
#
# We will now load the model from TF Hub, as this is an end to end module there is no need to add any more layers
# + id="Z_xBOxlI6j9C" colab_type="code" colab={}
# Load the end-to-end arbitrary style transfer module from TF Hub;
# no extra layers are needed on top of it
hub_module = hub.load('https://tfhub.dev/google/magenta/arbitrary-image-stylization-v1-256/2')
# + [markdown] id="5gZxasRT_EgA" colab_type="text"
# ## Doing inference
# + id="CcCKBmZB7jDZ" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 592} outputId="c5722646-0b8f-4ba7-ef96-547669bed16b"
# The module returns a tuple; the stylised image is the first element
combined_result = hub_module(tf.constant(content_image_tensor[0]), tf.constant(style_image_tensor[1]))[0]
tensor_to_image(combined_result)
| tfhub_neural_style_transfer.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Debugging a pipeline
# `river` encourages users to make use of pipelines. The biggest pain point of pipelines is that it can be hard to understand what's happening to the data, especially when the pipeline is complex. Fortunately the `Pipeline` class has a `debug_one` method that can help out.
#
# Let's look at a fairly complex pipeline for predicting the number of bikes in 5 bike stations from the city of Toulouse. It doesn't matter if you understand the pipeline or not; the point of this notebook is to learn how to introspect a pipeline.
# + tags=[]
import datetime as dt
from river import compose
from river import datasets
from river import feature_extraction
from river import linear_model
from river import metrics
from river import preprocessing
from river import stats
from river import stream
X_y = datasets.Bikes()
# Simulate a production setting: each observation's true label only
# becomes available 30 minutes after the observation itself
X_y = stream.simulate_qa(X_y, moment='moment', delay=dt.timedelta(minutes=30))
def add_time_features(x):
    """Return a copy of the feature dict enriched with the hour of day and
    the weekday (0 = Monday) extracted from the 'moment' timestamp."""
    features = dict(x)
    features['hour'] = x['moment'].hour
    features['day'] = x['moment'].weekday()
    return features
# Pipeline: time features -> (raw weather columns | per-station target
# aggregates) -> standard scaling -> linear regression
model = add_time_features
model |= (
    compose.Select('clouds', 'humidity', 'pressure', 'temperature', 'wind') +
    feature_extraction.TargetAgg(by=['station', 'hour'], how=stats.Mean()) +
    feature_extraction.TargetAgg(by='station', how=stats.EWMean())
)
model |= preprocessing.StandardScaler()
model |= linear_model.LinearRegression()
metric = metrics.MAE()

# Predictions made for observations whose true label hasn't arrived yet,
# keyed by the event index emitted by simulate_qa.
questions = {}

for i, x, y in X_y:

    # Question: the label isn't available yet, so predict and remember the
    # prediction until the corresponding answer arrives.
    is_question = y is None
    if is_question:
        y_pred = model.predict_one(x)
        questions[i] = y_pred

    # Answer: score the earlier prediction, then learn from the revealed
    # label. pop() removes the stored prediction so the dict doesn't grow
    # without bound over a long stream.
    else:
        metric.update(y, questions.pop(i))
        model = model.learn_one(x, y)

        if i >= 30000 and i % 30000 == 0:
            print(i, metric)
# -
# Let's start by looking at the pipeline. You can click each cell to display the current state for each step of the pipeline.
# + tags=[]
model
# -
# As mentioned above the `Pipeline` class has a `debug_one` method. You can use this at any point you want to visualize what happens to an input `x`. For example, let's see what happens to the last seen `x`.
# + tags=[]
print(model.debug_one(x))
# -
# The pipeline does quite a few things, but using `debug_one` shows what happens step by step. This is really useful for checking that the pipeline is behaving as you're expecting it to. Remember that you can `debug_one` whenever you wish, be it before, during, or after training a model.
| docs/examples/debugging-a-pipeline.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: geospade
# language: python
# name: geospade
# ---
# + nbsphinx="hidden" slideshow={"slide_type": "skip"}
from IPython.display import Image
from IPython.core.display import HTML
# + [markdown] slideshow={"slide_type": "slide"}
# ## Mosaic Geometry
#
# Georeferenced EO data sets are often too huge to be stored as one single file and are therefore often tiled and provided in a gridded structure. Geometry-wise, such a structure was realised within the `MosaicGeometry` classes, which represent a well-defined collection of `Tile` objects, which are a light wrapper around a `RasterGeometry`. The basic `MosaicGeometry` sets the following requirements to its tiles:
#
# - a tile is represented by a `RasterGeometry` object, so only rectangular shapes are allowed
# - all tiles must have the same CRS
# - tiles are not allowed to intersect with each other
#
# The neighbourhood relationship is stored in an adjacency matrix (https://en.wikipedia.org/wiki/Adjacency_matrix), which allows performant spatial queries beyond tile boundaries.
#
# ### Tile
#
# A `Tile` object is a slim wrapper around a `RasterGeometry` object. It stores some further attributes, which are relevant for interacting with a mosaic:
#
# - `active`: Flag indicating if a tile is "visible"/active or not.
# - `mosaic_topology`: A string defining the relation with the mosaic boundary. It can be "INNER", "OUTER", or "BOUNDARY". If it is None, then this indicates that the tile is decoupled from the original mosaic, e.g. after an intersection with a geometry.
# - `metadata`: A dictionary storing some additional metadata information.
# - `mask`: This parameter is a place-holder for a pixel mask indicating valid and non-valid pixel coordinates. This parameter is internally used when accessing tiles through the mosaic containing labelled pixels depending on the relation with the mosaic boundary.
#
# ### Initialisation
#
# The constructor of `MosaicGeometry` expects the following arguments:
#
# - `tiles`: List of `Tile` (or other child classes) instances.
# - `boundary`: A polygon strictly defining the valid area of the mosaic, i.e. in which coordinates are allowed to exist. In the default case, the cascaded union of all tiles defines the boundary of the mosaic.
# - `adjacency_matrix`: A quadratic adjacency matrix defining the direct neighbourhood relationship. It should only contain booleans or ones/zeros.
# - `name` (optional): Name of the mosaic
# - `description` (optional): Verbal description of the raster geometry.
# - `check_consistency` (optional): If true, the requirements listed above are checked.
# +
from geospade.crs import SpatialRef
from geospade.raster import Tile
from geospade.raster import MosaicGeometry

# define spatial reference system
sref = SpatialRef(4326)

# Define the tiles of the mosaic from a compact spec instead of five
# copy-pasted constructor calls. Each entry is:
# (n_rows, n_cols, upper-left x, upper-left y, tile name, extra Tile kwargs)
tile_specs = [
    (50, 50, 5, 50, "Tile 1", {'metadata': {'test': True}}),
    (50, 50, 15, 50, "Tile 2", {'active': False, 'metadata': {'test': True}}),
    (50, 50, 25, 50, "Tile 3", {'metadata': {'test': False}}),
    (50, 75, 5, 40, "Tile 4", {'metadata': {'test': False}}),
    (50, 75, 20, 40, "Tile 5", {'metadata': {'test': False}}),
]
tiles = [
    Tile(n_rows, n_cols, sref,
         geotrans=(ul_x, 0.2, 0, ul_y, 0, -0.2),  # 0.2 deg pixels, north-up
         name=name, **kwargs)
    for n_rows, n_cols, ul_x, ul_y, name, kwargs in tile_specs
]

# define other attributes of the mosaic
mosaic_name = "Mosaic 1"
mosaic_geom = MosaicGeometry(tiles, name=mosaic_name)
# -
# This is not the only way to create a mosaic geometry. Similar as for a raster geometry, you can create a mosaic geometry from a JSON-like definition, which contains the adjacency matrix and the mosaic properties in addition to the list of tiles:
import json
json_str = """
{
"name": "Mosaic 1",
"description": "",
"boundary": "POLYGON ((5 30,5 40,5 50,15 50,25 50,35 50,35 40,35 30,20 30,5 30))",
"tiles": {
"0": {
"name": "Tile 1",
"active": "true",
"mosaic_topology": "INNER",
"metadata": {},
"number_of_rows": 50,
"number_of_columns": 50,
"spatial_reference": {
"proj": "longlat",
"datum": "WGS84",
"no_defs": true
},
"geotransformation": [
5,
0.2,
0,
50,
0,
-0.2
],
"pixel_origin": "ul",
"description": ""
},
"1": {
"name": "Tile 2",
"active": "false",
"mosaic_topology": "INNER",
"metadata": {},
"number_of_rows": 50,
"number_of_columns": 50,
"spatial_reference": {
"proj": "longlat",
"datum": "WGS84",
"no_defs": true
},
"geotransformation": [
15,
0.2,
0,
50,
0,
-0.2
],
"pixel_origin": "ul",
"description": ""
},
"2": {
"name": "Tile 3",
"active": "true",
"mosaic_topology": "INNER",
"metadata": {},
"number_of_rows": 50,
"number_of_columns": 50,
"spatial_reference": {
"proj": "longlat",
"datum": "WGS84",
"no_defs": true
},
"geotransformation": [
25,
0.2,
0,
50,
0,
-0.2
],
"pixel_origin": "ul",
"description": ""
},
"3": {
"name": "Tile 4",
"active": "true",
"mosaic_topology": "INNER",
"metadata": {},
"number_of_rows": 50,
"number_of_columns": 75,
"spatial_reference": {
"proj": "longlat",
"datum": "WGS84",
"no_defs": true
},
"geotransformation": [
5,
0.2,
0,
40,
0,
-0.2
],
"pixel_origin": "ul",
"description": ""
},
"4": {
"name": "Tile 5",
"active": "true",
"mosaic_topology": "INNER",
"metadata": {},
"number_of_rows": 50,
"number_of_columns": 75,
"spatial_reference": {
"proj": "longlat",
"datum": "WGS84",
"no_defs": true
},
"geotransformation": [
20,
0.2,
0,
40,
0,
-0.2
],
"pixel_origin": "ul",
"description": ""
}
},
"type": "irregular",
"adjacency_matrix": [
[
false,
true,
false,
true,
false
],
[
true,
false,
true,
true,
true
],
[
false,
true,
false,
false,
true
],
[
true,
true,
false,
false,
true
],
[
false,
true,
true,
true,
false
]
]
}
"""
# Parse the JSON definition above and rebuild an equivalent mosaic from it
definition = json.loads(json_str)
mosaic_geom_def = MosaicGeometry.from_definition(definition)
# ### Properties
#
# A mosaic geometry shares some common attributes with a raster geometry:
mosaic_geom.sref
mosaic_geom.ori
# In addition, you can access the tiles via
mosaic_geom.tiles
# , and the tile names via
mosaic_geom.tile_names
# Both properties allow access to the tiles set to `active=True`. If you want to retrieve all tiles disregarding their visibility status, use:
mosaic_geom.all_tiles
mosaic_geom.all_tile_names
# ### Plotting
#
# A mosaic geometry offers a simple wrapper to plot each tile, with almost the same keywords as for the `plot()` function of a raster geometry. In addition, you can choose if you want to only show active tiles (`active_only=True`, default) and to plot the mosaic boundary (`plot_boundary=True`, default).
# +
import matplotlib.pyplot as plt
plt.figure(figsize=(12, 12))
mosaic_geom.plot(label_tiles=True, alpha=0.8, extent=[0, 25, 40, 55], plot_boundary=True, active_only=True)
# -
# ### Tile Access
#
# There are several options to access a tile within a mosaic geometry. First you can use brackets with a tile name to get a tile object from a mosaic.
tile_by_brkts = mosaic_geom['Tile 1']
# If you want to know which tile is covering a coordinate tuple, you can use
tile_by_coords = mosaic_geom.xy2tile(5.25, 49.6)
# Both tiles should be the same:
tile_by_brkts == tile_by_coords
# There is also the possibility to get the direct neighbours of one tile:
mosaic_geom.get_neighbouring_tiles('Tile 1')
# As you can see above, all tile access methods (except the `[]` access with a tile name) return a dictionary with the tile names as keys and the tile objects as values.
#
# You can also select tiles by specifying a geometry, which can be intersected with the mosaic as follows:
# +
from osgeo import osr
from osgeo import ogr
from shapely.geometry import Polygon
polygon = Polygon([(10, 47), (17, 41), (15, 35), (7, 33), (10, 47)])
ogr_geom = ogr.CreateGeometryFromWkt(polygon.wkt)
ogr_geom.AssignSpatialReference(sref.osr_sref)
sel_tiles = mosaic_geom.select_tiles_by_geom(ogr_geom, active_only=True, apply_mask=True)
sel_tiles
# -
# Besides specifying if only active tiles should be considered (`active_only=True`, default), you can set `apply_mask`, which generates valid pixel mask for each tile (`apply_mask=True`, default).
sel_tiles['Tile 1'].mask
# A very similar function goes one step further, and intersects the tiles with a given geometry, thus decoupling them from the mosaic.
# Intersect the tiles with the polygon, decoupling them from the mosaic
intsct_tiles = mosaic_geom.slice_tiles_by_geom(ogr_geom, active_only=True)
intsct_tiles
x_coords, y_coords = polygon.exterior.xy
plt.figure(figsize=(12, 12))
ax = mosaic_geom.plot(label_tiles=True, alpha=0.8, extent=[0, 25, 40, 55], plot_boundary=True, active_only=True)
# NOTE(review): integer indexing here vs. name indexing (`sel_tiles['Tile 1']`)
# above — confirm whether slice_tiles_by_geom returns a list while
# select_tiles_by_geom returns a dict.
ax = intsct_tiles[0].plot(ax, edgewidth=4, alpha=0.8, facecolor='green')
ax = intsct_tiles[1].plot(ax, edgewidth=4, alpha=0.8, facecolor='green')
ax.plot(x_coords, y_coords, color='orange', linewidth=4)
# ### Subsetting
#
# Instead of retrieving single tiles from a spatial query, you can also change the active tiles in a mosaic. This can be done very similar as before, i.e. by intersecting the original mosaic geometry with another geometry.
# +
mosaic_geom.slice_by_geom(ogr_geom)
plt.figure(figsize=(12, 12))
ax = mosaic_geom.plot(label_tiles=True, edgewidth=1, alpha=0.8, extent=[0, 25, 40, 55])
ax.plot(x_coords, y_coords, color='orange', linewidth=4)
# -
# Another option is to filter the tiles by metadata entries, e.g. we want to only work with tiles, where the metadata attribute `'test'` equals to `True`.
# +
mosaic_geom.filter_tile_metadata({'test': True})
plt.figure(figsize=(12, 12))
mosaic_geom.plot(label_tiles=True, edgewidth=1, alpha=0.8, extent=[0, 25, 40, 55])
# -
# ## Regular Mosaic Geometry
#
# With respect to `MosaicGeometry`, `RegularMosaicGeometry` sets some further requirements to its tiles:
#
# - same pixel size
# - same shape
#
# This structure paves the path for more performant spatial queries and solving neighbourhood relationships. Such relationships are also encoded in an adjacency matrix, but this time not in a binary format. The dimensions of the matrix refer to the dimensions of the mosaic (e.g. number of tiles in S and W direction) and its entries to a tile name.
#
# An additional classmethod `from_rectangular_definition` can help to create a regular mosaic geometry. In the following example we also make use of the `boundary` parameter (same for `MosaicGeometry`) to exemplify its interaction with the tiles.
# + slideshow={"slide_type": "slide"}
from geospade.raster import RegularMosaicGeometry
mosaic_ul_x = 5
mosaic_ul_y = 50
mosaic_rows = 2
mosaic_cols = 3
x_tile_size = 10.
y_tile_size = 10.
geotrans = (mosaic_ul_x, 0.2, 0., mosaic_ul_y, 0., -0.2)
mosaic_boundary = Polygon(((14, 41), (21, 48), (31, 46), (27, 32), (5, 31), (14, 41)))
mosaic_boundary = ogr.CreateGeometryFromWkt(mosaic_boundary.wkt)
mosaic_boundary.AssignSpatialReference(sref.osr_sref)
reg_mosaic_geom = RegularMosaicGeometry.from_rectangular_definition(mosaic_rows, mosaic_cols, x_tile_size,
y_tile_size, sref,
boundary=mosaic_boundary,
geotrans=geotrans)
plt.figure(figsize=(12, 12))
reg_mosaic_geom.plot(label_tiles=True, edgewidth=1, alpha=0.8, extent=[0, 25, 40, 55])
# -
# A regular mosaic geometry has the same properties as a basic mosaic geometry, except an additional shape property.
reg_mosaic_geom.shape
# None of the tiles is now fully inside or outside the mosaic, i.e. they all share the relation "BOUNDARY".
reg_mosaic_geom['S001W001'].mosaic_topology
# This means, if we access a tile by specifying coordinates being outside the mosaic boundary, we get `None` as a return value:
reg_mosaic_geom.xy2tile(6, 50)
# whereas for coordinates being inside we get
reg_mosaic_geom.xy2tile(14, 41).name
# Moreover, the masks of the "BOUNDARY" tiles now directly represent the mosaic boundary at the specified mosaic pixel sampling:
reg_mosaic_geom['S001W001'].mask
plt.figure(figsize=(12, 12))
ax = reg_mosaic_geom.plot(label_tiles=True, edgewidth=1, alpha=0.8, extent=[0, 25, 40, 55])
for tile_name in reg_mosaic_geom.tile_names:
tile = reg_mosaic_geom[tile_name]
outer_boundary_extent = tile.outer_boundary_extent
extent = (outer_boundary_extent[0], outer_boundary_extent[2], outer_boundary_extent[3], outer_boundary_extent[1])
ax.imshow(tile.mask, extent=extent)
| docs/notebooks/mosaic_geometry.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Basic exploration
# - For each command, refer to official pandas documentation and explore all options
# - Don't get bogged down into infrastructure aspects, set up etc.
# +
# On jupyterhub for each user do the below
# # ! pip3 install matplotlib --user
# # ! pip3 install pandas==0.25.3 --user
# # ! pip3 install numpy --user
# -
import pandas as pd
# Read a csv file. df stands for DataFrame
df = pd.read_csv('../height_weight.csv')
# # Print Dataframe
df
# # Shape
# - shape returns the size of rows and columns
# No of rows x No of columns
df.shape
# # Info
# - Info gives various meta on the dataframe elements
# Information about column data types
# https://docs.scipy.org/doc/numpy/user/basics.types.html
df.info()
# # Mean
# - Also known as average (sum of elements divided by number of elements)
df.mean()
# # head
# - Borrowed from *nix terminology
# - Returns the top n rows
df.head(10)
# # tail
# - Borrowed from *nix, returns bottom n rows
df.tail(10)
# # Sample
# - Sampling from the dataframe
df.sample(100)
# Some statistical analysis
df.describe()
# Statistics for the weight column
df['weight'].describe()
# Frequency of weight values
df['weight'].value_counts()
# Correlation coefficient measures strength of relationships between variables. [-1,1]
df['weight'].corr(df['height'])
# Plot histogram
# %matplotlib inline
df['weight'].plot.hist()
# More granular vision - increase bins
df['weight'].plot.hist(bins=100)
# Plot between two columns
df.plot(x='weight', y='height')
# Various styles, in this case using marker - # https://matplotlib.org/api/pyplot_api.html#matplotlib.pyplot.plot
df.plot(x='weight', y='height', style='o')
# Various styles, in this case using marker - # https://matplotlib.org/api/pyplot_api.html#matplotlib.pyplot.plot
df.plot(x='weight', y='height', style='*')
# Add color by prepending allowed color values to markers
df.plot(x='weight', y='height', style='r.')
# Example: pie charts from a small, self-contained DataFrame.
# Use a dedicated name so we do not clobber the height/weight frame (`df`)
# used throughout the rest of this notebook.
planets = pd.DataFrame(
    {'mass': [0.330, 4.87, 5.97],           # 10^24 kg
     'radius': [2439.7, 6051.8, 6378.1]},   # km
    index=['Mercury', 'Venus', 'Earth'])
plot = planets.plot.pie(y='mass', figsize=(5, 5))
planets.plot.pie(subplots=True, figsize=(6, 3))
| class1_explore/basic_exploration.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # NNabla Python API Demonstration Tutorial
#
# Let us import nnabla first, and some additional useful tools.
# +
# If you run this notebook on Google Colab, uncomment and run the following to set up dependencies.
# # !pip install nnabla-ext-cuda100
# # !git clone https://github.com/sony/nnabla.git
# # %cd nnabla/tutorial
# -
# python2/3 compatibility
from __future__ import print_function
from __future__ import absolute_import
from __future__ import division
# +
import nnabla as nn # Abbreviate as nn for convenience.
import numpy as np
# %matplotlib inline
import matplotlib.pyplot as plt
# -
# ## NdArray
#
# NdArray is a data container of a multi-dimensional array. NdArray is device (e.g. CPU, CUDA) and type (e.g. uint8, float32) agnostic, in which both type and device are implicitly casted or transferred when it is used. Below, you create a NdArray with a shape of `(2, 3, 4)`.
a = nn.NdArray((2, 3, 4))
# You can see the values held inside `a` by the following. The values are not initialized, and are created as float32 by default.
print(a.data)
# The accessor `.data` returns a reference to the values of NdArray as `numpy.ndarray`. You can modify these by using the Numpy API as follows.
print('[Substituting random values]')
a.data = np.random.randn(*a.shape)
print(a.data)
print('[Slicing]')
a.data[0, :, ::2] = 0
print(a.data)
# Note that the above operation is all done in the host device (CPU). NdArray provides more efficient functions in case you want to fill all values with a constant, `.zero` and `.fill`. They are lazily evaluated when the data is requested (when neural network computation requests the data, or when numpy array is requested by Python) The filling operation is executed within a specific device (e.g. CUDA GPU), and more efficient if you specify the device setting, which we explain later.
a.fill(1) # Filling all values with one.
print(a.data)
# You can create an NdArray instance directly from a Numpy array object.
b = nn.NdArray.from_numpy_array(np.ones(a.shape))
print(b.data)
# NdArray is used in Variable class, as well as NNabla's imperative computation of neural networks. We describe them in the later sections.
# ## Variable
# Variable class is used when you construct a neural network. The neural network can be described as a graph in which an edge represents a function (a.k.a operator and layer) which defines operation of a minimum unit of computation, and a node represents a variable which holds input/output values of a function (Function class is explained later). The graph is called "Computation Graph".
#
# In NNabla, a Variable, a node of a computation graph, holds two `NdArray`s, one for storing the input or output values of a function during forward propagation (executing computation graph in the forward order), while another for storing the backward error signal (gradient) during backward propagation (executing computation graph in backward order to propagate error signals down to parameters (weights) of neural networks). The first one is called `data`, the second is `grad` in NNabla.
# The following line creates a Variable instance with a shape of (2, 3, 4). It has `data` and `grad` as `NdArray`. The flag `need_grad` is used to omit unnecessary gradient computation during backprop if set to False.
x = nn.Variable([2, 3, 4], need_grad=True)
print('x.data:', x.data)
print('x.grad:', x.grad)
# You can get the shape by:
x.shape
# Since both `data` and `grad` are `NdArray`, you can get a reference to its values as NdArray with the `.data` accessor, but also it can be referred by `.d` or `.g` property for `data` and `grad` respectively.
# +
print('x.data')
print(x.d)
x.d = 1.2345 # To avoid NaN
assert np.all(x.d == x.data.data), 'd: {} != {}'.format(x.d, x.data.data)
print('x.grad')
print(x.g)
x.g = 1.2345 # To avoid NaN
assert np.all(x.g == x.grad.data), 'g: {} != {}'.format(x.g, x.grad.data)
# Zeroing grad values
x.grad.zero()
print('x.grad (after `.zero()`)')
print(x.g)
# -
# Like `NdArray`, a `Variable` can also be created from Numpy array(s).
x2 = nn.Variable.from_numpy_array(np.ones((3,)), need_grad=True)
print(x2)
print(x2.d)
x3 = nn.Variable.from_numpy_array(np.ones((3,)), np.zeros((3,)), need_grad=True)
print(x3)
print(x3.d)
print(x3.g)
# Besides storing values of a computation graph, pointing a parent edge (function) to trace the computation graph is an important role. Here `x` doesn't have any connection. Therefore, the `.parent` property returns None.
print(x.parent)
# ## Function
# A function defines an operation block of a computation graph as we described above. The module `nnabla.functions` offers various functions (e.g. Convolution, Affine and ReLU). You can see the list of functions available in the [API reference guide](http://nnabla.readthedocs.io/en/latest/python/api/function.html#module-nnabla.functions).
import nnabla.functions as F
# As an example, here you will define a computation graph that computes the element-wise Sigmoid function outputs for the input variable and sums up all values into a scalar. (This is simple enough to explain how it behaves but a meaningless example in the context of neural network training. We will show you a neural network example later.)
sigmoid_output = F.sigmoid(x)
sum_output = F.reduce_sum(sigmoid_output)
# The function API in `nnabla.functions` takes one (or several) Variable(s) and arguments (if any), and returns one (or several) output Variable(s). The `.parent` points to the function instance which created it.
# Note that no computation occurs at this time since we just define the graph. (This is the default behavior of NNabla computation graph API. You can also fire actual computation during graph definition which we call "Dynamic mode" (explained later)).
print("sigmoid_output.parent.name:", sigmoid_output.parent.name)
print("x:", x)
print("sigmoid_output.parent.inputs refers to x:", sigmoid_output.parent.inputs)
print("sum_output.parent.name:", sum_output.parent.name)
print("sigmoid_output:", sigmoid_output)
print("sum_output.parent.inputs refers to sigmoid_output:", sum_output.parent.inputs)
# The `.forward()` at a leaf Variable executes the forward pass computation in the computation graph.
sum_output.forward()
print("CG output:", sum_output.d)
print("Reference:", np.sum(1.0 / (1.0 + np.exp(-x.d))))
# The `.backward()` does the backward propagation through the graph. Here we initialize the `grad` values as zero before backprop since the NNabla backprop algorithm always accumulates the gradient in the root variables.
x.grad.zero()
sum_output.backward()
print("d sum_o / d sigmoid_o:")
print(sigmoid_output.g)
print("d sum_o / d x:")
print(x.g)
# NNabla is developed by mainly focused on neural network training and inference. Neural networks have parameters to be learned associated with computation blocks such as Convolution, Affine (a.k.a. fully connected, dense etc.). In NNabla, the learnable parameters are also represented as `Variable` objects. Just like input variables, those parameter variables are also used by passing into `Function`s. For example, Affine function takes input, weights and biases as inputs.
x = nn.Variable([5, 2]) # Input: batch of 5 samples, 2 features each
w = nn.Variable([2, 3], need_grad=True) # Weights: map 2 -> 3 dimensions
b = nn.Variable([3], need_grad=True) # Biases: one per output dimension
affine_out = F.affine(x, w, b) # Create a graph including only affine
# The above example takes an input with B=5 (batchsize) and D=2 (dimensions) and maps it to D'=3 outputs, i.e. (B, D') output.
#
# You may also notice that here you set `need_grad=True` only for parameter variables (w and b). The x is a non-parameter variable and the root of computation graph. Therefore, it doesn't require gradient computation. In this configuration, the gradient computation for x is not executed in the first affine, which will omit the computation of unnecessary backpropagation.
#
# The next block sets data and initializes grad, then applies forward and backward computation.
# +
# Set random input and parameters
x.d = np.random.randn(*x.shape)
w.d = np.random.randn(*w.shape)
b.d = np.random.randn(*b.shape)
# Initialize grad
x.grad.zero() # Just for showing gradients are not computed when need_grad=False (default).
w.grad.zero()
b.grad.zero()
# Forward and backward
affine_out.forward()
affine_out.backward()
# Note: Calling backward on a non-scalar Variable propagates 1 as the error signal from every element of the output.
# -
# You can see that affine_out holds an output of Affine.
print('F.affine')
print(affine_out.d)
print('Reference')
print(np.dot(x.d, w.d) + b.d)
# The resulting gradients of weights and biases are as follows.
print("dw")
print(w.g)
print("db")
print(b.g)
# The gradient of `x` is not changed because `need_grad` is set as False.
print(x.g)
# ## Parametric Function
# Considering parameters as inputs of `Function` enhances expressiveness and flexibility of computation graphs.
# However, to define all parameters for each learnable function is annoying for users to define a neural network.
# In NNabla, trainable models are usually created by composing functions that have optimizable parameters.
# These functions are called "Parametric Functions".
# The Parametric Function API provides various parametric functions and an interface for composing trainable models.
# To use parametric functions, import:
import nnabla.parametric_functions as PF
# The function with optimizable parameter can be created as below.
with nn.parameter_scope("affine1"):
c1 = PF.affine(x, 3)
# The first line creates a **parameter scope**. The second line then applies `PF.affine` - an affine transform - to `x`, and creates a variable `c1` holding that result. The parameters are created and initialized randomly at function call, and registered by a name "affine1" using `parameter_scope` context. The function `nnabla.get_parameters()` allows to get the registered parameters.
nn.get_parameters()
# The `name=` argument of any PF function creates the equivalent parameter space to the above definition of `PF.affine` transformation as below. It could save the space of your Python code. The `nnabla.parametric_scope` is more useful when you group multiple parametric functions such as Convolution-BatchNormalization found in a typical unit of CNNs.
c1 = PF.affine(x, 3, name='affine1')
nn.get_parameters()
# It is worth noting that the shapes of both outputs and parameter variables (as you can see above) are automatically determined by only providing the output size of affine transformation(in the example above the output size is 3). This helps to create a graph in an easy way.
c1.shape
# Parameter scope can be nested as follows (although a meaningless example).
with nn.parameter_scope('foo'):
h = PF.affine(x, 3)
with nn.parameter_scope('bar'):
h = PF.affine(h, 4)
# This creates the following.
nn.get_parameters()
# Also, `get_parameters()` can be used in `parameter_scope`. For example:
with nn.parameter_scope("foo"):
print(nn.get_parameters())
# `nnabla.clear_parameters()` can be used to delete registered parameters under the scope.
with nn.parameter_scope("foo"):
nn.clear_parameters()
print(nn.get_parameters())
# ## MLP Example For Explanation
# The following block creates a computation graph to predict one dimensional output from two dimensional inputs by a 2 layer fully connected neural network (multi-layer perceptron).
nn.clear_parameters()
batchsize = 16
x = nn.Variable([batchsize, 2])
with nn.parameter_scope("fc1"):
h = F.tanh(PF.affine(x, 512))
with nn.parameter_scope("fc2"):
y = PF.affine(h, 1)
print("Shapes:", h.shape, y.shape)
# This will create the following parameter variables.
nn.get_parameters()
# As described above, you can execute the forward pass by calling forward method at the terminal variable.
x.d = np.random.randn(*x.shape) # Set random input
y.forward()
print(y.d)
# Training a neural networks needs a loss value to be minimized by gradient descent with backprop. In NNabla, loss function is also a just function, and packaged in the functions module.
# +
# Variable for label
label = nn.Variable([batchsize, 1])
# Set loss
loss = F.reduce_mean(F.squared_error(y, label))
# Execute forward pass.
label.d = np.random.randn(*label.shape) # Randomly generate labels
loss.forward()
print(loss.d)
# -
# As you've seen above, NNabla `backward` accumulates the gradients at the root variables. You have to initialize the grad of the parameter variables before backprop (We will show you the easiest way with `Solver` API).
# Collect all parameter variables and init grad.
for name, param in nn.get_parameters().items():
param.grad.zero()
# Gradients are accumulated to grad of params.
loss.backward()
# ## Imperative Mode
# After performing backprop, gradients are held in parameter variable grads. The next block will update the parameters with vanilla gradient descent.
for name, param in nn.get_parameters().items():
param.data -= param.grad * 0.001 # 0.001 as learning rate
# The above computation is an example of NNabla's "Imperative Mode" for executing neural networks. Normally, NNabla functions (instances of [nnabla.functions](https://nnabla.readthedocs.io/en/latest/python/api/function.html#module-nnabla.functions)) take `Variable`s as their input. When at least one `NdArray` is provided as an input for NNabla functions (instead of `Variable`s), the function computation will be fired immediately, and returns an `NdArray` as the output, instead of returning a `Variable`. In the above example, the NNabla functions `F.mul_scalar` and `F.sub2` are called by the overridden operators `*` and `-=`, respectively.
#
# In other words, NNabla's "Imperative mode" doesn't create a computation graph, and can be used like NumPy. If device acceleration such as CUDA is enabled, it can be used like NumPy empowered with device acceleration. Parametric functions can also be used with NdArray input(s). The following block demonstrates a simple imperative execution example.
# A simple example of imperative mode.
xi = nn.NdArray.from_numpy_array(np.arange(4).reshape(2, 2))
yi = F.relu(xi - 1)
print(xi.data)
print(yi.data)
# Note that in-place substitution from the rhs to the lhs cannot be done by the `=` operator. For example, when `x` is an `NdArray`, writing `x = x + 1` will *not* increment all values of `x` - instead, the expression on the rhs will create a *new* `NdArray` object that is different from the one originally bound by `x`, and binds the new `NdArray` object to the Python variable `x` on the lhs.
#
# For in-place editing of `NdArrays`, the in-place assignment operators `+=`, `-=`, `*=`, and `/=` can be used. The `copy_from` method can also be used to copy values of an existing `NdArray` to another. For example, incrementing 1 to `x`, an `NdArray`, can be done by `x.copy_from(x+1)`. The copy is performed with device acceleration if a device context is specified by using `nnabla.set_default_context` or `nnabla.context_scope`.
# +
# The following doesn't perform substitution but assigns a new NdArray object to `xi`.
# xi = xi + 1
# The following copies the result of `xi + 1` to `xi`.
xi.copy_from(xi + 1)
assert np.all(xi.data == (np.arange(4).reshape(2, 2) + 1))
# Inplace operations like `+=`, `*=` can also be used (more efficient).
xi += 1
assert np.all(xi.data == (np.arange(4).reshape(2, 2) + 2))
# -
# ## Solver
# NNabla provides stochastic gradient descent algorithms to optimize parameters listed in the `nnabla.solvers` module. The parameter updates demonstrated above can be replaced with this Solver API, which is easier and usually faster.
from nnabla import solvers as S
solver = S.Sgd(lr=0.00001)
solver.set_parameters(nn.get_parameters())
# +
# Set random data
x.d = np.random.randn(*x.shape)
label.d = np.random.randn(*label.shape)
# Forward
loss.forward()
# -
# Just call the following solver method to zero-fill the grad buffers, then run backprop
solver.zero_grad()
loss.backward()
# The following block updates parameters with the Vanilla Sgd rule (equivalent to the imperative example above).
solver.update()
# ## Toy Problem To Demonstrate Training
# The following function defines a regression problem which computes the norm of a vector.
def vector2length(x):
    """Compute the Euclidean length of each row vector.

    Parameters
    ----------
    x : numpy.ndarray of shape [B, 2], where B is the number of samples.

    Returns
    -------
    numpy.ndarray of shape [B, 1] holding the L2 norm of every row.
    """
    squared_sums = (x ** 2).sum(axis=1, keepdims=True)
    return np.sqrt(squared_sums)
# We visualize this mapping with the contour plot by matplotlib as follows.
# +
# Data for plotting contour on a grid data.
xs = np.linspace(-1, 1, 100)
ys = np.linspace(-1, 1, 100)
grid = np.meshgrid(xs, ys)
X = grid[0].flatten()
Y = grid[1].flatten()
def plot_true():
    """Contour-plot the ground-truth vector2length mapping on the 100x100
    grid (xs, ys, X, Y) built in the preceding cell."""
    # X and Y are flattened meshgrid coordinates; stack them as [N, 2] samples.
    plt.contourf(xs, ys, vector2length(np.hstack([X[:, None], Y[:, None]])).reshape(100, 100))
    plt.axis('equal')
    plt.colorbar()
plot_true()
# -
# We define a deep prediction neural network.
def length_mlp(x):
    """Build a 4-hidden-layer MLP (tanh units of widths 4, 8, 4, 2) with a
    linear scalar output.

    Parameters are registered under the scopes "fc0".."fc3" and "fc", so
    repeated calls reuse the same weights.
    """
    h = x
    # Each iteration stacks one affine + tanh layer of the given width.
    for i, hnum in enumerate([4, 8, 4, 2]):
        h = F.tanh(PF.affine(h, hnum, name="fc{}".format(i)))
    # Final linear layer maps to a single output (the predicted length).
    y = PF.affine(h, 1, name='fc')
    return y
nn.clear_parameters()
batchsize = 100
x = nn.Variable([batchsize, 2])
y = length_mlp(x)
label = nn.Variable([batchsize, 1])
loss = F.reduce_mean(F.squared_error(y, label))
# We created a 5 layers deep MLP using for-loop. Note that only 3 lines of the code potentially create infinitely deep neural networks. The next block adds helper functions to visualize the learned function.
# +
def predict(inp):
    """Evaluate the trained MLP on `inp`, chunked by the graph batch size.

    The input array is processed in slices of ``x.shape[0]`` rows (the
    batch size of the training graph); each slice runs imperatively
    through `length_mlp` and the per-slice outputs are stacked into a
    single [N, 1] array.
    """
    batch = x.shape[0]
    outputs = []
    start = 0
    while start < inp.shape[0]:
        chunk = inp[start:start + batch]
        # Imperative execution: NdArray inputs fire computation immediately.
        nd_in = nn.NdArray.from_numpy_array(chunk)
        nd_out = length_mlp(nd_in)
        outputs.append(nd_out.data.copy())
        start += batch
    return np.vstack(outputs)
def plot_prediction():
    """Contour-plot the network's predicted lengths over the same grid
    used by plot_true, for side-by-side comparison."""
    plt.contourf(xs, ys, predict(np.hstack([X[:, None], Y[:, None]])).reshape(100, 100))
    plt.colorbar()
    plt.axis('equal')
# -
# Next we instantiate a solver object as follows. We use Adam optimizer which is one of the most popular SGD algorithm used in the literature.
from nnabla import solvers as S
solver = S.Adam(alpha=0.01)
solver.set_parameters(nn.get_parameters())
# The following function generates data from the true system infinitely.
def random_data_provider(n):
    """Draw `n` random 2-D points in [-1, 1]^2 with their true lengths.

    Returns
    -------
    (samples, targets) : samples is [n, 2], targets is [n, 1].
    """
    samples = np.random.uniform(-1, 1, size=(n, 2))
    targets = vector2length(samples)
    return samples, targets
# In the next block, we run 2000 training steps (SGD updates).
num_iter = 2000
for i in range(num_iter):
# Sample data and set them to input variables of training.
xx, ll = random_data_provider(batchsize)
x.d = xx
label.d = ll
# Forward propagation given inputs.
loss.forward(clear_no_need_grad=True)
# Parameter gradients initialization and gradients computation by backprop.
solver.zero_grad()
loss.backward(clear_buffer=True)
# Apply weight decay and update by Adam rule.
solver.weight_decay(1e-6)
solver.update()
# Just print progress.
if i % 100 == 0 or i == num_iter - 1:
print("Loss@{:4d}: {}".format(i, loss.d))
# **Memory usage optimization**: You may notice that, in the above updates, `.forward()` is called with the `clear_no_need_grad=` option, and `.backward()` is called with the `clear_buffer=` option. Training of neural network in more realistic scenarios usually consumes huge memory due to the nature of backpropagation algorithm, in which all of the forward variable buffer `data` should be kept in order to compute the gradient of a function. In a naive implementation, we keep all the variable `data` and `grad` living until the `NdArray` objects are not referenced (i.e. the graph is deleted). The `clear_*` options in `.forward()` and `.backward()` enables to save memory consumption due to that by clearing (erasing) memory of `data` and `grad` when it is not referenced by any subsequent computation. (More precisely speaking, it doesn't free memory actually. We use our memory pool engine by default to avoid memory alloc/free overhead). The unreferenced buffers can be re-used in subsequent computation. See the document of `Variable` for more details. Note that the following `loss.forward(clear_buffer=True)` clears `data` of any intermediate variables. If you are interested in intermediate variables for some purposes (e.g. debug, log), you can use the `.persistent` flag to prevent clearing buffer of a specific `Variable` like below.
loss.forward(clear_buffer=True)
print("The prediction `y` is cleared because it's an intermediate variable.")
print(y.d.flatten()[:4]) # to save space show only 4 values
y.persistent = True
loss.forward(clear_buffer=True)
print("The prediction `y` is kept by the persistent flag.")
print(y.d.flatten()[:4]) # to save space show only 4 value
# We can confirm the prediction performs fairly well by looking at the following visualization of the ground truth and prediction function.
plt.subplot(121)
plt.title("Ground truth")
plot_true()
plt.subplot(122)
plt.title("Prediction")
plot_prediction()
# You can save learned parameters by `nnabla.save_parameters` and load by `nnabla.load_parameters`.
path_param = "param-vector2length.h5"
nn.save_parameters(path_param)
# Remove all once
nn.clear_parameters()
nn.get_parameters()
# Load again
nn.load_parameters(path_param)
print('\n'.join(map(str, nn.get_parameters().items())))
# Both save and load functions can also be used in a parameter scope.
with nn.parameter_scope('foo'):
nn.load_parameters(path_param)
print('\n'.join(map(str, nn.get_parameters().items())))
# !rm {path_param} # Clean ups
| tutorial/python_api.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
#Import vari
import pandas as pd
import matplotlib.pyplot as plt
from sklearn import tree
import pydotplus
from sklearn.model_selection import train_test_split
from sklearn import metrics
import time
from sklearn.neighbors import KNeighborsClassifier
from sklearn.svm import SVC
from sklearn.metrics import accuracy_score
#Definisco il classificatore SupportVectorMachine(in particoalre RBF SVM) con i parametri con cui si sono ottenuti i risultati migliori
def MySupportVectorMachine():
    """Train and evaluate an RBF-kernel SVM on the global train/test split.

    Uses the module-level globals X_train, X_test, y_train, y_test and
    start_time. Prints a classification report, elapsed time and accuracy,
    and saves the confusion matrix plot to OutputSVM/ConfusionMatrix.png.
    """
    # Build the classifier with the hyper-parameters that gave the best results.
    classifier = SVC(C=10, break_ties=False, cache_size=200, class_weight=None, coef0=0.0,
                     decision_function_shape='ovr', degree=3, gamma='auto', kernel='rbf',
                     max_iter=-1, probability=False, random_state=1, shrinking=True, tol=0.001,
                     verbose=False)
    # Fit on the training split.
    classifier.fit(X_train, y_train)
    # Predict on the test split.
    predicted = classifier.predict(X_test)
    # Axis tick labels for the confusion matrix (-1 -> Female, 1 -> Male).
    labels = ("Female","Male")
    positions = (0,1)
    # Print per-class precision/recall/F1.
    print("Classification report for classifier %s:\n%s\n"
          % (classifier, metrics.classification_report(y_test, predicted)))
    # NOTE(review): metrics.plot_confusion_matrix was removed in scikit-learn 1.2;
    # on newer versions use ConfusionMatrixDisplay.from_estimator instead.
    disp = metrics.plot_confusion_matrix(classifier, X_test, y_test, cmap="OrRd")
    disp.figure_.suptitle("Confusion Matrix")
    print("Confusion matrix:\n%s" % disp.confusion_matrix)
    # Print the elapsed wall-clock time since the script started (minutes).
    print ("\nTempo trascorso: {:.2f}m\n".format((time.time()-start_time)/60))
    # Relabel the axes and save/show the confusion matrix figure.
    plt.xticks(positions,labels)
    plt.yticks(positions,labels)
    plt.savefig('OutputSVM/ConfusionMatrix.png', bbox_inches='tight')
    plt.show()
    # Print the overall accuracy.
    print("Accuratezza: ")
    print(accuracy_score(y_test, predicted))
#Definisco il classificatore DecisionTree con i parametri con cui si sono ottenuti i risultati migliori
def MyDecisionTree():
    """Train and evaluate a depth-limited decision tree on the global split.

    Uses the module-level globals X_train, X_test, y_train, y_test and
    start_time. Prints accuracy and elapsed time, then renders the fitted
    tree to OutputDT/GraphDecisionTree.png via GraphViz/pydotplus.
    """
    # The decision tree classifier (hyper-parameters that gave the best results).
    clf = tree.DecisionTreeClassifier(criterion = "gini", max_depth=13)
    # Fit the tree on the training split.
    clf_train = clf.fit(X_train, y_train)
    # Predict labels for the test split.
    y_pred = clf.predict(X_test)
    # Evaluate model accuracy.
    print("Accuracy:",metrics.accuracy_score(y_test, y_pred))
    # Elapsed wall-clock time since the script started (minutes).
    print ("\nTempo trascorso: {:.2f}m\n".format((time.time()-start_time)/60))
    # Export the fitted tree in DOT format via GraphViz. Gini impurity decides
    # which feature is placed at the root and at each internal node.
    dot_data = tree.export_graphviz(clf_train, out_file=None, feature_names=X_train.columns.values,
                                    class_names=['Female', 'Male'], rounded=True, filled=True)
    # Build a Graph object from the DOT description.
    graph = pydotplus.graph_from_dot_data(dot_data)
    # Save the rendered tree as a PNG (the return value was previously bound
    # to an unused local; it is intentionally discarded here).
    graph.write_png("OutputDT/GraphDecisionTree.png")
#Definisco il classificatore K NearestNeighbors con i parametri con cui si sono ottenuti i risultati migliori
def MyNearestNeighbors():
    """Train and evaluate a k-NN classifier (k=9) on the global split.

    Uses the module-level globals X_train, X_test, y_train, y_test and
    start_time. Prints accuracy, elapsed time and a classification report,
    and saves the confusion matrix plot under OutputKNN/.
    """
    # NearestNeighbors classifier with the best-performing k.
    classifier = KNeighborsClassifier(n_neighbors=9)
    # Fit on the training split (fit returns the estimator itself).
    clf_train = classifier.fit(X_train, y_train)
    # Predict labels for the test split.
    y_pred = classifier.predict(X_test)
    # Evaluate model accuracy.
    print("Accuracy:",metrics.accuracy_score(y_test, y_pred))
    # Elapsed wall-clock time since the script started (minutes).
    print ("\nTempo trascorso: {:.2f}m\n".format((time.time()-start_time)/60))
    # Axis tick labels for the confusion matrix (-1 -> Female, 1 -> Male).
    labels = ("Female","Male")
    positions = (0,1)
    # Print per-class precision/recall/F1.
    print("Classification report for classifier %s:\n%s\n"
          % (classifier, metrics.classification_report(y_test, y_pred)))
    # NOTE(review): metrics.plot_confusion_matrix was removed in scikit-learn 1.2;
    # on newer versions use ConfusionMatrixDisplay.from_estimator instead.
    disp = metrics.plot_confusion_matrix(classifier, X_test, y_test, cmap=plt.cm.Blues)
    disp.figure_.suptitle("Confusion Matrix")
    print("Confusion matrix:\n%s" % disp.confusion_matrix)
    # Relabel the axes and save/show the confusion matrix figure.
    plt.xticks(positions,labels)
    plt.yticks(positions,labels)
    plt.savefig('OutputKNN/ConfusionMatrix4c4s_n9.png', bbox_inches='tight')
    plt.show()
plt.show()
#Funzione creazione dataset bilanciato
def CreateBalanced4c4s(dfconc):
    """Create a gender-balanced copy of the 4c4s dataset on disk.

    Writes `dfconc` (feature columns plus the 'Gender' flag at positional
    column 64, with -1 = female and 1 = male) to
    DatasetCelebA/Dataset4c4sBalanced.csv, then rewrites the file keeping
    the same number of male and female rows.
    """
    # Dump the concatenated frame so it can be re-read without a header
    # (columns then become positional integers, with gender at index 64).
    dfconc.to_csv("DatasetCelebA/Dataset4c4sBalanced.csv", header = False, index = False)
    # Re-read the CSV just written to manipulate it positionally.
    DFbalanced = pd.read_csv("DatasetCelebA/Dataset4c4sBalanced.csv",header = None)
    # Split rows by gender: 1 = male, -1 = female.
    dfBalanceM = DFbalanced.loc[DFbalanced[64] == 1]
    dfBalanceF = DFbalanced.loc[DFbalanced[64] == -1]
    # GENERALIZATION: the original truncated the female frame to a
    # hard-coded male row count (84434, specific to CelebA 4c4s).
    # Computing the per-class minimum balances any input dataset while
    # producing the same result on the original data.
    n_balanced = min(len(dfBalanceM), len(dfBalanceF))
    dfBalanceM = dfBalanceM.iloc[0:n_balanced]
    dfBalanceF = dfBalanceF.iloc[0:n_balanced]
    # Concatenate the two equally-sized frames and overwrite the CSV.
    DFbalanced = pd.concat([dfBalanceM,dfBalanceF], axis = 0)
    DFbalanced.to_csv("DatasetCelebA/Dataset4c4sBalanced.csv", header = False, index = False)
#Funzione per suddividere il dataset bilanciato in train e test set
def ExecOnBalanced():
    """Load the balanced dataset and split it into train/test sets.

    Returns
    -------
    X_train, X_test, y_train, y_test
        A 70/30 split of the balanced data (random_state=1 for
        reproducibility).
    """
    # BUG FIX: the file is written as "Dataset4c4sBalanced.csv" by
    # CreateBalanced4c4s; the previous lowercase "dataset4c4sBalanced.csv"
    # fails on case-sensitive filesystems (e.g. Linux).
    dataframe = pd.read_csv("DatasetCelebA/Dataset4c4sBalanced.csv", header=None)
    # Rename positional column 64 to Gender (-1 = female, 1 = male).
    dataframe = dataframe.rename(columns={dataframe.columns[64]: "Gender"})
    # Feature variables: columns 1..63. Column 0 is skipped — presumably an
    # identifier column; TODO confirm against the dataset layout.
    feature_cols = list(dataframe.columns.values)
    X = feature_cols[1:len(feature_cols) - 1]
    X = dataframe[X]
    # Target variable.
    y = dataframe.Gender
    # Split the dataframe: 70% training and 30% test.
    X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3,random_state=1)
    return X_train,X_test,y_train,y_test
#Funzione lettura CSV su cui eseguire i test
def ReadCSV():
    """Load the feature dataset and attach the CelebA gender attribute.

    Reads the headerless 4c4s feature CSV and the CelebA attribute list,
    extracts the 'Male' attribute (positional column 21), renames it to
    'Gender' (-1 = female, 1 = male) and concatenates it column-wise to
    the feature frame.

    Returns
    -------
    pandas.DataFrame
        Features plus a trailing 'Gender' column.
    """
    features_df = pd.read_csv("DatasetCelebA/dataset4c4s.csv", header=None)
    attributes_df = pd.read_csv("DatasetCelebA/list_attr_celeba.csv")
    # Column index 21 of the attribute list is the 'Male' flag.
    gender_col = pd.DataFrame(attributes_df.iloc[0:202599, 21])
    gender_col = gender_col.rename(columns={"Male": "Gender"})  # -1 female, 1 male
    # Column-wise concat; not needed for the balanced dataset variant.
    return pd.concat([features_df, gender_col], axis=1, sort=False)
# +
# Record the start time so each classifier can report its total runtime.
start_time = time.time()
choice = input("Digita 1 se vuoi eseguire i test sul dataset bilanciato, 0 altrimenti.\n")
if choice == "1":
    # Read the raw dataset; here it is only needed to build the balanced one.
    dfconc = ReadCSV()
    # Create the balanced 4c4s dataset on disk.
    CreateBalanced4c4s(dfconc)
    # Split the balanced dataframe into train/test sets for the classifiers.
    X_train, X_test, y_train, y_test = ExecOnBalanced()
else:
    # Read the dataset the tests will run on.
    dfconc = ReadCSV()
    # Feature variables (column 0 and the trailing Gender column excluded).
    feature_cols = list(dfconc.columns.values)
    X = feature_cols[1:len(feature_cols)-1]
    X = dfconc[X]
    # Target variable.
    y = dfconc.Gender
    # Split the dataframe into train and test sets.
    X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=1) # 70% training and 30% test
# -
# Run the classifiers (each reads the global train/test splits above).
MyDecisionTree()
MyNearestNeighbors()
MySupportVectorMachine()
| MainJupyter.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] colab_type="text" id="view-in-github"
# <a href="https://colab.research.google.com/gist/parulnith/7f8c174e6ac099e86f0495d3d9a4c01e/untitled9.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + [markdown] colab_type="text" id="cNnM2w-HCeb1"
# # Music genre classification notebook
# + [markdown] colab_type="text" id="2l3sppZMCydR"
# ## Importing Libraries
# + colab={} colab_type="code" id="Gt3fyg6dCNvX"
# feature extractoring and preprocessing data
import librosa
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
# %matplotlib inline
import os
from PIL import Image
import pathlib
import csv
# Preprocessing
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import LabelEncoder, StandardScaler
#Keras
import keras
import warnings
warnings.filterwarnings('ignore')
# + [markdown] colab_type="text" id="DPe_ebYuDqr5"
# ## Extracting music and features
#
# ### Dataset
#
# We use [GTZAN genre collection](http://marsyasweb.appspot.com/download/data_sets/) dataset for classification.
# <br>
# <br>
# The dataset consists of 10 genres i.e
# * Blues
# * Classical
# * Country
# * Disco
# * Hiphop
# * Jazz
# * Metal
# * Pop
# * Reggae
# * Rock
#
# Each genre contains 100 songs. Total dataset: 1000 songs
# + [markdown] colab_type="text" id="neqMS0VoDpN5"
#
# + [markdown] colab_type="text" id="AfBSVfRCD3PE"
# ## Extracting the Spectrogram for every Audio
# + colab={} colab_type="code" id="BHh3pTEVDdrT"
# Render a 5-second spectrogram PNG for every audio file, one output
# directory per genre (img_data/<genre>/).
cmap = plt.get_cmap('inferno')
plt.figure(figsize=(10,10))
genres = 'blues classical country disco hiphop jazz metal pop reggae rock'.split()
for g in genres:
    pathlib.Path(f'img_data/{g}').mkdir(parents=True, exist_ok=True)
    for filename in os.listdir(f'./MIR/genres/{g}'):
        # BUG FIX: the path must include the file name being iterated;
        # previously `filename` was never interpolated, so the same bogus
        # path was loaded on every iteration.
        songname = f'./MIR/genres/{g}/{filename}'
        y, sr = librosa.load(songname, mono=True, duration=5)
        plt.specgram(y, NFFT=2048, Fs=2, Fc=0, noverlap=128, cmap=cmap, sides='default', mode='default', scale='dB')
        plt.axis('off')
        # Strip the 3-char extension and any remaining dots from the output name.
        plt.savefig(f'img_data/{g}/{filename[:-3].replace(".", "")}.png')
        # Clear the figure so spectrograms do not accumulate across files.
        plt.clf()
# + [markdown] colab_type="text" id="SszVgjYnFNX9"
# All the audio files get converted into their respective spectrograms. We can now easily extract features from them.
# + [markdown] colab_type="text" id="3Nw9HpSdFRsW"
#
# + [markdown] colab_type="text" id="piwUwgP5Eef9"
# ## Extracting features from Spectrogram
#
#
# We will extract
#
# * Mel-frequency cepstral coefficients (MFCC)(20 in number)
# * Spectral Centroid,
# * Zero Crossing Rate
# * Chroma Frequencies
# * Spectral Roll-off.
# + colab={} colab_type="code" id="__g8tX8pDeIL"
# Column names for the feature CSV: file name, six summary features,
# twenty MFCC coefficients, and the genre label.
base_columns = 'filename chroma_stft rmse spectral_centroid spectral_bandwidth rolloff zero_crossing_rate'
mfcc_columns = ' '.join(f'mfcc{i}' for i in range(1, 21))
header = f'{base_columns} {mfcc_columns} label'.split()
# + [markdown] colab_type="text" id="TBlT448pEqR9"
# ## Writing data to csv file
#
# We write the data to a csv file
# + colab={} colab_type="code" id="ZsSQmB0PE3Iu"
# Write the CSV header once, then append one feature row per audio file.
file = open('data.csv', 'w', newline='')
with file:
    writer = csv.writer(file)
    writer.writerow(header)
genres = 'blues classical country disco hiphop jazz metal pop reggae rock'.split()
# Open the output once in append mode instead of reopening it per row.
with open('data.csv', 'a', newline='') as out_file:
    writer = csv.writer(out_file)
    for g in genres:
        for filename in os.listdir(f'./MIR/genres/{g}'):
            # BUG FIX: build the real path from the iterated file name
            # (previously `filename` was never interpolated).
            songname = f'./MIR/genres/{g}/{filename}'
            y, sr = librosa.load(songname, mono=True, duration=30)
            chroma_stft = librosa.feature.chroma_stft(y=y, sr=sr)
            # BUG FIX: `rmse` was referenced in the row below but never
            # computed, raising a NameError on the first file.
            # NOTE(review): on librosa >= 0.7 this function is
            # librosa.feature.rms — confirm the installed version.
            rmse = librosa.feature.rmse(y=y)
            spec_cent = librosa.feature.spectral_centroid(y=y, sr=sr)
            spec_bw = librosa.feature.spectral_bandwidth(y=y, sr=sr)
            rolloff = librosa.feature.spectral_rolloff(y=y, sr=sr)
            zcr = librosa.feature.zero_crossing_rate(y)
            mfcc = librosa.feature.mfcc(y=y, sr=sr)
            # BUG FIX: the first CSV field is the file name, matching the
            # 'filename' header column.
            to_append = f'{filename} {np.mean(chroma_stft)} {np.mean(rmse)} {np.mean(spec_cent)} {np.mean(spec_bw)} {np.mean(rolloff)} {np.mean(zcr)}'
            for e in mfcc:
                to_append += f' {np.mean(e)}'
            to_append += f' {g}'
            writer.writerow(to_append.split())
# + [markdown] colab_type="text" id="0yfdo1cj6V7d"
# The data has been extracted into a [data.csv](https://github.com/parulnith/Music-Genre-Classification-with-Python/blob/master/data.csv) file.
# + [markdown] colab_type="text" id="fgeCZSKQEp1A"
# # Analysing the Data in Pandas
# + colab={"base_uri": "https://localhost:8080/", "height": 253} colab_type="code" id="Kr5_EdpD9dyh" outputId="81fd4a29-93fa-44f8-bf90-2f99981f761a"
data = pd.read_csv('data.csv')
data.head()
# + colab={"base_uri": "https://localhost:8080/", "height": 34} colab_type="code" id="iHrDHCaR9gKR" outputId="7d32943a-1ad5-4a59-c13a-beebeb36e4c2"
data.shape
# + colab={} colab_type="code" id="veD5BgX49hZa"
# Dropping unneccesary columns
data = data.drop(['filename'],axis=1)
# + [markdown] colab_type="text" id="Nyr0aAAsGXjZ"
# ## Encoding the Labels
# + colab={} colab_type="code" id="frI5HH4q-1HS"
genre_list = data.iloc[:, -1]
encoder = LabelEncoder()
y = encoder.fit_transform(genre_list)
# + [markdown] colab_type="text" id="Slm8W0-iGVhI"
#
# + [markdown] colab_type="text" id="_2n8a02zGfvP"
# ## Scaling the Feature columns
# + colab={} colab_type="code" id="uqcqn-nyAofk"
scaler = StandardScaler()
X = scaler.fit_transform(np.array(data.iloc[:, :-1], dtype = float))
# + [markdown] colab_type="text" id="e3VZvbwpGo9R"
# ## Dividing data into training and Testing set
# + colab={} colab_type="code" id="F1GW3VvQA7Rj"
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2)
# + colab={"base_uri": "https://localhost:8080/", "height": 34} colab_type="code" id="upuczQ-KBHJ5" outputId="1431a28b-e8b6-4db2-e505-7e149e37c0d7"
len(y_train)
# + colab={"base_uri": "https://localhost:8080/", "height": 34} colab_type="code" id="LtoE_FqqBzM8" outputId="76555a2b-2030-48e1-b52d-d71b4ebae38e"
len(y_test)
# + colab={"base_uri": "https://localhost:8080/", "height": 119} colab_type="code" id="ir9XaWgQB0lq" outputId="2ec90814-19d8-4f27-934a-1ce54406d4ea"
X_train[10]
# + [markdown] colab_type="text" id="Vp2yc5FWG04e"
# # Classification with Keras
#
# ## Building our Network
# + colab={} colab_type="code" id="Qj3sc2uFEUMt"
from keras import models
from keras import layers
model = models.Sequential()
model.add(layers.Dense(256, activation='relu', input_shape=(X_train.shape[1],)))
model.add(layers.Dense(128, activation='relu'))
model.add(layers.Dense(64, activation='relu'))
model.add(layers.Dense(10, activation='softmax'))
# + colab={} colab_type="code" id="7yrsmpI6EjJ2"
model.compile(optimizer='adam',
loss='sparse_categorical_crossentropy',
metrics=['accuracy'])
# + colab={"base_uri": "https://localhost:8080/", "height": 697} colab_type="code" id="bP0hVm4aElS7" outputId="aacf234d-d0a9-4de4-91be-5fd45a33b279"
history = model.fit(X_train,
y_train,
epochs=20,
batch_size=128)
# + colab={"base_uri": "https://localhost:8080/", "height": 34} colab_type="code" id="0m1J0_wUFK4C" outputId="ffd3bf36-29ea-437a-987c-9aa600b9dae6"
test_loss, test_acc = model.evaluate(X_test,y_test)
# + colab={"base_uri": "https://localhost:8080/", "height": 34} colab_type="code" id="f6HrjXeUF0Ko" outputId="ea282dbd-6f9e-48c7-de2d-dc9afde8949e"
print('test_acc: ',test_acc)
# + [markdown] colab_type="text" id="3yQmP_f5Kq0w"
# Test accuracy is less than training data accuracy. This hints at overfitting.
# + [markdown] colab_type="text" id="-U2qzRJoHV9O"
# ## Validating our approach
# Let's set apart 200 samples in our training data to use as a validation set:
# + colab={} colab_type="code" id="xJNbvYZoF7ZT"
x_val = X_train[:200]
partial_x_train = X_train[200:]
y_val = y_train[:200]
partial_y_train = y_train[200:]
# + [markdown] colab_type="text" id="L1EkG59EHeEV"
# Now let's train our network for 20 epochs:
# + colab={"base_uri": "https://localhost:8080/", "height": 1071} colab_type="code" id="Dp3G4P3aP4k2" outputId="25e1a389-1ac2-425b-bd5f-05736b6e9b96"
model = models.Sequential()
model.add(layers.Dense(512, activation='relu', input_shape=(X_train.shape[1],)))
model.add(layers.Dense(256, activation='relu'))
model.add(layers.Dense(128, activation='relu'))
model.add(layers.Dense(64, activation='relu'))
model.add(layers.Dense(10, activation='softmax'))
model.compile(optimizer='adam',
loss='sparse_categorical_crossentropy',
metrics=['accuracy'])
model.fit(partial_x_train,
partial_y_train,
epochs=30,
batch_size=512,
validation_data=(x_val, y_val))
results = model.evaluate(X_test, y_test)
# + [markdown] colab_type="text" id="dljqHfDPI6lH"
#
# + colab={"base_uri": "https://localhost:8080/", "height": 34} colab_type="code" id="Mvi9it1SI4aR" outputId="98b01ef2-3935-442b-82d6-45f56e036d39"
results
# + [markdown] colab_type="text" id="r3hb8s1l4rBA"
# ## Predictions on Test Data
# + colab={} colab_type="code" id="gudBAhIXJIi2"
predictions = model.predict(X_test)
# + colab={"base_uri": "https://localhost:8080/", "height": 34} colab_type="code" id="Xb7bVPSwJQF0" outputId="aca09c75-1d21-4847-bdd9-a0521dc8d948"
predictions[0].shape
# + colab={"base_uri": "https://localhost:8080/", "height": 34} colab_type="code" id="llusRQV0JRy9" outputId="a856289d-883a-47cb-c0fb-ec148330a60a"
np.sum(predictions[0])
# + colab={"base_uri": "https://localhost:8080/", "height": 34} colab_type="code" id="0eoEuSZqJTdU" outputId="94c17d00-dd7f-40a1-84d2-78d1ebde6103"
np.argmax(predictions[0])
# + colab={} colab_type="code" id="Utgt1bXfJVRN"
| .ipynb_checkpoints/untitled9-checkpoint.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # 文件读写与 os 库
#
# 本章介绍 Python 中基本的文件读写功能,以及 `os` 标准库。由于涉及到文件操作会使用到文件路径,一般我们在文件读写中都需要加载该库。
import os
# ## 读取文件
#
# 使用 `open(file, mode='r', encoding=None, ...)` 命令来读取文件,其中:
#
# - `file` 是文件的(相对或绝对)路径
# - `mode` 是读写模式,默认是读(`'r'`),也等同于 `'rt'`。
#
# | 参数 | 解释 | 参数 | 解释 |
# | --- | --- | --- | --- |
# | `'r'` | 读取 | `'w'` | 写入(覆盖原内容)|
# | `'a'` | 追加 | `'x'` | 创建(若文件已存在则失败) |
# | `'b'` | 二进制 | `'t'` | 文本文件 |
#
# - 注意最后一行的 `b/+` 与之前的 `r/w/a/x` 可以结合使用,比如 `rb` 是以二进制方式读取文件。
# - 关于 `+` 参数, `w+` 与 `w+b` 将打开文件并清空内容。 模式 `r+` 与 `r+b` 将打开文件而不清空内容。
# - `encoding` 是文件读写时的编码,通常使用的是 `'utf-8'`。其默认值取决于所在的操作系统。
# ### 普通读取:read() 与 readlines()
#
# 文件读取时,需要以 `with open(...) as ...` 的形式开启一个代码块。
#
# 下例给出了读取工作目录下 `conf.py` 文件的例子,使用了 `read()` 函数来读入整个文件。
# +
# 以下路径等价于 os.path.join(os.getcwd(), "conf.py")
fpath = "conf.py"
with open(fpath, 'r', encoding='utf-8') as f:
fstr_read = f.read()
fstr_read[:50] # 显示前50个字符
# -
# 注意上列文件中保留了每一行末尾的换行符(`\n`)。
#
# 另一种方式是用 `readlines()` 将文件读成一个列表,每一行(不含换行符)作为列表的一项。但注意,你必须保证文件的大小不超过可用的内存大小。
# +
fpath = "conf.py"
with open(fpath, 'r', encoding='utf-8') as f:
fstr_readlines = f.readlines()
fstr_readlines[:3] # 显示前3行
# -
# 再提示一下读者,如果想处理成一个每行为一项的列表,并去掉每行的行末的换行符,可以对 `read()` 的结果进行 `splitlines` 操作:
fstr_read.splitlines()[:3]
# ### 逐行读取:readline()
#
# 对于比较大的文件,函数 `readline()` 允许我们借助 Python 的迭代器特性来控制内存占用。例如:
with open (fpath, 'r', encoding='utf-8') as f:
print(f.readline())
print(f.readline())
# 因此,我们可以用循环语句来逐行读取:
#
# ```python
# with open (fpath, 'r', encoding='utf-8') as f:
# while True:
# line = f.readline()
# ... # Do something
# if not line:
# break
# ```
#
# 或者一种等效的写法:
#
# ```python
# with open (fpath, 'r', encoding='utf-8') as f:
# for line in f:
# ... # Do something
# ```
# <div class="alert alert-info">
#
# 注意
#
# 文件流对象(例中的 `f`)拥有成员函数 `tell()` 与 `seek()`,可以移动迭代器”指针“的位置。由于我不推荐使用这种方法,这里就不作介绍了,有兴趣的同学可以自行查阅。
#
# </div>
# ## 写入文件
#
# 用 `w` 模式代替文件读取中的 `r` 模式、用 `write()` 命令代替 `read()`,即可实现写入:
# 向文件中写入全文字符串
with open(fpath, 'w', encoding='utf-8') as f:
f.write(fstr_read)
# 类似地,我们也可以使用 `writelines()` 命令:
# 向文件中写入按行(含行尾换行符)划分的字符串列表
with open(fpath, 'w', encoding='utf-8') as f:
f.writelines(fstr_readlines)
| docsrc/Python/FileIO.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import numpy as np
import sympy as sy
sy.init_printing(use_unicode=False)
x=sy.symbols('x')
type(x)
# +
x
# -
row,col=2,2
kwds=['x{}(0:{})'.format(i,col) for i in range(row) ]
symbols_row_col=np.array([sy.symbols(kwd) for kwd in kwds]).reshape(row,col)
symbols_row_col=sy.Matrix(symbols_row_col)
symbols_row_col
x,y,z=sy.symbols('x y z')
x,y,z#x,y,zを要素とするタプルです.
#連番記号を定義できるよ.
sy.symbols('a:3')
sy.symbols('c2(3:6)')
sy.symbols(':z')
sy.symbols('x(b:d)')
sy.symbols('(x:y)(0:2)')
#数式表現
x=sy.symbols('x')
1/sy.sqrt(x)-sy.sqrt(1/x)
x=sy.symbols('x',positive=True)
1/sy.sqrt(x)-sy.sqrt(1/x)
myinteger=sy.Integer(5)
myinteger
myinteger-4
x=sy.Rational('1/3')
y=sy.Rational('1/2')
x+y
x**y
x=sy.symbols('x')
f=sy.Lambda(x,2*x)
f(4)
x,y=sy.symbols('x,y')
g=sy.Lambda((x,y),x**2+y**2)
g
#evaluate!
g(3,4)
# +
#define equation
# -
x=sy.symbols('x')
eq1=0.5*x+2*x**3-4/x
eq1
eq2=x**2+2*x*(x+1)+1
eq2
sy.simplify(eq2)
eq2.simplify()
eq3=x**2+2*x+1
eq3
eq3.factor()
x,y,z=sy.symbols('x y z')
eq4=((1+2*x+y+3*z)**2).expand()
eq4
eq4.collect(x)
eq4.collect([x,y])
eq4.collect([y,x])
# # 部分分数分解
x,y=sy.symbols('x y')
eq=x*y/((x+1)*(y+1))
eq
#部分分数分解
eq.apart(x)
eq.apart(y)
eq.apart(x).together(x)
eq.apart(x).simplify()
# # 代入
#
x,y,z=sy.symbols('x y z')
(x+x*y).subs(x,y)
(sy.sin(x)).subs(sy.sin,sy.cos)
#辞書型にまとめて指定することができる.
sy.log(x+y).subs({x:sy.sqrt(y),y:z,sy.log:sy.exp})
x,y,z=sy.symbols('x y z')
eq=x+y+z
eq.subs({x:0.1,y:0.3,z:0.5})
eq.subs({y:x,z:x})
x=sy.symbols('x')
eq=1+x+x**2
[eq.subs(x,xx).evalf() for xx in range(5)]
import numpy as np
x=sy.symbols('x')
eq=1+x+x**2
eqf=sy.lambdify(x,eq,'numpy')
v=np.arange(5)
eqf(v)
# # 微分
#
f=sy.Function('f')(x)
f
sy.diff(f,x)
f.diff(x,2)
g=sy.Function('g')(x,y)
g.diff(x)
g.diff(x,y)
g.diff(x,2,y,3)
eq=sy.cos(x**2)+sy.exp(x)+x**3
eq
eq.diff(x)
sigmoid=1/(1+sy.exp(-x))
sigmoid
sigmoid.diff(x)
# # solve equation
#
import sympy as sy
x=sy.symbols('x')
sy.solve(x**2-1)
a,b=sy.symbols('a b')
sy.solve(a*x+b,x)
ans=sy.solve(x**5-x-1,x)
ans
type(ans[0])
[sy.N(i) for i in ans]
# # 連立方程式
#
x,y=sy.symbols('x y')
eq1=x+y-7
eq2=-3*x-y+5
ans=sy.solve([eq1,eq2],[x,y])
ans
ans[x],ans[y]#辞書型でリターンするので参照が左のようにできる.
# # matrix
sy.Matrix([1,5])
sy.Matrix([[1,3]])
sy.Matrix([[1,3],[5,7]])
x,y=sy.symbols('x y')
M=sy.Matrix([[1,x],[y,2]])
M
N=M.subs({x:3})# deep copy maybe...
M,N
A=sy.Matrix([[1,3],[5,7]])
v=sy.Matrix(sy.symbols('x y'))
v
a=A*v
a
B=sy.Matrix([sy.symbols('a b'),sy.symbols('c d')])
B
w=sy.Matrix(sy.symbols('x y'))
b=B*w
b
b.T#transpose
C=B.inv()
C
# # draw fraction graph
def convert_input(string):
    """Parse a one-variable expression string into a combined fraction.

    Replaces the single free symbol with a real-assumed symbol of the same
    name, then returns (fraction, symbol) where `fraction` is the expression
    rewritten over a common denominator and cancelled.

    Raises ValueError (via unpacking) if the expression does not contain
    exactly one free symbol.
    """
    expr = sy.sympify(string)
    # unpacking enforces "exactly one free symbol"
    (sym,) = expr.free_symbols
    real_sym = sy.Symbol(sym.name, real=True)
    expr = expr.subs(sym, real_sym)
    return sy.cancel(sy.together(expr)), real_sym
frac, x = convert_input('1/(x+1) + 1/(x-1) + x+1')
frac
den=sy.denom(frac)
poles=sy.solve(den,x)
poles
domain=sy.Interval(-sy.oo,sy.oo)-sy.FiniteSet.fromiter(poles)
domain
deriv=sy.cancel(sy.diff(frac,x))
deriv
extrema=sy.solve(deriv,x)
extrema
extrema_values=[frac.subs(x,x0) for x0 in extrema]
extrema_values
# # calc asymptotes
m=sy.limit(frac/x,x,sy.oo)
p=sy.limit(frac-m*x,x,sy.oo)
m,p
def find_asymptotes(expr, x):
    """
    Return (m, p) such that y = m*x + p is an asymptote to the curve y = expr,
    or None if there is no (finite) linear asymptote.

    Bug fixed: the original computed the limits of the *global* `frac`
    instead of the `expr` argument, so every call analyzed the same curve.
    Also, `is_bounded` was removed from sympy; the modern attribute is
    `is_finite` (the old bare `except: pass` silently swallowed that
    AttributeError and made the function always return None).
    """
    try:
        # slope of the asymptote: lim_{x->oo} expr/x; non-finite => no asymptote
        m = sy.limit(expr / x, x, sy.oo)
        if not m.is_finite:
            return None
        # intercept: lim_{x->oo} (expr - m*x)
        p = sy.limit(expr - m * x, x, sy.oo)
        return m, p
    except Exception:
        # best-effort: mirror the original's behavior of returning None on failure
        return None
[find_asymptotes(e,x) for e in (frac, x**2,x**2/(x**2+1))]
# +
import numpy as np
from matplotlib import pyplot as plt
# %matplotlib inline
def plot_curve(expr, x, xmin, xmax, ymin, ymax):
    """Plot a sympy expression `expr` in the symbol `x` over [xmin, xmax].

    The expression is compiled to a numeric function via lambdify and
    sampled at 200 points; ymin/ymax clip the view (useful near poles).
    Side effect only: draws onto the current matplotlib figure.
    """
    func = sy.lambdify([x], expr)
    xs = np.linspace(xmin, xmax, 200)
    plt.plot(xs, func(xs))
    plt.ylim(ymin, ymax)
    plt.xlim(xmin, xmax)
plot_curve(frac,x,-5,5,-10,10)
# -
| sympyExercise/sympy-exercise.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
# %matplotlib inline
import matplotlib.pyplot as plt
import tensorflow as tf
import numpy as np
import cv2
import os
import pandas as pd
from random import shuffle
from tqdm import tqdm
import time
import pickle
# TF learn imports
import tflearn
from tflearn.layers.conv import conv_2d, max_pool_2d
from tflearn.layers.core import input_data, dropout, fully_connected
from tflearn.layers.normalization import batch_normalization
from tflearn.layers.estimator import regression
# Define some constants
TRAIN_DIR = "./data/train"
TEST_DIR = "./data/test"  # fixed: was "./data//test" (doubled path separator)
IMG_WIDTH = 50  # NOTE(review): declared but unused below — CROP_SIZE drives the actual size
LR = 1e-3  # learning rate
ORIGINAL_SIZE = 96  # source images are 96x96
CROP_SIZE = 48  # final size after center cropping
# training labels: one row per image id with its binary label
dm = pd.read_csv("./data/train_labels.csv")
# ========= Some helper functions =========
def normalize_img(img):
    """Center-crop an image to CROP_SIZE x CROP_SIZE and scale values to [0, 1].

    PARAMS
    ------
    img: array of shape (ORIGINAL_SIZE, ORIGINAL_SIZE, >=3); only the first
        3 channels are kept. Values are assumed to be in 0-255.

    RETURNS
    -------
    float array of shape (CROP_SIZE, CROP_SIZE, 3) with values in [0, 1].
    (The original docstring claimed the input shape was preserved; in fact
    the image is center-cropped from ORIGINAL_SIZE down to CROP_SIZE.)
    """
    start_crop = (ORIGINAL_SIZE - CROP_SIZE) // 2
    end_crop = start_crop + CROP_SIZE
    # one vectorized slice + divide replaces the original per-channel copy loop
    return img[start_crop:end_crop, start_crop:end_crop, :3] / 255
def create_train_data(d_meta, class_size=25000, save_fn=""):
    """
    Build a balanced training DataFrame with `class_size` examples per class
    (total 2*class_size) and assign 5-fold indices (k=5 is hardcoded).

    PARAMS
    ------
    d_meta: metadata frame with 'id' and 'label' columns.
    class_size: number of examples sampled from each of label 0 and label 1.
    save_fn: unused; kept only for backward compatibility with callers.

    RETURNS
    -------
    DataFrame with columns [filename, data, label, k-index], sorted by label.
    """
    # Randomly select ids from each class
    d0 = d_meta.loc[d_meta["label"] == 0].sample(n=class_size)
    d1 = d_meta.loc[d_meta["label"] == 1].sample(n=class_size)
    # set membership is O(1); the original list lookup made the scan O(n^2)
    selected_names = {x + ".tif" for x in list(d0["id"]) + list(d1["id"])}
    training_data = []
    for fn in tqdm(os.listdir(TRAIN_DIR)):
        if fn in selected_names:
            full_fn = TRAIN_DIR + "/" + fn
            img = normalize_img(cv2.imread(full_fn))
            # look the label up in the caller-supplied metadata
            # (the original read the module-level `dm`, ignoring d_meta)
            label = d_meta.loc[d_meta["id"] == fn.split(".")[0]].label.values[0]
            training_data.append([fn, np.array(img), label])
    # cycle fold indices 0..4 down the label-sorted rows
    n_sets = int((class_size * 2) / 5)
    k_indices_ls = [0, 1, 2, 3, 4] * n_sets
    df = pd.DataFrame(data=training_data, columns=["filename", "data", "label"])
    df = df.sort_values(by="label")
    df["k-index"] = k_indices_ls
    return df
# -
# ## Create and Save Dataset
#
# Randomly selects a subset of the original ~220k images, and preps them (normalization, cropping, splitting into folds) for input into the CNN. Saves the result, so this need only be run once.
#
# * For n=50k, the resulting df will need to be saved into multiple `pickle` files, because a single `pickle` file would be too big.
# * n=50k is actually overkill, because performance is only a little lower using n=20k. Nevertheless, let's pretend that we need n=50k, just so that this becomes an interesting engineering problem of dealing with data batching.
# +
# Create dataset
# Random sample of n condition0 + n condition1
df = create_train_data(dm, class_size=25000)
# check
df.groupby(["label", "k-index"]).size()
# +
t0 = time.time()
# save each k-index separately
# Because the whole file is too large to save as a single file
for k in range(5):
d_t = df.loc[df["k-index"]==k]
save_fn = "data-50k-48px-fold"+str(k)+".pkl"
d_t.to_pickle(save_fn)
print("Done in %.2fs" % (time.time() - t0))
# -
# ## Load dataset
#
# Load the dataset created previously. If saved as multiple `.pkl` files, merge them into 1 dataframe.
# +
# load and merge the 5 separate data sets
df_ls = []
for k in range(5):
fn = "data-50k-48px-fold"+str(k)+".pkl"
#df_dict[k] = pd.read_pickle(fn)
df_ls.append(pd.read_pickle(fn))
df = pd.concat(df_ls, axis=0)
# Insert sanity check printout here...
# +
# prep data for input into CNN
d_train = df.loc[df["k-index"].isin([1, 2, 3, 4])]
d_test = df.loc[df["k-index"]==0]
X = list(d_train["data"])
X = np.array(X)
Y = []
for lab in list(d_train['label']):
if lab == 0:
Y.append([1, 0])
elif lab == 1:
Y.append([0, 1])
x_test = list(d_test["data"])
x_test = np.array(x_test)
y_test = []
for lab in list(d_test['label']):
if lab == 0:
y_test.append([1, 0])
elif lab == 1:
y_test.append([0, 1])
# +
# Define model and fit
tf.reset_default_graph()
convnet = input_data(shape=[None, 48, 48, 3], name='input')
# 12 conv blocks: filter counts alternate 32 / 64, each followed by a 2x2 max-pool
# (identical layer sequence to writing the 12 pairs out by hand)
for n_filters in [32, 64] * 6:
    convnet = conv_2d(convnet, n_filters, 2, activation='relu')
    convnet = max_pool_2d(convnet, 2)
convnet = fully_connected(convnet, 512, activation='relu')
convnet = batch_normalization(convnet)
convnet = fully_connected(convnet, 2, activation='softmax')
convnet = regression(convnet,
                     optimizer='adam',
                     learning_rate=LR,
                     loss='categorical_crossentropy',
                     name='targets')
# the 2nd param logs tensorboard to /tmp
model = tflearn.DNN(convnet, tensorboard_dir="summaries")
t0 = time.time()
# Note: run_id is for tensorboard later
model.fit({'input': X}, {'targets': Y},
          n_epoch=50,
          validation_set=({'input': x_test}, {'targets': y_test}),
          snapshot_step=500,
          show_metric=True,
          run_id="cnn-L12-run4")
print("Done in %.2fs" % (time.time() - t0))
# -
| workbook-custom-c12.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Домашняя работа №6
# # Студент: <NAME>
import numpy as np
import matplotlib.pyplot as plt
# # Задание 1
# "Вычислите матрицу Грама для базиса $1, x, ..., x^{n-1}$ в пространстве $L_2[0; 1]$."
#
# $A_{ij} = <x^i; x^j> = \int_0^1{x^i\overline{x^j} dx} = \int_0^1{x^{i + j} dx} = \frac{1}{i + j + 1}x^{i + j + 1}|^1_0 = \frac{1}{i + j + 1}$
def get_A(N):
    """Gram matrix of the basis 1, x, ..., x^{N-1} in L2[0, 1].

    A[i, j] = integral_0^1 x^(i+j) dx = 1 / (i + j + 1) (a Hilbert matrix).
    """
    idx = np.arange(N)
    return 1.0 / (idx[:, None] + idx[None, :] + 1)
# "С помощью метода прямых итераций найдите макс. собственное значение как функцию от n"
#
# Сравниваться будем со встроенной в numpy функцией.
# +
def get_max_eigenvalue_straight_iterations(A, k=100, u=None):
    """Estimate the largest eigenvalue of A by unnormalized power iteration.

    Applies A^k to the start vector (all-ones by default) and returns the
    Rayleigh-quotient-style ratio <A u_k, u_k> / <u_k, u_k>.

    NOTE: without normalization the iterates can overflow/underflow for
    large k or ill-scaled A; see the *_normalize variant.
    """
    if u is None:
        u = np.ones(A.shape[0])
    powered = np.linalg.matrix_power(A, k) @ u
    advanced = A @ powered
    return np.inner(advanced, powered) / np.inner(powered, powered)
def get_max_eigenvalue_straight_iterations_normalize(A, k=100, u=None):
    """Estimate the largest eigenvalue of A by normalized power iteration.

    Re-normalizes the iterate every step (k steps total), which avoids the
    overflow/underflow of the unnormalized variant, then returns the
    Rayleigh-quotient-style ratio for the final vector.
    """
    if u is None:
        u = np.ones(A.shape[0])
    for _ in range(k):
        step = A @ u
        u = step / np.linalg.norm(step)
    advanced = A @ u
    return np.inner(advanced, u) / np.inner(u, u)
def get_max_eigenvalue_numpy(A):
    """Reference implementation: largest eigenvalue via numpy's dense eigensolver."""
    eigenvalues, _ = np.linalg.eig(A)
    return max(eigenvalues)
A = get_A(3)
my_max_eig = get_max_eigenvalue_straight_iterations(A)
np_max_eig = get_max_eigenvalue_numpy(A)
print("My implementation: " + str(my_max_eig))
print("Numpy: " + str(np_max_eig))
# -
def draw_max_eig_from_n(max_eig_generator, N_min=1, N_max=500):
    """Plot N ** (lambda_max(A_N) / 3) against N for the Gram matrices A_N.

    max_eig_generator: callable taking a matrix and returning its largest
        eigenvalue (e.g. the power-iteration or numpy implementations above).
    N_min, N_max: inclusive range of matrix sizes to evaluate.

    Side effect only: draws onto subplot 211 of the current figure, so
    successive calls overlay their curves for comparison.
    """
    data_x = [N for N in range(N_min, N_max + 1)]
    # the N^(eig/3) rescaling makes the slow (logarithmic) growth show as a near-line
    data_y = [N ** (max_eig_generator(get_A(N)) / 3) for N in range(N_min, N_max + 1)]
    plt.subplot(211)
    plt.plot(data_x, data_y)
    plt.ylabel("N ^ (max eigenvalue / 3)")
    plt.xlabel("N")
plt.figure(figsize=(10, 10), dpi=180)
draw_max_eig_from_n(get_max_eigenvalue_straight_iterations)
draw_max_eig_from_n(get_max_eigenvalue_numpy)
plt.title("Max eigenvalue as function from N")
plt.legend(("Straight iterations method", "numpy"))
plt.show()
# Как видим графики совпадают, значит метод дает примерно такие же результаты, как и реализация numpy.
#
# Также масштаб $N^{y(N) / 3}$. Получившаяся прямая проходит через точки $(500; 140)$ и $(1; 1)$. Поэтому её уравнение $y = \frac{139}{499} x + \frac{360}{499}$
#
# $N^{\frac{y(N)}{3}} = k * N + b \Leftrightarrow \frac{y(N)}{3} log(N) = log(k * N + b) \Leftrightarrow y(N) = \frac{3log(k * N + b)}{log(N)} = \frac{3log(\frac{139}{499} N + \frac{360}{499})}{log(N)}$
#
# Таким образом максимальное собственное число зависит от N как отношение логарифмов.
# "С помощью метода итераций со сдвигом постройте минимальное собственное значение как функцию от N"
#
# Если нас просят минимальное, то надо взять $\alpha > \lambda_1$, так как мы уже умеем считать макс. собственное число, то можем взять $\alpha=\lambda + 0.00001$. Как показали мои эксперименты, лучше брать $\alpha$ как можно ближе к макс. собственному числу, так не возникает NaN и значения получаются более точными.
# +
def get_min_eigenvalue_shift_normalized(A, alpha=None, k_straight=10000):
    """Smallest eigenvalue of A via the shift trick with normalized iteration.

    Shifting by alpha > lambda_max makes the smallest eigenvalue of A the
    dominant one of (A - alpha*I); power-iterate there and shift back.

    alpha: shift value; defaults to (estimated lambda_max + 1e-5).
    k_straight: iteration count passed to the normalized power method.
    """
    if alpha is None:
        alpha = get_max_eigenvalue_straight_iterations(A) + 0.00001
    shifted = A - alpha * np.identity(A.shape[0])
    return alpha + get_max_eigenvalue_straight_iterations_normalize(shifted, k=k_straight)
def get_min_eigenvalue_shift(A, alpha=None, k_straight=500):
    """Smallest eigenvalue of A via the shift trick (unnormalized iteration).

    Same idea as the *_normalized variant but uses the raw power method,
    so k_straight must stay modest to avoid float overflow.
    """
    if alpha is None:
        alpha = get_max_eigenvalue_straight_iterations(A) + 0.00001
    shifted = A - alpha * np.identity(A.shape[0])
    return alpha + get_max_eigenvalue_straight_iterations(shifted, k=k_straight)
def get_min_eigenvalue_numpy(A):
    """Reference implementation: smallest eigenvalue via numpy's dense eigensolver."""
    eigenvalues, _ = np.linalg.eig(A)
    return min(eigenvalues)
# -
A = get_A(3)
my_min_eig_shift = get_min_eigenvalue_shift(A)
np_min_eig = get_min_eigenvalue_numpy(A)
print("My implementation with shift: " + str(my_min_eig_shift))
print("Numpy: " + str(np_min_eig))
def draw_min_eig_from_n(min_eig_generator, N_min=1, N_max=500):
    """Plot log10 of the smallest eigenvalue of A_N against N.

    min_eig_generator: callable taking a matrix and returning its smallest
        eigenvalue. The log scale is needed because the Hilbert-matrix minimum
        eigenvalue decays extremely fast with N.
    Side effect only: draws onto subplot 211 of the current figure.
    """
    data_x = [N for N in range(N_min, N_max + 1)]
    data_y = [np.log10(min_eig_generator(get_A(N))) for N in range(N_min, N_max + 1)]
    plt.subplot(211)
    plt.plot(data_x, data_y)
    plt.ylabel("log(min eigenvalue)")
    plt.xlabel("N")
plt.figure(figsize=(10, 10), dpi=180)
draw_min_eig_from_n(get_min_eigenvalue_shift, 1, 10)
draw_min_eig_from_n(get_min_eigenvalue_shift_normalized, 1, 10)
draw_min_eig_from_n(get_min_eigenvalue_numpy, 1, 10)
plt.title("Min eigenvalue as function from N")
plt.legend(("shift method", "shift normalized", "numpy"))
plt.show()
# Как видим значения после N=4 начинают сильно отличаться от numpy. С нормализацией все же лучше, чем без нее, так как можно поставить большее количество итераций. Хотя я применяю нормализацию, но и она не помогает при больших N. Связано это с плохой сходимостью метода сдвига (как и обсуждалось на лекции, что при поиске минимального собственного числа сходимость может быть крайне низкой, даже 10 тысяч итераций не помогает).
#
# При этом если не нормировать и взять количество итераций больше 500, то уже не хватает точности и мы получаем NaN.
# "Постойте график спектрального числа обусловленности $\kappa(A) = |\frac{\lambda_{max}}{\lambda_{min}}|$ как функцию от n. Найдите численно примерную зависимость."
def get_kappa(A):
    """Spectral condition number kappa(A) = |lambda_max / lambda_min|,
    using the power-iteration estimates defined above."""
    lam_max = get_max_eigenvalue_straight_iterations(A)
    lam_min = get_min_eigenvalue_shift_normalized(A)
    return np.abs(lam_max / lam_min)
A = get_A(3)
my_kappa = get_kappa(A)
np_kappa = np.linalg.cond(A)
print("My implementation: " + str(my_kappa))
print("Numpy: " + str(np_kappa))
def draw_kappa_from_n(kappa_generator, N_min=1, N_max=500):
    """Plot log10 of the spectral condition number of A_N against N.

    kappa_generator: callable taking a matrix and returning its condition
        number (e.g. get_kappa or np.linalg.cond).
    Side effect only: draws onto subplot 211 of the current figure.
    """
    data_x = [N for N in range(N_min, N_max + 1)]
    data_y = [np.log10(kappa_generator(get_A(N))) for N in range(N_min, N_max + 1)]
    plt.subplot(211)
    plt.plot(data_x, data_y)
    plt.ylabel("log (kappa value)")
    plt.xlabel("N")
plt.figure(figsize=(10, 10), dpi=180)
draw_kappa_from_n(get_kappa, 1, 10)
draw_kappa_from_n(np.linalg.cond, 1, 10)
plt.title("Spectral condition number as function from N")
plt.legend(("shift + straight method", "numpy"))
plt.show()
# Как видим спектральное число обусловленности матрицы тоже начинает сильно отличаться от numpy после четырех, что неудивительно раз у нас сильно отличались минимальные значения собственных чисел.
#
# Согласно numpy мы получаем прямую, значит зависимость $\kappa(A)$ растет, как степенная функция, примерно как $10^N$. Это согласуется с данными википедии, там зависимость $\kappa(A) = \frac{(1 + \sqrt{2})^{4N}}{\sqrt{N}}$
#
# По нашему методу зависимость трудно отследить, но если рассматривать только N до 4 включительно, то тоже можно сделать вывод о степенной зависимости. Но на мой взгляд 4 слишком мало, 10 хотя бы надо. Если брать сильно больше 10, то там и numpy начнет ошибаться.
# "Применить метод Эйткена ускорения сходимости при вычислениях в 2)"
def eitken(ys):
    """One Aitken delta-squared acceleration step (name kept as in the notebook).

    ys: [s1, s2, s3] — three consecutive partial results of a sequence.
    Returns [s2, s3, s_accel] with s_accel = s3 - (s3-s2)^2 / (s3 - 2*s2 + s1):
    the window shifted by one, with the accelerated estimate appended.

    NOTE: raises ZeroDivisionError when the second difference is zero.
    """
    s1, s2, s3 = ys
    accelerated = s3 - (s3 - s2) ** 2 / (s3 - s2 - s2 + s1)
    return [s2, s3, accelerated]
# +
def get_max_eigenvalue_straight_iterations_eitken(A, alpha, k=100, u=None):
    """Power iteration with Aitken delta-squared acceleration of the
    Rayleigh-quotient sequence.

    alpha: unused; kept for signature compatibility with existing callers.
    k: total number of quotient terms to generate (>= 3).
    u: start vector (all-ones by default).

    The accelerated value is only refreshed while the second difference is
    nonzero; once the quotients saturate, the last good estimate is returned.
    """
    if u is None:
        u = np.ones(A.shape[0])

    def rayleigh(v):
        # Rayleigh quotient <A v, v> / <v, v>
        return np.inner(A @ v, v) / np.inner(v, v)

    vec = u
    s1 = rayleigh(vec)
    vec = A @ vec
    s2 = rayleigh(vec)
    vec = A @ vec
    s3 = rayleigh(vec)
    res = s3 - (s3 - s2) ** 2 / (s3 - s2 - s2 + s1)
    for _ in range(4, k + 1):
        s1, s2 = s2, s3
        vec = A @ vec
        s3 = rayleigh(vec)
        if s3 - s2 - s2 + s1 != 0:
            res = s3 - (s3 - s2) ** 2 / (s3 - s2 - s2 + s1)
    return res
def get_min_eigenvalue_shift_eitken(A, alpha=None, k_straight=799):
    """Smallest eigenvalue via the shift trick, using the Aitken-accelerated
    power iteration on the shifted matrix A - alpha*I.

    alpha: shift; defaults to (estimated lambda_max + 1e-5) so the smallest
        eigenvalue of A becomes the dominant one of the shifted matrix.
    """
    if alpha is None:
        alpha = get_max_eigenvalue_straight_iterations(A) + 0.00001
    shifted = A - alpha * np.identity(A.shape[0])
    return alpha + get_max_eigenvalue_straight_iterations_eitken(shifted, alpha, k=k_straight)
# -
plt.figure(figsize=(10, 10), dpi=180)
draw_min_eig_from_n(get_min_eigenvalue_shift, 1, 10)
draw_min_eig_from_n(get_min_eigenvalue_shift_normalized, 1, 10)
draw_min_eig_from_n(get_min_eigenvalue_shift_eitken, 1, 10)
draw_min_eig_from_n(get_min_eigenvalue_numpy, 1, 10)
plt.title("Min eigenvalue as function from N")
plt.legend(("shift method", "shift normalized", "eitken", "numpy"))
plt.show()
# Как видим Эйткен в сравнении со сдвигом без нормализации работает лучше, но в целом с нормализацией лучше, так как без нормализации Эйткен достаточно быстро получает NaN, но при этом дорабатывает нормально до момента, когда точность идет на спад.
| hw06/hw06.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import Sastrawi as sw
from Sastrawi.Stemmer.StemmerFactory import StemmerFactory
import pandas as pd
factory = StemmerFactory()
stemmer = factory.create_stemmer()
# -
# Load the raw competency table (semicolon-separated)
data = pd.read_csv("data/id_kompetensi_flag_new.csv", sep=";")
# lower-case the competency text
data['KOMPETENSI'] = data.loc[:,'KOMPETENSI'].str.lower()
# (removed: a bare `.replace()` call with no arguments — it was a no-op)
# collapse every run of non-alphanumeric characters into a single space
data = data.replace(regex={'KOMPETENSI': r'[^0-9a-zA-Z]+'}, value=' ')
new_data = data.copy()
new_data
list_data = list(new_data.loc[:, 'KOMPETENSI'])
list_data
import urllib.parse
import requests
import http.client
import json
kalimat = urllib.parse.quote(list_data[1])
url = "https://translate.googleapis.com/translate_a/single?client=gtx&sl=id&tl=en&dt=t&q="+kalimat
result = requests.get(url)
data = json.loads(result.content)
data[0][0][0]
# Translate the first 1001 competency strings to English via the unofficial
# Google Translate endpoint, one HTTP request per row.
result_finish = []
for i in range(0, 1001):
    # Percent-encode the text: raw spaces/punctuation would make an invalid URL.
    # (The single-sentence demo above used urllib.parse.quote; the loop must too.)
    kalimat = urllib.parse.quote(list_data[i])
    url = "https://translate.googleapis.com/translate_a/single?client=gtx&sl=id&tl=en&dt=t&q="+kalimat
    result = requests.get(url)
    # use a dedicated name: the original reassigned `data`, clobbering the DataFrame
    payload = json.loads(result.content)
    # payload[0] is the list of translated segments; [0][0][0] is the first segment's text
    result_finish.append(payload[0][0][0])
    print(i)  # progress indicator
| Processing[English].ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/GitMarco27/TMML/blob/main/Notebooks/007_Airfoil_Self_Noise.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + [markdown] id="cG_fLgb6C1tf"
# # 3 Minutes Machine Learning
# ## Episode 7: Airfoil Self Noise
#
# #### <NAME>, 2021
# ---
# Welcome to 3 minutes Machine Learning!
#
# Reference: https://archive.ics.uci.edu/ml/datasets/Airfoil+Self-Noise
# + id="mSoTlTKwZwnL"
import tensorflow as tf
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split
from sklearn import preprocessing
# + id="UDz7BaSUakRm" colab={"base_uri": "https://localhost:8080/"} outputId="2449ee6f-4f0d-4fbf-9778-d8a7f7c75f94"
print(tf.__version__)
# + id="VksvMLmpC7n9" colab={"base_uri": "https://localhost:8080/", "height": 502} outputId="96431252-a4f3-429a-a886-41c999cb0d9f"
def loadThumb(path):
    """Display the image file at `path` full-size with axes hidden.

    path: any file readable by matplotlib.pyplot.imread (e.g. PNG).
    Side effect only: renders the figure; returns None.
    """
    myThumb = plt.imread(path)
    fig, ax = plt.subplots(figsize=(15, 10))
    plt.axis('off')
    ax.imshow(myThumb)
    plt.show()
# loadThumb('/tmp/yt_thumb_007.png')
# + [markdown] id="Y5HhwZm0DETT"
# #### Video Topics
# > 1. Load the dataset from UCI.edu
# > 2. Create a model with the keras API
# > 3. Train the model and check the results
# > 4. See you on next video!
# + [markdown] id="p1XK0dMeD_RI"
# # Load the dataset
# ___
# + colab={"base_uri": "https://localhost:8080/", "height": 419} id="Tuwrg8aoauey" outputId="5c3b8aad-ec6a-4840-a4a4-e949c2ea3a96"
URL = "https://archive.ics.uci.edu/ml/machine-learning-databases/00291/airfoil_self_noise.dat"
cols = ['Frequency',
'Angle of Attack',
'Chord length',
'Free-stream velocity',
'Suction side displacement thickness',
'Sound Pressure']
dataset = pd.read_table(URL, names=cols, dtype='float32')
dataset
# + colab={"base_uri": "https://localhost:8080/", "height": 235} id="kY4YPvfOa4sW" outputId="24bd1622-a31b-46ad-be5c-75ef90f756f5"
dataset.describe().T
# + id="fUbcmKCNktXc"
# sns.pairplot(dataset)
# plt.show()
# + [markdown] id="5ik1Qq0WED2r"
# # Create the model
# ___
# + id="qrDJbNoZltFg"
from tensorflow.keras.layers import Dense, Input
from tensorflow.keras.models import Model
from tensorflow.keras.optimizers import Adam
# + colab={"base_uri": "https://localhost:8080/"} id="pvMYxPnpmUik" outputId="05fd7d2c-209c-4a04-cb1a-4a3075d7e90e"
# Functional-API regression model: 5 features -> two hidden layers -> 1 output
input_data = Input(shape=(5), name='Input')
dense1 = Dense(256, activation=tf.nn.relu)(input_data)
drop1 = tf.keras.layers.Dropout(0)(dense1)  # NOTE: rate 0 makes this a no-op; raise it to regularize
dense2 = Dense(256, activation=tf.nn.relu)(drop1)
drop2 = tf.keras.layers.Dropout(0)(dense2)  # NOTE: rate 0 -> no-op as well
output = Dense(1, name='output')(drop2)  # linear output for regression
model = Model(input_data, output)
model.compile(optimizer=Adam(learning_rate=0.0001), loss='mse', metrics=['mae', 'mse'])
model.summary()
# + colab={"base_uri": "https://localhost:8080/", "height": 644} id="MsP5J3gGEM9l" outputId="cf5f4b2a-f03d-4b3b-9a9a-b5f197457960"
tf.keras.utils.plot_model(
model, to_file='model.png', show_shapes=True, show_dtype=True,
show_layer_names=True, rankdir='TB', expand_nested=False, dpi=96
)
# + id="dWqHF2kInsoL"
def separate(df):
    """Split a frame into (labels, features) numpy arrays.

    Returns (y, X): y is the 'Sound Pressure' column as an (n, 1) array,
    X is every other column as an (n, k) array, in original column order.
    """
    labels = df[['Sound Pressure']].to_numpy()
    # drop(columns=...) is the supported spelling; the original passed a
    # sub-DataFrame as the labels argument, which is fragile in newer pandas
    features = df.drop(columns=['Sound Pressure']).to_numpy()
    return labels, features
# + id="naB3M5dBngO6"
min_max_scaler = preprocessing.MinMaxScaler()
df_normed = pd.DataFrame(min_max_scaler.fit_transform(dataset))
df_normed.columns = list(dataset.columns)
train_set, test_set = train_test_split(df_normed)
train_labels, train_features = separate(train_set)
test_labels, test_features = separate(test_set)
# + [markdown] id="wNItDf_LESUC"
# # Train and check the results
# ___
# + colab={"base_uri": "https://localhost:8080/"} id="0dTCj5tupTZ-" outputId="a08ef79c-99d6-4a8c-8b2b-a29ad21d5ae2"
history = model.fit(
train_features,
train_labels,
batch_size = 32,
epochs=1000,
validation_data=(test_features,
test_labels)
)
# + colab={"base_uri": "https://localhost:8080/", "height": 404} id="MBttLuuBsLWO" outputId="a590490a-aeae-4d46-e5ef-b3c1b7069271"
loss = history.history['loss']
val_loss = history.history['val_loss']
fig, ax = plt.subplots(figsize=(8, 6))
plt.plot(loss)
plt.plot(val_loss)
plt.grid('both')
plt.xlabel('x')
plt.ylabel('Loss Function')
plt.title('Loss Function trend')
plt.show()
# + colab={"base_uri": "https://localhost:8080/", "height": 390} id="he73fIuPstkI" outputId="9a45d165-8618-4ff7-dbc6-186e9be5b00c"
fig, ax = plt.subplots(1, 2, figsize=(12, 6), sharey=True)
ax[0].axis('equal')
ax[0].scatter(train_labels[:, 0], model.predict(train_features)[:, 0], marker='^',
color='r', edgecolor='k')
ax[0].plot([0, 1], [0, 1], c='k')
ax[0].plot([0, 1], [0.2, 1.2],'--', c='orange')
ax[0].plot([0, 1], [-0.2, 0.8],'--', c='orange')
ax[0].plot([0, 1], [0.1, 1.1],'--', c='pink')
ax[0].plot([0, 1], [-0.1, 0.9],'--', c='pink')
ax[0].set_title('Training Set - Y1')
ax[0].set_ylim(0, 1)
ax[0].grid(which='both', alpha=0.8, c='white')
ax[0].set_facecolor('#eaeaf2')
ax[0].spines['bottom'].set_color('white')
ax[0].spines['top'].set_color('white')
ax[0].spines['right'].set_color('white')
ax[0].spines['left'].set_color('white')
ax[1].axis('equal')
ax[1].scatter(test_labels[:, 0], model.predict(test_features)[:, 0], marker='^',
color='g', edgecolor='k')
ax[1].plot([0, 1], [0, 1], c='k')
ax[1].plot([0, 1], [0.2, 1.2],'--', c='orange')
ax[1].plot([0, 1], [-0.2, 0.8],'--', c='orange')
ax[1].plot([0, 1], [0.1, 1.1],'--', c='pink')
ax[1].plot([0, 1], [-0.1, 0.9],'--', c='pink')
ax[1].set_title('Validation Set - Y1')
ax[1].set_ylim(0, 1)
ax[1].grid(which='both', alpha=0.8, c='white')
ax[1].set_facecolor('#eaeaf2')
ax[1].spines['bottom'].set_color('white')
ax[1].spines['top'].set_color('white')
ax[1].spines['right'].set_color('white')
ax[1].spines['left'].set_color('white')
# + colab={"base_uri": "https://localhost:8080/"} id="F-nYe68DtbJg" outputId="63428cfb-1e13-44d1-dea2-247cf5931600"
import numpy as np
from sklearn.metrics import r2_score
from scipy.stats import pearsonr
# Compute each prediction set once: the original called model.predict
# eight times per target column, redoing the same forward pass each time.
train_pred = model.predict(train_features)
test_pred = model.predict(test_features)
# ANSI-color helpers (loop-invariant, so defined once)
blue = lambda x: '\033[94m' + x + '\033[0m'
yellow = lambda x: '\033[93m' + x + '\033[0m'
for i in range(np.shape(train_labels)[1]):
    metrics = {
        'mae-train': np.mean(np.abs(train_labels[:, i] - train_pred[:, i])),
        'mse-train': np.mean(np.square(train_labels[:, i] - train_pred[:, i])),
        'r2-train': r2_score(train_labels[:, i], train_pred[:, i]),
        'pearson-train': pearsonr(train_labels[:, i], train_pred[:, i])[0],
        'mae-test': np.mean(np.abs(test_labels[:, i] - test_pred[:, i])),
        'mse-test': np.mean(np.square(test_labels[:, i] - test_pred[:, i])),
        'r2-test': r2_score(test_labels[:, i], test_pred[:, i]),
        'pearson-test': pearsonr(test_labels[:, i], test_pred[:, i])[0]
    }
    for key in metrics:
        if 'train' in key:
            print(f'Y{i} - {blue(key)} - {str(metrics[key])[:7]}')
        else:
            print(f'Y{i} - {yellow(key)} - {str(metrics[key])[:7]}')
# + [markdown] id="x12mGu0LF-AH"
# # Greetings
# ---
# + id="xgWUckLSF7wj" colab={"base_uri": "https://localhost:8080/"} outputId="79b5931a-506a-4bd9-c63e-2c609f5a9111"
# !pip install art
from art import tprint, aprint
tprint('See you on next videos!')
def subscribe():
    """Print a decorated call-to-action banner using `art.aprint`.

    Side effect only (writes to stdout); returns None.
    """
    aprint("giveme", number=5)
    print(f'\n\tLike and subscribe to support this work!\n')
    aprint("giveme", number=5)
subscribe()
| Notebooks/007_Airfoil_Self_Noise.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Ex10 - Texturas
#
# Nesta atividade, vocês vão exercitar alguns dos conceitos de textura. Para evitar erros de execução, utilize apenas uma célula de código para cada parte desta atividade.
# ### Parte 1 - Skybox
#
# Skybox é uma técnica de criação de background utilizada em jogos para simular o horizonte que rodeia o jogador. Resumidamente, esta técnica consiste em criar um cubo ao redor da cena criada. Nesse cubo, as faces internas são texturizadas com as imagens da paisagem que se deseja simular. Utilize uma das texturas presentes na pasta ./cg/images/textures/ para criar um skybox simples em torno de uma câmera posicionada no ponto (0,0,0). Além disso, utilize as teclas de seta para rotacionar o ponto de visão da câmera a fim de permitir visualizar o resultado da renderização do cubo. As imagens abaixo ilustram o efeito esperado ao rotacionar o ponto de visão da câmera. Os notebooks ([37_Renderizando_uma_textura_por_completo](37_Renderizando_uma_textura_por_completo.ipynb)) e ([38_Renderizando_parte_de_uma_textura](38_Renderizando_parte_de_uma_textura.ipynb)) exemplificam como utilizar texturas em renderizações.
# <table>
# <tr>
# <td> <img src='cg/images/ex10_image_1.png' style="width:400px"> </td>
# <td> <img src='cg/images/ex10_image_2.png' style="width:400px"></td>
# <td> <img src='cg/images/ex10_image_3.png' style="width:400px"></td>
# </tr>
# </table>
# ### Parte 2 - Objetos texturizados
#
# Adicione à renderização da Parte 1 pelo menos dois objetos texturizados ([41_Objetos_com_texturas_e_height_maps](41_Objetos_com_texturas_e_height_maps.ipynb) e [40_Objetos_com_texturas](40_Objetos_com_texturas.ipynb)) e uma fonte de luz direcional ([34_Terreno_com_esferas_iluminacao_fonte_de_luz_direcional](34_Terreno_com_esferas_iluminacao_fonte_de_luz_direcional.ipynb)) para simular o sol. A pasta ./cg/images/textures/ possui algumas texturas que podem ser utilizadas nos objetos.
# ### Parte 3 - Sprites animados
#
# Em alguns games 2D, a animação de sprites é utilizada no lugar de objetos em movimento compostos por malha de triângulos. O notebook [39_Renderizando_sprites_animados](39_Renderizando_sprites_animados.ipynb) exemplifica essa técnica. Utilize uma das texturas de sprites presentes na pasta ./cg/images/textures/ para montar pelo menos duas animações que são disparadas por eventos de teclado. Por exemplo, se a tecla de seta 'para direita' for pressionada, a animação do sprite andando para direita começa.
| 1S2020/EA979A_Ex10_Texturas.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# ---
from jumpscale.loader import j
j.core.dirs.BASEDIR
j.core.dirs.BINDIR
j.core.dirs.CFGDIR
j.core.dirs.CODEDIR
j.core.dirs.HOMEDIR
j.core.dirs.LOGDIR
j.core.dirs.TEMPLATEDIR
j.core.dirs.TMPDIR
| docs/notebooks/dirs.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# 
#
# **[MiCMOR](https://micmor.kit.edu) [SummerSchool "Environmental Data Science: From Data Exploration to Deep Learning"](https://micmor.kit.edu/sites/default/files/MICMoR%20Summer%20School%202019%20Flyer.pdf)**
# IMK-IFU KIT Campus Alpin, Sept. 4 - 13 2019, Garmisch-Partenkirchen, Germany.
#
# ---
# # Deep Learning on tabular/ structured data
#
# Based on: https://www.kaggle.com/abhikjha/fastai-pytorch-hooks-random-forest?source=post_page-----1748aa2b2f8c----------------------
# %matplotlib inline
# %load_ext autoreload
# %autoreload 2
# +
import pandas as pd
import numpy as np
import fastai
from fastai import *
from fastai.vision import *
from fastai.tabular import *
from fastai.callbacks.hooks import *
from fastai.callbacks.tracker import EarlyStoppingCallback, SaveModelCallback
#from utils import *
import sys
from scipy.special import erfinv
import matplotlib.pyplot as plt
import torch
#from torch.utils.data import *
from torch.optim import *
import torch.utils.data as Data
from fastai.basics import *
from fastai.callbacks.hooks import *
from tqdm import tqdm_notebook as tqdm
np.random.seed(42)
# +
# helper functions
def to_gauss(x):
    """Probit transform: map u in (-1, 1) to the standard-normal-shaped value
    sqrt(2) * erfinv(u). Accepts scalars or numpy arrays."""
    return erfinv(x) * np.sqrt(2)
def normalize(data, exclude=None):
# if not binary, normalize it
norm_cols = [n for n, c in data.drop(exclude, 1).items() if len(np.unique(c)) > 2]
n = data.shape[0]
for col in norm_cols:
sorted_idx = data[col].sort_values().index.tolist() # list of sorted index
uniform = np.linspace(start=-0.99, stop=0.99, num=n)
normal = to_gauss(uniform) # apply gauss to linspace
normalized_col = pd.Series(index=sorted_idx, data=normal) # sorted idx and normalized space
data[col] = normalized_col # column receives its corresponding rank
return data
# -
# add column names
wilderness = [f'Wilderness_Area{i}' for i in range(1,5)] # 4 one-hot-encoded wilderness types
soiltypes = [f'Soil_Type{i:02}' for i in range(1,41)] # 40 one-hot-encoded soil types
colnames = ['Elevation', 'Aspect', 'Slope', 'Horizontal_Distance_To_Hydrology', 'Vertical_Distance_To_Hydrology',
            'Horizontal_Distance_To_Roadways', 'Hillshade_9am', 'Hillshade_Noon', 'Hillshade_3pm',
            'Horizontal_Distance_To_Fire_Points'] + wilderness + soiltypes + ['Cover_Type']
# read and transform one-hot cols into dense form
df = pd.read_csv('../data/uci/covtype/covtype.data.gz', compression='gzip', names=colnames, header=None, sep=',')
# idxmax over each one-hot block gives the active column name per row; category
# codes then turn those names into dense integer labels.
df['Soil'] = df.filter(like='Soil_Type').idxmax(axis=1).astype('category').cat.codes
df['Wilderness'] = df.filter(like='Wilderness_Area').idxmax(axis=1).astype('category').cat.codes
# Drop the now-redundant one-hot columns.
df = df[df.columns.drop(list(df.filter(regex='Soil_|Wilderness_')))]
df.head()
df.info()
# +
procs=[FillMissing, Categorify]  # fastai preprocessing: impute missings, encode categoricals
cat_vars = ['Soil', 'Wilderness']
cont_vars = ['Elevation', 'Aspect', 'Slope', 'Horizontal_Distance_To_Hydrology',
             'Vertical_Distance_To_Hydrology', 'Horizontal_Distance_To_Roadways',
             'Hillshade_9am', 'Hillshade_Noon', 'Hillshade_3pm', 'Horizontal_Distance_To_Fire_Points']
dep_var = 'Cover_Type'
# -
# Gauss-rank-normalize the continuous columns (categoricals and target excluded).
df = normalize(df, exclude=cat_vars + [dep_var])
# Build a fastai DataBunch with an 80/20 random split (seed fixed for reproducibility).
data = (TabularList.from_df(df, path='.', cat_names=cat_vars, cont_names=cont_vars, procs=procs)
        .split_by_rand_pct(0.2, seed=42)
        .label_from_df(cols=dep_var) #, label_cls=FloatList
        .databunch(bs=1024))
data.show_batch()
# Two hidden layers (1000, 500); embedding sizes use the (cardinality+1)//2 rule.
learn = tabular_learner(data, layers=[1000,500], metrics=accuracy, model_dir=".",
                        ps=[0.1, 0.1], emb_drop=0.04, emb_szs={'Soil': (40+1)//2, 'Wilderness': (4+1)//2})
# Alternate LR-finder and one-cycle training with progressively smaller learning rates.
learn.lr_find()
learn.recorder.plot(suggestion=True)
lr = 1e-2
learn.fit_one_cycle(5, max_lr=lr, wd=0.2, pct_start=0.3)
learn.lr_find()
learn.recorder.plot(suggestion=True)
lr = 1e-4
learn.fit_one_cycle(5, lr, wd=0.2, pct_start=0.3)
learn.lr_find()
learn.recorder.plot(suggestion=True)
lr=1e-6
learn.fit_one_cycle(5, max_lr=lr, wd=0.2)
learn.save('1')  # checkpoint the trained weights under model_dir
learn.recorder.plot_losses()
class SaveFeatures():
    """Forward hook that records a module's outputs across forward passes.

    After every forward pass of the hooked module, the detached, CPU-side
    output is appended as rows to ``self.features`` (a numpy array).
    Call ``remove()`` to unregister the hook when done.
    """
    features = None  # class-level default; instances shadow it in __init__

    def __init__(self, m):
        # register_forward_hook fires hook_fn after every forward pass of m.
        self.hook = m.register_forward_hook(self.hook_fn)
        self.features = None

    def hook_fn(self, module, input, output):
        # Detach from the autograd graph and move to CPU before accumulating.
        out = output.detach().cpu().numpy()
        if self.features is None:  # idiomatic None check (was isinstance(..., type(None)))
            self.features = out
        else:
            # BUGFIX: np.row_stack was removed in NumPy 2.0; np.vstack is its exact alias.
            self.features = np.vstack((self.features, out))

    def remove(self):
        """Unregister the forward hook."""
        self.hook.remove()
learn.model
# Hook layer index 4 of the network to capture its activations (used below as
# learned feature embeddings) while get_preds runs over the training set.
sf = SaveFeatures(learn.model.layers[4])
_ = learn.get_preds(data.train_ds)
label = [x for x in (list(data.train_ds.y.items))]
len(label)
df_new = pd.DataFrame({'label': label})
df_new.head()
# One list-valued 'emb' cell per row, holding that row's activation vector.
df_new['emb'] = np.array(sf.features).tolist()
df_new.head()
# Explode the embedding lists into columns emb1..embK and join them on.
d2 = pd.DataFrame(df_new.emb.values.tolist(), index = df_new.index).rename(columns = lambda x: f'emb{x+1}')
df_new_2 = df_new.join(d2)
df_new_2.shape
# ## Embeddings for Validation
# Repeat the activation capture for the validation split.
sf = SaveFeatures(learn.model.layers[4])
_ = learn.get_preds(DatasetType.Valid)
label = [x for x in (list(data.valid_ds.y.items))]
df_new_valid = pd.DataFrame({'label': label})
df_new_valid['emb'] = np.array(sf.features).tolist()
d2 = pd.DataFrame(df_new_valid.emb.values.tolist(), index = df_new_valid.index).rename(columns = lambda x: f'emb{x+1}')
df_new_valid_2 = df_new_valid.join(d2)
df_new_valid_2.drop(['emb'], axis=1, inplace=True)
df_new_2.drop(['emb'], axis=1, inplace=True)
corr_matrix = df_new_2.corr()
corr_matrix["label"].sort_values(ascending = False)
# +
# Random-forest stage: re-split the embedding table.  NOTE: 'label' is still a
# column of X at this point; it is separated out in the next cell.
X = df_new_2
y = df_new_2.label.copy()
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2,random_state=42)
# -
X_train.shape, y_train.shape, X_test.shape, y_test.shape
# +
y_train = X_train.label
X_train = X_train.drop("label", axis =1)
y_test = X_test.label
X_test = X_test.drop("label", axis =1)
# -
X_train.shape, y_train.shape, X_test.shape, y_test.shape
# +
from sklearn.base import BaseEstimator, TransformerMixin
class DataFrameSelector(BaseEstimator, TransformerMixin):
    """Sklearn-style transformer that selects named columns from a DataFrame
    and returns them as a plain numpy array."""

    def __init__(self, attributes_names):
        # Keep the constructor argument name so get_params()/clone() keep working.
        self.attributes_names = attributes_names

    def fit(self, X, y=None):
        # Stateless transformer: nothing to learn.
        return self

    def transform(self, X):
        # Column-select, then strip the pandas wrapper.
        selected = X[self.attributes_names]
        return selected.values
# +
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import StandardScaler
# numerical pipeline: select the embedding columns and standardize them
num_pipeline = Pipeline([
    ('select_data', DataFrameSelector(X_train.columns)),
    ('Std_Scaler', StandardScaler())
])
X_train_transformed = num_pipeline.fit_transform(X_train)
# BUGFIX: the test set was previously fit_transform'ed, i.e. the scaler was
# re-fit on test-set statistics (train/test leakage and inconsistent scaling).
# Reuse the statistics learned on the training set instead.
X_test_transformed = num_pipeline.transform(X_test)
# -
X_train_transformed.shape, X_test_transformed.shape
# +
from sklearn.ensemble import RandomForestClassifier
import time

start = time.time()
# Dropped min_impurity_split=None: the parameter was removed in scikit-learn 1.0
# and None was its inert default, so behaviour is unchanged.
rf_clf = RandomForestClassifier(bootstrap=True,
                                criterion='gini', max_depth=15, max_features=0.5,
                                max_leaf_nodes=None, min_impurity_decrease=0.0,
                                min_samples_leaf=3,
                                min_samples_split=8, min_weight_fraction_leaf=0.0,
                                n_estimators=185, n_jobs=1, oob_score=False, random_state=42,
                                verbose=0, warm_start=False)
rf_clf.fit(X_train_transformed, y_train)
end = time.time()
print("run_time:", (end-start)/(60*60))  # wall-clock time in hours
# -
# Removed stray expression `a` (leftover debug cell): the name is never defined
# in this notebook, so evaluating it raises NameError on Restart & Run All.
# ## Prediction
# +
from sklearn.model_selection import cross_val_predict, cross_val_score
import time

start = time.time()
# NOTE(review): neg-MSE is an odd score for a classification target; accuracy
# (as used at the end of the notebook) would be more meaningful here too.
score_rf = cross_val_score(rf_clf, X_train_transformed, y_train, cv=5, scoring='neg_mean_squared_error', verbose=0)
print(score_rf.mean())
end = time.time()
print("run_time:", (end-start)/(60*60))  # hours
# -
y_pred_test_rf = rf_clf.predict(X_test_transformed)
from sklearn.metrics import mean_squared_error
mean_squared_error(y_test, y_pred_test_rf)
# Final hold-out evaluation on the fastai validation split's embeddings.
X = df_new_valid_2
y = df_new_valid_2.label.copy()
X_val = X.drop("label", axis =1)
y_val = y
# +
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import StandardScaler
# numerical pipeline
# NOTE(review): the scaler is re-fit on the validation data here; a clean
# evaluation would reuse the pipeline fitted on the training set.
num_pipeline = Pipeline([
    ('select_data', DataFrameSelector(X_val.columns)),
    ('Std_Scaler', StandardScaler())
])
X_val_transformed = num_pipeline.fit_transform(X_val)
# -
y_pred_test_rf_val = rf_clf.predict(X_val_transformed)
from sklearn.metrics import accuracy_score
accuracy_score(y_val, y_pred_test_rf_val)
| nbs/44_dl_tabular.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import numpy as np
import cvxpy as cp
import matplotlib.pyplot as plt
# +
m = 10                           # scalar weight used to build the mass matrix below
M = m*np.eye(2)                  # 2x2 mass matrix M = m*I
C = 0                            # coefficient multiplying the squared path derivative in c_tilde (zero here)
d = 0                            # constant offset term d_tilde (zero here)
# NOTE(review): this array is shadowed by the function R() defined a few cells
# below and appears to be dead code.
R = np.zeros((2, 2))
V_init = np.matrix([[0], [0]])   # initial velocity vector (starts at rest)
N = 100                          # number of discretization points along the path
# +
# PATH gen
# Parabolic test path: first coordinate x^2, second coordinate 100*x, N samples.
x = np.linspace(0, 1, N).reshape(-1, 1)
q = np.hstack((np.power(x, 2), 100*x)).T  # shape (2, N): one column per path point
plt.figure()
plt.plot(q[0, :], q[1, :], "r-.")
plt.grid()
plt.title("Traversing path")
plt.show()
# +
def R(s):
    """Build one 2x2 rotation matrix per column of ``s``.

    Parameters
    ----------
    s : array, shape (2, n)
        Direction (tangent) vectors along the path, one per column.

    Returns
    -------
    r : ndarray, shape (n, 2, 2)
        r[i] rotates by phi_i = arctan(s[1, i] / s[0, i]).
        NOTE(review): np.arctan loses quadrant information that
        np.arctan2(s[1], s[0]) would preserve — confirm this is intended.
    """
    n = s.shape[1]
    phi = np.arctan(s[1, :] / s[0, :])
    r = np.zeros((n, 2, 2))
    for i, ang in enumerate(phi):
        c, sn = np.cos(ang), np.sin(ang)
        r[i, :, :] = np.array([[c, -sn], [sn, c]])
    return r
# Uniform grid of the path parameter theta in [0, 1] ("thetha" spelling kept
# for compatibility with the rest of the notebook).
thetha = np.linspace(0, 1, N).reshape(1, -1)
dthetha = thetha[0, 1] - thetha[0, 0]  # grid spacing
def get_bar(thetha, N):
    """Midpoint-average consecutive columns of a (1, N) row vector.

    Returns a (1, N-1) array whose entry i is the mean of columns i and i+1.
    The notebook also calls this on a cvxpy Variable, in which case the result
    is an object array of cvxpy expressions.
    """
    midpoints = [(thetha[:, i - 1] + thetha[:, i]) / 2 for i in range(1, N)]
    return np.array(midpoints).reshape(1, -1)
thetha_bar = get_bar(thetha, N)  # midpoints of the theta grid
u = cp.Variable((2, N))          # decision: applied force at each grid point
b = cp.Variable((1, N))          # decision: speed-profile variable b(theta) (presumably squared path speed)
b_bar = get_bar(b, N)
a = (cp.diff(b.T, k=1)/(2*dthetha)).T    # a = b'/2, the acceleration-like variable
s_bar_1 = (cp.diff(q.T, k=1)/dthetha).T  # first finite difference of the path (tangents)
# NOTE(review): a second derivative would normally be divided by dthetha**2,
# not dthetha — verify the intended scaling.
s_bar_2 = (cp.diff(q.T, k=2)/dthetha).T
# +
t = cp.Variable((1, N-1))   # per-segment traversal-time surrogates
cost = cp.sum(t)            # total traversal time
Objective = cp.Minimize(cost)
# +
# "Tilde" terms of the discretized dynamics, evaluated on the fixed path.
R_tilde = R(s_bar_1.value)  # rotation into path-aligned (lat/long) force coordinates
m_tilde = M@s_bar_1
c_tilde = M@cp.hstack((s_bar_2, cp.reshape(s_bar_2[:, -1], (2, 1)))) + C*cp.power(s_bar_1, 2)
d_tilde = d
s_1 = cp.diff(q, axis=1)    # NOTE(review): unused afterwards
# +
alpha = 0                   # NOTE(review): unused afterwards
Constraints = []
# Dynamics: rotated force balances m*a + c*(segment-averaged b) + d.
for i in range(1, N-1):
    Constraints.append(R_tilde[i]@cp.reshape(u[:, i], (2, 1)) == m_tilde*a[:, i] + c_tilde*0.5*(b[:, i-1] + b[:, i]) + d_tilde)
# Initial condition on b from the initial velocity (zero here).
# NOTE(review): q[1] - q[0] subtracts the two coordinate ROWS, not the first two
# path points (q[:, 1] - q[:, 0]); harmless while V_init == 0, but worth fixing.
Constraints.append(b[:, 0] == cp.power(np.linalg.norm(V_init)*dthetha/np.linalg.norm(q[1] - q[0]), 2))
# Time/speed coupling: 2*dtheta/t_i <= sqrt(b_i) + sqrt(b_{i-1}) (convex via inv_pos).
for i in range(1, N):
    Constraints.append(2*dthetha*cp.inv_pos(t[0, i-1]) - cp.sqrt(b[0, i]) - cp.sqrt(b[0, i-1]) <= 0)
# -
problem = cp.Problem(objective=Objective, constraints=Constraints)
problem.solve(verbose=True, solver='ECOS_BB')
# Plot the two optimal force components against each other.
plt.figure()
plt.plot(u.value[0, :], u.value[1, :])
plt.xlabel('f_lat')
plt.ylabel('f_long')
plt.grid()
plt.show()
# Recover the total traversal time from the optimal profile b.
# (NOTE: this rebinds `t` from the cvxpy Variable above to a plain float.)
t = 0
for i in range(1, N):
    t += 2*dthetha/(np.sqrt(b.value[0, i-1]) + np.sqrt(b.value[0, i]))
print("Optimal time for traversal is {}".format(t))
| minimum_time.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Introduction
#
# This notebook finds the the point at which the ECMWF tempearture=0 deg C for each
# radar pulse and overlays that on the reflectivity plot to check to see whether
# the bright band occurs at the freezing level
#
# It will also plot the rain rate for the storm track along with the corresponding radar reflectivity to study the eye of the storm
#
# # Storm Details
#
# * ### Name: Fanapi (Typhoon Fanapi)
# * ### Year: 2010
# * ### Region: SE Asia (landfall in Taiwan)
# * ### Wiki Reference and [Storm Details](https://en.wikipedia.org/wiki/Typhoon_Fanapi)
# ### Read in the height and reflectivity fields
# +
from importlib import reload
import numpy as np
import datetime as dt
from datetime import timezone as tz
from matplotlib import pyplot as plt
import pyproj
from numpy import ma
import a301
from a301.cloudsat import get_geo
from pathlib import Path
from pyhdf.SD import SD, SDC
import pdb
from a301.cloudsat import HDFvd_read, HDFsd_read
import warnings
plt.style.use('ggplot')
# -
# Pick the second GEOPROF granule in the data directory (index [1], not [0]).
z_file= list(a301.data_dir.glob('*GEOPROF_GRANULE*hdf'))[1]
lats,lons,date_times,prof_times,dem_elevation=get_geo(z_file)
# Reflectivity is stored as scaled values; keep the scale factor and missing sentinel.
radar_reflectivity, radar_attrs = HDFsd_read(z_file,'Radar_Reflectivity')
radar_scale = radar_attrs['factor']
radar_missing = radar_attrs['missing']
radar_height, height_attrs = HDFsd_read(z_file,'Height')
meters2km=1.e3  # metres per kilometre (used as a divisor below)
# ### Get rain rate
# The nan assignment below triggers a RuntimeWarning; silence it for this section.
warnings.filterwarnings("ignore",category =RuntimeWarning)
# Matching 2C-RAIN-PROFILE granule (same [1] index as the GEOPROF file).
r_file= list(a301.data_dir.glob('*2C-RAIN-PROFILE*hdf'))[1]
rain_rate = HDFvd_read(r_file,'rain_rate',vgroup='Data Fields')
invalid = (rain_rate == -9999.)   # -9999. is the missing-data sentinel
rain_rate[invalid] = np.nan #creates runtime warning due to nan value
# NOTE(review): negative rates are folded to their absolute value — presumably a
# sign convention of the retrieval; confirm against the 2C-RAIN-PROFILE docs.
hit = rain_rate < 0.
rain_rate[hit] = np.abs(rain_rate[hit])
plt.plot(rain_rate);
# ### Creating a masked array of the reflectivity so that pcolormesh will plot it
# Mask missing reflectivity values so pcolormesh will skip them.
hit=(radar_reflectivity == radar_missing)
# BUGFIX: np.float was removed in NumPy 1.24; the builtin float is the
# documented replacement (same 64-bit dtype).
radar_reflectivity=radar_reflectivity.astype(float)
radar_reflectivity[hit]=np.nan
zvals = radar_reflectivity/radar_scale   # undo the stored scaling -> dBZe
zvals=ma.masked_invalid(zvals)           # masked array: NaNs become masked cells
# ### Find the part of the Satellite orbiting track that corresponds to the 3 minutes containing the storm
first_time=date_times[0]
print(f'orbit start: {first_time}')
# Hand-picked start (UTC) of the 3-minute window containing the storm.
start_hour=17
start_minute=14
storm_start=starttime=dt.datetime(first_time.year,first_time.month,first_time.day,
                                  start_hour,start_minute,0,tzinfo=tz.utc)
#
# get 3 minutes of data from the storm_start
#
storm_stop=storm_start + dt.timedelta(minutes=3)
print(f'storm start: {storm_start}')
time_hit = np.logical_and(date_times > storm_start,date_times < storm_stop)
# Subset every per-shot array to the storm window.
storm_lats = lats[time_hit]
storm_lons=lons[time_hit]
storm_prof_times=prof_times[time_hit]
storm_zvals=zvals[time_hit,:]
storm_height=radar_height[time_hit,:]
storm_date_times=date_times[time_hit]
rain_rate=rain_rate[time_hit]   # NOTE: overwrites the full-orbit rain_rate in place
# ### convert time to distance to get the greatcircle distance between shots
# Accumulate along-track distance (km) between consecutive radar shots using
# geodesic (WGS84) distances.
great_circle=pyproj.Geod(ellps='WGS84')
distance=[0]
start=(storm_lons[0],storm_lats[0])   # NOTE(review): unused afterwards
for index in np.arange(1,len(storm_lons)):
    # inv() returns forward azimuth, back azimuth, and distance in metres.
    azi12,azi21,step= great_circle.inv(storm_lons[index-1],storm_lats[index-1],
                                       storm_lons[index],storm_lats[index])
    distance.append(distance[index-1] + step)
distance=np.array(distance)/meters2km   # metres -> km
# ### Make the plot assuming that height is the same for every shot
#
# * need to customize the subplots to share the x axis between the radar reflectivity and the rain_rate, and adjust the sizes to hold a colorbar
# +
# %matplotlib inline
from matplotlib import cm
from matplotlib.colors import Normalize
from mpl_toolkits.axes_grid1 import make_axes_locatable
def plot_field2(distance, height, field, fig, cmap=None, norm=None):
    """
    draw a 2 panel plot with different panel sizes. Put the radar reflectivity
    in the top panel with a colorbar along the bottom, and pass the second
    axis back to be filled in later

    uses the sharex keyword to give both plots the same x axis (distance)
    and the gridspec class to lay out the grid
    https://stackoverflow.com/questions/10388462/matplotlib-different-size-subplots

    Parameters
    ----------
    distance, height : 1-D coordinate arrays for pcolormesh
    field : 2-D array of values to shade
    fig : matplotlib Figure to draw into
    cmap : colormap (defaults to cm.inferno)
    norm : color normalization.  BUGFIX: this parameter used to be ignored in
        favour of the global ``the_norm``; it is now honoured.  Every call site
        in this notebook already passes norm=the_norm, so output is unchanged.

    Returns
    -------
    (ax1, ax2) : the two axes, sharing the x (distance) axis
    """
    from matplotlib import gridspec
    gs = gridspec.GridSpec(2, 1, height_ratios=[2, 1])
    ax1 = fig.add_subplot(gs[0])
    ax2 = fig.add_subplot(gs[1], sharex=ax1)
    if cmap is None:
        cmap = cm.inferno
    col = ax1.pcolormesh(distance, height, field, cmap=cmap, norm=norm)
    # https://stackoverflow.com/questions/18195758/set-matplotlib-colorbar-size-to-match-graph
    # create an axes on the bottom side of ax1. The height of cax will be 5%
    # of ax and the padding between cax and ax will be fixed at 0.55 inch.
    divider = make_axes_locatable(ax1)
    cax = divider.append_axes("bottom", size="5%", pad=0.55)
    ax1.figure.colorbar(col, extend='both', cax=cax, orientation='horizontal')
    return ax1, ax2
# Colour scale for reflectivity (dBZe): clip range [-30, 20] with distinct
# colours for over/under/missing cells.
vmin=-30
vmax=20
the_norm=Normalize(vmin=vmin,vmax=vmax,clip=False)
cmap_ref=cm.plasma
cmap_ref.set_over('w')
cmap_ref.set_under('b',alpha=0.2)
cmap_ref.set_bad('0.75') #75% grey
# Assume the height bins of the first shot apply to every shot (stated above).
cloud_height_km=radar_height[0,:]/meters2km
fig = plt.figure(figsize=(15, 8))
ax1, ax2 = plot_field2(distance,cloud_height_km,storm_zvals.T,fig,cmap=cmap_ref,norm=the_norm)
ax1.set(ylim=[0,17],xlim=(0,1200))
ax1.set(xlabel='distance (km)',ylabel='height (km)',
        title='equivalent radar reflectivity in dbZe');
# -
# ### Add the rain rate
#
# Use the second axis to draw the rain rate and redraw the figure
# Overlay the rain rate on the lower (shared-x) panel and re-display the figure.
ax2.plot(distance,rain_rate)
ax2.set(xlabel='distance (km)',ylabel='rain rate (mm/hour)')
display(fig)
# ### Repeat for precipitatable liquid water and retrieval uncertainty
#
# Make a new plot pair -- for variable information see the [dataset docs](http://www.cloudsat.cira.colostate.edu/data-products/level-2c/2c-rain-profile?term=56)
# precip_liquid_water is stored as scaled values; the attributes hold the scale factor.
liquid_water, lw_attributes = HDFsd_read(r_file,'precip_liquid_water')
lw_attributes
# +
precip_uncertainty = HDFvd_read(r_file,'rain_rate_uncertainty',vgroup='Data Fields')
# Subset to the storm window and undo the stored scaling.
storm_liquid=liquid_water[time_hit,:]/lw_attributes['factor']
precip_uncert=precip_uncertainty[time_hit]
# Colour scale for liquid water content.
vmin=0.01
vmax=0.5
the_norm=Normalize(vmin=vmin,vmax=vmax,clip=False)
cmap_ref=cm.plasma
cmap_ref.set_over('w')
cmap_ref.set_under('b',alpha=0.02)
cmap_ref.set_bad('0.75') #75% grey
fig = plt.figure(figsize=(15, 8))
ax1, ax2 = plot_field2(distance,cloud_height_km,storm_liquid.T,fig,cmap=cmap_ref,norm=the_norm)
ax1.set(ylim=[0,6],xlim=(0,1200))
ax1.set(xlabel='distance (km)',ylabel='height (km)',
        title='precipitable liquid water content (g/m^3)')
ax2.plot(distance,precip_uncert)
ax2.set(ylim=[0,15],xlabel='distance (km)',ylabel='rain rate uncertainty (%)');
# -
lw_attributes  # displayed again (duplicate of the display above)
# ### Get ECMWF temps
# Matching ECMWF-AUX granule: model temperatures along the radar track.
z_file= list(a301.data_dir.glob('*ECMWF-AUX_GRANULE*hdf'))[1]
ec_height=HDFvd_read(z_file,'EC_height')
ec_temps, temps_attributes = HDFsd_read(z_file,'Temperature')
ec_missing = temps_attributes['missing']
# ### Subsetting the ECMWF data
# Mask missing values, convert Kelvin -> Celsius, and clip to the storm window.
bad_temps = (ec_temps == ec_missing)
ec_temps[bad_temps]=np.nan
ec_temps=np.ma.masked_invalid(ec_temps)
ec_temps = ec_temps - 273.15
ec_temps=ec_temps[time_hit,:]
# ### ECMWF temperatures for the segment
# +
def plot_field(distance, height, field, ax, cmap=None, norm=None):
    """
    given an axis, draw a cloudsat cross section with pcolormesh and attach a
    colorbar to the same axis.

    Parameters
    ----------
    distance, height : 1-D coordinate arrays
    field : 2-D array of values to shade
    ax : matplotlib Axes to draw into
    cmap : colormap (defaults to cm.inferno)
    norm : color normalization.  BUGFIX: this parameter used to be ignored in
        favour of the global ``the_norm``; it is now honoured (the only call
        site already passes norm=the_norm, so rendered output is unchanged).

    Returns
    -------
    ax : the same axes, for chaining
    """
    if cmap is None:
        cmap = cm.inferno
    col = ax.pcolormesh(distance, height, field, cmap=cmap, norm=norm)
    ax.figure.colorbar(col, extend='both', ax=ax)
    return ax
fig, ax =plt.subplots(1,1,figsize=(15,4))
# Diverging colour scale centred on 0 deg C so the freezing level stands out.
vmin=-30
vmax=30
the_norm=Normalize(vmin=vmin,vmax=vmax,clip=False)
cmap_ec= cm.bwr
cmap_ec.set_over('w')
cmap_ec.set_under('b',alpha=0.2)
cmap_ec.set_bad('0.75') #75% grey
ec_height_km=ec_height/meters2km
ax=plot_field(distance,ec_height_km,ec_temps.T,ax,cmap=cmap_ec,
              norm=the_norm)
ax.set(ylim=[0,10],xlim=(0,1200))
ax.set(xlabel='distance (km)',ylabel='height (km)',title='ECMWF temps in deg C')
fig.savefig('temps.png')
# -
# ### Read in the heating rate
# +
# FLXHR granule: radiative heating rates (QR) along the track.
hr_file= list(a301.data_dir.glob('*FLXHR*hdf'))[1]
lats,lons,date_times,prof_times,dem_elevation=get_geo(hr_file)
lats=lats.squeeze()
lons=lons.squeeze()
# QR has a leading band axis: index 0 is shortwave, 1 is longwave (split below).
qr, qr_attrs = HDFsd_read(hr_file,'QR')
qr_height, height_attrs = HDFsd_read(hr_file,'Height')
# Scale factor, missing sentinel, and units live in the "Swath Attributes" vdata.
factor = HDFvd_read(hr_file,'QR.factor',vgroup='Swath Attributes')[0][0]
missing = HDFvd_read(hr_file,'QR.missing',vgroup='Swath Attributes')[0][0]
units = HDFvd_read(hr_file,'QR.units',vgroup='Swath Attributes')[0][0]
#set_trace()
# Unscale, mask missing values, and clip to the storm window.
hit = (qr == missing)
qr = qr.astype(np.float64)/factor
qr[hit]=np.nan
storm_qr=qr[:,time_hit,:]
storm_height=qr_height[time_hit,:]
# -
# ### Split longwave and shortwave heating rate
# Band 0 of QR is the shortwave heating rate, band 1 the longwave.
shortwave_qr=storm_qr[0,:,:]
longwave_qr=storm_qr[1,:,:]
# ### Make a plots for shortwave and longwave heating rates
# +
# %matplotlib inline
from matplotlib import cm
from matplotlib.colors import Normalize
from mpl_toolkits.axes_grid1 import make_axes_locatable
def plot_field2(distance, height, field, fig, cmap=None, norm=None):
    """
    draw a 2 panel plot with identical panel sizes.  (NOTE: this REDEFINES the
    earlier plot_field2, whose top panel was twice the height of the bottom.)

    uses the sharex keyword to give both plots the same x axis (distance)
    and the gridspec class to lay out the grid
    https://stackoverflow.com/questions/10388462/matplotlib-different-size-subplots

    norm : BUGFIX — this parameter used to be ignored in favour of the global
        ``the_norm``; it is now honoured.  The call site below already passes
        norm=the_norm, so rendered output is unchanged.
    """
    from matplotlib import gridspec
    gs = gridspec.GridSpec(2, 1, height_ratios=[1, 1])
    ax1 = fig.add_subplot(gs[0])
    ax2 = fig.add_subplot(gs[1], sharex=ax1)
    if cmap is None:
        cmap = cm.inferno
    col = ax1.pcolormesh(distance, height, field, cmap=cmap, norm=norm)
    # https://stackoverflow.com/questions/18195758/set-matplotlib-colorbar-size-to-match-graph
    # create an axes on the bottom side of ax1. The height of cax will be 5%
    # of ax and the padding between cax and ax will be fixed at 0.55 inch.
    divider = make_axes_locatable(ax1)
    cax = divider.append_axes("bottom", size="5%", pad=0.55)
    ax1.figure.colorbar(col, extend='both', cax=cax, orientation='horizontal')
    return ax1, ax2
meters2km=1.e3
# Sequential colour scale for the shortwave heating rate (K/day).
vmin=0
vmax=15
the_norm=Normalize(vmin=vmin,vmax=vmax,clip=False)
cmap_ref=cm.plasma
cmap_ref.set_over('c')
cmap_ref.set_under('b',alpha=0.2)
cmap_ref.set_bad('0.75') #75% grey
#
# see http://strftime.org/ for these format strings
#
start_string=storm_date_times[0].strftime("%Y-%m-%d %H:%M:%S UTC")
stop_string=storm_date_times[-1].strftime("%Y-%m-%d %H:%M:%S UTC")
cloud_height_km=qr_height[0,:]/meters2km
fig = plt.figure(figsize=(15, 8))
# Top panel: shortwave heating rate.
ax1, ax2 = plot_field2(distance,cloud_height_km,shortwave_qr.T,
                       fig,cmap=cmap_ref,norm=the_norm)
ax1.set(ylim=[0,17],xlim=(0,1200))
ax1.set(xlabel='distance (km)',ylabel='height (km)',
        title=f'shortwave heating rate in (K/day) from {start_string} to '
        f'{stop_string}');
# Bottom panel: longwave heating rate, on its own diverging scale.
vmin=-20
vmax=20.
cmap_ref=cm.bwr
cmap_ref.set_over('c')
cmap_ref.set_under('b',alpha=0.2)
cmap_ref.set_bad('0.75') #75% grey
the_norm=Normalize(vmin=vmin,vmax=vmax,clip=False)
divider = make_axes_locatable(ax2)
cax = divider.append_axes("bottom", size="5%", pad=0.55)
col=ax2.pcolormesh(distance,cloud_height_km,longwave_qr.T,cmap=cmap_ref,
                   norm=the_norm)
ax2.figure.colorbar(col,extend='both',cax=cax,orientation='horizontal')
title = (f'longwave heating rate in (K/day) from {start_string} to '
         f'{stop_string}')
ax2.set(xlabel='distance (km)',ylabel='height (km)',title=title,ylim=[0,17]);
| notebooks/cloudsat_precip_Storm_Fanapi-PA.ipynb |
# -*- coding: utf-8 -*-
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .r
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: R
# language: R
# name: ir
# ---
# +
# 6.7 Crude-oil production data: the data in Table 6.19 are the annual world
# crude-oil production figures for 1880-1988 (unit: millions of barrels).
# The data are taken from Moore & McCabe (1993), p.147.
library(ggplot2) # ggplot
library(ggfortify) # autoplot
library(gridExtra)
#setwd('H:/01.Korea/01.2019-1학기/05.회귀분석 방법론/Report/Chapter6')
# NOTE(review): hardcoded absolute Windows path -- not portable across machines.
setwd('D:/Working/03.Korea/회귀분석/report/Chapter6')
p189_data <- read.table("P189.txt", header = TRUE)
str(p189_data)
# -
# (a) Draw a scatter plot of oil production (OIL) against Year.
p189_reg <- lm(Barrels ~ Year, data = p189_data)   # simple linear fit, Barrels ~ Year
summary(p189_reg)
ggplot(p189_data,aes(Year, Barrels)) + geom_point() + geom_smooth(method='lm')
# Note that the points in the plot do not follow a straight line; the OIL
# variable must be transformed before a linear model can fit these data.
autoplot(p189_reg                    # standard lm diagnostic plots
         , colour = "black"
         , label.hjust = -2
         , label.colour ="red"
         , smooth.colour = "blue"
         , ad.colour = "red")
# +
# (b) Plot log(OIL) against Year. The points now follow a straight line over
# 1880-1973. After 1973, political turmoil in the Middle East affected the
# pattern of crude-oil production.
p189_1973_data <- subset(p189_data, Year < 1973)   # restrict to the pre-1973 era
p189_1973_log_reg <- lm(log(Barrels) ~ Year, data = p189_1973_data)
summary(p189_1973_log_reg)
p1 <- ggplot(p189_data, aes(Year, Barrels)) + geom_point() + geom_smooth(method='lm')
p2 <- ggplot(p189_1973_data, aes(Year, log(Barrels))) + geom_point() + geom_smooth(method='lm')
grid.arrange(p1, p2, ncol=2, nrow=1)   # raw vs log-scale fits side by side
# +
# (c) Fit the linear regression of log(OIL) on Year and assess the model fit.
# (This refits the same model already fitted in part (b).)
p189_1973_log_reg <- lm(log(Barrels) ~ Year, data = p189_1973_data)
summary(p189_1973_log_reg)
autoplot(p189_1973_log_reg           # diagnostic plots for the log-scale fit
         , colour = "black"
         , label.hjust = -2
         , label.colour ="red"
         , smooth.colour = "blue"
         , ad.colour = "red")
# +
# (d) Draw index plots of the standardized residuals. The plot clearly shows
# which one of the standard assumptions is violated -- which is it?
par(mfrow = c(2,2))
plot(p189_1973_data$Year, rstandard(p189_1973_log_reg), pch = 20, main = "Standardized-residuals")
abline(h=0)
plot(p189_1973_data$Barrels, rstandard(p189_1973_log_reg), pch = 20, main = "Standardized-residuals")
abline(h=0)
plot(rstandard(p189_1973_log_reg), pch=20, main = "Standardized-residuals")
abline(h=0)
| 1st semester/01.LinearRegression/Chapter6/HW(6-7).ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # NetworKit Distance Tutorial
# NetworKit provides several graph traversal and pathfinding algorithms within the `distance` module. This notebook covers most of these algorithms, and shows how to use them.
import networkit as nk
# For this tutorial we will use the same graph, and the same source and target node. We will index the edges of the graph because some algorithms require the edges to be indexed.
# Read a graph
G = nk.readGraph("../input/foodweb-baydry.konect", nk.Format.KONECT)
GDir = G                             # keep a handle to the original (directed) graph
G = nk.graphtools.toUndirected(G)    # work on the undirected version from here on
source = 0                           # source node used by every algorithm below
target = 27                          # target node used by every algorithm below
G.indexEdges()                       # some of the algorithms below require indexed edges
# ## Algebraic Distance
# Algebraic distance assigns a distance value to pairs of nodes according to their structural closeness in the graph. Algebraic distances will become small within dense subgraphs.
#
# The [AlgebraicDistance(G, numberSystems=10, numberIterations=30, omega=0.5, norm=0, withEdgeScores=False)](https://networkit.github.io/dev-docs/python_api/distance.html?highlight=alg#networkit.distance.AlgebraicDistance) constructor expects a graph followed by the number of systems to use for algebraic iteration and the number of iterations in each system. `omega` is the overrelaxation parameter while `norm` is the norm factor of the extended algebraic distance. Set `withEdgeScores` to true if the array of scores for edges {u,v} that equal ad(u,v) should be calculated.
# Initialize algorithm
# Positional arguments: numberSystems=10, numberIterations=100, omega=1,
# norm=1, withEdgeScores=True.
ad = nk.distance.AlgebraicDistance(G, 10, 100, 1, 1, True)
# Run
ad.preprocess()
# The algebraic distance between the source and target node
ad.distance(source, target)
# ## All Simple Paths
# This algorithm computes all existing simple paths from a source node to a target node.
#
# The [AllSimplePaths(G, source, target, cutoff=none)](https://networkit.github.io/dev-docs/python_api/distance.html?highlight=all#networkit.distance.AllSimplePaths) constructor expects an unweighted graph, the source node and the target node as mandatory parameters. The maximum length of the paths can be fixed through `cutoff` parameter. This algorithm could take a lot of time on large networks (many edges), especially if the cutoff value is high or not specified.
#
# The algorithm is implemented only for unweighted graphs, so we shall convert the G to an unweighted graph.
# NOTE(review): this unweights the directed graph GDir, not the undirected G
# used elsewhere -- confirm that is intended.
GUnweighted = nk.graphtools.toUnweighted(GDir)
# Initialize algorithm
asp = nk.distance.AllSimplePaths(GUnweighted, source, target, 5)  # cutoff=5 bounds path length
# Run
asp.run()
# The number of simple paths in the graph
print(asp.numberOfSimplePaths())
# The list of all paths
paths = asp.getAllSimplePaths()
# Print first simple path node 0
print(paths[0])
# ## All-Pairs Shortest-Paths (APSP)
# The APSP algorithm computes all pairwise shortest-path distances in a given graph. It is implemented running Dijkstra’s algorithm from each node, or BFS if the graph is unweighted.
#
# The constructor [APSP(G)](https://networkit.github.io/dev-docs/python_api/distance.html?highlight=apsp#networkit.distance.APSP) expects a graph.
# Initialize algorithm
# APSP computes shortest-path distances between every pair of nodes in one run.
apsp = nk.distance.APSP(G)
# Run
apsp.run()
# The distance from source to target node
print(apsp.getDistance(source, target))
# ## A*
# A* is an informed search algorithm , as it uses information about path cost and also uses heuristics to find the shortest path.
#
# The [AStar(G, heu, source, target, storePred=True)](https://networkit.github.io/dev-docs/python_api/distance.html?highlight=astar#networkit.distance.AStar) constructor expects a graph, the source and target nodes as mandatory parameters. The algorithm will also store the predecessors and reconstruct a shortest path from the source and the target if `storePred` is true. `heu` is a list of lower bounds of the distance of each node to the target.
# As we do not have any prior knowledge about the graph we choose all zeros as a heuristic because zero is always a lower bound of the distance between two nodes. In this case, the A* algorithm is equivalent to Dijkstra.
# Initialize algorithm
# All-zero heuristic: zero is always a valid lower bound, so A* degenerates to
# Dijkstra here (as noted above).
heuristic = [0 for _ in range(G.upperNodeIdBound())]
astar = nk.distance.AStar(G, heuristic, source, target)
# Run
astar.run()
# The distance from source to target node
print(astar.getDistance())
# The path from source to target node
print(astar.getPath())
# ## Breadth-First Search (BFS)
# BFS is an algorithm for traversing a graph which starts from the source node `u`, and explores all of the u's neighbors nodes at the present depth before moving on to the nodes at the next depth level. BFS finds the shortest paths from a source to all the reachable nodes of an unweighted graph.
#
# The [BFS(G, source, storePaths=True, storeNodesSortedByDistance=False, target=none)](https://networkit.github.io/dev-docs/python_api/distance.html?highlight=bfs#networkit.distance.BFS) constructor expects a graph and a source node as mandatory parameters. If the paths should be stored, set `storedPaths` to true. If `storeNodesSortedByDistance` is set, a vector of nodes ordered in increasing distance from the source is stored. `target` is the target node.
# Initialize algorithm
# Positional arguments: storePaths=True, storeNodesSortedByDistance=False, then the target.
bfs = nk.distance.BFS(G, source, True, False, target)
# Run
bfs.run()
# The distance from source to target node
print(bfs.distance(target))
# The number of shortest paths between the source node
print(bfs.numberOfPaths(target))
# Returns a shortest path from source to target
print(bfs.getPath(target))
# ## Bidirectional BFS
# The Bidirectional BFS algorithm explores the graph from both the source and target nodes until the two explorations meet. This version of BFS is more efficient than BFS when the target node is known.
#
# The [BidirectionalBFS(G, source, target, storePred=True)](https://networkit.github.io/dev-docs/python_api/distance.html?highlight=bidirec#networkit.distance.BidirectionalBFS) constructor expects a graph, the source and target nodes as mandatory parameters. The algorithm will also store the predecessors and reconstruct a shortest path from the source and the target if `storePred` is true.
# Initialize algorithm
biBFS = nk.distance.BidirectionalBFS(G, source, target)
# Run
biBFS.run()
# Unlike BFS, the `getPath` method does not include the source at the beginning,
# and the target at the end of the returned list.
# The distance from source to target node
print(biBFS.getHops())
print(biBFS.getPath())
# ## Dijkstra
# Dijkstra's algorithm finds the shortest path from a source node a target node. This algorithm creates a tree of shortest paths from the source to all other nodes in the graph. Dijkstra's algorithm finds the shortest paths from a source to all the reachable nodes of a weighted graph.
#
# The [Dijkstra(G, source, storePaths=True, storeNodesSortedByDistance=False, target=none)](https://networkit.github.io/dev-docs/python_api/distance.html?highlight=dij#networkit.distance.Dijkstra) constructor expects a graph and a source node as mandatory parameters. If the paths should be stored, set `storedPaths` to true. If `storeNodesSortedByDistance` is set, a vector of nodes ordered in increasing distance from the source is stored. `target` is the target node.
# Initialize algorithm
# Positional arguments: storePaths=True, storeNodesSortedByDistance=False, then the target.
dijkstra = nk.distance.Dijkstra(G, source, True, False, target)
# Run
dijkstra.run()
# The distance from source to target node
print(dijkstra.distance(target))
# The number of shortest paths between the source node
print(dijkstra.numberOfPaths(target))
# Returns a shortest path from source to target
print(dijkstra.getPath(target))
# ## Bidirectional Dijkstra
# The Bidirectional Dijkstra algorithm explores the graph from both the source and target nodes until the two explorations meet. This version of Dijkstra is more efficient than the convential Dijkstra when the target node is known.
#
# The [BidirectionalDijkstra(G, source, target, storePred=True)](https://networkit.github.io/dev-docs/python_api/distance.html?highlight=bidirec#networkit.distance.BidirectionalDijkstra) constructor expects a graph, the source and target nodes as mandatory parameters. The algorithm will also store the predecessors and reconstruct a shortest path from the source and the target if `storePred` is true.
# Initialize algorithm
biDij = nk.distance.BidirectionalDijkstra(G, source, target)
# Run
biDij.run()
# Unlike Dijkstra, the `getPath` method does not include the source at the
# beginning, and the target at the end of the returned list.
# The distance from source to target node
print(biDij.getDistance())
# The path from source to target node
print(biDij.getPath())
# ## Commute Time Distance
# This class computes the Euclidean Commute Time Distance between each pair of nodes for an undirected unweighted graph.
#
# The [CommuteTimeDistance(G, tol=0.1)](https://networkit.github.io/dev-docs/python_api/distance.html?highlight=commute#networkit.distance.CommuteTimeDistance) constructor expects a graph as a mandatory parameter. The optional parameter `tol` is the tolerance parameter used for approximation.
# Initialize algorithm
ctd = nk.distance.CommuteTimeDistance(G)
# Run (computes the distances for all node pairs; distance() then queries them)
ctd.run()
# The distance from source to target node
print(ctd.distance(source, target))
# If one wants to compute the commute time distance between just two nodes,
# they should use the runSinglePair(u, v) method instead of run().
ctd.runSinglePair(source,target)
# ## Diameter
# This algorithm gives an estimation of the diameter of a given graph. The algorithm is based on the ExactSumSweep algorithm presented in Borassi, Crescenzi, Habib, Kosters, Marino, and Takes: http://www.sciencedirect.com/science/article/pii/S0304397515001644.
#
# The [Diameter(G, algo=DiameterAlgo.Automatic, error=1.0, nSamples=0)](https://networkit.github.io/dev-docs/python_api/distance.html?highlight=diameter#networkit.distance.Diameter) constructor expects a graph as mandatory parameter. `algo` specifies the choice of diameter algorithm while `error` is the maximum allowed relative error. Set to 0 for the exact diameter. `nSamples`is the number of samples to be used. `algo` can be chosen between from
# 0. automatic
# 1. exact
# 2. estimatedRange
# 3. estimatedSamples
# 4. estimatedPedantic
#
# Note that the input graph must be connected, otherwise the resulting diameter will be infinite. As the graph we are using is not connected, we shall extract the largest connected component from it and then compute the diameter of the resulting graph.
# Extract largest connect component
newGraph = nk.components.ConnectedComponents.extractLargestConnectedComponent(G, True)
newGraph.numberOfNodes()
# Initialize algorithm to compute the exact diameter of the input graph
diam = nk.distance.Diameter(newGraph,algo=1)
# Run
diam.run()
# Get diameter of graph
diam.getDiameter()
# The return value of `getDiameter` is a pair of integers, i.e., the lower bound and upper bound of the diameter. In the case, that we computed the exact diameter, the diameter is the first value of the pair.
# ## Eccentricity
# The eccentricity of a node `u` is defined as the distance to the farthest node from node u. In other words, it is the longest shortest-path starting from node `u`.
#
# The eccentricity of a graph can be computed by calling the [getValue(G, v)]() method, and passing a graph and a node. The method returns the node farthest from v, and the length of the shortest path between `v` and the farthest node.
# Run
nk.distance.Eccentricity.getValue(G, source)
# ## Effective Diameter
# The effective diameter is defined as the number of edges on average to reach a given ratio of all other nodes.
#
# The [EffectiveDiameter(G, ratio=0.9)](https://networkit.github.io/dev-docs/python_api/distance.html?highlight=effective#networkit.distance.EffectiveDiameter) constructor expects an undirected graph and the ratio of nodes that should be connected. The ratio must be between in the interval (0,1].
# Initialize algorithm
ed = nk.distance.EffectiveDiameter(G)
# Run
ed.run()
# Get effective diameter
ed.getEffectiveDiameter()
# ## Effective Diameter Approximation
# This class approximates the effective diameter according to the algorithm presented in "A Fast and Scalable Tool for Data Mining in Massive Graphs" by [Palmer, Gibbons, and Faloutsos](http://www.cs.cmu.edu/~christos/PUBLICATIONS/kdd02-anf.pdf).
#
# The [EffectiveDiameter(G, ratio=0.9, k=64, r=7)]() constructor expects an undirected graph, the ratio of nodes that should be connected, the number of parallel approximations `k` to get a more robust results, and the number of bits `r` that should be added to the bitmask. The more bits are added to the bitmask, the higher the accuracy. The ratio must be between in the interval (0,1].
# Initialize algorithm
eda = nk.distance.EffectiveDiameterApproximation(G)
# Run
eda.run()
# Get effective diameter
eda.getEffectiveDiameter()
# ## Reverse BFS
# This class does a reverse breadth-first search (following the incoming edges of a node) on a directed graph from a given source node.
#
# The [ReverseBFS(G, source, storePaths=True, storeNodesSortedByDistance=False, target=none)](https://networkit.github.io/dev-docs/python_api/distance.html?highlight=bfs#networkit.distance.BFS) constructor expects a graph and a source node as mandatory parameters. If the paths should be stored, set `storedPaths` to true. If `storeNodesSortedByDistance` is set, a vector of nodes ordered in increasing distance from the source is stored. `target` is the target node.
# Initialize algorithm
rbfs = nk.distance.ReverseBFS(G, source, True, False, target)
# Run
rbfs.run()
# The distance from source to target node
print(rbfs.distance(target))
# The number of shortest paths between source and target
print(rbfs.numberOfPaths(target))
| notebooks/Distance.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] nbgrader={"grade": false, "grade_id": "q3_prompt", "locked": true, "schema_version": 1, "solution": false}
# # Q3
#
# In this question, you'll compute basic statistics of data and determine the best way to represent it under different circumstances.
# + [markdown] nbgrader={"grade": false, "grade_id": "q3a_prompt", "locked": true, "schema_version": 1, "solution": false}
# ### Part A
#
# In this question, you'll work with 2-dimensional data. The following code uses a subset of the [UCI ML Boston housing prices dataset](http://archive.ics.uci.edu/ml/datasets/Housing). It has been used in many machine learning papers, usually in a regression application. However, in our case, we'll look at how to summarize this data.
#
# At the end of the following code block (which you DON'T modify!), you'll see a matrix `X` that is created for you. This matrix has 506 rows (data points), and 2 columns (dimensions), and is visualized in a scatter plot. You'll notice most of the data clusters together between 0 and 10 on the x-axis, but there are a few data points that are clear outliers.
#
# Your job is to **compute a summary statistic of this data that is robust to these outliers.** Using your knowledge of summary statistics from lecture:
#
# 1. compute a single 2D data point that summarizes the data
# 2. visualize the quality of your summary statistic by using the function **`plot_data_and_stat`** (it takes 2 arguments: the data $X$, and your summary statistic), which will visualize the data in blue dots, and the statistic as a yellow pentagon
#
# If your statistic is robust to outliers, it should fall in the big cluster of data points, not in the open space between them.
#
# Your method can include some data pre-processing! You just can't use any pre-packaged "outlier detection" methods, unless you implement it yourself, of course. But it can be as complex or as simple as you'd like, so long as it adheres to the robust-to-outliers requirement.
# + nbgrader={"grade": false, "grade_id": "q3a_setup", "locked": true, "schema_version": 1, "solution": false}
# %matplotlib inline
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.datasets import load_boston
# Call this with just the data X to visualize it.
def plot_data(X, alpha = 1.0):
    """Scatter-plot the 2D data `X` (column 0 = x, column 1 = y) on the current axes.

    Parameters
    ----------
    X : array-like, shape (n_samples, 2)
        Data points to display.
    alpha : float, optional
        Marker transparency, passed straight through to ``plt.scatter``.
    """
    xs, ys = X[:, 0], X[:, 1]
    plt.scatter(xs, ys, alpha = alpha)
##############
# Call this WITH YOUR 2D SUMMARY STATISTIC to visualize it simultaneously with the data.
# "statistic" should be a 2-element array.
# - 1st element is x-value
# - 2nd element is y-value
##############
def plot_data_and_stat(X, statistic, stat_label = 'Mean'):
    """Visualize the 2D data together with a single 2D summary statistic.

    The data are drawn as faint blue dots and the statistic as a yellow
    pentagon with a black edge.

    Parameters
    ----------
    X : array-like, shape (n_samples, 2)
        Data points (column 0 = x, column 1 = y).
    statistic : sequence of 2 floats
        The summary statistic: [x-value, y-value].
    stat_label : str, optional
        Legend label for the statistic marker. Defaults to 'Mean' for
        backward compatibility, but pass e.g. 'Median' when plotting a
        robust statistic so the legend is not misleading.
    """
    plt.scatter(X[:, 0], X[:, 1], alpha = 0.05, label = 'Data')
    # 'p' = pentagon marker; mec/mew give it a thin black outline.
    plt.plot(statistic[0], statistic[1], c = 'y', marker = 'p', ms = 12.0,
             mec = 'k', mew = 1.0, label = stat_label)
    plt.legend(loc = 0)
##########################
#
# SETUP CODE STARTS HERE
#
##########################
X = load_boston()['data'][:, [8, 10]] # two clusters
print(X.shape)
plot_data(X)
# + nbgrader={"grade": true, "grade_id": "q3a", "locked": false, "points": 15.0, "schema_version": 1, "solution": true}
# + [markdown] nbgrader={"grade": false, "grade_id": "q3b_prompt", "locked": true, "schema_version": 1, "solution": false}
# ### Part B
#
# Justify your solution in the previous question. Are there any circumstances where you'd advise against this approach (i.e., are there any weaknesses to your solution)?
# + [markdown] nbgrader={"grade": true, "grade_id": "q3b", "locked": false, "points": 15.0, "schema_version": 1, "solution": true}
#
| assignments/A7/A7_Q3.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python [default]
# language: python
# name: python3
# ---
# +
from os import path
# Third-party
import astropy.coordinates as coord
from astropy.table import Table, vstack
from astropy.io import fits
import astropy.units as u
import matplotlib as mpl
import matplotlib.pyplot as plt
import numpy as np
# %matplotlib inline
from pyvo.dal import TAPService
from pyia import GaiaData
import gala.coordinates as gc
# -
# Bigger polygon!
# +
for r in gc.GD1(phi1=[280, 280, 300, 300]*u.deg,
phi2=[-5, 5, 5, -5]*u.deg).transform_to(coord.ICRS):
print('{0:.3f}, {1:.3f},'.format(r.ra.degree, r.dec.degree))
# for r in gc.GD1(phi1=[340, 340, 360, 360]*u.deg,
# phi2=[-5, 5, 5, -5]*u.deg).transform_to(coord.ICRS):
# print('{0:.3f}, {1:.3f},'.format(r.ra.degree, r.dec.degree))
# +
# tap = TAPService("http://tapvizier.u-strasbg.fr/TAPVizieR/tap")
# +
# query = """SELECT TOP 100 *
# FROM \"I/345/gaia2\"
# WHERE parallax < 1 AND
# DISTANCE(POINT('ICRS', 34.59, 29.73),
# POINT('ICRS', ra, dec)) > 87 AND
# DISTANCE(POINT('ICRS', 34.59, 29.73),
# POINT('ICRS', ra, dec)) < 93
# """
# # res = tap.search(query, maxrec=100000)
# job = tap.submit_job(query)
# -
t1 = Table(fits.getdata('../data/gd1-polygon.fits'))
t2 = Table(fits.getdata('../data/gd1-polygon2.fits'))
t3 = Table(fits.getdata('../data/gd1-polygon3.fits'))
t = vstack((t1, t2, t3))
g = GaiaData(t)
c = coord.SkyCoord(ra=g.ra, dec=g.dec,
pm_ra_cosdec=g.pmra, pm_dec=g.pmdec)
# g = g[g.parallax > 0]
def gd1_dist(phi1):
    """Rough heliocentric distance to the GD-1 stream as a function of phi1.

    Linear interpolation through two anchor points along the stream:
    (phi1 = 0 deg, d = 10 kpc) and (phi1 = -60 deg, d = 7 kpc).

    Parameters
    ----------
    phi1 : astropy angle-like (must support ``.wrap_at``)
        Stream longitude; wrapped to [-180, 180) deg before evaluating.

    Returns
    -------
    astropy Quantity
        Approximate distance in kpc.
    """
    # Anchor points (phi1 [deg], distance [kpc]):
    # 0, 10
    # -60, 7
    m = (10-7) / (60)  # slope: 3 kpc over 60 deg
    return (m*phi1.wrap_at(180*u.deg).value + 10) * u.kpc
# +
gd1_c = c.transform_to(gc.GD1)
gd1_c_dist = gc.GD1(phi1=gd1_c.phi1, phi2=gd1_c.phi2,
distance=gd1_dist(gd1_c.phi1),
pm_phi1_cosphi2=gd1_c.pm_phi1_cosphi2,
pm_phi2=gd1_c.pm_phi2,
radial_velocity=[0]*len(gd1_c)*u.km/u.s)
# Correct for reflex motion
v_sun = coord.Galactocentric.galcen_v_sun
observed = gd1_c_dist.transform_to(coord.Galactic)
rep = observed.cartesian.without_differentials()
rep = rep.with_differentials(observed.cartesian.differentials['s'] + v_sun)
# gd1_c_nosunv = coord.Galactic(rep).transform_to(gc.GD1)
gd1_c = coord.Galactic(rep).transform_to(gc.GD1)
# +
fig, axes = plt.subplots(1, 2, figsize=(12, 6))
ax = axes[0]
ax.plot(g.pmra, g.pmdec, marker=',', linestyle='none', alpha=0.1)
ax.set_xlim(-15, 15.)
ax.set_ylim(-15, 15)
ax.set_xlabel(r'$\mu_\alpha$')
ax.set_ylabel(r'$\mu_\delta$')
ax = axes[1]
ax.plot(gd1_c.pm_phi1_cosphi2, gd1_c.pm_phi2, marker=',', linestyle='none', alpha=0.1)
ax.set_xlim(-30, 30.)
ax.set_ylim(-30, 30)
ax.set_xlabel(r'$\mu_{\phi,1}$')
ax.set_ylabel(r'$\mu_{\phi,2}$')
fig.tight_layout()
# -
# pm_mask = ((gd1_c.pm_phi1_cosphi2 < -10*u.mas/u.yr) & (gd1_c.pm_phi1_cosphi2 > -25*u.mas/u.yr) &
# (gd1_c.pm_phi2 < -2.5*u.mas/u.yr) & (gd1_c.pm_phi2 > -7.5*u.mas/u.yr) &
# (g.bp_rp < 1.5*u.mag) & (g.bp_rp > 0*u.mag))
pm_mask = ((gd1_c.pm_phi1_cosphi2 < -5*u.mas/u.yr) & (gd1_c.pm_phi1_cosphi2 > -10*u.mas/u.yr) &
(gd1_c.pm_phi2 < 1*u.mas/u.yr) & (gd1_c.pm_phi2 > -2*u.mas/u.yr) &
(g.bp_rp < 1.5*u.mag) & (g.bp_rp > 0*u.mag))
pm_mask.sum()
# +
fig, ax = plt.subplots(1, 1, figsize=(12, 4))
ax.plot(gd1_c.phi1[pm_mask], gd1_c.phi2[pm_mask], marker='.', alpha=0.5, linestyle='none')
# ax.plot(gd1_c.phi1, gd1_c.phi2, marker='.', alpha=0.05, linestyle='none')
# +
phi2_mask = np.abs(gd1_c.phi2) < 1*u.deg
fig, axes = plt.subplots(1, 2, figsize=(12, 6))
ax = axes[0]
ax.plot(g.pmra[phi2_mask], g.pmdec[phi2_mask], marker=',', linestyle='none', alpha=0.25)
ax.set_xlim(-30, 30.)
ax.set_ylim(-30, 30)
ax.set_xlabel(r'$\mu_\alpha$')
ax.set_ylabel(r'$\mu_\delta$')
ax = axes[1]
ax.plot(gd1_c.pm_phi1_cosphi2.to(u.mas/u.yr)[phi2_mask],
gd1_c.pm_phi2.to(u.mas/u.yr)[phi2_mask], marker=',', linestyle='none', alpha=0.25)
ax.set_xlim(-30, 30.)
ax.set_ylim(-30, 30)
ax.set_xlabel(r'$\mu_{\phi,1}$')
ax.set_ylabel(r'$\mu_{\phi,2}$')
fig.set_facecolor('w')
fig.tight_layout()
# +
color_mask = ((g.phot_g_mean_mag > (5*g.bp_rp + 12.5*u.mag)) |
(g.phot_g_mean_mag < (5*g.bp_rp + 9.5*u.mag)))
fig, ax = plt.subplots(1, 1, figsize=(6, 6))
ax.plot(g.bp_rp[pm_mask], g.phot_g_mean_mag[pm_mask],
marker='.', linestyle='none', alpha=0.2)
ax.plot(g.bp_rp[pm_mask & color_mask], g.phot_g_mean_mag[pm_mask & color_mask],
marker='.', linestyle='none', alpha=0.2)
xx = np.linspace(0, 2, 128)
ax.plot(xx, 5*xx + 12.5)
ax.plot(xx, 5*xx + 9.5)
ax.set_ylim(20, 10)
ax.set_xlim(-0.5, 3.)
fig.tight_layout()
# +
phi2_mask = np.abs(gd1_c.phi2) < 1*u.deg
fig, axes = plt.subplots(1, 2, figsize=(12, 6))
ax = axes[0]
ax.plot(g.pmra[phi2_mask & color_mask], g.pmdec[phi2_mask & color_mask],
marker=',', linestyle='none', alpha=0.25)
ax.set_xlim(-30, 30.)
ax.set_ylim(-30, 30)
ax.set_xlabel(r'$\mu_\alpha$')
ax.set_ylabel(r'$\mu_\delta$')
ax = axes[1]
ax.plot(gd1_c.pm_phi1_cosphi2.to(u.mas/u.yr)[phi2_mask & color_mask],
gd1_c.pm_phi2.to(u.mas/u.yr)[phi2_mask & color_mask],
marker='.', linestyle='none', alpha=0.25)
ax.set_xlim(-12, 0.)
ax.set_ylim(-4, 5)
ax.set_xlabel(r'$\mu_{\phi,1}$')
ax.set_ylabel(r'$\mu_{\phi,2}$')
fig.set_facecolor('w')
fig.tight_layout()
# -
better_pm_mask = ((gd1_c.pm_phi1_cosphi2 < -6*u.mas/u.yr) & (gd1_c.pm_phi1_cosphi2 > -8.5*u.mas/u.yr) &
(gd1_c.pm_phi2 < 0.5*u.mas/u.yr) & (gd1_c.pm_phi2 > -2*u.mas/u.yr) &
(g.bp_rp < 1.5*u.mag) & (g.bp_rp > 0*u.mag))
better_pm_mask.sum()
# +
fig, ax = plt.subplots(1, 1, figsize=(12, 4))
ax.plot(gd1_c.phi1[color_mask & better_pm_mask],
gd1_c.phi2[color_mask & better_pm_mask],
marker='.', alpha=0.5, linestyle='none')
ax.set_xlabel('$\phi_1$ [degree]')
ax.set_ylabel('$\phi_2$ [degree]')
fig.savefig('/Users/adrian/Downloads/GD1.pdf')
# -
bins1 = np.arange(280, 360+0.5, 0.3)
bins2 = np.arange(-5, 5+0.5, 0.3)
from scipy.ndimage import gaussian_filter1d
thing = np.log(H.T).ravel()
plt.hist(thing[np.isfinite(thing)], bins='auto');
H, xe, ye = np.histogram2d(gd1_c.phi1[color_mask & better_pm_mask],
gd1_c.phi2[color_mask & better_pm_mask],
bins=(bins1, bins2))
H = gaussian_filter1d(H, 0.5)
fig, ax = plt.subplots(1, 1, figsize=(15, 3))
# plt.pcolormesh(xe, ye, H.T)
plt.pcolormesh(xe, ye, np.log(H.T), vmin=-1, vmax=0.5)
# +
OMG_mask = ((gd1_c.phi1 > 320*u.deg) & (gd1_c.phi1 < 330*u.deg) &
(gd1_c.phi2 > 0.5*u.deg) & (gd1_c.phi2 < 1.7*u.deg))
OMG_stream_mask = ((gd1_c.phi1 > 320*u.deg) & (gd1_c.phi1 < 330*u.deg) &
(gd1_c.phi2 > -0.5*u.deg) & (gd1_c.phi2 < 0.5*u.deg))
OMG_pm_mask = np.sqrt( (gd1_c.pm_phi1_cosphi2 - (-8*u.mas/u.yr))**2 +
(gd1_c.pm_phi2 - (-0.6*u.mas/u.yr))**2 ) < 0.7*u.mas/u.yr
OMG_control_pm_mask = np.sqrt( (gd1_c.pm_phi1_cosphi2 - (-8*u.mas/u.yr))**2 +
(gd1_c.pm_phi2 - (1.6*u.mas/u.yr))**2 ) < 0.7*u.mas/u.yr
(color_mask & OMG_mask).sum()
# -
(color_mask & OMG_mask).sum()
# +
fig, axes = plt.subplots(1, 2, figsize=(12, 6), sharex=True, sharey=True)
ax = axes[0]
ax.plot(gd1_c.pm_phi1_cosphi2.to(u.mas/u.yr)[color_mask & OMG_mask],
gd1_c.pm_phi2.to(u.mas/u.yr)[color_mask & OMG_mask],
marker='.', linestyle='none', alpha=0.25)
ax.set_xlim(-12, 0.)
ax.set_ylim(-4, 5)
ax.set_xlabel(r'$\mu_{\phi,1}$')
ax.set_ylabel(r'$\mu_{\phi,2}$')
ax.scatter(-8, -0.6, marker='o', facecolor='none',
edgecolor='k', linewidth=1, s=1200)
ax = axes[1]
ax.plot(gd1_c.pm_phi1_cosphi2.to(u.mas/u.yr)[color_mask & OMG_stream_mask],
gd1_c.pm_phi2.to(u.mas/u.yr)[color_mask & OMG_stream_mask],
marker='.', linestyle='none', alpha=0.25)
# ax.scatter(-13.2, -3.4, marker='o', facecolor='none',
# edgecolor='k', linewidth=1, s=700)
ax.scatter(-7.9, -0.4, marker='o', facecolor='none',
edgecolor='k', linewidth=1, s=1500)
fig.set_facecolor('w')
fig.tight_layout()
# -
d_pm = np.sqrt((-13.2 - (-12.9))**2 + .4**2) * u.mas/u.yr
(d_pm * 8*u.kpc).to(u.km/u.s, u.dimensionless_angles())
# +
fig, ax = plt.subplots(1, 1, figsize=(6, 6))
ax.plot(g.bp_rp[pm_mask], g.phot_g_mean_mag[pm_mask],
marker='.', linestyle='none', alpha=0.2)
ax.plot(g.bp_rp[color_mask & OMG_mask & OMG_pm_mask], g.phot_g_mean_mag[color_mask & OMG_mask & OMG_pm_mask],
marker='o', linestyle='none', alpha=1., color='yellow', markeredgecolor='k', markeredgewidth=1)
ax.plot(g.bp_rp[color_mask & OMG_mask & OMG_control_pm_mask],
g.phot_g_mean_mag[color_mask & OMG_mask & OMG_control_pm_mask],
marker='o', linestyle='none', alpha=1., color='tab:red', markeredgecolor='k', markeredgewidth=1)
ax.set_ylim(20, 10)
ax.set_xlim(-0.5, 3.)
fig.tight_layout()
# -
| notebooks/GD1.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# Tutorial 5: Borders
# ===================
#
# In the previous tutorial, we told our `Inversion` to use a border. Here, we'll discuss what this border does.
# +
# %matplotlib inline
from pyprojroot import here
workspace_path = str(here())
# %cd $workspace_path
print(f"Working Directory has been set to `{workspace_path}`")
from os import path
import autolens as al
import autolens.plot as aplt
# -
# we'll use the same strong lensing data as the previous tutorial, where:
#
# - The lens `Galaxy`'s light is omitted.
# - The lens `Galaxy`'s total mass distribution is an `EllipticalIsothermal`.
# - The source `Galaxy`'s `LightProfile` is an `EllipticalSersic`.
# +
dataset_name = "mass_sie__source_sersic__2"
dataset_path = path.join("dataset", "howtolens", "chapter_4", dataset_name)
imaging = al.Imaging.from_fits(
image_path=path.join(dataset_path, "image.fits"),
noise_map_path=path.join(dataset_path, "noise_map.fits"),
psf_path=path.join(dataset_path, "psf.fits"),
pixel_scales=0.05,
)
aplt.Imaging.subplot_imaging(imaging=imaging)
# -
# So, what is a border? In the image-plane, a border is the set of exterior pixels in a mask that are at, well, its
# border. Lets plot the image with a circular `Mask2D`, and tell our `Imaging` `Plotter`.to plot the border as well.
# +
mask_circular = al.Mask2D.circular(
shape_2d=imaging.shape_2d, pixel_scales=imaging.pixel_scales, sub_size=2, radius=2.5
)
aplt.Imaging.subplot_imaging(
imaging=imaging, mask=mask_circular, include=aplt.Include(border=True)
)
# -
# As you can see, for a circular `Mask2D`, the border *is* the edge of our `Mask2D` (the ring of black dots we're used to
# seeing whenever we plot a `Mask2D`). For an annular `Mask2D`, not every pixel on the edge of the mask is necessarily a part
# of its border!
# +
mask_annular = al.Mask2D.circular_annular(
shape_2d=imaging.shape_2d,
pixel_scales=imaging.pixel_scales,
sub_size=2,
inner_radius=0.8,
outer_radius=2.5,
)
aplt.Imaging.subplot_imaging(
imaging=imaging, mask=mask_annular, include=aplt.Include(border=True)
)
# -
# Indeed, a border is *only* the pixels at the exterior edge of our `Mask2D`, which for the annular `Mask2D` above means
# none of the pixels at the inner radius = 0.8" edge are part of the border.
#
# So, what does a border actually do? To show you, we'll need to fit this image with a lens model and `Mapper` and we'll
# do that by using the same function as the previous tutorial (to perform a quick source galaxy fit) but with the option
# to input a mask and use a border.
def perform_fit_with_source_galaxy_mask_and_border(
    imaging, source_galaxy, mask, settings_pixelization
):
    """Quickly fit `imaging` with a fixed SIE lens model and the given source.

    The lens mass model is hard-coded (EllipticalIsothermal centred at the
    origin, einstein_radius=1.6) so the tutorial can focus purely on how the
    mask and the pixelization border affect the source reconstruction.

    Parameters
    ----------
    imaging : al.Imaging
        The imaging data to fit.
    source_galaxy : al.Galaxy
        Source galaxy carrying the `Pixelization` and `Regularization`.
    mask : al.Mask2D
        Mask applied to the imaging data (a sub-grid of size 2 is used).
    settings_pixelization : al.SettingsPixelization
        Controls pixelization behaviour, in particular `use_border`.

    Returns
    -------
    al.FitImaging
        The fit of the tracer to the masked imaging data.
    """
    # Mask the data; sub_size=2 over-samples each image pixel on a 2x2 sub-grid.
    masked_data = al.MaskedImaging(
        imaging=imaging, mask=mask, settings=al.SettingsMaskedImaging(sub_size=2)
    )

    # The fixed lens model used throughout this tutorial.
    sie_lens = al.Galaxy(
        redshift=0.5,
        mass=al.mp.EllipticalIsothermal(
            centre=(0.0, 0.0), elliptical_comps=(0.1, 0.0), einstein_radius=1.6
        ),
    )

    ray_tracer = al.Tracer.from_galaxies(galaxies=[sie_lens, source_galaxy])

    return al.FitImaging(
        masked_imaging=masked_data,
        tracer=ray_tracer,
        settings_pixelization=settings_pixelization,
    )
# Okay, so lets first look at our `Mapper` without using a border using our annular `Mask2D`.
#
# First, note how we set up the border. We use a `SettingsPixelization` object, which is analogous to the
# `SettingsMaskedImaging` and `SettingsLens` objects we used in previous tutorials. Later, you'll see how these
# settings can also be passed to a `SettingsPhaseImaging` object, to control the behaviour of the `Pixelization` during a
# model-fit.
# +
source_galaxy = al.Galaxy(
redshift=1.0,
pixelization=al.pix.Rectangular(shape=(40, 40)),
regularization=al.reg.Constant(coefficient=1.0),
)
fit = perform_fit_with_source_galaxy_mask_and_border(
imaging=imaging,
source_galaxy=source_galaxy,
mask=mask_annular,
settings_pixelization=al.SettingsPixelization(use_border=False),
)
aplt.Inversion.reconstruction(
inversion=fit.inversion, include=aplt.Include(inversion_grid=True)
)
# -
# Everything looks fine - we get a reconstructed source on a visually appealing source-plane grid. So, why are we so
# worried about borders? Lets see what happens if we use a circular mask instead.
# +
fit = perform_fit_with_source_galaxy_mask_and_border(
imaging=imaging,
source_galaxy=source_galaxy,
mask=mask_circular,
settings_pixelization=al.SettingsPixelization(use_border=False),
)
aplt.Inversion.reconstruction(
inversion=fit.inversion, include=aplt.Include(inversion_grid=True)
)
# -
# Woah - whats happened? There are lots of extra points on our source-plane `Grid` which trace to extremely large radii
# away from the central regions of the source-plane! These points are traced image-pixels (just like all the other points)
# which correspond to the central image-pixels that our annular `Mask2D` masked but that our circular `Mask2D` didn`t!
#
# Lets quickly check this using a `Mapper` `Plotter`.
aplt.Mapper.subplot_image_and_mapper(
image=fit.masked_imaging.image,
mapper=fit.inversion.mapper,
include=aplt.Include(inversion_grid=True),
image_pixel_indexes=[
[3578, 3579, 3580, 3581, 3582],
[3678, 3679, 3680, 3681, 3682],
[3778, 3779, 3780, 3781, 3782],
[3878, 3879, 3880, 3881, 3882],
[3978, 3979, 3980, 3981, 3982],
[4078, 4079, 4080, 4081, 4082],
[4178, 4179, 4180, 4181, 4182],
],
)
# So, whats happening physically? Towards the centre of our `EllipticalIsothermal` `MassProfile`.the density profile
# becomes extremely cuspy (rising very sharply). This cause extremely large deflection angles to be computed, lets have
# a quick look.
aplt.Tracer.deflections_y(tracer=fit.tracer, grid=fit.grid)
aplt.Tracer.deflections_x(tracer=fit.tracer, grid=fit.grid)
# This means that our central image pixels are highly demagnified, tracing to extremely large values in the source plane!
#
# Physically, this isn't a problem, it just means that we don't see a `central image` in most strong lenses as light-rays
# which trace through the centre of the lens are demagnified. However, if the lens galaxy had a cored mass distribution
# we would see the central image.
#
# This is a problem for our `Pixelization` and `Mapper`, which in the source-plane fits these demagnified pixels like
# any other pixels. This has two negative consequences:
#
# 1) The `Rectangular` `Pixelization`.we `overlay` over the source-plane is much larger than for the annular `Mask2D`
# because it has to expand to include the demagnified image-pixels. As a result, large source-pixels are used to
# reconstruct the central regions of the source-plane (where the source galaxy is actually located), meaning we
# reconstruct the source-galaxy at a lower effective resolution.
#
# 2) The `Rectangular` `Pixelization` reconstructs the flux of the demagnified image pixels using source-pixels
# which contain *only* demagnified image pixels. However, these source-pixels *should* have other image-pixels
# traced within them from pixels at large radii from the centre of the lens galaxy. Unfortunately, our circular
# `Mask2D` masks these pixels out, meaning they do not make it to our source-plane and are omitted from the source
# reconstruction.
#
# Lets quickly use a larger circular `Mask2D` to confirm that these pixels do exist, if we don't mask them.
# +
mask_circular_large = al.Mask2D.circular(
shape_2d=imaging.shape_2d, pixel_scales=imaging.pixel_scales, radius=4.0
)
fit = perform_fit_with_source_galaxy_mask_and_border(
imaging=imaging,
source_galaxy=source_galaxy,
mask=mask_circular,
settings_pixelization=al.SettingsPixelization(use_border=False),
)
aplt.Inversion.reconstruction(
inversion=fit.inversion, include=aplt.Include(inversion_grid=True)
)
# -
# This second point is a *huge* problem, as allowing source-pixels to fit regions of our mask in this completely
# unphysical way introduces extremely dangerous systematics into our source reconstruction and lens model analysis.
# You can see this in the weird patterns these pixels make in the exterior regions of our source-reconstruction!
#
# Borders are the solution to this problem. We simply take the `Mask2D` border in the image-plane we showed above, trace
# it to the source-plane and relocate all traced image-pixels pixels outside this source-plane border to its edge. Lets
# take a look.
# +
fit = perform_fit_with_source_galaxy_mask_and_border(
imaging=imaging,
source_galaxy=source_galaxy,
mask=mask_circular,
settings_pixelization=al.SettingsPixelization(use_border=True),
)
aplt.Inversion.reconstruction(
inversion=fit.inversion, include=aplt.Include(inversion_grid=True)
)
aplt.Mapper.subplot_image_and_mapper(
image=fit.masked_imaging.image,
mapper=fit.inversion.mapper,
include=aplt.Include(inversion_grid=True),
image_pixel_indexes=[[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10]],
)
# -
# This successfully addresses both of the issues above! However, you might be thinking, isn't that a bit of a hack? Its
# not really a physical treatment of the ray-tracing, is it?
#
# Well, you're right. However, the *only* physical way to do this would be to use a `Mask2D` so large that all demangified
# central pixels are surrounded by traced image-pixels. This would require a `Mask2D` so large our computer would crash,
# That`s not a good solution, thus borders provide us with a workaround, one that I've extensively tested and have found
# that, provided your `Mask2D` isn't too small, doesn`t lead to systematic biases.
#
# Next, I'm going to quickly highlight how important borders are when modeling multiple lens galaxies. Their complex
# mass distribution and lensing configuration often produce very nasty edge effects where image pixels not just in the
# centre of mask, but anywhere in the mask, trace beyond the source-plane border.
# we'll use new strong lensing data as the previous tutorial, where:
#
# - The lens `Galaxy`'s light is omitted.
# - There are two lens galaxies whose `MassProfile`'s are `EllipticalIsothermal``..
# - The source `Galaxy`'s `LightProfile` is an `EllipticalSersic`.
# +
dataset_name = "mass_sie_x2__source_sersic"
dataset_path = path.join("dataset", "howtolens", "chapter_4", dataset_name)
imaging = al.Imaging.from_fits(
image_path=path.join(dataset_path, "image.fits"),
noise_map_path=path.join(dataset_path, "noise_map.fits"),
psf_path=path.join(dataset_path, "psf.fits"),
pixel_scales=0.05,
)
# -
# We again must define a mask around this image, lets start with a 2.8" mask. we'll use larger masks to illustrate the
# effects of the border in a moment.
# +
mask_circular = al.Mask2D.circular(
shape_2d=imaging.shape_2d, pixel_scales=imaging.pixel_scales, sub_size=2, radius=2.8
)
aplt.Imaging.subplot_imaging(
imaging=imaging, mask=mask_circular, include=aplt.Include(border=True)
)
# -
# We need to redefine our perform fit function, to use the x2 lens galaxy model.
def perform_fit_x2_lenses_with_source_galaxy_mask_and_border(
    imaging, source_galaxy, mask, settings_pixelization
):
    """Fit `imaging` with a fixed two-lens (x2 SIE) model and the given source.

    Both lens galaxies use hard-coded `EllipticalIsothermal` mass profiles;
    only the mask and pixelization settings vary between calls, so the
    tutorial can illustrate border effects in multi-galaxy lens systems.

    Parameters
    ----------
    imaging : al.Imaging
        The imaging data to fit.
    source_galaxy : al.Galaxy
        Source galaxy carrying the `Pixelization` and `Regularization`.
    mask : al.Mask2D
        Mask applied to the imaging data (default sub-grid size).
    settings_pixelization : al.SettingsPixelization
        Controls pixelization behaviour, in particular `use_border`.

    Returns
    -------
    al.FitImaging
        The fit of the two-lens tracer to the masked imaging data.
    """
    masked_data = al.MaskedImaging(imaging=imaging, mask=mask)

    # First lens galaxy of the pair.
    lens_0 = al.Galaxy(
        redshift=0.5,
        mass=al.mp.EllipticalIsothermal(
            centre=(1.1, 0.51), elliptical_comps=(0.0, 0.15), einstein_radius=1.07
        ),
    )

    # Second lens galaxy of the pair.
    lens_1 = al.Galaxy(
        redshift=0.5,
        mass=al.mp.EllipticalIsothermal(
            centre=(-0.20, -0.35), elliptical_comps=(0.06, 0.1053), einstein_radius=0.71
        ),
    )

    ray_tracer = al.Tracer.from_galaxies(
        galaxies=[lens_0, lens_1, source_galaxy]
    )

    return al.FitImaging(
        masked_imaging=masked_data,
        tracer=ray_tracer,
        settings_pixelization=settings_pixelization,
    )
# Now, lets fit this image using the input model and perform the source reconstruction without a border. As you can see,
# we get many demagnified image pixels which trace well beyond our source-plane border if we don't relocate them!
# +
fit = perform_fit_x2_lenses_with_source_galaxy_mask_and_border(
imaging=imaging,
source_galaxy=source_galaxy,
mask=mask_circular,
settings_pixelization=al.SettingsPixelization(use_border=False),
)
aplt.Inversion.reconstruction(
inversion=fit.inversion, include=aplt.Include(inversion_grid=True, border=True)
)
# -
# However, when we relocate them, we get a good-looking source-plane with a well defined border and edge, thus ensuring
# our analysis will (hopefully) be free of systematic biases.
fit = perform_fit_x2_lenses_with_source_galaxy_mask_and_border(
imaging=imaging,
source_galaxy=source_galaxy,
mask=mask_circular,
settings_pixelization=al.SettingsPixelization(use_border=True),
)
aplt.Inversion.reconstruction(
inversion=fit.inversion, include=aplt.Include(inversion_grid=True, border=True)
)
# Multi-galaxy modeling is rife for border effects and if you have multiple lens galaxies I heartily recommend you pay
# a close eye to your source-plane borders!
#
# Before we end, I want to quickly highlight that care must be taken when choosing the size of your mask. If you don't
# choose a big enough mask, the border won't be able to relocate all of the demagnified image pixels to the border edge.
# +
mask_circular = al.Mask2D.circular(
shape_2d=imaging.shape_2d, pixel_scales=imaging.pixel_scales, sub_size=2, radius=2.5
)
fit = perform_fit_x2_lenses_with_source_galaxy_mask_and_border(
imaging=imaging,
source_galaxy=source_galaxy,
mask=mask_circular,
settings_pixelization=al.SettingsPixelization(use_border=True),
)
aplt.Inversion.reconstruction(
inversion=fit.inversion, include=aplt.Include(inversion_grid=True, border=True)
)
mask_circular = al.Mask2D.circular(
shape_2d=imaging.shape_2d, pixel_scales=imaging.pixel_scales, sub_size=2, radius=2.7
)
fit = perform_fit_x2_lenses_with_source_galaxy_mask_and_border(
imaging=imaging,
source_galaxy=source_galaxy,
mask=mask_circular,
settings_pixelization=al.SettingsPixelization(use_border=True),
)
aplt.Inversion.reconstruction(
inversion=fit.inversion, include=aplt.Include(inversion_grid=True, border=True)
)
mask_circular = al.Mask2D.circular(
shape_2d=imaging.shape_2d, pixel_scales=imaging.pixel_scales, sub_size=2, radius=2.9
)
fit = perform_fit_x2_lenses_with_source_galaxy_mask_and_border(
imaging=imaging,
source_galaxy=source_galaxy,
mask=mask_circular,
settings_pixelization=al.SettingsPixelization(use_border=True),
)
aplt.Inversion.reconstruction(
inversion=fit.inversion, include=aplt.Include(inversion_grid=True, border=True)
)
mask_circular = al.Mask2D.circular(
shape_2d=imaging.shape_2d, pixel_scales=imaging.pixel_scales, sub_size=2, radius=3.1
)
fit = perform_fit_x2_lenses_with_source_galaxy_mask_and_border(
imaging=imaging,
source_galaxy=source_galaxy,
mask=mask_circular,
settings_pixelization=al.SettingsPixelization(use_border=True),
)
aplt.Inversion.reconstruction(
inversion=fit.inversion, include=aplt.Include(inversion_grid=True, border=True)
)
# -
# And with that, borders are done. In truth, borders should pretty much take care of themselves when you're using
# **PyAutoLens** and you probably won't think about them much. However, as I showed above, if you don't choose a large enough
# mask things can go wrong - thus, its important you know what borders are, so you can look out for this potential
# source of systematics!
| howtolens/chapter_4_inversions/tutorial_5_borders.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] id="ideal-characteristic"
# # Efficient Frontier
# + id="young-welding"
import pandas as pd
ind = pd.read_csv('data/ind30_m_vw_rets.csv', header=0, index_col=0, parse_dates=True)/100
# + id="offensive-pixel" outputId="ca85f297-9e92-44be-c6f7-136395ce81e0"
ind.head()
# + id="uniform-stretch" outputId="27b2f628-ef7c-441d-d2da-f3a2475f8a55"
ind.index
# + id="deluxe-comparison" outputId="6637150f-cde1-46f9-95c8-224db3548e52"
ind.index = pd.to_datetime(ind.index, format="%Y%m").to_period('M')
ind
# + id="coupled-inspector" outputId="eae4e49e-ba45-48bb-dd63-5727dae86077"
ind.columns
# + id="expensive-spanking" outputId="08ea2712-a81d-442f-d3d6-972b5879fe93"
ind.columns = ind.columns.str.strip()
ind.columns
# + id="amended-posting" outputId="cdc9b545-ac9a-4479-a565-89512691fed3"
ind.shape
# + id="reflected-intro"
# %load_ext autoreload
# %autoreload 2
# %matplotlib inline
# + id="statistical-camera"
import edhec_risk_kit as erk
# + id="champion-silicon" outputId="ba40cae4-deb9-43aa-c359-a242d77d8b5f"
ind = erk.get_ind_returns()
ind.shape, ind.head()
# + id="compact-newsletter" outputId="61e75186-40be-40db-fb33-fd9c911fd2f1"
erk.drawdown(ind['Food'])['Drawdown'].plot.line(figsize=(12,6))
# + id="fifteen-threshold" outputId="6bd0aa6a-33c4-4aae-9860-0e94bf60023e"
erk.var_gaussian(ind[['Food', 'Smoke', 'Beer', 'Fin']], modified=True)
# + id="southern-abuse" outputId="3cbe6015-4656-456d-a67b-1f3b79990268"
erk.var_gaussian(ind, modified=True).sort_values().tail()
# + id="blocked-occurrence" outputId="36d4d634-9ac3-4fb5-9eb4-ea6411ffe618"
erk.var_gaussian(ind, modified=True).sort_values().head()
# + id="circular-scope" outputId="1c8e449e-3717-477c-ec25-6387a395ac8d"
erk.var_gaussian(ind, modified=True).sort_values().plot.bar()
# + id="immune-unknown" outputId="1c322ea1-ca1c-4949-bc1b-a5589b446272"
erk.sharpe_ratio(ind, riskfree_rate=0.03, periods_per_year=12).sort_values().plot.bar(title='Industry Sharpe ratios 1926-2018', color='green', figsize=(12,6))
# + id="declared-green" outputId="d3d4a7ce-5b57-4532-d1af-178e6c68a84a"
erk.sharpe_ratio(ind['2000':], riskfree_rate=0.03, periods_per_year=12).sort_values().plot.bar(title='Industry Sharpe ratios 2000-2018', color='goldenrod', figsize=(12,6))
# + id="italic-survey"
er = erk.annualize_rets(ind['1995':'2000'], 12)
# + id="printable-helmet" outputId="2d9990b6-3e87-4167-dc5a-04f54a2242c4"
er.sort_values().plot.bar()
# + id="commercial-norman" outputId="9ee09e4d-db54-4a4b-e56d-c5d4dd0c5b6a"
cov = ind['1995':'2000'].cov()
cov.shape
# + id="satisfactory-active"
| course1portfolio_construction/Copy_of_EfficientFrontier.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 2
# language: python
# name: python2
# ---
# # An example of Logistic Regression
#
# This notebook is a brief explanation of how we can build a linear Machine Learning model in Python using [scikit-learn](http://scikit-learn.org). If you are unfamiliar with Logistic Regression, you can start by reading on [Wikipedia](https://en.wikipedia.org/wiki/Logistic_regression), or watch this very interesting series of [videos](https://www.youtube.com/watch?v=-la3q9d7AKQ) to get you up to speed.
#
# This example is built on the dataset provided for the [Kaggle competition](https://www.kaggle.com/c/titanic) "Titanic: Machine Learning from Disaster". We are basically given information on the people who were on board, and we must build a model to predict what sort of people would have a better chance of survival ([spoiler](https://en.wikipedia.org/wiki/Women_and_children_first)).
#
# This is a classic example of a binary classification problem, since there are only two possible outcomes for each person. Logistic Regression is a popular choice because of its simplicity, and it can be accurate enough in certain situations. In this example, we will use this method to predict whether a person will survive or not based on their age, sex and the class of their ticket. So... let's do it.
# Importing the libraries
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
sns.set(style="white")
# Importing the dataset
dataset = pd.read_csv('../Data/train_titanic.csv')
dataset = dataset[dataset['Age'].notnull()]
cat_cols = ['Sex' ]
dataset = pd.get_dummies(dataset, columns = cat_cols)
X = dataset[['Pclass','Age','Sex_male']].values
y = dataset['Survived'].values
# ### Training set and Test set
# Kaggle gives us a test set, but for this example we'd rather split it ourselves so we can visualize how the model is performing. We will keep 80% of the data in the training set.
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.2, random_state = 0)
# ### Feature scaling
# We apply feature scaling to normalize the range of each variable. This ensures that each feature contributes approximately the same to the distances computed in the objective function. Note that both the training and the test set must be scaled.
from sklearn.preprocessing import StandardScaler
sc = StandardScaler()
X_train = sc.fit_transform(X_train)
X_test = sc.transform(X_test)
# ### Fitting Logistic Regression to the Training set
# Once the data is clean and ready, we can build the classifier and fit it to the training data.
from sklearn.linear_model import LogisticRegression
classifier = LogisticRegression(random_state = 0)
classifier.fit(X_train, y_train)
# ### Predicting on the test set
# Once the model has learned from the training data, we can make predictions on the test data using predict.
y_pred = classifier.predict(X_test)
# ### Assessing the model's performance
# There are several ways to assess how good our predictions are. One of them is the [confusion matrix](http://www.dataschool.io/simple-guide-to-confusion-matrix-terminology/). This will quickly let us see how many good and bad predictions the model is making. Alternatively, we can use the accuracy score, which gives us the ratio of correctly classified samples.
from sklearn.metrics import confusion_matrix, accuracy_score
# Confusion matrix: rows are true classes, columns are predicted classes.
cmatrix = confusion_matrix(y_test, y_pred)
# Accuracy: fraction of test samples classified correctly.
ascore = accuracy_score(y_test, y_pred)
# Use function-call print so the cell runs under both Python 2 and Python 3
# (the original `print "..."` statements are a SyntaxError on Python 3).
print("Accuracy score:")
print(ascore)
print("\nConfusion matrix:")
print(cmatrix)
# ### Bananas!
# So we have built a model with a fairly good accuracy (circa 85%). I am sure we can do better, but we can be proud of these numbers with a linear model that uses only three features.
# ## Visualizing our results
# We have used the accuracy score to assess our model's performance. This is very useful, but plotting the results is a more exciting way of looking at our work. So let's do that. We will first plot the predicted results, together with the decision boundary.
#
# The decision boundary separates our vector space in two regions (for the two possible outcomes in this case study, died or survived). Since we have three features, and we are using a linear model, the decision boundary is a plane that we can visualize on a 3-dimensional plot.
# +
# Build a 10x10 mesh spanning the (class, age) extent of the test set, used to
# draw the model's decision boundary as a surface.
x_surf = np.linspace(np.min(X_test[:,0]), np.max(X_test[:,0]), 10) # generate a mesh
y_surf = np.linspace(np.min(X_test[:,1]), np.max(X_test[:,1]),10)
x_surf, y_surf = np.meshgrid(x_surf, y_surf)
# The logistic-regression boundary is the plane w0*x + w1*y + w2*z + b = 0;
# solving for z gives z = -(b + w0*x + w1*y) / w2, evaluated on the mesh.
z_surf = -(classifier.intercept_ + classifier.coef_[0][0]*x_surf + classifier.coef_[0][1]*y_surf)/classifier.coef_[0][2]
# +
from mpl_toolkits.mplot3d import axes3d
fig = plt.figure(figsize=(12,8))
ax = fig.add_subplot(111, projection='3d')
died = np.where(y_pred==0)[0]
survived = np.where(y_pred==1)[0]
p=ax.scatter(xs = X_test[died,0], ys =X_test[died,1], zs=X_test[died,2],
zdir='z', s=20, c='red',label = 'Died')
p=ax.scatter(xs = X_test[survived,0], ys =X_test[survived,1], zs=X_test[survived,2],
zdir='z', s=20, c='blue',label = 'Survived')
ax.set_xlabel('Class')
ax.set_ylabel('Age')
ax.set_zlabel('Sex')
ax.legend()
ax.set_title('Model Predictions')
ax.zaxis.set_ticks(np.unique(X_test[:,2]))
ax.zaxis.set_ticklabels(['Female','Male'])
ax.xaxis.set_ticks(np.unique(X_test[:,0]))
ax.xaxis.set_ticklabels(['1','2','3'])
ax.plot_surface(x_surf, y_surf,
z_surf,
rstride=1,
cstride=1,
color='None',
alpha = 0.4)
plt.show()
# -
# ## Visualizing our results (II)
# Since we know what our model is predicting on each side of the decision boundary, we can color the points using the actual data. This will let us visualize where our model is failing.
# +
from mpl_toolkits.mplot3d import axes3d
fig = plt.figure(figsize=(12,8))
ax = fig.add_subplot(111, projection='3d')
died = np.where(y_test==0)[0]
survived = np.where(y_test==1)[0]
p=ax.scatter(xs = X_test[died,0], ys =X_test[died,1], zs=X_test[died,2],
zdir='z', s=20, c='red',label = 'Died')
p=ax.scatter(xs = X_test[survived,0], ys =X_test[survived,1], zs=X_test[survived,2],
zdir='z', s=20, c='blue',label = 'Survived')
ax.set_xlabel('Class')
ax.set_ylabel('Age')
ax.set_zlabel('Sex')
ax.legend()
ax.set_title('Actual Data')
ax.zaxis.set_ticks(np.unique(X_test[:,2]))
ax.zaxis.set_ticklabels(['Female','Male'])
ax.xaxis.set_ticks(np.unique(X_test[:,0]))
ax.xaxis.set_ticklabels(['1','2','3'])
ax.plot_surface(x_surf, y_surf,
z_surf,
rstride=1,
cstride=1,
color='None',
alpha = 0.4)
plt.show()
# -
# ### And...
#
# that's it! I hope you liked it. Please get in touch if you have any suggestions or comments.
| LogisticRegression1/LogisticRegressionTitanic.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="o3y0qY3D4JhX" colab_type="text"
# # Using ARIMA to predict COVID19 cases
# + [markdown] id="Nz8ds2qh4Wdu" colab_type="text"
# ## Getting data
# + id="rODXb_0rJXLX" colab_type="code" outputId="eafe86e3-2ad2-49d1-805d-f4d2797a5bb1" colab={"base_uri": "https://localhost:8080/", "height": 126}
# !git clone https://github.com/lrssv/TimeSeriesForecastingCOVID19
# Useful tutorials
# https://www.digitalocean.com/community/tutorials/a-guide-to-time-series-forecasting-with-arima-in-python-3
# https://www.statsmodels.org/dev/examples/notebooks/generated/statespace_sarimax_stata.html
# + id="9Hp9MHOOJYNk" colab_type="code" outputId="ac11f9ab-152e-4f8a-d2ee-f4db6691f255" colab={"base_uri": "https://localhost:8080/", "height": 54}
import warnings
import itertools
import pandas as pd
import numpy as np
import statsmodels.api as sm
import matplotlib.pyplot as plt
from datetime import timedelta
# + id="3ognAV-OJbL5" colab_type="code" outputId="a9f2b1df-ece0-45e0-d74a-57628df5ece2" colab={"base_uri": "https://localhost:8080/", "height": 254}
# Get data
# NOTE: read_csv's `squeeze=True` keyword was removed in pandas 2.0; calling
# .squeeze("columns") on the resulting one-column DataFrame is the supported
# way to obtain a Series.
series = pd.read_csv('/content/TimeSeriesForecastingCOVID19/up_dataset_who.csv', header=0, parse_dates=[0], index_col=0).squeeze("columns")
# bfill (backward fill) replaces each missing value with the NEXT valid
# observation (it propagates values backwards in time).
series = pd.Series(series.fillna(series.bfill()))
print(series)
# + [markdown] id="9m4CNHUI4hII" colab_type="text"
# ## Getting the training set
# + id="sjS12tyaJqQR" colab_type="code" outputId="098c72bf-15ea-445c-a1c4-676dc2c09029" colab={"base_uri": "https://localhost:8080/", "height": 333}
# Training set = some 30 days
ini = pd.to_datetime('2020-01-11 00:00:00+00:00')
end = pd.to_datetime('2020-03-11 00:00:00+00:00')
train = series[ini:end]
train.plot()
# + [markdown] id="JLpj85hF4lgA" colab_type="text"
# # Fit model
# + id="8-x3TA7OJtB5" colab_type="code" outputId="00d0afd6-1475-4dd9-da5d-484f08988cd8" colab={"base_uri": "https://localhost:8080/", "height": 54}
# Fit ARIMA
mod = sm.tsa.statespace.SARIMAX(train, trend='c', order=(1,1,1)) # Selected the parameters randomly
res = mod.fit(disp=False)
# + [markdown] id="gMOnB6jq4oWs" colab_type="text"
# ## Test
# + id="cSLj1AiuQCMW" colab_type="code" colab={}
# Forecast 7 days ahead
ini_test = end + timedelta(days=1)
end_test = ini_test + timedelta(days=6)
pred = res.get_prediction(start=pd.to_datetime(ini_test), end=pd.to_datetime(end_test), dynamic=False)
# + id="mDiXjkigO4Uj" colab_type="code" outputId="751b2c1d-7d9d-44d4-d777-a633ad344ce5" colab={"base_uri": "https://localhost:8080/", "height": 314}
# Plot Results
ax = series[ini:end_test].plot(label='Observed')
pred.predicted_mean.plot(ax=ax, label='Forecast', alpha=.7)
ax.set_xlabel('Date')
ax.set_ylabel('COVID_19 Cases')
plt.legend()
plt.show()
# + id="DT1vIkrNSYay" colab_type="code" outputId="3cb54c98-84d4-4d85-a5b7-200036fee592" colab={"base_uri": "https://localhost:8080/", "height": 90}
# Compute error
predictions = np.array(pred.predicted_mean.values)
observations = np.array(series[ini_test:end_test].values)
# Mean absolute error: average of |prediction - observation| over the forecast
# horizon. np.mean expresses this directly (equivalent to sum/len).
MAE = np.mean(np.abs(predictions - observations))
print(observations)
print(predictions)
print('Mean absolute error (MAE):' + str(MAE))
# + id="n37af8maHMKl" colab_type="code" colab={}
| Tutorial_ARIMA_COVID19.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import string
import pandas as pd
import ipywidgets as widgets
import traitlets
from ast import literal_eval
from utils.preprocessing import tokenize
# +
# load data that has already been manually labeled
df = pd.read_csv('data/interim/row_df_fi_labeled.csv', index_col=0)
def eval_lists(s):
    """Parse a stringified Python literal (e.g. "['B', 'I']") back into an object.

    Rows without a label are stored as float NaN; NaN fails the self-equality
    check below and is returned unchanged, so unlabeled rows pass through.
    """
    if s != s:  # only NaN is not equal to itself
        return s
    return literal_eval(s)
df['label'] = df['label'].apply(eval_lists)
# shuffle
# df = df.sample(frac=1)
df
# -
# number of labeled rows
(~df['label'].isna()).sum()
# +
# table = str.maketrans({c: f' {c} ' for c in string.punctuation})
# def tokenize(s):
# return s.translate(table).strip().split()
# +
bio = {
'B': {'button_style': 'warning'},
'I': {'button_style': 'info'},
'O': {'button_style': ''}
}
labels = list(bio.keys())
# +
TEXT_COL = 'answer'
LABEL_COL = 'label'
N_COLS = 12
if LABEL_COL not in df.columns:
df[LABEL_COL] = None
idxmap = dict(enumerate(df.index))
i = 0
# +
def next_prev_button_click(next_or_prev, button):
    """Save the BIO labels currently shown, then move to the prev/next row.

    Shared handler for both navigation buttons. Relies on notebook globals:
    `i` (current row position), `df`, `idxmap`, `container` (token buttons),
    and `txt` (the text label widget).
    """
    assert next_or_prev in ['next', 'prev']
    global i
    # @@@ set labels for the previous text @@@
    if i >= 0:
        # in the beginning container does not contain HBoxes so we must handle that special case
        hboxes = container.children
        # get BIO labels for each token (each button's tooltip stores its label)
        labels = [w.tooltip for hbox in hboxes for w in hbox.children]
        df.loc[idxmap[i], LABEL_COL] = labels
    # @@@ increase or decrease index @@@
    new_i = i + {'next': 1, 'prev': -1}[next_or_prev]
    if 0 <= new_i and new_i < len(df):  # clamp to valid row range
        i = new_i
    # @@@ if the new text has been labeled, use those labels @@@
    text = df.loc[idxmap[i], TEXT_COL]
    labels = df.loc[idxmap[i], LABEL_COL]
    hboxes = get_hboxes(text, labels)
    container.children = hboxes
    txt.value = text
txt.value = text
def prev_button_click(button):
    # Thin adapter so the callback matches ipywidgets' on_click(button) signature.
    return next_prev_button_click('prev', button)
def next_button_click(button):
    # Thin adapter so the callback matches ipywidgets' on_click(button) signature.
    return next_prev_button_click('next', button)
def handle_label_click(button):
    # Cycle the clicked token through the BIO labels (B -> I -> O -> B ...),
    # updating both the button's colour style and the tooltip that stores
    # the token's current label.
    next_label = labels[(labels.index(button.tooltip) + 1) % len(labels)]
    button.button_style = bio[next_label]['button_style']
    button.tooltip = next_label
# -
def get_hboxes(text, labels):
    """Build rows of token buttons for the labeling UI.

    Tokenizes `text`, pairs each token with a BIO label (defaulting to all
    'O' when `labels` is None/NaN), and packs the resulting buttons into
    HBoxes of 12 per row.
    """
    ws = []
    tokens = tokenize(text)
    # `labels != labels` catches the float NaN stored for unlabeled rows
    if labels is None or labels != labels:
        labels = ['O'] * len(tokens)
    assert len(tokens) == len(labels)
    for token, label in zip(tokens, labels):
        # the tooltip doubles as the storage slot for the token's current label
        w = widgets.Button(description=token, tooltip=label, button_style=bio[label]['button_style'])
        w.on_click(handle_label_click)
        ws.append(w)
    n_cols = 12  # NOTE(review): shadows the module-level N_COLS constant — presumably they should agree
    hboxes = []
    while ws:
        # add n_cols buttons to a horizontal box
        hbox = widgets.HBox(ws[:n_cols])
        hboxes.append(hbox)
        ws = ws[n_cols:]
    return hboxes
# +
i = -1
txt = widgets.Label('')
container = widgets.VBox([widgets.Label('Click next to start')])
prev_button = widgets.Button(description='Previous', button_style='success')
prev_button.on_click(prev_button_click)
next_button = widgets.Button(description='Next', button_style='success')
next_button.on_click(next_button_click)
buttons = widgets.HBox([prev_button, next_button])
# txt = show_text(lbl.value)
debug = widgets.Output()
widgets.VBox([txt, container, buttons, debug])
# -
df
df['label'].apply(lambda v: isinstance(v, list)).sum()
# +
def view_labels(df):
    """Render each labeled row as colour-coded HTML.

    Tokens are tinted by their BIO label via the CSS classes below
    (B = orange, I = blue, O = black). Expects every row in `df` to have a
    list label whose length matches the tokenized text.
    """
    text_col = df[TEXT_COL]
    label_col = df[LABEL_COL]
    # inline stylesheet is prepended to every row's HTML snippet
    html = """
    <style>
    .B {
        color: orange
    }
    .I {
        color: blue
    }
    .O {
        color: black
    }
    </style>
    """
    ws = []
    for a, b in zip(text_col, label_col):
        a = tokenize(a)
        assert len(a) == len(b)
        # wrap each token in a span whose class is its BIO label
        lbl = widgets.HTML(html + ' '.join(f'<span class="{b_}">{a_}</span>' for a_, b_ in zip(a, b)))
        ws.append(lbl)
    return widgets.VBox(ws)
view_labels(df[df['label'].apply(lambda x: isinstance(x, list))])
# +
# if this notebook is used to label more data, remember to save the new labels!
| notebooks/3-labeling.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# NOTE: removed a stray pasted shell-path line ("/titanic_data_exploration")
# that was not valid Python and raised a SyntaxError, and a duplicated
# `%matplotlib inline` magic.
from __future__ import division
import math
from matplotlib import pyplot as plt
plt.style.use('ggplot')
import pandas as pd
import numpy as np
# %matplotlib inline
df = pd.read_csv("data/train.csv")
df.head()
df.tail()
df.dtypes
df.info()
df.describe()
df.shape
len(df.columns)
my_famous_passenger = df[df["Name"] == "Guggenheim, Mr. Benjamin"]
print(my_famous_passenger)
my_rich_passenger = df[df["Fare"] == 512.3292]
print(my_rich_passenger)
df["Fare"].describe()
####Let's rearrange some columns. This would be very hard to do using a csv library and doing it by hand.
####Pandas allows us to do this very intuitively
cols = list(df.columns.values)
print(cols)
cols = ['Survived', 'Pclass', 'Name', 'Sex', 'Age', 'SibSp', 'Parch', 'Ticket', 'Fare', 'Cabin', 'Embarked', 'PassengerId']
new_df = df[cols]
new_df.head()
# Inspect the first and last entries of the reordered column list.
# (The original cell contained an unterminated `cols[-1` — a SyntaxError —
# plus several duplicated REPL lines, collapsed here.)
cols[0]
cols[-1]
#### We can create new dataframes from a few attributes
new_df = df[["Sex","Age"]]
new_df.head()
df_of_women = df[df["Sex"] == "female"] #### Boolean-mask filtering returns a new DataFrame; look up pandas Series
df_of_men = df[df["Sex"] == "male"]
# `new_df[0]` would raise a KeyError (column selection, and there is no column
# named 0); use positional row access instead.
new_df.iloc[0]
df_pclass_1 = df[df["Pclass"] == 1]
df_pclass_1.head()
df_pclass_1.shape
df_pclass_2 = df[df["Pclass"] == 2]
df_pclass_2.head()
df_pclass_2.shape
df_pclass_3 = df[df["Pclass"] == 3]
df_pclass_3.head()
df_pclass_3.shape
# pandas removed Series.sort(); sort_values() is the replacement and returns a
# new sorted Series instead of mutating in place. (The original cell's
# `df[:Pclass]` also referenced an undefined name and would raise a NameError.)
df["Pclass"].sort_values()
new_array = df["Pclass"].sort_values()
print (new_array)
print (df["Pclass"])
print (df["Pclass"].unique())  # unique is a method: it must be called to get the values
df['FamilySize'] = df['SibSp'] + df['Parch']
df.head()
df["Age"].dropna().hist(bins=16, range=(0,80))
df["Age"].dropna().hist(bins=16, range=(0,80))
df["Age"].hist(bins=16, range=(0,80))
#For scatter plot
plt.scatter(df['Fare'], df['Survived'])
plt.show()
# +
### Side step
# Lets create a scatter plot
d = {'one' : np.random.rand(10),
'two' : np.random.rand(10)}
print(d)
# -
df_scrap = pd.DataFrame(d)
df_scrap.plot(style=['ro','bx'])
import statsmodels.api as sm
help(df.apply)
# +
new_df = df
def gender_to_numeric(x):
    """Encode 'male' as 0; any other value (i.e. 'female') as 1."""
    return 0 if x == "male" else 1
new_df['Sex'] = new_df['Sex'].apply(gender_to_numeric) #Passing a function as an argument to apply function
new_df = new_df[["Survived", "Age","Sex", "Pclass"]]
new_df = new_df.dropna()
train_cols = new_df.columns[1:]
train_cols
logit = sm.Logit(new_df['Survived'], new_df[train_cols])
#Fit the model
result = logit.fit()
print(result.summary())
# -
print(result.summary())
print(result.summary())
| day2.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/mindaeng/rep1/blob/main/HW2.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + [markdown] id="OwBySVVU5h6M"
# ## HW 2
# Answer the following questions
#
#
# + id="aZSgHuiR5W1k"
# + [markdown] id="qEqK8L1k6PSd"
# ### Below is one of the example code for binary-class classification problem on IMDB dataset
# + id="WKvVq8pQ6YNt"
from tensorflow.keras.datasets import imdb
(train_data, train_labels), (test_data, test_labels) = imdb.load_data(
num_words=10000)
# + id="IFml1LhPXCsk"
import numpy as np
def vectorize_sequences(sequences, dimension=10000):
    """Multi-hot encode lists of integer word indices into a float matrix.

    Returns an array of shape (len(sequences), dimension) in which row i holds
    1.0 at every column index appearing in sequences[i] and 0.0 elsewhere.
    """
    encoded = np.zeros((len(sequences), dimension))
    for row, indices in enumerate(sequences):
        # fancy indexing sets all of this sample's word positions at once
        encoded[row, list(indices)] = 1.0
    return encoded
x_train = vectorize_sequences(train_data)
x_test = vectorize_sequences(test_data)
# + id="FCxmgWVYXHoD"
y_train = np.asarray(train_labels).astype("float32")
y_test = np.asarray(test_labels).astype("float32")
# + colab={"base_uri": "https://localhost:8080/"} id="RMI28KpIXVYE" outputId="588bc006-9946-43f5-a507-12ad1dcc9396"
y_train[:5]
# + id="xgDMHxOVXKRE"
from tensorflow import keras
from tensorflow.keras import layers
# Two 16-unit ReLU hidden layers; the single sigmoid output unit yields
# P(positive review) for this binary classification task.
model = keras.Sequential([
    layers.Dense(16, activation="relu"),
    layers.Dense(16, activation="relu"),
    layers.Dense(1, activation="sigmoid")
])
# + id="tT4KACL9XdNE"
model.compile(optimizer="rmsprop",
loss="binary_crossentropy",
metrics=["accuracy"])
# + id="k-Ufwj3iXc8l"
x_val = x_train[:10000]
partial_x_train = x_train[10000:]
y_val = y_train[:10000]
partial_y_train = y_train[10000:]
# + id="5kJLbfX3Xp_U"
history = model.fit(partial_x_train,
partial_y_train,
epochs=20,
batch_size=512,
validation_data=(x_val, y_val))
# + [markdown] id="AFsXMZmW6e6F"
# ### Q1 Binary classification is multi-class classification with two classes so we can modify our model as below:
# + id="mXXKBxo36kPl"
model2 = keras.Sequential([
layers.Dense(16, activation="relu"),
layers.Dense(16, activation="relu"),
layers.Dense(2, activation="softmax")
])
# + [markdown] id="PsOrD_13YWn1"
# ### Complete the codes for compile and model fitting.
# + id="Hj4AauQvYT7F"
#model2.compile()
#model2.fit()
# + [markdown] id="6SGxXX70Ylr_"
# ### Q2 Below is visualization of the validation loss. We can see overfitting after 5 epochs.
# + colab={"base_uri": "https://localhost:8080/", "height": 295} id="X8rcODQ7Yk_E" outputId="87c6d0bc-2aea-4a06-d24e-9bd37ddaa8dc"
import matplotlib.pyplot as plt
history_dict = history.history
loss_values = history_dict["loss"]
val_loss_values = history_dict["val_loss"]
epochs = range(1, len(loss_values) + 1)
plt.plot(epochs, loss_values, "bo", label="Training loss")
plt.plot(epochs, val_loss_values, "b", label="Validation loss")
plt.title("Training and validation loss")
plt.xlabel("Epochs")
plt.ylabel("Loss")
plt.legend()
plt.show()
# + [markdown] id="4LbAGmB-aD-d"
# ### Add dropout layers and visualize the validation loss of your new model.
# + id="uY9QQxR1Z013"
## Add dropout layers
model2_dr = keras.Sequential([
layers.Dense(16, activation="relu"),
layers.Dense(16, activation="relu"),
layers.Dense(2, activation="softmax")
])
## retrain and visualize the validation loss of new model
# + [markdown] id="Xs7jKhJKahhM"
# ### Q3 Add l1 regularization on the second dense layer from the original model below.
# + id="VeuwleNJa9r9"
## Add l1 regularization on the second dense layer
# NOTE: renamed from model2_dr, which collided with the dropout model defined
# in Q2 (reusing the name would silently overwrite that model).
model2_l1 = keras.Sequential([
    layers.Dense(16, activation="relu"),
    layers.Dense(16, activation="relu"), ### add l1 regularization here
    layers.Dense(2, activation="softmax")
])
# + [markdown] id="e9yx1WLrbJUd"
# ### Finish the training and compare the dropout model(model from Q2) and l1 regularization model after visualizing their validation losses.
# + id="EyL_jfmNbWe_"
| HW2.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: SpotifyWatch
# language: python
# name: spotifywatch
# ---
# +
# default_exp evolution
# -
# # Taste Evolution Visualisation
#
# <br>
#
# ### Imports
# +
#exports
import numpy as np
import pandas as pd
import datetime
from moepy import lowess, eda
from ipypb import track
import matplotlib.pyplot as plt
import matplotlib.dates as mdates
# -
# +
df_dw = pd.read_csv('../data/playlists/discover_weekly.csv')
df_dw.head()
# -
# +
is_dt_dtype = lambda potential_dt: isinstance(potential_dt, datetime.datetime) or isinstance(potential_dt, pd.Timestamp)
def fit_lowess(s_x, s_y, x_pred=None, x_dt_type=None, **lowess_kwargs):
    """Fit a LOWESS curve to (s_x, s_y) and return smoothed predictions.

    If the x values are datetimes (auto-detected from the first element unless
    `x_dt_type` is given) they are cast to ints for fitting and the prediction
    grid is converted back to datetimes before returning. Returns
    (x_pred, y_pred); `x_pred` defaults to 100 evenly spaced points over the
    observed x range. Extra kwargs are forwarded to moepy's Lowess.fit.
    """
    if x_dt_type is None:
        x_dt_type = is_dt_dtype(s_x[0])
    if x_dt_type == True:
        # datetimes -> integer representation so the regression can handle them
        s_x = s_x.astype(int)
    x = s_x.values
    y = s_y.values
    lowess_model = lowess.Lowess()
    lowess_model.fit(x, y, **lowess_kwargs)
    if x_pred is None:
        x_pred = np.linspace(x.min(), x.max(), 100)
    y_pred = lowess_model.predict(x_pred)
    if x_dt_type == True:
        # convert the prediction grid back to datetimes for plotting
        x_pred = pd.to_datetime(x_pred)
    return x_pred, y_pred
# -
def smoothed_plot(df_dw, added_dts, numeric_col, ax=None):
    """Scatter a numeric track feature against its added-date with a LOWESS
    trend line overlaid.

    Points are jittered horizontally so tracks added in the same weekly batch
    don't overplot. Plots onto `ax` if given, else creates a new figure.
    """
    # Smoothing
    s_x = added_dts
    s_y = df_dw[numeric_col]
    x_pred, y_pred = fit_lowess(s_x, s_y, frac=0.25, num_fits=100)
    # gaussian jitter applied to the ns-since-epoch ints (5e13 ns ~ 14 hours)
    jitter_scale = 5e13
    s_x_jittered = pd.to_datetime(s_x.astype(int) + np.random.normal(0, jitter_scale, s_x.size))
    # Plotting
    if ax is None:
        fig, ax = plt.subplots(dpi=150)
    ax.scatter(s_x_jittered, s_y, s=0.5, color='k')
    ax.plot(x_pred, y_pred, color='r')
    dt_fmt = mdates.DateFormatter('%d %b')
    ax.xaxis.set_major_formatter(dt_fmt)
    eda.hide_spines(ax)
    ax.set_ylabel(numeric_col.title())
# +
numeric_col = 'popularity'
smoothed_plot(df_dw, added_dts, numeric_col)
# -
# +
numeric_cols = ['popularity', 'danceability', 'energy', 'key',
'loudness', 'speechiness', 'acousticness',
'instrumentalness', 'liveness', 'valence',
'tempo', 'mode', 'time_signature']
# -
| nbs/05-taste-trends.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
# default_exp data.unwindowed
# -
# # Unwindowed datasets
#
# > This functionality will allow you to create a dataset that applies sliding windows to the input data on the fly. This heavily reduces the size of the input data files, as only the original, unwindowed data needs to be stored.
#export
from tsai.imports import *
from tsai.utils import *
from tsai.data.validation import *
from tsai.data.core import *
# +
#export
class TSUnwindowedDataset():
    """Dataset that applies sliding windows to unwindowed input data on the fly.

    Instead of materializing every window of `X` up front, indexing computes
    window slices lazily via broadcasted index arithmetic, so only the
    original 2d array needs to be stored.
    """
    # tensor wrappers applied to the x and y parts of a batch, respectively
    _types = TSTensor, TSLabelTensor
    def __init__(self, X, y=None, y_func=None, window_size=1, stride=1, drop_start=0, drop_end=0, seq_first=True, **kwargs):
        # store_attr() saves all constructor args as attributes of self
        store_attr()
        # promote univariate 1d input to a single-column 2d array
        if X.ndim == 1: X = np.expand_dims(X, 1)
        shape = X.shape
        assert len(shape) == 2
        # seq_first=True means X is (seq_len, n_vars); otherwise (n_vars, seq_len)
        if seq_first:
            seq_len = shape[0]
        else:
            seq_len = shape[-1]
        # exclusive upper bound for window start positions after trimming the end
        max_time = seq_len - window_size + 1 - drop_end
        assert max_time > 0, 'you need to modify either window_size or drop_end as they are larger than seq_len'
        # column vector of window starts; adding window_idxs (row vector
        # 0..window_size-1) broadcasts to a (n_windows, window_size) index grid
        self.all_idxs = np.expand_dims(np.arange(drop_start, max_time, step=stride), 0).T
        self.window_idxs = np.expand_dims(np.arange(window_size), 0)
        # optional subset of window indices (set by TSUnwindowedDatasets.subset)
        if 'split' in kwargs: self.split = kwargs['split']
        else: self.split = None
        self.n_inp = 1
        # default loss: cross-entropy when a probed 2-sample batch of targets
        # looks integral, MSE otherwise (or when there are no targets at all)
        if y is None: self.loss_func = MSELossFlat()
        else:
            _,yb=self[:2]
            if (is_listy(yb[0]) and isinstance(yb[0][0], Integral)) or isinstance(yb[0], Integral): self.loss_func = CrossEntropyLossFlat()
            else: self.loss_func = MSELossFlat()
    def __len__(self):
        # length of the active split if one is set, else every possible window
        if self.split is not None:
            return len(self.split)
        else:
            return len(self.all_idxs)
    def __getitem__(self, idxs):
        # map split-relative indices back to absolute window indices
        if self.split is not None:
            idxs = self.split[idxs]
        # broadcast start positions (column) + offsets (row) into index grids
        widxs = self.all_idxs[idxs] + self.window_idxs
        if self.seq_first:
            xb = self.X[widxs]
            # (n, window, vars) -> (n, vars, window); a 2d result means a
            # single sample, which gets a singleton variable axis instead
            if xb.ndim == 3: xb = xb.transpose(0,2,1)
            else: xb = np.expand_dims(xb, 1)
        else:
            xb = self.X[:, widxs].transpose(1,0,2)
        if self.y is None:
            return (self._types[0](xb),)
        else:
            yb = self.y[widxs]
            # y_func reduces each window of targets to a single label/value
            if self.y_func is not None:
                yb = self.y_func(yb)
            return (self._types[0](xb), self._types[1](yb))
    @property
    def vars(self):
        # number of variables (channels) in one sample
        s = self[0][0] if not isinstance(self[0][0], tuple) else self[0][0][0]
        return s.shape[-2]
    @property
    def len(self):
        # number of time steps per sample (the window size)
        s = self[0][0] if not isinstance(self[0][0], tuple) else self[0][0][0]
        return s.shape[-1]
class TSUnwindowedDatasets(FilteredBase):
    """Pairs a TSUnwindowedDataset with index splits, exposing each split as
    its own TSUnwindowedDataset (train = split 0, valid = split 1)."""
    def __init__(self, dataset, splits):
        # store_attr() saves dataset and splits as attributes of self
        store_attr()
    def subset(self, i):
        # rebuild a dataset with identical settings but restricted to splits[i]
        return type(self.dataset)(self.dataset.X, y=self.dataset.y, y_func=self.dataset.y_func, window_size=self.dataset.window_size,
                                  stride=self.dataset.stride, drop_start=self.dataset.drop_start, drop_end=self.dataset.drop_end,
                                  seq_first=self.dataset.seq_first, split=self.splits[i])
    @property
    def train(self):
        return self.subset(0)
    @property
    def valid(self):
        return self.subset(1)
    def __getitem__(self, i): return self.subset(i)
# -
def y_func(y): return y.astype('float').mean(1)
# This approach works with both univariate and multivariate data.
#
# * Univariate: we'll use a simple array with 20 values, one with the seq_len first (X0), the other with seq_len second (X1).
# * Multivariate: we'll use 2 time series arrays, one with the seq_len first (X2), the other with seq_len second (X3). No sliding window has been applied to them yet.
# Univariate
X0 = np.arange(20)
X1 = np.arange(20).reshape(1, -1)
X0.shape, X0, X1.shape, X1
# Multivariate
X2 = np.arange(20).reshape(-1,1)*np.array([1, 10, 100]).reshape(1,-1)
X3 = np.arange(20).reshape(1,-1)*np.array([1, 10, 100]).reshape(-1,1)
X2.shape, X3.shape, X2, X3
# Now, instead of applying SlidingWindow to create and save the time series that can be consumed by a time series model, we can use a dataset that creates the data on the fly. In this way we avoid the need to create and save large files. This approach is also useful when you want to test different sliding window sizes, as otherwise you would need to create files for every size you want to test.The dataset will create the samples correctly formatted and ready to be passed on to a time series architecture.
wds0 = TSUnwindowedDataset(X0, window_size=5, stride=2, seq_first=True)[:][0]
wds1 = TSUnwindowedDataset(X1, window_size=5, stride=2, seq_first=False)[:][0]
test_eq(wds0, wds1)
wds0, wds0.data, wds1, wds1.data
wds2 = TSUnwindowedDataset(X2, window_size=5, stride=2, seq_first=True)[:][0]
wds3 = TSUnwindowedDataset(X3, window_size=5, stride=2, seq_first=False)[:][0]
test_eq(wds2, wds3)
wds2, wds3, wds2.data, wds3.data
#hide
out = create_scripts(); beep(out)
| nbs/002b_data.unwindowed.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Path Overview
# `Path` contains `Lines` and `Curves` which can be stroked or filled. `Contour` is composed of a series of connected `Lines` and `Curves`. `Path` may contain zero, one, or more `Contours`. Each `Line` and `Curve` are described by `Verb`, `Points`, and optional `Path_Conic_Weight`.
#
# Each pair of connected `Lines` and `Curves` shares a common `Point`; for instance, a `Path` containing two connected `Lines` is described by the `Path_Verb` sequence: `Path.kMove`, `Path.kLine`, `Path.kLine`; and a `Point` sequence with three entries, sharing the middle entry as the end of the first Line and the start of the second Line.
#
# `Path` components `Arc`, `Rect`, `Round_Rect`, `Circle`, and `Oval` are composed of `Lines` and `Curves` with as many `Verbs` and `Points` required for an exact description. Once added to `Path`, these components may lose their identity; although `Path` can be inspected to determine if it describes a single `Rect`, `Oval`, `Round_Rect`, and so on.
#
# ### Example
# +
import skia
from IPython.display import display, Image
def show(draw):
    """Render `draw` onto a 256x192 skia surface and display the result inline.

    `draw` is a callable taking a skia.Canvas; the surface snapshot is encoded
    and shown via IPython's rich display.
    """
    surface = skia.Surface(256, 192)
    draw(surface.getCanvas())
    image = surface.makeImageSnapshot()
    display(Image(data=image.encodeToData()))
# -
def draw(canvas: skia.Canvas):
    """Draw three contours — a line, a circle, and a quad — first filled in
    black (the default paint style), then stroked in blue on top."""
    paint = skia.Paint()
    paint.setAntiAlias(True)
    path = skia.Path()
    # contour 1: a single line segment
    path.moveTo(124, 108)
    path.lineTo(172, 24)
    # contour 2: a closed circle
    path.addCircle(50, 50, 30)
    # contour 3: an open quadratic curve
    path.moveTo(36, 148)
    path.quadTo(66, 188, 120, 136)
    canvas.drawPath(path, paint)  # fill pass
    paint.setStyle(skia.Paint.kStroke_Style);
    paint.setColor(skia.ColorBLUE)
    paint.setStrokeWidth(3)
    canvas.drawPath(path, paint)  # stroke pass over the same path
# `Path` contains three Contours: `Line`, `Circle`, and `Quad`. `Line` is stroked but not filled. `Circle` is stroked and filled; `Circle` stroke forms a loop. `Quad` is stroked and filled, but since it is not closed, `Quad` does not stroke a loop.
show(draw)
# `Path` contains a `Path_Fill_Type` which determines whether overlapping `Contours` form fills or holes. `Path_Fill_Type` also determines whether area inside or outside `Lines` and `Curves` is filled.
def draw(canvas: skia.Canvas):
    """Render one quadratic contour three ways: filled, stroked, then stroked-and-filled."""
    brush = skia.Paint()
    brush.setAntiAlias(True)
    quad = skia.Path()
    quad.moveTo(36, 48)
    quad.quadTo(66, 88, 120, 36)
    # Pass 1: default paint style fills the area enclosed by the curve.
    canvas.drawPath(quad, brush)
    # Pass 2: 8px blue stroke only, shifted down 50px.
    brush.setStyle(skia.Paint.kStroke_Style)
    brush.setColor(skia.ColorBLUE)
    brush.setStrokeWidth(8)
    canvas.translate(0, 50)
    canvas.drawPath(quad, brush)
    # Pass 3: red stroke-and-fill, shifted down another 50px.
    brush.setStyle(skia.Paint.kStrokeAndFill_Style)
    brush.setColor(skia.ColorRED)
    canvas.translate(0, 50)
    canvas.drawPath(quad, brush)
# `Path` is drawn filled, then stroked, then stroked and filled.
show(draw)
# `Path` contents are never shared. Copying `Path` by value effectively creates a new `Path` independent of the original. Internally, the copy does not duplicate its contents until it is edited, to reduce memory use and improve performance.
# ---
#
# `Contour` contains one or more `Verbs`, and as many `Points` as are required to satisfy `Path_Verb_Array`. First `Path_Verb` in `Path` is always `PathVerb.kMove`; each `PathVerb.kMove` that follows starts a new `Contour`.
#
# ### Example
def draw(canvas: skia.Canvas):
    """Label and stroke a path whose three contours come from moveTo/close boundaries."""
    font = skia.Font()
    ink = skia.Paint()
    ink.setAntiAlias(True)
    # Annotate where each contour will appear on the canvas.
    canvas.drawString("1st contour", 150, 100, font, ink)
    canvas.drawString("2nd contour", 130, 160, font, ink)
    canvas.drawString("3rd contour", 40, 30, font, ink)
    ink.setStyle(skia.Paint.kStroke_Style)
    outline = skia.Path()
    # Contour 1: open line.
    outline.moveTo(124, 108)
    outline.lineTo(172, 24)
    # Contour 2: quad, explicitly closed.
    outline.moveTo(36, 148)
    outline.quadTo(66, 188, 120, 136)
    outline.close()
    # Contour 3: conicTo with no preceding moveTo starts at contour 2's last point.
    outline.conicTo(70, 20, 110, 40, 0.6)
    canvas.drawPath(outline, ink)
# Each `Path.moveTo` starts a new `Contour`, and content after `Path.close()` also starts a new `Contour`. Since `Path.conicTo` is not preceded by `Path.moveTo`, the first `Point` of the third `Contour` starts at the last `Point` of the second `Contour`.
show(draw)
# If final `PathVerb` in `Contour` is `PathVerb.kClose`, `Line` connects `Path_Last_Point` in `Contour` with first `Point`. A closed `Contour`, stroked, draws `Paint_Stroke_Join` at `Path_Last_Point` and first `Point`. Without `PathVerb.kClose` as final `Verb`, `Path_Last_Point` and first `Point` are not connected; `Contour` remains open. An open `Contour`, stroked, draws `Paint_Stroke_Cap` at `Path_Last_Point` and first `Point`.
#
# ### Example
def draw(canvas: skia.Canvas):
    """Stroke the same quad twice: once open, then closed (note the joined endpoints)."""
    stroke = skia.Paint()
    stroke.setAntiAlias(True)
    stroke.setStyle(skia.Paint.kStroke_Style)
    stroke.setStrokeWidth(8)
    curve = skia.Path()
    curve.moveTo(36, 48)
    curve.quadTo(66, 88, 120, 36)
    # First draw: contour is still open.
    canvas.drawPath(curve, stroke)
    # Closing connects the last point back to the first for the second draw.
    curve.close()
    canvas.translate(0, 50)
    canvas.drawPath(curve, stroke)
# `Path` is drawn stroked, with an open `Contour` and a closed `Contour`.
show(draw)
# ---
#
# `Contour` length is distance traveled from first `Point` to `Path_Last_Point`, plus, if `Contour` is closed, distance from `Path_Last_Point` to first `Point`. Even if `Contour` length is zero, stroked `Lines` are drawn if `Paint_Stroke_Cap` makes them visible.
#
# ### Example
# +
def draw(canvas: skia.Canvas):
    """Show that zero-length contours still render when the stroke cap extends past the point."""
    dot = skia.Paint()
    dot.setAntiAlias(True)
    dot.setStyle(skia.Paint.kStroke_Style)
    dot.setStrokeWidth(8)
    dot.setStrokeCap(skia.Paint.kRound_Cap)
    mark = skia.Path()
    # Degenerate line: start and end coincide, drawn as a round dot by the cap.
    mark.moveTo(36, 48)
    mark.lineTo(36, 48)
    canvas.drawPath(mark, dot)
    mark.reset()
    dot.setStrokeCap(skia.Paint.kSquare_Cap)
    # Closed empty contour: a single point, drawn as a square by the cap.
    mark.moveTo(56, 48)
    mark.close()
    canvas.drawPath(mark, dot)
show(draw)
# -
| notebooks/Path-Overview.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python [default]
# language: python
# name: python3
# ---
# +
# GPU: 32*40 in 27.9s = 46/s
# CPU: 32*8 in 70s = 4/s
# -
import os
# TOGGLE THIS (hard to kill CNTK):
# Must be set BEFORE keras/cntk are imported below; '-1' hides every GPU
# so the CPU benchmark actually runs on CPU.
os.environ['CUDA_VISIBLE_DEVICES'] = '-1'
# ...
# Select the CNTK backend before the first `import keras`.
os.environ['KERAS_BACKEND'] = "cntk"
import sys
import numpy as np
import keras as K
import cntk as C
from keras.applications.resnet50 import ResNet50
# Report the environment so timings in the header comments are reproducible.
print("OS: ", sys.platform)
print("Python: ", sys.version)
print("Numpy: ", np.__version__)
print("Keras: ", K.__version__)
print("CNTK: ", C.__version__)
print("Keras using {}".format(K.backend.backend()))
print("Keras channel ordering is {}".format(K.backend.image_data_format()))
# !cat /proc/cpuinfo | grep processor | wc -l
# !nvidia-smi --query-gpu=gpu_name --format=csv
# Set channels first (default)
K.backend.set_image_data_format('channels_first')
# Benchmark configuration: 32-image batches, 2048-dim ResNet-50 pool features,
# 40 batches for the GPU run vs 8 for the (slower) CPU run.
BATCH_SIZE = 32
RESNET_FEATURES = 2048
BATCHES_GPU = 40
BATCHES_CPU = 8
def give_fake_data(batches):
    """Generate deterministic random image data for inference benchmarking.

    Returns a tuple ``(data_cl, data_cf)``: the same float32 values in
    channels-last layout (N, 224, 224, 3) and channels-first layout
    (N, 3, 224, 224), where N = BATCH_SIZE * batches.
    """
    np.random.seed(0)  # fixed seed: every run benchmarks identical inputs
    data_cl = np.random.rand(BATCH_SIZE * batches, 224, 224, 3).astype(np.float32)
    data_cf = np.swapaxes(data_cl, 1, 3)
    return data_cl, data_cf
def yield_mb(X, batchsize):
    """Yield (batch_index, mini_batch) pairs; trailing incomplete batches are dropped."""
    n_batches = len(X) // batchsize
    for idx in range(n_batches):
        start = idx * batchsize
        yield idx, X[start:start + batchsize]
# Create batches of fake data
# cl = channels-last (N, 224, 224, 3); cf = channels-first (N, 3, 224, 224)
fake_input_data_cl, fake_input_data_cf = give_fake_data(BATCHES_GPU)
print(fake_input_data_cl.shape, fake_input_data_cf.shape)
def predict_fn(classifier, data, batchsize):
    """Run batched inference over `data`, collecting RESNET_FEATURES-dim features per sample.

    Only complete batches are processed (see yield_mb); rows beyond the last
    complete batch keep their zero initialization.
    """
    features = np.zeros((len(data), RESNET_FEATURES), np.float32)
    for idx, batch in yield_mb(data, batchsize):
        start = idx * batchsize
        features[start:start + batchsize] = classifier.predict_on_batch(batch).squeeze()
    return features
# ## 1. GPU
# Download Resnet weights
model = ResNet50(include_top=False, input_shape=(3,224,224))
#model.summary()
# Contiguous layout avoids an extra copy inside the framework.
fake_input_data_cf = np.ascontiguousarray(fake_input_data_cf)
# Warm-up pass so the timed run below excludes graph compilation/initialization.
cold_start = predict_fn(model, fake_input_data_cf, BATCH_SIZE)
# %%time
# GPU: 27.9s
features = predict_fn(model, fake_input_data_cf, BATCH_SIZE)
# ## 2. CPU
# +
# Need to restart notebook
# +
# Kill all GPUs ...
# Done at top of script on re-run (before loading CNTK)
# -
model = ResNet50(include_top=False, input_shape=(3,224,224))
# Create batches of fake data
# Smaller workload (BATCHES_CPU=8) because CPU inference is ~10x slower.
fake_input_data_cl, fake_input_data_cf = give_fake_data(BATCHES_CPU)
print(fake_input_data_cl.shape, fake_input_data_cf.shape)
fake_input_data_cf = np.ascontiguousarray(fake_input_data_cf)
cold_start = predict_fn(model, fake_input_data_cf, BATCH_SIZE)
# %%time
# CPU: 70s
features = predict_fn(model, fake_input_data_cf, BATCH_SIZE)
| DeepLearningFrameworks/inference/ResNet50-Keras(CNTK).ipynb |
# ---
# layout: post
# title: "영상분류 해보기"
# author: <NAME>
# date: 2017-02-04 10:00:00
# categories: Keras
# comments: true
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 2
# language: python
# name: python2
# ---
# 본 강좌에서는 영상분류(Image Classification)을 컨볼루션망으로 해보겠습니다. 다음과 같은 순서로 진행하겠습니다.
#
# 1. 데이터셋 준비하기
# 1. 모델 구성하기
# 1. 모델 엮기
# 1. 모델 학습시키기
# 1. 모델 사용하기
# ---
#
# ### 데이터셋 준비하기
#
# In this tutorial we will use the standard machine learning problem called the iris flowers dataset. This dataset is well studied and is a good problem for practicing on neural networks because all of the 4 input variables are numeric and have the same scale in centimeters. Each instance describes the properties of an observed flower measurements and the output variable is specific iris species. The attributes for this dataset can be summarized as follows:
# 1. Sepal length in centimeters.
# 2. Sepal width in centimeters.
# 3. Petal length in centimeters.
# 4. Petal width in centimeters.
# 5. Class.
#
# This is a multiclass classification problem, meaning that there are more than two classes to be predicted, in fact there are three flower species. This is an important type of problem on which to practice with neural networks because the three class values require specialized handling. Below is a sample of the first five of the 150 instances:
#
# 5.1,3.5,1.4,0.2,Iris-setosa
# 4.9,3.0,1.4,0.2,Iris-setosa
# 4.7,3.2,1.3,0.2,Iris-setosa
# 4.6,3.1,1.5,0.2,Iris-setosa
# 5.0,3.6,1.4,0.2,Iris-setosa
#
# [다운](http://archive.ics.uci.edu/ml/machine-learning-databases/iris/iris.data)
# +
# Binary Classification with Sonar Dataset: Standardized
# NOTE(review): the banner above looks copied from another tutorial — this
# notebook actually performs 3-class classification on the iris dataset.
import numpy
import pandas
from keras.models import Sequential
from keras.layers import Dense
from keras.wrappers.scikit_learn import KerasClassifier
from keras.utils import np_utils
from sklearn.preprocessing import LabelEncoder
# fix random seed for reproducibility
seed = 7
numpy.random.seed(seed)
# -
# load dataset
# iris.data has no header row: columns 0-3 are the numeric measurements,
# column 4 is the species name string.
dataframe = pandas.read_csv("warehouse/iris.data", header=None)
dataset = dataframe.values
X = dataset[:,0:4].astype(float)
Y = dataset[:,4]
# Iris-setosa
# Iris-versicolor
# Iris-virginica
#
# Iris-setosa, Iris-versicolor, Iris-virginica
# 1, 0, 0
# 0, 1, 0
# 0, 0, 1
# encode class values as integers
# LabelEncoder maps the three species strings to 0/1/2.
encoder = LabelEncoder()
encoder.fit(Y)
encoded_Y = encoder.transform(Y)
# convert integers to dummy variables (i.e. one hot encoded)
# e.g. 1 -> [0, 1, 0]; required by categorical_crossentropy.
dummy_y = np_utils.to_categorical(encoded_Y)
# ---
#
# ### 모델 구성하기
#
# Dense 클래스를 사용하여 완전 연결 레이어(Fully Connected Layer)를 정의할 수 있다.
#
# - 첫번째 인자 : 뉴런의 수
# - 두번째 인자 : 네트워크 가중치(network weight) 초기화 방법
# - uniform : 균등분포 (uniform distribution)의 작은 난수들로 초기화 (0~0.05 사이)
# - normal : 가우시안 분포 (Gaussian distribution)로 생성된 작은 난수들로 초기화
# - 세번째 인자 : 활성화 함수(activation function) 지정
# - relu : rectifier 활성화 함수
# - sigmoid : sigmoid 활성화 함수
# - tanh : tanh 활성화 함수
#
# 마지막 레이어는 sigmoid 할성화 함수를 사용하는데, 이유는 결과가 0과 1사이로 나오는 것을 보장하며, 양성 클래스의 확률로 쉽게 매핑할 수 있기 때문이다. 또한 0.5 임계치(threshold)을 같은 클래스의 범주형 분류(hard classification)를 할 수 있다.
#
# - 첫번째 은닉층(hidden layer)는 4개 뉴런을 가지고, 4개 입력을 받아들인다.
# - 마지막 레이어는 세 가지 클래스를 예측하는 3개의 뉴런을 가진다.
# create model: 4 inputs -> 4-unit relu hidden layer -> 3-way class output
model = Sequential()
model.add(Dense(4, input_dim=4, init='normal', activation='relu'))
# Use softmax (not sigmoid) on the output layer: with 3 mutually exclusive
# classes the outputs must form a probability distribution, matching the
# categorical_crossentropy loss used at compile time.
model.add(Dense(3, init='normal', activation='softmax'))
# +
# Render the model topology as an inline SVG (requires graphviz/pydot).
from IPython.display import SVG
from keras.utils.visualize_util import model_to_dot
SVG(model_to_dot(model, show_shapes=True).create(prog='dot', format='svg'))
# -
# 
# ---
#
# ### 모델 엮기
#
# 컴파일 시에 정의해야하는 것들
# - 가중치 세트를 평가하는 데 사용할 손실함수(loss function)
# - binary_crossentropy : 이진 분류를 위한 logarithmic loss
# - 네트워크의 다른 가중치를 검객하는 데 사용되는 최적화 알고리즘
# - adam : 효율적인 경사 하강법(gradient descent) 알고리즘
# - 학습과정에서 수집하기 싶은 측정 기준
# Compile model
# Multiclass loss + adam optimizer; track accuracy during training.
model.compile(loss= 'categorical_crossentropy' , optimizer= 'adam' , metrics=[ 'accuracy' ])
# ---
#
# ### 모델 학습시키기
#
# - nb_epoch : 데이터셋에 대한 반복 횟수
# - batch_size : 네트워크에서 가중치 갱신 전에 평가되는 인스턴스의 수
# Fit the model
# Train on the full dataset (no hold-out split in this tutorial).
model.fit(X, dummy_y, nb_epoch=200, batch_size=5) # nb_epoch 200
# ---
#
# ### 모델 사용하기
# +
# evaluate
# Note: evaluated on the same data used for training, so this accuracy
# overestimates generalization performance.
scores = model.evaluate(X, dummy_y)
print("")
print("%s: %.2f%%" %(model.metrics_names[1], scores[1]*100))
# -
# ---
#
# ### 같이 보기
#
# * [강좌 목차](https://tykimos.github.io/2017/01/27/Keras_Lecture_Plan/)
# * 이전 : [딥러닝 이야기/레이어 이야기](https://tykimos.github.io/2017/01/27/Layer_Talk/)
# * 다음 : [딥러닝 기본 실습/컨볼루션 신경망 모델 만들어보기](https://tykimos.github.io/2017/02/04/CNN_Getting_Started/)
| _writing/Temp 2017-2-4-ImageClassification_Example.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 2
# language: python
# name: python2
# ---
# # **Classification II Lab: Working with classifiers**
#
# Author: <NAME> (http://vanessa.webs.tsc.uc3m.es/)
#
# Updated: 07/02/2017 (working with sklearn 0.18.1)
#
# In this lab session we are going to continue working with classification algorithms, mainly, we are going to focus on decision trees and their use in ensembles.
#
# #### ** During this lab we will cover: **
# #### * Part 1: Trees*
# #### * Part 2: Random forests*
# #### * Part 3: Ensembles of classifiers: bagging and boosting*
#
# %matplotlib inline
# ### ** Part 0: Load and preprocess data**
#
# In the following sections, we are going to use all input features of the Iris dataset. So, let's start running the following cell to load the complete Iris data.
# +
import numpy as np
from sklearn import datasets
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
# Initialize the random generator seed to compare results
np.random.seed(0)
iris = datasets.load_iris()
X = iris.data # All input features are used
Y = iris.target
# 60% train / 40% test split.
X_train, X_test, Y_train, Y_test = train_test_split(X, Y, test_size=.4)
# Standardize: fit mean/std on the training set only, then apply to test.
scaler = StandardScaler()
X_train = scaler.fit_transform(X_train)
X_test = scaler.transform(X_test)
# -
# ### ** Part 1: Trees**
#
# #### ** 1.1: Training a decision Tree**
#
# Decision Trees learn simple decision rules selecting iteratively a input feature and setting a threshold over it, so they are simple tool, easy to understand and interpret.
#
# Use the [DecisionTreeClassifier( )](http://scikit-learn.org/stable/modules/generated/sklearn.tree.DecisionTreeClassifier.html#sklearn.tree.DecisionTreeClassifier) function to train a decision tree. Although the tree depth is usually a parameter to select, here we are working with only for input features, so you can use all default parameter and obtain a good performance. Complete the following code to return in the variable acc\_tree the tree accuracy.
# +
###########################################################
# TODO: Replace <FILL IN> with appropriate code
###########################################################
from sklearn import tree
from sklearn.tree import DecisionTreeClassifier
# Default hyperparameters (unbounded depth) suffice for 4-feature iris.
clf_tree = DecisionTreeClassifier()
clf_tree.fit(X_train,Y_train)
# .score returns mean accuracy on the held-out test split.
acc_tree= clf_tree.score(X_test,Y_test)
print("The test accuracy of the decision tree is %2.2f" %(100*acc_tree))
# +
###########################################################
# TEST CELL
###########################################################
from test_helper import Test
# TEST accuracy values
Test.assertEquals(np.round(acc_tree, 2), 0.95 , 'incorrect result: The value of C_opt is uncorrect')
# -
# #### ** 1.2: Analyzing the results**
#
# Here, let us analyze the structure of the trained tree. For this purpose, you may need install some libraries (in class computers is alredy installed).
#
# Please, follow these steps:
#
# 1. Check that you are working with scikit-learn 0.18.1 You can checked this from a python terminal:
# >> import sklearn
# >> sklearn.\__version\__
#
# If it is necessary, you can upgrade sklearn from a terminal with:
# >> pip install --upgrade scikit-learn
#
# 2. Go to a terminal and run the following command lines:
#
# 2.A. For MAC user:
# >> pip install pydotplus
# >> brew install graphviz
# >> pip install graphviz
#
# Note: to install brew
# >> /usr/bin/ruby -e "$(curl -fsSL https://raw.githubusercontent.com/Homebrew/install/master/install)"
#
# 2.B. For Linux users:
# >> Install Graphviz:
# ** (In Fedora / Centos) >> sudo dnf -y install graphviz
# ** (Ubuntu / Debian) >> sudo apt-get install graphviz
# >> pip install graphviz
# >> pip install pydotplus
#
# You have to restart the notebook kernel to make this changes work. Then, run the following cell to visualize the tree structure.
# Export the fitted tree to Graphviz DOT and render it as an inline PNG.
import pydotplus
from IPython.display import Image
dot_data = tree.export_graphviz(clf_tree, out_file=None,
                         feature_names=iris.feature_names,
                         class_names=iris.target_names,
                         filled=True, rounded=True,
                         special_characters=True)
graph = pydotplus.graph_from_dot_data(dot_data)
Image(graph.create_png())
# Try to use the following [example](http://scikit-learn.org/stable/auto_examples/tree/plot_iris.html#example-tree-plot-iris-py) of the scikit-learn help, to plot the classification regions for different pairs of input features. Modify the necessary code line to plot our training data over the decision regions.
#
# Be careful, this examples retrains different classifiers for each pair of input features; therefore, its solution differs from the above one that we have just computed.
# +
###########################################################
# TODO: Replace <FILL IN> with appropriate code
###########################################################
import matplotlib.pyplot as plt
n_classes = 3
plot_colors = "bry"
plot_step = 0.02
# One subplot per feature pair; a fresh tree is trained on each pair,
# so these regions differ from the 4-feature clf_tree above.
for pairidx, pair in enumerate([[0, 1], [0, 2], [0, 3],
                                [1, 2], [1, 3], [2, 3]]):
    # We only take the two corresponding features
    # NOTE(review): this overwrites the X/y from Part 0 with unscaled data.
    X = iris.data[:, pair]
    y = iris.target
    # Train
    clf = DecisionTreeClassifier().fit(X, y)
    # Plot the decision boundary
    plt.subplot(2, 3, pairidx + 1)
    x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1
    y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1
    # Dense grid over the feature plane; predictions colour the regions.
    xx, yy = np.meshgrid(np.arange(x_min, x_max, plot_step),
                         np.arange(y_min, y_max, plot_step))
    Z = clf.predict(np.c_[xx.ravel(), yy.ravel()])
    Z = Z.reshape(xx.shape)
    cs = plt.contourf(xx, yy, Z, cmap=plt.cm.Paired)
    plt.xlabel(iris.feature_names[pair[0]])
    plt.ylabel(iris.feature_names[pair[1]])
    plt.axis("tight")
    # Plot the training points
    for i, color in zip(range(n_classes), plot_colors):
        idx = np.where(y == i)
        plt.scatter(X[idx, 0], X[idx, 1], c=color, label=iris.target_names[i],
                    cmap=plt.cm.Paired)
    plt.axis("tight")
plt.suptitle("Decision surface of a decision tree using paired features")
plt.legend()
plt.show()
# -
# ### ** Part 2: Random Forest**
#
# #### ** 2.1: Training a Random Forest**
#
# A Random Forest (RF) trains several decision tree classifiers, where each one is trained with different samples and features of the training data, and averages their outputs to improve the final accuracy.
#
# Use the [RandomForestClassifier( )](http://scikit-learn.org/stable/modules/generated/sklearn.ensemble.RandomForestClassifier.html) function to train a RF classifier and select by cross validation the number of trees to use. The remaining parameters, such as the number of subsampled data or features, can be used with their default values. Return the optimal number of trees to be used and the final accuracy of the RF classifier.
# +
###########################################################
# TODO: Replace <FILL IN> with appropriate code
###########################################################
# GridSearchCV lives in sklearn.model_selection as of 0.18 (sklearn.grid_search
# is deprecated) — consistent with the train_test_split import in Part 0.
from sklearn.model_selection import GridSearchCV
from sklearn.ensemble import RandomForestClassifier

# Candidate ensemble sizes (1..9 trees) explored by 10-fold cross-validation.
rang_n_trees=np.arange(1,10)
tuned_parameters = [{'n_estimators': rang_n_trees}]
nfold = 10
clf_RF = RandomForestClassifier()
clf_RF = GridSearchCV(clf_RF, tuned_parameters, cv=nfold)
clf_RF.fit(X_train,Y_train)
# Best CV parameter and test accuracy of the refit model.
n_trees_opt = clf_RF.best_params_['n_estimators']
acc_RF = clf_RF.score(X_test,Y_test)
print("The number of selected trees is " + str(n_trees_opt))
print("The test accuracy of the RF is %2.2f" %(100*acc_RF))
# -
# Run the above code again, do you obtain the same accuracy?
# #### ** 2.2: Obtaining results statistically significant**
#
# Random forest have a random component when the training data are subsampled, so you can obtain a different result for different runnings of the algorithm. In this case, to be able to provide a statistically significant measurement of the performance of the classifier, we need to average the result over a large number of runs.
#
# Complete the following code, to train again the RF classifier, but averaging its test accuracies over 50 runs. Provide its average accuracy and the average number of selected trees (include their standard deviations).
# +
###########################################################
# TODO: Replace <FILL IN> with appropriate code
###########################################################
# Initialize the random generator seed to be able to check test results
np.random.seed(0)
# print() form keeps the cell consistent with the rest of the notebook.
print('This can take some minutes, be patient')
# Create RF classifier object with CV
clf_RF = RandomForestClassifier()
clf_RF = GridSearchCV(clf_RF, tuned_parameters, cv=nfold)
acc_RF_vector=[]
n_trees_vector=[]
# RF subsampling is stochastic, so average test accuracy over 50 re-trainings
# to get a statistically meaningful estimate.
for run in np.arange(50):
    # For each run, train it, compute its accuracy and examine the number of optimal trees
    clf_RF.fit(X_train,Y_train)
    acc = clf_RF.score(X_test,Y_test)
    acc_RF_vector.append(acc)
    n_trees = clf_RF.best_params_['n_estimators']
    n_trees_vector.append(n_trees)
# Compute averaged accuracies and number of used trees
mean_acc_RF = np.mean(acc_RF_vector)
std_acc_RF = np.std(acc_RF_vector)
mean_n_trees = np.mean(n_trees_vector)
std_n_trees = np.std(n_trees_vector)
# Print the results
print('Averaged accuracy for RF classifier is %2.2f +/- %2.2f '%(100*mean_acc_RF, 100*std_acc_RF))
print('Averaged number of selected trees is %2.2f +/- %2.2f '%(mean_n_trees, std_n_trees))
# +
###########################################################
# TEST CELL
###########################################################
from test_helper import Test
# TEST accuracy values
# Expected values assume the np.random.seed(0) set in the previous cell.
Test.assertEquals(np.round(mean_acc_RF, 2), 0.91 , 'incorrect result: The value of mean_acc_RF is uncorrect')
Test.assertEquals(np.round(std_acc_RF, 4), 0.034 , 'incorrect result: The value of std_acc_RF is uncorrect')
Test.assertEquals(np.round(mean_n_trees, 2), 3.40 , 'incorrect result: The value of mean_n_trees is uncorrect')
Test.assertEquals(np.round(std_n_trees, 2), 2.42 , 'incorrect result: The value of std_n_trees is uncorrect')
# -
# ### ** Part 3: Ensembles**
# The goal of ensemble methods is to combine the predictions of several base estimators or learners to obtain a classifier of improved performance. We are going to work with two ensemble methods:
#
# * Bagging methods: their driving principle is to build several estimators with diversity among them and then to average their predictions.
# * Boosting methods: in this case, base estimators are built sequentially forcing new learners to pay more attention to samples missclassified by previous learners.
#
# #### ** 3.1. Bagging methods**
#
# Here, to implement bagged classifiers, we are going to use [BaggingClassifier( )](http://scikit-learn.org/stable/modules/generated/sklearn.ensemble.BaggingClassifier.html#sklearn.ensemble.BaggingClassifier) object which includes different degrees of freedom in the learners design: with or without samples replacement, selecting random subsets of features instead of samples or selecting subsets of both samples and features.
#
# For the sake of simplicity, we are going to use as base learner a decision stump (i.e., a decision tree with one depth level). Note that in the case of using decision trees as learners, the resulting ensemble results in a random forest.
#
# Complete the following code to train a ensemble of bagged decision stumps. Set max\_samples (percentage of training data used to train each learner) and max\_features parameters (percentage of input features used to train each learner) to 0.5, and fix to 10 the number of learners used.
# +
###########################################################
# TODO: Replace <FILL IN> with appropriate code
###########################################################
from sklearn.ensemble import BaggingClassifier
from sklearn import tree
np.random.seed(0)
# Base learner: decision stump (tree of depth 1).
base_learner = tree.DecisionTreeClassifier(max_depth=1)
# Each of the 10 stumps sees a random 50% of samples and 50% of features.
bagging = BaggingClassifier(base_learner, max_samples=0.5, max_features=0.5, n_estimators=10)
bagging.fit(X_train,Y_train)
acc_test = bagging.score(X_test,Y_test)
print('Accuracy of bagged ensemble is %2.2f '%(100*acc_test))
# +
###########################################################
# TEST CELL
###########################################################
from test_helper import Test
# TEST accuracy values
Test.assertEquals(np.round(acc_test, 2), 0.88 , 'incorrect result: The value final of acc_test is uncorrect')
# -
# Analyze the final ensemble performance according to the number of learners. Average the result over 20 or more different runs to obtain statically significant results (note that the above accuracy change if we hadn't set the seed and we run it several times).
# +
###########################################################
# TODO: Replace <FILL IN> with appropriate code
###########################################################
# Initialize the random generator seed to test results
np.random.seed(0)
acc_test_evol = []
rang_n_learners = range(1,50,2)
# For each ensemble size, average test accuracy over 50 runs to smooth out
# the randomness of bagging's sample/feature subsampling.
for n_learners in rang_n_learners:
    acc_test_run=[]
    for run in range(50):
        bagging = BaggingClassifier(base_learner, max_samples=0.5, max_features=0.5, n_estimators=n_learners)
        bagging.fit(X_train,Y_train)
        acc = bagging.score(X_test,Y_test)
        acc_test_run.append(acc)
    acc_test_evol.append(np.mean(acc_test_run))
# Plotting results
plt.figure()
plt.plot(rang_n_learners,acc_test_evol)
plt.xlabel('Number of learners')
plt.ylabel('Accuracy')
plt.title('Evolution of the bagged ensemble accuracy with the number of learners ')
plt.show()
# +
###########################################################
# TEST CELL
###########################################################
from test_helper import Test
# TEST accuracy values
Test.assertEquals(np.round(acc_test_evol[-1], 2), 0.93 , 'incorrect result: The value final of acc_test_evol is uncorrect')
# -
# #### ** 3.2. Adaboost**
#
# To train an AdaBoost classifier, scikit-learn provides [AdaBoostClassifier()](http://scikit-learn.org/stable/modules/generated/sklearn.ensemble.AdaBoostClassifier.html#sklearn.ensemble.AdaBoostClassifier) method which includes two versions of the Adaboost algorithm:
# * Discrete Adaboost: the learners' outputs are discretized (they provide an estimation of the labels).
# * Real Adaboost: the learners' outputs are real values (they are the soft-outputs or the class probabilities).
#
# As in previous subsection, use a decision stump as base learner. Fix to 50 the number of learners and compare the results of both approaches: Discrete Adaboost (set algorithm parameter to 'SAMME') and Real Adaboost (algorithm='SAMME.R').
# +
###########################################################
# TODO: Replace <FILL IN> with appropriate code
###########################################################
# Initialize the random generator seed to test results
np.random.seed(0)
from sklearn.ensemble import AdaBoostClassifier
base_learner = tree.DecisionTreeClassifier(max_depth=1)
# Train a discrete Adaboost classifier and obtain its accuracy
# 'SAMME' uses the learners' hard label predictions.
AB_D = AdaBoostClassifier(base_learner,n_estimators=50,algorithm='SAMME')
AB_D.fit(X_train,Y_train)
acc_AB_D = AB_D.score(X_test,Y_test)
# Train a real Adaboost classifier and obtain its accuracy
# 'SAMME.R' uses the learners' class probability estimates.
AB_R = AdaBoostClassifier(base_learner,n_estimators=50,algorithm='SAMME.R')
AB_R.fit(X_train,Y_train)
acc_AB_R = AB_R.score(X_test,Y_test)
print('Accuracy of discrete adaboost ensemble is %2.2f '%(100*acc_AB_D))
print('Accuracy of real adaboost ensemble is %2.2f '%(100*acc_AB_R))
# +
###########################################################
# TEST CELL
###########################################################
from test_helper import Test
# TEST accuracy values
Test.assertEquals(np.round(acc_AB_D, 2), 0.95 , 'incorrect result: The value of acc_AB_D is uncorrect')
Test.assertEquals(np.round(acc_AB_R, 2), 0.88 , 'incorrect result: The value of acc_AB_R is uncorrect')
# -
# Unlike BaggingClassifier() method, AdaBoostClassifier() let you analyze the evolution of error without having to train the ensemble for different number of learners. For this task, you can use the classifier method .staged_score() which returns the evolution of the ensemble accuracy. Note that it returns this information with a generator object, so you have to iterate over it to access to each element.
#
# The following code lines let you plot the evolution of the ensemble accuracy (over the test data) for both discrete and real Adaboost approaches.
# +
# staged_score yields the ensemble's accuracy after each boosting iteration
# (it returns a generator, hence the list comprehensions).
acc_AB_D_evol=[acc for acc in AB_D.staged_score(X_test, Y_test)]
acc_AB_R_evol=[acc for acc in AB_R.staged_score(X_test, Y_test)]
# Plotting results
rang_n_learners=np.arange(50)+1
plt.figure()
plt.subplot(211)
plt.plot(rang_n_learners,acc_AB_D_evol)
plt.xlabel('Number of learners')
plt.ylabel('Accuracy')
plt.title('Discrete AB accuracy')
plt.subplot(212)
plt.plot(rang_n_learners,acc_AB_R_evol)
plt.xlabel('Number of learners')
plt.ylabel('Accuracy')
plt.title('Real AB accuracy')
plt.show()
# -
# If you want, you can check the following scikit-learn [example](http://scikit-learn.org/stable/auto_examples/ensemble/plot_forest_iris.html) where the performance of different ensembles over the Iris dataset is analyzed.
| Classification/Machine Learning Classifiers II.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # if, elif, else Statements
#
# <code>if</code> Statements in Python allows us to tell the computer to perform alternative actions based on a certain set of results.
#
# We can then expand the idea further with <code>elif</code> and <code>else</code> statements.
# if case1:
# perform action1
# elif case2:
# perform action2
# else:
# perform action3
# The simplest case: the condition is the literal True, so the body always runs.
if True:
    print('It was true!')
# +
# With a falsy condition, control falls through to the else branch.
x = False
if x:
    print('x was True!')
else:
    print('I will be printed in any case where x is not true')
# -
# ### Multiple Branches
# +
# Branches are tested top-to-bottom; only the first matching branch runs.
loc = 'Bank'
if loc == 'Auto Shop':
    print('Welcome to the Auto Shop!')
elif loc == 'Bank':
    print('Welcome to the bank!')
else:
    print('Where are you?')
# -
# ## Indentation
#
# It is important to keep a good understanding of how indentation works in Python to maintain the structure and order of your code.
# ## Ternary operator expression
#
# That's more specifically a ternary operator expression than an if-then, here's the python syntax
#
#
# ```python
# >>> 1. value_when_true if condition else value_when_false
# >>> 2. [value_false, value_true][<test>]
# ```
age = 15
# Conditional expression (ternary): nested forms evaluate left-to-right.
print('kid' if age < 13 else 'teenager' if age < 18 else 'adult')
# Dict lookup keyed on a boolean condition.
# (Removed the stray ">>> " REPL prompt, which was a SyntaxError when this
# cell was actually executed.)
print({True: 'kid', False: 'adult'}[age < 20])
# List indexing: False -> index 0 -> 'adult', True -> index 1 -> 'kid'.
print(['adult','kid'][age<20])
| 1.Chapter-Python/2-Python_Basis/courses/11-if, elif, and else Statements.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import pandas as pd
import numpy as np
import matplotlib.dates as md
import matplotlib.pyplot as plt
# %matplotlib inline
dataset = pd.read_csv("data/training_set_VU_DM.csv")
dataset.columns
# Focus on one hotel (prop_id 104517), single-room searches, US visitors
# (country id 219), keeping only the columns needed for the price analysis.
df = dataset.loc[dataset["prop_id"] == 104517]
df = df.loc[df["srch_room_count"] == 1]
df = df.loc[df["visitor_location_country_id"] == 219]
df = df[['date_time', 'price_usd', 'srch_booking_window', 'srch_saturday_night_bool']]
df
df["price_usd"].describe()
# Inspect the extreme 5584 USD price, then drop it as an outlier.
dataset.loc[(dataset['price_usd'] == 5584) & (dataset['visitor_location_country_id'] == 219)]
df = df.loc[df['price_usd'] < 5584]
df
df.plot(x='date_time', y = 'price_usd', figsize = (15,5))
plt.xlabel('Date time')
plt.ylabel('Price in USD')
plt.title('Time Series of room price by data time')
# Split prices by whether the search included a Saturday night stay.
t = df.loc[df['srch_saturday_night_bool'] == 0, 'price_usd']
f = df.loc[df['srch_saturday_night_bool'] == 1, 'price_usd']
plt.figure(figsize=(15, 5))
plt.hist(t, bins = 50, alpha=0.5, label='Search Non-Sat Night')
plt.hist(f, bins = 50, alpha=0.5, label='Search Sat Night')
plt.legend(loc='upper right')
plt.xlabel('Price\n Price is more stable and lower when searching Non-Saturday night and price goes up when searching Saturday night', fontsize = 18)
plt.ylabel('Count', fontsize = 18)
plt.title("Price Comperision between Non-Saturday Night vs Saturday Night")
plt.show()
# +
# Same comparison as side-by-side histograms.
df_class0 = df.loc[df['srch_saturday_night_bool'] == 0, 'price_usd']
df_class1 = df.loc[df['srch_saturday_night_bool'] == 1, 'price_usd']
fig, axs = plt.subplots(1,2, figsize= (20,5))
df_class0.hist(ax=axs[0], bins=30)
df_class1.hist(ax=axs[1], bins=30)
axs[0].set_title("Non Saturday Night")
axs[1].set_title("Saturday Night")
# -
| assignment02/Assignment_2_eda.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Importing IPython Notebooks as Modules
# It is a common problem that people want to import code from IPython Notebooks.
# This is made difficult by the fact that Notebooks are not plain Python files,
# and thus cannot be imported by the regular Python machinery.
#
# Fortunately, Python provides some fairly sophisticated [hooks](http://www.python.org/dev/peps/pep-0302/) into the import machinery,
# so we can actually make IPython notebooks importable without much difficulty,
# and only using public APIs.
# + jupyter={"outputs_hidden": false}
import io, os, sys, types
# + jupyter={"outputs_hidden": false}
import nbformat
from IPython import get_ipython
from IPython.core.interactiveshell import InteractiveShell
# -
# Import hooks typically take the form of two objects:
#
# 1. a Module **Loader**, which takes a module name (e.g. `'IPython.display'`), and returns a Module
# 2. a Module **Finder**, which figures out whether a module might exist, and tells Python what **Loader** to use
# + jupyter={"outputs_hidden": false}
def find_notebook(fullname, path=None):
    """find a notebook, given its fully qualified name and an optional path

    This turns "foo.bar" into "foo/bar.ipynb"
    and tries turning "Foo_Bar" into "Foo Bar" if Foo_Bar
    does not exist. Returns None when no matching file is found.
    """
    # Only the final dotted component names the notebook file.
    name = fullname.rsplit('.', 1)[-1]
    search_dirs = path if path else ['']
    for directory in search_dirs:
        candidate = os.path.join(directory, name + ".ipynb")
        if os.path.isfile(candidate):
            return candidate
        # Fall back to spaces: let `import Notebook_Name`
        # find "Notebook Name.ipynb".
        spaced = candidate.replace("_", " ")
        if os.path.isfile(spaced):
            return spaced
# -
# ## Notebook Loader
# Here we have our Notebook Loader.
# It's actually quite simple - once we figure out the filename of the module,
# all it does is:
#
# 1. load the notebook document into memory
# 2. create an empty Module
# 3. execute every cell in the Module namespace
#
# Since IPython cells can have extended syntax,
# the IPython transform is applied to turn each of these cells into their pure-Python counterparts before executing them.
# If all of your notebook cells are pure-Python,
# this step is unnecessary.
# + jupyter={"outputs_hidden": false}
class NotebookLoader(object):
    """Module Loader for IPython Notebooks.

    Executes each code cell of a notebook inside a fresh module namespace.
    Cells are first run through IPython's input transformer so that
    IPython-only syntax (magics, shell escapes) still works.
    """
    def __init__(self, path=None):
        # Singleton InteractiveShell, needed to transform cell source.
        self.shell = InteractiveShell.instance()
        # Optional list of directories to search for notebooks.
        self.path = path
    def load_module(self, fullname):
        """import a notebook as a module"""
        path = find_notebook(fullname, self.path)
        print ("importing notebook from %s" % path)
        # load the notebook object
        nb = nbformat.read(path, as_version=4)
        # create the module and add it to sys.modules
        # if name in sys.modules:
        #    return sys.modules[name]
        mod = types.ModuleType(fullname)
        mod.__file__ = path
        mod.__loader__ = self
        mod.__dict__['get_ipython'] = get_ipython
        # Register before executing so self-/circular imports resolve.
        sys.modules[fullname] = mod
        # extra work to ensure that magics that would affect the user_ns
        # actually affect the notebook module's ns
        save_user_ns = self.shell.user_ns
        self.shell.user_ns = mod.__dict__
        try:
            for cell in nb.cells:
                if cell.cell_type == 'code':
                    # transform the input to executable Python
                    code = self.shell.input_transformer_manager.transform_cell(cell.source)
                    # run the code in the module's namespace
                    exec(code, mod.__dict__)
        finally:
            # Always restore the shell's original user namespace,
            # even if a cell raised.
            self.shell.user_ns = save_user_ns
        return mod
# -
# ## The Module Finder
# The finder is a simple object that tells you whether a name can be imported,
# and returns the appropriate loader.
# All this one does is check, when you do:
#
# ```python
# import mynotebook
# ```
#
# it checks whether `mynotebook.ipynb` exists.
# If a notebook is found, then it returns a NotebookLoader.
#
# Any extra logic is just for resolving paths within packages.
# + jupyter={"outputs_hidden": false}
class NotebookFinder(object):
    """Module finder that locates IPython Notebooks"""
    def __init__(self):
        # Cache of NotebookLoader instances, keyed by search path.
        self.loaders = {}
    def find_module(self, fullname, path=None):
        # Not a notebook we can locate -> defer to the next finder.
        if not find_notebook(fullname, path):
            return
        # Lists are not hashable, so collapse the path into a string key.
        cache_key = os.path.sep.join(path) if path else path
        if cache_key not in self.loaders:
            self.loaders[cache_key] = NotebookLoader(path)
        return self.loaders[cache_key]
# -
# ## Register the hook
# Now we register the `NotebookFinder` with `sys.meta_path`
# + jupyter={"outputs_hidden": false}
sys.meta_path.append(NotebookFinder())
# -
# After this point, my notebooks should be importable.
#
# Let's look at what we have in the CWD:
# + jupyter={"outputs_hidden": false}
# ls nbpackage
# -
# So I should be able to `import nbimp.mynotebook`.
#
# ### Aside: displaying notebooks
# Here is some simple code to display the contents of a notebook
# with syntax highlighting, etc.
# + jupyter={"outputs_hidden": false}
from pygments import highlight
from pygments.lexers import PythonLexer
from pygments.formatters import HtmlFormatter
from IPython.display import display, HTML
# Pygments objects used below to render notebook code cells as HTML.
formatter = HtmlFormatter()
lexer = PythonLexer()
# publish the CSS for pygments highlighting
display(HTML("""
<style type='text/css'>
%s
</style>
""" % formatter.get_style_defs()
))
# + jupyter={"outputs_hidden": false}
def show_notebook(fname):
    """display a short summary of the cells of a notebook

    Code cells are rendered with pygments syntax highlighting; all other
    cell types (markdown, raw) are shown verbatim inside <pre> tags.
    """
    nb = nbformat.read(fname, as_version=4)
    html = []
    for cell in nb.cells:
        html.append("<h4>%s cell</h4>" % cell.cell_type)
        if cell.cell_type == 'code':
            html.append(highlight(cell.source, lexer, formatter))
        else:
            html.append("<pre>%s</pre>" % cell.source)
    display(HTML('\n'.join(html)))
# Preview the demo notebook before importing it.
show_notebook(os.path.join("nbpackage", "mynotebook.ipynb"))
# -
# So my notebook has a heading cell and some code cells,
# one of which contains some IPython syntax.
#
# Let's see what happens when we import it
# + jupyter={"outputs_hidden": false}
from nbpackage import mynotebook
# -
# Hooray, it imported! Does it work?
# + jupyter={"outputs_hidden": false}
mynotebook.foo()
# -
# Hooray again!
#
# Even the function that contains IPython syntax works:
# + jupyter={"outputs_hidden": false}
mynotebook.has_ip_syntax()
# -
# ## Notebooks in packages
# We also have a notebook inside the `nb` package,
# so let's make sure that works as well.
# + jupyter={"outputs_hidden": false}
# ls nbpackage/nbs
# -
# Note that the `__init__.py` is necessary for `nb` to be considered a package,
# just like usual.
# + jupyter={"outputs_hidden": false}
show_notebook(os.path.join("nbpackage", "nbs", "other.ipynb"))
# + jupyter={"outputs_hidden": false}
from nbpackage.nbs import other
other.bar(5)
# -
# So now we have importable notebooks, from both the local directory and inside packages.
#
# I can even put a notebook inside IPython, to further demonstrate that this is working properly:
# + jupyter={"outputs_hidden": false}
import shutil
from IPython.paths import get_ipython_package_dir
# Copy the demo notebook into IPython's own `utils` package directory to
# demonstrate that notebook imports also work from installed packages.
utils = os.path.join(get_ipython_package_dir(), 'utils')
shutil.copy(os.path.join("nbpackage", "mynotebook.ipynb"),
            os.path.join(utils, "inside_ipython.ipynb")
)
# -
# and import the notebook from `IPython.utils`
# + jupyter={"outputs_hidden": false}
from IPython.utils import inside_ipython
inside_ipython.whatsmyname()
# -
# This approach can even import functions and classes that are defined in a notebook using the `%%cython` magic.
| notebooks/ipython-master/examples/IPython Kernel/Importing Notebooks.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .jl
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Julia 1.5.3
# language: julia
# name: julia-1.5
# ---
# # Recitation 2 - Formulations
#
# In today's recitation, we will build different formulations of the same problem, and explore the computational implications.
#
# First, we load packages.
# +
using JuMP, Gurobi
using DataFrames, CSV, Combinatorics
using LinearAlgebra
# -
import Pkg; Pkg.add("Combinatorics")
# ## Part 1: Facility location
#
# ### 1.1 Problem setup
#
# We are now ready to formulate our first integer optimization problem. The facility location problem takes as inputs two sets and two parameters:
# - Sets:
# - facilities $j\in \mathcal{J}=\{1,\ldots,n\}$
# - customers $i\in \mathcal{I}=\{1, \ldots, m\}$
# - Parameters:
# - $d_{ij}$: distance from customer $i$ to facility $j$
# - $c_j$: cost of facility $j$
#
# Let's define the size of the problem.
n = 50
m = 500
# In real-life, we would have data on the location of facilities and customers. For simplicity, we assume that the facilities and customers are uniformly sampled over the unit square $[0,1]^2$. The first column of the following arrays denotes the x coordinate, and the second column denotes the y coordinate.
facilities = rand(n,2);
customers = rand(m,2); # notice we add a semicolon to suppress Jupyter output
# We can now define the distance matrix:
dist = [LinearAlgebra.norm(customers[i, :] .- facilities[j, :]) for i=1:m, j=1:n];
@show size(dist);
# Finally, we sample a vector of facility costs uniformly between 10 and 20:
c = rand(n)*10 .+ 10;
# ### 1.2 Formulations
#
# We covered two formulations of the problem in class, one with many constraints but a tight formulation, the other with fewer constraints but a less tight formulation. Time to implement!
#
# #### Formulation 1
#
# $$\min \sum_{j=1}^nc_j y_j + \sum_{i=1}^m\sum_{j=1}^nd_{ij}x_{ij}$$
# $$\text{subject to}$$
# $$\sum_{j=1}^n x_{ij}=1 \quad \forall i\in[m]$$
# $$x_{ij}\le y_j \quad\forall i\in[m], j\in[n]$$
# $$x_{ij}\in\{0,1\}\quad\forall i\in[m], j\in[n]$$
# $$y_j \in \{0,1\}\quad \forall j\in[n]$$
"""
Build facility location model 1 (tight formulation).

Uses one linking constraint `x[i, j] <= y[j]` per customer-facility pair,
which gives a tighter LP relaxation than the aggregated big-M version.
Returns `(model, x, y)`.
"""
function facility_model_1(distance::Matrix, cost::Vector)
    # extract problem dimensions from distance matrix and verify coherence of input data
    m, n = size(distance)
    @assert length(cost) == n
    model = Model(Gurobi.Optimizer)
    set_optimizer_attribute(model, "TimeLimit", 1800)
    # VARIABLES
    # Whether to open each facility
    @variable(model, y[1:n], Bin)
    # Whether to serve a particular customer from a particular facility
    @variable(model, x[1:m, 1:n], Bin)
    # CONSTRAINTS
    # Each customer is assigned to exactly one facility
    @constraint(
        model, serve_every_customer[i = 1:m],
        sum(x[i, j] for j = 1:n) == 1
    )
    # A customer can only be served from an open facility (disaggregated)
    @constraint(
        model, only_serve_from_open_facility[i = 1:m, j=1:n],
        x[i, j] <= y[j]
    )
    # OBJECTIVE
    # Fixed opening cost plus total assignment distance
    @objective(
        model, Min, sum(cost[j] * y[j] for j = 1:n) + sum(distance[i, j] * x[i, j] for i=1:m, j=1:n)
    )
    return model, x, y
end
# Now we can call our function to build the model with our sampled data, then solve and report the elapsed time.
buildtime1 = @elapsed model1, x1, y1 = facility_model_1(dist, c);
solvetime1 = @elapsed optimize!(model1)
@show buildtime1
@show solvetime1
# #### Formulation 2
#
# $$\min \sum_{j=1}^nc_j y_j + \sum_{i=1}^m\sum_{j=1}^nd_{ij}x_{ij}$$
# $$\text{subject to}$$
# $$\sum_{j=1}^n x_{ij}=1 \quad \forall i\in[m]$$
# $$\sum_{i=1}^m x_{ij}\le my_j \quad\forall j\in[n]$$
# $$x_{ij}\in\{0,1\}\quad\forall i\in[m], j\in[n]$$
# $$y_j \in \{0,1\}\quad \forall j\in[n]$$
"""
Build facility location model 2 (aggregated big-M formulation).

Note: the original docstring said "model 1" (copy-paste slip). This variant
uses a single aggregated constraint `sum_i x[i, j] <= m * y[j]` per facility:
fewer constraints, but a weaker LP relaxation than model 1.
Returns `(model, x, y)`.
"""
function facility_model_2(distance::Matrix, cost::Vector)
    # extract problem dimensions from distance matrix and verify coherence of input data
    m, n = size(distance)
    @assert length(cost) == n
    model = Model(Gurobi.Optimizer)
    set_optimizer_attribute(model, "TimeLimit", 1800)
    # VARIABLES
    # Whether to open each facility
    @variable(model, y[1:n], Bin)
    # Whether to serve a particular customer from a particular facility
    @variable(model, x[1:m, 1:n], Bin)
    # CONSTRAINTS
    # Each customer is assigned to exactly one facility
    @constraint(
        model, serve_every_customer[i = 1:m],
        sum(x[i, j] for j = 1:n) == 1
    )
    # Aggregated big-M linking: a closed facility serves nobody
    @constraint(
        model, only_serve_from_open_facility[j=1:n],
        sum(x[i, j] for i=1:m) <= m * y[j]
    )
    # OBJECTIVE
    @objective(
        model, Min, sum(cost[j] * y[j] for j = 1:n) + sum(distance[i, j] * x[i, j] for i=1:m, j=1:n)
    )
    return model, x, y
end
buildtime2 = @elapsed model2, x2, y2 = facility_model_2(dist, c);
solvetime2 = @elapsed optimize!(model2)
@show buildtime2
@show solvetime2
# ## Part 2: TSP
#
# In the traveling salesman problem, we have $n$ locations, or "cities", indexed by $[n]=\{1,\ldots,n\}$ with $d_{ij}$ denoting the distance between location $i$ and location $j$.
#
# The goal is to find a tour that visits each location exactly once, while minimizing total distance traveled.
#
# ### 2.1 Problem setup
#
# We have a few TSP instances saved in the `tsp` directory. The data are stored as ASCII text files, which we can read as follows.
# Read the raw TSPLIB file and peek at the first header lines.
file = open("tsp/berlin52.tsp");
data = readlines(file)
close(file)  # BUG FIX: the file handle was never closed in this cell
data[1:10]
# The first few lines describe the file and the problem type, and the following lines list the (x,y) coordinates of each point in a grid. We would like to extract the coordinates and define a distance matrix. The function below is one way to do this, but there are many others.
"""
    get_distances(filename)

Parse a TSPLIB-format file (EUC_2D metric only) and return the n x n
Euclidean distance matrix between all node pairs.
"""
function get_distances(filename)
    file = open(filename)
    data = readlines(file)
    close(file)
    # locate the header and coordinate sections of the TSPLIB file
    dimension_line = findfirst(x -> occursin("DIMENSION", x), data)
    metric_line = findfirst(x -> occursin("EDGE_WEIGHT_TYPE", x), data)
    first_data_line = findfirst(x -> occursin("NODE_COORD_SECTION", x), data) + 1
    last_data_line = findfirst(x -> occursin(r"EOF", x), data) - 1
    # extract problem dimension
    n = parse(Int64, match(r"[0-9]+", data[dimension_line]).match)
    # only the 2-D Euclidean metric is supported
    occursin("EUC_2D", data[metric_line]) || error("Unsupported metric type")
    # read the (index, x, y) coordinate rows
    coords = zeros(n, 2)
    for line in data[first_data_line:last_data_line]
        temp = split(line)
        # BUG FIX: the index was parsed twice; parse once and reuse it.
        idx = parse(Int64, temp[1])
        coords[idx, :] = [parse(Float64, temp[2]) parse(Float64, temp[3])]
    end
    # pairwise Euclidean distances
    distances = [norm(coords[i, :] .- coords[j, :]) for i = 1:n, j = 1:n]
    return distances
end

# Backward-compatible alias: later cells call `getDistances(...)`, which was
# otherwise undefined (the function itself is named `get_distances`).
const getDistances = get_distances
# We can now use our shiny new `get_distances` function to convert a TSP text file to a distance matrix. We can visually check the matrix is 52x52, is symmetric, has nonnegative values, and zeros on the diagonal.
get_distances("tsp/berlin52.tsp")
# ### 2.2 Formulations
#
# #### Basic formulation
#
# $$\begin{align}
# \min\quad & \sum_{i=1}^n\sum_{j=1}^n d_{ij}x_{ij}\\
# \text{s.t.}\quad & \sum_{i=1}^n x_{ij}=1 & \forall j\in[n]\\
# &\sum_{j=1}^nx_{ij} =1 & \forall i\in[n]\\
# &x_{ii}=0 & \forall i\in[n]\\
# & x_{ij}\in\{0,1\}&\forall i, j \in [n]
# \end{align}$$
#
# _Note: even though our distance matrix is symmetric, it is slightly easier to implement a more general formulation where we do not assume this property. If we know our distance matrix is symmetric, we can cut the number of variables in half by only defining $x_{ij}$ for $i < j$ (i.e. not defining both $x_{12}$ and $x_{21}$), but this is beyond the scope of this recitation._
"""
Construct TSP pre-model, without cycle elimination mechanism.

Builds the assignment relaxation of the TSP: binary `x[i, j]` edge
variables with exactly one successor and one predecessor per node,
and no self-loops. Returns `(model, x)`.
"""
function prebuild_tsp(dist::Matrix)
    n = size(dist, 1)
    # Definition of model
    model = Model(Gurobi.Optimizer)
    # Main variable: x_ij=1 if the tour visits i and j in that order, 0 otherwise
    @variable(model, x[1:n, 1:n], Bin)
    # Objective: minimizing the total cost (distance) of the tour
    @objective(model, Min, sum(dist[i, j] * x[i, j] for i = 1:n, j = 1:n))
    # SHARED CONSTRAINTS
    # No self-loops
    @constraint(
        model, no_self_edges[i = 1:n], x[i,i] == 0
    )
    # Every node has exactly one outgoing edge...
    @constraint(
        model, exactly_one_successor[i = 1:n], sum(x[i, j] for j = 1:n) == 1
    )
    # ...and exactly one incoming edge
    @constraint(
        model, exactly_one_predecessor[j = 1:n], sum(x[i, j] for i = 1:n) == 1
    )
    return model, x
end
# #### Issues with the formulation
#
# The formulation we've built so far is nice, but it has one big flaw! It doesn't ensure that every city must be part of the same tour.
#
# For instance, the following solution would be feasible:
#
# 
#
# We need to somehow eliminate these "subtours".
# #### Attempt 1: MTZ formulation (compact)
#
# This formulation eliminates subtours using a $u_i$ variable for each node $i$, which defines the "order" of the visit starting from node 1 ($u_1=1$). More precisely, we impose the following constraints:
#
# $u_1=1$
#
# $2\le u_i \le n \quad\forall i=2, \ldots, n$
#
# $u_j \ge u_i + 1 - (n-1)(1-x_{ij}) \quad \forall i, j \in [n]$
#
# The first two are straightforward. What does the third one mean?
#
# - If $x_{ij}=1$, then we impose that $u_j\ge u_i +1$ (the order of $j$ is at least 1 + the order of $i$). Since all $u_i$ are upper-bounded by $n$, this leads to the order of $j$ being exactly 1 + the order of $i$).
# - If $x_{ij}=0$, then we impose that $u_j\ge u_i + 2 - n$. Since $u_i$ is at most $n$, in the worst case we impose $u_j\ge 2$ which holds as long as $j>1$ (recall our convention that $u_1=1$). So $x_{ij}=0$ effectively "turns off" the constraint.
#
# We implement the MTZ formulation by appending these constraints to the core assignment model.
"Solve TSP using MTZ formulation, return runtime and objective"
function solveMTZ(dist::Matrix; time_limit_seconds::Real=1800)
    # BUG FIX: `prebuild_tsp` accepts only the distance matrix; passing the
    # time limit as a second positional argument raised a MethodError.
    model, x = prebuild_tsp(dist);
    set_optimizer_attribute(model, "TimeLimit", time_limit_seconds)
    n = size(dist,1);
    # Lower bound: 1 for node 1, 2 for all other nodes
    lb = [1 ; 2*ones(n-1)]
    # Upper bound: 1 for node 1, n for all other nodes
    ub = [1 ; n*ones(n-1)]
    # We define the u variable with lower and upper bounds, ensuring in particular that u_1=1
    @variable(model, lb[i] <= u[i = 1:n] <= ub[i])
    # MTZ subtour-elimination constraints: only binding when x[i, j] = 1
    @constraint(
        model, [i = 2:n, j = 2:n], u[i] - u[j] + 1 <= (n-1) * (1 - x[i, j])
    )
    # We then solve the model and store the runtime
    start = time()
    optimize!(model)
    solvetime = time() - start
    return solvetime, objective_value(model)
end
# Let's try it!
# NOTE(review): these cells call `getDistances`, but the parser above is
# defined as `get_distances` — ensure a `getDistances` binding exists.
dist52 = getDistances("tsp/berlin52.tsp");
solvetime52_MTZ, obj52_MTZ = solveMTZ(dist52);
println("Runtime: $solvetime52_MTZ seconds")
println("Objective: $obj52_MTZ")
# Let's try it on a larger instance now.
dist76 = getDistances("tsp/pr76.tsp");
solvetime76_MTZ, obj76_MTZ = solveMTZ(dist76, time_limit_seconds=60)
println("Runtime: $solvetime76_MTZ seconds")
println("Objective: $obj76_MTZ")
# On 52 cities, we solve in about one second. On 76 cities, we are much slower! Even after 60 seconds, our objective gap is still 6.9%. Clearly this will not scale to hundreds of cities.
#
# #### Attempt 2: combinatorial formulation (cutset)
#
# OK, so MTZ is a bit of a dud (no offense). Can we do better? Yes, provided we're not afraid of large numbers.
#
# *Intuition:* since we seem to dislike subtours so much, let's write a specific constraint for every possible subtour, preventing it from existing.
#
# One way to do this is the following pair of "cutset" constraints:
#
# $$\sum_{i\in S}\sum_{j\notin S} x_{ij}\ge 1 \quad\forall S\subset V, S\neq\emptyset, V$$
#
# $$\sum_{i\notin S}\sum_{j\in S} x_{ij}\ge 1 \quad\forall S\subset V, S\neq\emptyset, V$$
#
# _What's going on here?_ For a subset of nodes $S$, the constraints above ensure that there is at least one edge leaving the subset $S$, and at least another edge entering the subset $S$.
#
# 
# Unfortunately, a direct implementation is simply impossible, even for small-scale instances. Indeed, the number of subsets of $\{1,\ldots,n\}$ is equal to $2^n$. For $n=52$, this means $4.5\cdot 10^{15}$ -- clearly impossible with our computer's memory.
#
# Instead, we will add constraints *only when we need them*. We have two options:
# 1. We will solve the model naively, without subtour elimination constraints, and add constraints that are violated by the incumbent solution.
#
# 2. We can also define *lazy constraints* in a callback function. This is a bit harder, but the idea is to give the constraints to the solver, and the solver will use them as needed through the branch-and-cut process. Specifically, the solver will keep the constraints in a "pool". As solutions are generated (in the branching tree), the solver checks which constraints are violated and adds them to the active formulation. Hence, the name "lazy" constraints because we let the solver use them as needed.
#
# Either way, we need to write a function to find a subtour in a candidate solution. If we were trying to make our code as efficient as possible, our best bet would be to write this function ourselves. In this recitation, we're going to pick an easier route, using the `LightGraphs` package.
using LightGraphs
# The `LightGraphs` package is a way to easily work with graphs in Julia.
#
# Given a solution $\boldsymbol{x}^*$, we can define the _induced_ graph $G(\boldsymbol{x}^*)$ as the graph with one node per TSP node, and an edge between each pair of nodes $(i,j)$ for which $x^*_{ij}=1$.
#
# We know that a graph induced by a feasible solution will have exactly one edge into each node, and one edge out of each node. There is therefore a one-to-one mapping between subtours in the induced TSP graph and connected components of the induced graph.
"""
Given the induced graph as an adjacency list (i.e., next[i] is the next node to visit after node i),
compute all subtours.
Return them as a list of lists of nodes in the same component,
sorted by increasing component size.
"""
function find_subtours(next::Vector{Int})
    n = length(next)
    # Build the directed graph induced by the incumbent solution.
    g = DiGraph(n)
    for i = 1:n
        add_edge!(g, i, next[i])
    end
    # With one in-edge and one out-edge per node, each subtour is exactly
    # one strongly connected component of the induced graph.
    components = strongly_connected_components(g)
    return sort(components, by=length)
end
# Let's try using this function
# NOTE(review): `getDistances` must alias `get_distances` (defined above).
dist52 = getDistances("tsp/berlin52.tsp")
model, x = prebuild_tsp(dist52)
optimize!(model)
# Recover each node's successor from the incumbent (binary) solution.
next = [findfirst(x -> x > 0.5, value.(x[i, :])) for i = 1:size(x, 1)]
find_subtours(next)
# We notice two things here:
#
# 1. We have a lot of subtours of length 2 - this is due to the directed formulation, different from the undirected formulation in lecture.
#
# 2. We have lost some information by using connected components, namely the order of the cycle. But it's ok, because the cutset constraints don't care about the order of the cycle anyway.
#
# We also notice we have quite a few subtours to eliminate. For the subtours of length 2, we could decide to eliminate all of them at once, or to eliminate them as they come up. We can even see which approach is faster!
"""
Solve the TSP using an iterative cutset approach: solve the assignment
relaxation, then repeatedly add cutset constraints for every subtour found
until the incumbent is a single tour (or the time limit is exceeded).
Returns `(solvetime, objective)`.
"""
function solve_iterative(dist::Matrix; time_limit_seconds::Real = 1800,
                         eliminate_length_2::Bool=false,
                         verbose::Bool = true)
    # We first solve the model without any subtour elimination consideration
    model, x = prebuild_tsp(dist)
    n = size(dist,1)
    if eliminate_length_2
        # Optionally forbid all 2-city subtours (i -> j -> i) up front
        @constraint(model, no_length_2[i = 1:n, j = 1:n], x[i, j] + x[j, i] <= 1)
    end
    verbose || set_optimizer_attribute(model, "OutputFlag", 0)
    start = time()
    optimize!(model)
    while true
        # We store the incumbent solution
        next = [findfirst(x -> x > 0.5, value.(x[i, :])) for i = 1:n]
        # Note: checking for >0.5 is conservative (x is binary!) but it avoids numerical errors
        subtours = find_subtours(next)
        println("Found $(length(subtours)) subtours after $(time() - start) seconds")
        if length(subtours) == 1 # only one cycle, the TSP solution
            solvetime = time() - start
            return solvetime, objective_value(model)
        else
            # eliminate each subtour with a pair of cutset constraints
            for subtour in subtours
                @constraint(model, sum(x[i, j] for i=subtour, j=setdiff(1:n, subtour)) >= 1)
                @constraint(model, sum(x[i, j] for i=setdiff(1:n, subtour), j=subtour) >= 1)
            end
        end
        optimize!(model)
        # BUG FIX: `solvetime` was undefined on this early-exit path (it was
        # only assigned in the success branch), raising UndefVarError when
        # the time limit was hit. Compute the elapsed time directly.
        time() - start > time_limit_seconds && return time() - start, objective_value(model)
    end
end
solvetime52_iterative, obj52_iterative = solve_iterative(dist52, verbose=false, eliminate_length_2=false)
println("Runtime: $solvetime52_iterative seconds")
println("Objective: $obj52_iterative")
# Now let's try that 76-city instance. Remember it didn't solve in one minute using the MTZ formulation.
solvetime76_iterative, obj76_iterative = solve_iterative(dist76, verbose=false,
                                                         eliminate_length_2=false)
println("Runtime: $solvetime76_iterative seconds")
println("Objective: $obj76_iterative")
# Let's try to solve even larger instances! This one has 280 locations.
# NOTE(review): `getDistances` must alias `get_distances` (defined above).
dist280 = getDistances("tsp/a280.tsp")
solvetime280_iterative, obj280_iterative = solve_iterative(dist280, verbose=false);
println("Runtime: $solvetime280_iterative seconds")
println("Objective: $obj280_iterative")
| Rec2_code/Recitation-2-Complete.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Session 3 Quiz and Solution
# In Session 3, you loaded Sentinel-2 data into the Sandbox using `load_ard()`. The first section of the exercise showed that `load_ard()` applies a cloud mask to each timestep in the dataset. The second part of the exercise used the same dataset to calculate and plot a cloud-free geomedian, using `xr_geomedian()` and `rgb()`.
# ## Quiz
# If you would like to be awarded a certificate of achievement at the end of the course, we ask that you [complete the quiz](https://docs.google.com/forms/d/e/1FAIpQLSfrW9iPikaKStkWRvp7o0CjmPeXj_xQatR58xQPDPEq_Et0CA/viewform?usp=sf_link). You will need to supply your email address to progress towards the certificate. After you complete the quiz, you can check if your answers were correct by pressing the **View Accuracy** button.
#
# The quiz will ask you to use the notebook you developed for this session's exercise on loading data. If you would like to confirm that your notebook works as expected, you can check it against the solution notebook provided below.
# + raw_mimetype="text/restructuredtext" active=""
# .. note::
# The solution notebook below does not contain the answer to the quiz. Use it to check that you implemented the exercise correctly, then use your exercise notebook to answer the quiz. Accessing the solution notebook will not affect your progression towards the certificate.
# -
# ## Solution notebook
# + raw_mimetype="text/restructuredtext" active=""
# .. note::
# We strongly encourage you to attempt the exercises on the previous two pages before downloading the solution below. This will help you learn how to use the Sandbox independently for your own analyses.
# -
# [Download the solution notebook for the Session 3 exercises](../_static/session_3/Geomedian_composite_solution.ipynb)
#
# To view this notebook on the Sandbox, you will need to first download it to your computer, then upload it to the Sandbox. Follow these instructions:
#
# 1. Download the notebook by clicking the link above.
# 2. On the Sandbox, open the **Training** folder.
# 3. Click the **Upload Files** button as shown below.
#
# <img align="middle" src="../_static/session_2/05_solution_uploadbutton.png" alt="Upload button." width=400>
#
# 4. Select the downloaded notebook using the file browser. Click **OK**.
# 5. The solution notebook will appear in the **Training** folder. Double-click to open it.
| docs/session_3/04_s3_solution.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import numpy as np
import seaborn as sns
import pandas as pd
import matplotlib.pyplot as plt
from pathlib import Path
import toolbox
import fcn
import yaml
import shutil
import sys
import chainer
# %matplotlib inline
sns.set()
# %load_ext autoreload
# Note: this reload all lib at each cell exec, just for convenience
# %autoreload 2
chainer.print_runtime_info()
# +
# Plotting
import skimage.color
import skimage.segmentation
from matplotlib.lines import Line2D
def fill_semantic_seg(img, labels, n_class, masks,
                      captions=None, thickness=1, alpha=0.5):
    """Overlay segmentation masks on a grayscale copy of ``img``.

    Parameters
    ----------
    img : RGB uint8 image.
    labels : class label per mask (kept aligned with ``masks``/``captions``
        during sorting; not otherwise used here).
    n_class : unused; kept for interface compatibility.
    masks : sequence of boolean masks, one per region.
        NOTE(review): assumed to match ``img``'s spatial size — confirm callers.
    captions : optional legend text per mask; defaults to empty strings.
    thickness : unused; kept for interface compatibility.
    alpha : blending factor for the colored overlay.

    Returns
    -------
    (img_viz, legends) : the visualization image and a list of
        (color, caption) legend entries, masks sorted by decreasing area.
    """
    # BUG FIX: the declared default `captions=None` crashed in the zip/sort
    # below; substitute empty captions when none are given.
    if captions is None:
        captions = [""] * len(masks)
    # Sort all by mask size (largest first) so big regions are drawn first
    masks, labels, captions = list(zip(*sorted(
        zip(masks, labels, captions), key=lambda x: np.sum(x[0]), reverse=True)))
    captions = np.asarray(captions).copy()
    masks = np.asarray(masks).copy()
    nb_pixels = masks.shape[-2] * masks.shape[-1]
    # Work on a 3-channel grayscale copy so the colored masks stand out.
    img_viz = img.copy()
    img_viz = cv2.cvtColor(cv2.cvtColor(img_viz, cv2.COLOR_RGB2GRAY), cv2.COLOR_GRAY2RGB)
    cmap_inst = np.array(sns.color_palette(palette="colorblind"))
    legends = []
    for i_box, mask_inst in enumerate(masks):
        # Cycle through the palette; legend shows the region's area share.
        color_inst = cmap_inst[i_box % len(cmap_inst)]
        legends.append((color_inst, "{:5.1%}: {}".format(
            np.sum(mask_inst) / nb_pixels, captions[i_box])))
        color_inst = (color_inst * 255)
        # Alpha-blend the mask color onto the grayscale base.
        img_viz[mask_inst] = (
            img_viz[mask_inst] * (1 - alpha) +
            color_inst * alpha
        )
        # Outline each region in light gray.
        mask_boundary = skimage.segmentation.find_boundaries(
            mask_inst, connectivity=1)
        img_viz[mask_boundary] = [200, 200, 200]
    assert img_viz.dtype == np.uint8
    return img_viz, legends
def plot_semantic_seg(img, semaseg, labelmap, labels_sema, *, ax=None, fig=None):
    """Render the semantic segmentation as a colored overlay with a legend."""
    # Upsample each class mask to the image resolution; nearest-neighbour
    # interpolation keeps the masks binary.
    class_masks = np.array([
        cv2.resize((labelmap == lab).astype(np.uint8),
                   img.shape[1::-1],
                   interpolation=cv2.INTER_NEAREST) > 0
        for lab in labels_sema
    ])
    class_names = ["{}".format(semaseg._classes[lab]) for lab in labels_sema]
    overlay, legends = fill_semantic_seg(
        img=img,
        labels=labels_sema + 1,
        # n_class=len(semaseg._classes) + 1,
        n_class=182,
        captions=class_names,
        masks=class_masks,
        alpha=0.5
    )
    if ax is None:
        fig, ax = plt.subplots(figsize=(12, 8))
    ax.imshow(overlay)
    ax.set_axis_off()
    # Build proxy artists so the legend shows one colored dot per class.
    handles = [Line2D([0], [0], marker='o', color="white", label=text,
                      markerfacecolor=col, markersize=12, alpha=0.5)
               for col, text in legends]
    ax.legend(handles=handles, loc=6, bbox_to_anchor=(1, 0.5), title="Classes sorted by area")
    ax.set_title("Semantic")
    return fig, ax
def plot_instance_seg(img, instaseg, bbox, mask, label, score, semaseg, *, ax=None, fig=None):
    """Draw instance boxes and masks, captioned with score and class name."""
    # Nothing detected: keep the original early-exit behavior (returns None).
    if bbox is None:
        return
    texts = ['{:4.1%}: {}'.format(sc, semaseg._classes[lb]) for lb, sc in zip(label, score)]
    # captions = ['{:4.1%}'.format(s) for l, s in zip(label, score)]
    rendered = toolbox.utils.draw_instance_bboxes(
        img=img,
        bboxes=bbox,
        # labels=label + 1,
        labels=(label + 1) % 82,
        # n_class=len(instaseg._class_names) + 1,
        n_class=182,
        captions=texts,
        masks=mask,
    )
    if ax is None:
        fig, ax = plt.subplots(figsize=(12, 8))
    ax.imshow(rendered)
    ax.set_axis_off()
    ax.set_title("Instance")
    return fig, ax
# +
# %%time
from models import InstanceSeg, SemanticSeg, PanopticSeg
# NOTE(review): the checkpoint/config paths below are hard-coded relative
# paths; these cells only run from a checkout containing these artifacts.
print("Preparing instance segmentation model...", end=" ")
instaseg = InstanceSeg(
    "./2018-12-03_23-24/20181203_232507/params.yaml",
    "./2018-12-03_23-24/20181203_232507/snapshot_model_45000.npz",
    gpu=0,
)
print("Done")
print("Preparing semantic segmentation model...", end=" ")
semaseg = SemanticSeg(
    "./toolbox/deeplab/config/cocostuff164k.yaml",
    "./cocostuff164k_iter100k.pth",
    gpu=0,
)
print("Done")
# BUG FIX: printed message typo "panotpic" -> "panoptic".
print("Preparing panoptic model...", end=" ")
panoseg = PanopticSeg(instaseg, semaseg, thresh=0.7, frac=0.2)
print("Done")
# +
# Load an external image
import skimage
img = skimage.io.imread("./external_test/barrage.jpg")
# img = skimage.io.imread("./external_test/basket_bw.jpg")
# img = skimage.io.imread("./external_test/bride_stars.jpg")
# img = skimage.io.imread("./external_test/fanac.jpg")
# img = skimage.io.imread("./external_test/light_painting.jpg")
# img = skimage.io.imread("./external_test/mountain.jpg")
# + active=""
# # Load an image from val dataset
#
# # stdout capturing somewhere in the dependencies, bring it back
# try:
# ds
# except NameError:
# _old_std = sys.stdout
# ds = toolbox.datasets.COCOInstanceSegmentationDataset("val")
# sys.stdout = _old_std
#
# img, true_bbox, true_labels, true_masks = ds[
# np.random.randint(len(ds))
# ]
# -
# %%time
# segment, RGB = panoseg.predict(img)
bbox, mask, label, score = panoseg._predict_instance(img)
labelmap, labels_sema = panoseg._predict_semantic(img)
# +
import random
import cv2

# Draw one unique colour id per instance and per detected semantic label.
# NOTE(review): random.sample is unseeded, so colours differ on every run;
# cv2 is imported here but not used in this cell.
_ids = random.sample(range(1, 16711422), len(label) + len(labels_sema[labels_sema != -1]))
ids_instance = _ids[:len(label)]
ids_semantic = _ids[len(label):]

# 2x2 grid: input image top-left, instance/semantic overlays on the bottom row.
fig, ax = plt.subplots(figsize=(16, 10), nrows=2, ncols=2, sharex=True, sharey=True)
[a.set_axis_off() for a in ax.ravel()]

ax[0][0].imshow(img)
ax[0][0].set_title("Input")
plot_instance_seg(img, instaseg, bbox, mask, label, score, semaseg, ax=ax[1][0])
plot_semantic_seg(img, semaseg, labelmap, labels_sema, ax=ax[1][1])

fig.tight_layout()
plt.show()
# +
# Render the fused panoptic result: paint semantic regions first, then
# overwrite them with instance masks, and finally map ids to RGB colours.
fig, ax = plt.subplots(figsize=(16, 12), nrows=2, ncols=2, sharex=True, sharey=True)
[a.set_axis_off() for a in ax.ravel()]

canvas = np.zeros(img.shape[:2])

# Semantic first: one unique id per detected semantic label.
for idx, lab in enumerate(labels_sema[labels_sema != -1]):
    canvas[labelmap == lab] = ids_semantic[idx]
ax[0][0].imshow(canvas)
ax[0][0].set_title("Semantic segmentation")  # fixed typo: was "Sematic"

# Instances next: instance masks overwrite the semantic background.
for idx, m in enumerate(mask):
    canvas[m] = ids_instance[idx]
ax[0][1].imshow(canvas)
ax[0][1].set_title("Instance segmentation overlay")

# Convert each unique id on the canvas into an RGB colour.
RGB = np.zeros(img.shape, dtype=np.uint8)
for u in np.unique(canvas):
    r, g, b = panoseg.id_to_color(u)
    RGB[canvas == u, 0] = r
    RGB[canvas == u, 1] = g
    RGB[canvas == u, 2] = b
ax[1][0].imshow(RGB)
ax[1][0].set_title("To 3 channels")
# NOTE(review): ax[1][1] is created but never drawn on.

plt.show()
| Predict_and_visualize_image.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="UoQjqjBOIzfG" colab_type="text"
# # Colaboratory
# + [markdown] id="BcXbc54tbXq7" colab_type="text"
# ## Introduction
# + [markdown] id="fPP3Zw5iV2DP" colab_type="text"
#
#
# Some important links to keep open during the workshop – open these tabs **now**!:
#
# - [TF documentation](https://www.tensorflow.org/api_docs/python/tf) : Use the search box (top right) to get documentation on Tensorflow's rich API.
#
# - [solutions/](https://github.com/tensorflow/workshops/tree/master/extras/amld/notebooks/solutions) : Every notebook in the `exercises/` directory has a corresponding notebook in the `solutions/` directory.
#
# ---
#
# Colaboratory (Colab) is a Jupyter notebook environment which allows you to work with data and code in an interactive manner. You can decide where you want to run your code:
#
# * Using a hosted runtime provided by Google (**default**)
# * Locally using your own machine and resources
#
# Currently, it supports Python 2.7 and Python 3.6 and comes with a set of pre-installed libraries like Tensorflow and Matplotlib but also gives you the option to install more libraries on demand. The resulting notebooks can be shared in a straightforward way.
#
# Caveats:
#
# * The virtual machines used for the runtimes are **ephemeral** so make sure to save your data in a persistent location like locally (downloading), in Google Cloud Storage or Google Drive.
# * The service is free of use but the performance of default runtimes can be insufficient for your purposes.
# * You have the option to select a runtime with GPU or TPU support.
# * "Colaboratory is intended for interactive use. Long-running background computations, particularly on GPUs, may be stopped. [...] We encourage users who wish to run continuous or long-running computations through Colaboratory’s UI to use a local runtime." - See [Colaboratory FAQ](https://research.google.com/colaboratory/faq.html "Colaboratory FAQ")
# + [markdown] id="iDbiROodhaFQ" colab_type="text"
# **Getting started**
#
# 1. Connect to a runtime now by clicking `connect` in the top right corner if you don't already see a green checkmark there.
# 2. To get a better overview you might want to activate the *Table of contents* by clicking on the arrow on the left.
# + [markdown] id="M54Z135xV2DW" colab_type="text"
# ### Important shortcuts
#
# Action | Colab Shortcut | Jupyter Shortcut
# ---|---|---
# Executes current cell | `<CTRL-ENTER>` | `<CTRL-ENTER>`
# Executes current cell and moves to next cell | `<SHIFT-ENTER>` | `<SHIFT-ENTER>`
# Insert cell above | `<CTRL-M> <A>` | `<A>`
# Append cell below | `<CTRL-M> <B>` | `<B>`
# Shows searchable command palette | `<CTRL-SHIFT-P>` | `<CTRL-SHIFT-P>`
# Convert cell to code | `<CTRL-M> <Y>` | `<Y>`
# Convert cell to Markdown | `<CTRL-M> <M>` | `<M>`
# Autocomplete | `<TAB>` | `<TAB>`
# Goes from edit to "command" mode | `<ESC>` | `<ESC>`
# Goes from "command" to edit mode | `<ENTER>` | `<ENTER>`
# Show keyboard shortcuts | `<CTRL-M> <H>` | `<H>`
# <p align="center"><b>Note:</b> On OS X you can use `<COMMAND>` instead of `<CTRL>`</p>
#
# Give it a try!
# + id="iJ4YliVgZNCq" colab_type="code" outputId="29d8f3fa-79ff-4cf0-fbfd-1bbfa3aeda5b" executionInfo={"status": "ok", "timestamp": 1548457331003, "user_tz": 480, "elapsed": 759, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/-eofaMMIfen4/AAAAAAAAAAI/AAAAAAAAABE/dzY58NhFiBE/s64/photo.jpg", "userId": "08860260976100898876"}} colab={"base_uri": "https://localhost:8080/", "height": 34}
# YOUR ACTION REQUIRED:
# Execute this cell first using <CTRL-ENTER> and then using <SHIFT-ENTER>.
# Note the difference in which cell is selected after execution.
print("Hello world!")
# + [markdown] id="_Hlf6HDER7YY" colab_type="text"
# **What to do if you get stuck**
#
# If you should get stuck and the documentation doesn't help you consider using additional help.
# + id="QIKn9TOwV2De" colab_type="code" outputId="05c9e0e9-1329-4afd-89f8-9fbba72eace4" executionInfo={"status": "ok", "timestamp": 1548457331268, "user_tz": 480, "elapsed": 1022, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/-eofaMMIfen4/AAAAAAAAAAI/AAAAAAAAABE/dzY58NhFiBE/s64/photo.jpg", "userId": "08860260976100898876"}} colab={"base_uri": "https://localhost:8080/", "height": 34}
def xor_str(a, b):
    """XOR two strings character-wise, cycling over the shorter one.

    The result has the length of the longer input; each output character
    is chr(ord(x) ^ ord(y)) where x and y are drawn cyclically from a and b.
    """
    length = max(len(a), len(b))
    out_chars = []
    for i in range(length):
        out_chars.append(chr(ord(a[i % len(a)]) ^ ord(b[i % len(b)])))
    return ''.join(out_chars)
# YOUR ACTION REQUIRED:
# Try to find the "correct value" for the variable below...
workshop_secret = 'Tensorflow rocks' #workshop_secret = '(replace me!)'
xor_str(workshop_secret, '\x03\x00\x02\x10\x00\x1f\x03L\x1b\x18\x00\x06\x07\x06K2\x19)*S;\x17\x08\x1f\x00\x05F\x1e\x00\x14K\x115\x16\x07\x10\x1cR1\x03\x1d\x1cS\x1a\x00\x13J')
# Hint: You might want to checkout the ../solutions directory
# (you should already have opened this directory in a browser tab :-)
# + [markdown] id="UPQzxvqbMXqr" colab_type="text"
# ### First steps
# + [markdown] id="rScwW1Q69xJ-" colab_type="text"
# **Tensorflow version**
#
# Make sure you are using the version: **1.12.0** or higher because we tested the notebooks against `1.12.0`
# + id="8F9LEdkdV2DS" colab_type="code" outputId="5dfd9252-c8f3-46c2-f70d-984466c4ccd9" executionInfo={"status": "ok", "timestamp": 1548457332277, "user_tz": 480, "elapsed": 2029, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/-eofaMMIfen4/AAAAAAAAAAI/AAAAAAAAABE/dzY58NhFiBE/s64/photo.jpg", "userId": "08860260976100898876"}} colab={"base_uri": "https://localhost:8080/", "height": 34}
# Include basic dependencies and display the tensorflow version.
import tensorflow as tf
tf.__version__
# + [markdown] id="1uKqdyHFB8NY" colab_type="text"
# **Running shell commands**
#
# You can run shell commands directly in Colab: simply prepend the command with a **!**.
# + id="FY3pksK4V2DY" colab_type="code" outputId="f3b0f63b-f3dd-40aa-b265-116a1434c72c" executionInfo={"status": "ok", "timestamp": 1548457335071, "user_tz": 480, "elapsed": 4822, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/-eofaMMIfen4/AAAAAAAAAAI/AAAAAAAAABE/dzY58NhFiBE/s64/photo.jpg", "userId": "08860260976100898876"}} colab={"base_uri": "https://localhost:8080/", "height": 153}
# Print the current working directory and list all files in it.
# !pwd
# !ls
# + id="8BbKSuKslVaI" colab_type="code" outputId="42b9cd45-695c-4eb1-e8d4-6dac1d347bae" executionInfo={"status": "ok", "timestamp": 1548457338512, "user_tz": 480, "elapsed": 8262, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/-eofaMMIfen4/AAAAAAAAAAI/AAAAAAAAABE/dzY58NhFiBE/s64/photo.jpg", "userId": "08860260976100898876"}} colab={"base_uri": "https://localhost:8080/", "height": 341}
# Especially useful : Install new packages...
# !pip install qrcode
import qrcode
qrcode.make('Colab rocks!')
# + [markdown] id="ywxYXQ-lCc2_" colab_type="text"
# **Autocompletion and docstrings**
#
# Jupyter shows possible completions of partially typed
# commands.
#
# Try it for yourself by displaying all available `tf.` methods that start with `one`.
# + id="a_jmV9woV2Db" colab_type="code" outputId="cee3ab59-7108-44ce-b321-b42dc99596ab" executionInfo={"status": "ok", "timestamp": 1548457338513, "user_tz": 480, "elapsed": 8263, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/-eofaMMIfen4/AAAAAAAAAAI/AAAAAAAAABE/dzY58NhFiBE/s64/photo.jpg", "userId": "08860260976100898876"}} colab={"base_uri": "https://localhost:8080/", "height": 34}
# YOUR ACTION REQUIRED:
# Set the cursor to after tf.one and press <TAB>.
tf.one_hot #tf.one
# + [markdown] id="PuBe6L8WOWuS" colab_type="text"
# In addition, you can also display docstrings to see the function signature and possible parameters.
# + id="hPiM64eaOi2y" colab_type="code" colab={}
# YOUR ACTION REQUIRED:
# Set the cursor inside the () and press <TAB>.
#tf.maximum()
# + [markdown] id="NK1yV3Ye0pbC" colab_type="text"
# Alternatively, you might also inspect function details with docstrings if available by appending a "?".
# + id="P-IF1l5u0MLT" colab_type="code" colab={}
# tf.maximum?
# + [markdown] id="-wBzGaDMaCg2" colab_type="text"
# **Note:** This also works for any other type of object as can be seen below.
# + id="nLXoW6IXZEvP" colab_type="code" colab={}
test_dict = {'key0': 'Tensor', 'key1': 'Flow'}
# test_dict?
# + [markdown] id="_mahgn1AHL61" colab_type="text"
# ## Runtimes
#
# As noted in the introduction above, Colab provides multiple runtimes with different hardware accelerators:
#
# * CPU (default)
# * GPU
# * TPU
#
# which can be selected by choosing the `"Runtime"` tab above and then `"Change runtime type"`.
#
# Please be aware that selecting a new runtime will assign a new virtual machine (VM).
# In general, assume that any changes you make to the VM environment including data storage are **ephemeral**. Particularly, this might require to **execute previous cells again** as their content is unknown to a new runtime otherwise.
#
# Let's take a closer look at one of such provided VMs.
#
# + [markdown] id="whv1zmERgI5v" colab_type="text"
# Once we have been assigned a runtime we can inspect it further.
# + id="AwLSimGKgHFd" colab_type="code" outputId="d727edf0-9795-4cb2-be1a-c071702f4cd7" executionInfo={"status": "ok", "timestamp": 1548457372671, "user_tz": 480, "elapsed": 3088, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/-eofaMMIfen4/AAAAAAAAAAI/AAAAAAAAABE/dzY58NhFiBE/s64/photo.jpg", "userId": "08860260976100898876"}} colab={"base_uri": "https://localhost:8080/", "height": 34}
# Display how long the system has been running.
# !uptime
# + [markdown] id="f69GlgeggRED" colab_type="text"
# As can be seen, the machine has been allocated just very recently for our purposes.
#
# **VM specifications**
# + colab_type="code" id="AAXhttijkq8P" outputId="768e47ef-a24c-4c8a-f9cf-8956940abf5a" executionInfo={"status": "ok", "timestamp": 1548457376853, "user_tz": 480, "elapsed": 7262, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/-eofaMMIfen4/AAAAAAAAAAI/AAAAAAAAABE/dzY58NhFiBE/s64/photo.jpg", "userId": "08860260976100898876"}} colab={"base_uri": "https://localhost:8080/", "height": 802}
# Display available and used memory.
# !free -h
print("-"*70)
# Display the CPU specification.
# !lscpu
print("-"*70)
# Display the GPU specification (if available).
!(nvidia-smi | grep -q "has failed") && echo "No GPU found!" || nvidia-smi
# + [markdown] id="bqCA9-VDIjsu" colab_type="text"
# ## Plotting
#
# The notebook environment also provides options to visualize and interact with data.
#
# We'll take a short look at the plotting/visualization libraries Matplotlib and Altair.
# + [markdown] id="1iJfoQ86AsfO" colab_type="text"
# ### Matplotlib
#
# Matplotlib is one of the most famous Python plotting libraries and can be used to plot results within a cell's output (see [Matplotlib Introduction](https://matplotlib.org/users/intro.html "Matplotlib Introduction")).
#
# Let's try to plot something with it.
# + id="gyk4SEk_V2DW" colab_type="code" outputId="e45ab29c-170e-4f96-c392-627723df2cc1" executionInfo={"status": "ok", "timestamp": 1548457377221, "user_tz": 480, "elapsed": 7623, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/-eofaMMIfen4/AAAAAAAAAAI/AAAAAAAAABE/dzY58NhFiBE/s64/photo.jpg", "userId": "08860260976100898876"}} colab={"base_uri": "https://localhost:8080/", "height": 282}
# Display the Matplotlib outputs within a cell's output.
# %matplotlib inline
import numpy as np
from matplotlib import pyplot

# Create a randomized scatterplot using matplotlib: noisy samples of sin(7x).
# NOTE(review): the RNG is unseeded, so the plot differs on every run.
x = np.random.rand(100).astype(np.float32)
noise = np.random.normal(scale=0.3, size=len(x))
y = np.sin(x * 7) + noise
pyplot.scatter(x, y)
# + [markdown] id="lhUudwcUafIm" colab_type="text"
# ### Altair
#
# Another declarative visualization library for Python is Altair (see [Altair: Declarative Visualization in Python](https://altair-viz.github.io/)).
#
# Try to zoom in/out and to hover over individual data points in the resulting plot below.
# + id="uO3MmbhbIw_t" colab_type="code" outputId="e6f54c95-ddb6-4838-8ccc-7da4df308c72" executionInfo={"status": "ok", "timestamp": 1548457377652, "user_tz": 480, "elapsed": 8042, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/-eofaMMIfen4/AAAAAAAAAAI/AAAAAAAAABE/dzY58NhFiBE/s64/photo.jpg", "userId": "08860260976100898876"}} colab={"base_uri": "https://localhost:8080/", "height": 372}
# Load an example dataset.
from vega_datasets import data
cars = data.cars()
# Plot the dataset, referencing dataframe column names.
import altair as alt
alt.Chart(cars).mark_point().encode(
x='Horsepower',
y='Miles_per_Gallon',
color='Origin',
tooltip=['Name', 'Origin', 'Horsepower', 'Miles_per_Gallon']
).interactive()
# + [markdown] id="cBUraBMAHE0J" colab_type="text"
# ## Notebook Magics
#
# The IPython and Colab environments support built-in magic commands called magics (see: [IPython - Magics](https://ipython.readthedocs.io/en/stable/interactive/magics.html)).
#
# In addition to default Python, these commands might be handy for example when it comes to interacting directly with the VM or the Notebook itself.
#
#
# + [markdown] id="KO1K1UnIfU0u" colab_type="text"
# ### Cell magics
#
# Cell magics define a mode for a complete cell and are prefixed with **%%**.
#
# Examples include:
#
# * **%%bash** or **%%sh**
# * **%%html**
# * **%%javascript**
#
# + id="2qT4YGbOQ9FG" colab_type="code" outputId="a6089b96-76e0-4ea4-bb0f-ee1ad6b86d32" executionInfo={"status": "ok", "timestamp": 1548457377653, "user_tz": 480, "elapsed": 8036, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/-eofaMMIfen4/AAAAAAAAAAI/AAAAAAAAABE/dzY58NhFiBE/s64/photo.jpg", "userId": "08860260976100898876"}} colab={"base_uri": "https://localhost:8080/", "height": 326} language="sh"
# echo "This is a shell script!"
# # List all running VM processes.
# ps -ef
# echo "Done"
# + id="qUSc9sT2JskX" colab_type="code" outputId="958283c3-ecd8-48c4-c783-80e282bae548" executionInfo={"status": "ok", "timestamp": 1548457378016, "user_tz": 480, "elapsed": 8390, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/-eofaMMIfen4/AAAAAAAAAAI/AAAAAAAAABE/dzY58NhFiBE/s64/photo.jpg", "userId": "08860260976100898876"}} colab={"base_uri": "https://localhost:8080/", "height": 38}
# Embed custom HTML directly into a cell's output.
# %%html
<marquee>HTML rocks</marquee>
# + [markdown] id="Uy0j4iJ5d8v3" colab_type="text"
# ### Line magics
#
# You can also make use of line magics which can be inserted anywhere at the beginning of a line inside a cell and need to be prefixed with **%**.
#
# Examples include:
#
#
# * **%time** - display the required time to execute the current line
# * **%cd** - change the current working directory
# * **%pdb** - invoke an interactive Python debugger
# * **%lsmagic** - list all available line magic and cell magic functions
#
#
# + [markdown] id="mrdhMGsyN2Gl" colab_type="text"
# For example, if you want to find out how long one specific line requires to be executed you can just prepend **%time**.
#
# + id="Kz0d1QoINx95" colab_type="code" outputId="e01b54d2-27bf-4077-c0a8-9b75e851d00e" executionInfo={"status": "ok", "timestamp": 1548457378017, "user_tz": 480, "elapsed": 8384, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/-eofaMMIfen4/AAAAAAAAAAI/AAAAAAAAABE/dzY58NhFiBE/s64/photo.jpg", "userId": "08860260976100898876"}} colab={"base_uri": "https://localhost:8080/", "height": 102}
n = 1000000
# %time list1 = [i for i in range(n)]
print("")
# %time list2 = [i for i in range(int(n/2))]
# + [markdown] id="Y_Q3lleVPMVJ" colab_type="text"
# **Note:** Some line magics like **%time** can also be used for complete cells by writing **%%time**.
# + id="LCIrCKs9a1eu" colab_type="code" outputId="d2a8f37e-becf-4192-81f2-fbb906163de6" executionInfo={"status": "ok", "timestamp": 1548457378018, "user_tz": 480, "elapsed": 8378, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/-eofaMMIfen4/AAAAAAAAAAI/AAAAAAAAABE/dzY58NhFiBE/s64/photo.jpg", "userId": "08860260976100898876"}} colab={"base_uri": "https://localhost:8080/", "height": 51}
# %%time
n = 1000000
list1 = [i for i in range(n)]
list2 = [i for i in range(int(n/2))]
# + [markdown] id="W0Uq0G0LrtjG" colab_type="text"
# **Custom line magic**
#
# You can also define your own line/cell magic in the following way.
# + id="Y8bjXSjBJ2oP" colab_type="code" colab={}
# Register a custom magic usable both as a line magic (%mymagic) and as a
# cell magic (%%mymagic). As a line magic, cell_content stays None; as a
# cell magic, it receives the cell body.
from IPython.core.magic import register_line_cell_magic
@register_line_cell_magic
def mymagic(line_content, cell_content=None):
    """Echo the line (and optional cell) content passed to the magic."""
    print('line_content="%s" cell_content="%s"' % (line_content, cell_content))
# + id="lu7Z7VTEKPWh" colab_type="code" outputId="8404ba13-090d-49fc-fab8-f4359f15341e" executionInfo={"status": "ok", "timestamp": 1548457378020, "user_tz": 480, "elapsed": 8367, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/-eofaMMIfen4/AAAAAAAAAAI/AAAAAAAAABE/dzY58NhFiBE/s64/photo.jpg", "userId": "08860260976100898876"}} colab={"base_uri": "https://localhost:8080/", "height": 34}
# %mymagic Howdy Alice!
# + id="GGBRfLm6LTvU" colab_type="code" outputId="7b4671be-0895-4a9a-edae-a92d155739ea" executionInfo={"status": "ok", "timestamp": 1548457378021, "user_tz": 480, "elapsed": 8360, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/-eofaMMIfen4/AAAAAAAAAAI/AAAAAAAAABE/dzY58NhFiBE/s64/photo.jpg", "userId": "08860260976100898876"}} colab={"base_uri": "https://localhost:8080/", "height": 51}
# %%mymagic simple question
Howdy Alice!
how are you?
# + [markdown] id="grr29b3-gCd-" colab_type="text"
# # ----- Optional part -----
# + [markdown] id="BGurTn7rVH_I" colab_type="text"
# ## Data handling
#
# There are multiple ways to provide data to a Colabs's VM environment.
# > **Note:** This section only applies to Colab.
# > Jupyter has a file explorer and other options for data handling.
#
# The options include:
# * Uploading files from the local file system.
# * Connecting to Google Cloud Storage (explained below).
# * Connecting to Google Drive (see: [Snippets: Drive](https://colab.sandbox.google.com/notebooks/snippets/drive.ipynb); will be used in the next Colabs).
# + [markdown] id="Zdb5RpNmxoJl" colab_type="text"
# **Uploading files from the local file system**
# + id="qvxTx9m9xZJ2" colab_type="code" outputId="35527304-8b2d-48b4-f126-ce53e2c48f09" executionInfo={"status": "ok", "timestamp": 1548457414989, "user_tz": 480, "elapsed": 45316, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/-eofaMMIfen4/AAAAAAAAAAI/AAAAAAAAABE/dzY58NhFiBE/s64/photo.jpg", "userId": "08860260976100898876"}} colab={"resources": {"http://localhost:8080/nbextensions/google.colab/files.js": {"data": "<KEY> "ok": true, "headers": [["content-type", "application/javascript"]], "status": 200, "status_text": ""}}, "base_uri": "https://localhost:8080/", "height": 290}
# Snippet as provided in https://colab.research.google.com/notebooks/io.ipynb
# YOUR ACTION REQUIRED:
# Upload any test file to the target system.
from google.colab import files
uploaded = files.upload()
for fn in uploaded.keys():
print('User uploaded file "{name}" with length {length} bytes'.format(
name=fn, length=len(uploaded[fn])))
# The file should now appear in the current working directory.
# !pwd
# !ls -l
# + id="toQxJSVfxjew" colab_type="code" colab={}
# A file can also be downloaded by using:
from google.colab import files
with open('test_file.txt', 'w') as f:
f.write('test content')
files.download('test_file.txt')
# + [markdown] id="swVo-jxCW6KK" colab_type="text"
# **Connecting to Google Cloud Storage**
#
# [Google Cloud Storage](https://cloud.google.com/storage/?hl=de) (GCS) is a cloud file storage service with a RESTful API.
#
# We can utilize it to store our own data or to access data provided by the following identifier:
#
#
# ```
# gs://[BUCKET_NAME]/[OBJECT_NAME]
# ```
#
# We'll use the data provided in **gs://amld-datasets/zoo_img** as can be seen below.
#
# Before we can interact with the cloud environment, we need to grant permissions accordingly (also see [External data: Cloud Storage](https://colab.research.google.com/notebooks/io.ipynb#scrollTo=S7c8WYyQdh5i)).
# + id="1aJ1e7JFVIsQ" colab_type="code" colab={}
from google.colab import auth
auth.authenticate_user()
# + [markdown] id="f6_4yqiOfWKt" colab_type="text"
# List a subset of the contained files using the [gsutil tool](https://cloud.google.com/storage/docs/gsutil?hl=en).
# + id="wJAutwkEWWxg" colab_type="code" outputId="120c7512-c6ed-45f4-fa6e-4469dc52e742" executionInfo={"status": "ok", "timestamp": 1548457444114, "user_tz": 480, "elapsed": 74424, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/-eofaMMIfen4/AAAAAAAAAAI/AAAAAAAAABE/dzY58NhFiBE/s64/photo.jpg", "userId": "08860260976100898876"}} colab={"base_uri": "https://localhost:8080/", "height": 187}
# !gsutil ls gs://amld-datasets/zoo_img | head
# + [markdown] id="d10y2KZpfsed" colab_type="text"
# Conveniently, TensorFlow natively supports multiple file systems such as:
#
# * GCS - Google Cloud Storage
# * HDFS - Hadoop
# * S3 - Amazon Simple Storage
#
# An example for the GCS filesystem can be seen below.
# + id="MDUnLXGXWnO8" colab_type="code" outputId="b4c4b20d-b67c-421d-becb-e75028e1e0a2" executionInfo={"status": "ok", "timestamp": 1548457445571, "user_tz": 480, "elapsed": 75875, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/-eofaMMIfen4/AAAAAAAAAAI/AAAAAAAAABE/dzY58NhFiBE/s64/photo.jpg", "userId": "08860260976100898876"}} colab={"base_uri": "https://localhost:8080/", "height": 187}
# Note: This cell hangs if you forget to call auth.authenticate_user() above.
import tensorflow as tf
tf.gfile.Glob('gs://amld-datasets/zoo_img/*')[:10]
# + [markdown] id="AHhyyTLRHDlH" colab_type="text"
# ## Snippets
#
# Finally, we can take a look at the Snippets support in Colab.
# > If you're using Jupyter please see [Jupyter contrib nbextensions - Snippets menu](https://jupyter-contrib-nbextensions.readthedocs.io/en/latest/nbextensions/snippets_menu/readme.html) as this is not natively supported.
#
# Snippets are a way to quickly "bookmark" pieces of code or text that you might want to insert into specific cells.
#
#
#
#
#
# + id="4Dph99hAr61H" colab_type="code" colab={}
# YOUR ACTION REQUIRED:
# Explore existing snippets by going to the `Code snippets` section.
# You can either select it on the left sidebar next to `Table of contents` tab
# or by pressing `<CTRL><ALT><P>` (or `<COMMAND><OPTION><P>` for OS X).
# + [markdown] id="cNL2UsRBf2zL" colab_type="text"
# ### Create your own snippets
#
#
# + id="t3jbeMqkHlFa" colab_type="code" outputId="8dc5d03c-59d6-404a-fd42-1354bdd0ba5e" executionInfo={"status": "ok", "timestamp": 1548457445575, "user_tz": 480, "elapsed": 75865, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/-eofaMMIfen4/AAAAAAAAAAI/AAAAAAAAABE/dzY58NhFiBE/s64/photo.jpg", "userId": "08860260976100898876"}} colab={"base_uri": "https://localhost:8080/", "height": 85}
print("Example snippets file:\n\t{}".format(
"https://colab.research.google.com/drive/1OFSjEmqC-UC66xs-LR7-xmgkvxYTrAcN"))
# YOUR ACTION REQUIRED:
# Visit the following resulting link to create a small test snippet on your own.
# 1) Add a markdown cell with a title like # TEST SNIPPET
# 2) Add a code cell
print("Create your own snippets in a new Colab at:\n\t{}".format(
"https://colab.research.google.com/notebook#create=true&language=python3"))
# + id="INGeOiyJHF5x" colab_type="code" colab={}
from google.colab import snippets
# YOUR ACTION REQUIRED:
# Register a snippets file by providing the Colab URL below.
# Example:
# snippets.register('https://colab.research.google.com/drive/1OFSjEmqC-UC66xs-LR7-xmgkvxYTrAcN')
# YOUR ACTION REQUIRED: Add the URL to your own snippets file.
#snippets.register()
# + [markdown] id="7Pj410uZf8Aq" colab_type="text"
# Once you've registered the Snippets, they should be available under the `"Code snippets"` tab.
# + [markdown] id="hZUSUu5A1mAD" colab_type="text"
# ## Forms
#
# You can simplify cells by hiding their code and displaying a form instead.
#
#
# **Note:** You can display or hide the code by double clicking the form which might be on the right side.
# + id="2EkpzaQA1u7y" colab_type="code" cellView="form" outputId="4d8ecb49-d874-47e6-b659-27674b2ff877" executionInfo={"status": "ok", "timestamp": 1548457472700, "user_tz": 480, "elapsed": 724, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/-eofaMMIfen4/AAAAAAAAAAI/AAAAAAAAABE/dzY58NhFiBE/s64/photo.jpg", "userId": "08860260976100898876"}} colab={"base_uri": "https://localhost:8080/", "height": 34}
#@title Execute me
# Hidden cell content.
print("Double click the cell to see its content.")
# + id="adKaHZNZ2jZo" colab_type="code" cellView="both" outputId="3d48deae-792f-4e74-d7c1-8e605afc53b4" executionInfo={"status": "ok", "timestamp": 1548457472700, "user_tz": 480, "elapsed": 713, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/-eofaMMIfen4/AAAAAAAAAAI/AAAAAAAAABE/dzY58NhFiBE/s64/photo.jpg", "userId": "08860260976100898876"}} colab={"base_uri": "https://localhost:8080/", "height": 51}
# Form example mostly taken from "Adding form fields" Snippet.
#@title Example form
#@markdown Specify some test data and execute this cell.
string_type = 'test_string' #@param {type: "string"}
slider_value = 145 #@param {type: "slider", min: 100, max: 200}
number = 1339 #@param {type: "number"}
date = '2019-01-26' #@param {type: "date"}
pick_me = "a" #@param ['a', 'b', 'c']
#@markdown ---
print("Submitted data:")
print(string_type, slider_value, number, date, pick_me)
# + [markdown] id="kx_ETyD5r0vJ" colab_type="text"
# ## Interactive debugging
# + [markdown] id="8rXJALYPr-Eg" colab_type="text"
# An example of an IPython tool that you can utilize is the interactive debugger
# provided inside an IPython environment like Colab.
#
# For instance, by using **%pdb on**, you can automatically trigger the debugger on exceptions to further analyze the state.
#
# Some useful debugger commands are:
#
# Description | Command
# ---|---
# **h**(elp) | Display available commands
# **p**(rint) `x` | Show content of object `x`
# **w**(here) | Show current instruction pointer position
# **q**(uit) | Leave the debugger
# + id="sMGORnaE2yJ-" colab_type="code" outputId="63a03e58-33fa-4f8a-b12f-d98676621378" executionInfo={"status": "ok", "timestamp": 1548457472702, "user_tz": 480, "elapsed": 707, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/-eofaMMIfen4/AAAAAAAAAAI/AAAAAAAAABE/dzY58NhFiBE/s64/photo.jpg", "userId": "08860260976100898876"}} colab={"base_uri": "https://localhost:8080/", "height": 34}
# YOUR ACTION REQUIRED:
# Execute this cell, print the variable contents of a, b and exit the debugger.
# %pdb on
a = 67069 / 47 - 0x5a
b = a - 0x539
#c = a / b # Will throw exception.
# + [markdown] id="eHQDeKx06WPq" colab_type="text"
# We'll not dive further into debugging but it's useful to know that this option exists.
#
# Please see [Python Docs - pdb The Python Debugger](https://docs.python.org/2/library/pdb.html) for more information.
# + [markdown] id="kPD8cAscKCes" colab_type="text"
# ## A Word of Warning
#
# While notebook environments like Colab/Jupyter provide many benefits, they also come with some caveats that you should be aware of.
# One example is that you might quickly execute cells in a wrong order leading to unexpected behavior.
#
# If you're interested in more examples feel free to take a look at:
#
# [Youtube - I don't like notebooks by <NAME>](https://www.youtube.com/watch?v=7jiPeIFXb6U) (duration ~56 minutes)
| extras/amld/notebooks/solutions/0_colab.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import numpy as np
import pandas as pd
from keras.preprocessing.text import Tokenizer
from keras.preprocessing.sequence import pad_sequences
from sklearn.model_selection import train_test_split
from keras.preprocessing.text import text_to_word_sequence
import re, os
from keras.models import Model
from keras.layers import Input
from keras.layers import Dense
from keras.layers import Embedding
from keras.layers import LSTM, Bidirectional
# %matplotlib notebook
import matplotlib.pyplot as plt
from nltk import download
from nltk.corpus import stopwords
from tqdm import tqdm
import keras
import tensorflow as tf
# -
from tensorflow.python.client import device_lib
print(device_lib.list_local_devices())
# +
# Reproducibility: fix the NumPy RNG seed.
seed = 7
np.random.seed(seed)

# Path where the trained model weights will be exported.
filename = 'model/model_saved_lstm.h5'
# Number of training epochs.
epochs = 5
# Dimensionality of the pre-trained word embeddings
# (presumably only used when pre_trained_wv is True — not used in the visible cells).
word_embedding_dim = 50
# Number of samples per gradient update.
batch_size = 32
# Maximum number of words kept in the tokenizer vocabulary.
max_features = 5000
# Output dimensionality of the embedding layer (also the LSTM width below).
embed_dim = 128
# Sequences are padded/truncated to this length by the tokenizer pipeline.
max_sequence_length = 300
# Whether to initialise embeddings from pre-trained word vectors.
pre_trained_wv = False
# Whether to use a bidirectional LSTM instead of a plain LSTM.
bilstm = False
# +
# Remove undesired characters from raw text before tokenization.
def clean_str(string):
    """Clean a raw review string for tokenization.

    Keeps alphanumerics and a small set of punctuation, spaces out
    common English contractions and punctuation marks, strips HTML
    tags, digits, apostrophes and remaining non-word characters, and
    returns the text stripped and lower-cased.
    """
    # Replace every character outside the allowed set with a space.
    # fixed: removed stray "\1" (octal escape for \x01) from the class.
    string = re.sub(r"[^A-Za-z0-9(),!?\'\`]", " ", string)
    # Space out common contractions ("he's" -> "he 's", "don't" -> "don 't").
    string = re.sub(r"\'s", " \'s", string)  # fixed: pattern was r"\'s'" (stray quote — never matched "'s")
    string = re.sub(r"\'ve", " \'ve", string)
    string = re.sub(r"\'t", " \'t", string)
    string = re.sub(r"\'re", " \'re", string)
    string = re.sub(r"\'d", " \'d", string)
    # Pad punctuation with spaces so each mark tokenizes separately.
    # (The "\(" in the replacements inserts a literal backslash, which the
    # \W+ pass below removes again — preserved from the original recipe.)
    string = re.sub(r",", " , ", string)
    string = re.sub(r"!", " ! ", string)
    string = re.sub(r"\(", " \(", string)
    string = re.sub(r"\)", " \)", string)
    string = re.sub(r"\?", " \?", string)
    # Collapse runs of whitespace to a single space.
    string = re.sub(r"\s{2,}", " ", string)
    # Strip digits and HTML tags.
    cleanr = re.compile('<.*?>')
    string = re.sub(r'\d+', '', string)  # fixed: was an accidental f-string f'\d+'
    string = re.sub(cleanr, '', string)
    # Drop apostrophes (fixed: removed an exact duplicate of this line),
    # collapse any remaining non-word characters, and drop underscores.
    string = re.sub("'", '', string)
    string = re.sub(r'\W+', ' ', string)
    string = string.replace('_', '')
    return string.strip().lower()
# -
def prepare_data(data):
    """Clean, tokenize and split the labelled review data.

    data: DataFrame with at least the columns 'text' and 'sentiment'.
    return: (X_train, X_test, Y_train, Y_test, word_index, tokenizer) where
        X_* are padded integer sequences of length max_sequence_length,
        Y_* are one-hot encoded sentiment labels, word_index maps
        word -> integer id, and tokenizer is the fitted Keras Tokenizer
        (needed later to encode new sentences for inference).
    """
    download('stopwords')
    # Fix: .copy() removes pandas' SettingWithCopyWarning -- the plain column
    # selection returns a view-like object and the .apply assignments below
    # would otherwise write into an ambiguous intermediate.
    data = data[['text', 'sentiment']].copy()
    # lower-case, normalise punctuation/contractions, then drop any leftovers
    data['text'] = data['text'].apply(lambda x: x.lower())
    data['text'] = data['text'].apply(lambda x: clean_str(x))
    data['text'] = data['text'].apply(lambda x: re.sub(r'[^a-zA-Z0-9\s]', '', x))
    # remove English stop words
    stop_words = set(stopwords.words('english'))
    text = []
    for row in data['text'].values:
        word_list = text_to_word_sequence(row)
        no_stop_words = [w for w in word_list if w not in stop_words]
        text.append(" ".join(no_stop_words))
    # fit the tokenizer on the cleaned corpus and turn it into padded id sequences
    tokenizer = Tokenizer(num_words=max_features, split=' ')
    tokenizer.fit_on_texts(text)
    x = tokenizer.texts_to_sequences(text)
    x = pad_sequences(x, maxlen=max_sequence_length)
    word_index = tokenizer.word_index
    # one-hot encode the sentiment labels
    y = pd.get_dummies(data['sentiment']).values
    X_train, X_test, Y_train, Y_test = train_test_split(x, y, test_size=0.20, random_state=42)
    return X_train, X_test, Y_train, Y_test, word_index, tokenizer
# +
# Load the labelled IMDB reviews and build the train/test split.
data = pd.read_excel('./dataset/imdb.xlsx', engine='openpyxl')
X_train, X_test, Y_train, Y_test, word_index, tokenizer = prepare_data(data)
# Sanity check: X_* should be (n_samples, max_sequence_length);
# Y_* should be (n_samples, 2) given the 2-unit output layer below.
print(X_train.shape, Y_train.shape)
print(X_test.shape, Y_test.shape)
# -
def model():
    """Build the sentiment classifier: Embedding -> LSTM -> 2-unit softmax."""
    tokens = Input(shape=(max_sequence_length,), name="input", dtype='int32')
    # learn embed_dim-dimensional word vectors for the max_features vocabulary
    embedded = Embedding(
        max_features,
        embed_dim,
        name="embedding",
        input_length=max_sequence_length
    )(tokens)
    # a single LSTM encodes the whole sequence into one vector
    encoded = LSTM(embed_dim, dropout=0.2, recurrent_dropout=0.2, name='lstm')(embedded)
    # class probabilities: [negative, positive]
    probabilities = Dense(2, activation='softmax', name='softmax')(encoded)
    return Model(inputs=tokens, outputs=probabilities)
# NOTE(review): `model = model()` rebinds the name from the factory function
# to the built instance, so the factory cannot be called again without
# re-running its defining cell.
model = model()
# NOTE(review): with a 2-unit softmax output, 'categorical_crossentropy' is
# the conventional loss; 'binary_crossentropy' on one-hot 2-class targets
# still trains but weights the loss differently -- confirm before comparing
# reported accuracies.
model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])
print(model.summary())
# +
# if not os.path.exists(f'./(unknown)'):
# Train, keeping the History object for the loss/accuracy curves below.
hist = model.fit(
    X_train,
    Y_train,
    validation_data=(X_test, Y_test),
    epochs=epochs,
    batch_size=batch_size,
    shuffle=True,
    verbose=1
)
# Persist only the weights (not the architecture) to `filename`.
model.save_weights(filename)
# +
# Per-epoch loss/accuracy for train and validation.
# NOTE(review): this bare expression is not the cell's last line, so it is
# never displayed -- move it to its own cell or wrap in display() if wanted.
hist.history
# Training vs. validation cross-entropy per epoch.
plt.figure()
plt.plot(hist.history['loss'], lw=2.0, color='b', label='train')
plt.plot(hist.history['val_loss'], lw=2.0, color='r', label='val')
plt.title('Classificação de Sentimentos')  # "Sentiment Classification" (pt-BR)
plt.xlabel('Epochs')
plt.ylabel('Cross-Entropy')
plt.legend(loc='upper right')
plt.show()
# Training vs. validation accuracy per epoch.
plt.figure()
plt.plot(hist.history['accuracy'], lw=2.0, color='b', label='train')
plt.plot(hist.history['val_accuracy'], lw=2.0, color='r', label='val')
plt.title('Classificação de Sentimentos')  # "Sentiment Classification" (pt-BR)
plt.xlabel('Epochs')
plt.ylabel('Acurácia')  # "Accuracy"
plt.legend(loc='upper left')
plt.show()
# -
# Held-out evaluation; score = [loss, accuracy] given the compiled metrics.
score = model.evaluate(X_test, Y_test, verbose=0, batch_size=batch_size)
print(f"Acc: {score[1]*100}%")
# Interactive inference loop: type a sentence, get the predicted sentiment
# probability; type "exit" to stop. (Blocks the kernel while waiting for input.)
while True:
    sentence = input("input> ")
    if sentence == "exit":
        break
    # Reuse the tokenizer fitted in prepare_data so word ids match training.
    next_text = [sentence]
    next_text = tokenizer.texts_to_sequences(next_text)
    new_text = pad_sequences(next_text, maxlen=max_sequence_length, dtype='int32', value=0)
    sentiment = model.predict(new_text, batch_size=1, verbose=2)[0]
    # assumes pd.get_dummies ordered the labels [negative, positive] -- confirm
    if (np.argmax(sentiment) == 0):
        print(f"negativo => {round(sentiment[0]*100, 3)}")
    else:
        print(f"positivo => {round(sentiment[1]*100, 3)}")
# NOTE(review): duplicate import -- tensorflow is already imported near the
# top of this notebook.
import tensorflow as tf
print("Num GPUs Available: ", len(tf.config.list_physical_devices('GPU')))
| sentiment_analysis_text/sentimental_analysis_rnn.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import pandas as pd
import numpy as np
# first read in and data clean, joinable on metadata so get body site
# Load the PCoA coordinates for ~3000 samples.
pca_df = pd.read_csv('sample3000_pcoa_results.csv')
print(pca_df.shape)
pca_df.head()
# keep only the sample identifier and the first three principal coordinates
pca_df = pca_df[['index', 'PC1', 'PC2', 'PC3']]
pca_df.head(2)
# rename 'index' so the id-extraction helpers below can read 'sample_name'
pca_df.columns = ['sample_name', 'PC1', 'PC2', 'PC3']
def meta_df_id_clean(meta_df):
    """Extract a numeric sample_id from 'sample_name' and drop unparsable rows.

    'sample_name' is expected to look like '<prefix>.<id>.<suffix>'; the middle
    field becomes an integer 'sample_id'. Rows whose id field is non-numeric
    are dropped. Note: the input frame is mutated in place (a 'sample_id'
    column is added to it), and the returned frame keeps the old index in an
    'index' column via reset_index().
    """
    # take the middle dot-separated field of each sample name
    raw_ids = meta_df['sample_name'].apply(lambda name: name.split('.')[1])
    # non-numeric ids become NaN so they can be filtered out below
    meta_df['sample_id'] = pd.to_numeric(raw_ids, errors='coerce', downcast='integer')
    cleaned = meta_df.dropna(subset=['sample_id']).reset_index()
    cleaned['sample_id'] = cleaned['sample_id'].apply(int)
    return cleaned
def biom_id_clean(biom_df):
    """Extract a numeric sample_id from 'sample_name' and drop unparsable rows.

    biom_df: DataFrame with a 'sample_name' column formatted
        '<prefix>.<id>.<suffix>'.
    return: DataFrame restricted to rows whose id field is numeric, with an
        integer 'sample_id' column (original index preserved -- unlike
        meta_df_id_clean, this variant does not reset_index()).
    Side effect: adds a 'sample_id' column to biom_df itself.
    """
    biom_df['sample_id'] = biom_df['sample_name'].apply(lambda x: x.split('.')[1])
    biom_df['sample_id'] = pd.to_numeric(biom_df['sample_id'], errors='coerce', downcast='integer')
    # Fix: .copy() removes the SettingWithCopyWarning the original raised --
    # dropna() can return a view-like object and the assignment below would
    # then write into an ambiguous intermediate.
    biom_join_df = biom_df.dropna(subset=['sample_id']).copy()
    biom_join_df['sample_id'] = biom_join_df['sample_id'].apply(lambda x: int(x))
    return biom_join_df
# Add the integer sample_id used to join against the metadata.
pca_df = biom_id_clean(pca_df)
pca_df.head(2)
pca_df.shape
# American Gut Project metadata; low_memory=False avoids mixed-dtype chunking.
meta_df = pd.read_csv('all_body_4.16.agp_only_meta.csv', low_memory=False)
print(meta_df.shape)
# +
#meta_df.head()
# -
# only the join key and the body-site label are needed
meta_df = meta_df[['sample_id', 'env_material']]
# inner join: keeps only samples present in both frames
PC2_df = pd.merge(pca_df, meta_df, on='sample_id')
print(PC2_df.shape)
PC2_df.env_material.value_counts()
PC2_df.head()
PC2_df = PC2_df[['sample_id', 'PC1', 'PC2', 'PC3', 'env_material']]
# rename env_material -> label for the classifier below
PC2_df.columns = ['sample_id', 'PC1', 'PC2', 'PC3', 'label']
PC2_df.label.value_counts()
PC2_df.to_csv('../../data/6.2.19.pca.data/output_data/beta_pcoa_3000sample.csv')
# basic logistic regression body site classification
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import f1_score
# Features: the first three principal coordinates; target: body-site label.
X = PC2_df[['PC1', 'PC2', 'PC3']]
y = PC2_df['label'].values
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42)
clf = LogisticRegression(random_state=0, solver='lbfgs', multi_class='multinomial').fit(X_train, y_train)
# Fix: the original cell read `clf.score` (no call), which only displayed the
# bound method; actually evaluate mean accuracy on the held-out split.
clf.score(X_test, y_test)
train_predict = clf.predict(X_train)
test_predict = clf.predict(X_test)
# Weighted F1 accounts for the class imbalance shown by value_counts() above.
f1_score(y_train, train_predict, average='weighted')
f1_score(y_test, test_predict, average='weighted')
| beta_diversity_pcOa/pcoa_body_site_classify.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import matplotlib.pyplot as plt
import numpy as np
# +
# Nutrient-uptake model parameters (units not stated in this notebook --
# presumably per the aquaponics model docs; confirm there).
w = 10                # plant/root mass (assumed from variable usage below)
K = 0.02              # half-saturation constant of the saturating term
Jmax0 = 0.0374*24     # base maximum uptake rate, scaled x24 (hourly -> daily?)
alpha = 0.151         # exponential decay rate of uptake capacity with w
# effective maximum uptake rate at mass w
Jmax = Jmax0 * np.exp(-alpha * w)
print(Jmax)
# sweep nutrient concentration from 0 to 10
Cn = np.arange(0,10,0.01)
# Michaelis-Menten-type saturating uptake: ~linear in Cn for Cn << K,
# approaching Jmax*w as Cn grows.
dNup = Jmax * Cn * w / (Cn + K)
# -
# %matplotlib inline
plt.plot(Cn, dNup)
| aquaponics-master/notebooks/Uptake Limitations.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
# +
import pickle
import numpy as np
import pandas as pd
from pathlib import Path
from tqdm.notebook import trange
import matplotlib as mpl
import matplotlib.pyplot as plt
from matplotlib.ticker import MultipleLocator, AutoLocator
from function import conserve
from function.utilities import get_taxid_dict
from function.utilities import get_protein_name
from function.utilities import get_fasta_seq_info
from function.utilities import get_uniprot_rawdata
from function.utilities import find_human_sequence
from function.seqfilter import SeqFilter
# -
# # Param
#####CHANGE HERE#####
# Disorder predictor whose order/disorder calls are loaded below.
algorithm = "VSL2" #VLXT, VSL2, VL3-BA, fidpnn
# Human protein to analyse.
uniprot_id = "Q13148" #'Q13148','P35637', 'P49790', 'Q8NDV7'
# Conservation scoring method.
conserve_method = 'jsd' #['shannon_entropy','jsd','r4s']
#####CHANGE HERE#####
# +
# NCBI taxonomy ids of the nested groups analysed (broadest to narrowest).
tax_ids = [7711, 7742, 32523, 40674] #chordata, vertebrata, tetrapoda, mammalia
# order/disorder length criteria: minimum run lengths for a stretch to count
# as ordered / disordered.
order_filter_length = 10
disorder_filter_length = 40
#conserve_method config, only for shannon_entropy and jsd
#same as https://compbio.cs.princeton.edu/conservation/score.html
normalize = True        # z-score all profiles against the chordata reference
windows_size = 0
sequence_weight = False
# fig2b: display order of the 20 amino acids (aromatics W/F/Y last)
conserve_peraa_sort = ["A","C","D","E","G","H","I","K","L","M","N","P","Q","R","S","T","V","F","W","Y"]
# paths: filtered alignments in, figures and tables out
input_path = Path("./output/fasta/d_extre_filtered")
fig2_path = Path("./output/fig2") / Path(conserve_method) #for figure 2
fig2_path.mkdir(parents=True, exist_ok=True)
table_homologous_path = Path("./output/table_homologous") #for homologous table
table_homologous_path.mkdir(parents=True, exist_ok=True)
# -
# # Load uniprot and order/disorder identification data
# +
human_uniprot_path = Path("./rawdata/human_uniprot.tab")
human_df = get_uniprot_rawdata(human_uniprot_path)
# per-protein order/disorder identification produced by the chosen predictor
pondr_disorder_ident_path = Path("./output/pondr_disorder_ident")
od_human_df = pd.read_pickle(pondr_disorder_ident_path / "{}_od_human_df.pkl".format(algorithm))
# -
human_df
# # Conservation Score
# +
def get_only_human_score(score, fasta_path):
    """
    Keep only the conservation scores at alignment columns where the human
    sequence has a residue (i.e. is not a gap), so the result lines up with
    the ungapped human sequence.

    score: list, per-column conservation scores from get_conserve_score()
    fasta_path: path of the alignment fasta file
    return: list, scores restricted to the non-gap human positions
    """
    aligned_human = np.array(list(find_human_sequence(fasta_path)["sequence"]))
    keep = np.where(aligned_human != "-")[0].tolist()
    return [score[position] for position in keep]
def get_normalize_by_chordata(conserve_list):
    """
    Convert every taxon's conservation scores to z-scores, using the mean and
    standard deviation of the first (chordata) entry as the reference.

    conserve_list: list of dicts from get_conserve_score(); the first element
        must be the chordata result.
    return: the same list, with each entry's 'score' replaced by z-scores.
    """
    reference = np.array(conserve_list[0]['score'])
    ref_mean = reference.mean()
    ref_std = reference.std()
    # stack requires all score lists to share one length (same alignment)
    z_scores = (np.stack([entry['score'] for entry in conserve_list]) - ref_mean) / ref_std
    for entry, row in zip(conserve_list, z_scores):
        entry['score'] = row.tolist()
    return conserve_list
# -
def get_conserve_score(tax_id, uniprot_id):
    """
    Compute the per-residue conservation profile of one protein within one
    taxonomic group, plus the order/disorder annotation and metadata needed
    for plotting.

    tax_id: int, taxonomy id, i.e. 7711 (chordata)
    uniprot_id: str, uniprot id of the human protein
    return: dict with keys uniprot_id, gene_name, tax_id, taxonomy,
        score (per-residue conservation, aligned to the ungapped human
        sequence), od_ident (order/disorder string), human_sequence,
        region (order/disorder index ranges) and homologous_info.
    raises: Exception when the conservation profile length does not match the
        order/disorder annotation (OMA vs uniprot record mismatch).
    """
    #read fasta
    fasta_path = input_path / str(tax_id) / "{}.fasta".format(uniprot_id)
    #get conserve score
    conserve_score = conservefunc.get_conserve_score(fasta_path,
                                                     method=conserve_method,
                                                     windows_size=windows_size,
                                                     sequence_weight=sequence_weight)
    #only nogap conserve_score, r4s removes gap's score by itself base on reference sequence(human sequence)
    if conserve_method != 'r4s':
        conserve_score = get_only_human_score(conserve_score, fasta_path)
    #get order disorder region by PONDR algorithm
    od_ident = od_human_df[od_human_df["uniprot_id"] == uniprot_id]["od_ident"].values[0]
    od_ident = seqfilter.length_filter_by_od_ident(od_ident=od_ident,
                                                   disorder_filter_length=disorder_filter_length,
                                                   order_filter_length=order_filter_length)
    #conservation score and od_ident length error handle,
    #due to different sequence record between OMA and uniprot but their uniprot_ids are the same
    if len(conserve_score) != len(od_ident):
        raise Exception("{} lenth of conservation score is not equal with od_ident".format(uniprot_id))
    #get disorder region after length check
    region = seqfilter.get_od_index(od_ident)
    #get human sequence without gap for disorder plot
    human_sequence = find_human_sequence(fasta_path)["remove_gap_sequence"]
    #get infos
    taxonomy = get_taxid_dict()[tax_id]
    gene_name = get_protein_name(uniprot_id, human_df)["gene_name"]
    homologous_info = get_fasta_seq_info(fasta_path)["homologous_info"]
    return {
        "uniprot_id": uniprot_id,
        "gene_name": gene_name,
        "tax_id": tax_id,
        "taxonomy": taxonomy,
        "score": conserve_score,
        "od_ident": od_ident,
        "human_sequence": human_sequence,
        "region": region,
        "homologous_info":homologous_info
    }
def get_conserve_peraa_score(conserve_list):
    """
    Aggregate conservation and amino-acid content per residue type, across
    order/disorder regions and all taxonomic groups.

    conserve_list: list, conserve info dicts from get_conserve_score()
    return: dict with the uniprot id / gene name, the taxonomy list, the
        amino-acid display order, and 'peraa_score', an array of shape
        (2, 2, 20, len(conserve_list)):
        axis 0: 0 = aa content, 1 = per-aa conservation
        axis 1: 0 = ordered regions, 1 = disordered regions
        axis 2: amino acid, in conserve_peraa_sort order
        axis 3: taxonomic group, in conserve_list order
    NOTE(review): the returned 'uniprot_id'/'gene_name' read the loop
    variable `i` after the loop, i.e. the LAST entry of conserve_list.
    That works here because all entries describe the same protein, but it
    is fragile -- and raises NameError on an empty list.
    """
    #array's dim: 2(aa content/peraa conserve), 2(order/disorder), 20(A, C, D, E...), 4(taxonomy id)
    peraa_score = np.zeros((2, 2, 20, len(conserve_list)))
    taxonomy_list = []
    for tax_index, i in enumerate(conserve_list):
        #make tax list
        taxonomy_list.append(i['taxonomy'])
        #read fasta
        fasta_path = input_path / str(i['tax_id']) / "{}.fasta".format(i['uniprot_id'])
        #get conserve score and aa content
        info_dict = conserveperaa.get_aa_info(fasta_path, i['score'], i['od_ident'])
        order_conserve, order_content = (info_dict["conserve"]["order"], info_dict["content"]["order"])
        disorder_conserve, disorder_content = (info_dict["conserve"]["disorder"], info_dict["content"]["disorder"])
        #put into array
        for aa_index, element in enumerate(conserve_peraa_sort):
            peraa_score[0][0][aa_index][tax_index] = order_content[element]
            peraa_score[0][1][aa_index][tax_index] = disorder_content[element]
            peraa_score[1][0][aa_index][tax_index] = order_conserve[element]
            peraa_score[1][1][aa_index][tax_index] = disorder_conserve[element]
    return {
        "uniprot_id": i['uniprot_id'],
        "gene_name": i['gene_name'],
        "taxonomy": taxonomy_list,
        "conserve_peraa_sort": conserve_peraa_sort,
        "peraa_score": peraa_score,
    }
# +
# Initialise the helper objects: sequence filtering and per-amino-acid
# aggregation, then pick the conservation backend for the configured method.
seqfilter = SeqFilter()
conserveperaa = conserve.ConservePeraa()
if conserve_method == 'jsd' or conserve_method == 'shannon_entropy':
    #source code: <NAME> and <NAME>. Predicting functionally important residues from sequence conservation.
    # Bioinformatics, 23(15):1875-82, 2007. [Bioinformatics]
    #by web crawler
    # conservefunc = conserve.ConserveByWeb()
    #standalone version
    conservefunc = conserve.ConserveStandalone()
elif conserve_method == 'r4s':
    conservefunc = conserve.Rate4Site()
# +
# Calculate the conservation profile for every taxonomic group
# (tqdm's trange shows per-group progress).
conserve_list = []
t = trange(len(tax_ids), leave=True)
for i in t:
    t.set_description(str(tax_ids[i]))
    t.refresh()
    conserve_score = get_conserve_score(tax_id=tax_ids[i], uniprot_id=uniprot_id)
    conserve_list.append(conserve_score)
#conserve score normalization (z-scores against the chordata reference)
if normalize:
    conserve_list = get_normalize_by_chordata(conserve_list)
#calculate peraa score
conserve_peraa = get_conserve_peraa_score(conserve_list)
# -
# # Plot
# +
# Figure 2 (top): per-residue conservation traces for the four taxonomic
# groups, with disorder regions and W/Y/F residues highlighted.
f, (a0) = plt.subplots(1, 1, sharex=True, sharey=True, figsize=(20, 3.63), dpi=200)
for i in conserve_list:
    # one line per taxonomic group; chordata/mammalia emphasised
    if i["taxonomy"] == "chordata":
        a0.plot(i["score"], color="#335079", zorder=100, linewidth=3)
    elif i["taxonomy"] == "vertebrata":
        a0.plot(i["score"], color="#b5b5b5", zorder=2, linewidth=0.9)
    elif i["taxonomy"] == "tetrapoda":
        a0.plot(i["score"], color="#b5b5b5", zorder=2, linewidth=0.9)
    elif i["taxonomy"] == "mammalia":
        a0.plot(i["score"], color="#63a2fe", zorder=90, linewidth=3)
# disorder plot: red bar at y=-3 for each disorder region. axhline's
# xmin/xmax are axis fractions, hence the division by the sequence length.
for region in conserve_list[0]["region"]["disorder_region"]:
    start = region["start"]
    start = start / len(conserve_list[0]["human_sequence"])
    end = region["end"]
    end = end / len(conserve_list[0]["human_sequence"])
    a0.axhline(-3, start, end, color="#ff0000", linewidth=3, zorder=200)
# reference lines at z = 0 and +/-1
a0.axhline(0 , color="#a2a2a2", linewidth=0.5, zorder=100)
a0.axhline(1 , color="#a2a2a2", linewidth=0.5, linestyle='--', zorder=100)
a0.axhline(-1, color="#a2a2a2", linewidth=0.5, linestyle='--', zorder=100)
# WYF points: mark aromatic residues (Y/F/W) at positions whose od_ident
# flag is "1".
for index, (od, aa, score) in enumerate(
    zip(conserve_list[0]["od_ident"], conserve_list[0]["human_sequence"], conserve_list[0]["score"])):
    if od == "1":
        if aa == "Y":
            a0.scatter(index, score, s=320, color="#ffff00", edgecolors="black", linewidths=0.5, zorder=200)
        elif aa == "F":
            a0.scatter(index, score, s=320, color="#ffa300", edgecolors="black", linewidths=0.5, zorder=200)
        elif aa == "W":
            a0.scatter(index, score, s=320, color="#9400d1", edgecolors="black", linewidths=0.5, zorder=200)
# style
# NOTE(review): `i` here is the loop variable leaked from the plotting loop
# above, i.e. the last taxon; all taxa share one length so it works.
a0.set_xlim(0, len(i["score"]))
a0.set_ylim(-3.9, 3)
a0.xaxis.set_major_locator(MultipleLocator(100))
a0.xaxis.set_minor_locator(MultipleLocator(20))
a0.yaxis.set_major_locator(MultipleLocator(1))
# a0.yaxis.set_minor_locator(MultipleLocator(0.5))
a0.tick_params(which='major', axis="x",direction="in", length=5)
a0.tick_params(which='minor', axis="x",direction="in", length=2)
a0.tick_params(which='major', axis="y",direction="in", length=5)
a0.tick_params(which='minor', axis="y",direction="in", length=2)
a0.axes.xaxis.set_ticklabels([])
a0.axes.yaxis.set_ticklabels([])
# NOTE(review): the fontsize=12 here is a stray positional arg to str.format
# (silently ignored); the title is only used by the commented-out set_title.
title = "{} ({})".format(conserve_list[0]["gene_name"], conserve_list[0]["uniprot_id"], fontsize=12)
# a0.set_title(title, loc="left")
# a0.set_xlabel("Residue Number", fontsize=12)
# a0.set_ylabel("Conservation Level (Z-score)", fontsize=12)
plt.tight_layout(pad=0.3)
#savefig
# path = fig2_path / "{}_{}_fig2up".format(conserve_list[0]["gene_name"], conserve_list[0]["uniprot_id"])
# f.savefig("{}.svg".format(str(path)))
# +
#array's dim: 2(aa content/peraa conserve), 2(order/disorder), 20(A, C, D, E...), 4(taxonomy id)
# Figure 2 (bottom): per-amino-acid conservation across the four taxa --
# one small panel per residue type; top row = ordered regions, bottom row =
# disordered regions. Index [1] selects conservation (not content).
f, (order,disorder) = plt.subplots(2, 20, sharex=True, sharey=True, figsize=(20, 2.2), dpi=200)
# shared y-range across all panels, padded around the conservation values
y_max = np.nanmax(conserve_peraa['peraa_score'][1]) + 0.3
y_min = np.nanmin(conserve_peraa['peraa_score'][1]) - 0.5
for index, sub in enumerate(order):
    # [1][0] = conservation in ordered regions for amino acid `index`
    sub.plot(conserve_peraa['peraa_score'][1][0][index], linestyle="solid", color="#a2a2a2",linewidth=3)
    sub.set_xlim(-0.5, 3.5)
    sub.set_ylim(y_min, y_max)
    # sub.axes.yaxis.set_ticklabels([])
    sub.axes.xaxis.set_ticklabels([])
    sub.get_xaxis().set_visible(False)
    sub.tick_params(which='major', axis="y",direction="in", length=2)
    sub.tick_params(which='minor', axis="y",direction="in", length=1)
    sub.yaxis.set_major_locator(MultipleLocator(1))
    sub.yaxis.set_minor_locator(MultipleLocator(0.5))
    yticks = sub.yaxis.get_major_ticks()
    # yticks[1].label.set_visible(False)
for index, sub in enumerate(disorder):
    # [1][1] = conservation in disordered regions for amino acid `index`
    sub.plot(conserve_peraa['peraa_score'][1][1][index], linestyle="solid", color="#a2a2a2",linewidth=3)
    # sub.axes.yaxis.set_ticklabels([])
    sub.axes.xaxis.set_ticklabels([])
    sub.get_xaxis().set_visible(False)
    sub.tick_params(which='major', axis="y",direction="in", length=2)
    sub.tick_params(which='minor', axis="y",direction="in", length=1)
    sub.yaxis.set_major_locator(MultipleLocator(1))
    sub.yaxis.set_minor_locator(MultipleLocator(0.5))
    yticks = sub.yaxis.get_major_ticks()
    # yticks[1].label.set_visible(False)
plt.subplots_adjust(wspace=0.1)
plt.tight_layout(pad=0.3)
#savefig
# path = fig2_path / "{}_{}_fig2down".format(conserve_list[0]["gene_name"], conserve_list[0]["uniprot_id"])
# f.savefig("{}.svg".format(str(path)))
# -
# # Homologous Table Output
# +
# Build a per-homolog table: for each chordata homolog, record the narrowest
# of the four taxonomic groups it also appears in (4 = mammalia ...
# 1 = chordata only, 0 = none).
oma_protein_id = [i['oma_protein_id'] for i in conserve_list[0]['homologous_info']]
species = [i['species'] for i in conserve_list[0]['homologous_info']]
taxon_id = [int(i['taxon_id']) for i in conserve_list[0]['homologous_info']]
oma_cross_reference = [i['oma_cross_reference'] for i in conserve_list[0]['homologous_info']]
# membership lists per taxon. NOTE(review): `chordata` duplicates
# oma_protein_id above, and `vetebrata`/`tax_indicator_lsit` are misspelled
# local names (kept as-is; harmless).
chordata = [i['oma_protein_id'] for i in conserve_list[0]['homologous_info']]
vetebrata = [i['oma_protein_id'] for i in conserve_list[1]['homologous_info']]
tetrapoda = [i['oma_protein_id'] for i in conserve_list[2]['homologous_info']]
mammalia = [i['oma_protein_id'] for i in conserve_list[3]['homologous_info']]
tax_indicator_lsit = []
for i in oma_protein_id:
    tax_indicator = 0
    if i in mammalia:
        tax_indicator = 4
    elif i in tetrapoda:
        tax_indicator = 3
    elif i in vetebrata:
        tax_indicator = 2
    elif i in chordata:
        tax_indicator = 1
    tax_indicator_lsit.append(tax_indicator)
df = pd.DataFrame(columns=['oma_protein_id','species','taxon_id','oma_cross_reference','tax_indicator'],
                 data = {'oma_protein_id':oma_protein_id,
                         'species':species,
                         'taxon_id':taxon_id,
                         'oma_cross_reference':oma_cross_reference,
                         'tax_indicator':tax_indicator_lsit})
# path = table_homologous_path / "{}_{}_homologous.xlsx".format(conserve_list[0]["gene_name"], conserve_list[0]["uniprot_id"])
# df.to_excel(path,index=False)
# -
# # Overall trend
# +
# from function.utilities import get_subset
# input_path = Path("/home/wenlin/d/rbp/oma_all/e_extre_filtered_rbp")
# uniprot_ids = get_subset(human_df, 'rbp')['uniprot_id'].tolist()
# all_list = []
# failed_uids = []
# j = trange(len(uniprot_ids), leave=True)
# for uniprot_id in j:
# try:
# conserve_list = []
# t = trange(len(tax_ids), leave=False)
# for i in t:
# t.set_description(str(tax_ids[i]))
# t.refresh()
# conserve = get_conserve_score(tax_id=tax_ids[i], uniprot_id=uniprot_ids[uniprot_id])
# conserve_list.append(conserve)
# #conserce score normalize
# if normalize:
# conserve_list = get_normalize_by_chordata(conserve_list)
# #calculate peraa score
# conserve_peraa = get_conserve_peraa_score(conserve_list)
# all_list.append({"uniprot_id":conserve_peraa['uniprot_id'],
# "gene_name":conserve_peraa['gene_name'],
# "peraa_score":conserve_peraa['peraa_score']})
# except Exception as e: print(e)
# no_zero_all_list = []
# for i in all_list:
# if not (i['peraa_score'][0][0] == 0).all():
# no_zero_all_list.append(i)
# order_conserve = np.stack([i['peraa_score'][1][0] for i in no_zero_all_list])
# order_conserve = np.nanmean(order_conserve,axis=0)
# # order_conserve = order_conserve.mean(axis=0)
# no_zero_all_list = []
# for i in all_list:
# if not (i['peraa_score'][0][1] == 0).all():
# no_zero_all_list.append(i)
# disorder_conserve = np.stack([i['peraa_score'][1][1] for i in no_zero_all_list])
# disorder_conserve = np.nanmean(disorder_conserve,axis=0)
# # disorder_conserve = disorder_conserve.mean(axis=0)
# overall_conserve = np.stack([order_conserve, disorder_conserve])
# np.save('overall_conserve.pkl',overall_conserve)
# +
# overall_trend_path = Path("./output/peraa_overall_trend")
# overall_conserve = np.load('./output/peraa_overall_trend/overall_conserve.pkl.npy')
# +
# f, (order,disorder) = plt.subplots(2, 20, sharex=True, sharey=True, figsize=(20, 2.2), dpi=200)
# y_max = np.nanmax(overall_conserve) + 0.15
# y_min = np.nanmin(overall_conserve) - 0.15
# for index, sub in enumerate(order):
# sub.plot(overall_conserve[0][index], linestyle="solid", color="#a2a2a2",linewidth=3)
# # sub.axes.yaxis.set_ticklabels([])
# sub.axes.xaxis.set_ticklabels([])
# sub.get_xaxis().set_visible(False)
# sub.tick_params(which='major', axis="y",direction="in", length=2)
# sub.tick_params(which='minor', axis="y",direction="in", length=1)
# sub.yaxis.set_major_locator(MultipleLocator(1))
# sub.yaxis.set_minor_locator(MultipleLocator(0.5))
# yticks = sub.yaxis.get_major_ticks()
# # yticks[1].label.set_visible(False)
# for index, sub in enumerate(disorder):
# sub.plot(overall_conserve[1][index], linestyle="solid", color="#a2a2a2",linewidth=3)
# sub.set_xlim(-0.5, 3.5)
# sub.set_ylim(y_min, y_max)
# # sub.axes.yaxis.set_ticklabels([])
# sub.axes.xaxis.set_ticklabels([])
# sub.get_xaxis().set_visible(False)
# sub.tick_params(which='major', axis="y",direction="in", length=2)
# sub.tick_params(which='minor', axis="y",direction="in", length=1)
# sub.yaxis.set_major_locator(MultipleLocator(1))
# sub.yaxis.set_minor_locator(MultipleLocator(0.5))
# yticks = sub.yaxis.get_major_ticks()
# # yticks[1].label.set_visible(False)
# plt.subplots_adjust(wspace=0.1)
# plt.tight_layout(pad=0.3)
# path = overall_trend_path / "overall_trend"
# f.savefig("{}.svg".format(str(path)))
# -
| fig2.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import pandas as pd
# NOTE(review): hard-coded absolute Windows path -- consider a configurable
# data directory so the notebook runs on other machines.
df=pd.read_csv(r"C:\Users\<NAME>\Downloads\Ingredients.csv")
df.head(10)
# Fix: the original cell read `df.info` (no call), which only displayed the
# bound method instead of the frame summary.
df.info()
# words to strip from ingredient names (colours, preparation verbs, units, ...)
res_words=['red','black','green','crushed','powder','oil','refreshed','refined','cut into','split','chopped','seed',
           'root','juice','leaves','flour','finely chopped','fresh','cut','into','in','on','fine','finely','seeds',
           'split','+','teaspoon','tspn','piece']
df.columns
# work on a copy so the raw frame stays untouched
df_copy=df.copy()
df_copy.columns
df_copy['Ingredient Name'][2]
length=len(df_copy['Ingredient Name'])
import re
# Split each ingredient name into a list of word tokens.
# NOTE(review): `df_copy['Ingredient Name'][i] = x` is chained indexing,
# which pandas warns about (SettingWithCopyWarning); .loc/.at would be the
# safe spelling. It works here because df_copy owns its data.
for i in range(length):
    x=re.split(' ',df_copy['Ingredient Name'][i])
    df_copy['Ingredient Name'][i]=x
df_copy.head(20)
# Remove restricted words from every multi-word token list.
# Fix: the original mutated each list while iterating over it
# (`for x in i: i.remove(x)`), which skips the element following every
# removal -- that is why this cell had to be re-run several times further
# down. Rebuilding the list in place (slice assignment keeps the same list
# objects inside the DataFrame) removes all matches in one pass.
for tokens in df_copy['Ingredient Name']:
    if len(tokens) > 1:
        tokens[:] = [word for word in tokens if word.lower() not in res_words]
df_copy.info()
# +
# Drop tokens ending in "ed" (past-participle preparation verbs).
# NOTE(review): every loop below removes items from the list it is iterating
# over, which skips the element after each removal -- hence the repeated
# passes and re-displays in this and the following cells. `a.string` is the
# whole searched token, so list.remove deletes the entire token.
pattern1=".*ed$"
for i in df_copy['Ingredient Name']:
    for x in i:
        a=re.search(pattern1,x)
        if a != None:
            i.remove(a.string)
# -
# second pass over res_words (needed because of the skipping noted above)
for i in df_copy['Ingredient Name']:
    if len(i)>1:
        for x in i:
            if x.lower() in res_words:
                i.remove(x)
df_copy[6350:6370]
# another "ed"-suffix pass, then re-inspect the same slice
pattern1=".*ed$"
for i in df_copy['Ingredient Name']:
    for x in i:
        a=re.search(pattern1,x)
        if a != None:
            i.remove(a.string)
df_copy[6350:6370]
# Additional stop lists: prepositions and measurement/size words.
prepositions=['into','and','in','about','like','for','with','of','a','an','the']
units=['hour','minutes','hours','day','days','seconds','small','medium','large','tablespoon','tablespoons','inch',
       'cm','mm','raw','cube','whole','half']
# NOTE(review): same remove-while-iterating pattern as above -- some matches
# survive each pass.
for i in df_copy['Ingredient Name']:
    if len(i)>1:
        for x in i:
            if x.lower() in prepositions or x.lower() in units:
                i.remove(x)
df_copy[6350:6370]
# drop purely numeric tokens (quantities)
for i in df_copy['Ingredient Name']:
    if len(i)>1:
        for x in i:
            if x.isnumeric()==True:
                i.remove(x)
df_copy[6350:6370]
# Drop any token containing a digit (e.g. "1/2", "500g").
# NOTE(review): remove-while-iterating again; "\d" should ideally be a raw
# string (r"\d") to avoid the invalid-escape warning.
pattern2="\d"
for i in df_copy['Ingredient Name']:
    for x in i:
        a=re.search(pattern2,x)
        if a != None:
            i.remove(a.string)
            #print(a.string)
# Repeat all previous cleaning passes to catch tokens skipped earlier.
for i in df_copy['Ingredient Name']:
    if len(i)>1:
        for x in i:
            if x.lower() in res_words:
                i.remove(x)
pattern1=".*ed$"
for i in df_copy['Ingredient Name']:
    for x in i:
        a=re.search(pattern1,x)
        if a != None:
            i.remove(a.string)
for i in df_copy['Ingredient Name']:
    if len(i)>1:
        for x in i:
            if x.lower() in prepositions or x.lower() in units:
                i.remove(x)
for i in df_copy['Ingredient Name']:
    if len(i)>1:
        for x in i:
            if x.isnumeric()==True:
                i.remove(x)
pattern2="\d"
for i in df_copy['Ingredient Name']:
    for x in i:
        a=re.search(pattern2,x)
        if a != None:
            i.remove(a.string)
            #print(a.string)
# Snapshot the partially-cleaned frame, then drop tokens ending in "ed,".
df1=df_copy.copy()
pattern1=".*ed,$"
for i in df1['Ingredient Name']:
    for x in i:
        a=re.search(pattern1,x)
        if a != None:
            i.remove(a.string)
df1[1000:1020]
df1.info()
# Remove NLTK English stop words (same remove-while-iterating caveat).
from nltk.corpus import stopwords
stop_words = set(stopwords.words("english"))
print(stop_words)
for i in df1['Ingredient Name']:
    if len(i)>1:
        for x in i:
            if x.lower() in stop_words:
                i.remove(x)
df1['Ingredient Name'][5][0]
# Lemmatize and lower-case every remaining token in place.
from nltk.stem import WordNetLemmatizer
lemmtizer = WordNetLemmatizer()
length=len(df1['Ingredient Name'])
for i in range(length):
    for index,x in enumerate(df1['Ingredient Name'][i]):
        #for y in x:
        #index=df1['Ingredient Name'][i].index(x)
        #print(index)
        y=lemmtizer.lemmatize(x.lower())
        #print(y)
        df1['Ingredient Name'][i][index]=y
        #x = [lemmtizer.lemmatize(word) for word in x]
# Drop tokens ending in "less" (e.g. "boneless", "seedless").
pattern1=".*less$"
for i in df1['Ingredient Name']:
    for x in i:
        a=re.search(pattern1,x)
        if a != None:
            i.remove(a.string)
# Delete the substring "nuts" from tokens (e.g. "groundnuts" -> "ground").
pattern2="nuts"
for i in df1['Ingredient Name']:
    for index,x in enumerate(i):
        #a=re.search(pattern2,x)
        #if a != None:
            #print(a.string)
        i[index]=re.sub(pattern2,"",x)
# NOTE(review): ".*s$" matches the WHOLE token, so re.sub replaces any token
# ending in "s" with the empty string rather than stripping the plural "s"
# (the pattern "s$" would do that). The same applies to ".*ly$" below --
# confirm this wholesale blanking is intended.
pattern3=".*s$"
for i in df1['Ingredient Name']:
    for index,x in enumerate(i):
        #a=re.search(pattern3,x)
        #if a != None:
            #print(a.string)
        i[index]=re.sub(pattern3,"",x)
df1.tail(20)
pattern4=".*ly$"
for i in df1['Ingredient Name']:
    for index,x in enumerate(i):
        #a=re.search(pattern3,x)
        #if a != None:
            #print(a.string)
        i[index]=re.sub(pattern4,"",x)
# Repeat all four suffix passes to catch anything the earlier iterations missed.
pattern1=".*less$"
for i in df1['Ingredient Name']:
    for x in i:
        a=re.search(pattern1,x)
        if a != None:
            i.remove(a.string)
pattern2="nuts"
for i in df1['Ingredient Name']:
    for index,x in enumerate(i):
        #a=re.search(pattern2,x)
        #if a != None:
            #print(a.string)
        i[index]=re.sub(pattern2,"",x)
pattern3=".*s$"
for i in df1['Ingredient Name']:
    for index,x in enumerate(i):
        #a=re.search(pattern3,x)
        #if a != None:
            #print(a.string)
        i[index]=re.sub(pattern3,"",x)
pattern4=".*ly$"
for i in df1['Ingredient Name']:
    for index,x in enumerate(i):
        #a=re.search(pattern3,x)
        #if a != None:
            #print(a.string)
        i[index]=re.sub(pattern4,"",x)
# Join the cleaned token lists back into single strings for the final frame.
df2=df1.copy()
s=' '
# NOTE(review): chained indexing again (SettingWithCopyWarning); .loc/.at
# would be the safe spelling.
for index,i in enumerate(df2['Ingredient Name']):
    df2['Ingredient Name'][index]=s.join(i)
df2.head()
df2[255:265]
df2.info()
# +
#THE END
| AssignmentGlyka.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Exercise 02 - OLAP Cubes - Solution
# All the databases table in this demo are based on public database samples and transformations
# - `Sakila` is a sample database created my `MySql` [Link](https://dev.mysql.com/doc/sakila/en/sakila-structure.html)
# - The postgresql version of it is called `Pagila` [Link](https://github.com/devrimgunduz/pagila)
# - The facts and dimension tables design is based on O'Reilly's public dimensional modelling tutorial schema [Link](http://archive.oreilly.com/oreillyschool/courses/dba3/index.html)
# !PGPASSWORD=student createdb -h 127.0.0.1 -U student pagila_star
# !PGPASSWORD=student psql -q -h 127.0.0.1 -U student -d pagila_star -f Data/pagila-star.sql
# %load_ext sql
import sql
# # STEP1 : Connect to the local database where Pagila is loaded
# +
DB_ENDPOINT = "127.0.0.1"
# FIX: the star-schema tables (factSales, dim*) are loaded into the
# `pagila_star` database created above, so connect to it — connecting to
# plain `pagila` would make every fact/dimension query below fail.
DB = 'pagila_star'
DB_USER = 'student'
DB_PASSWORD = '<PASSWORD>'  # placeholder; supply via environment in real use
DB_PORT = '5432'

# postgresql://username:password@host:port/database
conn_string = "postgresql://{}:{}@{}:{}/{}" \
                        .format(DB_USER, DB_PASSWORD, DB_ENDPOINT, DB_PORT, DB)

print(conn_string)
# -
# %sql $conn_string
# # STEP2 : Facts & Dimensions are supposed to be loaded from Demo01
# <img src="pagila-star.png" width="50%"/>
# # Start by a simple cube
# %%time
# %%sql
-- Revenue aggregated over three dimensions: day of month, film rating, customer city.
SELECT dimDate.day,dimMovie.rating, dimCustomer.city, sum(sales_amount) as revenue
FROM factSales
JOIN dimMovie on (dimMovie.movie_key = factSales.movie_key)
JOIN dimDate on (dimDate.date_key = factSales.date_key)
JOIN dimCustomer on (dimCustomer.customer_key = factSales.customer_key)
group by (dimDate.day, dimMovie.rating, dimCustomer.city)
order by revenue desc
limit 20;
# ## Slicing
#
# - Slicing is the reduction of the dimensionality of a cube by 1 e.g. 3 dimensions to 2, fixing one of the dimensions to a single value
# - In the following example we have a 3-dimensional cube on day, rating, and city
# - In the example below `rating` is fixed and to "PG-13" which reduces the dimensionality
# %%time
# %%sql
-- Slicing: fix the rating dimension to the single value 'PG-13',
-- reducing the cube's dimensionality by one.
SELECT dimDate.day,dimMovie.rating, dimCustomer.city, sum(sales_amount) as revenue
FROM factSales
JOIN dimMovie on (dimMovie.movie_key = factSales.movie_key)
JOIN dimDate on (dimDate.date_key = factSales.date_key)
JOIN dimCustomer on (dimCustomer.customer_key = factSales.customer_key)
WHERE dimMovie.rating = 'PG-13'
GROUP by (dimDate.day, dimCustomer.city, dimMovie.rating)
ORDER by revenue desc
LIMIT 20;
# ## Dicing
# - Creating a subcube, same dimensionality, less values for 2 or more dimensions
# - e.g. PG-13
# %%time
# %%sql
-- Dicing: restrict several dimensions to small value sets,
-- producing a sub-cube of the same dimensionality.
SELECT dimDate.day,dimMovie.rating, dimCustomer.city, sum(sales_amount) as revenue
FROM factSales
JOIN dimMovie on (dimMovie.movie_key = factSales.movie_key)
JOIN dimDate on (dimDate.date_key = factSales.date_key)
JOIN dimCustomer on (dimCustomer.customer_key = factSales.customer_key)
WHERE dimMovie.rating in ('PG-13', 'PG')
AND dimCustomer.city in ('Bellevue', 'Lancaster')
AND dimDate.day in ('1', '15', '30')
GROUP by (dimDate.day, dimCustomer.city, dimMovie.rating)
ORDER by revenue desc
LIMIT 20;
# ## Roll-up
# - Stepping up the level of aggregation to a larger grouping
# - e.g.`city` is summed as `country`
# %%time
# %%sql
-- Roll-up: aggregate the city attribute up to the coarser country level.
SELECT dimDate.day,dimMovie.rating, dimCustomer.country, sum(sales_amount) as revenue
FROM factSales
JOIN dimMovie on (dimMovie.movie_key = factSales.movie_key)
JOIN dimDate on (dimDate.date_key = factSales.date_key)
JOIN dimCustomer on (dimCustomer.customer_key = factSales.customer_key)
GROUP by (dimDate.day, dimMovie.rating, dimCustomer.country)
ORDER by revenue desc
LIMIT 20;
# ## Drill-down
# - Breaking up one of the dimensions to a lower level.
# - e.g.`city` is broken up to `districts`
# %%time
# %%sql
-- Drill-down: break the city attribute into the finer district level.
SELECT dimDate.day,dimMovie.rating, dimCustomer.district, sum(sales_amount) as revenue
FROM factSales
JOIN dimMovie on (dimMovie.movie_key = factSales.movie_key)
JOIN dimDate on (dimDate.date_key = factSales.date_key)
JOIN dimCustomer on (dimCustomer.customer_key = factSales.customer_key)
GROUP by (dimDate.day, dimCustomer.district, dimMovie.rating)
ORDER by revenue desc
LIMIT 20;
# # Grouping Sets
# - It happens a lot that for a 3 dimensions, you want to aggregate a fact:
# - by nothing (total)
# - then by the 1st dimension
# - then by the 2nd
# - then by the 3rd
# - then by the 1st and 2nd
# - then by the 2nd and 3rd
# - then by the 1st and 3rd
# - then by the 1st and 2nd and 3rd
#
# - Since this is very common, and in all cases we are iterating through the whole fact table anyhow, there is a more clever way to do that using the SQL grouping statement "GROUPING SETS"
# ## total revenue
# + language="sql"
# SELECT sum(sales_amount) as revenue
# FROM factSales
# -
# ## revenue by country
# + language="sql"
# SELECT dimStore.country,sum(sales_amount) as revenue
# FROM factSales
# JOIN dimStore on (dimStore.store_key = factSales.store_key)
# GROUP by dimStore.country
# order by dimStore.country, revenue desc;
# -
# ## revenue by month
# + language="sql"
# SELECT dimDate.month,sum(sales_amount) as revenue
# FROM factSales
# JOIN dimDate on (dimDate.date_key = factSales.date_key)
# GROUP by dimDate.month
# order by dimDate.month, revenue desc;
# -
# ## revenue by month & country
# + language="sql"
# SELECT dimDate.month,dimStore.country,sum(sales_amount) as revenue
# FROM factSales
# JOIN dimDate on (dimDate.date_key = factSales.date_key)
# JOIN dimStore on (dimStore.store_key = factSales.store_key)
# GROUP by (dimDate.month, dimStore.country)
# order by dimDate.month, dimStore.country, revenue desc;
# -
# ## revenue total, by month, by country, by month & country All in one shot
# - watch the nones
# %%time
# %%sql
-- GROUPING SETS: total, by month, by country, and by (month, country)
-- computed in a single statement; the collapsed dimension shows as NULL.
SELECT dimDate.month,dimStore.country,sum(sales_amount) as revenue
FROM factSales
JOIN dimDate on (dimDate.date_key = factSales.date_key)
JOIN dimStore on (dimStore.store_key = factSales.store_key)
GROUP by grouping sets ((), dimDate.month, dimStore.country, (dimDate.month, dimStore.country));
# # CUBE
# - Group by CUBE (dim1, dim2, ..) produces all grouping combinations of different lengths in one go.
# - This result could be materialized in a view and queried, which would save lots of repetitive aggregation
# ```SQL
# SELECT dimDate.month,dimStore.country,sum(sales_amount) as revenue
# FROM factSales
# JOIN dimDate on (dimDate.date_key = factSales.date_key)
# JOIN dimStore on (dimStore.store_key = factSales.store_key)
# GROUP by cube(dimDate.month, dimStore.country);
# ```
#
# %%time
# %%sql
-- CUBE: all grouping combinations of (month, country) — (), (month),
-- (country), (month, country) — in one pass over the fact table.
SELECT dimDate.month,dimStore.country,sum(sales_amount) as revenue
FROM factSales
JOIN dimDate on (dimDate.date_key = factSales.date_key)
JOIN dimStore on (dimStore.store_key = factSales.store_key)
GROUP by cube(dimDate.month, dimStore.country);
# ## revenue total, by month, by country, by month & country All in one shot, NAIVE way
# %%time
# %%sql
-- Naive equivalent of the CUBE above: four separate aggregations UNIONed
-- together, each scanning the fact table independently.
SELECT NULL as month, NULL as country, sum(sales_amount) as revenue
FROM factSales
    UNION all
SELECT NULL, dimStore.country,sum(sales_amount) as revenue
FROM factSales
JOIN dimStore on (dimStore.store_key = factSales.store_key)
GROUP by dimStore.country
    UNION all
SELECT cast(dimDate.month as text) , NULL, sum(sales_amount) as revenue
FROM factSales
JOIN dimDate on (dimDate.date_key = factSales.date_key)
GROUP by dimDate.month
    UNION all
SELECT cast(dimDate.month as text),dimStore.country,sum(sales_amount) as revenue
FROM factSales
JOIN dimDate on (dimDate.date_key = factSales.date_key)
JOIN dimStore on (dimStore.store_key = factSales.store_key)
GROUP by (dimDate.month, dimStore.country)
| course-2-cloud-data-warehouses/lesson-1-exercises/2-OLAP-cubes.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import dgl
import dgl.function as fn
import torch.nn.functional as F
from torch.utils.data import Dataset, DataLoader, Sampler
from torch.distributions import Normal
import numpy as np
import torch
import torch.nn as nn
import math
import uproot#3 as uproot
import numpy as np
import pandas as pd
from tqdm import tqdm
import os, sys
# -
# #### For a node pair $(i, j)$ (from $i$ to $j$) with node features $x_i, x_j \in \mathbb{R}^n$, the score of their connection is defined as:
# ### $q_j = W_q \cdot x_j$, $\quad k_i = W_k \cdot x_i$, $\quad v_i = W_v \cdot x_i$, $\quad \mathrm{score} = q_j^{T} \cdot k_i$
class MultiHeadAttention(nn.Module):
"Multi-Head Attention"
def __init__(self, h, dim_model):
"h: number of heads; dim_model: hidden dimension"
super(MultiHeadAttention, self).__init__()
self.d_k = dim_model // h
self.h = h
# W_q, W_k, W_v, W_o
self.linears = clones(nn.Linear(dim_model, dim_model), 4)
def get(self, x, fields='qkv'):
"Return a dict of queries / keys / values."
batch_size = x.shape[0]
ret = {}
if 'q' in fields:
ret['q'] = self.linears[0](x).view(batch_size, self.h, self.d_k)
if 'k' in fields:
ret['k'] = self.linears[1](x).view(batch_size, self.h, self.d_k)
if 'v' in fields:
ret['v'] = self.linears[2](x).view(batch_size, self.h, self.d_k)
return ret
def get_o(self, x):
"get output of the multi-head attention"
batch_size = x.shape[0]
return self.linears[3](x.view(batch_size, -1))
def message_func(edges):
    """DGL message function: per-edge attention logit and value.

    Dot-products the source node's key with the destination node's query
    (keeping a trailing singleton dim) and forwards the source value.
    """
    logits = (edges.src['k'] * edges.dst['q']).sum(-1, keepdim=True)
    return {'score': logits, 'v': edges.src['v']}
# +
import torch as th
import torch.nn.functional as F
def reduce_func(nodes, d_k=64):
    """DGL reduce function: softmax-weighted sum of incoming values.

    nodes.mailbox['score'] holds the raw per-edge attention logits and
    nodes.mailbox['v'] the corresponding values; logits are scaled by
    sqrt(d_k) before the softmax, as in scaled dot-product attention.
    """
    v = nodes.mailbox['v']
    # BUG FIX: torch.sqrt() requires a Tensor and raised TypeError on the
    # plain int d_k; math.sqrt is the right tool for a scalar divisor.
    att = F.softmax(nodes.mailbox['score'] / math.sqrt(d_k), 1)
    return {'dx': (att * v).sum(1)}
# -
#import functools.partial as partial
def naive_propagate_attention(self, g, eids):
    """Run one attention pass over edges `eids` of graph `g` using the
    module-level message_func / reduce_func UDFs (the straightforward,
    non-fused variant of propagate_attention defined below)."""
    g.send_and_recv(eids, message_func, reduce_func)
# +
def src_dot_dst(src_field, dst_field, out_field):
    """Build a DGL edge-UDF computing the dot product of a source-node
    field and a destination-node field, stored under `out_field` with a
    trailing singleton dimension."""
    def _edge_udf(edges):
        product = edges.src[src_field] * edges.dst[dst_field]
        return {out_field: product.sum(dim=-1, keepdim=True)}
    return _edge_udf
def scaled_exp(field, scale_constant):
    """Build a DGL edge-UDF that divides edges.data[field] by
    `scale_constant` and exponentiates, clamping the scaled logits to
    [-5, 5] first for softmax numerical stability."""
    def _edge_udf(edges):
        scaled = (edges.data[field] / scale_constant).clamp(-5, 5)
        return {field: th.exp(scaled)}
    return _edge_udf
def propagate_attention(self, g, eids):
    """Fused attention propagation over edges `eids` of graph `g` using
    DGL built-ins: per-edge k·q logits, clamped exponentiation, then
    per-destination sums of score-weighted values ('wv') and normalizers
    ('z'); the softmax is completed later as wv / z."""
    # Compute attention score
    g.apply_edges(src_dot_dst('k', 'q', 'score'), eids)
    g.apply_edges(scaled_exp('score', np.sqrt(self.d_k)))
    # Update node state
    g.send_and_recv(eids,
                    [fn.src_mul_edge('v', 'score', 'v'), fn.copy_edge('score', 'score')],
                    [fn.sum('v', 'wv'), fn.sum('score', 'z')])
# +
class Encoder(nn.Module):
    """Stack of N identical encoder layers driven by DGL node UDFs.

    `layer` is expected to expose `self_attn` (a MultiHeadAttention),
    `sublayer` (residual/norm wrappers) and `feed_forward`; `clones` and
    `LayerNorm` are defined elsewhere in the notebook (not in this excerpt).
    """
    def __init__(self, layer, N):
        super(Encoder, self).__init__()
        self.N = N
        self.layers = clones(layer, N)
        self.norm = LayerNorm(layer.size)

    def pre_func(self, i, fields='qkv'):
        """Node UDF applied before message passing in layer i: layer-norm
        the node state and project it to the requested q/k/v fields."""
        layer = self.layers[i]
        def func(nodes):
            x = nodes.data['x']
            norm_x = layer.sublayer[0].norm(x)
            return layer.self_attn.get(norm_x, fields=fields)
        return func

    def post_func(self, i):
        """Node UDF applied after message passing in layer i: finish the
        softmax (wv / z), apply the output projection, residual dropout and
        the feed-forward sublayer; the final layer also applies the closing
        LayerNorm."""
        layer = self.layers[i]
        def func(nodes):
            x, wv, z = nodes.data['x'], nodes.data['wv'], nodes.data['z']
            o = layer.self_attn.get_o(wv / z)
            x = x + layer.sublayer[0].dropout(o)
            x = layer.sublayer[1](x, layer.feed_forward)
            return {'x': x if i < self.N - 1 else self.norm(x)}
        return func
class Decoder(nn.Module):
    """Stack of N identical decoder layers (self-attention followed by
    encoder-decoder attention), mirroring Encoder but with a sublayer
    index `l` selecting which residual/norm wrapper is applied."""
    def __init__(self, layer, N):
        super(Decoder, self).__init__()
        self.N = N
        self.layers = clones(layer, N)
        self.norm = LayerNorm(layer.size)

    def pre_func(self, i, fields='qkv', l=0):
        """Node UDF before message passing in layer i; `l` picks the
        sublayer (0 = self-attention, 1 = encoder-decoder attention)."""
        layer = self.layers[i]
        def func(nodes):
            x = nodes.data['x']
            if fields == 'kv':
                norm_x = x  # In enc-dec attention, x has already been normalized.
            else:
                norm_x = layer.sublayer[l].norm(x)
            return layer.self_attn.get(norm_x, fields)
        return func

    def post_func(self, i, l=0):
        """Node UDF after message passing in layer i: wv/z softmax
        completion, output projection, residual dropout; the feed-forward
        sublayer runs only after the enc-dec attention step (l == 1)."""
        layer = self.layers[i]
        def func(nodes):
            x, wv, z = nodes.data['x'], nodes.data['wv'], nodes.data['z']
            o = layer.self_attn.get_o(wv / z)
            x = x + layer.sublayer[l].dropout(o)
            if l == 1:
                x = layer.sublayer[2](x, layer.feed_forward)
            return {'x': x if i < self.N - 1 else self.norm(x)}
        return func
# -
class Transformer(nn.Module):
    """Graph-based Transformer: encoder/decoder stacks whose attention is
    computed by DGL message passing.  Edge groups 'ee' / 'dd' / 'ed'
    (looked up from graph.eids) carry encoder self-attention, decoder
    self-attention and encoder-decoder attention respectively."""
    def __init__(self, encoder, decoder, src_embed, tgt_embed, pos_enc, generator, h, d_k):
        super(Transformer, self).__init__()
        self.encoder, self.decoder = encoder, decoder
        self.src_embed, self.tgt_embed = src_embed, tgt_embed
        self.pos_enc = pos_enc
        self.generator = generator
        self.h, self.d_k = h, d_k

    def propagate_attention(self, g, eids):
        """Fused attention over edges `eids`: per-edge k·q logits, clamped
        exponentiation, then per-node sums of weighted values ('wv') and
        normalizers ('z'); the softmax is finished as wv / z in post_func."""
        # Compute attention score
        g.apply_edges(src_dot_dst('k', 'q', 'score'), eids)
        g.apply_edges(scaled_exp('score', np.sqrt(self.d_k)))
        # Send weighted values to target nodes
        g.send_and_recv(eids,
                        [fn.src_mul_edge('v', 'score', 'v'), fn.copy_edge('score', 'score')],
                        [fn.sum('v', 'wv'), fn.sum('score', 'z')])

    def update_graph(self, g, eids, pre_pairs, post_pairs):
        "Update the node states and edge states of the graph."
        # Pre-compute queries and key-value pairs.
        for pre_func, nids in pre_pairs:
            g.apply_nodes(pre_func, nids)
        self.propagate_attention(g, eids)
        # Further calculation after attention mechanism
        for post_func, nids in post_pairs:
            g.apply_nodes(post_func, nids)

    def forward(self, graph):
        """Full encoder/decoder pass over a batched graph wrapper exposing
        .g (the DGLGraph), .nids / .eids (node and edge id groups) and
        .src / .tgt ((tokens, positions) pairs)."""
        g = graph.g
        nids, eids = graph.nids, graph.eids

        # Word Embedding and Position Embedding
        src_embed, src_pos = self.src_embed(graph.src[0]), self.pos_enc(graph.src[1])
        tgt_embed, tgt_pos = self.tgt_embed(graph.tgt[0]), self.pos_enc(graph.tgt[1])
        g.nodes[nids['enc']].data['x'] = self.pos_enc.dropout(src_embed + src_pos)
        g.nodes[nids['dec']].data['x'] = self.pos_enc.dropout(tgt_embed + tgt_pos)

        for i in range(self.encoder.N):
            # Step 1: Encoder Self-attention
            pre_func = self.encoder.pre_func(i, 'qkv')
            post_func = self.encoder.post_func(i)
            nodes, edges = nids['enc'], eids['ee']
            self.update_graph(g, edges, [(pre_func, nodes)], [(post_func, nodes)])

        for i in range(self.decoder.N):
            # Step 2: Decoder Self-attention
            pre_func = self.decoder.pre_func(i, 'qkv')
            post_func = self.decoder.post_func(i)
            nodes, edges = nids['dec'], eids['dd']
            self.update_graph(g, edges, [(pre_func, nodes)], [(post_func, nodes)])
            # Step 3: Encoder-Decoder attention
            pre_q = self.decoder.pre_func(i, 'q', 1)
            pre_kv = self.decoder.pre_func(i, 'kv', 1)
            post_func = self.decoder.post_func(i, 1)
            nodes_e, nodes_d, edges = nids['enc'], nids['dec'], eids['ed']
            self.update_graph(g, edges, [(pre_q, nodes_d), (pre_kv, nodes_e)], [(post_func, nodes_d)])

        return self.generator(g.ndata['x'][nids['dec']])
# +
#from dgl.contrib.transformer import get_dataset, GraphPool
# -
| Tutorial6/Graph_Transformer.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import sys
sys.path.insert(0, "/cndd/fangming/CEMBA/snmcseq_dev")
from multiprocessing import Pool,cpu_count
from functools import partial
from scipy import sparse
from scipy import stats
import importlib
from __init__ import *
from __init__jupyterlab import *
import snmcseq_utils
importlib.reload(snmcseq_utils)
import re
import pickle
import datetime
from scipy import optimize
import tqdm
import importlib
sys.path.insert(0, '../')
import enhancer_gene_utils
importlib.reload(enhancer_gene_utils)
# -
today = datetime.date.today()
print(today)
# +
# color palette
cm = plt.cm.tab20c.colors

# settle down on a color palette
colors_mc = [cm[8], cm[9], cm[10],]
colors_atac = [cm[0], cm[1], cm[2],]
colors_null = ['black', cm[16], cm[17], cm[18],]
colors_both = [cm[4], cm[5], cm[6], cm[7]]

color_tracks = [
    cm,
    colors_mc,
    colors_atac,
    colors_null,
    colors_both,
]
labels = [
    'All',
    'mC',
    'ATAC',
    'Null',
    'Both',
]

# Preview each palette as a row of unit-height bars labeled on the y-axis.
for palette, palette_label in zip(color_tracks, labels):
    n_colors = len(palette)
    fig, ax = plt.subplots(figsize=(1*n_colors, 1))
    ax.bar(np.arange(n_colors), 1, color=palette)
    ax.set_xticks(np.arange(n_colors))
    ax.set_ylabel(palette_label, rotation=0, ha='right')
    ax.set_yticks([])
    sns.despine(ax=ax, left=True)
    plt.show()
# -
# basic setup
# Map data modality -> one representative color from the palettes above.
color_palette = {
    'mc': colors_mc[0],
    'atac': colors_atac[0],
    'both': colors_both[1],
}
# # read in the grand summary
# ### - 80% cells for 5-time downsampling
# Date-stamped output path template; the inner {} is filled per figure.
output_figures = '/cndd2/fangming/projects/scf_enhancers/results/figures/{}_metacell_corr_res_{{}}.pdf'.format(today)
print(output_figures)

# Grand summary table of the metacell correlation runs (one row per
# (k, r, i) combination, per the groupbys below).
input_summary = '/cndd2/fangming/projects/scf_enhancers/results/2020-01-08_metacell_corr_res_grand_res_summary_table.tsv.gz'
iterator_both = pd.read_csv(input_summary, sep='\t')
print(iterator_both.shape)
iterator_both.head()
# # Plot 1 knn
# +
def plot1(_table, _table_std,
          mc_baseline, atac_baseline, both_baseline,
          fdr_threshold,
          title_label='',
          output='',
         ):
    """Plot the six linked/total pair-gene-enhancer counts versus the number
    of metacells, one curve per modality (mC, ATAC, both), with a +/- std
    band and dashed cluster-level baselines on the first panel.

    _table / _table_std: mean and std summary frames indexed by 'num_clst'
        with columns like 'num_linked_pairs_mc'.
    mc_baseline / atac_baseline / both_baseline: horizontal reference lines.
    fdr_threshold, title_label: shown in the figure title only.
    output: if non-empty, path to save the figure.

    NOTE(review): the title also formats the global variable `knn`, so the
    surrounding cell must set `knn` before calling — hidden notebook state.
    """
    plot_cols = [
        'num_linked_pairs',
        'num_linked_genes',
        'num_linked_enhs',
        'num_total_pairs',
        'num_total_genes',
        'num_total_enhs',
    ]
    fig, axs = plt.subplots(2, 3, figsize=(5*3, 4*2))
    fig.suptitle('mC/ATAC-RNA (10x_cells_v3) cell correlation analysis\n(k={}; FDR={}; {})'
                 .format(knn, fdr_threshold, title_label,),
                 fontsize=15, y=1)
    for i_ax, (ax, col) in enumerate(zip(axs.flat, plot_cols)):
        ymax = 0
        for _suffix, _type in zip(['_mc', '_atac', '_both'],
                                  ['mC-RNA', 'ATAC-RNA', 'both'],
                                 ):
            color = color_palette[_suffix.strip('_')]
            _x = _table['num_clst']
            _x_std = _table_std['num_clst']
            _y = _table[col+_suffix]
            _y_std = _table_std[col+_suffix]
            # ax.errorbar(_x, _y, yerr=_y_std, xerr=_x_std.values,
            #             fmt='-o',
            #             markersize=5,
            #             color=color,
            #             label=_type)
            ax.plot(_x, _y, '-o', markersize=3, label=_type, color=color)
            ax.fill_between(_x, _y-_y_std, _y+_y_std, alpha=0.5, color=color)
            ymax = np.max([ymax, np.max(_y)])
        # Dashed baselines (38-cluster reference) only on the first panel.
        if i_ax == 0:
            ax.axhline(mc_baseline, color=color_palette['mc'], linestyle='--', alpha=0.5, label='mC-RNA (38 clusters)')
            ax.axhline(atac_baseline, color=color_palette['atac'], linestyle='--', alpha=0.5, label='ATAC-RNA (38 clusters)')
            ax.axhline(both_baseline, color=color_palette['both'], linestyle='--', alpha=0.5, label='both (38 clusters)')
            ymax = max(ymax, mc_baseline, atac_baseline, both_baseline)
        ax.set_xscale('log')
        ax.set_xlim([5, 20000])
        ax.set_xticks([10, 100, 1000, 10000])
        ax.set_ylim(ymin=0, ymax=1.1*ymax)
        ax.set_ylabel(col.replace('_', ' ')+(" +/- std"))
        ax.xaxis.set_major_formatter(mtick.EngFormatter())
        ax.yaxis.set_major_formatter(mtick.EngFormatter())
        ax.set_xlabel('Num metacells')
    # axs[-1].axis('off')
    handles, labels = axs.flat[0].get_legend_handles_labels()
    # handles, labels = snmcseq_utils.dedup_legends(handles, labels)
    axs.flat[2].legend(handles, labels, bbox_to_anchor=(1,1), loc='upper left')
    fig.subplots_adjust(wspace=0.4, hspace=0.3)
    if output:
        snmcseq_utils.savefig(fig, output)
    plt.show()
# +
knn = 30  # fixed k (metacell neighborhood size); also read by plot1's title
iterator_toplot = iterator_both[iterator_both['k']==knn].sort_values(['r', 'i'])

# for a fixed k, groupby r (collapsing i)
res_mean = iterator_toplot.groupby(['r']).mean().reset_index()
res_std = iterator_toplot.groupby(['r']).std().reset_index().fillna(0)

fdr_threshold = 0.2  # shown in the figure title
# 38-cluster reference counts drawn as dashed baselines by plot1.
mc_baseline = 6094+1300
atac_baseline = 2700+1300
both_baseline = 1300
title_label = '80% cell downsampling for 5 times'

output = output_figures.format('p1_k{}'.format(knn))
print(output)
plot1(res_mean, res_std,
      mc_baseline, atac_baseline, both_baseline,
      fdr_threshold,
      title_label=title_label,
      output=output,
     )
# -
# ## Plot with different knn
# Repeat plot 1 for every k available in the summary table.
knns = np.sort(iterator_both['k'].unique())
print(knns)

for knn in knns:
    iterator_toplot = iterator_both[iterator_both['k']==knn].sort_values(['r', 'i'])
    # for a fixed k, groupby r (collapsing i)
    res_mean = iterator_toplot.groupby(['r']).mean().reset_index()
    res_std = iterator_toplot.groupby(['r']).std().reset_index().fillna(0)
    mc_baseline = 6094+1300
    atac_baseline = 2700+1300
    both_baseline = 1300
    output = output_figures.format('p1_k{}'.format(knn))
    print(output)
    # NOTE(review): fdr_threshold is reused from the previous cell (0.2) —
    # hidden notebook state; consider setting it explicitly here.
    plot1(res_mean, res_std,
          mc_baseline, atac_baseline, both_baseline,
          fdr_threshold,
          output=output,
         )
| archives/correlation_analysis_celllevel/correlation_analysis_celllevel-visualize-grand-summary-Jan8.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
# # Read Data
# NOTE(review): Windows-style relative paths; prefer pathlib for portability.
train_df_low = pd.read_csv(r'Data\Toxic Comment Classification Challenge\train.csv')    # binary 'toxic' label
train_df_high = pd.read_csv(r'Data\Jigsaw Unintended Bias in Toxicity Classification\train.csv')  # fractional 'target' score

train_df_high

train_df_low
# # Concatenate Datasets
# +
x_data = pd.DataFrame()

text = train_df_high["comment_text"]
target = train_df_high["target"]  # fractional toxicity score in [0, 1]

# Append the second corpus (binary 'toxic' labels) to the same arrays.
text = np.concatenate((text,train_df_low['comment_text']) )
target = np.concatenate((target, train_df_low["toxic"]))

# Binarize: fractional scores >= 0.5 count as toxic (1), else 0.
target = np.where(target >= 0.5, 1,0)
# -

len(text)

np.unique(target, return_counts=True)

len(target)

# Class-distribution sanity check before balancing.
uniq, count = np.unique(target, return_counts=True)
print("dist : ",count)
plt.bar(uniq, count)
# # Data Balance
import random

# Reproducibility: the undersampling below is random — seed it so the
# balanced dataset (and anything trained on it) is reproducible across runs.
random.seed(42)

# Split texts by label.
toxic = text[target == 1]
nonToxic = text[target == 0]

len(toxic)

len(nonToxic)

# Undersample the majority (non-toxic) class down to the toxic count.
nonToxic_ch = random.sample(list(nonToxic), len(toxic))

x_data["text"] = np.concatenate((toxic, nonToxic_ch))

x_data["text"]

# Labels follow the concatenation order: toxic first, then non-toxic.
y_data = [1]*len(toxic)+[0]*len(nonToxic_ch)

# Verify the classes are now balanced.
uniq, count = np.unique(y_data, return_counts=True)
print("dist : ",count)
plt.bar(uniq, count)
# # Data Preprocessing
import re
import nltk
import pickle
from nltk.corpus import stopwords
from nltk.stem import WordNetLemmatizer

lemmatizer = WordNetLemmatizer()

# Worked example of the cleaning pipeline applied to every comment below.
orn = "The best course's Natural Language Processs that I have ever studied!! "
t = re.sub(r'\W', ' ', str(orn))        # replace non-word characters with spaces
t = re.sub(r'\s+[a-zA-Z]\s+', ' ', t)   # drop single letters surrounded by whitespace
t = re.sub(r'\^[a-zA-Z]\s+', ' ', t)    # drop a single letter following a literal '^'
t = re.sub(r'\s+', ' ', t, flags=re.I)  # collapse runs of whitespace
t = re.sub(r'^b\s+', '', t)             # strip a leading "b " (bytes-repr artifact)
t = t.lower()
t = t.split()
t = [lemmatizer.lemmatize(word) for word in t]  # lemmatize each token
t = ' '.join(t)
t
# +
# %%time
texts = x_data['text']

def _clean_text(raw):
    """Normalize one comment: strip non-word chars, stray single letters,
    bytes-repr prefixes and extra whitespace, lowercase, then lemmatize
    each token with the module-level WordNet lemmatizer."""
    t = re.sub(r'\W', ' ', str(raw))
    t = re.sub(r'\s+[a-zA-Z]\s+', ' ', t)
    t = re.sub(r'\^[a-zA-Z]\s+', ' ', t)
    t = re.sub(r'\s+', ' ', t, flags=re.I)
    t = re.sub(r'^b\s+', '', t)
    t = t.lower()
    return ' '.join(lemmatizer.lemmatize(word) for word in t.split())

# Build the cleaned column in a list and assign once.  The original wrote
# texts[i] = ... on a Series taken from the frame (SettingWithCopy hazard)
# and duplicated the regex pipeline inline; this is equivalent but safe,
# faster, and reuses a single helper.
cleaned = []
for i in range(len(texts)):
    if i % 10000 == 0:
        print('Step -> ', i)
    cleaned.append(_clean_text(texts[i]))

x_data['text'] = cleaned
# -
x_data['target'] = y_data

# Persist the balanced, cleaned dataset for the downstream modeling step.
# NOTE(review): Windows-style relative path; prefer pathlib for portability.
x_data.to_csv('Data\\preprocessed_data.csv',index=False)

x_data
| Preprocess.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: 'Python 3.7.4 64-bit (''Eve'': virtualenv)'
# language: python
# name: python37464bitevevirtualenvb0f75805c85f4820bb95817f7852b730
# ---
# # Completed Grid
# 
# # Answer
# $10 * 5 * 5 * 5 * 2 * 2 * 2 * 1 * 1 * 1 * 1 * 1 = 10000$
| 2019.11_Hooks_#6.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # VQE を利用しての分子シミュレーションを行う
# このチュートリアルでは、Variational Quantum Eigensolver (VQE)とその利用の狙いを紹介し、必要な理論を説明します。また、分子の基底状態エネルギーを見つけるための実装例を紹介します。
#
# ## 目次
# 1. [はじめに](#introduction)
# 2. [量子力学での変分法](#varmethod)
# 1. [数学的背景](#backgroundmath)
# 2. [基底状態の下限](#groundstate)
# 3. [Variational Quantum Eigensolver](#vqe)
# 1. [変分フォーム](#varforms)
# 2. [単純な変分フォーム](#simplevarform)
# 3. [パラメーター最適化](#optimization)
# 4. [単一量子ビットの変分フォームの例](#example)
# 5. [一般的な変分フォームの構造](#commonvarforms)
# 4. [Qiskit での VQE 実装](#implementation)
# 1. [VQE を状態ベクトルシミュレーターで実行する](#implementationstatevec)
# 2. [VQE をノイズのあるシミュレーターで実行する](#implementationnoisy)
# 5. [問題](#problems)
# 6. [参考文献](#references)
# ## はじめに <a id='introduction'></a>
# 多くのアプリケーションでは、行列固有値の最小値を見つけるのが重要です。例えば、化学では、分子を特徴付けるエルミート行列の最小固有値は、そのシステムの基底状態のエネルギーになります。将来的には、最小固有値を見つけるのに量子位相推定アルゴリズムが使われる見込みですが、実用的な応用問題の実装に必要な回路の深さが、NISQ時代に利用可能なハードウェアの限界を超えています。それゆえに、2014年にPeruzzoらが、浅い回路を利用した分子の基底状態エネルギーを推定する VQE を提案しました[1]。
#
# 形式的に述べると、最小値が不明な固有値 $\lambda_{min}$ と関連する固有状態 $|\psi_{min}\rangle$ をもったエルミート行列 $H$ が与えられた場合、VQEは $\lambda_{min}$ を下に有界として、$\lambda_{\theta}$ を推定します(下限):
#
# \begin{align*}
# \lambda_{min} \le \lambda_{\theta} \equiv \langle \psi(\theta) |H|\psi(\theta) \rangle
# \end{align*}
#
# ここで、$|\psi(\theta)\rangle$ は、$\lambda_{\theta}$ に関係する固有状態です。適当な状態 $|\psi\rangle$ から開始し、$U(\theta)$ で表現されるパラメータ化された回路を適用することで、このアルゴリズムにより、$|\psi_{min}\rangle$ に対する推定 $U(\theta)|\psi\rangle \equiv |\psi(\theta)\rangle$ が得られます。パラメータ $\theta$ を期待値 $\langle \psi(\theta) |H|\psi(\theta) \rangle$ が最小になるように古典的制御を繰り返し実施しながら推定します。
#
# ## 量子力学における変分法<a id='varmethod'></a>
# ### 数学的背景<a id='backgroundmath'></a>
#
# VQEは、量子力学の変分法アプリケーションです。変分法をよりよく理解するために、基礎的な数学的背景を説明します。行列 $A$ の固有ベクトル $|\psi_i\rangle$ は、$A$ による変換でスカラー乗数($A$ の固有値$\lambda_i$)分の違いだけで変化しません。すなわち、
#
# \begin{align*}
# A |\psi_i\rangle = \lambda_i |\psi_i\rangle
# \end{align*}
#
# です。また、行列 $H$ が随伴行列(転置と複素共役)の場合は、エルミートと呼ばれます。
#
# \begin{align*}
# H = H^{\dagger}
# \end{align*}
#
# スペクトル定理によると、エルミート行列の固有値は実数になります。したがって、$H$ の任意の固有値は $\lambda_i = \lambda_i^*$ を満たします。測定できる量は実数である必要があるため、量子システムのハミルトニアンを記述するために、エルミート行列が適切です。さらには、$H$ は以下のように表現できます。
#
# \begin{align*}
# H = \sum_{i = 1}^{N} \lambda_i |\psi_i\rangle \langle \psi_i |
# \end{align*}
#
# ここで、各 $\lambda_i$ は、対応する固有ベクトル $|\psi_i\rangle$ の固有値です。また、任意の量子状態に対するオブザーバブル $H$ の期待値は、以下の式で求められます。
#
# \begin{align}
# \langle H \rangle_{\psi} &\equiv \langle \psi | H | \psi \rangle
# \end{align}
#
# $H$ を重み付きの固有ベクトルの和として代入すると、次のようになります。
#
# \begin{align}
# \langle H \rangle_{\psi} = \langle \psi | H | \psi \rangle &= \langle \psi | \left(\sum_{i = 1}^{N} \lambda_i |\psi_i\rangle \langle \psi_i |\right) |\psi\rangle\\
# &= \sum_{i = 1}^{N} \lambda_i \langle \psi | \psi_i\rangle \langle \psi_i | \psi\rangle \\
# &= \sum_{i = 1}^{N} \lambda_i | \langle \psi_i | \psi\rangle |^2
# \end{align}
#
# 最後の式は、任意の状態に対するオブザーバブルの期待値は、$H$ に関係する固有値を利用して重みのついた線形結合として示せることを現しています。さらには、$| \langle \psi_i | \psi\rangle |^2 \ge 0$ であるため、線型結合における重みは 0以上の数値になるので、次は自明です。
#
# \begin{align}
# \lambda_{min} \le \langle H \rangle_{\psi} = \langle \psi | H | \psi \rangle = \sum_{i = 1}^{N} \lambda_i | \langle \psi_i | \psi\rangle |^2
# \end{align}
#
# 上記の式が**変分法**と呼ばれるものです(テキストによっては変分原理と呼んでいます)[2]。この式の結果から、任意の波動関数の期待値は、最小でも $H$ に関係する最小固有値にしかならないことがわかります。また、$|\psi_{min}\rangle$ 状態の期待値は、$\langle \psi_{min}|H|\psi_{min}\rangle = \langle \psi_{min}|\lambda_{min}|\psi_{min}\rangle = \lambda_{min}$ となり、期待通り $\langle H \rangle_{\psi_{min}}=\lambda_{min}$ になります。
#
#
# ### 基底状態の下限<a id='groundstate'></a>
# 系のハミルトニアンがエルミート行列 $H$ で表現されている場合、系の基底状態(ground state)エネルギー $E_{gs}$ は $H$ に関係する最小の固有値になります。*ansatz* と呼ばれる適当な波動関数 $|\psi \rangle$ を $|\psi_{min}\rangle$ に近似させるため初期推測をして、期待値 $\langle H \rangle_{\psi}$ を計算します。波動関数を繰り返し更新することで、ハミルトニアンの基底状態エネルギーに近い下限を得ることができます。
# ## Variational Quantum Eigensolver<a id='vqe'></a>
# ### 変分フォーム<a id='varforms'></a>
# 量子コンピューター上で変分法を実装するには、系統的に ansatz を変更する方法が必要です。VQE はこれを固定フォームのパラメータ化された回路の利用を通じて行います。この回路は、しばしば *変分フォーム(variational form)* と呼ばれ、その実行は線形変換 $U(\theta)$ で表現されます。変分フォームは開始状態 $|\psi\rangle$ (例えば、真空 $|0\rangle$ あるいは、Hartree Fock 状態)に適用され、出力状態 $U(\theta)|\psi\rangle\equiv |\psi(\theta)\rangle$ を生成します。$|\psi(\theta)\rangle$ に対して繰り返し最適化を適用することで、期待値 $\langle \psi(\theta)|H|\psi(\theta)\rangle \approx E_{gs} \equiv \lambda_{min}$ が出力されることを狙います。理想的には、$|\psi(\theta)\rangle$ は $|\psi_{min}\rangle$ に近くなるでしょう(ここで、'近く' というのは、状態フィデリティー、もしくはマンハッタン距離(Manhattan distance)で特徴付けられます)。実際には、この場合でなくても、$E_{gs}$ に限りなく近い有効値が得られます。
#
# さらには、多項式パラメータ数をもった固定変分フォームは、指数関数サイズのヒルベルト空間内の全ての状態を、多項式サイズの部分空間へのみ変換します。その結果として、様々な変分フォームがあります。いくつか、例えば、Ry や RyRz は目的とするドメインを考慮せず、いわば発見的に設計されたものがあります。その他、UCCSD などは、問題の構造に基づいて近似を生成するために、特定のドメイン知識を利用しています。よく利用される変分フォームの構造は、このドキュメントの後半で詳しく説明します。
#
# ### 単純な変分フォーム<a id='simplevarform'></a>
# 変分フォームを構築する際には、2つの相反する目的のバランスを考える必要があります。理想的には、$n$ 量子ビットの変分フォームは、$|\psi\rangle \in \mathbb{C}^N$ かつ $N=2^n$の任意の状態 $|\psi\rangle$ を生成できます。しかしながら、可能な限り少ないパラメータで変分フォームを構築したいでしょう。ここでは、簡単に、最初のゴールを満たし、2つめのゴールを満たさない変分フォームの構築を見てみたいと思います。
#
# $n=1$ の場合を考えます。U3 ゲートは3つのパラメータ、$\theta, \phi$ 及び $\lambda$ を受け取り、以下の変換を表現します:
#
# \begin{align}
# U3(\theta, \phi, \lambda) = \begin{pmatrix}\cos(\frac{\theta}{2}) & -e^{i\lambda}\sin(\frac{\theta}{2}) \\ e^{i\phi}\sin(\frac{\theta}{2}) & e^{i\lambda + i\phi}\cos(\frac{\theta}{2}) \end{pmatrix}
# \end{align}
#
# グローバル位相を除いて、3つのパラメータを適切に設定して実装することで、どのような単一量子ビットへも変換が行えます。その結果、単一量子ビットの場合、以下の回路で、どのような状態も生成できる変分フォームを作成できます:
#
# <img src="./images/U3_var_form.png"
# alt="U3 Variational Form"
# width="350"/>
#
#
# その上、このユニバーサルな'変分フォーム'は3つしかパラメータがないため、効率的に最適化できます。強調すべきは、任意の状態を生成できるということは、変分フォームが生成できるのは、$H$ が取りうる期待値上での達成可能な状態のセットに限定されないということです。理想的には、最小の期待値を求める部分は、古典計算の最適化の能力だけに依存します。
#
# あまり自明でないユニバーサル変分フォームは、2量子ビットの場合から導出できます。この場合、2体相互作用があり、従って、ユニバーサルを達成するためには、エンタングルメントを考慮に入れる必要があります。*Shende et al.* [3] らによる調査に従うと、以下の例がパラメータ化された、ユニバーサル2量子ビット回路になります:
#
# <img src="./images/two_qubit_var_form.png"
# alt="Two Qubit Variational Form"
# width="800"/>
#
# 上記回路で実行される変換を $U(\theta)$ とします。変分を利用して最適化した場合、$H$ の期待値は、$U(\theta)|\psi\rangle \equiv |\psi(\theta)\rangle \approx |\psi_{min}\rangle$ の時に、最小化されます。形式から、$U(\theta)$ は任意の可能な状態に変換するので、この変分フォームは2量子ビットの基底エネルギーに強く関係したものを得ることができるでしょう。制限は、古典最適化の能力だけになります。
#
# ### パラメーター最適化<a id='optimization'></a>
# 効率的にパラメータ化された変分フォームを選択したら、変分法に従って、そのパラメータをターゲットとなるハミルトニアンの期待値を最小化するように最適化する必要があります。パラメータの最適化のプロセスには様々な課題があります。例えば、量子ハードウェアには様々なタイプのノイズがあり、目的関数の評価(エネルギーの計算)は実際の目的関数を反映しないかも知れません。また、いくつかのオプティマイザーはパラメーター集合の濃度(パラメーターの数)に依存して、目的関数の評価を数多く実施します。アプリケーションの要求を考慮しながら、最適なオプティマイザーを選択する必要があります。
#
# もっとも一般的な最適化戦略は、エネルギーの変化が極大になるような方向にパラメータを更新する最急降下法です。結果として、評価の数は、最適化すべきパラメータの数に依存します。これにより、探索スペースにおいてローカル最適値をクイックに発見するアルゴリズムとなります。しかしながら、この最適化方法はしばしば局所最適時に止まることがあり、実施される回路評価数によっては比較的時間がかかります。直感的な最適化戦略ですが、VQEで利用するにはお勧めできません。
#
# ノイズのある目的関数を最適化する適切なオプティマイザーとして、*Simultaneous Perturbation Stochastic Approximation* オプティマイザー (SPSA)があります。SPSAは2回の測定だけで、目的関数の勾配を近似します。最急降下法では各パラメータを独立に摂動させるのに対して、全てのパラメータを同時にランダムに摂動させます。VQEをノイズ込みのシミュレーター、もしくは実ハードウェアで利用する場合には、古典オプティマイザーとして SPSA が推奨されます。
#
# コスト関数の評価にノイズがない場合(例えば、VQEを状態ベクトルシミュレーターで利用する場合など)は、多様な古典オプティマイザーを利用できます。Qiskit Aqua でサポートされている2つのオプティマイザーは、*Sequential Least Squares Programming* オプティマイザー (SLSQP) と *Constrained Optimization by Linear Approximation* オプティマイザー (COBYLA) です。COBYLAでは、目的関数の評価を最適化の繰り返しで1回のみ実施(つまり評価の数はパラメータセットの濃度には依存しない)ということに着目します。従って、目的関数がノイズがない場合、及び評価の数を最小化したい場合は、COBYLAの利用がお勧めです。
#
# ### 単一量子ビットの変分フォームの例<a id='example'></a>
# ではここで、基底エネルギー推定問題と似たような単一量子ビットの変分フォームを利用してみましょう。問題は、ランダムな確率ベクトルが$\vec{x}$ が与えられており、出力の確率分布が $\vec{x}$ に近くなるように、可能な単一量子ビット変分フォームのパラメーターを決定します(ここで近くは2つの確率ベクトル間のマンハッタン距離によって定義します)。
#
# 最初に python でランダム確率ベクトルを作成します。
import numpy as np

np.random.seed(999999)
# Draw a random 2-vector and normalize it into a valid probability
# distribution (non-negative entries summing to 1).
target_distr = np.random.rand(2)
target_distr = target_distr / target_distr.sum()
# 次に、単一の U3 変分フォームの3つのパラメーターを引数として受け取り、対応する量子回路をリターンする関数を定義します:
from qiskit import QuantumCircuit, ClassicalRegister, QuantumRegister
def get_var_form(params):
    """Build the single-qubit U3 variational circuit.

    params: sequence of three rotation angles (theta, phi, lambda) for
    the U3 gate.  Returns a 1-qubit / 1-clbit QuantumCircuit that ends
    in a measurement.
    """
    qreg = QuantumRegister(1, name="q")
    creg = ClassicalRegister(1, name='c')
    circuit = QuantumCircuit(qreg, creg)
    circuit.u3(params[0], params[1], params[2], qreg[0])
    circuit.measure(qreg, creg[0])
    return circuit
# また、変分フォームのパラメータのリストを入力とし、パラメータに対応したコストを計算する目的関数を定義します:
# +
from qiskit import Aer, transpile, assemble
backend = Aer.get_backend("qasm_simulator")  # shot-based simulator backend
NUM_SHOTS = 10000  # shots per objective evaluation; more shots -> less sampling noise
def get_probability_distribution(counts, num_shots=None):
    """Convert a 1-qubit measurement-counts dict into [P(0), P(1)].

    counts: mapping from bitstring ('0' / '1') to number of shots.
    num_shots: total shots used for normalization; defaults to the
        module-level NUM_SHOTS for backward compatibility.

    BUG FIX: the original iterated counts.values(), so the position of
    P(0) depended on dict insertion order, and when only one outcome was
    observed the single probability always landed in slot 0 even if it was
    P(1); indexing by bitstring key makes the mapping deterministic.
    """
    if num_shots is None:
        num_shots = NUM_SHOTS
    return [counts.get('0', 0) / num_shots, counts.get('1', 0) / num_shots]
def objective_function(params):
    """Cost for the classical optimizer: Manhattan (L1) distance between
    the circuit's measured output distribution and target_distr.

    params: the three U3 angles.  The circuit is executed with NUM_SHOTS
    shots on the qasm simulator, so the returned value is itself noisy.
    """
    # Obtain a quantum circuit instance from the parameters
    qc = get_var_form(params)
    # Execute the quantum circuit to obtain the probability distribution associated with the current parameters
    t_qc = transpile(qc, backend)
    qobj = assemble(t_qc, shots=NUM_SHOTS)
    result = backend.run(qobj).result()
    # Obtain the counts for each measured state, and convert those counts into a probability vector
    output_distr = get_probability_distribution(result.get_counts(qc))
    # Calculate the cost as the distance between the output distribution and the target distribution
    cost = sum([np.abs(output_distr[i] - target_distr[i]) for i in range(2)])
    return cost
# -
# 最後に、COBYLA オプティマイザーのインスタンスを作成し、アルゴリズムを実行します。出力は実行の度に異なることに注意してください。また、近いとはいえ、得られた分布はターゲットの分布とは完全に同じではありません。しかしながら、ショットの数を増やすことで出力の確度を向上させることができるでしょう。
# +
from qiskit.aqua.components.optimizers import COBYLA

# Initialize the COBYLA optimizer
optimizer = COBYLA(maxiter=500, tol=0.0001)

# Create the initial parameters (noting that our single qubit variational form has 3 parameters)
params = np.random.rand(3)
# Minimize the shot-noisy L1 objective; ret[0] holds the optimal
# parameters and ret[1] the final objective value.
ret = optimizer.optimize(num_vars=3, objective_function=objective_function, initial_point=params)

# Obtain the output distribution using the final parameters
qc = get_var_form(ret[0])
t_qc = transpile(qc, backend)
qobj = assemble(t_qc, shots=NUM_SHOTS)
counts = backend.run(qobj).result().get_counts(qc)
output_distr = get_probability_distribution(counts)

print("Target Distribution:", target_distr)
print("Obtained Distribution:", output_distr)
print("Output Error (Manhattan Distance):", ret[1])
print("Parameters Found:", ret[0])
# -
# ### 一般的な変分フォームの構造<a id='commonvarforms'></a>
# 既に説明したように、多項式パラメーターの変分フォームでは、任意の状態への変換を生成できません。変分フォームはこの制限をどのように扱うかによって2つのカテゴリーに分類されます。変分フォームの最初のカテゴリーはドメイン、あるいはアプリケーション固有の知識を生かして可能な出力状態のセットを制限するというものです。2つ目は、ドメインやアプリケーション固有の知識なしに、発見的な回路を利用するアプローチです。
#
# 最初のカテゴリーの変分フォームは、必要となる変換のセットを制限するために、課題ドメインの特徴を流用します。例えば、分子の基底状態エネルギーの計算には、粒子の数は*前もって*知られているとします。従って、正しい粒子数をもった開始状態を利用し、変分フォームを粒子数を保存する変換にのみ制限することで、新しい変換の部分空間への展開に必要なパラメーター数をを大きく削減することができます。実際に、結合クラスター(Coupled-Cluster)理論と同じような情報を利用して、変分フォームの UCCSD は、Hartree Fock状態から開始する場合には、分子の基底状態エネルギー値を非常に正確に推定することが出来ます。ドメイン固有の知識を利用する別の例として、実量子ハードウェアで実現可能な回路を考慮するというものが挙げられます。超電導型量子ビットに基づくような現存する量子コンピューターは量子ビット間接続に制限があります。つまり、(スワップゲートを挟み込むことなく)任意の量子ビットペアに基づく、2量子ビットゲートを実装することが出来ません。与えられた利用可能な接続性とゲートを最大限活用した回路により特定の量子コンピューターに特化した変分フォームの構築がなされています。このような変分フォームにより 2017年にIBM量子コンピューター上で、 BeH$_2$ までの大きさをもつ分子の基底状態エネルギーの推定をするVQE実装に成功しました[4]。
#
# 2番目のアプローチでは、広範囲の状態に対してよい近似が得られるようにゲートがレイヤー化されます。Qiskit Aqua では、3つのこのような変分フォーム: RyRz、Ry、及び SwapRzをサポートします(ここでは最初の2つのみ説明します)。これらの変分フォームはすべて、複数のユーザー指定の構成を受け付けます。3つの不可欠な構成は、システム上の量子ビット数、深さの設定、及びエンタグルメント設定です。変分フォームの一つのレイヤーでは、特定のパターンの単一量子ビットの回転と CX ゲートを指定します。深さの設定では、例えば、変分フォームがこのパターンを何回繰り返すかを指定します。深さの設定を増やすことで、最適化すべきパラメーター数を増やすことで生じるコストが上がりますが、変分フォームが生成できる状態のセットも増加します。最後に、エンタングルメント設定は、CXゲートの構成、内部的には数、を選択します。例えば、エンタングルメント設定が linear の場合、CXゲートは順番に直近の量子ビットペアに適用されます(したがって$n-1$ CX ゲートがレイヤーに追加されます)。エンタングルメント設定が full の場合、CXゲートは、各レイヤーのそれぞれの量子ビットペアに適用されます。`entanglement="full"` と `entanglement="linear"`の場合の、RxRz回路は以下のコードを実行することで確認できます:
from qiskit.circuit.library import EfficientSU2

# Draw the EfficientSU2 (RyRz-type) variational form for both supported
# entanglement schemes so their CX layouts can be compared side by side.
for scheme in ["linear", "full"]:
    circuit = EfficientSU2(num_qubits=4, entanglement=scheme)
    if scheme == "linear":
        print("=============Linear Entanglement:=============")
    else:
        print("=============Full Entanglement:=============")
    # We initialize all parameters to 0 for this demonstration
    display(circuit.draw(fold=100))
    print()
# 深さの設定を $d$ とします。この時、RyRz は $n\times (d+1)\times 2$ 個のパラメーターを持ち、linear エンタングルメントの Ry は $2n\times(d + \frac{1}{2})$ 個のパラメーター、また Ryで full エンタングルメントの場合は、$d\times n\times \frac{(n + 1)}{2} + n$ 個のパラメーターを持つようになります。
# ## Qiskit での VQE 実装<a id='implementation'></a>
# このセクションでは、プログラミングアプローチでのVQEの実装例を示します。Qiskit Aqua では宣言的な実装もできますが、この場合動作しているアルゴリズムの情報が表面に出てきません。このコードは、特に量子ビット操作の準備は、Qiskit チュートリアルリポジトリに基づいています(リポジトリは 2019年7月の時点で https://github.com/Qiskit/qiskit-tutorials にあります)。
#
# 次のライブラリを最初にインポートします。
#
from qiskit.aqua.algorithms import VQE, NumPyEigensolver
import matplotlib.pyplot as plt
import numpy as np
from qiskit.chemistry.components.variational_forms import UCCSD
from qiskit.chemistry.components.initial_states import HartreeFock
from qiskit.circuit.library import EfficientSU2
from qiskit.aqua.components.optimizers import COBYLA, SPSA, SLSQP
from qiskit.aqua.operators import Z2Symmetries
from qiskit import IBMQ, BasicAer, Aer
from qiskit.chemistry.drivers import PySCFDriver, UnitsType
from qiskit.chemistry import FermionicOperator
from qiskit.aqua import QuantumInstance
from qiskit.ignis.mitigation.measurement import CompleteMeasFitter
from qiskit.providers.aer.noise import NoiseModel
# ### VQEを状態ベクトルシミュレーターで実行する<a id='implementationstatevec'></a>
# 様々な原子間距離での LiH の基底エネルギーの計算をデモンストレーションしてみます。各距離での分子ドライバーを作成する必要があります。この実験では、利用する量子ビットの数を減らすため、内核固定(frozen core)し、2つの非占有起動を削除していることに注意してください。最初に、原子間距離を引数とし、適切な量子ビット演算子 $H$ と演算子に関するその他の情報を返却する関数を定義します。
#
def get_qubit_op(dist):
    """Build the qubit Hamiltonian for LiH at inter-atomic distance ``dist`` (Angstrom).

    Freezes the core orbital and removes two virtual orbitals to shrink the
    problem, maps the fermionic operator to qubits with the parity mapping,
    and applies the two-qubit symmetry reduction.

    Returns:
        (qubitOp, num_particles, num_spin_orbitals, shift) where ``shift`` is
        the constant energy offset (frozen-core shift + nuclear repulsion) to
        add back to eigenvalues of ``qubitOp``.
    """
    driver = PySCFDriver(atom="Li .0 .0 .0; H .0 .0 " + str(dist), unit=UnitsType.ANGSTROM,
                         charge=0, spin=0, basis='sto3g')
    molecule = driver.run()
    freeze_list = [0]        # spatial orbitals to freeze (core)
    remove_list = [-3, -2]   # virtual spatial orbitals to drop (negative = from the end)
    repulsion_energy = molecule.nuclear_repulsion_energy
    num_particles = molecule.num_alpha + molecule.num_beta
    num_spin_orbitals = molecule.num_orbitals * 2
    # Normalize negative indices, shift the removal indices to account for the
    # frozen orbitals, then mirror each index into the beta-spin block.
    remove_list = [x % molecule.num_orbitals for x in remove_list]
    freeze_list = [x % molecule.num_orbitals for x in freeze_list]
    remove_list = [x - len(freeze_list) for x in remove_list]
    remove_list += [x + molecule.num_orbitals - len(freeze_list) for x in remove_list]
    freeze_list += [x + molecule.num_orbitals for x in freeze_list]
    ferOp = FermionicOperator(h1=molecule.one_body_integrals, h2=molecule.two_body_integrals)
    ferOp, energy_shift = ferOp.fermion_mode_freezing(freeze_list)
    num_spin_orbitals -= len(freeze_list)
    num_particles -= len(freeze_list)
    ferOp = ferOp.fermion_mode_elimination(remove_list)
    num_spin_orbitals -= len(remove_list)
    qubitOp = ferOp.mapping(map_type='parity', threshold=0.00000001)
    # Parity mapping enables tapering two qubits via particle-number symmetry.
    qubitOp = Z2Symmetries.two_qubit_reduction(qubitOp, num_particles)
    shift = energy_shift + repulsion_energy
    return qubitOp, num_particles, num_spin_orbitals, shift
# 最初に、正確な基底状態エネルギーを、量子ビット演算子及び古典的固有値計算法(ExactEigensolver)を使って計算します。次に、初期状態 $|\psi\rangle$ を作成し、これを使って VQE が最終的 ansatz $\min_{\theta}(|\psi(\theta)\rangle)$ を生成するようにします。各原子間距離での正確な値と VQEの結果を記録します。`vqe.run(backend)['energy'] + shift` で与えられる結果と $\min_{\theta}\left(\langle \psi(\theta)|H|\psi(\theta)\rangle\right)$ 量は同じであることに着目してください。ここでは、最小はかならずしももっとも最小ではありません。
#
# `VQE(qubitOp, var_form, optimizer, 'matrix')` で VQE インスタンスを初期化する時には、$|\psi(\theta)\rangle$ に対する $H$ の期待値は行列の掛け算で直接計算しています。しかしながら、実量子デバイスや、`VQE(qubitOp, var_form, optimizer, 'paulis')` を使った `qasm_simulator`などの実シミュレーターの場合は、期待値の計算はより複雑になります。ハミルトニアンはパウリストリング (Pauli strings - パウリ行列のテンソル積)の足し算で表現され、使用したマッピングで指定されたパウリ項が、量子ビットに適用されます。$|\psi(\theta)\rangle$ に対応した回路の後に、各パウリストリングに対応する回路が追加されます。結果的に、これらの回路が実行されると、$|\psi(\theta)\rangle$ に対する $H$ の期待値の決定に利用できます。以下の例では、VQE のインスタンスを `matrix` モードで初期化しているので、期待値の計算には行列の掛け算を通じて直接計算しています。
#
# 注意: 以下のコードは完了までしばらく時間がかかります。
# +
backend = BasicAer.get_backend("statevector_simulator")
distances = np.arange(0.5, 4.0, 0.1)  # inter-atomic distances to scan (Angstrom)
exact_energies = []
vqe_energies = []
optimizer = SLSQP(maxiter=5)
for dist in distances:
    qubitOp, num_particles, num_spin_orbitals, shift = get_qubit_op(dist)
    # Classical reference value at this distance (exact diagonalization).
    result = NumPyEigensolver(qubitOp).run()
    exact_energies.append(np.real(result.eigenvalues) + shift)
    # Hartree-Fock reference state used as the starting point of the UCCSD ansatz.
    initial_state = HartreeFock(
        num_spin_orbitals,
        num_particles,
        qubit_mapping='parity'
    )
    var_form = UCCSD(
        num_orbitals=num_spin_orbitals,
        num_particles=num_particles,
        initial_state=initial_state,
        qubit_mapping='parity'
    )
    vqe = VQE(qubitOp, var_form, optimizer)
    # `shift` adds back the frozen-core and nuclear-repulsion constants.
    vqe_result = np.real(vqe.run(backend)['eigenvalue'] + shift)
    vqe_energies.append(vqe_result)
    print("Interatomic Distance:", np.round(dist, 2), "VQE Result:", vqe_result, "Exact Energy:", exact_energies[-1])

print("All energies have been calculated")
# -
# Overlay the VQE dissociation curve on the exact (classical) reference.
for energies, curve_label in [(exact_energies, "Exact Energy"),
                              (vqe_energies, "VQE Energy")]:
    plt.plot(distances, energies, label=curve_label)
plt.xlabel('Atomic distance (Angstrom)')
plt.ylabel('Energy')
plt.legend()
plt.show()
# VQEの結果は、正確な値と非常に近いことに着目してください。そのため、正確な値のカーブは VQEカーブに隠れてしまっています。
# ### VQE をノイズのあるシミュレーターで実行する<a id='implementationnoisy'></a>
#
# ここでは、H$_2$ の基底状態エネルギーの計算に、ノイズのあるシミュレーターとエラー軽減を利用してみます。
#
# 最初に、分子のハミルトニアンを表現する量子ビット演算子を用意します:
# Build the H2 qubit Hamiltonian (parity mapping + two-qubit reduction).
driver = PySCFDriver(atom='H .0 .0 -0.3625; H .0 .0 0.3625', unit=UnitsType.ANGSTROM, charge=0, spin=0, basis='sto3g')
molecule = driver.run()
num_particles = molecule.num_alpha + molecule.num_beta
qubitOp = FermionicOperator(h1=molecule.one_body_integrals, h2=molecule.two_body_integrals).mapping(map_type='parity')
qubitOp = Z2Symmetries.two_qubit_reduction(qubitOp, num_particles)
# 次に、デバイスのカップリングマップとノイズモデルを IBMQ プロバイダーからロードし、量子インスタンスを作成し、エラー軽減を有効にします:
# Build a noisy simulation: qasm_simulator configured with the coupling map
# and noise model of a real device, plus measurement-error mitigation.
IBMQ.load_account()
provider = IBMQ.get_provider(hub='ibm-q')
backend = Aer.get_backend("qasm_simulator")
device = provider.get_backend("ibmq_lima")
coupling_map = device.configuration().coupling_map
noise_model = NoiseModel.from_backend(device.properties())
quantum_instance = QuantumInstance(backend=backend,
                                   shots=8192,
                                   noise_model=noise_model,
                                   coupling_map=coupling_map,
                                   # Calibrate the mitigation matrix, refreshed every 30 minutes
                                   measurement_error_mitigation_cls=CompleteMeasFitter,
                                   cals_matrix_refresh_period=30)
# 最後に、オプティマイザー、変分フォーム、および VQE インスタンスを構成します。2量子ビット回路の深さが増すにつれて、ノイズの効果が増加するため、ここでは UCCSD ではなく、発見的な変分フォーム (RYRZ)を利用します。RYRZは、UCCSDに比べ浅い回路で、実質的により少ない2量子ビットゲート数となります。
#
# 注意:以下のコードは完了までしばらく時間がかかります。
# Exact reference energy for comparison with the noisy VQE result.
exact_solution = NumPyEigensolver(qubitOp).run()
print("Exact Result:", np.real(exact_solution.eigenvalues) + molecule.nuclear_repulsion_energy)
# SPSA tolerates noisy objective evaluations better than gradient-based optimizers.
optimizer = SPSA(maxiter=100)
# Hardware-efficient ansatz (shallower than UCCSD, fewer two-qubit gates).
var_form = EfficientSU2(qubitOp.num_qubits, entanglement="linear")
vqe = VQE(qubitOp, var_form, optimizer=optimizer)
ret = vqe.run(quantum_instance)
vqe_result = np.real(ret['eigenvalue']+ molecule.nuclear_repulsion_energy)
print("VQE Result:", vqe_result)
# たとえ結果が化学有効値(正しい結果に対して 0.0016 Hartree と定義されている)になっていませんが、ノイズ軽減が有効な場合、正確な値にかなり近いと言えるでしょう。
# ## 問題<a id='problems'></a>
# 1. 基底状態が $n$ 量子ビットの最大エンタングルに近いことが約束されている、ハミルトニアン $H$ が与えられているとする。どの変分フォームが、$H$ の基底状態エネルギーを効率的かつ正確に与えるかを説明せよ。ここでは自作の変分フォームを使ってもよい。その場合、このハミルトニアンになぜそれが適切かを説明せよ。
#
# 2. COBYLAオプティマイザーと 1000ショットの `qasm_simulator`、および 60パウリストリングをもったハミルトニアンを使う場合、1回の最適化の際に行われる回路評価数を計算せよ。
#
# 3. 原子間距離が $1.3$Å の BeH$_2$ の基底状態エネルギーを推定せよ。`get_qubit_op(dist)` 関数を再利用する場合、`atom="Li .0 .0 .0; H .0 .0 " + str(dist)` を、`atom="Be .0 .0 .0; H .0 .0 -" + str(dist) + "; H .0 .0 " + str(dist)` で置き換え、`get_qubit_op(1.3)` を実行する。この分子の場合、非占有軌道の削除をすると、化学的な正確性が失われることに注意すること。しかしながら、必要となる量子ビット数を6まで下げる(多くのラップトップで効果的なシミュレーションができるようにする)場合は、このロスは許容範囲である。興味のある読者は、必要する量子ビット数を 7 までにして化学正確性を失わないように試みてもよい。
# ## 参考文献<a id='references'></a>
# 1. Peruzzo, Alberto, et al. "A variational eigenvalue solver on a photonic quantum processor." *Nature communications* 5 (2014): 4213.
# 2. Griffiths, <NAME>., and <NAME>. Introduction to quantum mechanics. *Cambridge University Press*, 2018.
# 3. Shende, <NAME>., <NAME>, and <NAME>. "Minimal universal two-qubit cnot-based circuits." arXiv preprint quant-ph/0308033 (2003).
# 4. Kandala, Abhinav, et al. "Hardware-efficient variational quantum eigensolver for small molecules and quantum magnets." Nature 549.7671 (2017): 242.
import qiskit
# Record the Qiskit component versions used to run this notebook.
qiskit.__qiskit_version__
| i18n/locales/ja/ch-applications/vqe-molecules.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: conda_python3
# language: python
# name: conda_python3
# ---
# # Follow these samples:
# * https://docs.aws.amazon.com/sagemaker/latest/dg/ex1-batch-transform.html
# * https://github.com/awslabs/amazon-sagemaker-examples/tree/master/sagemaker_batch_transform
# * https://github.com/awslabs/amazon-sagemaker-examples/tree/master/sagemaker-python-sdk/scikit_learn_iris
| 09_deploy/archive/99_Batch_Transform_Reviews_TODO.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .r
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: R
# language: R
# name: ir
# ---
# # Baseline forecasting for Air Passenger data
#
# - naive
# - seasonal naive
# - random walk
# - random walk with drift
# Data wrangling, plotting, time-series and error-metric libraries
library(readr)
library(dplyr)
library(ggplot2)
library(gridExtra)
library(xts)
library(fpp2)
library(forecast)
library(DMwR)
library(Metrics)
library(hydroGOF)
# Default notebook figure size (inches)
options(repr.plot.width=8, repr.plot.height=4)
# ## 1. Data import
# Monthly international airline passenger counts, 1949-1960
data <- read_csv('../data/air-passenger/AirPassengers.csv')
head(data)
# +
# change the name of column
names(data) <- c('Month', 'passengers')
head(data)
tail(data)
nrow(data)
# -
# ## 2. Convert to ts object
# +
# Drop the Month column and build a monthly ts spanning 1949-1960
passenger <-
  ts(
    data[-1],
    frequency = 12,
    start = c(1949, 1),
    end = c(1960, 12)
  )
head(passenger)
tail(passenger)
# -
autoplot(passenger) + ylab('#passengers') + ggtitle('Air passenger data')
# ## 3. Train test split
# +
## training and test split
# First 115 months for training, last 29 months held out for testing
train <- head(passenger, 115)
test <- tail(passenger, 29)
autoplot(train, series = 'train') + autolayer(test, series = 'test') +
  ylab('#passengers')
# -
# ## 4. Naive model or Random walk without drift
# +
# ARIMA(0,1,0) is a random walk: the one-step forecast is the previous value
naive_model <- Arima(train, order = c(0, 1, 0))
# Apply the fitted model to the test period without re-estimating parameters
naive_frcst <- Arima(test, model = naive_model)
autoplot(passenger, series = 'actual data') +
  autolayer(fitted(naive_model), series = 'train prediction') +
  autolayer(fitted(naive_frcst), series = 'test prediction') +
  xlab('Year') +
  ylab('#passengers') +
  ggtitle('Naive method forecasting')
# -
summary(naive_model)
checkresiduals(naive_model)
# +
# In-sample (train) and out-of-sample (test) fitted values of the naive model
train_pred <- fitted(naive_model)
test_pred <- fitted(naive_frcst)

# RMSE / MAE, plus NRMSE = RMSE scaled by the standard deviation of the series
train_rmse <- sqrt(mse(train, train_pred))
train_mae <- mae(train, train_pred)
train_nrmse <- train_rmse / sd(train)

test_rmse <- sqrt(mse(test, test_pred))
test_mae <- mae(test, test_pred)
test_nrmse <- test_rmse / sd(test)

# Report every metric with three decimals
metric_values <- c('Training RMSE :' = train_rmse,
                   'Training NRMSE :' = train_nrmse,
                   'Training MAE :' = train_mae,
                   'Test RMSE :' = test_rmse,
                   'Test NRMSE :' = test_nrmse,
                   'Test MAE :' = test_mae)
for (metric_label in names(metric_values)) {
  print(paste0(metric_label, round(metric_values[[metric_label]], 3)))
}
# -
# ## 5. Seasonal naive
# +
# Seasonal random walk: forecast equals the value from the same month last year
snaive_model <- Arima(train, order = c(0, 0, 0), seasonal = list(order = c(0, 1, 0)))
snaive_frcst <- Arima(test, model = snaive_model)
autoplot(passenger, series = 'actual data') +
  autolayer(fitted(snaive_model), series = 'train prediction') +
  autolayer(fitted(snaive_frcst), series = 'test prediction') +
  xlab('Year') +
  ylab('#passengers') +
  ggtitle('Seasonal Naive method forecasting')
# -
summary(snaive_model)
checkresiduals(snaive_model)
# +
# Train/test error metrics for the seasonal naive model
train_pred <- fitted(snaive_model)
test_pred <- fitted(snaive_frcst)
train_rmse <- sqrt(mse(train, train_pred))
train_mae <- mae(train, train_pred)
train_nrmse <- train_rmse/sd(train)
test_rmse <- sqrt(mse(test, test_pred))
test_mae <- mae(test, test_pred)
test_nrmse <- test_rmse/sd(test)
print(paste0('Training RMSE :', round(train_rmse, 3)))
print(paste0('Training NRMSE :', round(train_nrmse, 3)))
print(paste0('Training MAE :', round(train_mae, 3)))
print(paste0('Test RMSE :', round(test_rmse, 3)))
print(paste0('Test NRMSE :', round(test_nrmse, 3)))
print(paste0('Test MAE :', round(test_mae, 3)))
# -
# ## 6. Random walk with Drift
# +
# Random walk plus a constant drift (average historical change per step)
rwf_model <- Arima(train, order = c(0, 1, 0), include.drift = TRUE)
rwf_frcst <- Arima(test, model = rwf_model)
autoplot(passenger, series = 'actual data') +
  autolayer(fitted(rwf_model), series = 'train prediction') +
  autolayer(fitted(rwf_frcst), series = 'test prediction') +
  xlab('Year') +
  ylab('#passengers') +
  ggtitle('Random walk with drift method forecasting')
# -
summary(rwf_model)
checkresiduals(rwf_model)
# +
# Train/test error metrics for the random walk with drift model
train_pred <- fitted(rwf_model)
test_pred <- fitted(rwf_frcst)
train_rmse <- sqrt(mse(train, train_pred))
train_mae <- mae(train, train_pred)
train_nrmse <- train_rmse/sd(train)
test_rmse <- sqrt(mse(test, test_pred))
test_mae <- mae(test, test_pred)
test_nrmse <- test_rmse/sd(test)
print(paste0('Training RMSE :', round(train_rmse, 3)))
print(paste0('Training NRMSE :', round(train_nrmse, 3)))
print(paste0('Training MAE :', round(train_mae, 3)))
print(paste0('Test RMSE :', round(test_rmse, 3)))
print(paste0('Test NRMSE :', round(test_nrmse, 3)))
print(paste0('Test MAE :', round(test_mae, 3)))
# -
# ## Combined plots
# Overlay all three baseline forecasts on the train/test data for comparison
autoplot(train, series = 'train') +
  autolayer(test, series = 'test') +
  autolayer(fitted(rwf_model), series = 'rwf-train') +
  autolayer(fitted(rwf_frcst), series = 'rwf-test') +
  autolayer(fitted(snaive_model), series = 'snaive-train') +
  autolayer(fitted(snaive_frcst), series = 'snaive-test') +
  autolayer(fitted(naive_model), series = 'naive-train') +
  autolayer(fitted(naive_frcst), series = 'naive-test') +
  ggtitle('Comparison of naive, snaive and rwf') +
  xlab('Year') +
  ylab('#passengers') +
  guides(colour=guide_legend(title="Forecast"))
| 01.air_passenger/01.passenger-forecasting-baseline.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Easy Ab initio calculation with ASE-Siesta-Pyscf
#
# ## No installation necessary, just download a ready to go container for any system, or run it into the cloud
# ### Are we really on the Amazon cloud??
# cat /proc/cpuinfo
# ### I do not have on my laptop an: Intel(R) Xeon(R) CPU E5-2676 v3 @ 2.40GHz
# ### We first import the necessary libraries and define the system using ASE
# +
# import libraries and set up the molecule geometry
from ase.units import Ry, eV, Ha
from ase.calculators.siesta import Siesta
from ase import Atoms
import numpy as np
import matplotlib.pyplot as plt

# Water molecule in a 20x20x20 Angstrom box (large enough to isolate it)
H2O = Atoms('H2O', positions = [[-0.757, 0.586, 0.000],
                                [0.757, 0.586, 0.000],
                                [0.0, 0.0, 0.0]],
            cell=[20, 20, 20])

# visualization of the particle
from ase.visualize import view
view(H2O, viewer='x3d')
# -
# ### We can then run the DFT calculation using Siesta
# +
# enter siesta input and run siesta
siesta = Siesta(
    mesh_cutoff=150 * Ry,          # real-space grid cutoff
    basis_set='DZP',               # double-zeta polarized basis
    pseudo_qualifier='lda',
    energy_shift=(10 * 10**-3) * eV,
    fdf_arguments={
        'SCFMustConverge': False,
        'COOP.Write': True,
        'WriteDenchar': True,
        'PAO.BasisType': 'split',
        'DM.Tolerance': 1e-4,
        'DM.MixingWeight': 0.1,
        'MaxSCFIterations': 300,
        'DM.NumberPulay': 4,
        'XML.Write': True})

# Attach the calculator and trigger the DFT run
H2O.set_calculator(siesta)
e = H2O.get_potential_energy()
# -
# ### The TDDFT calculations with PySCF-NAO
# +
# compute polarizability using pyscf-nao (TDDFT over a 0-15 eV frequency grid)
siesta.pyscf_tddft(label="siesta", jcutoff=7, iter_broadening=0.15/Ha,
                   xc_code='LDA,PZ', tol_loc=1e-6, tol_biloc=1e-7, freq = np.arange(0.0, 15.0, 0.05))
# +
# plot polarizability with matplotlib
# %matplotlib inline
# Imaginary part of P_xx vs frequency: non-interacting (left) vs interacting (right)
fig = plt.figure(1)
ax1 = fig.add_subplot(121)
ax2 = fig.add_subplot(122)
ax1.plot(siesta.results["freq range"], siesta.results["polarizability nonin"][:, 0, 0].imag)
ax2.plot(siesta.results["freq range"], siesta.results["polarizability inter"][:, 0, 0].imag)
ax1.set_xlabel(r"$\omega$ (eV)")
ax2.set_xlabel(r"$\omega$ (eV)")
ax1.set_ylabel(r"Im($P_{xx}$) (au)")
ax2.set_ylabel(r"Im($P_{xx}$) (au)")
ax1.set_title(r"Non interacting")
ax2.set_title(r"Interacting")
fig.tight_layout()
# -
| pyscf/nao/notebook/AWS/example-ase-siesta-pyscf-h2o.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: 'Python 3 '
# language: python
# name: python3
# ---
# # Introducing CNNS with TensorFlow
# ## Load libraries
# +
# data tools
import os
import numpy as np
import matplotlib.pyplot as plt
# sklearn tools
from sklearn.preprocessing import LabelBinarizer
from sklearn.metrics import classification_report
# tf tools
from tensorflow.keras.datasets import cifar10
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import (Conv2D,
MaxPooling2D,
Activation,
Flatten,
Dense)
from tensorflow.keras.utils import plot_model
from tensorflow.keras.optimizers import SGD
from tensorflow.keras import backend as K
# -
# __Define plotting function__
# + tags=[]
def plot_history(H, epochs):
    """Plot training/validation loss and accuracy curves from a Keras History.

    Args:
        H: History object returned by ``model.fit``.
        epochs: number of epochs that were trained (x-axis length).
    """
    plt.style.use("seaborn-colorblind")
    epoch_range = np.arange(0, epochs)
    # (panel index, train key, val key, train label, val label, title, y-label)
    panels = (
        (1, "loss", "val_loss", "train_loss", "val_loss", "Loss curve", "Loss"),
        (2, "accuracy", "val_accuracy", "train_acc", "val_acc", "Accuracy curve", "Accuracy"),
    )
    plt.figure(figsize=(12, 6))
    for position, train_key, val_key, train_label, val_label, title, ylabel in panels:
        plt.subplot(1, 2, position)
        plt.plot(epoch_range, H.history[train_key], label=train_label)
        plt.plot(epoch_range, H.history[val_key], label=val_label, linestyle=":")
        plt.title(title)
        plt.xlabel("Epoch")
        plt.ylabel(ylabel)
        plt.tight_layout()
        plt.legend()
    plt.show()
# -
# __Load cifar10 data__
# CIFAR-10: 50k train / 10k test 32x32 RGB images in 10 classes
(X_train, y_train), (X_test, y_test) = cifar10.load_data()
# + tags=[]
# Scale pixel values from [0, 255] to [0, 1]
X_train = X_train/255
X_test = X_test/255
# + tags=[]
# create one-hot encodings
lb = LabelBinarizer()
y_train = lb.fit_transform(y_train)
# Reuse the binarizer fitted on the training labels rather than re-fitting on
# the test labels, so the class-to-column mapping is guaranteed to match.
y_test = lb.transform(y_test)
# + tags=[]
# initialize label names for CIFAR-10 dataset (index order matches class ids)
label_names = ["airplane", "automobile",
               "bird", "cat",
               "deer", "dog",
               "frog", "horse",
               "ship", "truck"]
# -
# ## ShallowNet
# ShallowNet architecture: </br>
# >INPUT => CONV => ReLU => FC
# + tags=[]
# initialise model
model = Sequential()

# define CONV => ReLU: 32 filters of size 3x3, "same" padding keeps 32x32 output
model.add(Conv2D(32,
                 (3,3),
                 padding = "same",
                 input_shape = (32, 32, 3)))
model.add(Activation("relu"))

# FC classifier: flatten feature maps, one hidden layer, softmax over 10 classes
model.add(Flatten())
model.add(Dense(128))
model.add(Activation("relu"))
model.add(Dense(10))
model.add(Activation("softmax"))
# -
# __Compile model__
# + tags=[]
# Plain SGD with a small learning rate; one-hot labels => categorical cross-entropy
sgd = SGD(learning_rate =.001)
model.compile(loss="categorical_crossentropy",
              optimizer=sgd,
              metrics=["accuracy"])
# -
# __Model summary__
# + tags=[]
model.summary()
# -
# __Plot model__
# + tags=[]
plot_model(model, show_shapes=True, show_layer_names=True)
# -
# __Train model__
# + tags=[]
# Train for 20 epochs, validating against the held-out test set each epoch
H = model.fit(X_train, y_train,
              validation_data=(X_test, y_test),
              batch_size=32,
              epochs=20,
              verbose=1)
# -
# __Evaluate model__
# + tags=[]
predictions = model.predict(X_test, batch_size=32)
# + tags=[]
# argmax converts one-hot / probability vectors back to class indices
print(classification_report(y_test.argmax(axis=1),
                            predictions.argmax(axis=1),
                            target_names=label_names))
# + tags=[]
plot_history(H, 20)
# -
# ## LeNet
# LeNet architecture:
# >INPUT => CONV => ReLU => MAXPOOL => CONV => ReLU => MAXPOOL => FC => ReLU => FC
# __Define model__
# + tags=[]
# define model
model = Sequential()

# first set of layers CONV => RELU => MAXPOOL (32 filters, 3x3; pooling halves spatial dims)
model.add(Conv2D(32, (3,3),
                 padding="same",
                 input_shape=(32,32,3)))
model.add(Activation("relu"))
model.add(MaxPooling2D(pool_size = (2,2),
                       strides = (2,2)))

# second set of layers CONV => RELU => MAXPOOL (50 filters, 5x5)
model.add(Conv2D(50, (5,5),
                 padding="same"))
model.add(Activation("relu"))
model.add(MaxPooling2D(pool_size = (2,2),
                       strides = (2,2)))

# FC => RELU
model.add(Flatten())
model.add(Dense(128))
model.add(Activation("relu"))

# softmax classifier
model.add(Dense(10))
model.add(Activation("softmax"))
# -
# __Compile model__
# + tags=[]
sgd = SGD(learning_rate=0.001)
model.compile(loss="categorical_crossentropy",
              optimizer=sgd,
              metrics=["accuracy"])
# -
# __Model summary__
# + tags=[]
model.summary()
# -
# __Train model__
# + tags=[]
# train model
H = model.fit(X_train, y_train,
              validation_data=(X_test, y_test),
              batch_size=32,
              epochs=20,
              verbose=1)
# + tags=[]
plot_history(H,20)
# + tags=[]
predictions = model.predict(X_test, batch_size=32)
print(classification_report(y_test.argmax(axis=1),
                            predictions.argmax(axis=1),
                            target_names=label_names))
# -
| notebooks/session9_inclass_rdkm.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python (struct-lmm2)
# language: python
# name: struct-lmm2
# ---
import numpy as np
from numpy import ones
from numpy_sugar import ddot
import os
import sys
import pandas as pd
from pandas_plink import read_plink1_bin, Chunk
from numpy.linalg import cholesky
from numpy_sugar.linalg import economic_svd
import xarray as xr
from struct_lmm2 import StructLMM2
from limix.qc import quantile_gaussianize
import struct_lmm2 # check that it refers to code in repo
struct_lmm2
# Chromosome analysed in this run
chrom = 1
## this file will map cells to donors, it will also only including donors we have single cell data (a subset of all of HipSci donors)
sample_mapping_file = "/hps/nobackup/stegle/users/acuomo/all_scripts/struct_LMM2/sc_endodiff/sample_mapping_file.tsv"
sample_mapping = pd.read_csv(sample_mapping_file, sep='\t', dtype={"genotype_individual_id": str, "phenotype_sample_id": str})
## extract unique individuals
donors = sample_mapping["genotype_individual_id"].unique()
donors.sort()
print("Number of unique donors: {}".format(len(donors)))
## read in genotype file
plink_file = "/hps/nobackup/hipsci/scratch/genotypes/imputed/2017-03-27/Full_Filtered_SNPs_Plink/hipsci.wec.gtarray.HumanCoreExome.imputed_phased.20170327.genotypes.norm.renamed.bed"
# G = read_plink1_bin(plink_file, chunk=Chunk(nsamples=10, nvariants=3))
G = read_plink1_bin(plink_file)
## read in GRM kinship matrix
kinship_file = "/hps/nobackup/hipsci/scratch/genotypes/imputed/2017-03-27/Full_Filtered_SNPs_Plink-F/hipsci.wec.gtarray.HumanCoreExome.imputed_phased.20170327.genotypes.norm.renamed.kinship"
K = pd.read_csv(kinship_file, sep="\t", index_col=0)
assert all(K.columns == K.index)
# Wrap as a labelled square matrix and sort rows/columns identically
K = xr.DataArray(K.values, dims=["sample_0", "sample_1"], coords={"sample_0": K.columns, "sample_1": K.index})
K = K.sortby("sample_0").sortby("sample_1")
# Keep only donors present in both the mapping file and the kinship matrix
donors = sorted(set(list(K.sample_0.values)).intersection(donors))
print("Number of donors after kinship intersection: {}".format(len(donors)))
## subset to relevant donors
K = K.sel(sample_0=donors, sample_1=donors)
assert all(K.sample_0 == donors)
assert all(K.sample_1 == donors)
## and decompose such as K = L @ L.T
L_kinship = cholesky(K.values)
L_kinship = xr.DataArray(L_kinship, dims=["sample", "col"], coords={"sample": K.sample_0.values})
assert all(L_kinship.sample.values == K.sample_0.values)
# Free the full kinship matrix; only its Cholesky factor is needed downstream
del K
print("Sample mapping number of rows BEFORE intersection: {}".format(sample_mapping.shape[0]))
# Keep only cells whose donor survived the kinship intersection
sample_mapping = sample_mapping[sample_mapping["genotype_individual_id"].isin(donors)]
print("Sample mapping number of rows AFTER intersection: {}".format(sample_mapping.shape[0]))
# expand from donors to cells (one row of L per cell, repeated per donor)
L_expanded = L_kinship.sel(sample=sample_mapping["genotype_individual_id"].values)
assert all(L_expanded.sample.values == sample_mapping["genotype_individual_id"].values)
# environments
# cells by MOFA factors (10)
# E_file = "/hps/nobackup/stegle/users/acuomo/all_scripts/struct_LMM2/sc_endodiff/mofa_logcounts_model_factors.csv"
# cells by PCs (500)
E_file = "/hps/nobackup/stegle/users/acuomo/all_scripts/struct_LMM2/sc_endodiff/pcs_500hvgs.csv"
E = pd.read_csv(E_file, index_col = 0)
E = xr.DataArray(E.values, dims=["cell", "pc"], coords={"cell": E.index.values, "pc": E.columns.values})
# Align environment rows to the cell ordering of the sample mapping
E = E.sel(cell=sample_mapping["phenotype_sample_id"].values)
assert all(E.cell.values == sample_mapping["phenotype_sample_id"].values)
# subselect to only SNPs on right chromosome
G_sel = G.where(G.chrom == str(chrom), drop=True)
G_sel
# Expand genotypes from donors to cells
G_exp = G_sel.sel(sample=sample_mapping["genotype_individual_id"].values)
assert all(L_expanded.sample.values == G_exp.sample.values)
# Use only the top PCs as the environment matrix
n_factors = 10
E = E.values[:,0:n_factors]
# get eigendecomposition of EEt
[U, S, _] = economic_svd(E)
us = U * S
# get decomposition of K*EEt
Ls = [ddot(us[:,i], L_expanded) for i in range(us.shape[1])]
Ls[1].shape
# Phenotype
phenotype_file = "/hps/nobackup/stegle/users/acuomo/all_scripts/struct_LMM2/sc_endodiff/exprs_alldays_allgenes.tsv.pkl"
phenotype = pd.read_pickle(phenotype_file)
phenotype
print("Phenotype shape BEFORE selection: {}".format(phenotype.shape))
# genes (traits) x cells, aligned to the same cell ordering as E and G
phenotype = xr.DataArray(phenotype.values, dims=["trait", "cell"], coords={"trait": phenotype.index.values, "cell": phenotype.columns.values})
phenotype = phenotype.sel(cell=sample_mapping["phenotype_sample_id"].values)
print("Phenotype shape AFTER selection: {}".format(phenotype.shape))
assert all(phenotype.cell.values == sample_mapping["phenotype_sample_id"].values)
phenotype
# Filter on specific gene-SNP pairs
# eQTL from endodiff (ips+mesendo+defendo)
endo_eqtl_file = "/hps/nobackup/stegle/users/acuomo/all_scripts/struct_LMM2/sc_endodiff/eqtl_allstages.tsv"
endo_eqtl = pd.read_csv(endo_eqtl_file, sep="\t", index_col = False)
# SNP ids look like "<chrom>_<pos>_..."; take the leading chromosome number
endo_eqtl["chrom"] = [int(i[:i.find("_")]) for i in endo_eqtl["snp_id"]]
genes = endo_eqtl[endo_eqtl['chrom']==int(chrom)]['feature'].unique()
# genes
len(genes)
# Set up model
n_samples = phenotype.shape[1]
# Intercept-only covariate matrix
M = ones((n_samples, 1))
# Index of the gene analysed in this run
i = 2
trait_name = genes[i]
# trait_name = "ENSG00000111671_SPSB2"
trait_name
y = phenotype.sel(trait=trait_name)
# Gaussianize expression and environments before fitting
y = quantile_gaussianize(y)
E = quantile_gaussianize(E)
y = np.asarray(y)
slmm2 = StructLMM2(y, M, E, Ls)
# Lead SNPs previously mapped for this gene
leads = endo_eqtl[endo_eqtl['feature']==trait_name]['snp_id'].unique()
G_tmp = G_exp[:,G_exp['snp'].isin(leads)]
G_tmp.shape
# Donor-level genotypes for the same SNPs (used for MAF computation)
G_maf = G_sel[G_sel['sample'].isin(donors),G_sel['snp'].isin(leads)]
G_maf
from __future__ import division
def compute_maf(X):
    r"""Compute minor allele frequencies.

    It assumes that ``X`` encodes 0, 1, and 2 representing the number
    of alleles (or dosage), or ``NaN`` to represent missing values.

    Parameters
    ----------
    X : array_like
        Samples-by-variants genotype matrix: numpy array, pandas DataFrame,
        dask array, or xarray DataArray.

    Returns
    -------
    array_like
        Minor allele frequency of each variant (column).

    Examples
    --------
    >>> from numpy.random import RandomState
    >>> random = RandomState(0)
    >>> X = random.randint(0, 3, size=(100, 10))
    >>> print(compute_maf(X))  # doctest: +FLOAT_CMP
    [0.49 0.49 0.445 0.495 0.5 0.45 0.48 0.48 0.47 0.435]
    """
    from numpy import isnan, logical_not, minimum, nansum
    from pandas import DataFrame

    # dask and xarray are optional: only import them when checking for their
    # input types, and fall back gracefully if they are not installed. The
    # original version imported both unconditionally, which made the function
    # fail even for plain numpy/pandas inputs when either package was absent.
    try:
        import dask.array as da
    except ImportError:
        da = None
    try:
        import xarray as xr
    except ImportError:
        xr = None

    if da is not None and isinstance(X, da.Array):
        # Lazily-evaluated dask array: force computation of the reductions.
        s0 = da.nansum(X, axis=0).compute()
        denom = 2 * (X.shape[0] - da.isnan(X).sum(axis=0)).compute()
    elif isinstance(X, DataFrame):
        s0 = X.sum(axis=0, skipna=True)
        denom = 2 * logical_not(X.isna()).sum(axis=0)
    elif xr is not None and isinstance(X, xr.DataArray):
        if "sample" in X.dims:
            kwargs = {"dim": "sample"}
        else:
            kwargs = {"axis": 0}
        s0 = X.sum(skipna=True, **kwargs)
        denom = 2 * logical_not(isnan(X)).sum(**kwargs)
    else:
        # Plain numpy array (or anything supporting nansum along axis 0).
        s0 = nansum(X, axis=0)
        denom = 2 * logical_not(isnan(X)).sum(axis=0)

    # Frequency of the counted allele, folded onto [0, 0.5] for the minor one.
    s0 = s0 / denom
    s1 = 1 - s0
    maf = minimum(s0, s1)

    if hasattr(maf, "name"):
        maf.name = "maf"

    return maf
# Minor allele frequencies of the lead SNPs (dask-backed, so .compute())
mafs = compute_maf(G_maf).compute()
mafs
# Predict persistent (G) and cell-specific (GxE) genetic effects
b = slmm2.predict_interaction(G_tmp, mafs)
# beta_G value for each of the SNPs tested
beta_G = b[0]
beta_G.shape
# beta_GxE values (should be n_cells x nSNPs)
beta_GxE = b[1][0]
beta_GxE.shape
# +
cells = phenotype["cell"].values
snps = G_tmp["variant"].values
betas_df = pd.DataFrame(data = beta_GxE, columns = snps, index = cells)
betas_df.head()
# +
# save
# -
folder = "/hps/nobackup/stegle/users/acuomo/all_scripts/struct_LMM2/sc_endodiff/new/PCA/"
outfilename0 = f"{folder}beta_GxE/PCA{n_factors}/{trait_name}.tsv"
outfilename0
betas_df.to_csv(outfilename0, sep='\t')
betaG_df = pd.DataFrame(data = beta_G, index = snps, columns = ['beta_G'])
betaG_df.head()
outfilename1 = f"{folder}beta_G/PCA{n_factors}/{trait_name}.tsv"
outfilename1
betaG_df.to_csv(outfilename1, sep='\t')
import numpy as np
from math import ceil
import matplotlib.pyplot as plt
##
# For each SNP: plot the ranked per-cell GxE effect sizes and record the
# spread between the top and bottom 10% of cells.
beta = np.array([])
for i in range(beta_GxE.shape[1]):
    # for i in range(5):
    # get single-cell effect sizes
    v = beta_GxE[:,i]
    # get min and max
    ymin = min(v)
    ymax = max(v)
    # rank values
    order = v.argsort()
    y = v[order]
    x = range(len(v))
    plt.scatter(x,y)
    # define 10% of cells
    xx = ceil(0.1*len(v))
    # Mark the 10th and 90th percentile cells on the ranked curve
    plt.vlines(x=xx,ymin=ymin, ymax=ymax)
    plt.hlines(y=y[xx],xmin=0, xmax=len(v))
    plt.vlines(x=len(v)-xx,ymin=ymin, ymax=ymax)
    plt.hlines(y=y[len(v)-xx],xmin=0, xmax=len(v))
    # add title
    # plt.title(df.columns.values[i])
    # plt.title(str(i))
    plt.show()
    # Effect-size spread between the top and bottom deciles for this SNP
    beta = np.append(beta,y[len(v)-xx]-y[xx])
beta
| endodiff/old_files/test_sc_structLMM_endo_predict_GxE.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] deletable=false
# <div style="margin:-5px;color:#2369a6;font-weight:bold;">
# <p style="text-align:right;font-size:20px;">play@aip</p>
# </div>
# <br>
# <div style="margin:-5px;color:#white;border:1px solid #2369a6;">
# <p style="font-size:20px;
# background: rgba(35,105,166,1);
# background: -moz-linear-gradient(left, rgba(35,105,166,1) 0%, rgba(35,105,166,1) 9%, rgba(48,151,219,1) 56%, rgba(62,158,223,1) 63%, rgba(109,181,237,1) 86%);
# background: -webkit-gradient(left top, right top, color-stop(0%, rgba(35,105,166,1)), color-stop(9%, rgba(35,105,166,1)), color-stop(56%, rgba(48,151,219,1)), color-stop(63%, rgba(62,158,223,1)), color-stop(86%, rgba(109,181,237,1)));
# background: -webkit-linear-gradient(left, rgba(35,105,166,1) 0%, rgba(35,105,166,1) 9%, rgba(48,151,219,1) 56%, rgba(62,158,223,1) 63%, rgba(109,181,237,1) 86%);
# background: -o-linear-gradient(left, rgba(35,105,166,1) 0%, rgba(35,105,166,1) 9%, rgba(48,151,219,1) 56%, rgba(62,158,223,1) 63%, rgba(109,181,237,1) 86%);
# background: -ms-linear-gradient(left, rgba(35,105,166,1) 0%, rgba(35,105,166,1) 9%, rgba(48,151,219,1) 56%, rgba(62,158,223,1) 63%, rgba(109,181,237,1) 86%);color:white;padding:20px;font-weight:bold;">CodeGround Python Test 1 for Batch B and D</p>
# <div style="padding:20px">
# <p><span style="font-weight:bold;">Concepts :</span> Problems</p>
# <p><span style="font-weight:bold;">Description :</span> CodeGround questions to solve using python for batches B & D.</p>
# <p><span style="font-weight:bold;">Posted by :</span> hadiuzzama<span style="float:right;"><span style="font-weight:bold;">Posted on :</span> 2019-11-28 16:20:21</span></p>
# <hr style="border: 1px solid #2369a6">
# <p><span style="font-weight:bold;">Name of Candidate :</span> abhaym3345 ( abhaym3345 )</p>
# <p><span style="font-weight:bold;">Group :</span> Ignite, Trainee</p>
# <p><span style="font-weight:bold;">Start time :</span> 2019-11-29 15:25:00<span style="float:right;"><span style="font-weight:bold;">Stop time :</span> 2019-11-29 16:30:00</span></p>
# </div>
# </div>
# + deletable=false editable=false
from evaluator import evaluate, Session, Test, File
# + deletable=false editable=false
# Start the session and wait for it before running the next cells
Session.start()
# + deletable=false editable=false
# Get the test methods that you have to pass
Test.get_test_methods()
# + [markdown] deletable=false editable=false
# #### Get Started
# -
# #### Problem 1 of 2
# ### Vowel frequency
# <pre>A paragraph will be given to you as input. Your task is to find the frequency of each vowel and print the list of the characters and their frequency.
# Note: Consider upper and lowercase characters as same.
# Print the vowels in alphabetical order in lower case.
# If a vowel does not occur print 0 for it.</pre>
#
# <p><strong>Sample Input:</strong></p>
# <pre>In Formula 1 before the race there is a qualifying round to determine who will start the race in which position. Till a few years back the rule was that all the racers will drive one lap and the fastest will start the race from the first postion second fastest in second position and so on. You will be given the number of racers participating in the race and their times for one flying lap. You have to determine the order the drivers will start the race the next day along with their lap-times.</pre>
#
# <p><strong>Sample Output:</strong></p>
# <pre>a 32
# e 51
# i 34
# o 25
# u 7</pre>
#
# +
# Sample Input
# Raw paragraph, exactly as the grader supplies it to the solution.
sample_input = \
'''In Formula 1 before the race there is a qualifying round to \
determine who will start the race in which position. Till a few \
years back the rule was that all the racers will drive one lap and \
the fastest will start the race from the first postion second fastest \
in second position and so on. You will be given the number of racers \
participating in the race and their times for one flying lap. You have \
to determine the order the drivers will start the race the next day \
along with their lap-times.'''
# Sample Output
# Expected result: one "<vowel> <count>" line per vowel, alphabetical order.
sample_output = \
'''a 32
e 51
i 34
o 25
u 7'''
'''SEE THE FORMAT OF INPUT AND OUTPUT AS SHOWN BELOW'''
print('SAMPLE INPUT:', sample_input, sep='\n', end='\n\n')
print('SAMPLE OUTPUT:', sample_output, sep='\n', end='\n\n')
# +
# Solve the problem below
@evaluate
def vowel_frequency(text_input):
    '''
    Count the occurrences of each vowel in `text_input`, case-insensitively.

    Parameters
    ----------
    text_input : str
        The paragraph to analyse.

    Returns
    -------
    str
        One line per vowel, in alphabetical order ("a e i o u"),
        formatted as "<vowel> <count>"; vowels that never occur are
        reported with a count of 0.
    '''
    # Local import: this notebook has no stdlib import section at the top.
    from collections import Counter
    # Lower-case once so upper/lower vowels are counted together.
    counts = Counter(ch for ch in text_input.lower() if ch in 'aeiou')
    # Counter returns 0 for missing keys, which covers the "print 0" rule.
    text_output = '\n'.join('{} {}'.format(v, counts[v]) for v in 'aeiou')
    return text_output
# RUN & TEST
# Local check against the sample input (should match sample_output above).
print(vowel_frequency(sample_input))
# +
# SUBMIT
# Passing `test=` makes the `evaluate` wrapper submit the result for scoring.
print(vowel_frequency(sample_input, test=vowel_frequency.__name__))
# -
# #### Problem 2 of 2
# ### Buy 1 get 1
# <pre>One day Sheetu visited Pothys to purchase sarees for an upcoming family function.
# In Pothys, every saree shop has their own discount methods to attract the customers. One discount method called Buy1-Get1 caught Sheetu's attention. That is, Sheetu buys one Saree, then she can get one additional Saree with the same color without charge by Buy1-Get1.
# Sheetu lists the needed Sarees as a string S, each letter denotes one saree, and the same letters denote the same colors of sarees, and the different letters denote the different colors of sarees. The cost of each saree is 1. Your task is to calculate the minimum cost for getting all the sarees Sheetu listed.
#
# Input
#
# The first line of input contains a single line T, which represents the number of test cases. Then T lines will follow, and each will contain a string S, which represents the sarees Sheetu needed.
#
# Output
#
# Output the minimum cost for each test case.
# Constraints
#
# 1 &le; T &le; 100
# 1 &le; |S| &le; 200, where |S| represents the length of the string S.
# The string S is case sensitive, and will contain only English characters in the range [a-z], [A-Z].</pre>
#
#
# <p><strong>Sample Input/Output:</strong></p>
#
# <p><strong>Sample Input:</strong></p>
# <pre>4
# ssss
# ssas
# sa
# s</pre>
#
# <p><strong>Sample Output:</strong></p>
# <pre>2
# 3
# 2
# 1</pre>
#
# <p><strong>Explanation:</strong></p>
#
# <pre>In the first sample case, Sheetu needs 4 sarees of color s. One of the optimal way is the following:
# Buy the first s with cost 1, and she can get the second s without charge. Then buy the third s with cost 1, and she can get the last s without charge. In this case, she get 4 sarees with only cost 2.
# In the second sample case, Sheetu needs 3 sarees of color s and 1 saree of color a. One of the optimal way is the following:
# Buy the second s with cost 1, and she can get the last s without charge. Then buy the a and the first s with cost 2. In this case, she get 4 sarees with only cost 3.
# In the third and fourth sample cases, she cannot save her money by using Buy1-Get1.</pre>
# +
# Sample Input
# First line: number of test cases T; next T lines: saree colour strings.
sample_input = \
'''4
ssss
ssas
sa
s'''
# Sample Output
# Minimum total cost for each test case, one per line.
sample_output = \
'''2
3
2
1'''
'''SEE THE FORMAT OF INPUT AND OUTPUT AS SHOWN BELOW'''
print('SAMPLE INPUT:', sample_input, sep='\n', end='\n\n')
print('SAMPLE OUTPUT:', sample_output,sep='\n', end='\n\n')
# +
# Solve the problem below
@evaluate
def buy_1_get_1(text_input):
    '''
    Compute the minimum cost of buying all listed sarees under Buy1-Get1.

    Every saree costs 1, and buying one saree of a colour grants a second
    saree of the same colour for free — so a colour occurring k times
    costs ceil(k / 2).

    Parameters
    ----------
    text_input : str
        First line: the number of test cases T. Each of the next T lines
        is a string where every (case-sensitive) letter is one saree of
        that colour.

    Returns
    -------
    str
        The minimum cost for each test case, one per line.
    '''
    # Local import: this notebook has no stdlib import section at the top.
    from collections import Counter
    lines = text_input.splitlines()
    t = int(lines[0])
    costs = []
    for sarees in lines[1:t + 1]:
        colour_counts = Counter(sarees)
        # ceil(k / 2) per colour, summed over all colours in this case.
        costs.append(sum((k + 1) // 2 for k in colour_counts.values()))
    text_output = '\n'.join(str(c) for c in costs)
    return text_output
# RUN & TEST
# Local check against the sample input (should match sample_output above).
print(buy_1_get_1(sample_input))
# +
# SUBMIT
# Passing `test=` makes the `evaluate` wrapper submit the result for scoring.
print(buy_1_get_1(sample_input, test=buy_1_get_1.__name__))
# -
# #### Thanks for participating!
# + deletable=false editable=false
# Submit the test before the stop time.
Test.submit()
# + deletable=false editable=false
# Check your position in the leaderboard
Test.leaderboard(latest=False) # latest=True to see the latest leaderboard
# + deletable=false editable=false
# To see only your submission scores.
# Filters the full leaderboard frame down to the current user's rows.
USERNAME = input("Enter your AIPlayground username:")
lbd = Test.leaderboard(all_trials=True)
lbd.loc[lbd.username==USERNAME]
| data/submissions/CodeGround Python Test 1 for Batch B and D.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
# +
# Copyright 2021 NVIDIA Corporation. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# -
# # 1.Overview
#
# In this notebook, we provide a tutorial on how to train a standard DLRM model using the HugeCTR high-level Python API. We will use the original Criteo dataset as training data
#
# 1. [Overview](#1)
# 2. [Dataset Preprocessing](#2)
# 3. [DLRM Model Training](#3)
# 4. [Save the Model Files](#4)
# # 2. Dataset Preprocessing
# ## 2.1 Generate training and validation data folders
# +
# Locations for the original and the preprocessed Criteo data.
# Standard Libraries
import os
from time import time
import re
import shutil
import glob
import warnings

BASE_DIR = "/dlrm_train"
train_path = os.path.join(BASE_DIR, "train")
val_path = os.path.join(BASE_DIR, "val")

# One Dask worker per visible GPU (defaults to GPU "0" when the env var is unset).
CUDA_VISIBLE_DEVICES = os.environ.get("CUDA_VISIBLE_DEVICES", "0")
n_workers = len(CUDA_VISIBLE_DEVICES.split(","))

frac_size = 0.15
allow_multi_gpu = False
use_rmm_pool = False
max_day = None  # (Optional) -- Limit the dataset to day 0-max_day for debugging

# Recreate the train/val folders from scratch on every run.
for _data_dir in (train_path, val_path):
    if os.path.isdir(_data_dir):
        shutil.rmtree(_data_dir)
    os.makedirs(_data_dir)
# -
# ## 2.2 Download the Original Criteo Dataset
# !apt-get install wget
# !wget -P $train_path http://azuremlsampleexperiments.blob.core.windows.net/criteo/day_0.gz
# Decompress the downloaded file and split it into training and validation sets
# !gzip -d -c $train_path/day_0.gz > day_0
# !head -n 45840617 day_0 > $train_path/train.txt
# !tail -n 2000000 day_0 > $val_path/test.txt
# ## 2.3 Preprocessing by NVTabular
# +
# %%writefile /dlrm_train/preprocess.py
import os
import sys
import argparse
import glob
import time
import re
import warnings
from cudf.io.parquet import ParquetWriter
import numpy as np
import pandas as pd
import concurrent.futures as cf
from concurrent.futures import as_completed
import shutil
import dask_cudf
from dask_cuda import LocalCUDACluster
from dask.distributed import Client
from dask.utils import parse_bytes
from dask.delayed import delayed
import cudf
import numpy as np
import cupy as cp
import rmm
import nvtabular as nvt
from nvtabular.io import Shuffle
from nvtabular.ops import Categorify, Clip, FillMissing, HashBucket, LambdaOp, LogOp, Normalize, Rename, get_embedding_sizes
from nvtabular.utils import _pynvml_mem_size, device_mem_size
# #%load_ext memory_profiler
import logging
logging.basicConfig(format='%(asctime)s %(message)s')
logging.root.setLevel(logging.NOTSET)
logging.getLogger('numba').setLevel(logging.WARNING)
logging.getLogger('asyncio').setLevel(logging.WARNING)
# define dataset schema
CATEGORICAL_COLUMNS=["C" + str(x) for x in range(1, 27)]
CONTINUOUS_COLUMNS=["I" + str(x) for x in range(1, 14)]
LABEL_COLUMNS = ['label']
COLUMNS = LABEL_COLUMNS + CONTINUOUS_COLUMNS + CATEGORICAL_COLUMNS
#/samples/criteo mode doesn't have dense features
criteo_COLUMN=LABEL_COLUMNS + CATEGORICAL_COLUMNS
#For new feature cross columns
CROSS_COLUMNS = []
NUM_INTEGER_COLUMNS = 13
NUM_CATEGORICAL_COLUMNS = 26
NUM_TOTAL_COLUMNS = 1 + NUM_INTEGER_COLUMNS + NUM_CATEGORICAL_COLUMNS
# Initialize RMM pool on ALL workers
def setup_rmm_pool(client, pool_size):
    """Re-initialize RMM with a pooled allocator of `pool_size` bytes on every Dask worker."""
    # `client.run` executes the call once on each worker in the cluster.
    client.run(rmm.reinitialize, pool_allocator=True, initial_pool_size=pool_size)
#compute the partition size with GB
def bytesto(bytes, to, bsize=1024):
    """Convert a byte count into kilo/mega/giga/tera/peta/exa units.

    Parameters
    ----------
    bytes : int or float
        Number of bytes. (Name shadows the builtin but is kept for
        interface compatibility with existing callers.)
    to : str
        Target unit letter: 'k', 'm', 'g', 't', 'p' or 'e'.
    bsize : int, optional
        Block size per unit step (1024 for binary units).

    Returns
    -------
    float
        `bytes` expressed in the requested unit.

    Raises
    ------
    KeyError
        If `to` is not one of the supported unit letters.
    """
    # Exponent for each supported unit letter.
    exponents = {'k': 1, 'm': 2, 'g': 3, 't': 4, 'p': 5, 'e': 6}
    # The original assigned `r = float(bytes)` and never used it; the
    # conversion is folded directly into the return expression.
    return float(bytes) / (bsize ** exponents[to])
#process the data with NVTabular
def process_NVT(args):
    """End-to-end NVTabular preprocessing of the Criteo dataset.

    Steps: (1) optionally register "A_B" feature-cross column names,
    (2) convert the raw tab-separated train/val files under
    ``args.data_path`` to temporary parquet with dask_cudf,
    (3) fit a workflow (Categorify for categoricals; FillMissing/Clip/
    Normalize for continuous features) on the training data only,
    (4) write the transformed train/val datasets under ``args.out_path``,
    and (5) print the resulting embedding (slot) sizes.
    """
    if args.feature_cross_list:
        feature_pairs = [pair.split("_") for pair in args.feature_cross_list.split(",")]
        for pair in feature_pairs:
            # Register each "A_B" pair as a crossed-column name (module global).
            CROSS_COLUMNS.append(pair[0]+'_'+pair[1])

    logging.info('NVTabular processing')
    train_input = os.path.join(args.data_path, "train/train.txt")
    val_input = os.path.join(args.data_path, "val/test.txt")
    # Scratch folders for the txt -> parquet conversion.
    PREPROCESS_DIR_temp_train = os.path.join(args.out_path, 'train/temp-parquet-after-conversion')
    PREPROCESS_DIR_temp_val = os.path.join(args.out_path, 'val/temp-parquet-after-conversion')
    if not os.path.exists(PREPROCESS_DIR_temp_train):
        os.makedirs(PREPROCESS_DIR_temp_train)
    if not os.path.exists(PREPROCESS_DIR_temp_val):
        os.makedirs(PREPROCESS_DIR_temp_val)
    PREPROCESS_DIR_temp = [PREPROCESS_DIR_temp_train, PREPROCESS_DIR_temp_val]
    train_output = os.path.join(args.out_path, "train")
    val_output = os.path.join(args.out_path, "val")

    # Make sure we have a clean parquet space for cudf conversion
    for one_path in PREPROCESS_DIR_temp:
        if os.path.exists(one_path):
            shutil.rmtree(one_path)
        os.mkdir(one_path)

    ## Get Dask Client
    # Deploy a Single-Machine Multi-GPU Cluster
    device_size = device_mem_size(kind="total")
    cluster = None
    if args.protocol == "ucx":
        # UCX transport with NVLink; honour a pre-set UCX_TLS if present.
        UCX_TLS = os.environ.get("UCX_TLS", "tcp,cuda_copy,cuda_ipc,sockcm")
        os.environ["UCX_TLS"] = UCX_TLS
        cluster = LocalCUDACluster(
            protocol = args.protocol,
            CUDA_VISIBLE_DEVICES = args.devices,
            n_workers = len(args.devices.split(",")),
            enable_nvlink=True,
            device_memory_limit = int(device_size * args.device_limit_frac),
            dashboard_address=":" + args.dashboard_port
        )
    else:
        cluster = LocalCUDACluster(
            protocol = args.protocol,
            n_workers = len(args.devices.split(",")),
            CUDA_VISIBLE_DEVICES = args.devices,
            device_memory_limit = int(device_size * args.device_limit_frac),
            dashboard_address=":" + args.dashboard_port
        )

    # Create the distributed client
    client = Client(cluster)
    # Only set up the RMM pool when a meaningful fraction was requested.
    if args.device_pool_frac > 0.01:
        setup_rmm_pool(client, int(args.device_pool_frac*device_size))

    #calculate the total processing time
    runtime = time.time()

    #test dataset without the label feature
    if args.dataset_type == 'test':
        global LABEL_COLUMNS
        LABEL_COLUMNS = []

    ##-----------------------------------##
    # Dask rapids converts txt to parquet
    # Dask cudf dataframe = ddf

    ## train/valid txt to parquet
    train_valid_paths = [(train_input,PREPROCESS_DIR_temp_train),(val_input,PREPROCESS_DIR_temp_val)]

    for input, temp_output in train_valid_paths:

        ddf = dask_cudf.read_csv(input,sep='\t',names=LABEL_COLUMNS + CONTINUOUS_COLUMNS + CATEGORICAL_COLUMNS)

        ## Convert label col to FP32
        if args.parquet_format and args.dataset_type == 'train':
            ddf["label"] = ddf['label'].astype('float32')

        # Save it as parquet format for better memory usage
        ddf.to_parquet(temp_output,header=True)

    ##-----------------------------------##

    COLUMNS = LABEL_COLUMNS + CONTINUOUS_COLUMNS + CROSS_COLUMNS + CATEGORICAL_COLUMNS

    train_paths = glob.glob(os.path.join(PREPROCESS_DIR_temp_train, "*.parquet"))
    valid_paths = glob.glob(os.path.join(PREPROCESS_DIR_temp_val, "*.parquet"))

    # Categorical encoding; max_size caps each slot's cardinality.
    # NOTE(review): args.freq_limit is parsed but not used here — confirm
    # whether Categorify should receive freq_threshold=args.freq_limit.
    num_buckets=10000000
    categorify_op = Categorify(out_path="./", max_size=num_buckets)
    cat_features = CATEGORICAL_COLUMNS >> categorify_op
    logging.info('Fillmissing processing')
    logging.info('Nomalization processing')
    cont_features = CONTINUOUS_COLUMNS >> FillMissing() >> Clip(min_value=0) >> Normalize()
    features = cat_features + cont_features + LABEL_COLUMNS
    workflow = nvt.Workflow(features, client=client)

    ##Define the output format##
    output_format='hugectr'
    if args.parquet_format:
        output_format='parquet'

    ##--------------------##

    # just for /samples/criteo model
    train_ds_iterator = nvt.Dataset(train_paths, engine='parquet', part_size=int(args.part_mem_frac * device_size))
    valid_ds_iterator = nvt.Dataset(valid_paths, engine='parquet', part_size=int(args.part_mem_frac * device_size))

    shuffle = None
    if args.shuffle == "PER_WORKER":
        shuffle = nvt.io.Shuffle.PER_WORKER
    elif args.shuffle == "PER_PARTITION":
        shuffle = nvt.io.Shuffle.PER_PARTITION

    # Output dtypes: int64 keys for categoricals, float32 for dense & label.
    dict_dtypes={}

    for col in CATEGORICAL_COLUMNS:
        dict_dtypes[col] = np.int64

    for col in CONTINUOUS_COLUMNS:
        dict_dtypes[col] = np.float32

    for col in LABEL_COLUMNS:
        dict_dtypes[col] = np.float32

    logging.info('Train Datasets Preprocessing.....')
    # Fit encoding/normalization statistics on the training set only,
    # then transform it (shuffled according to --shuffle).
    workflow.fit(train_ds_iterator)
    workflow.transform(train_ds_iterator).to_parquet(output_path=train_output,
                                                     shuffle=shuffle,
                                                     dtypes=dict_dtypes,
                                                     labels=LABEL_COLUMNS,
                                                     conts=CONTINUOUS_COLUMNS,
                                                     cats=CATEGORICAL_COLUMNS)
    logging.info('Valid Datasets Preprocessing.....')
    # Validation reuses the fitted statistics and is written unshuffled.
    workflow.transform(valid_ds_iterator).to_parquet(output_path=val_output,
                                                     dtypes=dict_dtypes,
                                                     labels=LABEL_COLUMNS,
                                                     conts=CONTINUOUS_COLUMNS,
                                                     cats=CATEGORICAL_COLUMNS)

    #--------------------##
    #Output slot_size for each categorical feature
    embeddings = [c[0] for c in categorify_op.get_embedding_sizes(CATEGORICAL_COLUMNS).values()]
    embeddings = np.clip(a=embeddings, a_min=None, a_max=num_buckets).tolist()
    print(embeddings)
    ##--------------------##

    ## Shutdown clusters
    client.close()
    logging.info('NVTabular processing done')

    runtime = time.time() - runtime

    print("\nDask-NVTabular Criteo Preprocessing")
    print("--------------------------------------")
    print(f"data_path | {args.data_path}")
    print(f"output_path | {args.out_path}")
    print(f"partition size | {'%.2f GB'%bytesto(int(args.part_mem_frac * device_size),'g')}")
    print(f"protocol | {args.protocol}")
    print(f"device(s) | {args.devices}")
    print(f"rmm-pool-frac | {(args.device_pool_frac)}")
    print(f"out-files-per-proc | {args.out_files_per_proc}")
    print(f"num_io_threads | {args.num_io_threads}")
    print(f"shuffle | {args.shuffle}")
    print("======================================")
    print(f"Runtime[s] | {runtime}")
    print("======================================\n")
def parse_args():
    """Parse command-line options for multi-GPU Criteo preprocessing.

    Returns
    -------
    argparse.Namespace
        Parsed arguments, with an extra ``n_workers`` attribute derived
        from the comma-separated ``--devices`` list.
    """
    parser = argparse.ArgumentParser(description=("Multi-GPU Criteo Preprocessing"))

    #
    # System Options
    #

    parser.add_argument("--data_path", type=str, help="Input dataset path (Required)")
    parser.add_argument("--out_path", type=str, help="Directory path to write output (Required)")
    parser.add_argument(
        "-d",
        "--devices",
        default=os.environ.get("CUDA_VISIBLE_DEVICES", "0"),
        type=str,
        help='Comma-separated list of visible devices (e.g. "0,1,2,3"). '
    )
    parser.add_argument(
        "-p",
        "--protocol",
        choices=["tcp", "ucx"],
        default="tcp",
        type=str,
        help="Communication protocol to use (Default 'tcp')",
    )
    parser.add_argument(
        "--device_limit_frac",
        default=0.5,
        type=float,
        # Help text fixed: it previously claimed "Default 0.8" while the
        # actual default is 0.5.
        help="Worker device-memory limit as a fraction of GPU capacity (Default 0.5). "
    )
    parser.add_argument(
        "--device_pool_frac",
        default=0.9,
        type=float,
        help="RMM pool size for each worker as a fraction of GPU capacity (Default 0.9). "
        "The RMM pool frac is the same for all GPUs, make sure each one has enough memory size",
    )
    parser.add_argument(
        "--num_io_threads",
        default=0,
        type=int,
        help="Number of threads to use when writing output data (Default 0). "
        "If 0 is specified, multi-threading will not be used for IO.",
    )

    #
    # Data-Decomposition Parameters
    #

    parser.add_argument(
        "--part_mem_frac",
        default=0.125,
        type=float,
        help="Maximum size desired for dataset partitions as a fraction "
        "of GPU capacity (Default 0.125)",
    )
    parser.add_argument(
        "--out_files_per_proc",
        default=8,
        type=int,
        help="Number of output files to write on each worker (Default 8)",
    )

    #
    # Preprocessing Options
    #

    parser.add_argument(
        "-f",
        "--freq_limit",
        default=0,
        type=int,
        help="Frequency limit for categorical encoding (Default 0)",
    )
    parser.add_argument(
        "-s",
        "--shuffle",
        choices=["PER_WORKER", "PER_PARTITION", "NONE"],
        default="PER_PARTITION",
        help="Shuffle algorithm to use when writing output data to disk (Default PER_PARTITION)",
    )
    parser.add_argument(
        "--feature_cross_list", default=None, type=str, help="List of feature crossing cols (e.g. C1_C2, C3_C4)"
    )

    #
    # Diagnostics Options
    #

    parser.add_argument(
        "--profile",
        metavar="PATH",
        default=None,
        type=str,
        # Help text fixed: "not profile will be exported" -> "no profile ...".
        help="Specify a file path to export a Dask profile report (E.g. dask-report.html)."
        "If this option is excluded from the command, no profile will be exported",
    )
    parser.add_argument(
        "--dashboard_port",
        default="8787",
        type=str,
        # Help text fixed: it previously claimed "Default `3787`" while the
        # actual default is "8787".
        help="Specify the desired port of Dask's diagnostics-dashboard (Default `8787`). "
        "The dashboard will be hosted at http://<IP>:<PORT>/status",
    )

    #
    # Format
    #

    parser.add_argument('--parquet_format', type=int, default=1)
    parser.add_argument('--dataset_type', type=str, default='train')

    args = parser.parse_args()
    # Derived convenience attribute: one Dask worker per visible device.
    args.n_workers = len(args.devices.split(","))
    return args
if __name__ == '__main__':
    # CLI entry point: parse the arguments, then run the NVTabular pipeline.
    args = parse_args()
    process_NVT(args)
# -
# !python3 ./preprocess.py --data_path /dlrm_train --out_path /dlrm_train --freq_limit 6 --device_limit_frac 0.5 --device_pool_frac 0.5 --out_files_per_proc 1 --devices "0" --num_io_threads 2
# ## 3. DLRM Model Training
# +
# %%writefile './model.py'
import hugectr
from mpi4py import MPI
# 1. Create Solver, DataReaderParams and Optimizer
# NOTE(review): vvgpu=[[0,1,2,3]] assumes 4 visible GPUs on one node, while
# the preprocessing above ran on device "0" only — confirm the hardware.
solver = hugectr.CreateSolver(max_eval_batches = 300,
                              batchsize_eval = 16384,
                              batchsize = 16384,
                              lr = 0.001,
                              vvgpu = [[0,1,2,3]],
                              repeat_dataset = True,
                              i64_input_key = True)
# Parquet reader over the NVTabular output; slot_size_array lists the
# cardinality of each categorical slot.
# NOTE(review): this slot_size_array is ordered differently from the one
# passed to SparseEmbedding below — verify both against the parquet column
# order emitted by preprocessing.
reader = hugectr.DataReaderParams(data_reader_type = hugectr.DataReaderType_t.Parquet,
                                  source = ["./train/_file_list.txt"],
                                  eval_source = "./val/_file_list.txt",
                                  slot_size_array = [4976199, 25419, 14705, 7112, 19283, 4, 6391, 1282, 60, 3289052, 282487, 138210, 11, 2203, 8901, 67, 4, 948, 15, 5577159, 1385790, 4348882, 178673, 10023, 88, 34],
                                  check_type = hugectr.Check_t.Non)
# Plain SGD with local updates.
optimizer = hugectr.CreateOptimizer(optimizer_type = hugectr.Optimizer_t.SGD,
                                    update_type = hugectr.Update_t.Local,
                                    atomic_update = True)
# 2. Initialize the Model instance
model = hugectr.Model(solver, reader, optimizer)
# 3. Construct the Model graph
# Input: 1 label, 13 dense features, 26 one-hot categorical slots.
model.add(hugectr.Input(label_dim = 1, label_name = "label",
                        dense_dim = 13, dense_name = "dense",
                        data_reader_sparse_param_array =
                        [hugectr.DataReaderSparseParam("data1", 2, False, 26)]))
# 128-dim embedding vectors, sum-combined per slot, sharded across GPUs.
model.add(hugectr.SparseEmbedding(embedding_type = hugectr.Embedding_t.DistributedSlotSparseEmbeddingHash,
                                  workspace_size_per_gpu_in_mb = 10000,
                                  embedding_vec_size = 128,
                                  combiner = "sum",
                                  sparse_embedding_name = "sparse_embedding1",
                                  bottom_name = "data1",
                                  slot_size_array = [4976199, 3289052, 282487, 138210, 11, 2203, 8901, 67, 4, 948, 15, 25419, 5577159, 1385790, 4348882, 178673, 10023, 88, 34, 14705, 7112, 19283, 4, 6391, 1282, 60],
                                  optimizer = optimizer))
# Bottom MLP over the dense features: 512 -> 256 -> 128, ReLU-activated.
model.add(hugectr.DenseLayer(layer_type = hugectr.Layer_t.InnerProduct,
                             bottom_names = ["dense"],
                             top_names = ["fc1"],
                             num_output=512))
model.add(hugectr.DenseLayer(layer_type = hugectr.Layer_t.ReLU,
                             bottom_names = ["fc1"],
                             top_names = ["relu1"]))
model.add(hugectr.DenseLayer(layer_type = hugectr.Layer_t.InnerProduct,
                             bottom_names = ["relu1"],
                             top_names = ["fc2"],
                             num_output=256))
model.add(hugectr.DenseLayer(layer_type = hugectr.Layer_t.ReLU,
                             bottom_names = ["fc2"],
                             top_names = ["relu2"]))
model.add(hugectr.DenseLayer(layer_type = hugectr.Layer_t.InnerProduct,
                             bottom_names = ["relu2"],
                             top_names = ["fc3"],
                             num_output=128))
model.add(hugectr.DenseLayer(layer_type = hugectr.Layer_t.ReLU,
                             bottom_names = ["fc3"],
                             top_names = ["relu3"]))
# DLRM pairwise-interaction layer between bottom-MLP output and embeddings.
model.add(hugectr.DenseLayer(layer_type = hugectr.Layer_t.Interaction,
                             bottom_names = ["relu3","sparse_embedding1"],
                             top_names = ["interaction1"]))
# Top MLP: 1024 -> 1024 -> 512 -> 256 -> 1 (logit).
model.add(hugectr.DenseLayer(layer_type = hugectr.Layer_t.InnerProduct,
                             bottom_names = ["interaction1"],
                             top_names = ["fc4"],
                             num_output=1024))
model.add(hugectr.DenseLayer(layer_type = hugectr.Layer_t.ReLU,
                             bottom_names = ["fc4"],
                             top_names = ["relu4"]))
model.add(hugectr.DenseLayer(layer_type = hugectr.Layer_t.InnerProduct,
                             bottom_names = ["relu4"],
                             top_names = ["fc5"],
                             num_output=1024))
model.add(hugectr.DenseLayer(layer_type = hugectr.Layer_t.ReLU,
                             bottom_names = ["fc5"],
                             top_names = ["relu5"]))
model.add(hugectr.DenseLayer(layer_type = hugectr.Layer_t.InnerProduct,
                             bottom_names = ["relu5"],
                             top_names = ["fc6"],
                             num_output=512))
model.add(hugectr.DenseLayer(layer_type = hugectr.Layer_t.ReLU,
                             bottom_names = ["fc6"],
                             top_names = ["relu6"]))
model.add(hugectr.DenseLayer(layer_type = hugectr.Layer_t.InnerProduct,
                             bottom_names = ["relu6"],
                             top_names = ["fc7"],
                             num_output=256))
model.add(hugectr.DenseLayer(layer_type = hugectr.Layer_t.ReLU,
                             bottom_names = ["fc7"],
                             top_names = ["relu7"]))
model.add(hugectr.DenseLayer(layer_type = hugectr.Layer_t.InnerProduct,
                             bottom_names = ["relu7"],
                             top_names = ["fc8"],
                             num_output=1))
model.add(hugectr.DenseLayer(layer_type = hugectr.Layer_t.BinaryCrossEntropyLoss,
                             bottom_names = ["fc8", "label"],
                             top_names = ["loss"]))
# 4. Dump the Model graph to JSON
model.graph_to_json(graph_config_file = "dlrm.json")
# 5. Compile & Fit
model.compile()
model.summary()
# Snapshot at iteration 20000 produces the *_20000.model files used below.
model.fit(max_iter = 21000, display = 1000, eval_interval = 4000, snapshot = 20000, snapshot_prefix = "dlrm")
# -
# !python model.py
# ## 4. Save the Model Files & Inference Validation
# !ls -l *20000.model
# !ls -l /dlrm_train/val
import pandas as pd
# Inspect one preprocessed validation partition.
df = pd.read_parquet("/dlrm_train/val/part_0.parquet")
df.head()
# Export the first 10 rows as the inference smoke-test input.
df.head(10).to_csv('/dlrm_train/infer_test.csv', sep=',', index=False,header=True)
# +
# %%writefile /dlrm_train/dlrm2predict.py
from hugectr.inference import InferenceParams, CreateInferenceSession
import hugectr
import pandas as pd
import numpy as np
import sys
from mpi4py import MPI
config_file = "/dlrm_train/dlrm.json"
# Dataset schema — must match the training configuration.
CATEGORICAL_COLUMNS=["C" + str(x) for x in range(1, 27)]
CONTINUOUS_COLUMNS=["I" + str(x) for x in range(1, 14)]
LABEL_COLUMNS = ['label']
# Per-slot cardinalities in the embedding's slot order; `shift` maps each
# slot-local key into the single global embedding-key space.
emb_size = [4976199, 3289052, 282487, 138210, 11, 2203, 8901, 67, 4, 948, 15, 25419, 5577159, 1385790, 4348882, 178673, 10023, 88, 34, 14705, 7112, 19283, 4, 6391, 1282, 60]
shift = np.insert(np.cumsum(emb_size), 0, 0)[:-1]
test_df=pd.read_csv("/dlrm_train/infer_test.csv",sep=',')
# 10 samples x 26 slots with one key each -> CSR row pointers 0..260.
row_ptrs = list(range(0,261))
dense_features = list(test_df[CONTINUOUS_COLUMNS].values.flatten())
# Fix: the astype() result was previously discarded (a no-op statement);
# assign it back so the keys really are int64 before the shift is added.
test_df[CATEGORICAL_COLUMNS] = test_df[CATEGORICAL_COLUMNS].astype(np.int64)
embedding_columns = list((test_df[CATEGORICAL_COLUMNS]+shift).values.flatten())
# create parameter server, embedding cache and inference session
inference_params = InferenceParams(model_name = "dlrm",
                                   max_batchsize = 64,
                                   hit_rate_threshold = 0.5,
                                   dense_model_file = "/dlrm_train/dlrm_dense_20000.model",
                                   sparse_model_files = ["/dlrm_train/dlrm0_sparse_20000.model"],
                                   device_id = 0,
                                   use_gpu_embedding_cache = True,
                                   cache_size_percentage = 0.2,
                                   i64_input_key = True,
                                   use_mixed_precision = False)
inference_session = CreateInferenceSession(config_file, inference_params)
output = inference_session.predict(dense_features, embedding_columns, row_ptrs)
print(output)
# -
# !python /dlrm_train/dlrm2predict.py
| samples/dlrm/HugeCTR_DLRM_Training.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
from wordcloud import WordCloud
# %matplotlib inline
plt.style.use('ggplot')
# Cuisines.csv has no header row, so columns are integers; column 1 holds
# the human-readable cuisine name.
cuisines_df = pd.read_csv('../data/Cuisines.csv', header = None)
cuisines_df = cuisines_df.rename(columns = {1: 'cuisine_label'})
# Recipe feature table with a 'cuisine' label column (used as PCA input below).
recipes_df = pd.read_csv('../data/recipes.csv')
display(cuisines_df.head(2))
display(recipes_df.head(2))
recipes_df.shape
# # PCA
# Feature matrix: every column except the target label.
X = recipes_df.drop('cuisine', axis=1)
# Target kept as a one-column DataFrame aligned with X's index.
y = pd.DataFrame(recipes_df.cuisine)
y.set_index(X.index, inplace=True)
# +
from sklearn.decomposition import PCA

# Fit PCA with all components so the full explained-variance spectrum
# is available for the cumulative-variance plot below.
pca = PCA().fit(X)

# Cumulative explained variance — np.cumsum replaces the previous manual
# accumulation loop (identical values, simpler and vectorized).
cum_expl_var = np.cumsum(pca.explained_variance_)
cum_expl_var_ratio_pca = np.cumsum(pca.explained_variance_ratio_)
# +
# Index of the first component at which cumulative explained variance
# reaches 90% — used to choose n_components below.
cum_90 = np.where(cum_expl_var_ratio_pca >= 0.9)[0][0]
plt.figure(figsize=[8,5])
plt.style.use('ggplot')
plt.plot(cum_expl_var_ratio_pca, color="green")
# Mark the 90% point on the curve.
plt.plot(cum_90, 0.9,'go')
plt.title('Cumulative Explained Variance in PCA')
plt.xlabel('Number of Components')
plt.ylabel('Cumulative Explained Variance Ratio')
plt.xticks(np.arange(0, len(pca.components_), 50))
plt.annotate("CVR= 90%", (cum_90+20, 0.88)).set_fontsize(12)
plt.annotate("Component {}".format(cum_90+1), (cum_90+20, 0.83)).set_fontsize(12)
# plt.savefig('../figures/1-4.png', bbox_inches = 'tight', pad_inches = 0)
# -
# Project onto 175 components (≈90% cumulative explained variance, per plot).
X_pca = PCA(n_components=175).fit_transform(X)
# +
palette = sns.color_palette("tab20", n_colors = 12)
sns.set_style("whitegrid")
fig, ax = plt.subplots()
fig.set_size_inches(11.7, 8.27)
plt.title("Recipes by origin (PCA)", size=22, fontweight="bold")
# One scatter layer per cuisine, coloured from the palette.
# NOTE(review): columns 1 and 2 are the 2nd/3rd principal components —
# confirm this is intentional rather than components 0 and 1.
for i, cur_origin in enumerate(np.unique(y)):
    plot = plt.scatter(X_pca[(y == cur_origin)['cuisine'], 1], X_pca[(y == cur_origin)['cuisine'], 2],
                       c = palette[i], label = cur_origin, s = 5, linewidth=3.5, alpha=0.8)
    plot.set_label(cuisines_df.cuisine_label[i])
plt.legend(fontsize=14, loc='best')
plt.savefig("pca.png")
plt.show()
| notebooks/PCA.ipynb |