code stringlengths 38 801k | repo_path stringlengths 6 263 |
|---|---|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + id="2lo-uyOVHC35" colab={"base_uri": "https://localhost:8080/", "height": 170} outputId="416a29ea-d7d8-49f6-c978-ca96c709ecdb"
# Mount Google Drive so the notebook can read the experiment data folder.
# NOTE(review): mounting at '/data/' (not the conventional '/content/drive')
# is unusual but intentional here — data_dir below depends on that path.
from google.colab import drive
drive.mount('/data/')
data_dir = '/data/My Drive/Colab Notebooks/Experiment'
# !ls '/data/My Drive/Colab Notebooks/Experiment'
# !pip install matplotlib
# + id="2j4H-1ikHmmZ"
import pandas as pd
# Load the diamonds dataset (price plus carat/cut/color/clarity features).
df = pd.read_csv(data_dir+'/diamonds.csv')
# + id="qvj4VaakKE36" colab={"base_uri": "https://localhost:8080/", "height": 323} outputId="a81c8c1b-afc1-4c2d-e863-3b77345ac318"
# Quick look at dtypes and non-null counts.
df.info()
# + id="LILx7j_RKGhV" colab={"base_uri": "https://localhost:8080/", "height": 297} outputId="7f736aa3-d7fc-4681-884c-0671c695027b"
# Pairwise correlations of the numeric columns.
# NOTE(review): on pandas >= 2.0 df.corr() raises on the string columns
# (cut/color/clarity) — use df.corr(numeric_only=True) there; verify the
# pandas version this notebook targets.
df.corr()
# + id="S9XGekgYKJv5" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="49bb5585-bacb-4e0d-e6df-393db07ef7a5"
# Distinct diamond color grades present in the data.
df['color'].unique()
# + id="CqcWVsrdKw-c" colab={"base_uri": "https://localhost:8080/", "height": 204} outputId="af16ae86-1219-4f73-ffc6-6785be9ae5fd"
df.head()
# + id="tVvNCYFlLRBV"
# Encode the categorical quality columns as integers for sklearn models.
# color: alphabetical category codes; for this dataset the alphabetical
# order (D..J) happens to coincide with the best-to-worst quality order.
df['color_int'] = df['color'].astype('category').cat.codes

# cut: explicit ordinal mapping (0 = Ideal/best .. 4 = Fair/worst).
transform = {
    'Ideal': 0, 'Premium': 1, 'Good': 2, 'Very Good': 3, 'Fair': 4
}
# .map(dict) is the idiomatic (and faster) form of .apply(lambda x: d[x]).
# NOTE: unlike the apply form (which raises KeyError), .map() yields NaN
# for unseen categories; all five cut values are covered, so behavior is
# identical on this dataset.
df['cut_int'] = df['cut'].map(transform)

# clarity: ordinal mapping (0 = IF/best .. 7 = I1/worst).
transform_clarity = {
    'SI2': 6, 'SI1': 5, 'VS1': 3, 'VS2': 4, 'VVS2': 2, 'VVS1': 1, 'I1': 7, 'IF': 0
}
df['clarity_int'] = df['clarity'].map(transform_clarity)
# + id="7XdOC9lfxHuF" colab={"base_uri": "https://localhost:8080/", "height": 111} outputId="e6e2e18a-04fa-4bd5-a2ed-0894eff65367"
# Confirm the new *_int columns were added.
df.head(2)
# + id="z_SJwkqOxMq5" colab={"base_uri": "https://localhost:8080/", "height": 390} outputId="b352f1d0-9449-4e37-c61e-ab46c6883434"
# Correlations again, now including the integer-encoded quality columns.
df.corr()
# + id="_v24s3LIxzKm"
from sklearn.linear_model import LogisticRegression
from sklearn.linear_model import LinearRegression
from sklearn.model_selection import train_test_split
from sklearn.neighbors import KNeighborsClassifier
from sklearn.neighbors import KNeighborsRegressor
from sklearn import svm
from sklearn import metrics
from sklearn.tree import DecisionTreeClassifier
# + id="jcXfP1zYycXG" colab={"base_uri": "https://localhost:8080/", "height": 51} outputId="e02450d5-b277-4090-84e9-5a675f2939f5"
# 70/30 train/test split.
# NOTE(review): no random_state is passed, so the split (and every
# hard-coded confusion-matrix count later in this notebook) changes on
# each run — consider train_test_split(df, test_size=0.3, random_state=...).
train, test = train_test_split(df, test_size = 0.3)
print(train.shape)
print(test.shape)
# + id="4-2tVrcozF2D"
# Feature matrix: carat plus the three integer-encoded quality columns;
# target is the continuous price.
train_X = train[['carat', 'cut_int', 'clarity_int', 'color_int']]
train_y = train.price
test_X = test[['carat', 'cut_int', 'clarity_int', 'color_int']]
test_y = test.price
# + id="OYAdnq1c0IPQ" colab={"base_uri": "https://localhost:8080/", "height": 68} outputId="4b9db802-7342-44d3-a6e3-606fcc3f7018"
train_y.head(2)
# + id="I70g8CYxCB3J"
def classify(x):
    """Return True when a price is strictly above the $2000 threshold.

    Used to turn the continuous `price` target into a binary label for
    the classification models below.
    """
    # The original wrapped the comparison in redundant parentheses;
    # `x > 2000` already yields a bool.
    return x > 2000
# Binarize the train/test targets: True where price > $2000.
y_train_classify = train_y.apply(classify)
y_test_classify = test_y.apply(classify)
# + [markdown] id="p1qcemuHDtXD"
# Logistic Regression Model
# + id="FLQ1PeGBRfuE" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="97a17a55-a6a5-4efb-812a-b56c01578467"
# Linear regression predicting price from carat + encoded quality columns.
# NOTE(review): this is fit on the FULL dataset (df), yet its residuals are
# later evaluated on test_X — the test rows were seen during training, so
# that evaluation is optimistic (data leakage). Consider fitting on
# train_X/train_y instead.
model_re = LinearRegression()
X = df[['carat','color_int','cut_int','clarity_int']]
y = df.price
model_re.fit(X, y)
# + id="H4DybIy7CBf9" colab={"base_uri": "https://localhost:8080/", "height": 102} outputId="b54fed60-a504-425f-be26-d82d372362a8"
# Logistic regression on the binarized (> $2000) target, fit on the
# train split only.
# NOTE(review): the default max_iter=100 may not converge on unscaled
# features — watch for a ConvergenceWarning; verify.
model = LogisticRegression()
model.fit(train_X,y_train_classify)
# + id="JWYwPK--B_IJ" colab={"base_uri": "https://localhost:8080/", "height": 51} outputId="0679168e-8fcd-498c-ccf9-ee0b1ebcffd9"
import sklearn.metrics
# Confusion matrix on the test split: rows = true class (False, True),
# columns = predicted class.
sklearn.metrics.confusion_matrix(y_test_classify, model.predict(test_X))
# + id="f0csNFaAB_ay" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="65f8119c-1eec-42e1-98c2-e1494e773c63"
# Compute precision/recall from the live predictions instead of numbers
# hand-copied from one run's confusion matrix: the hard-coded version used
# 177 as BOTH the false-positive and false-negative count (almost certainly
# a transcription slip), and the copied numbers silently go stale whenever
# the random train/test split changes.
_pred = model.predict(test_X)
precision = sklearn.metrics.precision_score(y_test_classify, _pred)
recall = sklearn.metrics.recall_score(y_test_classify, _pred)
# F1 = harmonic mean of precision and recall.
f_score = 2 * (precision * recall) / (precision + recall)
f_score
# + id="dU3dAZaER31r" colab={"base_uri": "https://localhost:8080/", "height": 51} outputId="3b4646b7-e8f0-4244-957c-e489b3e9d0ae"
# Regression residuals on the test split; count predictions that
# undershoot the true price by more than $2,500.
# NOTE(review): model_re was fit on the full dataset, so this count is
# optimistic (see the leakage note where model_re is fit). Also, only
# positive residuals > 2500 are counted, not |error| > 2500.
errors = test_y - model_re.predict(test_X)
e = pd.DataFrame(errors)
e[e['price']>2500].count()
# + [markdown] id="A7o_mlUGDzYy"
# Decision Tree
# + id="i6ufOKq1Lybk" colab={"base_uri": "https://localhost:8080/", "height": 119} outputId="0ed4fa6b-1ac3-4d51-cdde-3f6af63f372b"
from sklearn import tree
# Decision-tree regressor for price.
# NOTE(review): like the linear model, this is fit on the FULL dataset but
# later evaluated on test rows — optimistic (data leakage).
clf_re = tree.DecisionTreeRegressor()
X = df[['carat','color_int','cut_int','clarity_int']]
y = df.price
clf_re.fit(X, y)
# + id="jiCHRrD4Dm-s" colab={"base_uri": "https://localhost:8080/", "height": 119} outputId="e6fb1397-f5c1-4dc6-f5b9-43c0b789e740"
# Decision-tree classifier on the binarized (> $2000) target.
clf = DecisionTreeClassifier()
clf.fit(train_X, y_train_classify)
# + id="hG78q0RzDnSP" colab={"base_uri": "https://localhost:8080/", "height": 51} outputId="abf0ce2c-9c95-4f5c-da37-de033404908c"
sklearn.metrics.confusion_matrix(y_test_classify, clf.predict(test_X))
# + id="E07tVM21B_Ob" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="488809ca-268b-4ce0-ddf2-6d20b5eeb7fc"
# Precision/recall/F1 from counts hand-copied out of the confusion matrix
# above (TP=7199, FP=192, FN=132).
# NOTE(review): these constants are only valid for the exact split of the
# recorded run — recompute from the matrix (or use
# sklearn.metrics.precision_score/recall_score) on each run.
precision = 7199/(7199+192)
recall = 8659/(8659+132)
f_score = 2* (precision*recall)/(precision+recall)
f_score
# + id="PRd-4LbqNwS7" colab={"base_uri": "https://localhost:8080/", "height": 51} outputId="cf9c7699-7005-4a57-b378-25d4b6e8cc1e"
# Residuals of the tree regressor; count misses larger than $2,500.
errors = test_y - clf_re.predict(test_X)
e = pd.DataFrame(errors)
e[e['price']>2500].count()
# + [markdown] id="HUYV-1w8Fakt"
# KNN
# + id="lbQLn1nSGyk7"
import matplotlib.pyplot as plt
# + id="K1vG5Dn_B_BW" colab={"base_uri": "https://localhost:8080/", "height": 486} outputId="11e3916d-437e-4ed1-bbe1-e875ba530234"
# Sweep k = 1..10 and plot KNN classification accuracy on the test split.
# Scores are accumulated in a plain list: the original used
# `a = pd.Series(); a = a.append(...)`, but Series.append() was removed in
# pandas 2.0 (deprecated since 1.4), so that loop breaks on current pandas.
a_index = list(range(1, 11))
accuracy_scores = []
for k in a_index:
    model_k = KNeighborsClassifier(n_neighbors=k)
    model_k.fit(train_X, y_train_classify)
    prediction = model_k.predict(test_X)
    # accuracy_score is symmetric, but pass (y_true, y_pred) conventionally.
    accuracy_scores.append(metrics.accuracy_score(y_test_classify, prediction))
plt.plot(a_index, accuracy_scores)
plt.xticks(a_index)
# + id="sKK5bXOTPcJd" colab={"base_uri": "https://localhost:8080/", "height": 68} outputId="8d836e4f-cf28-4e8d-deff-2dd1b463d8ed"
# KNN regressor for price (default n_neighbors=5).
# NOTE(review): fit on the FULL dataset but evaluated on test rows below —
# optimistic (data leakage), same as the other regressors in this notebook.
model_k_re = KNeighborsRegressor()
X = df[['carat','color_int','cut_int','clarity_int']]
y = df.price
model_k_re.fit(X, y)
# + id="oLCt67A6-HrM" colab={"base_uri": "https://localhost:8080/", "height": 68} outputId="bea6d3c8-a25c-437f-c55a-15fdbf1fba4e"
# Final KNN classifier with k=5 (chosen from the sweep above).
model_k=KNeighborsClassifier(n_neighbors=5)
model_k.fit(train_X,y_train_classify)
# + id="DT454M-6HNle" colab={"base_uri": "https://localhost:8080/", "height": 51} outputId="201977a5-ef3f-4366-9ec2-02f8ffe7c523"
sklearn.metrics.confusion_matrix(y_test_classify, model_k.predict(test_X))
# + id="Y6g78ImZHSQO" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="0a03024f-44c9-410f-ac5a-215662aad387"
# Precision/recall/F1 from counts hand-copied out of the confusion matrix
# above (TP=7179, FP=171, FN=152).
# NOTE(review): valid only for the recorded run's split — recompute on
# each run rather than hard-coding.
precision = 7179/(7179+171)
recall = 8680/(8680+152)
f_score = 2* (precision*recall)/(precision+recall)
f_score
# + id="ixNSiP5wHfu9" colab={"base_uri": "https://localhost:8080/", "height": 51} outputId="be65fdbe-a5a8-4e3d-d5e3-da9042bda98d"
# Residuals of the KNN regressor; count misses larger than $2,500.
errors = test_y - model_k_re.predict(test_X)
e = pd.DataFrame(errors)
e[e['price']>2500].count()
# + id="pMMme3IAHvs_"
| assets/EMSE6574/Week6_Assignment.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .sh
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Bash
# language: bash
# name: bash
# ---
# # Novel allele detection with MentaLiST 0.2
#
# MentaLiST has a new calling algorithm and also detects and reconstructs putative novel alleles, also calling non-present loci, allowing the use for wgMLST schemes.
#
# ## Running MentaLiST 0.2
#
# Because of the new calling algorithm, new information has to be stored on the MentaLiST database, so databases created with previous 0.1.x versions are not compatible. The command for creating a database is exactly the same as before.
# Show MentaLiST's database-build options.
mentalist build_db -h
# In this example, the folder 'MTB_scheme' has around 100 FASTA files, a subset from a large M. tuberculosis cgMLST scheme with around 3000 loci. To build a MentaLiST database for this scheme, similarly as the previous MentaLiST version, run:
# -k sets the k-mer length; -f takes one FASTA file per locus.
mentalist build_db --db mtb_cgMLST.db -k 31 -f MTB_scheme/*.fa
ls -lh mtb_cgMLST.db
# Now, on this example we have a sample.fastq.gz file with a WGS sample from M. tuberculosis. To run the MLST caller:
# --kt is the k-mer count threshold; --output_votes/--output_special add the extra report files described below.
mentalist call -o sample.call -s sample --db mtb_cgMLST.db --kt 10 --output_votes --output_special sample.fastq.gz
# ### Description of output files
#
# Here, a brief description of each output file. All output files have the same prefix, given by the -o option when running MentaLiST call. 'sample.call' in this example.
#
ls -l sample.call*
# The 'main' file is sample.call, with the allele calls:
cat sample.call
# The file sample.call.coverage.txt has a description of the call for each locus. There are different possible types of call:
#
# 'Regular' called alleles, the most voted allele that has 100% coverage; this is the most common case.
grep Called sample.call.coverage.txt | head -n5
# Non present loci; might have some k-mers seen, but below threshold, so it is declared missing.
grep "Not present" sample.call.coverage.txt
# Novel alleles; no existing allele has 100% coverage, so MentaLiST looks for variations that have 100% coverage,
# using existing alleles as "template" for creating a novel allele.
grep Novel sample.call.coverage.txt
# Multiple possible alleles: when more than one allele has 100% coverage.
# Here, the depth of coverage and number of votes of each allele is shown; the best one is chosen on the call file,
# with a flag "+" after the allele number.
grep Multiple sample.call.coverage.txt
# Partially covered alleles, where no novel allele was found; Most likely either an undetected novel allele or
# an existing allele that was not fully covered in the WGS sample, for some reason.
grep Partially sample.call.coverage.txt
# #### Novel alleles output
#
# Two files: one FASTA with the sequences, and one with a description.
# novel alleles found:
cat sample.call.novel.fa
# Description of novel alleles; number of mutations, and description of the mutation;
cat sample.call.novel.txt
# Alternative call, without novel/missing genes algorithm, purely by votes;
# only included if --output-votes is specified.
cat sample.call.byvote
# ## Updating a database with the detected novel alleles
#
# You can update your MLST scheme with the novel alleles detected by MentaLiST, especially after running it on many different samples. In the scripts folder, there are python scripts to help select alleles and build an updated scheme. To do that, you will perform three steps:
# 1. Select a subset of all novel alleles that satisfy some restrictions.
# 1. Create a new MLST from the existing one, by adding the novel alleles.
# 1. Run MentaLiST to create a k-mer database for this MLST scheme.
#
# Each step is described below.
#
# ### Selecting the novel alleles
# optional: select the python environment to run the scripts;
PYTHON=~/.conda/envs/pathogist/bin/python
# The 'parse_novel_alleles.py' script collects all novel alleles, creates a report and also outputs a FASTA file with selected alleles, to include in an updated MLST scheme.
$PYTHON ../scripts/parse_novel_alleles.py -h
# You must give all the novel allele FASTA files found by MentaLiST as parameter -f. Any given novel allele will be included in the output file (parameter -o) if this exact allele is present in at least (-t) samples. Also, if the parameter -m is given, any novel allele that has -m or less mutations is also included, if you want to include any allele that is very close to existing alleles.
#
# In the following example, I have a folder 'results' with MentaLiST results for >1000 samples. I want to select only alleles that have been detected in at least 5 samples.
$PYTHON ../scripts/parse_novel_alleles.py -f results/*novel.fa -o new_alleles.fa -t 5 > new_alleles_report.txt
# The report has one line for each locus, with the total number of alleles, how many times each allele is present on all samples and the number of mutations in relation to an existing allele.
head new_alleles_report.txt
# For instance, there are 14 novel alleles for locus Rv0021c, the two most common being present in 88 and 5 samples, respectively. Both are 1 mutation away from an existing allele.
#
# The FASTA output will have all alleles that have been seen at least on 5 samples. So we know that for locus Rv0021c we will have two alleles:
grep Rv0021 new_alleles.fa -A1
# ### Creating a new MLST scheme with the novel alleles
#
# To create a new MLST scheme with the novel alleles included, provide the original MLST scheme and the novel alleles FASTA file to the script 'create_new_scheme_with_novel.py'
$PYTHON ../scripts/create_new_scheme_with_novel.py -h
# So, to add the novel alleles from the previous step in the small MLST scheme from the initial example, we run:
$PYTHON ../scripts/create_new_scheme_with_novel.py MTB_scheme/*fa -o MTB_novel_scheme -n new_alleles.fa
# We can see that the new scheme has more alleles on some loci:
#
grep -c ">" MTB_scheme/*fa | head -n5
grep -c ">" MTB_novel_scheme/*fa | head -n5
# ### Run MentaLiST to create a new MLST database file
# Similarly as before, but now with the new MLST scheme.
mentalist build_db --db mtb_novel_cgMLST.db -k 31 -f MTB_novel_scheme/*.fa
# Now, we can rerun the MLST calling phase with the new DB:
#
mentalist call -o sample_novel.call -s sample --db mtb_novel_cgMLST.db --kt 10 --output_votes --output_special sample.fastq.gz
# Comparing this call with the previous, we can see that the novel alleles (marked as "N") have been called in the new output:
# OLD:
cat sample.call
# New:
cat sample_novel.call
| docs/Novel allele detection with MentaLiST.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + id="0qi0S2S9LRfV" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 1000} outputId="38760e32-83be-4c20-a6c9-3d93b1a77e4c" executionInfo={"status": "ok", "timestamp": 1577950671577, "user_tz": -540, "elapsed": 57687, "user": {"displayName": "FooQoo", "photoUrl": "https://lh3.googleusercontent.com/a-/AAuE7mCATSPIVk8eQ-zGlJ_ijdpKHWt4baaI-o56K85qZw=s64", "userId": "13364521585663132459"}}
# !pip install transformers tensorflow --upgrade
# + id="cRagFp_gLcMk" colab_type="code" colab={}
import random
import numpy as np
from tqdm import tqdm_notebook as tqdm
import time
import logging
from sklearn.model_selection import StratifiedKFold
import os
import pandas as pd
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.utils.data import (DataLoader, RandomSampler, SequentialSampler,
TensorDataset)
from sklearn.metrics import accuracy_score, f1_score
from transformers import *
# + id="IF4XfPwqLmLV" colab_type="code" colab={}
def seed_everything(seed=42):
    """Seed every RNG in use (python, hashing, numpy, torch) for reproducibility.

    Args:
        seed: integer seed applied to all libraries (default 42).
    """
    # NOTE: this block's indentation was lost in the source dump; the body
    # is reconstructed here with standard formatting, behavior unchanged.
    random.seed(seed)
    # Fix hash randomization so hash-based orders are stable in subprocesses.
    os.environ['PYTHONHASHSEED'] = str(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    torch.cuda.manual_seed(seed)  # no-op on CPU-only machines
    # Trade some speed for deterministic cuDNN kernel selection.
    torch.backends.cudnn.deterministic = True
# + id="dJ2VUjL1MTl6" colab_type="code" colab={}
seed_everything()
# + id="Focepn7uMVyi" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 122} outputId="045a2150-0084-4302-c9e8-77cf2e27b9ec" executionInfo={"status": "ok", "timestamp": 1577950808270, "user_tz": -540, "elapsed": 30854, "user": {"displayName": "FooQoo", "photoUrl": "https://lh3.googleusercontent.com/a-/AAuE7mCATSPIVk8eQ-zGlJ_ijdpKHWt4baaI-o56K85qZw=s64", "userId": "13364521585663132459"}}
# Mount Google Drive at the conventional Colab location.
from google.colab import drive
drive.mount('/content/drive')
# + id="d8wZ6bNCMg_d" colab_type="code" colab={}
import os
# Work out of the Drive-backed kaggle workspace so relative ./data paths resolve.
os.chdir('/content/drive/My Drive/workspace/kaggle-workspace/')
# + id="VJe8g1cDMzfZ" colab_type="code" colab={}
# Load the competition splits.
# NOTE(review): downstream code indexes columns 'id', 'text', 'target' —
# presumably a Kaggle text-classification dataset; verify the CSV schema.
train = pd.read_csv('./data/train.csv')
test = pd.read_csv('./data/test.csv')
# + id="CNFBGLiYM7rN" colab_type="code" colab={}
class InputExample(object):
    """One (id, text, label) record for sequence classification.

    Attributes:
        id: unique identifier of the example.
        text: raw, untokenized input text.
        label: target class; None for unlabeled (test) examples.
    """

    def __init__(self, id, text, label=None):
        # Plain data holder — no validation or copying is performed.
        self.id, self.text, self.label = id, text, label
class InputFeatures(object):
    """Tokenized, padded model inputs for a single example.

    Unpacks the first (and only) entry of ``choices_features`` — a
    ``(tokens, input_ids, input_mask, segment_ids)`` tuple — and exposes
    the three id/mask/segment arrays as a dict under the keys the
    training loop reads. The token strings themselves are discarded.
    """

    def __init__(self, example_id, choices_features, label):
        self.example_id = example_id
        _tokens, ids, mask, segments = choices_features[0]
        self.choices_features = {
            'input_ids': ids,
            'input_mask': mask,
            'segment_ids': segments,
        }
        self.label = label
# + id="FMEmmV3bNsjZ" colab_type="code" colab={}
def read_examples(df, is_training):
    """Convert a dataframe with id/text/target columns into InputExamples.

    For test data (``is_training=False``) a zero-filled 'target' column
    is first added so the same column selection works for both splits.
    Returns ``(examples, df)``.

    NOTE: mutates the caller's dataframe when adding the dummy column.
    """
    if not is_training:
        df['target'] = np.zeros(len(df), dtype=np.int64)
    examples = [
        InputExample(id=row[0], text=row[1], label=row[2])
        for row in df[['id', 'text', 'target']].values
    ]
    return examples, df
def _truncate_seq_pair(tokens_a, tokens_b, max_length):
"""Truncates a sequence pair in place to the maximum length."""
# This is a simple heuristic which will always truncate the longer sequence
# one token at a time. This makes more sense than truncating an equal percent
# of tokens from each, since if one sequence is very short then each token
# that's truncated likely contains more information than a longer sequence.
while True:
total_length = len(tokens_a) + len(tokens_b)
if total_length <= max_length:
break
if len(tokens_a) > len(tokens_b):
tokens_a.pop()
else:
tokens_b.pop()
# + id="6Vs7JQI2Nvec" colab_type="code" colab={}
# NOTE(review): this cell redefines read_examples and _truncate_seq_pair
# byte-for-byte identically to the previous cell. The redefinition is
# harmless (it merely shadows the earlier definitions) but redundant —
# this whole cell can be deleted.
def read_examples(df, is_training):
if not is_training:
df['target'] = np.zeros(len(df), dtype=np.int64)
examples = []
for val in df[['id', 'text', 'target']].values:
examples.append(InputExample(id=val[0], text=val[1], label=val[2]))
return examples, df
def _truncate_seq_pair(tokens_a, tokens_b, max_length):
"""Truncates a sequence pair in place to the maximum length."""
# This is a simple heuristic which will always truncate the longer sequence
# one token at a time. This makes more sense than truncating an equal percent
# of tokens from each, since if one sequence is very short then each token
# that's truncated likely contains more information than a longer sequence.
while True:
total_length = len(tokens_a) + len(tokens_b)
if total_length <= max_length:
break
if len(tokens_a) > len(tokens_b):
tokens_a.pop()
else:
tokens_b.pop()
def convert_examples_to_features(examples, tokenizer, max_seq_length,
                                 is_training):
    """Tokenize examples and pack them into padded InputFeatures.

    Each example becomes ``[CLS] + text + [SEP]`` (text truncated to
    ``max_seq_length - 2`` tokens), converted to ids and zero-padded to
    ``max_seq_length``, with a matching attention mask and all-zero
    segment ids (single-sentence input). The first training example is
    logged so the preprocessing can be audited.
    """
    features = []
    for example_index, example in enumerate(examples):
        text = tokenizer.tokenize(example.text)
        # Reserve two positions for the [CLS] and [SEP] markers.
        MAX_TEXT_LEN = max_seq_length - 2
        text = text[:MAX_TEXT_LEN]
        choices_features = []
        tokens = ["[CLS]"] + text + ["[SEP]"]
        segment_ids = [0] * (len(text) + 2)
        input_ids = tokenizer.convert_tokens_to_ids(tokens)
        input_mask = [1] * len(input_ids)
        # Zero-pad ids, mask and segments out to the fixed sequence length.
        padding_length = max_seq_length - len(input_ids)
        input_ids += [0] * padding_length
        input_mask += [0] * padding_length
        segment_ids += [0] * padding_length
        choices_features.append((tokens, input_ids, input_mask, segment_ids))
        label = example.label
        if example_index < 1 and is_training:
            logger.info("*** Example ***")
            logger.info("idx: {}".format(example_index))
            logger.info("id: {}".format(example.id))
            logger.info("tokens: {}".format(' '.join(tokens).replace('\u2581', '_')))
            logger.info("input_ids: {}".format(' '.join(map(str, input_ids))))
            logger.info("input_mask: {}".format(len(input_mask)))
            logger.info("segment_ids: {}".format(len(segment_ids)))
            logger.info("label: {}".format(label))
        features.append(
            InputFeatures(
                example_id=example.id,
                choices_features=choices_features,
                label=label
            )
        )
    return features
def metric(y_true, y_pred):
    """Return ``(accuracy, macro-averaged F1)`` for the given predictions."""
    return (accuracy_score(y_true, y_pred),
            f1_score(y_true, y_pred, average='macro'))
# + id="nJ7yBV5EOxrc" colab_type="code" colab={}
# + id="Q6pE6b89N4R2" colab_type="code" colab={}
# hyperparameters
max_seq_length = 512    # BERT-base maximum input length (tokens incl. [CLS]/[SEP])
learning_rate = 1e-5    # AdamW learning rate for fine-tuning
num_epochs = 3          # max epochs per fold (early stopping may end sooner)
batch_size = 8          # per-step batch size; 512-token sequences are memory-heavy
patience = 2            # epochs without F1 improvement before early stop
file_name = 'model'     # checkpoint/file prefix
# + id="k2urAklLN6bW" colab_type="code" colab={}
# Configure a dedicated logger that mirrors DEBUG+ messages to both
# 'log_model.txt' and the console.
logger = logging.getLogger('mylogger')
logger.setLevel(logging.DEBUG)
timestamp = time.strftime("%Y.%m.%d_%H.%M.%S", time.localtime())  # currently unused; kept for callers
# Re-running this notebook cell previously stacked duplicate handlers on
# the same named logger (every message printed once per re-run); reset the
# handler list first so the setup is idempotent.
logger.handlers.clear()
fh = logging.FileHandler('log_model.txt')
fh.setLevel(logging.DEBUG)
ch = logging.StreamHandler()
ch.setLevel(logging.DEBUG)
formatter = logging.Formatter('[%(asctime)s][%(levelname)s] ## %(message)s')
fh.setFormatter(formatter)
ch.setFormatter(formatter)
logger.addHandler(fh)
logger.addHandler(ch)
# + id="-dCAEu_nN8uA" colab_type="code" colab={}
# Uncased BERT WordPiece tokenizer (downloads the vocab on first run).
tokenizer = BertTokenizer.from_pretrained('bert-base-uncased', do_lower_case=True)
# + id="-UIOOAxnN-3N" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 309} outputId="261aec69-ab30-4edc-f40c-44f3b5408456" executionInfo={"status": "ok", "timestamp": 1577951482325, "user_tz": -540, "elapsed": 5160, "user": {"displayName": "FooQoo", "photoUrl": "https://lh3.googleusercontent.com/a-/AAuE7mCATSPIVk8eQ-zGlJ_ijdpKHWt4baaI-o56K85qZw=s64", "userId": "13364521585663132459"}}
# Build the padded feature arrays for the training split.
train_examples, train_df = read_examples(train, is_training=True)
labels = train_df['target'].astype(int).values
train_features = convert_examples_to_features(train_examples, tokenizer, max_seq_length, True)
# NOTE(review): select_field is never defined anywhere in this file — this
# cell raises NameError unless it is defined in another notebook cell
# (presumably `[f.choices_features[field] for f in features]`); verify.
all_input_ids = np.array(select_field(train_features, 'input_ids'))
all_input_mask = np.array(select_field(train_features, 'input_mask'))
all_segment_ids = np.array(select_field(train_features, 'segment_ids'))
all_label = np.array([f.label for f in train_features])
# + id="XrH-kPAvOByg" colab_type="code" colab={}
# BERT classifier head: learns a softmax-weighted mix of the [CLS] vectors
# from all 13 hidden states (embeddings + 12 layers), then averages the
# logits of 5 independent dropout passes ("multi-sample dropout").
# NOTE(review): indentation was lost in this source dump; statement nesting
# below (e.g. whether the final `h = h / len(...)` sits inside or after the
# dropout loop) is ambiguous as written — confirm against the original
# notebook before editing.
class NeuralNet(nn.Module):
def __init__(self, hidden_size=768, num_class=2):
super(NeuralNet, self).__init__()
# output_hidden_states/attentions make bert return all layers' outputs.
self.bert = BertModel.from_pretrained('bert-base-uncased',
output_hidden_states=True,
output_attentions=True)
# Full fine-tuning: every BERT parameter stays trainable.
for param in self.bert.parameters():
param.requires_grad = True
# One learnable mixing weight per hidden state (13 for BERT-base).
self.weights = nn.Parameter(torch.rand(13, 1))
self.dropouts = nn.ModuleList([
nn.Dropout(0.5) for _ in range(5)
])
self.fc = nn.Linear(hidden_size, num_class)
def forward(self, input_ids, input_mask, segment_ids):
# Last two outputs of BertModel here are (all_hidden_states, all_attentions).
all_hidden_states, all_attentions = self.bert(input_ids, token_type_ids=segment_ids, attention_mask=input_mask)[-2:]
batch_size = input_ids.shape[0]
# Stack the [CLS] (position 0) vector of each of the 13 hidden states.
ht_cls = torch.cat(all_hidden_states)[:, :1, :].view(
13, batch_size, 1, 768)
atten = torch.sum(ht_cls * self.weights.view(
13, 1, 1, 1), dim=[1, 3])
# Softmax over the 13 layer scores -> attention over hidden states.
atten = F.softmax(atten.view(-1), dim=0)
feature = torch.sum(ht_cls * atten.view(13, 1, 1, 1), dim=[0, 2])
# Multi-sample dropout: average the classifier over 5 dropout masks.
for i, dropout in enumerate(self.dropouts):
if i == 0:
h = self.fc(dropout(feature))
else:
h += self.fc(dropout(feature))
h = h / len(self.dropouts)
return h
# + id="eb4PEkbgOLVZ" colab_type="code" colab={}
# Stratified 7-fold CV over the binary target (only 2 folds are run below).
skf = StratifiedKFold(n_splits=7, shuffle=True, random_state=42)
# off: out-of-fold
oof_train = np.zeros((len(train_df), 2), dtype=np.float32)
# NOTE(review): test_df is never defined in this file (the test split is
# never passed through read_examples here) — this line raises NameError
# unless test_df is created in another cell; verify.
oof_test = np.zeros((len(test_df), 2), dtype=np.float32)
# + [markdown] id="NL-ouY6SR3PI" colab_type="text"
#
# + id="BjY91TLOPk9N" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 1000, "referenced_widgets": ["52fbd2165d0345e09f2d83142beed7ec", "35f9faf09e0b4fb7b16df842734d8d88", "84ce069ae7e9458591847f5962f42ebd", "829a3d126e51481497098c58f19ad695", "9e00ef8ef0a84ba3933ac74cee465a92", "12fac846e1fa42cba282fc710b2a2921", "<KEY>", "<KEY>", "<KEY>", "<KEY>", "af0639d04126439a9cedf14ff5910d73", "<KEY>", "<KEY>", "81585a17c44c4233b614e73fa6ba9436", "d330425719504a0fa1c99409dda6c250", "1c69e70656884d9f88e2a267c0cbdbd3", "8840f32424a3477bb1856ee09e0b4a64", "<KEY>", "<KEY>", "05f015e74cc54efd8951709fcc78f993", "<KEY>", "<KEY>", "<KEY>", "<KEY>", "0639ea718e1d44bea6beeb0fe77cfc67", "<KEY>", "a7715d8dc61d47e7aa049dfa6b841c0a", "bbcdc2e3106440b38ccbed14ed2a32b3", "<KEY>", "<KEY>", "c1acba1c162c4376a6facbd97fd9b862", "24c7e71046f24c90a275144e746676d8", "9e123eff7b8a4b45874a60a4906f6f2b", "de3a2da76f014be9b5ff9e0e591bed54", "412f4f94afad44c28ad07db4b9106b14", "6a5222d0563342c689cf89a66af7f2ee", "267d45c651464eeab20701785505b8c2", "3b0b86fd6e044e5fb95e51490364e4d0", "<KEY>", "<KEY>", "<KEY>", "7f513d6b6f714f28978ced81f7867933", "<KEY>", "1975285e7629434c94854d4c5a0c2de6", "<KEY>", "<KEY>", "<KEY>", "a652d8cef8e841c0beb7c65a808f2ceb", "<KEY>", "<KEY>", "<KEY>", "b88631fb0a9b4640ab356809e589a8f4", "60c3e76507a345d68a0b6e046855086b", "<KEY>", "<KEY>", "78013526265f47d5ad70af1c4e35d94f", "<KEY>", "87f3eb35fef64f52b462d2a33fab6161", "<KEY>", "fc121649e72249b29f23ca4f0c590e4a", "<KEY>", "<KEY>", "c2b50749e5374cca9395c9133a55593f", "<KEY>", "<KEY>", "3be026edec7e4caca34614c6c2d5229c", "397e86178e514d2094468d22a2f74ec8", "68913a02ccf943deba242fcffe44469e", "a3e58673adaf4abeb291333bdf58bbce", "<KEY>", "d3532ef694e04a1c9ba3c8ab5bebb039", "<KEY>", "<KEY>", "<KEY>", "<KEY>", "<KEY>", "7f75796eadd04e87b8df375a997dd578", "7249bf4e233342f596b8ea01feed27ce", "<KEY>", "79283c6bedc14d92afdb7a4218b4790a", "<KEY>", "<KEY>", "532bae92223f4fa69108f21f4ddb898d", 
"53cec850954a4b2bb768baeba941fea1", "9001d655f5ab45a589ab397a98afc947", "fab075d05f3840eb80ba09448f37ad6a", "<KEY>", "<KEY>", "2231ac27e7ac44818cb230d5c85a0b1a", "<KEY>", "<KEY>", "edfd36720b6d45d9acd9a2f1122fcec6", "2c298217f5bc42c9abed3d20f152489f", "<KEY>", "31a555649c414be586f0abb8b52ec987", "6e84003e6f704ee08d111d3a08a1eb3c", "<KEY>", "<KEY>", "<KEY>", "26f176ee1ced4ea38f86b76f4804a994", "<KEY>", "48e052ac58424f159f67b9ef202fa8ad", "897a57c6f02b4aaba0aedbad197522af", "f88b8ef3bedb4e71aeaee48dca70549d", "<KEY>", "<KEY>", "5985de4de0854453a7592918d6234186", "<KEY>", "e6f8232051a8457dba67bba12866d707", "4de38d94b58f4ba583aef49ed9f8e348", "e54f42c960074bc483249abe8dde0fae", "<KEY>", "<KEY>", "e3466907ed4a4dddb555afd0caea1d40", "<KEY>", "<KEY>", "815a8d7aa9af4a1c88845a60c0aae628", "<KEY>", "9b5e2cdb05234660b25e37fa6a28660e", "8e8fe4de15f9475eb214973f5a58f4de", "<KEY>", "70fad6b716d94657bb928353f00af107", "d555f9df69374e5882eda4fd16660606", "c53d903227ea4e998018c36fdbdc1ad1", "617c2885664244e383a7ee53925aacc5", "2f00ebac445d4db8933310ab2a8e7fd9", "3e9c34d5070e46d0a75312f8cc30bc9a", "a6612e4ac3a547729f24116ac76f7b85"]} outputId="c6c95ad8-af96-402c-9c70-5670dfa84172" executionInfo={"status": "ok", "timestamp": 1577954345791, "user_tz": -540, "elapsed": 252401, "user": {"displayName": "FooQoo", "photoUrl": "https://lh3.googleusercontent.com/a-/AAuE7mCATSPIVk8eQ-zGlJ_ijdpKHWt4baaI-o56K85qZw=s64", "userId": "13364521585663132459"}}
# Per-fold fine-tuning loop with early stopping and best-checkpoint reload.
# NOTE(review): test_input_ids / test_input_mask / test_segment_ids are
# never defined in this file — the loop raises NameError at the test
# TensorDataset line unless those arrays are built in another cell; verify.
for fold, (train_index, valid_index) in enumerate(skf.split(all_label, all_label)):
# remove this line if you want to train for all 7 folds
if fold == 2:
break # due to kernel time limit
logger.info('================ fold {} ==============='.format(fold))
# Slice this fold's train/valid rows into torch tensors.
train_input_ids = torch.tensor(all_input_ids[train_index], dtype=torch.long)
train_input_mask = torch.tensor(all_input_mask[train_index], dtype=torch.long)
train_segment_ids = torch.tensor(all_segment_ids[train_index], dtype=torch.long)
train_label = torch.tensor(all_label[train_index], dtype=torch.long)
valid_input_ids = torch.tensor(all_input_ids[valid_index], dtype=torch.long)
valid_input_mask = torch.tensor(all_input_mask[valid_index], dtype=torch.long)
valid_segment_ids = torch.tensor(all_segment_ids[valid_index], dtype=torch.long)
valid_label = torch.tensor(all_label[valid_index], dtype=torch.long)
# NOTE: rebinding `train`/`test` here shadows the dataframes loaded earlier.
train = torch.utils.data.TensorDataset(train_input_ids, train_input_mask, train_segment_ids, train_label)
valid = torch.utils.data.TensorDataset(valid_input_ids, valid_input_mask, valid_segment_ids, valid_label)
test = torch.utils.data.TensorDataset(test_input_ids, test_input_mask, test_segment_ids)
train_loader = torch.utils.data.DataLoader(train, batch_size=batch_size, shuffle=True)
valid_loader = torch.utils.data.DataLoader(valid, batch_size=batch_size, shuffle=False)
test_loader = torch.utils.data.DataLoader(test, batch_size=batch_size, shuffle=False)
# Fresh model and optimizer per fold; requires a CUDA device (.cuda()).
model = NeuralNet()
model.cuda()
loss_fn = torch.nn.CrossEntropyLoss()
param_optimizer = list(model.named_parameters())
# Standard BERT fine-tuning practice: no weight decay on biases/LayerNorm.
no_decay = ['bias', 'LayerNorm.bias', 'LayerNorm.weight']
optimizer_grouped_parameters = [
{'params': [p for n, p in param_optimizer if not any(nd in n for nd in no_decay)], 'weight_decay': 0.01},
{'params': [p for n, p in param_optimizer if any(nd in n for nd in no_decay)], 'weight_decay': 0.0}]
optimizer = AdamW(optimizer_grouped_parameters, lr=learning_rate, eps=1e-6)
model.train()
best_f1 = 0.
valid_best = np.zeros((valid_label.size(0), 2))
early_stop = 0
for epoch in range(num_epochs):
train_loss = 0.
for batch in tqdm(train_loader):
batch = tuple(t.cuda() for t in batch)
x_ids, x_mask, x_sids, y_truth = batch
y_pred = model(x_ids, x_mask, x_sids)
loss = loss_fn(y_pred, y_truth)
optimizer.zero_grad()
loss.backward()
optimizer.step()
train_loss += loss.item() / len(train_loader)
# Per-epoch validation pass (no grad); collect softmax probabilities.
model.eval()
val_loss = 0.
valid_preds_fold = np.zeros((valid_label.size(0), 2))
with torch.no_grad():
for i, batch in tqdm(enumerate(valid_loader)):
batch = tuple(t.cuda() for t in batch)
x_ids, x_mask, x_sids, y_truth = batch
y_pred = model(x_ids, x_mask, x_sids).detach()
val_loss += loss_fn(y_pred, y_truth).item() / len(valid_loader)
valid_preds_fold[i * batch_size:(i + 1) * batch_size] = F.softmax(y_pred, dim=1).cpu().numpy()
acc, f1 = metric(all_label[valid_index], np.argmax(valid_preds_fold, axis=1))
# Checkpoint on macro-F1 improvement; otherwise count toward early stop.
if best_f1 < f1:
early_stop = 0
best_f1 = f1
valid_best = valid_preds_fold
torch.save(model.state_dict(), 'model_fold_{}.bin'.format(fold))
else:
early_stop += 1
logger.info(
'epoch: %d, train loss: %.8f, valid loss: %.8f, acc: %.8f, f1: %.8f, best_f1: %.8f\n' %
(epoch, train_loss, val_loss, acc, f1, best_f1))
torch.cuda.empty_cache()
if early_stop >= patience:
break
# Reload the best checkpoint and produce final valid/test predictions.
test_preds_fold = np.zeros((len(test_df), 2))
valid_preds_fold = np.zeros((valid_label.size(0), 2))
model.load_state_dict(torch.load('model_fold_{}.bin'.format(fold)))
model.eval()
with torch.no_grad():
for i, batch in tqdm(enumerate(valid_loader)):
batch = tuple(t.cuda() for t in batch)
x_ids, x_mask, x_sids, y_truth = batch
y_pred = model(x_ids, x_mask, x_sids).detach()
valid_preds_fold[i * batch_size:(i + 1) * batch_size] = F.softmax(y_pred, dim=1).cpu().numpy()
with torch.no_grad():
for i, batch in tqdm(enumerate(test_loader)):
batch = tuple(t.cuda() for t in batch)
x_ids, x_mask, x_sids = batch
y_pred = model(x_ids, x_mask, x_sids).detach()
test_preds_fold[i * batch_size:(i + 1) * batch_size] = F.softmax(y_pred, dim=1).cpu().numpy()
# NOTE(review): this overwrite makes the earlier `valid_best = valid_preds_fold`
# bookkeeping redundant — the reloaded best checkpoint is re-scored instead.
valid_best = valid_preds_fold
oof_train[valid_index] = valid_best
acc, f1 = metric(all_label[valid_index], np.argmax(valid_best, axis=1))
logger.info('epoch: best, acc: %.8f, f1: %.8f, best_f1: %.8f\n' %
(acc, f1, best_f1))
#oof_test += test_preds_fold / 7 # uncomment this for 7 folds
oof_test += test_preds_fold / 2 # comment this line when training for 7 folds
# + id="hoQ6BbgQP6an" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 51} outputId="0b314397-6f79-4361-b3d8-1b5d716f4007" executionInfo={"status": "ok", "timestamp": 1577955352965, "user_tz": -540, "elapsed": 1073, "user": {"displayName": "FooQoo", "photoUrl": "https://lh3.googleusercontent.com/a-/AAuE7mCATSPIVk8eQ-zGlJ_ijdpKHWt4baaI-o56K85qZw=s64", "userId": "13364521585663132459"}}
# Overall out-of-fold F1 over the whole training set
# (oof_train holds the per-fold validation softmax probabilities).
logger.info(f1_score(labels, np.argmax(oof_train, axis=1)))
# Attach the OOF predicted class to train_df for error inspection.
train_df['pred_target'] = np.argmax(oof_train, axis=1)
# + id="33ICJY92QAI0" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 204} outputId="295744e2-490b-42ff-e01d-4b31804a2ece" executionInfo={"status": "ok", "timestamp": 1577955357211, "user_tz": -540, "elapsed": 1282, "user": {"displayName": "FooQoo", "photoUrl": "https://lh3.googleusercontent.com/a-/AAuE7mCATSPIVk8eQ-zGlJ_ijdpKHWt4baaI-o56K85qZw=s64", "userId": "13364521585663132459"}}
train_df.head()
# + id="-5mR17EOQCEL" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 119} outputId="ac7d62d7-0031-4ec8-c9db-1a378c58f94a" executionInfo={"status": "ok", "timestamp": 1577955365184, "user_tz": -540, "elapsed": 636, "user": {"displayName": "FooQoo", "photoUrl": "https://lh3.googleusercontent.com/a-/AAuE7mCATSPIVk8eQ-zGlJ_ijdpKHWt4baaI-o56K85qZw=s64", "userId": "13364521585663132459"}}
# Hard test predictions from the fold-averaged softmax probabilities.
test_df['target'] = np.argmax(oof_test, axis=1)
# Class balance of the predictions -- a quick sanity check.
logger.info(test_df['target'].value_counts())
# + id="cfrE1y0OQEua" colab_type="code" colab={}
# NOTE(review): the filename says "3fold" but oof_test is averaged with
# `/ 2` above -- confirm the fold count matches before submitting.
submit['target'] = np.argmax(oof_test, axis=1)
submit.to_csv('submission_3fold.csv', index=False)
| notebook/bert_baseline.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Scraping /r/jokes from reddit
# I do feel there's a need for explanation:
# Why reddit jokes?
# The answer is simple: the ease of scraping and the format of the jokes.
# /r/jokes reddit posts follow similar format: Short question or first line in the title, and punch line in the body of the post.
# Added goodies are: score to separate good jokes from the bad ones, and time of the post.
# This leaves plenty of room for exploration:
# -- what kind of jokes are made the most often when?
# -- are people posting more jokes during which month?
#
# etc. etc.
# ## set up PRAW
# praw is the go to API for reddit scraping, so this is not an exception.
# +
import string
import praw

# Authenticated reddit client for scraping.  All credentials are redacted
# ('XXX' / '<PASSWORD>') and must be filled in before running.
# NOTE(review): `import string` is unused in this cell -- presumably needed
# further down the notebook; confirm before removing.
redditBot = praw.Reddit(user_agent='XXX',
                        client_id='XXX',
                        client_secret='XXX',
                        username='XXX',
                        password='<PASSWORD>')
# -
# View the first few submissions to see what we're dealing with:
# Quick look at the first few hot submissions to see the fields we get.
jokesSub = redditBot.subreddit('Jokes')

from datetime import datetime

for submission in jokesSub.hot(limit=3):
    # Title = setup, selftext = punch line (per the /r/Jokes post format).
    print(submission.title, submission.selftext)
    print("time:", datetime.utcfromtimestamp(submission.created_utc).strftime('%Y-%m-%d, %H::%M'))
    print("author: ",submission.author.name)
    print("score: ", submission.score,)
    print("id:", submission.id)
    # upvote_ratio is noticeably slow to fetch, hence left commented out.
    #print("ups & ratio:", submission.ups,',', submission.upvote_ratio)
    print("----------------")
# From this, a few things are apparent:
# -- reddit no longer displays downvote counts, only upvote counts and upvote ratio
# -- not all posts are jokes, some are mod posts
# -- upvote ratio is very slow to get, for some reason.
# ## Getting the jokes, get it? get it?
# Previously, I have tried to mine reddit over a large range of dates, or by streaming. One thing usually happens: the notebook crashes, all data are lost, or both.
# So I am downloading jokes piecewise, month by month, from 2010.
#
# Second thing is that I'm only getting jokes with score>5, to weed out the really bad ones.
# +
# Month-start boundaries from 2010-01 through 2018-02 as POSIX timestamps;
# consecutive pairs bound one month of scraping.
from datetime import datetime

years = range(2010, 2018)
months = range(1, 13)
timestamp_list = [datetime(y, m, 1).timestamp() for y in years for m in months]
timestamp_list.append(datetime(2018, 1, 1).timestamp())
timestamp_list.append(datetime(2018, 2, 1).timestamp())

# Sanity pass over the boundaries (printing intentionally left commented out).
for boundary in timestamp_list:
    value = datetime.fromtimestamp(boundary)
    #print(value.strftime('%Y-%m-%d'))
# -
# Finally, getting file --- I do have to say, this scraping took 3 hours, without getting the upvote_ratio.
# I'm not sure how long a complete data set will take.
# The resulting CSV file is about 50mb.
# Scrape jokes month-by-month into a CSV (one long pass; ~3 hours as noted
# in the markdown above).
start_timestamp = timestamp_list[0]
jokelist = []  # no longer used; rows are written straight to the CSV
counter = 0    # running count of jokes written so far
import csv
with open('all_jokes_plus.csv', 'w') as csvfile:
    # '|' as quote char with full quoting so commas/newlines in jokes survive.
    spamwriter = csv.writer(csvfile, quoting=csv.QUOTE_ALL, quotechar="|", delimiter=",")
    for i in range(1, len(timestamp_list)):
        end_timestamp = timestamp_list[i]
        s = datetime.fromtimestamp(start_timestamp)
        e = datetime.fromtimestamp(end_timestamp)
        print("getting top jokes from ", s.strftime('%Y-%m-%d'), " to ", e.strftime('%Y-%m-%d'))
        print(start_timestamp, end_timestamp)
        currJokes = jokesSub.submissions(start_timestamp, end_timestamp)
        for submission in currJokes:
            #print(submission.title, submission.author)
            # NOTE(review): `jokes_mods` is not defined anywhere in this
            # notebook -- presumably a list of moderator accounts from an
            # earlier/removed cell; confirm before running.
            if (submission.author in jokes_mods):
                continue
            # Posts scoring below 5 are skipped to weed out the bad jokes.
            if (submission.score <5):
                continue
            q = submission.title
            a = submission.selftext
            # Skip posts missing either a setup (title) or punch line (body).
            if (len(q) == 0 or len(a) == 0):
                continue
            #jokelist = jokelist + [[submission.id,submission.created_utc, q, a]]
            spamwriter.writerow([submission.id, submission.score,q, a,
                                 submission.created_utc,submission.author.name, submission.ups, submission.upvote_ratio])
            counter = counter + 1
            if (counter %500 ==0):
                print("\t processed ", counter, " jokes")
        start_timestamp = end_timestamp
        #for j in jokelist:
        #    spamwriter.writerow(j)
        #jokelist=[]
        print("processed ", counter, " jokes so far")
# ... sample output:....
# + active=""
#
# getting top jokes from 2010-01-01 to 2010-02-01
# 1262322000.0 1265000400.0
# processed 4 jokes so far
# getting top jokes from 2010-02-01 to 2010-03-01
# 1265000400.0 1267419600.0
# processed 9 jokes so far
# getting top jokes from 2010-03-01 to 2010-04-01
# 1267419600.0 1270094400.0
# processed 10 jokes so far
# getting top jokes from 2010-04-01 to 2010-05-01
# 1270094400.0 1272686400.0
# ... ...
# which goes on for hours.
| jupyter/ScrapeJokes_blog.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
# open wordnet_to_labels_txt file
# Parse the ImageNet "class index -> human-readable label" mapping.
# The file holds a Python dict literal; ast.literal_eval is used instead of
# eval so that arbitrary code in the file cannot be executed.
import ast
with open('imagenet1000_clsidx_to_labels.txt','r') as f:
    wordnet_to_labels_dict = ast.literal_eval(f.read())
# -
wordnet_to_labels_dict[370]
# +
# Load the ground-truth and per-model prediction CSVs from the result folder.
import csv

CSV_FOLDER = "../result"

def _load_label_csv(path):
    """Read a label CSV into a dict.

    Each row is expected to look like ``filename,label1,label2,...``; the
    first field becomes the key and the remaining fields the value list.
    Presumably the labels are ImageNet class indices as strings -- TODO
    confirm against the files in ``../result``.
    """
    label_dict = {}
    with open(path) as f:
        for row in csv.reader(f):
            label_dict[row[0]] = row[1:]
    return label_dict

# real_label (ground truth).  Rows whose first label is "9999" are kept here
# and filtered out later:
# if value[0] == "9999":
#     continue
real_label_dict = _load_label_csv(f"{CSV_FOLDER}/real_label.csv")

# gMLP predictions
gmlp_predict_label_dict = _load_label_csv(f"{CSV_FOLDER}/gmlp.csv")

# EfficientNet-B3 predictions
efficientnet_predict_label_dict = _load_label_csv(f"{CSV_FOLDER}/efficientnet_b3.csv")
# -
print(real_label_dict["ILSVRC2012_val_00000002.JPEG"])
print(gmlp_predict_label_dict["ILSVRC2012_val_00000001.JPEG"])
print(efficientnet_predict_label_dict["ILSVRC2012_val_00000001.JPEG"])
i = 1
validation_file_sum = 50001
# Translate an English label to Japanese (original comment in Japanese).
def translation_en_to_ja(text):
    """Return *text* translated from English to Japanese.

    Uses the third-party googletrans client; every call performs a network
    request, so avoid calling it in tight loops.
    """
    from googletrans import Translator
    tr = Translator()
    return tr.translate(text=text, src="en", dest="ja").text
# +
# Display a validation image together with its ground-truth label
# (original comment in Japanese: 画像を表示).
# %matplotlib inline
import matplotlib.pyplot as plt
import matplotlib.image as mpimg

# `i` is defined above and incremented here, so re-running this cell steps
# through the validation images one at a time.
if i < validation_file_sum:
    image_file_name = f"ILSVRC2012_val_{str(i).zfill(8)}.JPEG"
    real_label = real_label_dict[image_file_name]
    print(real_label)
    plt.imshow(mpimg.imread(f"../imagenet/validation/{image_file_name}"),cmap="gray")
    # "9999" is used elsewhere in this notebook as an 'unlabeled' sentinel.
    if real_label[0] != "9999":
        plt.title(f"real:{wordnet_to_labels_dict[int(real_label[0])]}", loc='left', y=1)
    i += 1
# -
# Split validation images into top-1 correct / incorrect lists per model.
# Images with multiple ground-truth labels, or with the "9999" (unlabeled)
# sentinel, are skipped.  The same logic was previously duplicated for the
# gMLP and EfficientNet passes; it is factored into one helper here.
def _split_by_top1(predict_label_dict, real_label_dict, n_images=50000):
    """Return ``(correct, incorrect)`` lists of validation image numbers.

    Both dicts map validation file names to label lists (strings); only the
    first entry (top-1) of the prediction is compared against the single
    ground-truth label.
    """
    correct, incorrect = [], []
    for image_no in range(1, n_images + 1):
        image_file_name = f"ILSVRC2012_val_{str(image_no).zfill(8)}.JPEG"
        real_labels = real_label_dict[image_file_name]
        if len(real_labels) > 1:
            continue  # ambiguous ground truth: skip
        if int(real_labels[0]) == 9999:
            continue  # unlabeled sentinel: skip
        if int(predict_label_dict[image_file_name][0]) == int(real_labels[0]):
            correct.append(image_no)
        else:
            incorrect.append(image_no)
    return correct, incorrect

# gMLP: top-1 correct / incorrect image lists.
gmlp_predict_correct_list, gmlp_predict_incorrect_list = _split_by_top1(
    gmlp_predict_label_dict, real_label_dict)
len(gmlp_predict_incorrect_list)

# EfficientNet: same split.
efficientnet_predict_correct_list, efficientnet_predict_incorrect_list = _split_by_top1(
    efficientnet_predict_label_dict, real_label_dict)
len(efficientnet_predict_incorrect_list)
# Images that gMLP classified correctly but EfficientNet got wrong (top-1).
gmlp_predict_correct_set = set(gmlp_predict_correct_list)
efficient_predict_incorrect_set = set(efficientnet_predict_incorrect_list)
gmlp_correct_but_efficient_incorrect_list = sorted(
    gmlp_predict_correct_set & efficient_predict_incorrect_set)
print(len(gmlp_correct_but_efficient_incorrect_list))
print(type(gmlp_correct_but_efficient_incorrect_list))
print(gmlp_correct_but_efficient_incorrect_list)
# Images that gMLP got wrong but EfficientNet classified correctly (top-1).
gmlp_predict_incorrect_set = set(gmlp_predict_incorrect_list)
efficient_predict_correct_set = set(efficientnet_predict_correct_list)
gmlp_incorrect_but_efficient_correct_list = sorted(
    gmlp_predict_incorrect_set & efficient_predict_correct_set)
print(len(gmlp_incorrect_but_efficient_correct_list))
print(type(gmlp_incorrect_but_efficient_correct_list))
print(gmlp_incorrect_but_efficient_correct_list)
translation_en_to_ja("baboon")
j = 50
gmlp_correct_but_efficient_incorrect_num = len(gmlp_correct_but_efficient_incorrect_list)
# +
# gmlpが正解でefficientnetが不正解のリストから画像を確認する
# %matplotlib inline
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
if j < gmlp_correct_but_efficient_incorrect_num:
image_num = gmlp_correct_but_efficient_incorrect_list[j]
print("---------------------------------------------------------------")
image_file_name = f"ILSVRC2012_val_{str(image_num).zfill(8)}.JPEG"
real_label = real_label_dict[image_file_name]
print(f"{image_file_name}")
print(f"real label:{real_label}")
gmlp_predict_label_name = wordnet_to_labels_dict[int(gmlp_predict_label_dict[image_file_name][0])]
gmlp_predict_label_ja = translation_en_to_ja(gmlp_predict_label_name)
print(f"gmlp(correct):{gmlp_predict_label_name}({gmlp_predict_label_ja})")
print(f"gmlp_top5_label:{gmlp_predict_label_dict[image_file_name]}")
efficientnet_predict_label_name = wordnet_to_labels_dict[int(efficientnet_predict_label_dict[image_file_name][0])]
efficientnet_predict_label_ja = translation_en_to_ja(efficientnet_predict_label_name)
print(f"efficientnet(incorrect):{efficientnet_predict_label_name}({efficientnet_predict_label_ja})")
print(f"efficientent_top5_label:{efficientnet_predict_label_dict[image_file_name]}")
plt.imshow(mpimg.imread(f"../imagenet/validation/{image_file_name}"),cmap="gray")
plt.title(f"label{real_label[0]}:{wordnet_to_labels_dict[int(real_label[0])]}", loc='left', y=1)
j += 1
# -
# Show English and Japanese label names for a hand-picked set of class ids
# (examples from the gMLP-correct / EfficientNet-incorrect analysis above).
# NOTE: the original bound this to the name `list`, shadowing the builtin;
# renamed to avoid that.
label_ids = ['596', '677', '113', '301', '502']
for label_id in label_ids:
    num = int(label_id)
    label_name = wordnet_to_labels_dict[num]
    print(label_id)
    print(label_name)
    print(translation_en_to_ja(label_name))
    print("\n")
k = 0
gmlp_incorrect_but_efficient_correct_num = len(gmlp_incorrect_but_efficient_correct_list)
# +
# gmlpが不正解でefficientnetが正解のリストから画像を確認する
# %matplotlib inline
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
if k < gmlp_incorrect_but_efficient_correct_num:
image_num = gmlp_incorrect_but_efficient_correct_list[k]
image_file_name = f"ILSVRC2012_val_{str(image_num).zfill(8)}.JPEG"
real_label = real_label_dict[image_file_name]
print("---------------------------------------------------------------")
print(f"{image_file_name}")
print(f"real label:{real_label}")
gmlp_predict_label_name = wordnet_to_labels_dict[int(gmlp_predict_label_dict[image_file_name][0])]
gmlp_predict_label_ja = translation_en_to_ja(gmlp_predict_label_name)
print(f"gmlp(incorrect):{gmlp_predict_label_name}({gmlp_predict_label_ja})")
print(f"gmlp_top5_label:{gmlp_predict_label_dict[image_file_name]}")
efficientnet_predict_label_name = wordnet_to_labels_dict[int(efficientnet_predict_label_dict[image_file_name][0])]
efficientnet_predict_label_ja = translation_en_to_ja(efficientnet_predict_label_name)
print(f"efficientnet(correct):{efficientnet_predict_label_name}({efficientnet_predict_label_ja})")
print(f"efficientent_top5_label:{efficientnet_predict_label_dict[image_file_name]}")
plt.imshow(mpimg.imread(f"../imagenet/validation/{image_file_name}"),cmap="gray")
plt.title(f"label{real_label[0]}:{wordnet_to_labels_dict[int(real_label[0])]}", loc='left', y=1)
k += 1
# -
# Show English and Japanese label names for a hand-picked set of class ids
# (examples from the gMLP-incorrect / EfficientNet-correct analysis above).
# NOTE: the original bound this to the name `list`, shadowing the builtin;
# renamed to avoid that.
label_ids = ['456', '558', '902', '796', '764']
for label_id in label_ids:
    num = int(label_id)
    label_name = wordnet_to_labels_dict[num]
    print(label_id)
    print(label_name)
    print(translation_en_to_ja(label_name))
    print("\n")
| notebooks/error_analysis_top1.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import argparse
import musdb
import museval
import test
import multiprocessing
import functools
from pathlib import Path
import torch
import tqdm
import numpy as np
# %matplotlib inline
# +
# Model checkpoint to evaluate: the published pretrained 'umxhq' open-unmix
# weights.  Locally-trained alternatives are kept commented for switching.
model = 'umxhq'
model_name = 'umxhq'
#model = '../out_unmix/model_new_data_aug'
#model_name = 'model_new_data_aug'
# model = '../out_unmix/model_new_data'
# model_name = 'model_new_data'
# model = '../out_unmix/model8'
# model_name = 'model8'
targets = ['vocals']                       # source(s) to separate
outdir = '../test_out/BSS_eval_plots/'     # where BSS-eval plots are saved
evaldir = '../out_dir_evals/Exp1_umxhq'    # evaluation output directory
root = '../test_out/Exp_1/exp1_tracks/'    # musdb-formatted track folder
subset = 'train'
cores = 1
no_cuda = False                            # set True to force CPU
is_wav = True                              # tracks are stored as wav stems
# -
use_cuda = not no_cuda and torch.cuda.is_available()
device = torch.device("cuda" if use_cuda else "cpu")
mus = musdb.DB(
root=root,
download=root is None,
subsets=subset,
is_wav=is_wav
)
mus.tracks
track = mus.tracks[1]
'''0 - Yaman+comp50 == test data
1 - musdb-eg
2 - Sakhi_mori == train data
3 - yaman+comp__ == valid data
4 - osf_malhar
'''
track.audio.shape
estimates = test.separate(
audio=track.audio,
targets=targets,
model_name=model,
niter=2,
alpha=1,
softmask=False,
#device=device
)
audio_estimates = []
audio_reference = []
eval_targets = []
for key, target in list(track.targets.items()):
try:
# try to fetch the audio from the user_results of a given key
estimates[key]
except KeyError:
# ignore wrong key and continue
continue
eval_targets.append(key)
mode='v4'
win=1.0
hop=1.0
data = museval.aggregate.TrackStore(win=win, hop=hop, track_name=track.name)
# check if vocals and accompaniment is among the targets
has_acc = all(x in eval_targets for x in ['vocals', 'accompaniment'])
if has_acc:
# remove accompaniment from list of targets, because
# the voc/acc scenario will be evaluated separately
eval_targets.remove('accompaniment')
audio_estimates.append(estimates['vocals'])
audio_reference.append(track.targets['vocals'].audio)
def pad_or_truncate(
    audio_reference,
    audio_estimates
):
    """Make the estimates match the references in duration.

    - If the references are longer, zero-pad the end of the estimates.
    - If the estimates are longer, truncate them to the reference length.

    Parameters
    ----------
    audio_reference : np.ndarray, shape=(nsrc, nsampl, nchan)
        Array containing the true reference sources.
    audio_estimates : np.ndarray, shape=(nsrc, nsampl, nchan)
        Array containing the estimated sources.

    Returns
    -------
    audio_reference : np.ndarray, shape=(nsrc, nsampl, nchan)
        The references, unchanged.
    audio_estimates : np.ndarray, shape=(nsrc, nsampl, nchan)
        The estimates, adjusted to the reference duration.
    """
    n_ref = audio_reference.shape[1]
    n_est = audio_estimates.shape[1]
    if n_est > n_ref:
        # Estimates run long: drop the extra tail samples.
        audio_estimates = audio_estimates[:, :n_ref, :]
    elif n_est < n_ref:
        # Estimates run short: append silence to the end.
        pad_width = [(0, 0), (0, n_ref - n_est), (0, 0)]
        audio_estimates = np.pad(audio_estimates, pad_width, mode='constant')
    return audio_reference, audio_estimates
def evaluate(
    references,
    estimates,
    win=1*44100,
    hop=1*44100,
    mode='v4',
    padding=True
):
    """Run BSS_EVAL (image variant) via the museval metrics module.

    Parameters
    ----------
    references : np.ndarray, shape=(nsrc, nsampl, nchan)
        True reference sources.
    estimates : np.ndarray, shape=(nsrc, nsampl, nchan)
        Estimated sources.
    win : int
        Window size in samples (defaults to 44100, i.e. one second).
    hop : int
        Hop size in samples (defaults to 44100 -- no overlap).
    mode : str
        BSSEval version; 'v3' switches on framewise filters.
    padding : bool
        When True, pad/truncate the estimates to the reference duration
        before evaluating.

    Returns
    -------
    SDR, ISR, SIR, SAR : np.ndarray, each shape=(nsrc,)
        Signal-to-distortion, source-image-to-spatial-distortion,
        source-to-interference and source-to-artifact ratios.
    """
    estimates = np.array(estimates)
    references = np.array(references)
    if padding:
        references, estimates = pad_or_truncate(references, estimates)
    sdr, isr, sir, sar, _ = museval.metrics.bss_eval(
        references,
        estimates,
        compute_permutation=False,
        window=win,
        hop=hop,
        framewise_filters=(mode == "v3"),
        bsseval_sources_version=False
    )
    return sdr, isr, sir, sar
SDR, ISR, SIR, SAR = evaluate(
audio_reference,
audio_estimates,
win=int(win*track.rate),
hop=int(hop*track.rate),
mode=mode
)
from matplotlib import pyplot as plt
# +
# Box-plot of the frame-wise SDR values for the (single) vocals target.
plt.figure(figsize=(16,12))
plt.subplot(2, 2, 2)
plt.title("SDR Box-plot")
plt.ylabel("SDR")
# NOTE(review): legend() is called before any labelled artist exists, so it
# draws nothing and matplotlib warns "No handles with labels found" --
# consider removing this call.
plt.legend()
#plt.subplot(1,2,1)
plt.boxplot(SDR[0] , showfliers=False)
#plt.subplot(1,2,2)
#plt.boxplot(ISR[0])
#plt.savefig(outdir + "SDR_plot_" + track.name + "_" + model_name + ".pdf" )
# -
plt.figure(figsize=(16,12))
plt.subplot(2, 2, 2)
plt.title("ISR Box-plot")
plt.ylabel("ISR")
plt.boxplot(ISR[0])
#plt.savefig(outdir + "ISR_plot_" + track.name + "_" + model_name + ".pdf" )
plt.figure(figsize=(16,12))
plt.subplot(2, 2, 2)
plt.title("SAR Box-plot")
plt.ylabel("SAR")
plt.boxplot(SAR[0])
#plt.savefig(outdir + "SAR_plot_" + track.name + "_" + model_name + ".pdf" )
type(track.name)
np.median(SDR[0])
| evaluate_final.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/data-newbie/atom/blob/master/6_1_data_visualization_exercise.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + [markdown] id="DImkhpU42aHq" colab_type="text"
# ## Import `pyplot` from `matplotlib` as `plt`.
# + id="xwsZkxhi2aHw" colab_type="code" colab={}
import matplotlib.pyplot as plt
# + [markdown] id="f6mpXZkL2aIE" colab_type="text"
# ## Add `matplotlib` magic for inline plots.
#
#
#
#
# + id="urkVvJAh2aIH" colab_type="code" colab={}
# %matplotlib inline
# + [markdown] id="2YxANGGn2aIN" colab_type="text"
# ## Create an empty plot using `.plot()` method from the `matplotlib.pyplot module`.
# + id="yKhsK1a92aIP" colab_type="code" outputId="ed6264b3-ac3e-4d22-b648-0ed29645a1e1" colab={"base_uri": "https://localhost:8080/", "height": 282}
plt.plot()
# + [markdown] id="6aCcA1Xb2aIS" colab_type="text"
# ## Create a simple line with the points [1,1] and [2,2] using `plt.plot()`.
# + id="nzUGzDiD2aIT" colab_type="code" outputId="1b4766ca-63b6-4692-a9f5-188be2833bd5" colab={"base_uri": "https://localhost:8080/", "height": 282}
plt.plot([1,2],[1,2])
# + [markdown] id="1nvs-FVE2aIW" colab_type="text"
# ## Now make it a scatterplot with the same points, [1,1] and [2,2], but use `plt.scatter()` instead.
# + id="P9ErO9ub2aIX" colab_type="code" colab={}
# + [markdown] id="Baljlzit2aIZ" colab_type="text"
# ## Add a `ylabel` to it called `"some numbers"`.
# + id="fFqkbh4l2aIa" colab_type="code" colab={}
# + [markdown] id="vML3YKde2aId" colab_type="text"
# ## Import `seaborn` as `sns`
#
# If this fails, open up a terminal, type `conda install seaborn`, and restart the Jupyter Notebook server.
# + id="nptAz_uO2aIe" colab_type="code" colab={}
# + [markdown] id="L5iIobty2aIh" colab_type="text"
# ## Set `seaborn` to "on" to style your plots
# + [markdown] id="PzFofM0Y2aIj" colab_type="text"
# Use the command `sns.set()`.
# + id="4ZvKjhng2aIk" colab_type="code" colab={}
# + [markdown] id="BEEnGXDP2aIn" colab_type="text"
# ## Create an empty plot using `plt.plot()`.
# + id="bd1FyAtf2aIo" colab_type="code" colab={}
# + [markdown] id="Gc6WVADp2aIs" colab_type="text"
# ## Create a simple line with the points [1,1] and [2,2] using `plt.plot()`.
# + id="onC-lWa42aIt" colab_type="code" colab={}
# + [markdown] id="6mTTVIxf2aIw" colab_type="text"
# ## Now make it a scatterplot with the same points, [1,1] and [2,2], but use `plt.scatter()` instead.
# Add a `ylabel` to it called `"some numbers"`.
# + id="rBj36fiN2aIw" colab_type="code" colab={}
| 6_1_data_visualization_exercise.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Compare trials by sessions
# +
import numpy as np
import matplotlib.pyplot as plt
import os
import pandas as pd
import matplotlib.pyplot as plt
import qgrid
#from nma_class import loader
from nma_class_Copy1 import loader
# %matplotlib inline
# -
# ### To compare the trials
# Output:
#
# Table with
#
# session name; all trials; correct trials; diff contrast; no contrast; %Correct; %Contrast; % 0contrast
# +
# For the first session:
# link to the parent folder with all datasets.
main_folder = '/Volumes/GoogleDrive/My Drive/NMA_Dataset/Dataset_1'

from nma_class_Copy1 import loader

# Instantiate the loader without shadowing the imported class name (the
# original rebound `loader` to the instance, which is why later cells had
# to re-import it).
session_loader = loader(main_folder)
all_session_folders, all_session_names = session_loader.get_available_session()

# Load the default session: put any integer instead of 0, or directly the
# folder of the session.
session_folder = all_session_folders[0]
session_name = all_session_names[0]
session = session_loader.load_session(session_folder, fast=True)

spikes_df = session['spikes_df']
clusters_df = session['clusters_df']
trials_df = session['trials_df']

# Narrow down: included trials -> correct trials -> correct trials with
# unequal L/R contrast, and correct trials with zero contrast on both sides.
trials_select_df = trials_df.loc[trials_df['included']==True]
correct_df = trials_select_df.loc[(trials_select_df['feedback type']==1)]
diff_contrasts_df = correct_df.loc[(correct_df['stim contrast left']!=(correct_df['stim contrast right']))]
no_contrasts_df = correct_df.loc[(correct_df['stim contrast left']==0) & (correct_df['stim contrast right']==0)]

all_t = len(trials_select_df)
crrct = len(correct_df)
cont = len(diff_contrasts_df)
nocont = len(no_contrasts_df)

# One summary row for this session (the pointless `df1 = ''` pre-assignment
# was dropped).
df1 = {'session name': [session_name],
       'all trials': [all_t],
       'correct trials': [crrct],
       'diff contrast t': [cont],
       'no contras': [nocont],
       '%Correct': [round(crrct/all_t*100,2)],
       '%Contrast':[round(cont/crrct*100,2)],
       '%0contrast':[round(nocont/crrct*100,2)]}
df2 = pd.DataFrame(data=df1)
df3 = df2
# -
df3
# +
# For all the other sessions:
# link to the parent folder with all datasets.
main_folder = '/Volumes/GoogleDrive/My Drive/NMA_Dataset/Dataset_1'

# Re-import in case `loader` was rebound to an instance by an earlier cell.
from nma_class_Copy1 import loader

session_loader = loader(main_folder)
all_session_folders, all_session_names = session_loader.get_available_session()

# Build one summary row per remaining session and append it to df3.
for i in range(1, len(all_session_folders)):
    session_folder = all_session_folders[i]
    session_name = all_session_names[i]
    session = session_loader.load_session(session_folder, fast=True)

    spikes_df = session['spikes_df']
    clusters_df = session['clusters_df']
    trials_df = session['trials_df']

    trials_select_df = trials_df.loc[trials_df['included']==True]
    correct_df = trials_select_df.loc[(trials_select_df['feedback type']==1)]
    diff_contrasts_df = correct_df.loc[(correct_df['stim contrast left']!=(correct_df['stim contrast right']))]
    no_contrasts_df = correct_df.loc[(correct_df['stim contrast left']==0) & (correct_df['stim contrast right']==0)]

    all_t = len(trials_select_df)
    crrct = len(correct_df)
    cont = len(diff_contrasts_df)
    nocont = len(no_contrasts_df)

    df5 = {'session name': [session_name],
           'all trials': [all_t],
           'correct trials': [crrct],
           'diff contrast t': [cont],
           'no contras': [nocont],
           '%Correct': [round(crrct/all_t*100,2)],
           '%Contrast':[round(cont/crrct*100,2)],
           '%0contrast':[round(nocont/crrct*100,2)]}
    df6 = pd.DataFrame(data=df5)
    # DataFrame.append was deprecated and removed in pandas 2.0;
    # pd.concat is the supported equivalent (same default index behavior).
    df3 = pd.concat([df3, df6])
# -
df3
# +
# Adjust the df index and add a sequential session-number column.
df3.set_index("session name", inplace = True)

# The original filled `session_number` one row at a time via chained
# indexing (df3['session_number'][i] = ...), which triggers pandas'
# SettingWithCopyWarning and is not guaranteed to write through.
# Assigning the whole column at once is correct and simpler (the values
# 0..n-1 are identical; the dtype becomes int instead of object).
df3['session_number'] = np.arange(len(df3))
df3
# -
#to save
df3.to_csv (r'/Users/kcenia/Documents/GitHub/NMA_project/kce/trials_comparison_df.csv', index = False, header=True)
# ## To plot the trials
# +
# Plot %Correct / %Contrast / %0contrast per session as grouped bars.
# Selecting the three columns directly replaces the original Series +
# repeated pd.concat construction (same resulting frame).
df4 = df3[['%Correct', '%Contrast', '%0contrast']]
#df4 = pd.concat([df4, df3['all trials']], axis=1, sort=False)
figure = df4.plot.bar(figsize=(20,10));
# +
#save the figure
#figure.figure.savefig('trials_comparison_included.png')
# -
| kce/A. Compare trials by session.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
# # Python: Handling missing values
# **Goal**: Clean and organise your data!
# + [markdown] toc=true
# <h1>Table of Contents<span class="tocSkip"></span></h1>
# <div class="toc"><ul class="toc-item"><li><span><a href="#Introduction-to-dataset" data-toc-modified-id="Introduction-to-dataset-1"><span class="toc-item-num">1 </span>Introduction to dataset</a></span></li><li><span><a href="#Find-missing-values" data-toc-modified-id="Find-missing-values-2"><span class="toc-item-num">2 </span>Find missing values</a></span></li><li><span><a href="#Problems-with-missing-values" data-toc-modified-id="Problems-with-missing-values-3"><span class="toc-item-num">3 </span>Problems with missing values</a></span><ul class="toc-item"><li><span><a href="#Training" data-toc-modified-id="Training-3.1"><span class="toc-item-num">3.1 </span>Training</a></span></li></ul></li><li><span><a href="#Introduction-to-pivot-table" data-toc-modified-id="Introduction-to-pivot-table-4"><span class="toc-item-num">4 </span>Introduction to pivot table</a></span><ul class="toc-item"><li><span><a href="#Training" data-toc-modified-id="Training-4.1"><span class="toc-item-num">4.1 </span>Training</a></span></li></ul></li><li><span><a href="#Remove-missing-values" data-toc-modified-id="Remove-missing-values-5"><span class="toc-item-num">5 </span>Remove missing values</a></span></li><li><span><a href="#Iloc-to-access-rows" data-toc-modified-id="Iloc-to-access-rows-6"><span class="toc-item-num">6 </span>Iloc to access rows</a></span></li><li><span><a href="#Column-indexes" data-toc-modified-id="Column-indexes-7"><span class="toc-item-num">7 </span>Column indexes</a></span></li><li><span><a href="#Re-index-rows-in-a-dataframe" data-toc-modified-id="Re-index-rows-in-a-dataframe-8"><span class="toc-item-num">8 </span>Re-index rows in a dataframe</a></span></li><li><span><a href="#Apply-functions-to-a-dataframe" data-toc-modified-id="Apply-functions-to-a-dataframe-9"><span class="toc-item-num">9 </span>Apply functions to a dataframe</a></span><ul class="toc-item"><li><span><a href="#Training" data-toc-modified-id="Training-9.1"><span class="toc-item-num">9.1 
</span>Training</a></span></li></ul></li><li><span><a href="#Practice:-compute-the-percentage-of-survival-by-classe-group" data-toc-modified-id="Practice:-compute-the-percentage-of-survival-by-classe-group-10"><span class="toc-item-num">10 </span>Practice: compute the percentage of survival by classe group</a></span></li></ul></div>
# -
# ## Introduction to dataset
# In this chapter, we will clean and analyze the data of a dataset containing the ``survivors of titanic``.
import pandas as pd
titanic_survival = pd.read_csv("titanic_survival.csv")
titanic_survival.head()
# We will proceed to a brief presentation of some columns. The column ``pclass`` is the class of the passenger cabin which goes from 1 to 3 knowing that 1 is the highest class. The column ``survived`` represents the survival of a passenger. It takes the value 1 if the passenger survived and 0 otherwise. The ``fare`` column represents the amount paid by the passenger for the boarding ticket. The column ``embarked`` represents the place of boarding of the passenger; it takes 3 values: C, Q and S. In this dataset, it should be noted that many columns like ``age`` and ``sex`` have missing values. These ``missing values`` can cause numerical errors in our calculations, so we have to handle them before starting the analyses. It is therefore important to learn how to handle missing values, and this is what we will do throughout this chapter.
# ## Find missing values
# In this section, we will proceed to the ``discovery of missing values``. As a reminder, there are several types of missing values. There are values of type ``None`` which indicates no value, there is also the value ``Nan`` which means ``not a number`` which indicates a missing value. In general, we can consider None and Nan as ``null values``. In pandas, there is a function that allows to see which values are null or none, it is the ``isnull()`` function.
# example with sex column
sex = titanic_survival["sex"]
sex_is_null = pd.isnull(sex)
sex_is_null
sex_null = sex[sex_is_null]
sex_null
# example with age column
age_null = titanic_survival["age"][pd.isnull(titanic_survival.age)]
age_null
# We can clearly see that our age column has 264 missing values that we can recheck using the ``len()`` function.
count_age_null = len(age_null)
count_age_null
# ## Problems with missing values
# We have seen previously that there are columns with missing values. In this section, we will show what ``problems`` these missing values ``cause``.
# example of problem
mean_age = sum(titanic_survival["age"]) / len(titanic_survival["age"])
mean_age
# This example above simply illustrates that a computation containing a missing value will return a missing value. So it is necessary to ``filter`` out the missing values before proceeding with the computations.
# filter the nan values
age_not_null = titanic_survival["age"][pd.isnull(
titanic_survival["age"]) == False]
age_not_null
# compute the mean of age column without nan values
mean_age = sum(age_not_null) / len(age_not_null)
mean_age
# For ``information``, there is a method of pandas to compute more simply an average directly on a column. This function which the ``mean()`` method ignores the missing values and computes directly the average of a numerical series.
# example with age column
mean_age = titanic_survival["age"].mean()
mean_age
# ### Training
# In this practice, we will try to answer the following questions:
#
# * create an empty dictionary that we will name fares_by_class
# * create the list passenger_classes which contains the elements [1,2,3]
# * use a for loop to browse the passenger_classes list:
# * select just the rows of titanic_survival for which the column pclass is equal to the temporary variable (the iterator) of the for loop, i.e. corresponding to the class number (1, 2 or 3)
# * select only the fare column for this subset of rows (corresponding to the class)
# * use the series.mean() method to calculate the average of this subset
# * add this calculated average of the class to the fares_by_class dictionary with the class number as key (and thus the average fare as value)
# * once the loop is completed, the dictionary fares_by_class should have 1,2 and 3 as keys with the corresponding average values
# * display the result
# +
# Average ticket fare per passenger class, keyed by the class number (1-3).
fares_by_class = {}
passenger_classes = [1, 2, 3]
for travel_class in passenger_classes:
    class_fares = titanic_survival.loc[titanic_survival["pclass"] == travel_class, "fare"]
    fares_by_class[travel_class] = class_fares.mean()
# -
fares_by_class
# ## Introduction to pivot table
# In this section we will look at pivot tables which are a way of creating a subset from a column and performing computations, for example calculating an average, etc. The idea of pivot tables is to group and then apply a function. The ``pivot_table()`` method of pandas allows to perform these operations. This following example shows how to perform our previous task just by using the ``pivot_table()`` method.
import numpy as np
fares_by_pclass = titanic_survival.pivot_table(index="pclass",
values="fare",
aggfunc=np.mean)
fares_by_pclass
# The first parameter ``index`` indicates the column you want to group. The second parameter ``values`` indicates the column on which we want to apply a function (sum, average, etc). And the last parameter ``aggfunc`` indicates the function we want to apply on the ``values`` parameter. With this same method ``pivot_table()``, we can also perform calculations on several columns.
# ### Training
# In this practice, we will try to answer the following questions:
#
# * make a table pivot that calculates the total money collected ("fare") and the total number of survivors ("survived") for each embarked port ("embarked") using the numpy.sum function
# * assign the result to the variable port_stats
# * display the result
port_stats = titanic_survival.pivot_table(index="embarked",
values=["fare", "survived"],
aggfunc=np.sum)
port_stats
# ## Remove missing values
# To ``remove`` missing values directly from a dataframe, we use the ``dropna()`` method. This method allows to ``delete`` all rows or columns with at least one missing value.
drop_na_rows = titanic_survival.dropna(axis=0)
drop_na_rows
drop_na_columns = titanic_survival.dropna(axis=1)
drop_na_columns
# These examples above show that all rows or columns in our dataset have at least one missing value. It is also possible to remove missing values by specifying a ``subset of variables`` using the ``subset`` parameter.
titanic_survival.shape
name_drop_na_rows = titanic_survival.dropna(axis=0, subset=["name"])
name_drop_na_rows.shape
# This last example says that there is a row that has at least one missing value for the ``name`` column.
# ## Iloc to access rows
# In this section we will see the ``iloc`` method for accessing rows. More advanced than the ``loc`` method, the ``iloc`` method allows to display the elements according to the position where it is located. This method also allows to do other tasks much more advanced than the ``loc`` method.
unordered_titanic_survival = titanic_survival.sort_values(
by="age", ascending=False)
unordered_titanic_survival.head()
unordered_titanic_survival.loc[0]
unordered_titanic_survival.iloc[0]
unordered_titanic_survival.loc[0:5]
unordered_titanic_survival.iloc[0:5]
# The above example clearly shows that the ``iloc`` method does not take into account the order of the elements on the dataframe but only focuses on the positions of the elements in the dataframe. So when you are slicing a dataframe, you should use the ``iloc`` method because slicing using the ``loc`` method assumes that the index of our dataframe is sorted in ascending order. And on the other hand, if you want to access elements, you should use the ``loc`` method if you are accessing by index number, otherwise you should use the ``iloc`` method if you are accessing by the position of the element in the dataframe.
# ## Column indexes
# As we have seen in the previous chapters, the indexing of columns is done with the ``loc`` and ``iloc`` methods. With the ``loc`` method, we use the name of the column, and for the ``iloc`` method, we use an integer that corresponds to the positions of the column in the dataframe.
# example
unordered_titanic_survival.head()
unordered_titanic_survival.iloc[0, 2]
unordered_titanic_survival.loc[14, "name"]
# ## Re-index rows in a dataframe
# In this section, we will see how to re-index the rows of a dataframe. We have seen that sorting changes the order of a dataframe but keeps the indexes of each row. However, it can sometimes be useful to re-index a dataframe by starting the indexes at 0. To do this, we use the ``reset_index()`` method of pandas.
# example
unordered_titanic_survival.reset_index().head()
# In the example above, we know that our dataframe re-indexed has kept the old index, to remove it, we can use the ``drop`` parameter which will take the value ``True``.
unordered_titanic_survival.reset_index(drop=True).head()
# ## Apply functions to a dataframe
# In this section, we will see how to apply functions on a dataframe. To do so, we will learn the ``apply()`` method. By default, this method applies a function on each column or row of the dataframe.
# example 1
fare = titanic_survival["fare"]
fare
squared_fare = fare.apply(lambda x: x**2)
squared_fare
# By changing the ``axis (0 by default)`` parameter of the ``apply()`` method, we can also apply a function on the rows of the dataframe by setting the parameter to ``1``.
# example 2
def is_minor(row):
    """Return True when the passenger on this row is under 18 years old.

    Missing ages compare False against 18, so they are treated as not minor.
    """
    return bool(row["age"] < 18)
minors = titanic_survival.apply(is_minor, axis=1)
minors
# example 3
def which_class(row):
    """Map a row's ``pclass`` value to a human-readable class label.

    Missing values are labelled "Unknown"; any class other than 1 or 2
    falls through to "Third Class".
    """
    pclass = row["pclass"]
    if pd.isnull(pclass):
        return "Unknown"
    labels = {1: "First Class", 2: "Second Class"}
    return labels.get(pclass, "Third Class")
classes = titanic_survival.apply(which_class, axis=1)
classes
# ### Training
# In this practice, we will try to answer the following questions:
#
# * write a function that counts the number of missing values of a series object
# * use the DataFrame.apply() method to apply your function to titanic_survival
# * assign the result to the column_null_value_count variable
# * display the result
def null_value_count(column):
    """Return the number of missing (null) values in a pandas Series."""
    # isnull() yields a boolean mask; its sum is the count of nulls
    return int(pd.isnull(column).sum())
column_null_value_count = titanic_survival.apply(null_value_count)
column_null_value_count
# All of the above code could be simplified to a single line of code by using the methods already predefined on pandas namely ``isnull()`` and ``sum()`` which determine respectively the rows with null values and the sum of these null values as shown in the following code.
titanic_survival.isnull().sum()
# ## Practice: compute the percentage of survival by classe group
# In this practical case, we will apply a pivot table to compute the percentage of survival by classe group. So, we will try to answer the following questions:
#
# * add the column "classe_labels" to the dataframe titanic_survival containing the variable classes that we created in the previous example 3
# * create a pivot table that calculates the average chance of survival (column "survived") for each class (column "classes") of the dataframe titanic_survival
# * assign the resulting series object to the classe_group_survival variable
# * display the result
titanic_survival["classe_labels"] = classes
classe_group_survival = titanic_survival.pivot_table(
index="classe_labels", values="survived")
classe_group_survival
| courses/5. Handling missing values in Python.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/danieljaouen/DS-Unit-2-Regression-Classification/blob/master/module2/assignment_regression_classification_2.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + [markdown] id="7IXUfiQ2UKj6" colab_type="text"
# Lambda School Data Science, Unit 2: Predictive Modeling
#
# # Regression & Classification, Module 2
#
# ## Assignment
#
# You'll continue to **predict how much it costs to rent an apartment in NYC,** using the dataset from renthop.com.
#
# - [ ] Do train/test split. Use data from April & May 2016 to train. Use data from June 2016 to test.
# - [ ] Engineer at least two new features. (See below for explanation & ideas.)
# - [ ] Fit a linear regression model with at least two features.
# - [ ] Get the model's coefficients and intercept.
# - [ ] Get regression metrics RMSE, MAE, and $R^2$, for both the train and test data.
# - [ ] What's the best test MAE you can get? Share your score and features used with your cohort on Slack!
# - [ ] As always, commit your notebook to your fork of the GitHub repo.
#
#
# #### [Feature Engineering](https://en.wikipedia.org/wiki/Feature_engineering)
#
# > "Some machine learning projects succeed and some fail. What makes the difference? Easily the most important factor is the features used." — <NAME>, ["A Few Useful Things to Know about Machine Learning"](https://homes.cs.washington.edu/~pedrod/papers/cacm12.pdf)
#
# > "Coming up with features is difficult, time-consuming, requires expert knowledge. 'Applied machine learning' is basically feature engineering." — <NAME>, [Machine Learning and AI via Brain simulations](https://forum.stanford.edu/events/2011/2011slides/plenary/2011plenaryNg.pdf)
#
# > Feature engineering is the process of using domain knowledge of the data to create features that make machine learning algorithms work.
#
# #### Feature Ideas
# - Does the apartment have a description?
# - How long is the description?
# - How many total perks does each apartment have?
# - Are cats _or_ dogs allowed?
# - Are cats _and_ dogs allowed?
# - Total number of rooms (beds + baths)
# - Ratio of beds to baths
# - What's the neighborhood, based on address or latitude & longitude?
#
# ## Stretch Goals
# - [ ] If you want more math, skim [_An Introduction to Statistical Learning_](http://faculty.marshall.usc.edu/gareth-james/ISL/ISLR%20Seventh%20Printing.pdf), Chapter 3.1, Simple Linear Regression, & Chapter 3.2, Multiple Linear Regression
# - [ ] If you want more introduction, watch [<NAME>, Statistics 101: Simple Linear Regression](https://www.youtube.com/watch?v=ZkjP5RJLQF4)
# (20 minutes, over 1 million views)
# - [ ] Do the [Plotly Dash](https://dash.plot.ly/) Tutorial, Parts 1 & 2.
# - [ ] Add your own stretch goal(s) !
# + id="o9eSnDYhUGD7" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 1000} outputId="f25ee7d5-cd42-4c52-9078-f1be3e22bc34"
# If you're in Colab...
import os, sys
in_colab = 'google.colab' in sys.modules
if in_colab:
# Install required python packages:
# pandas-profiling, version >= 2.0
# plotly, version >= 4.0
# !pip install --upgrade pandas-profiling plotly
# Pull files from Github repo
os.chdir('/content')
# !git init .
# !git remote add origin https://github.com/LambdaSchool/DS-Unit-2-Regression-Classification.git
# !git pull origin master
# Change into directory for module
os.chdir('module1')
# + id="ipBYS77PUwNR" colab_type="code" colab={}
# Ignore this Numpy warning when using Plotly Express:
# FutureWarning: Method .ptp is deprecated and will be removed in a future version. Use numpy.ptp instead.
import warnings
warnings.filterwarnings(action='ignore', category=FutureWarning, module='numpy')
# + id="cvrw-T3bZOuW" colab_type="code" colab={}
import numpy as np
import pandas as pd
# Read New York City apartment rental listing data
df = pd.read_csv('../data/renthop-nyc.csv')
assert df.shape == (49352, 34)
# Remove the most extreme 1% prices,
# the most extreme .1% latitudes, &
# the most extreme .1% longitudes
df = df[(df['price'] >= np.percentile(df['price'], 0.5)) &
(df['price'] <= np.percentile(df['price'], 99.5)) &
(df['latitude'] >= np.percentile(df['latitude'], 0.05)) &
(df['latitude'] < np.percentile(df['latitude'], 99.95)) &
(df['longitude'] >= np.percentile(df['longitude'], 0.05)) &
(df['longitude'] <= np.percentile(df['longitude'], 99.95))]
# + id="7r2-ira6GrOc" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 513} outputId="51ea7fb7-e68e-4235-b430-710bb3d22ab7"
df.head()
# + id="02FSrn09GtR0" colab_type="code" colab={}
train = df[(df['created'] >= '2016-04-01') & (df['created'] <= '2016-05-31')]
# + id="YETznim4HO-n" colab_type="code" colab={}
test = df[(df['created'] >= '2016-06-01') & (df['created'] <= '2016-06-30')]
# + id="fA9jnD9RHZ31" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 513} outputId="59bdb9a4-6fe6-415f-db6a-ba6f1eb19f2c"
train.head()
# + id="8I3UZ6U8Ha9l" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 513} outputId="b3ffad0f-4689-4554-d4d5-f33ddb736706"
test.head()
# + id="M4I1wmz1Hc4d" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="7088b00a-906f-4772-c544-0a0064d3f262"
# 1. Import the appropriate estimator class from Scikit-Learn
from sklearn.linear_model import LinearRegression
# 2. Instantiate this class
model = LinearRegression()
# 3. Arrange X features matrix & y target vector
features = ['bathrooms', 'bedrooms']
target = 'price'
X_train = train[features]
y_train = train[target]
# 4. Fit the model
model.fit(X_train, y_train)
# + id="MoTHYUqcH36k" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="30d201a9-0b9b-4ee3-ab96-af4f5d107e62"
model.intercept_
# + id="GkmYxjR-H9pz" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="c8dba110-6450-4a76-c414-6156dec5003b"
model.coef_
# + id="z2LXHXyCIlVH" colab_type="code" colab={}
y_train_pred = model.predict(X_train)
# + id="dcd8LJBvH-Xi" colab_type="code" colab={}
from sklearn.metrics import mean_absolute_error, mean_squared_error, r2_score
# + id="fjYaaDphIe6X" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="754968d6-4ed6-4f11-c303-dc8bc2751090"
mean_absolute_error(y_train, y_train_pred)
# + id="XXwiC327Iuru" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="44a44afc-9db3-4559-a1eb-1901e820ced0"
import math
math.sqrt(mean_squared_error(y_train, y_train_pred))
# + id="BwgtWmPoJ5ae" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="c131e1cd-89c4-4d58-e521-0fc619906f10"
r2_score(y_train, y_train_pred)
# + id="VnRld4GxIUcI" colab_type="code" colab={}
X_test = test[features]
y_test = test[target]
# + id="8PZsygtEKBrU" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="d4632d2d-6521-4bcc-b6c9-00654f7c0ae1"
model.fit(X_test, y_test)
# + id="d8twboAfKEOU" colab_type="code" colab={}
y_test_pred = model.predict(X_test)
# + id="QkHe7SNLKHgj" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="ed4e08b9-42d7-46ae-fbaf-ebc3eb80f2f8"
mean_absolute_error(y_test, y_test_pred)
# + id="6if_1VahKLhL" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="82ecb747-0cc3-4c07-c815-196c063f4ba9"
math.sqrt(mean_squared_error(y_test, y_test_pred))
# + id="VGZg5yNMKQO7" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="055362bc-4235-489a-ac21-628ce7a238e9"
r2_score(y_test, y_test_pred)
# + id="bg8mHJzRKVEj" colab_type="code" colab={}
| module2/assignment_regression_classification_2.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
from dataset import transformers
from dataset.datasets import EscData
import numpy as np
import matplotlib.pyplot as plt
import librosa
import wave
from torchvision.transforms import Compose
from os import path
from pathlib import Path
import torch
# +
# data path
path_to_data = path.expanduser("~/data/esc/audio10")
path_to_meta = path.expanduser("~/data/esc/meta/esc10.csv")
sr = 22050
duration = 5
pad_to = 5
transform = Compose([transformers.AudioRead(sr=sr, duration=duration), transformers.PadAudio(sr=sr, pad_to=pad_to)])
original_esc_data = EscData(path_to_data, path_to_meta, folds=[1,2,3,4,5], transform=transform)
# + jupyter={"outputs_hidden": true} tags=[]
# Collect the per-clip extremes of the raw audio signal
# (the audio array is element 2 of each dataset sample).
max_vals = []
min_vals = []
for sample_idx in range(len(original_esc_data)):
    audio = original_esc_data[sample_idx][2]
    max_vals.append(np.max(audio))
    min_vals.append(np.min(audio))
max_vals, min_vals
# -
# max: 1.3134922, min: -1.2824932
np.max(max_vals), np.min(min_vals)
| dev_rawaudio.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # web scraping RIP.ie with beautifulsoup
# +
import time
import pandas as pd
from bs4 import BeautifulSoup
import requests
import datefinder
def get_dn_page(n):
    """Get death notice text from the rip.ie page matching the id number.

    Returns a ``(name, date, county, address, text)`` tuple. Fields that are
    absent from the page default to the empty string; ``date`` is a datetime
    when one can be parsed, else ''.
    """
    url = 'https://rip.ie/showdn.php?dn=%s' % n
    req = requests.get(url)
    soup = BeautifulSoup(req.content, 'html.parser')
    # the page title holds the deceased's name after one of two known prefixes
    if soup.title is None:
        title = ''
    else:
        title = soup.title.text.strip()
    name = ''
    for s in ['Death Notice of ', 'The death has occurred of ']:
        if title.startswith(s):
            name = title.split(s)[1]
    elem = soup.find_all("div", id="dn_photo_and_text")
    if len(elem) == 0:
        # no notice body on this page at all
        return name, '', '', '', ''
    rows = elem[0].find_all('p')
    if len(rows) == 0:
        # some notices use table cells instead of paragraphs
        rows = elem[0].find_all('td')
    text = ';'.join([r.text.strip() for r in rows]).replace('\n', '')
    # address
    addrelem = soup.find("span", class_='small_addr')
    if addrelem is not None:
        address = addrelem.text.strip()
    else:
        address = ''
    # county
    ctyelem = soup.find("li", class_='fd_county')
    if ctyelem is not None:
        county = ctyelem.text.strip()
    else:
        county = ''
    # date: prefer the date of death, fall back to the publication date
    dateelem = soup.find("div", class_='ddeath')
    if dateelem is None:
        dateelem = soup.find("div", class_='dpubl')
    s = dateelem.text.strip()
    try:
        # find_dates returns an iterator; [0] raises IndexError when empty,
        # and datefinder itself can raise on malformed strings
        date = list(datefinder.find_dates(s))[0]
    except Exception:
        date = ''
    print(n, date, name, address, county)
    return name, date, county, address, text
# -
get_dn_page(390045)
#df = pd.read_csv('rip_dn_scrape.csv')
df = pd.read_pickle('rip_dn_scrape.pkl')
len(df)
# ## iterate over a range of ids and get info
# +
#read current table in so we skip those already done
df = pd.read_pickle('rip_dn_scrape.pkl')
print (len(df))
ids = list(df.id)
results={}
for n in range(486000,488515):
if n in ids:
continue
name,date,cty,addr,txt = get_dn_page(n)
if name == '':
continue
results[n] = [name,date,cty,addr,txt]
time.sleep(0.05)
# -
res = pd.DataFrame.from_dict(results,orient='index',columns=['name','date','county','address','notice']).reset_index()
res = res.rename(columns={'index':'id'})
res
new = pd.concat([df,res]).reset_index(drop=True)
new=new[~new.id.duplicated(keep='first')]
print (len(df),len(res),len(new))
new.to_pickle('rip_dn_scrape.pkl')
# +
#x.to_csv('rip_dn_scrape.csv')
# -
# ## clean data
x=new
print (len(x))
x=x.replace('',None).dropna(axis=0,subset=['date'])
x['date'] = pd.to_datetime(x['date']).apply(lambda x: x.strftime('%d/%m/%Y'))
x=x.drop_duplicates(['name','notice'])
x=x.drop_duplicates(['name','address'])
x=x.drop_duplicates(['name','date','county'])
x = x[~x.address.isnull()]
nc = ['Fermanagh','Armagh','Tyrone','Down','Antrim','Derry']
x = x[~x.county.isin(nc)]
x = x[~x.address.str.contains('|'.join(nc))]
x=x.sort_values('id')
print (len(x))
#x.to_csv('rip_dn_scrape_processed.csv')
x.to_pickle('rip_dn_scrape_processed.pkl')
| ireland_deaths/scraping_rip.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .r
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: R
# name: ir
# ---
# + [markdown] colab_type="text" id="view-in-github"
# <a href="https://colab.research.google.com/github/ClaudiuPapasteri/Google-colab/blob/main/PA4/PA4_script_surveymonkey.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + [markdown] id="AtxyCnlCcAi1"
# # **PA4**
#
# ## ***Screening form***
#
# <NAME>
#
# <i>Native R Google Colab Template</i>
#
# [My Github notebooks](https://github.com/ClaudiuPapasteri/notebooks)
# + [markdown] id="Jcsu76adARUv"
# ---
# + [markdown] id="1M8Wrd50Zyfr"
# ## Install & load packages
# + colab={"base_uri": "https://localhost:8080/", "height": 706} id="tg4XUaNygDBp" outputId="5322a6d8-f358-47aa-cf80-cef310670a61"
options(rlang_interactive = TRUE) # needed to mount Google Drive
load.libs <-
c(
"tidyverse", "stringr",
"googledrive", "googlesheets4"
)
install.libs <- load.libs[!load.libs %in% installed.packages()]
for(libs in install.libs) install.packages(libs, dependences = TRUE)
sapply(load.libs, require, character = TRUE)
rm(list = ls())
# + [markdown] id="zScZN2RLed8P"
# ---
# + [markdown] id="_YlmC7m8HSLz"
# # Mount Google Drive
# + colab={"base_uri": "https://localhost:8080/", "height": 313} id="FuY8vyMbHXNX" outputId="3cb0266b-2bff-49e5-e875-56ae4632f8d0"
# Mount Google Drive in an R kernel
library("googledrive")
library("googlesheets4")
# Check if is running in Colab and redefine is_interactive()
if (file.exists("/usr/local/lib/python3.7/dist-packages/google/colab/_ipython.py")) {
suppressMessages({
install.packages("R.utils")
library(R.utils)
library(httr)
library(rlang)
})
my_check <- function() {return(TRUE)}
reassignInPackage("is_interactive", pkgName = "httr", my_check)
reassignInPackage("is_interactive", pkgName = "rlang", my_check)
}
# Call Google Drive authentication forcing interactive login and save in cache
googledrive::drive_auth(use_oob = TRUE, cache = TRUE)
# Reuse token for Sheets authentication
googlesheets4::gs4_auth(token = drive_token())
# Load data from Google Sheets
sheet_url <- 'https://docs.google.com/spreadsheets/d/1mPpvLKHVQ5G-B2frnssEJSoSv-mrFPF1yhUdxh8QC1k/edit?usp=sharing'
Data <- googlesheets4::range_read(ss = sheet_url,
sheet = 3)
# + [markdown] id="mCzNSnaewzO-"
# # Run code from Github
# + colab={"base_uri": "https://localhost:8080/", "height": 1000} id="LNP7YSjTw4L1" outputId="ecf44389-8fa4-491e-a897-fcfc0c9f86bd"
# Define function to source in global env from github url (?raw method) with encoding
source_github_encod <- function(url, encoding = 'UTF-8') {
# Source an R script from a GitHub blob URL into the global environment.
# Appending "?raw=TRUE" makes GitHub serve the raw file rather than the HTML page.
raw <- paste(url, "?raw=TRUE", sep = "", collapse = "")
l <- readLines(raw, encoding = encoding)
# Parse and evaluate in .GlobalEnv so the sourced objects persist for the session.
eval(parse(text = l), envir = .GlobalEnv)
} #devtools::source_url(x, encoding = 'UTF-8') # doesn't work every time
source_github_encod("https://github.com/ClaudiuPapasteri/Google-colab/blob/main/PA4/script_surveymonkey-colab-github_code.R")
# outputs: Data, df_screening
# + [markdown] id="p2_-ognh3S9z"
# # Save data to Google Sheet
# + colab={"base_uri": "https://localhost:8080/"} id="22F-OD873bFj" outputId="80fe31e9-9f7c-4da9-9243-c9aae82eaf62"
# Reuse token for Sheets authentication
googlesheets4::gs4_auth(token = drive_token())
# Data for export
data_output <- df_screening
# Name Sheet that will be created
gsheet_name <- paste ("PA4_Screening", Sys.time(), sep = " ")
# Create Sheet in Google Drive
if(googlesheets4::gs4_has_token()) {
gsheet_obj <- googlesheets4::gs4_create(
name = gsheet_name,
sheets = list(Sheet1 = data_output) # Data goes in here
)
}
# Print a Sheet URL
googlesheets4::gs4_get(googlesheets4::gs4_find(gsheet_name))$spreadsheet_url %>%
httr::BROWSE()
# + [markdown] id="MZrQdc7P_S1J"
# ## Delete Sheet from Google Drive
# + colab={"base_uri": "https://localhost:8080/"} id="n0W13Kch_Y9u" outputId="28b9e359-a06e-4d97-bd6a-c1e8c0da9d49"
# Delete Sheet form Google Drive
googledrive::drive_trash(gsheet_obj) # good for removing in same session (obj still in memory)
# googlesheets4::gs4_find(gsheet_name) %>% # good for removing in different session
# googledrive::drive_trash()
# + [markdown] id="SqtJXKlD1jnu"
# ---
# + [markdown] id="uVqcvAzr95fd"
# ## Sesion Info
#
#
# ```
# # Info formatted as code
# ```
# + colab={"base_uri": "https://localhost:8080/", "height": 50} id="RiYqSKT3bpCg" outputId="ada4b530-9087-48ef-f65c-94937867b5ca"
Sys.info()
| PA4/PA4_script_surveymonkey.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # SLU14 - k-Nearest Neighbors (kNN)
from sklearn import datasets
from sklearn.neighbors import KNeighborsClassifier, KNeighborsRegressor
from sklearn.model_selection import train_test_split
from sklearn.metrics import accuracy_score, mean_squared_error
from scipy.spatial.distance import cosine as cos_dist
# ### Classification with kNN in the iris dataset
# Loading the dataset and doing a train test split
iris = datasets.load_iris()
X_train, X_test, y_train, y_test = train_test_split(iris.data, iris.target, test_size=0.33, random_state=42)
# Use kNN with k=5 as classification model, evaluating it with accuracy score
clf = KNeighborsClassifier(n_neighbors=5)
clf.fit(X_train, y_train)
y_pred = clf.predict(X_test)
accuracy_score(y_test, y_pred)
# ### Regression with kNN in the diabetes dataset
# Loading the dataset and doing a train test split
diabetes = datasets.load_diabetes()
X_train, X_test, y_train, y_test = train_test_split(diabetes.data, diabetes.target, test_size=0.33, random_state=42)
# Use kNN with k=5 and cosine distance as regression model, evaluating it with mean squared error
reg = KNeighborsRegressor(n_neighbors=5, metric=cos_dist)
reg.fit(X_train, y_train)
y_pred = reg.predict(X_test)
mean_squared_error(y_test, y_pred)
| stats-279/SLU14 - k-Nearest Neighbors/Examples notebook.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: env
# language: python
# name: env
# ---
# # 15.077: Problem Set 4
# <NAME> (aberke)
#
# From
#
# <NAME>., Mathematical Statistics and Data Analysis (with CD Data Sets), 3rd ed., Duxbury, 2007 (ISBN 978-0-534-39942-9).
#
# <NAME>., <NAME>., and <NAME>., The Elements of Statistical Learning: Data Mining, Inference and Prediction, Springer, 2nd ed., 2009 (ISBN 978-0-387-84857-0). https://web.stanford.edu/~hastie/ElemStatLearn/
# +
# %config Completer.use_jedi = False # autocomplete
import math
import re
import numpy as np
import pandas as pd
import scipy.special
from scipy import stats
import matplotlib.pyplot as plt
# %matplotlib inline
# %config InlineBackend.figure_format = 'retina'
# -
# ## Problems
# ### 1. Rice 12.29: The performance of a semiconductor depends upon the thickness of a layer of silicon dioxide. In an experiment (Czitrom and Reece, 1997), layer thicknesses were measured at three furnace locations for three types of wafers (virgin wafers, recycled in-house wafers, and recycled wafers from an external source). The data are contained in the file waferlayers. Conduct a two-way analysis of variance and test for significance of main effects and interactions. Construct a graph such as that shown in Figure 12.3. Does the comparison of layer thicknesses depend on furnace location?
#
waferlayers = pd.read_csv('waferlayers.txt').apply(lambda s: s.replace("'", ""), axis=1)
waferlayers.columns = [c.replace("'", "") for c in waferlayers.columns]
waferlayers['Furnace'] = waferlayers['Furnace'].str.replace("'", "")
waferlayers['Wafer Type'] = waferlayers['Wafer Type'].str.replace("'", "")
waferlayers
# +
fig, ax = plt.subplots(1,1,figsize=(5,5))
wafer_types = waferlayers['Wafer Type'].unique()
for wt in wafer_types:
wafers = waferlayers[waferlayers['Wafer Type']==wt]
wafers_mean = wafers.groupby('Furnace').mean().reset_index()
ax.scatter(wafers['Furnace'], wafers['Thickness'], label=wt, alpha=0.8)
ax.plot(wafers_mean['Furnace'], wafers_mean['Thickness'])
ax.set_ylabel('Thickness')
ax.set_xlabel('Furnace')
_ = ax.legend()
# -
# #### Plot
# In the plot, points show point thickness values for wafer types and the lines show the mean thickness values for those wafer types.
#
# #### Two-way ANOVA test for significance of main effects and interactions
#
# Does the comparison of layer thicknesses depend on furnace location?
#
# We would like to use normal theory and must check the assumptions that the errors or standard deviations are independently distributed from the means.
# This assumption holds, as shown below.
#
# The following Two-way ANOVA test shows that the main effects are not statistically significant but that there is a statistically significant interaction (p < 0.05) between Furnace and Wafer Type.
w_grouped = waferlayers.groupby(['Furnace','Wafer Type']).agg(['mean','std'])
plt.scatter(w_grouped[('Thickness', 'mean')],w_grouped[('Thickness', 'std')])
plt.xlabel('Mean thickness')
plt.ylabel('Standard deviation')
w_grouped
# +
# 2-way ANOVA
import statsmodels.api as sm
from statsmodels.formula.api import ols
waferlayers = waferlayers.rename(columns={'Wafer Type': 'WaferType'}) # Can't handle space in variable name
model = ols('Thickness ~ C(Furnace) + C(WaferType) + C(Furnace):C(WaferType)', data=waferlayers).fit()
sm.stats.anova_lm(model, typ=2)
# -
# ### 2. An experiment was conducted using an unreplicated $2^4$ factorial design to determine the effects of the amount of glue (x1), predrying temperature (x2), tunnel temperature (x3), and pressure (x4) on the adhesive force obtained in an adhesive process.
df = pd.DataFrame({
'x1':[-1,1]*8,
'x2':[-1,-1,1,1]*4,
'x3':(([-1] * 4) + ([1] * 4)) * 2,
'x4':[-1 * (1 if i < 8 else -1) for i in range(16)],
'y': [3.8, 4.34, 3.54, 4.59, 3.95, 4.83, 4.86, 5.28, 3.29, 2.82, 4.59, 4.68, 2.73, 4.31, 5.16, 6.06],
})
df
# ### a. Estimate all the main effects and interactions.
#
# The effects can be estimated by estimating a linear model estimated with all interactions.
#
# The effects are then the parameters (except the intercept) x 2.
model = ols('y ~ x1 * x2 * x3 * x4', data=df).fit()
# model.summary()
effects = 2 * model.params.drop('Intercept')
print('main effects and interactions:')
print(effects)
# #### b. Plot the estimated effects on a normal probability plot.
fig, ax = plt.subplots(1, 1, figsize=(5,5))
stats.probplot(effects.values, plot=ax)
_ = ax.set_title('Normal probability plot: Estimated effects')
# #### c. Interpret your results.
#
# The estimated effects follow more of an S-shaped distribution than a normal distribution. There is evidence to further explore the significance of the effects and to reject the null hypothesis.
# ## 3. A $2^{8-4}$ fractional factorial design was run to identify sources of plutonium contamination in the radioactivity material analysis of dried shellfish at the National Institute of Standards and Technology (NIST). The data are in the table below. No contamination occurred at runs 1,4, and 9. Questions related to this problem are below the table.
y = [
0,
3.31,
0.0373,
0,
.0649,
0.133,
0.0461,
0.0297,
0,
0.287,
0.133,
0.0476,
0.133,
5.75,
0.0153,
2.47,
]
df = pd.DataFrame({
'run': list(range(1, 16+1)),
'x1': [-1,1]*8,
'x2': [-1,-1,1,1]*4,
'x3': ([-1] * 4 + [1] * 4) * 2,
'x4': [-1 if i < 8 else 1 for i in range(16)],
'x5': [-1]*2 + [1]*4 + [-1]*2 + [1]*2 + [-1]*4 + [1]*2,
'x6': [-1,1]*2 + [1,-1]*4 + [-1,1]*2,
'x7': [-1,1,1,-1,1,-1,-1,1,-1,1,1,-1,1,-1,-1,1],
'x8': [-1,1,1,-1,-1,1,1,-1,1,-1,-1,1,1,-1,-1,1],
'y': y,
}).set_index('run')
df
# ### (a) Write down the alias relationships.
#
# x1:x2:x3 = x7
#
# x1:x2:x4 = x8
#
# x1:x3:x4 = x6
#
# x2:x3:x4 = x5
#
# How / Why that answer:
#
# Let’s assume the experiment was designed to optimize for the best possible resolution, using interactions between variables as aliases for other variables in order to estimate main effects
#
# There are 8 variables and 16 runs.
# The best possible resolution is 4 (IIII).
#
# “Design resolution refers to the length (number of letters or variables) in the smallest defining or “generalized” interaction”
#
# Therefore we would expect combinations of 3 elements from x1, x2, x3, x4 to be aliased with 1 of x5, x6, x7, x8.
#
# The below code checks combinations of three elements from x1, x2, x3, x4 and compares with x5, x6, x7, x8.
#
#
# +
from itertools import combinations

resolution = 4
elts = list(df.columns[:4])
other_elts = list(df.columns[4:8])
# All 3-way interactions among x1..x4. combinations() yields tuples in the
# same lexicographic order the original hand-rolled nested loops produced.
alias_combos = list(combinations(elts, 3))
print('elements used to alias:', elts)
print('elements needing alias:', other_elts)
print('alias combinations:', alias_combos)
# -
for i, (a1, a2, a3) in enumerate(alias_combos):
for elt in other_elts:
if ((df[a1] * df[a2] * df[a3]) == df[elt]).all():
print('%s:%s:%s = %s' % (a1, a2, a3, elt))
# ### (b) Estimate the main effects.
#
# We use a model that estimates an intercept and coefficients for x1 to x8 and interactions of x1 with x2 to x8.
# Interactions between x1 and x2 to x8
model = ols(
'y ~ x1 + x2 + x3 + x4 + x5 + x6 + x7 + x8 + x1:x2 + x1:x3 + x1:x4 + x1:x5 + x1:x6 + x1:x7 + x1:x8',
data=df).fit()
# model.summary()
main_effects = 2 * model.params[model.params.index.map(lambda x: not ':' in x)].drop('Intercept')
print('main effects:')
main_effects
# ### (c) Prepare a normal probability plot for the effects and interpret the results.
#
# As shown below, the estimated main effects do not strongly deviate from the normal distribution, showing little evidence that the main effects are statistically significant.
fig, ax = plt.subplots(1, 1, figsize=(5,5))
stats.probplot(main_effects.values, plot=ax)
_ = ax.set_title('Normal probability plot: Estimated main effects')
# ## This dataset is from an experiment to investigate inorganic impurities (ash) in paper. Two variables, temperature T in degrees Celsius and time t in hours, were studied. The coded predictor variables shown in the following table are:
#
# x1 = (T −775)/115
#
# x2 = (t −3)/1.5
#
# and the response y is impurity percentage times $10^3$.
df = pd.DataFrame({
'x1':[-1,1,-1,1,-1.5,1.5] + [0]*6,
'x2':[-1,-1,1,1,0,0,-1.5,1.5] + [0]*4,
'y':[211,92,216,99,222,48, 168, 179,122,175,157,146],
})
df
# ### (a) What type of design has been used in this study? Can the design be rotated?
#
# This study uses a central composite design (CCD), with axial points where w = 1.5.
#
# There are 4 point center runs with x1 = x2 = 0.
#
# For the design to be rotatable, we need w = $n_f^{0.25}$ where $n_f$ is the number of factorial points.
#
# In this case, $n_f = 4$. $n_f^{0.25} = 4^{0.25} = 1.4$.
#
# The design is nearly rotatable but not quite.
4**(0.25)
# ### (b) Fit a quadratic model to the data. Is this model satisfactory?
#
# We fit a model with squared terms and interaction terms, defined below.
#
# $y = B_0 + B_1 * x1 + B_2 * x2 + B_3 * x1^2 + B_4 * x2^2 + B_5 * x1 * x2 $
#
# The model has a high R-squared value but only one term, x1, has a statistically significant value, and it is not a quadratic term.
#
# So the quadratic model is not so satisfactory.
model = ols('y ~ x1 * x2 + np.power(x1, 2) + np.power(x2, 2)', data=df).fit()
model.summary()
ols('y ~ x1', data=df).fit().summary()
# ### (c) If it is important to minimize the ash value, where would you run the process?
#
# Only the effect for x1 is statistically significant. Therefore I would run the process at the minimum x1 setting: T = 775 - 115 = 660
# ## 5. The copper content of a manufacturing process is measured three times per day and the results are reported as parts per million. The values for 25 days are given in the table below.
df = pd.DataFrame({
'sample': list(range(1,26)),
1: [5.1,5.7,6.31,6.83,5.42,7.03,6.57, 5.96,8.15,6.11,6.49,5.12,5.59,7.59,6.72,6.3,6.33,6.91,8.05,6.39,5.63,6.51,6.91,6.28,5.07],
2: [6.1,5.59,5,8.1,5.29,7.29,5.89,7.52,6.69,5.14,5.68,4.26,5.21,7.93,6.79,5.37,6.33,6.05,6.52,5.07,6.42,6.9,6.87,6.09,7.17],
3: [5.5,5.29,6.07,7.96,6.71,7.54,7.08,7.29,6.06,6.68,5.51,4.49,4.94,6.9,5.23,7.08,5.8,6.03,8.51,6.86,5.39,7.4,6.83,6.71,6.11],
}).set_index('sample')
# df
# ### (a) Using all the data, find trial control limits for $\bar{X}$ and R charts, construct the chart, and plot the data. Is the process in statistical control?
#
# The control limits are printed below.
#
# As shown in the plots, the process is not in statistical control based on these limits. The mean x values go wildly out of the mean control limits.
# add in mean column and range columns
# Add per-sample mean and range columns across the three daily measurements.
# Vectorized axis-1 reductions replace the row-wise apply(lambda ...) calls
# and give identical values.
df['mean'] = df.mean(axis=1)
# 'mean' is already a column at this point, but it always lies between the
# row's min and max, so including it leaves the computed range unchanged
# (matching the original row-wise computation exactly).
df['range'] = df.max(axis=1) - df.min(axis=1)
df
def get_control_limit_values(n):
    """Return the Shewhart control-chart constants (A2, D3, D4) for n samples.

    Values come from the standard control-chart constants table.
    Raises an Exception for sample counts not present in the table
    (same behavior as the original if/elif chain).
    """
    # n: (A2, D3, D4)
    table = {
        25: (0.153, 0.459, 1.541),
        24: (0.157, 0.452, 1.548),
        23: (0.162, 0.443, 1.557),
        22: (0.167, 0.434, 1.566),
        21: (0.173, 0.425, 1.575),
        20: (0.180, 0.414, 1.586),
        19: (0.187, 0.404, 1.596),
        18: (0.194, 0.392, 1.608),
        17: (0.203, 0.379, 1.621),
        16: (0.212, 0.364, 1.636),
        15: (0.223, 0.348, 1.652),
        # fixed: the n=14 branch previously repeated the n=17 constants
        # (0.203, 0.379, 1.621); the tabulated values for n=14 are below
        14: (0.235, 0.328, 1.672),
        13: (0.249, 0.308, 1.692),
        12: (0.266, 0.284, 1.716),
        11: (0.285, 0.256, 1.744),
        4: (0.729, 0, 2.282),
        3: (1.023, 0, 2.575),
    }
    try:
        return table[n]
    except KeyError:
        raise Exception('control limits not implemented for n = %s' % n)
# +
def get_control_limits(df):
    """Compute X-bar and R chart control limits for a table that has
    'mean' and 'range' columns (one row per sample).

    Returns (x_CL, x_UCL, x_LCL, r_bar, r_UCL, r_LCL).
    """
    num_samples = len(df)
    # look up the chart constants for this sample count
    (A2, D3, D4) = get_control_limit_values(num_samples)
    print('n = %s' % num_samples)
    print('----------------')
    print('constants used:')
    print('A2 = %s' % A2)
    print('D3 = %s' % D3)
    print('D4 = %s' % D4)
    print('----------------')
    grand_mean = df['mean'].mean()
    avg_range = df['range'].mean()
    # X-bar chart: center at the grand mean, limits at +/- A2 * R-bar
    x_upper = grand_mean + A2 * avg_range
    x_lower = grand_mean - A2 * avg_range
    # R chart: limits are multiples of R-bar
    r_upper = D4 * avg_range
    r_lower = D3 * avg_range
    return (grand_mean, x_upper, x_lower, avg_range, r_upper, r_lower)
# trial control limits computed from all 25 samples
(x_CL, x_UCL, x_LCL, r_bar, r_UCL, r_LCL) = get_control_limits(df)
print('x UCL = %s '% x_UCL)
print('x CL = %s '% x_CL)
print('x LCL = %s '% x_LCL)
print('R UCL = %s '% r_UCL)
print('R = %s '% r_bar)
print('R LCL = %s '% r_LCL)
# df
# -
def control_charts(df, x_CL, x_UCL, x_LCL, r_bar, r_UCL, r_LCL):
    """Draw stacked X-bar (sample means) and R (sample ranges) control
    charts, with center lines and control limits as gray horizontal lines.
    """
    fig, (mean_ax, range_ax) = plt.subplots(2, 1, sharex=True)
    # X-bar chart: means with center line, UCL, LCL
    mean_ax.plot(df['mean'], '-o', c='blue')
    for level in (x_CL, x_UCL, x_LCL):
        mean_ax.plot(df.index, [level] * len(df.index), c='gray')
    mean_ax.set_ylabel('means')
    # R chart: ranges with center line, UCL, LCL
    range_ax.plot(df['range'], '-o', c='blue')
    for level in (r_bar, r_UCL, r_LCL):
        range_ax.plot(df.index, [level] * len(df.index), c='gray')
    range_ax.set_ylabel('ranges')
    fig.suptitle('Control Charts')
    plt.show()
control_charts(df, x_CL, x_UCL, x_LCL, r_bar, r_UCL, r_LCL)
# ### (b) If necessary, revise the control limits computed in part (a), assuming any samples that plot outside the control limits can be eliminated. Continue to eliminate points outside the control limits and revise, until all points plot between control limits.
#
# We iteratively prune samples that are most outside the control limits until we have a set of control points within the control limits defined by them.
def out_of_cl(v, ucl, lcl):
    """Return how far v lies outside the control limits [lcl, ucl].

    0 when v is inside the limits; otherwise the positive distance beyond
    the violated limit. (Simplified: the original wrapped np.abs around
    quantities that are already non-negative.)
    """
    # at most one of (lcl - v) and (v - ucl) can be positive
    return max(0, lcl - v, v - ucl)
# Iteratively drop the sample most out of control and recompute limits,
# until every remaining sample is within its own control limits.
# fixed: work on a copy so helper columns added below do not pollute df
dfi = df.copy()
for i in range(len(df)):
    # the constants table is not defined for very small sample counts
    if len(dfi) < 3:
        break
    # compute CL's for the current set of samples
    (x_CLi, x_UCLi, x_LCLi, r_bari, r_UCLi, r_LCLi) = get_control_limits(dfi)
    # plot
    control_charts(dfi, x_CLi, x_UCLi, x_LCLi, r_bari, r_UCLi, r_LCLi)
    # distance of each sample mean/range outside the limits (0 if inside)
    dfi['mean out of cl'] = dfi['mean'].apply(lambda m: out_of_cl(m, x_UCLi, x_LCLi))
    dfi['r out of cl'] = dfi['range'].apply(lambda r: out_of_cl(r, r_UCLi, r_LCLi))
    # drop the single most out-of-control sample (means first, then ranges)
    if (dfi['mean out of cl'] > 0).any():
        dfi = dfi.drop(dfi['mean out of cl'].idxmax())
    elif (dfi['r out of cl'] > 0).any():
        dfi = dfi.drop(dfi['r out of cl'].idxmax())
    else:
        # everything is inside the limits: done
        break
print('Control limits for in-control points:\n')
print('x UCL = %s '% x_UCLi)
print('x CL = %s '% x_CLi)
print('x LCL = %s '% x_LCLi)
print('R UCL = %s '% r_UCLi)
print('R = %s '% r_bari)
print('R LCL = %s '% r_LCLi)
control_charts(dfi, x_CLi, x_UCLi, x_LCLi, r_bari, r_UCLi, r_LCLi)
dfi
# ### (c) Given that the specifications are at 6.0 + / − 0.5, estimate Cp and Cpk for the in-control process and interpret these ratios.
# +
# Specification limits: 6.0 +/- 0.5
USL = 6.5
LSL = 5.5
n = len(dfi)
print('n = %s' % n)
# NOTE(review): d2 is tabulated by subgroup size; 3.258 is the d2 value for
# subgroups of size 12, while each daily sample here has 3 measurements
# (d2 = 1.693 for n = 3). Confirm which n this constant was meant for.
d2 = 3.258
print('d2 = %s' % d2)
# estimate process sigma from the average range: sigma = R-bar / d2
sigma = dfi['range'].mean()/d2
print('σ = %s' % sigma)
mu = dfi['mean'].mean()
print('μ = %s' % mu)
print('')
# Cp: potential capability of a perfectly centered process
Cp = (USL - LSL)/(6 * sigma)
print('Cp = (USL - LSL)/(6σ) = %s' % Cp)
# Cpk: actual capability, penalized for being off-center
Cpk = min((USL - mu)/(3*sigma), (mu - LSL)/(3*sigma))
print('Cpk = min[ (USL - μ)/3σ, (μ - LSL)/3σ ] = %s' % Cpk)
# -
# The ratios are both very small. This is because sigma is relatively large compared to the specified spread (6.5 - 5.5).
#
# The Cpk is slightly smaller than the centered process control limit, Cp, indicative of how the mean value is slightly off-centered between the specified control limits.
| pset-4/pset4.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Exercise 1
# Add the specified code for each code cell, running the cells _in order_.
# Write a **`while`** loop that prints out every 5th number (multiples of 5) from 0 to 100 (inclusive).
# - _Tip:_ use an **`end=','`** keyword argument to the `print()` function to print all the numbers on the same line.
# Print every multiple of 5 from 0 to 100 inclusive.
# fixed: the prompt says "from 0 to 100 (inclusive)", so start at 0
# (0 is a multiple of 5), not 5.
num = 0
while num <= 100:
    print(num, end=',')
    num += 5
# Use a **`while`** loop to print out the first 15 [Triangular numbers](https://en.wikipedia.org/wiki/Triangular_number). This is a sequence of numbers for which the _nth_ value is the sum of the numbers from 0 to _n_. **Do this only using addition!**
# - _Hint:_ use an additional variable to keep track of the `total` value, and have that value increase by the number of times you've been through the loop each iteration!
# Print the first 15 triangular numbers T(0)..T(14).
# fixed: the exercise says "Do this only using addition!", so build each
# value by adding the loop counter instead of the closed form
# count*(count+1)/2. The printed output is identical.
total = 0
count = 0
while count <= 14:
    print(total, end=",")
    count += 1
    total += count
# _Challenge_ Use a **`while`** loop to print out 20 numbers, each of which is larger than the previous by the _sum_ of the **two** previous numbers (the [Fibonacci sequence](https://en.wikipedia.org/wiki/Fibonacci_number)).
# - _Hint_: you'll need to keep track of the two previous values (start them at 0 and 1), and then "update" them each time through the loop, storing the "new total" in the first previous variable, and the first previous variable in the second (be careful about the ordering of this!)
# Use a **`while`** loop to print out a sequence of random numbers from 0 to 10, stopping after the number `4` is printed for the first time. You will need to import the `random` module.
# Modify the below "coin flipping" example from the course text so that it keeps flipping coins until you get two "heads" in a row.
# flip a coin until it shows up heads twice in a row
# fixed: the exercise asks to keep flipping until TWO heads in a row;
# also randint was used without being imported.
from random import randint

still_flipping = True
prev_flip = None
while still_flipping:
    flip = randint(0,1)
    if flip == 0:
        flip = "Heads"
    else:
        flip = "Tails"
    print(flip, end=", ")
    # stop only when this flip AND the previous flip were both heads
    if flip == "Heads" and prev_flip == "Heads":
        still_flipping = False
    prev_flip = flip
# Define a function **`input_number()`** that takes a minimum and maximum value as arguments. This function should prompt the user to input a number within the range, repeating the prompt if the provided value is not acceptable. Once an acceptable value has been provided, the function should return that number. You can assume that the user-entered input will always be numeric.
#
# Be sure and call your function and print its results to test it!
| exercise-1/exercise.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/saeedrafieyan/SciDownl/blob/master/iris_analysis.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + id="TD7VUo0pTh8D"
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
#import missingno as msno
#from sklearn.metrics import make_scorer, accuracy_score,precision_score
#from sklearn.metrics import classification_report
from sklearn.metrics import confusion_matrix
from sklearn.metrics import accuracy_score ,precision_score,recall_score,f1_score
from sklearn.model_selection import KFold,train_test_split,cross_val_score
from sklearn.svm import SVC, LinearSVC
from sklearn.naive_bayes import GaussianNB
import keras
from keras.datasets import mnist
from keras.models import Model
from keras.layers import Input, Dense,TimeDistributed
from keras.layers import LSTM
# + colab={"base_uri": "https://localhost:8080/"} id="YY_uNrxOT17U" outputId="09678f28-ec3a-453e-c7f2-a58eca867582"
# mount Google Drive so the dataset CSV is reachable
from google.colab import drive
drive.mount('/gdrive')
# %cd /gdrive
# + id="1xi-yLIgUZrM"
import pandas as pd
# load the iris dataset from Drive
iris = pd.read_csv('/gdrive/My Drive/dataset.csv')
# + id="7G_tiU7tXYb6"
# first four columns are the features, fifth is the class label
# (presumably the species column — confirm against the CSV)
X=iris.iloc[:,0:4].values
y=iris.iloc[:,4].values
from sklearn.preprocessing import LabelEncoder
le = LabelEncoder()
# encode string class labels as integers
y = le.fit_transform(y)
# 90/10 train/test split with a fixed seed for reproducibility
X_train,X_test,y_train,y_test=train_test_split(X,y,test_size=0.1,random_state=0)
# + colab={"base_uri": "https://localhost:8080/"} id="qrPDfsblX9ld" outputId="5eeec0ac-e476-41b4-c4ca-eee08f45b2bc"
# Gaussian Naive Bayes
print ('*** Naive Bayes result ***')
gaussian = GaussianNB()
gaussian.fit(X_train, y_train)
Y_pred = gaussian.predict(X_test)
# test-set accuracy (percent, 2 decimals)
accuracy_nb=round(accuracy_score(y_test,Y_pred)* 100, 2)
# NOTE: this is the TRAINING-set score, used later in the comparison table
acc_gaussian = round(gaussian.score(X_train, y_train) * 100, 2)
cm = confusion_matrix(y_test, Y_pred)
# micro-averaged metrics on the test set
accuracy = accuracy_score(y_test,Y_pred)
precision =precision_score(y_test, Y_pred,average='micro')
recall = recall_score(y_test, Y_pred,average='micro')
f1 = f1_score(y_test,Y_pred,average='micro')
print('Confusion matrix for Naive Bayes\n',cm)
print('accuracy_Naive Bayes: %.3f' %accuracy)
print('precision_Naive Bayes: %.3f' %precision)
print('recall_Naive Bayes: %.3f' %recall)
print('f1-score_Naive Bayes : %.3f' %f1)
# + colab={"base_uri": "https://localhost:8080/"} id="cV2er1I3X_mX" outputId="ea8b4a0f-ccc7-481a-cf47-b01a763f6f31"
# SVM (linear support vector classifier)
print ('*** SVM result ***')
linear_svc = LinearSVC(max_iter=4000)
linear_svc.fit(X_train, y_train)
Y_pred = linear_svc.predict(X_test)
# test-set accuracy (percent, 2 decimals)
accuracy_svc=round(accuracy_score(y_test,Y_pred)* 100, 2)
# NOTE: this is the TRAINING-set score, used later in the comparison table
acc_linear_svc = round(linear_svc.score(X_train, y_train) * 100, 2)
cm = confusion_matrix(y_test, Y_pred)
# micro-averaged metrics on the test set
accuracy = accuracy_score(y_test,Y_pred)
precision =precision_score(y_test, Y_pred,average='micro')
recall = recall_score(y_test, Y_pred,average='micro')
f1 = f1_score(y_test,Y_pred,average='micro')
print('Confusion matrix for SVC\n',cm)
print('accuracy_SVC: %.3f' %accuracy)
print('precision_SVC: %.3f' %precision)
print('recall_SVC: %.3f' %recall)
print('f1-score_SVC : %.3f' %f1)
# + colab={"base_uri": "https://localhost:8080/"} id="RF8h5MmKYE5_" outputId="d4757cf5-46a7-4a7e-dffa-02a492c45bd9"
#LSTM — hierarchical row/column LSTM on MNIST (a different dataset than iris)
batch_size = 32
num_classes = 10
epochs = 5
row_hidden = 128
col_hidden = 128
# NOTE(review): this overwrites the iris x/y train/test splits defined above
# with MNIST data; the earlier splits are no longer accessible after this cell.
(x_train, y_train) , (x_test, y_test) = mnist.load_data()
# reshape to (samples, rows, cols, channels) and scale pixels to [0, 1]
x_train = x_train.reshape(x_train.shape[0],28,28,1)
x_test = x_test.reshape(x_test.shape[0],28,28,1)
x_train = x_train.astype('float32')
x_test = x_test.astype('float32')
x_train /= 255
x_test /= 255
print('x_train shape:', x_train.shape)
print(x_train.shape[0],'train sample')
print(x_test.shape[0], 'test sample')
# + id="L9l7ngyXbgij"
# one-hot encode the digit labels
y_train = keras.utils.to_categorical(y_train, num_classes)
y_test = keras.utils.to_categorical(y_test, num_classes)
row, col, pixel = x_train.shape[1:]
x = Input(shape=(row, col, pixel))
# + id="BZfFsVagcL07"
# encode each image row with an LSTM, then encode the row encodings
encoded_rows = TimeDistributed(LSTM(row_hidden))(x)
encoded_columns = LSTM(col_hidden)(encoded_rows)
# + id="_rl0tBX4co5c"
# softmax classification head over the 10 digit classes
prediction = Dense(num_classes,activation='softmax')(encoded_columns)
model = Model(x, prediction)
model.compile(loss='categorical_crossentropy',
              optimizer= 'rmsprop',
              metrics= ['accuracy'])
# + colab={"base_uri": "https://localhost:8080/"} id="rXrLKpuEdjDZ" outputId="55c14c41-0798-453e-b960-be7c88b1d749"
# train the LSTM model, validating on the test split each epoch
model.fit(x_train, y_train,
          batch_size=batch_size,
          epochs=epochs,
          verbose = 1,
          validation_data = (x_test, y_test))
# + colab={"base_uri": "https://localhost:8080/"} id="oT8m-NEUfCTE" outputId="79bedceb-ed26-45a9-c7f8-338d73774f9f"
# scores = [loss, accuracy] on the test set
scores = model.evaluate(x_test, y_test, verbose=0)
print('test loss: ', scores[0])
print('test LSTM accuracy: ', scores[1])
# + colab={"base_uri": "https://localhost:8080/", "height": 497} id="scjCxfg3h-Y7" outputId="864f9137-4fee-4990-e710-0e4d55672ef3"
# Build a comparison table of the three models.
# NOTE(review): 'Score' mixes training-set scores (NB, SVM) with the LSTM's
# test accuracy as a fraction; 'Accuracy_score' is test accuracy in percent.
results = pd.DataFrame({
    'Model': [ 'Naive Bayes',
              ' Support Vector Machine',
              'LSTM'
             ],
    'Score': [acc_gaussian,
              acc_linear_svc,
              scores[1],
             ],
    "Accuracy_score":[
        accuracy_nb,
        accuracy_svc,
        scores[1]*100
    ]})
# sort models best-first by test accuracy
result_df = results.sort_values(by='Accuracy_score', ascending=False)
result_df = result_df.reset_index(drop=True)
result_df.head(9)
plt.subplots(figsize=(12,8))
ax=sns.barplot(x='Model',y="Accuracy_score",data=result_df)
labels = (result_df["Accuracy_score"])
# add result numbers on barchart
for i, v in enumerate(labels):
    ax.text(i, v+1, str(v), horizontalalignment = 'center', size = 15, color = 'black')
| iris_analysis.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# %pylab inline
import jax
import jax.numpy as np
import numpy as onp
import flax
import pandas as pd
from tensorflow_probability.python.internal.backend import jax as tf
import tensorflow_probability as tfp; tfp = tfp.experimental.substrates.jax
# Load the data
# simulation grid: input parameters (HDF) and output spectra (npy)
parameters_raw = pd.read_hdf('dalek-2020-06-02/data/grid1_v2_log_uniform_params_16feb20_part1_train_98k.h5')
spectra_raw = onp.load('dalek-2020-06-02/data/grid1_v2_log_uniform_fluxes_16feb20_part1_train_interp_98k.npy')
# +
# We'll normalize the inputs/outputs a bit
from sklearn import preprocessing
parameters_n = parameters_raw.copy()
# log-transform, then min-max scale each parameter to [0, 1]
parameters_n = onp.log10(parameters_n)
parameters_n_min = parameters_n.min(axis=0)
parameters_n_max = parameters_n.max(axis=0)
parameters_n = (parameters_n - parameters_n_min)/(parameters_n_max - parameters_n_min)
parameters_n = parameters_n.values
# then standardize (zero mean, unit variance) for the network inputs
scaler_parameters = preprocessing.StandardScaler().fit(parameters_n)
n_parameters_n = scaler_parameters.transform(parameters_n)
# global min-max scale the spectra to [0, 1]
spectra_n = (spectra_raw - spectra_raw.min())
spectra_n = spectra_n/spectra_n.max()
# -
# sanity-check one spectrum before/after normalization
plot(spectra_raw[1,:])
plot(spectra_n[1,:])
batch_size = 128
# Ok, cool, we can define a trivial model which returns the
# interpolated value
from flax import nn, optim
class emulator(nn.Module):
    """MLP emulator mapping input parameters to a predicted spectrum.

    Returns a (values, scale) pair of 500-dim outputs; the scale is kept
    strictly positive via softplus plus a small floor.
    """

    def apply(self, p):
        # two hidden layers of width 256 with leaky-ReLU activations
        hidden = nn.leaky_relu(nn.Dense(p, 256))
        hidden = nn.leaky_relu(nn.Dense(hidden, 256))
        # separate linear heads: predicted spectrum and its uncertainty scale
        predicted = nn.Dense(hidden, 500)
        uncertainty = nn.softplus(nn.Dense(hidden, 500)) + 1e-2
        return predicted, uncertainty
# Ok, sweet, let's try it out:
# initialize parameters by tracing the model on a (batch, 12) input shape
_, initial_params = emulator.init_by_shape(jax.random.PRNGKey(0),
                                           [((batch_size,12), np.float32)])
model = flax.nn.Model(emulator, initial_params)
# +
# Perfect, now.... let's try to learn a spline :-D
# -
def get_batch():
    """Draw a random minibatch of normalized (parameters, spectra) pairs."""
    idx = onp.random.choice(len(n_parameters_n), batch_size)
    batch_x = n_parameters_n[idx].astype('float32')
    batch_y = spectra_n[idx].astype('float32')
    return {'x': batch_x, 'y': batch_y}
@jax.jit
def train_step(optimizer,batch):
    """One jitted optimization step: negative log-likelihood loss under a
    Normal with the emulator's predicted mean and scale."""
    def loss_fn(model):
        mu, scale = model(batch['x'])
        # Compute likelihood of the targets under Normal(mu, scale)
        likelihood = tfp.distributions.Normal(mu, scale)
        # negative mean log-likelihood
        loss = - likelihood.log_prob(batch['y']).mean()
        return loss
    # value_and_grad gives the loss and its gradient in one pass
    l, grad = jax.value_and_grad(loss_fn)(optimizer.target)
    optimizer = optimizer.apply_gradient(grad)
    return optimizer,l
# We also need an optimizer
optimizer = flax.optim.Momentum(
    learning_rate=0.001, beta=0.9).create(model)
losses = []
batch = get_batch()
# 10k steps of SGD with momentum, fresh random batch each step
for i in range(10000):
    # Let's try to learn
    batch = get_batch()
    optimizer, l = train_step(optimizer, batch)
    losses.append(l)
    # log the loss every 1000 steps
    if i%1000 ==0:
        print(l)
# training-loss curve
plot((losses))
# predicted mean vs data for the first sample of the last batch
# (output index [0] is the mean, [1] is the scale)
plot(optimizer.target(batch['x'])[0][0], label='emulator')
plot(batch['y'][0], label='data')
legend()
# predicted per-wavelength uncertainty scale
plot(optimizer.target(batch['x'])[1][0], label='emulator')
# mean +/- 1 sigma band against the data
x = np.linspace(0,1.0, 500)
fill_between(x,
             optimizer.target(batch['x'])[0][0] - optimizer.target(batch['x'])[1][0],
             optimizer.target(batch['x'])[0][0] + optimizer.target(batch['x'])[1][0],
             label='emulator +/- 1 sigma', alpha=0.5)
plot(x, optimizer.target(batch['x'])[0][0])
plot(x, batch['y'][0], '--', label='data')
legend()
| Experimentations-TFP.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3.9.4 64-bit ('env')
# name: python394jvsc74a57bd00615a048ddbccd178d78d39579d8f375b3e2586f56dbde83d62a1c513c66395e
# ---
# +
# Part 1. ETL Pipeline for Pre-Processing the Files
# -
# Import Python packages
import pandas as pd
import cassandra
import re
import os
import glob
import numpy as np
import json
import csv
# +
#### Creating list of filepaths to process original event csv data files
# +
# checking current working directory
print(f"Current working directory : {os.getcwd()}")

# get current folder and subfolder event data
filepath = os.getcwd() + '/event_data'

# collect every file path under event_data
# fixed: file_path_list was reassigned on each os.walk iteration, keeping
# only the LAST directory's contents; now paths accumulate across all
# directories, and directories themselves are filtered out
file_path_list = []
for root, dirs, files in os.walk(filepath):
    # join the file path and roots with the subdirectories using glob
    file_path_list.extend(
        p for p in glob.glob(os.path.join(root, '*')) if os.path.isfile(p))
#print(file_path_list)
# +
#### Processing the files to create the data file csv that will be used for Apache Casssandra tables
# +
# initiating an empty list of rows that will be generated from each file
full_data_rows_list = []

# for every filepath in the file path list
for f in file_path_list:
    # reading csv file
    with open(f, 'r', encoding = 'utf8', newline='') as csvfile:
        # creating a csv reader object
        csvreader = csv.reader(csvfile)
        # skip the header row of each source file
        next(csvreader)
        # extracting each data row one by one and append it
        for line in csvreader:
            full_data_rows_list.append(line)
print(f"Total rows : {len(full_data_rows_list)}")
print(f"Sample data:\n {full_data_rows_list[:5]}")
# creating a smaller event data csv file called event_datafile_full csv that will be used to insert data into the \
# Apache Cassandra tables
csv.register_dialect('myDialect', quoting=csv.QUOTE_ALL, skipinitialspace=True)
with open('event_datafile_new.csv', 'w', encoding = 'utf8', newline='') as f:
    writer = csv.writer(f, dialect='myDialect')
    writer.writerow(['artist','firstName','gender','itemInSession','lastName','length',\
'level','location','sessionId','song','userId'])
    for row in full_data_rows_list:
        # rows with an empty artist field are session noise; drop them
        if (row[0] == ''):
            continue
        # project only the 11 columns named in the header above
        writer.writerow((row[0], row[2], row[3], row[4], row[5], row[6], row[7], row[8], row[12], row[13], row[16]))
# -
# checking the number of rows in new event csv file
with open('event_datafile_new.csv', 'r', encoding = 'utf8') as f:
    print(sum(1 for line in f))
# Now we are ready to work with the CSV file titled event_datafile_new.csv, located within the Workspace directory. The event_datafile_new.csv contains the following columns:
# * artist
# * firstName of user
# * gender of user
# * item number in session
# * last name of user
# * length of the song
# * level (paid or free song)
# * location of the user
# * sessionId
# * song title
# * userId
# ## Creating A Cluster
# +
# This should make a connection to a Cassandra instance your local machine
# (127.0.0.1)
from cassandra.cluster import Cluster
# connect to the local single-node Cassandra instance
try:
    cluster = Cluster(['127.0.0.1'])
    session = cluster.connect()
    print("Connection Established !!")
except Exception as e:
    print(f"Connection Failed !! Error : {e}")
# -
# ## Creating Keyspace
# +
# single-node setup, so SimpleStrategy with replication factor 1 is enough
keyspace_query = """CREATE KEYSPACE IF NOT EXISTS sparkify
with REPLICATION =
{ 'class' : 'SimpleStrategy', 'replication_factor' : 1 }
"""
# Creating Keyspace
try:
    session.execute(keyspace_query)
except Exception as e:
    print(f"Failed to create keyspace!! Error : {e}")
# -
# ## Setting Keyspace
# Setting KEYSPACE to the keyspace specified above
session.set_keyspace('sparkify')
# ### Now we need to create tables to run the following queries. Remember, with Apache Cassandra we model the database tables on the queries we want to run.
# ## Below are the queries following which we will build out data model
#
# ### 1. Give the artist, song title and song's length in the music app history that was heard during sessionId = 338, and itemInSession = 4
#
#
# ### 2. Give only the following: name of artist, song (sorted by itemInSession) and user (first and last name) for userid = 10, sessionid = 182
#
#
# ### 3. Give every user name (first and last) in my music app history who listened to the song 'All Hands Against His Own'
#
#
#
# ## Query 1
#
# ### For query 1, we need a way to run query on sessionId and itemInSession. So, our primary key must have these columns. We can partition the data on sessionId.
#
# ### Our Select query : SELECT artist, song, length FROM session_item where sessionId = 338 and itemInSession = 4
# ### Our Primary key will be (sessionId, itemInSession), where sessionId is the partition key and itemInSession is the clustering column.
# ### Columns we included in the table : artist, song, length, sessionId, itemInSession
# +
# Creating table for query1
# partition key sessionId, clustering column itemInSession — matches the
# WHERE clause of query 1 (sessionId = 338 AND itemInSession = 4)
create_query1 = """CREATE TABLE IF NOT EXISTS session_item (artist text, song text, length float, sessionId int, itemInSession int, PRIMARY KEY (sessionId, itemInSession))"""
try:
    session.execute(create_query1)
    print("Table Created!!")
except Exception as e:
    print(f"Table creation failed!! Error : {e}")
# +
# Using the event file
file = 'event_datafile_new.csv'

# Reading csv file and inserting rows into cassandra tables.
with open(file, encoding = 'utf8') as f:
    csvreader = csv.reader(f)
    next(csvreader) # skip header
    for line in csvreader:
        query = "INSERT INTO session_item (artist, song, length, sessionId, itemInSession) "
        query = query + " VALUES (%s, %s, %s, %s, %s) "
        # CSV columns: 0=artist, 9=song, 5=length, 8=sessionId, 3=itemInSession
        # fixed: song was inserted from line[10], which is userId; the song
        # title is line[9] (consistent with the query-2 insert below)
        session.execute(query, (line[0], line[9], float(line[5]), int(line[8]), int(line[3])) )
# -
# #### Do a SELECT to verify that the data have been inserted into each table
# +
# SELECT statement to verify the data was entered into the table
# query 1: artist, song and length for sessionId 338, itemInSession 4
select_query1 = "SELECT artist, song, length FROM session_item where sessionId = 338 and itemInSession = 4"
try:
    rows = session.execute(select_query1)
except Exception as e:
    print(e)
for row in rows:
    print(row)
# -
# ## Query 2
#
# ### For query 2, we need a way to run query on sessionId and userId. Also, we need the data sorted on itemInSession. So, our primary key must have these columns. We can partition the data on a composite key (sessionId, userId).
#
# ### Our Select query : SELECT artist, song, firstName, lastName FROM user_session where sessionId = 182 and userId = 10
# ### Our Primary key will be ((sessionId, userId), itemInSession)), where (sessionId, userId) is the partition key and itemInSession is the clustering column.
# ### Also, we are using the clause - WITH CLUSTERING ORDER BY (itemInSession ASC), to sort our data based on itemInSession
# ### Columns we included in the table : sessionId, userId, artist, song, firstName, lastName, itemInSession
# +
# Creating table for query2
# composite partition key (sessionId, userId); itemInSession clusters rows
# in ascending order so songs come back sorted as query 2 requires
create_query2 = """CREATE TABLE IF NOT EXISTS user_session (sessionId int, userId int, artist text, song text, firstName text, lastName text, itemInSession int, PRIMARY KEY ((sessionId, userId), itemInSession)) WITH CLUSTERING ORDER BY (itemInSession ASC) """
try:
    session.execute(create_query2)
    print("Table Created!!")
except Exception as e:
    print(f"Table creation failed!! Error : {e}")
# +
file = 'event_datafile_new.csv'
with open(file, encoding = 'utf8') as f:
    csvreader = csv.reader(f)
    next(csvreader) # skip header
    for line in csvreader:
        # CSV columns: 8=sessionId, 10=userId, 0=artist, 9=song,
        # 1=firstName, 4=lastName, 3=itemInSession
        query = "INSERT INTO user_session (sessionId, userId, artist, song, firstName, lastName, itemInSession) "
        query = query + " VALUES (%s, %s, %s, %s, %s, %s, %s) "
        session.execute(query, (int(line[8]), int(line[10]), line[0], line[9], line[1], line[4], int(line[3]) ) )
# +
# SELECT statement to verify the data was entered into the table
# query 2: artist, song and user name for sessionId 182, userId 10,
# ordered by itemInSession via the table's clustering order
select_query2 = "SELECT artist, song, firstName, lastName FROM user_session where sessionId = 182 and userId = 10"
try:
    rows = session.execute(select_query2)
except Exception as e:
    print(e)
for row in rows:
    print(row)
# -
# ### Drop the tables before closing out the sessions
# drop all tables created for this exercise
session.execute("DROP TABLE IF EXISTS sparkify.session_item")
session.execute("DROP TABLE IF EXISTS sparkify.user_session")
# NOTE(review): no user_song table is created in this notebook chunk;
# IF EXISTS makes this drop a harmless no-op either way
session.execute("DROP TABLE IF EXISTS sparkify.user_song")
# ### Close the session and cluster connection¶
session.shutdown()
cluster.shutdown()
| Data_Modeling_with_Cassandra/cassandra.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import numpy as np
import matplotlib.pyplot as plt
from scipy.io import loadmat
# %matplotlib inline
# regular 64-point coordinate axis with spacing h (presumably meters)
h = 10
x = y = np.array([h*i for i in range(64)])
# +
# Build the DAS cable channel positions: a 12-channel diagonal segment
# (spacing 5*sqrt(2)), zig-zagged 4 times vertically, then tiled 4 times
# horizontally, and centered inside the 320 x 320 survey area.
das_template_x = np.array([5*np.sqrt(2)*i for i in range(12)])
das_template_y = np.array([5*np.sqrt(2)*i for i in range(12)])
# one zig-zag column: alternate forward/reversed x while y keeps rising
das_template_x2 = np.hstack([das_template_x,das_template_x[::-1],das_template_x,das_template_x[::-1]])
das_template_y2 = np.hstack([das_template_y,das_template_y+das_template_y[-1],das_template_y+2*das_template_y[-1],das_template_y+3*das_template_y[-1]])
das_x = np.hstack([das_template_x2+i*das_template_x[-1] for i in range(4)])
das_y = np.hstack([das_template_y2 for i in range(4)])
# center the layout inside the 320 m survey square
offset = (320-np.max(das_x))/2
das_x += offset
das_y += offset
# per-channel cable azimuths (degrees) matching the zig-zag direction
azimuth_template_1 = np.array([[[45 for i in range(12)], [-45 for i in range(12)]] for i in range(2)]).flatten()
azimuth_template_2 = np.array([[[135 for i in range(12)], [215 for i in range(12)]] for i in range(2)]).flatten()
das_az = np.hstack([azimuth_template_1, azimuth_template_2,
                    azimuth_template_1, azimuth_template_2])
# azimuths in radians for later trig
das_azr = np.deg2rad(das_az)
# +
# 16 x 16 grid of candidate sensor locations at cell centers of a
# 17-node partition of [0, 320]
t = np.linspace(0,320,17)
s = np.linspace(0,320,17)
# NOTE(review): this rebinds the x/y axes defined earlier in the notebook
x = y = (t[:-1]+t[1:]) / 2
xst = yst = (s[:-1]+s[1:]) / 2
xs, ys = np.meshgrid(xst,yst)
xs = xs.flatten()
ys = ys.flatten()
# alternative: 128 uniformly random candidate locations (disabled)
# np.random.seed(43771120)
# xs = np.random.uniform(0,320,128)
# ys = np.random.uniform(0,320,128)
# -
# plot DAS channels (colored by cable azimuth) and candidate sensors (triangles)
cax = plt.scatter(das_x, das_y,c=das_az)
plt.scatter(xs, ys, marker='^', color='k', alpha=0.5)
plt.xlim(0,320)
plt.ylim(0,320)
plt.colorbar(cax, label="Cable Azimuth")
plt.xlabel("Easting (m)")
plt.ylabel("Northing (m)")
plt.gca().set_aspect("equal")
# +
from scipy.io import loadmat
from scipy.interpolate import RectBivariateSpline as rbs
from scipy.integrate import romb
import scipy.sparse as sp
import os
# curvelet scale weighting exponent
cscale = 2
generate_kernels = True
raz = np.deg2rad(das_az)
L = 10 #gauge length
# integration points along the gauge (2^5 + 1 nodes for Romberg)
ll = np.linspace(-L/2, L/2, 2**5+1)
dl = ll[1]-ll[0]
# (channel, gauge-point) coordinates along each cable segment
p1 = das_x[:,np.newaxis]+np.sin(raz[:,np.newaxis])*ll[np.newaxis,:]
p2 = das_y[:,np.newaxis]+np.cos(raz[:,np.newaxis])*ll[np.newaxis,:]
if generate_kernels:
    os.makedirs("Kernels", exist_ok=True)
    # precomputed 16x16 curvelet frame functions and their scales
    crv = loadmat("../Curvelet_Basis_Construction/G_16_16.mat")
    G_mat = np.reshape(crv["G_mat"].T, (crv["G_mat"].shape[1], 16, 16))
    crvscales = crv["scales"].flatten()
    # per-frame scale weighting 2^(cscale * scale)
    cvtscaler = 2.0**(cscale*crvscales)
    # G1/G2: DAS strain kernels per channel; G3: point-sensor kernel
    G1 = np.zeros((len(raz), G_mat.shape[0]))
    G2 = np.zeros((len(raz), G_mat.shape[0]))
    G3 = np.zeros((len(xs), G_mat.shape[0]))
    for j in range(G_mat.shape[0]):
        # bivariate spline of the j-th frame over the grid
        frame = rbs(x,y,G_mat[j])
        #average derivatives of frame along gauge length
        fd1 = romb(frame.ev(p1, p2, dx=1), dl) / L
        fd2 = romb(frame.ev(p1, p2, dy=1), dl) / L
        # project the derivatives onto the cable direction (strain rotation)
        G1[:,j] = (np.sin(raz)**2*fd1 +
                   np.sin(2*raz)*fd2/2) / cvtscaler[j]
        G2[:,j] = (np.cos(raz)**2*fd2 +
                   np.sin(2*raz)*fd1/2) / cvtscaler[j]
        # point-sensor sampling of the frame at candidate locations
        G3[:,j] = frame.ev(xs, ys) / cvtscaler[j]
    np.save("Kernels/G1.npy", G1)
    np.save("Kernels/G2.npy", G2)
    np.save("Kernels/G3.npy", G3)
# -
# Plot optimal-design results: DAS channels in black, candidate sensors
# colored by their inclusion probability for each solver variant.
res = np.load("resmin.npy")
plt.scatter(das_x, das_y, c='k', alpha=0.5)
cax = plt.scatter(xs, ys, c=res, marker='^')
plt.xlim(0,320)
plt.ylim(0,320)
plt.colorbar(cax, label="Inclusion Probability")
plt.xlabel("Easting (m)")
plt.ylabel("Northing (m)")
plt.gca().set_aspect("equal")
# rounded (0/1) solution
res = np.load("resround.npy")
plt.scatter(das_x, das_y, c='k', alpha=0.5)
cax = plt.scatter(xs, ys, c=res, marker='^')
plt.xlim(0,320)
plt.ylim(0,320)
plt.colorbar(cax, label="Inclusion Probability")
plt.xlabel("Easting (m)")
plt.ylabel("Northing (m)")
plt.gca().set_aspect("equal")
# number of sensors selected in the rounded design
np.sum(res)
# spectral relaxation solution
res1 = np.load("res_spectral.npy")
plt.scatter(das_x, das_y, c='k', alpha=0.5)
cax = plt.scatter(xs, ys, c=res1, marker='^')
plt.xlim(0,320)
plt.ylim(0,320)
plt.colorbar(cax, label="Inclusion Probability")
plt.xlabel("Easting (m)")
plt.ylabel("Northing (m)")
plt.gca().set_aspect("equal")
# second spectral variant
res2 = np.load("res_spectral2.npy")
plt.scatter(das_x, das_y, c='k', alpha=0.5)
cax = plt.scatter(xs, ys, c=res2, marker='^')
plt.xlim(0,320)
plt.ylim(0,320)
plt.colorbar(cax, label="Inclusion Probability")
plt.xlabel("Easting (m)")
plt.ylabel("Northing (m)")
plt.gca().set_aspect("equal")
# +
from scipy.signal import butter, lfilter
def butter_bandpass(lowcut, highcut, fs, order=5):
    """Design a Butterworth band-pass filter.

    Returns the (b, a) coefficients for the band [lowcut, highcut] in Hz
    at sampling rate fs.
    """
    nyquist = 0.5 * fs
    return butter(order, [lowcut / nyquist, highcut / nyquist], btype='band')
def butter_bandpass_filter(data, lowcut, highcut, fs, order=5, axis=0):
    """Apply a Butterworth band-pass filter to data along the given axis."""
    coeffs = butter_bandpass(lowcut, highcut, fs, order=order)
    return lfilter(coeffs[0], coeffs[1], data, axis=axis)
def butter_lowpass(lowcut, fs, order=5):
    """Design a Butterworth low-pass filter with cutoff lowcut in Hz."""
    nyquist = 0.5 * fs
    return butter(order, [lowcut / nyquist], btype='low')
def butter_low_filter(data, lowcut, fs, order=5, axis=0):
    """Apply a Butterworth low-pass filter to data along the given axis."""
    coeffs = butter_lowpass(lowcut, fs, order=order)
    return lfilter(coeffs[0], coeffs[1], data, axis=axis)
# -
# rough velocity sanity check for the synthetic shot
np.sqrt(2)*320 / 2000 * 8000 # = length of diagonal * approx samples to cross diagonal * assumed samples / s = m / s velocity
# raw shot gather: 4001 time samples on a 64 x 64 spatial grid
shot = np.reshape(np.fromfile("Testing/TestData/shot1.dat", dtype=np.float32), (4001,64,64))
t = np.linspace(0, 0.5, 4001)
# low-pass at 50 Hz (8 kHz sampling), then decimate time by 20
shotf = butter_low_filter(shot, 50, 8000)
tf = t[::20]
shotf = shotf[::20,:,:]
# effective sampling frequency after decimation
tf_freq = 1/(tf[1]-tf[0])
# snapshot of the wavefield and a single-channel trace
plt.imshow(shotf[49])
plt.plot(tf, shotf[:, 10, 10])
from scipy.interpolate import RectBivariateSpline as rbs
# NOTE(review): incomplete scratch cell — rbs() is called with no arguments
# (it requires x, y, z data) and `fftintr` is not defined anywhere in this
# notebook chunk, so this cell raises as written. Confirm intended inputs.
shotf_itps = [rbs()]
# second spatial derivatives of the interpolated field at the DAS channels
exxr = fftintr.ev(das_x,das_y, dx=2)
eyyr = fftintr.ev(das_x,das_y, dy=2)
exyr = (fftintr.ev(das_x,das_y, dx=1,dy=1)+fftintr.ev(das_x,das_y, dy=1,dx=1))/2
# rotate the strain tensor onto each cable azimuth
edasr = (np.sin(das_azr)**2*exxr+np.sin(2*das_azr)*exyr+np.cos(das_azr)**2*eyyr)
| OptimalDesign/Archive/DAS_Designs.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import pandas as pd
import os
# Merge every monthly sales CSV in the Sales_Data folder into all_data.csv.
# fixed: removed a dead initial read of Sales_April_2019.csv whose result
# was immediately overwritten inside the loop below.
all_months_data=pd.DataFrame()
files= [file for file in os.listdir("D:\\Project\\Python\\Pandas-Data-Science-Tasks-master\\Pandas-Data-Science-Tasks-master\\SalesAnalysis\\Sales_Data\\")]
for file in files:
    df=pd.read_csv("D:\\Project\\Python\\Pandas-Data-Science-Tasks-master\\Pandas-Data-Science-Tasks-master\\SalesAnalysis\\Sales_Data\\"+file)
    all_months_data=pd.concat([all_months_data,df])
# write the combined table without the index column
all_months_data.to_csv("D:\\Project\\Python\\Pandas-Data-Science-Tasks-master\\Pandas-Data-Science-Tasks-master\\SalesAnalysis\\Sales_Data\\all_data.csv",index=False)
import numpy as np
# reload the merged dataset
data=pd.read_csv("D:\\Project\\Python\\Pandas-Data-Science-Tasks-master\\Pandas-Data-Science-Tasks-master\\SalesAnalysis\\Sales_Data\\all_data.csv")
data.head()
data.shape
data.isna().sum().sort_values(ascending=False)
# drop rows that are entirely NaN
data.dropna(how='all',inplace=True)
data.isna().sum().sort_values(ascending=False)
data.info()
data.dtypes
# Order Date is a string like "MM/DD/YY HH:MM"; slice out the parts.
data['Month']=data['Order Date'].str[0:2]
data['Month'].unique()
# drop the repeated header rows (their Order Date column literally starts "Or")
# fixed: take an explicit copy so the column assignments below do not write
# into a view of `data` (avoids chained-assignment warnings)
all_data= data[data['Month'] != "Or"].copy()
all_data.head()
all_data['Month'].unique()
all_data['Date']=all_data['Order Date'].str[3:5]
all_data.head()
# fixed: the two-digit year occupies positions 6:8; the original [6:9]
# also captured the space before the time
all_data['Year']=all_data['Order Date'].str[6:8]
all_data.head()
# time-of-day part ("HH:MM") starts at position 9
all_data['Time']=all_data['Order Date'].str[9:]
all_data.head()
all_data['Price Each'].unique()
all_data['Product'].value_counts()
# quantities and prices were read as strings; convert before arithmetic
all_data['Quantity Ordered']=pd.to_numeric(all_data['Quantity Ordered'])
all_data['Price Each']=pd.to_numeric(all_data['Price Each'])
# line-item revenue
all_data['Total Price'] =all_data['Quantity Ordered'] * all_data['Price Each']
all_data.head()
import matplotlib.pyplot as plt
import seaborn as sns
# #### Question 1: What was the best month for sales? How much was earned that month?
# Sum every numeric column per month; 'Total Price' is the revenue.
results=all_data.groupby('Month').sum()
results.head()
month=range(1,13)
plt.bar(month,results['Total Price'])
plt.show()
# #### Which city has maximum sell
all_data.head()
# 'Purchase Address' looks like "street, city, state zip" — split on commas.
all_data['City']=all_data['Purchase Address'].apply(lambda x : x.split(",")[1])
all_data.head()
all_data['State']=all_data['Purchase Address'].apply(lambda x : x.split(",")[2])
all_data.head()
result_city=all_data.groupby('City').sum()
result_city
# groupby keys come back sorted, matching result_city's index order.
# NOTE(review): the comprehension variable shadows the DataFrame name
# `all_data`; harmless in Python 3 (comprehension scope) but confusing.
cities=[city for city ,all_data in all_data.groupby('City')]
plt.bar(cities,result_city['Total Price'])
plt.xticks(cities,rotation='vertical',size=8)
plt.show()
# ##### Question 3:What time should we display advertisements to maximise likelihood of customers buying product?
import datetime as dt
all_data['Time']=pd.to_datetime(all_data['Time'])
all_data['Hour']=all_data['Time'].dt.hour
all_data['Minute']=all_data['Time'].dt.minute
all_data['Count']=1
hours=[hour for hour , df in all_data.groupby('Hour')]
# Orders per hour of day (count() counts every column; the line shape is
# the same for each, so the plot reads as order volume by hour).
plt.plot(hours,all_data.groupby(['Hour']).count())
plt.xticks(hours)
plt.grid()
plt.show()
# #### Question 4. What products are most sold together?
all_data.head()
# Rows sharing an 'Order ID' belong to the same purchase.
group_data = all_data[all_data['Order ID'].duplicated(keep=False)]
group_data['Grouped'] = group_data.groupby("Order ID")['Product'].transform(lambda x: ','.join(x))
group_data.head()
group_data = group_data[['Order ID', 'Grouped']].drop_duplicates()
group_data.head()
group_data.shape
from itertools import combinations
from collections import Counter
# Count every unordered pair of products appearing in the same order.
counter = Counter()
for row in group_data['Grouped']:
    row_list = row.split(',')
    # BUG FIX: the accumulator was created as `counter` but updated and
    # read as the undefined name `count`, which raised NameError.
    counter.update(Counter(combinations(row_list, 2)))
print(counter.most_common(10))
for key, value in counter.most_common(10):
    print(key, value)
# ###### What product sold the most ? Why do you think it sold the most?
all_data.head()
product_group=all_data.groupby('Product')
quantity_order=product_group.sum()
quantity_order
quantity=product_group.sum()['Quantity Ordered']
products=[product for product , df in product_group]
# +
plt.bar(products,quantity)
plt.xticks(products,rotation='vertical')
# Overlay mean unit price on a second y-axis: cheap products tend to sell
# in higher quantities.
prices=all_data.groupby('Product').mean()['Price Each']
ax1=plt.subplot()
ax2=plt.twinx()
ax1.bar(products,quantity)
ax2.plot(products,prices)
plt.show()
# -
# ---- Business Analytics Insights.ipynb (file boundary marker) ----
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
# # !wget https://raw.githubusercontent.com/UniversalDependencies/UD_English-EWT/master/en_ewt-ud-dev.conllu
# # !wget https://raw.githubusercontent.com/UniversalDependencies/UD_English-EWT/master/en_ewt-ud-train.conllu
# # !wget https://raw.githubusercontent.com/UniversalDependencies/UD_English-EWT/master/en_ewt-ud-test.conllu
# -
import os
# Pin this job to GPU 1; must be set before TensorFlow is imported.
os.environ['CUDA_VISIBLE_DEVICES'] = '1'
# +
import malaya
import re
from malaya.texts._text_functions import split_into_sentences
from malaya.texts import _regex
import numpy as np
import itertools
# Malaya tokenizer / sentence splitter used during preprocessing.
tokenizer = malaya.preprocessing._tokenizer
splitter = split_into_sentences
# +
def is_number_regex(s):
    """Return True if *s* is an integer (per str.isdigit) or a simple decimal.

    FIX: use a raw string for the pattern — the non-raw "^\\d+?\\.\\d+?$"
    form triggers invalid-escape warnings on modern Python.  The lazy
    quantifiers were also redundant for a fully anchored match.
    """
    if re.match(r"^\d+\.\d+$", s) is None:
        return s.isdigit()
    return True
def preprocessing(w):
    """Normalise a raw token: map special categories to placeholder symbols,
    otherwise squash runs of repeated characters down to at most two
    (e.g. "soooo" -> "soo")."""
    if is_number_regex(w):
        return '<NUM>'
    # Check categories in the same order as before: money, date, email, url.
    special_patterns = (
        (_regex._money, '<MONEY>'),
        (_regex._date, '<DATE>'),
        (_regex._expressions['email'], '<EMAIL>'),
        (_regex._expressions['url'], '<URL>'),
    )
    for pattern, placeholder in special_patterns:
        if re.match(pattern, w):
            return placeholder
    return ''.join(''.join(run)[:2] for _, run in itertools.groupby(w))
# +
# Vocabulary tables shared by the whole notebook.  Slot 0 is padding,
# slot 1 the unknown token and slot 2 the artificial root symbol.
word2idx = {'PAD': 0, 'UNK': 1, '_ROOT': 2}
tag2idx = {'PAD': 0, '_<ROOT>': 1}
char2idx = {'PAD': 0, 'UNK': 1, '_ROOT': 2}
tag_idx = 2
# Placeholder symbols emitted by preprocessing() get fixed ids (3..7) in
# both the word and the character vocabularies.
special_tokens = ['<NUM>', '<MONEY>', '<DATE>', '<URL>', '<EMAIL>']
for offset, token in enumerate(special_tokens):
    word2idx[token] = 3 + offset
    char2idx[token] = 3 + offset
word_idx = 3 + len(special_tokens)
char_idx = 3 + len(special_tokens)
word2idx, char2idx
# +
# Sentinel symbols used when assembling sequences (word / POS / relation
# type / character variants of padding, root and end markers).
PAD = "_PAD"
PAD_POS = "_PAD_POS"
PAD_TYPE = "_<PAD>"
PAD_CHAR = "_PAD_CHAR"
ROOT = "_ROOT"
ROOT_POS = "_ROOT_POS"
ROOT_TYPE = "_<ROOT>"
ROOT_CHAR = "_ROOT_CHAR"
END = "_END"
END_POS = "_END_POS"
END_TYPE = "_<END>"
END_CHAR = "_END_CHAR"
def process_corpus(corpus, until = None):
    """Parse CoNLL-U lines into parallel id sequences, growing the vocabularies.

    Returns (sentences, words, depends, labels, pos, chars); every sentence
    is prefixed with the artificial root.  Mutates the module-level
    word2idx / tag2idx / char2idx tables.  `until` is accepted but unused.
    """
    global word2idx, tag2idx, char2idx, word_idx, tag_idx, char_idx
    sentences, words, depends, labels, pos, chars = [], [], [], [], [], []
    temp_sentence, temp_word, temp_depend, temp_label, temp_pos = [], [], [], [], []
    first_time = True
    for sentence in corpus:
        try:
            if len(sentence):
                # Lines starting with '#' are CoNLL-U comments.
                if sentence[0] == '#':
                    continue
                if first_time:
                    print(sentence)
                    first_time = False
                sentence = sentence.split('\t')
                # Register raw characters before the form is rewritten below.
                for c in sentence[1]:
                    if c not in char2idx:
                        char2idx[c] = char_idx
                        char_idx += 1
                # Column 7 is the dependency relation label.
                if sentence[7] not in tag2idx:
                    tag2idx[sentence[7]] = tag_idx
                    tag_idx += 1
                sentence[1] = preprocessing(sentence[1])
                if sentence[1] not in word2idx:
                    word2idx[sentence[1]] = word_idx
                    word_idx += 1
                temp_word.append(word2idx[sentence[1]])
                temp_depend.append(int(sentence[6]))  # column 6: head index
                temp_label.append(tag2idx[sentence[7]])
                temp_sentence.append(sentence[1])
                temp_pos.append(sentence[3])          # column 3: UPOS tag
            else:
                # Blank line = sentence boundary; flush the accumulators.
                # Sentences with fewer than 2 tokens (or inconsistent
                # lengths after a parse error) are dropped.
                if len(temp_sentence) < 2 or len(temp_word) != len(temp_label):
                    temp_word = []
                    temp_depend = []
                    temp_label = []
                    temp_sentence = []
                    temp_pos = []
                    continue
                words.append([word2idx['_ROOT']] + temp_word)
                depends.append([0] + temp_depend)
                labels.append([tag2idx['_<ROOT>']] + temp_label)
                sentences.append([ROOT] + temp_sentence)
                pos.append([ROOT_POS] + temp_pos)
                char_ = [[char2idx['_ROOT']]]
                for w in temp_sentence:
                    # Placeholder tokens such as <NUM> count as a single
                    # "character"; real words expand character by character.
                    if w in char2idx:
                        char_.append([char2idx[w]])
                    else:
                        char_.append([char2idx[c] for c in w])
                chars.append(char_)
                temp_word = []
                temp_depend = []
                temp_label = []
                temp_sentence = []
                temp_pos = []
        except Exception as e:
            print(e, sentence)
    # [:-1] drops the trailing entry produced by the final blank line.
    return sentences[:-1], words[:-1], depends[:-1], labels[:-1], pos[:-1], chars[:-1]
# +
def _obtain_child_index_for_left2right(heads):
child_ids = [[] for _ in range(len(heads))]
# skip the symbolic root.
for child in range(1, len(heads)):
head = heads[child]
child_ids[head].append(child)
return child_ids
def _obtain_child_index_for_inside_out(heads):
child_ids = [[] for _ in range(len(heads))]
for head in range(len(heads)):
# first find left children inside-out
for child in reversed(range(1, head)):
if heads[child] == head:
child_ids[head].append(child)
# second find right children inside-out
for child in range(head + 1, len(heads)):
if heads[child] == head:
child_ids[head].append(child)
return child_ids
def _obtain_child_index_for_depth(heads, reverse):
def calc_depth(head):
children = child_ids[head]
max_depth = 0
for child in children:
depth = calc_depth(child)
child_with_depth[head].append((child, depth))
max_depth = max(max_depth, depth + 1)
child_with_depth[head] = sorted(child_with_depth[head], key=lambda x: x[1], reverse=reverse)
return max_depth
child_ids = _obtain_child_index_for_left2right(heads)
child_with_depth = [[] for _ in range(len(heads))]
calc_depth(0)
return [[child for child, depth in child_with_depth[head]] for head in range(len(heads))]
def _generate_stack_inputs(heads, types, prior_order):
    """Simulate the stack-pointer transition sequence for one gold parse.

    Returns (stacked_heads, children, siblings, stacked_types, skip_connect):
    the teacher-forcing inputs/targets for the decoder.  Pointing back at
    the head itself encodes the "pop" transition.  Uses the module-level
    tag2idx for the PAD label id.
    """
    if prior_order == 'deep_first':
        child_ids = _obtain_child_index_for_depth(heads, True)
    elif prior_order == 'shallow_first':
        child_ids = _obtain_child_index_for_depth(heads, False)
    elif prior_order == 'left2right':
        child_ids = _obtain_child_index_for_left2right(heads)
    elif prior_order == 'inside_out':
        child_ids = _obtain_child_index_for_inside_out(heads)
    else:
        raise ValueError('Unknown prior order: %s' % prior_order)
    stacked_heads = []
    children = []
    siblings = []
    stacked_types = []
    skip_connect = []
    prev = [0 for _ in range(len(heads))]  # last step each head was on top
    sibs = [0 for _ in range(len(heads))]  # most recently generated child per head
    stack = [0]
    position = 1
    while len(stack) > 0:
        head = stack[-1]
        stacked_heads.append(head)
        siblings.append(sibs[head])
        child_id = child_ids[head]
        skip_connect.append(prev[head])
        prev[head] = position
        if len(child_id) == 0:
            # No children left: point at the head itself to pop it.
            children.append(head)
            sibs[head] = 0
            stacked_types.append(tag2idx['PAD'])
            stack.pop()
        else:
            child = child_id.pop(0)
            children.append(child)
            sibs[head] = child
            stack.append(child)
            stacked_types.append(types[child])
        position += 1
    return stacked_heads, children, siblings, stacked_types, skip_connect
# +
# Parse the UD English-EWT dev split and build its decoder targets.
with open('en_ewt-ud-dev.conllu') as fopen:
    dev = fopen.read().split('\n')
sentences_dev, words_dev, depends_dev, labels_dev, _, seq_dev = process_corpus(dev)
# -
# NOTE: dev transitions go into the *_test lists on purpose — dev and test
# are merged into a single evaluation set further down.
stacked_heads_test, children_test, siblings_test, stacked_types_test = [], [], [], []
for i in range(len(sentences_dev)):
    stacked_heads, children, siblings, stacked_types, _ = _generate_stack_inputs(depends_dev[i],
                                                                                 labels_dev[i], 'deep_first')
    stacked_heads_test.append(stacked_heads)
    children_test.append(children)
    siblings_test.append(siblings)
    stacked_types_test.append(stacked_types)
# +
# Parse the test split, append its transitions, then fold dev into test so
# evaluation runs over the combined set.
with open('en_ewt-ud-test.conllu') as fopen:
    test = fopen.read().split('\n')
sentences_test, words_test, depends_test, labels_test, _, seq_test = process_corpus(test)
# +
for i in range(len(sentences_test)):
    stacked_heads, children, siblings, stacked_types, _ = _generate_stack_inputs(depends_test[i],
                                                                                 labels_test[i], 'deep_first')
    stacked_heads_test.append(stacked_heads)
    children_test.append(children)
    siblings_test.append(siblings)
    stacked_types_test.append(stacked_types)
sentences_test.extend(sentences_dev)
words_test.extend(words_dev)
depends_test.extend(depends_dev)
labels_test.extend(labels_dev)
seq_test.extend(seq_dev)
# +
# Parse the train split and build its teacher-forcing transitions.
with open('en_ewt-ud-train.conllu') as fopen:
    train = fopen.read().split('\n')
sentences_train, words_train, depends_train, labels_train, _, _ = process_corpus(train)
stacked_heads_train, children_train, siblings_train, stacked_types_train = [], [], [], []
for i in range(len(sentences_train)):
    stacked_heads, children, siblings, stacked_types, _ = _generate_stack_inputs(depends_train[i],
                                                                                 labels_train[i], 'deep_first')
    stacked_heads_train.append(stacked_heads)
    children_train.append(children)
    siblings_train.append(siblings)
    stacked_types_train.append(stacked_types)
# -
len(sentences_train), len(sentences_test)
# Reverse lookups for inspecting predictions.
idx2word = {v:k for k, v in word2idx.items()}
idx2tag = {v:k for k, v in tag2idx.items()}
len(idx2word)
import tensorflow as tf
# +
from enum import Enum
class PriorOrder(Enum):
    """Order in which a head's children are generated during decoding."""
    DEPTH = 0
    INSIDE_OUT = 1
    # NOTE(review): the "LEFT2RIGTH" typo is load-bearing — decode_sentence
    # references this exact spelling, so it must not be renamed in isolation.
    LEFT2RIGTH = 2
class BiAAttention:
    """Bi-affine attention scorer between decoder and encoder states (TF1)."""
    def __init__(self, input_size_encoder, input_size_decoder, num_labels):
        self.input_size_encoder = input_size_encoder
        self.input_size_decoder = input_size_decoder
        self.num_labels = num_labels
        # Linear terms for the decoder / encoder sides plus the bi-affine
        # interaction tensor U.
        self.W_d = tf.get_variable("W_d", shape=[self.num_labels, self.input_size_decoder],
                                   initializer=tf.contrib.layers.xavier_initializer())
        self.W_e = tf.get_variable("W_e", shape=[self.num_labels, self.input_size_encoder],
                                   initializer=tf.contrib.layers.xavier_initializer())
        self.U = tf.get_variable("U", shape=[self.num_labels, self.input_size_decoder, self.input_size_encoder],
                                 initializer=tf.contrib.layers.xavier_initializer())
    def forward(self, input_d, input_e, mask_d=None, mask_e=None):
        """Score every (decoder step, encoder position) pair.

        Returns a [batch, num_labels, len_d, len_e] tensor; masked
        positions are zeroed.  NOTE(review): batch / length_decoder /
        length_encoder below are computed but unused.
        """
        batch = tf.shape(input_d)[0]
        length_decoder = tf.shape(input_d)[1]
        length_encoder = tf.shape(input_e)[1]
        out_d = tf.expand_dims(tf.matmul(self.W_d, tf.transpose(input_d, [0, 2, 1])), 3)
        out_e = tf.expand_dims(tf.matmul(self.W_e, tf.transpose(input_e, [0, 2, 1])), 2)
        # Bi-affine term: input_d * U * input_e^T, plus the two linear terms.
        output = tf.matmul(tf.expand_dims(input_d, 1), self.U)
        output = tf.matmul(output, tf.transpose(tf.expand_dims(input_e, 1), [0, 1, 3, 2]))
        output = output + out_d + out_e
        if mask_d is not None:
            d = tf.expand_dims(tf.expand_dims(mask_d, 1), 3)
            e = tf.expand_dims(tf.expand_dims(mask_e, 1), 2)
            output = output * d * e
        return output
class BiLinear:
    """Bilinear layer scoring (left, right) feature pairs into out_features."""
    def __init__(self, left_features, right_features, out_features):
        self.left_features = left_features
        self.right_features = right_features
        self.out_features = out_features
        self.U = tf.get_variable("U-bi", shape=[out_features, left_features, right_features],
                                 initializer=tf.contrib.layers.xavier_initializer())
        self.W_l = tf.get_variable("Wl", shape=[out_features, left_features],
                                   initializer=tf.contrib.layers.xavier_initializer())
        self.W_r = tf.get_variable("Wr", shape=[out_features, right_features],
                                   initializer=tf.contrib.layers.xavier_initializer())
    def forward(self, input_left, input_right):
        """Return bilinear + linear scores with shape input_left.shape[:-1] + [out_features]."""
        left_size = tf.shape(input_left)
        output_shape = tf.concat([left_size[:-1], [self.out_features]], axis = 0)
        # Flatten every leading dimension into one batch axis.
        batch = tf.cast(tf.reduce_prod(left_size[:-1]), tf.int32)
        input_left = tf.reshape(input_left, (batch, self.left_features))
        input_right = tf.reshape(input_right, (batch, self.right_features))
        tiled = tf.tile(tf.expand_dims(input_left, axis = 0), (self.out_features,1,1))
        output = tf.transpose(tf.reduce_sum(tf.matmul(tiled, self.U), axis = 2))
        output = output + tf.matmul(input_left, tf.transpose(self.W_l))\
                 + tf.matmul(input_right, tf.transpose(self.W_r))
        return tf.reshape(output, output_shape)
class StackPointer:
    """Stack-pointer dependency parser (encoder + pointer decoder), TF1 graph.

    NOTE(review): `conv1d` and `input_size_decoder` are constructed/accepted
    but never used below — presumably leftovers from a CNN character
    encoder; confirm before removing.
    """
    def __init__(self, word_dim, num_words, char_dim, num_chars, num_filters, kernel_size,
                 input_size_decoder, hidden_size, layers,
                 num_labels, arc_space, type_space):
        def cells(size, reuse=False):
            # state_is_tuple=False flattens (c, h) into one vector so the
            # encoder's final state can seed the decoder directly.
            return tf.nn.rnn_cell.LSTMCell(size,
                                           initializer=tf.orthogonal_initializer(),reuse=reuse,
                                           state_is_tuple=False)
        self.word_embedd = tf.Variable(tf.random_uniform([num_words, word_dim], -1, 1))
        self.char_embedd = tf.Variable(tf.random_uniform([num_chars, char_dim], -1, 1))
        self.conv1d = tf.layers.Conv1D(num_filters, kernel_size, 1, padding='VALID')
        self.num_labels = num_labels
        self.prior_order = PriorOrder.DEPTH
        self.char_dim = char_dim
        self.layers = layers
        self.encoder = tf.nn.rnn_cell.MultiRNNCell([cells(hidden_size) for _ in range(layers)],
                                                   state_is_tuple=False)
        self.encoder_char = tf.nn.rnn_cell.MultiRNNCell([cells(hidden_size) for _ in range(layers)],
                                                        state_is_tuple=False)
        self.decoder = tf.nn.rnn_cell.MultiRNNCell([cells(hidden_size) for _ in range(layers)],
                                                   state_is_tuple=False)
        self.hidden_size = hidden_size
        self.arc_space = arc_space
        # Projections: arc_* feed head-selection scoring, type_* feed
        # relation-label scoring.
        self.src_dense = tf.layers.Dense(hidden_size)
        self.hx_dense = tf.layers.Dense(hidden_size)
        self.arc_h = tf.layers.Dense(arc_space)
        self.arc_c = tf.layers.Dense(arc_space)
        self.attention = BiAAttention(arc_space, arc_space, 1)
        self.type_h = tf.layers.Dense(type_space)
        self.type_c = tf.layers.Dense(type_space)
        self.bilinear = BiLinear(type_space, type_space, self.num_labels)
    def encode(self, input_word, input_char):
        """Char-RNN per word, concatenated with the word embedding, then the
        sentence-level RNN.  Returns (outputs, final state)."""
        word = tf.nn.embedding_lookup(self.word_embedd, input_word)
        char = tf.nn.embedding_lookup(self.char_embedd, input_char)
        s = tf.shape(char)
        # Fold (batch, words) together so the char RNN sees one word per row.
        char = tf.reshape(
            char, shape = [s[0] * s[1], s[-2], self.char_dim]
        )
        output, _ = tf.nn.dynamic_rnn(self.encoder_char, char, dtype = tf.float32,
                                      scope = 'encoder-char')
        # Last char step serves as the word's character-level summary.
        output = tf.reshape(
            output[:, -1], shape = [s[0], s[1], self.hidden_size]
        )
        word_embedded = tf.concat([word, output], axis = -1)
        output, hn = tf.nn.dynamic_rnn(self.encoder, word_embedded, dtype = tf.float32,
                                       scope = 'encoder')
        return output, hn
    def decode(self, output_encoder, heads, heads_stack, siblings, hn):
        """Teacher-forced decoder pass.

        Decoder input at each step = encoder state of the stacked head
        + (masked) sibling state + grandparent state.
        """
        batch = tf.shape(output_encoder)[0]
        batch_index = tf.range(0, batch)
        t = tf.transpose(heads_stack)
        broadcasted = tf.broadcast_to(batch_index, tf.shape(t))
        concatenated = tf.transpose(tf.concat([tf.expand_dims(broadcasted, axis = 0),
                                               tf.expand_dims(t, axis = 0)], axis = 0))
        src_encoding = tf.gather_nd(output_encoder, concatenated)
        # Sibling contribution is zeroed where there is no sibling (id 0).
        mask_sibs = tf.expand_dims(tf.cast(tf.not_equal(siblings, 0), tf.float32), axis = 2)
        t = tf.transpose(siblings)
        broadcasted = tf.broadcast_to(batch_index, tf.shape(t))
        concatenated = tf.transpose(tf.concat([tf.expand_dims(broadcasted, axis = 0),
                                               tf.expand_dims(t, axis = 0)], axis = 0))
        output_enc_sibling = tf.gather_nd(output_encoder, concatenated) * mask_sibs
        src_encoding = src_encoding + output_enc_sibling
        t = tf.transpose(heads_stack)
        broadcasted = tf.broadcast_to(batch_index, tf.shape(t))
        concatenated = tf.transpose(tf.concat([tf.expand_dims(broadcasted, axis = 0),
                                               tf.expand_dims(t, axis = 0)],axis = 0))
        # Grandparent = gold head of the stacked head.
        g = tf.transpose(tf.gather_nd(heads, concatenated))
        broadcasted = tf.broadcast_to(batch_index, tf.shape(g))
        concatenated = tf.transpose(tf.concat([tf.expand_dims(broadcasted, axis = 0),
                                               tf.expand_dims(g, axis = 0)],axis = 0))
        output_enc_gpar = tf.gather_nd(output_encoder, concatenated)
        src_encoding = src_encoding + output_enc_gpar
        src_encoding = tf.nn.elu(self.src_dense(src_encoding))
        output, hn = tf.nn.dynamic_rnn(self.decoder, src_encoding, dtype = tf.float32,
                                       initial_state = hn,
                                       scope = 'decoder')
        return output, hn
    def loss(self, input_word, input_char,
             heads, stacked_heads, children, siblings, stacked_types,
             mask_e, mask_d,
             label_smooth = 1.0):
        """Arc + relation-type negative log-likelihood, split into leaf and
        non-leaf transition averages, plus a coverage penalty.

        Returns (arc_leaf, arc_non_leaf, type_leaf, type_non_leaf,
        coverage, num_leaf, num_non_leaf).  NOTE(review): label_smooth is
        accepted but unused.
        """
        output_enc, hn_enc = self.encode(input_word, input_char)
        arc_c = tf.nn.elu(self.arc_c(output_enc))
        type_c = tf.nn.elu(self.type_c(output_enc))
        output_dec, _ = self.decode(output_enc, heads, stacked_heads, siblings, hn_enc)
        arc_h = tf.nn.elu(self.arc_h(output_dec))
        type_h = tf.nn.elu(self.type_h(output_dec))
        max_len_d = tf.shape(arc_h)[1]
        out_arc = tf.squeeze(self.attention.forward(arc_h, arc_c, mask_d=mask_d, mask_e=mask_e), axis = 1)
        batch = tf.shape(arc_c)[0]
        max_len_e = tf.shape(arc_c)[1]
        batch_index = tf.range(0, batch)
        # Gather the encoder type features of the gold child at each step.
        t = tf.transpose(children)
        broadcasted = tf.broadcast_to(batch_index, tf.shape(t))
        concatenated = tf.transpose(tf.concat([tf.expand_dims(broadcasted, axis = 0),
                                               tf.expand_dims(t, axis = 0)], axis = 0))
        type_c = tf.gather_nd(type_c, concatenated)
        out_type = self.bilinear.forward(type_h, type_c)
        print(out_arc.shape,out_type.shape)
        # Push masked positions to -inf before the softmax.
        minus_inf = -1e8
        minus_mask_d = (1 - mask_d) * minus_inf
        minus_mask_e = (1 - mask_e) * minus_inf
        out_arc = out_arc + tf.expand_dims(minus_mask_d, 2) + tf.expand_dims(minus_mask_e, 1)
        loss_arc = tf.nn.log_softmax(out_arc, axis = 2)
        loss_type = tf.nn.log_softmax(out_type, axis = 2)
        coverage = tf.cumsum(tf.exp(loss_arc), axis = 1)
        # A "leaf" step is one where the decoder points back at the head
        # itself (the pop transition generated by _generate_stack_inputs).
        mask_leaf = tf.cast(tf.equal(children, stacked_heads), tf.float32)
        mask_non_leaf = (1.0 - mask_leaf)
        mask_d_2 = tf.expand_dims(mask_d, 2)
        mask_e_1 = tf.expand_dims(mask_e, 1)
        loss_arc = loss_arc * mask_d_2 * mask_e_1
        coverage = coverage * mask_d_2 * mask_e_1
        loss_type = loss_type * mask_d_2
        mask_leaf = mask_leaf * mask_d
        mask_non_leaf = mask_non_leaf * mask_d
        num_leaf = tf.reduce_sum(mask_leaf)
        num_non_leaf = tf.reduce_sum(mask_non_leaf)
        # Pick the log-prob of the gold child / gold type at each step.
        head_index = tf.tile(tf.expand_dims(tf.range(0, max_len_d), 1), [1, batch])
        t = tf.transpose(children)
        broadcasted = tf.broadcast_to(batch_index, tf.shape(t))
        concatenated = tf.transpose(tf.concat([tf.expand_dims(broadcasted, axis = 0),
                                               tf.expand_dims(head_index, axis = 0),
                                               tf.expand_dims(t, axis = 0)], axis = 0))
        loss_arc = tf.gather_nd(loss_arc, concatenated)
        t = tf.transpose(stacked_types)
        broadcasted = tf.broadcast_to(batch_index, tf.shape(t))
        concatenated = tf.transpose(tf.concat([tf.expand_dims(broadcasted, axis = 0),
                                               tf.expand_dims(head_index, axis = 0),
                                               tf.expand_dims(t, axis = 0)], axis = 0))
        loss_type = tf.gather_nd(loss_type, concatenated)
        loss_arc_leaf = loss_arc * mask_leaf
        loss_arc_non_leaf = loss_arc * mask_non_leaf
        loss_type_leaf = loss_type * mask_leaf
        loss_type_non_leaf = loss_type * mask_non_leaf
        # Coverage penalty kicks in once cumulative attention exceeds 2.
        loss_cov = tf.clip_by_value(coverage - 2.0, 0.0, 100.0)
        return (tf.reduce_sum(-loss_arc_leaf) / num_leaf,
                tf.reduce_sum(-loss_arc_non_leaf) / num_non_leaf,
                tf.reduce_sum(-loss_type_leaf) / num_leaf,
                tf.reduce_sum(-loss_type_non_leaf) / num_non_leaf,
                tf.reduce_sum(loss_cov) / (num_leaf + num_non_leaf),
                num_leaf,
                num_non_leaf)
class Model:
    """Wires StackPointer into a trainable TF1 graph plus step-wise decode ops.

    `cov` weights the coverage penalty inside the total cost.
    """
    def __init__(self, learning_rate = 1e-3, cov = 0.0):
        self.stackpointer = StackPointer(word_dim = 128,
                                         num_words = len(word2idx),
                                         char_dim = 128,
                                         num_chars = len(char2idx),
                                         num_filters = 128,
                                         kernel_size = 3,
                                         input_size_decoder = 256,
                                         hidden_size = 256,
                                         layers = 1,
                                         num_labels = len(tag2idx),
                                         arc_space = 128,
                                         type_space = 128)
        # Training placeholders: teacher-forced transition sequences plus
        # encoder/decoder masks.
        self.words = tf.placeholder(tf.int32, (None, None))
        self.chars = tf.placeholder(tf.int32, (None, None, None))
        self.heads = tf.placeholder(tf.int32, (None, None))
        self.stacked_heads = tf.placeholder(tf.int32, (None, None))
        self.siblings = tf.placeholder(tf.int32, (None, None))
        self.childrens = tf.placeholder(tf.int32, (None, None))
        self.stacked_types = tf.placeholder(tf.int32, (None, None))
        self.mask_e = tf.placeholder(tf.float32, (None, None))
        self.mask_d = tf.placeholder(tf.float32, (None, None))
        loss_arc_leaf, loss_arc_non_leaf, \
        loss_type_leaf, loss_type_non_leaf, \
        loss_cov, num_leaf, num_non_leaf = self.stackpointer.loss(self.words, self.chars, self.heads,
                                                                  self.stacked_heads, self.childrens,
                                                                  self.siblings, self.stacked_types,
                                                                  self.mask_e, self.mask_d)
        loss_arc = loss_arc_leaf + loss_arc_non_leaf
        loss_type = loss_type_leaf + loss_type_non_leaf
        self.cost = loss_arc + loss_type + cov * loss_cov
        self.optimizer = tf.train.AdamOptimizer(learning_rate = learning_rate).minimize(self.cost)
        # Inference-side ops: encode the sentence once, then run the decoder
        # one step at a time through the placeholders below (driven by
        # decode_sentence at beam-search time).
        self.encode_output, self.encode_hidden = self.stackpointer.encode(self.words, self.chars)
        self.encode_arc_c = tf.nn.elu(self.stackpointer.arc_c(self.encode_output))
        self.type_c = tf.nn.elu(self.stackpointer.type_c(self.encode_output))
        self.src_encoding = tf.placeholder(tf.float32, (None, self.stackpointer.hidden_size))
        self.arc_c = tf.placeholder(tf.float32, (None, self.stackpointer.arc_space))
        # Flat LSTM state: hidden_size * 2 because state_is_tuple=False
        # concatenates (c, h).
        self.hx = tf.placeholder(tf.float32, (None,
                                              self.stackpointer.hidden_size * 2 * self.stackpointer.layers))
        src_encoding = tf.nn.elu(self.stackpointer.src_dense(self.src_encoding))
        output_dec, hx = self.stackpointer.decoder(src_encoding, self.hx)
        arc_h = tf.nn.elu(self.stackpointer.arc_h(tf.expand_dims(output_dec, axis = 1)))
        type_h = tf.nn.elu(self.stackpointer.type_h(output_dec))
        out_arc = self.stackpointer.attention.forward(arc_h, tf.expand_dims(self.arc_c, 0))
        out_arc = tf.squeeze(tf.squeeze(out_arc, axis = 1), axis = 1)
        self.hyp_scores = tf.nn.log_softmax(out_arc, axis = 1)
        self.type_h = type_h
        self.decode_hidden = hx
        # Relation-type scoring for the chosen (head, child) hypotheses.
        self.holder_type_h = tf.placeholder(tf.float32, (None, self.stackpointer.arc_space))
        self.holder_type_c = tf.placeholder(tf.float32, (None, self.stackpointer.arc_space))
        out_type = self.stackpointer.bilinear.forward(self.holder_type_h, self.holder_type_c)
        self.hyp_type_scores = tf.nn.log_softmax(out_type, axis = 1)
# -
# Build the graph and initialise all variables in an interactive session.
tf.reset_default_graph()
sess = tf.InteractiveSession()
model = Model()
sess.run(tf.global_variables_initializer())
# +
# Aliases: train on the train split; evaluate on the merged test+dev set
# assembled earlier.  train_char/test_char hold token strings, which
# generate_char_seq converts to character-id tensors.
train_X = words_train
train_Y = labels_train
train_depends = depends_train
train_char = sentences_train
test_X = words_test
test_Y = labels_test
test_depends = depends_test
test_char = sentences_test
# +
prior_order = model.stackpointer.prior_order
def decode_sentence(output_enc, arc_c, type_c, hx, beam, length, ordered, leading_symbolic):
    """Beam-search decode one sentence.

    Returns (heads, types, length, children, stacked_types) or None when
    the ordered search dead-ends.  Drives the decoder step by step through
    the TF session.  FIXES: np.bool (removed in NumPy 1.24) replaced with
    the builtin bool; duplicated `children`/`stacked_types` initialisations
    removed.  NOTE(review): hypothesis_scores starts as the Python list
    [0], so only beam == 1 is safe — a wider beam would index past the end;
    confirm before raising `beam`.
    """
    def valid_hyp(base_id, child_id, head):
        # A child may be attached only once and, when `ordered`, must
        # respect the configured child-generation order for this head.
        if constraints[base_id, child_id]:
            return False
        elif not ordered or prior_order == PriorOrder.DEPTH or child_orders[base_id, head] == 0:
            return True
        elif prior_order == PriorOrder.LEFT2RIGTH:
            return child_id > child_orders[base_id, head]
        else:
            if child_id < head:
                return child_id < child_orders[base_id, head] < head
            else:
                return child_id > child_orders[base_id, head]
    length = output_enc.shape[0] if length is None else length
    stacked_heads = [[0] for _ in range(beam)]
    grand_parents = [[0] for _ in range(beam)]
    siblings = [[0] for _ in range(beam)]
    # A complete parse takes exactly 2 * length - 1 transitions.
    children = np.zeros((beam, 2 * length - 1))
    stacked_types = np.zeros((beam, 2 * length - 1))
    hypothesis_scores = [0]
    constraints = np.zeros([beam, length], dtype=bool)
    constraints[:, 0] = True
    child_orders = np.zeros([beam, length], dtype=np.int64)
    new_stacked_heads = [[] for _ in range(beam)]
    new_grand_parents = [[] for _ in range(beam)]
    new_siblings = [[] for _ in range(beam)]
    new_skip_connects = [[] for _ in range(beam)]
    new_children = np.zeros((beam, 2 * length - 1))
    new_stacked_types = np.zeros((beam, 2 * length - 1))
    num_hyp = 1
    num_step = 2 * length - 1
    for t in range(num_step):
        # Decoder input = head + masked sibling + grandparent encodings.
        heads = np.array([stacked_heads[i][-1] for i in range(num_hyp)])
        gpars = np.array([grand_parents[i][-1] for i in range(num_hyp)])
        sibs = np.array([siblings[i].pop() for i in range(num_hyp)])
        src_encoding = output_enc[heads]
        mask_sibs = np.expand_dims((np.array(sibs) != 0).astype(np.float32), axis = 1)
        output_enc_sibling = output_enc[sibs] * mask_sibs
        src_encoding = src_encoding + output_enc_sibling
        output_enc_gpar = output_enc[gpars]
        src_encoding = src_encoding + output_enc_gpar
        hyp_scores, type_h, hx = sess.run([model.hyp_scores, model.type_h, model.decode_hidden],
                                          feed_dict = {model.src_encoding: src_encoding,
                                                       model.arc_c: arc_c,
                                                       model.hx: hx})
        # Expand every hypothesis by every candidate child and rank.
        new_hypothesis_scores = np.expand_dims(hypothesis_scores[:num_hyp], axis = 1) + hyp_scores
        new_hypothesis_scores = new_hypothesis_scores.reshape((-1))
        hyp_index = np.argsort(new_hypothesis_scores)[::-1]
        new_hypothesis_scores = np.sort(new_hypothesis_scores)[::-1]
        base_index = (hyp_index // length)
        child_index = hyp_index % length
        cc = 0
        ids = []
        new_constraints = np.zeros([beam, length], dtype=bool)
        new_child_orders = np.zeros([beam, length], dtype=np.int64)
        for id_ in range(num_hyp * length):
            base_id = base_index[id_]
            if base_id:
                ids.append(id_)
                continue
            child_id = child_index[id_]
            head = heads[base_id]
            new_hyp_score = new_hypothesis_scores[id_]
            if child_id == head:
                # Pointing at itself = pop; the root may only be popped on
                # the final step.
                if head != 0 or t + 1 == num_step:
                    new_constraints[cc] = constraints[base_id]
                    new_child_orders[cc] = child_orders[base_id]
                    new_stacked_heads[cc] = [stacked_heads[base_id][i] for i in range(len(stacked_heads[base_id]))]
                    new_stacked_heads[cc].pop()
                    new_grand_parents[cc] = [grand_parents[base_id][i] for i in range(len(grand_parents[base_id]))]
                    new_grand_parents[cc].pop()
                    new_siblings[cc] = [siblings[base_id][i] for i in range(len(siblings[base_id]))]
                    new_children[cc] = children[base_id]
                    new_children[cc, t] = child_id
                    hypothesis_scores[cc] = new_hyp_score
                    ids.append(id_)
                    cc += 1
            elif valid_hyp(base_id, child_id, head):
                # Attach child_id to head and push it on the stack.
                new_constraints[cc] = constraints[base_id]
                new_constraints[cc, child_id] = True
                new_child_orders[cc] = child_orders[base_id]
                new_child_orders[cc, head] = child_id
                new_stacked_heads[cc] = [stacked_heads[base_id][i] for i in range(len(stacked_heads[base_id]))]
                new_stacked_heads[cc].append(child_id)
                new_grand_parents[cc] = [grand_parents[base_id][i] for i in range(len(grand_parents[base_id]))]
                new_grand_parents[cc].append(head)
                new_siblings[cc] = [siblings[base_id][i] for i in range(len(siblings[base_id]))]
                new_siblings[cc].append(child_id)
                new_siblings[cc].append(0)
                new_children[cc] = children[base_id]
                new_children[cc, t] = child_id
                hypothesis_scores[cc] = new_hyp_score
                ids.append(id_)
                cc += 1
            if cc == beam:
                break
        num_hyp = len(ids)
        if num_hyp == 0:
            # Ordered search dead-ended; caller falls back to unordered.
            return None
        else:
            index = np.array(ids)
            base_index = base_index[index]
            child_index = child_index[index]
        # Score the relation type of each surviving (head, child) pair.
        hyp_type_scores = sess.run(model.hyp_type_scores,
                                   feed_dict = {
                                       model.holder_type_h: type_h[base_index],
                                       model.holder_type_c: type_c[child_index]
                                   })
        hyp_types = np.argmax(hyp_type_scores, axis = 1)
        hyp_type_scores = np.max(hyp_type_scores, axis = 1)
        hypothesis_scores[:num_hyp] = hypothesis_scores[:num_hyp] + hyp_type_scores
        for i in range(num_hyp):
            base_id = base_index[i]
            new_stacked_types[i] = stacked_types[base_id]
            new_stacked_types[i, t] = hyp_types[i]
        stacked_heads = [[new_stacked_heads[i][j] for j in range(len(new_stacked_heads[i]))] for i in range(num_hyp)]
        grand_parents = [[new_grand_parents[i][j] for j in range(len(new_grand_parents[i]))] for i in range(num_hyp)]
        siblings = [[new_siblings[i][j] for j in range(len(new_siblings[i]))] for i in range(num_hyp)]
        constraints = new_constraints
        child_orders = new_child_orders
        children = np.copy(new_children)
        stacked_types = np.copy(new_stacked_types)
    # Replay the best hypothesis's transitions into head/type arrays.
    children = children[0].astype(np.int32)
    stacked_types = stacked_types[0].astype(np.int32)
    heads = np.zeros(length, dtype=np.int32)
    types = np.zeros(length, dtype=np.int32)
    stack = [0]
    for i in range(num_step):
        head = stack[-1]
        child = children[i]
        type_ = stacked_types[i]
        if child != head:
            heads[child] = head
            types[child] = type_
            stack.append(child)
        else:
            stacked_types[i] = 0
            stack.pop()
    return heads, types, length, children, stacked_types
def decode(input_word, input_char, length = None, beam = 1, leading_symbolic=0, ordered=True):
    """Decode a padded batch; returns (heads, types, children, stack_types) arrays.

    Encodes the whole batch in one session run, then beam-searches each
    sentence individually with decode_sentence.
    """
    arc_c, type_c, output, hn = sess.run([model.encode_arc_c, model.type_c,
                                          model.encode_output, model.encode_hidden],
                                         feed_dict = {model.words: input_word, model.chars: input_char})
    batch, max_len_e, _ = output.shape
    heads = np.zeros([batch, max_len_e], dtype=np.int32)
    types = np.zeros([batch, max_len_e], dtype=np.int32)
    children = np.zeros([batch, 2 * max_len_e - 1], dtype=np.int32)
    stack_types = np.zeros([batch, 2 * max_len_e - 1], dtype=np.int32)
    for b in range(batch):
        sent_len = None if length is None else length[b]
        preds = decode_sentence(output[b], arc_c[b], type_c[b], [hn[b]],
                                beam, sent_len, ordered, leading_symbolic)
        # Ordered search can dead-end; retry without the ordering constraint.
        if preds is None:
            preds = decode_sentence(output[b], arc_c[b], type_c[b], [hn[b]], beam,
                                    sent_len, False, leading_symbolic)
        hids, tids, sent_len, chids, stids = preds
        heads[b, :sent_len] = hids
        types[b, :sent_len] = tids
        children[b, :2 * sent_len - 1] = chids
        stack_types[b, :2 * sent_len - 1] = stids
    return heads, types, children, stack_types
# -
def generate_char_seq(batch, UNK = 1):
    """Convert a batch of tokenised sentences into a padded char-id tensor.

    Characters are right-aligned along the last axis (zero left-padding).
    BUG FIX: the UNK default was 2, which is char2idx['_ROOT'], so unknown
    characters were silently mapped to the root symbol; char2idx['UNK'] is 1.
    """
    maxlen_c = max(len(sent) for sent in batch)
    word_lens = [[len(w) for w in sent] for sent in batch]
    maxlen = max(l for sent in word_lens for l in sent)
    temp = np.zeros((len(batch), maxlen_c, maxlen), dtype=np.int32)
    for i in range(len(batch)):
        for k in range(len(batch[i])):
            for no, c in enumerate(batch[i][k]):
                temp[i, k, -1 - no] = char2idx.get(c, UNK)
    return temp
# +
from tensorflow.keras.preprocessing.sequence import pad_sequences
# Assemble one small batch (5 sentences) to sanity-check graph shapes.
batch_x = train_X[:5]
batch_x = pad_sequences(batch_x,padding='post')
batch_char = train_char[:5]
batch_char = generate_char_seq(batch_char)
batch_y = train_Y[:5]
batch_y = pad_sequences(batch_y,padding='post')
batch_depends = train_depends[:5]
batch_depends = pad_sequences(batch_depends,padding='post')
batch_stacked_heads = stacked_heads_train[:5]
batch_stacked_heads = pad_sequences(batch_stacked_heads,padding='post')
batch_children = children_train[:5]
batch_children = pad_sequences(batch_children,padding='post')
batch_siblings = siblings_train[:5]
batch_siblings = pad_sequences(batch_siblings,padding='post')
batch_stacked_types = stacked_types_train[:5]
batch_stacked_types = pad_sequences(batch_stacked_types,padding='post')
# Encoder mask covers n tokens; decoder mask covers 2n - 1 transitions.
batch_e = np.zeros(batch_x.shape)
batch_d = np.zeros(batch_stacked_heads.shape)
nonzero = np.count_nonzero(batch_x, axis = 1)
for no, i in enumerate(nonzero):
    batch_e[no,:i] = 1.0
for no, i in enumerate(nonzero * 2 - 1):
    batch_d[no,:i] = 1.0
batch_x.shape, batch_stacked_heads.shape
# -
# Smoke test: one forward pass of the loss, then one greedy decode.
feed_dict = {model.words: batch_x,
             model.chars: batch_char,
             model.heads: batch_depends,
             model.stacked_heads: batch_stacked_heads,
             model.childrens: batch_children,
             model.siblings: batch_siblings,
             model.stacked_types: batch_stacked_types,
             model.mask_e: batch_e,
             model.mask_d: batch_d}
sess.run(model.cost, feed_dict = feed_dict)
# %%time
decode(batch_x, batch_char)
# +
from tqdm import tqdm

batch_size = 32
epoch = 15
# Standard minibatch loop: each epoch runs one optimisation pass over the
# training set and one forward-only pass over the test set, then reports the
# mean cost of each.
for e in range(epoch):
    test_loss, train_loss = [], []
    pbar = tqdm(
        range(0, len(train_X), batch_size), desc = 'train minibatch loop'
    )
    for i in pbar:
        # Slice, pad and mask this minibatch (same recipe as the smoke test
        # cell earlier).  NOTE(review): the inner `enumerate` loops below
        # rebind `i`; this is harmless only because `i` is not read again
        # within the iteration — a different name would be safer.
        index = min(i + batch_size, len(train_X))
        batch_x = train_X[i: index]
        batch_x = pad_sequences(batch_x,padding='post')
        batch_char = train_char[i: index]
        batch_char = generate_char_seq(batch_char)
        batch_y = train_Y[i: index]
        batch_y = pad_sequences(batch_y,padding='post')
        batch_depends = train_depends[i: index]
        batch_depends = pad_sequences(batch_depends,padding='post')
        batch_stacked_heads = stacked_heads_train[i: index]
        batch_stacked_heads = pad_sequences(batch_stacked_heads,padding='post')
        batch_children = children_train[i: index]
        batch_children = pad_sequences(batch_children,padding='post')
        batch_siblings = siblings_train[i: index]
        batch_siblings = pad_sequences(batch_siblings,padding='post')
        batch_stacked_types = stacked_types_train[i: index]
        batch_stacked_types = pad_sequences(batch_stacked_types,padding='post')
        # Encoder mask over real tokens; decoder mask over 2*len-1 steps.
        batch_e = np.zeros(batch_x.shape)
        batch_d = np.zeros(batch_stacked_heads.shape)
        nonzero = np.count_nonzero(batch_x, axis = 1)
        for no, i in enumerate(nonzero):
            batch_e[no,:i] = 1.0
        for no, i in enumerate(nonzero * 2 - 1):
            batch_d[no,:i] = 1.0
        feed_dict = {model.words: batch_x,
                     model.chars: batch_char,
                     model.heads: batch_depends,
                     model.stacked_heads: batch_stacked_heads,
                     model.childrens: batch_children,
                     model.siblings: batch_siblings,
                     model.stacked_types: batch_stacked_types,
                     model.mask_e: batch_e,
                     model.mask_d: batch_d}
        # Training step: cost + optimizer together.
        cost, _ = sess.run([model.cost, model.optimizer], feed_dict = feed_dict)
        train_loss.append(cost)
        pbar.set_postfix(cost = cost)
    pbar = tqdm(
        range(0, len(test_X), batch_size), desc = 'test minibatch loop'
    )
    for i in pbar:
        index = min(i + batch_size, len(test_X))
        batch_x = test_X[i: index]
        batch_x = pad_sequences(batch_x,padding='post')
        batch_char = test_char[i: index]
        batch_char = generate_char_seq(batch_char)
        batch_y = test_Y[i: index]
        batch_y = pad_sequences(batch_y,padding='post')
        batch_depends = test_depends[i: index]
        batch_depends = pad_sequences(batch_depends,padding='post')
        batch_stacked_heads = stacked_heads_test[i: index]
        batch_stacked_heads = pad_sequences(batch_stacked_heads,padding='post')
        batch_children = children_test[i: index]
        batch_children = pad_sequences(batch_children,padding='post')
        batch_siblings = siblings_test[i: index]
        batch_siblings = pad_sequences(batch_siblings,padding='post')
        batch_stacked_types = stacked_types_test[i: index]
        batch_stacked_types = pad_sequences(batch_stacked_types,padding='post')
        batch_e = np.zeros(batch_x.shape)
        batch_d = np.zeros(batch_stacked_heads.shape)
        nonzero = np.count_nonzero(batch_x, axis = 1)
        for no, i in enumerate(nonzero):
            batch_e[no,:i] = 1.0
        for no, i in enumerate(nonzero * 2 - 1):
            batch_d[no,:i] = 1.0
        feed_dict = {model.words: batch_x,
                     model.chars: batch_char,
                     model.heads: batch_depends,
                     model.stacked_heads: batch_stacked_heads,
                     model.childrens: batch_children,
                     model.siblings: batch_siblings,
                     model.stacked_types: batch_stacked_types,
                     model.mask_e: batch_e,
                     model.mask_d: batch_d}
        # Evaluation: cost only, no optimizer step.
        cost = sess.run(model.cost, feed_dict = feed_dict)
        test_loss.append(cost)
        pbar.set_postfix(cost = cost)
    print(
        'epoch: %d, training loss: %f, valid loss: %f\n'
        % (e, np.mean(train_loss), np.mean(test_loss)))
# -
def evaluate(heads_pred, types_pred, heads, types, lengths,
             symbolic_root=False, symbolic_end=False):
    """Score a batch of predicted dependency parses against gold parses.

    Parameters
    ----------
    heads_pred, types_pred : (batch, seq) integer arrays of predicted head
        indices and dependency-type ids.
    heads, types : gold-standard arrays of the same shape.
    lengths : per-sentence token counts; padding beyond them is ignored.
    symbolic_root, symbolic_end : skip an artificial first / last token.

    Returns
    -------
    tuple of (unlabeled accuracy, labeled accuracy, root accuracy).
    Each ratio is 0.0 when its denominator is empty, instead of raising
    ZeroDivisionError as the original did.

    Note: the original also tallied per-sentence complete-match counts but
    never returned them; that dead code has been removed.
    """
    batch_size, _ = heads_pred.shape
    ucorr = 0.
    lcorr = 0.
    total = 0.
    corr_root = 0.
    total_root = 0.
    start = 1 if symbolic_root else 0
    end = 1 if symbolic_end else 0
    for i in range(batch_size):
        for j in range(start, lengths[i] - end):
            total += 1
            if heads[i, j] == heads_pred[i, j]:
                ucorr += 1
                # Labeled accuracy requires head AND type to be correct.
                if types[i, j] == types_pred[i, j]:
                    lcorr += 1
            # A gold head index of 0 marks the sentence root.
            if heads[i, j] == 0:
                total_root += 1
                corr_root += 1 if heads_pred[i, j] == 0 else 0
    if total == 0:
        return 0.0, 0.0, 0.0
    root_acc = corr_root / total_root if total_root else 0.0
    return ucorr / total, lcorr / total, root_acc
# Quick accuracy check on a single decoded batch.  NOTE(review): batch_x /
# batch_char / batch_depends / batch_y here are whatever the test loop above
# left behind (its last minibatch).
heads, types, _, _ = decode(batch_x, batch_char)
arc_accuracy, type_accuracy, root_accuracy = evaluate(heads, types, batch_depends, batch_y,
                                                      np.count_nonzero(batch_x, axis = 1))
arc_accuracy, type_accuracy, root_accuracy
# +
# Full evaluation over the test set in batches of 5.  Per-batch accuracies
# are averaged at the end; note this weights batches equally rather than
# weighting by token count.
arcs, types, roots = [], [], []
for i in range(0, len(test_X), 5):
    index = min(i + 5, len(test_X))
    batch_x = test_X[i: index]
    batch_x = pad_sequences(batch_x,padding='post')
    batch_char = test_char[i: index]
    batch_char = generate_char_seq(batch_char)
    batch_y = test_Y[i: index]
    batch_y = pad_sequences(batch_y,padding='post')
    batch_depends = test_depends[i: index]
    batch_depends = pad_sequences(batch_depends,padding='post')
    # Use `tags_seq` (not `types`) so the accumulator list is not clobbered.
    heads, tags_seq, _, _ = decode(batch_x, batch_char)
    arc_accuracy, type_accuracy, root_accuracy = evaluate(heads, tags_seq, batch_depends, batch_y,
                                                          np.count_nonzero(batch_x, axis = 1))
    arcs.append(arc_accuracy)
    types.append(type_accuracy)
    roots.append(root_accuracy)
# -
print('arc accuracy:', np.mean(arcs))
print('types accuracy:', np.mean(types))
print('root accuracy:', np.mean(roots))
| dependency-parser/7.stackpointer.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# File-handling walkthrough: reading, seeking, iterating, appending, writing.
pwd
f = open("./test_code/test_file.txt")
f.read()
# A second read() returns '' because the cursor is now at end-of-file.
f.read()
# seek(0) rewinds the cursor so the file can be read again.
f.seek(0)
print(f.read())
f.seek(0)
f.readlines()
# +
# #%%writefile is a magic function only valid in Jupyter Notebook
# The function is used to write a file quickly.
# -
# NOTE(review): "new.txt" must already exist in the working directory; it is
# not created anywhere in this notebook.
for line in open("new.txt") :
    print(line)
# %%writefile ./test_code/new_2.txt
First line
Second line
Third line
for line in open("./test_code/new.txt") :
    print(line)
for line in open("./test_code/new_2.txt") :
    print(line)
pwd
f.close()
# Preferred pattern: the context manager closes the file automatically.
with open("./test_code/new_2.txt") as my_new_file :
    contents = my_new_file.readlines()
contents
with open("./test_code/new_3_write_by_script.txt", mode='r') as my_file :
    content = my_file.readlines()
content
with open("./test_code/new_3_write_by_script.txt", mode='r') as my_file :
    print(my_file.read())
# mode='a' appends to the end of an existing file.
with open("./test_code/new_3_write_by_script.txt", mode='a') as my_file :
    my_file.write("Fifth Line\n")
with open("./test_code/new_3_write_by_script.txt", mode='r') as my_file :
    print(my_file.read())
# mode='w' creates the file (or truncates it if it exists).
with open("./test_code/new_5.txt", mode='w') as my_file :
    my_file.write("I CREATED THIS BY <NAME>!\n")
with open("./test_code/new_5.txt", mode='r') as my_file :
    print(my_file.read())
| Files.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# **Introduction**
#
# One of the most controversial issues in the U.S. educational system is the efficacy of standardized tests, and whether they're unfair to certain groups. The SAT, or Scholastic Aptitude Test, is an exam that U.S. high school students take before applying to college. Colleges take the test scores into account when deciding who to admit, so it's fairly important to perform well on it.
#
# The test consists of three sections, each of which has 800 possible points. The combined score is out of 2,400 possible points (while this number has changed a few times, the data set for our project is based on 2,400 total points). Organizations often rank high schools by their average SAT scores. The scores are also considered a measure of overall school district quality.
#
# **Some interrelated datasets we'll be using for our analysis**:
#
# [SAT scores by school](https://data.cityofnewyork.us/Education/SAT-Results/f9bf-2cp4) - SAT scores for each high school in New York City
# [School attendance](https://data.cityofnewyork.us/Education/School-Attendance-and-Enrollment-Statistics-by-Dis/7z8d-msnt) - Attendance information for each school in New York City
# [Class size](https://data.cityofnewyork.us/Education/2010-2011-Class-Size-School-level-detail/urz7-pzb3) - Information on class size for each school
# [AP test results](https://data.cityofnewyork.us/Education/AP-College-Board-2010-School-Level-Results/itfs-ms3e) - Advanced Placement (AP) exam results for each high school (passing an optional AP exam in a particular subject can earn a student college credit in that subject)
# [Graduation outcomes](https://data.cityofnewyork.us/Education/Graduation-Outcomes-Classes-Of-2005-2010-School-Le/vh2h-md7a) - The percentage of students who graduated, and other outcome information
# [Demographics](https://data.cityofnewyork.us/Education/School-Demographics-and-Accountability-Snapshot-20/ihfw-zy9j) - Demographic information for each school
# [School survey](https://data.cityofnewyork.us/Education/NYC-School-Survey-2011/mnz3-dyi8) - Surveys of parents, teachers, and students at each school
#
# **Notable aspects about the datasets**:
# * Only high school students take the SAT, so we'll want to focus on high schools.
# * New York City is made up of five boroughs, which are essentially distinct regions.
# * New York City schools fall within several different school districts, each of which can contain dozens of schools.
# * Our data sets include several different types of schools. We'll need to clean them so that we can focus on high schools only.
# * Each school in New York City has a unique code called a DBN, or district borough number.
# * Aggregating data by district will allow us to use the district mapping data to plot district-by-district differences.
# ## Import libraries and data
# %autosave 10
import pandas as pd
import numpy as np
import re
import warnings
import matplotlib.pyplot as plt
warnings.filterwarnings('ignore')
# %matplotlib inline
# Mapping datafile names to location
# +
data_files = [
    "ap_2010.csv",
    "class_size.csv",
    "demographics.csv",
    "graduation.csv",
    "hs_directory.csv",
    "sat_results.csv"
]
data_loc = {}
# Capture the basename (everything before ".csv") to use as dictionary key.
pattern = r'([\w_]+).csv'
for x in data_files:
    y = re.findall(pattern, x)[0]
    data_loc[y] = 'nyc_highschool_data/schools/'+x
# -
# Loading the data into dictionary
data = {}
for key,value in data_loc.items():
    data[key] = pd.read_csv("{}".format(value))
# Stripping white-spaces from column names of each dataset
for key in data.keys():
    data[key].columns = data[key].columns.str.strip()
data['sat_results'].head(5)
# **Observations**:
#
# * The DBN appears to be a unique ID for each school.
# * We can tell from the first few rows of names that we only have data about high schools.
# * There's only a single row for each high school, so each DBN is unique in the SAT data.
#
# We may eventually want to combine the three columns that contain SAT scores -- SAT Critical Reading Avg. Score, SAT Math Avg. Score, and SAT Writing Avg. Score -- into a single column to make the scores easier to analyze.
# Inspecting all datasets available
for key,val in data.items():
    print(data[key].head(5))
# **Observations**:
#
# * Each dataset has the column DBN to interrelate them with one another
# * Some of the data sets appear to contain multiple rows for each school (because the rows have duplicate DBN values). That means we’ll have to do some preprocessing to ensure that each DBN is unique within each data set.
# ## Reading the survey information
# The two survey files are tab-separated and windows-1252 encoded; stack them
# row-wise into a single frame.
all_survey = pd.read_csv("nyc_highschool_data/schools/survey_all.txt", delimiter="\t", encoding='windows-1252')
d75_survey = pd.read_csv("nyc_highschool_data/schools/survey_d75.txt", delimiter="\t", encoding='windows-1252')
survey = pd.concat([all_survey, d75_survey], axis=0)
print(survey.head())
# **Observations**:
#
# * There are over 2000 columns, nearly all of which we don't need. We'll have to filter the data to remove the unnecessary ones. Working with fewer columns will make it easier to print the dataframe out and find correlations within it.
# * The survey data has a dbn column that we'll want to convert to uppercase (DBN). The conversion will make the column name consistent with the other data sets.
#
# Using the publicly available [data dictionary](https://data.cityofnewyork.us/Education/NYC-School-Survey-2011/mnz3-dyi8) to filter out the unnecessary columns
survey_fields = ["DBN", "rr_s", "rr_t", "rr_p", "N_s", "N_t", "N_p",
                 "saf_p_11", "com_p_11", "eng_p_11", "aca_p_11",
                 "saf_t_11", "com_t_11", "eng_t_11", "aca_t_11",
                 "saf_s_11", "com_s_11", "eng_s_11", "aca_s_11",
                 "saf_tot_11", "com_tot_11", "eng_tot_11", "aca_tot_11"]
# Before we filter columns out, we'll want to copy the data from the dbn column into a new column called DBN
survey["DBN"] = survey["dbn"]
# Filtering the dataset
survey = survey.loc[:,survey_fields]
# Assign the dataframe survey to the key survey in the dictionary data
data['survey'] = survey
# ## Fixing interrelation
# When we explored all of the data sets, we noticed that some of them, like class_size and hs_directory, don't have a DBN column. hs_directory does have a dbn column, though, so we can just rename it.
#
# However, class_size doesn't appear to have the column at all. Here are the first few rows of the data set:
data['class_size'].head(5)
# From looking at these rows, we can tell that the DBN in the sat_results data is just a combination of the CSD and SCHOOL CODE columns in the class_size data.
data['hs_directory']['DBN'] = data['hs_directory']['dbn']
# Left-pad a one-digit CSD (community school district) code with '0' so that
# every code is two characters; two-character codes pass through unchanged.
def pad_csd(row):
    code = str(row)
    return code if len(code) == 2 else '0' + code
padded_csd= data["class_size"]["CSD"].apply(pad_csd)
# DBN = zero-padded CSD + school code, matching the format in sat_results.
data["class_size"]['DBN'] = padded_csd+data["class_size"]['SCHOOL CODE']
# Checking progress
data["class_size"].head()
# ## Generating column sat_score
# Convert the SAT Math Avg. Score, SAT Critical Reading Avg. Score, and SAT Writing Avg. Score columns into the sat_score column for ease of analysis
# errors="coerce" turns non-numeric entries into NaN instead of raising.
data['sat_results']['SAT Math Avg. Score'] = pd.to_numeric(data['sat_results']['SAT Math Avg. Score'],errors = "coerce")
data['sat_results']['SAT Critical Reading Avg. Score'] = pd.to_numeric(data['sat_results']['SAT Critical Reading Avg. Score'],errors = "coerce")
data['sat_results']['SAT Writing Avg. Score'] = pd.to_numeric(data['sat_results']['SAT Writing Avg. Score'],errors = "coerce")
# Adding up scores to sat_score column
data['sat_results']['sat_score'] = data['sat_results']['SAT Math Avg. Score']+data['sat_results']['SAT Writing Avg. Score']+data['sat_results']['SAT Critical Reading Avg. Score']
# ## Parsing the location fields
# Parsing the latitude and longitude coordinates for each school to enable us to map the schools and uncover any geographic patterns in the data.
# Extract the latitude from a "Location 1" string whose tail looks like
# "(40.827602, -73.904475)".  Returned as a string; converted to numeric
# in a later cell.
def find_lat(loc):
    inside = re.findall("\(.+\)", loc)[0]
    return inside.split(",")[0].replace("(", "")
# Extract the longitude from a "Location 1" string of the form "... (lat, lon)".
# Note: the returned string keeps the leading space after the comma; it is
# stripped implicitly by pd.to_numeric in a later cell.
def find_lon(loc):
    inside = re.findall("\(.+\)", loc)[0]
    return inside.split(",")[1].replace(")", "")
data["hs_directory"]["lat"] = data["hs_directory"]["Location 1"].apply(find_lat)
data["hs_directory"]["lon"] = data["hs_directory"]["Location 1"].apply(find_lon)
data["hs_directory"]["lon"] = pd.to_numeric(data["hs_directory"]["lon"])
data["hs_directory"]["lat"] = pd.to_numeric(data["hs_directory"]["lat"])
# Progress check
data["hs_directory"].head(5)
# ## Condensation of datasets
#
# class_size, graduation, and demographics data sets are condensed so that each DBN is unique in each, to avoid problems during merging of the datasets.
# Looking at unique values in 'GRADE' column in the class_size dataset
data['class_size']['GRADE'].unique()
# Because we're dealing with high schools, we're only concerned with grades 9 through 12. That means we only want to pick rows where the value in the GRADE column is 09-12
# Looking at unique values in 'PROGRAM TYPE' column in the class_size dataset
data['class_size']['PROGRAM TYPE'].unique()
# Each school can have multiple program types. Because GEN ED is the largest category by far, let's only select rows where PROGRAM TYPE is GEN ED
# Activity:
# +
class_size = data["class_size"]
class_size = class_size.loc[class_size['GRADE'] == '09-12',:]
class_size = class_size.loc[class_size['PROGRAM TYPE'] == 'GEN ED',:]
# -
# Progress check
class_size.loc[:,["GRADE","PROGRAM TYPE"]].describe()
class_size.loc[:,"DBN"].value_counts().sort_values(ascending = False).head(5)
# As we saw when we displayed class_size on the last screen, DBN still isn't completely unique. This is due to the CORE COURSE (MS CORE and 9-12 ONLY) and CORE SUBJECT (MS CORE and 9-12 ONLY) columns
# Looking at unique values in 'CORE COURSE (MS CORE and 9-12 ONLY)' column in the class_size dataset
data['class_size']['CORE COURSE (MS CORE and 9-12 ONLY)'].unique()
# Looking at unique values in 'CORE SUBJECT (MS CORE and 9-12 ONLY)' column in the class_size dataset
data['class_size']['CORE SUBJECT (MS CORE and 9-12 ONLY)'].unique()
# Activity:
# Collapse class_size to one row per school by averaging the numeric columns
# over each school's remaining (GEN ED, grades 09-12) rows.
# Bug fix: the file imports numpy as `np` (never as `numpy`), so the original
# `agg(numpy.mean)` raised NameError; use np.mean, consistent with the
# district aggregation later in this notebook.
class_size = class_size.groupby('DBN').agg(np.mean)
class_size.reset_index(inplace = True)
data['class_size'] = class_size
# In case of the demographics dataset, the only column that prevents a given DBN from being unique is schoolyear. We only want to select rows where schoolyear is 20112012. This will give us the most recent year of data, and also match our SAT results data.
data["demographics"] = data["demographics"][data["demographics"]['schoolyear'] == 20112012]
# The Demographic and Cohort columns are what prevent DBN from being unique in the graduation dataset.
#
# A Cohort appears to refer to the year the data represents, and the Demographic appears to refer to a specific demographic group. In this case, we want to pick data from the most recent Cohort available, which is 2006. We also want data from the full cohort, so we'll only pick rows where Demographic is Total Cohort
data["graduation"] = data["graduation"][data["graduation"]["Cohort"] == "2006"]
data["graduation"] = data["graduation"][data["graduation"]["Demographic"] == "Total Cohort"]
# Convert the Advanced Placement (AP) test scores from strings to numeric values
cols = ['AP Test Takers', 'Total Exams Taken', 'Number of Exams with scores 3 4 or 5']
for item in cols:
    data['ap_2010'][item] = pd.to_numeric(data['ap_2010'][item], errors = "coerce")
# ## Merging the datasets
#
# We'll merge two data sets at a time. Because this project is concerned with determining demographic factors that correlate with SAT score, we'll want to preserve as many rows as possible from sat_results while minimizing null values.
#
# Some of the data sets have a lot of missing DBN values. This makes a left join more appropriate, because we don't want to lose too many rows when we merge.
combined = data["sat_results"]
combined = combined.merge(data["ap_2010"], on="DBN", how="left")
combined = combined.merge(data["graduation"], on="DBN", how="left")
combined.shape
# Now that we've performed the left joins, we still have to merge class_size, demographics, survey, and hs_directory into combined. Because these files contain information that's more valuable to our analysis and also have fewer missing DBN values, we'll use the inner join type.
data['hs_directory'].describe()
to_merge = ["class_size", "demographics", "survey", "hs_directory"]
for m in to_merge:
    combined = combined.merge(data[m], on="DBN", how="inner")
combined.shape
# ## Missing value imputation
#
# Filling the missing values fields columns of numeric datatype with column means,
# Filling the rest of the missing values with 0
combined = combined.fillna(combined.mean())
combined = combined.fillna(0)
# The school district is just the first two characters of the DBN. We can apply a function over the DBN column of combined that pulls out the first two letters and place the substring in new column named 'school_dist'
def get_first_two_chars(string):
    """Return the first two characters — the district code of a DBN."""
    return string[:2]
combined['school_dist']= combined['DBN'].apply(get_first_two_chars)
# ## Exploratory Analysis
#
# Using correlations to infer how closely related a pair column is
correlations = combined.corr()['sat_score']
correlations
# **Observations**:
#
# * total_enrollment has a strong positive correlation with sat_score. This is surprising because we'd expect smaller schools where students receive more attention to have higher scores. However, it looks like the opposite is true -- larger schools tend to do better on the SAT.
# * Both the percentage of females at a school correlate positively with SAT score, whereas the percentage of males correlates negatively. This could indicate that women do better on the SAT than men.
# * Teacher and student ratings of school safety correlate with sat_score
# * There is significant racial inequality in SAT scores
# * The percentage of English language learners at the school (ell_percent, frl_percent) has a strong negative correlation with SAT scores
#
# Analyzing the total_enrollment vs sat_score
# **Enrollment vs. SAT score**
combined.plot(x = "total_enrollment", y = "sat_score", kind = "scatter",
              xlabel = "Total enrollments", ylabel = "SAT score",
              title = "SAT score vs. Enrollment", alpha = 0.3)
# Judging from the plot we just created, it doesn't appear that there's an extremely strong correlation between the two columns.
# Filtering the data to analyse the information for schools with less than 1000 enrollments and less that 1000 SAT scores
low_enrollment = combined[combined['total_enrollment']<1000]
low_enrollment = low_enrollment[low_enrollment['sat_score']<1000]
low_enrollment['School Name']
# We observe that most of the schools that have low enrollment and low SATs have high number of english learners and therefore it is the ell_percent field that correlates strongly with the SAT score
# **English learners vs. SAT score**
# NOTE(review): x is ell_percent but the xlabel says "Total enrollments" —
# the label looks copy-pasted from the previous plot.
combined.plot(x = 'ell_percent', y = 'sat_score', kind = 'scatter',
              xlabel = "Total enrollments", ylabel = "SAT score",
              title = "English learners vs. SAT score", alpha = 0.3)
# Aggregating the combined dataset by district, which will enable us to understand how ell_percent varies district-by-district instead of the unintelligibly granular school-by-school variation.
districts = combined.groupby('school_dist').agg(np.mean)
districts.reset_index(inplace = True)
# Remove DBN since it's a unique identifier, not a useful numerical value for correlation.
survey_fields.remove("DBN")
# **SAT score vs. Survey fields**
fig = plt.figure()
combined.corr().loc['sat_score', survey_fields].plot(kind = "bar",xlabel = "Survey fields",
                                                     ylabel = "SAT score", title = "SAT score vs. Survey fields")
plt.ylabel('sat_score')
# **SAT score vs. Survey field: saf_s_11**
# students' safety rating of their respective school
combined.plot(x='sat_score', y='saf_s_11', kind = "scatter", xlabel = "saf_s_11",
              ylabel = "SAT score", title = "SAT score vs. Survey field: saf_s_11", alpha = 0.3)
# ### Mapping out average score by boroughs
#
# Calculating average score by school district
avg_school_dist = combined.groupby('school_dist').agg(np.mean)
longitudes = avg_school_dist['lon'].tolist()
latitudes = avg_school_dist['lat'].tolist()
# +
# AREA LEFT FOR MAP
# -
# ### Investigating racial difference in SAT scores
# **SAT score vs. Racial background**
races = ['white_per', 'asian_per', 'black_per', 'hispanic_per']
fig = plt.figure()
combined.corr().loc['sat_score', races].plot(kind = "bar", xlabel = "Racial background", ylabel = "SAT score",
                                             title = "SAT score vs. Racial background")
plt.ylabel('sat_score')
# White and asian groups have higher correlation with SAT score whereas black and hispanic groups have negative correlations
#
# Examining SAT scores across all schools with varying percentages of Hispanic students
#
#
# **SAT score vs. Hispanic student percentage**
combined.plot(x='hispanic_per', y='sat_score', kind = "scatter",
              xlabel = "Hispanic background", ylabel = "SAT score",
              title = "SAT score vs. Hispanic student percentage", alpha = 0.3)
# A majority of the points occupy the bottom portion of the graph, signifying that regardless of hispanic student percentage the college has lower SAT scores still, hence hispanic_per is not the only field responsible for the low SAT score
# Examining schools that have a high percentage of hispanic students
high_hispanic = combined[combined['hispanic_per']>95]
high_hispanic['SCHOOL NAME']
# ### Examining significance of gender in SAT scores
gender = ['male_per', 'female_per']
fig = plt.figure()
combined.corr().loc['sat_score', gender].plot(kind = 'bar', xlabel = "Gender", ylabel = "SAT score",
                                              title = "SAT score vs. Gender")
# Although each have very low correlation, it can be observed that females on average have higher correlation whereas males on average have lower
# **SAT score vs. Female Percentages**
combined.plot(x='female_per', y='sat_score', kind = "scatter", xlabel = "Female percentage", ylabel = "SAT score",
              title = "SAT score vs. Female Percentages", alpha = 0.3)
# Schools which have more than 60% female and their SAT score being above 1700
high_female = combined[combined['female_per']>60]
high_SAT_female = high_female[high_female['sat_score']>1700]
high_SAT_female['SCHOOL NAME']
# Examining how the percentage of AP test takers affects the SAT score of an institute
combined['ap_taker_per'] = combined['AP Test Takers'] / combined['total_enrollment']
# **SAT score vs. AP Test Takers Percentage**
combined.plot(x='ap_taker_per', y='sat_score', kind = "scatter", xlabel = "AP Test Takers Percentage", ylabel = "SAT score",
              title = "SAT score vs. AP Test Takers Percentage", alpha = 0.3)
combined.corr().loc['sat_score', 'ap_taker_per']
# Observing the correlation coefficient we can conclude that the number of AP Test takers doesn't affect the average SAT score of an institute
# # To-do
#
# * Determining whether there's a correlation between class size and SAT scores
# * Figuring out which neighborhoods have the best schools
# * If we combine this information with a dataset containing property values, we could find the least expensive neighborhoods that have good schools.
# * Investigating the differences between parent, teacher, and student responses to surveys.
# * Assigning scores to schools based on sat_score and other attributes.
| Case Studies/Python/Analysing the Fairness of SATs 2012/Basics.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/JimKing100/DS-Unit-2-Kaggle-Challenge/blob/master/Kaggle_Challenge_Assignment4e.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + id="MJVb6fS7521u" colab_type="code" colab={}
# Installs
# %%capture
# !pip install --upgrade category_encoders plotly
# + id="WC18XNwu6uVK" colab_type="code" outputId="ec3e5c7a-4d60-46ad-bdc5-3134e1c27ce0" colab={"base_uri": "https://localhost:8080/", "height": 1000}
# Imports
import os, sys
os.chdir('/content')
# !git init .
# !git remote add origin https://github.com/LambdaSchool/DS-Unit-2-Kaggle-Challenge.git
# !git pull origin master
# !pip install -r requirements.txt
os.chdir('module1')
# + id="x7MRA9WvoSpp" colab_type="code" colab={}
# Disable warning
import warnings
warnings.filterwarnings(action='ignore', category=FutureWarning, module='numpy')
# + id="T4O_RRn17vRQ" colab_type="code" colab={}
# Imports
import pandas as pd
import numpy as np
import math
import sklearn
sklearn.__version__
# Import the models
from sklearn.linear_model import LogisticRegressionCV
from sklearn.pipeline import make_pipeline
# Import encoder and scaler and imputer
import category_encoders as ce
from sklearn.preprocessing import StandardScaler
from sklearn.impute import SimpleImputer
# Import random forest classifier
from sklearn.ensemble import RandomForestClassifier
# + id="bMfVT1-Z7F-b" colab_type="code" outputId="8590b21e-dc0b-4299-ecbc-f85589c47c0b" colab={"base_uri": "https://localhost:8080/", "height": 34}
# Import, load data and split data into train, validate and test
train_features = pd.read_csv('../data/tanzania/train_features.csv')
train_labels = pd.read_csv('../data/tanzania/train_labels.csv')
test_features = pd.read_csv('../data/tanzania/test_features.csv')
sample_submission = pd.read_csv('../data/tanzania/sample_submission.csv')
# Guard against a corrupted download: the competition files have fixed shapes.
assert train_features.shape == (59400, 40)
assert train_labels.shape == (59400, 2)
assert test_features.shape == (14358, 40)
assert sample_submission.shape == (14358, 2)
# Load initial train features and labels
from sklearn.model_selection import train_test_split
X_train = train_features
y_train = train_labels['status_group']
# Split the initial train features and labels 80% into new train and new validation
# (stratified so class proportions match; fixed seed for reproducibility).
X_train, X_val, y_train, y_val = train_test_split(
    X_train, y_train, train_size = 0.80, test_size = 0.20,
    stratify = y_train, random_state=42
)
X_train.shape, X_val.shape, y_train.shape, y_val.shape
# + id="x3pMJkj47Zfv" colab_type="code" colab={}
# Wrangle train, validate, and test sets
def wrangle(X):
# Set bins value
bins=20
# Prevent SettingWithCopyWarning
X = X.copy()
# Clean installer
X['installer'] = X['installer'].str.lower()
X['installer'] = X['installer'].str.replace('danid', 'danida')
X['installer'] = X['installer'].str.replace('disti', 'district council')
X['installer'] = X['installer'].str.replace('commu', 'community')
X['installer'] = X['installer'].str.replace('central government', 'government')
X['installer'] = X['installer'].str.replace('kkkt _ konde and dwe', 'kkkt')
X['installer'].value_counts(normalize=True)
tops = X['installer'].value_counts()[:5].index
X.loc[~X['installer'].isin(tops), 'installer'] = 'Other'
# Clean funder and bin
X['funder'] = X['funder'].str.lower()
X['funder'] = X['funder'].str[:3]
X['funder'].value_counts(normalize=True)
tops = X['funder'].value_counts()[:20].index
X.loc[~X['funder'].isin(tops), 'funder'] = 'Other'
# Use mean for gps_height missing values
X.loc[X['gps_height'] == 0, 'gps_height'] = X['gps_height'].mean()
# Bin lga
tops = X['lga'].value_counts()[:10].index
X.loc[~X['lga'].isin(tops), 'lga'] = 'Other'
# Bin ward
tops = X['ward'].value_counts()[:bins].index
X.loc[~X['ward'].isin(tops), 'ward'] = 'Other'
# Bin subvillage
tops = X_train['subvillage'].value_counts()[:10].index
X_train.loc[~X_train['subvillage'].isin(tops), 'subvillage'] = 'Other'
# Clean latitude and longitude
average_lat = X.groupby('region').latitude.mean().reset_index()
average_long = X.groupby('region').longitude.mean().reset_index()
shinyanga_lat = average_lat.loc[average_lat['region'] == 'Shinyanga', 'latitude']
shinyanga_long = average_long.loc[average_lat['region'] == 'Shinyanga', 'longitude']
X.loc[(X['region'] == 'Shinyanga') & (X['latitude'] > -1), ['latitude']] = shinyanga_lat[17]
X.loc[(X['region'] == 'Shinyanga') & (X['longitude'] == 0), ['longitude']] = shinyanga_long[17]
mwanza_lat = average_lat.loc[average_lat['region'] == 'Mwanza', 'latitude']
mwanza_long = average_long.loc[average_lat['region'] == 'Mwanza', 'longitude']
X.loc[(X['region'] == 'Mwanza') & (X['latitude'] > -1), ['latitude']] = mwanza_lat[13]
X.loc[(X['region'] == 'Mwanza') & (X['longitude'] == 0) , ['longitude']] = mwanza_long[13]
# Impute mean for tsh based on mean of source_class/basin/waterpoint_type_group
def tsh_calc(tsh, source, base, waterpoint):
if tsh == 0:
if (source, base, waterpoint) in tsh_dict:
new_tsh = tsh_dict[source, base, waterpoint]
return new_tsh
else:
return tsh
return tsh
temp = X[X['amount_tsh'] != 0].groupby(['source_class',
'basin',
'waterpoint_type_group'])['amount_tsh'].mean()
tsh_dict = dict(temp)
X['amount_tsh'] = X.apply(lambda x: tsh_calc(x['amount_tsh'], x['source_class'], x['basin'], x['waterpoint_type_group']), axis=1)
# Impute mean for the feature based on latitude and longitude
def latlong_conversion(feature, pop, long, lat):
    """Replace an implausible value (<= 1) of *feature* with a local mean.

    Starting from a 0.1-degree box around (lat, long), average *feature*
    over all rows of the module-level frame ``X`` that fall inside the
    box, growing the box by 0.3 degrees per attempt up to a 2-degree
    half-width.  If no box yields a plausible mean, fall back to the
    global mean from ``X_train``.

    NOTE(review): depends on the globals ``X``, ``X_train`` and ``math``
    being in scope when called.
    """
    radius = 0.1
    radius_increment = 0.3
    if pop <= 1:
        pop_temp = pop
        # Widen the search box until a plausible mean (> 1) is found or
        # the radius cap is reached.
        while pop_temp <= 1 and radius <= 2:
            lat_from = lat - radius
            lat_to = lat + radius
            long_from = long - radius
            long_to = long + radius
            df = X[(X['latitude'] >= lat_from) &
                   (X['latitude'] <= lat_to) &
                   (X['longitude'] >= long_from) &
                   (X['longitude'] <= long_to)]
            pop_temp = df[feature].mean()
            # An empty box gives NaN; restore the original value so the
            # loop keeps widening the radius.
            if math.isnan(pop_temp):
                pop_temp = pop
            radius = radius + radius_increment
    else:
        pop_temp = pop
    # Last resort: global training-set mean.
    if pop_temp <= 1:
        new_pop = X_train[feature].mean()
    else:
        new_pop = pop_temp
    return new_pop
# Impute gps_height based on location
#X['population'] = X.apply(lambda x: latlong_conversion('population', x['gps_height'], x['longitude'], x['latitude']), axis=1)
# Impute gps_height based on location
#X['gps_height'] = X.apply(lambda x: latlong_conversion('gps_height', x['gps_height'], x['longitude'], x['latitude']), axis=1)
# quantity & quantity_group are duplicates, so drop quantity_group
X = X.drop(columns='quantity_group')
X = X.drop(columns='num_private')
# return the wrangled dataframe
return X
# + id="BseDmCdj5Cx4" colab_type="code" outputId="ca86bc38-4c3c-4bb7-e7bb-3ae89aeef0e8" colab={"base_uri": "https://localhost:8080/", "height": 179}
# Apply the same cleaning/binning/imputation to both splits.
X_train = wrangle(X_train)
X_val = wrangle(X_val)
# + id="GRfrHKHsFnaL" colab_type="code" colab={}
# Feature engineering
def feature_engineer(X):
    """Add engineered features to *X* in place and return it.

    - ``pump_age``: years between construction and 2013.  Values of
      2013 (i.e. missing construction year 0) are first zeroed, then
      every zero age is bumped to 10.
      NOTE(review): the second assignment overwrites the first, so
      unknown-year pumps end up with age 10, not 0 — confirm intended.
    - ``region_district``: concatenated region/district codes, giving a
      finer-grained location category.
    """
    age = 2013 - X['construction_year']
    age = age.where(age != 2013, 0)
    age = age.where(age != 0, 10)
    X['pump_age'] = age
    X['region_district'] = (
        X['region_code'].astype(str) + X['district_code'].astype(str)
    )
    return X
# + id="ck8Q3F8oGw9b" colab_type="code" colab={}
# Add engineered features (pump_age, region_district) to both splits.
X_train = feature_engineer(X_train)
X_val = feature_engineer(X_val)
# + id="HOVA3GqdUC47" colab_type="code" colab={}
# Encode a feature
def encode_feature(X, y, feature):
    """Attach the target to *X* and add a binary ``functional`` column.

    Parameters
    ----------
    X : pandas.DataFrame, modified in place.
    y : target labels aligned with X's rows.
    feature : name of the categorical column being inspected (retained
        for positional compatibility with existing callers).

    Returns the modified *X*.

    Fixes: the third parameter shadowed the builtin ``str``; two
    statement-expressions (a ``groupby(...).value_counts`` and a column
    selection) whose results were computed and discarded were removed.
    """
    X['status_group'] = y
    # 1 when the pump is functional, 0 otherwise.
    X['functional'] = (X['status_group'] == 'functional').astype(int)
    return X
# + id="mEfqrCtxWhiZ" colab_type="code" colab={}
# Exploratory pass over each categorical feature.  Note that
# encode_feature overwrites the same 'status_group'/'functional'
# columns on every call, so only the final state persists.
train = X_train.copy()
train = encode_feature(train, y_train, 'quantity')
train = encode_feature(train, y_train, 'waterpoint_type')
train = encode_feature(train, y_train, 'extraction_type')
train = encode_feature(train, y_train, 'installer')
train = encode_feature(train, y_train, 'funder')
train = encode_feature(train, y_train, 'water_quality')
train = encode_feature(train, y_train, 'basin')
train = encode_feature(train, y_train, 'region')
train = encode_feature(train, y_train, 'payment')
train = encode_feature(train, y_train, 'source')
train = encode_feature(train, y_train, 'lga')
train = encode_feature(train, y_train, 'ward')
train = encode_feature(train, y_train, 'scheme_management')
train = encode_feature(train, y_train, 'management')
train = encode_feature(train, y_train, 'region_district')
train = encode_feature(train, y_train, 'subvillage')
# + id="O8Y1U6xKHiGW" colab_type="code" outputId="2dc7494e-f1f6-4a1e-9e21-3bf1c1d51c31" colab={"base_uri": "https://localhost:8080/", "height": 165}
# Select the modelling features: the binned categoricals plus every
# numeric column except the id.
categorical_features = ['quantity', 'waterpoint_type', 'extraction_type', 'installer',
                        'funder', 'water_quality', 'basin', 'region', 'payment',
                        'source', 'lga', 'ward', 'scheme_management', 'management',
                        'region_district', 'subvillage']
numeric_features = X_train.select_dtypes('number').columns.drop('id').tolist()
features = categorical_features + numeric_features
# Subset both splits to the chosen features.
X_train = X_train[features]
X_val = X_val[features]
# Logistic-regression baseline: one-hot encode categoricals, scale,
# then cross-validated logistic regression on all cores.
pipeline = make_pipeline (
    ce.OneHotEncoder(use_cat_names=True),
    StandardScaler(),
    LogisticRegressionCV(random_state=42, n_jobs=-1)
)
pipeline.fit(X_train, y_train)
print('Validation Accuracy', pipeline.score(X_val, y_val))
# + id="vq_J0c5GpPx3" colab_type="code" outputId="dcff1da1-2c7a-4bb8-e253-5aead29b3691" colab={"base_uri": "https://localhost:8080/", "height": 1000}
# Random-forest pipeline: one-hot encode the categoricals, scale, then
# fit a 1000-tree forest on all cores.
pipeline = make_pipeline (
    ce.OneHotEncoder(use_cat_names=True),
    StandardScaler(),
    RandomForestClassifier(n_estimators=1000,
                           random_state=42,
                           min_samples_leaf=1,
                           max_features = 'auto',
                           n_jobs=-1,
                           verbose = 1)
)
pipeline.fit(X_train, y_train)
# BUG FIX: the original printed an undefined variable `i` (left over
# from a hyperparameter sweep), which raises NameError at runtime.
# Report the configured min_samples_leaf value instead.
print('\n\nResults for min_samples_leaf', 1)
print('Validation Accuracy', pipeline.score(X_val, y_val))
# + id="FCowPAYHaWnA" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 1000} outputId="2d0a3db4-31d3-4a9f-c749-b1a20f603688"
# Rank features by random-forest importance, labelled with the
# one-hot-encoded column names produced by the fitted pipeline.
model = pipeline.named_steps['randomforestclassifier']
encoder = pipeline.named_steps['onehotencoder']
encoded_columns = encoder.transform(X_train).columns
importances = pd.Series(model.feature_importances_, encoded_columns)
importances.sort_values(ascending=False)
# + id="fwZMpgC0GLX6" colab_type="code" colab={}
# Apply the same feature engineering to the test set so its columns
# match the training data (previously duplicated inline — reusing
# feature_engineer keeps the two code paths consistent).
test_features = feature_engineer(test_features)
# BUG FIX: DataFrame.drop returns a new frame; the original call
# discarded the result, so num_private was never actually dropped.
test_features = test_features.drop(columns=['num_private'])
X_test = test_features[features]
assert all(X_test.columns == X_train.columns)
y_pred = pipeline.predict(X_test)
# + id="6PQd-OKqGPT9" colab_type="code" colab={}
#submission = sample_submission.copy()
#submission['status_group'] = y_pred
#submission.to_csv('/content/submission-01.csv', index=False)
| Kaggle_Challenge_Assignment4e.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# <img src="images/usm.jpg" width="480" height="240" align="left"/>
# # MAT281 - Laboratorio N°04
#
# ## Objetivos del laboratorio
#
# * Reforzar conceptos básicos de reducción de dimensionalidad.
# ## Contenidos
#
# * [Problema 01](#p1)
#
# <a id='p1'></a>
# ## I.- Problema 01
#
#
# <img src="https://www.goodnewsnetwork.org/wp-content/uploads/2019/07/immunotherapy-vaccine-attacks-cancer-cells-immune-blood-Fotolia_purchased.jpg" width="360" height="360" align="center"/>
#
#
# El **cáncer de mama** es una proliferación maligna de las células epiteliales que revisten los conductos o lobulillos mamarios. Es una enfermedad clonal; donde una célula individual producto de una serie de mutaciones somáticas o de línea germinal adquiere la capacidad de dividirse sin control ni orden, haciendo que se reproduzca hasta formar un tumor. El tumor resultante, que comienza como anomalía leve, pasa a ser grave, invade tejidos vecinos y, finalmente, se propaga a otras partes del cuerpo.
#
# El conjunto de datos se denomina `BC.csv`, el cual contiene la información de distintos pacientes con tumores (benignos o malignos) y algunas características del mismo.
#
#
# Las características se calculan a partir de una imagen digitalizada de un aspirado con aguja fina (FNA) de una masa mamaria. Describen las características de los núcleos celulares presentes en la imagen.
# Los detalles se puede encontrar en [<NAME> and <NAME>: "Robust Linear Programming Discrimination of Two Linearly Inseparable Sets", Optimization Methods and Software 1, 1992, 23-34].
#
#
# Lo primero será cargar el conjunto de datos:
# +
import os
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.preprocessing import StandardScaler
from sklearn.decomposition import PCA
# %matplotlib inline
sns.set_palette("deep", desat=.6)
sns.set(rc={'figure.figsize':(11.7,8.27)})
# -
# Load the data and binarize the target: malignant (M) -> 1, benign (B) -> 0.
df = pd.read_csv(os.path.join("data","BC.csv"), sep=",")
df['diagnosis'] = df['diagnosis'] .replace({'M':1,'B':0})
df.head()
# Basado en la información presentada responda las siguientes preguntas:
#
#
# 1. Normalizar para las columnas numéricas con procesamiento **StandardScaler**.
# 2. Realice un gráfico de correlación. Identifique la existencia de colinealidad.
# 3. Realizar un ajuste PCA con **n_components = 10**. Realice un gráfico de la varianza y varianza acumulada. Interprete.
# 4. Devuelva un dataframe con las componentes principales.
# 5. Aplique al menos tres modelos de clasificación. Para cada modelo, calule el valor de sus métricas.
#1
# Standardize the numeric feature columns (everything after id/diagnosis).
scaler = StandardScaler()
df[df.columns[2:].tolist()]=scaler.fit_transform(df[df.columns[2:].tolist()])
df.head()
#2
# Correlation matrix (features plus target) visualised as a heatmap.
corr = df[df.columns[1:]].corr()
sns.heatmap(corr)
# __Respuesta__: A partir del mapa de calor, es posible notar que hay una buena correlación entre el perímetro y el radio del objeto.
# +
#3: fit a 10-component PCA on the standardized features.
features = df.columns[2:].tolist()
x = df.loc[:, features].values
pca = PCA(n_components=10)
principalComponents = pca.fit_transform(x)
# Per-component explained variance (in percent), shown as a scree plot.
percent_variance = np.round(pca.explained_variance_ratio_* 100, decimals =2)
columns = ['PC1', 'PC2', 'PC3', 'PC4','PC5','PC6','PC7','PC8','PC9','PC10']
plt.figure(figsize=(12,4))
plt.bar(x= range(1,11), height=percent_variance, tick_label=columns)
plt.ylabel('Percentate of Variance Explained')
plt.xlabel('Principal Component')
plt.title('PCA Scree Plot')
plt.show()
# +
# Cumulative explained variance: bar i shows PC1 + ... + PCi.
percent_variance_cum = np.cumsum(percent_variance)
columns = ['PC1', 'PC2', 'PC3', 'PC4','PC5','PC6','PC7','PC8','PC9','PC10']
plt.figure(figsize=(12,4))
plt.bar(x= range(1,11), height=percent_variance_cum, tick_label=columns)
plt.ylabel('Percentate of Variance Explained')
plt.xlabel('Principal Component Cumsum')
plt.title('PCA Scree Plot')
plt.show()
## Interpret PCi as PC1+...+PCi, with i=1,...,10 ##
# -
# __Respuesta:__ A partir de la varianza acumulada se puede notar que en la acumulación de los primeros 5 componentes principales ya se tiene un aproximado del 82% de varianza acumulada del total, lo que implica estos componentes principales abarcan gran parte de las características del conjunto de datos, por lo que se trabajara solo con esos cinco y se omitirá el resto
# +
#4
# Build a dataframe with the first five principal components plus the
# diagnosis target.
pca = PCA(n_components=5)
principalComponents = pca.fit_transform(x)
principalDataframe = pd.DataFrame(data = principalComponents, columns = ['PC1', 'PC2','PC3','PC4','PC5'])
targetDataframe = df[['diagnosis']]
newDataframe = pd.concat([principalDataframe, targetDataframe],axis = 1)
newDataframe.head()
# -
#5
# Split the 5-component projection into train/test and compare three
# classifiers on four metrics.
Y= np.ravel(df[['diagnosis']])
X_new = pca.fit_transform(df[df.columns[2:]])
from sklearn.model_selection import train_test_split
X_train, X_test, Y_train, Y_test = train_test_split(X_new, Y, test_size=0.2, random_state = 2)
# +
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import accuracy_score,recall_score,precision_score,f1_score
# Models used: LogisticRegression, RandomForestClassifier, DecisionTreeClassifier
frames=pd.DataFrame({ # per-model metrics table
    'modelo': [],
    'accuracy_score':[],
    'recall_score':[],
    'precision_score':[],
    'f1_score':[]
})
frames['modelo']=['LogisticRegression','RandomForestClassifier','DecisionTreeClassifier'] # model names (row labels)
models=[LogisticRegression(),RandomForestClassifier(random_state=0),DecisionTreeClassifier(random_state=0)]
acc=[]
rec=[]
prec=[]
f1=[]
metrics=[acc,rec,prec,f1]
# Fit each model and score it on the held-out split with all four metrics.
for model in models:
    mod = model
    mod.fit(X_train, Y_train)
    mod_pred = mod.predict(X_test)
    acc.append(accuracy_score(Y_test, mod_pred))
    rec.append(recall_score(Y_test, mod_pred))
    prec.append(precision_score(Y_test, mod_pred))
    f1.append(f1_score(Y_test, mod_pred))
# Copy each metric list into its matching dataframe column (the column
# order matches the order of `metrics`).
i=0
for column in frames.columns[1:]:
    frames[column]=metrics[i]
    i+=1
frames=frames.set_index('modelo')
frames
| labs/laboratorio_10_andres_riveros.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python [conda env:wildfires]
# language: python
# name: conda-env-wildfires-py
# ---
# +
import logging
import logging.config
import warnings
import iris
import matplotlib as mpl
import matplotlib.pyplot as plt
import numpy as np
from IPython.display import HTML
from joblib import Memory, Parallel, delayed
from matplotlib import animation, rc
from tqdm import tqdm, tqdm_notebook
from wildfires.analysis.plotting import cube_plotting, get_cubes_vmin_vmax
from wildfires.data.cube_aggregation import Datasets, get_ncpus, prepare_selection
from wildfires.data.datasets import (
DATA_DIR,
ERA5_DryDayPeriod,
data_map_plot,
get_memory,
)
from wildfires.logging_config import LOGGING
from wildfires.utils import Time, TqdmContext, get_land_mask, match_shape, polygon_mask
# Configure logging, silence a known-noisy iris warning, and set up the
# joblib cache used by the expensive animation step below.
logger = logging.getLogger(__name__)
logging.config.dictConfig(LOGGING)
# tqdm_notebook does not work for some reason
warnings.filterwarnings("ignore", ".*Collapsing a non-contiguous coordinate.*")
memory = get_memory("analysis_ERA5_dry_day_period")
# -
dry_day_period = Datasets(ERA5_DryDayPeriod())
print(dry_day_period.dataset.cube)
# +
# Global min/max over all dimensions, plus per-cell statistics over time.
min_value = dry_day_period.dataset.cube.collapsed(
    ["time", "latitude", "longitude"], iris.analysis.MIN
)
max_value = dry_day_period.dataset.cube.collapsed(
    ["time", "latitude", "longitude"], iris.analysis.MAX
)
mean_periods = dry_day_period.dataset.cube.collapsed("time", iris.analysis.MEAN)
min_periods = dry_day_period.dataset.cube.collapsed("time", iris.analysis.MIN)
max_periods = dry_day_period.dataset.cube.collapsed("time", iris.analysis.MAX)
# Improve performance when accessing lazy data by grouping calculations like this.
iris.cube.CubeList(
    [min_value, max_value, mean_periods, min_periods, max_periods]
).realise_data()
print("Min dry days:\n{}\n{}".format(min_value, min_value.data))
print("Max dry days:\n{}\n{}".format(max_value, max_value.data))
mpl.rcParams["figure.figsize"] = (10, 4)
# Map each per-cell statistic on a log scale.
for data, title in zip(
    (mean_periods, min_periods, max_periods),
    ("Mean Dry Day Period", "Min Dry Day Period", "Max Dry Day Period"),
):
    cube_plotting(data, log=True, title=title, auto_log_title=True)
# +
(
    monthly_dry_day_period,
    mean_dry_day_period,
    climatology_dry_day_period,
) = prepare_selection(dry_day_period.copy(deep=True))
# Define and use relevant masks.
land_mask = ~get_land_mask()
# Define a latitude mask which ignores data beyond 60 degrees, as the precipitation data does not extend to those latitudes.
lat_mask = ~polygon_mask([(180, -60), (-180, -60), (-180, 60), (180, 60), (180, -60)])
# Apply the masks in place: a cell is masked when it is ocean OR lies
# beyond 60 degrees latitude (mask arrays are broadcast over time via
# match_shape).
monthly_dry_day_period.dataset.cube.data.mask |= match_shape(
    land_mask, monthly_dry_day_period.dataset.cube.shape
) | match_shape(lat_mask, monthly_dry_day_period.dataset.cube.shape)
# +
warnings.filterwarnings("ignore", ".*divide by zero.*")
warnings.filterwarnings("ignore", ".*invalid value encountered.*")
mpl.rcParams["figure.figsize"] = (14, 9)
# Define a latitude mask which ignores data beyond 60 degrees, as the precipitation data does not extend to those latitudes.
lat_bounds = monthly_dry_day_period.dataset.cube.coord("latitude").contiguous_bounds()
lon_bounds = monthly_dry_day_period.dataset.cube.coord("longitude").contiguous_bounds()
n_lat = len(lat_bounds) - 1
n_lon = len(lon_bounds) - 1
vmin, vmax = get_cubes_vmin_vmax(monthly_dry_day_period.cubes)
@memory.cache
def get_js_animation(N_frames=monthly_dry_day_period.dataset.cube.shape[0]):
    """Build (and joblib-cache) a JS/HTML animation of the monthly maps.

    One frame per time step of the masked monthly dry-day-period cube;
    all frames share the colour scale (vmin/vmax) computed above.
    """
    log = True
    fig, ax, mesh, suptitle_text = cube_plotting(
        monthly_dry_day_period.dataset.cube[0],
        log=log,
        animation_output=True,
        title="",
        vmin=vmin,
        vmax=vmax,
        transform_vmin_vmax=True,
    )
    # Placeholder title; animate() rewrites it with each frame's date.
    title_text = ax.text(
        0.5, 1.08, "bla", transform=ax.transAxes, ha="center", fontsize=15
    )
    plt.close()  # Prevent display of (duplicate) static figure due to %matplotlib inline
    # N_frames = len(new_dataset)
    # N_frames = 4
    interval = 1000 / 12  # One second per year.
    def init():
        # Blank mesh/title for the first (initialisation) frame.
        mesh.set_array(np.zeros(n_lat * n_lon))
        title_text.set_text("")
        return (mesh,)
    with TqdmContext(unit=" plots", desc="Plotting", total=N_frames) as t:
        def animate(i):
            # Redraw the existing mesh with frame i's data (no new axes
            # or colorbar) and update the title with the frame's date.
            single_time_cube = monthly_dry_day_period.dataset.cube[i]
            _ = cube_plotting(
                single_time_cube,
                log=log,
                ax=ax,
                mesh=mesh,
                animation_output=False,
                new_colorbar=False,
                title="",
            )
            title_text.set_text(
                # Ignore the time, which flip-flops between 00:00:00 and 12:00:00.
                "Dry Day Period "
                + str(single_time_cube.coord("time").cell(0).point)[:10]
            )
            t.update()
            return (mesh, title_text)
        anim = animation.FuncAnimation(
            fig, animate, init_func=init, frames=N_frames, interval=interval, blit=True
        )
        js_output = anim.to_jshtml()
    return js_output
HTML(get_js_animation())
| analyses/ERA5_dry_day_period.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# %matplotlib inline
import numpy as np
import matplotlib.pyplot as plt
from astropy.time import Time
import astropy.units as u
from rms import Planet, Star, Spot, STSP
# +
# Set up the transiting-planet + rotating-star model and generate a
# reference light curve.  Times span +/- 0.025 d around mid-transit at
# one-second cadence.
d = Planet(per=4.049959*u.day, inc=90*u.deg, a=39.68, t0=0,
           rp=(0.3566/100)**0.5, lam=0, ecc=0, w=90)
times = Time(np.arange(-0.025, 0.025, 1/60/60/24), format='jd')
star = Star(planet=d, rotation_period=3.3*u.day, inc_stellar=90, spot_contrast=0.7,
            rho_s=51.1)
# A negligibly small spot yields an effectively spotless reference curve.
tiny_spot = [Spot(89*u.deg, 0*u.deg, 0.00001)]
with STSP(times, star, tiny_spot, quiet=True) as stsp:
    spotless_lc = stsp.generate_lightcurve(n_ld_rings=1000)
# -
spotless_lc.plot()
# +
# Two bands of spots at +/-30 deg latitude, each made of a central row
# plus two longitude-staggered rows offset by dlong/1.5 degrees in
# latitude; spots are spaced every `dlong` degrees in longitude.
r = 0.05
dlong = 8
lat1 = 30*u.deg
lat2 = -30*u.deg
spots = [Spot(lat1, i*u.deg, r) for i in range(0, 360, dlong)]
spots.extend([Spot(lat1+dlong/1.5*u.deg, (i+dlong/2)*u.deg, r) for i in range(0, 360, dlong)])
spots.extend([Spot(lat1-dlong/1.5*u.deg, (i+dlong/2)*u.deg, r) for i in range(0, 360, dlong)])
spots.extend([Spot(lat2, i*u.deg, r) for i in range(0, 360, dlong)])
spots.extend([Spot(lat2+dlong/1.5*u.deg, (i+dlong/2)*u.deg, r) for i in range(0, 360, dlong)])
spots.extend([Spot(lat2-dlong/1.5*u.deg, (i+dlong/2)*u.deg, r) for i in range(0, 360, dlong)])
# +
from matplotlib.patches import Polygon
def add_circle_patch(ax, radius, lat1, lon1, n_points=20, **kwargs):
    """
    Add a polygon approximating a spherical circle of angular radius
    ``radius`` [radians] centred at (``lat1``, ``lon1``) [radians].

    kwargs are passed to matplotlib.patches.Polygon.  Circles that
    straddle the map's +/-180-degree seam are drawn as two patches,
    one per hemisphere copy.

    Conversion formula discovered here:
    http://williams.best.vwh.net/avform.htm#LL
    """
    d = radius
    # Sample the circle at n_points bearings (column vector for broadcasting).
    thetas = np.linspace(0, -2*np.pi, n_points)[:, np.newaxis]
    # Great-circle formula: points at angular distance d from (lat1, lon1).
    lat = np.arcsin(np.sin(lat1) * np.cos(d) + np.cos(lat1) *
                    np.sin(d) * np.cos(thetas))
    dlon = np.arctan2(np.sin(thetas) * np.sin(d) * np.cos(lat1),
                      np.cos(d) - np.sin(lat1) * np.sin(lat))
    # Wrap longitudes into [-pi, pi).
    lon = ((lon1 - dlon + np.pi) % (2*np.pi)) - np.pi
    xy = np.hstack([lon, lat])
    if (xy[:, 0].min() > np.radians(-170)) and (xy[:, 0].max() < np.radians(170)):
        # If circle isn't split between E/W-ern hemispheres:
        polygon = Polygon(xy, True, rasterized=True, **kwargs)
        ax.add_patch(polygon)
    else:
        # If circle is split between E/W-ern hemispheres,
        # plot western hemisphere first
        xy_1 = xy.copy()
        xy_1[:, 0][xy_1[:, 0] > 0] -= 2 * np.pi
        polygon_1 = Polygon(xy_1, True, rasterized=True, **kwargs)
        ax.add_patch(polygon_1)
        # then plot eastern hemisphere
        xy_2 = xy.copy()
        xy_2[:, 0][xy_2[:, 0] < 0] += 2 * np.pi
        polygon_2 = Polygon(xy_2, True, rasterized=True, **kwargs)
        ax.add_patch(polygon_2)
def custom_grid(ax, color='gray'):
    """Draw a latitude/longitude graticule on *ax*.

    Tick positions are fixed multiples of pi; only the minor grid is
    drawn (dotted).  When a non-default color is requested, the x tick
    labels are recolored to match.
    """
    locator_specs = (
        (ax.xaxis.set_major_locator, np.pi / 3 * np.linspace(-2, 2, 5)),
        (ax.xaxis.set_minor_locator, np.pi / 6 * np.linspace(-5, 5, 11)),
        (ax.yaxis.set_major_locator, np.pi / 12 * np.linspace(-5, 5, 11)),
        (ax.yaxis.set_minor_locator, np.pi / 12 * np.linspace(-5, 5, 11)),
    )
    for set_locator, positions in locator_specs:
        set_locator(plt.FixedLocator(positions))
    ax.grid(True, which='minor', color=color, ls=':')
    if color != 'gray':
        for tick_label in ax.get_xticklabels():
            tick_label.set_color(color)
# +
# Draw the spot distribution on a Hammer projection and save it.
projection = 'Hammer'
cmap = plt.cm.Greys # plt.cm.copper
fraction_of_colorbar = 0.7
circle_color = 'k'
# Plot the built-in projections
fig = plt.figure(figsize=(8, 4))
ax = plt.subplot(111, projection=projection.lower())
# Each spot becomes a translucent black circle patch; spot.r is
# converted to an angular radius via arctan.
for spot in spots:
    add_circle_patch(ax, np.arctan(spot.r), np.radians(spot.latitude).value, np.radians(spot.longitude).value,
                     facecolor='k', edgecolor='none', lw=2,
                     alpha=0.3)
custom_grid(ax, color='k')
plt.savefig('spot_map_2.pdf', dpi=200, bbox_inches='tight')
# +
# Generate the spotted light curve with the full spot map.
with STSP(times, star, spots) as stsp:
    spotted_lc = stsp.generate_lightcurve(n_ld_rings=1000)
spotted_lc.plot()
# -
# Residuals of spotted minus spotless flux, plotted and written to disk.
plt.plot(spotted_lc.times.jd, spotted_lc.fluxes - spotless_lc.fluxes)
plt.ylabel('Transit Residuals')
plt.xlabel('Time [d]')
plt.savefig('residuals_2.png', dpi=150, bbox_inches='tight')
np.savetxt('ring2.txt', np.vstack([spotted_lc.times.jd, spotted_lc.fluxes, spotless_lc.fluxes]).T)
| radspots/rings2.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/yvishyst/DS-Unit-1-Sprint-1-Dealing-With-Data/blob/master/LS_DS_111_A_First_Look_at_Data.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + [markdown] id="Okfr_uhwhS1X" colab_type="text"
# # Lambda School Data Science - A First Look at Data
#
#
# + [markdown] id="9dtJETFRhnOG" colab_type="text"
# ## Lecture - let's explore Python DS libraries and examples!
#
# The Python Data Science ecosystem is huge. You've seen some of the big pieces - pandas, scikit-learn, matplotlib. What parts do you want to see more of?
# + id="WiBkgmPJhmhE" colab_type="code" colab={}
# TODO - we'll be doing this live, taking requests
# and reproducing what it is to look up and learn things
# + [markdown] id="lOqaPds9huME" colab_type="text"
# ## Assignment - now it's your turn
#
# Pick at least one Python DS library, and using documentation/examples reproduce in this notebook something cool. It's OK if you don't fully understand it or get it 100% working, but do put in effort and look things up.
# + id="TGUS79cOhPWj" colab_type="code" colab={}
import requests
from bs4 import BeautifulSoup
# + id="l9U-TV9laTHh" colab_type="code" colab={}
# Fetch the site's front page.
url = 'https://www.zerohedge.com'
page = requests.get(url)
# + id="blYbPfnbavyU" colab_type="code" outputId="4179343d-5509-4293-8c41-92ff061c61ff" colab={"base_uri": "https://localhost:8080/", "height": 34}
page
# + id="zJmFgEnbceel" colab_type="code" colab={}
# Parse the HTML response body.
soup = BeautifulSoup(page.text,"html.parser")
# + id="FTzFZ5sodUAo" colab_type="code" colab={}
# Headline metadata spans; the 'content' attribute carries the title text.
newstags = soup.findAll("span",{"class":"rdf-meta hidden"})
newstags
# + id="rj9Gc1AIel5l" colab_type="code" outputId="28915f46-f01e-4543-8b21-136344ce0c47" colab={"base_uri": "https://localhost:8080/", "height": 392}
newstags
# + id="07gtOHHHrRtW" colab_type="code" outputId="c24c99e4-5156-4f34-a56b-0af8ce33fc7b" colab={"base_uri": "https://localhost:8080/", "height": 372}
for news in newstags:
    print(news['content'])
# + [markdown] id="BT9gdS7viJZa" colab_type="text"
# ### Assignment questions
#
# After you've worked on some code, answer the following questions in this text block:
#
# 1. Describe in a paragraph of text what you did and why, as if you were writing an email to somebody interested but nontechnical.
#
# 2. What was the most challenging part of what you did?
#
# 3. What was the most interesting thing you learned?
#
# 4. What area would you like to explore with more time?
#
#
#
# + id="TQGZekHEaudq" colab_type="code" colab={}
# + [markdown] id="4wdvQynVsJfY" colab_type="text"
# Today, I looked at a Jupyter notebook which was published to show how data can be scraped from the web.
#
# The notebook talks about scraping data about a professor's ratings using the library BeautifulSoup.
#
# For my assignment, I decided to scrape my favorite website "www.zerohedge.com" and find the headlines of the news articles on the main page.
#
# For this I used the BeautifulSoup library as mentioned in the notebook and proceeded in the same way as done in the notebook.
#
#
# The most challenging part of this assignment was figuring out how to extract the content headlines, as the website used in the notebook was not similar to the website I was scraping. I had to try several different methods to get the correct content.
#
#
# The interesting thing I learned was about the library itself: it makes it easy to scrape a website and get the exact data we are looking for.
#
# With more time, I want to scrape a financial website where financial results are published in tabular format. Then, using data science, I want to get the financial results of all companies and analyse the results using machine learning.
# + [markdown] id="_XXg2crAipwP" colab_type="text"
# ## Stretch goals and resources
#
# Following are *optional* things for you to take a look at. Focus on the above assignment first, and make sure to commit and push your changes to GitHub (and since this is the first assignment of the sprint, open a PR as well).
#
# - [pandas documentation](https://pandas.pydata.org/pandas-docs/stable/)
# - [scikit-learn documentation](http://scikit-learn.org/stable/documentation.html)
# - [matplotlib documentation](https://matplotlib.org/contents.html)
# - [Awesome Data Science](https://github.com/bulutyazilim/awesome-datascience) - a list of many types of DS resources
#
# Stretch goals:
#
# - Find and read blogs, walkthroughs, and other examples of people working through cool things with data science - and share with your classmates!
# - Write a blog post (Medium is a popular place to publish) introducing yourself as somebody learning data science, and talking about what you've learned already and what you're excited to learn more about.
| LS_DS_111_A_First_Look_at_Data.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Testing of the `switch` Extended GEKKO Functionality #
import numpy as np
import matplotlib.pyplot as plt
from gekko import GEKKO
from aquaponics.gekko_extensions import register_extensions
# ## One Switch ##
# +
# Drive x linearly from 0 to 1 and compare the extended m.switch output
# (y) against a hand-built logistic sigmoid (sig) centred at x = 0.5.
# NOTE(review): exact m.switch argument semantics come from
# aquaponics.gekko_extensions — confirm before relying on them.
m = register_extensions(GEKKO())
tf = 1
steps = tf * 100 + 1
m.time = np.linspace(0,tf,steps)
x = m.Var(value=0)
xright = m.Var(value=1)
y = m.Var(value=0)
sig = m.Var(value=0)
k = 100  # logistic steepness shared by both formulations
m.Equation(x.dt() == 1)
m.Equation(xright == 1 - x)
m.Equation(y == m.switch(x, xright, x, .5, k=k))
m.Equation(sig == 1 / (1 + m.exp(-k * (x - 0.5))))
m.options.IMODE = 4  # dynamic simulation
m.solve(disp=False)
# +
# %matplotlib inline
# Top panel: x, 1 - x and the switched output y; bottom panel: the raw
# logistic blend for comparison.
plt.figure(figsize=(12,6))
ax = plt.subplot(211)
plt.plot(m.time, x, label='$x$', linestyle='--')
# BUG FIX: this series is `xright`, defined above as 1 - x; the legend
# label previously read '0.5 - x'.
plt.plot(m.time, xright, label='$1 - x$', linestyle='-.')
plt.plot(m.time, y, label='$y$')
plt.legend()
plt.grid()
plt.subplot(212, sharex=ax)
plt.plot(m.time, sig, label='Logistics Switch')
plt.grid()
plt.legend()
plt.xlim(0, tf)
plt.xlabel('Time')
# ## Two Switches ##
# +
# Chain two switches around a growing signal: x grows exponentially
# from 0.2; yup applies a switch at 0.7 and y applies another at 0.3.
# NOTE(review): exact m.switch argument semantics come from
# aquaponics.gekko_extensions — confirm the reading above.
m = register_extensions(GEKKO())
tf = 2
steps = tf * 100 + 1
m.time = np.linspace(0,tf,steps)
x = m.Var(value=0.2)
y = m.Var(value=0.3)
k = 100  # defined but unused in this cell (switch uses its default steepness)
m.Equation(x.dt() == x)
yup = m.Intermediate(m.switch(x, 0.7, x, .7))
m.Equation(y == m.switch(0.3, yup, yup, 0.3))
m.options.IMODE = 4
m.solve(disp=False)
# +
# %matplotlib inline
# Plot the driving signal x and the doubly-switched output y.
plt.figure(figsize=(12,4))
plt.plot(m.time, x, label='$x$', linestyle='--')
plt.plot(m.time, y, label='$y$')
# FIX: plt.legend() was called twice in the original cell; once is enough.
plt.legend()
plt.grid()
plt.xlim(0, tf)
plt.xlabel('Time')
# -
| aquaponics-master/notebooks/Test Switch.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/albisbub/dighum/blob/master/_notebooks/2020-07-28-Meeting-Week-7.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + [markdown] id="ZwqQCAVd5bEK" colab_type="text"
# # JATA Tools V1
# > Data Fetchers for Interactivitys: AJHS Records, Finding Guides, and Loeb Images.
#
# - toc:true
# - branch: master
# - badges: true
# - comments: true
# - author: Blaise
# - permalink: /JATA1/
# - categories: [fastpages, jupyter, dev, wip, check-in]
#
#
#
# + id="88vIYTX3LnEM" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 158} outputId="d1703391-0222-42a6-c73c-a3a37b15dc8a"
#hide
# !pip install requests
# !pip install selenium
# + id="uVLheGv2LnER" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 55} outputId="4ba91324-03ae-44cd-9c8c-870d200d5af8"
#hide
# !pip install beautifulsoup4
# + id="MNd4cMCqLnEU" colab_type="code" colab={}
#hide
import numpy as np
import pandas as pd
import requests
from bs4 import BeautifulSoup
import bs4
import lxml.etree as xml
import urllib.request
import re
from pandas.io.html import read_html
from selenium import webdriver
from timeit import default_timer as timer
#hide
def striplist(items):
    """Extract the inner text of simple one-level HTML tags.

    For each element, take the text between the first '>' and the next
    '<' (e.g. '<a>hello</a>' -> 'hello').  Elements are coerced with
    ``str`` first, so BeautifulSoup tags work too.

    Raises IndexError for an element containing no '>' (unchanged from
    the original behavior).

    Fixes: the parameter shadowed the builtin ``list``; the manual
    append loop is now a comprehension and dead commented-out code was
    removed.
    """
    return [str(item).split('>')[1].split('<')[0] for item in items]
def find_between(s, first, last):
    """Return the substring of *s* between markers *first* and *last*.

    - Both markers found: the text strictly between them.
    - Only *first* found: everything after it (to end of string).
    - *first* missing: print the failure (as before) and return the
      sentinel string 'NA'.

    Fixes: *first* is no longer searched twice, and the overly broad
    ``except BaseException`` handler is narrowed to ``ValueError`` —
    the only exception ``str.index`` raises here.
    """
    try:
        start = s.index(first) + len(first)
    except ValueError as e:
        print(e, first, last)
        return 'NA'
    try:
        return s[start:s.index(last, start)]
    except ValueError:
        # *last* not found: fall back to the tail of the string.
        return s[start:]
# + [markdown] id="pRDubhFFAp7x" colab_type="text"
# # Center For Jewish History Archives Scraper
# - Built in support for AJHS
# - Can be easily used for any repo on CJH's archive space
# - Could also be used in other archivespace scraping situations.
#
# + id="uKG7__Dayekd" colab_type="code" colab={}
#collapse-hide
class CJH_Archives:
    """Scraper for Center for Jewish History ArchivesSpace repositories.

    Built-in support for AJHS (CJH repository 3). The URLs follow the
    generic ArchivesSpace search layout, so other repositories can be
    supported by swapping the search URLs.
    """

    def __init__(self, repo):
        # Repository short code, e.g. 'ajhs'.
        self.repo = repo

    def scrape_all_records(self, object_type='records', start_page=1, stop_after_pages=0):
        """Crawl the search result pages and scrape every listed object.

        BUGFIX: this was defined without ``self`` and invoked as a bare
        name from get_meta_data (a NameError at runtime); it is now a
        proper instance method.

        :param object_type: ``'records'`` (individual objects) or
            ``'collections'`` (finding aids).
        :param start_page: requested first results page (values < 1 are
            clamped to 1).
        :param stop_after_pages: crawling covers pages 1..stop_after_pages-1.
            NOTE(review): despite the notebook text, 0 does NOT mean "all
            pages" -- the loop bound below reduces to exactly
            stop_after_pages; behaviour preserved from the original.
        :returns: pandas.DataFrame with one row per scraped object.
        """
        if start_page <= 0:
            print("Must start at minimum of page 1")
            start_page = 1
        page = start_page

        # Search URLs minus the trailing page number ("headless").
        if object_type.upper() == 'RECORDS':
            print("Scraping All Individual Records")
            headless_url = "https://archives.cjh.org/repositories/3/objects?q%5B%5D=%2A&op%5B%5D=OR&field%5B%5D=keyword&from_year%5B%5D=&to_year%5B%5D=&limit=digital_object,archival_object&sort=title_sort%20asc&page="
        elif object_type.upper() == 'COLLECTIONS':
            print("Scraping Collections (Finding Aids)")
            headless_url = "https://archives.cjh.org/repositories/3/resources?q[]=%2A&op[]=&field[]=title&from_year[]=&to_year[]=&limit=resource&sort=title_sort%20asc&page="
        # Unknown object types raise NameError on the next line, as in the
        # original implementation.
        base_URL = str(headless_url + str(page))

        def scrape_record(name, link, web_page, object_type):
            """Extract labelled fields from one object/collection page.

            Returns a tuple of (label, value) pairs. Expected fields that
            are absent from the page are emitted with an empty string so
            every record contributes the same set of columns.
            """
            # Breadcrumb trail (collection hierarchy). Collected but not
            # currently included in the returned row.
            # BUGFIX: the original reused the ``link`` parameter as the
            # loop variable here, so the stored 'Link' was the last
            # breadcrumb's href instead of the record's own URL.
            breadcrumbs = web_page.find_all('ul', {'class': 'breadcrumb'})[0].find_all('a')
            location_tupes = []
            for crumb in breadcrumbs:
                crumb_href = str(crumb).split('"')[1]
                crumb_name = (str(crumb).split('>')[1]).split('<')[0]
                location_tupes.append((crumb_name, crumb_href))

            # Plain text of the two page sections that hold the fields.
            subnotes = web_page.find_all('div', {'class': 'upper-record-details'})[0].text
            acord = web_page.find_all('div', {'class': 'acc_holder clear'})[0].text

            div_data_1 = [("Name", name), ("Link", link)]

            # Headings that may appear in each section, in page order.
            if object_type.upper() == 'RECORDS':
                possible_fields_1 = [
                    "Scope and Contents",
                    "Dates",
                    "Language of Materials",
                    "Access Restrictions",
                    "Extent",
                ]
                possible_fields_2 = [
                    "Related Names",
                    "Digital Material",
                    "Physical Storage Information",
                    "Repository Details",
                ]
            elif object_type.upper() == 'COLLECTIONS':
                possible_fields_1 = [
                    "Scope and Content Note",
                    "Dates",
                    "Creator",
                    "Access Restrictions",
                    "Use Restrictions",
                    "Conditions Governing Access",
                    "Conditions Governing Use",
                    "Extent",
                    "Language of Materials"
                ]
                possible_fields_2 = [
                    "Additional Description",
                    "Subjects",
                    "Related Names",
                    "Finding Aid & Administrative Information",
                    'Physical Storage Information',
                    'Repository Details',
                ]

            # Presence flags; absent fields get an empty value up front.
            present_1 = []
            for field in possible_fields_1:
                found = field in str(subnotes)
                if not found:
                    div_data_1.append((field, ''))
                present_1.append(found)
            present_2 = []
            for field in possible_fields_2:
                found = field in str(acord)
                if not found:
                    div_data_1.append((field, ''))
                present_2.append(found)

            # Keep only the headings that actually occur. BUGFIX: replaces
            # the original numpy boolean-mask indexing -- numpy was never
            # imported in this notebook, so that code raised NameError.
            filtered1 = [f for f, found in zip(possible_fields_1, present_1) if found]
            filtered2 = [f for f, found in zip(possible_fields_2, present_2) if found]

            # A field's value is the text between its heading and the next
            # present heading; '$$$' never occurs on the page, so the last
            # field runs to the end of the section text.
            for indexer, field in enumerate(filtered1):
                try:
                    nxt = filtered1[indexer + 1]
                except IndexError:
                    nxt = '$$$'
                value = find_between(subnotes, field, nxt)
                value = value.replace('\n', ' ').strip().replace('\t', ' ')
                div_data_1.append((field, value))
            for indexer, field in enumerate(filtered2):
                try:
                    # BUGFIX: the original indexed filtered1 here, so the
                    # accordion fields were delimited by headings from the
                    # wrong section.
                    nxt = filtered2[indexer + 1]
                except IndexError:
                    nxt = '$$$'
                value = find_between(acord, field, nxt)
                value = value.replace('\n', ' ').strip().replace('\t', ' ')
                div_data_1.append((field, value))

            return tuple(div_data_1)

        # Read the first results page to discover how many pages exist.
        URL = base_URL
        web_page = BeautifulSoup(requests.get(URL, {}).text, "lxml")
        pagnation = web_page.find_all('ul', {'class': 'pagination'})[0].find_all('li')

        pageList = []
        s_pages = []  # flat (label, value) pairs accumulated over all pages
        for item in pagnation:
            pageList.append(str(item).split('>')[2].split('<')[0])
        numeric_pages = []
        for label in pageList:
            try:
                numeric_pages.append(int(label))
            except ValueError:
                pass  # 'Previous' / 'Next' / ellipsis entries
        last_page__ = max(numeric_pages)
        # NOTE(review): algebraically this is just stop_after_pages; kept
        # verbatim to preserve the original paging behaviour.
        __lastPage = last_page__ - (last_page__ - stop_after_pages)
        print()

        page_counter = 1
        while page_counter < __lastPage:
            row_list = []
            print("Scraping Page", page_counter)
            URL = str(headless_url + str(page_counter))
            web_page = BeautifulSoup(requests.get(URL, {}).text, "lxml")

            # Each result is an <h3> wrapping a link to the record page.
            tupleList = []
            for h3 in web_page.find_all('h3'):
                try:
                    rec_link = ((str(h3).split('href="')[1]).split('"'))[0]
                    rec_name = (str(h3).split('">'))[1].split("</a")[0]
                    tupleList.append((rec_name, str("https://archives.cjh.org" + rec_link), rec_link))
                except BaseException as e:
                    # <h3> elements without a link land here; skip them.
                    print(e, h3)
            page_counter += 1

            archIndex = pd.DataFrame.from_records(tupleList, columns=['Names', 'Link', 'Location'])
            counter = 0
            for row in archIndex.itertuples():
                counter += 1
                web_page = BeautifulSoup(requests.get(row.Link, {}).text, "lxml")
                record_row = scrape_record(row.Names, row.Link, web_page, object_type.upper())
                row_list.extend(record_row)
                print("Record: ", counter, row.Link)
            s_pages.extend(row_list)

        # Fold the flat (label, value) pairs into label -> [values] columns.
        d = {}
        for x, y in s_pages:
            d.setdefault(x, []).append(y)
        df = pd.DataFrame.from_records(d).drop_duplicates()

        if object_type.upper() == 'RECORDS':
            # Split the en-dash separated date range into two columns.
            df[['Date_1', 'Date_2']] = (df['Dates'].str.split('–', n=1, expand=True))
        else:
            # Collections expose use/access terms under two alternative
            # headings; merge each pair into a single column.
            df['Use Terms'] = df['Use Restrictions'] + df['Conditions Governing Use']
            df['Access Terms'] = df['Access Restrictions'] + df['Conditions Governing Access']
            dropThese = [
                'Use Restrictions',
                'Conditions Governing Use',
                'Access Restrictions',
                'Conditions Governing Access',
            ]
            df.drop(columns=dropThese, inplace=True)
        return df

    def get_meta_data(self, object_type, page_to_start_at, maximum_pages_to_scrape):
        """Scrape the configured repository and cache the result DataFrame.

        Only the 'ajhs' repository is implemented; anything else prints a
        placeholder and returns None.
        """
        if self.repo.upper() == 'AJHS':
            print('Creating CJHA Scraper Object for AJHS')
            # BUGFIX: originally called scrape_all_records as a bare name,
            # which raised NameError at runtime.
            self.meta_df = self.scrape_all_records(object_type, page_to_start_at, maximum_pages_to_scrape)
            return self.meta_df
        else:
            print("WIP WIP WIP WIP WIP WIP")
# + [markdown] id="ncaad6rQ34Ff" colab_type="text"
# ## Building AJHS Archive Datasets
#
# The below line of code can be used to scrape the archive for a given number of pages (input 0 for all records). There are two object types, records and collections. Collections are digitized finding aids and records are all contained in some sort of collection. Some are under multiple collections. The below lines of code generate dataframes for the first 3 pages of records and collections
# + id="u9K5i1zA38Ve" colab_type="code" colab={}
#collapse-hide
# # %%capture
# records: scrape the first 3 result pages of individual AJHS records
# (network access required; each record page is fetched individually)
ajhs_recs = CJH_Archives('ajhs').get_meta_data('records', 1, 3)
# collections: scrape the first 3 result pages of finding aids
ajhs_cols= CJH_Archives('ajhs').get_meta_data('collections', 1, 3)
# + [markdown] id="qx61lMQG40yk" colab_type="text"
# ## Output For Records
# + id="yT218QX5ET94" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 1000} outputId="b2f2fd08-7c88-41f7-a388-8ade234faead"
#hide-input
ajhs_recs
# + [markdown] id="mnfpgurf5qiP" colab_type="text"
# ## Output for Collections
# + id="N6RJ8UNJ5o5-" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 1000} outputId="3d86d653-01ad-427e-db01-00484a12d952"
#hide-input
ajhs_cols
# + [markdown] id="9fUo_NA8yKf3" colab_type="text"
# # Loeb Data Scraper
# + [markdown] id="x990M4Fu7_8Q" colab_type="text"
# The [Loeb data scraper](https://loebjewishportraits.com) fetches metadata and can download images for paintings, silhouettes, and photographs from the archive (or all of the above).
# + id="A0hOXCi93sem" colab_type="code" colab={}
#collapse-hide
class loeb:
    """
    Scraper for the Loeb Jewish portrait database (loebjewishportraits.com).

    The constructor takes one argument: the type of data to retrieve.
    The input should be one of 'paintings', 'silhouettes', 'photographs',
    or 'all'. Scraped per-portrait Label/Value tables accumulate in
    ``self.list_of_dfs`` and (image_url, table) pairs in ``self.image_cache``.
    """
    def __init__(self, data_set='paintings'):
        def scrape_loeb(URL):
            # Fetch a portfolio page and collect, for every work, the
            # detail-page link and the image link.
            requests.get(URL)  # NOTE(review): result unused; the page is fetched again below
            web_page = bs4.BeautifulSoup(requests.get(URL, {}).text, "lxml")
            table = web_page.find_all('portfolio')  # NOTE(review): unused
            div = web_page.find(id="portfolio")     # NOTE(review): unused
            linkList = web_page.find_all('div',{'class':'work-info'})
            df_dict = []
            for links in linkList:
                twolinks = links.find_all('a', href=True)
                details = str(twolinks[0]).split('"')[1]  # detail-page href
                img = str(twolinks[1]).split('"')[3]      # image href
                new_df_tuple = {'info_link':details, 'img_link':img}
                df_dict.append(new_df_tuple)
            listOfDfs = []
            counter = 0
            df = pd.DataFrame.from_records(df_dict)
            # Visit each detail page and build a Label/Value table per work.
            for i in df.itertuples():
                img = i.img_link
                info = i.info_link
                profile = bs4.BeautifulSoup(requests.get(info, {}).text, "lxml")
                # Full-resolution image URL from the profile page.
                img = str(profile.find_all('img',src=True)[0]).split('"')[3]
                a = profile.find_all('h4')  # field labels
                b = profile.find_all("h3")  # field values
                # The second 'viewzoom' anchor points at the biography page.
                linkts = str(profile.find_all('a',{'id':'viewzoom'},href=True)[1]).split('"')[1]
                def scrape_bio_loeb(url):
                    """Return (plain_text, lightly_stripped) biography text;
                    'Lorem ipsum' placeholder bios are blanked out."""
                    bio = bs4.BeautifulSoup(requests.get(url, {}).text, "lxml")
                    abc=str(bio.find_all('p')[1]).replace("<p>", " ")
                    abcd=(str(abc).replace('</p>', " "))
                    bio_text = str(str(abcd.replace('<i>',' ')).replace("</i>",' '))
                    s = bio_text
                    bio_plain = re.sub(r'<.+?>', '', s)
                    if 'Lorem ipsum dolor sit amet,' in bio_plain:
                        bio_plain = ''
                    if "Lorem ipsum dolor sit amet," in s:
                        s = ''
                    return bio_plain, s
                bio__ = scrape_bio_loeb(linkts)
                headers4 = striplist((a))
                headers4_ = ['Name']
                # NOTE(review): this loop reuses ``i``, shadowing the row
                # variable of the outer itertuples loop; harmless because
                # the row fields were already copied out above -- confirm.
                for i in headers4:
                    headers4_.append(i)
                headers3 = striplist( b)
                # Drop the trailing label, then append the biography columns.
                headers4_ = headers4_[:-1]
                headers4_.append('Bio_Plain')
                headers3.append(bio__[0])
                headers4_.append('Bio_Links')
                headers3.append(bio__[1])
                df1 = pd.DataFrame({'Label':headers4_ , 'Value': headers3})
                self.image_cache.append((img, df1))
                listOfDfs.append(df1)
                counter+=1
            self.list_of_dfs.extend(listOfDfs)
        # Shared state must exist before scrape_loeb is first called.
        self.list_of_dfs = []
        self.image_cache = []
        if data_set.upper() == 'ALL':
            data_options = ['paintings', 'silhouettes', 'photographs']
            for i in data_options:
                print(i)
                URL = str("http://loebjewishportraits.com/" + i + '/')
                scrape_loeb(URL)
        else:
            try:
                URL = str("http://loebjewishportraits.com/" + data_set + '/')
                scrape_loeb(URL)
            except BaseException as e:
                print(e)
                print("Could not find a data set for: ", data_set, "Make sure you input either 'paintings', 'silhouettes', or 'photographs'!")
    def get_meta_data(self, export=False):
        """
        Returns a meta dataframe with each painting as a row and the
        scraped labels as columns.

        :param export: unused placeholder (was intended for csv/excel export).
        """
        listy = self.list_of_dfs
        # Transpose each Label/Value table so labels become columns.
        transposed = [thing.transpose() for thing in listy]
        cc = 1          # NOTE(review): unused
        newList = []    # NOTE(review): unused
        for i in transposed:
            new_cols = (i.iloc[0])
            i.columns = new_cols
            i.drop(i.index[0], inplace= True)
        long_df_of_entrys = pd.concat(transposed)
        # NOTE(review): set_index is not in-place and its result is
        # discarded, so the returned frame keeps the default index.
        long_df_of_entrys.set_index('Name')
        return long_df_of_entrys.reset_index()
    def download_images(self):
        """Download every cached image to the current directory, naming
        each file after the work's 'Name' value."""
        def download_image(link,filename):
            urllib.request.urlretrieve(link, filename)
        for i in self.image_cache:
            name = (i[1].Value.iloc[0])
            fileName = str(name + '.jpg')
            try:
                download_image(i[0],fileName)
                print('Saved', fileName, 'to current directory')
            except BaseException as e:
                print("Could not download:", fileName, "Error:",e)
# + [markdown] id="ezAQVAsJ_J-Z" colab_type="text"
# ## Scraping Meta Data and Download Locations for Selected Image Type
# + id="LHjrmYTQLnEX" colab_type="code" colab={}
# Scrape metadata for the default data set ('paintings'); requires network access.
paintings = loeb()
# + [markdown] id="KCZ7FBaRBrLl" colab_type="text"
# ## Building a MetaData Dataset for the Paintings
# + id="RfnHEHfIwiUr" colab_type="code" colab={}
# One row per painting, scraped labels as columns.
meta_data = paintings.get_meta_data()
# + [markdown] id="ZAXkmKxWB_p4" colab_type="text"
# ## Output For Painting MetaData
# + id="5o4oRwpZwj85" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 1000} outputId="6a2f5652-8930-41bb-b4f3-ef8f1d3b4814"
#hide-input
meta_data
# + [markdown] id="n7lpZgMEF6bC" colab_type="text"
# ## Batch Downloading Paintings (Takes a while!)
# + id="bKpRoMDg3QlT" colab_type="code" colab={}
# paintings.download_images()
| _notebooks/2020-07-28-Meeting.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Introduction
#
# You've built up your SQL skills enough that the remaining hands-on exercises will use different datasets than you see in the explanations. If you need to get to know a new dataset, you can run a couple of **SELECT** queries to extract and review the data you need.
#
# The next exercises are also more challenging than what you've done so far. Don't worry, you are ready for it!
#
# Run the code in the following cell to get everything set up:
# Set up feedback system
from learntools.core import binder
binder.bind(globals())
from learntools.sql.ex4 import *
print("Setup Complete")
# The World Bank has made tons of interesting education data available through BigQuery. Run the following cell to see the first few rows of the `international_education` table from the `world_bank_intl_education` dataset.
# +
from google.cloud import bigquery
# Create a "Client" object
# (authenticates with the ambient Google Cloud credentials of the notebook)
client = bigquery.Client()
# Construct a reference to the "world_bank_intl_education" dataset
dataset_ref = client.dataset("world_bank_intl_education", project="bigquery-public-data")
# API request - fetch the dataset
dataset = client.get_dataset(dataset_ref)
# Construct a reference to the "international_education" table
table_ref = dataset_ref.table("international_education")
# API request - fetch the table
table = client.get_table(table_ref)
# Preview the first five lines of the "international_education" table
client.list_rows(table, max_results=5).to_dataframe()
# -
# # Exercises
#
# The value in the `indicator_code` column describes what type of data is shown in a given row.
#
# One interesting indicator code is `SE.XPD.TOTL.GD.ZS`, which corresponds to "Government expenditure on education as % of GDP (%)".
#
# ### 1) Government expenditure on education
#
# Which countries spend the largest fraction of GDP on education?
#
# To answer this question, consider only the rows in the dataset corresponding to indicator code `SE.XPD.TOTL.GD.ZS`, and write a query that returns the average value in the `value` column for each country in the dataset between the years 2010-2017 (including 2010 and 2017 in the average).
#
# Requirements:
# - Your results should have the country name rather than the country code. You will have one row for each country.
# - The aggregate function for average is **AVG()**. Use the name `avg_ed_spending_pct` for the column created by this aggregation.
# - Order the results so the countries that spend the largest fraction of GDP on education show up first.
#
# In case it's useful to see a sample query, here's a query you saw in the tutorial (using a different dataset):
# ```
# # Query to find out the number of accidents for each day of the week
# query = """
# SELECT COUNT(consecutive_number) AS num_accidents,
# EXTRACT(DAYOFWEEK FROM timestamp_of_crash) AS day_of_week
# FROM `bigquery-public-data.nhtsa_traffic_fatalities.accident_2015`
# GROUP BY day_of_week
# ORDER BY num_accidents DESC
# """
# ```
# +
# Your code goes here
country_spend_pct_query = """
SELECT _____
FROM `bigquery-public-data.world_bank_intl_education.international_education`
WHERE ____
GROUP BY ____
ORDER BY ____
"""
# Set up the query (cancel the query if it would use too much of
# your quota, with the limit set to 1 GB)
safe_config = bigquery.QueryJobConfig(maximum_bytes_billed=10**10)
country_spend_pct_query_job = client.query(country_spend_pct_query, job_config=safe_config)
# API request - run the query, and return a pandas DataFrame
country_spending_results = country_spend_pct_query_job.to_dataframe()
# View top few rows of results
print(country_spending_results.head())
# Check your answer
q_1.check()
# -
# For a hint or the solution, uncomment the appropriate line below.
# +
#q_1.hint()
#q_1.solution()
# -
# ### 2) Identify interesting codes to explore
#
# The last question started by telling you to focus on rows with the code `SE.XPD.TOTL.GD.ZS`. But how would you find more interesting indicator codes to explore?
#
# There are 1000s of codes in the dataset, so it would be time consuming to review them all. But many codes are available for only a few countries. When browsing the options for different codes, you might restrict yourself to codes that are reported by many countries.
#
# Write a query below that selects the indicator code and indicator name for all codes with at least 175 rows in the year 2016.
#
# Requirements:
# - You should have one row for each indicator code.
# - The columns in your results should be called `indicator_code`, `indicator_name`, and `num_rows`.
# - Only select codes with 175 or more rows in the raw database (exactly 175 rows would be included).
# - To get both the `indicator_code` and `indicator_name` in your resulting DataFrame, you need to include both in your **SELECT** statement (in addition to a **COUNT()** aggregation). This requires you to include both in your **GROUP BY** clause.
# - Order from results most frequent to least frequent.
# +
# Your code goes here
code_count_query = """____"""
# Set up the query
safe_config = bigquery.QueryJobConfig(maximum_bytes_billed=10**10)
code_count_query_job = client.query(code_count_query, job_config=safe_config)
# API request - run the query, and return a pandas DataFrame
code_count_results = code_count_query_job.to_dataframe()
# View top few rows of results
print(code_count_results.head())
# Check your answer
q_2.check()
# -
# For a hint or the solution, uncomment the appropriate line below.
# +
#q_2.hint()
#q_2.solution()
# -
# # Keep Going
# **[Click here](#$NEXT_NOTEBOOK_URL$)** to learn how to use **AS** and **WITH** to clean up your code and help you construct more complex queries.
| notebooks/sql/raw/ex4.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import tensorflow as tf
from sklearn.model_selection import ShuffleSplit
from tensorflow.examples.tutorials.mnist import input_data
import matplotlib
import matplotlib.pyplot as plt
import seaborn as sns
sns.set()
matplotlib.rcParams['figure.dpi'] = 120
import pandas as pd
import numpy as np
mnist = input_data.read_data_sets("./MNIST_data/", one_hot=True)
# #### Define DNN
# +
# Parameters
learning_rate = 0.1
alpha = 0.0001
dropout = 0.2
momentum = 0.99
num_steps = 5000
batch_size = 128
display_step = 100
# Network Parameters
n_hidden_1 = 128
n_hidden_2 = 128
num_input = 28*28
num_classes = 10
# -
# tf Graph input
X = tf.placeholder(tf.float64, [None, num_input])
Y = tf.placeholder(tf.float64, [None, num_classes])
training = tf.placeholder(tf.bool, name="training")
# +
# Create model
# Create model
def neural_net(x):
    """Three-layer fully connected MNIST classifier.

    Architecture: dropout -> dense(n_hidden_1) -> batch-norm -> sigmoid,
    repeated, then dropout -> dense(num_classes, sigmoid). Dense layers use
    1/sqrt(fan_in) truncated-normal init and L1 weight regularization.

    NOTE(review): every dropout/batch-norm layer hardcodes training=True,
    so the module-level `training` placeholder fed by the session is
    ignored -- dropout stays active and batch-norm uses batch statistics
    even at evaluation time. Confirm whether training=training was intended.
    NOTE(review): batch-norm moving-average update ops
    (tf.GraphKeys.UPDATE_OPS) are never run by the training loop; with the
    hardcoded training=True this happens to be moot.

    :param x: float tensor [batch, num_input] of flattened images.
    :returns: [batch, num_classes] tensor of sigmoid-activated scores.
    """
    drop1 = tf.layers.dropout(x, dropout, training=True)
    layer1 = tf.layers.dense(drop1,
                             n_hidden_1,
                             activation=None,
                             use_bias=True,
                             kernel_initializer=tf.truncated_normal_initializer(stddev=num_input**-0.5),
                             kernel_regularizer=tf.contrib.layers.l1_regularizer(alpha))
    bn1 = tf.layers.batch_normalization(layer1, training=True, momentum=momentum)
    act1 = tf.nn.sigmoid(bn1)
    drop2 = tf.layers.dropout(act1, dropout, training=True)
    layer2 = tf.layers.dense(drop2,
                             n_hidden_2,
                             activation=None,
                             use_bias=True,
                             kernel_initializer=tf.truncated_normal_initializer(stddev=n_hidden_1**-0.5),
                             kernel_regularizer=tf.contrib.layers.l1_regularizer(alpha))
    bn2 = tf.layers.batch_normalization(layer2, training=True, momentum=momentum)
    act2 = tf.nn.sigmoid(bn2)
    drop3 = tf.layers.dropout(act2, dropout, training=True)
    out_layer = tf.layers.dense(drop3,
                                num_classes,
                                activation=tf.nn.sigmoid,
                                use_bias=True,
                                kernel_initializer=tf.truncated_normal_initializer(stddev=n_hidden_2**-0.5),
                                kernel_regularizer=tf.contrib.layers.l1_regularizer(alpha))
    return out_layer
# Construct model
logits = neural_net(X)
prediction = tf.nn.softmax(logits)
# Define loss and optimizer
loss_op = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits_v2(logits=logits, labels=Y))
optimizer = tf.train.AdamOptimizer()
train_op = optimizer.minimize(loss_op)
# Evaluate model
accuracy = tf.reduce_mean(tf.cast(tf.equal(tf.argmax(prediction, 1), tf.argmax(Y, 1)), tf.float32))
# -
# #### Training
# +
init = tf.global_variables_initializer()
with tf.Session() as sess:
    # Loss/accuracy traces sampled every `display_step` steps.
    metrics = {
        'train_loss':[],
        'train_acc':[],
        'test_loss':[],
        'test_acc':[]
    }
    # Run the initializer
    sess.run(init)
    for step in range(1, num_steps+1):
        batch_x, batch_y = mnist.train.next_batch(batch_size)
        # Run optimization op (backprop)
        # NOTE(review): the `training` placeholder is fed here and below
        # but is unused by the graph (see neural_net), so the False feed
        # at evaluation does not disable dropout/batch-norm.
        sess.run(train_op, feed_dict={X: batch_x, Y: batch_y, training: True})
        if step % display_step == 0 or step == 1:
            # Evaluate on the FULL train and test sets (slow but simple).
            loss, acc = sess.run([loss_op, accuracy], feed_dict={X: mnist.train.images, Y: mnist.train.labels, training: False})
            print("Step " + str(step) + " Train loss: %0.5f, Train acc: %0.5f" % (loss, acc))
            metrics['train_loss'].append(loss)
            metrics['train_acc'].append(acc)
            loss, acc = sess.run([loss_op, accuracy], feed_dict={X: mnist.test.images, Y: mnist.test.labels, training: False})
            print("Step " + str(step) + " Test loss: %0.5f, Test acc: %0.5f" % (loss, acc))
            metrics['test_loss'].append(loss)
            metrics['test_acc'].append(acc)
    print("Optimization Finished!")
# +
snc = sns.color_palette()
plt.plot(metrics['test_loss'], color=snc[0])
plt.plot(metrics['train_loss'], color=snc[0], ls='--')
plt.ylim(min(metrics['test_loss']), max(metrics['test_loss']))
plt.legend(['DNN(test)', 'DNN(train)'])
plt.ylabel('Loss')
| MNIST_DNN.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ## Linear Models and Stochastic Gradient Descent
#
# In this seminar you will implement a logistic regression and train it using stochastic gradient descent modiffications, numpy and your brain.
#load our dakka
import numpy as np
# %matplotlib inline
import matplotlib.pyplot as plt
# ## Two-dimensional classification
#
# To make things more intuitive, let's solve a 2D classification problem with syntetic data.
# +
from sklearn import datasets, preprocessing
# datasets.make_circles?
# +
(X, y) = datasets.make_circles(n_samples=1024, shuffle=True, noise=0.2, factor=0.4) #making dataset
ind = np.logical_or(y==1, X[:,1] > X[:,0] - 0.5)
X = X[ind,:]
m = np.array([[1, 1], [-2, 1]])
X = preprocessing.scale(X)
y = y[ind]
plt.scatter(X[:, 0], X[:, 1], c=y, cmap=plt.cm.Paired)
plt.show()
# -
print("X:\n{}\ny:\n{}".format(X[:3],y[:3]))
# **Your task starts here**
#
# Since the problem above isn't linearly separable, we add quadratic features to the classifier.
#
# Implement this transformation in the __expand__ function.
# +
def expand(X):
    """
    Adds quadratic features.
    This function allows your linear model to make non-linear separation.

    For each sample (row in matrix), compute an expanded row:
    [feature0, feature1, feature0^2, feature1^2, feature0*feature1, 1]

    :param X: matrix of features, shape [n_samples,2]
    :returns: expanded features of shape [n_samples,6]
    """
    cross = (X[:, 0] * X[:, 1]).reshape(-1, 1)  # pairwise-product column
    ones = np.ones((X.shape[0], 1))             # constant bias column
    return np.hstack((X, X ** 2, cross, ones))
expand(X[:3])
# +
#simple test on random numbers
#[all 8 random numbers are 100% random :P]
dummy_X = np.array([
[0,0],
[1,0],
[2.61,-1.28],
[-0.59,2.1]
])
#call your expand function
dummy_expanded = expand(dummy_X)
#what it should have returned: x0 x1 x0^2 x1^2 x0*x1 1
dummy_expanded_ans = np.array([[ 0. , 0. , 0. , 0. , 0. , 1. ],
[ 1. , 0. , 1. , 0. , 0. , 1. ],
[ 2.61 , -1.28 , 6.8121, 1.6384, -3.3408, 1. ],
[-0.59 , 2.1 , 0.3481, 4.41 , -1.239 , 1. ]])
#tests
assert isinstance(dummy_expanded,np.ndarray), "please make sure you return numpy array"
assert dummy_expanded.shape==dummy_expanded_ans.shape, "please make sure your shape is correct"
assert np.allclose(dummy_expanded,dummy_expanded_ans,1e-3), "Something's out of order with features"
print("Seems legit!")
# -
# ### Logistic regression
# Now, let's write function that predicts class given X as in logistic regression.
#
# The math should look like this:
#
# $$ P(y| \vec x, \vec w) = \sigma(\vec x \cdot \vec w )$$
#
# where x represents features, w are weights and $$\sigma(a) = {1 \over {1+e^{-a}}}$$
#
# We shall omit $ \vec {arrows} $ in further formulae for simplicity.
def classify(X, w):
    """
    Given input features and weights
    return predicted probabilities of y==1 given x, P(y=1|x), computed as
    sigmoid(expand(x) . w) -- see the formula above.
    __don't forget to expand X inside classify and other functions__

    :param X: feature matrix X of shape [n_samples,2] (non-expanded)
    :param w: weight vector w of shape [6] for each of the expanded features
    :returns: an array of predicted probabilities in [0,1] interval.
    """
    # sigmoid(a) = 1 / (1 + e^-a), applied elementwise to the linear scores
    return 1.0 / (1.0 + np.exp(-expand(X).dot(w)))
# +
#sample usage / test just as the previous one
dummy_weights = np.linspace(-1,1,6)
dummy_probs = classify(dummy_X,dummy_weights)
dummy_answers = np.array([ 0.73105858, 0.450166 , 0.02020883, 0.59844257])
assert isinstance(dummy_probs,np.ndarray), "please return np.array"
assert dummy_probs.shape == dummy_answers.shape, "please return an 1-d vector with answers for each object"
assert np.allclose(dummy_probs,dummy_answers,1e-3), "There's something non-canonic about how probabilties are computed"
# -
# The loss you should try to minimize is the Logistic Loss aka crossentropy aka negative log-likelihood:
#
# $$ L = - {1 \over N} \sum_i {y \cdot log P(y|x,w) + (1-y) \cdot log (1-P(y|x,w))}$$
#
#
def compute_loss(X, y, w):
    """
    Given feature matrix X [n_samples,2], target vector [n_samples] of 0/1
    labels, and weight vector w [6], compute the scalar logistic
    (cross-entropy) loss:
        L = -1/N * sum(y*log(p) + (1-y)*log(1-p)),  where p = classify(X, w)
    """
    p = classify(X, w)
    return -np.mean(y * np.log(p) + (1 - y) * np.log(1 - p))
# +
dummy_y = np.array([0,1,0,1])
dummy_loss = compute_loss(dummy_X,dummy_y,dummy_weights)
assert np.allclose(dummy_loss,0.66131), "something wrong with loss"
# -
# Since we train our model with gradient descent, we gotta compute gradients.
#
# To be specific, we need a derivative of loss function over each weight [6 of them].
#
# $$ \nabla L = {\partial L \over \partial w} = ...$$
#
# No, we won't be giving you the exact formula this time. Instead, try figuring out a derivative with pen and paper.
#
# As usual, we've made a small test for you, but if you need more, feel free to check your math against finite differences (estimate how L changes if you shift w by $10^-5$ or so).
# +
def compute_grad(X, y, w):
    """
    Given feature matrix X [n_samples,2], target vector [n_samples] of 0/1
    labels, and weight vector w [6], compute the vector [6] of derivatives
    of L over each weight:
        dL/dw = 1/N * expand(X)^T (p - y),  where p = classify(X, w)
    """
    p = classify(X, w)
    # Average over samples of (p_i - y_i) * x_i (expanded features).
    return expand(X).T.dot(p - y) / X.shape[0]
# +
#tests
dummy_grads = compute_grad(dummy_X,dummy_y,dummy_weights)
#correct answers in canonic form
dummy_grads_ans = np.array([-0.06504252, -0.21728448, -0.1379879 , -0.43443953, 0.107504 , -0.05003101])
assert isinstance(dummy_grads,np.ndarray)
assert dummy_grads.shape == (6,), "must return a vector of gradients for each weight"
assert len(set(np.round(dummy_grads/dummy_grads_ans,3))), "gradients are wrong"
assert np.allclose(dummy_grads,dummy_grads_ans,1e-3), "gradients are off by a coefficient"
# -
# Here's an auxiliary function that visualizes the predictions
# +
from IPython import display
h = 0.01
x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1
y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1
xx, yy = np.meshgrid(np.arange(x_min, x_max, h), np.arange(y_min, y_max, h))
def visualize(X, y, w, history):
    """draws classifier prediction with matplotlib magic

    Left panel: decision surface of `classify` over the module-level
    (xx, yy) mesh grid with the data points overlaid; right panel: the
    loss history so far. Clears the previous notebook output so repeated
    calls animate in place.
    """
    Z = classify(np.c_[xx.ravel(), yy.ravel()], w)
    Z = Z.reshape(xx.shape)
    plt.subplot(1,2,1)
    plt.contourf(xx, yy, Z, alpha=0.8)
    plt.scatter(X[:, 0], X[:, 1], c=y, cmap=plt.cm.Paired)
    plt.xlim(xx.min(), xx.max())
    plt.ylim(yy.min(), yy.max())
    plt.subplot(1,2,2)
    plt.plot(history)
    plt.grid()
    ymin, ymax = plt.ylim()
    plt.ylim(0, ymax)
    display.clear_output(wait=True)
    plt.show()
# -
visualize(X,y,dummy_weights,[1,0.5,0.25],)
# ### Training
# In this section, we'll use the functions you wrote to train our classifier using stochastic gradient descent.
#
# Try to find an optimal learning rate for gradient descent for the given batch size.
#
# **Don't change the batch size!**
# +
w = np.array([0,0,0,0,0,1])
alpha = <learning rate>
n_iter = 50
batch_size = 4
loss = np.zeros(n_iter)
plt.figure(figsize=(12,5))
for i in range(n_iter):
ind = np.random.choice(X.shape[0], batch_size)
loss[i] = compute_loss(X, y, w)
visualize(X[ind,:], y[ind], w, loss)
w = w - alpha * compute_grad(X[ind,:], y[ind], w)
visualize(X, y, w, loss)
plt.clf()
# -
# # Bonus quest
#
# Let's try logistic regression on some real data.
#
# The full dataset can be found at https://archive.ics.uci.edu/ml/machine-learning-databases/00280/
import numpy as np
# +
DATA_ROOT='/mnt/mlhep2018/datasets/'
SAMPLES_LIMIT=100000
NUMBER_OF_FEATURES = 28
# +
import os.path as osp
X = np.ndarray(shape=(SAMPLES_LIMIT, NUMBER_OF_FEATURES), dtype='float32')
y = np.ndarray(shape=(SAMPLES_LIMIT, ), dtype='float32')
with open(osp.join(DATA_ROOT, 'HIGGS.csv')) as f:
for i in range(SAMPLES_LIMIT):
line = f.readline()
sample = np.fromstring(line, dtype='float32', sep=',')
X[i, :] = sample[1:]
y[i] = sample[0]
### again to save training time only 10% of the loaded data is used for training.
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.9)
# +
### alternatively, you can use small subset of the data from the repository
# f = np.load('./HIGGS_small.npz')
# X = f['X']
# y = f['y']
# print(X.shape)
# print(y.shape)
# +
### if you want to download the whole dataset, please, use terminal in Jupyter (New -> Terminal):
### wget https://archive.ics.uci.edu/ml/machine-learning-databases/00280/HIGGS.csv.gz
### gunzip HIGGS.csv.gz
# +
### your logistic regression here
# -
# ## For inspiration
#
# [Searching for Exotic Particles in High-Energy Physics with Deep Learning](https://arxiv.org/pdf/1402.4735.pdf) reports results for Decision Tree Boosting and Deep Neaural Networks. Soon, you should be able to reproduce these results.
| MyNotes/day1-Mon/seminar-01-linear/linear-models.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3.9.2 64-bit
# name: python3
# ---
import csv
import matplotlib.pyplot as plt
import numpy
# - empty_tick=5[harvest_period,height_limit]1629940583.csv
# - empty_tick=5[harvest_period,height_limit]output_by_ishland.csv
# - empty_tick=5[harvest_period,height_limit]output_by_ishland_reordered.csv
# - empty_tick=5[harvest_period,height_limit]Thu Aug 26 08.15.01 2021.csv
path = './multiProcessResults/empty_tick=5[harvest_period,height_limit]output_by_ishland_reordered.csv'
path = './multiProcessResults/empty_tick=5[harvest_period,height_limit]1629959120.csv'
path = './multiProcessResults/empty_tick=5[harvest_period,height_limit]1630027024.csv'
path = './multiProcessResults/empty_tick=5[harvest_period,height_limit]Sun Aug 29 21.47.12 2021.csv'
# +
first_label = ''
second_label = ''
first_axis = []
second_axis = []
values = {}

# Parse the sweep CSV: the first header cell packs both axis labels as
# '<second_label>|<first_label>'; the rest of the header row is the first
# axis. Each later row is a second-axis key followed by its float series.
with open(path, 'r') as csvfile:
    reader = csv.reader(csvfile)
    first_row = next(reader)
    second_label, first_label = first_row[0].split('|')
    first_axis = first_row[1:]
    for row in reader:
        second_axis.append(row[0])
        # was: list(float(str) for str in row[1:]) -- shadowed builtin str
        values[row[0]] = [float(cell) for cell in row[1:]]
# +
# better read
def read_csv(csvpath) -> dict:
re = {
'first_label' : '',
'second_label' : '',
'first_axis' : [],
'second_axis' : [],
'values' : {}
}
with open(csvpath, 'r', newline='') as csvfile:
reader = csv.reader(csvfile)
first_row = next(reader)
[re['second_label'], re['first_label']] = first_row[0].split('|')
re['first_axis'] = first_row[1:]
for row in reader:
re['second_axis'].append(row[0])
re['values'][row[0]] = list(float(str) for str in row[1:])
return re
result = read_csv(path)  # parse the currently selected results file
# -
fig, ax = plt.subplots()
fig.set_size_inches(6,4)
# One curve per second-axis key, over the whole first axis.
for key in second_axis:
    plt.plot(first_axis, values[key], label=key)
fig.set_facecolor('white')
ax.set_xlabel(first_label)
ax.set_ylabel('Unit/Hour')
# Thin out the x ticks: start ~1/24 of the way in, one tick per ~1/12 span.
ax.set_xticks(first_axis[int(len(first_axis)/24)::int(len(first_axis)/12)])
ax.minorticks_on()
fig.legend()
# +
# better plot
def to_float_list(t: list) -> list:
    """Return a new list with every element of *t* converted to float."""
    return [float(item) for item in t]
fig, ax = plt.subplots()
fig.set_size_inches(6,4)
fig.set_facecolor('white')
'''for key in result['second_axis']:
    re = []
    for i in range(len(result['values'][key]) - 1):
        if result['values'][key][i] > 0 and result['values'][key][i+1] > 0:
            re.append(result['values'][key][i+1] - result['values'][key][i] / result['values'][key][i])
    result['values'][key] = re'''
# Plot only a window of the first axis; the 600..719 values override the
# full-range defaults set just above.
start = 0
end = len(result['first_axis']) - 1
start = 600
end = 719
print(len(result['first_axis']))
i = 0
for key in result['second_axis']:
    i += 1
    # NOTE(review): this conditional is a no-op ('pass' then fall through);
    # possibly 'continue' was intended so only the 25th key is plotted —
    # as written, every key is plotted.  Confirm intent.
    if i != 25:
        pass
    ax.plot(result['first_axis'][start:end], result['values'][key][start:end], label=key, lw = 0.5)
ax.set_xlabel(result['first_label'])
ax.set_ylabel('Unit/Hour')
#ax.set_xticks(result['first_axis'][1000:1100:int(len(result['first_axis'])/12)])
# NOTE(review): (start - end) is negative here, giving a negative slice
# step and therefore an EMPTY tick list (no major x ticks).  The intent
# was probably int((end - start) / 20.) — verify.
ax.set_xticks(result['first_axis'][start:end:int((start - end) / 20.)])
ax.minorticks_on()
fig.legend()
# -
# Quick look at the curve for key '16' alone (uses the module-level parse).
fig, ax = plt.subplots()
plt.plot(first_axis, values['16'])
| PlotCSV.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3.6
# language: python
# name: python3
# ---
# # Keras exercise
#
# In this exercise you will be creating a Keras model by loading a data set, preprocessing input data, building a Sequential Keras model and compiling the model with a training configuration. Afterwards, you train your model on the training data and evaluate it on the test set. To finish this exercise, you will pass the accuracy of your model to the Coursera grader.
#
# This notebook is tested in IBM Watson Studio under python 3.6
#
# ## Data
#
# For this exercise we will use the Reuters newswire dataset. This dataset consists of 11,228 newswires from the Reuters news agency. Each wire is encoded as a sequence of word indexes, just as in the IMDB data we encountered in lecture 5 of this series. Moreover, each wire is categorised into one of 46 topics, which will serve as our label. This dataset is available through the Keras API.
#
# ## Goal
#
# We want to create a Multi-layer perceptron (MLP) using Keras which we can train to classify news items into the specified 46 topics.
#
# ## Instructions
#
# We start by installing and importing everything we need for this exercise:
# !pip install tensorflow==2.2.0rc0
import tensorflow as tf
# Guard: the grader below expects exactly TensorFlow 2.2.0-rc0.
if not tf.__version__ == '2.2.0-rc0':
    print(tf.__version__)
    raise ValueError('please upgrade to TensorFlow 2.2.0-rc0, or restart your Kernel (Kernel->Restart & Clear Output)')
# IMPORTANT! => Please restart the kernel by clicking on "Kernel"->"Restart and Clear Output" and wait until all output disappears. Then your changes are being picked up
#
# As you can see, we use Keras' Sequential model with only two types of layers: Dense and Dropout. We also specify a random seed to make our results reproducible. Next, we load the Reuters data set:
# +
import numpy as np
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Dropout, GlobalMaxPool1D, Embedding
from tensorflow.keras.utils import to_categorical
from tensorflow.keras.preprocessing.sequence import pad_sequences
seed = 1337
np.random.seed(seed)  # fixed seed for reproducible results
from tensorflow.keras.datasets import reuters
max_words = 1000  # keep only the 1000 most frequent word indices
(x_train, y_train), (x_test, y_test) = reuters.load_data(num_words=max_words,
                                                         test_split=0.2,
                                                         seed=seed)
num_classes = np.max(y_train) + 1  # 46 topics
# -
import matplotlib.pyplot as plt
plt.style.use('seaborn')  # NOTE(review): this style alias is deprecated in newer matplotlib — confirm version
# Note that we cap the maximum number of words in a news item to 1000 by specifying the *num_words* key word. Also, 20% of the data will be test data and we ensure reproducibility by setting our random seed.
#
# Our training features are still simply sequences of indexes and we need to further preprocess them, so that we can plug them into a *Dense* layer. For this we use a *Tokenizer* from Keras' text preprocessing module. This tokenizer will take an index sequence and map it to a vector of length *max_words=1000*. Each of the 1000 vector positions corresponds to one of the words in our newswire corpus. The output of the tokenizer has a 1 at the i-th position of the vector, if the word corresponding to i is in the description of the newswire, and 0 otherwise. Even if this word appears multiple times, we still just put a 1 into our vector, i.e. our tokenizer is binary. We use this tokenizer to transform both train and test features:
# Count the distinct word indices actually present in the training data;
# +1 leaves room for the reserved index 0 (padding).
unique_tokens = {token for sequence in x_train for token in sequence}
vocab_size = len(unique_tokens) + 1
del unique_tokens  # free the temporary set
# +
from tensorflow.keras.preprocessing.text import Tokenizer
# Binary bag-of-words: each index sequence becomes a length-1000 0/1 vector
# marking which of the top-1000 words occur in the newswire.
tokenizer = Tokenizer(num_words=max_words)
x_train = tokenizer.sequences_to_matrix(x_train, mode='binary')
x_test = tokenizer.sequences_to_matrix(x_test, mode='binary')
# ## 1. Exercise part: label encoding
#
# Use to_categorical, as we did in the lectures, to transform both *y_train* and *y_test* into one-hot encoded vectors of length *num_classes*:
# One-hot encode the topic labels into vectors of length num_classes.
y_train = to_categorical(y_train, num_classes=num_classes)
y_test = to_categorical(y_test, num_classes=num_classes)
# ## 2. Exercise part: model definition
#
# Next, initialise a Keras *Sequential* model and add three layers to it:
#
# Layer: Add a *Dense* layer with in input_shape=(max_words,), 512 output units and "relu" activation.
# Layer: Add a *Dropout* layer with dropout rate of 50%.
# Layer: Add a *Dense* layer with num_classes output units and "softmax" activation.
# MLP: 1000 inputs -> 512 ReLU units -> 50% dropout -> 46-way softmax.
model = Sequential()
model.add(Dense(512, input_shape=(max_words,), activation='relu'))
model.add(Dropout(0.5))
model.add(Dense(num_classes, activation='softmax'))
# ## 3. Exercise part: model compilation
#
# As the next step, we need to compile our Keras model with a training configuration. Compile your model with "categorical_crossentropy" as loss function, "adam" as optimizer and specify "accuracy" as evaluation metric. NOTE: In case you get an error regarding h5py, just restart the kernel and start from scratch
# Multi-class setup: categorical cross-entropy + Adam, report accuracy.
model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
model.summary()
# ## 4. Exercise part: model training and evaluation
#
# Next, define the batch_size for training as 32 and train the model for 5 epochs on *x_train* and *y_train* by using the *fit* method of your model. Then calculate the score for your trained model by running *evaluate* on *x_test* and *y_test* with the same batch size as used in *fit*.
batch_size = 32
# 5 epochs; the test set doubles as validation data for the curves below.
history = model.fit(x_train, y_train, validation_data=(x_test, y_test),
                    verbose=True, batch_size=batch_size, epochs=5)
# If you have done everything as specified, in particular set the random seed as we did above, your test accuracy should be around 80%
# +
# Final loss/accuracy on both splits (evaluate returns [loss, accuracy]).
score_train = model.evaluate(x_train, y_train, verbose=False)
score_test = model.evaluate(x_test, y_test, verbose=False)
print(f'Train: loss = {np.round(score_train[0], 5)}, accuracy={np.round(score_train[1], 5)}')
print(f'Test: loss = {np.round(score_test[0], 5)}, accuracy={np.round(score_test[1], 5)}')
# -
def plot_history(history):
    """Plot training/validation accuracy and loss from a Keras History."""
    hist = history.history
    epochs = range(1, len(hist['accuracy']) + 1)
    plt.figure(figsize=(12, 5))
    # Left panel: accuracy curves.
    plt.subplot(1, 2, 1)
    plt.plot(epochs, hist['accuracy'], 'b', label='Training acc')
    plt.plot(epochs, hist['val_accuracy'], 'r', label='Validation acc')
    plt.title('Training and validation accuracy')
    plt.legend()
    # Right panel: loss curves.
    plt.subplot(1, 2, 2)
    plt.plot(epochs, hist['loss'], 'b', label='Training loss')
    plt.plot(epochs, hist['val_loss'], 'r', label='Validation loss')
    plt.title('Training and validation loss')
    plt.legend()
plot_history(history)  # visualise the 5-epoch MLP run
# Congratulations, now it's time to submit your result to the Coursera grader by executing the following cells (Programming Assingment, Week2).
#
# We have to install a little library in order to submit to coursera
#
# ## Improving
# +
# Alternative model: learned word embeddings + global max pooling.
# NOTE(review): x_train was replaced above by the binary BoW matrix, so this
# Embedding layer only ever sees 'tokens' 0 and 1 — confirm this is intended.
embedding_dim = 50
model = Sequential()
model.add(Embedding(input_dim=vocab_size,
                    output_dim=embedding_dim,
                    input_length=max_words))
model.add(GlobalMaxPool1D())
# model.add(Dense(256, activation='relu'))
# model.add(Dropout(0.5))
model.add(Dense(128, activation='relu'))
# model.add(Dropout(0.2))
# model.add(Dense(32, activation='relu'))
model.add(Dense(num_classes, activation='softmax'))
# -
# Same training configuration as the MLP, but 50 epochs.
model.compile(optimizer='adam',
              loss='categorical_crossentropy',
              metrics=['accuracy'])
model.summary()
batch_size = 32
history = model.fit(x_train, y_train, validation_data=(x_test, y_test),
                    verbose=True, batch_size=batch_size, epochs=50)
# +
# Evaluate the embedding model on both splits and plot its curves.
score_train = model.evaluate(x_train, y_train, verbose=False)
score_test = model.evaluate(x_test, y_test, verbose=False)
print(f'Train: loss = {np.round(score_train[0], 5)}, accuracy={np.round(score_train[1], 5)}')
print(f'Test: loss = {np.round(score_test[0], 5)}, accuracy={np.round(score_test[1], 5)}')
# -
plot_history(history)
# !rm -f rklib.py
# !wget https://raw.githubusercontent.com/IBM/coursera/master/rklib.py
# Please provide your email address and obtain a submission token (secret) on the grader’s submission page in coursera, then execute the cell
# +
# Submit the test accuracy (in percent) to the Coursera grader.
# The key/email/token values are placeholders to be filled in by the student.
from rklib import submit
import json
key = "<KEY>"
part = "HCvcp"
email = "<EMAIL>"
token = "<KEY>H"
submit(email, token, '<KEY>', part, [part], json.dumps(score_test[1]*100))
# -
| applied-ai-with-deep-learning/Week 2/Building and serialising Keras models (PA).ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Example: Regenerating Data from
# # [<NAME>. / JPS 173 (2007) 277–290](http://www.sciencedirect.com/science/article/pii/S0378775307009056)
#
# ## Getting Started
#
# In this tutorial, we will regenerate data from <NAME>'s 2007 paper [[1]](http://www.sciencedirect.com/science/article/pii/S0378775307009056). This will both show that OpenPNM can recreate results accurately, and will also show some more specific uses of OpenPNM. While this paper deals with both SGL and Toray GDLs, we will deal only with SGL.
#
# There will be a general layout to complete this simulation:
#
# 1. Set up network
# 2. Set up geometry and geometrical methods
# 3. constrict throats by a constriction factor
# 4. Set up phases and methods
# 5. Set up phase physics and methods
# 6. Run invasion percolation
# 7. Run Stokes and Fickian algorithms
# 8. generate effective permeability and effective diffusivity values at different saturations
# 9. plot generated data
#
# We first import the openpnm code and some other useful modules.
import numpy as np
import openpnm as op
import matplotlib.pyplot as plt
import openpnm.models as mods
# %matplotlib inline
np.random.seed(10)  # fixed seed so the stochastic geometry is reproducible
# ## Setting up Network and Geometry
#
# To begin our simulation, we must first generate our SGL network and geometry. This includes:
#
# 1. creating a cubic network object and an SGL10 geometry object
# 2. sending our geometry object our internal pores
# 3. calculating values for throat and pore properties for both internal and boundary pores
# 4. accounting for pores and throats that are too big (making maximum pore size the lattice parameter)
Lc = 40.5e-6  # lattice constant: 40.5 um pore-to-pore spacing
# 1. Set up network
sgl = op.network.Cubic(shape=[26, 26, 10], spacing=Lc, name='SGL10BA')
sgl.add_boundary_pores()
proj = sgl.project
wrk = op.Workspace()
wrk.settings['loglevel'] = 50  # silence log messages below ERROR level
# 2. Set up geometries
# Internal (non-boundary) pores and the throats connecting them get the
# full stochastic SGL10 geometry; boundary pores get the simpler Boundary
# geometry further below.
Ps = sgl.pores('*boundary', mode='not')
Ts = sgl.find_neighbor_throats(pores=Ps, mode='xnor', flatten=True)
geo = op.geometry.GenericGeometry(network=sgl,pores=Ps,throats=Ts,name='geo')
# Random pore seeds (capped at 0.8834 so Weibull sizes stay below Lc),
# throat seed = min of its two pore seeds.
geo.add_model(propname='pore.seed',
              model=mods.misc.random,
              element='pore',
              num_range=[0, 0.8834],
              seed=None)
geo.add_model(propname='throat.seed',
              model=mods.misc.from_neighbor_pores,
              prop='pore.seed',
              mode='min')
# Weibull-distributed pore/throat diameters (same distribution parameters).
geo.add_model(propname='pore.diameter',
              model=mods.geometry.pore_size.weibull,
              shape=3.07,
              loc=19.97e-6,
              scale=1.6e-5)
geo.add_model(propname='throat.diameter',
              model=mods.geometry.throat_size.weibull,
              shape=3.07,
              loc=19.97e-6,
              scale=1.6e-5)
# Derived geometric properties: spherical pores, cylindrical throats.
geo.add_model(propname='pore.area',
              model=mods.geometry.pore_cross_sectional_area.sphere)
geo.add_model(propname='pore.volume',
              model=mods.geometry.pore_volume.sphere)
geo.add_model(propname='throat.length',
              model=mods.geometry.throat_length.ctc)
geo.add_model(propname='throat.volume',
              model=mods.geometry.throat_volume.cylinder)
geo.add_model(propname='throat.area',
              model=mods.geometry.throat_cross_sectional_area.cylinder)
geo.add_model(propname='throat.surface_area',
              model=mods.geometry.throat_surface_area.cylinder)
geo.add_model(propname='throat.endpoints',
              model=mods.geometry.throat_endpoints.spherical_pores)
geo.add_model(propname='throat.conduit_lengths',
              model=mods.geometry.throat_length.conduit_lengths)
# Boundary pores/throats get the stripped-down Boundary geometry.
Ps = sgl.pores('*boundary')
Ts = sgl.find_neighbor_throats(pores=Ps, mode='or')
boun = op.geometry.Boundary(network=sgl, pores=Ps, throats=Ts, name='boun')
# Before we move on to setting up our fluid and physics objects, we must constrict throats in the z and y direction by a factor (Gostick et al included this tightening of throats in only these two directions to create realistic anisotropy in the model). For his SGL simulation, Gostick uses a constriction factor of .95. Finally, because we have changed values for pore and throat diameters (first by accounting for pores and throats that are too big, and then finally constricting throats in the y and z directions), we must recalculate all pore and throat values relying on these diameters.
throats = geo.throats()
connected_pores = sgl.find_connected_pores(throats)
x1 = [sgl['pore.coords'][pair[0]][0] for pair in connected_pores]
x2 = [sgl['pore.coords'][pair[1]][0] for pair in connected_pores]
# A throat whose two pores share the same x coordinate lies in the y-z
# plane; only those are constricted (factor 0.95), creating the
# anisotropy Gostick et al describe.
same_x = [x - y == 0 for x, y in zip(x1,x2)]
factor = [s*.95 + (not s)*1 for s in same_x]
throat_diameters = sgl['throat.diameter'][throats]*factor
geo['throat.diameter'] = throat_diameters
# Re-run every dependent geometry model with the constricted diameters.
geo.regenerate_models(exclude=['throat.diameter'])
# OpenPNM makes it very easy to visualize the network we have generated through the "Visualization" methods. We can create vtk files to be viewed using ParaView (downloadable at http://www.paraview.org/download/ ). If we visualize our pore network model it would appear like this (the pores have been visualized using boxes- darker boxes are larger. Because the network is so big, visualization of the throats has been left out for clarity):
# Export the network for inspection in ParaView.
import openpnm.io.VTK as iovtk
iovtk.save(network=sgl, filename='network_SGL')
# An example is seen here:
# <img src="http://i.imgur.com/fPZ8lZK.png" style="width: 60%" align="left"/>
# ## Setting up the Phases and Physics
# Now we are ready to set up our phases (water and air) and the physics corresponding to each of these phases. openpnm has built in air and water phases, so we can use those. However, Gostick specifies using a water pore contact angle of 100, so we will reset this value after regenerating our fluids.
air = op.phases.Air(network = sgl, name = 'air')
water = op.phases.Water(network = sgl, name = 'water')
# Reset pore contact angle (Gostick specifies 100 degrees for water)
water['pore.contact_angle'] = 100.0
# We are now ready to establish physical properties for our fluid objects. To do this, we will: 1) create physics objects associated with our fluids (by using StandardPhyics we don't have to add methods for calculating each property because they are already included) 2) use our regenerate_physics() method to calculate these properties. One physics object is required for each combination of phase and geometry.
# One Standard physics object per (phase, geometry) combination.
phys_water = op.physics.Standard(network=sgl, phase=water, geometry=geo)
phys_air = op.physics.Standard(network=sgl, phase=air, geometry=geo)
phys_water_b = op.physics.Standard(network=sgl, phase=water, geometry=boun)
phys_air_b = op.physics.Standard(network=sgl, phase=air, geometry=boun)
# ## Running Ordinary Percolation, Fickian Diffusion, and Stokes Flow
# Gostick uses ordinary percolation to spread water through his GDL before calculating relative permeability and relative diffusivity. This way, a graph showing the relationship between saturation and relative permeability and between saturation and relative diffusivity can be created.
#
# To run our ordinary percolation, we will:
#
# 1. pick inlet pores
# 2. create an Ordinary Percolation algorithm object
# 3. setup our algorithm object
# 4. run our algorithm object
# 5. call results() so that occupancy of pores and throats for each fluid will can be set and multiphysics updated
# +
# Invade water from every other pore of the bottom boundary, sweeping 100
# capillary-pressure points.
inlets = sgl.pores('bottom_boundary')
used_inlets = [inlets[x] for x in range(0, len(inlets), 2)]
OP_1 = op.algorithms.OrdinaryPercolation(project=proj)
OP_1.set_inlets(pores=used_inlets)
OP_1.setup(phase=water, pore_volume='pore.volume', throat_volume='throat.volume')
OP_1.run(points=100)
# -
# This algorithm performed a start to finish simulation, which fully flooded the network. The 'results()' command can be used to update the phase occupancy values throughout the network. To save some computation, we will filter the invasion points so that relative transport properties can be calculated approximately every 5% increment in saturation. The OrdinaryPercolation object has a method to return the intrusion data as a named tuple of Capillary Pressure (Pcap) and Saturation of the non-wetting phase (Snwp).
data = OP_1.get_intrusion_data()
# Filter for evenly spaced sat inc. first and last
# Keep the first point, then every point that advances saturation by >5%,
# then always the final point.
filter_pc = [data.Pcap[0]]
sat = [data.Snwp[0]]
for i, pc in enumerate(data.Pcap):
    if data.Snwp[i] - sat[-1] > 0.05:
        filter_pc.append(pc)
        sat.append(data.Snwp[i])
filter_pc.append(data.Pcap[-1])
sat.append(data.Snwp[-1])
# We now define a helper function to update the phases and properties with the results of the OP algorithm. The multiphase conduit conductance model looks at the phase occupancy in the conduits made by the 1/2 pore - throat - 1/2 pore neighbor elements. When the mode is 'strict' the phase must occupy all three elements for the conduit to be considered open to flow for that phase. If the phase is not present in at least one of the elements in the conduit then the throat conductance is divided by 6 orders of magnitude. In this way the conductivity is severely reduced by the presence of the other phase and flow must go around, thus decreasing the permeability/diffusivity of the network.
def update_phase_and_phys(results):
    """Apply an OrdinaryPercolation snapshot to the phases and physics.

    Parameters
    ----------
    results : dict
        Output of ``OP_1.results(Pc=...)`` with boolean 'pore.occupancy'
        and 'throat.occupancy' arrays for the invading (water) phase.

    Air occupancy is set to the complement of water occupancy.  Every
    physics object then receives 'strict' multiphase conduit-conductance
    models: a throat's conductance is cut by ~6 orders of magnitude
    unless the phase occupies the whole 1/2-pore / throat / 1/2-pore
    conduit, forcing flow around the other phase.
    """
    water['pore.occupancy'] = results['pore.occupancy']
    air['pore.occupancy'] = 1-results['pore.occupancy']
    water['throat.occupancy'] = results['throat.occupancy']
    air['throat.occupancy'] = 1-results['throat.occupancy']
    # Add multiphase conductances.  The identical model is attached for
    # every (physics object, transport type) pair; the loop replaces eight
    # near-duplicate add_model calls from the original version.
    mode = 'strict'
    for phys in (phys_air, phys_water, phys_air_b, phys_water_b):
        for transport in ('diffusive', 'hydraulic'):
            phys.add_model(model=mods.physics.multiphase.conduit_conductance,
                           propname=f'throat.conduit_{transport}_conductance',
                           throat_conductance=f'throat.{transport}_conductance',
                           mode=mode)
# The following call will get the pore and throat phase occupancy, which is an array of 1s and 0s representing that the phase occupies a particular pore or throat, update the phase objects, and attach the multiphase conductance models to the physics objects
update_phase_and_phys(OP_1.results(Pc=1e3))  # initialise occupancy at Pc = 1 kPa
# The next step will be to calculate effective diffusivity and permeability at different saturations. Note that we want to run Fickian diffusion and Stokes flow algorithms at different points within our ordinary percolation process.
#
# The rest of our code will exist within a loop updating our network to different stages of percolation, so that we may view our relative diffusivity and permeability at different points of saturation.
#
# Before we add in the loop aspect, we will walk through the code that will be inside the loop.
#
# Note that we want the algorithms that are single phase (where only the specified fluid exists in the network) to help us make our permeability and diffusivity values relative. Any algorithm that is single phase will use the hydraulic or diffusive conductances before we recalculated based on occupancy. This calls for our conductance parameter to be 'hydraulic_conductance' or 'diffusive_conductance' instead of 'conduit_hydraulic_conductance' or 'conduit_diffusive_conductance'.
#
# The need for all these different algorithms can be made clearer by the equation relating effective permeability to the absolute permeability and relative permeability:
#
# $$K_{eff, p}(s_p) = K*K_{r, p}(s_p)$$
#
# |Symbol|Description|
# |-------------------|-------------------------------------------------------------|
# |$$K_{eff, p}(s_p)$$|effective permeability of phase p as a function of saturation|
# |$$K$$                |absolute permeability (or single phase permeability)         |
# |$$K_{r, p}(s_p)$$ |relative permeability of phase p as a function of saturation |
#
# Therefore, relative permeability can be found by dividing the effective permeability by the absolute permeability. Thus the need for a single phase algorithm (absolute permeability) for every multi phase algorithm (effective permeability).
#
# The same goes for relative diffusivity, which has an very similar equation that looks like this:
# $$D_{eff, p}(s_p) = D*D_{r, p}(s_p)$$
# +
# Relative-property accumulators, one list per direction (0=x, 1=y, 2=z).
perm_air = {'0': [], '1': [], '2': []}
diff_air = {'0': [], '1': [], '2': []}
perm_water = {'0': [], '1': [], '2': []}
diff_water = {'0': [], '1': [], '2': []}
max_Pc = max(OP_1['throat.invasion_pressure'])
num_seq = 20
pore_volumes = sgl['pore.volume']
throat_volumes = sgl['throat.volume']
totV = np.sum(pore_volumes) + np.sum(throat_volumes)
# Single-phase (absolute) values used later to normalise the multiphase
# results into relative permeability/diffusivity.
K_air_single_phase = [None, None, None]
D_air_single_phase = [None, None, None]
K_water_single_phase = [None, None, None]
D_water_single_phase = [None, None, None]
bounds = [['left', 'right'], ['front', 'back'], ['top', 'bottom']]
for bound_increment in range(len(bounds)):
    # Run Single phase algs effective properties
    BC1_pores = sgl.pores(labels=bounds[bound_increment][0]+'_boundary')
    BC2_pores = sgl.pores(labels=bounds[bound_increment][1]+'_boundary')
    # Effective permeability : air
    sf_air = op.algorithms.StokesFlow(network=sgl, phase=air)
    sf_air.setup(conductance='throat.hydraulic_conductance')
    sf_air.set_value_BC(values=0.6, pores=BC1_pores)
    sf_air.set_value_BC(values=0.2, pores=BC2_pores)
    sf_air.run()
    K_air_single_phase[bound_increment] = sf_air.calc_effective_permeability()
    proj.purge_object(obj=sf_air)
    # Effective diffusivity : air
    fd_air = op.algorithms.FickianDiffusion(network=sgl,phase=air)
    fd_air.setup(conductance='throat.diffusive_conductance')
    fd_air.set_value_BC(values=0.6, pores=BC1_pores)
    fd_air.set_value_BC(values=0.2, pores=BC2_pores)
    fd_air.run()
    D_air_single_phase[bound_increment] = fd_air.calc_effective_diffusivity()
    proj.purge_object(obj=fd_air)
    # Effective permeability : water
    sf_water = op.algorithms.StokesFlow(network=sgl, phase=water)
    sf_water.setup(conductance='throat.hydraulic_conductance')
    sf_water.set_value_BC(values=0.6, pores=BC1_pores)
    sf_water.set_value_BC(values=0.2, pores=BC2_pores)
    sf_water.run()
    K_water_single_phase[bound_increment] = sf_water.calc_effective_permeability()
    proj.purge_object(obj=sf_water)
    # Effective diffusivity : water
    fd_water = op.algorithms.FickianDiffusion(network=sgl,phase=water)
    fd_water.setup(conductance='throat.diffusive_conductance')
    fd_water.set_value_BC(values=0.6, pores=BC1_pores)
    fd_water.set_value_BC(values=0.2, pores=BC2_pores)
    fd_water.run()
    D_water_single_phase[bound_increment] = fd_water.calc_effective_diffusivity()
    proj.purge_object(obj=fd_water)
# -
# Now we can repeat the algorithms at each filtered pressure. This process takes about 1 minute.
# Sweep the filtered capillary pressures: at each saturation state compute
# multiphase (effective) transport in all three directions and normalise by
# the matching single-phase value to obtain relative properties.
for Pc in filter_pc:
    update_phase_and_phys(OP_1.results(Pc=Pc))
    print('-' * 80)
    print('Pc', Pc)
    for bound_increment in range(len(bounds)):
        BC1_pores = sgl.pores(labels=bounds[bound_increment][0]+'_boundary')
        BC2_pores = sgl.pores(labels=bounds[bound_increment][1]+'_boundary')
        # Multiphase algorithms use the conduit conductances, which account
        # for phase occupancy along each pore-throat-pore conduit.
        sf_air = op.algorithms.StokesFlow(network=sgl, phase=air)
        sf_air.setup(conductance='throat.conduit_hydraulic_conductance')
        sf_water = op.algorithms.StokesFlow(network=sgl, phase=water)
        sf_water.setup(conductance='throat.conduit_hydraulic_conductance')
        fd_air = op.algorithms.FickianDiffusion(network=sgl, phase=air)
        fd_air.setup(conductance='throat.conduit_diffusive_conductance')
        fd_water = op.algorithms.FickianDiffusion(network=sgl, phase=water)
        fd_water.setup(conductance='throat.conduit_diffusive_conductance')
        # Dirichlet boundary conditions: 0.6 on one face, 0.2 on the other.
        for alg in (sf_air, sf_water, fd_air, fd_water):
            alg.set_value_BC(values=0.6, pores=BC1_pores)
            alg.set_value_BC(values=0.2, pores=BC2_pores)
        # Run Multiphase algs
        sf_air.run()
        sf_water.run()
        fd_air.run()
        fd_water.run()
        Keff_air_mphase = sf_air.calc_effective_permeability()
        Deff_air_mphase = fd_air.calc_effective_diffusivity()
        # BUG FIX: water permeability was previously taken from sf_air
        # (copy-paste error), so Kr_water reused the air result.
        Keff_water_mphase = sf_water.calc_effective_permeability()
        Deff_water_mphase = fd_water.calc_effective_diffusivity()
        # Relative property = effective (multiphase) / absolute (single phase).
        Kr_eff_air = Keff_air_mphase / K_air_single_phase[bound_increment]
        Kr_eff_water = Keff_water_mphase / K_water_single_phase[bound_increment]
        Dr_eff_air = Deff_air_mphase / D_air_single_phase[bound_increment]
        Dr_eff_water = Deff_water_mphase / D_water_single_phase[bound_increment]
        perm_air[str(bound_increment)].append(Kr_eff_air)
        diff_air[str(bound_increment)].append(Dr_eff_air)
        perm_water[str(bound_increment)].append(Kr_eff_water)
        diff_water[str(bound_increment)].append(Dr_eff_water)
        # Discard the throwaway algorithm objects to keep the project clean.
        for alg in (sf_air, sf_water, fd_air, fd_water):
            proj.purge_object(obj=alg)
# Now we can plot the results including those from the paper
# +
# NBVAL_IGNORE_OUTPUT
from matplotlib.font_manager import FontProperties
# %matplotlib inline
# Data points taken directly from Gostick's graphs using GraphClick
gostick_saturation_1 = [0.008, 0.04, 0.093, 0.14, 0.193, 0.246, 0.293, 0.337, 0.395, 0.442, 0.496,
                        0.542, 0.59, 0.641, 0.687, 0.748, 0.793, 0.838, 0.894, 0.945, 0.986]
gostick_perm_air_case1 = [0.917, 0.821, 0.68, 0.568, 0.466, 0.366, 0.286, 0.204, 0.144, 0.096, 0.051, 0.024,
                          0.003, -1.08E-04, -1.96E-04, -3.12E-04, -3.97E-04, -4.84E-04, -5.90E-04, 0.002, 0.002]
gostick_saturation_2 = [0.99, 0.899, 0.847, 0.802, 0.75, 0.701, 0.645, 0.594, 0.546, 0.497, 0.449,
                        0.398, 0.348, 0.298, 0.245, 0.196, 0.147, 0.094, 0.044, 0.003]
gostick_perm_water = [0.935, 0.774, 0.709, 0.664, 0.618, 0.572, 0.514, 0.461, 0.401, 0.347,
                      0.284, 0.211, 0.145, 0.084, 0.044, 0.024, 0.012, 0.001, 0.001, 0.001]
gostick_saturation_3 =[0.006, 0.05, 0.102, 0.151, 0.199, 0.247, 0.297, 0.348, 0.399, 0.447, 0.496,
                       0.546, 0.597, 0.645, 0.699, 0.75, 0.798, 0.846, 0.899, 0.949, 0.983]
gostick_diff_air_case1 = [0.939, 0.836, 0.725, 0.626, 0.531, 0.442, 0.353, 0.27, 0.203, 0.14, 0.085, 0.048,
                          0.008, 5.49E-04, 4.48E-04, 3.50E-04, 2.59E-04, 1.67E-04, 0.003, 0.003, 0.003]
gostick_saturation_4 = [0.985, 0.946, 0.898, 0.846, 0.795, 0.749, 0.695, 0.643, 0.596, 0.545, 0.496, 0.448,
                        0.396, 0.346, 0.298, 0.251, 0.196, 0.146, 0.094]
gostick_diff_water = [0.941, 0.901, 0.853, 0.809, 0.756, 0.7, 0.638, 0.569, 0.503, 0.428, 0.36, 0.291, 0.214, 1.48E-01,
                      8.00E-02, 4.50E-02, 2.30E-02, 1.60E-02, 0.005]
fontP = FontProperties()
fontP.set_size('small')
# Setting up subplots
fig = plt.figure(figsize=(6, 10), dpi=80, facecolor='w', edgecolor='k')
ax1 = fig.add_subplot(211) #top
ax2 = fig.add_subplot(212) #bottom
x_values1 = [x/20 for x in range(21)]
z = '.75'  # grey level used for the y-direction curves
# Plots for subplot1 - strict permeability
p1, = ax1.plot(sat, perm_water['0'], color = 'k', linestyle = '-', marker = 'o')
p2, = ax1.plot(sat, perm_water['1'], color = z, linestyle = '-', marker = 'o')
p3, = ax1.plot(sat, perm_water['2'], color = 'b', linestyle = '-', marker = 'o')
p4, = ax1.plot(sat, perm_air['0'], color = 'k', linestyle = '-', marker = '^')
p5, = ax1.plot(sat, perm_air['1'], color = z, linestyle = '-', marker = '^')
p6, = ax1.plot(sat, perm_air['2'], color = 'b', linestyle = '-', marker = '^')
# Reference power-law curves s^3 and (1-s)^3.
p10, = ax1.plot(x_values1, [x**(3) for x in x_values1], 'k--')
ax1.plot(x_values1, [(1-x)**(3) for x in x_values1], 'k--')
gs1, = ax1.plot(gostick_saturation_1, gostick_perm_air_case1, color = 'r', linestyle = '-', marker = 'D')
gs2, = ax1.plot(gostick_saturation_2, gostick_perm_water, color = 'r', linestyle = '-', marker = 'o')
ax1.set_ylabel('permeability')
ax1.set_xlabel("saturation")
ax1.set_ylim([0,1])
ax1.set_xlim([0,1])
# Need to work on legend to match up with the right things
lgd1 = ax1.legend([p1, p2, p3, p4, p5, p6, p10, gs1, gs2],
                  ["KrWater,x", "KrWater,y", "KrWater,z",
                   "KrAir,x","KrAir,y","KrAir,z", "a = 3",
                   "Gostick et al \n KrAir,x (case 1)",
                   "Gostick et al \n KrWater,x"],
                  loc='center left', bbox_to_anchor=(1, 0.5), prop=fontP)
# Plots for subplot4 - diffusivity
p11, = ax2.plot(sat, diff_water['0'], color = 'k', linestyle = '-', marker = 'o')
p12, = ax2.plot(sat, diff_water['1'], color = z, linestyle = '-', marker = 'o')
p13, = ax2.plot(sat, diff_water['2'], color = 'b', linestyle = '-', marker = 'o')
p14, = ax2.plot(sat, diff_air['0'], color = 'k', linestyle = '-', marker = '^')
p15, = ax2.plot(sat, diff_air['1'], color = z, linestyle = '-', marker = '^')
p16, = ax2.plot(sat, diff_air['2'], color = 'b', linestyle = '-', marker = '^')
# Reference power-law curves s^2 and (1-s)^2.
p20, = ax2.plot(x_values1, [x**(2) for x in x_values1], 'k--')
ax2.plot(x_values1, [(1-x)**(2) for x in x_values1], 'k--')
gs3, = ax2.plot(gostick_saturation_3, gostick_diff_air_case1, color = 'r', linestyle = '-', marker = 'D')
gs4, = ax2.plot(gostick_saturation_4, gostick_diff_water, color = 'r', linestyle = '-', marker = 'o')
ax2.set_ylabel('diffusivity')
ax2.set_xlabel("saturation")
ax2.set_ylim([0,1])
ax2.set_xlim([0,1])
lgd2 = ax2.legend([p11, p12, p13, p14, p15, p16, p20, gs3, gs4],
                  ["DrWater,x", "DrWater,y", "DrWater,z",
                   "DrAir,x","DrAir,y","DrAir,z", "a = 2",
                   "Gostick et al \n DrAir,x (case 1)",
                   "Gostick et al \n DrWater,x"],
                  loc='center left', bbox_to_anchor=(1, 0.5), prop=fontP)
fig.subplots_adjust(left=0.13, right=.7, top=0.95, bottom=0.05)
plt.show()
# -
# -
# ## Discrepancies with Gostick's simulation
# Several things contribute to slight differences between this simulation and that produced by Gostick et al in their 2007 paper. These include:
#
# 1. Lack of pore size correlation
# 2. Lack of late pore filling
# ## Acknowledgements
# The OpenPNM team would like to thank <NAME> (Materials Science and Engineering, University of Toronto, 1T7) for her excellent work in developing this example.
| examples/paper_recreations/Gostick et al. (2007)/Gostick et al. (2007).ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 2
# language: python
# name: python2
# ---
import scipy.io as io
import matplotlib.pyplot as plt
import matplotlib.pylab as pylab
# +
#Set up parameters for figure display
# Global matplotlib style for every figure in this notebook: large fonts,
# a tall 8x10 canvas, bold axis labels, heavy serif text.
params = {'legend.fontsize': 'x-large',
'figure.figsize': (8, 10),
'axes.labelsize': 'x-large',
'axes.titlesize':'x-large',
'axes.labelweight': 'bold',
'xtick.labelsize':'x-large',
'ytick.labelsize':'x-large'}
pylab.rcParams.update(params)
pylab.rcParams["font.family"] = "serif"
pylab.rcParams["font.weight"] = "heavy"
# -
#Load the hori data from some samples..
# Loads the 'Hori_graphics' matrix from a MATLAB .mat file; each row appears
# to be one EEG trace (the plots below use 1001 samples per trace) —
# TODO confirm against how horigraphics.mat was generated.
mat_hori = io.loadmat('/work/imagingQ/SpatialAttention_Drowsiness/microMeasuresAlertness_Neuroimage2018/'
'Scripts/mat_files/horigraphics.mat')
data_hori = mat_hori['Hori_graphics']
#take the data for different scales..
# Pick one exemplar row for each of the 10 Hori sleep-onset stages.
y_hori1 = data_hori[0,]
y_hori2 = data_hori[3,]
y_hori3 = data_hori[6,]
y_hori4 = data_hori[9,]
y_hori5 = data_hori[12,]
y_hori6 = data_hori[13,]
y_hori7 = data_hori[15,]
y_hori8 = data_hori[18,]
y_hori9 = data_hori[21,]
y_hori10 = data_hori[23,]
# +
#Set the bolding range..
# Sample-index slices marking the segments of each trace to highlight
# ("bold") in colour on the corresponding panel below.
x = list(range(0, 1001))
bold_hori1a = slice(0, 500)
bold_hori1b = slice(500, 1000)
bold_hori2a = slice(50, 460)
bold_hori2b = slice(625, 835)
bold_hori3a = slice(825, 1000)
bold_hori4a = slice(0, 1000)
bold_hori6a = slice(800, 875)
bold_hori7a = slice(200, 250)
bold_hori7b = slice(280, 350)
bold_hori7c = slice(450, 525)
bold_hori7d = slice(550, 620)
bold_hori7e = slice(750, 800)
bold_hori8a = slice(650, 750)
bold_hori8b = slice(750, 795)
bold_hori9a = slice(200, 325)
bold_hori10a = slice(720, 855)
# +
#Set the main figure of the Hori scale..
# Ten stacked panels (one per Hori stage) on a 60x1 grid: 6 grid rows each.
plt.style.use('ggplot')
ax1 = plt.subplot2grid((60, 1), (0, 0), rowspan=6)
ax2 = plt.subplot2grid((60, 1), (6, 0), rowspan=6)
ax3 = plt.subplot2grid((60, 1), (12, 0), rowspan=6)
ax4 = plt.subplot2grid((60, 1), (18, 0), rowspan=6)
ax5 = plt.subplot2grid((60, 1), (24, 0), rowspan=6)
ax6 = plt.subplot2grid((60, 1), (30, 0), rowspan=6)
ax7 = plt.subplot2grid((60, 1), (36, 0), rowspan=6)
ax8 = plt.subplot2grid((60, 1), (42, 0), rowspan=6)
ax9 = plt.subplot2grid((60, 1), (48, 0), rowspan=6)
ax10 = plt.subplot2grid((60, 1), (54, 0), rowspan=6)
# Shared x-axis scaling: 1000 samples relabelled as 0-4 seconds.
plt.setp(ax1, xticks=[0, 250, 500, 750, 999], xticklabels=['0','1', '2', '3', '4'])
plt.setp(ax2, xticks=[0, 250, 500, 750, 999], xticklabels=['0','1', '2', '3', '4'])
plt.setp(ax3, xticks=[0, 250, 500, 750, 999], xticklabels=['0','1', '2', '3', '4'])
plt.setp(ax4, xticks=[0, 250, 500, 750, 999], xticklabels=['0','1', '2', '3', '4'])
plt.setp(ax5, xticks=[0, 250, 500, 750, 999], xticklabels=['0','1', '2', '3', '4'])
plt.setp(ax6, xticks=[0, 250, 500, 750, 999], xticklabels=['0','1', '2', '3', '4'])
plt.setp(ax7, xticks=[0, 250, 500, 750, 999], xticklabels=['0','1', '2', '3', '4'])
plt.setp(ax8, xticks=[0, 250, 500, 750, 999], xticklabels=['0','1', '2', '3', '4'])
plt.setp(ax9, xticks=[0, 250, 500, 750, 999], xticklabels=['0','1', '2', '3', '4'])
plt.setp(ax10, xticks=[0, 250, 500, 750, 999], xticklabels=['0','1', '2', '3', '4'])
# No gaps between panels so the traces read as one continuous column.
plt.subplots_adjust(wspace=0, hspace=0)
# Each panel: full trace in grey ('k-', alpha=0.5) with the stage-defining
# segments overdrawn in colour — blue for alpha activity (stages 1-3),
# green (stages 4-5), red for vertex sharp waves (6-8), magenta for
# spindles (8-9), cyan for K-complexes (10).
ax1.plot(x, y_hori1, 'k-', alpha=0.5, linewidth=2.0)
ax1.plot(x[bold_hori1a], y_hori1[bold_hori1a], 'b-', alpha=0.75)
ax1.plot(x[bold_hori1b], y_hori1[bold_hori1b], 'b-', alpha=0.75)
ax1.set_ylim([-150, 150])
ax1.axes.xaxis.set_ticklabels([])
ax1.set_ylabel('1: Alpha wave \ntrain', rotation=0,ha='right',va='center', fontsize=20, labelpad=10)
ax2.plot(x, y_hori2, 'k-', alpha=0.5, linewidth=2.0)
ax2.plot(x[bold_hori2a], y_hori2[bold_hori2a], 'b-', alpha=0.75)
ax2.plot(x[bold_hori2b], y_hori2[bold_hori2b], 'b-', alpha=0.75)
ax2.set_ylim([-150, 150])
ax2.axes.xaxis.set_ticklabels([])
ax2.set_ylabel('2: Alpha wave \nintermittent(>50%)', rotation=0,ha='right',va='center',
fontsize=20, labelpad=10)
ax3.plot(x, y_hori3, 'k-', alpha=0.5, linewidth=2.0)
ax3.plot(x[bold_hori3a], y_hori3[bold_hori3a], 'b-', alpha=0.75)
ax3.set_ylim([-150, 150])
ax3.axes.xaxis.set_ticklabels([])
ax3.set_ylabel('3: Alpha wave \nintermittent(<50%)', rotation=0,ha='right',va='center',
fontsize=20, labelpad=10)
ax4.plot(x, y_hori4, 'g-', alpha=0.5, linewidth=2.0)
ax4.plot(x[bold_hori4a], y_hori4[bold_hori4a], 'g-', alpha=0.75)
ax4.set_ylim([-150, 150])
ax4.axes.xaxis.set_ticklabels([])
ax4.set_ylabel('4: EEG flattening', rotation=0,ha='right',va='center', fontsize=20, labelpad=10)
ax5.plot(x, y_hori5, 'g-', alpha=0.5, linewidth=2.0)
ax5.plot(x[bold_hori4a], y_hori5[bold_hori4a], 'g-', alpha=0.75)
ax5.set_ylim([-150, 150])
ax5.axes.xaxis.set_ticklabels([])
ax5.set_ylabel('5: Ripples', rotation=0,ha='right',va='center', fontsize=20, labelpad=10)
ax6.plot(x, y_hori6, 'k-', alpha=0.5, linewidth=2.0)
ax6.plot(x[bold_hori6a], y_hori6[bold_hori6a], 'r-', alpha=0.75)
ax6.set_ylim([-150, 150])
ax6.axes.xaxis.set_ticklabels([])
ax6.set_ylabel('6: Vertex sharp wave \nsolitary', rotation=0,ha='right',va='center',
fontsize=20, labelpad=10)
ax7.plot(x, y_hori7, 'k-', alpha=0.5, linewidth=2.0)
ax7.plot(x[bold_hori7a], y_hori7[bold_hori7a], 'r-', alpha=0.75)
ax7.plot(x[bold_hori7b], y_hori7[bold_hori7b], 'r-', alpha=0.75)
ax7.plot(x[bold_hori7c], y_hori7[bold_hori7c], 'r-', alpha=0.75)
ax7.plot(x[bold_hori7d], y_hori7[bold_hori7d], 'r-', alpha=0.75)
ax7.plot(x[bold_hori7e], y_hori7[bold_hori7e], 'r-', alpha=0.75)
ax7.set_ylim([-150, 150])
ax7.set_ylabel('7: Vertex sharp wave \nbursts', rotation=0,ha='right',va='center',
fontsize=20, labelpad=10)
ax7.axes.xaxis.set_ticklabels([])
ax8.plot(x, y_hori8, 'k-', alpha=0.5, linewidth=2.0)
ax8.plot(x[bold_hori8a], y_hori8[bold_hori8a], 'r-', alpha=0.75)
ax8.plot(x[bold_hori8b], y_hori8[bold_hori8b], 'm-', alpha=0.75)
ax8.set_ylim([-150, 150])
ax8.set_ylabel('8: Vertex sharp wave \nand incomplete spindles', rotation=0,ha='right',va='center',
fontsize=20, labelpad=10)
ax8.axes.xaxis.set_ticklabels([])
ax9.plot(x, y_hori9, 'k-', alpha=0.5, linewidth=2.0)
ax9.plot(x[bold_hori9a], y_hori9[bold_hori9a], 'm-', alpha=0.75)
# Tighter amplitude range for the low-voltage spindle trace.
ax9.set_ylim([-40, 40])
ax9.set_ylabel('9: Spindles', rotation=0,ha='right',va='center', fontsize=20, labelpad=10)
ax9.axes.xaxis.set_ticklabels([])
ax10.plot(x, y_hori10, 'k-', alpha=0.5, linewidth=2.0)
ax10.plot(x[bold_hori10a], y_hori10[bold_hori10a], 'c-', alpha=0.75)
# Wider range for high-amplitude K-complexes.
ax10.set_ylim([-175, 175])
ax10.set_ylabel('10: K-complexes', rotation=0,ha='right',va='center', fontsize=20, labelpad=10)
ax10.set_xlabel('Time(seconds)', rotation=0,ha='center',va='center', fontsize=20, labelpad=10)
# Blank the y tick labels on every panel except the bottom one, which
# carries the shared +/-100 uV scale on its right-hand side.
ax1.axes.yaxis.set_ticklabels([' ',' ',''])
ax2.axes.yaxis.set_ticklabels([' ',' ',''])
ax3.axes.yaxis.set_ticklabels([' ',' ',''])
ax4.axes.yaxis.set_ticklabels([' ',' ',''])
ax5.axes.yaxis.set_ticklabels([' ',' ',''])
ax6.axes.yaxis.set_ticklabels([' ',' ',''])
ax7.axes.yaxis.set_ticklabels([' ',' ',''])
ax8.axes.yaxis.set_ticklabels([' ',' ',''])
ax9.axes.yaxis.set_ticklabels([' ',' ',''])
ax10.axes.yaxis.set_ticklabels(['-100(uV)','','100(uV)'])
ax10.axes.yaxis.tick_right()
# NOTE(review): tick *labels* are set above, tick *positions* below; on
# newer matplotlib versions set_ticks() can reset custom labels — confirm
# the labels still render as intended if the matplotlib version changes.
ax1.axes.yaxis.set_ticks([-100, 0, 100])
ax2.axes.yaxis.set_ticks([-100, 0, 100])
ax3.axes.yaxis.set_ticks([-100, 0, 100])
ax4.axes.yaxis.set_ticks([-100, 0, 100])
ax5.axes.yaxis.set_ticks([-100, 0, 100])
ax6.axes.yaxis.set_ticks([-100, 0, 100])
ax7.axes.yaxis.set_ticks([-100, 0, 100])
ax8.axes.yaxis.set_ticks([-100, 0, 100])
ax9.axes.yaxis.set_ticks([-100, 0, 100])
ax10.axes.yaxis.set_ticks([-100, 0, 100])
# Here is the label of interest
# Sleep-stage group labels in the left margin (axes-fraction coordinates).
ax2.annotate('Wake', xy=(-0.85, 0.90), xytext=(-0.85, 1.00), xycoords='axes fraction',rotation='vertical',
fontsize=20, ha='center', va='center')
ax6.annotate('N1', xy=(-0.85, 1), xytext=(-0.85, 1), xycoords='axes fraction', rotation='vertical',
fontsize=20, ha='center', va='center')
ax10.annotate('N2', xy=(-0.85, 0.90), xytext=(-0.85, 1.00), xycoords='axes fraction', rotation='vertical',
fontsize=20, ha='center', va='center')
# +
#Set up the vertex element now..
# Zoomed 1-second views of the two vertex sharp-wave shapes, stacked in a
# small 3x6 figure.
params = {'figure.figsize': (3, 6)}
pylab.rcParams.update(params)
y_hori6 = data_hori[13,]
y_hori7 = data_hori[15,]
x = list(range(0, 101))
x_spin = list(range(0, 301))
x_kcomp = list(range(0, 301))
# 101-sample windows containing the biphasic / monophasic waves.
y_hori6 = y_hori6[800:901]
y_hori7 = y_hori7[281:382]
#Vertex
bold_biphasic = slice(8, 75)
bold_monophasic = slice(8, 65)
plt.style.use('ggplot')
f, axarr = plt.subplots(2, sharey=True) # makes the 2 subplots share an axis.
f.suptitle('Vertex element', size=12, fontweight='bold')
plt.setp(axarr, xticks=[0, 50,100], xticklabels=['0', '0.5', '1'],
yticks=[-150,0, 150])
axarr[0].plot(x, y_hori6, 'k-', alpha=0.5, linewidth=2.0)
axarr[0].plot(x[bold_biphasic], y_hori6[bold_biphasic], 'r-', alpha=0.75)
axarr[0].set_title('Biphasic', fontsize=10, fontweight='bold')
axarr[0].set_ylim([-150, 150])
axarr[1].plot(x, y_hori7, 'k-', alpha=0.5, linewidth=2.0)
axarr[1].plot(x[bold_monophasic], y_hori7[bold_monophasic], 'r-', alpha=0.75)
axarr[1].set_title('Monophasic', fontsize=10, fontweight='bold')
axarr[1].set_xlabel('Time(s)')
f.text(-0.2, 0.5, 'Amp(uV)', va='center', rotation='vertical', fontsize=20)
f.subplots_adjust(hspace=0.3)
# +
#Set up the Spindle element now..
# 3-second zoom of a sleep spindle, highlighted in red.
params = {'figure.figsize': (3, 1.5)}
pylab.rcParams.update(params)
bold_spindle = slice(95, 205)
y_hori8 = data_hori[21,]
y_hori8 = y_hori8[101:402]
fspin, axarrspin = plt.subplots(1, sharey=False) # makes the 2 subplots share an axis.
plt.setp(axarrspin, xticks=[0, 150,300], xticklabels=['0', '1.5', '3'],
yticks=[-100,0, 100])
axarrspin.plot(x_spin, y_hori8, 'k-', alpha=0.5, linewidth=2.0)
axarrspin.plot(x_spin[bold_spindle], y_hori8[bold_spindle], 'r-', alpha=0.75)
axarrspin.set_title('', fontsize=10, fontweight='bold')
axarrspin.set_ylim([-100, 100])
axarrspin.set_xlabel('Time(s)')
# Figure-level title placed above the axes via f.text.
fspin.text(0.3, 1.5, 'Spindle element', va='center', rotation='horizontal', fontsize=12)
fspin.subplots_adjust(hspace=0.3)
# +
#Set up the K-complex element now..
# 3-second zoom of a K-complex, highlighted in red; wider +/-200 uV scale.
bold_kcomp = slice(20, 150)
y_hori10 = data_hori[23,]
y_hori10 = y_hori10[700:1007]
fkcomp, axarrkcomp = plt.subplots(1, sharey=False) # makes the 2 subplots share an axis.
plt.setp(axarrkcomp, xticks=[0, 150,300], xticklabels=['0', '1.5', '3'],
yticks=[-200,0, 200])
axarrkcomp.plot(x_kcomp, y_hori10, 'k-', alpha=0.5, linewidth=2.0)
axarrkcomp.plot(x_kcomp[bold_kcomp], y_hori10[bold_kcomp], 'r-', alpha=0.75)
axarrkcomp.set_title('', fontsize=10, fontweight='bold')
axarrkcomp.set_ylim([-200, 200])
axarrkcomp.set_xlabel('Time(s)')
fkcomp.text(0.3, 1.5, 'K-complex element', va='center', rotation='horizontal', fontsize=12)
fkcomp.subplots_adjust(hspace=0.3)
# -
| Scripts/.ipynb_checkpoints/Figure2_Horiscale-checkpoint.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + id="TD3dwD3NNbgt" executionInfo={"status": "ok", "timestamp": 1605493120170, "user_tz": 300, "elapsed": 15734, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "02239321086626355081"}} outputId="ba919020-979d-4321-9b0c-c6be4a50ac0d" colab={"base_uri": "https://localhost:8080/"}
# !pip3 install torch
# !pip3 install VaderSentiment
# !pip3 install transformers
# !pip3 install sentence-transformers
# !pip3 install umap-learn
# !pip3 install hdbscan
# + id="xYmgulfBQA_P" executionInfo={"status": "ok", "timestamp": 1605493120173, "user_tz": 300, "elapsed": 15728, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "02239321086626355081"}}
#import necessary libraries
import sys
import gc
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import nltk
from sklearn.model_selection import train_test_split
from sklearn.feature_extraction import text
from sklearn.feature_extraction.text import TfidfVectorizer
from textblob import TextBlob
from vaderSentiment.vaderSentiment import SentimentIntensityAnalyzer
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.preprocessing import StandardScaler
from sklearn.pipeline import make_pipeline,FeatureUnion
from sklearn.linear_model import LinearRegression,LogisticRegression
from sklearn.svm import SVR, SVC
from sklearn.metrics import plot_roc_curve
from sklearn.neighbors import KNeighborsRegressor, KNeighborsClassifier
from sklearn.tree import DecisionTreeRegressor, DecisionTreeClassifier, plot_tree
from sklearn.ensemble import BaggingRegressor,RandomForestRegressor,AdaBoostRegressor,BaggingClassifier,RandomForestClassifier,AdaBoostClassifier,GradientBoostingClassifier
from sklearn.metrics import confusion_matrix, plot_confusion_matrix, f1_score, classification_report
from sklearn.model_selection import train_test_split, cross_val_score, GridSearchCV
from vaderSentiment.vaderSentiment import SentimentIntensityAnalyzer
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras.models import Sequential
from tensorflow.keras import layers
from keras.regularizers import l2
from keras.layers import Dropout
from keras.preprocessing.text import Tokenizer
from keras.preprocessing.sequence import pad_sequences
from keras.models import Sequential
from keras.layers import Dense
from keras.callbacks import EarlyStopping,CSVLogger
import torch
import transformers as ppb # pytorch transformers
from sentence_transformers import SentenceTransformer
import umap
import hdbscan
# + id="pv9vKC3aPqQt" executionInfo={"status": "ok", "timestamp": 1605493120877, "user_tz": 300, "elapsed": 16427, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "02239321086626355081"}} outputId="d28bd29e-d56b-4f92-d82f-3d2046470c31" colab={"base_uri": "https://localhost:8080/"}
# Lower the GC generation thresholds so collection runs more aggressively,
# then collect immediately to free memory before loading the large dataset.
gc.set_threshold(20,5,5)
gc.collect()
# + id="EBwBXaNKPpN2" executionInfo={"status": "ok", "timestamp": 1605493144953, "user_tz": 300, "elapsed": 40494, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "02239321086626355081"}} outputId="b92351df-be2c-4e39-fc35-c6091f153244" colab={"base_uri": "https://localhost:8080/", "height": 112}
# Pre-processed Amazon book reviews (Colab / Google Drive path).
product_reviews_ratings=pd.read_csv('/content/drive/My Drive/amazon_reviews_project/pre_processed_data.csv')
product_reviews_ratings.head(2)
# + id="TFMKz0YUPpBQ" executionInfo={"status": "ok", "timestamp": 1605493146999, "user_tz": 300, "elapsed": 42534, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "02239321086626355081"}} outputId="e2dc9883-8d41-4bc1-e996-5f8181fd5489" colab={"base_uri": "https://localhost:8080/"}
product_reviews_ratings['asin'].value_counts()
# + id="MlRNg_bwPWMI" executionInfo={"status": "ok", "timestamp": 1605493148009, "user_tz": 300, "elapsed": 43537, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "02239321086626355081"}} outputId="fe9b00c7-e7a4-42b7-bd29-bc4f80c647ce" colab={"base_uri": "https://localhost:8080/"}
#Let's create a list of 5 most reviewed books from this list
# value_counts() is sorted descending, so the first 5 index entries are
# the 5 most-reviewed asins.
product_list=product_reviews_ratings['asin'].value_counts().index[:5]
product_list
# + id="vzpTKzZuPen3" executionInfo={"status": "ok", "timestamp": 1605493148390, "user_tz": 300, "elapsed": 43912, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "02239321086626355081"}} outputId="fe926219-1dd5-4a1f-ddce-45df4f800867" colab={"base_uri": "https://localhost:8080/"}
#Let's start testing with the most reviewed item: '038568231X'
# .copy() makes reviews_1 an independent frame rather than a view of
# product_reviews_ratings, so the in-place dropna below doesn't trigger
# SettingWithCopyWarning and keeps working under pandas copy-on-write.
reviews_1=product_reviews_ratings[product_reviews_ratings['asin']=='038568231X'].copy()
reviews_1.dropna(inplace=True)
# + id="9AaNisaeX4iU" executionInfo={"status": "ok", "timestamp": 1605493148393, "user_tz": 300, "elapsed": 43908, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "02239321086626355081"}}
selected_reviews=reviews_1[(reviews_1['overall']==3)|(reviews_1['overall']==4)]
# + id="ryPiisAnX7Eh" executionInfo={"status": "ok", "timestamp": 1605493148395, "user_tz": 300, "elapsed": 43906, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "02239321086626355081"}} outputId="28b45a81-adc0-49c3-e53e-4c4a8f463fe1" colab={"base_uri": "https://localhost:8080/"}
#Creating a dataframe with only the text of selected reviews
data=selected_reviews[['reviewText']]
#reset index
# reset_index keeps the old index as an 'index' column (dropped later).
data.reset_index(inplace=True)
#And let's drop all nulls
data.dropna(inplace=True)
# + id="isax7JxXZ3qa" executionInfo={"status": "ok", "timestamp": 1605493148396, "user_tz": 300, "elapsed": 43901, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "02239321086626355081"}} outputId="12710b99-147c-43b7-fb2f-351ab67180ff" colab={"base_uri": "https://localhost:8080/", "height": 511}
# Discard the leftover 'index' column introduced by reset_index above.
data.drop('index',axis=1,inplace=True)
data
# + id="-8mUXJoFYLIr" executionInfo={"status": "ok", "timestamp": 1605493435566, "user_tz": 300, "elapsed": 331064, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "02239321086626355081"}} outputId="4dcfab1b-fb81-4f8d-9aa6-91d9b5dc25bb" colab={"base_uri": "https://localhost:8080/", "height": 66, "referenced_widgets": ["6939cd57ad844d05a21ec5ad311cf0d7", "5c2d3c54de4f4f699468e4759b885aa5", "28f29295b1ff496fba52b6a3e6fa8a33", "b11631db941b41eaa32c2140daba4ae8", "<KEY>", "51451ff1db5b455283bc087c3e93d9a2", "e1b1ab6146ea4d41ab717e3f3c37b062", "d294ef1d9c5d4ad7afcd0f473eb72675"]}
#sentence embeddings
# Encode each review into a dense vector with a pre-trained SBERT model.
model = SentenceTransformer('bert-large-nli-stsb-mean-tokens')
embeddings = model.encode(data['reviewText'], show_progress_bar=True)
# + id="7dQTN1Q6bIWP" executionInfo={"status": "ok", "timestamp": 1605493462448, "user_tz": 300, "elapsed": 357940, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "02239321086626355081"}}
#reduce dimensionality to 6, keeping neighbors at 50
# NOTE(review): the comment says neighbors=50 but the code uses 100 —
# the code value is what actually runs.
umap_embeddings = umap.UMAP(n_neighbors=100,n_components=6,metric='cosine').fit_transform(embeddings)
# + id="5HrtqovTcVHv" executionInfo={"status": "ok", "timestamp": 1605493462456, "user_tz": 300, "elapsed": 357944, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "02239321086626355081"}}
#clustering using HDBSCAN
# Density-based clustering on the reduced embeddings; points that don't
# fit any cluster get label -1 ("noise"/unclustered).
clusters=hdbscan.HDBSCAN(min_cluster_size=16,metric='euclidean',cluster_selection_method='eom').fit(umap_embeddings)
# + id="Cge430fjdyz-" executionInfo={"status": "ok", "timestamp": 1605493462459, "user_tz": 300, "elapsed": 357942, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "02239321086626355081"}}
#Let's explore what clusters and the docs in each cluster
# + id="wsGd31JofmOD" executionInfo={"status": "ok", "timestamp": 1605493462460, "user_tz": 300, "elapsed": 357938, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "02239321086626355081"}} outputId="d92be8e9-7814-4cec-a7ae-b11d605e46fd" colab={"base_uri": "https://localhost:8080/"}
labels=np.unique(clusters.labels_)  # distinct cluster ids (-1 = unclustered)
labels
# + id="AB_34zBRfoIr" executionInfo={"status": "ok", "timestamp": 1605493487512, "user_tz": 300, "elapsed": 382985, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "02239321086626355081"}}
#we can use the UMAP method we have used previously to create embedding in 2d and visualize the findings.
# Separate 2-D projection purely for plotting; cluster labels come from
# the 6-D clustering above.
umap_embeddings_2D=umap.UMAP(n_neighbors=25,n_components=2,metric='cosine').fit_transform(embeddings)
df_umap_embeddings_2D=pd.DataFrame(umap_embeddings_2D,columns=['x','y'])
df_umap_embeddings_2D['label']=clusters.labels_
# + id="dFsX510gmFEj" executionInfo={"status": "ok", "timestamp": 1605493487515, "user_tz": 300, "elapsed": 382985, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "02239321086626355081"}} outputId="e5fe17a4-9cc8-40eb-922a-31451bced7b1" colab={"base_uri": "https://localhost:8080/"}
df_umap_embeddings_2D['label'].value_counts(normalize=True)
# + [markdown] id="QvEQBxFl6MoB"
# It appears we can capture over 88% of the reviews by considering the top 6 labels (including unclustered).
# + id="-BW6WSykf9RK" executionInfo={"status": "ok", "timestamp": 1605493491488, "user_tz": 300, "elapsed": 386951, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "02239321086626355081"}} outputId="9cc9e08e-8b15-4ce9-8a67-be1310dc5135" colab={"base_uri": "https://localhost:8080/", "height": 709}
#Now let's plot df_umap_embeddings_2D
# 2-D scatter of all reviews, coloured by HDBSCAN cluster label.
df_umap_embeddings_2D.plot(x='x',y='y',kind='scatter',c='label',cmap='cividis',figsize=(16,12))
# + id="eLDmiIBhiCqb" executionInfo={"status": "ok", "timestamp": 1605493491489, "user_tz": 300, "elapsed": 386946, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "02239321086626355081"}} outputId="e30660b6-3ee0-4755-db00-b90ecf1ee4eb" colab={"base_uri": "https://localhost:8080/", "height": 528}
#Let's create a column with labels in the original dataframe
# NOTE(review): this aligns by index — valid only because `data` was
# reset_index'ed to 0..n-1 before encoding; confirm if the pipeline changes.
data['label']=df_umap_embeddings_2D['label']
data
# + [markdown] id="iUae7Hft7RlS"
# Clusters larger than 2% of the total number of reviews account for ~93% of the sample. So let's create a map with only those clusters.
# + id="XhTMIdxi7-2u" executionInfo={"status": "ok", "timestamp": 1605493491491, "user_tz": 300, "elapsed": 386941, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "02239321086626355081"}} outputId="f7448c4a-5699-4254-bdb9-f48ac812b64f" colab={"base_uri": "https://localhost:8080/"}
# Keep cluster ids (including -1, the unclustered group) that each hold
# more than 2% of the reviews.
selected_clusters=[i for i in range(-1,14) if df_umap_embeddings_2D['label'].value_counts(normalize=True)[i]>0.02]
selected_clusters
# + [markdown] id="txU-5B7G9dmn"
# Let's create a map with just these
# + id="Y7l-JkeM-Ca-" executionInfo={"status": "ok", "timestamp": 1605493491492, "user_tz": 300, "elapsed": 386935, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "02239321086626355081"}} outputId="29179b25-b3b8-4985-a0c3-890755fe8925" colab={"base_uri": "https://localhost:8080/", "height": 708}
# Re-plot the 2-D map restricted to the selected (large) clusters.
df_umap_embeddings_2D_selected_clusters=df_umap_embeddings_2D[df_umap_embeddings_2D['label'].isin(selected_clusters)]
df_umap_embeddings_2D_selected_clusters.plot(x='x',y='y',kind='scatter',c='label',cmap='cividis',figsize=(16,12))
# + id="47ymA2kD9bgb" executionInfo={"status": "ok", "timestamp": 1605493491494, "user_tz": 300, "elapsed": 386931, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "02239321086626355081"}} outputId="60f4dd36-e7ed-43e2-a2b0-fc4ef4323c90" colab={"base_uri": "https://localhost:8080/", "height": 424}
#We can perform the same operation on original dataframe to extract the corresponding reviews.
# deep copy so later mutations don't touch `data`.
data_selected_clusters=data[data['label'].isin(selected_clusters)].copy(deep=True)
data_selected_clusters
# + [markdown] id="_KZsVC1HD9Wk"
# We'll identify the prominent themes in each of these clusters using TF-IDF. Since we'll be performing this operation several times, this is a good place to write an function that would do it for us.
# + id="M5qqQGUSuVJR" executionInfo={"status": "ok", "timestamp": 1605493491495, "user_tz": 300, "elapsed": 386926, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "02239321086626355081"}}
def create_doc(input_dataframe, clusters_to_select):
    """Split a labelled DataFrame into one sub-frame per requested cluster.

    Parameters
    ----------
    input_dataframe : pandas.DataFrame
        Must contain a 'label' column of cluster ids.
    clusters_to_select : iterable
        Cluster ids to extract, in the order the sub-frames are returned.

    Returns
    -------
    list of pandas.DataFrame
        One frame per id in `clusters_to_select`, each holding the rows
        whose 'label' equals that id.
    """
    return [
        input_dataframe[input_dataframe['label'] == cluster_id]
        for cluster_id in clusters_to_select
    ]
# + id="LlUIadz1_4g1" executionInfo={"status": "ok", "timestamp": 1605493491497, "user_tz": 300, "elapsed": 386924, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "02239321086626355081"}} outputId="8440fec6-f3ff-4865-e0af-72db890c1f43" colab={"base_uri": "https://localhost:8080/"}
# One DataFrame per selected cluster, in selected_clusters order.
docs=create_doc(data_selected_clusters,selected_clusters)
print(docs)
# + id="BMlqaU5vAq51" executionInfo={"status": "ok", "timestamp": 1605493491498, "user_tz": 300, "elapsed": 386921, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "02239321086626355081"}}
def docs_TFIDF_vectorizer(docs):
    """TF-IDF vectorize the 'reviewText' column of each cluster DataFrame.

    Parameters
    ----------
    docs : list of pandas.DataFrame
        One frame per cluster, each with a 'reviewText' column
        (as produced by ``create_doc``).

    Returns
    -------
    list of (list of str, scipy.sparse matrix)
        For each input frame, the fitted vocabulary and the TF-IDF
        document-term matrix.  The vectorizer is re-fit per cluster,
        so vocabularies differ between entries.
    """
    from sklearn.feature_extraction.text import TfidfVectorizer
    # English stop words extended with corpus-specific noise tokens
    # (numbers, punctuation fragments, single letters, honorifics, ...).
    stop_words = text.ENGLISH_STOP_WORDS.union(['00', '10', '100', '12', '15', '16', '20', '200', '24', '25',
        '2nd', '30', '40', '45', '50', '60', '75', '80', '90','!',"''","'m","'s",',','.','...','He','I','It','My','Of','``',
        '!',"''","'m","'re","'s",',','-','.','...','9','An','Ca','Do','I','It','S.','``','!',"''","'s",'(',')',',','-','.',
        'b', 'c', 'd', 'e', 'f', 'g', 'h', 'k', 'l', 'm', 'n', 'o', 'p', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y','&',"'ll",'-D','5',':','?',
        "'", '0', '1', '2', '3', '4', '6', '7', '8', 'A', 'C', 'D', 'H', 'M', 'O', 'S', '`','#',"'ve",'*','--','..','....','10/10','4/5',';',
        'As','At','HE','IS','IT','If','In','MY','No','ON','On','PR','SO','So','St','To','US','We','/', 'E', 'N', 'P', 'R', 'T', 'U', 'W', 'Y',
        '$','%',"'S","'d",'.....','1/2','1/3','105','12-lead','125','14','150','198','1\\23\\18','22','221','27','3-3.5','35','AM','Be','By',
        'CK','DJ','De','Dr','HM','JE','K.','L','MB','Mr','Ms','R.','TO','W.','YA','B', 'J', 'K', '\\','@','Im','Me','Is','000','100ish','11',
        '178','191','1945','1st','2015','2016','260','36','360','3rds','3star','55','70','78','80s','86','87','99','_____'])
    #initialize TFIDF vectorizer
    vectorizer = TfidfVectorizer(stop_words=stop_words)
    #create an empty list
    tfidf_vectorized_docs = []
    #loop over the docs, fitting a fresh vocabulary for each cluster
    for doc in docs:
        X = vectorizer.fit_transform(doc['reviewText'])
        # get_feature_names() was removed in scikit-learn 1.2; prefer
        # get_feature_names_out() when available and fall back otherwise,
        # always returning a plain list so the output type is stable.
        if hasattr(vectorizer, 'get_feature_names_out'):
            feature_names = list(vectorizer.get_feature_names_out())
        else:
            feature_names = vectorizer.get_feature_names()
        tfidf_vectorized_docs.append((feature_names, X))
    return tfidf_vectorized_docs
# + id="cocTme3rKL2O" executionInfo={"status": "ok", "timestamp": 1605493491501, "user_tz": 300, "elapsed": 386921, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "02239321086626355081"}} outputId="da887d88-74b3-4231-87ec-23ca10d3a573" colab={"base_uri": "https://localhost:8080/"}
tfidf_data=docs_TFIDF_vectorizer(docs)
# + id="tcPvzDh6SCVj" executionInfo={"status": "ok", "timestamp": 1605493491502, "user_tz": 300, "elapsed": 386916, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "02239321086626355081"}} outputId="b8d6af69-d545-4c44-ebbe-631600181e23" colab={"base_uri": "https://localhost:8080/"}
tfidf_data
# + id="DGVNdGBtKPPs" executionInfo={"status": "ok", "timestamp": 1605493491502, "user_tz": 300, "elapsed": 386912, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "02239321086626355081"}}
tfidf_data_complete_list=[pd.DataFrame(tfidf_data[i][1].todense(),columns=tfidf_data[i][0]) for i in range(len(tfidf_data))]
# + id="exg0sF0sKTkM" executionInfo={"status": "ok", "timestamp": 1605493491503, "user_tz": 300, "elapsed": 386909, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "02239321086626355081"}} outputId="de25c579-b24c-4745-d7b8-c1f1a531a566" colab={"base_uri": "https://localhost:8080/"}
selected_clusters
# + [markdown] id="V-2GtHpsWrSM"
# The corresponding dataframes with tfidf data maybe called df_unclustered, df_1, df_6, df_7, df_13. We can assign names easily as follows:
# + id="Hi57CE_OOf5a" executionInfo={"status": "ok", "timestamp": 1605494188353, "user_tz": 300, "elapsed": 341, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "02239321086626355081"}}
[df_unclustered,df_1, df_6, df_7, df_13]=[pd.DataFrame(tfidf_data[i][1].todense(),columns=tfidf_data[i][0]) for i in range(len(tfidf_data))]
# + [markdown] id="gCGWUNMHX6dP"
# Let's take a look at couple example to check if this worked.
# + id="GpR588bvXf6J" executionInfo={"status": "ok", "timestamp": 1605494190127, "user_tz": 300, "elapsed": 354, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "02239321086626355081"}} outputId="e646a218-4d41-4612-94a0-7262ab0cb5e5" colab={"base_uri": "https://localhost:8080/", "height": 193}
df_unclustered.head(3)
# + id="pZW1bzX5YIP1" executionInfo={"status": "ok", "timestamp": 1605494208196, "user_tz": 300, "elapsed": 651, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "02239321086626355081"}} outputId="0bf25921-e32e-4c6d-c803-3381d10ab560" colab={"base_uri": "https://localhost:8080/", "height": 161}
df_1.head(2)
# + id="LtS7xjEOYI7R" executionInfo={"status": "ok", "timestamp": 1605494223536, "user_tz": 300, "elapsed": 501, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "02239321086626355081"}} outputId="d1fcfded-096f-4be5-a3f8-a7851f919f84" colab={"base_uri": "https://localhost:8080/", "height": 161}
df_6.head(2)
# + [markdown] id="4IC5PW6IYVTB"
# We are finally ready to look at the most prominent words for each cluster.
# + id="gxXCiOJ7Xiw4" executionInfo={"status": "ok", "timestamp": 1605493491673, "user_tz": 300, "elapsed": 387051, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "02239321086626355081"}} outputId="37f0727d-71da-4244-a1df-f25e8ae2b474" colab={"base_uri": "https://localhost:8080/"}
df_unclustered.sum().nlargest(10)
# + id="xL9lWxk8Yg0H" executionInfo={"status": "ok", "timestamp": 1605494242310, "user_tz": 300, "elapsed": 245, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "02239321086626355081"}}
#List comprehension to create separate list for each cluster
# Top-10 terms (by summed TF-IDF) for each cluster's DataFrame.
n_prominent_words=[df.sum().nlargest(10).index for df in [df_unclustered,df_1, df_6, df_7, df_13]]
# + id="hUNaWUCUZIB1" executionInfo={"status": "ok", "timestamp": 1605494269533, "user_tz": 300, "elapsed": 508, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "02239321086626355081"}}
# Transpose so each cluster becomes a column of its top-10 words.
df_n_prominent_words=pd.DataFrame(n_prominent_words).T
df_n_prominent_words.columns=['df_unclustered','df_1', 'df_6', 'df_7', 'df_13']
# + id="zt6z6zpcbozy" executionInfo={"status": "ok", "timestamp": 1605494271047, "user_tz": 300, "elapsed": 471, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "02239321086626355081"}} outputId="5b0173e2-b140-445e-f629-750b179a7a8f" colab={"base_uri": "https://localhost:8080/", "height": 363}
df_n_prominent_words
# + [markdown] id="B1ilL7XPiHhZ"
# ### These are quick impressions on the clusters based on just the words with high TF-IDF values.
#
# **df_unclustered:** This is our largest group. They enjoyed the characters, the story and especially the ending. The book was interesting to them.
# **df_1:**
# **df_6:**
# **df_7:**
# **df_13:**
# + id="Mxsk3pxQi4d8" executionInfo={"status": "ok", "timestamp": 1605493491677, "user_tz": 300, "elapsed": 387037, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "02239321086626355081"}}
| older files and iterations/22_Review_Clustering.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: PyCharm (pycon2015_tutorial322)
# language: python
# name: pycharm-cef1f773
# ---
# ## Grid Search Hyperparameter optimization
# This case study is all about using grid searches to identify the optimal parameters for a machine learning algorithm. To complete this case study, you'll use the Pima Indian diabetes dataset from Kaggle and KNN. Follow along with the preprocessing steps of this case study.
# + [markdown] tags=[]
# Load the necessary packages
# +
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
# Apply seaborn's default plotting theme.
sns.set()
# set random seed to try make this exercise and solutions reproducible (NB: this is just for teaching purpose and not something you would do in real life)
random_seed_number = 42
np.random.seed(random_seed_number)
# + [markdown] tags=[]
# #### Load the diabetes data
# -
diabetes_data = pd.read_csv('data/diabetes.csv')
diabetes_data.head()
# + [markdown] tags=[]
# **<font color='teal'> Start by reviewing the data info.</font>**
# -
diabetes_data.info()
# + [markdown] tags=[]
# **<font color='teal'> Apply the describe function to the data.</font>**
# -
diabetes_data.describe()
# + [markdown] tags=[]
# **<font color='teal'> Currently, the missing values in the dataset are represented as zeros. Replace the zero values in the following columns ['Glucose','BloodPressure','SkinThickness','Insulin','BMI'] with nan .</font>**
# -
cols = ['Glucose','BloodPressure','SkinThickness','Insulin','BMI']
# Zeros in these physiological columns encode missing data; map both the
# numeric 0 and the string '0' to NaN so they can be imputed later.
diabetes_data[cols] = diabetes_data[cols].replace({'0': np.nan, 0: np.nan})
# + [markdown] tags=[]
# **<font color='teal'> Plot histograms of each column. </font>**
# + pycharm={"name": "#%%\n"}
def plotHistogram():
    '''
    Draw one histogram per column of the (module-level) diabetes_data frame,
    arranged on a 3-column grid of subplots.

    Layout adapted from: pandas histogram: plot histogram for each column as
    subplot of a big figure
    https://stackoverflow.com/questions/39646070/pandas-histogram-plot-histogram-for-each-column-as-subplot-of-a-big-figure
    '''
    n_cols = 3
    fig, axes = plt.subplots(len(diabetes_data.columns) // n_cols, n_cols, figsize=(12, 18))
    # walk the subplot grid in row-major order, one column per cell
    for idx, axis in enumerate(axes.flatten()):
        diabetes_data.hist(column=diabetes_data.columns[idx], bins=10, ax=axis)
# + pycharm={"name": "#%%\n"}
plotHistogram()
# + [markdown] tags=[]
# #### Replace the zeros with mean and median values.
# -
diabetes_data['Glucose'].fillna(diabetes_data['Glucose'].mean(), inplace = True)
diabetes_data['BloodPressure'].fillna(diabetes_data['BloodPressure'].mean(), inplace = True)
diabetes_data['SkinThickness'].fillna(diabetes_data['SkinThickness'].median(), inplace = True)
diabetes_data['Insulin'].fillna(diabetes_data['Insulin'].median(), inplace = True)
diabetes_data['BMI'].fillna(diabetes_data['BMI'].median(), inplace = True)
# + [markdown] tags=[]
# **<font color='teal'> Plot histograms of each column after replacing nan. </font>**
# -
plotHistogram()
# + [markdown] tags=[]
# #### Plot the correlation matrix heatmap
# -
plt.figure(figsize=(12,10))
print('Correlation between various features')
p=sns.heatmap(diabetes_data.corr(), annot=True,cmap ='Blues')
# + [markdown] tags=[]
# **<font color='teal'> Define the `y` variable as the `Outcome` column.</font>**
# -
y = diabetes_data['Outcome']
X = diabetes_data.drop('Outcome', axis=1)
# + [markdown] tags=[]
# **<font color='teal'> Create a 70/30 train and test split. </font>**
# -
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=123)
# + [markdown] tags=[]
# **<font color='teal'> Using Sklearn, standarize the magnitude of the features by scaling the values. </font>**
# -
# Note: Don't forget to fit() your scaler on X_train and then use that fitted scaler to transform() X_test. This is to avoid data leakage while you standardize your data.
scaler = StandardScaler()
scaler.fit(X_train)
# per-feature means learned from the training split (displayed for inspection)
scaler.mean_
# + pycharm={"name": "#%%\n"}
# NOTE(review): the transformed array below is displayed but never assigned,
# and X_train is never transformed at all — every model fit further down uses
# the UNSCALED features. Assign the transform results if scaling is intended.
scaler.transform(X_test)
# + [markdown] tags=[]
# #### Using a range of neighbor values of 1-10, apply the KNearestNeighbor classifier to classify the data.
# +
from sklearn.neighbors import KNeighborsClassifier
# Collect train/test accuracy for k = 1..9 (range(1, 10) excludes 10).
test_scores = []
train_scores = []
for i in range(1,10):
    knn = KNeighborsClassifier(i)
    knn.fit(X_train,y_train)
    train_scores.append(knn.score(X_train,y_train))
    test_scores.append(knn.score(X_test,y_test))
# + [markdown] tags=[]
# **<font color='teal'> Print the train and test scores for each iteration.</font>**
# + pycharm={"name": "#%%\n"}
for score in train_scores:
print(f'{score:.4f}')
# + pycharm={"name": "#%%\n"}
print('Test scores:')
for score in test_scores:
print(f'{score:.4f}')
# + [markdown] tags=[]
# **<font color='teal'> Identify the number of neighbors that resulted in the max score in the training dataset. </font>**
# -
print(f'{train_scores.index(max(train_scores)) + 1} is the number of neighbors resulting in the max score on training data of {max(train_scores):.4f}.')
# + [markdown] tags=[]
# **<font color='teal'> Identify the number of neighbors that resulted in the max score in the testing dataset. </font>**
# -
print(f'{test_scores.index(max(test_scores)) + 1} is the number of neighbors resulting in the max score on testing data of {max(test_scores):.4f}.')
# + [markdown] tags=[]
# Plot the train and test model performance by number of neighbors.
# -
plt.figure(figsize=(12,5))
p = sns.lineplot(range(1,10),train_scores,marker='*',label='Train Score')
p = sns.lineplot(range(1,10),test_scores,marker='o',label='Test Score')
# + [markdown] tags=[]
# **<font color='teal'> Fit and score the best number of neighbors based on the plot. </font>**
# -
knn = KNeighborsClassifier(7)
knn.fit(X_train, y_train)
from sklearn.metrics import confusion_matrix
y_pred = knn.predict(X_test)
pl = confusion_matrix(y_test,y_pred)
# + [markdown] tags=[]
# **<font color='teal'> Plot the confusion matrix for the model fit above. </font>**
# + pycharm={"name": "#%%\n"}
pl
# -
sns.heatmap(pl, annot=True)
# + [markdown] tags=[]
# **<font color='teal'> Print the classification report </font>**
# +
from sklearn.metrics import accuracy_score, classification_report
print(f"Accuracy score: {accuracy_score(y_test, y_pred)}")
print('Classification report:')
print(classification_report(y_test, y_pred))
# + [markdown] tags=[]
# #### In the case of the K nearest neighbors algorithm, the K parameter is one of the most important parameters affecting the model performance. The model performance isn't horrible, but what if we didn't consider a wide enough range of values in our neighbors for the KNN? An alternative to fitting a loop of models is to use a grid search to identify the proper number. It is common practice to use a grid search method for all adjustable parameters in any type of machine learning algorithm. First, you define the grid — aka the range of values — to test in the parameter being optimized, and then compare the model outcome performance based on the different values in the grid.
# + [markdown] tags=[]
# #### Run the code in the next cell to see how to implement the grid search method for identifying the best parameter value for the n_neighbors parameter. Notice the param_grid is the range value to test and we apply cross validation with five folds to score each possible value of n_neighbors.
# -
from sklearn.model_selection import GridSearchCV
param_grid = {'n_neighbors':np.arange(1,50)}
knn = KNeighborsClassifier()
knn_cv= GridSearchCV(knn,param_grid,cv=5)
knn_cv.fit(X,y)
# + [markdown] tags=[]
# #### Print the best score and best parameter for n_neighbors.
# -
print("Best Score:" + str(knn_cv.best_score_))
print("Best Parameters: " + str(knn_cv.best_params_))
# Here you can see that the ideal number of n_neighbors for this model is 14 based on the grid search performed.
#
# TODO: Discuss numbers. Looks like 31; not 14 is the best number of neighbors.
# __A__: After discussion the exercise expects 14 to be the best number of neighbors.
# However, the calculations above are very close to the answer key and result in 31 neighbors.
# The difference could be because of randomness in the model.
# + [markdown] tags=[]
# **<font color='teal'> Now, following the KNN example, apply this grid search method to find the optimal number of estimators in a Randon Forest model.
# </font>**
# +
from sklearn.ensemble import RandomForestClassifier
param_grid = {'n_estimators': np.arange(1, 100)}
rf = RandomForestClassifier()
rf_cv = GridSearchCV(rf, param_grid, cv=5, n_jobs=10)
rf_cv.fit(X, y)
# + tags=[]
print("Random Forest Classifier")
print("Best Score:" + str(rf_cv.best_score_))
print("Best Parameters: " + str(rf_cv.best_params_))
| GridSearchKNN_Case_Study.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Results
#
import sys
sys.path.insert(0, '..')
from utils import plot_stroke
# This is what you should modify:
from models.dummy import generate_unconditionally, generate_conditionally, recognize_stroke
# Please, don't modify anything below.
#
# ### Unconditional generation:
# Sample a handwriting stroke sequence with no conditioning text.
stroke = generate_unconditionally()
plot_stroke(stroke)
# ### Conditional generation:
# Generate a handwriting stroke sequence for the given text prompt.
stroke = generate_conditionally(text='welcome to lyrebird')
plot_stroke(stroke)
# ### Handwriting recognition:
# Recognize the text of the stroke generated just above.
text = recognize_stroke(stroke)
print(text)
| notebooks/results.ipynb |
# -*- coding: utf-8 -*-
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: SageMath 9.3
# language: sage
# name: sagemath
# ---
#
# # Modelando Doenças Infecciosas
# ## Entendendo Epidemias como uma Reação Química
#
# A modelagem do espalhamento de doenças infecciosas ou epidemias começou clássicamente como extensão dos princípios empregados na modelagem de reações químicas. Assumindo uma população "bem misturada", ou seja, em que a probabilidade de encontro entre qualquer par de indivíduos é igual.
#
# Tais populações são então divididas em classes imunológicas, e a partir da interação entre indivíduos pertencentes a estas classes, a dinâmica se origina.
# ## O modelo SI
#
# No modelo SI temos apenas duas classes de indivíduos, os saudáveis, mas **suscetíveis** a contrair uma doença, e os que já foram **infectados** pela doença e pode portanto, espalhá-la por meio do contato direto. Note que nem todas as doenças se transmitem desta forma, mas neste modelo inicial vamos considerar que sim. O indivíduos transitam entre estes dois estados por meio de eventos de infecção ($S\rightarrow I$) e recuperação ($I\rightarrow S$)
#
# $$S\leftrightharpoons_{\beta}^{\gamma}I$$
#
# em termos de "reações" temos:
#
# $$S+I\,\, \rightarrow \,\, 2I$$
#
# $$I \rightarrow S$$
# ### Escrevendo as Equações
#
# Novamente lançaremos mão da lei de ação de massas e do fato de que o sistema é fechado, ou seja o número de indivíduos não varia.
#
# $$\frac{dS}{dt}= \gamma I -\beta S I$$
#
# $$\frac{dI}{dt}= \beta SI - \gamma I$$
#
# Utilizamos os parâmetros $\beta$ e $\gamma$ para representar as taxas de infecção e recuperação, respectivamente.
# ## Exercícios:
#
# 1. Mostre que a população total é conservada no modelo SI.
#
#
# %display typeset
# 2. O Modelo SI apresentado acima poderia ser simplificado para uma única equação diferencial? Se sim, escreva esta equação.
# ## Aplicando Análise Dimensional ao Modelo
#
# Vamos adotar as unidades de tempo em dias. Logo, o lado esquerdo das equações terá unidades de \[número de pessoas\]/\[dias\]. Logo, concluímos que $\gamma$ possui unidades dias$^{-1}$ enquanto $\beta$ tem unidades de pessoas$^{-1} \times$ dias$^{-1}$, ou seja, é uma taxa de infecção per capita.
# #### Exercício 3:
#
# Dada a análise dimensional acima, que parâmetros (ou combinação de parâmetros) carrega a informação sobre a escala de tempo? E qual seria uma escala de tempo adequada para descrever os eventos descritos pelo modelo?
# ### Adimensionalizando o modelo
#
# Seja $x^*(t)$ a fração da população no estado infeccioso e $y^*(t)$ a fração da população total suscetível no tempo $t$. Vamos assumir que estas variáveis adimensionais somam $1$, ou seja, $x^* + y^* = 1$. temos então:
#
# $$y^* = \frac{S}{N},$$
#
# $$x^* = \frac{I}{N},$$
#
# $$t^* = \frac{t}{1/\gamma} = \gamma t.$$
#
# se $S(t) +I(t) = N,$ então:
#
# $$\frac{S}{N}+\frac{I}{N}=y^* + x^* = \frac{N}{N} = 1.$$
#
# Agora podemos substituir as novas variáveis adimensionais no modelo e obter:
#
# $$\frac{d(y^* N)}{d(t^*/\gamma)} = \gamma x^* N - \beta (y^*N)(x^* N)$$
#
# $$\frac{d(x^* N)}{d(t^*/\gamma)} = \beta(y^* N)(x^* N) - \gamma x^* N$$
#
# Cancelando os fatores comuns $N$ e $\gamma$ em ambos os lados das duas equações, chegamos a:
#
# $$\frac{dy^*}{dt^*} = x^* - \left(\frac{\beta N}{\gamma}\right)x^*y^*$$
#
# $$\frac{dx^*}{dt^*} = \left(\frac{\beta N}{\gamma}\right) x^*y^* - x^* $$
#
# Agora podemos notar que restou uma razão de parâmetros que vamos denotar por $R_0 = \frac{\beta N}{\gamma}$. Este novo parâmetro é muito importante e controla completamente o comportamento qualitativo do modelo, como veremos mais adiante. Por ora, vamos re-escrever o modelo em termos do $R_0$ e deixar de lado as $*$:
#
# $$\frac{dy}{dt} = x - R_0 xy$$
#
# $$\frac{dx}{dt} = R_0 xy - x $$
#
# Se repetirmos o processo de adimensionalização para o modelo reduzido a uma única equação, derivado no exercício 2, acima, chegamos à seguinte equação:
#
# $$\frac{dx}{dt} = \left(\frac{\beta N}{\gamma}\right) (1-x)x-x $$
#
# ou, em termos do R_0:
#
# $$\frac{dx}{dt} = R_0 (1-x)x-x $$
# #### Exercício 4:
#
# Considere que $1/\gamma$ é o tempo típico de recuperação, o que significa o tempo em que a pessoa está doente e pode transmitir a doença para outros. Suponha que inicialmente tenhamos uma única pessoa infectada em uma população de $N$ indivíduos suscetíveis (para $N$ grande $N-1 \approx N$). Explique o significado de R_0 para a dinâmica da epidemia.
#
#
# ## Analisando o Modelo
#
# Agora que temos o modelo construído e devidamente simplificado, passemos à sua análise aplicando as ferramentas que já conhecemos.
# ### Encontrando os Equilíbrios
#
# Vamos partir do modelo reduzido a uma equação, expresso em tremo do $R_0$. Lembre-se que os equilíbrios devem satisfazer $dx/dt=0$.
# #### Exercício 5
#
# Encontre os equilíbrios, se estes existirem, e interprete os resultados.
#
var('R_0')
f(x) = R_0*(1-x)*x-x
solve(f,x)
#
#
# O equilíbrio em que $x$, nosso número de infectados é 0, é chamado de equilíbrio livre de doença, e neste caso $y=1$.
# #### Exercício 6:
#
# No caso em que $x=(R_0-1)/R_0$, qual o valor de $y$?
#
var('y')
x=(R_0-1)/R_0
pretty_print(html('se'))
show('x=',expand(x))
y=1-x
pretty_print(html('então $y$ no "equilíbrio endêmico" torna-se:'))
pretty_print(expand(y));
#
#
# É importante notar também que o "equilíbrio endêmico" só faz sentido, biologicamente falando, se $x>0$, logo $R_0$ também precisa ser maior que 1. O que nos leva ao seguinte teorema:
# #### Teorema:
#
# Em um modelo SI, uma doença só pode tornar-se endêmica, se $R_0>1$, onde $R_0$ é o numero reprodutivo básico da doença, $R_0=\beta N/\gamma$.
#
#
# ## Comportamento Qualitativo
#
# Para aplicar nosso inspeção gráfica dos equilíbrios do sistema, precisamos plotar $f(x) = dx/dt$
#
pl = plot(f(R_0=1.5),(x,-0.05,0.4),ymin=-0.01)
e1 = point((1-1/1.5,0),size=50,color='red') # 1-1/R_0
show(pl+e1)
# ### Simulações
def fun(t, x, p):
    """RHS of the dimensionless SI model: dx/dt = R_0 (1 - x) x - x."""
    basic_reproduction = p[0]
    infected = x[0]
    return [basic_reproduction * (1 - infected) * infected - infected]
T=ode_solver()
T.function = fun
t_span = [0,20]
ic = [1e-4]
r0 = 2
T.ode_solve(t_span,ic,num_points=100, params=[r0])
sol = list_plot([(i[0],i[1][0]) for i in T.solution])
ee = plot(1-(1/r0),(x,0,20),color='green')
sol+ee
#
# ### Bifurcações
#
# Com vimos acima, o valor de $R_0=1$ parece ser um ponto de bifurcação para este modelo dada a alteração na natureza dos seus equilíbrios. Vamos construir um diagrama de bifurcações em função de $R_0$:
#
import numpy as np
def drawbif(func,l,u):
    """Plot a bifurcation diagram: equilibria of `func` for R_0 in [l, u].

    For each of 100 sampled R_0 values the equilibria are found symbolically
    with Sage's solve(); only equilibria with non-negative real part are kept
    (the biologically meaningful states).
    """
    pts = []
    for v in np.linspace(l,u,100):
        g = func(R_0=v)
        xvals = solve(g,x)
        # keep only non-negative (real part of) equilibrium values
        pts.extend([[v,n(i.rhs().real_part())] for i in xvals if n(i.rhs().real_part())>=0])
    show(points(pts),axes_labels=['$R_0$','$x$'],gridlines=True, xmin=0)
var('R_0')
f(x) = R_0*(1-x)*x - x
drawbif(f,0,4)
#
# #### Exercício 7:
#
# Interprete a bifurcação acima e diga que tipo de bifurcação dentre as estudadas nós observamos acima
#
#
# ## O Modelo SIR
#
# Se extendermos o cenário epidemico que deu origem ao modelo SI, com a introdução da possibilidade dos indíviduos acometidos pela doença tornarem-se imunes à doença, obtemos outro modelo epidemiológico clássico, o modelo SIR., que pode ser descrito por meio do seguinte sistema de EDOs:
#
# $$\frac{dS}{dt} = - \beta I S$$
#
# $$ \frac{dI}{dt} = \beta I S - \gamma I $$
#
# $$\frac{dR}{dt} = \gamma I$$
# #### Exercício 8:
#
# Interprete as equações acima e esboçe um diagrama de blocos para o modelo SIR
#
#Podemos usar o suporte do sage a grafos para desenhar o diagrama
var('S I R beta gamma')
Modelo = DiGraph({S:{I: r'$\beta$'}, I:{R:r'$\gamma$'}, R: {}})
Modelo.show(edge_labels=True)
#
# #### Exercício 9:
#
# Explique porque o modelo acima pode ser estudado como um sistema de apenas duas variáveis. Qual variável não encontra-se acoplada às outras duas?
# #### Exercício 9.5:
#
# Analise o modelo SIR com apenas as duas primeiras equações de forma similar ao que foi feito com o modelo SI. Encontre a expressão do $R_0$ para este modelo, e os valores de $S_{\infty}$ e $I_{\infty}$ no equilíbrio endêmico.
# #### Exercício 10:
#
# Divida $dI/dt$ por $dS/dt$ e simplifique a equação para obter uma EDO para I em função de S. Resolva a equação, obtendo a seguinte solução: $I(S)=-S+\frac{\gamma}{\beta}ln S + K$, plote a solução e interprete.
#
var('beta I S gamma t')
S = function('S')(t)
I = function('I')(t)
dsdt = diff(S,t) == -beta*I*S
didt = diff(I,t) == beta*I*S - gamma*I
dids = didt/dsdt
show(dids)
pretty_print(html("Agora vamos simplificar esta equação:"))
dids2 = simplify(expand(dids))
show(dids2)
# Agora podemos resolver facilmente a equação simplificada:
sol = integrate(dids2,S)
sol
# E, após atribuir valores a $\gamma$ e $\beta$ podemos plotar a solução e interpretá-la
gamma=0.3
beta = .5
var('S I')
ip = implicit_plot(S+I-gamma/beta*log(S)-10, (S,0,12), (I,0,10), gridlines=True, axes_labels=["S","I"])
p = point([gamma/beta, (-S+gamma/beta*log(S)+10).subs(S=gamma/beta)], color='red', pointsize=30 )
show(ip+p)
# A partir desta função, podemos encontrar o número máximo de infectados, $I_{max}$ em uma epidemia. Ele ocorre quando $S=\frac{\gamma}{\beta}$.
var('gamma beta')
f=diff(S+I-gamma/beta*log(S)-10, S)
solve(f,S)
solve(S+0-gamma/beta*log(S)-10, S)
#
#
# A partir de um exame rápido da orbita obtida pelo gráfico implícito acima, vemos que ela nunca alcança o eixo do $I$, ou seja, $S$ sempre será positivo. Isto significa que uma fração da população sempre escapará da epidemia.
# #### Exercício 11:
#
# Fazer a Análise dimensional da Solução $I(t, S)$ encontrando a dimensão da constante e sua interpretação epidemiológica.
# ### Simulando o Modelo SIR
#
# Podemos também explorar o modelo por meio de simulações numéricas
#
def fun(t, y, pars):
    """RHS of the SIR model in absolute numbers, total population N = 501."""
    S, I, R = y
    N = 501
    beta, mu = pars
    new_infections = beta*I*S/N
    recoveries = mu*I
    return [-new_infections,
            new_infections - recoveries,
            recoveries
           ]
T = ode_solver()
T.function = fun
inits = 500,1, 0
tspan = [0,80]
T.ode_solve(tspan, inits, num_points=500, params=[.5,.09])
T.plot_solution(0,interpolate=True, legend_label='S')
T.plot_solution(1,interpolate=True, legend_label='I')
T.plot_solution(2,interpolate=True, legend_label='R')
#
# ### Analisando o final da Epidemia
#
# Será que os suscetíveis são necessariamente consumidos completamente durante uma epidemia?
#
# Para tentar responder a esta pergunta, podemos dividir a 1ª equação do modelo SIR pela 3ª.
#
# A partir de agora vamos interpretar os nossas variáveis $S$, $I$, $R$ como Frações de N, ou seja, $S/N$, $I/N$, $R/N$, Isto faz com que a nossa expressão para $R_0$ se reduza a $\beta/\gamma$, assumindo $S \approx N = 1$.
#
var('beta gamma S I R t')
S = function('S')(t)
R = function('R')(t)
f1 = diff(S,t) == -beta*S*I
f2 = diff(R,t) == gamma*I
f1/f2
#
#
# Podemos então reescrever a equação obtida acima como
#
# $$\frac{dS}{dR} = -R_0 S$$
#
# +
f3 = f1/f2
pretty_print(html("Resolvendo a equação, obtemos"))
var ('beta X gamma Y R_0 S_0')
X = function('X')( Y)
solution = desolve(diff(X,Y)== -R_0*X, X, ivar=Y)
solution
# -
# Onde $C$ é $S_0$
solution.subs(_C=S_0)
solution.subs(_C=S_0, R_0=beta*S_0/gamma)
# Plot S(R) = S0 exp(-R_0 R) for concrete parameter values.
var('x')
gamma=0.09
beta = .001
s0 = 100
# BUG FIX: the original used `mu`, which is undefined at this point (it only
# exists as a local inside the SIR `fun` above); the recovery rate in this
# notebook is `gamma`, and R_0 = beta*N/gamma per the text.
pretty_print(html("$R_0={}$".format(beta*s0/gamma)))
plot(s0*exp(-beta*s0*x/gamma), (x, 0,10),axes_labels=["R","S"])
f(S,I) = -beta*I*S
g(I,S) = beta*I*S - gamma*I
IN = plot3d(g(beta=0.008,gamma=0.1),(S,0,200),(I,0,500),alpha=.5, color="red")
SN = plot3d(f(beta=0.008,gamma=0.1),(S,0,200),(I,0,500))
show(IN+SN)
# ### Examinando a estabilidade do ELD no modelo SIR
var('beta I S gamma t')
solve([-beta*I*S,beta*I*S - gamma*I],[S,I])
var('beta I S gamma t')
jack = jacobian([-beta*I*S,beta*I*S - gamma*I],[S,I])
show(jack)
jack(beta=0.2, gamma=0.1, S=100, I=0).eigenvalues()
# ## O Modelo SEIS
#
# Vamos considerar uma extensão do Modelo SI, no qual os indivíduos infectados não se tornam imediatamente infecciosos, mas passam por um período de incubação
#
# $$\frac{dS}{dT}=B-\beta SI-\mu S+\gamma I$$
#
# $$\frac{dE}{dT}=\beta SI-(\epsilon +\mu )E$$
#
# $$\frac{dI}{dT}=\varepsilon E-(\gamma +\mu )I$$
var('beta B N E I S gamma epsilon mu t')
dsdt = B -beta*I*S -mu*S +gamma*I
dedt = beta*I*S - (epsilon+mu)*E
didt = epsilon*E-(gamma+mu)*I
(dsdt+dedt+didt).simplify()
# $B=\mu(S+E+I)$, logo $B=\mu N$
eqs = solve([dsdt,dedt, didt ],[S,E,I])
eqs
def seis(t, y, pars):
    """RHS of the SEIS model with vital dynamics (birth B, death rate mu)."""
    S, E, I = y
    beta, B, gamma, epsilon, mu = pars
    dS = B - beta*I*S - mu*S + gamma*I
    dE = beta*I*S - (epsilon+mu)*E
    dI = epsilon*E - (gamma+mu)*I
    return dS, dE, dI
T = ode_solver()
T.function = seis
inits = 0.999,.001, 0
tspan = [0,80]
T.ode_solve(tspan, inits, num_points=500, params=[.9,0.01,0.09, 0.1,0.01])
T.plot_solution(0, legend_label='S(t)')
T.plot_solution(1, legend_label='E(t)')
T.plot_solution(2, legend_label='I(t)')
J = jacobian([dsdt,dedt,didt],[S,E,I])
J
autov = J.subs({S:eqs[0][0].rhs(),I:eqs[0][2].rhs()}).eigenvalues()
autov
for av in autov:
show(av.subs({beta:.9,B:.01,epsilon:.1,gamma:.09,mu:.01}))
# ## O modelo SEIR
#
# Outra variação onde adicionamos a imunidade permanente ao SEIS:
#
# $$\frac{dS}{dT}=B-\beta SI-\mu S$$
#
# $$\frac{dE}{dT}=\beta SI-(\epsilon +\mu )E$$
#
# $$\frac{dI}{dT}=\varepsilon E-(\gamma +\mu )I$$
#
# $$\frac {dR}{dT}=\gamma I-\mu R$$
| Planilhas Sage/Aula 4 - Espalhamento de Epidemias.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import string
import networkx as nx
import re
from nltk import word_tokenize, pos_tag
from pybabelfy.babelfy import *
from nltk.stem import PorterStemmer
from math import log
import pickle
import glob
import os
import OClustR as OCR
import operator
#import nltk
# --- module-level state shared by the SciKGraph functions below ---
key = ''                     # Babelfy API key used when none is passed to start()
inputFile = ''
outputDirectory = ''
distance_window = 0          # co-occurrence window size
graphName = []               # names of documents already babelfied
dictionaries = []            # per-document word -> synset-code maps
dictionariesCode = []        # per-document synset-code -> word maps
graphsI = []                 # per-document "indirect" (windowed) graphs
graphsD = []                 # per-document direct (adjacency) graphs
sciKGraph = 0                # merged graph (0 until built)
pre_processed_graph = 0
dictionaryCodeMerged = {}    # merged synset-code -> word map
language = ''
deleted_nodes = 0
deleted_edges = 0
deleted_isolated_nodes = 0
name=""
clusters = []
crisp_clusters = []
# NOTE(review): re-assigned here, overriding the 0 a few lines above
pre_processed_graph = nx.DiGraph()
#rank Concepts
def rank(g, dictionaryCodeMerged):
    """Return concepts of *g* as 'label : code' strings, sorted by degree centrality (desc)."""
    centrality = nx.degree_centrality(g)
    ranked = sorted(centrality.items(), key=lambda kv: kv[1], reverse=True)
    return [
        dictionaryCodeMerged[code].lower().replace('+', ' ') + ' : ' + code
        for code, _score in ranked
    ]
#key Concepts
def key_concepts(g, dictionaryCodeMerged):
    """Return [label, centrality] pairs for *g*, sorted by degree centrality (desc)."""
    centrality = nx.degree_centrality(g)
    ordered = sorted(centrality.items(), key=lambda kv: kv[1], reverse=True)
    return [[dictionaryCodeMerged[code], score] for code, score in ordered]
# open and close file
def open_file(fileName):
    """Read *fileName* and return its entire contents as a string."""
    with open(fileName, "r") as handle:
        return handle.read()
#parse and split text in chuncks of at most 3000 characters
def parse_text(text):
    """Clean *text* and split it into '+'-joined chunks of at most ~5000 chars.

    Keeps ! . ? - , while stripping the rest of the punctuation, removes
    digits, repeated spaces and non-printable characters, replaces spaces
    with '+', then groups lines into chunks that respect sentence boundaries
    where possible.  Sentence-ending punctuation (? ! .) is stripped from the
    returned chunks.
    """
    #remove special characters (keep the ones needed to find sentence ends)
    punctuationToRemove = string.punctuation.replace('!','').replace('.','').replace('?','').replace('-','').replace(',','')
    translator = str.maketrans('', '', punctuationToRemove)
    parsedText = text.translate(translator)
    #remove numbers
    parsedText = re.sub(r'[0-9]+', '', parsedText)
    #remove double spaces
    # BUG FIX: the original pattern replaced a single space with a space (a
    # no-op); collapse runs of spaces as the comment intends.
    parsedText = re.sub(r' +', ' ', parsedText)
    #remove non-printable characters
    parsedText = "".join(filter(lambda x: x in string.printable, parsedText))
    #remove spaces
    parsedText = re.sub(r' ', '+', parsedText)
    #split text in chuncks of at most 5000 characters
    punctuation = ['.','?','!']
    splitted_text = []
    splitted_text.append("")
    n_lines = len(parsedText.splitlines())
    for line in parsedText.splitlines():
        if n_lines == 1:
            splitted_text[-1] = line
        else:
            # NOTE(review): when merging, the first chunk starts with a '+'
            # separator — pre-existing behavior, kept unchanged.
            if len(splitted_text[-1] + line) < 4500 and splitted_text[-1][-1:] not in punctuation or len(splitted_text[-1] + line) <= 3000:
                splitted_text[-1] = splitted_text[-1] + '+' + line
            else:
                splitted_text.append(line)
    # strip sentence-ending punctuation from every chunk.
    # BUG FIX: the original looped `for l in splitted_text: l = l.translate(...)`,
    # which rebinds the loop variable and leaves the list unchanged.
    translator = str.maketrans('', '', "?!.")
    splitted_text = [chunk.translate(translator) for chunk in splitted_text]
    return splitted_text
def frag(semantic_annotation, input_text):
    """Return the fragment of *input_text* covered by the annotation (end inclusive)."""
    first = semantic_annotation.char_fragment_start()
    last = semantic_annotation.char_fragment_end()
    return input_text[first:last + 1]
def babelfy(lang, key, splitted_text):
    """Annotate each text chunk with the Babelfy disambiguation API.

    Parameters:
        lang: Babelfy language code (e.g. 'EN').
        key: Babelfy API key (network calls consume Babelcoins).
        splitted_text: list of text chunks, as produced by parse_text().

    Returns three parallel lists (one entry per chunk):
        paragraphs_annotations — raw annotation objects,
        paragraphs_text — the annotated text fragments,
        paragraphs_code — the corresponding Babel synset ids.
    """
    babelapi = Babelfy()
    paragraphs_annotations = []
    paragraphs_text = []
    paragraphs_code = []
    count = 0
    for paragraph in splitted_text: #annotate each paragraph
        words_annotations = []
        words_text = []
        words_code = []
        semantic_annotations = babelapi.disambiguate(paragraph,lang,key,match="EXACT_MATCHING",cands="TOP",mcs="ON",anntype="ALL")
        #exclude unused annotations (single words of multiword expressions):
        #keep an annotation only if it starts after the previous one ends, or
        #replace the previous one when both start at the same offset (the
        #later, longer multiword annotation wins).
        for semantic_annotation in semantic_annotations:
            if len(words_annotations) == 0 or words_annotations[-1].char_fragment_end() < semantic_annotation.char_fragment_start():
                words_annotations.append(semantic_annotation)
                words_text.append(frag(semantic_annotation,paragraph))
                words_code.append(semantic_annotation.babel_synset_id())
            elif words_annotations[-1].char_fragment_start() == semantic_annotation.char_fragment_start():
                del words_annotations[-1]
                words_annotations.append(semantic_annotation)
                del words_text[-1]
                words_text.append(frag(semantic_annotation,paragraph))
                del words_code[-1]
                words_code.append(semantic_annotation.babel_synset_id())
        paragraphs_annotations.append(words_annotations)
        paragraphs_text.append(words_text)
        paragraphs_code.append(words_code)
        count = count + 1
        # simple progress indicator (one API round-trip per chunk)
        print(str(count) + '/' + str(len(splitted_text)))
    return paragraphs_annotations, paragraphs_text, paragraphs_code
def create_dicts(paragraphs_text, paragraphs_code):
    """Build the word/code lookup tables from Babelfy output.

    Returns (dictionary, dictionaryCode, weight) where
    dictionary[word] = code (first occurrence wins),
    dictionaryCode[code] = word (first occurrence wins),
    weight[code] = total number of occurrences of that code.
    """
    dictionary = {}
    dictionaryCode = {}
    weight = {}
    for words, codes in zip(paragraphs_text, paragraphs_code):
        for word, code in zip(words, codes):
            weight[code] = weight.get(code, 0) + 1
            dictionary.setdefault(word, code)
            dictionaryCode.setdefault(code, word)
    return dictionary, dictionaryCode, weight
def create_simple_graph(peso, paragraphs_code, dictionaryCode, dist):
    """Build two concept graphs from the per-chunk code sequences.

    Parameters:
        peso: {code: occurrence count} — becomes the node weight ('peso').
        paragraphs_code: list of code sequences, one per text chunk.
        dictionaryCode: {code: word} — stored as node attribute 'dicionario'.
        dist: co-occurrence window size for the windowed graph.

    Returns (g, g2): g links codes that co-occur within `dist` positions,
    with edge weight 1 - log_dist(j - i); g2 links only consecutive codes,
    with integer co-occurrence counts.
    """
    # NOTE(review): g is described as the "indirect" (undirected?) graph but
    # is constructed as a DiGraph, so edges only go forward in reading order
    # — confirm whether nx.Graph was intended.
    g = nx.DiGraph() #indirect Graph
    g2 = nx.DiGraph() #direct Graph
    #calc the weight of each vertice
    for code, weight in peso.items():
        g.add_node(code, peso=weight, dicionario=dictionaryCode[code])
        g2.add_node(code, peso=weight, dicionario=dictionaryCode[code])
    #create and weight edges
    for line in paragraphs_code:
        i = 0
        for word in line:
            i = i + 1
            j = 0
            for word2 in line:
                j = j + 1
                # closer pairs get a larger increment: 1 - log_dist(j - i)
                if j - i < dist and j - i > 0: #indirect edges
                    if g.has_edge(word, word2):
                        g[word][word2]['weight'] += 1 - log(j-i,dist)
                    else:
                        if word != word2:
                            g.add_edge(word, word2, weight=float(1 - log(j-i,dist)))
                if j - i == 1: #direct edges
                    if g2.has_edge(word, word2):
                        g2[word][word2]['weight'] += 1
                    else:
                        if word != word2:
                            g2.add_edge(word, word2, weight=1)
    return g, g2
def save_clusters_txt(saveFile, Clusters):
    """Write *Clusters* to *saveFile*, one cluster per line, nodes space-separated."""
    with open(saveFile, "w+") as out:
        for cluster in Clusters:
            out.write(' '.join(cluster) + '\n')
    return
def saveClusters(saveFile="", Clusters=[], crisp="", clusterType='normal'):
    """Persist clusterings as pickle + plain-text files under the saveFile prefix.

    crisp clusters (if provided) are written to <saveFile>crisp.{pickle,txt};
    Clusters go to <saveFile>clusters.* when clusterType is 'normal' or to
    <saveFile>reducedClusters.* when it is 'reduced'.  The txt format is one
    cluster per line with space-separated node ids.
    """
    def _dump(prefix, data):
        # one pickle (protocol 2) plus one human-readable txt per clustering
        with open(prefix + ".pickle", "wb") as fp:
            pickle.dump(data, fp, protocol=2)
        with open(prefix + ".txt", "w+") as txt:
            for cluster in data:
                txt.write(' '.join(cluster) + '\n')

    if crisp != "":
        _dump(saveFile + "crisp", crisp)
    if clusterType == 'normal':
        _dump(saveFile + "clusters", Clusters)
    elif clusterType == 'reduced':
        _dump(saveFile + "reducedClusters", Clusters)
    else:
        print('Wrong cluster Type!\nCluster not saved')
def save_variables_pickle():
    """Serialize the module-level SciKGraph state and return the pickle bytes.

    The order of the entries is the contract shared with the open_variables*
    functions: graphName, dictionaries, dictionariesCode, graphsI, graphsD,
    dictionaryCodeMerged, sciKGraph, crisp_clusters, pre_processed_graph,
    clusters.
    """
    state = [
        graphName,
        dictionaries,
        dictionariesCode,
        graphsI,
        graphsD,
        dictionaryCodeMerged,
        sciKGraph,
        crisp_clusters,
        pre_processed_graph,
        clusters,
    ]
    # protocol 2 keeps the dump loadable from older Python versions
    return pickle.dumps(state, protocol=2)
def save_variables(output_file, save_graph_name=False, save_directories = False, save_directories_code = False, save_graphs_i = False, save_graphs_d = False, save_directories_code_merged = False, save_SciKGraph = False, save_clusters = False, save_crisp_clusters = False, save_pre_processed_graph = False):
    """Pickle the module-level SciKGraph state to *output_file*.

    The boolean selector parameters are accepted for interface compatibility
    but the full state is always written, in the same order used by
    save_variables_pickle() and expected by open_variables().
    """
    state = [
        graphName,
        dictionaries,
        dictionariesCode,
        graphsI,
        graphsD,
        dictionaryCodeMerged,
        sciKGraph,
        crisp_clusters,
        pre_processed_graph,
        clusters,
    ]
    try:
        with open(output_file, "wb") as fp:
            pickle.dump(state, fp, protocol=2)
    except:
        raise
    return
def open_variables_pickle(file):
    """Restore the module-level SciKGraph state from an open pickle file object.

    Counterpart of save_variables_pickle().  BUG FIX: the original assigned
    to function-local names only, so the loaded state was silently discarded;
    the ``global`` declarations make the assignments take effect.
    """
    global graphName, dictionaries, dictionariesCode, graphsI, graphsD
    global dictionaryCodeMerged, sciKGraph, crisp_clusters, pre_processed_graph, clusters
    data = pickle.load(file)
    graphName = data[0]
    dictionaries = data[1]
    dictionariesCode = data[2]
    graphsI = data[3]
    graphsD = data[4]
    dictionaryCodeMerged = data[5]
    sciKGraph = data[6]
    crisp_clusters = data[7]
    pre_processed_graph = data[8]
    clusters = data[9]
def open_variables(open_directory, open_graph_name=False, open_directories = False, open_directories_code = False, open_graph_i = False, open_graph_d = False, open_dictionary_code_merged = False, open_SciKGraph = False, open_clusters = False, open_crisp_clusters = False, open_pre_processed_graph = False):
    """Restore the module-level SciKGraph state from a pickle file on disk.

    The boolean selector parameters are accepted for interface compatibility;
    the full state is always loaded.  BUG FIX: the original assigned to
    function-local names only, so the loaded state was silently discarded;
    the ``global`` declarations make the assignments take effect.
    """
    global graphName, dictionaries, dictionariesCode, graphsI, graphsD
    global dictionaryCodeMerged, sciKGraph, crisp_clusters, pre_processed_graph, clusters
    with open(open_directory, "rb") as fp:
        data = pickle.load(fp)
    graphName = data[0]
    dictionaries = data[1]
    dictionariesCode = data[2]
    graphsI = data[3]
    graphsD = data[4]
    dictionaryCodeMerged = data[5]
    sciKGraph = data[6]
    crisp_clusters = data[7]
    pre_processed_graph = data[8]
    clusters = data[9]
    return
def clear_variables():
    """Reset the module-level SciKGraph state to its initial (empty) values.

    BUG FIX: the original assigned to function-local names, which left the
    module state untouched; the ``global`` declarations make the reset
    effective.  clusters / crisp_clusters / pre_processed_graph are not reset
    here, matching the set of names the original attempted to clear.
    """
    global key, inputFile, outputDirectory, distance_window
    global graphName, dictionaries, dictionariesCode, graphsI, graphsD
    global sciKGraph, dictionaryCodeMerged, name
    key = ''
    inputFile = ''
    outputDirectory = ''
    distance_window = 0
    graphName = []
    dictionaries = []
    dictionariesCode = []
    graphsI = []
    graphsD = []
    sciKGraph = 0
    dictionaryCodeMerged = {}
    name = ""
    return
def create_single_SciKGraph(filename, babelfy_key, language, distance_window):
    """Build the dictionaries and co-occurrence graphs for one document.

    NOTE(review): despite its name, `filename` holds the raw document
    *content* as bytes — it is decoded here, not opened; confirm against
    callers.

    Returns (dictionary, dictionaryCode, indirectGraph, directGraph).
    """
    text = filename.decode('ascii')
    st = parse_text(text)                             # clean + chunk the text
    pa, pt, pc = babelfy(language, babelfy_key, st)   # annotate via Babelfy API
    d, dc, p = create_dicts(pt, pc)                   # lookup tables + frequencies
    gI, gD = create_simple_graph(p, pc, dc, distance_window)
    return d, dc, gI, gD
#Merges graphs and dictionaries
## graphs: list of graphs to merge
## dictionaryCode: list of the graphs dictionaries
def merge_graphs(graphs, dictionaryCode):
    """Merge per-document graphs into one graph, summing node and edge weights.

    Parameters:
        graphs: networkx graphs carrying 'peso' node attributes and 'weight'
            edge attributes.
        dictionaryCode: list of {code: word} dicts, one per graph; the first
            occurrence of a code wins in the merged dictionary.

    Returns (mergedGraph, dictionaryCodeMerged).
    """
    #create dictionaryCodeMerged
    dictionaryCodeMerged = {}
    for dic in dictionaryCode:
        for w in dic:
            if w not in dictionaryCodeMerged:
                dictionaryCodeMerged[w] = dic[w]
    #merge graphs (structure only; attribute values are recomputed below)
    graph = nx.compose_all(graphs).copy()
    #reset nodes weights (compose_all keeps attributes from the last graph)
    for i in graph.nodes():
        graph.nodes()[i]['peso'] = 0
    #recalc nodes weights as the sum over all input graphs
    for i in range(len(graphs)):
        for n in graphs[i]:
            graph.nodes()[n]['peso'] += graphs[i].nodes()[n]['peso']
            graph.nodes()[n]['dicionario'] = dictionaryCodeMerged[n]
    #reset arc weight
    for i in graph.edges():
        graph[i[0]][i[1]]['weight'] = 0
    #recalc arc weight as the sum over all input graphs
    for i in range(len(graphs)):
        for e in graphs[i].edges():
            graph[e[0]][e[1]]['weight'] += graphs[i][e[0]][e[1]]['weight']
    return graph, dictionaryCodeMerged
# +
def create_SciKGraph(files, file_names, babelfy_key = None, language = 'EN', graphType = 'direct', distance_window=2, mergeIfFail = False):
    """Babelfy each new document, build its graphs, and merge them into a SciKGraph.

    Parameters:
        files: list of document contents (bytes; see create_single_SciKGraph).
        file_names: parallel list of document names, used to skip documents
            already present in the module-level graphName list.
        babelfy_key: Babelfy API key.
        language: Babelfy language code (e.g. 'EN').
        graphType: ignored on input — recomputed from the window size below.
        distance_window: co-occurrence window; values <= 1 use the direct graph.
        mergeIfFail: if True, merge the successfully babelfied documents even
            when the very first document fails.

    Returns (sciKGraph, dictionaryCodeMerged).
    """
    # BUG FIX: the original contained a dead branch
    # `if distance_window != distance_window:` (always False) that assigned
    # graphName, making graphName function-local and raising
    # UnboundLocalError on its first use; declare it global instead, matching
    # how the other module-level lists (dictionaries, graphsI, ...) are used.
    global graphName
    distance_window = distance_window + 1
    if distance_window <=2:
        graphType = 'direct'
    else:
        graphType = 'indirect'
    toMerge = []
    count = 0
    added = 0
    for file, file_name in zip(files, file_names):
        count += 1
        if file_name not in graphName:
            try:
                d, dc, gI, gD = create_single_SciKGraph(file, babelfy_key, language, distance_window)
                graphName.append(file_name)
                dictionaries.append(d)
                dictionariesCode.append(dc)
                graphsI.append(gI)
                graphsD.append(gD)
                added += 1
            except Exception as e:
                if len(graphName) > 0 or mergeIfFail:
                    print('Error Babelfying text (check your Babelcoins)\n', e, '\n')
                    print(graphName, '\nThe documents in \'graphName\' were correctly babelfied.\nThe SciKGraph was created with the correctly babelfied texts, to update this version with the other texts fix the error (probably babelfy key error) and run this method again.')
                    break
                else:
                    if len(graphName) > 0:
                        print(graphName, '\nThe documents in \'graphName\' were correctly babelfied.\nTo create the SciKGraph (using the previously babelfied documents) run this method again.\n')
                    print('Error Babelfying text (check your Babelcoins)\n')
                    raise
    if graphType == 'direct':
        toMerge = graphsD
    elif graphType == 'indirect':
        toMerge = graphsI
    else:
        print('graphType not listed!\nDirect graph used.')
        toMerge = graphsD
    #check if at leat 1 graph can be added to scikgraph
    if added > 0:
        graph, dictionaryMerged = merge_graphs(toMerge, dictionariesCode)
        sciKGraph = graph
        dictionaryCodeMerged = dictionaryMerged
    # NOTE(review): if no new document was added, sciKGraph and
    # dictionaryCodeMerged are unbound here and this return raises —
    # pre-existing behavior, left unchanged.
    return sciKGraph, dictionaryCodeMerged
# +
def find_communities(g, edges_threshold, nodes_threshold):
    """Identify overlapping communities in ``g`` with OClustR.

    Returns the tuple (clusters, crisp_clusters, pre_processed_graph)
    produced by OClustR.identify_clusters after threshold-based pruning.
    """
    clusterer = OCR.OClustR()
    return clusterer.identify_clusters(g, edges_threshold, nodes_threshold)
# -
def cluster_graph(g):
    """Cluster graph ``g`` with OClustR and return the clustering results.

    Parameters
    ----------
    g : graph
        Graph to cluster (as produced by create_SciKGraph).

    Returns
    -------
    tuple
        (clusters, crisp_clusters, sciKGraph) as produced by
        OCR.OClustR().cluster_graph.
    """
    ocr = OCR.OClustR()
    clusters, crisp_clusters, sciKGraph = ocr.cluster_graph(g)
    # Bug fix: the original computed these values and then returned None,
    # discarding the clustering result entirely.
    return clusters, crisp_clusters, sciKGraph
def pre_process_graph(g, edges_threshold, nodes_threshold, list_edges = None, list_nodes = None):
    """Pre-process graph ``g`` with OClustR, pruning low-weight edges/nodes.

    Parameters
    ----------
    g : graph
        Graph to pre-process.
    edges_threshold : number
        Threshold below which edges are removed.
    nodes_threshold : number
        Threshold below which nodes are removed.
    list_edges, list_nodes : list, optional
        Explicit edges / nodes to remove (default: none).

    Returns
    -------
    tuple
        (pre_processed_graph, removed_edges, removed_nodes, removed_isolated_nodes)
    """
    # Mutable-default fix: None sentinels instead of shared [] defaults.
    if list_edges is None:
        list_edges = []
    if list_nodes is None:
        list_nodes = []
    oClustR = OCR.OClustR()
    g, rem_e, rem_n, rem_iso_n = oClustR.pre_process(g, edges_threshold, nodes_threshold, list_edges, list_nodes)
    # Bug fix: the original assigned these results to locals and returned
    # None, discarding everything it computed.
    return g, rem_e, rem_n, rem_iso_n
def to_crisp(Clusters):
    """Convert overlapping clusters into crisp (disjoint) clusters.

    Each vertex is kept only in the first cluster it appears in; clusters
    left empty after deduplication are dropped.

    Parameters
    ----------
    Clusters : iterable of iterables
        Overlapping clusters of (hashable) vertices.

    Returns
    -------
    list of list
        Disjoint clusters, preserving first-seen order.
    """
    crisp = []
    seen = set()  # O(1) membership test instead of the original O(n) list scan
    for c in Clusters:
        cl = []
        for v in c:
            if v not in seen:
                cl.append(v)
                seen.add(v)
        if cl:
            crisp.append(cl)
    return crisp
def start( inputDirectory, babelfy_key, edges_threshold=0, nodes_threshold=0, list_nodes = [], list_edges = [], language = 'EN', graphType = 'direct', distance_window=2, mergeIfFail = False):
    """Entry point: babelfy every .txt file in ``inputDirectory`` and build the SciKGraph.

    Parameters
    ----------
    inputDirectory : str
        Directory scanned (non-recursively) for ``*.txt`` files.
    babelfy_key : str or None
        Babelfy key; falls back to the module-level ``key`` when None.
    edges_threshold, nodes_threshold, list_nodes, list_edges :
        Accepted for interface compatibility; not used by this function.
    language, graphType, distance_window, mergeIfFail :
        Forwarded to create_SciKGraph.

    Returns
    -------
    The merged SciKGraph.

    Raises
    ------
    EmptyDirectoryError
        If the directory contains no .txt file.
    """
    if babelfy_key is None:  # 'is None' instead of the original '== None'
        babelfy_key = key    # fall back to the module-level default key
    filenames = []
    for filename in sorted(glob.glob(os.path.join(inputDirectory, '*.txt'))):
        filenames.append(filename)
    # Bug fix: the original tested len(filename) (the loop variable, never
    # empty) inside the loop, so the empty-directory error could never fire.
    if len(filenames) == 0:
        raise EmptyDirectoryError('There is no .txt file in the inputDirectory.')
    # Bug fix: pass arguments by keyword -- the original positional call into
    # create_SciKGraph(files, file_names, babelfy_key, ...) shifted every
    # argument by one slot (babelfy_key landed in file_names, etc.).
    # Assumes file paths double as document names -- TODO confirm.
    sciKGraph, dictionaryCodeMerged = create_SciKGraph(filenames, filenames, babelfy_key,
                                                       language=language, graphType=graphType,
                                                       distance_window=distance_window,
                                                       mergeIfFail=mergeIfFail)
    return sciKGraph
# +
############### Create SciKGraph #################
# +
#documentsList (list of files)= list of documents
#documentsNamesList (list of strings)= list of the names of the documents in documentsList
#babelfy_key (string)= babelfy key
#language (string)= 'EN'
#distance_window (int) = value of distance to consider of concepts coocorrence
#mergeIfFail (boolean) = If Babelfy fail (key problem) merged already babelfied texts?
# Bug fix: use keyword arguments -- the original positional call placed
# `distance_window` in the graphType slot and `mergeIfFail` in the
# distance_window slot of create_SciKGraph's signature.
create_SciKGraph(documentsList, documentsNamesList, babelfy_key, language=language,
                 distance_window=distance_window, mergeIfFail=mergeIfFail)
# -
| SciKGraph.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 2
# language: python
# name: python2
# ---
# # Multitask Networks On MUV
# This notebook walks through the creation of multitask models on MUV. The goal is to demonstrate that multitask methods outperform singletask methods on MUV.
# %reload_ext autoreload
# %autoreload 2
# %pdb off
reload = True
# +
import os
import deepchem as dc
current_dir = os.path.dirname(os.path.realpath("__file__"))
dataset_file = "medium_muv.csv.gz"
full_dataset_file = "muv.csv.gz"
# We use a small version of MUV to make online rendering of notebooks easy. Replace with full_dataset_file
# In order to run the full version of this notebook
dc.utils.download_url("https://s3-us-west-1.amazonaws.com/deepchem.io/datasets/%s" % dataset_file,
current_dir)
dataset = dc.utils.save.load_from_disk(dataset_file)
print("Columns of dataset: %s" % str(dataset.columns.values))
print("Number of examples in dataset: %s" % str(dataset.shape[0]))
# -
# Now, let's visualize some compounds from our dataset
# +
from rdkit import Chem
from rdkit.Chem import Draw
from itertools import islice
from IPython.display import Image, display, HTML
def display_images(filenames):
    """Render each image file inline in the notebook output."""
    for path in filenames:
        display(Image(path))
def mols_to_pngs(mols, basename="test"):
    """Draw each RDKit molecule to a numbered PNG file and return the file names."""
    written = []
    for idx, molecule in enumerate(mols):
        out_name = "MUV_%s%d.png" % (basename, idx)
        Draw.MolToFile(molecule, out_name)
        written.append(out_name)
    return written
num_to_display = 12
molecules = []
for _, data in islice(dataset.iterrows(), num_to_display):
molecules.append(Chem.MolFromSmiles(data["smiles"]))
display_images(mols_to_pngs(molecules))
# +
# The 17 MUV assay tasks modeled jointly (multitask learning).
MUV_tasks = ['MUV-692', 'MUV-689', 'MUV-846', 'MUV-859', 'MUV-644',
             'MUV-548', 'MUV-852', 'MUV-600', 'MUV-810', 'MUV-712',
             'MUV-737', 'MUV-858', 'MUV-713', 'MUV-733', 'MUV-652',
             'MUV-466', 'MUV-832']
# Featurize each SMILES string into a 1024-bit circular fingerprint.
featurizer = dc.feat.CircularFingerprint(size=1024)
loader = dc.data.CSVLoader(
      tasks=MUV_tasks, smiles_field="smiles",
      featurizer=featurizer)
dataset = loader.featurize(dataset_file)
# -
# Random train/valid/test split of the featurized dataset.
splitter = dc.splits.RandomSplitter(dataset_file)
train_dataset, valid_dataset, test_dataset = splitter.train_valid_test_split(
    dataset)
#NOTE THE RENAMING: valid and test are deliberately swapped below.
valid_dataset, test_dataset = test_dataset, valid_dataset
# +
import numpy as np
import numpy.random
params_dict = {"activation": ["relu"],
"momentum": [.9],
"batch_size": [50],
"init": ["glorot_uniform"],
"data_shape": [train_dataset.get_data_shape()],
"learning_rate": [1e-3],
"decay": [1e-6],
"nb_epoch": [1],
"nesterov": [False],
"dropouts": [(.5,)],
"nb_layers": [1],
"batchnorm": [False],
"layer_sizes": [(1000,)],
"weight_init_stddevs": [(.1,)],
"bias_init_consts": [(1.,)],
"penalty": [0.],
}
n_features = train_dataset.get_data_shape()[0]
def model_builder(model_params, model_dir):
    """Build a MultiTaskClassifier over the MUV tasks (``model_dir`` is unused)."""
    return dc.models.MultiTaskClassifier(
        len(MUV_tasks), n_features, **model_params)
metric = dc.metrics.Metric(dc.metrics.roc_auc_score, np.mean)
optimizer = dc.hyper.HyperparamOpt(model_builder)
best_dnn, best_hyperparams, all_results = optimizer.hyperparam_search(
params_dict, train_dataset, valid_dataset, [], metric)
| examples/notebooks/Multitask_Networks_on_MUV.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 2
# language: python
# name: python2
# ---
# # Multinomial Logistic Regression
# $$\textrm{Hypothesis is }\tilde{y} = Wx + b$$
#
# ## Softmax
# Softmax allows us to turn the final hypothesis output into normalized probabilities
# $$S(y_{j}) = \frac{e^{y_{j}}}{\sum_{i}e^{y_{i}}}$$
#
# **Properties:**
# * If the scores are all multiplied by a constant, this will result in one of the scores (the highest one getting very close to 1.0, while the others die out and get close to 0)
# * If the scores are all divided by a constant, this will result in all the scores getting closer to the uniform distribution of 1/len(y)
# * So this means that if we can increase the magnitude of the outputs, the classifier becomes more and more confident.
#
# **Note:** The scores (the raw hypothesis outputs) are also known as logits
#
# ## One Hot encoding
# Set the hypothesis output to 1.0 for the most probable class and 0 for all others. With very large data, this becomes a problem, because we have huge vectors with just one value set to 1 and all other values set to 0. We will deal with this later with "embeddings"
#
# ## Cross Entropy
# $$\textrm{Say }D(S, L) = -\sum_{i}(L_{i}\log(S_{i}))$$
# where S is the output of the softmax function and L is the one hot labels.
#
# ## Data Flow
#
| DeepLearning/1_0_SoftmaxRegression.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="u78DRgyR7i4Y"
# # Machine Learning Process
# ## Responsabilities of this file:
#
#
# * Perform Fit and Predict for numpy arrays data saved on the previous step
# * Save all required information for plotting results on the next step
# + [markdown] id="hzTiAKYGNgUB"
# ## Who is Running?
# + id="_bwXTBwHoXbH" colab={"base_uri": "https://localhost:8080/"} outputId="04e916a0-a3eb-4735-caab-c9ae8bb15d81"
project_root = "/content/drive/MyDrive/TFC_MatheusSasso"
from google.colab import drive
drive.mount('/content/drive')
# + [markdown] id="-W-oAuLQ-ekY"
# ## Variable Parameters
# + id="u2j66HDyPJy0"
# Collect for all species or only for the taxonkey one
collect_all = True #@param {type:"boolean"}
# Geneal parameters used on this notebook
taxonKey=5358748 #@param {type:"integer"}
# Required parameters for OneClassSVMModel
nu = 0.1 #@param {type:"number"}
gamma = 0.5 #@param {type:"number"}
kernel = "rbf" #@param {type:"string"}
seed = 13 #@param {type:"integer"}
n_KFolds = 4 #@param {type:"integer"}
# + [markdown] id="AbdWuWQhPLta"
# ## Fixed Parameters
# + id="BKt5eUyYRNY3"
# Reference Data Paths
base_txt_files_path = project_root + "/Data/Standarized_Brazil_Data/TXT_Aux_Files"
base_csv_files_path = project_root + "/Data/Standarized_Brazil_Data/CSV_Aux_Files"
base_numpy_files_path = project_root + "/Data/Standarized_Brazil_Data/Numpy_Aux_Files"
output_base_folder = project_root + "/Data/KFold_Predictions"
# Pre-created files to retrieve
country_mask_reference = project_root + "/Data/Standarized_Brazil_Data/Base_Rasters/brazilian_mask_standarized.tif"
brazil_vars_mean_std_path = f"{base_csv_files_path}/brazil_vars_mean_std.csv"
stacked_rasters_path = f"{base_numpy_files_path}/stacked_environment_rasters_array.npy"
# + [markdown] id="LkF1PV-d7-DI"
# ## Package Downloads
# + id="mny3ZQ8b8Biq"
# ! pip install geopandas --quiet
# ! pip install rasterio --quiet
# + [markdown] id="rwdPjzSH16jA"
# ## Imports
# + id="rKlY3DDd16pb"
import os
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from typing import List,Tuple,Dict
from sklearn import svm, metrics
from sklearn.preprocessing import MinMaxScaler
from sklearn.preprocessing import StandardScaler
from sklearn.model_selection import KFold
import rasterio
import geopandas as gpd
# + [markdown] id="uwKIk0mKEVpa"
# ## Getting Specie Name
# + id="QJtasJgDEZfB" colab={"base_uri": "https://localhost:8080/", "height": 231} outputId="c7eb622f-68fa-44d3-b340-76d07acc7971"
# !wget https://raw.githubusercontent.com/climate-and-health-datasci-Unicamp/permapy/main/utils/species_taxon_id_dict.py
from species_taxon_id_dict import *
species_name = species_taxon_id_dict[taxonKey]
species_name
# + [markdown] id="MHDbLzJjFe5F"
# ## Retrieving aux Classes
# + colab={"base_uri": "https://localhost:8080/"} id="bUvqxjRMFf8c" outputId="b6b660ca-24ee-4fa0-a1a0-3d04d59c067d"
# !wget https://raw.githubusercontent.com/climate-and-health-datasci-Unicamp/permapy/main/utils/utils.py
# !wget https://raw.githubusercontent.com/climate-and-health-datasci-Unicamp/permapy/main/utils/raster_utils.py
# + id="0JlsilCaGFrz"
from utils import Utils
from raster_utils import Raster_Utils
raster_utils = Raster_Utils()
utils_methods = Utils()
# + [markdown] id="58xMCivWxvfF"
# ## List Rasters Locations
# + id="5Co7B-u9xxUa"
list_raster_files = open(f'{base_txt_files_path}/list_raster_files.txt', 'r').read().splitlines()
list_names_raster = open(f'{base_txt_files_path}/list_names_raster.txt', 'r').read().splitlines()
# + [markdown] id="CNPfla7zSx2o"
# ## Creating dictionary with hyperparameters
# + id="yZ5Lvo8x-eE5"
hyperparams = {"nu":nu,"kernel":kernel,"gamma":gamma,"seed":seed}
# + [markdown] id="iGmjkfiG8FHi"
# ## One Class SVM Model Class
# + id="aZ1EZXiz8EH-"
# import os
# import numpy as np
# import geopandas as gpd
# from typing import List,Tuple,Dict
# from sklearn import svm, metrics
# from sklearn.preprocessing import MinMaxScaler
# from sklearn.preprocessing import StandardScaler
# from sklearn.model_selection import KFold
# import rasterio
# import matplotlib.pyplot as plt
# import pandas as pd
class OneClassSVMModel:
    """
    This class is responsible for performing fits and predictions for the species distribution problem using OneClassSVM.

    Attributes
    ----------
    nu : float
        An upper bound on the fraction of training errors and a lower bound of the fraction of support vectors. Should be in the interval (0, 1]. By default 0.5 will be taken.
    kernel : str
        Specifies the kernel type to be used in the algorithm. It must be one of 'linear', 'poly', 'rbf', 'sigmoid', 'precomputed' or a callable. If none is given, 'rbf' will be used. If a callable is given it is used to precompute the kernel matrix.
    gamma : object
        Kernel coefficient for 'rbf', 'poly' and 'sigmoid'.
        if gamma='scale' (default) is passed then it uses 1 / (n_features * X.var()) as value of gamma,
        if 'auto', uses 1 / n_features.
    seed : object
        Random seed
    raster_utils : object
        Raster standards object
    utils_methods : object
        Utils object
    land_reference : array
        Array used as the land reference (the country mask raster)
    """
    def __init__(self,hyperparams:Dict,raster_utils, utils_methods,land_reference_path:str,stacked_rasters_path:str,brazil_vars_mean_std_path:str,output_base_folder:str):
        """
        Parameters
        ----------
        hyperparams : Dict
            Set of hyperparameters for the model (keys: nu, kernel, gamma, seed)
        raster_utils : Object
            Raster standards object
        utils_methods : Object
            Utils object
        land_reference_path : str
            Path to a raster used as land reference
        stacked_rasters_path : str
            Path to the .npy stack of environmental raster coverages
        brazil_vars_mean_std_path : str
            Path to a CSV with per-variable 'mean' and 'std' columns used for normalization
        output_base_folder : str
            Root folder where per-fold predictions are saved
        """
        #-------------- hyperparams
        self.nu = hyperparams["nu"]
        self.kernel = hyperparams["kernel"]
        self.gamma = hyperparams["gamma"]
        self.seed = hyperparams["seed"]
        #-------------- Auxiliary Classes
        self.raster_utils = raster_utils
        self.utils_methods = utils_methods
        #------------- Useful Information
        self.output_base_folder = output_base_folder
        # Only the first element (the array) of the raster info tuple is kept.
        self.land_reference,_,_,_,_,_= self.raster_utils.get_raster_infos(land_reference_path)
        np.random.seed(self.seed)
        self.stacked_raster_coverages = utils_methods.retrieve_data_from_np_array(stacked_rasters_path)
        brazil_vars_mean_std_df = pd.read_csv(brazil_vars_mean_std_path)
        # Per-variable normalization statistics; assumed aligned with the first
        # axis of stacked_raster_coverages -- TODO confirm.
        self.mean_vars = np.float32(brazil_vars_mean_std_df['mean'].to_numpy())
        self.std_vars = np.float32(brazil_vars_mean_std_df['std'].to_numpy())
        # Extracting coverages land
        idx = np.where(self.land_reference == self.raster_utils.positive_mask_val) # Coords X and Y in two tuples where condition matchs (array(),array())
        self.idx_X = idx[0]
        self.idx_Y = idx[1]
        # self.utils_methods.save_nparray_to_folder(self.idx_X,self.output_base_folder,"Idx_X_Brazilian_Territory")
        # self.utils_methods.save_nparray_to_folder(self.idx_Y,self.output_base_folder,"Idx_Y_Brazilian_Territory")
    def fit(self,species_bunch):
        """Fit a OneClassSVM on the normalized training occurrences.

        Normalizes species_bunch['raster_data_train'] with the precomputed
        per-variable mean/std and returns the fitted classifier.
        """
        train_cover_std = (species_bunch['raster_data_train'] - self.mean_vars) / self.std_vars
        train_cover_std[np.isnan(train_cover_std)] = 0 #Nan values comes from std=0 in some variable
        clf = svm.OneClassSVM(nu=self.nu, kernel=self.kernel, gamma=self.gamma)
        clf.fit(train_cover_std)
        return clf
    def predict_land(self,clf):
        """Predict adaptability (decision_function) for every valid point on the map.

        Returns a 2-D float32 array Z with predictions at land pixels and
        raster_utils.no_data_val everywhere else.
        """
        stacked_raster_coverages_shape = self.stacked_raster_coverages.shape
        print('Shape stacked_raster_coverages: ',stacked_raster_coverages_shape)
        #Performing Predictions
        # Gather the environmental variables at every land pixel: shape (n_pixels, n_vars).
        raster_coverages_land = self.stacked_raster_coverages[:, self.idx_X, self.idx_Y].T
        # Replace no-data values by the variable's mean before normalization.
        for k in range(raster_coverages_land.shape[1]):
            raster_coverages_land[:,k][raster_coverages_land[:,k]<=self.raster_utils.no_data_val] = self.mean_vars[k]
        scaled_coverages_land = (raster_coverages_land - self.mean_vars) / self.std_vars
        del raster_coverages_land
        scaled_coverages_land[np.isnan(scaled_coverages_land)] = 0
        global_pred = clf.decision_function(scaled_coverages_land)
        del scaled_coverages_land
        #Setting Spatial Predictions
        Z = np.ones((stacked_raster_coverages_shape[1], stacked_raster_coverages_shape[2]), dtype=np.float32)
        # Z *= global_pred.min()
        # Z *=-1 #This will be necessary to set points outside map to the minimum
        Z*= self.raster_utils.no_data_val #This will be necessary to set points outside map to the minimum
        Z[self.idx_X, self.idx_Y] = global_pred
        del global_pred
        #Setting no data values
        Z[self.land_reference == self.raster_utils.no_data_val] = self.raster_utils.no_data_val
        return Z
    def predict_test_occurences(self,species_bunch,clf):
        """Predict adaptability (decision_function) for the held-out test occurrences only."""
        scaled_species_raster_test = (species_bunch['raster_data_test'] - self.mean_vars) / self.std_vars
        scaled_species_raster_test[np.isnan(scaled_species_raster_test)] = 0
        pred_test = clf.decision_function(scaled_species_raster_test)
        return pred_test
    def perform_K_folder_preidction(self,species_occurence_path:str,specie_shp_path:str,list_raster_files:List,K:int):
        """Run the K-fold fit/predict pipeline for one species and save all artifacts.

        NOTE: the method name keeps its original (misspelled) form for caller
        compatibility. For each fold it fits the model, predicts on the test
        occurrences and on the whole land raster, and saves every array under
        output_base_folder/<species>/KFold<i>/.
        """
        #1 Getting species name
        species_name = species_occurence_path.split("/")[-1].split(".")[0]
        print(f">>>>>>>>>> Performing Kofld prediction for {species_name} <<<<<<<<<<")
        #2 Recovering occurrences data
        species_gdf = gpd.read_file(specie_shp_path)
        coordinates = np.array((np.array(species_gdf['LATITUDE']),np.array(species_gdf['LONGITUDE']))).T
        species_raster_data = self.utils_methods.retrieve_data_from_np_array(species_occurence_path)
        #3 creating kfolds object
        kf = KFold(n_splits=K,random_state=self.seed, shuffle=True)
        #4 Executing Pipeline
        for i, (train_index, test_index) in enumerate(kf.split(species_raster_data)):
            print(f"------------------------------ KFold {i+1} ------------------------------")
            #creating Kfold Folder Structure
            kfold_path = os.path.join(self.output_base_folder,species_name,f"KFold{i+1}")
            self.utils_methods.create_folder_structure(kfold_path)
            species_raster_data_train, species_raster_data_test = species_raster_data[train_index], species_raster_data[test_index]
            coords_train, coords_test = coordinates[train_index], coordinates[test_index]
            species_bunch = {'species_name':species_name,
                             'raster_data_train':species_raster_data_train,
                             'raster_data_test':species_raster_data_test,
                             'coords_train':coords_train,
                             'coords_test':coords_test}
            clf = self.fit(species_bunch)
            #predicting values only for test points
            pred_test = self.predict_test_occurences(species_bunch,clf)
            #predicting land values
            Z = self.predict_land(clf)
            #save Z
            self.utils_methods.save_nparray_to_folder(Z,kfold_path,"Land_Prediction")
            del Z
            #save pred_test
            self.utils_methods.save_nparray_to_folder(pred_test,kfold_path,"Test_Prediction")
            del pred_test
            #save coords_train
            self.utils_methods.save_nparray_to_folder(species_bunch['coords_train'],kfold_path,"Coords_Train")
            #save_coords_test
            self.utils_methods.save_nparray_to_folder(species_bunch['coords_test'],kfold_path,"Coords_Test")
            #raster_data_train
            self.utils_methods.save_nparray_to_folder(species_bunch['raster_data_train'],kfold_path,"Species_Raster_Data_Train")
            #raster_data_test
            self.utils_methods.save_nparray_to_folder(species_bunch['raster_data_test'],kfold_path,"Species_Raster_Data_Test")
            del species_bunch
# + [markdown] id="ac3MPVoDTiyR"
# Creating Model Intance
# + colab={"base_uri": "https://localhost:8080/"} id="0EPmhy6qTgY9" outputId="f00a26d7-f6ed-4925-c496-09a5df0b759c"
model = OneClassSVMModel(hyperparams=hyperparams,
raster_utils=raster_utils,
utils_methods=utils_methods,
land_reference_path=country_mask_reference,
stacked_rasters_path=stacked_rasters_path,
brazil_vars_mean_std_path=brazil_vars_mean_std_path,
output_base_folder=output_base_folder)
# + [markdown] id="Hptu7n2RTlU8"
# Collecting Data
# + id="pFQe72LGL8mN"
if not collect_all:
species_occurence_path = os.path.join(project_root+"/Data/Rasters_As_Numpy_Arrays",species_name+'.npy')
specie_shp_path = os.path.join(project_root+"/Data/GBIF_Ocurrences",species_name,species_name+'.shp')
model.perform_K_folder_preidction(species_occurence_path =species_occurence_path,
specie_shp_path = specie_shp_path,
list_raster_files = list_raster_files,
K = n_KFolds)
# + [markdown] id="Rcwje7cGOWwV"
# ## Executing pipeline step for all studied species
# + id="OXV_fHuUOW4M" colab={"base_uri": "https://localhost:8080/"} outputId="e8c981d1-a177-427f-f5fe-1134b3a05a36"
if collect_all:
for tax_id, species_name in species_taxon_id_dict.items():
species_occurence_path = os.path.join(project_root+"/Data/Rasters_As_Numpy_Arrays",species_name+'.npy')
specie_shp_path = os.path.join(project_root+"/Data/GBIF_Ocurrences",species_name,species_name+'.shp')
model.perform_K_folder_preidction(species_occurence_path =species_occurence_path,
specie_shp_path = specie_shp_path,
list_raster_files = list_raster_files,
K = n_KFolds)
| Pipeline/3_ML_Process.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3.8.2 64-bit
# metadata:
# interpreter:
# hash: 11ddca5089527d17826da45cd024a0d3d3b64d2c1b5dbf54dd1c238d51a17f38
# name: python3
# ---
# # Analyzing the UncertaintyForest Class by Reproducing Conditional Entropy Estimates
#
# This set of four tutorials (`uncertaintyforest_running_example.ipynb`, `uncertaintyforest_posteriorestimates.ipynb`, `uncertaintyforest_conditionalentropyestimates.ipynb`, and `uncertaintyforest_mutualinformationestimates.ipynb`) will explain the UncertaintyForest class. After following these tutorials, you should have the ability to run UncertaintyForest on your own machine and generate Figures 1, 2, and 3 from [this paper](https://arxiv.org/pdf/1907.00325.pdf), which help you to visualize a comparison of the estimated posteriors and conditional entropy values for several different algorithms.
#
# If you haven't seen it already, take a look at other tutorials to setup and install the ProgLearn package: `installation_guide.ipynb`.
#
# *Goal: Run the UncertaintyForest class to produce a figure that compares estimated conditional entropy values for the UncertaintyForest, CART, and IRF algorithms, as in Figure 2 from [this paper](https://arxiv.org/pdf/1907.00325.pdf)*
# ## Import Required Packages
# +
import numpy as np
from sklearn.ensemble import RandomForestClassifier
from sklearn.calibration import CalibratedClassifierCV
from proglearn.forest import UncertaintyForest
from functions.unc_forest_tutorials_functions import generate_data_fig2, cart_estimate, true_cond_entropy, format_func, estimate_ce, get_cond_entropy_vs_n, get_cond_entropy_vs_mu, plot_cond_entropy_by_n, plot_cond_entropy_by_mu, plot_fig2
# -
# ## Specify Parameters
# +
# The following are two sets of parameters.
# The first are those that were actually used to produce figure 2.
# These take a long time to actually run since there are up to 6000 data points.
# Below those, you'll find some scaled-down parameters so that you can see the results more quickly.
# Here are the paper reproduction parameters
# mus = [i * 0.5 for i in range(1, 11)]
# effect_size = 1
# d1 = 1
# d2 = 20
# n1 = 3000
# n2 = 6000
# num_trials = 20
# num_plotted_trials = 10
# sample_sizes_d1 = range(300, 1201, 90)
# sample_sizes_d2 = range(500, 3001, 250)
# Here are the scaled-down tutorial parameters
mus = [i * 0.5 for i in range(1, 3)] # range of means of the data (x-axis in right column)
effect_size = 1 # mu for left column
d1 = 1 # data dimensions = 1
d2 = 3 # data dimensions = 1, noise dimensions = 19
n1 = 100 # number of data points for top row, right column (d1)
n2 = 110 # number of data points for bottom row, right column (d2)
num_trials = 2 # number of trials to run
num_plotted_trials = 2 # the number of "fainter" lines to be displayed on the figure
sample_sizes_d1 = range(100, 120, 10) # range of data points for top row, left column (d1)
sample_sizes_d2 = range(100, 130, 10) # range of data points for bottom row, left column (d2)
# -
# ## Specify Learners
# Now, we'll specify which learners we'll compare (by label). Figure 2 uses three different learners, which are further specified in the function `estimate_ce`, which returns estimates of conditional entropy for a given dataset (X, y) and type of learner.
# Algorithms used to produce Figure 2
algos = [
{
'label': 'CART',
'title': 'CART Forest',
'color': "#1b9e77",
},
{
'label': 'IRF',
'title': 'Isotonic Reg. Forest',
'color': "#fdae61",
},
{
'label': 'UF',
'title': 'Uncertainty Forest',
'color': "#F41711",
},
]
# ## Plot Figure 2
#
# Finally, we'll run the code to obtain and plot the estimated conditional entropy vs. means and sample sizes (4 subplots).
plot_fig2(num_plotted_trials, d1, d2, n1, n2, effect_size, algos, num_trials, sample_sizes_d1, sample_sizes_d2, mus)
| docs/tutorials/uncertaintyforest_conditionalentropyestimates.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + id="8mx6ssmraaXD"
#file=r'/content/00000355_s003_t000.edf'
from google.colab import drive
drive.mount("/content/gdrive")
# + id="JmQdEi2jbJrX"
file
# + id="A6rWeSC2ehXF"
pip install -U mne
# + id="z--lg7gDgN72"
import mne
import scipy
import matplotlib.pyplot as plt
# + id="8g2xYHlBg_Nk"
def nb_channels_file(filename):
    """Return the number of channels in an EDF recording."""
    recording = mne.io.read_raw_edf(filename)
    return len(recording.ch_names)
# + id="tF5rQRR1hJVh"
nb_channels_file(file)
# + id="r8rfWfZ5gTQ8"
#A function that returns a dictionary in which each key is an EDF file
#and each value is the number of channels of that file,
#for the patient whose ID is passed as a parameter
def patient_files_channels(ID):
    """Map each EDF file of patient ``ID`` to its channel count.

    Parameters
    ----------
    ID : object
        Patient identifier understood by fichiers_patient().

    Returns
    -------
    dict
        {edf_file_path: number_of_channels}.
    """
    # The original built two throwaway structures (a parallel value list and a
    # dict initialised twice); a comprehension expresses the mapping directly.
    return {file: nb_channels_file(file) for file in fichiers_patient(ID)}
# + id="pZ_sYuOVgg2Z"
def affiche_signaux_file(file):
    """Plot the first 4999 samples of every channel of an EDF file, one figure per channel."""
    data = mne.io.read_raw_edf(file)
    # Load the samples once; the original called get_data() again per channel.
    raw_data = data.get_data()
    for i in range(nb_channels_file(file)):
        plt.plot(raw_data[i][:4999])
        # Bug fix: plt.title("signal channel", i) passed the int i as the
        # fontdict argument (a TypeError at render time); format the channel
        # index into the title string instead.
        plt.title(f"signal channel {i}")
        plt.show()
# + id="lfyM4m34g4LN"
affiche_signaux_file(file)
# + id="JJ0-Ann9g7k0"
data = mne.io.read_raw_edf(file)
raw_data = data.get_data()
# + id="F5c4SEVGiAD-"
data
# + id="QydvWA_1iBsO"
info = data.info
channels = data.ch_names
# + id="UTE1VZ7yizEO"
raw_data
# + id="vm-yNGNzi3lN"
raw_data.shape
# + id="R4z7HBC9jDEb"
import matplotlib.pyplot as plt
plt.plot(raw_data[33,:90000])
plt.title("Raw EEG, electrode 0, samples 0-4999")
plt.show()
# + id="IlyPTrYojThY"
# + id="cBZ59H8Al-yH"
# + id="nmGQ4OhXmBuu"
# + id="1q8bKPOAmFKN"
# + id="pEFwr5Hcnez1"
##########################################
# #
# FILTRING #
# #
##########################################
# + id="GE_bMJHdrsI6"
#Butterworth filter (Low pass filter)
# + id="UXrTY_IkueWQ"
def filtrage_signaux_patient_channel(ID,i):
    """Low-pass filter channel ``i`` of every EDF file of patient ``ID``.

    Applies a 3rd-order Butterworth filter (40 Hz cutoff) forward-backward
    (zero phase) with filtfilt.

    Parameters
    ----------
    ID : object
        Patient identifier understood by fichiers_patient().
    i : int
        Channel index.

    Returns
    -------
    list of numpy.ndarray
        One filtered signal per file.
    """
    # Filter design is loop-invariant: design it once instead of once per file.
    cutoff = 40.
    fs = 1000  # NOTE(review): hard-coded sampling rate -- confirm it matches the recordings
    nyq = 0.5 * fs
    low = cutoff / nyq
    b, a = scipy.signal.butter(3, low, btype='low', analog=False)
    list_fichiers_filtred = []
    for fichier in fichiers_patient(ID):
        data = mne.io.read_raw_edf(fichier)
        # The original also stored a full get_data() copy that was never used.
        sig = data.get_data()[i]
        list_fichiers_filtred.append(scipy.signal.filtfilt(b, a, sig, axis=0))
    return list_fichiers_filtred
# + id="45vt8D2s41UU"
def filtrage_file_channel(file,i):
    """Low-pass filter channel ``i`` of one EDF file.

    3rd-order Butterworth filter, 40 Hz cutoff, applied forward-backward
    (zero phase) with filtfilt.

    Parameters
    ----------
    file : str
        Path to the EDF file.
    i : int
        Channel index.

    Returns
    -------
    numpy.ndarray
        The filtered 1-D signal.
    """
    data = mne.io.read_raw_edf(file)
    # (The original loaded a second, unused full copy of the data here.)
    cutoff = 40.
    fs = 1000  # NOTE(review): hard-coded sampling rate -- confirm it matches the recording
    nyq = 0.5 * fs
    low = cutoff / nyq
    b, a = scipy.signal.butter(3, low, btype='low', analog=False)
    sig = data.get_data()[i]
    return scipy.signal.filtfilt(b, a, sig, axis=0)
# + id="mGPugLmoHIAT"
filt=filtrage_file_channel(file,0)
# + id="O8HC1kcyHvjT"
plt.plot(raw_data[0][:90000])
plt.title("original signal")
plt.show()
plt.plot(filt[:90000])
plt.title("filtered signal : Lowpass Butterworth filter ")
plt.show()
# + id="io4UwdHuM7y4"
#FFT filter (smoothing filter)
# + id="SEtamNV-Nh3_"
import numpy as np
import scipy.fftpack
def filtrage_file_channel_FFT(i): #i:channel
# data = mne.io.read_raw_edf(file)
# raw_data = data.get_data()
sig=i
sig_fft = scipy.fftpack.fft(sig)
time_step = 0.02
period = 5.
power = np.abs(sig_fft)**2
sample_freq = scipy.fftpack.fftfreq(sig.size, d=time_step)
pos_mask = np.where(sample_freq > 0)
freqs = sample_freq[pos_mask]
peak_freq = freqs[power[pos_mask].argmax()]
np.allclose(peak_freq, 1./period)
high_freq_fft = sig_fft.copy()
high_freq_fft[np.abs(sample_freq) > peak_freq] = 0
filtered_sig = scipy.fftpack.ifft(high_freq_fft)
return filtered_sig #retourne un array qui représente le résultat du signal filtré sur un channel i
# + id="s_hDiA2tNmDO"
filt2=filtrage_file_channel_FFT(filt)
# + id="7gIxl5IsOQkU"
plt.plot(raw_data[0][:90000])
plt.title("original signal")
plt.show()
plt.plot(filt[:90000])
plt.title("filtered signal : Lowpass Butterworth filter ")
plt.show()
plt.plot(filt2[:90000])
plt.title("filtered signal : FFT filter (smoothing filter) ")
plt.show()
# + id="ozHJmHa0Qma9"
##########################################
# #
# DECOMPOS #
# #
##########################################
# + id="fotekgFsSL3m"
filt2
# + id="MfHFR-6PSQJi"
import pandas as pd
from pywt import wavedec
def decompose_signal_channel(filt2,i):
    """Decompose a (filtered) signal into 6-level db4 wavelet coefficient bands.

    Parameters
    ----------
    filt2 : numpy.ndarray
        1-D signal to decompose.
    i : int
        Channel index, used only to print the channel name.

    Returns
    -------
    pandas.DataFrame
        One column per band ('A6', 'D6', ..., 'D1'); bands have different
        lengths, so shorter columns are padded with NaN.

    NOTE(review): reads the notebook-global ``data`` (an mne Raw object) for
    the channel names -- confirm ``data`` is loaded before calling.
    """
    channels = data.ch_names
    print(channels[i])
    # 6 decomposition levels -> approximation A6 plus details D6..D1.
    coeffs_list = wavedec(filt2, wavelet='db4', level=6)
    coefficients = ['A6', 'D6', 'D5', 'D4', 'D3', 'D2', 'D1']
    # Build the frame directly from the coefficient arrays; the original went
    # through a throwaway list-of-lists (with an L=[[]] placeholder hack), an
    # unused transpose, and loop variables shadowing the parameter i.
    df = pd.DataFrame(columns=coefficients)
    for band, coeffs in zip(coefficients, coeffs_list):
        df[band] = pd.Series(coeffs.flatten())
    return df
# + id="fVcPHNgIT_b6"
df1=decompose_signal_channel(filt2,0)
# + id="a7tCv2B0U5Rn"
df1
# + id="boXzE5M1VeBI"
##########################################
# #
# FEATures eng #
# #
##########################################
# + id="6Z63xIAbYYhQ"
List_one=[1 for i in range(5182)]
# + id="5ShLilNgm5sz"
List_one
# + id="42V2_Qjim-hu"
Serie_one=pd.Series(List_one)
# + id="SJx9G0AsnFCp"
Serie_one
# + id="3paqdDxQnGcJ"
def minus_small(data):
    """Shift the data so its smallest value becomes 1.

    Parameters
    ----------
    data : pandas.Series or numpy.ndarray
        Numeric data (e.g. one wavelet-coefficient column).

    Returns
    -------
    Same type as ``data``, equal to ``data - data.min() + 1`` (every value >= 1).
    """
    # Bug fix: the original computed the shifted series and then returned the
    # untouched input. It also added a hard-coded list of 5182 ones, which only
    # worked for inputs of exactly that length; scalar +1 broadcasts instead.
    min_val = data.min()
    return data - min_val + 1
# + id="_JCbX2ndnPS0"
minus_small(df1['A6'])
# + id="1TjlKMEUnS0z"
df1.sum(axis=0)
# + id="eF1ch6n_niXJ"
df_sum=df1.sum(axis=0)
df_sum
# + id="_1egUDFPnm6A"
absolute_sum=df1.sum(axis=0)
# + id="nGa-lEEPnuGI"
absolute_sum
# + id="dqPElw98nwfO"
def ave(data, output=False):
    """Return the arithmetic mean of the data (``output`` is unused; kept for interface compatibility)."""
    return data.mean()
# + id="f3JG8z7Tn3J2"
ave(df1['A6'])
# + id="ISyFiGEan7Kh"
def mean_abs(data, output=False):
    """Return the mean of the absolute values of the data (``output`` is unused; kept for interface compatibility)."""
    return data.abs().mean()
# + id="N9bgq1Hcn-yQ"
mean_abs(df1['A6'])
# + id="ToM-5s7Vr5iK"
coefficients=['A6', 'D6', 'D5', 'D4', 'D3', 'D2', 'D1']
# + id="Lfow5CB9reXX"
Lmean_abs =['mean_abs_A6', 'mean_abs_D6', 'mean_abs_D5', 'mean_abs_D4', 'mean_abs_D3', 'mean_abs_D2', 'mean_abs_D1']
# + id="RkJ4ZLCjoJ3v"
dfM_abs = pd.DataFrame(columns=Lmean_abs)
# + id="zEvlYT4ErmDe"
def m_abs(df1):
    """Return the mean of absolute values for each decomposed band column of ``df1``, in the order given by the module-level ``coefficients`` list."""
    return [mean_abs(df1[band]) for band in coefficients]
# + id="A6qHg-b7tmWc"
# NOTE(review): this calls mean_abs on the whole DataFrame (yielding a
# Series of per-column means); presumably m_abs(df1) was intended — verify.
LmAbs=mean_abs (df1)
# + id="T-tWtPkIrt1U"
dfM_abs
# + id="N71PxV17s2i2"
# Store the feature vector as row 1 of the table.
dfM_abs.loc[1,:]=LmAbs
# + id="DndmB5-q_6gU"
dfM_abs
# + id="F-NEiK44bPnu"
# Number of raw channels to process.
raw_data.shape[0]
# + id="2gFsni_4_9q0"
# Decompose every raw channel and store its mean-abs features as one row.
for i in range(raw_data.shape[0]):
    df1=df1=decompose_signal_channel(raw_data[i],i)  # NOTE(review): duplicated "df1=" is harmless but redundant
    LmAbs=m_abs(df1)
    dfM_abs.loc[i,:]=LmAbs
# + id="Te4wCCNwXXxz"
raw_data
# + id="lZJiuuY9Xgc-"
dfM_abs
# + id="PGrT_6YGbg0q"
# + id="Cozg0em_b-L7"
channels
# + id="jZun70IscG95"
# Prepend the channel identifier as the first column (in place).
dfM_abs.insert(0, "channel", channels, True)
# + id="hqnx25RncmiC"
dfM_abs
# + id="84QrrVPMcnWC"
dfM_abs
# + id="dcut-DUfeiIY"
#df_a=dfM_abs.loc[0]
# + id="v2cNKjYwfHoE"
#df_a
# + id="SQJpPj3RfTUQ"
# --- MultiIndex experiments below (exploratory, not used elsewhere) ---
miindex = pd.MultiIndex.from_product([["x","y"], ["10","20"]],names=['row-foo', 'row-bar'])
# + id="lsfAeW360_qS"
miindex
# + id="kIlajcqE1d4r"
micol = pd.MultiIndex.from_product([['a','b','c'], ["1","2"]],names=['col-foo', 'col-bar'])
# + id="nGgFlSVs8kVe"
micol
# + id="KlQpdDaG8lk9"
# Empty frame with hierarchical rows and columns, lexsorted on both axes.
df5 = pd.DataFrame(index=miindex, columns=micol).sort_index().sort_index(axis=1)
# + id="ioqUsQD98udr"
df5
# + id="S-Uyx6qn8-eQ"
# Scalar lookup by full (row, column) MultiIndex keys.
df5.loc[ ('x','10'),('a', '2')]
# + id="YvisECl7_kxB"
miindex = pd.MultiIndex.from_product([["x","y"], ["10","20"]],names=['row-foo', 'row-bar'])
| Data P/INTERNAL DATA/Data_Preparation/EEG_DATA/filtering_eeg_file.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + id="ku7F_TeSgzer" outputId="51b4657a-7776-4435-e5b9-02671285a76b"
from GML import AutoNLP
# -
import pandas as pd
# + id="huRXlK4abIRi"
# Load the training split of the GML NLP demo dataset.
data = pd.read_csv('../input/nlp-dataset-for-gml/train.csv')
# -
# Text feature and binary target (is the paper about Computer Science?).
X = data['ABSTRACT'].copy()
y = data['Computer Science'].values
# + id="Yu_AO1KRbOYt"
nlp = AutoNLP()
# -
# Pass each text individually to the clean function; that's easy with pandas' apply function.
# Arguments of the clean function are as below:
#
# text, <br>
# fix_unicode=True, <br>
# to_ascii=True, <br>
# lower=True, <br>
# no_line_breaks=True, <br>
# no_urls=True, <br>
# no_emails=True, <br>
# no_phone_numbers=True, <br>
# no_numbers=True, <br>
# no_digits=True, <br>
# no_currency_symbols=True, <br>
# no_punct=True, <br>
# replace_with_url='<URL>', <br>
# replace_with_email='<EMAIL>', <br>
# replace_with_phone_number='<PHONE>', <br>
# replace_with_number='<NUMBER>', <br>
# replace_with_digit='0', <br>
# replace_with_currency_symbol='<CUR>', <br>
# lang='en'
# + id="XGl-IoyIbcW8"
# Clean every abstract.
cleanX = X.apply(lambda x: nlp.clean(x))
# + id="dnoxZjmTbhGm" outputId="d8af6e14-8878-41a9-8af7-adf5d8763009"
cleanX = pd.DataFrame(cleanX, columns = ['ABSTRACT'])
cleanX
# -
# Let's check all the models available for tokenizing and modeling.
# + id="ezGHSsbXdHGi" outputId="d71a5657-f145-45b3-b6f8-cf534342bbda"
nlp.Model_Names()
# -
# Let's use roberta-large-mnli for both tokenizing and modeling.
# + id="rQyxaXzGcrrx"
nlp.set_params(cleanX, tokenizer_name='roberta-large-mnli', BATCH_SIZE=4,
               model_name='roberta-large-mnli', MAX_LEN=200)
# -
# It is <b>IMPORTANT</b> to call the function above in order to initialize parameters.
# As we have specified max length to 200, let's truncate any long sentence.
def trunc_text(x, max_len=200):
    """Truncate string *x* to at most *max_len* characters.

    The default of 200 matches the MAX_LEN passed to nlp.set_params above;
    the limit is now a parameter instead of a hard-coded constant.
    """
    return x[:max_len] if len(x) > max_len else x
# Truncate every cleaned abstract to the model's maximum length.
cleanX = cleanX['ABSTRACT'].apply(lambda x: trunc_text(x))
# -
# Let's tokenize the text.
# + id="TXvurrJecIML" outputId="593d203c-776d-4648-bb0e-1d831cea67b1"
tokenizedX = nlp.tokenize(cleanX)
# + id="gJfT_iB4eFjx" outputId="6e264913-65db-487d-ac70-acb425e144d1"
tokenizedX
# + id="KW_7D4WTe9KH" outputId="2e5272db-3c50-406b-91fb-aaa9cb3b7a35"
# Fine-tune the transformer on tokenized abstracts vs the binary labels.
model = nlp.train_model(tokenizedX, y)
| DEMO/AutoTextClassification.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 2
# language: python
# name: python2
# ---
# goal:
# =====
#
# to build a model takes sparse X = x_{i,j} with i in [0,n], j in [0,m] and y, a n-dimensional label vector. we then build a k-rank latent representation of the i's and j's such that we minimize ||y_i - \sum_i u_i * v_j||, an inner product that minimizes loss between an example's label and an inner product between the item's embedding and the embedding induced by all item factors
# +
# import this stuff
import time
import sys
from pylab import *
from scipy import sparse
import numpy as np
import tensorflow as tf
from sklearn.cross_validation import train_test_split
from sklearn.preprocessing import OneHotEncoder
from sklearn.feature_extraction import FeatureHasher
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn import datasets
from sklearn.metrics import roc_auc_score, f1_score, confusion_matrix
# -
def factorize(observed_features,
              labels,
              observed_features_validation,
              labels_validation,
              rank,
              max_iter=100,
              batch_size = 100,
              verbose=False,
              lambda_v=0,
              lambda_u=0,
              epsilon=0.001,
              optimizer=None,
              seed=12345):
    """Train a rank-`rank` latent-factor softmax classifier (TensorFlow 1.x).

    Minimizes cross-entropy between `labels` and softmax(v' @ U @ x^T),
    with optional L2 regularization on U (`lambda_u`) and v' (`lambda_v`).
    Trains in mini-batches of `batch_size` for up to `max_iter` epochs,
    stopping early when the relative cost change drops below `epsilon`.

    Returns (predictions, test_cost, norm) evaluated on the validation set.
    """
    # FIX: the default used to be ``optimizer=tf.train.AdamOptimizer()``,
    # evaluated once at function-definition time, so every call shared one
    # optimizer instance (mutable-default-argument anti-pattern). Build a
    # fresh optimizer per call; passing an explicit one still works.
    if optimizer is None:
        optimizer = tf.train.AdamOptimizer()
    # Extract info about shapes etc from the training data
    num_items = observed_features.shape[0]
    num_features = observed_features.shape[1]
    num_classes = labels.shape[1]
    # U: rank x features item embedding; v_prime: classes x rank hyperplane.
    U = tf.Variable(tf.truncated_normal([rank, num_features], stddev=0.2, mean=0, seed=seed), name="item_explainers")
    v_prime = tf.Variable(tf.truncated_normal([num_classes, rank], stddev=0.2, mean=0, seed=seed), name="hyperplane")
    x = tf.placeholder(tf.float32, [None, num_features])
    y = tf.placeholder(tf.float32, [None, num_classes])
    # softmax(v' U x^T)^T, with L2-normalized inputs.
    pred = tf.nn.softmax(tf.transpose(tf.matmul(v_prime, tf.matmul(U, tf.transpose(tf.nn.l2_normalize(x, dim=0))))))
    cost = tf.reduce_mean(-tf.reduce_sum(y*tf.log(pred + 0.0000000001), reduction_indices=1) + # epsilon avoids nans if pred == 0
                          lambda_v*tf.nn.l2_loss(v_prime) + # regularization for v
                          lambda_u*tf.nn.l2_loss(U)) # regularization for U
    norm = tf.nn.l2_loss(v_prime)
    optimize = optimizer.minimize(cost)
    init = tf.initialize_all_variables()
    with tf.Session() as sess:
        sess.run(init)
        last_cost = 1000000
        for iter in range(0, max_iter):
            avg_cost = 0
            batches = int(np.ceil(num_items/batch_size))
            xs = np.array_split(observed_features, batches)
            ys = np.array_split(labels, batches)
            for i in range(batches):
                _, c, n = sess.run([optimize, cost, norm],
                                   feed_dict={x:xs[i], y:ys[i]})
                avg_cost += c / xs[i].shape[0]
            if verbose:
                print("epoch: %s, cost: %s, norm: %s" % (iter+1, avg_cost, n))
            # check for convergence: relative change in epoch cost below epsilon
            if abs(avg_cost-last_cost)/avg_cost < epsilon:
                break
            last_cost = avg_cost
        if verbose:
            print("optimization finished")
        # test prediction on the held-out validation set (rebinds `norm` to its value)
        predictions, test_costs, norm = sess.run([pred, cost, norm], feed_dict={x:observed_features_validation, y:labels_validation})
    return predictions, test_costs, norm
# +
# use this data for now: 4-category subset of 20 Newsgroups
categories = ['alt.atheism', 'soc.religion.christian', 'comp.graphics', 'sci.med']
ng = datasets.fetch_20newsgroups (categories=categories, shuffle=True)
# One-hot encode the integer class labels.
encoder = OneHotEncoder(sparse=False)
labels = encoder.fit_transform(ng.target.reshape(-1,1))
# TF-IDF features; drop terms appearing in fewer than 5 documents.
tfidf = TfidfVectorizer(decode_error=False, min_df=5)
# NOTE(review): sklearn.cross_validation (imported above) is the long-removed
# pre-0.20 module name — this notebook needs a very old scikit-learn.
X_train, X_test, y_train, y_test = train_test_split(ng.data, labels, test_size=.3)
# Fit the vectorizer on train only, then transform both splits to dense.
X_train = tfidf.fit_transform(X_train).todense()
X_test = tfidf.transform(X_test).todense()
# -
# Single run at rank 10 with light regularization on v.
r = 10
predictions, test_costs, norm = factorize(X_train, y_train, X_test, y_test, r, verbose=True, lambda_v=0.1, max_iter=30)
# NOTE(review): ``print("...") % (...)`` only works under the Python 2 kernel
# declared in this notebook's header; under Python 3 it raises TypeError.
print("rank: %s, cost: %s, norm: %s") % (r, test_costs, norm)
# Per-class and weighted-overall AUC on the validation split.
for i in range(y_train.shape[1]):
    print("class %s AUC: %s") % (i, roc_auc_score(y_test[:,i], predictions[:,i]))
print("overall AUC: %s") % roc_auc_score(y_test, predictions, average="weighted")
# ##
grid_aucs = {}
# grid search over regularization strengths at fixed rank
ranks = [10]
lambda_Us = [0, 1, .1, .01, .001]
lambda_vs = [0, 1, .1, .01, .001]
for r in ranks:
    for u in lambda_Us:
        for v in lambda_vs:
            predictions, test_costs, norm = factorize(X_train, y_train, X_test, y_test, r, verbose=False, lambda_v=v, lambda_u=u, max_iter=200)
            auc = roc_auc_score(y_test, predictions, average="weighted")
            print("r: %s, u: %s, v: %s, overall AUC: %s") % (r,u,v,auc)
            grid_aucs[(r,u,v)] = auc
# +
# Contour plot of AUC over the (lambda_u, lambda_v) grid at rank 10.
zs = np.array([[grid_aucs[(10,u,v)] for u in [1, .1, 0.01, 0.001, 0]] for v in [1, .1, 0.01, 0.001, 0]])
# %pylab inline
plt.figure()
plt.contourf(zs)
plt.colorbar()
plt.grid('on')
# -
| ipython_notebooks/latent factor logistic regression.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/yukinaga/ai_programming/blob/main/lecture_07/08_exercise.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + [markdown] id="Niaz8_W6OX34"
# # 演習
# 新たなデータ拡張を追加し、CNNのモデルを構築しましょう。
#
# + [markdown] id="24KV93msd3i-"
# ## 領域のランダムな消去
# 新たにデータ拡張を追加します。
# transforms.RandomErasingにより、画像の領域がランダムに消去されます。
# https://pytorch.org/docs/stable/torchvision/transforms.html#torchvision.transforms.RandomErasing
#
# + id="TVWgInj2luno"
from torchvision.datasets import CIFAR10
import torchvision.transforms as transforms
from torch.utils.data import DataLoader
import numpy as np
import matplotlib.pyplot as plt
transform = transforms.Compose([
transforms.RandomAffine([-30, 30], scale=(0.8, 1.2)), # 回転とリサイズ
transforms.ToTensor(),
transforms.RandomErasing(p=0.5)]) # 確率0.5でランダムに領域を消去
cifar10_data = CIFAR10(root="./data",
train=False,download=True,
transform=transform)
cifar10_classes = np.array(["airplane", "automobile", "bird", "cat", "deer",
"dog", "frog", "horse", "ship", "truck"])
print("データの数:", len(cifar10_data))
n_image = 25 # 表示する画像の数
cifar10_loader = DataLoader(cifar10_data, batch_size=n_image, shuffle=True)
dataiter = iter(cifar10_loader) # イテレータ
images, labels = dataiter.next() # 最初のバッチを取り出す
plt.figure(figsize=(10,10)) # 画像の表示サイズ
for i in range(n_image):
plt.subplot(5,5,i+1)
plt.imshow(np.transpose(images[i], (1, 2, 0))) # チャンネルを一番後ろに
label = cifar10_classes[labels[i]]
plt.title(label)
plt.tick_params(labelbottom=False, labelleft=False, bottom=False, left=False) # ラベルとメモリを非表示に
plt.show()
# + [markdown] id="vsncPqQ-gZJr"
# ## データの前処理
# ここからCNNを実装していきます。
# 以下のセルにコードを追記し、データ拡張の一環としてtransforms.RandomErasingによるランダムな画像領域の消去を実装しましょう。
# + id="7t3NRHjhKyC0"
from torchvision.datasets import CIFAR10
import torchvision.transforms as transforms
from torch.utils.data import DataLoader
affine = transforms.RandomAffine([-15, 15], scale=(0.8, 1.2)) # 回転とリサイズ
flip = transforms.RandomHorizontalFlip(p=0.5) # 左右反転
normalize = transforms.Normalize((0.0, 0.0, 0.0), (1.0, 1.0, 1.0)) # 平均値を0、標準偏差を1に
to_tensor = transforms.ToTensor()
erase = # ← 左にコードを追記
transform_train = transforms.Compose([affine, flip, to_tensor, normalize, erase])
transform_test = transforms.Compose([to_tensor, normalize])
cifar10_train = CIFAR10("./data", train=True, download=True, transform=transform_train)
cifar10_test = CIFAR10("./data", train=False, download=True, transform=transform_test)
# DataLoaderの設定
batch_size = 64
train_loader = DataLoader(cifar10_train, batch_size=batch_size, shuffle=True)
test_loader = DataLoader(cifar10_test, batch_size=len(cifar10_test), shuffle=False)
# + [markdown] id="FalXNYaJPkoE"
# ## モデルの構築
# 以下のセルで、forwardメソッドの内部にコードを記述しCNNのモデルを構築しましょう。
# + id="SuqqZmsh_jNK"
import torch.nn as nn
import torch.nn.functional as F

class Net(nn.Module):
    """Small CNN for CIFAR-10 (exercise: forward pass left for the student)."""
    def __init__(self):
        super().__init__()
        self.conv1 = nn.Conv2d(3, 8, 5)  # conv layer: (in channels, filters, kernel size)
        self.pool = nn.MaxPool2d(2, 2)  # pooling layer: (window size, stride)
        self.conv2 = nn.Conv2d(8, 32, 5)
        self.fc1 = nn.Linear(32*5*5, 256)  # fully connected layer
        self.dropout = nn.Dropout(p=0.5)  # dropout: (p = drop probability)
        self.fc2 = nn.Linear(256, 10)

    def forward(self, x):
        # NOTE: intentionally blank — the exercise asks the student to write
        # the forward pass here (see the worked solution further below).
        # ------- write your code below -------

        # ------- up to here -------
        return x

net = Net()
net.cuda()  # move model to GPU
print(net)
# + [markdown] id="qsW5zCKhQE9p"
# ## 学習
# モデルを訓練します。
# エラーが発生せず、学習に伴い訓練誤差とテスト誤差が共に減少することを確認しましょう。
# 学習には時間がかかりますので、編集→ノートブックの設定のハードウェアアクセラレーターでGPUを選択しましょう。
#
# + id="u6zwN3nArbGC"
from torch import optim
# 交差エントロピー誤差関数
loss_fnc = nn.CrossEntropyLoss()
# 最適化アルゴリズム
optimizer = optim.Adam(net.parameters())
# 損失のログ
record_loss_train = []
record_loss_test = []
# 学習
x_test, t_test = iter(test_loader).next()
x_test, t_test = x_test.cuda(), t_test.cuda()
for i in range(20): # 20エポック学習
net.train() # 訓練モード
loss_train = 0
for j, (x, t) in enumerate(train_loader): # ミニバッチ(x, t)を取り出す
x, t = x.cuda(), t.cuda() # GPU対応
y = net(x)
loss = loss_fnc(y, t)
loss_train += loss.item()
optimizer.zero_grad()
loss.backward()
optimizer.step()
loss_train /= j+1
record_loss_train.append(loss_train)
net.eval() # 評価モード
y_test = net(x_test)
loss_test = loss_fnc(y_test, t_test).item()
record_loss_test.append(loss_test)
if i%1 == 0:
print("Epoch:", i, "Loss_Train:", loss_train, "Loss_Test:", loss_test)
# + [markdown] id="rJwwrWTw43rx"
# ## 誤差の推移
# 訓練データ、テストデータで誤差の推移をグラフ表示します。
# + id="OaJx4swE45XI"
import matplotlib.pyplot as plt
plt.plot(range(len(record_loss_train)), record_loss_train, label="Train")
plt.plot(range(len(record_loss_test)), record_loss_test, label="Test")
plt.legend()
plt.xlabel("Epochs")
plt.ylabel("Error")
plt.show()
# + [markdown] id="iMrpac0m4Nct"
# ## 正解率
# モデルの性能を把握するため、テストデータ使い正解率を測定します。
# + id="IRkGCYMM_N35"
correct = 0
total = 0
net.eval() # 評価モード
for i, (x, t) in enumerate(test_loader):
x, t = x.cuda(), t.cuda() # GPU対応
y = net(x)
correct += (y.argmax(1) == t).sum().item()
total += len(x)
print("正解率:", str(correct/total*100) + "%")
# + [markdown] id="LrRAJzwD4zpN"
# ## 訓練済みのモデルを使った予測
# 画像を入力し、モデルが機能していることを確かめます。
# + id="Pdy9nPckTDik"
cifar10_loader = DataLoader(cifar10_test, batch_size=1, shuffle=True)
dataiter = iter(cifar10_loader)
images, labels = dataiter.next() # サンプルを1つだけ取り出す
plt.imshow(np.transpose(images[0], (1, 2, 0))) # チャンネルを一番後ろに
plt.tick_params(labelbottom=False, labelleft=False, bottom=False, left=False) # ラベルとメモリを非表示に
plt.show()
net.eval() # 評価モード
x, t = images.cuda(), labels.cuda() # GPU対応
y = net(x)
print("正解:", cifar10_classes[labels[0]],
"予測結果:", cifar10_classes[y.argmax().item()])
# + [markdown] id="VEBDnUbHZ21y"
# # 解答例
# 以下は、どうしても手がかりがないときのみ参考にしましょう。
# + id="JTHeygFKscPI"
from torchvision.datasets import CIFAR10
import torchvision.transforms as transforms
from torch.utils.data import DataLoader
affine = transforms.RandomAffine([-15, 15], scale=(0.8, 1.2)) # 回転とリサイズ
flip = transforms.RandomHorizontalFlip(p=0.5) # 左右反転
normalize = transforms.Normalize((0.0, 0.0, 0.0), (1.0, 1.0, 1.0)) # 平均値を0、標準偏差を1に
to_tensor = transforms.ToTensor()
erase = transforms.RandomErasing(p=0.5) # ← 左にコードを追記
transform_train = transforms.Compose([affine, flip, to_tensor, normalize, erase])
transform_test = transforms.Compose([to_tensor, normalize])
cifar10_train = CIFAR10("./data", train=True, download=True, transform=transform_train)
cifar10_test = CIFAR10("./data", train=False, download=True, transform=transform_test)
# DataLoaderの設定
batch_size = 64
train_loader = DataLoader(cifar10_train, batch_size=batch_size, shuffle=True)
test_loader = DataLoader(cifar10_test, batch_size=len(cifar10_test), shuffle=False)
# + id="ZbtDEl0GscZK"
import torch.nn as nn
import torch.nn.functional as F
class Net(nn.Module):
def __init__(self):
super().__init__()
self.conv1 = nn.Conv2d(3, 8, 5) # 畳み込み層:(入力チャンネル数, フィルタ数、フィルタサイズ)
self.pool = nn.MaxPool2d(2, 2) # プーリング層:(領域のサイズ, ストライド)
self.conv2 = nn.Conv2d(8, 32, 5)
self.fc1 = nn.Linear(32*5*5, 256) # 全結合層
self.dropout = nn.Dropout(p=0.5) # ドロップアウト:(p=ドロップアウト率)
self.fc2 = nn.Linear(256, 10)
def forward(self, x):
# ------- 以下にコードを書く -------
x = self.pool(F.relu(self.conv1(x)))
x = self.pool(F.relu(self.conv2(x)))
x = x.view(-1, 32*5*5)
x = F.relu(self.fc1(x))
x = self.dropout(x)
x = self.fc2(x)
# ------- ここまで -------
return x
net = Net()
net.cuda() # GPU対応
print(net)
| lecture_07/08_exercise.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # 从 Google Finance 拉取股价数据
# ### Introduction:
#
# This time you will get data from a website.
#
#
# ### Step 1. Import the necessary libraries
# +
import pandas as pd
# package to extract data from various Internet sources into a DataFrame
# make sure you have it installed
from pandas_datareader import data, wb
# package for dates
import datetime as dt
# -
# ### Step 2. Create your time range (start and end variables). The start date should be 01/01/2015 and the end should today (whatever your today is)
# +
# Date range: 2015-01-01 through today.
start = dt.datetime(2015, 1, 1)
end = dt.datetime.today()
start
# -
# ### Step 3. Select the Apple, Tesla, Twitter, IBM, LinkedIn stocks symbols and assign them to a variable called stocks
stocks = ['AAPL', 'TSLA', 'IBM', 'LNKD']
# ### Step 4. Read the data from google, assign to df and print it
# FIX: the import cell binds `data` (and `wb`) from pandas_datareader, but no
# name `web` — the original `web.DataReader(...)` raised NameError.
df = data.DataReader(stocks, 'google', start, end)
df
# ### Step 5. What is the type of structure of df ?
# +
# 'pandas.core.panel.Panel'
# -
# ### Step 6. Print all the Items axis values
# #### To learn more about the Panel structure go to [documentation](http://pandas.pydata.org/pandas-docs/stable/dsintro.html#panel)
# NOTE(review): pandas Panel (and its `.items` axis) was removed in pandas
# 0.25 — this notebook needs a legacy pandas to run as written.
df.items
# ### Step 7. Good, now we know the data avaiable. Create a dataFrame called vol, with the Volume values.
vol = df['Volume']
vol.head()
# ### Step 8. Aggregate the data of Volume to weekly
# #### Hint: Be careful to not sum data from the same week of 2015 and other years.
# +
# Group by (week, year) so week 1 of different years is not merged.
# NOTE(review): DatetimeIndex.week is deprecated in modern pandas
# (use .isocalendar().week).
vol['week'] = vol.index.week
vol['year'] = vol.index.year
week = vol.groupby(['week','year']).sum()
week.head()
# -
# ### Step 9. Find all the volume traded in the year of 2015
# +
# Drop the helper week column and aggregate per year instead.
del vol['week']
vol['year'] = vol.index.year
year = vol.groupby(['year']).sum()
year
# -
| 09_Time_Series/Getting_Financial_Data/Exercises_with_solutions_and_code.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Strings and Data Type Conversion
# <h3>About the Author</h3>
# This repo was created by <a href="https://www.linkedin.com/in/jubayer28/" target="_blank"><NAME></a> <br>
# <a href="https://www.linkedin.com/in/jubayer28/" target="_blank"><NAME></a> is a student of Microbiology at Jagannath University and the founder of <a href="https://github.com/hdro" target="_blank">Health Data Research Organization</a>. He is also a team member of a bioinformatics research group known as Bio-Bio-1.
#
# <a rel="license" href="http://creativecommons.org/licenses/by-nc-sa/4.0/"><img alt="Creative Commons License" style="border-width:0" src="https://i.creativecommons.org/l/by-nc-sa/4.0/88x31.png" /></a><br />This work is licensed under a <a rel="license" href="http://creativecommons.org/licenses/by-nc-sa/4.0/">Creative Commons Attribution-NonCommercial-ShareAlike 4.0 International License</a>.
| book/pandas/14-Strings and Data Type Conversion.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
# Load the cleaned salary-prediction dataset.
df = pd.read_csv('C:/Users/reena/Desktop/Salary_Prediction/data/cleaned/final_data.csv')
df.head()
def short_title(title):
    """Map a raw job title to a short role code.

    Checks are ordered and the first match wins (e.g. a title containing
    both 'analyst' and 'machine learning' maps to 'DA'). Returns 'na' when
    no keyword matches.
    """
    # FIX: lower-case once instead of recomputing title.lower() on every branch.
    t = title.lower()
    if 'data scientist' in t:
        return 'DS'
    elif 'data engineer' in t:
        return 'DE'
    elif 'analyst' in t:
        return 'DA'
    elif 'machine learning' in t:
        return 'MLE'
    elif 'manager' in t:
        return 'DM'
    elif 'director' in t:
        return 'DD'
    else:
        return 'na'
def job_level(title):
    """Classify a job title as 'senior', 'junior', or 'na'.

    Substring matching: 'sr' also covers 'sr.' (likewise 'jr'/'jr.');
    the redundant dotted keywords are kept to mirror the original intent.
    """
    # FIX: lower-case once instead of recomputing title.lower() per keyword.
    t = title.lower()
    if 'sr' in t or 'senior' in t or 'sr.' in t or 'lead' in t or 'principal' in t:
        return 'senior'
    elif 'jr' in t or 'jr.' in t:
        return 'junior'
    else:
        return 'na'
# Derive categorical features from the raw job title.
df['Job Type'] = df['Job Title'].apply(short_title)
df['Job Type'].value_counts()
df['Job Level'] = df['Job Title'].apply(job_level)
df['Job Level'].value_counts()
# Normalize state strings; map the 'Los Angeles' mislabel to CA.
df['State']= df.State.apply(lambda x: x.strip() if x.strip().lower() != 'los angeles' else 'CA')
df.State.value_counts()
# Job-description length as a numeric feature.
df['Jobdesc_len'] = df['Job Description'].apply(lambda x: len(x))
df['Jobdesc_len']
# Competitor count; '-1' is the missing-value marker in this dataset.
df['Competitors_count'] = df['Competitors'].apply(lambda x: len(x.split(',')) if x != '-1' else 0)
df['Competitors_count']
# Annualize hourly salaries (x2 — presumably hourly rates were listed in
# thousands per half-year equivalent; confirm the scaling convention).
df['min_salary'] = df.apply(lambda x: x.min_salary*2 if x.hourly ==1 else x.min_salary, axis =1)
df['max_salary'] = df.apply(lambda x: x.max_salary*2 if x.hourly ==1 else x.max_salary, axis =1)
df[df.hourly ==1][['hourly','min_salary','max_salary']]
# Strip embedded newlines from company names.
df['Company'] = df.Company.apply(lambda x: x.replace('\n', ''))
df.Company
df.describe()
df.columns
# Model-ready subset, then one-hot encode and persist for later EDA/modeling.
model_df = df[['avg_salary','Rating','Size','Type of ownership','Industry','Sector','Revenue','Competitors_count','hourly','employer_provided',
               'State','In-HQ_State','Company Age','Python','Spark','AWS','Excel','Job Type','Job Level','Jobdesc_len']]
df_extd = pd.get_dummies(model_df)
df_extd.to_csv('C:/Users/reena/Desktop/Salary_Prediction/data/eda/eda_data.csv',index=False)
# Distributions of the main numeric features.
df.Rating.hist()
df.avg_salary.hist()
df['Company Age'].hist()
df.Jobdesc_len.hist()
df.boxplot(column = ['Company Age','avg_salary','Rating'])
df.boxplot(column = 'Rating')
df[['Company Age','avg_salary','Rating','Jobdesc_len']].corr()
# Correlation heatmap of the numeric features.
Corr_map = sns.diverging_palette(200, 30, as_cmap=True)
sns.heatmap(df[['Company Age','avg_salary','Rating','Jobdesc_len','Competitors_count']].corr(),vmax=.3, center=0, cmap=Corr_map,
            square=True, linewidths=.5, cbar_kws={"shrink": .5})
df.columns
# Bar charts for every categorical feature.
df_categorical = df[['Location', 'Headquarters', 'Size','Type of ownership', 'Industry', 'Sector', 'Revenue',
                     'Company', 'State','In-HQ_State', 'Python', 'R','Spark', 'AWS', 'Excel', 'Job Type','Job Level']]
for i in df_categorical.columns:
    cat_count = df_categorical[i].value_counts()
    print("graph for %s: total = %d" % (i, len(cat_count)))
    chart = sns.barplot(x=cat_count.index, y=cat_count)
    chart.set_xticklabels(chart.get_xticklabels(), rotation=90)
    plt.show()
# Top-20 only for the high-cardinality columns.
for i in df_categorical[['Location','Headquarters','Company']].columns:
    cat_count = df_categorical[i].value_counts()[:20]
    print("graph for %s: total = %d" % (i, len(cat_count)))
    chart = sns.barplot(x=cat_count.index, y=cat_count)
    chart.set_xticklabels(chart.get_xticklabels(), rotation=90)
    plt.show()
# Average salary broken down by role, level, and state.
pd.pivot_table(df, index = 'Job Type', values = 'avg_salary')
pd.pivot_table(df, index = ['Job Type', 'Job Level'], values = 'avg_salary')
pd.pivot_table(df, index = ['State'], values = 'avg_salary').sort_values('avg_salary', ascending = False)
pd.pivot_table(df, index = ['State','Job Type'], values = 'avg_salary').sort_values('State', ascending = False)
pd.options.display.max_rows
pd.set_option('display.max_rows', None)
pd.pivot_table(df, index = ['State','Job Type'], values = 'avg_salary', aggfunc = 'count').sort_values('State', ascending = False)
pd.pivot_table(df[df['Job Type'] == 'DS'], index = 'State', values = 'avg_salary').sort_values('avg_salary', ascending = False)
# Average salary by each remaining candidate feature.
df_pivots = df[['Rating', 'Industry', 'Sector', 'Revenue', 'Competitors_count', 'hourly', 'employer_provided', 'Python', 'R', 'Spark', 'AWS', 'Excel', 'Type of ownership','avg_salary']]
for i in df_pivots.columns:
    if i!= 'avg_salary':
        print(i)
        print(pd.pivot_table(df_pivots,index =i, values = 'avg_salary').sort_values('avg_salary', ascending = False))
pd.pivot_table(df_pivots, index = 'Revenue', columns = 'Python', values = 'avg_salary', aggfunc = 'count')
| notebooks/EDA.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Import necessary libraries
import numpy as np
import pandas as pd
# # Import skeleton from plantscan3d as .txt file (MTG)
# +
# Loading in data
filename = 'two_node_list.txt' #put your filename here
# Parse the plantscan3d MTG export: skip the 3 header rows; keep node id,
# parent id, and the X/Y/Z coordinates.
my_array = pd.read_csv(filename,skiprows=3,sep='\t',usecols=[0,1,3,4,5],names=['Node','Parent','X','Y','Z'])
# Root node has no parent; set it to 0.
# NOTE(review): column-then-.loc chained assignment may trigger
# SettingWithCopyWarning on modern pandas — confirm it writes through.
my_array['Parent'].loc[0] = 0
my_array['Parent'] = my_array['Parent'].astype('int')
# visualize
my_array
# -
# # Function for calculating branch angles
# +
# function for returning a list of angles
def branch_angles(my_array):
    """Return a list of branch angles (degrees) for a plant-skeleton table.

    *my_array* has columns Node, Parent, X, Y, Z. A branching point is any
    parent referenced by >= 2 children; at each one, the child branch most
    aligned with the incoming (grandparent->parent) direction is taken as
    the axis, and the angle of every other branch to that axis is reported.
    """
    parents = my_array['Parent'] # getting all of the parent nodes
    angles = [] # list of angles for final output
    tested = [] #list of parents already tested
    for parent in parents: # for each parent in the list
        children = my_array[my_array['Parent'] == parent] # retrieve rows of dataframe that list the parent node as their parent
        if len(children) >= 2 and parent not in tested: # if that's 2 or greater, it must be a branching point
            parent_to_child_vectors = [] # list of vectors for branch vectors
            tested.append(parent) # we're testing it now, so add this parent node to the tested list so we don't do it again
            dot_products = [] # making a list of dot products for later
            reference_node_number = my_array[my_array['Node'] == parent]['Parent']
            reference_node_number = reference_node_number.values[0] # we need to first find the node preceding the parent to make a baseline vector
            parent_node_coordinates = np.array([my_array[my_array['Node']==parent]['X'],my_array[my_array['Node']==parent]['Y'],my_array[my_array['Node']==parent]['Z']]) #get the coorddiinates of the parent node
            reference_node_coordinates = np.array([my_array[my_array['Node']==reference_node_number]['X'],my_array[my_array['Node']==reference_node_number]['Y'],my_array[my_array['Node']==reference_node_number]['Z']]) # get the coordinates of the reference node
            reference_to_parent_vector = [parent_node_coordinates - reference_node_coordinates] #subtract to get the vector
            # for each child node, retrieve child node coordinates, make a child vector, calculate intervening branch angle and append to a list to check later
            for i in range(len(children)):
                child_node_number = children['Node'].iloc[i]
                child_node_coordinates = np.array([my_array[my_array['Node']==child_node_number]['X'],my_array[my_array['Node']==child_node_number]['Y'],my_array[my_array['Node']==child_node_number]['Z']]) #getting child node coordinates
                parent_to_child_vector = [parent_node_coordinates - child_node_coordinates] # calculating parent to child (or branch) vector
                parent_to_child_vectors.append(parent_to_child_vector) #adding to the list
                # this section calculates the dot between each "branch" vector and the reference vector, with the logic being that the
                # highest dot product will correspond to the branch vector/reference vector pair with the highest similarity to one another.
                # We'll treat this as the "axis" we compare against in the proper branch angle calculations.
                unit_vector_1 = reference_to_parent_vector / np.linalg.norm(reference_to_parent_vector)
                unit_vector_2 = parent_to_child_vector / np.linalg.norm(parent_to_child_vector)
                # flatten the nested [3x1] arrays into plain [x, y, z] lists before np.dot
                unit_vector_1 = unit_vector_1.tolist()
                unit_vector_2 = unit_vector_2.tolist()
                unit_vector_1 = [unit_vector_1[0][0][0],unit_vector_1[0][1][0],unit_vector_1[0][2][0]]
                unit_vector_2 = [unit_vector_2[0][0][0],unit_vector_2[0][1][0],unit_vector_2[0][2][0]]
                dot_product = np.dot(unit_vector_1, unit_vector_2)
                dot_products.append(dot_product)
            max_index = np.argmax(dot_products) # finding which dot product is the highest and getting its index
            reference_vector = parent_to_child_vectors[max_index] # retrieving the parent_to_child vector that corresponds to that maximal dot product
            # now that we've established what we're comparing against, we go through each branch vector and compare to this reference (being sure to not report out the 0 degrees in the case where we're comparing the same things)
            for i in range(len(parent_to_child_vectors)):
                unit_vector_1 = reference_vector / np.linalg.norm(reference_vector)
                unit_vector_2 = parent_to_child_vectors[i] / np.linalg.norm(parent_to_child_vectors[i])
                unit_vector_1 = unit_vector_1.tolist()
                unit_vector_2 = unit_vector_2.tolist()
                unit_vector_1 = [unit_vector_1[0][0][0],unit_vector_1[0][1][0],unit_vector_1[0][2][0]]
                unit_vector_2 = [unit_vector_2[0][0][0],unit_vector_2[0][1][0],unit_vector_2[0][2][0]]
                dot_product = np.dot(unit_vector_1, unit_vector_2)
                # getting angle in radians, then converting to degrees
                angle_in_radians = np.arccos(dot_product)
                angle_in_degrees = np.degrees(angle_in_radians)
                # NOTE(review): the reference branch compared with itself gives
                # exactly 0 degrees, which this test filters out; a genuine
                # 0-degree branch would also be dropped.
                if angle_in_degrees != 0: #as long as it's not 0, add and report
                    angles.append(angle_in_degrees)
    return angles
# -
# # Retrieve angles from our previously loaded array
# Compute and display the branch angles for the loaded skeleton.
angles = branch_angles(my_array)
# # Print angles
print(angles)
| 20210414_JK_AngleEstimator.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
import numpy as np
# %matplotlib inline
from sklearn.datasets import load_boston
from sklearn.metrics import r2_score
# -
type(load_boston())
# NOTE(review): load_boston was removed in scikit-learn 1.2; this notebook
# requires an older scikit-learn to run as written.
boston_data = load_boston()
boston_data.feature_names
# Build a DataFrame of the features and attach the target as MEDV.
boston = pd.DataFrame(boston_data.data, columns=boston_data.feature_names)
boston.head(20)
boston['MEDV'] = boston_data.target
# Distribution of the target (median home value).
sns.set(rc={'figure.figsize':(11.7,8.27)})
sns.distplot(boston['MEDV'], bins=30)
plt.show()
correlation_matrix = boston.corr().round(2)
# annot = True to print the values inside the square
sns.heatmap(data=correlation_matrix, annot=True, cmap='coolwarm')
# +
# Scatter plots of the two most correlated features against MEDV.
plt.figure(figsize=(20, 5))
features = ['LSTAT', 'RM']
target = boston['MEDV']
for i, col in enumerate(features):
    plt.subplot(1, len(features) , i+1)
    x = boston[col]
    y = target
    plt.scatter(x, y, marker='o')
    plt.title(col)
    plt.xlabel(col)
    plt.ylabel('MEDV')
# -
# Design matrix with the two selected features, and the target vector.
X = pd.DataFrame(np.c_[boston['LSTAT'], boston['RM']], columns = ['LSTAT','RM'])
Y = boston['MEDV']
# +
from sklearn.model_selection import train_test_split

# 80/20 train/test split with a fixed seed for reproducibility.
X_train, X_test, Y_train, Y_test = train_test_split(X, Y, test_size = 0.2, random_state=5)
print(X_train.shape)
print(X_test.shape)
print(Y_train.shape)
print(Y_test.shape)
# +
from sklearn.linear_model import LinearRegression
from sklearn.metrics import mean_squared_error

# Fit ordinary least squares on the two selected features.
linear_model = LinearRegression()
linear_model.fit(X_train, Y_train)
# +
# model evaluation for training set
y_train_predict = linear_model.predict(X_train)
rmse = (np.sqrt(mean_squared_error(Y_train, y_train_predict)))
r2 = r2_score(Y_train, y_train_predict)

print("The model performance for training set")
print("--------------------------------------")
print('RMSE is {}'.format(rmse))
print('R2 score is {}'.format(r2))
print("\n")

# model evaluation for testing set
y_test_predict = linear_model.predict(X_test)
rmse = (np.sqrt(mean_squared_error(Y_test, y_test_predict)))
r2 = r2_score(Y_test, y_test_predict)

print("The model performance for testing set")
print("--------------------------------------")
print('RMSE is {}'.format(rmse))
print('R2 score is {}'.format(r2))
# -
| Boston Housing .ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Desafio 6
#
# Neste desafio, vamos praticar _feature engineering_, um dos processos mais importantes e trabalhosos de ML. Utilizaremos o _data set_ [Countries of the world](https://www.kaggle.com/fernandol/countries-of-the-world), que contém dados sobre os 227 países do mundo com informações sobre tamanho da população, área, imigração e setores de produção.
#
# > Obs.: Por favor, não modifique o nome das funções de resposta.
# ## _Setup_ geral
# +
import pandas as pd
import numpy as np
import seaborn as sns
import sklearn as sk
from sklearn.datasets import fetch_20newsgroups
from sklearn.pipeline import Pipeline
from sklearn.compose import ColumnTransformer
from sklearn.impute import SimpleImputer
from sklearn.preprocessing import StandardScaler, KBinsDiscretizer, OneHotEncoder
from sklearn.feature_extraction.text import CountVectorizer, TfidfVectorizer
# -
# Algumas configurações para o matplotlib.
# %matplotlib inline
from IPython.core.pylabtools import figsize
figsize(12, 8)
sns.set()
# The CSV uses commas as decimal separators; tell pandas so numeric columns load as floats
countries = pd.read_csv('countries.csv', decimal=',')
# +
# Replacement column names: English, underscore-separated, one per original column
new_column_names = [
    'Country', 'Region', 'Population', 'Area', 'Pop_density', 'Coastline_ratio',
    'Net_migration', 'Infant_mortality', 'GDP', 'Literacy', 'Phones_per_1000',
    'Arable', 'Crops', 'Other', 'Climate', 'Birthrate', 'Deathrate', 'Agriculture',
    'Industry', 'Service'
]
countries.columns = new_column_names
countries.head(5)
# -
# -
# ## Observações
#
# Esse _data set_ ainda precisa de alguns ajustes iniciais. Primeiro, note que as variáveis numéricas estão usando vírgula como separador decimal e estão codificadas como strings. Corrija isso antes de continuar: transforme essas variáveis em numéricas adequadamente.
#
# Além disso, as variáveis `Country` e `Region` possuem espaços a mais no começo e no final da string. Você pode utilizar o método `str.strip()` para remover esses espaços.
# ## Inicia sua análise a partir daqui
# Sua análise começa aqui.
pd.DataFrame({
'dtype': countries.dtypes,
'nulls': countries.isna().sum(),
'nulls (%)': countries.isna().mean()
})
# OK, tudo bem por aqui. Todas features numéricas como float.
# ## Questão 1
#
# Quais são as regiões (variável `Region`) presentes no _data set_? Retorne uma lista com as regiões únicas do _data set_ com os espaços à frente e atrás da string removidos (mas mantenha pontuação: ponto, hífen etc) e ordenadas em ordem alfabética.
# +
def q1():
    """Return the unique regions, whitespace-stripped, in alphabetical order."""
    regions = {region.strip() for region in countries['Region'].unique()}
    return sorted(regions)
q1()
# -
# ## Questão 2
#
# Discretizando a variável `Pop_density` em 10 intervalos com `KBinsDiscretizer`, seguindo o encode `ordinal` e estratégia `quantile`, quantos países se encontram acima do 90º percentil? Responda como um único escalar inteiro.
# +
def q2():
    """Count countries whose Pop_density falls above the 90th percentile."""
    discretizer = KBinsDiscretizer(n_bins=10, encode='ordinal', strategy='quantile')
    binned = discretizer.fit_transform(countries[['Pop_density']])
    # with 10 quantile bins, ordinal code 9 is the last one, i.e. above the 90th percentile
    return int((binned == 9).sum())
q2()
# -
# # Questão 3
#
# Se codificarmos as variáveis `Region` e `Climate` usando _one-hot encoding_, quantos novos atributos seriam criados? Responda como um único escalar.
# +
def q3():
    """Return how many columns one-hot encoding Region and Climate produces."""
    # Climate has missing values; fill them with a value absent from the series (0)
    # so the NaNs get a one-hot column of their own
    filled = countries[['Region', 'Climate']].fillna({'Climate': 0})
    encoded = OneHotEncoder().fit_transform(filled)
    return int(encoded.shape[1])
q3()
# -
# ## Questão 4
#
# Aplique o seguinte _pipeline_:
#
# 1. Preencha as variáveis do tipo `int64` e `float64` com suas respectivas medianas.
# 2. Padronize essas variáveis.
#
# Após aplicado o _pipeline_ descrito acima aos dados (somente nas variáveis dos tipos especificados), aplique o mesmo _pipeline_ (ou `ColumnTransformer`) ao dado abaixo. Qual o valor da variável `Arable` após o _pipeline_? Responda como um único float arredondado para três casas decimais.
test_country = [
'Test Country', 'NEAR EAST', -0.19032480757326514,
-0.3232636124824411, -0.04421734470810142, -0.27528113360605316,
0.13255850810281325, -0.8054845935643491, 1.0119784924248225,
0.6189182532646624, 1.0074863283776458, 0.20239896852403538,
-0.043678728558593366, -0.13929748680369286, 1.3163604645710438,
-0.3699637766938669, -0.6149300604558857, -0.854369594993175,
0.263445277972641, 0.5712416961268142
]
# +
def q4():
    """Arable value of `test_country` after a median-impute + standardize pipeline.

    The pipeline is fitted on the numeric columns of `countries` only; the
    result is rounded to three decimal places.
    """
    num_cols = countries.select_dtypes('number').columns
    num_pipeline = Pipeline(steps=[
        ('imputer', SimpleImputer(strategy='median')),
        ('scaler', StandardScaler()),
    ])
    transformer = ColumnTransformer(
        transformers=[('num', num_pipeline, num_cols)],
        remainder='drop'
    )
    transformer.fit(countries)
    # wrap the single observation in a DataFrame so the ColumnTransformer can select columns
    sample = pd.DataFrame([test_country], columns=countries.columns)
    transformed_row = transformer.transform(sample)[0]
    return float(round(transformed_row[num_cols.get_loc('Arable')], 3))
q4()
# -
# ## Questão 5
#
# Descubra o número de _outliers_ da variável `Net_migration` segundo o método do _boxplot_, ou seja, usando a lógica:
#
# $$x \notin [Q1 - 1.5 \times \text{IQR}, Q3 + 1.5 \times \text{IQR}] \Rightarrow x \text{ é outlier}$$
#
# que se encontram no grupo inferior e no grupo superior.
#
# Você deveria remover da análise as observações consideradas _outliers_ segundo esse método? Responda como uma tupla de três elementos `(outliers_abaixo, outliers_acima, removeria?)` ((int, int, bool)).
# +
def q5():
    """Boxplot-rule outlier counts for Net_migration.

    Returns (outliers_below, outliers_above, should_keep) where the boolean is
    True when the outliers amount to at most 10% of the data set.
    """
    values = countries['Net_migration'].dropna()
    first_quartile, third_quartile = values.quantile([0.25, 0.75])
    spread = third_quartile - first_quartile
    low_fence = first_quartile - 1.5 * spread
    high_fence = third_quartile + 1.5 * spread
    below = int((values < low_fence).sum())
    above = int((values > high_fence).sum())
    # rule of thumb: only remove outliers if they stay within 10% of the base
    return (below, above, (below + above) / countries.shape[0] <= 0.1)
q5()
# -
# ## Questão 6
# Para as questões 6 e 7 utilize a biblioteca `fetch_20newsgroups` de datasets de test do `sklearn`
#
# Considere carregar as seguintes categorias e o dataset `newsgroups`:
#
# ```
# categories = ['sci.electronics', 'comp.graphics', 'rec.motorcycles']
# newsgroup = fetch_20newsgroups(subset="train", categories=categories, shuffle=True, random_state=42)
# ```
#
#
# Aplique `CountVectorizer` ao _data set_ `newsgroups` e descubra o número de vezes que a palavra _phone_ aparece no corpus. Responda como um único escalar.
categories = ['sci.electronics', 'comp.graphics', 'rec.motorcycles']
newsgroup = fetch_20newsgroups(subset='train', categories=categories, shuffle=True, random_state=42)
# +
def q6():
    """Total number of occurrences of the word 'phone' in the newsgroup corpus."""
    vectorizer = CountVectorizer()
    counts = vectorizer.fit_transform(newsgroup.data)
    # vocabulary_ maps each token to its column index in the count matrix;
    # summing that column gives the corpus-wide frequency
    phone_column = vectorizer.vocabulary_['phone']
    return int(counts[:, phone_column].sum())
q6()
# -
# ## Questão 7
#
# Aplique `TfidfVectorizer` ao _data set_ `newsgroups` e descubra o TF-IDF da palavra _phone_. Responda como um único escalar arredondado para três casas decimais.
# +
def q7():
    """Summed TF-IDF of the word 'phone' over the corpus, rounded to 3 decimals."""
    vectorizer = TfidfVectorizer()
    weights = vectorizer.fit_transform(newsgroup.data)
    # vocabulary_ gives the matrix column for 'phone'; sum it across all documents
    phone_column = vectorizer.vocabulary_['phone']
    return float(weights[:, phone_column].sum().round(3))
q7()
# -
| data-science-4/main.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # python2.6
#
# - namedtuple
# - pathlib
# - dekorátory
# - list/dict/set comprehensions
# - ternary operator
# - lambda, map, filter, sort
# - hints/tips for debugging (in PyCharm also)
# - logging module
# - jupyter notebook as presentation
# - comprehensions
# - ternary operator
# # pathlib
# - say goodbye to working with paths as strings: `path = "C:\\" + project + "\\script.py"`
# ### Why another lib for manipulation with paths, if `os.path` is already there?
#
# Motivation:
#
# * **string-base** vs. **object-base** approach
# +
import os.path
path = "project\\module\\file.py"
print(os.path.dirname(path))
print(os.path.basename(path))
# -
print(os.path.join(path, '..', 'viewer.py'))
print(os.path.join(os.path.dirname(path), 'viewer.py'))
# #### Same example with `Pathlib`
# +
import pathlib
print(path)
print(pathlib.Path(path) / '..' / 'view.py')
print(pathlib.Path(path).parent / 'view.py')
# -
# ### `pathlib` features, API
path = pathlib.Path(r'c:\directory/somewhere\inside.py')
path.parts
path.exists()
path.parent
path.parent.parent
with path.open('r') as fd:
content = fd.read()
path.name
path.suffix
path.is_dir()
# returns False, because the file doesn't exist
path.is_file()
pathlib.Path('somedir').mkdir(parents=True)
# # `namedtuple`
# - immutable like ordinary `tuple`, but with named elements.
#
# Why tuple can sucks:
tup_address = 'Libušina třída', 1, 62300, 'Brno'
tup_address
# Which index contains **city**, `[2]` or `[3]`? 🤔
tup_address[3]
# * from `tuple` to `namedtuple`
# +
from collections import namedtuple
Address = namedtuple('Address', ['street', 'house', 'zipcode', 'city'])
#Address = namedtuple('Address', 'street house zipcode city')
nt_address = Address(city='Brno', zipcode=62300, street='Libušina třída', house=1)
nt_address = Address('Libušina třída', 1, 62300, 'Brno')
nt_address
# -
nt_address.city
nt_address.city = 'Krnov'
nt_address[3]
# * tuple as dict
nt_address._asdict()
# - use `dict` directly works, but you can't rely on keys
dict_address = {
'street': 'Libušina Třída',
'house': 1,
'zipcode': 62300,
'city': 'Brno'
}
# # dataclasses
# * like `namedtuple`, but mutable
# If you need mutable `namedtuple` go for this.
# +
from dataclasses import dataclass
@dataclass
class Address:
    """A mutable postal address record with a human-readable formatter."""

    street: str
    house: int
    zipcode: int
    city: str

    def post_address(self):
        """Format the address as '<street> <house>, <zipcode> <city>'."""
        return f'{self.street} {self.house}, {self.zipcode} {self.city}'
dc_address = Address('Libušina třída', 1, 62300, 'Brno')
dc_address
# -
dc_address.city
dc_address.city = 'Krno'
dc_address.city
str(dc_address)
dc_address.post_address()
# # filter, lambda, map function
# - `filter`
# - `lambda` is syntactic sugar around anonymous function with single expression
# - `map`
# +
# 1. filter just even number (2, 4, ...)
numbers = list(range(15))
# +
# simple solution
def filter_even(numbers):
    """Return a new list containing only the even values of *numbers*."""
    result = []
    for value in numbers:
        # skip odd values; keep everything divisible by two
        if value % 2:
            continue
        result.append(value)
    return result
print(filter_even(numbers))
# -
# define lambda function
is_even = lambda x: x % 2 == 0
print(is_even)
is_even(3), is_even(4)
# ### fiter
# +
even_numbers = filter(is_even, numbers)
#even_numbers = filter(lambda x: x % 2 == 0, numbers)
print(even_numbers)
print(list(even_numbers))
# -
# ### map
list(map(lambda x: f'Numero {x}.', numbers))
# ### sort, sorted
# `sort` - work in-place (it's a method of `list`), returns `None`
#
# `sorted` - work with copy, returns new iterable
# +
numbers = list(range(15))
# list.sort
print(numbers)
numbers.sort(reverse=True)
print(numbers)
# +
# sorted
numbers = list(range(15))
sorted_numbers = sorted(numbers, reverse=True)
print(numbers)
print(sorted_numbers)
# -
# # hints/tips for debugging
# - breakpoint
# - conditional breakpoint
# - change value at break (via expression evaluator)
# - interactive console (will modify values)
# - allow parallel run
# +
# # %load python2.6.debugging.py
class State:
    """Tiny demo object whose string form mirrors its `status` attribute."""

    status = '...'

    def __str__(self):
        return str(self.status)


state = State()
for i in range(15):
    if i == 10:
        # mutate the shared object mid-loop — a handy spot for a conditional breakpoint
        state.status = 'updated'
        a = 0
    print(f'State at index {i} {state}')
# -
# # logging
# - print is easy, but hard to maintain soon, when code grows
# - threadsafe
# - highly customizable (runtime, INI, JSON)
# +
# work only in fine, not Jupyter notebook!
import logging
import sys
#logging.basicConfig(level=logging.DEBUG)
logging.basicConfig(level=logging.DEBUG, format='%(asctime)s [%(name)s] %(levelname)s: %(message)s')
logging.info('from root logger')
logger_a = logging.getLogger('module A')
logger_b = logging.getLogger('module B')
logger_a.debug('a-message')
logger_b.info('b-message')
logger_b.error('err')
# -
# # jupyter notebook as interactive presentation
# `RISE` 💗, but it can be used also from `File - Export`
# !python -m pip install RISE
# - View - Cell toolbar - Slideshow
# - slide
# - subslide
# - notes
# - execute cell during presentation
# - `h` for Jupyter notebook help
# - click to `?` for RISE help
# - `<Space>`, `<Shift> + <Space>` - next, previous
# - `<Alt>+R` - toggle between edit/fullscreen mode
# # comprehensions
# syntax sugar.
# - list
# - dict
# - set
message = 'this is a sentence'
words_len = []
for word in message.split():
item = len(word)
words_len.append(item)
print(words_len)
[len(word) for word in message.split()]
{word: len(word) for word in message.split()}
{len(word) for word in message.split()}
# # ternary operator
# +
is_empty = True
if is_empty:
value = 'it is TRUE'
else:
value = 'FALSE'
print(value)
# +
is_empty = True
value = 'it is TRUE' if is_empty else 'FALSE'
print(value)
# -
# # decorators
#
# **Example**: simple implementation of `timeit` that collects stats
# +
import time
a = time.time()
time.sleep(1)
b = time.time()
print(b-a)
# -
# ### 1. wrap it into function, collect stats
# +
def heavy_load():
print('working...')
time.sleep(0.5)
print('done.')
a = time.time()
heavy_load()
b = time.time()
print(b - a)
# -
# ### 2. first decorator: function that returns function
# +
def decor(fn):
    """Identity decorator: prints once at decoration time, returns fn unchanged."""
    print('calling decor')
    return fn
@decor
def heavy_load():
print('working...')
time.sleep(0.5)
print('done.')
heavy_load()
print('')
heavy_load()
# -
# ### 3. modify returned function
# +
def decor(fn):
    """Decorator that announces every call before delegating to *fn*."""
    def wrapper():
        # runs on each invocation, not at decoration time
        print('calling decor')
        return fn()
    return wrapper
@decor
def heavy_load():
print('working...')
time.sleep(0.5)
print('done.')
heavy_load()
print('')
heavy_load()
# -
# ### 3. add measuring and collection of duration
# +
# module-level accumulator: one wall-clock duration per decorated call
stats = []


def timeit(fn):
    """Decorator that records the duration of every call to *fn* in `stats`."""
    def wrapper():
        print('calling decor')
        start = time.time()
        result = fn()
        stats.append(time.time() - start)
        return result
    return wrapper
@timeit
def heavy_load():
print('working...')
time.sleep(0.5)
print('done.')
heavy_load()
heavy_load()
heavy_load()
print(stats)
# -
| python-2/python2.6.notes.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: 'Python 3.7.4 64-bit (''.venv'': venv)'
# name: python37464bitvenvvenv3f7e8e4ea96948a48837abaca082db06
# ---
# + tags=[]
import pandas as pd
import nltk
from nltk.corpus import stopwords
from nltk import word_tokenize
from nltk import bigrams
from nltk.util import ngrams
from nltk.corpus import stopwords
from sklearn.feature_extraction.text import CountVectorizer
import itertools
import collections
#install plotly express, do some bar graphs
# import matplotlib.pyplot as plt
import plotly.express as px
from clean_tweet_tools.prep import prep_tweet_for_sent, prep_tweet_for_ngram
from nltk_tweet_tools.sentiment_scores import sentiment_analyzer_scores
pd.options.plotting.backend = "plotly"
pd.set_option('display.max_columns', None)
pd.set_option('display.max_colwidth', None)
# +
#TODO:
# [x] get vader sent scores
# [x] plot histogram of vader score
# [] plot bigrams and bigram frequency for corpus
# [] look into word embeddings
# [] entity recognition
# [] clustering of similar tweets using Named-Entity-Recognition algo (TF-IDF)
# --> 1) Make a vector respresentation of all texts in the data set, for example with tfidf technique.
# --> 2) or look into the Holmertz technique
#from sklearn.feature_extraction.text import TfidfVectorizer
#from sklearn.cluster import KMeans
#[] lemmatize and stem?
# + tags=[]
nltk.download('stopwords')
nltk.download('vader_lexicon')
# -
# English stop words from NLTK (set for O(1) membership tests)
stop_words = set(stopwords.words('english'))
# One tweet per JSON line; drop exact duplicate statuses in place
jan_data = pd.read_json('../data/data_json_lines/Jan.jsonl', lines=True)
jan_data.drop_duplicates(subset='status', inplace=True)
len(jan_data)
# Score each tweet with VADER after sentiment-oriented cleaning.
# NOTE(review): assumes sentiment_analyzer_scores returns a (label, compound) pair — confirm in nltk_tweet_tools
jan_data[['sentiment', 'VADER_compound_score']] = jan_data.apply(
    lambda row: pd.Series(sentiment_analyzer_scores(prep_tweet_for_sent(row['status']))), axis=1)
jan_data.head(20)
# Tokenize each status for later n-gram analysis (separate cleaning path from the sentiment one)
jan_data['tokens'] = jan_data.apply(lambda row: prep_tweet_for_ngram(row['status']), axis=1)
jan_data.tail()
# fig = jan_data['VADER_compound_score'].hist(figsize=(12,8))
# fig.show()
#hist of vader scores
fig = px.histogram(jan_data['VADER_compound_score'], title='VADER scores distribution', opacity=0.8,nbins=50,color_discrete_sequence=['indianred'],histfunc='avg')
fig.show()
fig = jan_data['VADER_compound_score'].hist(bins=[-1, -0.5,-.05, 0, 0.05,0.5, 1],figsize=(12,8))
fig.show()
# +
#bar graph of sent scores
# x = ['Product A', 'Product B', 'Product C', "d", "d"]
fig2 = px.bar(jan_data['sentiment'], )
# fig2.update_traces(marker_color='green')
fig2.update_traces(marker_color='indianred')
fig2.show()
# -
fig = jan_data["sentiment"].sort_values(ascending=True).plot.bar( )
fig.show()
# +
sent_col = jan_data[['sentiment', 'status']].groupby('sentiment').count().reset_index()
fig3 = px.bar(sent_col, x='sentiment', y='status')
fig3.show()
# -
sent_col
# +
# [] top unigrams, bigrams, trigrams before stopwords
# [] after stopwords
# -
| src/data_wrangling.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (PyTorch 1.8 Python 3.6 CPU Optimized)
# language: python
# name: python3__SAGEMAKER_INTERNAL__arn:aws:sagemaker:us-east-1:081325390199:image/1.8.1-cpu-py36
# ---
# # AWS Summit Atlanta 2022
# ## Using AWS Inferentia to optimize HuggingFace model inference
# Welcome to the AWS Summit Atlanta 2022 Inferentia Workshop Walkthrough !
#
# # Table of contents
# 1. [Introduction](#introduction)
# 1. [Setting up the environment](#setenv)
# 3. [Get model from HuggingFace Model Hub](#getmodel)
# 1. [Get the Tokenizer](#gettoken)
# 2. [Download models and prepare them for inference](#trace)
# 4. [Deploy default model to a GPU-based endpoint](#deploycpu)
# 1. [Perform a test GPU based inference](#testcpu)
# 5. [Compile and deploy the model on an Inferentia instance](#compiledeploy)
# 1. [Review changes to the inference code](#reviewchanges)
# 2. [Create and compile Pytorch model for the inf1 instance](#pytorchmodel)
# 3. [Deploy compiled model into the inf1 instance](#deployinf1)
# 4. [Perform a test inf1 based inference](#testinf1)
# 6. [Benchmark and comparison](#benchmark)
# 1. [Benchmark GPU based endpoint](#benchcpu)
# 2. [Benchmark Inferentia based endpoint](#benchinf1)
# 7. [Comparison and conclusions](#conclusions)
# 8. [Cleanup](#cleanup)
# ---
# # 1. Introduction <a name="introduction"></a>
# During this workshop, we will create two endpoints with one HuggingFace model each. We will use them for the task of paraphrase detection which is an NLP classification problem.
# These two endpoints will have the following configurations: a) GPU-based endpoint, where we will be deploying the model with no changes; and b) Inf1 instance based endpoint, where we will prepare and compile the model using SageMaker Neo before deploying.
# Finally, we will perform a latency and throughput performance comparison of both endpoints.
# [AWS Inferentia](https://aws.amazon.com/machine-learning/inferentia/) is Amazon's first ML chip designed to accelerate deep learning workloads and is part of a long-term strategy to deliver on this vision. AWS Inferentia is designed to provide high performance inference in the cloud, to drive down the total cost of inference, and to make it easy for developers to integrate machine learning into their business applications. AWS Inferentia chips deliver up to 2.3x higher throughput and up to 70% lower cost per inference than comparable current generation GPU-based Amazon EC2 instances, as we will confirm in the example notebook.
#
# [AWS Neuron](https://awsdocs-neuron.readthedocs-hosted.com/en/latest/) is a software development kit (SDK) for running machine learning inference using AWS Inferentia chips. It consists of a compiler, run-time, and profiling tools that enable developers to run high-performance and low latency inference using AWS Inferentia-based Amazon EC2 Inf1 instances. Using Neuron, you can bring your models that have been trained on any popular framework (PyTorch, TensorFlow, MXNet), and run them optimally on Inferentia. There is excellent support for Vision and NLP models especially, and on top of that we have released great features to help you make the most efficient use of the hardware, such as [dynamic batching](https://awsdocs-neuron.readthedocs-hosted.com/en/latest/neuron-guide/appnotes/perf/torch-neuron-dataparallel-app-note.html#dynamic-batching-description) or [Data Parallel](https://awsdocs-neuron.readthedocs-hosted.com/en/latest/neuron-guide/neuron-frameworks/pytorch-neuron/api-torch-neuron-dataparallel-api.html) inferencing.
#
# [SageMaker Neo](https://aws.amazon.com/sagemaker/neo/) saves you the effort of DIY model compilation, extending familiar SageMaker SDK API's to enable easy compilation for a [wide range](https://docs.aws.amazon.com/sagemaker/latest/APIReference/API_OutputConfig.html#API_OutputConfig_Contents) of platforms. This includes GPU and GPU-based instances, but also Inf1 instances; in this case, SageMaker Neo uses the Neuron SDK to compile your model.
#
# ---
# ### Setting up the environment <a name="setenv"></a>
# First, make sure you are using the Python 3 (Pytorch 1.8 Python 3.6 GPU Optimized) Kernel. And that you are working in the us-west-2 region unless instructed otherwise.
#
# Then, install ipywidgets library and restart the kernel to be able to use it.
# +
# %%capture
import IPython
import sys
# #!{sys.executable} -m pip install ipywidgets
#IPython.Application.instance().kernel.do_shutdown(True) # has to restart kernel so changes are used
# -
# STOP! Restart the Kernel, comment the cell above and continue.
# We will then install required Python packages. Also, we will create a default Amazon Sagemaker session, get the Amazon Sagemaker role and default Amazon S3 bucket.
# %%capture
# !pip install -U transformers
# !pip install -U sagemaker
# !pip install -U torch
# +
import sys
import transformers
import sagemaker
import torch
import boto3
sagemaker_session = sagemaker.Session()
role = sagemaker.get_execution_role()
sess_bucket = sagemaker_session.default_bucket()
# -
# ---
# ## 2. Get model from HuggingFace Model Hub <a name="getmodel"></a>
# For this workshop, we will use [Prompsit/paraphrase-bert-en](https://huggingface.co/Prompsit/paraphrase-bert-en) transformer model from HuggingFace Model Hub. It has been fine-tuned from a pretrained model called "bert-base-uncased". The model works comparing a pair of sentences, it determines the semantic similarity between them. If the two sentences convey the same meaning it is labelled as paraphrase, otherwise it is labeled as non-paraphrase.
# So it allows to evaluate paraphrases for a given phrase, answering the following question: Is "phrase B" a paraphrase of "phrase A"? and the resulting probabilities correspond to classes:
#
# 0: Not a paraphrase
# 1: It's a paraphrase
#
# This model doesn't expect to find punctuation marks or long pieces of text.
#
# ### Get the Tokenizer <a name="gettoken"></a>
# As a first step, we need to get the tokenizer. A tokenizer breaks a stream of text into tokens, and it is in charge of preparing the inputs for a model. We need it to create a sample input to interact with the model, and will get it from HuggingFace through the `transformers` library. It is important to set the `return_dict` parameter to `False` when instantiating the model. In `transformers` v4.x, this parameter is `True` by default and it enables the return of dict-like python objects containing the model outputs, instead of the standard tuples. Neuron compilation does not support dictionary-based model ouputs, and compilation would fail if we didn't explictly set it to `False`.
# +
tokenizer = transformers.AutoTokenizer.from_pretrained("Prompsit/paraphrase-bert-en")
model = transformers.AutoModelForSequenceClassification.from_pretrained(
"Prompsit/paraphrase-bert-en", return_dict=False
)
# -
# ### Download models and prepare them for inference <a name="trace"></a>
# We will download the model and create two files with different formats. The first one is the model itself with no changes. This one will be uploaded and used in the GPU based endpoint as it is. The second image is a traced Pytorch image of the model so we can compile it before deploying it to the inf1 instance.
#
# PyTorch models must be saved as a definition file (.pt or .pth) with input datatype of float32.
# To save the model, we will use torch.jit.trace followed by torch.save. This will save an object to a file ( a python pickle: pickle_module=pickle).
#
# Next, we will convert the saved model to a compressed tar file and upload it to an S3 bucket.
# As a final step, we will create a sample input to `jit.trace` of the model with PyTorch. We need this to have SageMaker Neo compile the model artifact.
#
# +
from pathlib import Path
# Create directories for the two model artifact flavours (plain and jit-traced)
Path("normal_model/").mkdir(exist_ok=True)
Path("traced_model/").mkdir(exist_ok=True)
# Prepare a sample input for jit model tracing; the pair is (sentence, same sentence)
# NOTE(review): the text says "San Francisco" although this workshop is Atlanta — harmless for tracing, but confirm
seq_0 = "Welcome to AWS Summit San Francisco 2022! Thank you for attending the workshop on using Huggingface transformers on Inferentia instances."
seq_1 = seq_0
max_length = 512
# Pad/truncate to a fixed length of 512 so the traced graph has a static input shape
tokenized_sequence_pair = tokenizer.encode_plus(
    seq_0, seq_1, max_length=max_length, padding="max_length", truncation=True, return_tensors="pt"
)
example = tokenized_sequence_pair["input_ids"], tokenized_sequence_pair["attention_mask"]
# Trace in eval mode so dropout/batch-norm behave deterministically
traced_model = torch.jit.trace(model.eval(), example)
model.save_pretrained('normal_model/')
traced_model.save("traced_model/model.pth")  # The `.pth` extension is required.
# -
# !tar -czvf normal_model.tar.gz -C normal_model . && mv normal_model.tar.gz normal_model/
# !tar -czvf traced_model.tar.gz -C traced_model . && mv traced_model.tar.gz traced_model/
# We upload the traced model `tar.gz` file to Amazon S3, where the compilation job will download it from
# +
normal_model_url = sagemaker_session.upload_data(
path="normal_model/normal_model.tar.gz",
key_prefix="neuron-experiments/bert-seq-classification/normal-model",
)
traced_model_url = sagemaker_session.upload_data(
path="traced_model/traced_model.tar.gz",
key_prefix="neuron-experiments/bert-seq-classification/traced-model",
)
# -
# ---
# ## 3. Deploy default model to a GPU-based endpoint <a name="deploycpu"></a>
# As a first step, we create model from the Hugging Face Model Class.
# We will be passing the `normal_model_url` as the `model_data` parameter to the `HuggingFaceModel` API.
# Notice that we are passing `inference.py` as the entry point script; also, the packages defined in the requirements file within the `source_dir` will automatically be installed in the endpoint instance. In this case we will use the `transformers` library version that is compatible with Inferentia instances (v. 4.15.0)
# +
from sagemaker.huggingface import HuggingFaceModel
from sagemaker.predictor import Predictor
from datetime import datetime
prefix = "neuron-experiments/bert-seq-classification"
flavour = "normal"
date_string = datetime.now().strftime("%Y%m-%d%H-%M%S")
normal_sm_model = HuggingFaceModel(
model_data=normal_model_url,
predictor_cls=Predictor,
transformers_version="4.12.3",
pytorch_version='1.9.1',
role=role,
entry_point="inference.py",
source_dir="code",
py_version="py38",
name=f"{flavour}-distilbert-{date_string}",
env={"SAGEMAKER_CONTAINER_LOG_LEVEL": "10"},
)
# -
# Then, we create the endpoint and deploy the model for inference. This process will take about 4 minutes to complete. As you can see, one line of code will create a [real time endpoint](https://docs.aws.amazon.com/sagemaker/latest/dg/realtime-endpoints.html) for real time inference that you can integrate to your applications. These endpoints are fully managed and support autoscaling.
# +
# %%time
from sagemaker.serializers import JSONSerializer
from sagemaker.deserializers import JSONDeserializer
hardware = "g4dn"
normal_predictor = normal_sm_model.deploy(
instance_type="ml.g4dn.xlarge",
initial_instance_count=1,
endpoint_name=f"paraphrase-bert-en-{hardware}-{date_string}",
serializer=JSONSerializer(),
deserializer=JSONDeserializer(),
)
# -
# ### Perform a test inference on GPU <a name="testcpu"></a>
# We will perform a quick test to see if the endpoint is responding as expected. We will send sample sequences.
# +
# Predict with model endpoint
import time

client = boto3.client('sagemaker')

# Poll until the endpoint is ready. The original loop called describe_endpoint
# in a tight busy-wait and would spin forever if the endpoint failed; sleep
# between polls and bail out explicitly on a Failed status.
status = ""
while status not in ('InService', 'Failed'):
    endpoint_response = client.describe_endpoint(EndpointName=f"paraphrase-bert-en-{hardware}-{date_string}")
    status = endpoint_response['EndpointStatus']
    if status not in ('InService', 'Failed'):
        time.sleep(15)
if status == 'Failed':
    raise RuntimeError(f"Endpoint paraphrase-bert-en-{hardware}-{date_string} failed to deploy")

# Send a payload (the sentence pair) to the endpoint and receive the inference
payload = seq_0, seq_1
normal_predictor.predict(payload)
# -
# ---
# ## 4. Compile and deploy the model on an Inferentia instance <a name="compiledeploy"></a>
# In this section we will cover the compilation and deployment of the model into the inf1 instance. We will also review the changes in the inference code.
# ### Review inference code <a name="reviewchanges"></a>
# If you open `inference.py` you will see a few functions:
# a) `model_fn` which receives the model directory and is responsible for loading and returning the model.
# b) `input_fn` and `output_fn` functions that are in charge of pre-processing/checking content types of input and output to the endpoint.
# And c) `predict_fn`, receives the outputs of `model_fn` and `input_fn` and defines how the model will run inference (it recieves the loaded model and the deserialized/pre-processed input data).
# All of this code runs inside the endpoint once it is created.
# !pygmentize code/inference.py
# In this case, notice that we will load the corresponding model depending on where the function is deployed. `model_fn` will return a tuple containing both the model and its corresponding tokenizer. Both the model and the input data will be sent `.to(device)`, which can be a GPU or CPU.
#
# Also, notice the `predict_fn`. In this function we receive the string for inference, convert it to the format the model accepts, ask the model for the inference, receive the inference and format it in clear text as a return string. In real life you might not need to do this interpretation since your application might be fine receiving the predicted class and using it directly.
# ### Create and compile Pytorch model for the inf1 instance <a name="pytorchmodel"></a>
# We will now create a new `Huggingface` model that will use the `inference.py` file described above as its entry point script.
# +
from sagemaker.huggingface import HuggingFaceModel
from sagemaker.predictor import Predictor
from datetime import datetime
from sagemaker.serializers import JSONSerializer
from sagemaker.deserializers import JSONDeserializer
date_string = datetime.now().strftime("%Y%m-%d%H-%M%S")
hardware = "inf1"
compilation_job_name = f"paraphrase-bert-en-{hardware}-" + date_string
output_model_path = f"s3://{sess_bucket}/{prefix}/neo-compilations19/{hardware}-model"
compiled_inf1_model = HuggingFaceModel(
model_data=traced_model_url,
predictor_cls=Predictor,
transformers_version="4.12.3",
pytorch_version='1.9.1',
role=role,
entry_point="inference.py",
source_dir="code",
py_version="py37",
name=f"distilbert-{date_string}",
env={"SAGEMAKER_CONTAINER_LOG_LEVEL": "10"},
)
# -
# We are ready to compile the model! Two additional notes:
# * HuggingFace models should be compiled to `dtype` `int64`
# * the format for `compiler_options` differs from the standard Python `dict` that you can use when compiling for "normal" instance types; for inferentia, you must provide a JSON string with CLI arguments, which correspond to the ones supported by the [Neuron Compiler](https://awsdocs-neuron.readthedocs-hosted.com/en/latest/neuron-guide/neuron-cc/command-line-reference.html) (read more about `compiler_options` [here](https://docs.aws.amazon.com/sagemaker/latest/APIReference/API_OutputConfig.html#API_OutputConfig_Contents))
#
# #### Model compilation
# Let's compile the model (this will take around 10 minutes to complete):
# +
# %%time
import json
# Compile the traced model for Inferentia with SageMaker Neo.  For inf1
# targets, compiler_options must be a JSON string of Neuron-compiler CLI
# flags (not a dict), and HuggingFace models are compiled with int64 inputs.
compiled_inf1_model = compiled_inf1_model.compile(
    target_instance_family=f"ml_{hardware}",
    # Fixed input shape: batch size 1, sequence length 512 for both tensors.
    input_shape={"input_ids": [1, 512], "attention_mask": [1, 512]},
    job_name=compilation_job_name,
    role=role,
    framework="pytorch",
    framework_version="1.9.1",
    output_path=output_model_path,
    compiler_options=json.dumps("--dtype int64"),
    compile_max_run=900,  # abort the compilation job after 15 minutes
)
# -
# #### Compiler logs and artifacts
# Open a new browser tab and navigate to the Sagemaker Console. Under the Images menu on the left you will find the menu Inference and inside "Compilation Jobs". Here is where you will find the job that was executed in the previous cell. Look for the job name to get its details. If you scroll down you will find a section called "Monitor" you can access the compiler logs hosted in Cloudwatch. Look for the successful completion of the job in a line similar to the following:
# + active=""
# localhost compiler-container-Primary[4736]: Compiler status PASS
# and localhost compiler-container-Primary[4736]: INFO:Neuron:Neuron successfully compiled 1 sub-graphs, Total fused subgraphs = 1, Percent of model sub-graphs successfully compiled = 100.0%
# -
# Also, in the Output section, you will find a link to the S3 compiled model artifact. Click on it so see where it was stored.
# Show where to find the compilation job and its S3 output artifact.
print(f"Compilation job name: {compilation_job_name} \nOutput model path in S3: {output_model_path}")
# ### Deploy compiled model into the inf1 instance <a name="deployinf1"></a>
# After successful compilation, we deploy the new model to an inf1.xlarge instance based [real time endpoint](https://docs.aws.amazon.com/sagemaker/latest/dg/realtime-endpoints.html). As you can see, the one line of code procedure is similar to creating a GPU based instance.
# +
# %%time
# %%time
# Deploy the Neo-compiled model to a real-time endpoint backed by one
# inf1.xlarge instance.  wait=False returns immediately; endpoint
# readiness is polled explicitly in a later cell.
compiled_inf1_predictor = compiled_inf1_model.deploy(
    instance_type="ml.inf1.xlarge",
    initial_instance_count=1,
    endpoint_name=f"paraphrase-bert-en-{hardware}-{date_string}",
    serializer=JSONSerializer(),      # request body encoded as JSON
    deserializer=JSONDeserializer(),  # response parsed from JSON
    wait=False
)
# -
# ### Perform a test inference <a name="testinf1"></a>
# As a final test, we first make sure the endpoint is up and running in an `InService` state, and then perform a simple inference sending two sequences of text and wait for the response.
# +
# Predict with model endpoint
import time
client = boto3.client('sagemaker')
# Make sure the endpoint is up and running first.  FIX: the original loop
# polled DescribeEndpoint continuously with no delay (a busy-wait that can
# trip API throttling) and would spin forever if the deployment failed.
status = ""
while status != 'InService':
    endpoint_response = client.describe_endpoint(EndpointName=f"paraphrase-bert-en-{hardware}-{date_string}")
    status = endpoint_response['EndpointStatus']
    if status == 'Failed':
        # Surface the failure instead of looping forever.
        raise RuntimeError(endpoint_response.get('FailureReason', 'Endpoint creation failed'))
    if status != 'InService':
        time.sleep(15)  # back off between polls
# Send a payload to the endpoint and receive the inference
payload = seq_0, seq_1
compiled_inf1_predictor.predict(payload)
# -
# ---
# ## 5. Benchmark and comparison <a name="benchmark"></a>
# Now that we have both endpoints online, we will perform a benchmark using Python's `threading` module. In each benchmark, we start 5 threads that will each make 100 requests to the model endpoint. We measure the inference latency for each request, and we also measure the total time to finish the task, so that we can get an estimate of the request throughput/second.
# ### Benchmark GPU based endpoint <a name="benchcpu"></a>
# +
# %%time
# Run the benchmark: `num_threads` concurrent clients each issue
# `num_preds` sequential requests against the GPU endpoint.  Per-request
# latencies go into `times`; list.append is atomic in CPython, so the
# threads can share the list without a lock.
import threading
import time
num_preds = 100
num_threads = 5
times = []
def predict():
    """Issue `num_preds` requests, recording (thread_id, latency) pairs."""
    thread_id = threading.get_ident()
    print(f"Thread {thread_id} started")
    for i in range(num_preds):
        tick = time.time()
        response = normal_predictor.predict(payload)
        tock = time.time()
        times.append((thread_id, tock - tick))
# FIX: use plain loops instead of list comprehensions evaluated only for
# their side effects, and record the start time *before* launching the
# threads so the derived throughput covers the whole run.
threads = [threading.Thread(target=predict, daemon=False) for i in range(num_threads)]
start = time.time()
for t in threads:
    t.start()
# Wait for threads, get an estimate of total time
for t in threads:
    t.join()
end = time.time() - start
# +
# Display results
from matplotlib.pyplot import hist, title, show, xlim
import numpy as np
# Throughput: total completed requests over total wall-clock time.
TPS_GPU = (num_preds * num_threads) / end
# Keep only the duration component of each (thread_id, duration) sample.
t_GPU = [duration for _tid, duration in times]
latency_percentiles = np.percentile(t_GPU, q=[50, 90, 95, 99])
# p95 latency, converted from seconds to milliseconds.
latency_GPU = latency_percentiles[2] * 1000
hist(t_GPU, bins=100)
title("Request latency histogram on GPU")
show()
print("==== Default HuggingFace model on GPU benchmark ====\n")
print(f"95 % of requests take less than {latency_GPU} ms")
print(f"Rough request throughput/second is {TPS_GPU}")
# -
# We can see that request latency is in the 1-1.2 second range, and throughput is ~4.5 TPS.
# ### Benchmark Inferentia based endpoint <a name="benchinf1"></a>
# +
# %%time
# Run benchmark: same harness as the GPU benchmark, but against the
# Neo-compiled inf1 endpoint and with 3x the requests per thread.
import threading
import time
num_preds = 300
num_threads = 5
times = []
def predict():
    """Issue `num_preds` requests, recording (thread_id, latency) pairs."""
    thread_id = threading.get_ident()
    print(f"Thread {thread_id} started")
    for i in range(num_preds):
        tick = time.time()
        response = compiled_inf1_predictor.predict(payload)
        tock = time.time()
        times.append((thread_id, tock - tick))
# FIX: plain loops instead of side-effect list comprehensions, and the
# start time is taken *before* the threads launch so the total-time (and
# throughput) estimate covers the whole run.
threads = [threading.Thread(target=predict, daemon=False) for i in range(num_threads)]
start = time.time()
for t in threads:
    t.start()
for t in threads:
    t.join()
end = time.time() - start
# +
# Display results
from matplotlib.pyplot import hist, title, show, xlim
import numpy as np
# Throughput: total completed requests over total wall-clock time.
TPS_inf1 = (num_preds * num_threads) / end
# Keep only the duration component of each (thread_id, duration) sample.
t_inf1 = [duration for _tid, duration in times]
latency_percentiles = np.percentile(t_inf1, q=[50, 90, 95, 99])
# p95 latency, converted from seconds to milliseconds.
latency_inf1 = latency_percentiles[2] * 1000
hist(t_inf1, bins=100)
title("Request latency histogram on Inferentia")
show()
print("==== Default HuggingFace model on inf1 benchmark ====\n")
print(f"95 % of requests take less than {latency_inf1} ms")
print(f"Rough request throughput/second is {TPS_inf1}")
# -
# We can see that request latency is in the 0.02-0.05 second range, and throughput is ~157 TPS.
# ---
# # 6. Conclusion <a name="conclusions"></a>
# Summarize the latency and throughput improvements measured above.
print(f"Using inf1 instances latency dropped to a {latency_inf1:.2f} millisecond range from {latency_GPU:.2f} ms on a GPU endpoint.")
print(f"Also, The average throughput increased to {TPS_inf1:.2f} TPS from {TPS_GPU:.2f} TPS on the GPU.")
# This increase in performance obtained from using inf1 instances, paired with the cost reduction and the use of known SageMaker SDK APIs, enables new benefits with little development effort and a gentle learning curve.
# * To learn more about how to deploy Hugging Face models through Sagemaker onto Inf1, please watch their latest [Webinar](https://www.youtube.com/watch?v=3fulTyMXhWQ), and read their latest [blog post](https://huggingface.co/blog/bert-inferentia-sagemaker).
# * For more information about Inferentia, please see the AWS EC2 Inf1 [website](https://aws.amazon.com/ec2/instance-types/inf1/) or check out other Tutorials available online [here](https://awsdocs-neuron.readthedocs-hosted.com/en/latest/neuron-intro/tutorials.html).
# * You can learn more about Inferentia performance on the [Neuron Inference Performance](https://awsdocs-neuron.readthedocs-hosted.com/en/latest/neuron-guide/benchmark/index.html) pages
#
# ---
# # 7. Clean up <a name="cleanup"></a>
# Delete the models and release the endpoints.
# Delete the model artifacts, then tear down both hosted endpoints so the
# account stops accruing per-instance charges.
normal_predictor.delete_model()
normal_predictor.delete_endpoint()
compiled_inf1_predictor.delete_model()
compiled_inf1_predictor.delete_endpoint()
| aws_summit_2022_inf1_bert_compile_and_deploy_walkthrough.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Training and Export
#
# In this notebook, I train and export a model to identify dog breeds from photos.
# +
import numpy as np
import pandas as pd
import tensorflow as tf
import tensorflow_hub as hub
import utils
# -
# ## Data
#
# Getting the data here is easy, since I did all of the hard work in the data processing script.
# First, I load in the label vocabulary from a saved numpy array.
# Label vocabulary (dog breed names) saved by the data-processing script.
label_vocab = np.load('data/labelvocab.npy')
n_classes = np.shape(label_vocab)[0]
# Then, I load in the basis for the transfer learning model so I can get its input size. I'm using the one of the pre-trained MobileNet V2 models from Tensorflow Hub because it works very well on limited resources, so I won't need anything fancy (or expensive) to serve the model.
# +
# Feature column that runs images through a pre-trained MobileNet V2
# embedding module from TF Hub; query the module for its expected input size.
image_col = hub.image_embedding_column("image", "https://tfhub.dev/google/imagenet/mobilenet_v2_100_224/feature_vector/2")
height, width = hub.get_expected_image_size(image_col.module_spec)
depth = hub.get_num_image_channels(image_col.module_spec)
size = (height, width, depth)
# -
# The input function here is pretty straightforward. it just loads the TFRecords at the given filename, decodes them, shuffles them, and batches them. The function returns a lambda function so I can make versions for both training and validation data.
# +
def make_input_fn(fname, repeat=1, batch_size=256):
    """Build an Estimator input_fn streaming decoded images from TFRecords.

    Records at `fname` are decoded (via utils.decode_image_example),
    shuffled, repeated `repeat` times, batched and prefetched; the
    returned callable yields one batch per call.
    """
    dataset = tf.data.TFRecordDataset(fname)
    dataset = dataset.map(lambda record: utils.decode_image_example(record, size))
    dataset = dataset.shuffle(batch_size * 2)  # buffer size is arbitrary
    dataset = dataset.repeat(repeat)
    dataset = dataset.batch(batch_size)
    dataset = dataset.prefetch(2)
    def input_fn():
        return dataset.make_one_shot_iterator().get_next()
    return input_fn
train_input_fn = make_input_fn('data/dogs224_train.tfrecord', 3)
valid_input_fn = make_input_fn('data/dogs224_valid.tfrecord')
# -
# ## Model
#
# Here's the fun (and slow part): training the model. Keeping with my theme of simplicity, I train a canned linear classifier that consumes the output of MobileNet and outputs a prediction in terms of our labels.
# Linear classifier trained on top of the (frozen) MobileNet embedding column.
est = tf.estimator.LinearClassifier(
    [image_col],
    n_classes=n_classes,
    label_vocabulary=list(label_vocab)
)
# I turn down log verbosity here because TF Hub modules produce a monumental amount of log spam when they first load in. I also periodically print evaluation metrics from the validation data.
tf.logging.set_verbosity(tf.logging.WARN)
# Alternate training and evaluation: each est.train pass consumes the
# (3x-repeated) training dataset, then validation metrics are printed.
for _ in range(5):
    est.train(train_input_fn)
    print(est.evaluate(valid_input_fn))
# My serving input function takes in a vector (of unknown length) of strings that represent encoded images. They're then preprocessed and resized in the same manner as the training data (with the same function) before being sent to the model for prediction.
def serving_input_fn():
    """Serving-time input: a batch of serialized tf.Example protos.

    Each Example holds an encoded image string under the "image" key.
    Images are decoded and resized with the same utils.preprocess_image
    used on the training data, keeping train/serve preprocessing in sync.
    """
    # Placeholder for a variable-length batch of serialized Examples.
    receiver = tf.placeholder(tf.string, shape=(None))
    examples = tf.parse_example(
        receiver,
        {
            "image": tf.FixedLenFeature((), tf.string),
        }
    )
    # size[:-1] drops the channel count: preprocessing wants (height, width).
    decode_and_prep = lambda image: utils.preprocess_image(image, size[:-1])
    images = tf.map_fn(decode_and_prep, examples["image"],
                       tf.float32)
    return tf.estimator.export.ServingInputReceiver(
        {"image": images},
        receiver,
    )
est.export_savedmodel("serving/model/", serving_input_fn)
| complete_model.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
# data analysis and wrangling
import pandas as pd
import numpy as np
import random as rnd
# visualization
import seaborn as sns
import matplotlib.pyplot as plt
# %matplotlib inline
# machine learning
from sklearn.linear_model import LogisticRegression
from sklearn.ensemble import RandomForestClassifier
# -
# Load the Kaggle Titanic splits; `combine` lets the same feature
# transformations be applied to both frames in one loop.
train_df = pd.read_csv('./train.csv')
test_df = pd.read_csv('./test.csv')
combine = [train_df, test_df]
train_df
print(train_df.__class__)
print("### shape")
print(train_df.shape)
print("### columns")
print(train_df.columns)
train_df.describe()
train_df.describe(include=['O'])  # summary of object (string) columns only
# Survival rate grouped by each candidate categorical feature.
train_df[['Pclass', 'Survived']].groupby(['Pclass'], as_index=False).mean().sort_values(by='Survived', ascending=False)
train_df[["Sex", "Survived"]].groupby(['Sex'], as_index=False).mean().sort_values(by='Survived', ascending=False)
train_df[["SibSp", "Survived"]].groupby(['SibSp'], as_index=False).mean().sort_values(by='Survived', ascending=False)
train_df[["Parch", "Survived"]].groupby(['Parch'], as_index=False).mean().sort_values(by='Survived', ascending=False)
# Age distribution split by survival.
g = sns.FacetGrid(train_df, col='Survived')
g.map(plt.hist, 'Age', bins=20)
# grid = sns.FacetGrid(train_df, col='Pclass', hue='Survived')
grid = sns.FacetGrid(train_df, col='Survived', row='Pclass', size=2.2, aspect=1.6)
grid.map(plt.hist, 'Age', alpha=.5, bins=20)
grid.add_legend();
grid = sns.FacetGrid(train_df, row='Embarked', size=2.2, aspect=1.6)
grid.map(sns.pointplot, 'Pclass', 'Survived', 'Sex', palette='deep')
grid.add_legend()
grid = sns.FacetGrid(train_df, row='Embarked', col='Survived', size=2.2, aspect=1.6)
grid.map(sns.barplot, 'Sex', 'Fare', alpha=.5, ci=None)
grid.add_legend()
# +
print("Before", train_df.shape, test_df.shape, combine[0].shape, combine[1].shape)
# Ticket and Cabin carry little usable signal (high cardinality / mostly
# missing), so drop them from both splits.
train_df = train_df.drop(['Ticket', 'Cabin'], axis=1)
test_df = test_df.drop(['Ticket', 'Cabin'], axis=1)
combine = [train_df, test_df]
"After", train_df.shape, test_df.shape, combine[0].shape, combine[1].shape
# +
# Pull the honorific ("Mr", "Miss", ...) out of the Name column.
# FIX: use a raw string so "\." is a literal regex dot rather than an
# invalid string escape (a SyntaxWarning on modern Python).
for dataset in combine:
    dataset['Title'] = dataset.Name.str.extract(r' ([A-Za-z]+)\.', expand=False)
pd.crosstab(train_df['Title'], train_df['Survived'])
# +
# Collapse rare titles into a single "Rare" bucket and normalize French /
# alternate spellings to their common equivalents.
for dataset in combine:
    dataset['Title'] = dataset['Title'].replace(['Lady', 'Countess','Capt', 'Col', 'Don', 'Dr', 'Major', 'Rev', 'Sir', 'Jonkheer', 'Dona'], 'Rare')
    dataset['Title'] = dataset['Title'].replace('Mlle', 'Miss')
    dataset['Title'] = dataset['Title'].replace('Ms', 'Miss')
    dataset['Title'] = dataset['Title'].replace('Mme', 'Mrs')
train_df[['Title', 'Survived']].groupby(['Title'], as_index=False).mean()
# +
# Encode titles as small integers; titles outside the mapping become NaN
# and are then filled with 0.
title_mapping = {"Mr": 1, "Miss": 2, "Mrs": 3, "Master": 4, "Rare": 5}
for dataset in combine:
    dataset['Title'] = dataset['Title'].map(title_mapping)
    dataset['Title'] = dataset['Title'].fillna(0)
train_df['Title']
# -
# Name is superseded by Title; PassengerId carries no signal (it is kept
# in test_df because the submission file needs it).
train_df = train_df.drop(['Name', 'PassengerId'], axis=1)
test_df = test_df.drop(['Name'], axis=1)
combine = [train_df, test_df]
train_df.shape, test_df.shape
# +
# Binary-encode Sex: female -> 1, male -> 0.
for dataset in combine:
    dataset['Sex'] = dataset['Sex'].map( {'female': 1, 'male': 0} ).astype(int)
train_df.head()
# -
grid = sns.FacetGrid(train_df, row='Pclass', col='Sex', size=2.2, aspect=1.6)
grid.map(plt.hist, 'Age', alpha=.5, bins=20)
grid.add_legend()
# +
# Impute missing ages with the median age of each (Sex, Pclass) cell,
# rounded to the nearest half year, then cast Age to int.
guess_ages = np.zeros((2,3))
guess_ages
for dataset in combine:
    for i in range(0, 2): # Sex
        for j in range(0, 3): # Pclass
            guess_df = dataset[(dataset['Sex'] == i) & (dataset['Pclass'] == j+1)]['Age'].dropna()
            age_guess = guess_df.median()
            # Convert random age float to nearest .5 age
            guess_ages[i,j] = int( age_guess/0.5 + 0.5 ) * 0.5
    for i in range(0, 2):
        for j in range(0, 3):
            dataset.loc[ dataset.Age.isnull() & (dataset.Sex == i) & (dataset.Pclass == j + 1), 'Age'] = guess_ages[i,j]
    dataset['Age'] = dataset['Age'].astype(int)
train_df.head()
# -
# Discretize Age into 5 equal-width bands to inspect per-band survival.
train_df['AgeBand'] = pd.cut(train_df['Age'], 5)
train_df[['AgeBand', 'Survived']].groupby(['AgeBand'], as_index=False).mean().sort_values(by='AgeBand', ascending=True)
# Replace Age with its ordinal band index.  FIX: the final line was
# missing its "= 4" assignment, so passengers older than 64 kept their
# raw age instead of being mapped into band 4.
for dataset in combine:
    dataset.loc[ dataset['Age'] <= 16, 'Age'] = 0
    dataset.loc[(dataset['Age'] > 16) & (dataset['Age'] <= 32), 'Age'] = 1
    dataset.loc[(dataset['Age'] > 32) & (dataset['Age'] <= 48), 'Age'] = 2
    dataset.loc[(dataset['Age'] > 48) & (dataset['Age'] <= 64), 'Age'] = 3
    dataset.loc[ dataset['Age'] > 64, 'Age'] = 4
train_df.head()
# The band boundaries are no longer needed once Age is ordinal.
train_df = train_df.drop(['AgeBand'], axis=1)
combine = [train_df, test_df]
train_df.head()
# +
# FamilySize = siblings/spouses + parents/children + the passenger.
for dataset in combine:
    dataset['FamilySize'] = dataset['SibSp'] + dataset['Parch'] + 1
train_df[['FamilySize', 'Survived']].groupby(['FamilySize'], as_index=False).mean().sort_values(by='Survived', ascending=False)
# +
# IsAlone flags passengers travelling with no family members.
for dataset in combine:
    dataset['IsAlone'] = 0
    dataset.loc[dataset['FamilySize'] == 1, 'IsAlone'] = 1
train_df[['IsAlone', 'Survived']].groupby(['IsAlone'], as_index=False).mean()
# +
# IsAlone captures the useful signal, so drop the raw family columns.
train_df = train_df.drop(['Parch', 'SibSp', 'FamilySize'], axis=1)
test_df = test_df.drop(['Parch', 'SibSp', 'FamilySize'], axis=1)
combine = [train_df, test_df]
train_df.head()
# +
# Interaction feature: ordinal age band times passenger class.
for dataset in combine:
    dataset['Age*Class'] = dataset.Age * dataset.Pclass
train_df.loc[:, ['Age*Class', 'Age', 'Pclass']].head(10)
# -
# Fill missing Embarked values with the most frequent port.
freq_port = train_df.Embarked.dropna().mode()[0]
freq_port
# +
for dataset in combine:
    dataset['Embarked'] = dataset['Embarked'].fillna(freq_port)
train_df[['Embarked', 'Survived']].groupby(['Embarked'], as_index=False).mean().sort_values(by='Survived', ascending=False)
# +
# Encode the embarkation port as an integer.
for dataset in combine:
    dataset['Embarked'] = dataset['Embarked'].map( {'S': 0, 'C': 1, 'Q': 2} ).astype(int)
train_df.head()
# -
# The single missing test-set fare is imputed with the median fare.
test_df['Fare'].fillna(test_df['Fare'].dropna().median(), inplace=True)
test_df.head()
# Quartile-based fare bands (qcut gives equal-frequency bins).
train_df['FareBand'] = pd.qcut(train_df['Fare'], 4)
train_df[['FareBand', 'Survived']].groupby(['FareBand'], as_index=False).mean().sort_values(by='FareBand', ascending=True)
# +
# Replace Fare with its ordinal quartile index, then drop the band labels.
for dataset in combine:
    dataset.loc[ dataset['Fare'] <= 7.91, 'Fare'] = 0
    dataset.loc[(dataset['Fare'] > 7.91) & (dataset['Fare'] <= 14.454), 'Fare'] = 1
    dataset.loc[(dataset['Fare'] > 14.454) & (dataset['Fare'] <= 31), 'Fare'] = 2
    dataset.loc[ dataset['Fare'] > 31, 'Fare'] = 3
    dataset['Fare'] = dataset['Fare'].astype(int)
train_df = train_df.drop(['FareBand'], axis=1)
combine = [train_df, test_df]
train_df.head(10)
# -
test_df.head(10)
# +
# Training features/labels; PassengerId stays out of the test features.
X_train = train_df.drop("Survived", axis=1)
Y_train = train_df["Survived"]
X_test = test_df.drop("PassengerId", axis=1).copy()
X_train.shape, Y_train.shape, X_test.shape
X_train
# +
# Logistic Regression
logreg = LogisticRegression()
logreg.fit(X_train, Y_train)
Y_pred = logreg.predict(X_test)
# Training-set accuracy (optimistic: no held-out validation here).
acc_log = round(logreg.score(X_train, Y_train) * 100, 2)
acc_log
# +
# Inspect the sign/magnitude of each feature's logistic-regression weight.
coeff_df = pd.DataFrame(train_df.columns.delete(0))
coeff_df.columns = ['Feature']
coeff_df["Correlation"] = pd.Series(logreg.coef_[0])
coeff_df.sort_values(by='Correlation', ascending=False)
# +
# Random Forest
random_forest = RandomForestClassifier(n_estimators=100)
random_forest.fit(X_train, Y_train)
Y_pred = random_forest.predict(X_test)  # overwrites the logistic predictions
random_forest.score(X_train, Y_train)
acc_random_forest = round(random_forest.score(X_train, Y_train) * 100, 2)
acc_random_forest
# +
# Kaggle submission file built from the random-forest predictions.
submission = pd.DataFrame({
    "PassengerId": test_df["PassengerId"],
    "Survived": Y_pred
})
print(submission)
submission.to_csv('./submission.csv', index=False)
| tasks/titanic_tutorial.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
from orbit.utils.dataset import load_iclaims
from orbit.eda import eda_plot
import orbit.constants.palette as palette
import seaborn as sns
import pandas as pd
import numpy as np
from orbit.models import LGT, DLT
import arviz as az
from orbit.diagnostics.plot import plot_param_diagnostics, plot_predicted_data
from orbit.utils.plot import get_orbit_style
import matplotlib.pyplot as plt
# %matplotlib inline
# -
# %load_ext autoreload
# %autoreload 2
import matplotlib.pyplot as plt
import matplotlib.colors as clr
from matplotlib import rc
# # Orbit Signature Palette and Plot Style
#
# Orbit provides a set of qualitative, sequential and diverging palettes to choose from, and a unique Orbit signature plotting style to create unique and visually pleasing charts.
#
# To use for Orbit Palette
#
# import orbit.constants.palette as palette
#
# specify choice of colors for example: palette.OrbitColorMap.BLUE_GRADIENT.value
#
# Users have the option to use Orbit style for the entire notebook or just for individual plotting functions:
#
# 1. use Orbit plotting style for the entire notebook: this WILL overwrite user's style or default style for the entire notebook
#
# orbit_style = get_orbit_style()
#
# plt.style.use(orbit_style)
#
# 2. only use Orbit plotting style for Orbit plotting functions: this will NOT impact user's style or default style for the entire notebook
#
# user to specify whether to use orbit style for individual functions (default use_orbit_style=True). Please see examples below
#
#
#
#
# Weekly initial-claims dataset bundled with Orbit.
raw_df = load_iclaims()
raw_df.dtypes
df = raw_df.copy()
df.head()
# +
# Hold out the final year (52 weekly points) as the test window.
test_size=52
train_df=df[:-test_size]
test_df=df[-test_size:]
# -
# Damped local trend model with yearly (52-week) seasonality.
dlt = DLT(response_col='claims',
          date_col='week',
          seasonality=52,
          seed=2020)
# + jupyter={"outputs_hidden": true} tags=[]
dlt.fit(train_df)
# -
# Predict over the full frame; decompose=True also returns the
# trend/seasonality components alongside the point predictions.
predicted_df = dlt.predict(df=df, decompose=True)
predicted_df
# ## Use Orbit Style for the Notebook
# +
###### call for orbit style
# orbit_style = get_orbit_style()
# plt.style.use(orbit_style)
###### change back to default
# plt.style.use(default)
# + [markdown] tags=[]
# ## Use Orbit Style for Individual Functions
# -
# Default behavior: Orbit plotting functions use the Orbit style.
_ = plot_predicted_data(training_actual_df=train_df,
                        predicted_df=predicted_df,
                        title='Prediction',
                        date_col='week',
                        actual_col='claims',
                        test_actual_df=test_df)
# +
# Explicitly request the Orbit style with a built-in Orbit palette.
_ = eda_plot.ts_heatmap(df = df, date_col = 'week', value_col='claims',
                        palette = palette.OrbitColorMap.BLUE_GRADIENT.value, use_orbit_style=True)
# -
# Use the Orbit style and the Orbit palette (both defaults).
_ = eda_plot.dual_axis_ts_plot(df=df, var1='trend.unemploy', var2='claims', date_col='week')
# Custom colors; use_orbit_style=False keeps the caller's matplotlib
# style instead of switching to Orbit's.
_ = eda_plot.dual_axis_ts_plot(df=df, var1='trend.unemploy', var2='claims', date_col='week', color1='red', color2='green',
                               use_orbit_style=False)
| examples/eda_orbit_style.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ### Predicting Chances of Admission for Graduate Programs in Universities.
#
# ### Our Objective:
# * Determine the most important factors that contribute to a student's chance of admission, and select the most accurate model to predict the probability of admission.
# * The predicted output gives them a fair idea about their admission chances in a particular university.
#
# ### Getting to know the dataset!
# GA dataset contains various parameters which are important for admission into graduate programs in universities. The features included are :
# * GRE Scores ( out of 340 ).
# * TOEFL Scores ( out of 120 ).
# * University Rating ( out of 5 ).
# * Statement of Purpose and Letter of Recommendation Strength ( out of 5 ).
# * Undergraduate GPA ( out of 10 ).
# * Research Experience ( either 0 or 1 ).
# * Chance of Admit ( ranging from 0 to 1 ).
#
# ### Approach:
# * Explore our data to check for imbalance and missing values.
# * Explore the correlation between various features in the dataset.
# * Split the preprocessed dataset into train and test sets respectively.
# * Create and Train a AdaBoost Classifier using mlpack.
# * We'll perform evaluation on our test set using metrics such as Accuracy, ROC AUC to quantify the performance of our model.
# Import necessary libraries.
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import mlpack
from sklearn.metrics import *
# %matplotlib inline
sns.set(color_codes=True)  # enable seaborn color-code shorthand ("b", "g", ...)
# ### Load the data
# Load Graduate Admission dataset.
# Load the Graduate Admission dataset into a dataframe.
graduateData = pd.read_csv("Admission_Predict.csv")
# ### Explore the data
# Examine first 5 samples from our dataset.
graduateData.head()
# Concise summary of all the features in the dataframe.
graduateData.info()
# From the above summary statistic we can infer there are no categorical variables to be transformed in our dataset.
## Check the dataset for missing values.
graduateData.isnull().sum()
# From the above output we can infer there are no missing values in any of the features of our dataset, so there is no need for imputation or resampling.
def PlotHeatMap(data, figWidth=8, figHeight=8):
    """
    Render a heatmap of the pairwise correlations between the dataset's
    numeric features.
    Parameter:
        data (DataFrame): Pandas dataframe whose correlations are plotted.
        figWidth (int): Width of the figure.
        figHeight (int): Height of the figure.
    Returns:
        None
    """
    correlations = data.corr()
    plt.figure(figsize=(figWidth, figHeight))
    # Annotate each cell with its coefficient rounded to two decimals.
    sns.heatmap(correlations, annot=True, fmt=".2f", square=True)
    plt.show()
# Plot the correlation matrix as heatmap.
# Plot the correlation matrix as a heatmap.
PlotHeatMap(graduateData)
# As we can observe from the above heatmap, there is high correlation between the following features:
# * Chance of Admit & GRE Score.
# * Chance of Admit & TOEFL Score.
# * Chance of Admit & CGPA.
# * GRE & TOEFL Score.
#
# We can infer that these are really important for the chance of admit function as it varies almost about linearly with the mentioned factors.
# ### Exploratory Data Analysis
# #### Univariate Analysis
plt.figure(figsize=(10, 6))
ax = sns.histplot(x="Chance of Admit", data=graduateData)
# FIX: corrected "Distibution" typo in the displayed chart title.
ax.set_title("Distribution of Chance of Admit")
plt.show()
# * Most of the students have above 70% chance of admit.
# * More than 50% of students have above 72% chance of admit.
plt.figure(figsize=(10, 6))
ax = sns.histplot(x="GRE Score", bins=20, data=graduateData)
ax.set_title("GRE Score Distribution")
plt.show()
# * Large Number of students have secured GRE score between 308 & 325.
# * More than 50% of students scored more than 316 in GRE.
plt.figure(figsize=(10, 6))
ax = sns.histplot(x="TOEFL Score", bins=20, data=graduateData)
ax.set_title("TOEFL Score Distribution")
plt.show()
# * Large number of students have scored between 103 & 112 in TOEFL.
# * More than 50% of students scored more than 107 in TOEFL.
plt.figure(figsize=(8,6))
ax = sns.countplot(x="University Rating", data=graduateData)
# * From the above plot we can infer that students from universities that have got a rating of 3 are more in number among those who have applied for MS program.
# * More than 50% of universities have rating of 3 and above.
plt.figure(figsize=(8,6))
ax = sns.countplot(x="SOP", data=graduateData)
# * From the above plot we can infer that students with SOP score of 4 are highest in number.
# * Large number of students have SOP score ranging between 2.5 to 4.
# * More than 50% of students have SOP scores of 3.5 and above.
plt.figure(figsize=(8,6))
ax = sns.countplot(x="LOR", data=graduateData)
# * From the above plot we can infer that the students with LOR score of 3 are highest in number.
# * Large number of students have LOR score ranging between 3 to 4.
# * More than 50% of students have LOR scores of 3.5 and above.
plt.figure(figsize=(10, 6))
ax = sns.histplot(x="CGPA", bins=20, data=graduateData)
ax.set_title("CGPA Score Distribution")
plt.show()
# * Large number of students have secured CGPA between 8.0 & 9.0.
# * More than 50% of students have CGPA of 8.5 and above.
plt.figure(figsize=(6,6))
ax = sns.countplot(x="Research", data=graduateData)
# * From the above fig we can infer most students did some kind of research.
# #### Bivariate Analysis
plt.figure(figsize=(16,6))
ax = sns.lmplot(x="GRE Score", y="Chance of Admit", data=graduateData)
plt.title("GRE Score vs Chance of Admit")
plt.show()
# * Higher the GRE score, higher the chance of getting admit.
# * From the above plot it is clear that most students tend to score above 310 in GRE. Maximum GRE scores are in range 320-340.
plt.figure(figsize=(16,6))
ax = sns.lmplot(x="TOEFL Score", y="Chance of Admit", data=graduateData)
plt.title("TOEFL Score vs Chance of Admit")
plt.show()
# * High TOEFL score has a greater chance of getting admit.
ax = sns.lineplot(x="University Rating", y="Chance of Admit", data=graduateData)
plt.show()
# * Students from universities rated 5 have a average of whopping 88.8% chances of admit whilst students from 1 rated universities have not a great value of 56.2 % chances.
plt.figure(figsize=(6,6))
ax = sns.lmplot(x="SOP", y="Chance of Admit", data=graduateData)
plt.title("SOP vs Chance of Admit")
plt.show()
# * Students who have secured higher score for their Statement of Purpose (SOP), have an upper hand in getting an admit.
plt.figure(figsize=(6,6))
ax = sns.lmplot(x="LOR", y="Chance of Admit", data=graduateData)
plt.title("LOR vs Chance of Admit")
plt.show()
# * Students who have secured higher score for their Letter of Recommendation (LOR), have an upper hand in getting an admit.
plt.figure(figsize=(10,6))
ax = sns.scatterplot(x="CGPA", y="Chance of Admit", data=graduateData)
ax.set_title("CGPA vs Chance of Admit")
plt.show()
# * Students with high CGPA are likely to get more chance of admit than those who scored low CGPA.
sns.scatterplot(x="University Rating", y="CGPA", data=graduateData)
# Ratings of university increase with the increase in the CGPA.
# Subsets restricted to competitive GRE / TOEFL scores.
co_gre = graduateData[graduateData["GRE Score"] >= 300]
co_toefl = graduateData[graduateData["TOEFL Score"] >= 100]
plt.figure(figsize=(15, 8))
ax = sns.barplot(x="GRE Score", y="Chance of Admit", data=co_gre, linewidth=1.5, edgecolor="0.1")
# From the above plot it is clear that higher the GRE Score better is the chance of admit.
plt.figure(figsize=(15,8))
# FIX: this plot shows TOEFL scores, so it should use the TOEFL subset
# (co_toefl); the original passed co_gre and left co_toefl unused.
ax = sns.barplot(x="TOEFL Score", y="Chance of Admit", data=co_toefl, linewidth=1.5, edgecolor="0.1")
# From the above plot it is clear that higher the TOEFL Score better the chance of admit.
def FeatureTargetSplit(data, admitThresh=0.8):
    """
    Split a dataframe into features and a binarized admission target.
    Parameter:
        data (DataFrame): Pandas dataframe to be split into features and targets.
        admitThresh (double): cutoff above which "Chance of Admit" counts
            as an admit (label 1), default: 0.8.
    Returns:
        features (DataFrame): all columns except "Chance of Admit".
        targets (Series): 1 where chance of admit > admitThresh, else 0.
    """
    # FIX: the original body read the global `graduateData` instead of the
    # `data` argument, silently ignoring whatever the caller passed in.
    features = data.drop("Chance of Admit", axis=1)
    targets = data["Chance of Admit"]
    targets = targets.apply(lambda x: 1 if x > admitThresh else 0)
    return features, targets
# Dataset-level averages for the headline numeric features.
print(f"Average GRE Score: {graduateData['GRE Score'].mean() :.2f} out of 340")
print(f"Average TOEFL Score: {graduateData['TOEFL Score'].mean() :.2f} out of 120")
print(f"Average CGPA: {graduateData['CGPA'].mean() :.2f} out of 10")
print(f"Average chance of getting admitted: {graduateData['Chance of Admit'].mean() * 100}")
features, targets = FeatureTargetSplit(graduateData)
def TrainTestSplit(features, target, test_ratio=0.2, standardize=False):
    """
    Split features & target into shuffled training and test sets.
    Parameter:
        features (DataFrame): Pandas dataframe containing the features.
        target (Series/DataFrame): Pandas object containing the targets.
        test_ratio (double): Percentage of dataset to be held out for the
            test set, default 20%.
        standardize (bool): if True, features are scaled to zero mean and
            unit variance using statistics from the training split only
            (so no information leaks from the test set), default False.
    Returns:
        Xtrain (np.ndarray): training features.
        Xtest (np.ndarray): test features.
        ytrain (np.ndarray): training targets.
        ytest (np.ndarray): test targets.
    """
    n_rows = len(features)
    row_idxs = list(range(n_rows))
    np.random.shuffle(row_idxs)
    split = int(np.floor(test_ratio * n_rows))
    Xtest = features.iloc[row_idxs[:split], :].values
    Xtrain = features.iloc[row_idxs[split:], :].values
    ytest = target.iloc[row_idxs[:split]].values
    ytrain = target.iloc[row_idxs[split:]].values
    if standardize:
        # FIX: the original called StandardScaler(...), a name that is never
        # imported here (only sklearn.metrics is star-imported), so
        # standardize=True raised NameError.  Scale manually instead.
        mean = Xtrain.mean(axis=0)
        std = Xtrain.std(axis=0)
        std[std == 0] = 1.0  # avoid division by zero on constant columns
        Xtrain = (Xtrain - mean) / std
        Xtest = (Xtest - mean) / std
    return Xtrain, Xtest, ytrain, ytest
# ### Train Test Split
# The dataset has to be split into training and test set. Here the dataset has 400 observations and the test ratio is taken as 25% of the total observations. This indicates that the test set should have 25% * 400 = 100 observations and training set should have 300 observations respectively.
# Hold out 25% of the 400 observations (100 rows) for the test set.
Xtrain, Xtest, ytrain, ytest = TrainTestSplit(features, targets, 0.25)
# ### Training the AdaBoost Classifier model
# * Ensemble methods are meta-algorithms that combine several machine learning techniques into one predictive model in order to decrease variance (bagging), bias (boosting), or improve predictions (stacking)
# * AdaBoost is a boosting approach to machine learning based on the idea of creating a highly accurate prediction rule by combining many relatively weak and inaccurate rules.
# Create and train AdaBoost model with DecisionStump as weak learner.
output = mlpack.adaboost(training=Xtrain, labels=ytrain, weak_learner="decision_stump")
ab = output["output_model"]
# ### Making Predictions on Test set
# Predict the values for test data using previously trained model as input.
predictions = mlpack.adaboost(input_model=ab, test=Xtest)
# mlpack returns a column vector; flatten it to a 1-D label array.
yPreds = predictions["predictions"].reshape(-1, 1).squeeze()
# +
def PlotRocAUC(fper, tper):
    """Plot an ROC curve for the given false/true positive rates.

    Args:
        fper (np.ndarray): false positive rates.
        tper (np.ndarray): true positive rates.

    Returns:
        None
    """
    plt.plot(fper, tper, color="orange", label="ROC")
    # Chance-level diagonal for visual reference.
    plt.plot([0, 1], [0, 1], color="darkblue", linestyle="--")
    plt.title("ROC Curve")
    plt.xlabel("False Positive Rate")
    plt.ylabel("True Positive Rate")
    plt.legend()
    plt.show()
def modelEval(ytest, yPreds):
    """Print accuracy/ROC-AUC/kappa/classification report and plot the ROC curve.

    NOTE(review): the roc_curve call below reads the module-level
    `predictions` dict (class probabilities) instead of an argument, so this
    function only works after the prediction cell above has run.
    """
    print(f"Accuracy: {accuracy_score(ytest, yPreds)}")
    print(f"ROC AUC: {roc_auc_score(ytest, yPreds)}")
    print(f"Cohen's Kappa: {cohen_kappa_score(ytest, yPreds)}")
    print(classification_report(ytest,yPreds))
    # Use the positive-class probability column for the ROC curve.
    fper, tper, thresh = roc_curve(ytest, predictions["probabilities"][:, 1])
    PlotRocAUC(fper, tper)
# -
# ### Model Evaluation
# Report all metrics for the AdaBoost predictions on the held-out test set.
modelEval(ytest, yPreds)
# ### Conclusion
# From the above ROC AUC curve, we can infer that our AdaBoost model performs well on predicting student admissions. There is still room for improvement. Feel free to play around with the hyperparameters, split ratio, admission threshold etc.
| graduate_admission_classification_with_Adaboost/graduate-admission-classification-with-adaboost-py.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] graffitiCellId="id_twe5ety"
# ## Keypad Combinations
#
# A keypad on a cellphone has alphabets for all numbers between 2 and 9, as shown in the figure below:
#
# <img style="float: center;height:200px;" src="Keypad.png"><br>
#
# You can make different combinations of alphabets by pressing the numbers.
#
# For example, if you press 23, the following combinations are possible:
#
# `ad, ae, af, bd, be, bf, cd, ce, cf`
#
# Note that because 2 is pressed before 3, the first letter is always an alphabet on the number 2.
# Likewise, if the user types 32, the order would be
#
# `da, db, dc, ea, eb, ec, fa, fb, fc`
#
#
# Given an integer `num`, find out all the possible strings that can be made using digits of input `num`.
# Return these strings in a list. The order of strings in the list does not matter. However, as stated earlier, the order of letters in a particular string matters.
# + graffitiCellId="id_ffyjq5w"
def get_characters(num):
    """Return the letters printed on phone key `num` (2-9), or "" otherwise.

    Replaces the original nine-branch if/elif ladder with a single dict
    lookup — same results for every input, including the "" fallback for
    digits 0 and 1 (and any non-key value).
    """
    keypad_letters = {
        2: "abc", 3: "def", 4: "ghi", 5: "jkl",
        6: "mno", 7: "pqrs", 8: "tuv", 9: "wxyz",
    }
    return keypad_letters.get(num, "")
def keypad(num):
    """Return every string obtainable by pressing the digits of `num`.

    Recursive: the combinations for all but the last digit are extended by
    each letter on the last digit's key.
    """
    letters = {2: "abc", 3: "def", 4: "ghi", 5: "jkl",
               6: "mno", 7: "pqrs", 8: "tuv", 9: "wxyz"}
    # Base case: no digits left contributes the empty prefix.
    if num <= 1:
        return [""]
    prefix_combos = keypad(num // 10)
    last_key = letters.get(num % 10, "")
    # Outer loop over letters, inner over prefixes — matches original order.
    return [prefix + ch for ch in last_key for prefix in prefix_combos]
# + [markdown] graffitiCellId="id_9ibtd5w"
# <span class="graffiti-highlight graffiti-id_9ibtd5w-id_haj1ksk"><i></i><button>Hide Solution</button></span>
# + graffitiCellId="id_haj1ksk"
# Recursive Solution
def keypad(num):
# Base case
if num <= 1:
return [""]
# If `num` is single digit, get the LIST having one element - the associated string
elif 1 < num <= 9:
return list(get_characters(num))
# Otherwise `num` >= 10. Find the unit's (last) digits of `num`
last_digit = num % 10
'''Step 1'''
# Recursive call to the same function with “floor” of the `num//10`
small_output = keypad(num//10) # returns a LIST of strings
'''Step 2'''
# Get the associated string for the `last_digit`
keypad_string = get_characters(last_digit) # returns a string
'''Permute the characters of result obtained from Step 1 and Step 2'''
output = list()
'''
The Idea:
Each character of keypad_string must be appended to the
end of each string available in the small_output
'''
for character in keypad_string:
for item in small_output:
new_item = item + character
output.append(new_item)
return output # returns a LIST of strings
# + graffitiCellId="id_l66zrar"
def test_keypad(input, expected_output):
    """Compare keypad(input), order-insensitively, to expected_output and report."""
    outcome = sorted(keypad(input))
    if outcome == expected_output:
        print("Yay. We got it right.")
    else:
        print("Oops! That was incorrect.")
# + graffitiCellId="id_vnyax73"
# Base case: list with empty string
# NOTE(review): `input` shadows the builtin of the same name in these cells.
input = 0
expected_output = [""]
test_keypad(input, expected_output)
# + graffitiCellId="id_458su6i"
# Example case
input = 23
expected_output = sorted(["ad", "ae", "af", "bd", "be", "bf", "cd", "ce", "cf"])
test_keypad(input, expected_output)
# + graffitiCellId="id_j04lazf"
# Example case
input = 32
expected_output = sorted(["da", "db", "dc", "ea", "eb", "ec", "fa", "fb", "fc"])
test_keypad(input, expected_output)
# + graffitiCellId="id_4ziwk7w"
# Example case
input = 8
expected_output = sorted(["t", "u", "v"])
test_keypad(input, expected_output)
# + graffitiCellId="id_u7fe0h1"
# Three-digit case: 3*3*3 = 27 combinations.
input = 354
expected_output = sorted(["djg", "ejg", "fjg", "dkg", "ekg", "fkg", "dlg", "elg", "flg", "djh", "ejh", "fjh", "dkh", "ekh", "fkh", "dlh", "elh", "flh", "dji", "eji", "fji", "dki", "eki", "fki", "dli", "eli", "fli"])
test_keypad(input, expected_output)
| data_structure/Keypad Combinations.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Heading
#
# +
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
# Load the AprilTag (vision) and IMU angle logs for this experiment.
path ='/home/nrw/Documents/projects_Spring2018/howe299r/Experiments/26Mar2018/data/deltas/'
viz = '03April_ID1.csv'
imu = '03IMU.csv'
vizf = pd.read_csv(path+viz,sep='\t')
vizf.dropna(how="all", inplace=True)
imuf = pd.read_csv(path+imu,sep='\t')
imuf.dropna(how="all", inplace=True)
# NOTE(review): this first assignment is immediately overwritten below.
yaw, pitch, roll = vizf["yaw"], vizf["pitch"], vizf["roll"]
# 57.29578 = degrees per radian: convert the vision angles to degrees.
yaw, pitch, roll = vizf["yaw"]*57.29578, vizf["pitch"]*57.29578, vizf["roll"]*57.29578
yaw, pitch, roll = yaw, pitch, roll  # NOTE(review): no-op assignment
xIMU,yIMU,zIMU = imuf["x"], imuf["y"], imuf["z"]
# Applied load in grams: one 10 g increment per recorded sample.
forces = np.arange(0,yaw.shape[0]*10,10)
#print(len(yaw))
#print(len(yIMU))
#print(yIMU)
#print(roll)
#print(pitch)
#print(len(forces))
# +
# NOTE(review): these hard-coded measurements overwrite the yIMU/yaw series
# loaded from the CSV files above — presumably pasted from a specific run;
# confirm they match the intended experiment.
yIMU = [-0.3125, -1.4375, -2.4375, -3.25, -3.75, -4. ,-5.625 ,-6. ]
yIMU += [-0.4375 ,-1.0625 ,-1.875 ,-2.6875 ,-3.75 ,-4.5625 ,-5.8125 ,-6.125 ]
yIMU += [-0.625 ,-1.5 ,-2.125 ,-2.875 ,-4.125 ,-4.4375 ,-5.75 ,-6. ]
yaw = [ 0.29166667 ,-0.0483631 ,-0.26339286 ,-0.29092262 ,-0.00595238 ,0.52901786
,-0.3110119 ,0.09895833]
yaw += [-1.17708333 ,0.06324405 ,0.11607143 ,0.16889881 ,-0.02827381 ,0.02455357
,-0.36011905 ,0.19270833]
yaw += [ 0.01041667 ,-0.06622024 ,0.10714286 ,0.15550595 ,-0.29613095 ,0.18973214
,-0.32440476 ,0.22395833]
print(len(yIMU))
print(len(yaw))
# Scatter both angle estimates against the applied force.
plt.scatter(forces, yIMU, label='imu')
plt.scatter(forces, yaw, label='aprilTags') #????
plt.ylabel('degrees')
plt.xlabel('force (g)')
plt.legend()
plt.show()
# +
import numpy as np
import plotly.plotly as py
import plotly.offline as po
import plotly.graph_objs as go
from sklearn import linear_model
from sklearn.linear_model import Ridge
from sklearn.metrics import mean_squared_error, r2_score
po.init_notebook_mode(connected=True)
# Scatter traces for both angle sources.
trace0 = go.Scatter( x = forces, y = yIMU, mode = 'markers',
 name = 'degrees (by IMU)' )
trace1 = go.Scatter( x = forces, y = yaw, mode = 'markers',
 name = 'true degrees (by apriltag)' )
# sklearn expects a 2-D design matrix: (n_samples, 1).
forces = forces.reshape(-1, 1)
myX = forces
myy = yIMU
#regr= Ridge(fit_intercept=True, alpha=1.0, random_state=0, normalize=True)
# NOTE(review): plain LinearRegression is fitted, yet the variable/trace
# names still say "ridge" — leftover from the commented-out Ridge line.
regr = linear_model.LinearRegression()
regr.fit(myX, myy)
coef_ridge = regr.coef_
# Evaluate the fitted line on a 20-point grid for plotting.
gridx = np.linspace(myX.min(), myX.max(), 20)
coef_ = regr.coef_ * gridx + regr.intercept_
yPred= regr.predict(myX)
#plt.plot(gridx, coef_, 'g-', label="ridge regression")
trace2 = go.Scatter( x= gridx, y = coef_,
 name = 'linear fit (w/ridge penalty)' )
# +
# Combine the scatter traces and the fitted line into one figure.
data = [trace1, trace0, trace2]
layout = go.Layout(
 title='Force vs Degrees of Deflection',
 yaxis=dict(title='degrees'),
 xaxis=dict(title='Force (in grams)'),
 legend=dict(x=.1, y=-.5) )
fig = go.Figure(data=data, layout=layout)
# Plot and embed in ipython notebook!
print('Coefficients: \n', regr.coef_)
print("Mean squared error: %.2f" % mean_squared_error(yIMU, yPred))
print('Variance score (ideal 1): %.2f' % r2_score(yIMU, yPred))
po.iplot(fig)
#po.plot(fig, filename='temp_plot.html')
# +
# Residuals of the linear fit, plotted against the IMU measurements.
resid = yIMU - yPred
# NOTE(review): this first layout is unused; it is overwritten below.
layout = go.Layout(
 title='Force vs Degrees of Deflection',
 yaxis=dict(title='degrees'),
 xaxis=dict(title='Force (in grams)'),
 legend=dict(x=.1, y=-.5) )
traceResid = go.Scatter( x = yIMU, y = resid, mode = 'markers',
 name = 'resid' )
data = [traceResid]
layout = go.Layout(
 title='Residuals',
 yaxis=dict(title='residuals'),
 xaxis=dict(title='yIMU (degrees)'),
 legend=dict(x=.1, y=-.5) )
fig = go.Figure(data=data, layout=layout)
po.iplot(fig)
print(yIMU)
print(resid)
| Experiments/26Mar2018/26Mar2018_Graphs_Residuals.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + _uuid="8f2839f25d086af736a60e9eeb907d3b93b6e0e5" _cell_guid="b1076dfc-b9ad-4769-8c92-a6c4dae69d19"
import os
import pandas as pd
import numpy as np
import torch
from torch import nn, optim
import seaborn as sns
from matplotlib import pyplot as plt
# -
device = 'cuda' if torch.cuda.is_available() else 'cpu'
print(device)
data = pd.read_csv('../input/trip-advisor-hotel-reviews/tripadvisor_hotel_reviews.csv')
data.head()
# Shift ratings 1-5 down to 0-4 so they can serve as class indices
# for nn.CrossEntropyLoss.
data['Rating'].replace({1: 0, 2: 1, 3: 2, 4: 3, 5: 4}, inplace=True)
data.head()
# +
from sklearn.model_selection import train_test_split
# 80/20 split, then 75/25 of the remainder: 60% train, 20% valid, 20% test.
df_train, df_test = train_test_split(data, test_size=0.2)
df_train, df_valid = train_test_split(df_train, test_size=0.25)
# -
# Persist the splits so torchtext's TabularDataset can read them back.
df_train.to_csv('train.csv', index=False)
df_valid.to_csv('valid.csv', index=False)
df_test.to_csv('test.csv', index=False)
# +
import spacy
from spacy.tokenizer import Tokenizer
nlp = spacy.load("en_core_web_sm")
tokenizer = Tokenizer(nlp.vocab)
def spacy_tokenize(x):
    """Split text `x` into a list of token strings using the spaCy tokenizer."""
    return [tok.text for tok in tokenizer(x)]
# +
from torchtext.legacy.data import Field, TabularDataset, BucketIterator
# Labels are already integers; text is tokenized, lowercased, and carries
# per-example lengths for pack_padded_sequence.
label_field = Field(sequential=False, use_vocab=False, batch_first=True)
text_field = Field(tokenize = spacy_tokenize, tokenizer_language="en", lower=True, include_lengths=True, batch_first=True)
fields = [('text', text_field), ('label', label_field)]
train, valid, test = TabularDataset.splits(path='', train='train.csv', validation='valid.csv', test='test.csv', format='CSV', fields=fields, skip_header=True)
# Bucket by length so padded batches waste little computation.
trainloader = BucketIterator(train, batch_size=64, sort_key=lambda x: len(x.text),
 device=device, sort=True, sort_within_batch=True)
validloader = BucketIterator(valid, batch_size=64, sort_key=lambda x: len(x.text),
 device=device, sort=True, sort_within_batch=True)
testloader = BucketIterator(test, batch_size=64, sort_key=lambda x: len(x.text),
 device=device, sort=True, sort_within_batch=True)
# Vocabulary from the training split only; rare tokens (<3 uses) dropped.
text_field.build_vocab(train, min_freq=3)
# +
from torch.nn.utils.rnn import pack_padded_sequence, pad_packed_sequence
class GRU(nn.Module):
    """Bidirectional 3-layer GRU classifier over packed, padded text batches.

    forward() returns raw (unnormalized) logits for the 5 rating classes;
    nn.CrossEntropyLoss applies log-softmax internally, so no softmax
    belongs inside the model.
    """

    def __init__(self, dimension=128):
        super(GRU, self).__init__()
        # Vocabulary size comes from the module-level `text_field`.
        self.embedding = nn.Embedding(len(text_field.vocab), 256)
        self.dimension = dimension
        self.gru = nn.GRU(input_size=256, hidden_size=dimension, num_layers=3, dropout=0.2, batch_first=True, bidirectional=True)
        self.fc = nn.Linear(2*dimension, 5)
        # Kept for interface compatibility, but no longer used in forward():
        # CrossEntropyLoss expects logits, not probabilities.
        self.softmax = nn.Softmax(dim=1)

    def forward(self, text, text_len):
        text_emb = self.embedding(text)
        # Pack so the GRU skips padding positions.
        packed_input = pack_padded_sequence(text_emb, text_len.cpu(), batch_first=True, enforce_sorted=False)
        packed_output, _ = self.gru(packed_input)
        output, _ = pad_packed_sequence(packed_output, batch_first=True)
        # Forward direction: hidden state at each sequence's last real token.
        out_forward = output[range(len(output)), text_len - 1, :self.dimension]
        # Backward direction: its final state lives at t = 0.
        out_reverse = output[:, 0, self.dimension:]
        out_reduced = torch.cat((out_forward, out_reverse), 1)
        # BUG FIX: the original applied softmax to the hidden features BEFORE
        # the linear layer (self.fc(self.softmax(out_reduced))), squashing the
        # features onto a probability simplex. Return plain logits instead.
        out = self.fc(out_reduced)
        return out
model = GRU().to(device)
# -
criterion = nn.CrossEntropyLoss()
optimizer = optim.Adam(model.parameters(),lr=0.001)
# Checkpoint path for the best (lowest validation loss) model.
PATH = './best_model.pth'
# +
import torch.nn.functional as F
# Per-epoch history for the loss/accuracy plots below.
tr_losses, v_losses = [], []
tr_acc, v_acc = [], []
valid_loss_min = np.Inf
counter = 0  # epochs since the last validation improvement (early stopping)
# Train for up to 100 epochs with per-epoch validation, checkpointing,
# and early stopping (patience 5).
for epoch in range(100):
    running_loss = 0.0
    valid_loss = 0.0
    correct_train = 0
    total_train = 0
    correct_valid = 0
    total_valid = 0
    # NOTE(review): batches appear to unpack as ((text, lengths), label), _
    # from the legacy BucketIterator — confirm against torchtext version.
    for ((text, text_len), labels), _ in trainloader:
        labels = labels.to(device)
        text = text.to(device)
        text_len = text_len.to(device)
        output = model(text, text_len)
        _, predictions = torch.max(output, 1)
        total_train += labels.size(0)
        correct_train += (predictions == labels).sum().item()
        loss = criterion(output, labels)
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        running_loss += loss.item()
    with torch.no_grad():
        for ((text, text_len), labels), _ in validloader:
            labels = labels.to(device)
            text = text.to(device)
            text_len = text_len.to(device)
            output = model(text, text_len)
            _, predictions = torch.max(output, 1)
            total_valid += labels.size(0)
            correct_valid += (predictions == labels).sum().item()
            loss = criterion(output, labels)
            valid_loss += loss.item()
    # NOTE(review): summed per-batch mean losses divided by SAMPLE counts —
    # a consistent (if unconventional) scaling for both splits.
    avg_train_loss = running_loss / total_train
    tr_losses.append(avg_train_loss)
    avg_valid_loss = valid_loss / total_valid
    v_losses.append(avg_valid_loss)
    print('Epoch: {} \tTraining Loss: {:.6f} \tValidation Loss: {:.6f} \tTraining Accuracy: {:.1f} \tValidation Accuracy: {:.1f}'.format(
        epoch + 1,
        avg_train_loss,
        avg_valid_loss,
        100 * correct_train / total_train,
        100 * correct_valid / total_valid
    ))
    tr_acc.append(100 * correct_train / total_train)
    v_acc.append(100 * correct_valid / total_valid)
    # Checkpoint whenever validation loss improves; otherwise count toward
    # the early-stopping patience of 5 epochs.
    if avg_valid_loss <= valid_loss_min:
        print('Validation loss decreased({:.6f} -->{:.6f}). Saving Model ...'.format(valid_loss_min, avg_valid_loss))
        torch.save(model.state_dict(), PATH)
        valid_loss_min = avg_valid_loss
        counter = 0
    else:
        counter += 1
        if counter >= 5:
            print("Early stopping!")
            break
print('Finished Training')
# +
# Learning curves recorded during training.
plt.plot(tr_losses, label='Train Loss')
plt.plot(v_losses, label='Valid Loss')
plt.legend()
# +
plt.plot(tr_acc, label='Train Accuracy')
plt.plot(v_acc, label='Valid Accuracy')
plt.legend()
# -
# Reload the best checkpoint saved during training before final evaluation.
model = GRU().to(device)
model.load_state_dict(torch.load(PATH))
# +
correct = 0
total = 0
model.to(device)
# BUG FIX: the original iterated `validloader` here while the print below
# claims TEST accuracy; `testloader` was built above but never used.
with torch.no_grad():
    for ((text, text_len), labels), _ in testloader:
        labels = labels.to(device)
        text = text.to(device)
        text_len = text_len.to(device)
        output = model(text, text_len)
        _, predictions = torch.max(output, 1)
        total += labels.size(0)
        correct += (predictions == labels).sum().item()
print('Accuracy of the network on the test reviews: %d %%' % (
    100 * correct / total))
| trip-advisor-hotel-reviews-gru.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
# This is WIP (work in progress) file for the Dashboard of graphs
# The triple-quoted string below is a disabled install snippet (a bare
# string literal is a no-op); uncomment-style usage only.
'''!pip3 install dash
!pip3 install jupyter-dash
'''
# +
import dash
import dash_core_components as dcc
import dash_html_components as html
import plotly.express as px
import pandas as pd
external_stylesheets = ['https://codepen.io/chriddyp/pen/bWLwgP.css']
app = dash.Dash(__name__, external_stylesheets=external_stylesheets)
# assume you have a "long-form" data frame
# see https://plotly.com/python/px-arguments/ for more options
df = pd.DataFrame({
    "Fruit": ["Apples", "Oranges", "Bananas", "Apples", "Oranges", "Bananas"],
    "Amount": [4, 1, 2, 2, 4, 5],
    "City": ["SF", "SF", "SF", "Montreal", "Montreal", "Montreal"]
})
# One line per city across the fruit categories.
fig = px.line(df, x="Fruit", y="Amount", color="City")
app.layout = html.Div(children=[
    html.H1(children='Hello Dash'),
    html.Div(children='''
        Dash: A web application framework for Python.
    '''),
    dcc.Graph(
        id='example-graph',
        figure=fig
    )
])
if __name__ == '__main__':
    app.run_server(debug=False)
| Dashboard/.ipynb_checkpoints/WIP-checkpoint.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Analysis of Twitter Data related to 'Abrogation of Article 370 & 35-A'
# This is a data analysis project. This project is done in partial fulfilment of the *Advanced Operating Systems* course that I am doing at **University of Hyderabad**.
# The name of the project is *"Analysis of Twitter data"*
# Firstly we include the following libraries needed using `import`. You will be able to find the code [here](www.github.com/19mcmi07/aos_mini_project).
# - Matplotlib
# - pandas
# - textblob
# - os
import matplotlib.pyplot as plt
import pandas as pd
from textblob import TextBlob
data = pd.read_csv("data/tweets.csv")
# Now we have read in our dataset in a variable named *data*. This variable is a pandas dataframe. Pandas library helps us to manage the data efficiently.
# Now we can try to analyze the data in its raw form.
# Running tallies of tweets classified positive / negative by polarity.
pos_count = 0
neg_count = 0
# We have initialized two variables that keep track of the total number of negative tweets that we see and the number of positive number of tweets that we see.
# Classify each tweet by TextBlob polarity: >= 0.1 counts as positive,
# <= -0.1 as negative; anything in between is treated as neutral and
# left uncounted. (The original's inner `> 0` / `<= 0` checks were
# redundant — the outer thresholds already imply them.)
for tweet_text in data.get('text'):
    polarity = TextBlob(str(tweet_text)).sentiment.polarity
    if polarity >= 0.1:
        pos_count += 1
    elif polarity <= -0.1:
        neg_count += 1
# The above code iterates through the entire dataset in the attribute named *text* and finds the polarity of the sentence using TextBlob. If the polarity is greater than 0, it means that the text is positive and if the polarity is less than 0, it means that the sentence is negative. All the points (positive and negative) are counted and then the numbers thus obtained are printed below.
print("Total number of positive tweets:", pos_count)
print("Total number of negative tweets:", neg_count)
# Now that we have the number of positive tweets and the number of negative tweets, we plot a pie chart, so that it is easy to visualize and understand.
# Pie of positive vs negative counts; autopct prints each share in percent.
plt.pie([pos_count, neg_count],
        labels=['positive', 'negative'],
        autopct='%.2f')
plt.axis('equal')
plt.show()
# Now we'll calculate how many tweets are there that have been replied to.
rep_count = 0
total_rep_count = 0
# Single pass over the `is_reply` column: count replies and total rows.
for rep in data.get('is_reply'):
    if rep == True:
        rep_count += 1
    # Every row counts toward the total, replied or not.
    total_rep_count += 1
print(rep_count, total_rep_count)
# BUG FIX: the 'Not replied' wedge must be (total - replied); the original
# plotted the overall total as the second wedge, skewing both percentages.
plt.pie([rep_count, total_rep_count - rep_count],
        labels=['Replied', 'Not replied'],
        autopct='%.2f')
plt.axis('equal')
plt.show()
# Kernel-density estimate of the reply counts.
data.get('nbr_reply').plot.kde(bw_method=0.9)
# As we can see above, there is a huge peak at 0 for the number of replies. We can infer that a very large number of tweets didn't receive any replies.
# Histograms of reply counts over three ranges: few, moderate, and viral.
plt.hist(data.get('nbr_reply'), bins=10, range=(1.0, 10.0))
plt.show()
plt.hist(data.get('nbr_reply'), bins=30, range=(50.0, 100.0))
plt.show()
print(max(data.get('nbr_reply')))
plt.hist(data.get('nbr_reply'), bins=30, range=(1000.0, 11000.0))
plt.show()
# Same three ranges for retweets.
plt.hist(data.get('nbr_retweet'), bins=10, range=(1.0, 10.0))
plt.show()
plt.hist(data.get('nbr_retweet'), bins=30, range=(50.0, 100.0))
plt.show()
plt.hist(data.get('nbr_retweet'), bins=30, range=(1000.0, 11000.0))
plt.show()
print(max(data.get('nbr_retweet')))
# And for likes (favorites).
plt.hist(data.get('nbr_favorite'), bins=10, range=(1.0, 10.0))
plt.show()
plt.hist(data.get('nbr_favorite'), bins=30, range=(50.0, 100.0))
plt.show()
plt.hist(data.get('nbr_favorite'), bins=30, range=(1000.0, 11000.0))
plt.show()
print(max(data.get('nbr_favorite')))
| aos_mini_project.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
# Linear regression implemented from scratch: data pipeline, model, loss
# function, and minibatch stochastic gradient descent optimizer.
# %matplotlib inline
import random
import torch
from d2l import torch as d2l
# +
# 根据带有噪声的线性模型构造一个人造数据集。我们使用线性模型参数w=[2,-3.4]^T、b=4.2和噪声项ε生成数据集及其标签
# y = Xw + b + ε
def synthetic_data(w, b, num_examples):
    """Generate `num_examples` samples of y = Xw + b + Gaussian noise.

    Returns (X, y): X is (num_examples, len(w)) standard-normal features,
    y is a (num_examples, 1) column vector of noisy targets.
    """
    features = torch.normal(0, 1, (num_examples, len(w)))
    targets = torch.matmul(features, w) + b
    targets += torch.normal(0, 0.01, targets.shape)  # additive noise, std 0.01
    return features, targets.reshape((-1, 1))
# Ground-truth parameters used to generate the synthetic dataset.
true_w = torch.tensor([2, -3.4])
true_b = 4.2
features, labels = synthetic_data(true_w, true_b, 1000)
# -
# Each row of `features` is one 2-D sample; each row of `labels` is its
# scalar target value.
print('features:', features[0], '\nlabel:', labels[0])
d2l.set_figsize()
# Scatter the second feature against the labels to show the linear relation.
d2l.plt.scatter(features[:, 1].detach().numpy(),
                labels.detach().numpy(), 1)
# +
# 定义一个data_iter函数,该函数接收批量大小、特征矩阵和标签向量作为输入,生成大小为batch_size的小批量
def data_iter(batch_size, features, labels):
    """Yield (features, labels) minibatches in a fresh random order each call.

    The final batch may be smaller when the dataset size is not a multiple
    of `batch_size`.
    """
    total = len(features)
    order = list(range(total))
    random.shuffle(order)  # visit examples in no particular order
    start = 0
    while start < total:
        # Slicing past the end is safe; the tail batch is just shorter.
        chosen = torch.tensor(order[start:start + batch_size])
        yield features[chosen], labels[chosen]
        start += batch_size
batch_size = 10
# Peek at a single minibatch to sanity-check the iterator.
for X, y in data_iter(batch_size, features, labels):
    print(X, '\n', y)
    break
# -
# Initialize the model parameters: small random weights, zero bias,
# both tracked by autograd.
w = torch.normal(0, 0.01, size=(2, 1), requires_grad = True)
b = torch.zeros(1, requires_grad = True)
# 定义类型
def linreg(X, w, b):
    """Linear regression model: predict X @ w + b (bias broadcast per row)."""
    return X @ w + b
# 定义损失函数
def squared_loss(y_hat, y):
    """Elementwise halved squared error; `y` is reshaped to match `y_hat`."""
    diff = y_hat - y.reshape(y_hat.shape)
    return diff * diff / 2
# 定义优化算法
def sgd(params, lr, batch_size):
    """Minibatch stochastic gradient descent step.

    Updates every tensor in `params` in place by lr * grad / batch_size,
    then resets its gradient to zero for the next iteration.
    """
    with torch.no_grad():  # the updates themselves must not be traced
        for p in params:
            p.sub_(lr * p.grad / batch_size)
            p.grad.zero_()
# +
# Training loop: 3 epochs of minibatch SGD on the synthetic data.
lr = 0.03
num_epochs = 3
net = linreg
loss = squared_loss
for epoch in range(num_epochs):
    for X, y in data_iter(batch_size, features, labels):
        l = loss(net(X, w, b), y)  # minibatch loss on X and y
        # Sum to a scalar before backprop; sgd divides by batch_size.
        l.sum().backward()
        sgd([w, b], lr, batch_size)
    # Report the mean loss over the whole dataset after each epoch.
    with torch.no_grad():
        train_1 = loss(net(features, w, b), labels)
        print(f'epoch {epoch + 1}, loss {float(train_1.mean()):f}')
# -
# Compare the true parameters with the learned ones to gauge how well
# training succeeded. (The printed labels read "estimation error of w/b".)
print(f'w的估计误差:{true_w - w.reshape(true_w.shape)}')
print(f'b的估计误差:{true_b - b}')
| artificial-intelligence/d2l-pytorch/notes/5_linear_regression.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + id="PZdkPJRoMzFF" colab_type="code" colab={}
# Colab-only setup: install google-drive-ocamlfuse and mount Google Drive
# via OAuth so the notebook can read/write experiment files.
# !apt-get install -y -qq software-properties-common python-software-properties module-init-tools
# !add-apt-repository -y ppa:alessandro-strada/ppa 2>&1 > /dev/null
# !apt-get update -qq 2>&1 > /dev/null
# !apt-get -y install -qq google-drive-ocamlfuse fuse
from google.colab import auth
auth.authenticate_user()
from oauth2client.client import GoogleCredentials
creds = GoogleCredentials.get_application_default()
import getpass
# The verification code is entered interactively and never echoed.
# !google-drive-ocamlfuse -headless -id={creds.client_id} -secret={creds.client_secret} < /dev/null 2>&1 | grep URL
vcode = getpass.getpass()
# !echo {vcode} | google-drive-ocamlfuse -headless -id={creds.client_id} -secret={creds.client_secret}
# + id="rAlBhIu-aqBM" colab_type="code" colab={}
# !ls
# + id="ON9rqJZrM_xm" colab_type="code" colab={}
# !mkdir -p drive
# !google-drive-ocamlfuse drive
# + id="aQ-g8NZANC8w" colab_type="code" colab={}
from keras import layers
from keras.layers import Input, Dense, Activation, ZeroPadding2D, BatchNormalization, Flatten, Conv2D, Lambda,Concatenate
from keras.layers import AveragePooling2D, MaxPooling2D, Dropout, GlobalMaxPooling2D, GlobalAveragePooling2D, Add
from keras.models import Model
from keras import regularizers
from keras.preprocessing import image
from keras.utils import layer_utils
from keras.utils.data_utils import get_file
from keras.applications.imagenet_utils import preprocess_input
from keras.initializers import glorot_normal
#import pydot
from IPython.display import SVG
from keras.utils.vis_utils import model_to_dot
from keras.utils import plot_model
# + id="yFXklS3nbMFp" colab_type="code" colab={}
import tensorflow as tf
# + id="FTWUi_3BN58K" colab_type="code" colab={}
def two_path(X_input):
    """Build the two-pathway conv feature extractor on tensor `X_input`.

    Pairs of parallel Conv2D+BatchNorm branches are merged with an
    elementwise Maximum (maxout over two branches). One pathway stacks two
    such maxout convs (7x7 then 3x3); the other is a single wide 13x13
    maxout conv; the two are concatenated channel-wise at the end.
    """
    X = Conv2D(64,(7,7),strides=(1,1),padding='valid')(X_input)
    X = BatchNormalization()(X)
    X1 = Conv2D(64,(7,7),strides=(1,1),padding='valid')(X_input)
    X1 = BatchNormalization()(X1)
    X = layers.Maximum()([X,X1])  # maxout over the two 7x7 branches
    X = Conv2D(64,(4,4),strides=(1,1),padding='valid',activation='relu')(X)
    # Wide-context pathway: a single 13x13 maxout conv over the input.
    X2 = Conv2D(160,(13,13),strides=(1,1),padding='valid')(X_input)
    X2 = BatchNormalization()(X2)
    X21 = Conv2D(160,(13,13),strides=(1,1),padding='valid')(X_input)
    X21 = BatchNormalization()(X21)
    X2 = layers.Maximum()([X2,X21])
    # Second maxout stage of the first pathway.
    X3 = Conv2D(64,(3,3),strides=(1,1),padding='valid')(X)
    X3 = BatchNormalization()(X3)
    X31 = Conv2D(64,(3,3),strides=(1,1),padding='valid')(X)
    X31 = BatchNormalization()(X31)
    X = layers.Maximum()([X3,X31])
    X = Conv2D(64,(2,2),strides=(1,1),padding='valid',activation='relu')(X)
    X = Concatenate()([X2,X])  # join wide-context and stacked pathways
    #X = Conv2D(5,(21,21),strides=(1,1))(X)
    #X = Activation('softmax')(X)
    #model = Model(inputs = X_input, outputs = X)
    return X
# + id="6O3jE_k6cEIl" colab_type="code" colab={}
def input_cascade(input_shape1,input_shape2):
    """Build the input-cascade model: the first two-path network's 5-class
    map is concatenated onto the SECOND network's input channels.

    Returns a Model taking [large_patch, small_patch] and producing a
    softmax over 5 classes.
    """
    X1_input = Input(input_shape1)
    X1 = two_path(X1_input)
    X1 = Conv2D(5,(21,21),strides=(1,1),padding='valid',activation='relu')(X1)
    X1 = BatchNormalization()(X1)
    X2_input = Input(input_shape2)
    # Cascade: first-stage class maps become extra input channels.
    X2_input1 = Concatenate()([X1,X2_input])
    #X2_input1 = Input(tensor = X2_input1)
    X2 = two_path(X2_input1)
    X2 = Conv2D(5,(21,21),strides=(1,1),padding='valid')(X2)
    X2 = BatchNormalization()(X2)
    X2 = Activation('softmax')(X2)
    model = Model(inputs=[X1_input,X2_input],outputs=X2)
    return model
# + id="QPq0SXTivPZ1" colab_type="code" colab={}
def MFCcascade(input_shape1,input_shape2):
    """Build the MFC-cascade model: both two-path networks run separately
    and their FEATURE maps are concatenated before the final classifier.

    Returns a Model taking [large_patch, small_patch] and producing a
    softmax over 5 classes.
    """
    X1_input = Input(input_shape1)
    X1 = two_path(X1_input)
    X1 = Conv2D(5,(21,21),strides=(1,1),padding='valid',activation='relu')(X1)
    X1 = BatchNormalization()(X1)
    #X1 = MaxPooling2D((2,2))(X1)
    X2_input = Input(input_shape2)
    X2 = two_path(X2_input)
    # Merge the two streams at the feature level (not at the input).
    X2 = Concatenate()([X1,X2])
    X2 = Conv2D(5,(21,21),strides=(1,1),padding='valid',activation='relu')(X2)
    X2 = BatchNormalization()(X2)
    X2 = Activation('softmax')(X2)
    model = Model(inputs=[X1_input,X2_input],outputs=X2)
    return model
# + id="sAqw5R8vweug" colab_type="code" colab={}
# Build the MFC cascade for a 53x53 context patch and a 33x33 local patch.
m = MFCcascade((53,53,4),(33,33,4))
m.summary()
# + id="I3l16YuGsuYu" colab_type="code" colab={}
m.compile(optimizer='adam',loss='categorical_crossentropy',metrics=['accuracy'])
# + id="nnHOtrWqsdVw" colab_type="code" colab={}
m.save('trial_0001_MFCcascade_acc.h5')
# + id="irkvZbGfzSP6" colab_type="code" colab={}
# Input cascade needs the larger 65x65 context patch.
m1 = input_cascade((65,65,4),(33,33,4))
m1.summary()
# + id="jPr6MX-zgAC0" colab_type="code" colab={}
import os
# Work out of the mounted Google Drive dataset directory.
os.chdir('drive/brat')
# + id="b9DjejaYSVcU" colab_type="code" colab={}
def model_gen(input_dim, x, y, slice_no):
    """Extract paired patches and labels for the two-input cascade models.

    For every pixel (i, j) far enough from the border of the 175x195 slice
    `x`, collect an (input_dim x input_dim) context patch, a fixed 33x33
    local patch, and the ground-truth label y[i, slice_no, j].
    Returns [X1, X2, Y] as numpy arrays (context patches, local patches,
    labels).
    """
    half = int(input_dim / 2)
    print(half)
    context_patches, local_patches, labels = [], [], []
    for i in range(half, 175 - half):
        for j in range(half, 195 - half):
            local_patches.append(x[i - 16:i + 17, j - 16:j + 17, :])
            context_patches.append(x[i - half:i + half + 1, j - half:j + half + 1, :])
            labels.append(y[i, slice_no, j])
    return [np.asarray(context_patches), np.asarray(local_patches), np.asarray(labels)]
# + id="fyc_LhG_SVx2" colab_type="code" colab={}
def data_gen(path,slice_no,model_no):
    """Load one patient's MRI volumes and build patch datasets for a slice.

    Args:
        path: patient directory expected to contain 5 subdirectories —
              4 modality volumes plus (at sorted index 4) the ground truth.
        slice_no: index of the slice to extract patches from.
        model_no: 0 = single 33x33 patch set; 1/2/3 = paired patch sets of
                  context size 65/56/53 built via model_gen.

    Returns:
        [X, Y] when model_no == 0, otherwise [X1, X2, Y] from model_gen.
    """
    p = os.listdir(path)
    p.sort(key=str.lower)
    arr = []
    for i in range(len(p)):
        if(i != 4):
            p1 = os.listdir(path+'/'+p[i])
            p1.sort()
            img = sitk.ReadImage(path+'/'+p[i]+'/'+p1[-1])
            arr.append(sitk.GetArrayFromImage(img))
        else:
            # Sorted index 4 holds the ground-truth label volume.
            p1 = os.listdir(path+'/'+p[i])
            img = sitk.ReadImage(path+'/'+p[i]+'/'+p1[0])
            y = sitk.GetArrayFromImage(img)
    # Re-stack the 4 modality volumes into (196, 176, 216, 4) slices,
    # slicing each volume along its middle axis.
    data = np.zeros((196,176,216,4))
    for i in range(196):
        data[i,:,:,0] = arr[0][:,i,:]
        data[i,:,:,1] = arr[1][:,i,:]
        data[i,:,:,2] = arr[2][:,i,:]
        data[i,:,:,3] = arr[3][:,i,:]
    x = data[slice_no]
    if(model_no == 0):
        # Single-patch dataset: 33x33 windows over the slice interior.
        X1 = []
        for i in range(16,159):
            for j in range(16,199):
                X1.append(x[i-16:i+17,j-16:j+17,:])
        Y1 = []
        for i in range(16,159):
            for j in range(16,199):
                Y1.append(y[i,slice_no,j])
        X1 = np.asarray(X1)
        Y1 = np.asarray(Y1)
        d = [X1,Y1]
    elif(model_no == 1):
        d = model_gen(65,x,y,slice_no)
    elif(model_no == 2):
        d = model_gen(56,x,y,slice_no)
    elif(model_no == 3):
        d = model_gen(53,x,y,slice_no)
    return d
# + id="l13G56crSV1W" colab_type="code" colab={}
# Build the 53x53 / 33x33 paired dataset for slice 100 of patient LG/0001.
d = data_gen('LG/0001',100,3)
# + id="VNcrwh0_eLhA" colab_type="code" colab={}
# NOTE(review): compares the bound method object to 0 (always False);
# presumably meant `(d[2] == 0).all()`.
d[2].all == 0
# + id="2AQ4FnZ4S9aa" colab_type="code" colab={}
len(d[0])
# + id="nm-VUWyASV4U" colab_type="code" colab={}
# !pip3 install SimpleITK
# + id="ApcCWE_kSWJR" colab_type="code" colab={}
import SimpleITK as sitk
import numpy as np
# + id="z6kFD2BoSWNF" colab_type="code" colab={}
# One-hot label tensor shaped for the (1,1,5) conv output of the cascade.
y = np.zeros((17589,1,1,5))
# + id="3ykXWR6aSVv7" colab_type="code" colab={}
# NOTE(review): indexing with d[2][i] assumes integer labels — verify the
# ground-truth array's dtype.
for i in range(y.shape[0]):
    y[i,:,:,d[2][i]] = 1
# + id="a8MbsET_lITK" colab_type="code" colab={}
# Class distribution (fraction of samples per class) — shows imbalance.
sample = np.zeros((5,1))
for i in range(5):
    sample[i] = np.sum(y[:,:,:,i])
print(sample/np.sum(sample))
# + id="-jDfh_KSTGYh" colab_type="code" colab={}
X1 = np.asarray(d[0])
# + id="hvIGmdh9U7yH" colab_type="code" colab={}
X1.shape
# + id="0Dr_yMLaTGWA" colab_type="code" colab={}
X2 = np.asarray(d[1])
# + id="wOSoBQZ7U9cf" colab_type="code" colab={}
X2.shape
# + id="vEI0767dVR46" colab_type="code" colab={}
m1.inputs
# + id="LujuHlelTGUK" colab_type="code" colab={}
# NOTE(review): `f1_score` is not defined anywhere in this notebook at this
# point — this compile raises a NameError unless it was defined externally.
m.compile(optimizer='adam',loss='categorical_crossentropy',metrics=[f1_score])
# + id="BDUjM2drTGQ1" colab_type="code" colab={}
m_info = m.fit([X1,X2],y,epochs=20,batch_size=256)
# + id="PnhDVGN8TGOS" colab_type="code" colab={}
m.compile(optimizer='adam',loss='categorical_crossentropy',metrics=['accuracy'])
# + [markdown] id="r3Vzzfj-F4bb" colab_type="text"
# Slice 136 patient 0002
# + id="L9unxNJ0GBjv" colab_type="code" colab={}
# Balanced class weights to counter the heavy background-class skew.
from sklearn.utils import class_weight
class_weights = class_weight.compute_class_weight('balanced',
                                                 np.unique(d[2]),
                                                 d[2])
# + id="vNUs_MEpGBoY" colab_type="code" colab={}
class_weights
# + id="hTD1_MwBGBuC" colab_type="code" colab={}
import keras
model = keras.models.load_model('trial_0001_MFCcas_dim2_128_acc.h5')
# + id="3M-UhTnDGB_q" colab_type="code" colab={}
m_info = m.fit([X1,X2],y,epochs= 20,batch_size = 256,class_weight = class_weights)
# + id="ZLKHRdib01PP" colab_type="code" colab={}
import matplotlib.pyplot as plt
# Training-accuracy curve for the most recent fit.
plt.plot(m_info.history['acc'])
#plt.plot(m_info.history['val_acc'])
plt.title('model accuracy')
plt.ylabel('accuracy')
plt.xlabel('epoch')
plt.legend(['train', 'test'], loc='upper left')
plt.show()
# + id="1RMqW8zQ0H6e" colab_type="code" colab={}
m.save('trial_MFCcascade_acc.h5')
# + [markdown] id="gwNr5z5DTFPm" colab_type="text"
# eval on 128th slice 0002
# + id="tBL5kEi7TFgh" colab_type="code" colab={}
model.evaluate([X1,X2],y,batch_size = 1024)
# + id="mbH9DMlNG9VZ" colab_type="code" colab={}
model_info = model.fit([X1,X2],y,epochs=30,batch_size=256,class_weight= class_weights)
# + id="_U33DPJVJU4Y" colab_type="code" colab={}
import matplotlib.pyplot as plt
plt.plot(model_info.history['acc'])
#plt.plot(m_info.history['val_acc'])
plt.title('model accuracy')
plt.ylabel('accuracy')
plt.xlabel('epoch')
plt.legend(['train', 'test'], loc='upper left')
plt.show()
# + id="3lziDgWfJWj-" colab_type="code" colab={}
model.save('trial_0001_MFCcas_dim2_128_acc.h5')
# + [markdown] id="mQSajo4FftOE" colab_type="text"
# eval on 100th slice 0001
# + id="nzrrhhMsfxpu" colab_type="code" colab={}
model.evaluate([X1,X2],y,batch_size = 1024)
# + id="T9YzEZDuJfOu" colab_type="code" colab={}
# Round the softmax outputs and project one-hot rows onto class ids 0-4.
pred = model.predict([X1,X2],batch_size = 1024)
pred = np.around(pred)
pred1 = np.dot(pred.reshape(17589,5),np.array([0,1,2,3,4]))
y1 = np.dot(y.reshape(17589,5),np.array([0,1,2,3,4]))
# + id="ih34JtbkhL0K" colab_type="code" colab={}
y2 = np.argmax(y.reshape(17589,5),axis = 1)
y2.all() == 0
# + id="nzqkK_CqaNck" colab_type="code" colab={}
y1.all()==0
# + id="0gyXYBgmJgqE" colab_type="code" colab={}
from sklearn import metrics
# + id="oVuRmb5IZdfE" colab_type="code" colab={}
f1 = metrics.f1_score(y1,pred1,average='micro')
f1
# + id="hSCNIVMObhqR" colab_type="code" colab={}
p1 = metrics.precision_score(y1,pred1,average='micro')
p1
# + id="juoUfTgPbxtu" colab_type="code" colab={}
r1 = metrics.recall_score(y1,pred1,average='micro')
r1
# + id="BLKVwEU6b6yf" colab_type="code" colab={}
# NOTE(review): `pred2` is defined only in the NEXT cell — running this
# cell in order raises a NameError.
p2 = metrics.precision_score(y1,pred2,average='micro')
p2
# + id="K5K8wS3ZZgGd" colab_type="code" colab={}
# All-zeros baseline prediction for comparison.
pred2 = np.zeros((17589))
f2 = metrics.f1_score(y1,pred2,average='micro')
f2
# + [markdown] id="F7HQ0ZY9GCrL" colab_type="text"
# Slice 128 patient 0001
# + id="f7A623TIkCy4" colab_type="code" colab={}
from sklearn.utils import class_weight
# + id="9ahv4o9ikC7h" colab_type="code" colab={}
class_weights = class_weight.compute_class_weight('balanced',
np.unique(d[2]),
d[2])
# + id="pDX-NlqMg2c_" colab_type="code" colab={}
class_weights
# + id="qyw0msu-mS6c" colab_type="code" colab={}
m1.compile(optimizer='adam',loss='categorical_crossentropy',metrics=['accuracy'])
# + id="zQfSUXmrZDka" colab_type="code" colab={}
m1_info = m1.fit([X1,X2],y,epochs=20,batch_size=256,class_weight= class_weights)
# + [markdown] id="XHpzjw0XiP1R" colab_type="text"
# plot of inputcascade
# + id="KYA-a0zTwacP" colab_type="code" colab={}
import matplotlib.pyplot as plt
# + id="PoHH76zZiNjb" colab_type="code" colab={}
plt.plot(m1_info.history['acc'])
#plt.plot(m_info.history['val_acc'])
plt.title('model accuracy')
plt.ylabel('accuracy')
plt.xlabel('epoch')
plt.legend(['train', 'test'], loc='upper left')
plt.show()
# + id="3ZEXAAs5iU3g" colab_type="code" colab={}
m1.save('trial_0001_input_cascade_acc.h5')
# + id="Up0HYhUL2vIm" colab_type="code" colab={}
plt.plot(m_info.history['acc'])
#plt.plot(m_info.history['val_acc'])
plt.title('model accuracy')
plt.ylabel('accuracy')
plt.xlabel('epoch')
plt.legend(['train', 'test'], loc='upper left')
plt.show()
# + [markdown] id="I9qANEg_IYlf" colab_type="text"
# Training on slice 128, evaluating on 136
# + id="yR3BooKbHAzM" colab_type="code" colab={}
m.evaluate([X1,X2],y,batch_size = 1024)
# + id="edBYjuKXHXPw" colab_type="code" colab={}
m.save('trial_0001_MFCcas_dim2_128_acc.h5')
# + id="6oxdLZroKhXm" colab_type="code" colab={}
pred = m.predict([X1,X2],batch_size = 1024)
# + id="FUGQ9d5jLvp2" colab_type="code" colab={}
print(((pred != 0.) & (pred != 1.)).any())
# + id="Ag_48CJ6L8_0" colab_type="code" colab={}
pred = np.around(pred)
# + id="Lw5X_WarL9RE" colab_type="code" colab={}
type(y)
# + id="jF_O_oExPGXu" colab_type="code" colab={}
pred1 = np.dot(pred.reshape(17589,5),np.array([0,1,2,3,4]))
# + id="vtri4ExhP6Iv" colab_type="code" colab={}
pred1.shape
# + id="cgQkvvVKPtOV" colab_type="code" colab={}
y1 = np.dot(y.reshape(17589,5),np.array([0,1,2,3,4]))
# + id="T0sHLLLKKhi9" colab_type="code" colab={}
from sklearn import metrics
# + id="CcB5r9aZKiFP" colab_type="code" colab={}
f1 = metrics.f1_score(y1,pred1,average='micro')
f1
# + id="8oC5O024R_DO" colab_type="code" colab={}
pred2 = np.zeros((17589,1))
f1 = f1 = metrics.f1_score(y1,pred2,average='micro')
f1
# + id="jlqhXHKkQ9QN" colab_type="code" colab={}
f1 = metrics.f1_score(y1,pred1,average='weighted')
f1
# + id="EFBIwoSURUen" colab_type="code" colab={}
f1 = metrics.f1_score(y1,pred1,average='macro')
f1
# + id="vxO5TfhjmD8A" colab_type="code" colab={}
plt.plot(m_info.history['acc'])
#plt.plot(m_info.history['val_acc'])
plt.title('model accuracy')
plt.ylabel('accuracy')
plt.xlabel('epoch')
plt.legend(['train', 'test'], loc='upper left')
plt.show()
# + id="w9e_7_ONvmIV" colab_type="code" colab={}
m_info = m.fit([X1,X2],y,epochs=20,batch_size=256,class_weight= 10*class_weights)
# + id="4OBR2KwDwQmm" colab_type="code" colab={}
plt.plot(m_info.history['acc'])
#plt.plot(m_info.history['val_acc'])
plt.title('model accuracy')
plt.ylabel('accuracy')
plt.xlabel('epoch')
plt.legend(['train', 'test'], loc='upper left')
plt.show()
# + id="QT-eYsOxmGUx" colab_type="code" colab={}
import matplotlib.pyplot as plt
# + id="LC8jMBpmZT-b" colab_type="code" colab={}
m.save('trial_0001_MFCcas_dim2_128_acc.h5')
# + id="DcNy4_hAshI-" colab_type="code" colab={}
import keras
# + id="kKTL7TgjUEs5" colab_type="code" colab={}
def two_pathcnn(input_shape):
    """Build a two-pathway patch CNN (TwoPathCNN-style) as a Keras Model.

    Each pair of parallel Conv2D+BatchNorm branches combined with
    ``layers.Maximum()`` forms a maxout-style feature map.  A local pathway
    (7x7 then 3x3 convs) and a global pathway (13x13 convs) are concatenated
    and reduced to a 5-class softmax map by a final 21x21 convolution.

    Parameters
    ----------
    input_shape : tuple
        Patch shape, e.g. ``(33, 33, 4)`` as used elsewhere in this
        notebook (33x33 pixels, 4 MRI modalities).  For a 33x33 input both
        pathways shrink to 21x21, so the output is a (1, 1, 5) softmax map.

    Returns
    -------
    keras.Model mapping the input patch to the class-probability map.
    """
    X_input = Input(input_shape)
    # Local pathway, stage 1: maxout over two parallel 7x7 conv branches.
    X = Conv2D(64,(7,7),strides=(1,1),padding='valid')(X_input)
    X = BatchNormalization()(X)
    X1 = Conv2D(64,(7,7),strides=(1,1),padding='valid')(X_input)
    X1 = BatchNormalization()(X1)
    X = layers.Maximum()([X,X1])
    X = Conv2D(64,(4,4),strides=(1,1),padding='valid',activation='relu')(X)
    # Global pathway: maxout over two 13x13 conv branches on the raw input.
    X2 = Conv2D(160,(13,13),strides=(1,1),padding='valid')(X_input)
    X2 = BatchNormalization()(X2)
    X21 = Conv2D(160,(13,13),strides=(1,1),padding='valid')(X_input)
    X21 = BatchNormalization()(X21)
    X2 = layers.Maximum()([X2,X21])
    # Local pathway, stage 2: maxout over two 3x3 conv branches.
    X3 = Conv2D(64,(3,3),strides=(1,1),padding='valid')(X)
    X3 = BatchNormalization()(X3)
    X31 = Conv2D(64,(3,3),strides=(1,1),padding='valid')(X)
    X31 = BatchNormalization()(X31)
    X = layers.Maximum()([X3,X31])
    X = Conv2D(64,(2,2),strides=(1,1),padding='valid',activation='relu')(X)
    # Fuse the two pathways, then classify with a 21x21 conv -> softmax.
    X = Concatenate()([X2,X])
    X = Conv2D(5,(21,21),strides=(1,1),padding='valid')(X)
    X = Activation('softmax')(X)
    model = Model(inputs = X_input, outputs = X)
    return model
# + id="g65XgHCVNjG_" colab_type="code" colab={}
import os
# + id="qtCNZjdEYdpI" colab_type="code" colab={}
m0 = two_pathcnn((33,33,4))
m0.summary()
# + id="YzWnu9qENpex" colab_type="code" colab={}
os.chdir('drive/brat')
# + id="8BzGy_BHU8g5" colab_type="code" colab={}
# !ls
# + [markdown] id="jArTgu1mYbDX" colab_type="text"
# for training over entire image, create batch of patches for one image, batch of labels in Y
# + id="R52SBQaaN3fu" colab_type="code" colab={}
import h5py
import numpy as np
# + id="VRG2Y_KGYrku" colab_type="code" colab={}
hf = h5py.File('data_trial_dim2_128.h5', 'r')
X = hf.get('dataset_1')
Y = hf.get('dataset_2')
# + id="1n4VWsNqjDOd" colab_type="code" colab={}
y = np.zeros((26169,1,1,5))
# + id="HjCGoxKTjStY" colab_type="code" colab={}
for i in range(y.shape[0]):
y[i,:,:,Y[i]] = 1
# + id="Tcx4_Tz7BuYv" colab_type="code" colab={}
X = np.asarray(X)
# + id="LuQRiz28B0-p" colab_type="code" colab={}
X.shape
# + id="iHYYek1PGiho" colab_type="code" colab={}
keras.__version__
# + id="7rBbjTV_HKB7" colab_type="code" colab={}
import keras.backend as K
def f1_score(y_true, y_pred):
    """Batch F1 metric on hard (rounded) labels, for use as a Keras metric.

    The computation is unchanged from the original cell, but the
    intermediate names/comments there were swapped: ``c1/c2`` is true
    positives over *actual* positives (recall) and ``c1/c3`` is true
    positives over *predicted* positives (precision).  F1 is symmetric in
    the two, so the returned value is identical -- only the labels are
    corrected here.
    """
    # True positives: predicted 1 where the truth is 1.
    true_pos = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))
    # All actual positives in y_true.
    actual_pos = K.sum(K.round(K.clip(y_true, 0, 1)))
    # All predicted positives in y_pred.
    predicted_pos = K.sum(K.round(K.clip(y_pred, 0, 1)))
    # If nothing was predicted positive, define F1 as 0 (same guard as the
    # original; its comment about "no true samples" was inaccurate).
    # NOTE(review): comparing a backend tensor with ``== 0`` only evaluates
    # sensibly on concrete/eager tensors, not symbolic graph tensors.
    if predicted_pos == 0:
        return 0
    recall = true_pos / actual_pos
    precision = true_pos / predicted_pos
    # Harmonic mean of precision and recall.
    return 2 * (precision * recall) / (precision + recall)
# + id="EeHXZrYUZxyN" colab_type="code" colab={}
from sklearn.utils import class_weight
# + id="UdWiVL_kZ6K0" colab_type="code" colab={}
class_weights = class_weight.compute_class_weight('balanced',
np.unique(Y),
Y)
# + id="boPmoQwxe50Z" colab_type="code" colab={}
# + id="QR_-oP1EOB-M" colab_type="code" colab={}
m0.compile(optimizer='adam',loss='categorical_hinge',metrics=[f1_score])
# + id="KyA-yBHTO_tW" colab_type="code" colab={}
m0_info = m0.fit(X,y,epochs=20,batch_size=1024,class_weight = class_weights)
# + id="kmGNE1aOcUN6" colab_type="code" colab={}
m0.save('trial_0001_dim2_128_f1.h5')
# + id="CkDsXfH8cl5O" colab_type="code" colab={}
# !ls
# + id="JxvSP4fUTkj2" colab_type="code" colab={}
m0.compile(optimizer='adam',loss='categorical_crossentropy',metrics=['accuracy'])
# + id="jEI-h7EEbhWS" colab_type="code" colab={}
m0_info = m0.fit(X,y,epochs=20,batch_size=4096,class_weight = class_weights)
# + id="7oUOLbDUdz9I" colab_type="code" colab={}
m0.save('trial_0001_dim2_128_accuracy.h5')
# + id="szFEqCnvcp5T" colab_type="code" colab={}
# !ls
# + id="znv9mwx2xonf" colab_type="code" colab={}
mod = keras.models.load_model('trial_0001_81_accuracy.h5')
# + id="RhpsTdGUzH_r" colab_type="code" colab={}
mod.evaluate(X,y,batch_size = 1024)
# + id="8R6h8URV0_GI" colab_type="code" colab={}
pred = m0.predict(X,batch_size = 1024)
# + id="Now748jY1upl" colab_type="code" colab={}
pred.shape
# + id="zEMDqkhvmDTX" colab_type="code" colab={}
pred = np.floor(pred)
# + id="R07kO7eLl1HW" colab_type="code" colab={}
y.reshape(26169,5)
# + id="a_zAutNQmNyv" colab_type="code" colab={}
pred.astype(int)
# + id="ByK4ldMBiCaX" colab_type="code" colab={}
pred = pred.reshape(26169,5)
y_pred = np.floor(np.dot(pred,np.array([0,1,2,3,4])))
y_pred.reshape(26169,1)
# + id="ft5MP-UGjaPC" colab_type="code" colab={}
y_pred.shape
# + id="dYMNZchKoUC-" colab_type="code" colab={}
print(((y_pred != 0.) & (y_pred != 1.)).any())
# + id="6tUKdKLm17dF" colab_type="code" colab={}
from matplotlib import pyplot as plt
plt.imshow(np.uint8(y_pred*32))
plt.show()
# + id="xFyViJgY5W1v" colab_type="code" colab={}
from sklearn import metrics
# + id="scB7FvW0fONE" colab_type="code" colab={}
f1 = metrics.f1_score(y,pred)
# + id="q0rVMeaxfw6I" colab_type="code" colab={}
# !pip3 install SimpleITK
# + id="E7QS9aQMuaxV" colab_type="code" colab={}
import SimpleITK as sitk
import numpy as np
# + id="ksUKU914ucuO" colab_type="code" colab={}
path = 'LG/0001'
p = os.listdir(path)
p.sort(key=str.lower)
arr = []
for i in range(len(p)):
if(i != 4):
p1 = os.listdir(path+'/'+p[i])
p1.sort()
img = sitk.ReadImage(path+'/'+p[i]+'/'+p1[-1])
arr.append(sitk.GetArrayFromImage(img))
else:
p1 = os.listdir(path+'/'+p[i])
img = sitk.ReadImage(path+'/'+p[i]+'/'+p1[0])
Y_labels = sitk.GetArrayFromImage(img)
data = np.zeros((Y_labels.shape[1],Y_labels.shape[0],Y_labels.shape[2],4))
for i in range(196):
data[i,:,:,0] = arr[0][:,i,:]
data[i,:,:,1] = arr[1][:,i,:]
data[i,:,:,2] = arr[2][:,i,:]
data[i,:,:,3] = arr[3][:,i,:]
# + id="ZA2jG-uAuezU" colab_type="code" colab={}
def model_gen(input_dim,x,y,slice_no):
    """Collect cascade training patch pairs from one image slice.

    For every pixel far enough from the border, gathers
    * a large ``input_dim`` x ``input_dim`` context patch (X1),
    * the fixed 33x33 centre patch (X2), and
    * the ground-truth label of the centre pixel (Y),
    skipping positions whose 33x33 neighbourhood is entirely zero.

    Parameters
    ----------
    input_dim : int
        Side length of the large context patch (odd; 65/56/53 are used).
    x : ndarray
        One slice, shape (H, W, 4); the loop bounds assume H >= 175 and
        W >= 195 as elsewhere in this notebook.
    y : ndarray
        Label volume, indexed as ``y[i, slice_no, j]``.
    slice_no : int
        Slice index inside ``y``.

    Returns
    -------
    list of ndarray
        ``[X1, X2, Y]`` stacked along the first axis.
    """
    half = input_dim // 2  # margin keeping the big patch inside the slice
    X1 = []
    X2 = []
    Y = []
    for i in range(half, 175 - half):
        for j in range(half, 195 - half):
            # BUG FIX: the original tested ``.any != 0`` -- comparing the
            # bound method object itself, which is always truthy, so every
            # position was kept.  Call ``.any()`` to skip all-zero patches.
            if x[i-16:i+17, j-16:j+17, :].any():
                X2.append(x[i-16:i+17, j-16:j+17, :])
                X1.append(x[i-half:i+half+1, j-half:j+half+1, :])
                Y.append(y[i, slice_no, j])
    return [np.asarray(X1), np.asarray(X2), np.asarray(Y)]
# + id="ysYx5d6auhE_" colab_type="code" colab={}
def data_gen(data,y,slice_no,model_no):
    """Build the training set for one slice for the requested model.

    * ``model_no == 0`` -> single 33x33 patch inputs: returns ``[X1, Y1]``.
    * ``model_no`` 1/2/3 -> cascade patch pairs via ``model_gen`` with a
      large patch of 65/56/53 pixels: returns ``[X1, X2, Y]``.

    Returns ``[]`` when the slice (or the label volume) is entirely zero.
    """
    d = []
    x = data[slice_no]
    # Skip empty slices / empty label volumes outright.
    if x.any() != 0 and y.any() != 0:
        if model_no == 0:
            X1 = []
            Y1 = []
            # BUG FIX: the original tested ``.all != 0`` (a bound method,
            # always truthy), so no patch was ever filtered; call ``.all()``
            # to keep only fully non-zero patches.  The original's two
            # loops (one for X1, one for Y1) used the same predicate, so
            # merging them preserves the X1/Y1 alignment.
            for i in range(16, 159):
                for j in range(16, 199):
                    patch = x[i-16:i+17, j-16:j+17, :]
                    if patch.all():
                        X1.append(patch)
                        Y1.append(y[i, slice_no, j])
            d = [np.asarray(X1), np.asarray(Y1)]
        elif model_no == 1:
            d = model_gen(65, x, y, slice_no)
        elif model_no == 2:
            d = model_gen(56, x, y, slice_no)
        elif model_no == 3:
            d = model_gen(53, x, y, slice_no)
    return d
# + id="flsyxngLumkg" colab_type="code" colab={}
from sklearn.utils import class_weight
# + id="NIJQtuExvNMV" colab_type="code" colab={}
m0.compile(optimizer='adam',loss='categorical_crossentropy',metrics=['accuracy'])
# + id="aQDLM0pUuoxT" colab_type="code" colab={}
info = []
for i in range(90,data.shape[0],2):
d = data_gen(data,Y_labels,i,0)
if(len(d) != 0):
y = np.zeros((d[-1].shape[0],1,1,5))
for j in range(y.shape[0]):
y[j,:,:,d[-1][j]] = 1
X1 = d[0]
class_weights = class_weight.compute_class_weight('balanced',
np.unique(d[-1]),
d[-1])
print('slice no:'+str(i))
info.append(m0.fit(X1,y,epochs=2,batch_size=32,class_weight= class_weights))
m0.save('trial_0001_2path_acc.h5')
# + id="vhGYnI_qu8Dx" colab_type="code" colab={}
| model.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # selenium
# Selenium is suitable for scraping a small number of pages/items.
#
# Pro: it is simple -- no need to work out which request produces the data.
#
# Con: it is slow; scraping a large number of pages this way takes forever.
# https://selenium-python.readthedocs.io/
# !pip install selenium
# !pip freeze | grep selenium
# # !pip install selenium
# !pip freeze | grep selenium
# ## Selenium automation: how to launch the Safari browser
# https://www.cnblogs.com/mini-monkey/p/12189290.html
# +
# Launch Safari
from selenium import webdriver
from time import sleep
driver = webdriver.Safari()  # start the Safari browser
# Maximize the browser window
driver.maximize_window()
sleep(3)
driver.get('http://moni.51hupai.org/')  # open the 51hupai plate-auction simulator
sleep(3)
driver.find_element_by_css_selector('.whcusraiseinput').send_keys('1000')
sleep(3)
driver.find_element_by_css_selector('.whcusraisebtn').click()
# driver.quit()  # kill the Safari process when done
# -
# ## Drivers
# Selenium requires a driver to interface with the chosen browser.
#
# Make sure it’s in your PATH, e. g., place it in /usr/bin or /usr/local/bin.
#
# Chrome: https://sites.google.com/a/chromium.org/chromedriver/downloads
# # Getting Started
# https://selenium-python.readthedocs.io/getting-started.html
# ## Simple Usage
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
driver = webdriver.Chrome()
driver.get("http://www.python.org")
driver.title
assert "Python" in driver.title
elem = driver.find_element_by_name("q")
elem
elem.text
elem.clear()
elem.send_keys("pycon")
elem.send_keys(Keys.RETURN)
driver.page_source
driver.page_source.find('No results found.')
assert "No results found." not in driver.page_source
driver.close()
# ## Using Selenium to write tests
# !python test_python_org_search.py
# ## Navigating 导航
# https://selenium-python.readthedocs.io/navigating.html
driver.get("http://www.google.com")
# ### Interacting with the page
# Example form element from the docs (HTML, not Python -- kept as a comment
# so the script stays syntactically valid; the bare tag was a SyntaxError):
# <input type="text" name="passwd" id="passwd-id" />
element = driver.find_element_by_id("passwd-id")
element = driver.find_element_by_name("passwd")
element = driver.find_element_by_xpath("//input[@id='passwd-id']")
# you may want to enter some text into a text field:
element.send_keys("some <PASSWORD>")
# You can simulate pressing the arrow keys by using the “Keys” class:
element.send_keys(" and some", Keys.ARROW_DOWN)
# It is possible to call send_keys on any element, which makes it possible to test keyboard shortcuts such as those used on GMail. A side-effect of this is that typing something into a text field won’t automatically clear it. Instead, what you type will be appended to what’s already there.
# +
# You can easily clear the contents of a text field or textarea with the clear method:
element.clear()
# -
# ### Filling in forms
# ## Locating Elements
# * find_element_by_id
# * find_element_by_name
# * find_element_by_xpath
# * find_element_by_link_text
# * find_element_by_partial_link_text
# * find_element_by_tag_name
# * find_element_by_class_name
# * find_element_by_css_selector
#
# To find multiple elements (these methods will return a list):
#
# * find_elements_by_name
# * find_elements_by_xpath
# * find_elements_by_link_text
# * find_elements_by_partial_link_text
# * find_elements_by_tag_name
# * find_elements_by_class_name
# * find_elements_by_css_selector
# ## Waits
# These days most of the web apps are using AJAX techniques. When a page is loaded by the browser, the elements within that page may load at different time intervals. This makes locating elements difficult: if an element is not yet present in the DOM, a locate function will raise an ElementNotVisibleException exception. Using waits, we can solve this issue. Waiting provides some slack between actions performed - mostly locating an element or any other operation with the element.
#
# Selenium Webdriver provides two types of waits - implicit & explicit. An explicit wait makes WebDriver wait for a certain condition to occur before proceeding further with execution. An implicit wait makes WebDriver poll the DOM for a certain amount of time when trying to locate an element.
#
#
# ### Explicit Waits 显示等待
# An explicit wait is a code you define to wait for a certain condition to occur before proceeding further in the code. The extreme case of this is time.sleep(), which sets the condition to an exact time period to wait. There are some convenience methods provided that help you write code that will wait only as long as required. WebDriverWait in combination with ExpectedCondition is one way this can be accomplished.
# +
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
driver = webdriver.Firefox()
driver.get("http://somedomain/url_that_delays_loading")
try:
element = WebDriverWait(driver, 10).until(
EC.presence_of_element_located((By.ID, "myDynamicElement"))
)
finally:
driver.quit()
# -
# This waits up to 10 seconds before throwing a TimeoutException unless it finds the element to return within 10 seconds. WebDriverWait by default calls the ExpectedCondition every 500 milliseconds until it returns successfully. A successful return is for ExpectedCondition type is Boolean return true or not null return value for all other ExpectedCondition types.
# #### Expected Conditions
#
# There are some common conditions that are frequently of use when automating web browsers. Listed below are the names of each. Selenium Python binding provides some convenience methods so you don’t have to code an expected_condition class yourself or create your own utility package for them.
# Built-in expected_condition helpers.  These were bare identifiers in the
# original cell, which raises NameError when executed (they live in the
# ``EC`` module imported elsewhere as ``expected_conditions``); listed as a
# comment instead:
# title_is
# title_contains
# presence_of_element_located
# visibility_of_element_located
# visibility_of
# presence_of_all_elements_located
# text_to_be_present_in_element
# text_to_be_present_in_element_value
# frame_to_be_available_and_switch_to_it
# invisibility_of_element_located
# element_to_be_clickable
# staleness_of
# element_to_be_selected
# element_located_to_be_selected
# element_selection_state_to_be
# element_located_selection_state_to_be
# alert_is_present
# ### Implicit Waits
# An implicit wait tells WebDriver to poll the DOM for a certain amount of time when trying to find any element (or elements) not immediately available. The default setting is 0. Once set, the implicit wait is set for the life of the WebDriver object.
#
#
# +
from selenium import webdriver
driver = webdriver.Firefox()
driver.implicitly_wait(10) # seconds
driver.get("http://somedomain/url_that_delays_loading")
myDynamicElement = driver.find_element_by_id("myDynamicElement")
# -
| crawl/selenium_ops.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Parkinson's Disease Prediction
#importing libraries
import pandas as pd
import numpy as np
from sklearn.metrics import accuracy_score
# Read data
df = pd.read_csv("../Datasets/parkinsons.data")
df.head()
# Getting the dependent and independent variables from dataset
# X drops the 'status' target and (via [:,1:]) the first remaining column,
# presumably the non-numeric 'name' identifier -- confirm against the file.
X = df.loc[:,df.columns!='status'].values[:,1:]
y = df.loc[:,'status'].values
print(X)
print(y)
# Heatmap visualisation for each attribute coefficient correlation.
import seaborn as sb
corr_map=df.corr()
sb.heatmap(corr_map,square=True)
# Counting the zeros and ones in status (class-balance check)
print(y[y==1].shape[0])
print(y[y==0].shape[0])
# +
# Splitting the dataset into Training and Testing sets (80/20, fixed seed)
from sklearn.model_selection import train_test_split
X_train,X_test,y_train,y_test = train_test_split(X,y,test_size=0.2,random_state=1)
# -
# Feature Scaling using MinMaxScaler -- fitted on the training split only,
# then applied to the test split (avoids test-set leakage).
from sklearn.preprocessing import MinMaxScaler
mn = MinMaxScaler()
X_train = mn.fit_transform(X_train)
X_test = mn.transform(X_test)
# Using XGBoost Classifier to train the model
from xgboost import XGBClassifier
classifier = XGBClassifier()
classifier.fit(X_train,y_train)
# Making Confusion Matrix
from sklearn.metrics import confusion_matrix , accuracy_score
y_pred = classifier.predict(X_test)
cm = confusion_matrix(y_test,y_pred)
print(cm)
print(accuracy_score(y_test,y_pred)*100)
# Creating a pickle file with the trained classifier for later reuse
import pickle
with open('parkinson_model.pkl','wb') as f:
    pickle.dump(classifier,f)
| Notebooks/ParkinsonPrediction.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Demo EEFxTMS_2F (equatorial electric field)
#
# > Authors: <NAME>
# >
# > Abstract: Access to the equatorial electric field (level 2 product).
# %load_ext watermark
# %watermark -i -v -p viresclient,pandas,xarray,matplotlib
# +
from viresclient import SwarmRequest
import datetime as dt
import numpy as np
import matplotlib.pyplot as plt
request = SwarmRequest()
# -
# ## EEFxTMS_2F product information
#
# Dayside equatorial electric field, sampled at every dayside equator crossing +- 20mins
#
#
# Documentation:
# - https://earth.esa.int/web/guest/missions/esa-eo-missions/swarm/data-handbook/level-2-product-definitions#EEFxTMS_2F
# ### Check what "EEF" data variables are available
request.available_collections("EEF", details=False)
request.available_measurements("EEF")
# ## Fetch all the EEF and EEJ values from Bravo during 2016
request.set_collection("SW_OPER_EEFBTMS_2F")
request.set_products(measurements=["EEF", "EEJ", "Flags"])
data = request.get_between(
dt.datetime(2016,1,1),
dt.datetime(2017,1,1)
)
# The first three and last three source (daily) files
data.sources[:3], data.sources[-3:]
df = data.as_dataframe()
df.head()
ax = df.plot(y="EEF", figsize=(20,10))
ax.set_ylim((-2, 2));
ax.set_ylabel("EEF [mV/m]");
# Take a look at the time jumps between entries... Nominally the product should produce one measurement "every dayside equator crossing ±20 minutes"
times = df.index
delta_t_minutes = [t.seconds/60 for t in np.diff(times.to_pydatetime())]
print("Range of time gaps (in minutes) between successive measurements:")
np.unique(np.sort(delta_t_minutes))
# ## Access the EEJ estimate via xarray instead of pandas
#
# Since the EEJ estimate has both time and latitude dimensions, it is not suited to pandas. Here we load the data as a `xarray.Dataset` which better handles n-dimensional data.
ds = data.as_xarray()
ds
# Let's select a subset (one month) and visualise it:
# +
_ds = ds.sel({"Timestamp": "2016-01"})
fig, ax1 = plt.subplots(nrows=1, figsize=(15,3), sharex=True)
_ds.plot.scatter(x="Timestamp", y="EEJ_QDLat", hue="EEJ", vmax=10, s=1, ax=ax1)
| notebooks/03f__Demo-EEFxTMS_2F.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3.9.10 64-bit
# language: python
# name: python3
# ---
import pandas as pd
data = pd.read_excel('p2data.xlsx',sheet_name='Sheet1')
data
# Drop every row whose Screen value is non-zero, i.e. keep only Screen == 0
# responses.  NOTE(review): the name `empty` is misleading -- these are the
# screened-out row indices, not empty rows.
empty = data[data.Screen!=0].index
for i in empty:
    data.drop(i,inplace=True)
data
data = data.reset_index(drop=True)
data
# Analysis 1: simple predictor/criterion correlations
data['Intelligence'].corr(data['Performance'])
data['Integrity'].corr(data['CWB'])
data['Con'].corr(data['Quit'])
# Analysis 2: correlation of each predictor family with Performance
# a. Ability data (Aptitude or Intelligence)
print(data['Aptitude'].corr(data['Performance']))
print(data['Intelligence'].corr(data['Performance']))
# b. Biodata (GPA, Interview, or Work Experience)
print(data['GPA'].corr(data['Performance']))
print(data['Integrity'].corr(data['Performance']))
print(data['Experience'].corr(data['Performance']))
# c. Demographic data (Age or Sex)
print(data['Age'].corr(data['Performance']))
print(data['Sex'].corr(data['Performance']))
# d. Personality data (Conscientiousness or Integrity)
print(data['Con'].corr(data['Performance']))
print(data['Integrity'].corr(data['Performance']))
# Multiple regression on Aptitude, Experience, Age, Conscientiousness.
# NOTE(review): the original comment said "GPA" but the code selects
# 'Experience' -- confirm which was intended.
from sklearn import linear_model
# +
X = data[['Aptitude','Experience','Age','Con']]
y = data['Performance']
regr = linear_model.LinearRegression()
regr.fit(X,y)
regr.score(X,y)
# -
from sklearn.metrics import mean_squared_error, r2_score
# In-sample predictions (no train/test split here)
y_pred = regr.predict(X)
print("Coefficients: \n", regr.coef_)
# The mean squared error
print("Mean squared error: %.4f" % mean_squared_error(y, y_pred))
# The coefficient of determination: 1 is perfect prediction
print("Coefficient of determination: %.4f" % r2_score(y, y_pred))
# Decision Tree regressor on the same predictors (in-sample fit)
from sklearn import tree
clf = tree.DecisionTreeRegressor()
clf = clf.fit(X,y)
clf.score(X,y)
text_representation = tree.export_text(clf)
print(text_representation)
| DataProject/analysis.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Performing unit conversions
#
# Conversion of timeseries data units is one of the most tedious aspects of modelling and scenario analysis -
# and it is a frequent source for errors!
#
# The **pyam** function [convert_unit()](https://pyam-iamc.readthedocs.io/en/stable/api/iamdataframe.html#pyam.IamDataFrame.convert_unit) can support and simplify this task.
# The function uses the Python package [pint](https://pint.readthedocs.io),
# which natively handles conversion of standard (SI) units and commonly used equivalents
# (e.g., exajoule to terawatt-hours, `EJ -> TWh`).
# The **pint** package can also parse combined units (e.g., exajoule per year, `EJ/yr`).
#
# To better support common use cases when working with energy systems analysis and integrated-assessment scenarios,
# the default [pint.UnitRegistry](https://pint.readthedocs.io/en/stable/developers_reference.html#pint.UnitRegistry)
# used by **pyam** loads the unit definitions collected at [IAMconsortium/units](https://github.com/IAMconsortium/units).
# This repository provides a wide range of conversion factors in a **pint**-compatible format
# so that they can easily be used across multiple applications (**pyam** is just one of them).
#
# <div class="alert alert-info">
#
# If you have suggestions for additional units to be handled in **pyam** by default,
# please [start an issue](https://github.com/IAMconsortium/units/issues) in the units repository -
# or make a pull request!
#
# </div>
#
# ## Overview
#
# This notebook illustrates the following features:
#
# 0. Define timeseries data and initialize an **IamDataFrame**
# 1. Use the default **pint** unit conversion
# 2. Use a unit & conversion factor from the [IAMconsortium/units](https://github.com/IAMconsortium/units) repository
# 3. Use a custom conversion factor
# 4. Use contexts to specify conversion metrics
# 5. More advanced use cases with a unit registry
import pandas as pd
import pyam
# ## 0. Define timeseries data and initialize an IamDataFrame
#
# This tutorial uses a scenario similar to the data in the **first-steps tutorial** (here on
# [GitHub](https://github.com/IAMconsortium/pyam/blob/master/doc/source/tutorials/pyam_first_steps.ipynb)
# and on [read the docs](https://pyam-iamc.readthedocs.io/en/stable/tutorials/pyam_first_steps.html)).
# Please read that tutorial for the reference and further information.
# +
UNIT_DF = pd.DataFrame([
['MESSAGEix-GLOBIOM 1.0', 'CD-LINKS_NPi', 'World', 'Primary Energy', 'EJ/yr', 500.74, 636.79, 809.93, 1284.78],
['MESSAGEix-GLOBIOM 1.0', 'CD-LINKS_NPi', 'World', 'Emissions|CH4', 'Mt CH4/yr', 327.92, 354.35, 377.88, 403.98],
],
columns=pyam.IAMC_IDX + [2010, 2030, 2050, 2100],
)
df = pyam.IamDataFrame(UNIT_DF)
df.timeseries()
# -
# ## 1. Use the default pint unit conversion
#
# As a first step, we illustrate unit conversion between "standard formats",
# i.e., units that **pint** knows by default.
#
# In this particular case, we convert exajoule to petawatthours, `EJ/yr -> PWh/yr`.
# Note that the timeseries data for other units (CO2 emissions in this case) are not changed.
df.convert_unit('EJ/yr', to='PWh/yr').timeseries()
# The **pint** package usually does a good job at parsing orders of magnitude (peta, giga, mega, milli, ...)
# and their abbreviations (`P`, `G`, `M`, `m`, ...)
# as well as common units (centimeter, inch, kilometer, mile).
# It also handles combined units like exajoule per year with various spellings:
# `PWh/yr`, `PWh / yr` and `petawatthour / year` will all be treated as synomyms by the conversion.
# The only difference is the format in the resulting **IamDataFrame**.
#
# [Read the docs](https://pint.readthedocs.io) for more information!
df.convert_unit('EJ/yr', to='petawatthour / year').timeseries()
# ## 2. Use a conversion factor from the shared energy & IAM units repository
#
# The **pint** package includes standard units, but many units often encountered in the context of energy systems analysis and integrated assessment scenarios are not defined by default.
#
# Therefore, the [IAMconsortium/units](https://github.com/IAMconsortium/units) repository
# provides a common location to define such units.
# The **pyam** package loads these definitions and uses them by default in any unit conversion.
#
# One entry defined there is 'tons of coal equivalent' (`tce`) as a measure of energy (content).
# This is used in the next cell.
df.convert_unit('EJ/yr', to='Gtce/yr').timeseries()
# ## 3. Use a custom conversion factor
#
# In some cases, a user needs to specify a custom unit.
# The `convert_unit()` function supports that by specifying a `factor` as a keyword argument.
df.convert_unit('EJ/yr', to='my_unit', factor=2.3).timeseries()
# ## 4. Use contexts to specify conversion metrics
#
# There are unit conversions where no "default" factor exists.
# One such case is calculating the CO2-equivalent of CH4 emissions (or other greenhouse gases),
# because the conversion depends on the species' "global warming potential"
# and estimates for that potential are updated regularly in the literature.
#
# To facilitate such use cases, **pint** provides "contexts" to allow specifying the appropriate metric.
# The [IAMconsortium/units](https://github.com/IAMconsortium/units) parametrizes multiple contexts
# for the conversion of greenhouse gases;
# see the [emissions module](https://github.com/IAMconsortium/units/blob/master/modules/emissions) for details.
#
# Performing a unit conversion with context is illustrated below using the IPCC AR5-GWP100 factor;
# in this situation, not specifying a context would result in a **pint.DimensionalityError**.
df.convert_unit('Mt CH4/yr', to='Mt CO2e/yr', context='AR5GWP100').timeseries()
# <div class="alert alert-info">
#
# When working with contexts, it is important to track the information which metric was used.
# This can be done either in the metadata of the resulting data (file)
# or directly in the unit (or variable) of the timeseries.
# See an illustration below for a simple workflow.
#
# </div>
gwp = 'AR5GWP100'
target = 'Mt CO2e/yr'
(
df.convert_unit('Mt CH4/yr', to=target, context=gwp)
.rename(unit={target: f'{target} ({gwp})'})
.timeseries()
)
# ## 5. More advanced use cases with a unit registry
#
# For more advanced use cases, **pyam** supports two further features: first, it can sometimes be useful
# to work with the **UnitRegistry** used by default directly. This registry can be accessed
# via [pint.get_application_registry()](https://pint.readthedocs.io/en/latest/developers_reference.html#pint.get_application_registry).
import pint
pint.get_application_registry()
# In other use cases, it can be helpful to use one (or several) specific registries
# instead of the default application registry.
# The `convert_unit()` function therefore allows passing a `registry` as a keyword argument.
#
# The specifications below are the same as the example in section 3.
# +
ureg = pint.UnitRegistry()
ureg.define('my_unit = 1 / 2.3 * EJ/yr')
df.convert_unit('EJ/yr', to='my_unit', registry=ureg).timeseries()
| doc/source/tutorials/unit_conversion.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import numpy as np
import matplotlib.pyplot as pp
import seaborn
# %matplotlib inline
# Fetch the GHCN-Daily station inventory from NOAA.
# Fixed: the standard-library module is ``urllib.request`` — the original
# ``import url.request`` raised ModuleNotFoundError, and the next line
# already referred to ``urllib.request`` anyway.
import urllib.request

urllib.request.urlretrieve(
    'ftp://ftp.ncdc.noaa.gov/pub/data/ghcn/daily/ghcnd-stations.txt',
    'stations.txt')
# Peek at the first few lines of the fixed-width inventory file.
open('stations.txt','r').readlines()[:10]
# +
# Build a station-id -> station-name map for GSN (GCOS Surface Network) stations.
# Fixed: open the inventory with a context manager so the file handle is closed
# deterministically instead of leaking until garbage collection.
stations = {}
with open('stations.txt', 'r') as inventory:
    for line in inventory:
        if 'GSN' in line:
            fields = line.split()
            # fields[0] is the station id; the free-text name starts at field 4.
            stations[fields[0]] = ' '.join(fields[4:])
# -
len(stations)
def findstation(s):
    """Print every station (code -> name) whose name contains the substring *s*."""
    matches = {}
    for code, name in stations.items():
        if s in name:
            matches[code] = name
    print(matches)
findstation('New York')
findstation('LIHUE')
findstation('SAN DIEGO')
findstation('IRKUTSK')
findstation('MINNEAPOLIS')
datastations = ['USW00022536','USW00023188','USW00014922','RSM00030710']
open('readme.txt','r').readlines()[:10]
open('ghcnd-stations.txt','r').readlines()[:10]
open('USW00022536.dly','r').readlines()[:10]
open('readme.txt','r').readlines()[:10]
def parsefile(filename):
    """Parse a GHCN-Daily ``.dly`` fixed-width file into a NumPy structured array.

    Relies on the module-level layout constants ``dly_delimiter``,
    ``dly_usecols``, ``dly_dtype`` and ``dly_names`` (defined below —
    they only need to exist by the time this function is called).
    """
    return np.genfromtxt(filename,
                        delimiter = dly_delimiter,
                        usecols = dly_usecols,
                        dtype = dly_dtype,
                        names = dly_names)
# Fixed-width layout of a .dly record: 11-char station id, 4-char year,
# 4-char month, 2-char element code, then 31 groups of (5-char value + 3 flag chars).
dly_delimiter = [11,4,4,2] + [5,1,1,1] * 31
# Keep year, month, element ('obs') and the 31 daily values; skip id and flag columns.
dly_usecols = [1,2,3] + [4*i for i in range(1,32)]
dly_dtype = [np.int32,np.int32,(np.str_,4)] + [np.int32] * 31
dly_names = ['year','month','obs'] + [str(day) for day in range(1,31+1)]
lihue = parsefile('USW00022536.dly')
lihue
def unroll(record):
    """Expand one .dly record (a single year/month row) into (date, value) pairs.

    Returns a structured array with one row per day of that month; raw values
    are divided by 10 because GHCN stores tenths (e.g. tenths of deg C).

    Fixed: the month-start format string had a stray leading space
    (``' {}-{:02}'``), which makes ``np.datetime64`` raise a parse error.
    """
    startdate = np.datetime64('{}-{:02}'.format(record['year'], record['month']))
    # One entry per calendar day: [start, start + 1 month) at day resolution.
    dates = np.arange(startdate, startdate + np.timedelta64(1, 'M'), np.timedelta64(1, 'D'))
    rows = [(date, record[str(i+1)]/10) for i, date in enumerate(dates)]
    return np.array(rows, dtype=[('date', 'M8[D]'), ('value', 'd')])
unroll(lihue[0])
sanDiego = parsefile('USW00023188.dly')
sanDiego
unroll(sanDiego[0])
| weatherData.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
# # Backends
# The default backend is always [pyopencl](https://documen.tician.de/pyopencl/) based.
# +
import numpy as np
import pyclesperanto_prototype as cle
cle.get_device()
# -
# Push a 1x3 array to the compute device and add a scalar there; the last
# expression's result is rendered by the notebook.
array = cle.push(np.asarray([[1, 2, 3]]))
cle.add_image_and_scalar(array, scalar = 5)
# # cupy backend
# One can switch to an experimental [cupy](https://cupy.dev) backend like this:
cle.select_device("cupy")
array = cle.push(np.asarray([[1, 2, 3]]))
cle.add_image_and_scalar(array, scalar = 5)
| demo/basics/select_backend.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # 05 - PCCA and TPT analysis
#
# <a rel="license" href="http://creativecommons.org/licenses/by/4.0/"><img alt="Creative Commons Licence" style="border-width:0" src="https://i.creativecommons.org/l/by/4.0/88x31.png" title='This work is licensed under a Creative Commons Attribution 4.0 International License.' align="right"/></a>
#
# In this notebook, we will cover how to use PCCA++ to extract a coarse representation of the MSM.
# We will further investigate how to use transition path theory (TPT) to follow the pathways of the processes.
# When we want to analyze pathways, models with fewer states are often desirable, since they are easier to understand.
# PCCA++ allows us to assign the microstates directly to metastable macrostates and TPT uses this group assignment to compute fluxes and pathways.
#
# Another method to get a model with fewer states are hidden Markov state models (HMM),
# introduced in [Notebook 07 ➜ 📓](07-hidden-markov-state-models.ipynb).
# In contrast to computing memberships of microstates to meta stable sets as in PCCA++,
# in HMMs we directly obtain a model with fewer states.
#
# While we will mostly rely on previously estimated/validated models, it will be helpful to understand the topics
# - data loading/visualization ([Notebook 01 ➜ 📓](01-data-io-and-featurization.ipynb))
# - dimension reduction ([Notebook 02 ➜ 📓](02-dimension-reduction-and-discretization.ipynb))
# - the estimation and validation process ([Notebook 03 ➜ 📓](03-msm-estimation-and-validation.ipynb))
#
# Here you can find literature on the used methods:
# - <a id="ref-1" href="#cite-pcca_plus_plus">roeblitz-weber-14</a>
# - <a id="ref-2" href="#cite-weinan-tpt">weinan-06</a>
# - <a id="ref-3" href="#cite-metzner-msm-tpt">metzner-09</a>
#
# Maintainers: [@cwehmeyer](https://github.com/cwehmeyer), [@marscher](https://github.com/marscher), [@thempel](https://github.com/thempel), [@psolsson](https://github.com/psolsson)
#
# **Remember**:
# - to run the currently highlighted cell, hold <kbd>⇧ Shift</kbd> and press <kbd>⏎ Enter</kbd>;
# - to get help for a specific function, place the cursor within the function's brackets, hold <kbd>⇧ Shift</kbd>, and press <kbd>⇥ Tab</kbd>;
# - you can find the full documentation at [PyEMMA.org](http://www.pyemma.org).
#
# ---
#
# ⚠️ We have assigned the integer numbers $1 \dots $ `nstates` to PCCA++ metastable states.
# As PyEMMA is written in Python, it internally indexes states starting from $0$.
# In consequence, numbers in the code cells differ by $-1$ from the plot labels and markdown text.
# %matplotlib inline
import matplotlib.pyplot as plt
import matplotlib as mpl
import numpy as np
import mdshare
import pyemma
# ## Case 1: preprocessed, two-dimensional data (toy model)
# We start by loading the data and the previously analyzed MSM (estimated in [Notebook 04 ➜ 📓](04-msm-analysis.ipynb)) from disk:
# +
# Fetch the 2D double-well toy trajectory and restore the MSM, Bayesian MSM
# and clustering objects estimated in notebook 04 from the 'nb4.pyemma' file.
file = mdshare.fetch('hmm-doublewell-2d-100k.npz', working_directory='data')
with np.load(file) as fh:
    data = fh['trajectory']
msm = pyemma.load('nb4.pyemma', model_name='doublewell_msm')
bayesian_msm = pyemma.load('nb4.pyemma', model_name='doublewell_bayesian_msm')
cluster = pyemma.load('nb4.pyemma', model_name='doublewell_cluster')
# -
# We currently have an MSM with $50$ discrete states which was validated for two metastable states in the previous notebook.
# Internally, the metastable states have been computed using the Perron Cluster Cluster Analysis (PCCA++) method <a id="ref-4" href="#cite-pcca_plus_plus">roeblitz-14</a>.
# Let's analyze this in more detail here.
# We can explicitly compute it by calling `msm.pcca()`.
nstates = 2
msm.pcca(nstates)
# PCCA++ computes membership distributions, i.e., probabilities of micro-states to belong to the same metastable state.
# It does so by using the properties of slow processes in eigenvector space.
# Let us visualize the membership distributions in the same fashion as before:
# One contour panel per metastable state: map each frame's discrete state to
# its membership probability and plot that over the 2D data.
fig, axes = plt.subplots(1, 2, figsize=(10, 4))
for i, ax in enumerate(axes.flat):
    pyemma.plots.plot_contour(
        *data.T, msm.metastable_distributions[i][cluster.dtrajs[0]], ax=ax, cmap='afmhot_r',
        mask=True, method='nearest', cbar_label='metastable distribution {}'.format(i + 1))
    ax.scatter(*cluster.clustercenters.T, s=15, c='k')  # overlay cluster centers
    ax.set_xlabel('$x$')
    ax.set_xlim(-4, 4)
    ax.set_ylim(-4, 4)
    ax.set_aspect('equal')
axes[0].set_ylabel('$y$')
fig.tight_layout()
# As expected, PCCA++ has assigned metastable states to the basins of the double well.
# Since PCCA++, in simplified words, does a clustering in eigenvector space and the first eigenvector separated these states already, the nice separation comes to no surprise.
#
# It is important to note, though, that PCCA++ in general does not yield a coarse transition matrix.
# How to obtain this will be covered in [Notebook 07 ➜ 📓](07-hidden-markov-state-models.ipynb).
# However, we can compute mean first passage times (MFPTs) and equilibrium probabilities on the metastable sets and extract representative structures.
#
# The stationary probability of metastable states can simply be computed by summing over all of its micro-states
# (please note that the PCCA++ object returned by `msm.pcca()` also has a convenience function to do that.)
for i, s in enumerate(msm.metastable_sets):
print('π_{} = {:f}'.format(i + 1, msm.pi[s].sum()))
# We use the `mfpt()` method of the original MSM object to compute MFPTs between pairs of metastable sets
# (accessible via the `metastable_sets` attribute of the MSM object).
# +
# Mean first passage times between every ordered pair of metastable sets,
# rendered as a 1-indexed table.
mfpt = np.array(
    [[msm.mfpt(msm.metastable_sets[a], msm.metastable_sets[b])
      for b in range(nstates)]
     for a in range(nstates)])
from pandas import DataFrame
print('MFPT / steps:')
labels = range(1, nstates + 1)
DataFrame(np.round(mfpt, decimals=2), index=labels, columns=labels)
# -
# As described above, the errors can be estimated from the Bayesian MSM.
# Instead of just printing means and confidence intervals, let's compute the samples explicitly and histogram them.
# +
# Sample the MFPT distributions from the Bayesian MSM: one array of
# `nsamples` values per ordered pair of metastable sets.
mfpt_sample = np.zeros((nstates, nstates, bayesian_msm.nsamples))
for i in range(nstates):
    for j in range(nstates):
        mfpt_sample[i, j] = bayesian_msm.sample_f(
            'mfpt',
            msm.metastable_sets[i],
            msm.metastable_sets[j])
fig, ax = plt.subplots()
ax.hist(mfpt_sample[0, 1], histtype='step', label='MS 1 -> MS 2', density=True)
ax.hist(mfpt_sample[1, 0], histtype='step', label='MS 2 -> MS 1', density=True)
ax.set_xlabel('MFPT (steps)')
ax.set_title('Bayesian MFPT sample histograms')
fig.legend(loc=10);
# -
# We clearly see that there is no overlap of the distributions approximated by the Bayesian MSM.
#
# To do a more detailed analysis of the transition paths, we make use of transition path theory (TPT) in its MSM formulation.
# We first analyze the flux between the two metastable sets:
A = msm.metastable_sets[0]
B = msm.metastable_sets[1]
flux = pyemma.msm.tpt(msm, A, B)
# In TPT, many properties are derived from the committor functions.
# They describe the probability of reaching a set $A$ before set $B$ as a function of some state $x$.
# In order to understand this, we plot the committor of the previously defined sets as a function of the cluster centers:
fig, ax = plt.subplots(figsize=(5, 4))
pyemma.plots.plot_contour(
*data.T,
flux.committor[cluster.dtrajs[0]],
ax=ax,
cmap='brg',
mask=True,
cbar_label=r'committor A $\to$ B')
ax.set_xlabel('$x$')
ax.set_xlim(-4, 4)
ax.set_ylim(-4, 4)
ax.set_aspect('equal')
ax.set_ylabel('$y$')
fig.tight_layout()
# We see that the committor for the double well data approximates a step function between the two basins.
# In other words, the probability of transitioning from metastable state $A$ to $B$ is only $1$ if we already are in state $B$.
# If we are in $A$, it is $0$ by definition.
# The clustering did not resolve the transition region, so this particular example does not provide more information.
# In the next example we will see more.
#
# ## Case 2: low-dimensional molecular dynamics data (alanine dipeptide)
#
# Again, we load the model that we have estimated previously.
# +
pdb = mdshare.fetch('alanine-dipeptide-nowater.pdb', working_directory='data')
files = mdshare.fetch('alanine-dipeptide-*-250ns-nowater.xtc', working_directory='data')
feat = pyemma.coordinates.featurizer(pdb)
feat.add_backbone_torsions(periodic=False)
data = pyemma.coordinates.load(files, features=feat)
data_concatenated = np.concatenate(data)
msm = pyemma.load('nb4.pyemma', model_name='ala2_msm')
bayesian_msm = pyemma.load('nb4.pyemma', model_name='ala2_bayesian_msm')
cluster = pyemma.load('nb4.pyemma', model_name='ala2_cluster')
# not to be used in MSM estimation (artificial transitions between individual trajectories)!
dtrajs_concatenated = np.concatenate(cluster.dtrajs)
# -
# In the previous [Notebook 04 ➜ 📓](04-msm-analysis.ipynb), we saw that four metastable states are a reasonable choice for our MSM.
# We, thus, perform PCCA++ with this number of states for further analysis and print out the stationary probabilities of the metastable sets:
nstates = 4
msm.pcca(nstates)
for i, s in enumerate(msm.metastable_sets):
print('π_{} = {:f}'.format(i + 1, msm.pi[s].sum()))
# We visualize the metastable memberships:
fig, axes = plt.subplots(1, 4, figsize=(15, 3))
for i, ax in enumerate(axes.flat):
pyemma.plots.plot_contour(
*data_concatenated.T,
msm.metastable_distributions[i][dtrajs_concatenated],
ax=ax,
cmap='afmhot_r',
mask=True,
cbar_label='metastable distribution {}'.format(i + 1))
ax.set_xlabel('$\Phi$')
axes[0].set_ylabel('$\Psi$')
fig.tight_layout()
# PCCA++ nicely separates the high-density regions and we find that each of the basins was assigned a metastable set.
# This indicates that our projection indeed describes the slow dynamics.
#
# We concatenate all three discrete trajectories and obtain a single trajectory of metastable states which we use to visualize the metastable state memberships of all datapoints.
# We further compute the state with the highest membership to a PCCA metastable state to plot a state label there.
#
# ⚠️ Please remember that the concatenated discrete trajectories (dtrajs) are not meant to be used for MSM estimation (artificial transitions), but only for visualization and indexing purposes!
metastable_traj = msm.metastable_assignments[dtrajs_concatenated]
highest_membership = msm.metastable_distributions.argmax(1)
coarse_state_centers = cluster.clustercenters[msm.active_set[highest_membership]]
# Now, we use the `mfpt()` method of the MSM object to compute MFPTs between pairs of metastable sets and compute the inverse MFPTs for visualization purposes:
# +
# Pairwise MFPTs between the four metastable sets; the inverse MFPT
# (a rate-like quantity) is used to scale the network plot below.
mfpt = np.zeros((nstates, nstates))
for i in range(nstates):
    for j in range(nstates):
        mfpt[i, j] = msm.mfpt(
            msm.metastable_sets[i],
            msm.metastable_sets[j])
inverse_mfpt = np.zeros_like(mfpt)
nz = mfpt.nonzero()  # skip the zero diagonal to avoid division by zero
inverse_mfpt[nz] = 1.0 / mfpt[nz]
# -
# We visualize our model in backbone torsion space:
# +
fig, ax = plt.subplots(figsize=(10, 7))
_, _, misc = pyemma.plots.plot_state_map(
*data_concatenated.T, metastable_traj, ax=ax, zorder=-1)
misc['cbar'].set_ticklabels(range(1, nstates + 1)) # set state numbers 1 ... nstates
pyemma.plots.plot_network(
inverse_mfpt,
pos=coarse_state_centers,
figpadding=0,
arrow_label_format='%.1f ps',
arrow_labels=mfpt,
size=12,
show_frame=True,
ax=ax)
ax.set_xlabel('$\Phi$')
ax.set_ylabel('$\Psi$')
ax.set_xlim(-np.pi, np.pi)
ax.set_ylim(-np.pi, np.pi)
fig.tight_layout()
# -
# Have you noticed how well the metastable state coloring agrees with the eigenvector visualization of the three slowest processes?
#
# If we could afford a shorter lag time, we might even be able to resolve more processes and, thus,
# subdivide the metastable states three and four.
# We show how to do this with HMMs in [Notebook 07 ➜ 📓](07-hidden-markov-state-models.ipynb).
#
# Now we define a small function to visualize samples of metastable states with NGLView.
def visualize_metastable(samples, cmap, selection='backbone'):
    """Visualize structure samples of metastable states with NGLView.

    Parameters
    ----------
    samples: list of mdtraj.Trajectory objects
        each element contains all samples for one metastable state.
    cmap: matplotlib.colors.ListedColormap
        color map used to visualize metastable states before.
    selection: str
        which part of the molecule to select for visualization. For details have a look here:
        http://mdtraj.org/latest/examples/atom-selection.html#Atom-Selection-Language

    Returns
    -------
    nglview.NGLWidget with one colored trajectory component per metastable state.
    """
    import nglview
    from matplotlib.colors import to_hex

    widget = nglview.NGLWidget()
    widget.clear_representations()
    ref = samples[0]
    for i, s in enumerate(samples):
        # align all samples on the first state's frames, then restrict to the
        # requested atom selection before handing the trajectory to the widget
        s = s.superpose(ref)
        s = s.atom_slice(s.top.select(selection))
        comp = widget.add_trajectory(s)
        comp.add_ball_and_stick()
    # this has to be done in a separate loop for whatever reason...
    x = np.linspace(0, 1, num=len(samples))
    for i, x_ in enumerate(x):
        # color each component with an evenly spaced value from the color map
        c = to_hex(cmap(x_))
        widget.update_ball_and_stick(color=c, component=i, repr_index=i)
        widget.remove_cartoon(component=i)
    return widget
# We now sample some representative structures and visualize these with the aid of NGLView.
# For the sake of clarity, we draw only the backbone atoms.
# Since we have obtained several samples for each metastable state, you can click the play button to iterate over all samples.
# For each iteration, the samples of all four states will be drawn.
# You can double click the molecule to show it at full screen.
# Press escape to go back.
# +
cmap = mpl.cm.get_cmap('viridis', nstates)
my_samples = [pyemma.coordinates.save_traj(files, idist, outfile=None, top=pdb)
for idist in msm.sample_by_distributions(msm.metastable_distributions, 50)]
visualize_metastable(my_samples, cmap, selection='backbone')
# -
# Coming back to TPT, we now have more than two metastable states and can expect more insights from analyzing the transition paths.
# As an example, we will focus on the committor between metastable sets $0$ and $3$ as defined above.
A = msm.metastable_sets[0]
B = msm.metastable_sets[3]
flux = pyemma.msm.tpt(msm, A, B)
# Before we go on with the visualization, let's coarse grain the flux with the metastable sets estimated with PCCA++:
cg, cgflux = flux.coarse_grain(msm.metastable_sets)
# We now show an overlay of the committor probabilities and the most likely transition path from the coarse graining TPT:
# +
fig, ax = plt.subplots(figsize=(10, 7))
pyemma.plots.plot_contour(
*data_concatenated.T,
flux.committor[dtrajs_concatenated],
cmap='brg',
ax=ax,
mask=True,
cbar_label=r'committor 1 $\to$ 4',
alpha=0.8,
zorder=-1);
pyemma.plots.plot_flux(
cgflux,
coarse_state_centers,
cgflux.stationary_distribution,
state_labels=['A','' ,'', 'B'],
ax=ax,
show_committor=False,
figpadding=0,
show_frame=True,
arrow_label_format='%2.e / ps');
ax.set_xlabel('$\Phi$')
ax.set_ylabel('$\Psi$')
ax.set_xlim(-np.pi, np.pi)
ax.set_ylim(-np.pi, np.pi)
fig.tight_layout()
# -
# First, the color map shows us a region with committor probability $\approx 0.5$.
# This indicates that this particular metastable state is a transition state in the pathway from $A$ to $B$.
# Second, the `plot_flux()` function displays the most likely transition pathway along this path.
# There are other, less likely pathways included in the plot as well.
# The arrow thickness indicates the flux between the states.
#
# We can decompose the flux into these individual pathways by:
paths, path_fluxes = cgflux.pathways(fraction=0.99)
print('percentage \tpath')
print('-------------------------------------')
for i in range(len(paths)):
print(np.round(path_fluxes[i] / np.sum(path_fluxes), 3),' \t', paths[i] + 1)
# As expected, about $85\%$ of the flux goes through only one pathway.
# To get a cleaner picture, the `plot_flux()` function supports a `minflux` keyword argument that can be increased to exclude very low fluxes from the plot.
#
# #### Exercise 1
#
# Define a `featurizer` that loads the heavy atom coordinates and load the data into memory.
# Also load the TICA object from [Notebook 04 ➜ 📓](04-msm-analysis.ipynb) to transform the featurized data.
# Further, the estimated MSM, Bayesian MSM, and Cluster objects should be loaded from disk.
# + solution2="hidden" solution2_first=true
feat = #FIXME
feat. #FIXME
data = #FIXME
tica = #FIXME
tica_output = tica.transform(data)
tica_concatenated = np.concatenate(tica_output)
msm = #FIXME
bayesian_msm = #FIXME
cluster = #FIXME
dtrajs_concatenated = #FIXME
# + [markdown] solution2="hidden"
# ###### Solution
# + solution2="hidden"
feat = pyemma.coordinates.featurizer(pdb)
pairs = feat.pairs(feat.select_Heavy())
feat.add_distances(pairs, periodic=False)
data = pyemma.coordinates.load(files, features=feat)
tica = pyemma.load('nb4.pyemma', model_name='ala2tica_tica')
tica_output = tica.transform(data)
tica_concatenated = np.concatenate(tica_output)
msm = pyemma.load('nb4.pyemma', model_name='ala2tica_msm')
bayesian_msm = pyemma.load('nb4.pyemma', model_name='ala2tica_bayesian_msm')
cluster = pyemma.load('nb4.pyemma', model_name='ala2tica_cluster')
dtrajs_concatenated = np.concatenate(cluster.dtrajs)
# -
# #### Exercise 2
#
# Do a PCCA++ analysis of the MSM with four metastable states,
# compute the probability of the metastable sets, and visualize the metastable state memberships.
# + solution2="hidden" solution2_first=true
nstates = 4
#FIXME (PCCA)
for i, s in enumerate(msm.metastable_sets):
print('π_{} = {:f}'.format(i + 1, )) #FIXME
fig, axes = plt.subplots(1, 4, figsize=(15, 3))
for i, ax in enumerate(axes.flat):
pyemma.plots.plot_contour(
*tica_concatenated.T,
msm.metastable_distributions[i][dtrajs_concatenated],
ax=ax,
cmap='afmhot_r',
mask=True,
cbar_label='metastable distribution {}'.format(i + 1))
ax.set_xlabel('IC 1')
axes[0].set_ylabel('IC 2')
fig.tight_layout()
# + [markdown] solution2="hidden"
# ###### Solution
# + solution2="hidden"
nstates = 4
msm.pcca(nstates)
for i, s in enumerate(msm.metastable_sets):
print('π_{} = {:f}'.format(i + 1, msm.pi[s].sum()))
fig, axes = plt.subplots(1, 4, figsize=(15, 3))
for i, ax in enumerate(axes.flat):
pyemma.plots.plot_contour(
*tica_concatenated.T,
msm.metastable_distributions[i][dtrajs_concatenated],
ax=ax,
cmap='afmhot_r',
mask=True,
cbar_label='metastable distribution {}'.format(i + 1))
ax.set_xlabel('IC 1')
axes[0].set_ylabel('IC 2')
fig.tight_layout()
# -
# Did you guess the metastable states correctly?
#
# Note the similarities between the MSM built from the backbone torsions and the MSM built from the TICA projection of heavy atom distances.
# Even though we started from different features, both models found the same kinetic information in the data.
#
# #### Exercise 3
#
# Compute the pairwise MFPTs and transition rates, and visualize the resulting kinetic network.
# + solution2="hidden" solution2_first=true
mfpt = np.zeros((nstates, nstates))
for i in range(nstates):
for j in range(nstates):
mfpt[i, j] = #FIXME
inverse_mfpt = np.zeros_like(mfpt)
nz = mfpt.nonzero()
inverse_mfpt[nz] = 1.0 / mfpt[nz]
pyemma.plots.plot_network(
inverse_mfpt,
pos=np.asarray([[0, 0], [4, 0], [2, 4], [6, 4]]),
arrow_label_format='%.1f ps',
arrow_labels=mfpt,
arrow_scale=3.0,
state_labels=range(1, nstates + 1),
size=12);
# + [markdown] solution2="hidden"
# ###### Solution
# + solution2="hidden"
mfpt = np.zeros((nstates, nstates))
for i in range(nstates):
for j in range(nstates):
mfpt[i, j] = msm.mfpt(
msm.metastable_sets[i],
msm.metastable_sets[j])
inverse_mfpt = np.zeros_like(mfpt)
nz = mfpt.nonzero()
inverse_mfpt[nz] = 1.0 / mfpt[nz]
pyemma.plots.plot_network(
inverse_mfpt,
pos=np.asarray([[0, 0], [4, 0], [2, 4], [6, 4]]),
arrow_label_format='%.1f ps',
arrow_labels=mfpt,
arrow_scale=3.0,
state_labels=range(1, nstates + 1),
size=12);
# -
# #### Exercise 4
# Compute the TPT object, coarse grain it onto the PCCA++ metastable sets and visualize the flux along with the committor probabilities.
# + solution2="hidden" solution2_first=true
A = msm.metastable_sets[0]
B = msm.metastable_sets[3]
flux = #FIXME
cg, cgflux = #FIXME
highest_membership = msm.metastable_distributions.argmax(1)
coarse_state_centers = cluster.clustercenters[msm.active_set[highest_membership]]
fig, ax = plt.subplots(figsize=(10, 7))
pyemma.plots.plot_contour(
*tica_concatenated.T,
flux.committor[dtrajs_concatenated],
cmap='brg',
ax=ax,
mask=True,
cbar_label=r'committor 1 $\to$ 4',
alpha=0.8,
zorder=-1)
pyemma.plots.plot_flux(
cgflux,
coarse_state_centers,
cgflux.stationary_distribution,
ax=ax,
show_committor=False,
figpadding=0.2,
state_labels=['A', '', '', 'B'],
arrow_label_format='%2.e / ps')
ax.set_aspect('equal')
ax.set_xlim(tica_concatenated[:, 0].min(), tica_concatenated[:, 0].max())
ax.set_ylim(tica_concatenated[:, 1].min(), tica_concatenated[:, 1].max())
# + [markdown] solution2="hidden"
# ###### Solution
# + solution2="hidden"
A = msm.metastable_sets[0]
B = msm.metastable_sets[3]
flux = pyemma.msm.tpt(msm, A, B)
cg, cgflux = flux.coarse_grain(msm.metastable_sets)
highest_membership = msm.metastable_distributions.argmax(1)
coarse_state_centers = cluster.clustercenters[msm.active_set[highest_membership]]
fig, ax = plt.subplots(figsize=(10, 7))
pyemma.plots.plot_contour(
*tica_concatenated.T,
flux.committor[dtrajs_concatenated],
cmap='brg',
ax=ax,
mask=True,
cbar_label=r'committor 1 $\to$ 4',
zorder=-1)
pyemma.plots.plot_flux(
cgflux,
coarse_state_centers,
cgflux.stationary_distribution,
ax=ax,
show_committor=False,
figpadding=0.2,
state_labels=['A', '', '', 'B'],
arrow_label_format='%2.e / ps')
ax.set_xlabel('IC 1')
ax.set_ylabel('IC 2')
ax.set_aspect('equal')
ax.set_xlim(tica_concatenated[:, 0].min(), tica_concatenated[:, 0].max())
ax.set_ylim(tica_concatenated[:, 1].min(), tica_concatenated[:, 1].max())
fig.tight_layout()
# -
# ## Wrapping up
# In this notebook, we have learned how to use PCCA++ using an existing MSM and how to extract kinetic information from the model.
# In detail, we have used
# - the `pcca()` method of an MSM object to find metastable states,
# - the `mfpt()` method of an MSM object to compute mean first passage times between metastable states which, in turn, are accessible via
# - the `metastable_sets` and `metastable_assignments` attributes of an MSM object.
#
# For visualizing MSMs or kinetic networks we used
# - `pyemma.plots.plot_density()`, `pyemma.plots.plot_contour()` and
# - `pyemma.plots.plot_network()`.
# ## References
#
# <a id="cite-pcca_plus_plus"/><sup><a href=#ref-1>[^]</a><a href=#ref-4>[^]</a></sup><NAME> and <NAME>. 2013. _Fuzzy spectral clustering by PCCA+: application to Markov state models and data classification_. [URL](https://doi.org/10.1007/s11634-013-0134-6)
#
# <a id="cite-weinan-tpt"/><sup><a href=#ref-2>[^]</a></sup><NAME>. and <NAME>. 2006. _Towards a Theory of Transition Paths_. [URL](https://doi.org/10.1007/s10955-005-9003-9)
#
# <a id="cite-metzner-msm-tpt"/><sup><a href=#ref-3>[^]</a></sup><NAME> and <NAME> and <NAME>. 2009. _Transition Path Theory for Markov Jump Processes_. [URL](https://doi.org/10.1137/070699500)
#
#
| notebooks/05-pcca-tpt.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
# +
import os
import json
import numpy as np
# Directory containing one JSON file per randomly generated architecture.
experiment_dir = "../experiments/random_jsons/random_architectures_with_loss"
dir_depth = 4
filenames = [f for f in os.listdir(experiment_dir) if os.path.isfile(os.path.join(experiment_dir, f))]
data = []
for filename in filenames:
    full_path = os.path.join(experiment_dir, filename)
    config = json.load( open( full_path) )
    # Flatten the 'FTP' section and (if present) the training results into one
    # record per file; files without results get an empty learning curve.
    if 'Results' in config:
        data_element = { 'filename': filename, **config['FTP'], **config['Results'] }
    else:
        data_element = { 'filename': filename, **config['FTP'], 'learning_curve_train': [] }
    data.append( data_element )
for e in data:
print(e['filename'], ':')
s1 = f'''{len(e['quantiles'])} quantiles available'''
print( " ", s1)
s2 = f'''{len(e['learning_curve_train'])} training sample available'''
print( " ", s2)
print( " ", e['learning_curve_train'] )
# +
# For each of the 11 quantile levels, collect (quantile value, loss) pairs
# across all training samples of all experiments.
scatter_data = {}
for i in range(11):
    scatter_data[i] = []
for e in data:
    train_sample = e['learning_curve_train']
    for s in train_sample:
        for i in range(11):
            if s < 0.5: # Remove outliers
                scatter_data[i].append( (e['quantiles'][i], s))
# Data points for each quantile
for i in range(11):
    print( i, ':', len(scatter_data[i]) )
# +
import matplotlib.pyplot as plt
from scipy import stats
# Bivariate analysis of FPT quantiles vs. training loss, one quantile level at
# a time. Fixed: only ``stats`` was imported (``from scipy import stats``), so
# the original ``scipy.stats.…`` calls raised NameError — use ``stats.…``.
for i in range(1,10):
    x = [q for (q, loss) in scatter_data[i]]
    y = [loss for (q, loss) in scatter_data[i]]
    spearman = stats.spearmanr(x, y)
    pearson = stats.pearsonr(x, y)
    slope, intercept, r_value, p_value, std_err = stats.linregress(x, y)
    print("Correlation measures: ")
    print("Spearman:", spearman )
    print("Pearson :", pearson )
    print("R2 :", r_value)
    # Plot
    plt.scatter( x, y , alpha=0.2)
    plt.title(f'''FPT quantile at {i*0.1} vs Loss''')
    plt.xlabel("FPT quantile")
    plt.ylabel("Loss")
    plt.savefig(f'''FTP_quantile{i*10}.png''')
    plt.show()
# -
# !pip install scipy
| ipynb/LossStatistics.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: deepl
# language: python
# name: deepl
# ---
# # US Energy data
#
# First, you need to [register for an API key at the EIA](https://www.eia.gov/opendata/register.php). This requires an email address.
#
# One option is to put it in a file called `secrets.py` and keep it to yourself (note that this shadows Python's standard-library `secrets` module, so only do this in a standalone project). So, for instance, if you use GitHub, say, make sure you add that file to your `.gitignore` file. So the file might contain:
#
#     eia = "0123456789abcdef0123456789abcdef"   # (placeholder — use your own key; never commit a real one)
#
# Then, from this notebook, you can do:
#
# from secrets import eia as key
#
# Alternatively, just copy and paste your key here:
key = " <KEY GOES HERE> "
# You'll also need the requests library. If you don't have it, you can run this cell:
import requests
# To figure out the name of the series you need, drill down here: https://www.eia.gov/opendata/qb.php
#
# For example, if we go to EIA Data Sets > Petroleum > Prices > NYMEX Futures Prices, we get this info:
#
# - API call to use: http://api.eia.gov/series/?api_key=YOUR_API_KEY_HERE&series_id=PET.RCLC1.M
# - Series name: _Cushing, OK Crude Oil Future Contract 1, Monthly_
#
# Now we have everything we need to make our own API call to this dataset:
# +
# Query the EIA series endpoint for the monthly NYMEX crude-oil futures series.
url = "http://api.eia.gov/series/"
params = {
    "api_key": key,
    "series_id": "PET.RCLC1.M",
}
r = requests.get(url, params=params)
# -
# +
import pandas as pd

# The series payload is a list of [YYYYMM, value] pairs; parse the dates and
# sort chronologically (the API returns newest first).
df = pd.DataFrame(r.json()['series'][0]['data'], columns=['Date', 'Price'])
df['Date'] = pd.to_datetime(df['Date'], format='%Y%m')
df = df.set_index('Date').sort_index()
# -
# Let's look at the dataframe:
# And plot it:
# +
# %matplotlib inline
import matplotlib.pyplot as plt
# -
| notebooks/3_Energy_data_for_the_US.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/lvisdd/object_detection_tutorial/blob/master/object_detection_tutorial_webrtc.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + id="cor8fWw3gF0W" colab_type="code" colab={}
# restart (or reset) your virtual machine
# #!kill -9 -1
# + [markdown] id="5vMoF2OaFTxi" colab_type="text"
# # [Tensorflow Object Detection API](https://github.com/tensorflow/models/tree/master/research/object_detection)
# + id="cIucMKsaFXBY" colab_type="code" outputId="4ae92d63-d1db-4854-8ea0-db842641df68" colab={"base_uri": "https://localhost:8080/", "height": 136}
# !git clone https://github.com/tensorflow/models.git
# + [markdown] id="zesR5I_qFbJg" colab_type="text"
# # COCO API installation
# + id="D44-QM5cFaqX" colab_type="code" outputId="742467bb-ffb2-4be4-8d81-b1f453e2e8ef" colab={"base_uri": "https://localhost:8080/", "height": 1000}
# !git clone https://github.com/cocodataset/cocoapi.git
# %cd cocoapi/PythonAPI
# !make
# !cp -r pycocotools /content/models/research/
# + [markdown] id="ZNtknm_fFhcP" colab_type="text"
# # Protobuf Compilation
# + id="_PwXOE1wFk9I" colab_type="code" outputId="8038acd1-8077-4e9d-b670-a22da8f2192a" colab={"base_uri": "https://localhost:8080/", "height": 34}
# %cd /content/models/research/
# !protoc object_detection/protos/*.proto --python_out=.
# + [markdown] id="iNPcWjYMFq_n" colab_type="text"
# # Add Libraries to PYTHONPATH
# + id="gc8H-ScuFpv5" colab_type="code" outputId="a604fde3-cdf4-4579-b488-490d30492aa2" colab={"base_uri": "https://localhost:8080/", "height": 714}
# %cd /content/models/research/
# %env PYTHONPATH=/env/python:/content/models/research:/content/models/research/slim:/content/models/research/object_detection
# %env
# + [markdown] id="4lFy4jC7FwYp" colab_type="text"
# # Testing the Installation
# + id="WoZSvZzpFxFo" colab_type="code" outputId="96edf84d-99eb-4d2a-b50c-25e11a6c02b0" colab={"base_uri": "https://localhost:8080/", "height": 867}
# !python object_detection/builders/model_builder_test.py
# + id="d6eeEiH4HRoH" colab_type="code" outputId="4ff650b8-34b9-4276-837c-c0a7ce929078" colab={"base_uri": "https://localhost:8080/", "height": 34}
# %cd /content/models/research/object_detection
# + [markdown] id="d5y3UhXGOyFN" colab_type="text"
# ## [Tensorflow Object Detection API Web Service](https://github.com/webrtcHacks/tfObjWebrtc)
# + id="CkzL8IUZO34M" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="8253b63b-3078-4cf7-b5df-164cf5b8143a"
# %cd /content
# + id="W0YBn3ViOyYc" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 85} outputId="08ae0b79-3327-4b3a-af6e-7fa89ec9f4ac"
# !git clone https://github.com/webrtcHacks/tfObjWebrtc.git
# + id="wpQ0csosQIEE" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="51d85c4b-a4e1-4f0e-e2ce-f7281d175a5a"
# %cd tfObjWebrtc
# + id="ABOaIH5nQNDc" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 1000} outputId="5d36360a-48f2-482d-c44d-d6e9479039c3"
# !python setup.py install
# + id="2v7PVdzdQ-1-" colab_type="code" colab={}
# !sed -i.bak -e "s|tf.__version__ != '1.4.0'|tf.__version__ < '1.13.0'|g;s|\( with tf.Session(graph=detection_graph) as sess:\)|#\1\n sess = tf.Session(graph=detection_graph)|g;s|outputJson = json.dumps(\[ob.__dict__ for ob in output\])|outputJson = json.dumps(\[ob.__dict__ for ob in output\], cls = MyEncoder)|g" object_detection_api.py
# + id="-8vckapwZo0d" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 425} outputId="f4186643-02cf-42e1-8cab-7208897d6d14" language="bash"
# cat <<EOF>> object_detection_api.py
# class MyEncoder(json.JSONEncoder):
# def default(self, obj):
# if isinstance(obj, np.integer):
# return int(obj)
# elif isinstance(obj, np.floating):
# return float(obj)
# elif isinstance(obj, np.ndarray):
# return obj.tolist()
# else:
# return super(MyEncoder, self).default(obj)
# EOF
# diff object_detection_api.py.bak object_detection_api.py
# + id="x_DzQJLqRU6j" colab_type="code" colab={}
# #!mv object_detection_api.py.bak object_detection_api.py
# + [markdown] id="lHfLPT0cR8E3" colab_type="text"
# ## Install ngrok
# + id="dKrHLY0jR9Wr" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 238} outputId="0b4a4e34-9e32-4bfe-c6e6-5f36227c699b"
# !wget https://bin.equinox.io/c/4VmDzA7iaHb/ngrok-stable-linux-amd64.zip
# !unzip ngrok-stable-linux-amd64.zip
# + id="FcafpBedSAxz" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="85cb3892-2887-4575-ab57-00ff21c27f51"
# Launch ngrok in the background to expose local port 5000 publicly;
# system_raw (rather than `!`) detaches the process so the cell returns
# immediately instead of blocking on the tunnel.
get_ipython().system_raw('./ngrok http 5000 &')
# Ask ngrok's local inspection API (port 4040) for the public tunnel URL.
# ! curl -s http://localhost:4040/api/tunnels | python3 -c \
#    "import sys, json; print(json.load(sys.stdin)['tunnels'][0]['public_url'])"
# + [markdown] id="n_aenkEHgaYs" colab_type="text"
# * https://xxxxxxxx.ngrok.io/local
# * https://xxxxxxxx.ngrok.io/video
#
# + [markdown] id="zWmaYWvfSHI8" colab_type="text"
# ## Serve Flask app and Click ngrok URL
#
# + id="PfJvromwQdRb" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 1000} outputId="164802a9-a2f3-4b40-fc1c-d758a07a4217"
# !python server.py
| object_detection_tutorial_webrtc.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="K-becVUr5_Q9"
# <div align="center">
# <h1><img width="30" src="https://madewithml.com/static/images/rounded_logo.png"> <a href="https://madewithml.com/">Made With ML</a></h1>
# Applied ML · MLOps · Production
# <br>
# Join 20K+ developers in learning how to responsibly <a href="https://madewithml.com/about/">deliver value</a> with ML.
# </div>
#
# <br>
#
# <div align="center">
# <a target="_blank" href="https://newsletter.madewithml.com"><img src="https://img.shields.io/badge/Subscribe-20K-brightgreen"></a>
# <a target="_blank" href="https://github.com/GokuMohandas/MadeWithML"><img src="https://img.shields.io/github/stars/GokuMohandas/MadeWithML.svg?style=social&label=Star"></a>
# <a target="_blank" href="https://www.linkedin.com/in/goku"><img src="https://img.shields.io/badge/style--5eba00.svg?label=LinkedIn&logo=linkedin&style=social"></a>
# <a target="_blank" href="https://twitter.com/GokuMohandas"><img src="https://img.shields.io/twitter/follow/GokuMohandas.svg?label=Follow&style=social"></a>
# <p>🔥 Among the <a href="https://github.com/topics/deep-learning" target="_blank">top ML</a> repositories on GitHub</p>
# </div>
#
# <br>
# <hr>
# + [markdown] id="eTdCMVl9YAXw"
# # Embeddings
#
# In this lesson, we will motivate the need for embeddings, which are capable of capturing the contextual, semantic and syntactic meaning in data.
# + [markdown] id="xuabAj4PYj57"
# <div align="left">
# <a target="_blank" href="https://madewithml.com/courses/ml-foundations/embeddings/"><img src="https://img.shields.io/badge/📖 Read-blog post-9cf"></a>
# <a href="https://github.com/GokuMohandas/MadeWithML/blob/main/notebooks/12_Embeddings.ipynb" role="button"><img src="https://img.shields.io/static/v1?label=&message=View%20On%20GitHub&color=586069&logo=github&labelColor=2f363d"></a>
# <a href="https://colab.research.google.com/github/GokuMohandas/MadeWithML/blob/main/notebooks/12_Embeddings.ipynb"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"></a>
# </div>
# + [markdown] id="N9lh8_YvoR50"
# # Overview
#
# + [markdown] id="Dtu7IU66obsh"
# While one-hot encoding allows us to preserve the structural information, it poses two major disadvantages.
#
# - linearly dependent on the number of unique tokens in our vocabulary, which is a problem if we're dealing with a large corpus.
# - representation for each token does not preserve any relationship with respect to other tokens.
#
# In this notebook, we're going to motivate the need for embeddings and how they address all the shortcomings of one-hot encoding. The main idea of embeddings is to have fixed length representations for the tokens in a text regardless of the number of tokens in the vocabulary. With one-hot encoding, each token is represented by an array of size [1 X `vocab_size`], but with embeddings, each token now has the shape [1 X `embed_dim`]. The values in the representation will are not fixed binary values but rather, changing floating points allowing for fine-grained learned representations.
#
# * **Objective:** Represent tokens in text that capture the intrinsic semantic relationships.
# * **Advantages:**
# * Low-dimensionality while capturing relationships.
# * Interpretable token representations
# * **Disadvantages:** Can be computationally intensive to precompute.
# * **Miscellaneous:** There are lot's of pretrained embeddings to choose from but you can also train your own from scratch.
#
#
#
# + [markdown] id="nH_O4MZ294jk"
# # Learning Embeddings
# + [markdown] id="F47IiPgUupAk"
# We can learn embeddings by creating our models in PyTorch but first, we're going to use a library that specializes in embeddings and topic modeling called [Gensim](https://radimrehurek.com/gensim/).
# + id="_pZljlaCgG6Y" colab={"base_uri": "https://localhost:8080/"} executionInfo={"status": "ok", "timestamp": 1608249517914, "user_tz": 420, "elapsed": 5623, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GjMIOf3R_zwS_zZx4ZyPMtQe0lOkGpPOEUEKWpM7g=s64", "userId": "00378334517810298963"}} outputId="0a90af35-f470-4d30-b9b4-de7fd847f632"
import nltk
nltk.download('punkt');
import numpy as np
import re
import urllib
# + id="oektJd55gG1p"
SEED = 1234
# + id="tqbnugiD-SW0"
# Set seed for reproducibility
np.random.seed(SEED)
# + id="vF5D_nNjlx2d" colab={"base_uri": "https://localhost:8080/"} executionInfo={"status": "ok", "timestamp": 1608249518769, "user_tz": 420, "elapsed": 6439, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GjMIOf3R_zwS_zZx4ZyPMtQe0lOkGpPOEUEKWpM7g=s64", "userId": "00378334517810298963"}} outputId="d72e77e6-486f-4177-a92c-3b1884fb0539"
# Split text into sentences
# punkt is a pretrained, unsupervised sentence-boundary tokenizer.
tokenizer = nltk.data.load('tokenizers/punkt/english.pickle')
# Stream the corpus (raw Harry Potter text) straight from GitHub.
book = urllib.request.urlopen(url="https://raw.githubusercontent.com/GokuMohandas/MadeWithML/main/datasets/harrypotter.txt")
# NOTE(review): str(book.read()) stringifies *bytes*, producing "b'...'" with
# literal \n escape sequences embedded in the text — .decode('utf-8') is likely
# what was intended; confirm before changing, since downstream tokens would shift.
sentences = tokenizer.tokenize(str(book.read()))
print (f"{len(sentences)} sentences")
# + id="xWyxaKfJOomF"
def preprocess(text):
    """Lowercase, strip punctuation/non-alphanumerics, and split into word tokens."""
    cleaned = text.lower()
    # Apply the regex pipeline as a data-driven sequence of passes:
    # pad punctuation with spaces, drop non-alphanumerics, collapse whitespace.
    passes = (
        (r"([-;;.,!?<=>])", r" \1 "),
        ('[^A-Za-z0-9]+', ' '),
        (' +', ' '),
    )
    for pattern, repl in passes:
        cleaned = re.sub(pattern, repl, cleaned)
    # Tokenize on single spaces (empty input yields [''], as before).
    return cleaned.strip().split(" ")
# + id="NsZz5jfMlx0d" colab={"base_uri": "https://localhost:8080/"} executionInfo={"status": "ok", "timestamp": 1608249518770, "user_tz": 420, "elapsed": 6411, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GjMIOf3R_zwS_zZx4ZyPMtQe0lOkGpPOEUEKWpM7g=s64", "userId": "00378334517810298963"}} outputId="2005b37e-9d95-435c-f3cd-fdf1b451da66"
# Preprocess sentences
print (sentences[11])
sentences = [preprocess(sentence) for sentence in sentences]
print (sentences[11])
# + [markdown] id="rozFTf06ji1b"
# But how do we learn the embeddings in the first place? The intuition behind embeddings is that the definition of a token depends not only on the token itself but also on its context. There are several different ways of doing this:
#
# 1. Given the word in the context, predict the target word (CBOW - continuous bag of words).
# 2. Given the target word, predict the context word (skip-gram).
# 3. Given a sequence of words, predict the next word (LM - language modeling).
#
# All of these approaches involve creating data to train our model on. Every word in a sentence becomes the target word and the context words are determined by a window. In the image below (skip-gram), the window size is 2 (2 words to the left and right of the target word). We repeat this for every sentence in our corpus and this results in our training data for the unsupervised task. This is an unsupervised learning technique since we don't have official labels for contexts. The idea is that similar target words will appear with similar contexts and we can learn this relationship by repeatedly training our model with (context, target) pairs.
#
# <div align="left">
# <img src="https://raw.githubusercontent.com/GokuMohandas/MadeWithML/main/images/basics/python/skipgram.png" width="600">
# </div>
#
# We can learn embeddings using any of these approaches above and some work better than others. You can inspect the learned embeddings but the best way to choose an approach is to empirically validate the performance on a supervised task.
# + [markdown] id="No6c943C-P7o"
# ## Word2Vec
# + [markdown] id="VeszvcMOji4u"
# When we have large vocabularies to learn embeddings for, things can get complex very quickly. Recall that the backpropagation with softmax updates both the correct and incorrect class weights. This becomes a massive computation for every backwas pass we do so a workaround is to use [negative sampling](http://mccormickml.com/2017/01/11/word2vec-tutorial-part-2-negative-sampling/) which only updates the correct class and a few arbitrary incorrect classes (`NEGATIVE_SAMPLING`=20). We're able to do this because of the large amount of training data where we'll see the same word as the target class multiple times.
#
#
# + id="TqKCr--k-f9e"
import gensim
from gensim.models import KeyedVectors
from gensim.models import Word2Vec
# + id="ufU-9l_W-QKj"
EMBEDDING_DIM = 100
WINDOW = 5
MIN_COUNT = 3 # Ignores all words with total frequency lower than this
SKIP_GRAM = 1 # 0 = CBOW
NEGATIVE_SAMPLING = 20
# + id="Ha3I2oSsmhJa" colab={"base_uri": "https://localhost:8080/"} executionInfo={"status": "ok", "timestamp": 1608249530492, "user_tz": 420, "elapsed": 18082, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GjMIOf3R_zwS_zZx4ZyPMtQe0lOkGpPOEUEKWpM7g=s64", "userId": "00378334517810298963"}} outputId="9b3e1430-3430-438c-be18-6112815397aa"
# Super fast because of optimized C code under the hood
w2v = Word2Vec(
sentences=sentences, size=EMBEDDING_DIM,
window=WINDOW, min_count=MIN_COUNT,
sg=SKIP_GRAM, negative=NEGATIVE_SAMPLING)
print (w2v)
# + id="Cl6oJv8jmhHE" colab={"base_uri": "https://localhost:8080/"} executionInfo={"status": "ok", "timestamp": 1608249530493, "user_tz": 420, "elapsed": 18061, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GjMIOf3R_zwS_zZx4ZyPMtQe0lOkGpPOEUEKWpM7g=s64", "userId": "00378334517810298963"}} outputId="a72c6cee-8dd7-42ee-f233-7441369105d7"
# Vector for each word
w2v.wv.get_vector("potter")
# + id="DyuLX9DTnLvM" colab={"base_uri": "https://localhost:8080/"} executionInfo={"status": "ok", "timestamp": 1608249530494, "user_tz": 420, "elapsed": 18041, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GjMIOf3R_zwS_zZx4ZyPMtQe0lOkGpPOEUEKWpM7g=s64", "userId": "00378334517810298963"}} outputId="2e8d31cf-d7cb-4d63-957b-cbfaf6341fa4"
# Get nearest neighbors (excluding itself)
w2v.wv.most_similar(positive="scar", topn=5)
# + id="YT7B0KRVTFew"
# Saving and loading
w2v.wv.save_word2vec_format('model.bin', binary=True)
w2v = KeyedVectors.load_word2vec_format('model.bin', binary=True)
# + [markdown] id="JZXVP5vfuiD5"
# ## FastText
# + [markdown] id="uvuoeWYMuqsa"
# What happens when a word doesn't exist in our vocabulary? We could assign an UNK token which is used for all OOV (out of vocabulary) words or we could use [FastText](https://radimrehurek.com/gensim/models/fasttext.html), which uses character-level n-grams to embed a word. This helps embed rare words, misspelled words, and also words that don't exist in our corpus but are similar to words in our corpus.
# + id="fVg3PBeD-kAa"
from gensim.models import FastText
# + id="eTNW4Mfgrpo0" colab={"base_uri": "https://localhost:8080/"} executionInfo={"status": "ok", "timestamp": 1608249545432, "user_tz": 420, "elapsed": 32935, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GjMIOf3R_zwS_zZx4ZyPMtQe0lOkGpPOEUEKWpM7g=s64", "userId": "00378334517810298963"}} outputId="d7aa13b8-5beb-48dc-f661-d841edde385a"
# Super fast because of optimized C code under the hood
ft = FastText(sentences=sentences, size=EMBEDDING_DIM,
window=WINDOW, min_count=MIN_COUNT,
sg=SKIP_GRAM, negative=NEGATIVE_SAMPLING)
print (ft)
# + id="LbA4vU5uxiw3"
# This word doesn't exist so the word2vec model will error out
# w2v.wv.most_similar(positive="scarring", topn=5)
# + id="eRG30aE4sMjt" colab={"base_uri": "https://localhost:8080/"} executionInfo={"status": "ok", "timestamp": 1608249545434, "user_tz": 420, "elapsed": 32903, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GjMIOf3R_zwS_zZx4ZyPMtQe0lOkGpPOEUEKWpM7g=s64", "userId": "00378334517810298963"}} outputId="65bdc527-c62e-4da7-c3c1-5136695855ec"
# FastText will use n-grams to embed an OOV word
ft.wv.most_similar(positive="scarring", topn=5)
# + id="7SE5fPMUnLyP"
# Save and loading
ft.wv.save('model.bin')
ft = KeyedVectors.load('model.bin')
# + [markdown] id="67UmjtK0pF9X"
# # Pretrained embeddings
# + [markdown] id="Xm1GPn4spF6x"
# We can learn embeddings from scratch using one of the approaches above but we can also leverage pretrained embeddings that have been trained on millions of documents. Popular ones include [Word2Vec](https://www.tensorflow.org/tutorials/text/word2vec) (skip-gram) or [GloVe](https://nlp.stanford.edu/projects/glove/) (global word-word co-occurrence). We can validate that these embeddings captured meaningful semantic relationships by confirming them.
# + id="Hh42Mb4lLbuB"
from gensim.scripts.glove2word2vec import glove2word2vec
from io import BytesIO
import matplotlib.pyplot as plt
from sklearn.decomposition import PCA
from urllib.request import urlopen
from zipfile import ZipFile
# + id="m9gxHJA9M8hK"
# Arguments
EMBEDDING_DIM = 100
# + id="ANfQHxGrMKTe"
def plot_embeddings(words, embeddings, pca_results):
    """Scatter-plot the 2-D PCA projection of each word, labeled with the word."""
    for word in words:
        # Locate the word's row in the (vocab-ordered) PCA result matrix.
        idx = embeddings.index2word.index(word)
        point = (pca_results[idx, 0], pca_results[idx, 1])
        plt.scatter(*point)
        plt.annotate(word, xy=point)
    plt.show()
# + id="ZW9Qtkz3LfdY" colab={"base_uri": "https://localhost:8080/"} executionInfo={"status": "ok", "timestamp": 1608250237848, "user_tz": 420, "elapsed": 427321, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GjMIOf3R_zwS_zZx4ZyPMtQe0lOkGpPOEUEKWpM7g=s64", "userId": "00378334517810298963"}} outputId="d6953cc6-b41a-453f-c632-a234776e075b"
# Unzip the file (may take ~3-5 minutes)
resp = urlopen('http://nlp.stanford.edu/data/glove.6B.zip')
zipfile = ZipFile(BytesIO(resp.read()))
zipfile.namelist()
# + id="bWnVBrOaLjIC" colab={"base_uri": "https://localhost:8080/", "height": 35} executionInfo={"status": "ok", "timestamp": 1608250240804, "user_tz": 420, "elapsed": 430134, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GjMIOf3R_zwS_zZx4ZyPMtQe0lOkGpPOEUEKWpM7g=s64", "userId": "00378334517810298963"}} outputId="093636f1-3952-4328-e7eb-1b308b62cc9c"
# Write embeddings to file
embeddings_file = 'glove.6B.{0}d.txt'.format(EMBEDDING_DIM)
zipfile.extract(embeddings_file)
# + id="qFLyIqIxrUIs" colab={"base_uri": "https://localhost:8080/"} executionInfo={"status": "ok", "timestamp": 1608250240806, "user_tz": 420, "elapsed": 429968, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GjMIOf3R_zwS_zZx4ZyPMtQe0lOkGpPOEUEKWpM7g=s64", "userId": "00378334517810298963"}} outputId="bc8a3ead-34e6-46c0-b5f7-6471cf523acd"
# Preview of the GloVe embeddings file
with open(embeddings_file, 'r') as fp:
line = next(fp)
values = line.split()
word = values[0]
embedding = np.asarray(values[1:], dtype='float32')
print (f"word: {word}")
print (f"embedding:\n{embedding}")
print (f"embedding dim: {len(embedding)}")
# + id="9eD5doqFLjFY" colab={"base_uri": "https://localhost:8080/"} executionInfo={"status": "ok", "timestamp": 1608250242031, "user_tz": 420, "elapsed": 430805, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GjMIOf3R_zwS_zZx4ZyPMtQe0lOkGpPOEUEKWpM7g=s64", "userId": "00378334517810298963"}} outputId="59310cfa-e600-4ce5-ddca-28b308591241"
# Save GloVe embeddings to local directory in word2vec format
word2vec_output_file = '{0}.word2vec'.format(embeddings_file)
glove2word2vec(embeddings_file, word2vec_output_file)
# + id="To4sx_1iMCX0"
# Load embeddings (may take a minute)
glove = KeyedVectors.load_word2vec_format(word2vec_output_file, binary=False)
# + id="UEhBhvgHMEH9" colab={"base_uri": "https://localhost:8080/"} executionInfo={"status": "ok", "timestamp": 1608250277481, "user_tz": 420, "elapsed": 465141, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GjMIOf3R_zwS_zZx4ZyPMtQe0lOkGpPOEUEKWpM7g=s64", "userId": "00378334517810298963"}} outputId="85582e56-ccd1-40eb-e7d4-7d376adf17ac"
# (king - man) + woman = ?
glove.most_similar(positive=['woman', 'king'], negative=['man'], topn=5)
# + id="xR94AICkMEFV" colab={"base_uri": "https://localhost:8080/"} executionInfo={"status": "ok", "timestamp": 1608250277482, "user_tz": 420, "elapsed": 464638, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GjMIOf3R_zwS_zZx4ZyPMtQe0lOkGpPOEUEKWpM7g=s64", "userId": "00378334517810298963"}} outputId="209fcf3f-6bb8-4593-872d-aac59d719431"
# Get nearest neighbors (excluding itself)
glove.wv.most_similar(positive="goku", topn=5)
# + id="gseqjBmzMECq" colab={"base_uri": "https://localhost:8080/"} executionInfo={"status": "ok", "timestamp": 1608250280774, "user_tz": 420, "elapsed": 467358, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GjMIOf3R_zwS_zZx4ZyPMtQe0lOkGpPOEUEKWpM7g=s64", "userId": "00378334517810298963"}} outputId="491e696c-7bb1-4eb0-cbc4-aaaa7f134afa"
# Reduce dimensionality for plotting
X = glove[glove.wv.vocab]
pca = PCA(n_components=2)
pca_results = pca.fit_transform(X)
# + id="LFQWGyncMHgK" colab={"base_uri": "https://localhost:8080/", "height": 265} executionInfo={"status": "ok", "timestamp": 1608250280988, "user_tz": 420, "elapsed": 467060, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GjMIOf3R_zwS_zZx4ZyPMtQe0lOkGpPOEUEKWpM7g=s64", "userId": "00378334517810298963"}} outputId="33817b31-e29e-460f-cfad-37aabb53c884"
# Visualize
plot_embeddings(
words=["king", "queen", "man", "woman"], embeddings=glove,
pca_results=pca_results)
# + id="MzrZ2_RBMHdn" colab={"base_uri": "https://localhost:8080/"} executionInfo={"status": "ok", "timestamp": 1608250280990, "user_tz": 420, "elapsed": 466243, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GjMIOf3R_zwS_zZx4ZyPMtQe0lOkGpPOEUEKWpM7g=s64", "userId": "00378334517810298963"}} outputId="1cabc6da-79d8-485b-d6d6-c85994e12247"
# Bias in embeddings
glove.most_similar(positive=['woman', 'doctor'], negative=['man'], topn=5)
# + [markdown] id="EE1kCvwnkBPc"
# # Set up
# + id="m_DIRj8G5uOC"
import numpy as np
import pandas as pd
import random
import torch
import torch.nn as nn
# + id="5tdPACZf5uTo"
SEED = 1234
# + id="NMt7hJuB5uXN"
def set_seeds(seed=1234):
    """Seed every RNG in play (NumPy, stdlib random, PyTorch CPU and all GPUs)."""
    # Same seeders as before, in the same order; manual_seed_all covers multi-GPU.
    for seed_fn in (np.random.seed, random.seed, torch.manual_seed,
                    torch.cuda.manual_seed, torch.cuda.manual_seed_all):
        seed_fn(seed)
set_seeds(seed=SEED)
# + id="mQzlWknv5ua6"
# Set seeds for reproducibility
set_seeds(seed=SEED)
# + id="M1-vnM-P8i_P"
# Set device
# `cuda` is a user preference flag; the effective device is CUDA only when the
# flag is set AND a GPU is actually available.
cuda = True
device = torch.device('cuda' if (
    torch.cuda.is_available() and cuda) else 'cpu')
# Make newly created tensors default to float32 on the chosen device.
# NOTE(review): set_default_tensor_type is deprecated in newer PyTorch —
# prefer torch.set_default_dtype / explicit device= arguments when upgrading.
torch.set_default_tensor_type('torch.FloatTensor')
if device.type == 'cuda':
    torch.set_default_tensor_type('torch.cuda.FloatTensor')
print (device)
# + [markdown] id="NfNfv1NTkKXa"
# ## Load data
# + [markdown] id="YiQk-yClkL3s"
# We will download the [AG News dataset](http://www.di.unipi.it/~gulli/AG_corpus_of_news_articles.html), which consists of 120K text samples from 4 unique classes (`Business`, `Sci/Tech`, `Sports`, `World`)
# + id="5HLyMQBDj__P"
import numpy as np
import pandas as pd
# + id="SNfmmNBokAHI" colab={"base_uri": "https://localhost:8080/", "height": 204} executionInfo={"status": "ok", "timestamp": 1608249763205, "user_tz": 420, "elapsed": 1037, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/<KEY>", "userId": "00378334517810298963"}} outputId="35775f46-c599-4ed8-d3e7-5ee126259e90"
# Load data
url = "https://raw.githubusercontent.com/GokuMohandas/MadeWithML/main/datasets/news.csv"
df = pd.read_csv(url, header=0) # load
df = df.sample(frac=1).reset_index(drop=True) # shuffle
df.head()
# + [markdown] id="Bk0a7TE2kTq4"
# ## Preprocessing
# + [markdown] id="yeTyLL8-kU9F"
# We're going to clean up our input data first by doing operations such as lower text, removing stop (filler) words, filters using regular expressions, etc.
# + id="ZIrwF49UkAJ9"
import nltk
from nltk.corpus import stopwords
from nltk.stem import PorterStemmer
import re
# + id="TQR8I3HxkAMS" colab={"base_uri": "https://localhost:8080/"} executionInfo={"status": "ok", "timestamp": 1608249764788, "user_tz": 420, "elapsed": 1910, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GjMIOf3R_zwS_zZx4ZyPMtQe0lOkGpPOEUEKWpM7g=s64", "userId": "00378334517810298963"}} outputId="4a2f2da6-af99-4b87-af7e-0a64779cffe6"
nltk.download('stopwords')
STOPWORDS = stopwords.words('english')
print (STOPWORDS[:5])
porter = PorterStemmer()
# + id="g43E1Oa1kAO4"
def preprocess(text, stopwords=STOPWORDS):
    """Lowercase, drop stopwords and parenthesized text, and normalize spacing.

    Unlike the earlier notebook's version, returns a cleaned *string*,
    not a token list.
    """
    lowered = text.lower()
    # Strip whole stopwords (word-boundary anchored) plus trailing whitespace.
    stopword_re = re.compile(r'\b(' + r'|'.join(stopwords) + r')\b\s*')
    lowered = stopword_re.sub('', lowered)
    # Remove parenthesized asides, then run the punctuation/whitespace passes.
    for pattern, repl in (
        (r'\([^)]*\)', ''),
        (r"([-;;.,!?<=>])", r" \1 "),
        ('[^A-Za-z0-9]+', ' '),
        (' +', ' '),
    ):
        lowered = re.sub(pattern, repl, lowered)
    return lowered.strip()
# + id="tsWX-VNQkARQ" colab={"base_uri": "https://localhost:8080/", "height": 35} executionInfo={"status": "ok", "timestamp": 1608249764794, "user_tz": 420, "elapsed": 1618, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GjMIOf3R_zwS_zZx4ZyPMtQe0lOkGpPOEUEKWpM7g=s64", "userId": "00378334517810298963"}} outputId="f11955cc-f7a7-4a6b-a4a0-fcda5de3cff9"
# Sample
text = "Great week for the NYSE!"
preprocess(text=text)
# + colab={"base_uri": "https://localhost:8080/"} id="5Up0hTP8kwJx" executionInfo={"status": "ok", "timestamp": 1608249766536, "user_tz": 420, "elapsed": 3162, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GjMIOf3R_zwS_zZx4ZyPMtQe0lOkGpPOEUEKWpM7g=s64", "userId": "00378334517810298963"}} outputId="03d221e5-06b3-4491-c8ef-6e56e769d33e"
# Apply to dataframe
preprocessed_df = df.copy()
preprocessed_df.title = preprocessed_df.title.apply(preprocess)
print (f"{df.title.values[0]}\n\n{preprocessed_df.title.values[0]}")
# + [markdown] id="_F_TNV8Nk7Za"
# > If you have preprocessing steps like standardization, etc. that are calculated, you need to separate the training and test set first before applying those operations. This is because we cannot apply any knowledge gained from the test set accidentally (data leak) during preprocessing/training. However for global preprocessing steps like the function above where we aren't learning anything from the data itself, we can perform before splitting the data.
# + [markdown] id="cZaX1WFyk8gR"
# ## Split data
# + id="1iy_Ej7ukwMt"
import collections
from sklearn.model_selection import train_test_split
# + id="Bw5zFpt3k-wz"
TRAIN_SIZE = 0.7
VAL_SIZE = 0.15
TEST_SIZE = 0.15
# + id="SHutQd7Pk-zt"
def train_val_test_split(X, y, train_size):
    """Split (X, y) into stratified train/val/test sets.

    The `train_size` fraction goes to train; the remainder is split evenly
    (50/50) between validation and test, stratifying on the labels each time.
    """
    # BUG FIX: previously the global TRAIN_SIZE was passed here, silently
    # ignoring the `train_size` argument callers supply.
    X_train, X_, y_train, y_ = train_test_split(X, y, train_size=train_size, stratify=y)
    X_val, X_test, y_val, y_test = train_test_split(X_, y_, train_size=0.5, stratify=y_)
    return X_train, X_val, X_test, y_train, y_val, y_test
# + id="SOz9QSm5lBH7"
# Data
X = preprocessed_df["title"].values
y = preprocessed_df["category"].values
# + colab={"base_uri": "https://localhost:8080/"} id="nU8ubwislBKk" executionInfo={"status": "ok", "timestamp": 1608249766539, "user_tz": 420, "elapsed": 1879, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GjMIOf3R_zwS_zZx4ZyPMtQe0lOkGpPOEUEKWpM7g=s64", "userId": "00378334517810298963"}} outputId="f1127638-129f-4edb-a7c5-96f44c2fa041"
# Create data splits
X_train, X_val, X_test, y_train, y_val, y_test = train_val_test_split(
X=X, y=y, train_size=TRAIN_SIZE)
print (f"X_train: {X_train.shape}, y_train: {y_train.shape}")
print (f"X_val: {X_val.shape}, y_val: {y_val.shape}")
print (f"X_test: {X_test.shape}, y_test: {y_test.shape}")
print (f"Sample point: {X_train[0]} → {y_train[0]}")
# + [markdown] id="JZhaxH8xmHAy"
# ## LabelEncoder
# + [markdown] id="fYueMyIUmHEh"
# Next we'll define a `LabelEncoder` to encode our text labels into unique indices
# + id="DsPgVemMmHJK"
import itertools
# + id="dlZ4w8OfmHM2"
class LabelEncoder(object):
    """Label encoder for tag labels.

    Maps class labels to contiguous integer indices (and back). `fit` assigns
    indices in np.unique order, i.e. sorted label order.
    """

    def __init__(self, class_to_index=None):
        # BUG FIX: the previous mutable default argument (`class_to_index={}`)
        # was shared across all instances and mutated by fit(), so one
        # encoder's classes leaked into every subsequently created encoder.
        self.class_to_index = {} if class_to_index is None else class_to_index
        self.index_to_class = {v: k for k, v in self.class_to_index.items()}
        self.classes = list(self.class_to_index.keys())

    def __len__(self):
        return len(self.class_to_index)

    def __str__(self):
        return f"<LabelEncoder(num_classes={len(self)})>"

    def fit(self, y):
        """Learn the class→index mapping from labels `y`; returns self."""
        classes = np.unique(y)
        for i, class_ in enumerate(classes):
            self.class_to_index[class_] = i
        self.index_to_class = {v: k for k, v in self.class_to_index.items()}
        self.classes = list(self.class_to_index.keys())
        return self

    def encode(self, y):
        """Convert an iterable of labels to an int numpy array of indices."""
        encoded = np.zeros((len(y)), dtype=int)
        for i, item in enumerate(y):
            encoded[i] = self.class_to_index[item]
        return encoded

    def decode(self, y):
        """Convert an iterable of indices back to a list of class labels."""
        return [self.index_to_class[item] for item in y]

    def save(self, fp):
        """Write the class→index mapping as JSON to file path `fp`."""
        with open(fp, 'w') as fp_:
            contents = {'class_to_index': self.class_to_index}
            json.dump(contents, fp_, indent=4, sort_keys=False)

    @classmethod
    def load(cls, fp):
        """Reconstruct an encoder from a JSON file written by save()."""
        with open(fp, 'r') as fp_:
            kwargs = json.load(fp=fp_)
        return cls(**kwargs)
# + colab={"base_uri": "https://localhost:8080/"} id="nA4TyhuBmHPu" executionInfo={"status": "ok", "timestamp": 1608249768403, "user_tz": 420, "elapsed": 686, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GjMIOf3R_zwS_zZx4ZyPMtQe0lOkGpPOEUEKWpM7g=s64", "userId": "00378334517810298963"}} outputId="ada26a5c-c7e7-47bb-fd54-f603ae237160"
# Encode
# Fit the label encoder on the training labels only.
label_encoder = LabelEncoder()
label_encoder.fit(y_train)
NUM_CLASSES = len(label_encoder)
label_encoder.class_to_index
# + colab={"base_uri": "https://localhost:8080/"} id="FjEqwySTmHSg" executionInfo={"status": "ok", "timestamp": 1608249768404, "user_tz": 420, "elapsed": 585, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GjMIOf3R_zwS_zZx4ZyPMtQe0lOkGpPOEUEKWpM7g=s64", "userId": "00378334517810298963"}} outputId="a282ef19-441d-4f2d-cb85-7dcfd1e6218b"
# Convert labels to tokens
# Replace string labels with their integer indices for all three splits.
print (f"y_train[0]: {y_train[0]}")
y_train = label_encoder.encode(y_train)
y_val = label_encoder.encode(y_val)
y_test = label_encoder.encode(y_test)
print (f"y_train[0]: {y_train[0]}")
# + colab={"base_uri": "https://localhost:8080/"} id="xE5-9S6VmHVO" executionInfo={"status": "ok", "timestamp": 1608249768612, "user_tz": 420, "elapsed": 334, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GjMIOf3R_zwS_zZx4ZyPMtQe0lOkGpPOEUEKWpM7g=s64", "userId": "00378334517810298963"}} outputId="a8cb3a16-4ef0-4fc6-b48d-c60ab4c0f13b"
# Class weights
# Inverse-frequency weights so rare classes count more in the loss.
counts = np.bincount(y_train)
class_weights = {i: 1.0/count for i, count in enumerate(counts)}
print (f"counts: {counts}\nweights: {class_weights}")
# + [markdown] id="mHRC9EfzlmP-"
# ## Tokenizer
# + [markdown] id="bkQmzdUXlmUH"
# We'll define a `Tokenizer` to convert our text input data into token indices.
# + id="AcqOl3Lbk-2Q"
import json
from collections import Counter
from more_itertools import take
# + id="XbyIehIDl7l-"
class Tokenizer(object):
    """Convert raw text to sequences of integer token indices and back.

    Index 0 is reserved for the pad token and index 1 for out-of-vocabulary
    tokens; the rest of the vocabulary is filled in frequency order.
    """
    def __init__(self, char_level, num_tokens=None,
                 pad_token='<PAD>', oov_token='<UNK>',
                 token_to_index=None):
        self.char_level = char_level
        self.separator = '' if self.char_level else ' '
        # Two vocabulary slots are reserved for <PAD> and <UNK>.
        self.num_tokens = (num_tokens - 2) if num_tokens else num_tokens
        self.pad_token = pad_token
        self.oov_token = oov_token
        self.token_to_index = token_to_index or {pad_token: 0, oov_token: 1}
        self.index_to_token = {v: k for k, v in self.token_to_index.items()}
    def __len__(self):
        return len(self.token_to_index)
    def __str__(self):
        return f"<Tokenizer(num_tokens={len(self)})>"
    def fit_on_texts(self, texts):
        """Build the vocabulary from the most frequent tokens in `texts`."""
        if self.char_level:
            token_stream = (token for text in texts for token in text)
        else:
            token_stream = (token for text in texts for token in text.split(" "))
        counts = Counter(token_stream).most_common(self.num_tokens)
        # Frequency of the rarest kept token; useful for tuning num_tokens.
        self.min_token_freq = counts[-1][1]
        for token, _ in counts:
            index = len(self)
            self.token_to_index[token] = index
            self.index_to_token[index] = token
        return self
    def texts_to_sequences(self, texts):
        """Map each text to a numpy array of indices (<UNK> for OOV tokens)."""
        oov_index = self.token_to_index[self.oov_token]
        sequences = []
        for text in texts:
            tokens = text if self.char_level else text.split(' ')
            indices = [self.token_to_index.get(token, oov_index)
                       for token in tokens]
            sequences.append(np.asarray(indices))
        return sequences
    def sequences_to_texts(self, sequences):
        """Map sequences of indices back to joined text strings."""
        return [
            self.separator.join(
                self.index_to_token.get(index, self.oov_token)
                for index in sequence)
            for sequence in sequences]
    def save(self, fp):
        """Persist the vocabulary and settings as JSON at path `fp`."""
        contents = {
            'char_level': self.char_level,
            'oov_token': self.oov_token,
            'token_to_index': self.token_to_index
        }
        with open(fp, 'w') as fp:
            json.dump(contents, fp, indent=4, sort_keys=False)
    @classmethod
    def load(cls, fp):
        """Restore a tokenizer from a JSON file written by `save`."""
        with open(fp, 'r') as fp:
            kwargs = json.load(fp=fp)
        return cls(**kwargs)
# + [markdown] id="gZgQb7cdqD6q"
# > It's important that we only fit using our train data split because during inference, our model will not always know every token so it's important to replicate that scenario with our validation and test splits as well.
# + colab={"base_uri": "https://localhost:8080/"} id="XlMH93AKl7oc" executionInfo={"status": "ok", "timestamp": 1608249770260, "user_tz": 420, "elapsed": 612, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GjMIOf3R_zwS_zZx4ZyPMtQe0lOkGpPOEUEKWpM7g=s64", "userId": "00378334517810298963"}} outputId="3f20c3cf-b4e1-47be-d5dc-8f56a63afc4a"
# Tokenize
# Fit on the train split only so val/test see realistic OOV tokens.
tokenizer = Tokenizer(char_level=False, num_tokens=5000)
tokenizer.fit_on_texts(texts=X_train)
VOCAB_SIZE = len(tokenizer)
print (tokenizer)
# + colab={"base_uri": "https://localhost:8080/"} id="VT93_ZFIl7rE" executionInfo={"status": "ok", "timestamp": 1608249770260, "user_tz": 420, "elapsed": 456, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GjMIOf3R_zwS_zZx4ZyPMtQe0lOkGpPOEUEKWpM7g=s64", "userId": "00378334517810298963"}} outputId="f02d0710-6b99-4a80-a50f-fb27bef475ca"
# Sample of tokens
# Peek at the first vocabulary entries and the rarest kept token's frequency.
print (take(5, tokenizer.token_to_index.items()))
print (f"least freq token's freq: {tokenizer.min_token_freq}") # use this to adjust num_tokens
# + colab={"base_uri": "https://localhost:8080/"} id="Mz2XAlijl7u0" executionInfo={"status": "ok", "timestamp": 1608249771770, "user_tz": 420, "elapsed": 1090, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GjMIOf3R_zwS_zZx4ZyPMtQe0lOkGpPOEUEKWpM7g=s64", "userId": "00378334517810298963"}} outputId="23161be4-416f-472d-94de-55c512ffbc12"
# Convert texts to sequences of indices
# After this, X_* hold lists of variable-length index arrays, not strings.
X_train = tokenizer.texts_to_sequences(X_train)
X_val = tokenizer.texts_to_sequences(X_val)
X_test = tokenizer.texts_to_sequences(X_test)
preprocessed_text = tokenizer.sequences_to_texts([X_train[0]])[0]
print ("Text to indices:\n"
    f" (preprocessed) → {preprocessed_text}\n"
    f" (tokenized) → {X_train[0]}")
# + [markdown] id="581nl9EYFAsS"
# # Embedding layer
# + [markdown] id="JbOzzfLNFCtW"
# We can embed our inputs using PyTorch's [embedding layer](https://pytorch.org/docs/stable/generated/torch.nn.Embedding.html#torch.nn.Embedding).
# + id="1tHb3v_KH53e" colab={"base_uri": "https://localhost:8080/"} executionInfo={"status": "ok", "timestamp": 1608249787092, "user_tz": 420, "elapsed": 10907, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GjMIOf3R_zwS_zZx4ZyPMtQe0lOkGpPOEUEKWpM7g=s64", "userId": "00378334517810298963"}} outputId="cf03b218-df68-4620-96e7-f6f59f95ba12"
# Input
# Toy batch of token indices: shape (1, 5), values in [0, vocab_size).
vocab_size = 10
x = torch.randint(high=vocab_size, size=(1,5))
print (x)
print (x.shape)
# + id="FXUpmH7AFOJh" colab={"base_uri": "https://localhost:8080/"} executionInfo={"status": "ok", "timestamp": 1608249787093, "user_tz": 420, "elapsed": 10703, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GjMIOf3R_zwS_zZx4ZyPMtQe0lOkGpPOEUEKWpM7g=s64", "userId": "00378334517810298963"}} outputId="3c3cdf59-fd99-4e9b-831c-3e813672cfbb"
# Embedding layer
# One 100-dim vector per vocabulary entry → weight shape (10, 100).
embeddings = nn.Embedding(embedding_dim=100, num_embeddings=vocab_size)
print (embeddings.weight.shape)
# + id="bVGWIgEGGmHn" colab={"base_uri": "https://localhost:8080/"} executionInfo={"status": "ok", "timestamp": 1608249787094, "user_tz": 420, "elapsed": 10219, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GjMIOf3R_zwS_zZx4ZyPMtQe0lOkGpPOEUEKWpM7g=s64", "userId": "00378334517810298963"}} outputId="f510b101-fe78-490a-a148-7684ed1585f0"
# Embed the input
# Each index is replaced by its vector: (1, 5) → (1, 5, 100).
embeddings(x).shape
# + [markdown] id="WbO8HYjaGxZY"
# Each token in the input is represented via embeddings (all out-of-vocabulary (OOV) tokens are given the embedding for `UNK` token.) In the model below, we'll see how to set these embeddings to be pretrained GloVe embeddings and how to choose whether to freeze (fixed embedding weights) those embeddings or not during training.
# + [markdown] id="uTWMME1VmaTQ"
# # Padding
# + [markdown] id="qu5gHg_Fmzdp"
# Our inputs are all of varying length but we need each batch to be uniformly shaped. Therefore, we will use padding to make all the inputs in the batch the same length. Our padding index will be 0 (note that this is consistent with the `<PAD>` token defined in our `Tokenizer`).
#
# > While embedding our input tokens will create a batch of shape (`N`, `max_seq_len`, `embed_dim`) we only need to provide a 2D matrix (`N`, `max_seq_len`) for using embeddings with PyTorch.
# + id="JJE5dW33mHZn"
def pad_sequences(sequences, max_seq_len=0):
    """Zero-pad `sequences` on the right to a common length.

    The target length is the longer of `max_seq_len` and the longest
    sequence in the batch; output is a float (N, target_len) array.
    """
    longest = max(len(sequence) for sequence in sequences)
    target_len = max(max_seq_len, longest)
    padded = np.zeros((len(sequences), target_len))
    for row, sequence in zip(padded, sequences):
        row[:len(sequence)] = sequence
    return padded
# + colab={"base_uri": "https://localhost:8080/"} id="5niX_T9ZmHcn" executionInfo={"status": "ok", "timestamp": 1608249787095, "user_tz": 420, "elapsed": 6649, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GjMIOf3R_zwS_zZx4ZyPMtQe0lOkGpPOEUEKWpM7g=s64", "userId": "00378334517810298963"}} outputId="0fbea476-bc76-4462-b591-ad5946ac10df"
# 2D sequences
# Pad the first three train sequences to the length of the longest one.
padded = pad_sequences(X_train[0:3])
print (padded.shape)
print (padded)
# + [markdown] id="t8LGKXCmUgzV"
# # Dataset
# + [markdown] id="ACBKJ77TVBpi"
# We're going to create Datasets and DataLoaders to be able to efficiently create batches with our data splits.
# + id="jREEFz72Hssx"
# Conv filter widths: [1, 2, 3] — one n-gram extractor per width.
FILTER_SIZES = list(range(1, 4)) # uni, bi and tri grams
# + id="2K0D-vTGUgHV"
class Dataset(torch.utils.data.Dataset):
    """Dataset over (token-sequence, label) pairs with batch-time padding.

    Args:
        X: list of 1D arrays of token indices (variable length).
        y: array-like of integer class labels.
        max_filter_size: widest CNN filter; every padded batch is made at
            least this long so each Conv1d always has a valid window.
    """
    def __init__(self, X, y, max_filter_size):
        self.X = X
        self.y = y
        self.max_filter_size = max_filter_size
    def __len__(self):
        return len(self.y)
    def __str__(self):
        return f"<Dataset(N={len(self)})>"
    def __getitem__(self, index):
        X = self.X[index]
        y = self.y[index]
        return [X, y]
    def collate_fn(self, batch):
        """Processing on a batch: pad to a uniform length and cast."""
        # Get inputs
        batch = np.array(batch, dtype=object)
        X = batch[:, 0]
        y = np.stack(batch[:, 1], axis=0)
        # Pad sequences.
        # FIX: max_filter_size was stored but never used — a batch whose
        # sequences were all shorter than the widest conv filter would crash
        # the model. Pad to at least max_filter_size.
        X = pad_sequences(X, max_seq_len=self.max_filter_size)
        # Cast
        X = torch.LongTensor(X.astype(np.int32))
        y = torch.LongTensor(y.astype(np.int32))
        return X, y
    def create_dataloader(self, batch_size, shuffle=False, drop_last=False):
        """Build a DataLoader that batches via this dataset's collate_fn."""
        return torch.utils.data.DataLoader(
            dataset=self, batch_size=batch_size, collate_fn=self.collate_fn,
            shuffle=shuffle, drop_last=drop_last, pin_memory=True)
# + colab={"base_uri": "https://localhost:8080/"} id="tdAyMvfnUgP9" executionInfo={"status": "ok", "timestamp": 1608250567232, "user_tz": 420, "elapsed": 745, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GjMIOf3R_zwS_zZx4ZyPMtQe0lOkGpPOEUEKWpM7g=s64", "userId": "00378334517810298963"}} outputId="161a90cc-0400-4a0e-c83a-37182cda415f"
# Create datasets
# The widest filter determines the minimum padded sequence length.
max_filter_size = max(FILTER_SIZES)
train_dataset = Dataset(X=X_train, y=y_train, max_filter_size=max_filter_size)
val_dataset = Dataset(X=X_val, y=y_val, max_filter_size=max_filter_size)
test_dataset = Dataset(X=X_test, y=y_test, max_filter_size=max_filter_size)
print ("Datasets:\n"
    f" Train dataset:{train_dataset.__str__()}\n"
    f" Val dataset: {val_dataset.__str__()}\n"
    f" Test dataset: {test_dataset.__str__()}\n"
    "Sample point:\n"
    f" X: {train_dataset[0][0]}\n"
    f" y: {train_dataset[0][1]}")
# + colab={"base_uri": "https://localhost:8080/"} id="VeK0C3ORUgTu" executionInfo={"status": "ok", "timestamp": 1608250567570, "user_tz": 420, "elapsed": 857, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GjMIOf3R_zwS_zZx4ZyPMtQe0lOkGpPOEUEKWpM7g=s64", "userId": "00378334517810298963"}} outputId="ad19832b-95fd-4132-b83c-edd91980b248"
# Create dataloaders
# Batches are padded/cast by Dataset.collate_fn.
batch_size = 64
train_dataloader = train_dataset.create_dataloader(batch_size=batch_size)
val_dataloader = val_dataset.create_dataloader(batch_size=batch_size)
test_dataloader = test_dataset.create_dataloader(batch_size=batch_size)
batch_X, batch_y = next(iter(train_dataloader))
print ("Sample batch:\n"
    f" X: {list(batch_X.size())}\n"
    f" y: {list(batch_y.size())}\n"
    "Sample point:\n"
    f" X: {batch_X[0]}\n"
    f" y: {batch_y[0]}")
# + [markdown] id="pfhjWZRD94hK"
# # Model
# + [markdown] id="h0U0V8fViHZc"
# We'll be using a convolutional neural network on top of our embedded tokens to extract meaningful spatial signal. This time, we'll be using many filter widths to act as n-gram feature extractors.
# + [markdown] id="goDI3dSxHbgr"
# Let's visualize the model's forward pass.
#
# 1. We'll first tokenize our inputs (`batch_size`, `max_seq_len`).
# 2. Then we'll embed our tokenized inputs (`batch_size`, `max_seq_len`, `embedding_dim`).
# 3. We'll apply convolution via filters (`filter_size`, `vocab_size`, `num_filters`). Our filters act as n-gram detectors: the three filter sizes (1, 2 and 3) act as uni-gram, bi-gram and tri-gram feature extractors, respectively.
# 4. We'll apply 1D global max pooling which will extract the most relevant information from the feature maps for making the decision.
# 5. We feed the pool outputs to a fully-connected (FC) layer (with dropout).
# 6. We use one more FC layer with softmax to derive class probabilities.
# + [markdown] id="EIheSuazHeBT"
# <div align="left">
# <img src="https://raw.githubusercontent.com/GokuMohandas/MadeWithML/main/images/basics/embeddings/model.png" width="1000">
# </div>
# + id="_I3dmAFtsfy6"
import math
import torch.nn.functional as F
# + id="z1rRdLydmjdp"
# Model hyperparameters (EMBEDDING_DIM must match the GloVe file used below).
EMBEDDING_DIM = 100
HIDDEN_DIM = 100
DROPOUT_P = 0.1
# + id="juRjat3CiShK"
class CNN(nn.Module):
    """Text CNN: embedded tokens → parallel Conv1d n-gram filters with `SAME`
    padding → global max-pool per filter → two FC layers → class logits."""
    def __init__(self, embedding_dim, vocab_size, num_filters,
                 filter_sizes, hidden_dim, dropout_p, num_classes,
                 pretrained_embeddings=None, freeze_embeddings=False,
                 padding_idx=0):
        super(CNN, self).__init__()
        # Filter sizes
        self.filter_sizes = filter_sizes
        # Initialize embeddings: random, or seeded from a pretrained matrix
        if pretrained_embeddings is None:
            self.embeddings = nn.Embedding(
                embedding_dim=embedding_dim, num_embeddings=vocab_size,
                padding_idx=padding_idx)
        else:
            pretrained_embeddings = torch.from_numpy(pretrained_embeddings).float()
            self.embeddings = nn.Embedding(
                embedding_dim=embedding_dim, num_embeddings=vocab_size,
                padding_idx=padding_idx, _weight=pretrained_embeddings)
        # Freeze embeddings or not (frozen = no gradient updates)
        if freeze_embeddings:
            self.embeddings.weight.requires_grad = False
        # Conv weights: one Conv1d per filter size (n-gram width)
        self.conv = nn.ModuleList(
            [nn.Conv1d(in_channels=embedding_dim,
                       out_channels=num_filters,
                       kernel_size=f) for f in filter_sizes])
        # FC weights
        self.dropout = nn.Dropout(dropout_p)
        self.fc1 = nn.Linear(num_filters*len(filter_sizes), hidden_dim)
        self.fc2 = nn.Linear(hidden_dim, num_classes)
    def forward(self, inputs, channel_first=False, apply_softmax=False):
        """Forward pass.

        Args:
            inputs: single-element sequence holding a (N, max_seq_len)
                LongTensor of token indices.
            channel_first: True if the embedded input is already (N, C, L).
            apply_softmax: if True, return probabilities instead of logits.
        """
        # Embed: (N, L) -> (N, L, E)
        x_in, = inputs
        x_in = self.embeddings(x_in)
        # Rearrange input so num_channels is in dim 1 (N, C, L)
        if not channel_first:
            x_in = x_in.transpose(1, 2)
        # Conv outputs
        z = []
        max_seq_len = x_in.shape[2]
        for i, f in enumerate(self.filter_sizes):
            # `SAME` padding so the conv output length equals the input length
            padding_left = int((self.conv[i].stride[0]*(max_seq_len-1) - max_seq_len + self.filter_sizes[i])/2)
            padding_right = int(math.ceil((self.conv[i].stride[0]*(max_seq_len-1) - max_seq_len + self.filter_sizes[i])/2))
            # Conv + pool: global max over time -> (N, num_filters)
            _z = self.conv[i](F.pad(x_in, (padding_left, padding_right)))
            _z = F.max_pool1d(_z, _z.size(2)).squeeze(2)
            z.append(_z)
        # Concat conv outputs -> (N, num_filters * len(filter_sizes))
        z = torch.cat(z, 1)
        # FC layers
        z = self.fc1(z)
        z = self.dropout(z)
        y_pred = self.fc2(z)
        if apply_softmax:
            y_pred = F.softmax(y_pred, dim=1)
        return y_pred
# + [markdown] id="QBmYu6wjkgf0"
# # GloVe embeddings
# + [markdown] id="RFRaj2AUojN5"
# We're going create some utility functions to be able to load the pretrained GloVe embeddings into our Embeddings layer.
# + id="x9uev5AGsuqq"
def load_glove_embeddings(embeddings_file):
    """Load embeddings from a file.

    Each line is a word followed by its vector components; returns a dict
    mapping word -> float32 numpy vector.
    """
    embeddings = {}
    with open(embeddings_file, "r") as fp:
        for line in fp:
            word, *vector = line.split()
            embeddings[word] = np.asarray(vector, dtype='float32')
    return embeddings
# + id="tQHD-ThwWnjD"
def make_embeddings_matrix(embeddings, word_index, embedding_dim):
    """Create embeddings matrix to use in Embedding layer.

    Row i holds the pretrained vector for the word mapped to index i;
    words absent from `embeddings` keep an all-zero row.
    """
    embedding_matrix = np.zeros((len(word_index), embedding_dim))
    for word, index in word_index.items():
        if word in embeddings:
            embedding_matrix[index] = embeddings[word]
    return embedding_matrix
# + id="9WxP2GR3LmrO" colab={"base_uri": "https://localhost:8080/"} executionInfo={"status": "ok", "timestamp": 1608250578680, "user_tz": 420, "elapsed": 10090, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GjMIOf3R_zwS_zZx4ZyPMtQe0lOkGpPOEUEKWpM7g=s64", "userId": "00378334517810298963"}} outputId="9ed4eca0-bec3-46f3-a24e-4f767815f406"
# Create embeddings
# Load the GloVe file matching EMBEDDING_DIM and align rows to our vocabulary.
embeddings_file = 'glove.6B.{0}d.txt'.format(EMBEDDING_DIM)
glove_embeddings = load_glove_embeddings(embeddings_file=embeddings_file)
embedding_matrix = make_embeddings_matrix(
    embeddings=glove_embeddings, word_index=tokenizer.token_to_index,
    embedding_dim=EMBEDDING_DIM)
print (f"<Embeddings(words={embedding_matrix.shape[0]}, dim={embedding_matrix.shape[1]})>")
# + [markdown] id="C26maF-9Goit"
# # Experiments
# + [markdown] id="eTWQcUJ_GrIx"
# We first have to decide whether to use pretrained embeddings or randomly initialized ones. Then, we can choose to freeze our embeddings or continue to train them using the supervised data (this could lead to overfitting). Here are the three experiments we're going to conduct:
# * randomly initialized embeddings (fine-tuned)
# * GloVe embeddings (frozen)
# * GloVe embeddings (fine-tuned)
# + id="geKOPVzVK6S9"
import json
from sklearn.metrics import precision_recall_fscore_support
from torch.optim import Adam
# + id="64iPmq2lDv2h"
# Training hyperparameters (PATIENCE = epochs without val improvement before stopping).
NUM_FILTERS = 50
LEARNING_RATE = 1e-3
PATIENCE = 5
NUM_EPOCHS = 10
# + id="iIXt8XA09vYX"
class Trainer(object):
    """Bundles model, device, loss, optimizer and scheduler, and runs the
    train / eval / predict loops with early stopping."""
    def __init__(self, model, device, loss_fn=None, optimizer=None, scheduler=None):
        # Set params
        self.model = model
        self.device = device
        self.loss_fn = loss_fn
        self.optimizer = optimizer
        self.scheduler = scheduler
    def train_step(self, dataloader):
        """Train step: one pass over the dataloader; returns mean batch loss."""
        # Set model to train mode
        self.model.train()
        loss = 0.0
        # Iterate over train batches
        for i, batch in enumerate(dataloader):
            # Step
            batch = [item.to(self.device) for item in batch] # Set device
            inputs, targets = batch[:-1], batch[-1]
            self.optimizer.zero_grad() # Reset gradients
            z = self.model(inputs) # Forward pass
            J = self.loss_fn(z, targets) # Define loss
            J.backward() # Backward pass
            self.optimizer.step() # Update weights
            # Cumulative Metrics: incremental running mean of batch losses
            loss += (J.detach().item() - loss) / (i + 1)
        return loss
    def eval_step(self, dataloader):
        """Validation or test step.

        Returns (mean loss, stacked true labels, stacked probabilities).
        """
        # Set model to eval mode
        self.model.eval()
        loss = 0.0
        y_trues, y_probs = [], []
        # Iterate over val batches
        with torch.no_grad():
            for i, batch in enumerate(dataloader):
                # Step
                batch = [item.to(self.device) for item in batch] # Set device
                inputs, y_true = batch[:-1], batch[-1]
                z = self.model(inputs) # Forward pass
                J = self.loss_fn(z, y_true).item()
                # Cumulative Metrics (running mean of batch losses)
                loss += (J - loss) / (i + 1)
                # Store outputs
                # NOTE(review): sigmoid here vs. softmax in predict_step —
                # confirm which probability transform is intended; argmax
                # downstream gives the same class either way.
                y_prob = torch.sigmoid(z).cpu().numpy()
                y_probs.extend(y_prob)
                y_trues.extend(y_true.cpu().numpy())
        return loss, np.vstack(y_trues), np.vstack(y_probs)
    def predict_step(self, dataloader):
        """Prediction step: returns stacked softmax probabilities."""
        # Set model to eval mode
        self.model.eval()
        y_probs = []
        # Iterate over val batches
        with torch.no_grad():
            for i, batch in enumerate(dataloader):
                # Forward pass w/ inputs
                # NOTE(review): batch is not moved to self.device here —
                # assumes model and data already share a device (CPU).
                inputs, targets = batch[:-1], batch[-1]
                y_prob = self.model(inputs, apply_softmax=True)
                # Store outputs
                y_probs.extend(y_prob)
        return np.vstack(y_probs)
    def train(self, num_epochs, patience, train_dataloader, val_dataloader):
        """Full training loop with LR scheduling and early stopping."""
        best_val_loss = np.inf
        for epoch in range(num_epochs):
            # Steps
            train_loss = self.train_step(dataloader=train_dataloader)
            val_loss, _, _ = self.eval_step(dataloader=val_dataloader)
            self.scheduler.step(val_loss)
            # Early stopping
            if val_loss < best_val_loss:
                best_val_loss = val_loss
                # NOTE(review): this keeps a reference to the live model (which
                # continues training), not a snapshot of the best weights —
                # consider copying state_dict instead.
                best_model = self.model
                _patience = patience # reset _patience
            else:
                _patience -= 1
            if not _patience: # 0
                print("Stopping early!")
                break
            # Logging
            print(
                f"Epoch: {epoch+1} | "
                f"train_loss: {train_loss:.5f}, "
                f"val_loss: {val_loss:.5f}, "
                f"lr: {self.optimizer.param_groups[0]['lr']:.2E}, "
                f"_patience: {_patience}"
            )
        return best_model
# + id="Us7Smprz9cWO"
def get_performance(y_true, y_pred, classes):
    """Per-class performance metrics.

    Returns {"overall": {...}, "class": {class_name: {...}}} with
    precision/recall/f1/num_samples at both granularities.
    """
    weighted = precision_recall_fscore_support(y_true, y_pred, average="weighted")
    per_class = precision_recall_fscore_support(y_true, y_pred, average=None)
    performance = {
        "overall": {
            "precision": weighted[0],
            "recall": weighted[1],
            "f1": weighted[2],
            "num_samples": np.float64(len(y_true)),
        },
        "class": {},
    }
    for i, class_name in enumerate(classes):
        performance["class"][class_name] = {
            "precision": per_class[0][i],
            "recall": per_class[1][i],
            "f1": per_class[2][i],
            "num_samples": np.float64(per_class[3][i]),
        }
    return performance
# + [markdown] id="Y8JzMrcv_p8a"
# ### Randomly initialized embeddings
# + id="TnLSYV0WKo8x"
# Experiment 1: randomly initialized embeddings, fine-tuned during training.
PRETRAINED_EMBEDDINGS = None
FREEZE_EMBEDDINGS = False
# + id="wD4sRUS5_lwq" colab={"base_uri": "https://localhost:8080/"} executionInfo={"status": "ok", "timestamp": 1608250579005, "user_tz": 420, "elapsed": 8166, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GjMIOf3R_zwS_zZx4ZyPMtQe0lOkGpPOEUEKWpM7g=s64", "userId": "00378334517810298963"}} outputId="682e80e9-7d7c-42e0-8880-d64532d6224f"
# Initialize model
model = CNN(
    embedding_dim=EMBEDDING_DIM, vocab_size=VOCAB_SIZE,
    num_filters=NUM_FILTERS, filter_sizes=FILTER_SIZES,
    hidden_dim=HIDDEN_DIM, dropout_p=DROPOUT_P, num_classes=NUM_CLASSES,
    pretrained_embeddings=PRETRAINED_EMBEDDINGS, freeze_embeddings=FREEZE_EMBEDDINGS)
model = model.to(device) # set device
# Printing the bound method's repr also shows the module structure.
print (model.named_parameters)
# + id="0uiqDFypJLU9"
# Define Loss
# Inverse-frequency class weights counteract class imbalance.
class_weights_tensor = torch.Tensor(list(class_weights.values())).to(device)
loss = nn.CrossEntropyLoss(weight=class_weights_tensor)
# + id="BVLmJFYFJLXs"
# Define optimizer & scheduler
# Reduce LR by 10x after 3 epochs without val-loss improvement.
optimizer = Adam(model.parameters(), lr=LEARNING_RATE)
scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(
    optimizer, mode='min', factor=0.1, patience=3)
# + id="CaV-uU0tJLbI"
# Trainer module
# FIX: this cell referenced an undefined name `loss_fn` (NameError) — the
# criterion defined above is bound to `loss`, so pass that.
trainer = Trainer(
    model=model, device=device, loss_fn=loss,
    optimizer=optimizer, scheduler=scheduler)
# + colab={"base_uri": "https://localhost:8080/"} id="2Ee8qqUnJLeg" executionInfo={"status": "ok", "timestamp": 1608250621765, "user_tz": 420, "elapsed": 33687, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GjMIOf3R_zwS_zZx4ZyPMtQe0lOkGpPOEUEKWpM7g=s64", "userId": "00378334517810298963"}} outputId="b2502d32-3f6a-4537-80e8-6cdeb6c26915"
# Train
best_model = trainer.train(
    NUM_EPOCHS, PATIENCE, train_dataloader, val_dataloader)
# + id="PRzw6CpqJRUA"
# Get predictions
# Predicted class = argmax over per-class probabilities.
test_loss, y_true, y_prob = trainer.eval_step(dataloader=test_dataloader)
y_pred = np.argmax(y_prob, axis=1)
# + colab={"base_uri": "https://localhost:8080/"} id="qYUvB6FlJRW1" executionInfo={"status": "ok", "timestamp": 1608250622444, "user_tz": 420, "elapsed": 17904, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GjMIOf3R_zwS_zZx4ZyPMtQe0lOkGpPOEUEKWpM7g=s64", "userId": "00378334517810298963"}} outputId="a9e8ff54-beb0-4d29-b24e-1c9a9b01abd7"
# Determine performance
performance = get_performance(
    y_true=y_test, y_pred=y_pred, classes=label_encoder.classes)
print (json.dumps(performance['overall'], indent=2))
# + [markdown] id="To_CB7ibLesP"
# ### GloVe embeddings (frozen)
# + id="oT9w__AMkqfG"
# Experiment 2: pretrained GloVe embeddings, frozen during training.
PRETRAINED_EMBEDDINGS = embedding_matrix
FREEZE_EMBEDDINGS = True
# + id="yg13AyoUkqcJ" colab={"base_uri": "https://localhost:8080/"} executionInfo={"status": "ok", "timestamp": 1608250622446, "user_tz": 420, "elapsed": 14826, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GjMIOf3R_zwS_zZx4ZyPMtQe0lOkGpPOEUEKWpM7g=s64", "userId": "00378334517810298963"}} outputId="40e0fa9e-3cb5-413c-fd95-b7ed2ad6b1fc"
# Initialize model
model = CNN(
    embedding_dim=EMBEDDING_DIM, vocab_size=VOCAB_SIZE,
    num_filters=NUM_FILTERS, filter_sizes=FILTER_SIZES,
    hidden_dim=HIDDEN_DIM, dropout_p=DROPOUT_P, num_classes=NUM_CLASSES,
    pretrained_embeddings=PRETRAINED_EMBEDDINGS, freeze_embeddings=FREEZE_EMBEDDINGS)
model = model.to(device) # set device
# Printing the bound method's repr also shows the module structure.
print (model.named_parameters)
# + id="6rJNp4Vb-dqz"
# Define Loss
# Inverse-frequency class weights counteract class imbalance.
class_weights_tensor = torch.Tensor(list(class_weights.values())).to(device)
loss = nn.CrossEntropyLoss(weight=class_weights_tensor)
# + id="RKtdPOdM-dt0"
# Define optimizer & scheduler
# Reduce LR by 10x after 3 epochs without val-loss improvement.
optimizer = Adam(model.parameters(), lr=LEARNING_RATE)
scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(
    optimizer, mode='min', factor=0.1, patience=3)
# + id="MtVthG4r-dwy"
# Trainer module
# FIX: this cell referenced an undefined name `loss_fn` (NameError) — the
# criterion defined above is bound to `loss`, so pass that.
trainer = Trainer(
    model=model, device=device, loss_fn=loss,
    optimizer=optimizer, scheduler=scheduler)
# + colab={"base_uri": "https://localhost:8080/"} id="oy3FP3ht-gJY" executionInfo={"status": "ok", "timestamp": 1608250652270, "user_tz": 420, "elapsed": 31857, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GjMIOf3R_zwS_zZx4ZyPMtQe0lOkGpPOEUEKWpM7g=s64", "userId": "00378334517810298963"}} outputId="998ce771-66c3-4b39-8362-e241129e00c3"
# Train
best_model = trainer.train(
    NUM_EPOCHS, PATIENCE, train_dataloader, val_dataloader)
# + id="h2e0q965-gMK"
# Get predictions
# Predicted class = argmax over per-class probabilities.
test_loss, y_true, y_prob = trainer.eval_step(dataloader=test_dataloader)
y_pred = np.argmax(y_prob, axis=1)
# + colab={"base_uri": "https://localhost:8080/"} id="qyNhVcHi-juw" executionInfo={"status": "ok", "timestamp": 1608250652758, "user_tz": 420, "elapsed": 29176, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GjMIOf3R_zwS_zZx4ZyPMtQe0lOkGpPOEUEKWpM7g=s64", "userId": "00378334517810298963"}} outputId="b3cc6369-3d29-4810-df2b-4fe359f51417"
# Determine performance
performance = get_performance(
    y_true=y_test, y_pred=y_pred, classes=label_encoder.classes)
print (json.dumps(performance['overall'], indent=2))
# + [markdown] id="dUVkeDbNqO7V"
# ### Fine-tuned GloVe embeddings (unfrozen)
# + id="eubLrHydkt_J"
# Experiment 3: pretrained GloVe embeddings, fine-tuned (unfrozen).
PRETRAINED_EMBEDDINGS = embedding_matrix
FREEZE_EMBEDDINGS = False
# + id="IGeZwoy9qUpa" colab={"base_uri": "https://localhost:8080/"} executionInfo={"status": "ok", "timestamp": 1608250652759, "user_tz": 420, "elapsed": 27850, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GjMIOf3R_zwS_zZx4ZyPMtQe0lOkGpPOEUEKWpM7g=s64", "userId": "00378334517810298963"}} outputId="56db02ee-5401-4875-a0ef-6b938e8bc9f6"
# Initialize model
model = CNN(
    embedding_dim=EMBEDDING_DIM, vocab_size=VOCAB_SIZE,
    num_filters=NUM_FILTERS, filter_sizes=FILTER_SIZES,
    hidden_dim=HIDDEN_DIM, dropout_p=DROPOUT_P, num_classes=NUM_CLASSES,
    pretrained_embeddings=PRETRAINED_EMBEDDINGS, freeze_embeddings=FREEZE_EMBEDDINGS)
model = model.to(device) # set device
# Printing the bound method's repr also shows the module structure.
print (model.named_parameters)
# + id="ifqXyPZ1JKWY"
# Define Loss
# Inverse-frequency class weights counteract class imbalance.
class_weights_tensor = torch.Tensor(list(class_weights.values())).to(device)
loss = nn.CrossEntropyLoss(weight=class_weights_tensor)
# + id="kXGrQ0ceJKZk"
# Define optimizer & scheduler
# Reduce LR by 10x after 3 epochs without val-loss improvement.
optimizer = Adam(model.parameters(), lr=LEARNING_RATE)
scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(
    optimizer, mode='min', factor=0.1, patience=3)
# + id="IinLK_ohJKdr"
# Trainer module
# FIX: this cell referenced an undefined name `loss_fn` (NameError) — the
# criterion defined above is bound to `loss`, so pass that.
trainer = Trainer(
    model=model, device=device, loss_fn=loss,
    optimizer=optimizer, scheduler=scheduler)
# + colab={"base_uri": "https://localhost:8080/"} id="tpVOifjMJKgx" executionInfo={"status": "ok", "timestamp": 1608250686423, "user_tz": 420, "elapsed": 60458, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GjMIOf3R_zwS_zZx4ZyPMtQe0lOkGpPOEUEKWpM7g=s64", "userId": "00378334517810298963"}} outputId="f281d7dc-2196-4c50-bf1b-8877c20db9ad"
# Train
best_model = trainer.train(
    NUM_EPOCHS, PATIENCE, train_dataloader, val_dataloader)
# + id="TJfbNqp2JQgT"
# Get predictions
# Predicted class = argmax over per-class probabilities.
test_loss, y_true, y_prob = trainer.eval_step(dataloader=test_dataloader)
y_pred = np.argmax(y_prob, axis=1)
# + colab={"base_uri": "https://localhost:8080/"} id="thdmnUOTJQld" executionInfo={"status": "ok", "timestamp": 1608250686999, "user_tz": 420, "elapsed": 59824, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GjMIOf3R_zwS_zZx4ZyPMtQe0lOkGpPOEUEKWpM7g=s64", "userId": "00378334517810298963"}} outputId="19b75fb4-a64f-45c0-c142-2e33338580bf"
# Determine performance
performance = get_performance(
    y_true=y_test, y_pred=y_pred, classes=label_encoder.classes)
print (json.dumps(performance['overall'], indent=2))
# + id="5R-df_DMY51A"
# Save artifacts
# Persist everything needed for inference: encoder, tokenizer, weights, metrics.
from pathlib import Path
dir = Path("cnn") # NOTE(review): shadows the `dir` builtin; later cells reuse this name
dir.mkdir(parents=True, exist_ok=True)
label_encoder.save(fp=Path(dir, 'label_encoder.json'))
tokenizer.save(fp=Path(dir, 'tokenizer.json'))
torch.save(best_model.state_dict(), Path(dir, 'model.pt'))
with open(Path(dir, 'performance.json'), "w") as fp:
    json.dump(performance, indent=2, sort_keys=False, fp=fp)
# + [markdown] id="xKdgLetZKhEj"
# # Inference
# + id="zPWRyqBoKks0"
def get_probability_distribution(y_prob, classes):
    """Create a dict of class probabilities from an array.

    Entries are sorted by probability, highest first.
    """
    results = {class_: np.float64(prob)
               for class_, prob in zip(classes, y_prob)}
    return dict(sorted(results.items(), key=lambda item: item[1], reverse=True))
# + colab={"base_uri": "https://localhost:8080/"} id="1zg0ErQMY4cZ" executionInfo={"status": "ok", "timestamp": 1608254328349, "user_tz": 420, "elapsed": 773, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GjMIOf3R_zwS_zZx4ZyPMtQe0lOkGpPOEUEKWpM7g=s64", "userId": "00378334517810298963"}} outputId="a5925f8f-b8ca-4b38-c4f7-9ab446890378"
# Load artifacts
# Rebuild the full inference stack on CPU from the saved files.
device = torch.device("cpu")
label_encoder = LabelEncoder.load(fp=Path(dir, 'label_encoder.json'))
tokenizer = Tokenizer.load(fp=Path(dir, 'tokenizer.json'))
model = CNN(
    embedding_dim=EMBEDDING_DIM, vocab_size=VOCAB_SIZE,
    num_filters=NUM_FILTERS, filter_sizes=FILTER_SIZES,
    hidden_dim=HIDDEN_DIM, dropout_p=DROPOUT_P, num_classes=NUM_CLASSES,
    pretrained_embeddings=PRETRAINED_EMBEDDINGS, freeze_embeddings=FREEZE_EMBEDDINGS)
model.load_state_dict(torch.load(Path(dir, 'model.pt'), map_location=device))
model.to(device)
# + id="Bviv-K-FY4gS"
# Initialize trainer
# No loss/optimizer needed — only predict_step is used for inference.
trainer = Trainer(model=model, device=device)
# + colab={"base_uri": "https://localhost:8080/"} id="cDD44HKfY4jY" executionInfo={"status": "ok", "timestamp": 1608255720107, "user_tz": 420, "elapsed": 386, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GjMIOf3R_zwS_zZx4ZyPMtQe0lOkGpPOEUEKWpM7g=s64", "userId": "00378334517810298963"}} outputId="546f31f7-8b4b-4004-b1d4-90fe4f1b03b8"
# Dataloader
text = "The final tennis tournament starts next week."
X = tokenizer.texts_to_sequences([preprocess(text)])
print (tokenizer.sequences_to_texts(X))
# y_filler supplies placeholder labels — Dataset requires a y but predict_step ignores it.
y_filler = label_encoder.encode([label_encoder.classes[0]]*len(X))
dataset = Dataset(X=X, y=y_filler, max_filter_size=max_filter_size)
dataloader = dataset.create_dataloader(batch_size=batch_size)
# + colab={"base_uri": "https://localhost:8080/"} id="EXrACSa6ZJgb" executionInfo={"status": "ok", "timestamp": 1608255720870, "user_tz": 420, "elapsed": 583, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GjMIOf3R_zwS_zZx4ZyPMtQe0lOkGpPOEUEKWpM7g=s64", "userId": "00378334517810298963"}} outputId="858e4b78-f00c-4ee4-e3b0-15f69e54d03c"
# Inference
# Decode the argmax index back into its class label.
y_prob = trainer.predict_step(dataloader)
y_pred = np.argmax(y_prob, axis=1)
label_encoder.decode(y_pred)
# + colab={"base_uri": "https://localhost:8080/"} id="Tbz0ZnYSZJkD" executionInfo={"status": "ok", "timestamp": 1608255721724, "user_tz": 420, "elapsed": 939, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GjMIOf3R_zwS_zZx4ZyPMtQe0lOkGpPOEUEKWpM7g=s64", "userId": "00378334517810298963"}} outputId="ea77c0bc-430e-420a-e6b7-b26755a2a4eb"
# Class distributions
# Show all class probabilities for the single input, sorted descending.
prob_dist = get_probability_distribution(y_prob=y_prob[0], classes=label_encoder.classes)
print (json.dumps(prob_dist, indent=2))
# + [markdown] id="zXZtx3nsKlDr"
# # Interpretability
# + [markdown] id="4KImhaLkcFuJ"
# We went through all the trouble of padding our inputs before convolution so that the outputs are the same shape as our inputs, which lets us attempt some interpretability. Since every token is mapped to a convolutional output on which we apply max pooling, we can see which token's output was most influential towards the prediction. We first need to get the conv outputs from our model:
# + id="RnHVnI8hdxYC"
import collections
import seaborn as sns
# + id="atX4qOs1Kl1Y"
class InterpretableCNN(nn.Module):
def __init__(self, embedding_dim, vocab_size, num_filters,
filter_sizes, hidden_dim, dropout_p, num_classes,
pretrained_embeddings=None, freeze_embeddings=False,
padding_idx=0):
super(InterpretableCNN, self).__init__()
# Filter sizes
self.filter_sizes = filter_sizes
# Initialize embeddings
if pretrained_embeddings is None:
self.embeddings = nn.Embedding(
embedding_dim=embedding_dim, num_embeddings=vocab_size,
padding_idx=padding_idx)
else:
pretrained_embeddings = torch.from_numpy(pretrained_embeddings).float()
self.embeddings = nn.Embedding(
embedding_dim=embedding_dim, num_embeddings=vocab_size,
padding_idx=padding_idx, _weight=pretrained_embeddings)
# Freeze embeddings or not
if freeze_embeddings:
self.embeddings.weight.requires_grad = False
# Conv weights
self.conv = nn.ModuleList(
[nn.Conv1d(in_channels=embedding_dim,
out_channels=num_filters,
kernel_size=f) for f in filter_sizes])
# FC weights
self.dropout = nn.Dropout(dropout_p)
self.fc1 = nn.Linear(num_filters*len(filter_sizes), hidden_dim)
self.fc2 = nn.Linear(hidden_dim, num_classes)
def forward(self, inputs, channel_first=False, apply_softmax=False):
# Embed
x_in, = inputs
x_in = self.embeddings(x_in)
# Rearrange input so num_channels is in dim 1 (N, C, L)
if not channel_first:
x_in = x_in.transpose(1, 2)
# Conv outputs
z = []
max_seq_len = x_in.shape[2]
for i, f in enumerate(self.filter_sizes):
# `SAME` padding
padding_left = int((self.conv[i].stride[0]*(max_seq_len-1) - max_seq_len + self.filter_sizes[i])/2)
padding_right = int(math.ceil((self.conv[i].stride[0]*(max_seq_len-1) - max_seq_len + self.filter_sizes[i])/2))
# Conv + pool
_z = self.conv[i](F.pad(x_in, (padding_left, padding_right)))
z.append(_z.cpu().numpy())
return z
# + id="wybIldYFctMF"
# Same embedding setup as the trained model, so the state_dict loaded below
# matches this architecture.
PRETRAINED_EMBEDDINGS = embedding_matrix
FREEZE_EMBEDDINGS = False
# + colab={"base_uri": "https://localhost:8080/"} id="QLNK4ez2ctPB" executionInfo={"status": "ok", "timestamp": 1608255723908, "user_tz": 420, "elapsed": 679, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GjMIOf3R_zwS_zZx4ZyPMtQe0lOkGpPOEUEKWpM7g=s64", "userId": "00378334517810298963"}} outputId="c415d468-67d8-456e-91a6-4123e88e2f7a"
# Initialize model
interpretable_model = InterpretableCNN(
    embedding_dim=EMBEDDING_DIM, vocab_size=VOCAB_SIZE,
    num_filters=NUM_FILTERS, filter_sizes=FILTER_SIZES,
    hidden_dim=HIDDEN_DIM, dropout_p=DROPOUT_P, num_classes=NUM_CLASSES,
    pretrained_embeddings=PRETRAINED_EMBEDDINGS, freeze_embeddings=FREEZE_EMBEDDINGS)
# Reuse the trained CNN's weights; only forward() differs (returns conv outputs).
# NOTE(review): `dir` is a variable defined earlier in the notebook (and shadows
# the builtin dir()) — confirm it points at the experiment directory.
interpretable_model.load_state_dict(torch.load(Path(dir, 'model.pt'), map_location=device))
interpretable_model.to(device)
# + id="SGdju-O6dEwW"
# Initialize trainer (reuses the generic Trainer wrapper with the new model)
interpretable_trainer = Trainer(model=interpretable_model, device=device)
# + colab={"base_uri": "https://localhost:8080/"} id="0NJYmV6idE1v" executionInfo={"status": "ok", "timestamp": 1608255724804, "user_tz": 420, "elapsed": 407, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GjMIOf3R_zwS_zZx4ZyPMtQe0lOkGpPOEUEKWpM7g=s64", "userId": "00378334517810298963"}} outputId="5c9f507e-dd04-4ebc-fd8b-2157ae558507"
# Get conv outputs
conv_outputs = interpretable_trainer.predict_step(dataloader)
# NOTE(review): the shape comment implies predict_step squeezed out the
# batch dimension of 1 — confirm against Trainer.predict_step.
print (conv_outputs.shape) # (len(filter_sizes), num_filters, max_seq_len)
# + colab={"base_uri": "https://localhost:8080/", "height": 333} id="w6jWfdK7dE43" executionInfo={"status": "ok", "timestamp": 1608255725564, "user_tz": 420, "elapsed": 880, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GjMIOf3R_zwS_zZx4ZyPMtQe0lOkGpPOEUEKWpM7g=s64", "userId": "00378334517810298963"}} outputId="8aaed942-a4db-416d-86dc-6526cc6e3d55"
# Visualize a bi-gram filter's outputs
tokens = tokenizer.sequences_to_texts(X)[0].split(' ')
# Rows = the num_filters filters of one size, columns = token positions.
# Index 1 selects the second filter size (presumably the bi-gram; confirm FILTER_SIZES).
sns.heatmap(conv_outputs[1], xticklabels=tokens)
# + [markdown] id="Jh_EP08yezUw"
# 1D global max-pooling would extract the highest value from each of our `num_filters` for each `filter_size`. We could also follow this same approach to figure out which n-gram is most relevant but notice in the heatmap above that many filters don't have much variance. To mitigate this, this [paper](https://www.aclweb.org/anthology/W18-5408/) uses threshold values to determine which filters to use for interpretability. But to keep things simple, let's extract which tokens' filter outputs were selected via max-pooling most frequently.
# + colab={"base_uri": "https://localhost:8080/"} id="GOT3TkRgexTI" executionInfo={"status": "ok", "timestamp": 1608255727355, "user_tz": 420, "elapsed": 527, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GjMIOf3R_zwS_zZx4ZyPMtQe0lOkGpPOEUEKWpM7g=s64", "userId": "00378334517810298963"}} outputId="3f0223bb-ec44-4113-d1dc-d440ade4b400"
sample_index = 0  # NOTE(review): unused below — the loop always reads conv_outputs[i]
print (f"Origin text:\n{text}")
print (f"\nPreprocessed text:\n{tokenizer.sequences_to_texts(X)[0]}")
print ("\nMost important n-grams:")
# Process conv outputs for each unique filter size
for i, filter_size in enumerate(FILTER_SIZES):
    # For each of this size's filters, find the position that would win
    # max-pooling, then count how often each position wins.
    popular_indices = collections.Counter([np.argmax(conv_output) \
        for conv_output in conv_outputs[i]])
    # The most frequently winning position marks the start of the n-gram.
    start = popular_indices.most_common(1)[-1][0]
    n_gram = " ".join([token for token in tokens[start:start+filter_size]])
    print (f"[{filter_size}-gram]: {n_gram}")
| notebooks/12_Embeddings.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/farkoo/Covid19-Detection-from-ChestXRayImages/blob/master/Covid19_Detection.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + colab={"base_uri": "https://localhost:8080/"} id="DaaiF9bHIDZh" outputId="32a9c50e-131f-4e19-d45d-4a98656cf821"
# %cd drive/MyDrive
# + colab={"base_uri": "https://localhost:8080/"} id="3iiciEZLILWS" outputId="37916697-6993-4e10-ce97-b65c3c1577b3"
# !git clone "https://github.com/farkoo/Covid19-Detection-from-ChestXRayImages.git"
# + colab={"base_uri": "https://localhost:8080/"} id="hWgrRZJoIRBR" outputId="44073213-6875-4a83-865b-a13453d63d21"
# %cd 'Covid19-Detection-from-ChestXRayImages'
# + colab={"base_uri": "https://localhost:8080/"} id="wjdi4yB-IhoX" outputId="eb6691cf-e6f8-4f03-a1c8-5b0396cbbfd2"
# !7z x dataset.7z -odataset
# + id="xAwwPCVTIxPS"
from tensorflow.keras.preprocessing.image import ImageDataGenerator
from tensorflow.keras.applications import VGG16
from tensorflow.keras.layers import AveragePooling2D, Dropout, Flatten, Dense, Input
from tensorflow.keras.models import Model
from tensorflow.keras.optimizers import Adam
from tensorflow.keras.utils import to_categorical
from sklearn.preprocessing import LabelBinarizer
from sklearn.model_selection import train_test_split
from sklearn.metrics import classification_report, confusion_matrix
from imutils import paths
import matplotlib.pyplot as plt
import numpy as np
import argparse
import cv2
import os
import random
# + colab={"base_uri": "https://localhost:8080/"} id="EWbjYUErLrzd" outputId="0894688b-e756-43f3-c972-dfce2f3892ae"
# %cd dataset
# + id="Zx4OR5DoaBqx" outputId="f37826b0-aa56-471f-f915-e64a3ff3096f" colab={"base_uri": "https://localhost:8080/"}
# %cd ..
# + id="KcbyPvrGIm-s"
# Hyper parameters
learning_rate = 1e-3
epochs = 25
batch_size = 8
# NOTE(review): `dir` shadows the builtin dir(); later cells reference this
# exact name, so it is kept — rename (e.g. data_dir) in a coordinated change.
dir = 'dataset'
width = 224   # VGG16 input width (matches Input(shape=(224, 224, 3)) below)
height = 224  # VGG16 input height
classes = 2
# + id="jtJFs3-hJVBV"
def load_imgs_from_dir(dir, width, height):
    """Load every image under ``dir``, one sub-directory per class.

    Parameters
    ----------
    dir : str
        Root directory; each sub-directory name becomes the class label.
    width, height : int
        Target size each image is resized to.

    Returns
    -------
    (img_data, img_label) : tuple of tuples
        Shuffled, index-aligned RGB images (height x width x 3 arrays)
        and their string labels.
    """
    img_data = []
    img_label = []
    categories = os.listdir(dir)
    for category in categories:
        for file in os.listdir(os.path.join(dir,category)):
            img_path = os.path.join(dir,category,file)
            image = cv2.imread(img_path)
            # BUG FIX: cv2.imread returns None for unreadable or non-image
            # files (e.g. hidden files); previously this crashed cvtColor.
            if image is None:
                continue
            image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)  # OpenCV loads BGR
            image = cv2.resize(image,(width, height))
            img_data.append(image)
            img_label.append(category)
    img_data = np.asarray(img_data)
    img_label = np.asarray(img_label)
    # Shuffle images and labels together so they stay aligned.
    zip_list = list(zip(img_data, img_label))
    random.shuffle(zip_list)
    img_data, img_label = zip(*zip_list)
    return img_data, img_label
# + id="64JQJ8u5LETD"
data, label = load_imgs_from_dir(dir, width, height)
# + id="O4K_chq2OyA5"
# Binarize the two string labels, then one-hot them for the 2-unit softmax head.
lb = LabelBinarizer()
one_hot = lb.fit_transform(label)   # (n, 1) column of 0/1 for two classes
one_hot = to_categorical(one_hot)   # -> (n, 2) one-hot matrix
# + id="5iTWUfd6QYYV"
# 90/10 train/test split, stratified to preserve the class balance.
train_X, test_X, train_Y, test_Y = train_test_split(data, one_hot, test_size = 0.10,
                                         stratify = one_hot, random_state = 42)
# + id="P3rOO-GzX_7k"
# Carve a validation set (10% of the remaining train data) out of the training split.
train_X, valid_X, train_Y, valid_Y = train_test_split(train_X, train_Y, test_size = 0.10,
                                         stratify = train_Y, random_state = 13)
# + id="4avGcDaMbHDk"
# train_test_split returns lists/tuples here; Keras wants numpy arrays.
train_X = np.asarray(train_X)
test_X = np.asarray(test_X)
valid_X = np.asarray(valid_X)
valid_Y = np.asarray(valid_Y)
train_Y = np.asarray(train_Y)
test_Y = np.asarray(test_Y)
# + id="c5KlpiMjTXN3" outputId="6514124f-1f55-4592-fa6b-6796cacdf161" colab={"base_uri": "https://localhost:8080/"}
# VGG16 convolutional base pretrained on ImageNet, without its classifier head.
base_model = VGG16(weights = "imagenet", include_top = False,
                   input_tensor = Input(shape = (224, 224, 3)))
# + id="BA8eSNRyWQmw"
# Custom classification head stacked on the (frozen) VGG16 features.
head_model = base_model.output
head_model = AveragePooling2D(pool_size = (4, 4))(head_model)
head_model = Flatten(name = "flatten")(head_model)
head_model = Dense(64, activation = "relu")(head_model)
head_model = Dropout(0.5)(head_model)  # regularization before the output layer
head_model = Dense(2, activation = "softmax")(head_model)
# + id="PFOOZ9nKW7nd"
model = Model(inputs = base_model.input, outputs = head_model)
# + id="iYsZd6ThXDe7"
# Freeze the pretrained base: only the new head's weights are trained.
for layer in base_model.layers:
    layer.trainable = False
# + id="f1loT6TdXVzw"
opt = Adam(learning_rate = learning_rate, decay = learning_rate/epochs)
# + id="6Kn-h2n2XnAw"
# NOTE(review): binary_crossentropy with a 2-unit softmax works, but
# categorical_crossentropy is the conventional pairing — confirm intent.
model.compile(loss = "binary_crossentropy", optimizer = opt, metrics = ["accuracy"])
# + colab={"base_uri": "https://localhost:8080/"} id="cbGmo1ErX0el" outputId="b5627d32-c14a-439d-c6d7-330865779ee0"
# Train the head on the in-memory arrays; H.history keeps per-epoch metrics.
H = model.fit(
    x = train_X,
    y = train_Y,
    steps_per_epoch = len(train_X)//batch_size,
    validation_data = (valid_X, valid_Y),
    validation_steps = len(valid_X)//batch_size,
    epochs = epochs
)
# + colab={"base_uri": "https://localhost:8080/"} id="c5VmixdTeYv-" outputId="bcf9d0de-15ec-49ac-d3e3-c7b9c928dfa5"
# Evaluate on the held-out test split.
predIdxs = model.predict(test_X, batch_size=batch_size)
predIdxs = np.argmax(predIdxs, axis=1)  # softmax probabilities -> class index
print(classification_report(test_Y.argmax(axis=1), predIdxs, target_names=lb.classes_))
# + colab={"base_uri": "https://localhost:8080/"} id="WgCOFj8dk8vs" outputId="3fcf0040-7842-4fae-988a-87e637c38360"
# Confusion-matrix-derived metrics for the binary problem.
cm = confusion_matrix(test_Y.argmax(axis=1), predIdxs)
total = sum(sum(cm))
acc = (cm[0, 0] + cm[1, 1]) / total
# NOTE(review): row/column 0 is whichever class sorts first in lb.classes_ —
# verify that "sensitivity" really refers to the intended positive class.
sensitivity = cm[0, 0] / (cm[0, 0] + cm[0, 1])
specificity = cm[1, 1] / (cm[1, 0] + cm[1, 1])
print("acc: {:.4f}".format(acc))
print("sensitivity: {:.4f}".format(sensitivity))
print("specificity: {:.4f}".format(specificity))
# + colab={"base_uri": "https://localhost:8080/", "height": 316} id="tWdwPBcolDeq" outputId="5775b9b9-d384-4c0f-d866-49e68bf57dca"
# Plot training curves (train loss/accuracy only; H.history also holds the
# val_loss/val_accuracy series from validation_data if needed).
N = epochs
plt.style.use("ggplot")
plt.figure()
plt.plot(np.arange(0, N), H.history["loss"], label="train_loss")
plt.plot(np.arange(0, N), H.history["accuracy"], label="train_acc")
plt.title("Training Loss and Accuracy on COVID-19 Dataset")
plt.xlabel("Epoch #")
plt.ylabel("Loss/Accuracy")
plt.legend(loc="lower left")
# + id="VY3n35qnlcEN"
# Persist the fine-tuned model.
model.save('Covid19-model.h5')
| Covid19_Detection.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + pycharm={"name": "#%%\n"}
class Mother:
    """First base class; sets ``name`` = 'Aaju'."""
    def __init__(self):
        self.name = 'Aaju'
    def print(self):
        # Note: `print` here shadows the builtin only as a method name.
        print("Print of mother is called")
class Father:
    """Second base class; sets ``name`` = 'Ajay'."""
    def __init__(self):
        self.name = 'Ajay'
    def print(self):
        print("Print of father is called")
class Child(Mother, Father):
    """Multiple inheritance demo. MRO is Child -> Mother -> Father -> object,
    so super().__init__() runs Mother.__init__ and ``name`` becomes 'Aaju'."""
    def __init__(self):
        super().__init__()
    def printchild(self):
        print("Name of child: ", self.name)
c = Child()
c.printchild()  # prints 'Aaju': Mother precedes Father in the MRO
| 06. OOPS Part-2/5.Multiple Inheritance.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Performance
#
# [<NAME>](https://twitter.com/wesmckinn), the creator of pandas, is kind of obsessed with performance. From micro-optimizations for element access, to [embedding](https://github.com/pydata/pandas/tree/master/pandas/src/klib) a fast hash table inside pandas, we all benefit from his and others' hard work.
# This post will focus mainly on making efficient use of pandas and NumPy.
#
# One thing I'll explicitly not touch on is storage formats.
# Performance is just one of many factors that go into choosing a storage format.
# Just know that pandas can talk to [many formats](http://pandas.pydata.org/pandas-docs/version/0.18.0/io.html), and the format that strikes the right balance between performance, portability, data-types, metadata handling, etc., is an [ongoing](http://blog.cloudera.com/blog/2016/03/feather-a-fast-on-disk-format-for-data-frames-for-r-and-python-powered-by-apache-arrow/) topic of discussion.
# +
# %matplotlib inline
import os
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
# When building the EPUB version, import the prep module for its side effects.
if int(os.environ.get("MODERN_PANDAS_EPUB", 0)):
    import prep  # noqa
sns.set_style('ticks')
sns.set_context('talk')
# -
# ## Constructors
#
# It's pretty common to have many similar sources (say a bunch of CSVs) that need to be combined into a single DataFrame. There are two routes to the same end:
#
# 1. Initialize one DataFrame and append to that
# 2. Make many smaller DataFrames and concatenate at the end
#
# For pandas, the second option is faster.
# DataFrame appends are expensive relative to a list append.
# Depending on the values, pandas might have to recast the data to a different type.
# And indexes are immutable, so each time you append pandas has to create an entirely new one.
#
# In the last section we downloaded a bunch of weather files, one per state, writing each to a separate CSV.
# One could imagine coming back later to read them in, using the following code.
#
# The idiomatic python way
#
# ```python
# files = glob.glob('weather/*.csv')
# columns = ['station', 'date', 'tmpf', 'relh', 'sped', 'mslp',
# 'p01i', 'vsby', 'gust_mph', 'skyc1', 'skyc2', 'skyc3']
#
# # init empty DataFrame, like you might for a list
# weather = pd.DataFrame(columns=columns)
#
# for fp in files:
# city = pd.read_csv(fp, columns=columns)
# weather.append(city)
# ```
#
# This is pretty standard code, quite similar to building up a list of tuples, say.
# The only nitpick is that you'd probably use a list-comprehension if you were just making a list.
# But we don't have special syntax for DataFrame-comprehensions (if only), so you'd fall back to the "initialize empty container, append to said container" pattern.
#
# But there's a better, pandorable, way
#
# ```python
# files = glob.glob('weather/*.csv')
# weather_dfs = [pd.read_csv(fp, names=columns) for fp in files]
# weather = pd.concat(weather_dfs)
# ```
#
# Subjectively this is cleaner and more beautiful.
# There's fewer lines of code.
# You don't have this extraneous detail of building an empty DataFrame.
# And objectively the pandorable way is faster, as we'll test next.
# We'll define two functions for building an identical DataFrame. The first `append_df`, creates an empty DataFrame and appends to it. The second, `concat_df`, creates many DataFrames, and concatenates them at the end. We also write a short decorator that runs the functions a handful of times and records the results.
# +
import time
# Benchmark fixture: N chunks of size_per rows x 4 float columns each.
size_per = 5000
N = 100
cols = list('abcd')
def timed(n=30):
    '''
    Decorator factory for a crude microbenchmark: the wrapped function is
    executed ``n`` times and the wrapper returns the list of wall-clock
    durations in seconds (the function's own result is discarded).
    Never use this for serious benchmarking.
    '''
    def deco(func):
        def wrapper(*args, **kwargs):
            def one_run():
                start = time.time()
                func(*args, **kwargs)
                return time.time() - start
            return [one_run() for _ in range(n)]
        return wrapper
    return deco
@timed(60)
def append_df():
    '''
    The pythonic (bad) way: start from an empty DataFrame and grow it
    one chunk at a time inside the loop.
    '''
    df = pd.DataFrame(columns=cols)
    for _ in range(N):
        # BUG FIX: DataFrame.append is not in-place — the original code
        # discarded the result, so `df` stayed empty and the benchmark never
        # actually measured the cost of repeatedly growing the frame.
        # NOTE: DataFrame.append was removed in pandas 2.0; there, use
        # df = pd.concat([df, chunk]) for the same (bad) pattern.
        df = df.append(pd.DataFrame(np.random.randn(size_per, 4), columns=cols))
    return df
@timed(60)
def concat_df():
    '''
    The pandorable (good) way: build all the pieces first, then
    concatenate them once at the end.
    '''
    dfs = [pd.DataFrame(np.random.randn(size_per, 4), columns=cols)
           for _ in range(N)]
    return pd.concat(dfs, ignore_index=True)
# +
t_append = append_df()   # 60 timings for the append strategy
t_concat = concat_df()   # 60 timings for the concat strategy
# Reshape to long form (one row per timing) for seaborn.
timings = (pd.DataFrame({"Append": t_append, "Concat": t_concat})
           .stack()
           .reset_index()
           .rename(columns={0: 'Time (s)',
                            'level_1': 'Method'}))
timings.head()
# -
plt.figure(figsize=(4, 6))
sns.boxplot(x='Method', y='Time (s)', data=timings)
sns.despine()
plt.tight_layout()
# ## Datatypes
#
# The pandas type system essentially [NumPy's](http://docs.scipy.org/doc/numpy/reference/arrays.scalars.html) with a few extensions (`categorical`, `datetime64` with timezone, `timedelta64`).
# An advantage of the DataFrame over a 2-dimensional NumPy array is that the DataFrame can have columns of various types within a single table.
# That said, each column should have a specific dtype; you don't want to be mixing bools with ints with strings within a single column.
# For one thing, this is slow.
# It forces the column to be have an `object` dtype (the fallback python-object container type), which means you don't get any of the type-specific optimizations in pandas or NumPy.
# For another, it means you're probably violating the maxims of tidy data, which we'll discuss next time.
#
# When should you have `object` columns?
# There are a few places where the NumPy / pandas type system isn't as rich as you might like.
# There's no integer NA (at the moment anyway), so if you have any missing values, represented by `NaN`, your otherwise integer column will be floats.
# There's also no `date` dtype (distinct from `datetime`).
# Consider the needs of your application: can you treat an integer `1` as `1.0`?
# Can you treat `date(2016, 1, 1)` as `datetime(2016, 1, 1, 0, 0)`?
# In my experience, this is rarely a problem other than when writing to something with a stricter schema like a database.
# But at that point it's fine to cast to one of the less performant types, since you're just not doing numeric operations anymore.
#
# The last case of `object` dtype data is text data.
# Pandas doesn't have any fixed-width string dtypes, so you're stuck with python objects.
# There is an important exception here, and that's low-cardinality text data, for which you'll want to use the `category` dtype (see below).
#
# If you have object data (either strings or python objects) that needs to be converted, checkout the [`to_numeric`](http://pandas.pydata.org/pandas-docs/version/0.18.0/generated/pandas.to_numeric.html), [`to_datetime`](http://pandas.pydata.org/pandas-docs/version/0.18.0/generated/pandas.to_datetime.html) and [`to_timedelta`](http://pandas.pydata.org/pandas-docs/version/0.18.0/generated/pandas.to_timedelta.html) methods.
# ## Iteration, Apply, And Vectorization
#
# We know that ["Python is slow"](https://jakevdp.github.io/blog/2014/05/09/why-python-is-slow/) (scare quotes since that statement is too broad to be meaningful).
# There are various steps that can be taken to improve your code's performance from relatively simple changes, to rewriting your code in a lower-level language, to trying to parallelize it.
# And while you might have many options, there's typically an order you would proceed in.
#
# First (and I know it's cliché to say so, but still) benchmark your code.
# Make sure you actually need to spend time optimizing it.
# There are [many](https://github.com/nvdv/vprof) [options](https://jiffyclub.github.io/snakeviz/) [for](https://github.com/rkern/line_profiler) [benchmarking](https://docs.python.org/3.5/library/timeit.html) and visualizing where things are slow.
#
# Second, consider your algorithm.
# Make sure you aren't doing more work than you need to.
# A common one I see is doing a full sort on an array, just to select the `N` largest or smallest items.
# Pandas has methods for that.
# On-time flight data; only the departure-delay column is needed here.
df = pd.read_csv("data/627361791_T_ONTIME.csv")
delays = df['DEP_DELAY']
# Select the 5 largest delays
delays.nlargest(5).sort_values()
delays.nsmallest(5).sort_values()
# We follow up the `nlargest` or `nsmallest` with a sort (the result of `nlargest/smallest` is unordered), but it's much easier to sort 5 items than 500,000. The timings bear this out:
# %timeit delays.sort_values().tail(5)
# %timeit delays.nlargest(5).sort_values()
# "Use the right algorithm" is easy to say, but harder to apply in practice since you have to actually figure out the best algorithm to use.
# That one comes down to experience.
#
# Assuming you're at a spot that needs optimizing, and you've got the correct algorithm, *and* there isn't a readily available optimized version of what you need in pandas/numpy/scipy/scikit-learn/statsmodels/..., then what?
#
# The first place to turn is probably a vectorized NumPy implementation.
# Vectorization here means operating directly on arrays, rather than looping over lists scalars.
# This is generally much less work than rewriting it in something like Cython, and you can get pretty good results just by making *effective* use of NumPy and pandas.
# While not every operation can be vectorized, many can.
#
# Let's work through an example calculating the [Great-circle distance](https://en.wikipedia.org/wiki/Great-circle_distance) between airports.
# Grab the table of airport latitudes and longitudes from the [BTS website](http://www.transtats.bts.gov/DL_SelectFields.asp?Table_ID=288&DB_Short_Name=Aviation%20Support%20Tables) and extract it to a CSV.
import requests
import zipfile
# +
# Browser-mimicking headers so the BTS endpoint accepts the POST.
headers = {
    'Pragma': 'no-cache',
    'Origin': 'http://www.transtats.bts.gov',
    'Accept-Encoding': 'gzip, deflate',
    'Accept-Language': 'en-US,en;q=0.8',
    'Upgrade-Insecure-Requests': '1',
    'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_11_4) AppleWebKit/537.36'\
        '(KHTML, like Gecko) Chrome/51.0.2704.103 Safari/537.36',
    'Content-Type': 'application/x-www-form-urlencoded',
    'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8',
    'Cache-Control': 'no-cache',
    'Referer': 'http://www.transtats.bts.gov/DL_SelectFields.asp?Table_ID=288&DB_Short_'
               'Name=Aviation%20Support%20Tables',
    'Connection': 'keep-alive',
    'DNT': '1',
}
# Download once; the cached zip is reused on subsequent runs.
# NOTE(review): url_4.txt presumably holds the form body for the POST — confirm.
if not os.path.exists('data/airports.csv.zip'):
    with open('url_4.txt') as f:
        data = f.read().strip()
    r = requests.post('http://www.transtats.bts.gov/DownLoad_Table.asp?Table_ID=288&Has'
                      '_Group=0&Is_Zipped=0', data=data, headers=headers)
    with open('data/airports.csv.zip', 'wb') as f:
        f.write(r.content)
# -
zf = zipfile.ZipFile('data/airports.csv.zip')
fp = zf.extract(zf.filelist[0], path='data')
airports = pd.read_csv(fp)
# +
# One row per airport: first lat/lon seen, drop missing, sample 500 to keep
# the upcoming pairwise computation tractable.
coord = (pd.read_csv(fp, index_col=['AIRPORT'],
                     usecols=['AIRPORT', 'LATITUDE', 'LONGITUDE'])
         .groupby(level=0).first()
         .dropna()
         .sample(n=500, random_state=42)
         .sort_index())
coord.head()
# -
# For whatever reason, suppose we're interested in all the pairwise distances (I've limited it to just a sample of 500 airports to make this manageable.
# In the real world you *probably* don't need *all* the pairwise distances and would be better off with a [tree](http://scikit-learn.org/stable/modules/generated/sklearn.neighbors.KDTree.html). Remember: think about what you actually need, and find the right algorithm for that).
#
# MultiIndexes have an alternative `from_product` constructor for getting the [Cartesian product](https://en.wikipedia.org/wiki/Cartesian_product) of the arrays you pass in.
# We'll give it `coords.index` twice (to get its Cartesian product with itself).
# That gives a MultiIndex of all the combination.
# With some minor reshaping of `coords` we'll have a DataFrame with all the latitude/longitude pairs.
# +
# Cartesian product of the airport index with itself -> every (origin, dest) pair.
idx = pd.MultiIndex.from_product([coord.index, coord.index],
                                 names=['origin', 'dest'])
pairs = pd.concat([coord.add_suffix('_1').reindex(idx, level='origin'),
                   coord.add_suffix('_2').reindex(idx, level='dest')],
                  axis=1)
pairs.head()
# -
# Keep only one ordering of each pair (great-circle distance is symmetric).
# NOTE(review): this filters `idx` only — `pairs` above was already built from
# the full product and is not re-filtered here.
idx = idx[idx.get_level_values(0) <= idx.get_level_values(1)]
len(idx)
# We'll break that down a bit, but don't lose sight of the real target: our great-circle distance calculation.
#
# The `add_suffix` (and `add_prefix`) method is handy for quickly renaming the columns.
# add_suffix renames every column in one shot (LATITUDE -> LATITUDE_1, ...).
coord.add_suffix('_1').head()
# Alternatively you could use the more general `.rename` like `coord.rename(columns=lambda x: x + '_1')`.
#
# Next, we have the `reindex`.
# Like I mentioned in the prior chapter, indexes are crucial to pandas.
# `.reindex` is all about aligning a Series or DataFrame to a given index.
# In this case we use `.reindex` to align our original DataFrame to the new
# MultiIndex of combinations.
# By default, the output will have the original value if that index label was already present, and `NaN` otherwise.
# If we just called `coord.reindex(idx)`, with no additional arguments, we'd get a DataFrame of all `NaN`s.
# Reindexing the single-level frame against the MultiIndex matches nothing -> all NaN.
coord.reindex(idx).head()
# That's because there weren't any values of `idx` that were in `coord.index`,
# which makes sense since `coord.index` is just a regular one-level Index, while `idx` is a MultiIndex.
# We use the `level` keyword to handle the transition from the original single-level Index, to the two-leveled `idx`.
#
# > `level` : int or name
# >
# Broadcast across a level, matching Index values on the
# passed MultiIndex level
#
#
# level='dest' broadcasts coord's single-level index across that MultiIndex level.
coord.reindex(idx, level='dest').head()
# If you ever need to do an operation that mixes regular single-level indexes with Multilevel Indexes, look for a level keyword argument.
# For example, all the arithmatic methods (`.mul`, `.add`, etc.) have them.
#
# This is a bit wasteful since the distance from airport `A` to `B` is the same as `B` to `A`.
# We could easily fix this with a `idx = idx[idx.get_level_values(0) <= idx.get_level_values(1)]`, but we'll ignore that for now.
#
#
# Quick tangent, I got some... let's say skepticism, on my last piece about the value of indexes.
# Here's an alternative version for the skeptics
from itertools import product, chain
# Index-skeptic variant: work with a plain column instead of the index.
coord2 = coord.reset_index()
# +
# Build the same pairwise table with itertools instead of reindex.
x = product(coord2.add_suffix('_1').itertuples(index=False),
            coord2.add_suffix('_2').itertuples(index=False))
y = [list(chain.from_iterable(z)) for z in x]
# BUG FIX: the fifth column label was a duplicate 'LATITUDE_1'; it holds the
# *destination* latitude and must be 'LATITUDE_2' (the duplicate label made
# df2['LATITUDE_1'] ambiguous and hid the destination latitude).
df2 = (pd.DataFrame(y, columns=['origin', 'LATITUDE_1', 'LONGITUDE_1',
                                'dest', 'LATITUDE_2', 'LONGITUDE_2'])
       .set_index(['origin', 'dest']))
df2.head()
# -
# It's also readable (it's Python after all), though a bit slower.
# To me the `.reindex` method seems more natural.
# My thought process was, "I need all the combinations of origin & destination (`MultiIndex.from_product`).
# Now I need to align this original DataFrame to this new MultiIndex (`coords.reindex`)."
#
# With that diversion out of the way, let's turn back to our great-circle distance calculation.
# Our first implementation is pure python.
# The algorithm itself isn't too important, all that matters is that we're doing math operations on scalars.
# +
import math
def gcd_py(lat1, lng1, lat2, lng2):
    '''
    Calculate great circle distance between two points.
    http://www.johndcook.com/blog/python_longitude_latitude/
    Parameters
    ----------
    lat1, lng1, lat2, lng2: float
    Returns
    -------
    distance:
        distance from ``(lat1, lng1)`` to ``(lat2, lng2)`` in kilometers.
    '''
    deg2rad = math.pi / 180.0
    # Colatitudes and longitudes in radians (spherical coordinates).
    # ASCII identifiers keep this python2-friendly.
    phi1, phi2 = (90 - lat1) * deg2rad, (90 - lat2) * deg2rad
    theta1, theta2 = lng1 * deg2rad, lng2 * deg2rad
    # Spherical law of cosines.
    cos_angle = (math.sin(phi1) * math.sin(phi2) * math.cos(theta1 - theta2) +
                 math.cos(phi1) * math.cos(phi2))
    # Round so identical points can't push the cosine past 1.0 and make
    # acos raise a domain error.
    arc = math.acos(round(cos_angle, 8))
    return arc * 6373  # mean Earth radius in kilometers
# -
# The second implementation uses NumPy.
# Aside from numpy having a builtin `deg2rad` convenience function (which is probably a bit slower than multiplying by a constant $\frac{\pi}{180}$), basically all we've done is swap the `math` prefix for `np`.
# Thanks to NumPy's broadcasting, we can write code that works on scalars or arrays of conformable shape.
def gcd_vec(lat1, lng1, lat2, lng2):
    '''
    Calculate great circle distance, vectorized over arrays.
    http://www.johndcook.com/blog/python_longitude_latitude/
    Parameters
    ----------
    lat1, lng1, lat2, lng2: float or array of float
    Returns
    -------
    distance:
        distance from ``(lat1, lng1)`` to ``(lat2, lng2)`` in kilometers.
    '''
    # python2 users will have to use ascii identifiers
    ϕ1 = np.deg2rad(90 - lat1)
    ϕ2 = np.deg2rad(90 - lat2)
    θ1 = np.deg2rad(lng1)
    θ2 = np.deg2rad(lng2)
    cos = (np.sin(ϕ1) * np.sin(ϕ2) * np.cos(θ1 - θ2) +
           np.cos(ϕ1) * np.cos(ϕ2))
    # BUG FIX: floating-point error can push `cos` slightly outside [-1, 1]
    # (e.g. for identical origin/dest points, which the full pairwise table
    # contains), making arccos return NaN. The scalar gcd_py guards with
    # round(); clip is the vectorized analogue.
    arc = np.arccos(np.clip(cos, -1.0, 1.0))
    return arc * 6373
# To use the python version on our DataFrame, we can either iterate...
# %%time
# Scalar gcd_py applied row-by-row via itertuples (baseline timing).
pd.Series([gcd_py(*x) for x in pairs.itertuples(index=False)],
          index=pairs.index)
# Or use `DataFrame.apply`.
# %%time
# Same scalar function through DataFrame.apply(axis=1) — the slowest option.
r = pairs.apply(lambda x: gcd_py(x['LATITUDE_1'], x['LONGITUDE_1'],
                                 x['LATITUDE_2'], x['LONGITUDE_2']), axis=1);
# But as you can see, you don't want to use apply, especially with `axis=1` (calling the function on each row). It's doing a lot more work handling dtypes in the background, and trying to infer the correct output shape that are pure overhead in this case. On top of that, it has to essentially use a for loop internally.
#
# You *rarely* want to use `DataFrame.apply` and almost never should use it with `axis=1`. Better to write functions that take arrays, and pass those in directly. Like we did with the vectorized version
# %%time
# Vectorized version: pass whole columns; broadcasting runs the loop in C.
r = gcd_vec(pairs['LATITUDE_1'], pairs['LONGITUDE_1'],
            pairs['LATITUDE_2'], pairs['LONGITUDE_2'])
r.head()
# I try not to use the word "easy" when teaching, but that optimization was easy right?
# Why then, do I come across uses of `apply`, in my code and others', even when the vectorized version is available?
# The difficulty lies in knowing about broadcasting, and seeing where to apply it.
#
# For example, the README for [lifetimes](https://github.com/CamDavidsonPilon/lifetimes) (by <NAME>, also author of [Bayesian Methods for Hackers](https://github.com/CamDavidsonPilon/Probabilistic-Programming-and-Bayesian-Methods-for-Hackers), [lifelines](https://github.com/CamDavidsonPilon/lifelines), and [Data Origami](https://dataorigami.net)) used to have an example of passing [this method](https://github.com/CamDavidsonPilon/lifetimes/blob/5b4f7de0720413b6951ac0a4b0082bd50255a231/lifetimes/estimation.py#L249) into a `DataFrame.apply`.
#
# ```python
# data.apply(lambda r: bgf.conditional_expected_number_of_purchases_up_to_time(
# t, r['frequency'], r['recency'], r['T']), axis=1
# )
# ```
#
# If you look at the function [I linked to](https://github.com/CamDavidsonPilon/lifetimes/blob/5b4f7de0720413b6951ac0a4b0082bd50255a231/lifetimes/estimation.py#L249), it's doing a fairly complicated computation involving a negative log likelihood and the Gamma function from `scipy.special`.
# But crucially, it was already vectorized.
# We were able to change the example to just pass the arrays (Series in this case) into the function, rather than applying the function to each row.
#
# ```python
# bgf.conditional_expected_number_of_purchases_up_to_time(
# t, data['frequency'], data['recency'], data['T']
# )
# ```
#
# This got us another 30x speedup on the example dataset.
# I bring this up because it's very natural to have to translate an equation to code and think, "Ok now I need to apply this function to each row", so you reach for `DataFrame.apply`.
# See if you can just pass in the NumPy array or Series itself instead.
#
# Not all operations this easy to vectorize.
# Some operations are iterative by nature, and rely on the results of surrounding computations to proceed. In cases like this you can hope that one of the scientific python libraries has implemented it efficiently for you, or write your own solution using Numba / C / Cython / Fortran.
#
# Other examples take a bit more thought or knowledge to vectorize.
# Let's look at [this](http://nbviewer.jupyter.org/github/jreback/pydata2015-london/blob/master/notebooks/idioms.ipynb)
# example, taken from <NAME>'s PyData London talk, that groupwise normalizes a dataset by subtracting the mean and dividing by the standard deviation for each group.
# +
import random
def create_frame(n, n_groups):
    """Build a benchmark DataFrame of ``n`` rows with shuffled millisecond
    timestamps, ``n_groups`` distinct group labels, and random value columns.

    Setup code only -- not part of the benchmark itself.
    """
    stamps = pd.date_range('20010101', periods=n, freq='ms')
    # Shuffle a copy of the timestamps: modern pandas marks Index buffers
    # read-only, so shuffling ``stamps.values`` in place raises ValueError.
    shuffled = np.random.permutation(stamps.values)
    return pd.DataFrame({'name': np.random.randint(0, n_groups, size=n),
                         'stamp': shuffled,
                         'value': np.random.randint(0, n, size=n),
                         'value2': np.random.randn(n)})
df = create_frame(1000000,10000)
def f_apply(df):
    """Groupwise z-score of ``value2`` via the typical apply/lambda idiom."""
    def standardize(s):
        return (s - s.mean()) / s.std()
    return df.groupby('name').value2.apply(standardize)
def f_unwrap(df):
    """Same groupwise z-score, but "unwrapped": two cythonized group
    transforms instead of a Python-level function call per group."""
    grouped = df.groupby('name').value2
    centered = df.value2 - grouped.transform(np.mean)
    return centered / grouped.transform(np.std)
# + active=""
# Timing it we see that the "unwrapped" version gets quite a bit better performance.
# -
# %timeit f_apply(df)
# %timeit f_unwrap(df)
# Pandas GroupBy objects intercept calls for common functions like mean, sum, etc. and substitutes them with optimized Cython versions.
# So the unwrapped `.transform(np.mean)` and `.transform(np.std)` are fast, while the `x.mean` and `x.std` in the `.apply(lambda x: (x - x.mean()) / x.std())` aren't.
#
# `Groupby.apply` is always going to be around, because it offers maximum flexibility. If you need to [fit a model on each group and create additional columns in the process](http://stackoverflow.com/q/35924126/1889400), it can handle that. It just might not be the fastest (which may be OK sometimes).
#
# This last example is admittedly niche.
# I'd like to think that there aren't too many places in pandas where the natural thing to do `.transform((x - x.mean()) / x.std())` is slower than the less obvious alternative.
# Ideally the user wouldn't have to know about GroupBy having special fast implementations of common methods.
# But that's where we are now.
# ## Categoricals
#
# Thanks to some great work by [<NAME>](https://twitter.com/janschulz), [<NAME>](https://twitter.com/janschulz), and others, pandas 0.15 gained a new [Categorical](http://pandas.pydata.org/pandas-docs/version/0.18.0/categorical.html) data type. Categoricals are nice for many reasons beyond just efficiency, but we'll focus on that here.
#
# Categoricals are an efficient way of representing data (typically strings) that have a low *cardinality*, i.e. relatively few distinct values relative to the size of the array. Internally, a Categorical stores the categories once, and an array of `codes`, which are just integers that indicate which category belongs there. Since it's cheaper to store a `code` than a `category`, we save on memory (shown next).
#
#
# +
import string
# 100k one-letter strings drawn from only 52 distinct values: low cardinality.
s = pd.Series(np.random.choice(list(string.ascii_letters), 100000))
# Object-dtype Series stores a pointer to a Python string object per element.
print('{:0.2f} KB'.format(s.memory_usage(index=False) / 1000))
# -
# As a Categorical: one small integer code per element plus the 52 categories.
c = s.astype('category')
print('{:0.2f} KB'.format(c.memory_usage(index=False) / 1000))
# Beyond saving memory, having codes and a fixed set of categories offers up a bunch of algorithmic optimizations that pandas and others can take advantage of.
#
# [<NAME>](https://twitter.com/mrocklin) has a very nice [post](http://matthewrocklin.com/blog/work/2015/06/18/Categoricals) on using categoricals, and optimizing code in general.
# ## Going Further
#
# The pandas documentation has a section on [enhancing performance](http://pandas.pydata.org/pandas-docs/version/0.18.0/enhancingperf.html), focusing on using Cython or `numba` to speed up a computation. I've focused more on the lower-hanging fruit of picking the right algorithm, vectorizing your code, and using pandas or numpy more effectively. There are further optimizations available if these aren't enough.
# ## Summary
#
# This post was more about how to make effective use of numpy and pandas, than writing your own highly-optimized code.
# In my day-to-day work of data analysis it's not worth the time to write and compile a cython extension.
# I'd rather rely on pandas to be fast at what matters (label lookup on large arrays, factorizations for groupbys and merges, numerics).
# If you want to learn more about what pandas does to make things fast, checkout <NAME>' talk from PyData Seattle [talk](http://www.jeffreytratner.com/slides/pandas-under-the-hood-pydata-seattle-2015.pdf) on pandas' internals.
#
# Next time we'll look at a different kind of optimization: using the Tidy Data principles to facilitate efficient data analysis.
#
| modern_4_performance.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="klGNgWREsvQv"
# **Copyright 2021 The TF-Agents Authors.**
# + cellView="form" id="nQnmcm0oI1Q-"
#@title Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# + [markdown] id="lsaQlK8fFQqH"
# # SAC minitaur with the Actor-Learner API
#
# <table class="tfo-notebook-buttons" align="left">
# <td>
# <a target="_blank" href="https://www.tensorflow.org/agents/tutorials/7_SAC_minitaur_tutorial">
# <img src="https://www.tensorflow.org/images/tf_logo_32px.png" />
# View on TensorFlow.org</a>
# </td>
# <td>
# <a target="_blank" href="https://colab.research.google.com/github/tensorflow/agents/blob/master/docs/tutorials/7_SAC_minitaur_tutorial.ipynb">
# <img src="https://www.tensorflow.org/images/colab_logo_32px.png" />
# Run in Google Colab</a>
# </td>
# <td>
# <a target="_blank" href="https://github.com/tensorflow/agents/blob/master/docs/tutorials/7_SAC_minitaur_tutorial.ipynb">
# <img src="https://www.tensorflow.org/images/GitHub-Mark-32px.png" />
# View source on GitHub</a>
# </td>
# <td>
# <a href="https://storage.googleapis.com/tensorflow_docs/agents/docs/tutorials/7_SAC_minitaur_tutorial.ipynb"><img src="https://www.tensorflow.org/images/download_logo_32px.png" />Download notebook</a>
# </td>
# </table>
#
# + [markdown] id="ZOUOQOrFs3zn"
# ## Introduction
# + [markdown] id="cKOCZlhUgXVK"
# This example shows how to train a [Soft Actor Critic](https://arxiv.org/abs/1812.05905) agent on the [Minitaur](https://github.com/bulletphysics/bullet3/blob/master/examples/pybullet/gym/pybullet_envs/bullet/minitaur.py) environment.
#
# If you've worked through the [DQN Colab](https://github.com/tensorflow/agents/blob/master/docs/tutorials/1_dqn_tutorial.ipynb) this should feel very familiar. Notable changes include:
#
# * Changing the agent from DQN to SAC.
# * Training on Minitaur which is a much more complex environment than CartPole. The Minitaur environment aims to train a quadruped robot to move forward.
# * Using the TF-Agents Actor-Learner API for distributed Reinforcement Learning.
#
# The API supports both distributed data collection using an experience replay buffer and variable container (parameter server) and distributed training across multiple devices. The API is designed to be very simple and modular. We utilize [Reverb](https://deepmind.com/research/open-source/Reverb) for both replay buffer and variable container and [TF DistributionStrategy API](https://www.tensorflow.org/guide/distributed_training) for distributed training on GPUs and TPUs.
# + [markdown] id="9vUQms4DAY5A"
# If you haven't installed the following dependencies, run:
# + id="fskoLlB-AZ9j"
# !sudo apt-get install -y xvfb ffmpeg
# !pip install 'imageio==2.4.0'
# !pip install matplotlib
# !pip install tf-agents[reverb]
# !pip install pybullet
# + [markdown] id="1u9QVVsShC9X"
# ## Setup
# + [markdown] id="nNV5wyH3dyMl"
# First we will import the different tools that we need.
# + id="sMitx5qSgJk1"
import base64
import imageio
import IPython
import matplotlib.pyplot as plt
import os
import reverb
import tempfile
import PIL.Image
import tensorflow as tf
from tf_agents.agents.ddpg import critic_network
from tf_agents.agents.sac import sac_agent
from tf_agents.agents.sac import tanh_normal_projection_network
from tf_agents.environments import suite_pybullet
from tf_agents.metrics import py_metrics
from tf_agents.networks import actor_distribution_network
from tf_agents.policies import greedy_policy
from tf_agents.policies import py_tf_eager_policy
from tf_agents.policies import random_py_policy
from tf_agents.replay_buffers import reverb_replay_buffer
from tf_agents.replay_buffers import reverb_utils
from tf_agents.train import actor
from tf_agents.train import learner
from tf_agents.train import triggers
from tf_agents.train.utils import spec_utils
from tf_agents.train.utils import strategy_utils
from tf_agents.train.utils import train_utils
tempdir = tempfile.gettempdir()
# + [markdown] id="LmC0NDhdLIKY"
# ## Hyperparameters
# + id="HC1kNrOsLSIZ"
env_name = "MinitaurBulletEnv-v0" # @param {type:"string"}
# Use "num_iterations = 1e6" for better results (2 hrs)
# 1e5 is just so this doesn't take too long (1 hr)
num_iterations = 100000 # @param {type:"integer"}
initial_collect_steps = 10000 # @param {type:"integer"}
collect_steps_per_iteration = 1 # @param {type:"integer"}
replay_buffer_capacity = 10000 # @param {type:"integer"}
batch_size = 256 # @param {type:"integer"}
critic_learning_rate = 3e-4 # @param {type:"number"}
actor_learning_rate = 3e-4 # @param {type:"number"}
alpha_learning_rate = 3e-4 # @param {type:"number"}
target_update_tau = 0.005 # @param {type:"number"}
target_update_period = 1 # @param {type:"number"}
gamma = 0.99 # @param {type:"number"}
reward_scale_factor = 1.0 # @param {type:"number"}
actor_fc_layer_params = (256, 256)
critic_joint_fc_layer_params = (256, 256)
log_interval = 5000 # @param {type:"integer"}
num_eval_episodes = 20 # @param {type:"integer"}
eval_interval = 10000 # @param {type:"integer"}
policy_save_interval = 5000 # @param {type:"integer"}
# + [markdown] id="VMsJC3DEgI0x"
# ## Environment
#
# Environments in RL represent the task or problem that we are trying to solve. Standard environments can be easily created in TF-Agents using `suites`. We have different `suites` for loading environments from sources such as the OpenAI Gym, Atari, DM Control, etc., given a string environment name.
#
# Now let's load the Minitaur environment from the Pybullet suite.
# + id="RlO7WIQHu_7D"
env = suite_pybullet.load(env_name)
env.reset()
PIL.Image.fromarray(env.render())
# + [markdown] id="gY179d1xlmoM"
# In this environment the goal is for the agent to train a policy that will control the Minitaur robot and have it move forward as fast as possible. Episodes last 1000 steps and the return will be the sum of rewards throughout the episode.
#
# Let's look at the information the environment provides as an `observation` which the policy will use to generate `actions`.
# + id="exDv57iHfwQV"
print('Observation Spec:')
print(env.time_step_spec().observation)
print('Action Spec:')
print(env.action_spec())
# + [markdown] id="Wg5ysVTnctIm"
# As we can see the observation is fairly complex. We receive 28 values representing the angles, velocities and torques for all the motors. In return the environment expects 8 values for the actions between `[-1, 1]`. These are the desired motor angles.
#
# Usually we create two environments: one for collecting data during training and one for evaluation. The environments are written in pure python and use numpy arrays, which the Actor Learner API directly consumes.
# + id="Xp-Y4mD6eDhF"
collect_env = suite_pybullet.load(env_name)
eval_env = suite_pybullet.load(env_name)
# + [markdown] id="Da-z2yF66FR9"
# ## Distribution Strategy
# We use the DistributionStrategy API to enable running the train step computation across multiple devices such as multiple GPUs or TPUs using data parallelism. The train step:
# * Receives a batch of training data
# * Splits it across the devices
# * Computes the forward step
# * Aggregates and computes the MEAN of the loss
# * Computes the backward step and performs a gradient variable update
#
# With TF-Agents Learner API and DistributionStrategy API it is quite easy to switch between running the train step on GPUs (using MirroredStrategy) to TPUs (using TPUStrategy) without changing any of the training logic below.
# + [markdown] id="wGREYZCaDB1h"
# ### Enabling the GPU
# If you want to try running on a GPU, you'll first need to enable GPUs for the notebook:
#
# * Navigate to Edit→Notebook Settings
# * Select GPU from the Hardware Accelerator drop-down
# + [markdown] id="5ZuvwDV66Mn1"
# ### Picking a strategy
# Use `strategy_utils` to generate a strategy. Under the hood, passing the parameter:
# * `use_gpu = False` returns `tf.distribute.get_strategy()`, which uses CPU
# * `use_gpu = True` returns `tf.distribute.MirroredStrategy()`, which uses all GPUs that are visible to TensorFlow on one machine
# + id="ff5ZZRZI15ds"
use_gpu = True #@param {type:"boolean"}
strategy = strategy_utils.get_strategy(tpu=False, use_gpu=use_gpu)
# + [markdown] id="fMn5FTs5kHvt"
# All variables and Agents need to be created under `strategy.scope()`, as you'll see below.
# + [markdown] id="E9lW_OZYFR8A"
# ## Agent
#
# To create an SAC Agent, we first need to create the networks that it will train. SAC is an actor-critic agent, so we will need two networks.
#
# The critic will give us value estimates for `Q(s,a)`. That is, it will receive as input an observation and an action, and it will give us an estimate of how good that action was for the given state.
#
# + id="TgkdEPg_muzV"
observation_spec, action_spec, time_step_spec = (
spec_utils.get_tensor_specs(collect_env))
with strategy.scope():
critic_net = critic_network.CriticNetwork(
(observation_spec, action_spec),
observation_fc_layer_params=None,
action_fc_layer_params=None,
joint_fc_layer_params=critic_joint_fc_layer_params,
kernel_initializer='glorot_uniform',
last_kernel_initializer='glorot_uniform')
# + [markdown] id="pYy4AH4V7Ph4"
# We will use this critic to train an `actor` network which will allow us to generate actions given an observation.
#
# The `ActorNetwork` will predict parameters for a tanh-squashed [MultivariateNormalDiag](https://www.tensorflow.org/probability/api_docs/python/tfp/distributions/MultivariateNormalDiag) distribution. This distribution will then be sampled, conditioned on the current observation, whenever we need to generate actions.
# + id="TB5Y3Oub4u7f"
with strategy.scope():
actor_net = actor_distribution_network.ActorDistributionNetwork(
observation_spec,
action_spec,
fc_layer_params=actor_fc_layer_params,
continuous_projection_net=(
tanh_normal_projection_network.TanhNormalProjectionNetwork))
# + [markdown] id="z62u55hSmviJ"
# With these networks at hand we can now instantiate the agent.
#
# + id="jbY4yrjTEyc9"
with strategy.scope():
train_step = train_utils.create_train_step()
tf_agent = sac_agent.SacAgent(
time_step_spec,
action_spec,
actor_network=actor_net,
critic_network=critic_net,
actor_optimizer=tf.compat.v1.train.AdamOptimizer(
learning_rate=actor_learning_rate),
critic_optimizer=tf.compat.v1.train.AdamOptimizer(
learning_rate=critic_learning_rate),
alpha_optimizer=tf.compat.v1.train.AdamOptimizer(
learning_rate=alpha_learning_rate),
target_update_tau=target_update_tau,
target_update_period=target_update_period,
td_errors_loss_fn=tf.math.squared_difference,
gamma=gamma,
reward_scale_factor=reward_scale_factor,
train_step_counter=train_step)
tf_agent.initialize()
# + [markdown] id="NLva6g2jdWgr"
# ## Replay Buffer
#
# In order to keep track of the data collected from the environment, we will use [Reverb](https://deepmind.com/research/open-source/Reverb), an efficient, extensible, and easy-to-use replay system by Deepmind. It stores experience data collected by the Actors and consumed by the Learner during training.
#
# In this tutorial, this is less important than `max_size` -- but in a distributed setting with async collection and training, you will probably want to experiment with `rate_limiters.SampleToInsertRatio`, using a samples_per_insert somewhere between 2 and 1000. For example:
# ```
# rate_limiter=reverb.rate_limiters.SampleToInsertRatio(samples_per_insert=3.0, min_size_to_sample=3, error_buffer=3.0))
# ```
#
# + id="vX2zGUWJGWAl"
table_name = 'uniform_table'
table = reverb.Table(
table_name,
max_size=replay_buffer_capacity,
sampler=reverb.selectors.Uniform(),
remover=reverb.selectors.Fifo(),
rate_limiter=reverb.rate_limiters.MinSize(1))
reverb_server = reverb.Server([table])
# + [markdown] id="LRNvAnkO7JK2"
# The replay buffer is constructed using specs describing the tensors that are to be stored, which can be obtained from the agent using `tf_agent.collect_data_spec`.
#
# Since the SAC Agent needs both the current and next observation to compute the loss, we set `sequence_length=2`.
# + id="xVLUxyUo7HQR"
reverb_replay = reverb_replay_buffer.ReverbReplayBuffer(
tf_agent.collect_data_spec,
sequence_length=2,
table_name=table_name,
local_server=reverb_server)
# + [markdown] id="rVD5nQ9ZGo8_"
# Now we generate a TensorFlow dataset from the Reverb replay buffer. We will pass this to the Learner to sample experiences for training.
# + id="ba7bilizt_qW"
dataset = reverb_replay.as_dataset(
sample_batch_size=batch_size, num_steps=2).prefetch(50)
experience_dataset_fn = lambda: dataset
# + [markdown] id="I0KLrEPwkn5x"
# ## Policies
#
# In TF-Agents, policies represent the standard notion of policies in RL: given a `time_step` produce an action or a distribution over actions. The main method is `policy_step = policy.step(time_step)` where `policy_step` is a named tuple `PolicyStep(action, state, info)`. The `policy_step.action` is the `action` to be applied to the environment, `state` represents the state for stateful (RNN) policies and `info` may contain auxiliary information such as log probabilities of the actions.
#
# Agents contain two policies:
#
# - `agent.policy` — The main policy that is used for evaluation and deployment.
# - `agent.collect_policy` — A second policy that is used for data collection.
# + id="yq7JE8IwFe0E"
tf_eval_policy = tf_agent.policy
eval_policy = py_tf_eager_policy.PyTFEagerPolicy(
tf_eval_policy, use_tf_function=True)
# + id="f_A4rZveEQzW"
tf_collect_policy = tf_agent.collect_policy
collect_policy = py_tf_eager_policy.PyTFEagerPolicy(
tf_collect_policy, use_tf_function=True)
# + [markdown] id="azkJZ8oaF8uc"
# Policies can be created independently of agents. For example, use `tf_agents.policies.random_py_policy` to create a policy which will randomly select an action for each time_step.
# + id="BwY7StuMkuV4"
random_policy = random_py_policy.RandomPyPolicy(
collect_env.time_step_spec(), collect_env.action_spec())
# + [markdown] id="l1LMqw60Kuso"
# ## Actors
# The actor manages interactions between a policy and an environment.
# * The Actor components contain an instance of the environment (as `py_environment`) and a copy of the policy variables.
# * Each Actor worker runs a sequence of data collection steps given the local values of the policy variables.
# * Variable updates are done explicitly using the variable container client instance in the training script before calling `actor.run()`.
# * The observed experience is written into the replay buffer in each data collection step.
# + [markdown] id="XjE59ct9fU7W"
# As the Actors run data collection steps, they pass trajectories of (state, action, reward) to the observer, which caches and writes them to the Reverb replay system.
#
# We're storing trajectories for frames [(t0,t1) (t1,t2) (t2,t3), ...] because `stride_length=1`.
# + id="HbyGmdiNfNDc"
rb_observer = reverb_utils.ReverbAddTrajectoryObserver(
reverb_replay.py_client,
table_name,
sequence_length=2,
stride_length=1)
# + [markdown] id="6yaVVC22fOcF"
# We create an Actor with the random policy and collect experiences to seed the replay buffer with.
# + id="ZGq3SY0kKwsa"
initial_collect_actor = actor.Actor(
collect_env,
random_policy,
train_step,
steps_per_run=initial_collect_steps,
observers=[rb_observer])
initial_collect_actor.run()
# + [markdown] id="6Pkg-0vZP_Ya"
# Instantiate an Actor with the collect policy to gather more experiences during training.
# + id="A6ooXyk0FZ5j"
env_step_metric = py_metrics.EnvironmentSteps()
collect_actor = actor.Actor(
collect_env,
collect_policy,
train_step,
steps_per_run=1,
metrics=actor.collect_metrics(10),
summary_dir=os.path.join(tempdir, learner.TRAIN_DIR),
observers=[rb_observer, env_step_metric])
# + [markdown] id="FR9CZ-jfPN2T"
# Create an Actor which will be used to evaluate the policy during training. We pass in `actor.eval_metrics(num_eval_episodes)` to log metrics later.
# + id="vHY2BT5lFhgL"
eval_actor = actor.Actor(
eval_env,
eval_policy,
train_step,
episodes_per_run=num_eval_episodes,
metrics=actor.eval_metrics(num_eval_episodes),
summary_dir=os.path.join(tempdir, 'eval'),
)
# + [markdown] id="y6eBGSYiOf83"
# ## Learners
# The Learner component contains the agent and performs gradient step updates to the policy variables using experience data from the replay buffer. After one or more training steps, the Learner can push a new set of variable values to the variable container.
# + id="gi37YicSFTfF"
saved_model_dir = os.path.join(tempdir, learner.POLICY_SAVED_MODEL_DIR)
# Triggers to save the agent's policy checkpoints.
learning_triggers = [
triggers.PolicySavedModelTrigger(
saved_model_dir,
tf_agent,
train_step,
interval=policy_save_interval),
triggers.StepPerSecondLogTrigger(train_step, interval=1000),
]
agent_learner = learner.Learner(
tempdir,
train_step,
tf_agent,
experience_dataset_fn,
triggers=learning_triggers)
# + [markdown] id="94rCXQtbUbXv"
# ## Metrics and Evaluation
#
# We instantiated the eval Actor with `actor.eval_metrics` above, which creates most commonly used metrics during policy evaluation:
# * Average return. The return is the sum of rewards obtained while running a policy in an environment for an episode, and we usually average this over a few episodes.
# * Average episode length.
#
# We run the Actor to generate these metrics.
# + id="83iMSHUC71RG"
def get_eval_metrics():
  """Run the eval Actor once and collect its metric results keyed by name.

  Relies on the module-level ``eval_actor`` created above.
  """
  eval_actor.run()
  return {metric.name: metric.result() for metric in eval_actor.metrics}
metrics = get_eval_metrics()
# + id="jnOMvX_eZvOW"
def log_eval_metrics(step, metrics):
  """Print ``metrics`` (a name -> float mapping) on one line for ``step``."""
  formatted = ['{} = {:.6f}'.format(name, result)
               for name, result in metrics.items()]
  print('step = {0}: {1}'.format(step, ', '.join(formatted)))
log_eval_metrics(0, metrics)
# + [markdown] id="hWWURm_rXG-f"
# Check out the [metrics module](https://github.com/tensorflow/agents/blob/master/tf_agents/metrics/tf_metrics.py) for other standard implementations of different metrics.
# + [markdown] id="hBc9lj9VWWtZ"
# ## Training the agent
#
# The training loop involves both collecting data from the environment and optimizing the agent's networks. Along the way, we will occasionally evaluate the agent's policy to see how we are doing.
# + id="0pTbJ3PeyF-u"
#@test {"skip": true}
# NOTE(review): the original cell wrapped this in ``try: %%time except: pass``;
# after the notebook -> script conversion the magic became a comment, leaving a
# ``try`` with an empty body -- a SyntaxError. The broken wrapper is dropped.

# Reset the train step.
tf_agent.train_step_counter.assign(0)

# Evaluate the agent's policy once before training.
avg_return = get_eval_metrics()["AverageReturn"]
returns = [avg_return]

for _ in range(num_iterations):
  # Training: one step of environment collection, then one learner update.
  collect_actor.run()
  loss_info = agent_learner.run(iterations=1)

  # Evaluating.
  step = agent_learner.train_step_numpy

  if eval_interval and step % eval_interval == 0:
    metrics = get_eval_metrics()
    log_eval_metrics(step, metrics)
    returns.append(metrics["AverageReturn"])

  if log_interval and step % log_interval == 0:
    print('step = {0}: loss = {1}'.format(step, loss_info.loss.numpy()))

# Shut down the Reverb observer and server once training is finished.
rb_observer.close()
reverb_server.stop()
# + [markdown] id="68jNcA_TiJDq"
# ## Visualization
#
# + [markdown] id="aO-LWCdbbOIC"
# ### Plots
#
# We can plot average return vs global steps to see the performance of our agent. In `Minitaur`, the reward function is based on how far the minitaur walks in 1000 steps and penalizes the energy expenditure.
# + id="rXKzyGt72HS8"
#@test {"skip": true}
steps = range(0, num_iterations + 1, eval_interval)
plt.plot(steps, returns)
plt.ylabel('Average Return')
plt.xlabel('Step')
plt.ylim()
# + [markdown] id="M7-XpPP99Cy7"
# ### Videos
# + [markdown] id="9pGfGxSH32gn"
# It is helpful to visualize the performance of an agent by rendering the environment at each step. Before we do that, let us first create a function to embed videos in this colab.
# + id="ULaGr8pvOKbl"
def embed_mp4(filename):
  """Embeds an mp4 file in the notebook."""
  # Use a context manager so the file handle is closed promptly; the
  # original ``open(filename, 'rb').read()`` leaked the handle.
  with open(filename, 'rb') as f:
    video = f.read()
  b64 = base64.b64encode(video)
  tag = '''
  <video width="640" height="480" controls>
    <source src="data:video/mp4;base64,{0}" type="video/mp4">
    Your browser does not support the video tag.
  </video>'''.format(b64.decode())
  return IPython.display.HTML(tag)
# + [markdown] id="9c_PH-pX4Pr5"
# The following code visualizes the agent's policy for a few episodes:
# + id="PSgaQN1nXT-h"
num_episodes = 3
video_filename = 'sac_minitaur.mp4'
with imageio.get_writer(video_filename, fps=60) as video:
for _ in range(num_episodes):
time_step = eval_env.reset()
video.append_data(eval_env.render())
while not time_step.is_last():
action_step = eval_actor.policy.action(time_step)
time_step = eval_env.step(action_step.action)
video.append_data(eval_env.render())
embed_mp4(video_filename)
| site/en-snapshot/agents/tutorials/7_SAC_minitaur_tutorial.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # 第7章 決定木
# ## 7.1 回帰の決定木
import numpy as np
import matplotlib.pyplot as plt
# %matplotlib inline
import japanize_matplotlib
import scipy
from numpy.random import randn #正規乱数
def sq_loss(y):
    """Squared-error impurity: sum of squared deviations from the mean of y.

    Returns 0 for an empty y (an empty child in a candidate split).
    """
    if len(y) == 0:
        return 0
    return np.linalg.norm(y - np.mean(y)) ** 2
def branch(x, y, S, rf=0):
    """Exhaustively search the best axis-aligned split of the rows in S.

    Each observed value x[i, j] (i in S) is tried as a threshold on feature
    j; a candidate is scored as f(left targets) + f(right targets), where
    ``f`` is the module-level impurity function. Returns
    [i, j, left, right, score, left_score, right_score] for the best split.
    """
    if rf == 0:
        m = x.shape[1]  # kept for parity with the random-forest variant below
    if x.shape[0] == 0:
        return [0, 0, 0, 0, 0, 0, 0]
    best_score = np.inf
    for j in range(x.shape[1]):
        for i in S:
            # Rows strictly below the threshold go left, the rest go right.
            left = [k for k in S if x[k, j] < x[i, j]]
            right = [k for k in S if not x[k, j] < x[i, j]]
            score_l = f(y[left])
            score_r = f(y[right])
            if score_l + score_r < best_score:
                best_score = score_l + score_r
                i_1, j_1 = i, j
                left_1, right_1 = left, right
                left_score_1, right_score_1 = score_l, score_r
    return [i_1, j_1, left_1, right_1, best_score, left_score_1, right_score_1]
class Stack:
    """Pending-split record used by ``dt``'s work list."""

    def __init__(self, parent, set, score):
        # Parent node index, row indices in this subset, impurity score.
        self.parent, self.set, self.score = parent, set, score
class Node:
    """Decision-tree node: split feature ``j``, threshold ``th``, row set.

    ``j == -1`` marks a terminal (leaf) node.
    """

    def __init__(self, parent, j, th, set):
        self.parent, self.j, self.th, self.set = parent, j, th, set
def dt(x, y, alpha=0, n_min=1, rf=0):
    """Grow a decision tree on (x, y) by greedy splitting.

    Args:
        x: (n, p) feature matrix.
        y: length-n target vector (regression values or class labels).
        alpha: minimum impurity decrease required to keep a split.
        n_min: minimum node size eligible for splitting.
        rf: if nonzero, number of features sampled per split (random forest).

    Returns:
        A list of Node objects; node[0] is the root. Leaves have j == -1 and
        carry their prediction in ``center``.

    Relies on the module-level impurity function ``f`` and on ``branch``,
    ``Stack``, ``Node`` and ``mode_max`` defined in this file.
    """
    if rf==0:
        m=x.shape[1]
    # Build a one-element stack and initialize the decision tree.
    stack=[Stack(0, list(range(x.shape[0])), f(y))] # the function f is global
    node=[]
    k=-1
    # Pop the last element of the stack and update the tree.
    while len(stack)>0:
        popped=stack.pop()
        k=k+1
        i, j, left, right, score, left_score, right_score=branch(x, y, popped.set,rf)
        # Make a leaf when the gain is too small, the node is too small,
        # or the best split leaves one side empty; otherwise split and
        # push both children (left last, so it is processed first).
        if popped.score-score<alpha or len(popped.set)<n_min or len(left)==0 or len(right)==0:
            node.append(Node(popped.parent, -1, 0, popped.set))
        else:
            node.append(Node(popped.parent, j, x[i,j], popped.set))
            stack.append(Stack(k, right, right_score))
            stack.append(Stack(k, left, left_score))
    # Below: set the node.left and node.right child indices.
    for h in range(k,-1,-1):
        node[h].left=0; node[h].right=0;
    for h in range(k,0,-1):
        pa=node[h].parent
        # Children were appended right-first, so the first child seen
        # (largest h) is the right one.
        if node[pa].right==0:
            node[pa].right=h
        else:
            node[pa].left=h
    # Below: compute node.center (the leaf prediction):
    # mean of y for regression, modal class otherwise.
    if f==sq_loss:
        g=np.mean
    else:
        g=mode_max
    for h in range(k+1):
        if node[h].j==-1:
            node[h].center=g(y[node[h].set])
        else:
            node[h].center=0
    return(node)
from sklearn.datasets import load_boston
# NOTE(review): load_boston was removed in scikit-learn 1.2; this cell only
# runs with an older pinned sklearn -- confirm the environment's version.
boston = load_boston()
X=boston.data
y=boston.target
# Regression tree: use the squared-error impurity defined above.
f=sq_loss
node=dt(X,y,n_min=50)
len(node)
from igraph import *
r=len(node)
# Edge list: connect every non-root node to its parent.
edge=[]
for h in range(1,r):
    edge.append([node[h].parent,h])
# Table of (node index, split feature, threshold) for nodes with j != 0.
# NOTE(review): the filter is j != 0, which also includes leaves (j == -1);
# presumably j != -1 was intended -- verify.
TAB=[];
for h in range(r):
    if not node[h].j==0:
        TAB.append([h, node[h].j, node[h].th])
TAB
def draw_graph(node):
    """Render the tree in ``node`` with python-igraph, coloring each vertex
    by the feature index used at its split (leaves, j == -1, get the last
    color in the palette)."""
    r = len(node)
    colorlist = ['#ffffff', '#fff8ff', '#fcf9ce', '#d6fada', '#d7ffff',
                 '#d9f2f8', '#fac8be', '#ffebff', '#ffffe0', '#fdf5e6',
                 '#fac8be', '#f8ecd5', '#ee82ee']
    color = [colorlist[v.j] for v in node]
    edge = [[node[h].parent, h] for h in range(1, r)]
    g = Graph(edges=edge, directed=True)
    layout = g.layout_reingold_tilford(root=[0])
    return plot(g, vertex_size=15, layout=layout, bbox=(300, 300),
                vertex_label=list(range(r)), vertex_color=color)
draw_graph(node)
def value(u, node):
    """Predict for one sample ``u`` by descending the tree to a leaf.

    Internal nodes have j != -1: go left when the feature value is strictly
    below the threshold, otherwise right. Returns the leaf's ``center``.
    """
    h = 0
    while node[h].j != -1:
        h = node[h].left if u[node[h].j] < node[h].th else node[h].right
    return node[h].center
from sklearn.datasets import load_boston
# NOTE(review): load_boston was removed in scikit-learn 1.2; this cell only
# runs with an older pinned sklearn -- confirm the environment's version.
boston = load_boston()
n = 100
X = boston.data[range(n), :]
y = boston.target[range(n)]
f = sq_loss
alpha_seq = np.arange(0, 1.5, 0.1)
# Fold size. np.int was removed in NumPy 1.24 -- the builtin int is the
# drop-in replacement (np.int was just an alias for it).
s = int(n / 10)
out = []
# 10-fold cross-validation of mean squared error over the pruning
# parameter alpha.
for alpha in alpha_seq:
    SS = 0
    for h in range(10):
        test = list(range(h * s, h * s + s))
        train = list(set(range(n)) - set(test))
        node = dt(X[train, :], y[train], alpha=alpha)
        for t in test:
            SS = SS + (y[t] - value(X[t, :], node)) ** 2
    print(SS / n)
    out.append(SS / n)
plt.plot(alpha_seq, out)
plt.xlabel('alpha')
plt.ylabel('2乗誤差')
plt.title("CVで最適なalpha (N=100)")
from sklearn.datasets import load_boston
# NOTE(review): load_boston was removed in scikit-learn 1.2; this cell only
# runs with an older pinned sklearn -- confirm the environment's version.
boston = load_boston()
n = 100
X = boston.data[range(n), :]
y = boston.target[range(n)]
n_min_seq = np.arange(1, 13, 1)
# Fold size. np.int was removed in NumPy 1.24 -- use the builtin int.
s = int(n / 10)
out = []
# 10-fold cross-validation of mean squared error over the minimum node
# size n_min.
for n_min in n_min_seq:
    SS = 0
    for h in range(10):
        test = list(range(h * s, h * s + s))
        train = list(set(range(n)) - set(test))
        node = dt(X[train, :], y[train], n_min=n_min)
        for t in test:
            SS = SS + (y[t] - value(X[t, :], node)) ** 2
    print(SS / n)
    out.append(SS / n)
plt.plot(n_min_seq, out)
plt.xlabel('n_min')
plt.ylabel('2乗誤差')
plt.title("CVで最適なn_min (N=100)")
# # 7.2 分類の決定木
def freq(y):
    """Return the count of each distinct label in y (set-iteration order)."""
    items = list(y)
    return [items.count(label) for label in set(items)]
# Mode (highest frequency)
def mode(y):
    """Return the count of the most frequent label in y (0 for empty y).

    Note: despite the name, this returns the modal *count*, not the modal
    label (see ``mode_max`` for the label).
    """
    if len(y) == 0:
        return 0
    return max(freq(y))
# Misclassification rate
def mis_match(y):
    """Misclassification impurity: number of samples outside the modal class."""
    return len(y) - mode(y)
# Gini
def gini(y):
    """(Unnormalized) Gini impurity: sum over classes of count * (n - count)."""
    n = len(y)
    if n == 0:
        return 0
    return sum(count * (n - count) for count in freq(y))
# Entropy
def entropy(y):
    """(Unnormalized) entropy impurity: sum over classes of -count*log(count/n).

    ``y`` must support ``.count`` (i.e. be a list); returns 0 for empty y.
    """
    n = len(y)
    if n == 0:
        return 0
    counts = [y.count(label) for label in set(y)]
    return np.sum([-c * np.log(c / n) for c in counts])
def table_count(m, u, v):
    """Return the m x m contingency table of paired labels (u[i], v[i]).

    (Redefined here for reuse; u and v are equal-length label arrays.)
    """
    count = np.zeros([m, m])
    for a, b in zip(u, v):
        count[int(a), int(b)] += 1
    return count
def mode_max(y):
    """Return the most frequent label in y (ties -> smallest label).

    Returns -inf for an empty y so that empty groups never win a vote.
    Labels must be non-negative integers (np.bincount requirement).
    """
    if len(y) == 0:
        return -np.inf
    return np.argmax(np.bincount(y))
from sklearn.datasets import load_iris
iris = load_iris()
iris.target_names
f=mis_match
n=iris.data.shape[0]
x=iris.data
y=iris.target
n=len(x)
node=dt(x,y,n_min=4)
m=len(node)
u=[]; v=[]
for h in range(m):
if node[h].j==-1:
w=y[node[h].set]
u.extend([node[h].center]*len(w))
v.extend(w)
table_count(3,np.array(u),np.array(v))
#sum([u[i]==v[i] for i in range(150)])
draw_graph(node)
from sklearn.datasets import load_iris
iris = load_iris()
iris.target_names
f=mis_match
index=np.random.choice(n, n, replace=False) # 並び替える
X=iris.data[index,:]
y=iris.target[index]
n_min_seq=np.arange(5,51,5)
s=15
for n_min in n_min_seq:
SS=0
for h in range(10):
test=list(range(h*s,h*s+s))
train=list(set(range(n))-set(test))
node=dt(X[train,:],y[train], n_min=n_min)
for t in test:
SS=SS+np.sum(y[t]!=value(X[t,:],node))
print(SS/n)
# ## 7.3 バギング
n=200
p=5
X=np.random.randn(n,p)
beta=randn(p)
Y=np.array(np.abs(np.dot(X,beta)+randn(n)),dtype=np.int64)
f=mis_match
node_seq=[]
for h in range(8):
index=np.random.choice(n, n, replace=True) # 並び替える
x=X[index,:]
y=Y[index]
node_seq.append(dt(x,y,n_min=6))
draw_graph(node_seq[0])
draw_graph(node_seq[1])
draw_graph(node_seq[2])
def branch(x, y, S, rf=0):
    """Best split over candidate features (random-forest-aware redefinition).

    With ``rf == 0`` every feature is a candidate; with ``rf > 0`` that many
    features are sampled uniformly without replacement, as in random
    forests. Candidates are scored with the module-level impurity function
    ``f``; returns [i, j, left, right, score, left_score, right_score].
    """
    if rf == 0:
        T = np.arange(x.shape[1])
    else:
        T = np.random.choice(x.shape[1], rf, replace=False)
    if x.shape[0] == 0:
        return [0, 0, 0, 0, 0, 0, 0]
    best_score = np.inf
    for j in T:
        for i in S:
            # Rows strictly below the threshold go left, the rest go right.
            left = [k for k in S if x[k, j] < x[i, j]]
            right = [k for k in S if not x[k, j] < x[i, j]]
            score_l = f(y[left])
            score_r = f(y[right])
            if score_l + score_r < best_score:
                best_score = score_l + score_r
                i_1, j_1 = i, j
                left_1, right_1 = left, right
                left_score_1, right_score_1 = score_l, score_r
    return [i_1, j_1, left_1, right_1, best_score, left_score_1, right_score_1]
# ## 7.4 Random forest
def rf(z):
    """Cumulative majority-vote accuracy over the bagged trees.

    z[b, i] holds tree b's prediction for test sample i.  For each
    prefix of trees 0..b the function counts how many of the 50 test
    samples (module-level ``y[100:150]``) the majority vote classifies
    correctly, and returns those counts as a list of length B.
    """
    votes = np.array(z, dtype=np.int64)
    correct_counts = []
    for b in range(B):
        hits = sum(
            mode_max(votes[range(b + 1), i]) == y[i + 100]
            for i in range(50)
        )
        correct_counts.append(hits)
    return correct_counts
# Random forest on iris: for m = 1..4 candidate features per split, plot
# the cumulative majority-vote accuracy on 50 held-out samples as more
# bootstrap trees are added.
iris = load_iris()
iris.target_names
f=mis_match
n=iris.data.shape[0]
order=np.random.choice(n, n, replace=False)  # shuffle the samples
X=iris.data[order,:]
y=iris.target[order]
train=list(range(100))
test=list(range(100,150))
B=100  # number of bootstrap trees
plt.ylim([35, 55])
m_seq=[1,2,3,4]
c_seq=["r","b","g","y"]
label_seq=['m=1','m=2','m=3','m=4']
plt.xlabel('繰り返し数 b')
plt.ylabel('テスト50データでの正答数')
plt.title('ランダムフォレスト')
for m in m_seq:
    z=np.zeros((B,50))
    for b in range(B):
        index=np.random.choice(train, 100, replace=True)  # bootstrap sample
        node=dt(X[index,:],y[index],n_min=2,rf=m)  # rf=m: m candidate features per split
        for i in test:
            z[b,i-100]=value(X[i,],node)
    # the -0.2*(m-2) term only offsets the curves vertically so the four
    # lines remain distinguishable in the plot
    plt.plot(list(range(B)),np.array(rf(z))-0.2*(m-2), label=label_seq[m-1], linewidth=0.8, c=c_seq[m-1])
plt.legend(loc='lower right')
plt.axhline(y=50,c="b",linewidth=0.5,linestyle = "dashed")
# ## 7.5 Boosting
def b_dt(x, y, d):
    """Grow a small tree with exactly d internal splits (2*d+1 nodes)
    for use as a boosting base learner.

    Unlike dt(), the split order is globally greedy: at each step every
    current leaf's best split is scored via branch(), and the leaf with
    the largest impurity gain is the one that gets split.  Leaf values
    ("centers") use np.mean under sq_loss, otherwise the modal label.
    """
    n=x.shape[0]
    node=[]
    first=Node(0, -1, 0, np.arange(n))
    first.score=f(y[first.set])
    node.append(first)
    # Split leaves until the tree holds 2*d+1 nodes (d internal, d+1 leaves).
    while len(node)<=2*d-1:
        r=len(node)
        gain_max=-np.inf
        for h in range(r):
            if node[h].j==-1:  # h is still a leaf: evaluate its best split
                i, j, left, right, score, left_score, right_score=branch(x, y, node[h].set)
                gain=node[h].score-score
                if gain >gain_max:
                    gain_max=gain
                    h_max=h
                    i_0=i; j_0=j
                    left_0=left; right_0=right
                    left_score_0=left_score; right_score_0=right_score
        # Materialise the winning split: threshold + feature on the parent,
        # two fresh leaf nodes appended for the left/right index sets.
        node[h_max].th=x[i_0,j_0]; node[h_max].j=j_0
        next=Node(h_max, -1, 0, left_0)
        next.score=f(y[next.set]); node.append(next)
        next=Node(h_max, -1, 0, right_0)
        next.score=f(y[next.set]); node.append(next)
    r=2*d+1
    # Wire up child pointers from the parent links; a node with no
    # children is (re)marked as a leaf (j = -1).
    for h in range(r):
        node[h].left=0; node[h].right=0
    for h in range(r-1,1,-1):
        pa=node[h].parent
        if node[pa].right==0:
            node[pa].right=h
        else:
            node[pa].left=h
        if node[h].right==0 and node[h].left==0:
            node[h].j=-1
    if f==sq_loss:
        g=np.mean
    else:
        g=mode_max
    # NOTE(review): this pass feeds the *index set* (node[h].set) to g,
    # and is fully overwritten by the final pass below, which correctly
    # uses y[node[h].set].  It looks redundant -- confirm before removing.
    for h in range(r):
        if node[h].j==-1:
            node[h].center=g(node[h].set)
    # Below: set node.left / node.right again (repeat of the wiring above).
    for h in range(r-1,-1,-1):
        node[h].left=0; node[h].right=0;
    for h in range(r-1,0,-1):
        pa=node[h].parent
        if node[pa].right==0:
            node[pa].right=h
        else:
            node[pa].left=h
    # Below: compute node.center, the value a leaf predicts.
    if f==sq_loss:
        g=np.mean
    else:
        g=mode_max
    for h in range(r):
        if node[h].j==-1:
            node[h].center=g(y[node[h].set])
        else:
            node[h].center=0
    return(node)
# NOTE(review): load_boston was deprecated in scikit-learn 1.0 and removed
# in 1.2 -- this cell only runs on older scikit-learn versions.
from sklearn.datasets import load_boston
boston = load_boston()
B=200     # number of boosting rounds
lam=0.1   # learning rate (shrinkage)
X=boston.data
y=boston.target
f=sq_loss  # squared loss: regression trees
train=list(range(200))
test=list(range(200,300))
# Generate B boosting trees for each depth d.
# Takes roughly 5 minutes per d (about 15 minutes in total).
trees_set=[]
for d in range(1,4):
    trees=[]
    r=y[train]  # residuals; updated after every fitted tree
    for b in range(B):
        trees.append(b_dt(X[train,:],r,d))
        for i in train:
            r[i]=r[i]-lam*value(X[i,:],trees[b])  # gradient-boosting update
        print(b)
    trees_set.append(trees)
# Evaluate on the test data: accumulate the shrunken predictions tree by
# tree and record the running mean squared error.
out_set=[]
for d in range(1,4):
    trees=trees_set[d-1]
    z=np.zeros((B,600))
    for i in test:
        z[0,i]=lam*value(X[i,],trees[0])
    for b in range(1,B):
        for i in test:
            z[b,i]=z[b-1,i]+lam*value(X[i,:],trees[b])
    out=[]
    for b in range(B):
        out.append(sum((y[test]-z[b,test])**2)/len(test))
    out_set.append(out)
# Plot the test error against the number of trees.
plt.ylim([0, 40])
c_seq=["r","b","g"]
label_seq=['d=1','d=2','d=3']
plt.xlabel('生成した木の個数')
plt.ylabel('テストデータでの二乗誤差')
plt.title('本書のプログラム (lambda=0.1)')
for d in range(1,4):
    out=out_set[d-1]
    u=range(20,100)
    v=out[20:100];
    plt.plot(u,v,label=label_seq[d-1], linewidth=0.8, c=c_seq[d-1])
plt.legend(loc='upper right')
# +
# Same experiment with the lightgbm package for comparison.
import lightgbm as lgb
from sklearn.datasets import load_boston
boston = load_boston()
X=boston.data
y=boston.target
train=list(range(200))
test=list(range(200,300))
B=200  # NOTE(review): immediately overwritten with 5000 below
lgb_train = lgb.Dataset(X[train,:], y[train])
lgb_eval = lgb.Dataset(X[test,:], y[test], reference=lgb_train)
B=5000
# Evaluation points: dense at the start, then every 10, then every 50 rounds.
nn_seq=list(range(1,10,1))+list(range(10,91,10))+list(range(100,B,50))
out_set=[]
for d in range(1,4):
    lgbm_params = {
        'objective': 'regression',
        'metric': 'rmse',
        'num_leaves': d+1,  # d+1 leaves corresponds to d splits, as in b_dt
        'learning_rate': 0.001
    }
    out=[]
    for nn in nn_seq:
        model = lgb.train(lgbm_params,lgb_train, valid_sets=lgb_eval,verbose_eval=False, num_boost_round=nn)
        z= model.predict(X[test,:], num_iteration=model.best_iteration)
        out.append(sum((z-y[test])**2)/100)  # MSE over the 100 test samples
    out_set.append(out)
# -
# Plot the test error against the number of boosting rounds.
plt.ylim([0, 80])
c_seq=["r","b","g"]
label_seq=['d=1','d=2','d=3']
plt.xlabel('生成した木の個数')
plt.ylabel('テストデータでの二乗誤差')
plt.title('lightgbm パッケージ (lambda=0.001)')
for d in range(1,4):
    out=out_set[d-1]
    u=range(20,100)
    v=out[20:100];
    plt.plot(u,v,label=label_seq[d-1], linewidth=0.8, c=c_seq[d-1])
plt.legend(loc='upper right')
# Quick smoke test of branch() on random regression data: split 10
# randomly chosen samples under the squared-loss impurity.
f=sq_loss
n=100; p=5
x=randn(n,p)
y=randn(n)
S=np.random.choice(n, 10, replace=False)
branch(x,y,S)
| 2-7.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: PaddlePaddle 2.1.2 (Python 3.5)
# language: python
# name: py35-paddle1.2.0
# ---
# # 【新手入门】PaddleX实现小白的第一个目标检测任务
#
# 适合与我一样的小白上手PaddleX简单预训练模型,可以按照自己的想法完成自己的第一个Paddle项目(或是demo),并且PaddleX的全流程开发,无需担心自己对于PaddlePaddle不熟悉。
#
# # 一、项目背景
#
# 在飞桨领航团中学习了一些paddlex项目的流程,尝试写一个目标检测demo,调用预训练模型进行对数据集图片进行训练后,对测试集中图片目标检测。
#
# # 二、数据集简介
#
# PASCAL VOC为图像识别和分类提供了一整套标准化的优秀的数据集,从2005年到2012年每年都会举行一场图像识别challenge。此数据集可以用于图像分类、目标检测、图像分割,非常适合我这样的小白入门尝试,而Paddlex对于VOC十分友好。
#
# 下载paddlex依赖方便后续使用
# !pip install paddlex==2.0rc
#解压数据集
# !tar -xf /home/aistudio/data/data37195/VOC2012.tar
#调用paddlex切分数据集----7:2:1的分布训练集、验证集、测试集(在voc文件下)
#paddlex的便利性,直接一步切分数据集
# !paddlex --split_dataset --format VOC --dataset_dir VOCdevkit/VOC2012 --val_value 0.2 --test_value 0.1
# +
# Set up the image transform pipelines for training and evaluation.
# Use GPU 0 for training (remove the environment line below to run on CPU).
import paddlex as pdx
from paddlex import transforms as T
import os
os.environ['CUDA_VISIBLE_DEVICES'] = '0'
# Augmentation pipeline applied to training images.
train_transforms = T.Compose([
    T.MixupImage(mixup_epoch=250), T.RandomDistort(),
    T.RandomExpand(im_padding_value=[123.675, 116.28, 103.53]), T.RandomCrop(),
    T.RandomHorizontalFlip(), T.BatchRandomResize(
        target_sizes=[320, 352, 384, 416, 448, 480, 512, 544, 576, 608],
        interp='RANDOM'), T.Normalize(
            mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
])
# Deterministic pipeline for validation images.
eval_transforms = T.Compose([
    T.Resize(
        608, interp='CUBIC'), T.Normalize(
            mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
])
# +
# Load the split datasets using the list files produced earlier by
# `paddlex --split_dataset`, applying the transforms defined above.
train_dataset = pdx.datasets.VOCDetection(
    data_dir='VOCdevkit/VOC2012',
    file_list='VOCdevkit/VOC2012/train_list.txt',
    label_list='VOCdevkit/VOC2012/labels.txt',
    transforms=train_transforms,
    shuffle=True)
eval_dataset = pdx.datasets.VOCDetection(
    data_dir='VOCdevkit/VOC2012',
    file_list='VOCdevkit/VOC2012/val_list.txt',
    label_list='VOCdevkit/VOC2012/labels.txt',
    transforms=eval_transforms,
    shuffle=False)
# +
# Configure and train a YOLOv3 detector with a MobileNetV1 backbone.
# Run this cell to start training.
num_classes = len(train_dataset.labels)
model = pdx.models.YOLOv3(num_classes=num_classes, backbone='MobileNetV1')
# Training hyper-parameters.
model.train(
    num_epochs=200,
    train_dataset=train_dataset,
    train_batch_size=8,
    eval_dataset=eval_dataset,
    learning_rate=0.001 / 8,  # base learning rate scaled by batch size
    warmup_steps=1000,
    warmup_start_lr=0.0,
    save_interval_epochs=5,
    # NOTE(review): both decay epochs exceed num_epochs=200, so this LR
    # decay never fires -- confirm whether that is intended.
    lr_decay_epochs=[216, 243],
    save_dir='output/yolov3_MobileNetV1')
# +
# Run inference on a single image and visualise the detections.
test_jpg = 'VOCdevkit/VOC2012/JPEGImages/2011_001878.jpg'  # any image from the test split works
# NOTE(review): training above saves to output/yolov3_MobileNetV1, but this
# loads output/yolov3_darknet53 (a 30-epoch checkpoint) -- confirm the path.
model = pdx.load_model('output/yolov3_darknet53/epoch_30')
result = model.predict(test_jpg)
# The visualised result is written to ./visualized_test.jpg (see image below).
# pdx.det.visualize(test_jpg, result, ...) from the PaddleX docs failed here,
# so the visualize_det entry point is used instead.
pdx.visualize_det(test_jpg, result, threshold=0.3, save_dir='./')
# -
# # 四、效果展示
#
# 多训练调参后可以得到更好的效果
#
# 
#
# # 五、总结
#
# 自己尝试写的demo,虽然很简单,但是经过paddlex的使用,有了继续探索paddle深度学习框架的兴趣,希望大佬留下宝贵的建议!感谢
#
# # 六、个人简介
#
# 吴世君 东北大学秦皇岛分校 测控技术与仪器专业 大二本科生
#
# 感兴趣方向:计算机视觉
#
# 小小青铜渴望关注
#
# 来互关呀~ [me jun](https://aistudio.baidu.com/aistudio/personalcenter/thirdview/791981)
#
| 2278259(1).ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
# %load_ext autoreload
# %autoreload 2
dataset_dir = "/runai-ivrl-scratch/students/2021-fall-sp-jellouli/mix6"  # mix6 training-data root
# +
import os, sys
# Make the project root and the vendored MiDaS checkout importable.
sys.path.append(os.path.normpath(os.path.join(os.getcwd(), '..')))
sys.path.append(os.path.normpath(os.path.join(os.getcwd(), '..', 'external', 'MiDaS')))
import matplotlib.pyplot as plt
from midas_hkrm.utils.img_utils import read_image
# -
# %env MIX6_DATASETS=/runai-ivrl-scratch/students/2021-fall-sp-jellouli/mix6
# %env ZERO_SHOT_DATASETS=/runai-ivrl-scratch/students/2021-fall-sp-jellouli/zero_shot_datasets
# ## NYU
# +
from scipy.io import loadmat
import numpy as np
import h5py
import midas_hkrm
import midas_hkrm.datasets
from midas_hkrm.datasets import NYU
from torch.utils.data import DataLoader
# -
# Paths to the NYU Depth v2 labelled data and its official train/test split.
data_path = "/runai-ivrl-scratch/students/2021-fall-sp-jellouli/zero_shot_datasets/NYU/nyu_depth_v2_labeled.mat"
split_path = "/runai-ivrl-scratch/students/2021-fall-sp-jellouli/zero_shot_datasets/NYU/splits.mat"
dataset = NYU(data_path, split_path)
# batch_size=1 with an identity collate_fn: each batch is a one-element
# list holding a single raw sample, left un-collated.
loader = DataLoader(dataset,batch_size=1, collate_fn=lambda x: x)
# Pull one (image, depth) pair out of the loader for inspection.
for i in loader:
    batch = i[0]
    image, depth = batch
    break
| notebooks/zero_shot.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 2
# language: python
# name: python2
# ---
# # Installation Instructions
#
# Download and install miniconda:
# https://conda.io/miniconda.html
#
# Make sure you are using the conda-forge channel:
# ```bash
# $ conda config --add channels conda-forge
# $ conda update --yes conda python
# ```
#
# Install gsshapy:
# ```bash
# $ conda create -n gssha python=2
# $ source activate gssha
# (gssha)$ conda install --yes gsshapy jupyter
# ```
#
# Install GSSHA:
# http://www.gsshawiki.com/GSSHA_Download
#
# <div class="alert alert-warning">
# For Windows, add GSSHA executable to Path:
# <ul><ol>
# <li>Go to: "Control Panel\System and Security\System"</li>
# <li>Click "Advanced system settings"</li>
# <li>Click "Environmental variables ..."</li>
# <li>Edit the "Path" variable under "User variables" and add the path to the directory containing GSSHA (i.e. C:\Program Files\U.S. Army\gssha70)</li>
# </ol></ul>
# </div>
# Make sure GSSHA is on the PATH:
# !gssha70
# +
import os
from datetime import datetime, timedelta
from gsshapy.modeling import GSSHAFramework
# -
# Setup environment:
# assuming the notebook is run from the examples folder
base_dir = os.getcwd()
gssha_model_name = 'philippines_example'
gssha_model_directory = os.path.join(base_dir, gssha_model_name)
# Run the model:
# +
# Configure a short 2-minute GSSHA simulation using the gssha70 executable.
gr = GSSHAFramework("gssha70",
                    gssha_model_directory,
                    "{0}.prj".format(gssha_model_name),
                    gssha_simulation_start=datetime(2017, 5 ,9),
                    gssha_simulation_duration=timedelta(seconds=2*60))
# http://www.gsshawiki.com/Model_Construction:Defining_a_uniform_precipitation_event
rain_intensity = 24 # mm/hr
rain_duration = timedelta(seconds=1*60) # 1 minute of rainfall
gr.event_manager.add_uniform_precip_event(rain_intensity,
                                          rain_duration)
gssha_event_directory = gr.run()
# -
# The `gssha_event_directory` is where the simulation output is stored.
gssha_event_directory
| notebooks/Run GSSHA Model.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
"""Represent a volume as lego blocks (voxels). Colors correspond to the volume's scalar."""
# https://matplotlib.org/users/colormaps.html
from vedo import *
vol = load(dataurl+'embryo.tif') # load Volume
printHistogram(vol, logscale=True)
vol.crop(back=0.5) # crop 50% from neg. y
# show lego blocks whose value is between vmin and vmax
lego = vol.legosurface(vmin=60, cmap='seismic')  # keep only voxels with scalar >= 60
lego.addScalarBar(horizontal=1)  # attach a horizontal colour bar to the mesh
plt = show(lego, viewup='z', axes=1)
plt
# -
plt.close()
| examples/notebooks/volumetric/legosurface.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# %load_ext autoreload
# %autoreload 2
# +
import models
from torchvision.utils import make_grid
from torch import nn
import matplotlib.pyplot as plt
def show_conv1(model):
    """Plot the kernels of the model's first Conv2d layer as an image grid.

    The kernel weights are min-max normalised to [0, 1] before display.
    Returns the matplotlib AxesImage produced by imshow.
    """
    non_sequential = [mod for mod in model.modules() if type(mod) != nn.Sequential]
    for layer in non_sequential:
        if isinstance(layer, nn.Conv2d):
            break
    weights = layer.weight.detach().clone().cpu()
    weights = weights - weights.min()
    weights = weights / weights.max()
    grid = make_grid(weights, nrow=16)
    return plt.imshow(grid.permute(1, 2, 0))
# -
# Load a model by name from the project's model zoo and show its first-layer kernels.
model_name = 'ipcl1'
model, transform = models.__dict__[model_name]()
model
show_conv1(model);
# If you are getting activations for test images, use the supplied transform.
# Note that different models used different normalization statistics;
# transforms used for training and evaluation can be found in the corresponding scripts.
transform
# +
from PIL import Image
from lib.feature_extractor import FeatureExtractor
# load an image
img = Image.open('./images/cheetah.jpg')
# transform and add batch dimension
img = transform(img).unsqueeze(0)
# get features from fc7
model.eval()
with FeatureExtractor(model, 'fc7') as extractor:
    features = extractor(img)
for name,val in features.items():
    print(name, val.shape)
# -
# get features from fc7, fc8, and l2norm layers
model.eval()
with FeatureExtractor(model, ['fc7','fc8','l2norm']) as extractor:
    features = extractor(img)
for name,val in features.items():
    print(name, val.shape)
# get features from conv_block1.0, conv_block1.1, conv_block1.2
model.eval()
with FeatureExtractor(model, ['conv_block_1.0','conv_block_1.1','conv_block_1.2']) as extractor:
    features = extractor(img)
for name,val in features.items():
    print(name, val.shape)
| load_models.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python
# language: python
# name: conda-env-python-py
# ---
# <center>
# <img src="https://gitlab.com/ibm/skills-network/courses/placeholder101/-/raw/master/labs/module%201/images/IDSNlogo.png" width="300" alt="cognitiveclass.ai logo" />
# </center>
#
# # **Web Scraping Lab**
#
# Estimated time needed: **30** minutes
#
# ## Objectives
#
# After completing this lab you will be able to:
#
# <h2>Table of Contents</h2>
# <div class="alert alert-block alert-info" style="margin-top: 20px">
# <ul>
# <li>
# <a href="BSO">Beautiful Soup Object</a>
# <ul>
# <li>Tag</li>
# <li>Children, Parents, and Siblings</li>
# <li>HTML Attributes</li>
# <li>Navigable String</li>
# </ul>
# </li>
# </ul>
# <ul>
# <li>
# <a href="filter">Filter</a>
# <ul>
# <li>find All</li>
# <li>find </li>
# <li>HTML Attributes</li>
# <li>Navigable String</li>
# </ul>
# </li>
# </ul>
# <ul>
# <li>
# <a href="DSCW">Downloading And Scraping The Contents Of A Web</a>
# </li>
# </ul>
# <p>
# Estimated time needed: <strong>25 min</strong>
# </p>
#
# </div>
#
# <hr>
#
#
#
# For this lab, we are going to be using Python and several Python libraries. Some of these libraries might be installed in your lab environment or in SN Labs. Others may need to be installed by you. The cells below will install these libraries when executed.
#
# !pip install bs4
# #!pip install requests
# Import the required modules and functions
#
from bs4 import BeautifulSoup # this module helps in web scraping.
import requests # this module helps us to download a web page
# <h2 id="BSO">Beautiful Soup Objects</h2>
#
# Beautiful Soup is a Python library for pulling data out of HTML and XML files, we will focus on HTML files. This is accomplished by representing the HTML as a set of objects with methods used to parse the HTML. We can navigate the HTML as a tree and/or filter out what we are looking for.
#
# Consider the following HTML:
#
# + language="html"
# <!DOCTYPE html>
# <html>
# <head>
# <title>Page Title</title>
# </head>
# <body>
# <h3><b id='boldest'><NAME></b></h3>
# <p> Salary: $ 92,000,000 </p>
# <h3> <NAME></h3>
# <p> Salary: $85,000,000 </p>
# <h3> <NAME> </h3>
# <p> Salary: $73,200,000</p>
# </body>
# </html>
# -
# We can store it as a string in the variable HTML:
#
html="<!DOCTYPE html><html><head><title>Page Title</title></head><body><h3><b id='boldest'><NAME></b></h3><p> Salary: $ 92,000,000 </p><h3> <NAME></h3><p> Salary: $85,000, 000 </p><h3> <NAME> </h3><p> Salary: $73,200, 000</p></body></html>"
# To parse a document, pass it into the <code>BeautifulSoup</code> constructor, the <code>BeautifulSoup</code> object, which represents the document as a nested data structure:
#
soup = BeautifulSoup(html, 'html5lib')
# First, the document is converted to Unicode, (similar to ASCII), and HTML entities are converted to Unicode characters. Beautiful Soup transforms a complex HTML document into a complex tree of Python objects. The <code>BeautifulSoup</code> object can create other types of objects. In this lab, we will cover <code>BeautifulSoup</code> and <code>Tag</code> objects that for the purposes of this lab are identical, and <code>NavigableString</code> objects.
#
# We can use the method <code>prettify()</code> to display the HTML in the nested structure:
#
print(soup.prettify())
# ## Tags
#
# Let's say we want the title of the page and the name of the top paid player we can use the <code>Tag</code>. The <code>Tag</code> object corresponds to an HTML tag in the original document, for example, the tag title.
#
tag_object=soup.title
print("tag object:",tag_object)
# we can see the tag type <code>bs4.element.Tag</code>
#
print("tag object type:",type(tag_object))
# If there is more than one <code>Tag</code> with the same name, the first element with that <code>Tag</code> name is called, this corresponds to the most paid player:
#
tag_object=soup.h3
tag_object
# Enclosed in the bold attribute <code>b</code>, it helps to use the tree representation. We can navigate down the tree using the child attribute to get the name.
#
# ### Children, Parents, and Siblings
#
# As stated above the <code>Tag</code> object is a tree of objects we can access the child of the tag or navigate down the branch as follows:
#
tag_child =tag_object.b
tag_child
# You can access the parent with the <code> parent</code>
#
parent_tag=tag_child.parent
parent_tag
# this is identical to
#
tag_object
# <code>tag_object</code> parent is the <code>body</code> element.
#
tag_object.parent
# <code>tag_object</code> sibling is the <code>paragraph</code> element
#
sibling_1=tag_object.next_sibling
sibling_1
# `sibling_2` is the `header` element which is also a sibling of both `sibling_1` and `tag_object`
#
sibling_2=sibling_1.next_sibling
sibling_2
# <h3 id="first_question">Exercise: <code>next_sibling</code></h3>
#
# Using the object <code>sibling_2</code> and the method <code>next_sibling</code> to find the salary of <NAME>:
#
sibling_2.next_sibling
# <details><summary>Click here for the solution</summary>
#
# ```
# sibling_2.next_sibling
#
# ```
#
# </details>
#
# ### HTML Attributes
#
# If the tag has attributes, the tag <code>id="boldest"</code> has an attribute <code>id</code> whose value is <code>boldest</code>. You can access a tag’s attributes by treating the tag like a dictionary:
#
tag_child['id']
# You can access that dictionary directly as <code>attrs</code>:
#
tag_child.attrs
# You can also work with Multi-valued attribute check out <a href="https://www.crummy.com/software/BeautifulSoup/bs4/doc/">[1]</a> for more.
#
# We can also obtain the content if the attribute of the <code>tag</code> using the Python <code>get()</code> method.
#
tag_child.get('id')
# ### Navigable String
#
# A string corresponds to a bit of text or content within a tag. Beautiful Soup uses the <code>NavigableString</code> class to contain this text. In our HTML we can obtain the name of the first player by extracting the string of the <code>Tag</code> object <code>tag_child</code> as follows:
#
tag_string=tag_child.string
tag_string
# we can verify the type is Navigable String
#
type(tag_string)
# A NavigableString is just like a Python string or Unicode string, to be more precise. The main difference is that it also supports some <code>BeautifulSoup</code> features. We can convert it to a string object in Python:
#
unicode_string = str(tag_string)
unicode_string
# <h2 id="filter">Filter</h2>
#
# Filters allow you to find complex patterns, the simplest filter is a string. In this section we will pass a string to a different filter method and Beautiful Soup will perform a match against that exact string. Consider the following HTML of rocket launchs:
#
# + language="html"
# <table>
# <tr>
# <td id='flight' >Flight No</td>
# <td>Launch site</td>
# <td>Payload mass</td>
# </tr>
# <tr>
# <td>1</td>
# <td><a href='https://en.wikipedia.org/wiki/Florida'>Florida</a></td>
# <td>300 kg</td>
# </tr>
# <tr>
# <td>2</td>
# <td><a href='https://en.wikipedia.org/wiki/Texas'>Texas</a></td>
# <td>94 kg</td>
# </tr>
# <tr>
# <td>3</td>
# <td><a href='https://en.wikipedia.org/wiki/Florida'>Florida<a> </td>
# <td>80 kg</td>
# </tr>
# </table>
# -
# We can store it as a string in the variable <code>table</code>:
#
table="<table><tr><td id='flight'>Flight No</td><td>Launch site</td> <td>Payload mass</td></tr><tr> <td>1</td><td><a href='https://en.wikipedia.org/wiki/Florida'>Florida<a></td><td>300 kg</td></tr><tr><td>2</td><td><a href='https://en.wikipedia.org/wiki/Texas'>Texas</a></td><td>94 kg</td></tr><tr><td>3</td><td><a href='https://en.wikipedia.org/wiki/Florida'>Florida<a> </td><td>80 kg</td></tr></table>"
table_bs = BeautifulSoup(table, 'html5lib')
# ## find All
#
# The <code>find_all()</code> method looks through a tag’s descendants and retrieves all descendants that match your filters.
#
# <p>
# The method signature for <code>find_all(name, attrs, recursive, string, limit, **kwargs)</code>
# </p>
#
#
# ### Name
#
# When we set the <code>name</code> parameter to a tag name, the method will extract all the tags with that name and its children.
#
table_rows=table_bs.find_all('tr')
table_rows
table_bs.find_all('a')
# The result is a Python Iterable just like a list, each element is a <code>tag</code> object:
#
first_row =table_rows[0]
first_row
second_row = table_rows[1]
second_row
# The type is <code>tag</code>
#
print(type(first_row))
print(type(second_row))
# we can obtain the child
#
first_row.td
second_row.td
# If we iterate through the list, each element corresponds to a row in the table:
#
for i,row in enumerate(table_rows):
print("row",i,"is",row)
# As <code>row</code> is a <code>cell</code> object, we can apply the method <code>find_all</code> to it and extract table cells in the object <code>cells</code> using the tag <code>td</code>, this is all the children with the name <code>td</code>. The result is a list, each element corresponds to a cell and is a <code>Tag</code> object, we can iterate through this list as well. We can extract the content using the <code>string</code> attribute.
#
# For each row, extract its <td> cells with find_all and print them.
# (Fixes the "colunm" typo in the printed label.)
for i,row in enumerate(table_rows):
    print("row",i)
    cells=row.find_all('td')
    for j,cell in enumerate(cells):
        print('column',j,"cell",cell)
# If we use a list we can match against any item in that list.
#
list_input=table_bs.find_all(name=["tr", "td"])
list_input
# ## Attributes
#
# If the argument is not recognized it will be turned into a filter on the tag’s attributes. For example the <code>id</code> argument, Beautiful Soup will filter against each tag’s <code>id</code> attribute. For example, the first <code>td</code> elements have a value of <code>id</code> of <code>flight</code>, therefore we can filter based on that <code>id</code> value.
#
table_bs.find_all(id="flight")
# We can find all the elements that have links to the Florida Wikipedia page:
#
list_input=table_bs.find_all(href="https://en.wikipedia.org/wiki/Florida")
list_input
# If we set the <code>href</code> attribute to True, regardless of what the value is, the code finds all tags with <code>href</code> value:
#
table_bs.find_all(href=True)
# There are other methods for dealing with attributes and other related methods; Check out the following <a href='https://www.crummy.com/software/BeautifulSoup/bs4/doc/#css-selectors'>link</a>
#
# <h3 id="exer_type">Exercise: <code>find_all</code></h3>
#
# Using the logic above, find all the elements without <code>href</code> value
#
table_bs.find_all(href=False)
# <details><summary>Click here for the solution</summary>
#
# ```
# table_bs.find_all(href=False)
#
# ```
#
# </details>
#
# Using the soup object <code>soup</code>, find the element with the <code>id</code> attribute content set to <code>"boldest"</code>.
#
soup.find_all(id='boldest')
# <details><summary>Click here for the solution</summary>
#
# ```
# soup.find_all(id="boldest")
#
# ```
#
# </details>
#
# ### string
#
# With string you can search for strings instead of tags, where we find all the elements with Florida:
#
table_bs.find_all(string="Florida")
# ## find
#
# The <code>find_all()</code> method scans the entire document looking for results; if you are looking for only one element, you can use the <code>find()</code> method to find the first element in the document. Consider the following two tables:
#
# + language="html"
# <h3>Rocket Launch </h3>
#
# <p>
# <table class='rocket'>
# <tr>
# <td>Flight No</td>
# <td>Launch site</td>
# <td>Payload mass</td>
# </tr>
# <tr>
# <td>1</td>
# <td>Florida</td>
# <td>300 kg</td>
# </tr>
# <tr>
# <td>2</td>
# <td>Texas</td>
# <td>94 kg</td>
# </tr>
# <tr>
# <td>3</td>
# <td>Florida </td>
# <td>80 kg</td>
# </tr>
# </table>
# </p>
# <p>
#
# <h3>Pizza Party </h3>
#
#
# <table class='pizza'>
# <tr>
# <td>Pizza Place</td>
# <td>Orders</td>
# <td>Slices </td>
# </tr>
# <tr>
# <td>Domino's Pizza</td>
# <td>10</td>
# <td>100</td>
# </tr>
# <tr>
# <td>Little Caesars</td>
# <td>12</td>
# <td >144 </td>
# </tr>
# <tr>
# <td>Papa John's </td>
# <td>15 </td>
# <td>165</td>
# </tr>
#
# -
# We store the HTML as a Python string and assign <code>two_tables</code>:
#
two_tables="<h3>Rocket Launch </h3><p><table class='rocket'><tr><td>Flight No</td><td>Launch site</td> <td>Payload mass</td></tr><tr><td>1</td><td>Florida</td><td>300 kg</td></tr><tr><td>2</td><td>Texas</td><td>94 kg</td></tr><tr><td>3</td><td>Florida </td><td>80 kg</td></tr></table></p><p><h3>Pizza Party </h3><table class='pizza'><tr><td>Pizza Place</td><td>Orders</td> <td>Slices </td></tr><tr><td>Domino's Pizza</td><td>10</td><td>100</td></tr><tr><td>Little Caesars</td><td>12</td><td >144 </td></tr><tr><td>Papa John's </td><td>15 </td><td>165</td></tr>"
# We create a <code>BeautifulSoup</code> object <code>two_tables_bs</code>
#
two_tables_bs= BeautifulSoup(two_tables, 'html.parser')
# We can find the first table using the tag name table
#
two_tables_bs.find("table")
# We can filter on the class attribute to find the second table, but because class is a keyword in Python, we add an underscore.
#
two_tables_bs.find("table",class_='pizza')
# <h2 id="DSCW">Downloading And Scraping The Contents Of A Web Page</h2>
#
# We Download the contents of the web page:
#
url = "http://www.ibm.com"
# We use <code>get</code> to download the contents of the webpage in text format and store in a variable called <code>data</code>:
#
data = requests.get(url).text
# We create a <code>BeautifulSoup</code> object using the <code>BeautifulSoup</code> constructor
#
soup = BeautifulSoup(data, "html5lib") # create a soup object using the variable 'data'
# Scrape all links
#
for link in soup.find_all('a', href=True): # in html anchor/link is represented by the tag <a>
print(link.get('href'))
# ## Scrape all images Tags
#
for link in soup.find_all('img'): # in html image is represented by the tag <img>
print(link)
print(link.get('src'))
# ## Scrape data from HTML tables
#
#The below url contains an html table with data about colors and color codes.
url = "https://cf-courses-data.s3.us.cloud-object-storage.appdomain.cloud/IBM-DA0321EN-SkillsNetwork/labs/datasets/HTMLColorCodes.html"
# Before proceeding to scrape a web site, you need to examine the contents, and the way data is organized on the website. Open the above url in your browser and check how many rows and columns are there in the color table.
#
# get the contents of the webpage in text format and store in a variable called data
data = requests.get(url).text
soup = BeautifulSoup(data,"html5lib")
#find a html table in the web page
table = soup.find('table') # in html table is represented by the tag <table>
table
# Get all rows from the table and print the colour name / code / RGB triple.
for row in table.find_all('tr'): # in html table row is represented by the tag <tr>
    # Get all columns in each row.
    cols = row.find_all('td') # in html a column is represented by the tag <td>
    color_name = cols[2].string # store the value in column 3 as color_name
    color_code = cols[3].string # store the value in column 4 as color_code
    color_rgb = cols[4].string # store the value in column 5 as color_rgb
    print("{}--->{}--->{}".format(color_name,color_code,color_rgb))
# ## Scrape data from HTML tables into a DataFrame using BeautifulSoup and Pandas
#
import pandas as pd
# The below url contains html tables with data about world population.
url = "https://en.wikipedia.org/wiki/World_population"
# Before proceeding to scrape a web site, you need to examine the contents, and the way data is organized on the website. Open the above url in your browser and check the tables on the webpage.
#
# Get the contents of the webpage in text format and store in a variable called data.
data = requests.get(url).text
soup = BeautifulSoup(data,"html5lib")
# Find all html tables in the web page.
tables = soup.find_all('table') # in html a table is represented by the tag <table>
# We can see how many tables were found by checking the length of the tables list.
len(tables)
# Assume that we are looking for the `10 most densely populated countries` table. We can look through the tables list and find the right one based on the data in each table, or we can search for the table name if it is present in the table — but this option might not always work.
#
for index,table in enumerate(tables):
    if ("10 most densely populated countries" in str(table)):
        table_index = index
print(table_index)
# See if you can locate the table name of the table, `10 most densely populated countries`, below.
#
print(tables[table_index].prettify())
# +
# Collect each data row as a dict, then build the DataFrame in one shot.
# DataFrame.append was deprecated in pandas 1.4 and removed in 2.0, and
# appending row-by-row copies the frame each time (quadratic) anyway.
rows = []
for row in tables[table_index].tbody.find_all("tr"):
    col = row.find_all("td")
    if (col != []):
        rows.append({
            "Rank": col[0].text,
            "Country": col[1].text,
            "Population": col[2].text.strip(),
            "Area": col[3].text.strip(),
            "Density": col[4].text.strip(),
        })
population_data = pd.DataFrame(rows, columns=["Rank", "Country", "Population", "Area", "Density"])
population_data
# -
# Locate the table whose content mentions the 2020 population-by-continent estimates.
for index, table in enumerate(tables):
    if ("Population by continent (2020 estimates)" in str(table)):
        table_index = index
print(table_index)
# +
# Build the DataFrame from a list of row dicts instead of DataFrame.append,
# which was deprecated in pandas 1.4 and removed in 2.0.
continent_rows = []
for row in tables[table_index].tbody.find_all("tr"):
    col = row.find_all("td")
    if (col != []):
        continent_rows.append({
            "Continent": col[0].text,
            "Density": col[1].text,
            "Population": col[2].text.strip(),
            "Most populous country": col[3].text.strip(),
            "Most Populous city": col[4].text.strip(),
        })
population_continent = pd.DataFrame(continent_rows, columns=["Continent", "Density", "Population", "Most populous country", "Most Populous city"])
population_continent
# -
# ## Scrape data from HTML tables into a DataFrame using BeautifulSoup and read_html
#
# Using the same `url`, `data`, `soup`, and `tables` object as in the last section we can use the `read_html` function to create a DataFrame.
#
# Remember the table we need is located in `tables[table_index]`
#
# We can now use the `pandas` function `read_html` and give it the string version of the table as well as the `flavor` which is the parsing engine `bs4`.
#
# NOTE(review): the hard-coded indices tables[5] / tables[1] / tables[2] below
# are position-dependent — confirm them if Wikipedia's page layout changes.
pd.read_html(str(tables[5]), flavor='bs4')
# The function `read_html` always returns a list of DataFrames so we must pick the one we want out of the list.
#
# +
population_data_read_html = pd.read_html(str(tables[5]), flavor='bs4')[0]
population_data_read_html
# + tags=[]
pd.read_html(str(tables[1]), flavor='bs4')
# +
population_continent_read_html = pd.read_html(str(tables[1]), flavor='bs4')[0]
population_continent_read_html
# +
population_milestones_read_html = pd.read_html(str(tables[2]), flavor='bs4')[0]
population_milestones_read_html
# -
# ## Scrape data from HTML tables into a DataFrame using read_html
#
# We can also use the `read_html` function to directly get DataFrames from a `url`.
#
dataframe_list = pd.read_html(url, flavor='bs4')
# We can see there are 25 DataFrames just like when we used `find_all` on the `soup` object.
#
len(dataframe_list)
# Finally we can pick the DataFrame we need out of the list.
#
dataframe_list[5]
# We can also use the `match` parameter to select the specific table we want. If the table contains a string matching the text it will be read.
#
pd.read_html(url, match="10 most densely populated countries", flavor='bs4')[0]
dataframe_list[1]
# ## Authors
#
# <NAME>
#
# ### Other Contributors
#
# <NAME>
#
# ## Change Log
#
# | Date (YYYY-MM-DD) | Version | Changed By | Change Description |
# | ----------------- | ------- | -------------------------------------------------------- | ------------------ |
# | 2020-10-17 | 0.1 | <NAME> Created initial version of the lab | |
#
# Copyright © 2020 IBM Corporation. This notebook and its source code are released under the terms of the [MIT License](https://cognitiveclass.ai/mit-license?cm_mmc=Email_Newsletter-_-Developer_Ed%2BTech-_-WW_WW-_-SkillsNetwork-Courses-IBMDeveloperSkillsNetwork-PY0101EN-SkillsNetwork-19487395&cm_mmca1=000026UJ&cm_mmca2=10006555&cm_mmca3=M12345678&cvosrc=email.Newsletter.M12345678&cvo_campaign=000026UJ&cm_mmc=Email_Newsletter-_-Developer_Ed%2BTech-_-WW_WW-_-SkillsNetwork-Courses-IBMDeveloperSkillsNetwork-PY0101EN-SkillsNetwork-19487395&cm_mmca1=000026UJ&cm_mmca2=10006555&cm_mmca3=M12345678&cvosrc=email.Newsletter.M12345678&cvo_campaign=000026UJ&cm_mmc=Email_Newsletter-_-Developer_Ed%2BTech-_-WW_WW-_-SkillsNetwork-Courses-IBMDeveloperSkillsNetwork-PY0101EN-SkillsNetwork-19487395&cm_mmca1=000026UJ&cm_mmca2=10006555&cm_mmca3=M12345678&cvosrc=email.Newsletter.M12345678&cvo_campaign=000026UJ&cm_mmc=Email_Newsletter-_-Developer_Ed%2BTech-_-WW_WW-_-SkillsNetwork-Courses-IBMDeveloperSkillsNetwork-PY0101EN-SkillsNetwork-19487395&cm_mmca1=000026UJ&cm_mmca2=10006555&cm_mmca3=M12345678&cvosrc=email.Newsletter.M12345678&cvo_campaign=000026UJ&cm_mmc=Email_Newsletter-_-Developer_Ed%2BTech-_-WW_WW-_-SkillsNetwork-Courses-IBMDeveloperSkillsNetwork-PY0220EN-SkillsNetwork-23455606&cm_mmca1=000026UJ&cm_mmca2=10006555&cm_mmca3=M12345678&cvosrc=email.Newsletter.M12345678&cvo_campaign=000026UJ&cm_mmc=Email_Newsletter-_-Developer_Ed%2BTech-_-WW_WW-_-SkillsNetwork-Courses-IBMDeveloperSkillsNetwork-PY0220EN-SkillsNetwork-23455606&cm_mmca1=000026UJ&cm_mmca2=10006555&cm_mmca3=M12345678&cvosrc=email.Newsletter.M12345678&cvo_campaign=000026UJ).
#
| 4_Python for Data Science, AI & Development/PY0101EN-5-4-WebScraping_Review_Lab.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
from PIL import Image
# Load the sample image and expose its pixel-access object.
im = Image.open(r'.\sample_image_v1\banana.png') # Can be many different formats.
pix = im.load()
display(im)
print(im.format, im.size, im.mode)
# + pycharm={"name": "#%%\n"}
# Collect the coordinates of every pixel that is not pure black.
# NOTE(review): assumes an RGB image whose background pixels are exactly
# (0, 0, 0) — confirm against the mode printed above (an RGBA image yields
# 4-tuples, so no pixel would ever compare equal to a 3-tuple).
width, height = im.size
non_empty_pixel = []
for x in range(width):
    for y in range(height):
        pixel = pix[x, y]
        if not pixel == (0, 0, 0):
            non_empty_pixel.append((x,y))
import numpy as np
a = np.asarray(non_empty_pixel)
lower_right = np.amax(a, axis=0)  # max x / max y over foreground pixels
upper_left = np.amin(a, axis=0)   # min x / min y over foreground pixels
print(lower_right)
print(upper_left)
# -
# COCO bounding boxes are [x_min, y_min, width, height]: the box is anchored at
# the UPPER-LEFT corner, not at the lower-right one as the original code had it.
coco_tuple = [*upper_left, *(lower_right - upper_left)]
coco_tuple
from PIL import ImageDraw
draw = ImageDraw.Draw(im)
# Draw the computed bounding box on the image for a visual sanity check.
draw.rectangle((tuple(upper_left), tuple(lower_right)),outline="red")
display(im)
| masks/.ipynb_checkpoints/mask_border-checkpoint.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] deletable=true editable=true
# # Generating new Objects w/ `Generative Adversarial Network (GAN)`
#
# ### Import dependencies
# + deletable=true editable=true
import os
import sys
import datetime as dt
import numpy as np
import tensorflow as tf
# %matplotlib inline
# -
# ### Loading datasets
# + deletable=true editable=true
from tensorflow.examples.tutorials.mnist import input_data
# Download (if needed) and load MNIST with one-hot encoded labels.
data_dir = 'datasets/MNIST/'
data = input_data.read_data_sets(data_dir, one_hot=True)
# Alternative dataset (Caltech-101) kept here for reference:
# from dataset import ImageDataset
# data_dir = 'datasets/101_ObjectCategories/'
# save_file = 'saved/data.pkl'
# data = ImageDataset(data_dir=data_dir, size=24, grayscale=True, flatten=True)
# data.create()
# data.save(save_file=save_file, force=True)
# # data = data.load(save_file=save_file)
# + [markdown] deletable=true editable=true
# ### Hyperparameters
# + deletable=true editable=true
# Inputs
img_size = 28  # data.size — side length of a (square) MNIST image
img_channel = 1  # data.channel — grayscale
img_size_flat = img_size * img_size * img_channel  # pixels per flattened image
print(f'Images »»» Size: {img_size:,}\tChannel: {img_channel:,}\tFlattened: {img_size_flat:,}')
# + deletable=true editable=true
# Network
kernel_size = 5  # convolution kernel side length
n_noise = 64  # dimensionality of the generator's input noise vector
keep_prob = 0.8  # dropout keep probability used in both networks
# + deletable=true editable=true
# Training
batch_size = 24
learning_rate = .01
save_interval = 100  # iterations between checkpoints / summary writes
log_interval = 1000  # iterations between sample-image plots
iterations = 10000
# + [markdown] deletable=true editable=true
# ### Helpers
# + deletable=true editable=true
import matplotlib.pyplot as plt
def visualize(imgs, name=None, smooth=False, **kwargs):
    """Show a square grid of images given as flattened pixel arrays.

    The grid side is floor(sqrt(len(imgs))); `smooth` switches to spline
    interpolation; extra kwargs go straight to `imshow` (e.g. cmap='gray').
    """
    side = int(np.sqrt(len(imgs)))
    fig, axes = plt.subplots(side, side)
    fig.subplots_adjust(hspace=0.3, wspace=0.3)
    # Interpolation is the same for every panel, so choose it once up front.
    interp = 'spline16' if smooth else 'nearest'
    for idx, axis in enumerate(axes.flat):
        axis.imshow(imgs[idx].reshape([img_size, img_size]),
                    interpolation=interp, **kwargs)
        # Tick marks add nothing to image panels — strip them.
        axis.set_xticks([])
        axis.set_yticks([])
    if name:
        plt.suptitle(name)
    plt.show()
# + deletable=true editable=true
def lrelu(x, alpha=0.2):
    """Leaky ReLU: pass positive values through, scale negatives by `alpha`."""
    scaled = tf.multiply(x, alpha)
    return tf.maximum(x, scaled)
# + deletable=true editable=true
def binary_cross_entropy(x, z, eps=1e-12):
    """Element-wise binary cross-entropy.

    x: target labels in [0, 1]; z: predicted probabilities (post-sigmoid).
    `eps` keeps the logarithms finite when z hits exactly 0 or 1.
    """
    return (-(x * tf.log(z + eps) + (1. - x) * tf.log(1. - z + eps)))
# + [markdown] deletable=true editable=true
# ### The Discriminator
# + deletable=true editable=true
def discriminator(X, reuse=None):
    """Score images as real (→1) or fake (→0).

    X is a batch of flattened images; it is reshaped internally to
    [-1, img_size, img_size, img_channel]. Returns a [batch, 1] tensor of
    sigmoid PROBABILITIES (not raw logits — note the final activation).
    Pass reuse=True to share variables with a previously built
    'discriminator' scope.
    """
    with tf.variable_scope('discriminator', reuse=reuse):
        net = X
        # reshape flat pixels back into an image tensor
        net = tf.reshape(net, [-1, img_size, img_size, img_channel])
        # conv + dropout (stride 2 halves the spatial resolution)
        net = tf.layers.conv2d(net, filters=64, kernel_size=5, strides=2, padding='SAME', activation=lrelu)
        net = tf.nn.dropout(net, keep_prob=keep_prob)
        # conv + dropout
        net = tf.layers.conv2d(net, filters=64, kernel_size=5, strides=1, padding='SAME', activation=lrelu)
        net = tf.nn.dropout(net, keep_prob=keep_prob)
        # conv + dropout
        net = tf.layers.conv2d(net, filters=64, kernel_size=5, strides=1, padding='SAME', activation=lrelu)
        net = tf.nn.dropout(net, keep_prob=keep_prob)
        # flatten
        net = tf.contrib.layers.flatten(net)
        # 2 fully connected layers; the last one emits a single probability
        net = tf.layers.dense(net, units=128, activation=lrelu)
        net = tf.layers.dense(net, units=1, activation=tf.nn.sigmoid)
    return net
# + [markdown] deletable=true editable=true
# ### The Generator
# + deletable=true editable=true
def generator(noise, reuse=None, is_training=False):
    """Map a batch of noise vectors to generated images.

    Upsamples 4x4 -> 7x7 -> 14x14 -> 28x28 via a resize plus strided
    transposed convolutions (dimensions sized for 28x28 MNIST digits).
    Returns a [batch, 28, 28, img_channel] tensor with values in (0, 1)
    from the final sigmoid. Pass reuse=True to share a previously built
    'generator' scope; is_training toggles batch-norm statistics updates.
    """
    decay = 0.99
    with tf.variable_scope('generator', reuse=reuse):
        net = noise
        d1 = 4
        d2 = 1
        # fully connected + dropout + batch norm
        net = tf.layers.dense(net, units=d1*d1*d2, activation=lrelu)
        net = tf.nn.dropout(net, keep_prob=keep_prob)
        net = tf.contrib.layers.batch_norm(net, decay=decay, is_training=is_training)
        # reshape to a tiny image + resize up to 7x7
        net = tf.reshape(net, shape=[-1, d1, d1, d2])
        net = tf.image.resize_images(net, size=[7, 7])
        # conv transpose + dropout + batch_norm (7x7 -> 14x14)
        net = tf.layers.conv2d_transpose(net, filters=64, kernel_size=5, strides=2, padding='SAME', activation=lrelu)
        net = tf.nn.dropout(net, keep_prob=keep_prob)
        net = tf.contrib.layers.batch_norm(net, decay=decay, is_training=is_training)
        # conv transpose + dropout + batch_norm (14x14 -> 28x28)
        net = tf.layers.conv2d_transpose(net, filters=64, kernel_size=5, strides=2, padding='SAME', activation=lrelu)
        net = tf.nn.dropout(net, keep_prob=keep_prob)
        net = tf.contrib.layers.batch_norm(net, decay=decay, is_training=is_training)
        # conv transpose + dropout + batch_norm
        net = tf.layers.conv2d_transpose(net, filters=64, kernel_size=5, strides=1, padding='SAME', activation=lrelu)
        net = tf.nn.dropout(net, keep_prob=keep_prob)
        net = tf.contrib.layers.batch_norm(net, decay=decay, is_training=is_training)
        # Final projection down to the data's channel count. The original used
        # filters=64 here, which mismatches the real images' single channel and
        # breaks tf.summary.image (it accepts only 1-, 3- or 4-channel tensors).
        net = tf.layers.conv2d_transpose(net, filters=img_channel, kernel_size=5, strides=1, padding='SAME', activation=tf.nn.sigmoid)
    return net
# + deletable=true editable=true
# Discard any previously built graph, then declare the model inputs.
tf.reset_default_graph()
X = tf.placeholder(tf.float32, shape=[None, img_size_flat])  # real images, flattened
noise = tf.placeholder(tf.float32, shape=[None, n_noise])    # generator input noise
# + deletable=true editable=true
G = generator(noise, is_training=True)   # generated images
Dx = discriminator(X, reuse=None)        # discriminator's verdict on real data
Dg = discriminator(G, reuse=True)        # verdict on generated data (shared weights)
# log ops
print(f'{G}\n{Dx}\n{Dg}')
# + [markdown] deletable=true editable=true
# ### Loss function
# + deletable=true editable=true
# Discriminator's loss (Real->rated highly, Fake->rated poorly).
# The discriminator ends in a single sigmoid unit, so Dx/Dg are probabilities,
# not multi-class logits: softmax over one logit is constantly 1, making the
# original softmax cross-entropy identically zero (no gradient). Use the
# element-wise binary cross-entropy helper on the probabilities instead.
loss_d_real = binary_cross_entropy(tf.ones_like(Dx), Dx)
loss_d_fake = binary_cross_entropy(tf.zeros_like(Dg), Dg)
loss_d = tf.reduce_mean(0.5 * (loss_d_real + loss_d_fake), name='loss_d')
# Generator's loss (Generator->rated highly): score the discriminator's verdict
# on fake samples (Dg) — the original mistakenly scored the raw image tensor G.
loss_g = tf.reduce_mean(binary_cross_entropy(tf.ones_like(Dg), Dg), name='loss_g')
# + [markdown] deletable=true editable=true
# ### Optimizer & Regularizer
# + deletable=true editable=true
# Trainable variables for generator & discriminator, selected by scope name.
d_vars = [var for var in tf.trainable_variables() if var.name.startswith('discriminator')]
g_vars = [var for var in tf.trainable_variables() if var.name.startswith('generator')]
# Regularizer for generator & discriminator.
# NOTE(review): this L2-regularizes *every* trainable variable in each scope,
# biases and batch-norm parameters included — confirm that is intended.
regularizer = tf.contrib.layers.l2_regularizer(scale=1e-6)
d_reg = tf.contrib.layers.apply_regularization(regularizer=regularizer, weights_list=d_vars)
g_reg = tf.contrib.layers.apply_regularization(regularizer=regularizer, weights_list=g_vars)
# We have to provide the update_ops to our optimizers when applying batch normalization
update_ops = tf.get_collection(key=tf.GraphKeys.UPDATE_OPS)
with tf.control_dependencies(control_inputs=update_ops):
    # Separate step counters so each network's progress is tracked independently.
    d_global_step = tf.Variable(0, trainable=False, name='d_global_step')
    g_global_step = tf.Variable(0, trainable=False, name='g_global_step')
    # Optimizer for Discriminator (its loss + L2 penalty, only its own variables)
    optimizer_d = tf.train.RMSPropOptimizer(learning_rate=learning_rate)
    optimizer_d = optimizer_d.minimize(loss_d + d_reg, global_step=d_global_step, var_list=d_vars)
    # Optimizer for Generator
    optimizer_g = tf.train.RMSPropOptimizer(learning_rate=learning_rate)
    optimizer_g = optimizer_g.minimize(loss_g + g_reg, global_step=g_global_step, var_list=g_vars)
# + [markdown] deletable=true editable=true
# ## Running the Computational Graph
# + deletable=true editable=true
# Create the session and initialize all graph variables.
sess = tf.Session()
init = tf.global_variables_initializer()
sess.run(init)
# + [markdown] deletable=true editable=true
# ### Tensorboard
# + deletable=true editable=true
# Tensorboard & Model's directory
tensorboard_dir = 'tensorboard/generate/gan/'
logdir = os.path.join(tensorboard_dir, 'log')
save_path = 'models/generate/gan/'
save_model = os.path.join(save_path, 'model.ckpt')
# Summary
# NOTE(review): loss_d_real / loss_d_fake are per-example tensors as defined
# above, while tf.summary.scalar expects rank-0 tensors — confirm these run.
tf.summary.scalar('loss_d_real', loss_d_real)
tf.summary.scalar('loss_d_fake', loss_d_fake)
tf.summary.scalar('loss_d', loss_d)
tf.summary.scalar('loss_g', loss_g)
# Non-training generator sharing the trained weights, used for sampling.
gen_img = generator(noise, reuse=True, is_training=False)
# NOTE(review): tf.summary.image accepts only 1-, 3- or 4-channel images —
# confirm the generator's output channel count matches.
tf.summary.image('gen_img', gen_img, max_outputs=6)
merged = tf.summary.merge_all()
# Saver & Writer
saver = tf.train.Saver()
writer = tf.summary.FileWriter(logdir=logdir, graph=sess.graph)
# + deletable=true editable=true
# Restore the latest checkpoint if the directory exists; otherwise create it.
if tf.gfile.Exists(save_path):
    try:
        sys.stdout.write('INFO: Attempting to restore last checkpoint.\n')
        last_ckpt = tf.train.latest_checkpoint(save_path)
        saver.restore(sess=sess, save_path=last_ckpt)
        sys.stdout.write(f'INFO: Restored last checkpoint from {last_ckpt}\n')
        sys.stdout.flush()
    except Exception as e:
        # Restoration is best-effort: training simply starts from scratch.
        sys.stderr.write(f'ERR: Could not restore checkpoint. {e}')
        sys.stderr.flush()
else:
    tf.gfile.MakeDirs(save_path)
    sys.stdout.write(f'INFO: Created checkpoint directory: {save_path}\n')
    sys.stdout.flush()
# + [markdown] deletable=true editable=true
# ### Training
# + deletable=true editable=true
# Adversarial training loop: compute both losses, pause whichever network is
# too far ahead, update the other(s), and periodically checkpoint, write
# summaries, and plot generated samples.
start_time = dt.datetime.now()
for i in range(iterations):
    train_d = True
    train_g = True
    # Next batch of real images (labels discarded) plus a fresh noise batch.
    X_batch = data.train.next_batch(batch_size=batch_size)[0]
    n = np.random.uniform(low=0.0, high=1.0, size=[batch_size, n_noise])
    feed_dict = {X: X_batch, noise: n}
    # Run the losses
    _d_real, _d_fake, _loss_d, _loss_g, _d_global, _g_global = sess.run([loss_d_real, loss_d_fake,
                                                                         loss_d, loss_g,
                                                                         d_global_step,
                                                                         g_global_step],
                                                                        feed_dict=feed_dict)
    _d_real, _d_fake = np.mean(_d_real), np.mean(_d_fake)
    # Stop training discriminator while the generator lags badly behind
    if _loss_g * 1.5 < _loss_d:
        train_d = False
        sys.stderr.write(f'\nDiscriminator stopped training!'
                         f'\tReal: {_d_real:.2f}\tFake: {_d_fake:.2f}'
                         f'\tLoss: {_loss_d:.4f}')
        sys.stderr.flush()
    # Stop training generator while the discriminator lags badly behind
    if _loss_d * 2 < _loss_g:
        train_g = False
        sys.stderr.write(f'\nGenerator stopped training!'
                         f'\tLoss: {_loss_g:.4f}')
        sys.stderr.flush()
    # Train discriminator
    if train_d:
        sess.run(optimizer_d, feed_dict=feed_dict)
    # Train generator
    if train_g:
        sess.run(optimizer_g, feed_dict=feed_dict)
    # Save model & Graph summary
    if i % save_interval == 0:
        saver.save(sess=sess, save_path=save_model, global_step=g_global_step)
        summary = sess.run(merged, feed_dict=feed_dict)
        writer.add_summary(summary=summary, global_step=_g_global)
    # Log generated images @ intervals
    if i % log_interval == 0:
        # Fixes: `hight=` was a typo (TypeError at runtime), and calling
        # generator(...) here rebuilt graph ops on every interval — and would
        # raise on variable-scope reuse. Run the `gen_img` op that was built
        # once at graph-construction time instead.
        randoms = np.random.uniform(low=0.0, high=1.0, size=[9, n_noise])
        imgs = sess.run(gen_img, feed_dict={noise: randoms})
        visualize(imgs, name=f'Iteration: {i+1}', smooth=True, cmap='gray')
    sys.stdout.write(f'\rIter: {i+1:,}\tg_Global: {_g_global:,}\td_Global: {_d_global:,}'
                     f'\tDiscriminator »»» Real: {_d_real:.2f}\tFake: {_d_fake:.2f}\tLoss: {_loss_d:.2f}'
                     f'\tGenerator »»» Loss: {_loss_g:.2f}')
    sys.stdout.flush()
# + deletable=true editable=true
# + deletable=true editable=true
| generate-objects-gan.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import labnote as lb
import os
# +
# Hyperparameters come from the command line (labnote's ArgumentParser).
parser = lb.ArgumentParser()
parser.add_argument('--epochs',type=int,default=20)
parser.add_argument('--batch_size',type=int,default=128)
parser.add_argument('--gpu_dev',type=str,default='0')
params = parser.parse_args()
# +
# Extra (non-CLI) parameter attached to the parsed namespace.
params.num_classes = 10
script_name=None # <- required only for jupyter with password authentification
if lb.utils.is_executed_on_ipython():
    script_name = 'keras_mnist_sample.ipynb'
# Create the experiment logger and register the hyperparameters with it.
note = lb.Note('./log_dir',script_name=script_name)
note.set_params(params)
# -
# Pin the GPU selection before TensorFlow/Keras gets imported below.
os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
os.environ["CUDA_VISIBLE_DEVICES"] = note.params.gpu_dev
# +
'''Trains a simple deep NN on the MNIST dataset.
Gets to 98.40% test accuracy after 20 epochs
(there is *a lot* of margin for parameter tuning).
2 seconds per epoch on a K520 GPU.
'''
import keras
from keras.datasets import mnist
from keras.models import Sequential
from keras.layers import Dense, Dropout
from keras.optimizers import RMSprop
from keras.callbacks import ModelCheckpoint, CSVLogger
# +
# the data, split between train and test sets
(x_train, y_train), (x_test, y_test) = mnist.load_data()
# Flatten the 28x28 images to 784-vectors and scale pixel values into [0, 1].
x_train = x_train.reshape(60000, 784)
x_test = x_test.reshape(10000, 784)
x_train = x_train.astype('float32')
x_test = x_test.astype('float32')
x_train /= 255
x_test /= 255
print(x_train.shape[0], 'train samples')
print(x_test.shape[0], 'test samples')
# convert class vectors to binary class matrices
y_train = keras.utils.to_categorical(y_train, note.params.num_classes)
y_test = keras.utils.to_categorical(y_test, note.params.num_classes)
# A simple 784-512-512-10 MLP with dropout after each hidden layer.
model = Sequential()
model.add(Dense(512, activation='relu', input_shape=(784,)))
model.add(Dropout(0.2))
model.add(Dense(512, activation='relu'))
model.add(Dropout(0.2))
model.add(Dense(note.params.num_classes, activation='softmax'))
model.summary()
model.compile(loss='categorical_crossentropy',
              optimizer=RMSprop(),
              metrics=['accuracy'])
# -
# Persist the experiment note before training starts.
note.save(memo='sample code for keras mnist. I just want to explain how to use note with general deep learning framework.')
with note.record() as rec:
    # rec.dirname is a per-run directory created by labnote for artifacts.
    print(rec.dirname)
    csv_name = os.path.join(rec.dirname,"history.csv")
    model_name = os.path.join(rec.dirname,"mnist_models.pkl")
    cb_csv = CSVLogger(csv_name)
    # NOTE(review): `period=` was deprecated in later Keras releases in favor
    # of `save_freq=` — confirm against the installed Keras version.
    cb_mcp = ModelCheckpoint(model_name,period=5)
    history = model.fit(x_train, y_train,
                        batch_size=note.params.batch_size,
                        epochs=note.params.epochs,
                        verbose=1,
                        validation_data=(x_test, y_test),
                        callbacks=[cb_csv,cb_mcp]
                        )
    score = model.evaluate(x_test, y_test, verbose=0)
    with open(os.path.join(rec.dirname,"score.txt"),'w') as f:
        f.write("Test loss: %f\n"%score[0])
        f.write("Test accuracy: %f\n"%score[1])
    # Keep the run directory around so we can read artifacts back afterwards.
    last_exp = rec.dirname
# Read the saved scores back from the last run's directory and echo them.
with open(os.path.join(last_exp,"score.txt")) as f:
    for l in f:
        print(l)
# NOTE(review): exit() terminates the interpreter (and kills a Jupyter
# kernel) — confirm this is intentional for the notebook form.
exit()
| samples/keras_mnist_sample.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: sklift-env
# language: python
# name: sklift-env
# ---
# # 🎯 Uplift modeling `metrics`
#
# <br>
# <center>
# <a href="https://colab.research.google.com/github/maks-sh/scikit-uplift/blob/master/notebooks/uplift_metrics_tutorial.ipynb">
# <img src="https://colab.research.google.com/assets/colab-badge.svg">
# </a>
# <br>
# <b><a href="https://github.com/maks-sh/scikit-uplift/">SCIKIT-UPLIFT REPO</a> | </b>
# <b><a href="https://scikit-uplift.readthedocs.io/en/latest/">SCIKIT-UPLIFT DOCS</a> | </b>
# <b><a href="https://scikit-uplift.readthedocs.io/en/latest/user_guide/index.html">USER GUIDE</a></b>
# <br>
# </center>
# +
import sys
# install uplift library scikit-uplift and other libraries
# !{sys.executable} -m pip install scikit-uplift dill catboost
# -
# # 📝 Load data
#
# We are going to use a `Lenta dataset` from the BigTarget Hackathon hosted in summer 2020 by Lenta and Microsoft.
#
# Lenta is a Russian food retailer.
#
# ### Data description
#
# ✏️ Dataset can be loaded from `sklift.datasets` module using `fetch_lenta` function.
#
# Read more about dataset <a href="https://www.uplift-modeling.com/en/latest/api/datasets/fetch_lenta.html">in the api docs</a>.
#
# This is an uplift modeling dataset containing data about Lenta's customers' grocery shopping, marketing campaign communications as `treatment` and store visits as `target`.
#
# #### ✏️ Major columns:
#
# - `group` - treatment / control flag
# - `response_att` - binary target
# - `CardHolder` - customer id
# - `gender` - customer gender
# - `age` - customer age
# +
from sklift.datasets import fetch_lenta
# returns sklearn Bunch object
# with data, target, treatment keys
# data features (pd.DataFrame), target (pd.Series), treatment (pd.Series) values
dataset = fetch_lenta()
# -
# Sanity-check the loaded pieces: features, target and treatment must align.
print(f"Dataset type: {type(dataset)}\n")
print(f"Dataset features shape: {dataset.data.shape}")
print(f"Dataset target shape: {dataset.target.shape}")
print(f"Dataset treatment shape: {dataset.treatment.shape}")
# # 📝 EDA
import pandas as pd
# Peek at the first and last five rows in one frame. DataFrame.append was
# deprecated in pandas 1.4 and removed in 2.0, so use pd.concat instead.
pd.concat([dataset.data.head(), dataset.data.tail()])
# ### 🤔 target share for `treatment / control`
# +
pd.crosstab(dataset.treatment, dataset.target, normalize='index')
# +
# make treatment binary
treat_dict = {
    'test': 1,
    'control': 0
}
dataset.treatment = dataset.treatment.map(treat_dict)
# +
# fill NaNs in the categorical feature `gender`
# for CatBoostClassifier (it cannot handle NaN in categorical columns)
dataset.data['gender'] = dataset.data['gender'].fillna(value='Не определен')
print(dataset.data['gender'].value_counts(dropna=False))
# -
# ### ✂️ train test split
#
# - stratify by two columns: treatment and target.
#
# `Intuition:` In a binary classification problem definition we stratify train set by splitting target `0/1` column. In uplift modeling we have two columns instead of one.
# +
from sklearn.model_selection import train_test_split
# Stratify on the (treatment, target) pair so both the communication flag and
# the response rate stay balanced across train and validation.
stratify_cols = pd.concat([dataset.treatment, dataset.target], axis=1)
X_train, X_val, trmnt_train, trmnt_val, y_train, y_val = train_test_split(
    dataset.data,
    dataset.treatment,
    dataset.target,
    stratify=stratify_cols,
    test_size=0.3,
    random_state=42
)
print(f"Train shape: {X_train.shape}")
print(f"Validation shape: {X_val.shape}")
# -
# # 👾 Class Transformation uplift model
#
# `Class transformation` method is described <a href="https://www.uplift-modeling.com/en/latest/user_guide/models/revert_label.html"> here</a>
#
# The class transformation method `may` be used when the treatment data is unbalanced. In this case one gets not a true uplift score but a *ranking* score that is still useful for ordering objects.
# +
from sklift.models import ClassTransformation
from catboost import CatBoostClassifier
# Base classifier: CatBoost handles the categorical `gender` column natively.
estimator = CatBoostClassifier(verbose=100,
                               cat_features=['gender'],
                               random_state=42,
                               thread_count=1)
ct_model = ClassTransformation(estimator=estimator)
# -
# Fit on features, the binary target and the binary treatment flag.
ct_model.fit(
    X=X_train,
    y=y_train,
    treatment=trmnt_train
)
# ### Save model
# +
import dill
# dill can serialize the full wrapper (fitted estimator included).
with open("model.dill", 'wb') as f:
    dill.dump(ct_model, f)
# -
# ### Uplift prediction
# One (ranking) uplift score per validation object.
uplift_ct = ct_model.predict(X_val)
# # 🚀🚀🚀 Uplift metrics
# ## 🚀 `uplift@k`
#
# - uplift at first k%
# - usually falls between [0; 1] depending on k, model quality and data
#
#
# ### `uplift@k` = `target mean at k% in the treatment group` - `target mean at k% in the control group`
#
# ___
#
# How to count `uplift@k`:
#
# 1. sort by predicted uplift
# 2. select first k%
# 3. count target mean in the treatment group
# 4. count target mean in the control group
# 5. substract the mean in the control group from the mean in the treatment group
#
# ---
#
# Code parameter options:
#
# - `strategy='overall'` - sort by uplift treatment and control together
# - `strategy='by_group'` - sort by uplift treatment and control separately
# +
from sklift.metrics import uplift_at_k
# k = 10% — evaluate uplift among the top-decile of predicted scores.
k = 0.1
# strategy='overall' sort by uplift treatment and control together
uplift_overall = uplift_at_k(y_val, uplift_ct, trmnt_val, strategy='overall', k=k)
# strategy='by_group' sort by uplift treatment and control separately
uplift_bygroup = uplift_at_k(y_val, uplift_ct, trmnt_val, strategy='by_group', k=k)
print(f"uplift@{k * 100:.0f}%: {uplift_overall:.4f} (sort groups by uplift together)")
print(f"uplift@{k * 100:.0f}%: {uplift_bygroup:.4f} (sort groups by uplift separately)")
# -
# ## 🚀 `uplift_by_percentile` table
#
# Count metrics for each percentile in data in descending order by uplift prediction (by rows):
#
# - `n_treatment` - treatment group size in the one percentile
# - `n_control` - control group size in the one percentile
# - `response_rate_treatment` - target mean in the treatment group in the one percentile
# - `response_rate_control` - target mean in the control group in the one percentile
# - `uplift = response_rate_treatment - response_rate_control` in the one percentile
#
# ___
#
# Code parameter options are:
#
# - `strategy='overall'` - sort by uplift treatment and control groups together
# - `strategy='by_group'` - sort by uplift treatment and control groups separately
# - `total=True` - show total metric on full data
# - `std=True` - show metrics std by row
# +
from sklift.metrics import uplift_by_percentile
# Per-decile diagnostics: group sizes, response rates and uplift in each bin.
uplift_by_percentile(y_val, uplift_ct, trmnt_val,
                     strategy='overall',
                     total=True, std=True, bins=10)
# -
# ## 🚀 `weighted average uplift `
#
# - counts uplift on full data
# - uses results from `uplift_by_percentile` table
# - result depends on number of bins
#
# ### `weighted average uplift` = `sum of uplift by percentile weighted on the treatment group size`
#
# +
from sklift.metrics import weighted_average_uplift
# Single-number summary: per-bin uplift weighted by treatment-group size.
uplift_full_data = weighted_average_uplift(y_val, uplift_ct, trmnt_val, bins=10)
print(f"average uplift on full data: {uplift_full_data:.4f}")
# -
# ## 🚀 `uplift_by_percentile` plot
#
# - visualize results of `uplift_by_percentile` table
#
# Two ways to plot:
#
# - line plot `kind='line'`
# - bar plot `kind='bar'`
#
# +
from sklift.viz import plot_uplift_by_percentile
# line plot
plot_uplift_by_percentile(y_val, uplift_ct, trmnt_val, strategy='overall', kind='line');
# -
# bar plot — same data as above, different rendering
plot_uplift_by_percentile(y_val, uplift_ct, trmnt_val, strategy='overall', kind='bar');
# ## 🚀 `Qini curve`
#
# The curve plots the absolute incremental outcome of the treated group compared to group with no treatment.
#
#
# plot Qini curve:
# - `blue line` is a `real Qini curve` based on data.
# - `red line` is an `ideal Qini curve` based on data. Code: `perfect=True`
# - `grey line` is a `random Qini curve` based on data
#
#
# ## 🚀 `AUQC` (`area under Qini curve` or `Qini coefficient`)
#
# `Qini coefficient` = `light blue area between the real Qini curve and the random Qini curve normalized on area between the random and the ideal line`
#
# <img src="https://habrastorage.org/getpro/habr/upload_files/18a/f90/b30/18af90b30dbdff84e1b3a8ab77195101.png" width="400" alt="qini_curve">
#
#
# - metric is printed at the title of the Qini curve plot
# - can be called as a separate function
# +
from sklift.viz import plot_qini_curve
# with ideal Qini curve (red line)
# perfect=True
plot_qini_curve(y_val, uplift_ct, trmnt_val, perfect=True);
# -
# no ideal Qini curve
# only real Qini curve
# perfect=False
plot_qini_curve(y_val, uplift_ct, trmnt_val, perfect=False);
# +
from sklift.metrics import qini_auc_score
# AUQC = area under Qini curve = Qini coefficient (higher is better)
auqc = qini_auc_score(y_val, uplift_ct, trmnt_val)
print(f"Qini coefficient on full data: {auqc:.4f}")
# -
# ## 🚀 `Uplift curve`
#
# The Uplift curve plots incremental uplift.
#
#
# - `blue line` is a `real Uplift curve` based on data.
# - `red line` is an `ideal Uplift curve` based on data. Code: `perfect=True`
# - `grey line` is a `random Uplift curve` based on data.
#
#
# ## 🚀 `AUUQ` (`area under uplift curve`)
#
# - `Area under uplift curve` = blue area between the real Uplift curve and the random Uplift curve
# - appears at the title of the Uplift curve plot
# - can be called as a separate function
#
# +
from sklift.viz import plot_uplift_curve
# with ideal curve
# perfect=True
plot_uplift_curve(y_val, uplift_ct, trmnt_val, perfect=True);
# -
# only real curve
# perfect=False
plot_uplift_curve(y_val, uplift_ct, trmnt_val, perfect=False);
# +
from sklift.metrics import uplift_auc_score
# AUUC = area under uplift curve (higher is better)
auuc = uplift_auc_score(y_val, uplift_ct, trmnt_val)
print(f"Uplift auc score on full data: {auuc:.4f}")
# -
| notebooks/uplift_metrics_tutorial.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Example of correctly formatting data
#
# For use in decoding (see "Examples_decoders_hc" and "Examples_kf_decoder_hc"), we need the following format of inputs:
# - Neural data should be a matrix of size "number of time bins" x "number of neurons", where each entry is the firing rate of a given neuron in a given time bin
# - The output you are decoding should be a matrix of size "number of time bins" x "number of features you are decoding"
#
# In this example, we load Matlab data that contains
# - The spike times of all neurons. In Matlab, "spike_times" is a cell of size "number of neurons" x 1. Within spike_times{i} is a vector containing all the spike times of neuron i.
# - A continuous stream of the output variables. In this example, we are aiming to decode position. In Matlab, "pos" is a matrix of size "number of recorded time points" x 2 (x and y positions were recorded) that contains the x and y position components at all time points. Time points that were not recorded have NaN values. "pos_times" is a vector that states the time at all recorded time points.
#
# We will put this data in the format described above, with the help of the functions "bin_spikes" and "bin_output" that are in the file "preprocessing_funcs.py"
#
#
#
# ## Import packages and functions
# Note that you may need to specify the path below
# + jupyter={"outputs_hidden": false}
###Import standard packages###
import numpy as np
from scipy import io
import sys
###Import functions for binning data for preprocessing###
from Neural_Decoding.preprocessing_funcs import bin_spikes
from Neural_Decoding.preprocessing_funcs import bin_output
# -
# ## Load Data
# The data for this example can be downloaded at this [link](https://www.dropbox.com/s/94dhsgnx2cfs3jx/hc_data_raw.mat?dl=0)
#
# It is the hc-2 dataset from [crcns](https://crcns.org/data-sets/hc/hc-2). Specifically, we use the dataset "ec014.333"
# + jupyter={"outputs_hidden": false}
###Load Data###
folder='' #ENTER THE FOLDER THAT YOUR DATA IS IN
# folder='/home/jglaser/Data/DecData/'
data=io.loadmat(folder+'hc_data_raw.mat')
spike_times=data['spike_times'] #Load spike times of all neurons
pos=data['pos'] #Load x and y positions
pos_times=data['pos_times'][0] #Load times at which positions were recorded
# NOTE(review): scipy's loadmat wraps Matlab cells/vectors in extra array
# dimensions; spike_times is cleaned up in the binning cell below.
# -
# ## User Inputs
# + jupyter={"outputs_hidden": false}
# Binning parameters used by bin_spikes / bin_output below.
dt=.2 #Size of time bins (in seconds)
t_start=pos_times[0] #Time to start extracting data - here the first time position was recorded
t_end=5608 #pos_times[-1] #Time to finish extracting data - when looking through the dataset, the final position was recorded around t=5609, but the final spikes were recorded around t=5608
downsample_factor=1 #Downsampling of output (to make binning go faster). 1 means no downsampling.
# -
# ## Put data in binned format
#When loading the Matlab cell "spike_times", Python puts it in a format with an extra unnecessary dimension
#First, we will put spike_times in a cleaner format: an array of arrays
spike_times=np.squeeze(spike_times)
for i in range(spike_times.shape[0]):
    # Each element is a (n,1) Matlab vector; squeeze it down to a 1-D array.
    spike_times[i]=np.squeeze(spike_times[i])
# + jupyter={"outputs_hidden": false}
###Preprocessing to put spikes and output in bins###
#Bin neural data using "bin_spikes" function
#Result: matrix of size "number of time bins" x "number of neurons"
neural_data=bin_spikes(spike_times,dt,t_start,t_end)
#Bin output (position) data using "bin_output" function
#Result: matrix of size "number of time bins" x 2 (x and y position)
pos_binned=bin_output(pos,pos_times,dt,t_start,t_end,downsample_factor)
# -
# ## Save Data
# + jupyter={"outputs_hidden": false}
import pickle
data_folder='' #FOLDER YOU WANT TO SAVE THE DATA TO
# data_folder='/home/jglaser/Data/DecData/'
with open(data_folder+'example_data_hc.pickle','wb') as f:
pickle.dump([neural_data,pos_binned],f)
# + jupyter={"outputs_hidden": true}
| Examples_hippocampus/Example_format_data_hc.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Efficient-simulation-of-non-markovian-dynamics-using-Max-Ent-states
#
# One of the fundamental problems in Quantum Information Theory and Quantum Computing is the accurate representation of quantum composite systems, in particular their states and dynamics. Such composite systems are uniquely represented by mathematical objects, the density operators, which contain the information of all possible n-body correlations present. A notable exception is the Gaussian dynamics. These dynamics, achievable for bosonic systems, are closed over the set of Gaussian states. This set of Gaussian states is parameterizable in terms of pairwise correlations, thus forming an $n$- or $n^2$-dimensional Riemannian differentiable manifold, with a metric given by the Hilbert-Schmidt inner product of operators. This has motivated the search for generalizations of the Gaussian states to other bosonic systems.
#
# In this work, a generalization based on the Max-Ent property of Gaussian states is proposed, in which we will consider families of states that maximize the entropy of the system (Max-Ent principle), under the restriction of fixing the mean values of a certain set of independent observables. Strategies to build approximations within this family, which represent arbitrary states, will then be discussed.
#
# As an application case, we will study the relative entropy between the states that result from the dynamics in the Dicke model and their corresponding estimates as MaxEnt states defined by their local values and two-body correlations.
#
# * We'll compare rho(t) with its max-ent state associated with a basis of observables,
# * compare rho(t) with its projected state, using the scalar product corresponding to the initial state, associated to a basis of observables,
# * and compare rho(t) with its projected state, using the scalar product corresponding to the instantaneous state, associated to a basis of observables.
#
#
# +
import qutip
import matplotlib.pyplot as plt
import numpy as np
import scipy.optimize as opt
import pickle
dim=20  # Fock-space truncation dimension for the bosonic mode
def prod_basis(b1, b2):
    """Return all pairwise tensor products of the operators in b1 and b2.

    The outer iteration runs over b1, so the result is ordered as
    [b1[0] (x) b2[0], b1[0] (x) b2[1], ..., b1[-1] (x) b2[-1]].
    """
    products = []
    for left in b1:
        for right in b2:
            products.append(qutip.tensor(left, right))
    return products
def scalar_prod(op1, op2, rho0=None):
    """Symmetrized inner product Tr[(op1^dag op2 + op2^dag op1) rho0].

    Parameters
    ----------
    op1, op2 : qutip.Qobj
        Operators on the same Hilbert space.
    rho0 : qutip.Qobj, optional
        Reference state defining the metric; defaults to the maximally
        mixed state on op1's space.

    Returns None when the operators live on incompatible spaces.
    """
    # BUG FIX: the original guard compared op1.dims[0][0] with itself
    # (always equal), so incompatible operators were never rejected.
    if op1.dims[0][0] != op2.dims[0][0]:
        return None
    if rho0 is None:
        rho0 = qutip.qeye(op1.dims[0]) / op1.dims[0][0]
    return ((op1.dag() * op2 + op2.dag() * op1) * rho0).tr()
def base_orto(ops, rho0):
    """Gram-Schmidt orthonormalization of ops w.r.t. scalar_prod(., ., rho0).

    Each operator is stripped of its components along the already-processed
    ones and normalized, so the returned list is orthonormal in the metric
    defined by rho0.
    """
    # FIX: dropped the original's unused local `dim = ops[0].dims[0][0]`.
    base = []
    for op in ops:
        coeff = [scalar_prod(op2, op, rho0) for op2 in base]
        op_mod = op - sum([c * op2 for c, op2 in zip(coeff, base)])
        op_mod = op_mod / np.sqrt(scalar_prod(op_mod, op_mod, rho0))
        base.append(op_mod)
    return base
def proj_op(K, base, rho0):
    """Project operator K onto the span of `base` in the rho0 metric."""
    projection = 0
    for element in base:
        projection = projection + scalar_prod(element, K, rho0) * element
    return projection
def logM(rho):
    """Matrix logarithm of rho via its spectral decomposition.

    Eigenvalues <= 0 are skipped, i.e. the logarithm is taken only on the
    support of rho.
    """
    vals, vecs = rho.eigenstates()
    supported = [(v, p) for v, p in zip(vals, vecs) if v > 0]
    return sum(np.log(v) * p * p.dag() for v, p in supported)
def sqrtM(rho):
    """Matrix square root of rho via its spectral decomposition.

    Uses |eigenvalue|**0.5, so tiny negative numerical eigenvalues do not
    introduce complex terms.
    """
    vals, vecs = rho.eigenstates()
    root = 0
    for v, p in zip(vals, vecs):
        root = root + (abs(v) ** .5) * p * p.dag()
    return root
def rel_entropy(rho, sigma):
    """Quantum relative entropy S(rho||sigma) = Tr[rho (log rho - log sigma)].

    Prints diagnostics when the trace acquires a sizable imaginary part,
    which signals that rho or sigma was not a valid positive state; the
    real part is returned regardless.
    """
    log_gap = logM(rho) - logM(sigma)
    value = (rho * log_gap).tr()
    if abs(value.imag) > 1.e-6:
        print("rho or sigma not positive")
        print(rho.eigenstates())
        print(sigma.eigenstates())
    return value.real
def bures(rho, sigma):
    """Bures-angle-like distance arccos(|Tr sqrt(rho) sqrt(sigma)|) / pi."""
    overlap = abs((sqrtM(rho) * sqrtM(sigma)).tr())
    # Clamp to [-1, 1] so numerical noise cannot push arccos out of domain.
    clamped = min(max(overlap, -1.), 1.)
    return np.arccos(clamped) / np.pi
def maxent_rho(rho, basis):
    """Max-Ent state closest to rho (in relative entropy) for `basis`.

    Minimizes S(rho || sigma(x)) over sigma(x) = exp(-sum_i x_i B_i)/Z,
    with the exponent hermitized before exponentiation.
    """
    def objective(coeffs, rho, basis):
        # Candidate Max-Ent state for the given Lagrange multipliers.
        generator = sum([-c * op for c, op in zip(coeffs, basis)])
        candidate = (.5 * (generator + generator.dag())).expm()
        candidate = candidate / candidate.tr()
        return rel_entropy(rho, candidate)

    solution = opt.minimize(objective, np.zeros(len(basis)), args=(rho, basis))
    # Rebuild the optimal state from the fitted multipliers.
    generator = sum([-c * op for c, op in zip(solution.x, basis)])
    sigma = (.5 * (generator + generator.dag())).expm()
    return sigma / sigma.tr()
def error_maxent_state(rho, basis, distance=bures):
    """Distance between rho and its Max-Ent approximation over `basis`.

    Returns None when the Max-Ent optimization fails (best-effort
    semantics of the original are kept).
    """
    try:
        sigma = maxent_rho(rho, basis)
        return distance(rho, sigma)
    except Exception as exc:
        # BUG FIX: narrowed from a bare `except:` (which also swallowed
        # KeyboardInterrupt/SystemExit) and report what actually failed.
        print("fail", exc)
        return None
def error_proj_state(rho, rho0, basis, distance=bures):
    """Distance between rho and exp(P[log rho]), the projected estimate.

    log(rho) is projected onto `basis` (orthonormalized in the rho0
    metric) and re-exponentiated. Returns None on failure, keeping the
    original best-effort semantics.
    """
    try:
        basis = base_orto(basis, rho0)
        sigma = proj_op(logM(rho), basis, rho0).expm()
        return distance(rho, sigma)
    except Exception as exc:
        # BUG FIX: narrowed from a bare `except:` (which also swallowed
        # KeyboardInterrupt/SystemExit) and report what actually failed.
        print("fail", exc)
        return None
# +
class Result(object):
    """Container for one simulation run.

    Holds the sampled times and states; the three approximation-error
    curves start as None and are filled in by simul().
    """

    def __init__(self, ts=None, states=None):
        self.ts = ts
        self.states = states
        # Error curves, populated after the simulation loop finishes.
        for curve in ("max_ent_app", "projrho0_app", "projrho_inst_app"):
            setattr(self, curve, None)
def simul(omega_bos=3, omega_s=3, temp=1, gaussian=False, deltat=10., tmax=500., distance=bures):
    """Simulate the boson-spin dynamics and score three approximations.

    Evolves an initial thermal-boson (x) maximally-mixed-spin state under
    H = omega_bos*N (x) 1 + omega_s*(1 (x) S) + coupling, in steps of
    `deltat` up to `tmax`, and computes for every sampled state its
    distance (`distance`) to (a) the Max-Ent state, (b) the state
    projected with the initial-state metric, (c) the state projected with
    the instantaneous product-state metric.

    Parameters
    ----------
    omega_bos, omega_s : float — boson / spin frequencies.
    temp : float — temperature of the initial bosonic thermal state.
    gaussian : bool — if True, use the smaller (Gaussian) observable basis
        and sigma_x as the spin Hamiltonian; otherwise the full Pauli
        basis and sigma_z.
    deltat, tmax : float — sampling step and total simulated time.
    distance : callable — distance function, defaults to `bures`.

    Side effects: pickles the Result to "<title>.pkl" in the working dir.
    Returns (result, title).
    """
    basis_bos = [qutip.qeye(dim), qutip.create(dim), qutip.create(dim).dag(), qutip.num(dim)]
    H_bos = qutip.tensor(qutip.num(dim), qutip.qeye(2))
    H_i = qutip.tensor(.5 * (qutip.create(dim) + qutip.destroy(dim)), qutip.sigmax())
    # Initial state: thermal boson state (x) maximally mixed spin.
    rho0 = qutip.tensor((-qutip.num(dim) / temp).expm(), qutip.qeye(2) / 2.)
    rho0 = rho0 / rho0.tr()
    # Observable basis (orthonormalized in the rho0 metric).
    if gaussian:
        basis_spin = [qutip.qeye(2), qutip.sigmax()]
        H_s = qutip.tensor(qutip.qeye(dim), qutip.sigmax())
    else:
        basis_spin = [qutip.qeye(2), qutip.sigmax(), qutip.sigmay(), qutip.sigmaz()]
        H_s = qutip.tensor(qutip.qeye(dim), qutip.sigmaz())
    basis = base_orto(prod_basis(basis_bos, basis_spin), rho0)
    # Hamiltonian
    H = omega_bos * H_bos + omega_s * H_s + H_i
    # ~10 solver points per fastest period per step.
    sampling = int(10 * max(1, omega_bos, omega_s) * deltat)
    # FIX: dropped the original's unused local `rho = rho0`.
    states = [rho0]
    ts = [0]
    for i in range(int(tmax / deltat)):
        evolution = qutip.mesolve(H, states[-1], np.linspace(0, deltat, sampling))
        states.append(evolution.states[-1])
        # BUG FIX: the original appended deltat*i, which duplicated t=0 and
        # tagged every state one step behind its actual time.
        ts.append(deltat * (i + 1))
    result = Result(ts, states)
    result.times = ts
    result.states = states
    result.max_ent_app = np.array([error_maxent_state(rho, basis, distance) for rho in states])
    result.projrho0_app = np.array([error_proj_state(rho, rho0, basis, distance) for rho in states])
    result.projrho_inst_app = np.array([error_proj_state(rho, qutip.tensor(rho.ptrace([0]), rho.ptrace([1])),
                                                         basis, distance) for rho in states])
    if gaussian:
        title = distance.__name__ + f" - Dinámica gaussiana dim={dim} wb={omega_bos} dw={abs(omega_s-omega_bos)} "
    else:
        title = distance.__name__ + f" - Dinámica no gaussiana dim={dim} wb={omega_bos} dw={abs(omega_s-omega_bos)} "
    with open(title + ".pkl", "wb") as f:
        pickle.dump(result, f)
    return result, title
# +
## Gaussian dynamics, resonant case (omega_bos == omega_s)
result, title = simul(omega_bos=3., omega_s=3., temp=1,
gaussian=True,
deltat=10., tmax=500.,
distance=bures)
# Error of each approximation scheme as a function of time.
plt.plot(result.times, result.max_ent_app, label="max-ent")
plt.plot(result.times, result.projrho0_app, label="proj rho0")
plt.plot(result.times, result.projrho_inst_app, label="proj rho(t)")
plt.xlabel("t")
plt.ylabel("Arccos(F)")  # Bures angle (normalized by pi)
plt.legend()
plt.title(title + f" dim={dim}")
plt.savefig(title + f" dim={dim}.svg")
# -
| thesis_notebook.ipynb |