code
stringlengths 38
801k
| repo_path
stringlengths 6
263
|
|---|---|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import string
import pandas as pd
import matplotlib.pyplot as plt

# ## Read training and testing data
data_train = pd.read_csv('data/kaggle/train.csv', index_col=0)
print('Shape of train: ', data_train.shape)
data_train.head()

data_test = pd.read_csv('data/kaggle/test_complete.csv', index_col=0)
print('Shape of test: ', data_test.shape)
data_test.head()

# The combined frame feeds the length/punctuation/uppercase statistics below.
data_combined = pd.concat([data_train, data_test], axis=0)
print('Shape of combined: ', data_combined.shape)

# ## Explore data
# How many examples of each category do we have?
category_cols = ['toxic', 'severe_toxic', 'obscene', 'threat', 'insult', 'identity_hate']


def print_label_counts(df, set_name):
    """Print the number of unlabeled comments and the positive count per category.

    df: frame containing the six binary label columns in `category_cols`.
    set_name: header printed before the counts (e.g. 'Train set').
    """
    per_category = df[category_cols].astype(bool).sum(axis=0)
    labels_per_comment = df[category_cols].astype(bool).sum(axis=1)
    num_unlabeled = (labels_per_comment == 0).sum()
    print(set_name + ':')
    print('no label\t', num_unlabeled)
    print(per_category)


print_label_counts(data_train, 'Train set')
print_label_counts(data_test, 'Test set')

# How long are the comments (train + test combined)?
# +
comment_lengths = data_combined['comment_text'].str.len()
print('Mean length: ', comment_lengths.mean())
print('Median length: ', comment_lengths.median())
print('Maximum length: ', comment_lengths.max())
print('Minimum length: ', comment_lengths.min())
comment_lengths.hist(bins=100)
# Fixed title: the statistics use the combined data, not only the training set.
plt.title('histogram of number of characters per comment (train + test)')
plt.xlabel('# characters')
plt.ylabel('# occurrences')
plt.xlim(0, 2000)
plt.show()
# -

# What is the rate of punctuation compared to all characters in a comment?
# +
def count_occurrences(text, charset):
    """Count how many characters of `text` appear in `charset`."""
    return sum(1 for ch in text if ch in charset)


num_of_punctuation = data_combined['comment_text'].apply(lambda x: count_occurrences(x, string.punctuation))
punctuation_rates = num_of_punctuation / comment_lengths
print('Mean punctuation rate: ', punctuation_rates.mean())
print('Median punctuation rate: ', punctuation_rates.median())
print('Maximum punctuation rate: ', punctuation_rates.max())
print('Minimum punctuation rate: ', punctuation_rates.min())
punctuation_rates.hist(bins=100)
plt.title('histogram of punctuation in data')
plt.xlabel('punctuation rate')
plt.xlim(0, 0.4)
plt.ylabel('# occurrences')
plt.show()
# -

# How many exclamation marks per comment (train + test combined)?
# +
num_of_exclamation_marks = data_combined['comment_text'].apply(lambda x: count_occurrences(x, '!'))
print('Mean #exclamation marks: ', num_of_exclamation_marks.mean())
print('Median #exclamation marks: ', num_of_exclamation_marks.median())
print('Maximum #exclamation marks: ', num_of_exclamation_marks.max())
print('Minimum #exclamation marks: ', num_of_exclamation_marks.min())
# Clip the long tail (>= 50 marks) so the histogram stays readable.
num_of_exclamation_marks[num_of_exclamation_marks < 50].hist(bins=50)
plt.title('histogram of exclamation marks in data')
plt.xlabel('# exclamation marks')
plt.xlim(0, 10)
plt.ylabel('# occurrences')
plt.show()
# -

# What fraction of each comment is uppercase?
# +
def count_uppercase(text):
    """Count the uppercase characters in `text`."""
    return sum(1 for ch in text if ch.isupper())


number_uppercase_characters = data_combined['comment_text'].apply(count_uppercase)
uppercase_rate = number_uppercase_characters / comment_lengths
print('Mean uppercase rate: ', uppercase_rate.mean())
print('Median uppercase rate: ', uppercase_rate.median())
print('Maximum uppercase rate: ', uppercase_rate.max())
print('Minimum uppercase rate: ', uppercase_rate.min())
uppercase_rate.hist(bins=100)
plt.title('histogram of uppercase rate in data')
plt.xlabel('uppercase rate')
plt.ylabel('# occurrences')
plt.show()
uppercase_rate.head()
# -

# ## Examples for every category in the data
data_train[data_train['toxic'] == 1].head(3)
data_train[data_train['severe_toxic'] == 1].head(3)
data_train[data_train['obscene'] == 1].head(3)
data_train[data_train['threat'] == 1].head(3)
data_train[data_train['insult'] == 1].head(3)
data_train[data_train['identity_hate'] == 1].head(3)
|
explore_kaggle_data.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
import pandas as pd
from sklearn.preprocessing import MinMaxScaler
from datetime import datetime
from datetime import timedelta
sns.set()  # seaborn default plot styling
tf.compat.v1.random.set_random_seed(1234)  # fix the TF graph-level seed for reproducibility
# Columns used below: timestamp, close, positive, negative.
df = pd.read_csv('../dataset/BTC-sentiment.csv')
df.head()
# ## How we gather the data, provided by Bitcurate, bitcurate.com
#
# Because I don't have sentiment data related to stock market, so I will use crpytocurrency data, `BTC/USDT` from binance.
#
# 1. close data came from CCXT, https://github.com/ccxt/ccxt, an open source cryptocurrency aggregator.
# 2. We gather from streaming twitter, crawling hardcoded crpyocurrency telegram groups and Reddit. And we store in Elasticsearch as a single index. We trained 1/4 layers BERT MULTILANGUAGE (200MB-ish, originally 700MB-ish) released by Google on most-possible-found sentiment data on the internet, leveraging sentiment on multilanguages, eg, english, korea, japan. **Actually, it is very hard to found negative sentiment related to bitcoin / btc in large volume.**
#
# How we request using elasticsearch-dsl, https://elasticsearch-dsl.readthedocs.io,
# ```python
# # from index name
# s = s.filter(
# 'query_string',
# default_field = 'text',
# query = 'bitcoin OR btc',
# )
# ```
#
# We only do text query only contain `bitcoin` or `btc`.
# ## Consensus introduction
#
# We have 2 questions here when saying about consensus, what happened,
#
# 1. to future price if we assumed future sentiment is really positive, near to 1.0 . Eg, suddenly China want to adapt cryptocurrency and that can cause huge requested volumes.
# 2. to future price if we assumed future sentiment is really negative, near to 1.0 . Eg, suddenly hackers broke binance or any exchanges, or any news that caused wreck by negative sentiment.
#
# **We can use deep-learning to simulate for us!**
# +
# Plot price and both sentiment streams on one figure with three y-axes.
from mpl_toolkits.axes_grid1 import host_subplot
import mpl_toolkits.axisartist as AA  # NOTE(review): imported but never used below

close = df['close'].tolist()
positive = df['positive'].tolist()
negative = df['negative'].tolist()
timestamp = df['timestamp'].tolist()

plt.figure(figsize = (17, 5))
host = host_subplot(111)
plt.subplots_adjust(right = 0.75, top = 0.8)
# Two extra y-axes sharing the same x-axis, one per sentiment stream.
par1 = host.twinx()
par2 = host.twinx()
par2.spines['right'].set_position(('axes', 1.1))
par2.spines['bottom'].set_position(('axes', 0.9))
host.set_xlabel('timestamp')
host.set_ylabel('BTC/USDT')
par1.set_ylabel('positive')
par2.set_ylabel('negative')
host.plot(close, label = 'BTC/USDT')
par1.plot(positive, label = 'positive')
par2.plot(negative, label = 'negative')
host.legend()
# Show every 30th timestamp label to keep the x-axis readable.
plt.xticks(
    np.arange(len(timestamp))[::30], timestamp[::30], rotation = '45', ha = 'right'
)
plt.legend()
plt.show()
# -

# Min-max scale only the close price (column 1); the sentiment columns are
# appended unscaled (presumably already in [0, 1] — TODO confirm upstream).
minmax = MinMaxScaler().fit(df.iloc[:, 1:2].astype('float32'))
df_log = minmax.transform(df.iloc[:, 1:2].astype('float32'))
df_log = pd.DataFrame(df_log)
df_log[1] = df['positive']
df_log[2] = df['negative']
df_log.head()
# ## Model definition
#
# This example is using model 17.cnn-seq2seq, if you want to use another model, need to tweak a little bit, but I believe it is not that hard.
# +
num_layers = 1
size_layer = 128
epoch = 200
dropout_rate = 0.75  # keep probability (passed to tf.nn.dropout's keep_prob)
# 30 bars held out for testing. NOTE(review): the original comment claimed
# "last 12 hours", but 30 bars at one bar per 20 minutes is 10 hours.
test_size = 3 * 10
learning_rate = 1e-3

# Reuses the name `timestamp` (previously the list of time labels) as the
# training window length, in bars.
timestamp = test_size

df_train = df_log.iloc[:-test_size]
df_test = df_log.iloc[-test_size:]
df.shape, df_train.shape, df_test.shape
# +
def encoder_block(inp, n_hidden, filter_size):
    """Symmetrically padded conv block: the output keeps the input's time length.

    inp is (batch, time, channels); a dummy width axis is inserted so that a
    2D convolution can be used, then squeezed away again.
    """
    expanded = tf.expand_dims(inp, 2)
    pad = (filter_size[0] - 1) // 2
    padded = tf.pad(
        expanded,
        [[0, 0], [pad, pad], [0, 0], [0, 0]],
    )
    conv = tf.layers.conv2d(
        padded, n_hidden, filter_size, padding = 'VALID', activation = None
    )
    return tf.squeeze(conv, 2)
def decoder_block(inp, n_hidden, filter_size):
    """Causally padded conv block: pads only on the left along time, so each
    output step never sees later inputs."""
    expanded = tf.expand_dims(inp, 2)
    left_pad = filter_size[0] - 1
    padded = tf.pad(expanded, [[0, 0], [left_pad, 0], [0, 0], [0, 0]])
    conv = tf.layers.conv2d(
        padded, n_hidden, filter_size, padding = 'VALID', activation = None
    )
    return tf.squeeze(conv, 2)
def glu(x):
    """Gated linear unit: split the channel axis in half and gate the first
    half with the sigmoid of the second half."""
    midpoint = tf.shape(x)[2] // 2
    values = x[:, :, :midpoint]
    gates = tf.sigmoid(x[:, :, midpoint:])
    return tf.multiply(values, gates)
def layer(inp, conv_block, kernel_width, n_hidden, residual = None):
    """Apply conv_block then a GLU; add the residual connection when given."""
    gated = glu(conv_block(inp, n_hidden, (kernel_width, 1)))
    if residual is None:
        return gated
    return gated + residual
class Model:
    """Convolutional seq2seq regressor built as a TF1 graph.

    A stack of GLU-gated convolutional encoder layers is followed by causal
    decoder layers with multi-head attention over the encoder output, then a
    dense head regressing the next-step values.

    Attributes created:
        X: float32 placeholder, shape (batch, time, size).
        Y: float32 placeholder, shape (time, output_size).
        logits: prediction for Y (with the batch-of-1 usage in this notebook,
            shape (time, output_size)).
        cost: mean squared error between Y and logits.
        optimizer: Adam minimization op for cost.
    """

    def __init__(
        self,
        learning_rate,
        num_layers,
        size,
        size_layer,
        output_size,
        kernel_size = 3,
        n_attn_heads = 16,
        # NOTE: this is a *keep* probability (tf.nn.dropout keep_prob), not a
        # drop probability.
        dropout = 0.9,
    ):
        self.X = tf.placeholder(tf.float32, (None, None, size))
        self.Y = tf.placeholder(tf.float32, (None, output_size))
        # Project inputs into the model width; keep a copy for the residual.
        encoder_embedded = tf.layers.dense(self.X, size_layer)
        e = tf.identity(encoder_embedded)
        # Encoder: GLU conv layers, each with a residual from its own input.
        for i in range(num_layers):
            z = layer(
                encoder_embedded,
                encoder_block,
                kernel_size,
                size_layer * 2,
                encoder_embedded,
            )
            z = tf.nn.dropout(z, keep_prob = dropout)
            encoder_embedded = z
        encoder_output, output_memory = z, z + e
        g = tf.identity(encoder_embedded)
        # Decoder: causal conv layers with multi-head attention over the
        # encoder output (zero residual inside the conv, added back below).
        for i in range(num_layers):
            attn_res = h = layer(
                encoder_embedded,
                decoder_block,
                kernel_size,
                size_layer * 2,
                residual = tf.zeros_like(encoder_embedded),
            )
            C = []
            for j in range(n_attn_heads):
                # Per-head projections of decoder state (h), decoder input
                # copy (g), encoder output (zu_) and encoder memory (ze_).
                h_ = tf.layers.dense(h, size_layer // n_attn_heads)
                g_ = tf.layers.dense(g, size_layer // n_attn_heads)
                zu_ = tf.layers.dense(
                    encoder_output, size_layer // n_attn_heads
                )
                ze_ = tf.layers.dense(output_memory, size_layer // n_attn_heads)
                d = tf.layers.dense(h_, size_layer // n_attn_heads) + g_
                # Attention weights: query d against encoder keys zu_.
                dz = tf.matmul(d, tf.transpose(zu_, [0, 2, 1]))
                a = tf.nn.softmax(dz)
                c_ = tf.matmul(a, ze_)
                C.append(c_)
            # Concatenate heads, merge with the pre-attention state.
            c = tf.concat(C, 2)
            h = tf.layers.dense(attn_res + c, size_layer)
            h = tf.nn.dropout(h, keep_prob = dropout)
            encoder_embedded = h
        # Takes the last element of the batch axis (batch size is 1 in this
        # notebook), squashed through a sigmoid before the output projection.
        encoder_embedded = tf.sigmoid(encoder_embedded[-1])
        self.logits = tf.layers.dense(encoder_embedded, output_size)
        self.cost = tf.reduce_mean(tf.square(self.Y - self.logits))
        self.optimizer = tf.train.AdamOptimizer(learning_rate).minimize(
            self.cost
        )
def calculate_accuracy(real, predict):
    """Return 100 * (1 - RMS relative error) between two series.

    Both series are shifted by +1 before comparison so that zeros in `real`
    do not cause division by zero.
    """
    shifted_real = np.array(real) + 1
    shifted_pred = np.array(predict) + 1
    relative_error = (shifted_real - shifted_pred) / shifted_real
    rms = np.sqrt(np.mean(np.square(relative_error)))
    return (1 - rms) * 100
def anchor(signal, weight):
    """Exponentially smooth `signal`.

    Each output is weight * previous_output + (1 - weight) * current_value;
    the smoother is seeded with the first sample, so the first output equals
    signal[0]. Returns a list the same length as `signal`.
    """
    smoothed = []
    previous = signal[0]
    for value in signal:
        previous = previous * weight + (1 - weight) * value
        smoothed.append(previous)
    return smoothed
# -
tf.reset_default_graph()
# df_log has 3 columns (scaled close, positive, negative); the model both
# consumes and predicts all 3.
modelnn = Model(
    learning_rate, num_layers, df_log.shape[1], size_layer, df_log.shape[1],
    dropout = dropout_rate
)
sess = tf.InteractiveSession()
sess.run(tf.global_variables_initializer())

# +
from tqdm import tqdm

pbar = tqdm(range(epoch), desc = 'train loop')
for i in pbar:
    # NOTE(review): init_value is never fed to the graph — looks like a
    # leftover from an RNN variant of this notebook.
    init_value = np.zeros((1, num_layers * 2 * size_layer))
    total_loss, total_acc = [], []
    # Slide over the training frame in windows of `timestamp` bars; the
    # target is the same window shifted forward by one bar.
    for k in range(0, df_train.shape[0] - 1, timestamp):
        index = min(k + timestamp, df_train.shape[0] - 1)
        batch_x = np.expand_dims(
            df_train.iloc[k : index, :].values, axis = 0
        )
        batch_y = df_train.iloc[k + 1 : index + 1, :].values
        logits, _, loss = sess.run(
            [modelnn.logits, modelnn.optimizer, modelnn.cost],
            feed_dict = {modelnn.X: batch_x, modelnn.Y: batch_y},
        )
        total_loss.append(loss)
        # Accuracy is tracked on the close column (column 0) only.
        total_acc.append(calculate_accuracy(batch_y[:, 0], logits[:, 0]))
    pbar.set_postfix(cost = np.mean(total_loss), acc = np.mean(total_acc))
# +
future_day = test_size

# One-step-ahead predictions over the whole training range, plus room for
# `future_day` forecast rows at the end; row 0 is seeded with the first
# training row.
output_predict = np.zeros((df_train.shape[0] + future_day, df_train.shape[1]))
output_predict[0] = df_train.iloc[0]
upper_b = (df_train.shape[0] // timestamp) * timestamp

for k in range(0, (df_train.shape[0] // timestamp) * timestamp, timestamp):
    out_logits = sess.run(
        modelnn.logits,
        feed_dict = {
            modelnn.X: np.expand_dims(
                df_train.iloc[k : k + timestamp], axis = 0
            )
        },
    )
    # Predictions are for the next bar, hence the +1 offset.
    output_predict[k + 1 : k + timestamp + 1] = out_logits

# Handle the tail window when the training length isn't a multiple of
# `timestamp`.
if upper_b != df_train.shape[0]:
    out_logits = sess.run(
        modelnn.logits,
        feed_dict = {
            modelnn.X: np.expand_dims(df_train.iloc[upper_b:], axis = 0)
        },
    )
    output_predict[upper_b + 1 : df_train.shape[0] + 1] = out_logits
    # The tail prediction already filled one of the future slots.
    future_day -= 1
# -
output_predict_negative = output_predict.copy()
output_predict_positive = output_predict.copy()

# Roll the forecast forward one bar at a time under three scenarios:
# sentiment as predicted, sentiment forced fully negative (col 1 = 0,
# col 2 = 1), and sentiment forced fully positive (col 1 = 1, col 2 = 0).
for i in range(future_day):
    o = output_predict[-future_day - timestamp + i:-future_day + i].copy()
    o = np.expand_dims(o, axis = 0)
    o_negative = output_predict_negative[-future_day - timestamp + i:-future_day + i].copy()
    o_negative = np.expand_dims(o_negative, axis = 0)
    o_negative[:, :, 1] = 0.0
    o_negative[:, :, 2] = 1.0
    o_positive = output_predict_positive[-future_day - timestamp + i:-future_day + i].copy()
    o_positive = np.expand_dims(o_positive, axis = 0)
    o_positive[:, :, 1] = 1.0
    o_positive[:, :, 2] = 0.0
    # original without any consensus
    out_logits = sess.run(
        modelnn.logits,
        feed_dict = {
            modelnn.X: o
        },
    )
    # Only the last predicted bar of the window is kept each iteration.
    output_predict[-future_day + i] = out_logits[-1]
    # negative consensus
    out_logits = sess.run(
        modelnn.logits,
        feed_dict = {
            modelnn.X: o_negative
        },
    )
    output_predict_negative[-future_day + i] = out_logits[-1]
    # positive consensus
    out_logits = sess.run(
        modelnn.logits,
        feed_dict = {
            modelnn.X: o_positive
        },
    )
    output_predict_positive[-future_day + i] = out_logits[-1]

# Map the scaled close column (column 0) back to USDT prices.
output_predict_original = minmax.inverse_transform(output_predict[:,:1])
output_predict_negative = minmax.inverse_transform(output_predict_negative[:,:1])
output_predict_positive = minmax.inverse_transform(output_predict_positive[:,:1])

# Exponential smoothing (weight 0.7 on the previous value) for display.
deep_future = anchor(output_predict_original[:, 0], 0.7)
deep_future_negative = anchor(output_predict_negative[:, 0], 0.7)
deep_future_positive = anchor(output_predict_positive[:, 0], 0.7)
df.shape, len(deep_future_negative)

# NOTE(review): minmax was fitted on a single column but df_train/df_test
# have 3 columns — confirm inverse_transform accepts this with the
# scikit-learn version in use.
df_train = minmax.inverse_transform(df_train)
df_test = minmax.inverse_transform(df_test)

# +
timestamp = df['timestamp'].tolist()  # restore `timestamp` to the label list for plotting
# Left-pad the test series with NaNs so it lines up after the training range.
pad_test = np.pad(df_test[:,0], (df_train.shape[0], 0), 'constant', constant_values=np.nan)

plt.figure(figsize = (15, 5))
plt.plot(pad_test, label = 'test trend', c = 'blue')
plt.plot(df_train[:,0], label = 'train trend', c = 'black')
plt.plot(deep_future, label = 'forecast without consensus')
plt.plot(deep_future_negative, label = 'forecast with negative consensus', c = 'red')
plt.plot(deep_future_positive, label = 'forecast with positive consensus', c = 'green')
plt.legend()
plt.xticks(
    np.arange(len(timestamp))[::30], timestamp[::30], rotation = '45', ha = 'right'
)
plt.show()
# -
# ## What we can observe
#
# 1. The model learn, if positive and negative sentiments increasing, both will increase the price. That is why, using positive consensus or negative consensus caused price going up.
# 2. Volatility of price is higher if negative sentiment is higher, still positive volatility.
# 3. Momentum of price is higher if negative sentiment is higher, still positive momentum.
|
deep-learning/sentiment-consensus.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/MHaley206265/DS-Unit-2-Kaggle-Challenge/blob/master/module4-classification-metrics/Matt_Haley_DSPT6_224_Assignment.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + id="hpnjzx0pA3Ld" colab_type="code" outputId="9d840f93-3f12-4dc3-decd-6a5b5a0f1827" colab={"base_uri": "https://localhost:8080/", "height": 1000}
import sys
# !pip install category_encoders==2.*
# !pip install pandas-profiling==2.*
# + id="_f4W-OlErWi3" colab_type="code" outputId="832e5da5-aec0-453a-dd60-e08db63e3aff" colab={"base_uri": "https://localhost:8080/", "height": 34}
import pandas as pd
from sklearn.model_selection import train_test_split

DATA_PATH = 'https://raw.githubusercontent.com/LambdaSchool/DS-Unit-2-Kaggle-Challenge/master/data/'
# Join the feature table with its labels (pd.merge joins on the columns the
# two files share).
train = pd.merge(pd.read_csv(DATA_PATH+'waterpumps/train_features.csv'),
                 pd.read_csv(DATA_PATH+'waterpumps/train_labels.csv'))
test = pd.read_csv(DATA_PATH+'waterpumps/test_features.csv')
sample_submission = pd.read_csv(DATA_PATH+'waterpumps/sample_submission.csv')
train.shape, test.shape
# + id="6X0XUw3aJIdo" colab_type="code" colab={}
# from pandas_profiling import ProfileReport
# profile = ProfileReport(train, minimal=True).to_notebook_iframe()
# profile
# + id="PEQwJFbduB_i" colab_type="code" outputId="611c6dd9-c0a6-4572-86c2-4da767912a39" colab={"base_uri": "https://localhost:8080/", "height": 51}
import category_encoders as ce
from sklearn.model_selection import train_test_split
import numpy as np
from sklearn.utils import all_estimators
from sklearn.ensemble import RandomForestClassifier
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.svm import LinearSVC
from sklearn.pipeline import make_pipeline
from sklearn.impute import SimpleImputer
from sklearn.model_selection import RandomizedSearchCV
from scipy.stats import randint, uniform
# + id="GIxY7X-Oxfpw" colab_type="code" colab={}
# Name of the label column predicted throughout this notebook.
target = 'status_group'
# + id="FkIV-lB6Lt7S" colab_type="code" colab={}
def drop_columns(df):
    """Drop the id column and columns that are constant or duplicated
    elsewhere in the dataset. Returns a new frame; the input is untouched.
    """
    to_drop = ['num_private', 'recorded_by', 'payment_type', 'quantity_group', 'id']
    return df.copy().drop(columns=to_drop)
# + id="tWjA-GCfMAbK" colab_type="code" colab={}
# convert date_recorded to just year_recorded
def date_to_year(df):
    """Expand date_recorded into year/month/day columns plus a pump age.

    Adds year_recorded, month_recorded, day_recorded, years (age at
    recording time) and years_MISSING, then drops date_recorded.
    NOTE(review): when this runs before zero_to_nan (as in engineer),
    construction_year is still 0-filled, so `years` is never null here and
    years_MISSING is always False — confirm the intended pipeline order.
    """
    result = df.copy()
    result['date_recorded'] = pd.to_datetime(result['date_recorded'], infer_datetime_format=True)
    recorded = result['date_recorded'].dt
    result['year_recorded'] = recorded.year
    result['month_recorded'] = recorded.month
    result['day_recorded'] = recorded.day
    result['years'] = result['year_recorded'] - result['construction_year']
    result['years_MISSING'] = result['years'].isnull()
    return result.drop(columns=['date_recorded'])
# + id="bWa_90TzMgsL" colab_type="code" colab={}
# convert funder to funded_by_gov
# At locations where the neighborhood is NOT in the top 10,
def decrease_card(df):
    """Collapse the high-cardinality funder/installer columns to two levels.

    Keeps 'Government Of Tanzania' (funder) and 'DWE' (installer); every
    other value — including NaN — becomes 'other'. Returns a new frame.
    """
    reduced = df.copy()
    is_gov = reduced['funder'] == 'Government Of Tanzania'
    reduced.loc[~is_gov, 'funder'] = 'other'
    is_dwe = reduced['installer'] == 'DWE'
    reduced.loc[~is_dwe, 'installer'] = 'other'
    return reduced
# + id="-6NWwvHbN4iN" colab_type="code" colab={}
# change -2e-08 values to 0 in latitude
def change_to_zero(df):
    """Replace the sentinel latitude value -2e-08 with 0 so that zero_to_nan
    can later treat it as missing. Returns a new frame."""
    cleaned = df.copy()
    cleaned['latitude'] = cleaned['latitude'].replace(-2e-08, 0)
    return cleaned
# + id="jfv4zOM_Opc9" colab_type="code" colab={}
# use ordinal encoder on ['ward', 'lga', 'scheme_name']
def ord_encode(df):
    """Ordinal-encode the high-cardinality columns ward, lga and scheme_name.

    Missing categories become NaN (handle_missing='return_nan'). Returns a
    new frame; the input is untouched.
    """
    high_card_cols = ['ward', 'lga', 'scheme_name']
    encoder = ce.ordinal.OrdinalEncoder(handle_missing='return_nan')
    encoded = df.copy()
    encoded[high_card_cols] = encoder.fit_transform(encoded[high_card_cols])
    return encoded
# + id="T2dMhd86Pj7c" colab_type="code" colab={}
# convert 0 to nan in ['latitude', 'longitude', 'construction_year']
def zero_to_nan(df):
    """Treat 0 as missing in longitude, latitude and construction_year by
    replacing it with NaN. Returns a new frame."""
    cleaned = df.copy()
    for column in ('longitude', 'latitude', 'construction_year'):
        cleaned[column] = cleaned[column].replace(0, np.nan)
    return cleaned
# + id="uVasXU04wH4B" colab_type="code" colab={}
def train_val(df):
    """Split a frame 80/20 with a fixed seed, stratified on status_group.

    Returns (train, val).
    """
    frame = df.copy()
    split = train_test_split(
        frame,
        train_size=.80,
        test_size=.20,
        stratify=frame['status_group'],
        random_state=42,
    )
    return split[0], split[1]
# + id="mRJ55meFwu7S" colab_type="code" colab={}
def engineer(df):
    """Run the cleaning pipeline: drop columns, expand dates, fix the
    latitude sentinel, convert zeros to NaN.

    decrease_card and ord_encode are deliberately not part of the pipeline
    (they were disabled in the original experiment).
    """
    cleaned = df.copy()
    for step in (drop_columns, date_to_year, change_to_zero, zero_to_nan):
        cleaned = step(cleaned)
    return cleaned
# + id="m-yTH-w4xXJ0" colab_type="code" colab={}
def train_val_test(train, test):
    """Split train into train/val, then apply feature engineering to all
    three frames. Returns (train, val, test)."""
    train, val = train_val(train)
    return engineer(train), engineer(val), engineer(test)
# + id="UDMpuY8Kx2XK" colab_type="code" colab={}
def get_features(df):
    """Return feature column names: all numeric columns plus all non-numeric
    columns, excluding the module-level `target`.

    NOTE(review): despite the original 'cardinality' naming, no cardinality
    threshold is applied — every categorical column is kept.
    """
    predictors = df.copy().drop(target, axis=1)
    numeric_cols = predictors.select_dtypes(include='number').columns.tolist()
    categorical_cols = predictors.select_dtypes(exclude='number').nunique().index.tolist()
    return numeric_cols + categorical_cols
# + id="SSiVx0oKzBS5" colab_type="code" colab={}
def mat_vect(df, features):
    """Split a frame into the feature matrix X and the label vector y
    (label column given by the module-level `target`)."""
    frame = df.copy()
    return frame[features], frame[target]
# + id="pOyd3_MXzhmy" colab_type="code" colab={}
def prepare(train, test):
    """End-to-end preparation: split, engineer, select features, vectorize.

    Returns X_train, y_train, X_val, y_val and the feature-selected test
    frame (features are chosen from the engineered training split).
    """
    train, val, test = train_val_test(train.copy(), test.copy())
    features = get_features(train)
    X_train, y_train = mat_vect(train, features)
    X_val, y_val = mat_vect(val, features)
    return X_train, y_train, X_val, y_val, test[features]
# + id="xuHGQk2aVeBa" colab_type="code" colab={}
def kaggle_sub(df, pipe, test):
    """Predict on `df` with the fitted pipeline and write my_submission.csv.

    Submission ids come from the raw `test` frame (which still has its id
    column).
    """
    predictions = pipe.predict(df.copy())
    submission = test[['id']].copy()
    submission['status_group'] = predictions
    submission.to_csv('my_submission.csv', index=False)
# + id="VI9l4WqU0aNk" colab_type="code" colab={}
# Full preparation: split, engineer, select features, vectorize.
X_train, y_train, X_val, y_val, X_test = prepare(train, test)

# + id="ccY3dhRp7rSL" colab_type="code" colab={}
# Ordinal-encode categoricals, median-impute missing values, then a tuned
# random forest (hyperparameters chosen by the search kept below).
pipe = make_pipeline(
    ce.ordinal.OrdinalEncoder(),
    SimpleImputer(strategy='median'),
    RandomForestClassifier(n_estimators=100, random_state=42, n_jobs=-1, min_samples_leaf=2,
                           min_samples_split=6, max_features=0.18340450985343382, max_depth = 20)
)

# + id="_g1tsbE5tKfH" colab_type="code" colab={}
# Hyperparameter search used to pick max_depth (kept for reference):
# param_grid = {
#     'randomforestclassifier__max_depth': [20, 22, 24, 26, 28],
# }
# rscv = RandomizedSearchCV(pipe, param_grid, random_state=42)
# search = rscv.fit(X_train, y_train)
# print(f'Best Parameters: {search.best_params_}')
# print(f'Best Score: {search.best_score_}')

# + id="bYhwqkj68lxQ" colab_type="code" outputId="42769743-5bca-4d73-a535-2d6744947c92" colab={"base_uri": "https://localhost:8080/", "height": 442}
pipe.fit(X_train, y_train)

# + id="h92z-NLPPmDE" colab_type="code" colab={}
from sklearn.metrics import plot_confusion_matrix

# + id="VQV6_QVhPrPJ" colab_type="code" outputId="5ec83b12-3c4a-4596-a50b-70900afef8bc" colab={"base_uri": "https://localhost:8080/", "height": 385}
# Raw-count confusion matrix on the validation split.
plot_confusion_matrix(pipe, X_val, y_val, values_format='.0f', cmap='Reds',
                      xticks_rotation='vertical');

# + id="-d9BOezOQSEa" colab_type="code" outputId="87df2b13-b9c2-496f-9fd7-dc045c995eb3" colab={"base_uri": "https://localhost:8080/", "height": 385}
# Row-normalized confusion matrix (per-class recall on the diagonal).
plot_confusion_matrix(pipe, X_val, y_val, values_format='.2f', cmap='Reds',
                      xticks_rotation='vertical', normalize='true');

# + id="n7R7ZjiR8pqQ" colab_type="code" outputId="842b6332-fc51-4c16-d32a-5c2d484cdd52" colab={"base_uri": "https://localhost:8080/", "height": 34}
# Validation accuracy.
pipe.score(X_val, y_val)

# + id="flyxHmajZFPY" colab_type="code" outputId="05542b49-7660-4f2a-e9bd-91c87573d040" colab={"base_uri": "https://localhost:8080/", "height": 34}
# Training accuracy (gap vs. validation indicates overfitting).
pipe.score(X_train, y_train)

# + id="LWdJYdnLZvBT" colab_type="code" outputId="28081bb5-7fdb-4fac-b2f3-2f5947b3c037" colab={"base_uri": "https://localhost:8080/", "height": 187}
# Sanity checks: train/test columns and shapes should line up.
X_test.columns

# + id="nlvuwzyEZ0gN" colab_type="code" outputId="f9d9e291-a34b-4ea7-ad4e-847485ac1f06" colab={"base_uri": "https://localhost:8080/", "height": 187}
X_train.columns

# + id="iwt3lrhCZ8KL" colab_type="code" outputId="1b2f271c-75ff-4478-edb2-baf8bf573b91" colab={"base_uri": "https://localhost:8080/", "height": 34}
X_train.shape, X_test.shape

# + id="RbqT6RsaWs5q" colab_type="code" colab={}
# Kaggle submission generation (kept for reference):
# y_pred = pipe.predict(X_test)
# submission = test[['id']].copy()
# submission['status_group'] = y_pred
# submission.to_csv('my_submission_new.csv', index=False)
|
module4-classification-metrics/Matt_Haley_DSPT6_224_Assignment.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python [python3point5]
# language: python
# name: Python [python3point5]
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/lakigigar/Caltech-CS155-2021/blob/main/psets/set1/set1_prob4_part2.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + [markdown] id="8zXk15sqanJn"
# # Problem 4, Parts F-H: Stochastic Gradient Descent with a Larger Dataset
# + [markdown] id="m7tyF2u9anJp"
# Use this notebook to write your code for problem 4 parts F-H by filling in the sections marked `# TODO` and running all cells.
# + id="D2Hkms3PanJq"
# Setup.
import numpy as np
import matplotlib.pyplot as plt
# %matplotlib inline
# + [markdown] id="-AyihJn4anJq"
# ## Problem 4F: Perform SGD with the new dataset
# + [markdown] id="8lqAvFzOanJr"
# For the functions below, you may re-use your code from parts 4C-E. Note that you can now modify your SGD function to return the final weight vector instead of the weights after every epoch.
# + id="agvOJInYanJr"
def loss(X, Y, w):
    '''
    Calculate the squared loss function.

    Inputs:
        X: A (N, D) shaped numpy array containing the data points.
        Y: A (N, ) shaped numpy array containing the (float) labels of the data points.
        w: A (D, ) shaped numpy array containing the weight vector.

    Outputs:
        The loss evaluated with respect to X, Y, and w:
        sum_i (Y[i] - X[i] . w)**2.
    '''
    # Residuals of the linear model for every point at once.
    residuals = Y - np.dot(X, w)
    return float(np.sum(residuals ** 2))
def gradient(x, y, w):
    '''
    Calculate the gradient of the loss function with respect to
    a single point (x, y), and using weight vector w.

    Inputs:
        x: A (D, ) shaped numpy array containing a single data point.
        y: The float label for the data point.
        w: A (D, ) shaped numpy array containing the weight vector.

    Output:
        The gradient of the loss with respect to x, y, and w,
        a (D, ) shaped numpy array.
    '''
    # d/dw (y - x.w)^2 = -2 * (y - x.w) * x
    return -2.0 * (y - np.dot(x, w)) * x
def SGD(X, Y, w_start, eta, N_epochs):
    '''
    Perform SGD using dataset (X, Y), initial weight vector w_start,
    learning rate eta, and N_epochs epochs.

    Inputs:
        X: A (N, D) shaped numpy array containing the data points.
        Y: A (N, ) shaped numpy array containing the (float) labels of the data points.
        w_start: A (D, ) shaped numpy array containing the weight vector initialization.
        eta: The step size.
        N_epochs: The number of epochs (iterations) to run SGD.

    Outputs:
        w: A (D, ) shaped array containing the final weight vector.
        losses: A (N_epochs, ) shaped array containing the full-dataset
            squared loss recorded at the end of each epoch.
    '''
    # The math is kept self-contained here (mirroring loss/gradient above),
    # so this function works even while those stubs are unimplemented.
    w = np.array(w_start, dtype=float)
    losses = np.zeros(N_epochs)
    for epoch in range(N_epochs):
        # One pass over the data, updating after every point; points are
        # visited in order so results are deterministic.
        for x_i, y_i in zip(X, Y):
            w -= eta * (-2.0 * (y_i - np.dot(x_i, w)) * x_i)
        residuals = Y - np.dot(X, w)
        losses[epoch] = np.sum(residuals ** 2)
    return w, losses
# + [markdown] id="qKB_1fKOanJs"
# Next, we need to load the dataset. In doing so, the following function may be helpful:
# + id="FjN68-QNanJt"
def load_data(filename):
    """
    Function loads data stored in the file filename and returns it as a numpy ndarray.

    Inputs:
        filename: given as a string.

    Outputs:
        Data contained in the file, returned as a numpy ndarray
    """
    # skiprows=1 skips the CSV header row; fields are comma-separated.
    return np.loadtxt(filename, skiprows=1, delimiter=',')
# + [markdown] id="fUbVU3OaanJt"
# Now, load the dataset in `sgd_data.csv` and run SGD using the given parameters; print out the final weights.
# + id="yD6I-fRmanJu"
#==============================================
# TODO:
# (1) load the dataset
# (2) run SGD using the given parameters
# (3) print out the final weights.
#==============================================

# The following should help you get started:
# NOTE: np.loadtxt fetches the file over HTTP here — requires network access.
data = load_data('https://raw.githubusercontent.com/lakigigar/Caltech-CS155-2021/main/psets/set1/data/sgd_data.csv')
# + [markdown] id="szWc9IBYanJu"
# ## Problem 4G: Convergence of SGD
# + [markdown] id="x8sztA8-anJu"
# This problem examines the convergence of SGD for different learning rates. Please implement your code in the cell below:
# + id="huDf79aranJu"
#==============================================
# TODO: create a plot showing the convergence
# of SGD for the different learning rates.
#==============================================
# + [markdown] id="UoHE8aTcanJv"
# ## Problem 4H
# + [markdown] id="bwAsdSYJanJv"
# Provide your code for computing the least-squares analytical solution below.
# + id="o7a3L97fanJv"
#==============================================
# TODO: implement the least-squares
# analytical solution.
#==============================================
|
psets/set1/set1_prob4_part2.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
from __future__ import print_function, division, absolute_import
import numpy as np
from scipy.optimize import fmin_l_bfgs_b, least_squares, fmin_slsqp, minimize
import pyfde
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
from time import time
import constrNMPy as cNM
from joblib import Parallel, delayed
sns.set()
def otto_model_create_data(x):
    """
    Run the otto model to generate numerical data.

    Relies on module-level globals (defined elsewhere in this notebook):
        f: 1D array of frequencies at which impedance is evaluated
        n: number of frequency points
        wire_c: parasitic wire capacitance

    Input:
    x: 1D Numpy array or list with 5 elements as defined below
        x[0] = alpha CPE phase factor
        x[1] = K CPE magnitude factor
        x[2] = ren encapsulation resistance
        x[3] = rex extracellular resistance
        x[4] = am membrane area in cm**2

    example:
    zr, zj = otto_model_create_data(x)

    zr: 1D Numpy array of length n, real component of z
    zj: 1D Numpy array of length n, magnitude of the imaginary component of z
    """
    # glial encapsulation
    am = x[4]  # membrane area (cm**2)
    cm = 1e-6*am  # cell membrane capacitance
    rm = 3.33/am  # cell membrane resistivity (ohm*cm**2)
    # 1j in Python is sqrt(-1.0)
    ecpe = 1.0 / (((1j*2*np.pi*f)**x[0])*(x[1]/1e6))  # constant-phase element
    ren = (x[2]*1e3) * np.ones(n)
    rex = (x[3]*1e3) * np.ones(n)
    # 2 parallel RC circuits in series
    cell_membrane = (1.0/((1j*2*np.pi*f*cm)+(1.0/rm))) + \
        (1.0/((1j*2*np.pi*f*cm)+(1.0/rm)))
    # combine total impedances
    wire1 = 1.0 / (1j*2*np.pi*f*wire_c)  # wire shunt impedance
    z_electrode_tissue = ecpe + ren + (1.0 / ((1.0/(cell_membrane))+(1.0/rex)))
    ztot = 1.0 / ((1./z_electrode_tissue)+(1./wire1))
    return np.real(ztot), np.abs(np.imag(ztot))
def otto_model_L1_log(x):
    """
    Return the mean L1 norm (computed in log10 space) of the otto model for x
    against the measured data.

    Relies on module-level globals (defined elsewhere in this notebook):
        f, n: frequency array and number of points
        wire_c: parasitic wire capacitance
        zr, zj: measured real / |imaginary| impedance components

    Input:
    x: 1D Numpy array or list with 5 elements as defined below
        x[0] = alpha CPE phase factor
        x[1] = K CPE magnitude factor
        x[2] = ren encapsulation resistance
        x[3] = rex extracellular resistance
        x[4] = am membrane area in cm**2
    """
    # Compare in log10 space so all frequency decades contribute comparably.
    ir = np.log10(zr)
    ij = np.log10(zj)
    # glial encapsulation
    am = x[4]  # membrane area (cm**2)
    cm = 1e-6*am  # cell membrane capacitance
    rm = 3.33/am  # cell membrane resistivity (ohm*cm**2)
    # 1j in Python is sqrt(-1.0)
    ecpe = 1.0 / (((1j*2*np.pi*f)**x[0])*(x[1]/1e6))
    ren = (x[2]*1e3) * np.ones(n)
    rex = (x[3]*1e3) * np.ones(n)
    # 2 parallel RC circuits in series
    cell_membrane = (1.0/((1j*2*np.pi*f*cm)+(1.0/rm))) + \
        (1.0/((1j*2*np.pi*f*cm)+(1.0/rm)))
    # combine total impedances
    wire1 = 1.0 / (1j*2*np.pi*f*wire_c)
    z_electrode_tissue = ecpe + ren + (1.0 / ((1.0/(cell_membrane))+(1.0/rex)))
    ztot = 1.0 / ((1./z_electrode_tissue)+(1./wire1))
    er = np.log10(np.real(ztot))
    ej = np.log10(np.abs(np.imag(ztot)))
    # Mean absolute log-space deviation, real and imaginary parts summed.
    L1 = (np.sum(np.abs(ir-er))/n) + (np.sum(np.abs(ij-ej))/n)
    # Invalid parameter sets (log10 of a non-positive value) score worst.
    if np.isnan(L1):
        return np.inf
    return L1
def otto_model_L2_log(x):
    """
    Return the L2 norm (computed in log10 space) of the otto model for x
    against the measured data.

    Relies on module-level globals (defined elsewhere in this notebook):
        f, n: frequency array and number of points
        wire_c: parasitic wire capacitance
        zr, zj: measured real / |imaginary| impedance components

    Input:
    x: 1D Numpy array or list with 5 elements as defined below
        x[0] = alpha CPE phase factor
        x[1] = K CPE magnitude factor
        x[2] = ren encapsulation resistance
        x[3] = rex extracellular resistance
        x[4] = am membrane area in cm**2
    """
    # Compare in log10 space so all frequency decades contribute comparably.
    ir = np.log10(zr)
    ij = np.log10(zj)
    # glial encapsulation
    am = x[4]  # membrane area (cm**2)
    cm = 1e-6*am  # cell membrane capacitance
    rm = 3.33/am  # cell membrane resistivity (ohm*cm**2)
    # 1j in Python is sqrt(-1.0)
    ecpe = 1.0 / (((1j*2*np.pi*f)**x[0])*(x[1]/1e6))
    ren = (x[2]*1e3) * np.ones(n)
    rex = (x[3]*1e3) * np.ones(n)
    # 2 parallel RC circuits in series
    cell_membrane = (1.0/((1j*2*np.pi*f*cm)+(1.0/rm))) + \
        (1.0/((1j*2*np.pi*f*cm)+(1.0/rm)))
    # combine total impedances
    wire1 = 1.0 / (1j*2*np.pi*f*wire_c)
    z_electrode_tissue = ecpe + ren + (1.0 / ((1.0/(cell_membrane))+(1.0/rex)))
    ztot = 1.0 / ((1./z_electrode_tissue)+(1./wire1))
    er = np.log10(np.real(ztot))
    ej = np.log10(np.abs(np.imag(ztot)))
    # Stack real and imaginary log-space residuals into one error vector.
    e = np.concatenate((ir - er, ij - ej))
    L2 = np.dot(e.T, e)/n
    # Invalid parameter sets (log10 of a non-positive value) score worst.
    if np.isnan(L2):
        return np.inf
    return np.sqrt(L2)
def otto_model_L_inf(x):
    """
    Return the L infinity norm of the otto model for x from some test data:
    the maximum deviation in the real part plus the maximum deviation in
    the imaginary part.

    Uses the module-level measured data ``zr``, ``zj``, frequencies ``f``,
    point count ``n``, and wire capacitance ``wire_c``.

    Input:
    x: 1D Numpy array or list with 5 elements as defined below
    x[0] = alpha CPE phase factor
    x[1] = K CPE magnitude factor
    x[2] = ren encapsulation resistance
    x[3] = rex extracellular resistance
    x[4] = am membrane area in cm**2
    """
    # complex angular frequency; 1j in Python is sqrt(-1.0)
    omega = 1j*2*np.pi*f
    # glial encapsulation
    area = x[4]  # membrane area (cm**2)
    cap = 1e-6*area  # cell membrane capacitance (uf/cm**2)
    res = 3.33/area  # cell membrane resistivity (ohm*cm**2)
    z_cpe = 1.0 / ((omega**x[0])*(x[1]/1e6))
    r_en = (x[2]*1e3) * np.ones(n)
    r_ex = (x[3]*1e3) * np.ones(n)
    # two identical parallel RC circuits in series
    rc = 1.0/((omega*cap)+(1.0/res))
    z_membrane = rc + rc
    z_wire = 1.0 / (omega*wire_c)
    z_tissue = z_cpe + r_en + (1.0 / ((1.0/z_membrane)+(1.0/r_ex)))
    z_total = 1.0 / ((1.0/z_tissue)+(1.0/z_wire))
    worst_real = np.max(np.abs(zr - np.real(z_total)))
    worst_imag = np.max(np.abs(zj - np.abs(np.imag(z_total))))
    return worst_real + worst_imag
def otto_model_L2_Kait(x):
    """
    Return Kaitlynn's magnitude-normalized squared-deviation objective of
    the otto model for x from some test data.

    Uses the module-level measured data ``zr``, ``zj``, frequencies ``f``,
    point count ``n``, and wire capacitance ``wire_c``.

    Input:
    x: 1D Numpy array or list with 5 elements as defined below
    x[0] = alpha CPE phase factor
    x[1] = K CPE magnitude factor
    x[2] = ren encapsulation resistance
    x[3] = rex extracellular resistance
    x[4] = am membrane area in cm**2
    """
    # complex angular frequency; 1j in Python is sqrt(-1.0)
    omega = 1j*2*np.pi*f
    # glial encapsulation
    area = x[4]  # membrane area (cm**2)
    cap = 1e-6*area  # cell membrane capacitance (uf/cm**2)
    res = 3.33/area  # cell membrane resistivity (ohm*cm**2)
    z_cpe = 1.0 / ((omega**x[0])*(x[1]/1e6))
    r_en = (x[2]*1e3) * np.ones(n)
    r_ex = (x[3]*1e3) * np.ones(n)
    # two identical parallel RC circuits in series
    rc = 1.0/((omega*cap)+(1.0/res))
    z_membrane = rc + rc
    # impedance magnitude of the measured data, used as the normalizer
    z_mag = np.sqrt((zr**2) + (zj**2))
    z_wire = 1.0 / (omega*wire_c)
    z_tissue = z_cpe + r_en + (1.0 / ((1.0/z_membrane)+(1.0/r_ex)))
    z_total = 1.0 / ((1.0/z_tissue)+(1.0/z_wire))
    # squared deviations normalized by |Z| of the data
    dev = (((zr - np.real(z_total))**2)/(np.abs(z_mag))) + \
        (((zj - np.abs(np.imag(z_total)))**2)/(np.abs(z_mag)))
    objective = np.dot(dev.T, dev)
    if np.isnan(objective):
        return np.inf
    return objective
def otto_model_L1_Kait(x):
    """
    return the L1 norm with Kaitlynn's normalized deviation
    of the otto model for x from some test data

    Uses the module-level measured data ``zr``, ``zj``, frequencies ``f``,
    point count ``n``, and wire capacitance ``wire_c``.

    Input:
    x: 1D Numpy array or list with 5 elements as defined below
    x[0] = alpha CPE phase factor
    x[1] = K CPE magnitude factor
    x[2] = ren encapsulation resistance
    x[3] = rex extracellular resistance
    x[4] = am membrane area in cm**2
    """
    # glial encapsulation
    am = x[4]  # membrane area (cm**2)
    cm = 1e-6*am  # cell membrane capacitance (uf/cm**2)
    rm = 3.33/am  # Cell membrane resistivity (ohm*cm**2)
    # 1j in Python is sqrt(-1.0)
    # constant phase element at the electrode interface
    ecpe = 1.0 / (((1j*2*np.pi*f)**x[0])*(x[1]/1e6))
    ren = (x[2]*1e3) * np.ones(n)
    rex = (x[3]*1e3) * np.ones(n)
    # 2 parallel RC circuits in series
    cell_membrane = (1.0/((1j*2*np.pi*f*cm)+(1.0/rm))) + \
        (1.0/((1j*2*np.pi*f*cm)+(1.0/rm)))
    # impedance magnitude of the measured data, used as the normalizer
    zmag = np.sqrt((zr**2) + (zj**2))
    # combine total impedances (parasitic wire capacitance in parallel)
    wire1 = 1.0 / (1j*2*np.pi*f*wire_c)
    z_electrode_tissue = ecpe + ren + (1.0 / ((1.0/(cell_membrane))+(1.0/rex)))
    ztot = 1.0 / ((1./z_electrode_tissue)+(1./wire1))
    # mean absolute deviation normalized by |Z| of the data
    e = np.sum((np.abs((zr-np.real(ztot))))/(np.abs(zmag))) + \
        np.sum((np.abs((zj-np.abs(np.imag(ztot)))))/(np.abs(zmag)))
    L1 = e / n
    # NaN (e.g. from an invalid parameter combination) maps to +inf so
    # optimizers treat it as a failed evaluation
    if np.isnan(L1):
        return np.inf
    return L1
def plot_results(f, x_l1, x_l2, x_linf, x_k, title):
    """
    Plot and save comparison figures for four fitted parameter sets.

    Input:
    f - 1D array of frequencies for the frequency-domain axes
    x_l1, x_l2, x_linf, x_k - 5-element parameter vectors
        (alpha, K, ren, rex, am) from the four optimization objectives
    title - figure title, also used as the file-name prefix in figs_pc/

    Produces Nyquist plots (linear and log-log), magnitude-vs-frequency
    plots, and relative-residual plots against the module-level test data
    (``zr``, ``zj``), saving each to figs_pc/ and finally calling
    plt.show().

    NOTE(review): the x_linf curves are labeled "L1 Kaitlynn's norm" in
    every legend -- confirm the labels match the objectives actually
    passed in by the caller.
    """
    # plot the fitted data
    zr_l1, zj_l1 = otto_model_create_data(x_l1)
    zr_l2, zj_l2 = otto_model_create_data(x_l2)
    zr_linf, zj_linf = otto_model_create_data(x_linf)
    zr_lk, zj_lk = otto_model_create_data(x_k)
    # Nyquist plot, linear axes
    plt.figure()
    plt.title(title)
    plt.plot(zr_l1, zj_l1, 'o-', label='Log10 L1 norm')
    plt.plot(zr_l2, zj_l2, 's-', label='Log10 L2 norm')
    plt.plot(zr_linf, zj_linf, '>-', label=r"L1 Kaitlynn's norm")
    plt.plot(zr_lk, zj_lk, '<-', label="Kaitlynn's norm")
    plt.plot(zr, zj, 'xk', label='Test Data')
    plt.xlabel(r'$Z_r (\Omega)$')
    plt.ylabel(r'$-Z_j (\Omega)$')
    plt.legend()
    plt.savefig('figs_pc/' + title + 'rj_nolog.png', dpi=300, bbox_inches='tight')
    # Nyquist plot, log-log axes
    plt.figure()
    plt.title(title)
    plt.plot(zr_l1, zj_l1, 'o-', label='Log10 L1 norm')
    plt.plot(zr_l2, zj_l2, 's-', label='Log10 L2 norm')
    plt.plot(zr_linf, zj_linf, '>-', label=r"L1 Kaitlynn's norm")
    plt.plot(zr_lk, zj_lk, '<-', label="Kaitlynn's norm")
    plt.plot(zr, zj, 'xk', label='Test Data')
    plt.xlabel(r'$Z_r (\Omega)$')
    plt.ylabel(r'$-Z_j (\Omega)$')
    plt.xscale('log')
    plt.yscale('log')
    plt.legend()
    plt.savefig('figs_pc/' + title + 'rj.png', dpi=300, bbox_inches='tight')
    # imaginary part vs frequency
    plt.figure()
    plt.title(title)
    plt.plot(f, zj_l1, 'o-', label='Log10 L1 norm')
    plt.plot(f, zj_l2, 's-', label='Log10 L2 norm')
    plt.plot(f, zj_linf, '>-', label=r"L1 Kaitlynn's norm")
    plt.plot(f, zj_lk, '<-', label="Kaitlynn's norm")
    plt.plot(f, zj, 'xk', label='Test Data')
    plt.xlabel(r'$f$')
    plt.ylabel(r'$-Z_j (\Omega)$')
    plt.xscale('log')
    plt.yscale('log')
    plt.legend()
    plt.savefig('figs_pc/' + title + 'j.png', dpi=300, bbox_inches='tight')
    # real part vs frequency
    plt.figure()
    plt.title(title)
    plt.plot(f, zr_l1, 'o-', label='Log10 L1 norm')
    plt.plot(f, zr_l2, 's-', label='Log10 L2 norm')
    plt.plot(f, zr_linf, '>-', label=r"L1 Kaitlynn's norm")
    plt.plot(f, zr_lk, '<-', label="Kaitlynn's norm")
    plt.plot(f, zr, 'xk', label='Test Data')
    plt.xlabel(r'$f$')
    plt.ylabel(r'$Z_r (\Omega)$')
    plt.xscale('log')
    plt.yscale('log')
    plt.legend()
    plt.savefig('figs_pc/' + title + 'r.png', dpi=300, bbox_inches='tight')
    # residual plots (relative deviation of the models from the data)
    plt.figure()
    plt.title(title)
    plt.plot(f, (zj-zj_l1)/zj, 'o-', label='Log10 L1 norm')
    plt.plot(f, (zj-zj_l2)/zj, 's-', label='Log10 L2 norm')
    plt.plot(f, (zj-zj_linf)/zj, '>-', label=r"L1 Kaitlynn's norm")
    plt.plot(f, (zj-zj_lk)/zj, '<-', label="Kaitlynn's norm")
    plt.plot([f[0], f[-1]], [0.0, 0.0], '-k')
    plt.xlabel(r'$f$')
    plt.ylabel(r'$(Z_{j,\mathrm{data}} - Z_{j,\mathrm{model}})/Z_{j,\mathrm{data}}$')
    plt.xscale('log')
    plt.legend()
    plt.savefig('figs_pc/' + title + 'j_resid.png', dpi=300, bbox_inches='tight')
    plt.figure()
    plt.title(title)
    plt.plot(f, (zr-zr_l1)/zr, 'o-', label='Log10 L1 norm')
    plt.plot(f, (zr-zr_l2)/zr, 's-', label='Log10 L2 norm')
    plt.plot(f, (zr-zr_linf)/zr, '>-', label=r"L1 Kaitlynn's norm")
    plt.plot(f, (zr-zr_lk)/zr, '<-', label="Kaitlynn's norm")
    plt.plot([f[0], f[-1]], [0.0, 0.0], '-k')
    plt.xlabel(r'$f$')
    plt.ylabel(r'$(Z_{r,\mathrm{data}} - Z_{r,\mathrm{model}})/Z_{r,\mathrm{data}}$')
    plt.xscale('log')
    plt.legend()
    plt.savefig('figs_pc/' + title + 'r_resid.png', dpi=300, bbox_inches='tight')
    plt.show()
def my_opt_fun(obj_function):
    """
    Run a single optimization of ``obj_function``: JADE differential
    evolution (global search) over the module-level ``bounds``, followed
    by a constrained Nelder-Mead polish, keeping whichever result is
    better.

    Returns a 1D array of length 6: the 5 best parameters with the best
    objective value appended.
    """
    # run differential evolution
    # solver = pyfde.ClassicDE(obj_function, n_dim=5, n_pop=25,
    #                          limits=bounds, minimize=True)
    solver = pyfde.JADE(obj_function, n_dim=5, n_pop=30,
                        limits=bounds, minimize=True)
    # randomize the JADE hyper-parameters for each independent run
    solver.c = np.random.random()
    solver.p = np.random.random()
    solver.cr = np.random.random()
    solver.f = np.random.random()*2.0
    # solver.cr, solver.f = 1.0, 0.9
    best, fit = solver.run(n_it=1000)
    # sign flip -- presumably pyfde reports negated fitness when
    # minimizing, so this recovers the true objective value; confirm
    # against the pyfde documentation
    fit = fit*-1
    # polish with constrained nelder mead simplex optimization
    res_cnm = cNM.constrNM(obj_function, best, bounds[:, 0], bounds[:, 1],
                           full_output=True, xtol=1e-15, ftol=1e-15)
    # if polish better save polish results
    if res_cnm['fopt'] < fit:
        opts = res_cnm['fopt']
        results_x = res_cnm['xopt']
        print('Polish was better')
    else:
        opts = fit
        results_x = best
        print('Polish did not help')
    return np.append(results_x, opts)
def opt_routine(obj_function, runs=50, n_proc=8):
    """
    An optimization routine which runs ``my_opt_fun`` (JADE differential
    evolution polished by constrained Nelder-Mead) ``runs`` times in
    parallel and collects the results.

    Input:
    obj_function - Your objective function to minimize
    runs (int: optional) - the number of times to repeat the optimization
        process (defaults to runs=50)
    n_proc (int: optional) - number of parallel joblib workers
        (defaults to n_proc=8)

    Output: results_x, opts, best_x, best_opt
    results_x - a 2D array of all x values from all optimization runs
    opts - a 1D array of the objective values from all optimization runs
    best_x - a 1D array of the best x values found
    best_opt - the best objective value
    """
    t0 = time()
    # each run returns the 5 parameters with the objective value appended
    opt_res = Parallel(n_jobs=n_proc)(delayed(my_opt_fun)(obj_function) for _ in range(runs))  # noqa
    opt_res = np.array(opt_res)
    results_x = opt_res[:, :5]
    opts = opt_res[:, 5]
    t1 = time()
    print('Optimization runtime %0.4f seconds' % (t1-t0))
    # find the best result; nanargmin skips failed (NaN) runs
    best_index = np.nanargmin(opts)
    best_opt = opts[best_index]
    best_x = results_x[best_index]
    print('Set of optima:', opts)
    print('Best Objective value:', best_opt)
    print('Best Paramters:', best_x, '\n')
    print('\n')
    return results_x, opts, best_x, best_opt
# -
# # Attempt sensitivity study on parasitic capacitance values
# +
# optimization bounds
# optimization bounds for (alpha, K, ren, rex, am)
bounds = np.array(((1e-2, 1e1),
                   (1e-2, 1e1),
                   (1e0, 1e4),
                   (1e0, 1e4),
                   (1e0, 1e3)))
# my data set
data_prefix = 'data/'
data_list = ['2018_02_25.csv', '2018_02_26.csv', '2018_02_27.csv',
             '2018_03_03.csv', '2018_03_10.csv']
# one result row per parasitic capacitance value: 5 parameters + objective
res_l1 = np.zeros((10, 6))
# Parasitic Capacitance Values for Wires
c_values = np.linspace(1e-12, 1000e-12, 10)
for ind, data in enumerate(data_list):
    dataset = pd.read_csv(data_prefix+data)
    # each file holds two measurement replicates in column groups of 3
    for i in range(2):
        j = i*3
        f = dataset.values[:, j]
        n = len(f)
        zr = dataset.values[:, j+1]
        zj = dataset.values[:, j+2]
        # sort the zr zj and f values
        f_ind = np.argsort(f)
        f = f[f_ind]
        zr = zr[f_ind]
        zj = zj[f_ind]
        # remove nans in zr and zj experimental data
        inds = np.where(np.isnan(np.log10(zj)))
        zj = np.delete(zj, inds)
        zr = np.delete(zr, inds)
        f = np.delete(f, inds)
        inds = np.where(np.isnan(np.log10(zr)))
        zj = np.delete(zj, inds)
        zr = np.delete(zr, inds)
        f = np.delete(f, inds)
        n = len(f)
        # run log10 optimizations once per wire capacitance value;
        # wire_c is read as a global by the objective functions
        for k, wire_c in enumerate(c_values):
            xs_l1, opts_l1, x_l1, opt_l1 = opt_routine(otto_model_L1_Kait)
            res_l1[k, :5] = x_l1
            res_l1[k, 5] = opt_l1
        # deliberate: only the first replicate of the first dataset is
        # used for this sensitivity study
        break
    break
# -
# # Visualization of the difference from 1pf to 1000 pf
# +
title = 'Blue Rat 2018 02 25'
# Nyquist plot, log-log: one fitted curve per parasitic capacitance value
plt.figure()
plt.title(title)
for i in range(10):
    zr_lk, zj_lk = otto_model_create_data(res_l1[i, :5])
    plt.plot(zr_lk, zj_lk, '-')
plt.plot(zr, zj, 'xk', label='Test Data')
plt.xlabel(r'$Z_r (\Omega)$')
plt.ylabel(r'$-Z_j (\Omega)$')
plt.xscale('log')
plt.yscale('log')
plt.legend()
# plt.savefig('figs/' + title + 'logrj.pdf', dpi=300, bbox_inches='tight')
# Nyquist plot, linear axes
plt.figure()
plt.title(title)
for i in range(10):
    zr_lk, zj_lk = otto_model_create_data(res_l1[i, :5])
    plt.plot(zr_lk, zj_lk, '-')
plt.plot(zr, zj, 'xk', label='Test Data')
plt.xlabel(r'$Z_r (\Omega)$')
plt.ylabel(r'$-Z_j (\Omega)$')
plt.legend()
# plt.savefig('figs/' + title + 'rj.pdf', dpi=300, bbox_inches='tight')
# imaginary part vs frequency, log-log
plt.figure()
plt.title(title)
for i in range(10):
    zr_lk, zj_lk = otto_model_create_data(res_l1[i, :5])
    plt.plot(f, zj_lk, '-')
plt.plot(f, zj, 'xk', label='Test Data')
plt.xlabel(r'$f$')
plt.ylabel(r'$-Z_j (\Omega)$')
plt.xscale('log')
plt.yscale('log')
plt.legend()
# plt.savefig('figs/' + title + 'logj.pdf', dpi=300, bbox_inches='tight')
# imaginary part vs frequency, linear axes
plt.figure()
plt.title(title)
for i in range(10):
    zr_lk, zj_lk = otto_model_create_data(res_l1[i, :5])
    plt.plot(f, zj_lk, '-')
plt.plot(f, zj, 'xk', label='Test Data')
plt.xlabel(r'$f$')
plt.ylabel(r'$-Z_j (\Omega)$')
plt.legend()
# plt.savefig('figs/' + title + 'j.pdf', dpi=300, bbox_inches='tight')
# real part vs frequency, log-log
plt.figure()
plt.title(title)
for i in range(10):
    zr_lk, zj_lk = otto_model_create_data(res_l1[i, :5])
    plt.plot(f, zr_lk, '-')
plt.plot(f, zr, 'xk', label='Test Data')
plt.xlabel(r'$f$')
plt.ylabel(r'$Z_r (\Omega)$')
plt.xscale('log')
plt.yscale('log')
plt.legend()
# plt.savefig('figs/' + title + 'logr.pdf', dpi=300, bbox_inches='tight')
# real part vs frequency, linear axes
plt.figure()
plt.title(title)
for i in range(10):
    zr_lk, zj_lk = otto_model_create_data(res_l1[i, :5])
    plt.plot(f, zr_lk, '-')
plt.plot(f, zr, 'xk', label='Test Data')
plt.xlabel(r'$f$')
plt.ylabel(r'$Z_r (\Omega)$')
plt.legend()
# plt.savefig('figs/' + title + 'r.pdf', dpi=300, bbox_inches='tight')
plt.show()
# -
# assemble the sensitivity-study results into a labeled table
columns = ['alpha CPE phase factor','K CPE magnitude factor', 'ren encapsulation resistance (k ohm)', 'rex extracellular resistance (k ohm)', 'am membrane area in cm**2', 'objective value']
index_rows = ['blue rat 2018_02_25 rep 1', 'blue rat 2018_02_25 rep 2', 'blue rat 2018_02_26 rep 1', 'blue rat 2018_02_26 rep 2', 'blue rat 2018_02_27 rep 1', 'blue rat 2018_02_27 rep 2',
              'blue rat 2018_03_03 rep 1', 'blue rat 2018_03_03 rep 2', 'blue rat 2018_03_10 rep 1', 'blue rat 2018_03_10 rep 2']
# NOTE(review): this overwrites the descriptive labels above with the
# capacitance values -- the list assignment above is dead code
index_rows = c_values
pd_l1 = pd.DataFrame(res_l1, index=index_rows, columns=columns)
# # L1 Norm
pd_l1
pd_l1.describe()
|
parasitic_capacitance_sensitivy_notlog10.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Datashader Demo
#
# "[hvPlot](https://hvplot.holoviz.org/#) is designed to work well in and outside the Jupyter notebook, and thanks to built-in [Datashader](http://datashader.org/) support scales easily to millions or even billions of datapoints." ([Source](https://hvplot.holoviz.org/#))
#
#
# %matplotlib inline
import pandas as pd
import geopandas as gpd
import hvplot.pandas # noqa
from geopandas import read_file
# load the demo AIS point dataset as a GeoDataFrame
gdf = read_file('data/demodata_ais.gpkg')
# keep the source CRS for later use (presumably WGS84 -- confirm)
wgs84 = gdf.crs
print("Finished reading {}".format(len(gdf)))
# %%time
# static matplotlib scatter of all geometries (slow for large data)
gdf.plot()
# %%time
# datashader-rasterized interactive plot (scales to large data)
gdf.hvplot(datashade=True)
# extract coordinates into plain columns so a plain pandas DataFrame
# can be plotted without geometry support
df = gdf.copy()
df['x'] = df.geometry.x
df['y'] = df.geometry.y
df = pd.DataFrame(df)
# %%time
df.hvplot.scatter(x='x', y='y', datashade=True)
|
tutorials/demo_datashader.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/camilorey/material_clases/blob/main/SEIR_model.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + id="BLZu_lnBNeh3" colab={"base_uri": "https://localhost:8080/", "height": 272} outputId="e48d91be-10cb-4df6-d7c5-46bab96d13dc"
# !pip install gekko
# + [markdown] id="8bk2lC-mTUbE"
# Importamos de GEKKO el módulo para resolver problemas y vamos a usar NumPy para generar horizontes de simulación.
#
# También PyPlot para poder graficar.
# + id="eUG-OWbbTRAc" colab={"base_uri": "https://localhost:8080/", "height": 71} outputId="c4211e48-186c-4106-aaf5-d3a9ef2d221a"
from gekko import GEKKO
import numpy as NumPy
import matplotlib.pyplot as PyPlot
import seaborn as SeaBorn
# + [markdown] id="cVpsQgAdO6CW"
# El modelo epidemiológico SEIR divide una población de tamaño $N$ en cuatro categorías: los susceptibles $S$ que es la población que podría infectarse con la enfermedad, las personas que fueron expuestas a la enfermedad $E$; los infectados $I$ que pueden transmitir la enfermedad y los recuperados de la enfermedad $R$. Notemos que la población total se puede considerar en cada una de estas categorías. A saber:
#
# >> $N = S+ E+ I+R$
#
# Mientras que la población con una tasa de reemplazo poblacional $\mu$ y mueren por (causas no relacionadas con la enfermedad) al cabo de $1/\nu$ años. En este sentido, tenemos que
#
# >> $\mu = \nu{N}$
#
# Vamos a considerar que los individuos de esta población tienen contacto entre sí unas $r$ veces al día y por cada contacto hay una probabilidad $\beta$ de infectarse en cada contacto.
#
# Lo interesante del modelo SEIR es que considera un periodo de incubación de la enfermedad, de modo tal que una persona puede adquirir la enfermedad, pero puede permanecer asintomático (y no infeccioso) con un tiempo medio entre incubación de $1/\epsilon$, mientras que los infectados duran un tiempo medio $1/\gamma$ de días enfermos.
#
# Notemos que para ver el progreso de una enfermedad necesitamos definir un horizonte de tiempo para calcular estos parámetros.
#
# Un parámetro importante para clasificar una enfermedad es el factor $R_0$ que determina si una enfermedad puede volverse epidemia y depende de los parámetros que hemos definido por medio de:
#
# >>$R_0 =\displaystyle r{\beta}\left(\frac{1}{\gamma + \nu}\right)\left(\frac{\epsilon}{\epsilon+\nu}\right)$
#
# Las reglas son:
#
#
# * Si $R_0<1$ la enfermedad parará y no se volverá epidemia.
# * Si $R_0>1$ la enfermedad puede desatarse dentro de la población y conformar una epidemia o bien puede establecerse dentro de la población (como la gripa).
# + id="9sk8Nb8dYuEn" colab={"base_uri": "https://localhost:8080/", "height": 136} outputId="9ecf0798-9d0e-4657-f4ca-99348fbb0ffe"
poblacionInicial = 7950000  # total population of Bogotá
esperanzaDeVida = 76.92  # average life expectancy in Colombia, in years
# population replacement (birth) rate
mu = 165
# population exit (death) rate, per the model text mu = nu * N
nu = 0.000006
# number of person-to-person contacts per day
r = 18
# probability of infection per contact
beta = 0.55
# incubation period of the disease, in days
tiempoIncubacion = 25
# incubation rate per year; NOTE(review): 365.4 days/year -- presumably
# 365.25 was intended, confirm
epsilon = 365.4/tiempoIncubacion
# duration of the illness, in days
tiempoEnfermo = 50
gamma = 365.4/tiempoEnfermo  # recovery rate per year
# basic reproduction number R_0 for the given parameters
R_0 = r*beta*(1.0/(gamma + nu))*(epsilon/(epsilon+nu))
print("población actual:","{:,.2f}".format(poblacionInicial))
print("tasa de reemplazo poblacional (mu):","{:,.2f}".format(mu))
print("tasa de salida poblacional (nu):","{:,.2f}".format(nu))
print("tasa de infección (r x beta):","{:,.2f}".format(r*beta))
print("tasa de incubación (epsilon):","{:,.2f}".format(epsilon))
print("tasa de enfermedad (gama):","{:,.2f}".format(gamma))
print("R_0 para esta enfermedad","{:,.2f}".format(R_0))
# + [markdown] id="hpaxof-1hXAk"
# Debemos considerar parámetros iniciales para poder simular este sistema. Aquí, vamos a considerar la población actual de Colombia y vamos a considerar que el 65% de la población es susceptible de ser infectada.
#
# En este escenario, consideraremos que hay 10 infectados iniciales y que además vienen otros 1000 han sido expuestos a ella. Por el momento no hay personas recuperadas de la enfermedad.
# + id="RIIhwZ8DddO3" colab={"base_uri": "https://localhost:8080/", "height": 85} outputId="4ffbe451-09cd-4dd3-e18d-2f33d30cc2e3"
S_0 = int(poblacionInicial*0.55)  # assume 55% of the initial population is susceptible
E_0 = int(poblacionInicial*0.05)  # assume 5% of the initial population is already exposed
I_0 = int(poblacionInicial*0.01)  # assume 1% of the initial population is infected
# nobody has recovered from the disease yet.
# NOTE(review): this rebinds R_0, clobbering the basic reproduction
# number computed in the previous cell -- rename one of the two; verify
# nothing downstream needs the old value
R_0 = 0
print("población susceptible inicial ", "{:,.2f}".format(S_0))
print("población expuesta inicial ", "{:,.2f}".format(E_0))
print("población infectada inicial ", "{:,.2f}".format(I_0))
print("población recuperada inicial", "{:,.2f}".format(R_0))
# + [markdown] id="ccEz38Eei24c"
# Para efectos de este ejercicio, definiremos un horizonte de simulación para ver cómo progresa la enfermedad en un periodo de 10 años y vamos a monitorearla mes a mes, esto quiere decir subdividir este intervalo de 10 años en 120 partes. Queremos ver el progreso de la enfermedad anualmente. Ya sabemos que el $R_0$ es mayor a uno por lo cual veremos la pandemia.
# + id="B7wO8fwgi8d0"
# simulation horizon: 365 evenly spaced points over [0, 2] time units.
# NOTE(review): the surrounding text describes 10 years split into 120
# monthly steps -- confirm which horizon is intended
horizonteSimulacion = NumPy.linspace(0,2,365)
# + [markdown] id="MXuW7-7wYuzg"
# De acuerdo al modelo SERIR, las variables $S$, $E$, $I$ y $R$ son en realidad funciones del tiempo, cuyas tasas de cambio se interrelacionan conformando un sistema dinámico no-lineal *autónomo* de primer orden con las siguientes ecuaciones:
#
# Las ecuaciones que gobiernan este modelo son las siguientes:
#
# >>$\displaystyle\frac{dS}{dt} = \mu -\nu{S}-r\beta\frac{SI}{N}$
#
# >>$\displaystyle\frac{dE}{dt} = r\beta\frac{SI}{N} -\epsilon{E}$
#
# >>$\displaystyle\frac{dI}{dt} = \epsilon{E}-(\gamma+\nu)I$
#
# >>$\displaystyle\frac{dR}{dt} = \gamma{I}-\nu{R}$
#
# donde las personas pasan de una categoría a otra, por lo cual estos modelos se conocen como modelos *compartamentalizados*. Vamos a utilizar GEKKO para simular este modelo.
# + id="KVqvXmLIPALO"
# build the SEIR dynamic model with GEKKO, solving locally
seirModel = GEKKO(remote=False)
seirModel.time = horizonteSimulacion
# state variables initialized to the conditions chosen above
S = seirModel.Var(value=S_0)
E = seirModel.Var(value=E_0)
I = seirModel.Var(value=I_0)
R = seirModel.Var(value=R_0)
# define the SEIR system of differential equations
seirModel.Equations([S.dt() == mu-nu*S-r*beta*S*I/(S+E+I+R),
                     E.dt() == r*beta*S*I/(S+E+I+R)-epsilon*E,
                     I.dt() == epsilon*E-(gamma+nu)*I,
                     R.dt() == gamma*I-nu*R])
# now we solve the model
seirModel.options.IMODE=4  # IMODE 4: dynamic simulation
seirModel.solve(disp=False)
# + [markdown] id="Xuof-m-pkm2Q"
# Ahora podemos graficar las curvas de solución de este sistema en el tiempo.
# + id="K7U7b0yNksL9" colab={"base_uri": "https://localhost:8080/", "height": 971} outputId="cfdb4701-110f-4a4a-fc59-cb9278fc5c5c"
import matplotlib.ticker as ticker  # NOTE(review): imported but unused here
# plot the four SEIR state trajectories, one subplot each
figure,axes = PyPlot.subplots(4,figsize=(10,15))
figure.suptitle("Resultados del modelo SEIR")
axes[0].set_title("Susceptibles")
axes[0].plot(seirModel.time,S.value,'tab:blue')
axes[1].set_title("Expuestos")
axes[1].plot(seirModel.time,E.value,'tab:red')
axes[2].set_title("Infectados")
axes[2].plot(seirModel.time,I.value,'tab:purple')
axes[3].set_title("Recuperados")
axes[3].plot(seirModel.time,R.value,'tab:green')
PyPlot.show()
# + [markdown] id="UeCHlm1F0Fz5"
#
# Una forma alternativa de aproximarnos al problema epidemiológico, es fijar las tasas de cambio de los Susceptibles, Expuestos, Infectados y Recuperados y averiguar cuanto debería ser la población
#
# Vamos a utilizar GEKKO para averiguar cual es el punto de equilibrio de este sistema.
# + id="f82tmplRlzH3" colab={"base_uri": "https://localhost:8080/", "height": 102} outputId="b05b3edc-3233-4f25-8c17-0263370d6cc0"
# steady-state style problem: maximize R subject to bounds on the
# SEIR rate expressions.
# NOTE(review): these are inequality constraints, not the dX/dt = 0
# equilibrium equations described in the text above -- confirm intent
seirEquilibrio = GEKKO(remote=False)
S = seirEquilibrio.Var(value=S_0)
E = seirEquilibrio.Var(value=E_0)
I = seirEquilibrio.Var(value=I_0)
R = seirEquilibrio.Var(value=R_0)
# define the constraint system on the SEIR rate expressions
seirEquilibrio.Equations([mu-nu*S-r*beta*S*I/(S+E+I+R) <=1,
                          r*beta*S*I/(S+E+I+R)-epsilon*E<=1,
                          epsilon*E-(gamma+nu)*I<=2,
                          gamma*I-nu*R>=1])
# now we solve the model
# set the objective: maximize R by minimizing -R
seirEquilibrio.Obj(-R)
seirEquilibrio.solve(disp=False)
# print the results
print("máximo del sistema")
print("S*","{:,.2f}".format(S.value[0]))
print("E*","{:,.2f}".format(E.value[0]))
print("I*","{:,.2f}".format(I.value[0]))
print("R*","{:,.2f}".format(R.value[0]))
|
SEIR_model.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/ridhodwidharmawan/Landsat-Classification-Using-Neural-Network/blob/ipynb/Builtup_ANN_Landsat8_exercise.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + id="SrP_HKU_TBz5" outputId="6fd542ee-9886-4105-8b73-b9765580bcd1" colab={"base_uri": "https://localhost:8080/"}
#Install Pyrsgis
# !pip install pyrsgis
# + id="_tzprFfsTB0H" outputId="4e44f7f1-41d3-407e-8a5e-7e6d8170d2f3" colab={"base_uri": "https://localhost:8080/"}
#Get dataset
# !git clone https://github.com/ridhodwidharmawan/Landsat-Classification-Using-Neural-Network
# + id="3dJxEU08TB0O" outputId="786b574b-f66a-483a-c8a6-c2dc81456107" colab={"base_uri": "https://localhost:8080/"}
import os
import numpy as np
from tensorflow import keras
from pyrsgis import raster
from pyrsgis.convert import changeDimension
from sklearn.model_selection import train_test_split
from sklearn.metrics import confusion_matrix, precision_score, recall_score
# + id="VQkL_uoHTB0Y"
# Change the directory
os.chdir("Landsat-Classification-Using-Neural-Network/dataset")
# + id="mbf8I2fJTB0f"
# Assign file names
featuredata_inp = 'LC08_120065_20180505_featuresdata.tif'
labeldata_inp = 'builtup_labeldata.tif'
unlabeleddata_inp = 'LC08_120065_20190625_unlabeleddata.tif'
# + id="A7IV_pKPTB0m"
# Read the rasters as array
ds1, featuredata = raster.read(featuredata_inp, bands='all')
ds2, labeldata = raster.read(labeldata_inp, bands=1)
ds3, unlabeleddata = raster.read(unlabeleddata_inp, bands='all')
# + id="OBnUwA6STB0t" outputId="86199deb-3012-441b-eea5-6c16d5c105c7" colab={"base_uri": "https://localhost:8080/"}
# Print the size of the arrays
print("Feature data multiband image shape: ", featuredata.shape)
print("Label data builtup area image shape: ", labeldata.shape)
print("Unlabeled data multiband image shape: ", unlabeleddata.shape)
# + id="QDv52Ox_TB01"
# Clean the labelled data to replace NoData values by zero
labeldata = (labeldata == 1).astype(int)
# + id="lvro135BTB0-"
# Reshape the array to single dimensional array
featuredata = changeDimension(featuredata)
labeldata = changeDimension (labeldata)
unlabeleddata = changeDimension(unlabeleddata)
nBands = featuredata.shape[1]
# + id="_Ie1AgNjTB1D" outputId="a8d49fdd-4e42-4bcb-be2e-2182af31d49f" colab={"base_uri": "https://localhost:8080/"}
# Print new dimension size
print("Feature data multiband image shape: ", featuredata.shape)
print("Label data builtup area image shape: ", labeldata.shape)
print("Unlabeled data multiband image shape: ", unlabeleddata.shape)
# + id="SdQRL-yZTB1K"
# Split testing and training datasets
xTrain, xTest, yTrain, yTest = train_test_split(featuredata, labeldata, test_size=0.4, random_state=42)
# + id="aGXApVISTB1P" outputId="6afbbe6c-f2ce-4442-b0d9-ab20bd3fd997" colab={"base_uri": "https://localhost:8080/"}
# Print train size
print(xTrain.shape)
print(yTrain.shape)
# + id="JMbj5x5LTB1V" outputId="af46ff6b-e72a-47ea-bed8-86de4e0a4b70" colab={"base_uri": "https://localhost:8080/"}
# Print test size
print(xTest.shape)
print(yTest.shape)
# + id="aMeEJYRHTB1e"
# Normalise the data
xTrain = xTrain / 10000
xTest = xTest / 10000
unlabeleddata = unlabeleddata / 10000
# + id="t1RqjNB_TB1l"
# Reshape the data
xTrain = xTrain.reshape((xTrain.shape[0], 1, xTrain.shape[1]))
xTest = xTest.reshape((xTest.shape[0], 1, xTest.shape[1]))
unlabeleddata = unlabeleddata.reshape((unlabeleddata.shape[0], 1, unlabeleddata.shape[1]))
# + id="NcRKMmOrTB1t" outputId="d7a7f67c-c723-424f-e0a0-00c068e1cb76" colab={"base_uri": "https://localhost:8080/"}
# Print the shape of reshaped data
print(xTrain.shape, xTest.shape, unlabeleddata.shape)
# + id="tuLYKfqVTB11"
# Define the parameters of the model
model = keras.Sequential([
keras.layers.Flatten(input_shape=(1, nBands)),
keras.layers.Dense(14, activation='relu'),
keras.layers.Dense(2, activation='softmax')])
# + id="HahRKTLGTB1-"
# Define the accuracy metrics and parameters
opt = keras.optimizers.Adam(learning_rate=0.001)
model.compile(optimizer=opt, loss="sparse_categorical_crossentropy", metrics=["accuracy"])
# + id="tq6B1159TB2C" outputId="662285d9-a01d-4f9c-9e78-62d0cb0e1f35" colab={"base_uri": "https://localhost:8080/"}
# Run the model
model.fit(xTrain, yTrain, epochs=5)
# + id="2hiH9ihYTB2K"
# Predict for test data
yTestPredicted = model.predict(xTest)
yTestPredicted = yTestPredicted[:,1]
# + id="KQshe1NXTB2O"
# Calculate and display the error metrics
yTestPredicted = (yTestPredicted>0.5).astype(int)
cMatrix = confusion_matrix(yTest, yTestPredicted)
pScore = precision_score(yTest, yTestPredicted)
rScore = recall_score(yTest, yTestPredicted)
# + id="_BHZhm4UTB2U" outputId="84b31192-952a-4a82-da13-15ce16468989" colab={"base_uri": "https://localhost:8080/"}
print("Confusion matrix: for 14 nodes\n", cMatrix)
print("\nP-Score: %.3f, R-Score: %.3f" % (pScore, rScore))
# + id="yKU1RLVJTB2b"
predicted = model.predict(unlabeleddata)
predicted = predicted[:,1]
# + id="coPt4gc3TB2g"
#Export raster
prediction = np.reshape(predicted, (ds3.RasterYSize, ds3.RasterXSize))
outFile = 'builtup_predicted.tif'
raster.export(prediction, ds3, filename=outFile, dtype='float')
# + id="zsGllvoTcHen"
|
Builtup_ANN_Landsat8_exercise.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
# --- -2 ---- -1 ---- 0 ---- 1 ---- 2 ---
# -
import numpy as np
from numpy.random import randn
# draw one sample from the standard normal distribution
randn()
# +
# --- -2 ---- -1 ---- 0 ---- 1 ---- 2 ---
# simple if: answer is only set when the draw exceeds 1,
# so this may print None
answer = None
y = randn()
if y>1:
    answer ="Greater than 1"
print(y)
print(answer)
# +
# --- -2 ---- -1 ---- 0 ---- 1 ---- 2 ---
# if/else: answer is always set
answer = None
y = randn()
if y>1:
    answer ="Greater than 1"
else:
    answer ="Less than 1"
print(y)
print(answer)
# +
# --- -2 ---- -1 ---- 0 ---- 1 ---- 2 ---
#nested statement
answer = None
y = randn()
if y>1:
    answer ="Greater than 1"
else:
    if y>= -1:
        answer= "Between -1 and 1"
    else :
        answer="Less than -1"
print(y)
print(answer)
# +
# --- -2 ---- -1 ---- 0 ---- 1 ---- 2 ---
#Chained statement (elif): equivalent to the nested version above
answer = None
y = randn()
if y>1:
    answer ="Greater than 1"
elif y>= -1:
    answer= "Between -1 and 1"
else :
    answer="Less than -1"
print(y)
|
8. Code indentation in Python-checkpoint.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# %matplotlib inline
import cvxpy as cvx
import numpy as np
import math
import matplotlib.pyplot as plt
import pandas as pd
import itertools
def get_error(sorted_data, x, p, is_int=False):
    """
    Return the quantile rank error of estimate ``x`` for quantile ``p``.

    The error is the gap, as a fraction of the dataset size, between the
    target rank ``int(p * n)`` and the range of ranks that ``x`` occupies
    in ``sorted_data``; it is 0.0 when the target rank falls inside that
    range.

    Input:
    sorted_data - 1D array-like, sorted ascending
    x - the quantile estimate being scored
    p - target quantile fraction in [0, 1]
    is_int - when True, round x to the nearest integer before scoring
    """
    if is_int:
        x = round(x)
    count = len(sorted_data)
    target_rank = int(p * count)
    # rank interval [lo, hi) occupied by x in the sorted data
    lo = np.searchsorted(sorted_data, x, side="left")
    hi = np.searchsorted(sorted_data, x, side="right")
    if hi < target_rank:
        return (target_rank - hi) * 1.0 / count
    if lo > target_rank:
        return (lo - target_rank) * 1.0 / count
    return 0.0
# candidate storage precisions in bits per value
precisions = [64, 48, 32, 24, 20, 16, 12, 8]
# Milan internet-traffic dataset: sorted values are the ground truth
# for quantile error scoring
orig_df_milan_full = pd.read_csv("../sampledata/internet-mi-2013-11.csv")
orig_data_milan_full = orig_df_milan_full["internet"]
sorted_data_milan_full = np.sort(orig_data_milan_full)
# HEPMASS dataset: first column only
orig_data_hepmass = pd.read_csv("../sampledata/hepmass_l.csv", usecols=[0]).iloc[:,0]
sorted_data_hepmass = np.sort(orig_data_hepmass)
# per-size_param plot colors; keys must match the size_param values
# present in the benchmark results
colormap = {
    7.0: "C0",
    11.0: "C1",
    19.0: "C2",
    23.0: "C3",
    # 19.0: "C4",
    # 23.0: "C5",
    # 15.0: "C6"
}
# per-size_param marker shapes, keyed the same way
markers = {
    7.0: "x",
    11.0: "^",
    19.0: "s",
    23.0: "+",
    # 19.0: "D",
    # 23.0: "o",
    # 15.0: "<"
}
def flip(items, ncol):
    """
    Reorder a row-major sequence into column-major order, so a matplotlib
    legend laid out with ``ncol`` columns reads down each column.
    """
    columns = (items[start::ncol] for start in range(ncol))
    return itertools.chain.from_iterable(columns)
# size_params = [3.0, 7.0, 11.0, 15.0, 19.0, 23.0]
# sketch size parameters to plot (legend shows k = size - 1)
size_params = [7.0, 11.0, 19.0, 23.0]
def gen_plot(df, sorted_data, name):
    """
    Plot mean quantile-estimation error against storage precision.

    Input:
    df - list of two benchmark-result DataFrames (one per dataset); an
        "error" and "rerror" column are added in place (inputs mutated)
    sorted_data - list of two sorted 1D arrays of the raw data values,
        used to score each quantile estimate
    name - list of two subplot titles

    Output: writes low_precision.pdf next to the notebook.
    """
    f, axs = plt.subplots(1,2, figsize=(6,2.3), sharey=True)
    for i in range(2):
        dft = df[i]
        # rank error of each quantile estimate against the true data
        dft["error"] = dft.apply(
            lambda r: get_error(
                sorted_data[i], r["quantile_estimate"], r["q"],
                is_int=False
            ),
            axis=1
        )
        # relative error: normalize by the smaller tail probability
        dft["rerror"] = dft["error"] / np.minimum(dft["q"],(1-dft["q"]))
        # select columns with a list: tuple indexing on a GroupBy is
        # deprecated and removed in modern pandas
        dfg = dft.groupby([
            "precision", "size_param"
        ])[["train_time", "query_time", "space", "error", "rerror",
            "bound_size"]].mean()
        dfs = dfg.reset_index().sort_values(["precision"])
        dfs = dfs[dfs["precision"] != 6.0]  # drop the 6-bit runs
        ax = axs[i]
        lines = []
        labels = []
        for size in size_params:
            c_df = dfs[dfs["size_param"] == size]
            l, = ax.plot(
                c_df["precision"],
                c_df["error"],
                label="k = " + str(int(size)-1),
                marker=markers[size],
                color=colormap[size],
                markersize=5,
                ls="dotted",
                lw=1
            )
            lines.append(l)
            labels.append("k = " + str(int(size)-1))
        ax.set_yscale('log')
        ax.set_xlabel("Precision (bits per value)")
        if i == 0:
            ax.set_ylabel("Avg Error")
        ax.set_title(name[i])
        ax.yaxis.grid()
        ax.legend(frameon=False)
    # lgd = f.legend(
    #     flip(lines, 4), flip(labels, 4),
    #     bbox_to_anchor=(0.16, 0.97, 1., .102), ncol=4, loc=2, frameon=False
    # )
    f.tight_layout()
    # plt.savefig("low_precision.pdf", bbox_extra_artists=(lgd,), bbox_inches='tight')
    plt.savefig("low_precision.pdf", bbox_inches='tight')
# merged-sketch benchmark results
df_milan_merge = pd.read_csv("../javamsketch/results/precision_merge_milan.csv")
df_hepmass_merge = pd.read_csv("../javamsketch/results/precision_merge_hepmass.csv")
_ = gen_plot([df_milan_merge, df_hepmass_merge], [sorted_data_milan_full, sorted_data_hepmass], ["milan", "hepmass"])
# # Other plots
# +
# single-sketch (non-merged) benchmark results
df_milan = pd.read_csv("../javamsketch/results/precision_milan_1526547552.csv")
df_hepmass = pd.read_csv("../javamsketch/results/precision_hepmass_1526588547.csv")
_ = gen_plot([df_milan, df_hepmass], [sorted_data_milan_full, sorted_data_hepmass], ["milan", "hepmass"])
# -
# # Results
# copy the timestamped result files to stable names.
# NOTE(review): to_csv without index=False prepends an extra index column
# on each re-save -- confirm that is intended
pd.read_csv("../javamsketch/results/precision_merge_milan_1526697391.csv").to_csv(
    "../javamsketch/results/precision_merge_milan.csv")
pd.read_csv("../javamsketch/results/precision_merge_hepmass_1526696046.csv").to_csv(
    "../javamsketch/results/precision_merge_hepmass.csv")
|
analysis/Precision Benchmark.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python (ml-agents)
# language: python
# name: ml-agents
# ---
# +
import sys; sys.path.insert(0, '..')
import gym
import sys
import torch
import numpy as np
import matplotlib.pyplot as plt
from IPython.display import clear_output, Javascript
import pytorch_drl.models.actor_critic_models as models
from notebook_utils import plot, mean_filter
from pytorch_drl.algs.trpo import TRPO
# %matplotlib inline
# %load_ext autoreload
# %autoreload 2
# -
# Prefer the first CUDA GPU when available, otherwise fall back to the CPU.
device =torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
print("Device:", device)
# ## Create environment
# +
# NOTE(review): the LunarLander assignment is immediately overridden — only
# CartPole-v0 is actually used. Delete one of these lines to switch envs.
env_name = "LunarLander-v2"
env_name = "CartPole-v0"
env = gym.make(env_name)
# Fix the environment seed so rollouts are reproducible.
env.seed(0)
state_size = env.observation_space.shape[0]
action_size = env.action_space.n
print("State size:", state_size, "\nAction size:", action_size)
# -
# ## TRPO Test
# +
# Rollout length per episode and the training budget.
tmax = 200
n_episodes = 2000
n_env = 1
# Policy and value networks (project-local architectures).
actor_net = models.ActorNetwork(state_size, action_size, env.action_space)
critic_net = models.CriticNetwork(state_size, action_size)
# TRPO hyperparameters: KL trust-region radius 1e-2 with backtracking line
# search, GAE(0.95) advantages, discount 0.99.
agent = TRPO(actor_net,
             critic_net,
             critic_lr=1e-3,
             max_kl=1e-2,
             backtrack_alpha=0.5,
             backtrack_steps=10,
             damping_coeff=0.1,
             env_id=env_name,
             gamma=0.99,
             gae_tau=0.95,
             n_env=n_env,
             device=device,
             max_grad_norm=0.5,
            )
# Stop once the average score reaches this target (CartPole-v0's solve bar).
max_score = 195.
model_name = "trpo_{}".format(env_name)
# train the agent
scores, losses = agent.train(tmax, n_episodes, env, max_score, model_name)
# plot the training:
x = np.arange(len(scores))
# mean_filter presumably smooths scores with a window of 50 — see notebook_utils.
scores = mean_filter(scores, 50)
plt.plot(x, scores, label = "scores")
plt.show()
# -
# ### Trained Agent Demonstration
agent.test(env, render=True, n_episodes=4)
|
tests/trpo_test.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Задание 4 - Перенос обучения (transfer learning) и тонкая настройка (fine-tuning)
#
# Одна из важнейших техник в тренировке сетей - использовать заранее натренированные веса на более общей задаче в качестве начальной точки, а потом "дотренировать" их на конкретной.
#
# Такой подход и убыстряет обучение, и позволяет тренировать эффективные модели на маленьких наборах данных.
#
# В этом упражнении мы натренируем классификатор, который отличает хотдоги от не хотдогов!
# (более подробно - https://www.youtube.com/watch?v=ACmydtFDTGs)
#
# Это задание требует доступа к GPU, поэтому его можно выполнять либо на компьютере с GPU от NVidia, либо в [Google Colab](https://colab.research.google.com/).
# + colab={"base_uri": "https://localhost:8080/", "height": 71} colab_type="code" id="FcXBeP1O7cnY" outputId="2b081ee6-3006-47a5-8733-ea0c317bc78e"
import json
import os
import csv
import urllib
from io import BytesIO
from PIL import Image
from socket import timeout
from google.colab import files
#from wheel.pep425tags import get_abbr_impl, get_impl_ver, get_abi_tag
#platform = '{}{}-{}'.format(get_abbr_impl(), get_impl_ver(), get_abi_tag())
#accelerator = 'cu80' if path.exists('/opt/bin/nvidia-smi') else 'cpu'
# !pip3 install -q torch torchvision
# !pip3 install -q Pillow==4.0.0
# -
# Сначала давайте скачаем данные с картинками. Это сделает код в следующей ячейке. Данные будут разделены на две части. На обучающей выборке, которая будет храниться в папке **train_kaggle**, мы будем строить наши модели, а на тестовой выборке **test_kaggle** будем предсказывать класс, к которому относится фотография (хотдог или нет).
#
# ### Если вы в Google Colab!
#
# В нем можно запускать ноутбуки с доступом к GPU. Они не очень быстрые, зато бесплатные!
# Каждый ноутбук получает свой собственный environment c доступным диском итд.
#
# Через 90 минут отсутствия активности этот environment пропадает со всеми данными.
# Поэтому нам придется скачивать данные каждый раз.
# + colab={"base_uri": "https://localhost:8080/", "height": 122} colab_type="code" id="ourBj07Arm3R" outputId="10b4ee22-fbaa-4e2f-e12d-7c3e4022f0c1"
# Download train data
# !wget "https://www.dropbox.com/s/cupinvuotopehty/train.zip?dl=0"
# !unzip -q "train.zip?dl=0"
#local_folder = "../../HotDogOrNot/content/train_kaggle/"
# Relative paths: the zips downloaded above unpack into the working directory.
train_folder = "train_kaggle/"
# Count number of files in the train folder, should be 4603
print('Number of files in the train folder', len(os.listdir(train_folder)))
# Download test data
# !wget "https://www.dropbox.com/s/7xakfl2r9gn5p1j/test.zip?dl=0"
# !unzip -q "test.zip?dl=0"
test_folder = "test_kaggle/"
# Count number of files in the test folder, should be 1150
print('Number of files in the test folder', len(os.listdir(test_folder)))
# + colab={} colab_type="code" id="NNU-OD9O9ltP"
import torch
from torchvision import models
from torch.utils.data import Dataset, SubsetRandomSampler
from torchvision import transforms
import numpy as np
import matplotlib.pyplot as plt
# %matplotlib inline
device = torch.device("cuda:0") # Let's make sure GPU is available!
# -
# # Имплементируем свой Dataset для загрузки данных
#
# В этом задании мы реализуем свой собственный класс Dataset для загрузки данных. Его цель - загрузить данные с диска и выдать по ним тензор с входом сети, меткой и идентификатором картинки (так будет проще подготовить сабмит для kaggle на тестовых данных).
#
# Вот ссылка, где хорошо объясняется как это делать на примере: https://pytorch.org/tutorials/beginner/data_loading_tutorial.html
#
# Ваш Dataset должен в качестве количества сэмплов выдать количество файлов в папке и уметь выдавать кортеж из сэмпла, метки по индексу и названия файла.
# Если название файла начинается со слов 'frankfurter', 'chili-dog' или 'hotdog' - метка положительная. Иначе отрицательная (ноль).
#
# И не забудьте поддержать возможность трансформации входа (аргумент `transforms`), она нам понадобится!
# + colab={"base_uri": "https://localhost:8080/", "height": 253} colab_type="code" id="bN2SPiJa9v5M" outputId="c5d5db7c-746c-41db-d146-30e09f7f7278"
class HotdogOrNotDataset(Dataset):
    """Dataset over the images in `folder`; items are (image, label, image_id).

    Per the assignment text above: label is 1 when the file name starts with
    'frankfurter', 'chili-dog' or 'hotdog', else 0. Still a student stub —
    both __len__ and __getitem__ raise until implemented.
    """
    def __init__(self, folder, transform=None):
        # Optional transform applied to each image inside __getitem__.
        self.transform = transform
        # TODO: Your code here! (remember `folder` and enumerate its files)
    def __len__(self):
        # TODO: should report the number of image files in the folder.
        raise Exception("Not implemented!")
    def __getitem__(self, index):
        # TODO Implement getting item by index
        # Hint: os.path.join is helpful!
        raise Exception("Not implemented!")
        # NOTE(review): unreachable until the raise above is removed; it shows
        # the intended return shape (image, label, file name).
        return img, y, img_id
def visualize_samples(dataset, indices, title=None, count=10):
    """Show up to `count` dataset samples side by side in one figure row.

    Each sample is unpacked as (image, label, _) and drawn with its label as
    the subplot title. Relies on the notebook's inline matplotlib backend to
    render the figure (no explicit plt.show()).
    """
    shown = indices[:count]
    plt.figure(figsize=(count * 3, 3))
    if title:
        plt.suptitle("%s %s/%s" % (title, len(shown), len(indices)))
    for pos, sample_idx in enumerate(shown):
        image, label, _ = dataset[sample_idx]
        plt.subplot(1, count, pos + 1)
        plt.title("Label: %s" % label)
        plt.imshow(image)
        plt.grid(False)
        plt.axis('off')
# Untransformed dataset for previewing raw images (presumably PIL images —
# depends on the __getitem__ implementation above; verify once implemented).
orig_dataset = HotdogOrNotDataset(train_folder)
# Unseeded RNG: a different random 7 samples are shown on every run.
indices = np.random.choice(np.arange(len(orig_dataset)), 7, replace=False)
visualize_samples(orig_dataset, indices, "Samples")
# + colab={"base_uri": "https://localhost:8080/", "height": 253} colab_type="code" id="mQNsUvYm4_2V" outputId="bb771beb-38bd-40ce-a935-b27841f748ca"
# Let's make sure transforms work!
# 0.9 flip probability makes the effect obvious on the same `indices` as above.
dataset = HotdogOrNotDataset(train_folder, transform=transforms.RandomVerticalFlip(0.9))
visualize_samples(dataset, indices, "Samples with flip - a lot should be flipped!")
# -
# # Создаем Dataset для тренировки
#
# И разделяем его на train и validation.
# На train будем обучать модель, на validation проверять ее качество, а соревнование Kaggle In-Class проведем на фотографиях из папки test_kaggle.
# + colab={} colab_type="code" id="YAvkoRx-9FsP"
# First, lets load the dataset
# Both datasets resize to 224x224 (the input size of torchvision's pretrained
# ResNets) and normalize with the ImageNet channel statistics linked below.
train_dataset = HotdogOrNotDataset(train_folder,
                       transform=transforms.Compose([
                           transforms.Resize((224, 224)),
                           transforms.ToTensor(),
                           # Use mean and std for pretrained models
                           # https://pytorch.org/docs/stable/torchvision/models.html
                           transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                std=[0.229, 0.224, 0.225])
                       ])
                      )
test_dataset = HotdogOrNotDataset(test_folder,
                       transform=transforms.Compose([
                           transforms.Resize((224, 224)),
                           transforms.ToTensor(),
                           # Use mean and std for pretrained models
                           # https://pytorch.org/docs/stable/torchvision/models.html
                           transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                std=[0.229, 0.224, 0.225])
                       ])
                      )
# + colab={} colab_type="code" id="YRnr8CPg7Hli"
batch_size = 64

# We split indices of train_dataset, so size the index range from
# train_dataset itself. Bug fix: the original used len(dataset) — the
# throwaway flip-demo dataset from the cell above, which only happens to wrap
# the same folder today and silently breaks if that demo cell changes.
data_size = len(train_dataset)
validation_fraction = .2

val_split = int(np.floor((validation_fraction) * data_size))
indices = list(range(data_size))

# Fixed seed so the train/validation split is reproducible across runs.
np.random.seed(42)
np.random.shuffle(indices)
val_indices, train_indices = indices[:val_split], indices[val_split:]

train_sampler = SubsetRandomSampler(train_indices)
val_sampler = SubsetRandomSampler(val_indices)

train_loader = torch.utils.data.DataLoader(train_dataset, batch_size=batch_size,
                                           sampler=train_sampler)
val_loader = torch.utils.data.DataLoader(train_dataset, batch_size=batch_size,
                                         sampler=val_sampler)
# Notice that we create test data loader in a different way. We don't have the labels.
test_loader = torch.utils.data.DataLoader(test_dataset, batch_size=batch_size)
# -
# Наши обычные функции для тренировки
# + colab={} colab_type="code" id="2ek3KVQK7hJ6"
def train_model(model, train_loader, val_loader, loss, optimizer, num_epochs):
    """Train `model` for `num_epochs` epochs, tracking per-epoch metrics.

    Args:
        model: nn.Module already on `device` (module-level global).
        train_loader, val_loader: DataLoaders yielding (x, y, image_id).
        loss: criterion mapping (logits, targets) -> scalar loss tensor.
        optimizer: optimizer over the parameters that should be updated.
        num_epochs: number of full passes over train_loader.

    Returns:
        (loss_history, train_history, val_history): per-epoch average train
        loss, train accuracy and validation accuracy as plain floats.
    """
    loss_history = []
    train_history = []
    val_history = []
    for epoch in range(num_epochs):
        model.train()  # Enter train mode (enables dropout / BN updates)

        loss_accum = 0.0
        correct_samples = 0
        total_samples = 0
        num_batches = 0
        for i_step, (x, y, _) in enumerate(train_loader):
            x_gpu = x.to(device)
            y_gpu = y.to(device)
            prediction = model(x_gpu)
            loss_value = loss(prediction, y_gpu)
            optimizer.zero_grad()
            loss_value.backward()
            optimizer.step()

            _, indices = torch.max(prediction, 1)
            correct_samples += torch.sum(indices == y_gpu)
            total_samples += y.shape[0]
            # .item() detaches the scalar; accumulating the loss tensor itself
            # would keep every batch's autograd graph alive all epoch long.
            loss_accum += loss_value.item()
            num_batches = i_step + 1

        # Bug fix: the original divided by i_step, which under-counts the
        # batch count by one and raises ZeroDivisionError when an epoch has a
        # single batch. Guard against an empty loader as well.
        ave_loss = loss_accum / max(num_batches, 1)
        train_accuracy = float(correct_samples) / total_samples
        val_accuracy = compute_accuracy(model, val_loader)

        loss_history.append(float(ave_loss))
        train_history.append(train_accuracy)
        val_history.append(val_accuracy)

        print("Average loss: %f, Train accuracy: %f, Val accuracy: %f" % (ave_loss, train_accuracy, val_accuracy))

    return loss_history, train_history, val_history
def compute_accuracy(model, loader):
    """
    Computes accuracy on the dataset wrapped in a loader

    Returns: accuracy as a float value between 0 and 1
    """
    model.eval() # Evaluation mode (disables dropout / BN updates)
    # TODO: Copy implementation from previous assignment
    # Don't forget to move the data to device before running it through the model!
    # Expected shape: iterate `loader`, argmax the model output, and compare
    # against labels — mirroring the accuracy bookkeeping in train_model.
    raise Exception("Not implemented")
# -
# # Использование заранее натренированной сети (pretrained network)
#
# Чаще всего в качестве заранее натренированной сети используется сеть, натренированная на данных ImageNet с 1M изображений и 1000 классами.
#
# PyTorch включает такие натренированные сети для различных архитектур (https://pytorch.org/docs/stable/torchvision/models.html)
# Мы будем использовать ResNet18.
#
# Для начала посмотрим, что выдает уже натренированная сеть на наших картинках. То есть, посмотрим к какому из 1000 классов их отнесет сеть.
#
# Запустите модель на 10 случайных картинках из датасета и выведите их вместе с классами с наибольшей вероятностью.
# В коде уже есть код, который формирует соответствие между индексами в выходном векторе и классами ImageNet.
# + colab={"base_uri": "https://localhost:8080/", "height": 51} colab_type="code" id="CnvXSmtyLAgz" outputId="ca961672-29d0-4055-fad2-3b4c74cd500a"
# Thanks to https://discuss.pytorch.org/t/imagenet-classes/4923/2
def load_imagenet_classes():
    """Download the ImageNet class-index JSON and return {class_index: name}."""
    url = 'https://s3.amazonaws.com/deep-learning-models/image-models/imagenet_class_index.json'
    with urllib.request.urlopen(url) as response:
        raw = json.loads(response.read())
    # Each JSON entry is "index": [wordnet_id, readable_name]; keep the name.
    return {int(index): entry[-1] for index, entry in raw.items()}
# ResNet-18 with pretrained ImageNet weights (downloaded on first use).
model = models.resnet18(pretrained=True)
# TODO: Run this model on 10 random images of your dataset and visualize what it predicts
# + [markdown] colab_type="text" id="6a-3a1ZFGEw_"
# # Перенос обучения (transfer learning) - тренировать только последний слой
#
# Существует несколько вариантов переноса обучения, мы попробуем основные.
# Первый вариант - сделать новый последний слой и тренировать только его, заморозив остальные.
# + colab={"base_uri": "https://localhost:8080/", "height": 85} colab_type="code" id="jCWMUWmr7t5g" outputId="87b511e8-7ddf-4530-d218-1f03bf03cdb0"
import torch.nn as nn
import torch.optim as optim
model = models.resnet18(pretrained=True)
# TODO: Freeze all the layers of this model and add a new output layer
# https://pytorch.org/tutorials/beginner/transfer_learning_tutorial.html
# NOTE(review): until the TODO is done, `parameters` stays None and
# optim.SGD(None, ...) raises — this cell cannot run as-is.
parameters = None # Fill the right thing here!
loss = nn.CrossEntropyLoss()
optimizer = optim.SGD( parameters, lr=0.001, momentum=0.9)
loss_history, train_history, val_history = train_model(model, train_loader, val_loader, loss, optimizer, 2)
# + [markdown] colab_type="text" id="8dDH4WfaB2Il"
# # Перенос обучения (transfer learning) - тренировать всю модель
#
# Второй вариант - добавить новый слой и обучать всю модель целиком.
# + colab={"base_uri": "https://localhost:8080/", "height": 102} colab_type="code" id="5ss0jilyvuOh" outputId="3170f126-2b7a-405a-b63b-ccd21f94c5c2"
import torch.nn as nn
import torch.optim as optim
model = models.resnet18(pretrained=True)
# TODO: Freeze all the layers of this model and add a new output layer
# https://pytorch.org/tutorials/beginner/transfer_learning_tutorial.html
# NOTE(review): here the intent is to fine-tune the whole model, so
# `parameters` should cover all model parameters (see the linked tutorial);
# as-is optim.SGD(None, ...) raises.
parameters = None # Fill the right thing here!
loss = nn.CrossEntropyLoss()
optimizer = optim.SGD( parameters, lr=0.001, momentum=0.9)
loss_history, train_history, val_history = train_model(model, train_loader, val_loader, loss, optimizer, 5)
# + [markdown] colab_type="text" id="meQt_vDCs9cc"
# # Перенос обучения (transfer learning) - разные скорости обучения для разных слоев
#
# И наконец последний вариант, который мы рассмотрим - использовать разные скорости обучения для новых и старых слоев
# + colab={"base_uri": "https://localhost:8080/", "height": 102} colab_type="code" id="evro9ksXGs9u" outputId="e4f5aca7-2e1b-4972-e061-fe9109fbeb1f"
import torch.nn as nn
import torch.optim as optim
model = models.resnet18(pretrained=True)
# TODO: Freeze all the layers of this model and add a new output layer
# https://pytorch.org/tutorials/beginner/transfer_learning_tutorial.html

loss = nn.CrossEntropyLoss()

# Train new layer with learning speed 0.001 and old layers with 0.0001
# (use the optimizer's per-parameter-group lr options for this).
optimizer = None

# Bug fix: this cell builds `model`, but the original passed `model_conv` —
# a name never defined in this notebook — so the cell died with NameError
# even once the TODOs above were filled in.
loss_history, train_history, val_history = train_model(model, train_loader, val_loader, loss, optimizer, 5)
# -
# # Визуализируем метрики и ошибки модели
#
# Попробуем посмотреть, где модель ошибается - визуализируем ложные срабатывания (false positives) и ложноотрицательные срабатывания (false negatives).
#
# Для этого мы прогоним модель через все примеры и сравним ее с истинными метками (ground truth).
# + colab={} colab_type="code" id="ieEzZUglJAUB"
from torch.utils.data.sampler import Sampler
class SubsetSampler(Sampler):
    r"""Sequential (non-shuffling) sampler over a fixed list of indices.

    Unlike SubsetRandomSampler, the indices are yielded exactly in the
    order given, which keeps predictions aligned with their dataset rows.

    Arguments:
        indices (ndarray): indices of the samples to take
    """

    def __init__(self, indices):
        self.indices = indices

    def __iter__(self):
        # Yield the stored indices in order; iter() over the sequence is
        # behaviorally identical to a hand-written generator over range(len).
        return iter(self.indices)

    def __len__(self):
        return len(self.indices)
def evaluate_model(model, dataset, indices):
    """
    Computes predictions and ground truth labels for the indices of the dataset

    Returns:
    predictions: np array of booleans of model predictions
    grount_truth: np array of boolean of actual labels of the dataset
    """
    model.eval() # Evaluation mode
    # TODO: Evaluate model on the list of indices and capture predictions
    # and ground truth labels
    # Hint: SubsetSampler above could be useful!
    raise Exception("Not implemented")
    # NOTE(review): unreachable until the raise above is removed; it documents
    # the intended return pair.
    return predictions, ground_truth
predictions, gt = evaluate_model(model_conv, train_dataset, val_indices)
# + [markdown] colab_type="text" id="r0bcioK6JBDK"
# И теперь можно визуализировать false positives и false negatives.
# + colab={"base_uri": "https://localhost:8080/", "height": 469} colab_type="code" id="WMmaPfdeKk9H" outputId="c162d02d-385c-4994-df8e-0844c2969b9f"
# TODO: Compute indices of the false positives on the validation set.
# Note those have to be indices of the original dataset
# NOTE(review): until computed, passing None into visualize_samples fails at
# indices[:count] with a TypeError.
false_positive_indices = None
visualize_samples(orig_dataset, false_positive_indices, "False positives")

# TODO: Compute indices of the false negatives on the validation set.
# Note those have to be indices of the original dataset
false_negatives_indices = None
visualize_samples(orig_dataset, false_negatives_indices, "False negatives")
# + colab={"base_uri": "https://localhost:8080/", "height": 34} colab_type="code" id="JoDeVjN4HZSV" outputId="c43261ad-524b-4a5f-ba53-fdce19c9f840"
import sklearn.metrics as metrics
def binary_classification_metrics(prediction, ground_truth):
    """Return (precision, recall, f1) for boolean prediction/ground-truth arrays."""
    # TODO: Implement this function!
    # We already did this in assignment1 (sklearn.metrics can also compute it).
    # NOTE(review): as written this raises NameError — precision/recall/f1 are
    # returned without ever being computed above.
    return precision, recall, f1
precision, recall, f1 = binary_classification_metrics(predictions, gt)
# Bug fix: the original substituted (precision, recall, f1) into the labels
# "F1, P, R", so every metric was printed under the wrong name.
print("F1: %4.3f, P: %4.3f, R: %4.3f" % (f1, precision, recall))
# + [markdown] colab_type="text" id="u_O9qiYySvuj"
# # Что будет в конце вы уже поняли
#
# Натренируйте лучшую модель на основе `resnet18`, меняя только процесс тренировки.
# Выбирайте лучшую модель по F1 score.
#
# Как всегда, не забываем:
# - побольше агментаций!
# - перебор гиперпараметров
# - различные оптимизаторы
# - какие слои тюнить
# - learning rate annealing
# - на какой эпохе останавливаться
#
# Наша цель - довести F1 score на validation set до значения, большего **0.9**.
# + colab={} colab_type="code" id="i6mhfdQ9K-N3"
# TODO: Train your best model!
# `best_model` should end up holding the trained module chosen by val F1 score.
best_model = None
# + colab={"base_uri": "https://localhost:8080/", "height": 34} colab_type="code" id="Y6xExdw8JB1l" outputId="8cdf205e-14ad-4013-924a-449d06cc3eec"
# Let's check how it performs on validation set!
# NOTE(review): `dataset` here is the flip-transform demo dataset, not the
# normalized `train_dataset` the split indices were built for — confirm which
# dataset is intended before submitting.
predictions, ground_truth = evaluate_model(best_model, dataset, val_indices)
precision, recall, f1 = binary_classification_metrics(predictions, ground_truth)
# Bug fix: the original substituted (precision, recall, f1) into the labels
# "F1, P, R", so every metric was printed under the wrong name.
print("F1: %4.3f, P: %4.3f, R: %4.3f" % (f1, precision, recall))
# TODO: Visualize training curve for the best model
# -
# ## Визуализируйте ошибки лучшей модели
# + colab={} colab_type="code" id="BFUeNOm1VACr"
# TODO Visualize false positives and false negatives of the best model on the validation set
# -
# # Необязательное задание с большой звездочкой
#
# Поучаствуйте в Kaggle In-Class Hot Dog Recognition Challenge!
# Это соревнование сделано специально для курса и в нем участвуют только те, кто проходит курс.
#
# В нем участники соревнуются в качестве натренированных моделей, загружая на сайт предсказания своих моделей на тестовой выборке. Разметка тестовой выборке участникам недоступна.
# Более подробно о правилах соревнования ниже.
#
# Те, кто проходят курс лично, за высокое место в соревновании получат дополнительные баллы.
#
# Вот ссылка на соревнование:
# https://www.kaggle.com/c/hotdogornot
# Accumulators for the Kaggle submission: file names and predicted labels.
image_id = []
predictions = []
model.eval()
for x,_,id_img in test_loader:
    # TODO: Predict labels (1 = hotdog present, 0 = no hotdog) for the batch.
    # Append each image id and its predicted label to the lists above;
    # an image id is the picture's file name, e.g. '10000.jpg'.
    pass
# +
# Так можно создать csv файл, чтобы затем загрузить его на kaggle
# Ожидаемый формат csv-файла:
# image_id,label
# 10000.jpg,1
# 10001.jpg,1
# 10002.jpg,0
# 10003.jpg,1
# 10004.jpg,0
# Per the csv module docs, files passed to csv.writer must be opened with
# newline='' so the writer controls row endings; without it, Windows output
# gets a blank line after every row (and Kaggle may reject the file).
with open('subm.csv', 'w', newline='') as submissionFile:
    writer = csv.writer(submissionFile)
    # Header row expected by the competition, then one (image_id, label) pair
    # per test image.
    writer.writerow(['image_id', 'label'])
    writer.writerows(zip(image_id, predictions))
# -
# А так можно скачать файл с Google Colab
files.download('subm.csv')
# ### Небольшое введение в Kaggle для тех, кто не слышал об этой платформе раньше
# В основе своей Kaggle - это платформа для проведения соревнований по машинному обучению. Появилась она в 2010 и, пожалуй, стала самой популярной и известной из всех существующих площадок по машинному обучению. Надо сказать, что Kaggle - это не только соревнования, но и сообщество людей, увлеченных машинным обучением. А судя по Википедии, в 2017 году отметка зарегистрированных пользователей перевалила за миллион. Есть там и обучающие материалы, возможность задавать вопросы, делиться кодом и идеями - просто мечта.
#
# ### Как проходят соревнования?
# Обычно участники скачивают данные для обучения моделей (train data), чтобы затем делать предсказания на тестовых данных (test data). Обучающая выборка содержит как сами данные, так и правильные метки (значения зависимой переменной), чтобы можно было обучить модель. Но тестовые данные ответа не содержат - и нашей целью является предсказание меток по имеющимся данным. Файл с ответами для каждого наблюдения из тестовой выборки загружается на Kaggle и оценивается в соответствии с выбранной метрикой соревнования, а результат является публичным и показывается в общей таблице (ее называют еще лидербордом - leaderboard) - чтобы появилось желание посоревноваться и создать еще более сильную модель. В "настоящих" соревнованиях, которые проходят на Kaggle, есть и денежное вознаграждение для тех участников, кто занимает первые места на лидерборде. Например, в [этом](https://www.kaggle.com/c/zillow-prize-1#description) соревновании, человек, занявший первое место, получил около 1 000 000 долларов.
#
# Тестовые данные делятся случайным образом в некоторой пропорции. И пока соревнование идет, на лидерборде показываются очки и рейтинг участников только по одной части (Public Leaderboard). А вот когда соревнование заканчивается, то рейтинг участников составляется по второй части тестовых данных (Private Leaderboard). И часто можно видеть, как люди занимавшие первые места на публичной части тестовых данных, оказываются далеко не первыми на закрытой части тестовых данных. Зачем это сделано? Есть несколько причин, но, пожалуй, самой фундаментальной является идея недообучения-переобучения. Всегда возможно, что наша модель настроилась на конкретную выборку, но как она поведет себя на тех данных, которые еще не видела? Разбиение тестовых данных на публичную и скрытую части сделано для того, чтобы отобрать модели, которые имеют большую обобщающую способность. Одним из лозунгов участников соревнований стал "Доверяйте своей локальной кросс-валидации" (Trust your CV!). Есть очень большой соблазн оценивать свою модель по публичной части лидерборда, но лучшей стратегией будет выбирать ту модель, которая дает лучшую метрику на кросс-валидации на обучающей выборке.
#
# В нашем соревновании публичная часть лидерборда составляет 30%, а скрытая 70%. Вы можете делать до двух попыток в день, а оцениваться попытки будут по F1-мере. Удачи и доверяйте своей локальной валидации! В конце соревнования у вас будет возможность выбрать 2 из всех совершенных попыток - лучшая из этих двух и будет засчитана вам на скрытой части тестовых данных.
|
assignments/assignment4/.ipynb_checkpoints/HotdogOrNot-checkpoint.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
#hide
from fastdata.core import *
# # Fastdata
# The goal of the `fastdata` library is enable **fast data analysis** with `pandas`. It is part of the [Eigendata](http://eigendata.co) ecosystem.
# ## Install
# `pip install fastdata`
|
index.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] id="36EdAGhThQov"
# # Building, Training and Evaluating Models with TensorFlow Decision Forests
#
# ## Overview
#
# In this lab, you use TensorFlow Decision Forests (TF-DF) library for the training, evaluation, interpretation and inference of Decision Forest models.
#
# ## Learning Objective
#
# In this notebook, you learn how to:
#
# 1. Train a binary classification Random Forest on a dataset containing numerical, categorical and missing features.
# 2. Evaluate the model on a test dataset and prepare the model for [TensorFlow Serving](https://www.tensorflow.org/tfx/guide/serving).
# 3. Examine the overall structure of the model and the importance of each feature.
# 4. Re-train the model with a different learning algorithm (Gradient Boosted Decision Trees) and use a different set of input features.
# 5. Change the hyperparameters of the model.
# 6. Preprocess the features and train a model for regression.
# 7. Train a model for ranking.
#
# ## Introduction
#
# This tutorial shows how to use TensorFlow Decision Forests (TF-DF) library for the training, evaluation, interpretation and inference of Decision Forest models.
#
# Decision Forests (DF) are a large family of Machine Learning algorithms for supervised classification, regression and ranking. As the name suggests, DFs use decision trees as a building block. Today, the two most popular DF training algorithms are [Random Forests](https://en.wikipedia.org/wiki/Random_forest) and [Gradient Boosted Decision Trees](https://en.wikipedia.org/wiki/Gradient_boosting). Both algorithms are ensemble techniques that use multiple decision trees, but differ on how they do it.
#
# Each learning objective will correspond to a __#TODO__ in this student lab notebook -- try to complete this notebook first and then review the [solution notebook](../solutions/train_models_with_tensorFlow_decision_forests.ipynb).
# + [markdown] id="jK9tCTcwqq4k"
# ## Installing TensorFlow Decision Forests
#
# Install TF-DF by running the following cell.
# + id="Pa1Pf37RhEYN"
# Install the specified package
# !pip install tensorflow_decision_forests
# + [markdown] id="jK9tCTcwqq4k"
# **Please ignore incompatible errors.**
# + [markdown] id="vZGda2dOe-hH"
# Install [Wurlitzer](https://pypi.org/project/wurlitzer/) to display
# the detailed training logs. This is only needed in colabs.
# + id="lk26uBSCe8Du"
# Install the specified package
# !pip install wurlitzer
# + [markdown] id="3oinwbhXlggd"
# ## Importing libraries
# + id="52W45tmDjD64"
# Import necessary libraries
import tensorflow_decision_forests as tfdf
import os
import numpy as np
import pandas as pd
import tensorflow as tf
import math
# Prefer wurlitzer for piping C-level training logs into the notebook; fall
# back to the Google-internal capture utility when it is not installed.
try:
    from wurlitzer import sys_pipes
# Bug fix: the original bare `except:` also swallowed KeyboardInterrupt and
# unrelated errors raised during import; only a failed import should fall back.
except ImportError:
    from colabtools.googlelog import CaptureLog as sys_pipes
from IPython.core.magic import register_line_magic
from IPython.display import Javascript
# + [markdown] id="0LPPwWxYxtDM"
# The hidden code cell limits the output height in colab.
#
# + cellView="form" id="2AhqJz3VmQM-"
# Some of the model training logs can cover the full
# screen if not compressed to a smaller viewport.
# This magic allows setting a max height for a cell.
@register_line_magic
def set_cell_height(size):
    """Line magic (%set_cell_height N): cap the current Colab output cell
    at `size` pixels by injecting a setIframeHeight JavaScript call."""
    display(
        Javascript("google.colab.output.setIframeHeight(0, true, {maxHeight: " +
            str(size) + "})"))
# + id="8gVQ-txtjFU4"
# Check the version of TensorFlow Decision Forests
# (also confirms the tfdf import actually works in this runtime).
print("Found TensorFlow Decision Forests v" + tfdf.__version__)
# + [markdown] id="QGRtRECujKeu"
# ## Training a Random Forest model
#
# In this section, we train, evaluate, analyse and export a binary classification Random Forest trained on the [Palmer's Penguins](https://allisonhorst.github.io/palmerpenguins/articles/intro.html) dataset.
#
# <center>
# <img src="https://allisonhorst.github.io/palmerpenguins/man/figures/palmerpenguins.png" width="150"/></center>
#
# **Note:** The dataset was exported to a csv file without pre-processing: `library(palmerpenguins); write.csv(penguins, file="penguins_toy.csv", quote=F, row.names=F)`.
# + [markdown] id="3qsSU1RfmNiP"
# ### Load the dataset and convert it in a tf.Dataset
# + [markdown] id="9nJ5igfElg2I"
# This dataset is very small (300 examples) and stored as a .csv-like file. Therefore, use Pandas to load it.
#
# **Note:** Pandas is practical as you don't have to type in name of the input features to load them. For larger datasets (>1M examples), using the
# [TensorFlow Dataset](https://www.tensorflow.org/api_docs/python/tf/data/Dataset) to read the files may be better suited.
#
# Let's assemble the dataset into a csv file (i.e. add the header), and load it:
# + id="44Jq6g_mJFmj"
# Download the dataset
# !gsutil cp gs://cloud-training/mlongcp/v3.0_MLonGC/toy_data/penguins_toy.csv /tmp/penguins.csv
# Load a dataset into a Pandas Dataframe.
# The csv was copied to /tmp by the gsutil command above.
dataset_df = pd.read_csv("/tmp/penguins.csv")
# Display the first 3 examples.
dataset_df.head(3)
# + [markdown] id="23AewWT1lkIK"
# The dataset contains a mix of numerical (e.g. `bill_depth_mm`), categorical
# (e.g. `island`) and missing features. TF-DF supports all these feature types natively (differently than NN based models), therefore there is no need for preprocessing in the form of one-hot encoding, normalization or extra `is_present` feature.
#
# Labels are a bit different: Keras metrics expect integers. The label (`species`) is stored as a string, so let's convert it into an integer.
# + id="uO_jz2sj0IBZ"
# Encode the categorical label into an integer.
#
# Details:
# This stage is necessary if your classification label is represented as a
# string. Note: Keras expected classification labels to be integers.
# Name of the label column.
label = "species"

# Distinct class names in first-appearance order; a name's position in this
# list becomes its integer code (Keras metrics expect integer labels).
classes = dataset_df[label].unique().tolist()
print(f"Label classes: {classes}")

# Replace every string label by its positional code.
dataset_df[label] = dataset_df[label].map(lambda name: classes.index(name))
# + [markdown] id="vwJjLFhbtozI"
# Next split the dataset into training and testing:
# + id="u7DEIxn2oB3U"
# Split the dataset into a training and a testing dataset.
def split_dataset(dataset, test_ratio=0.30, seed=None):
    """Splits a pandas dataframe in two.

    Each row is independently assigned to the test split with probability
    ``test_ratio``, so the actual split sizes are random.

    Args:
        dataset: The dataframe to split.
        test_ratio: Expected fraction of rows in the test split.
        seed: Optional integer. When provided, the split is reproducible.
            When None (the default), the global NumPy random state is used,
            which matches the original (non-deterministic) behavior.

    Returns:
        A (train, test) tuple of dataframes.
    """
    if seed is not None:
        # Dedicated generator so a seeded call does not disturb global state.
        draws = np.random.default_rng(seed).random(len(dataset))
    else:
        draws = np.random.rand(len(dataset))
    test_indices = draws < test_ratio
    return dataset[~test_indices], dataset[test_indices]
# Apply the split; by default ~30% of the rows go to the test set.
train_ds_pd, test_ds_pd = split_dataset(dataset_df)
print("{} examples in training, {} examples for testing.".format(
    len(train_ds_pd), len(test_ds_pd)))
# + [markdown] id="uWq7uQcCuBzO"
# And finally, convert the pandas dataframe (`pd.Dataframe`) into tensorflow datasets (`tf.data.Dataset`):
# + id="qtXgUBKluTX0"
train_ds = tfdf.keras.pd_dataframe_to_tf_dataset(train_ds_pd, label=label)
test_ds = tfdf.keras.pd_dataframe_to_tf_dataset(test_ds_pd, label=label)
# + [markdown] id="BRKLWIWNuOZ1"
# **Notes:** `pd_dataframe_to_tf_dataset` could have converted the label to integer for you.
#
# And, if you wanted to create the `tf.data.Dataset` yourself, there is a couple of things to remember:
#
# - The learning algorithms work with a one-epoch dataset and without shuffling.
# - The batch size does not impact the training algorithm, but a small value might slow down reading the dataset.
#
# + [markdown] id="mYAoyfYtqHG4"
# ### Train the model
# + id="xete-FbuqJCV"
# %set_cell_height 300
# Specify the model.
model_1 = tfdf.keras.RandomForestModel()
# Optionally, add evaluation metrics.
model_1.compile(
metrics=["accuracy"])
# Train the model.
# "sys_pipes" is optional. It enables the display of the training logs.
# TODO
with sys_pipes():
model_1.fit(x=train_ds)
# + [markdown] id="OBnjxdip-MC0"
# ### Remarks
#
# - No input features are specified. Therefore, all the columns will be used as
# input features except for the label. The feature used by the model are shown
# in the training logs and in the `model.summary()`.
# - DFs consume natively numerical, categorical, categorical-set features and
# missing-values. Numerical features do not need to be normalized. Categorical
# string values do not need to be encoded in a dictionary.
# - No training hyper-parameters are specified. Therefore the default
# hyper-parameters will be used. Default hyper-parameters provide
# reasonable results in most situations.
# - Calling `compile` on the model before the `fit` is optional. Compile can be
# used to provide extra evaluation metrics.
# - Training algorithms do not need validation datasets. If a validation dataset
# is provided, it will only be used to show metrics.
#
# **Note:** A *Categorical-Set* feature is composed of a set of categorical values (while a *Categorical* is only one value). More details and examples are given later.
# + [markdown] id="tSdtNJUArBpl"
# ## Evaluate the model
# + [markdown] id="Udtu_uS1paSu"
# Let's evaluate our model on the test dataset.
# + id="xUy4ULEMtDXB"
# Evaluate the trained model on the held-out test dataset and print each
# metric with 4 decimal places.
evaluation = model_1.evaluate(test_ds, return_dict=True)

print()
for name in evaluation:
    print(f"{name}: {evaluation[name]:.4f}")
# + [markdown] id="tlhfzZ34pfO4"
# **Remark:** The test accuracy is close to the Out-of-bag accuracy
# shown in the training logs.
#
# See the **Model Self Evaluation** section below for more evaluation methods.
# + [markdown] id="mHBFtUeElRYz"
# ## Prepare this model for TensorFlow Serving.
# + [markdown] id="JbC4lmgfr5Sm"
# Export the model to the SavedModel format for later re-use e.g.
# [TensorFlow Serving](https://www.tensorflow.org/tfx/guide/serving).
#
# + id="08YWGr9U2fza"
# Export the trained model in TensorFlow SavedModel format for later re-use
# (e.g. with TensorFlow Serving).
model_1.save("/tmp/my_saved_model")
# + [markdown] id="6-8R02_SXpbq"
# ## Plot the model
#
# Plotting a decision tree and following the first branches helps learning about decision forests. In some cases, plotting a model can even be used for debugging.
#
# Because of the difference in the way they are trained, some models are more interesting to plot than others. Because of the noise injected during training and the depth of the trees, plotting a Random Forest is less informative than plotting a CART or the first tree of a Gradient Boosted Tree.
#
# Nevertheless, let's plot the first tree of our Random Forest model:
# + id="KUIxf8N6Yjl0"
# Plot the first tree of the Random Forest, limited to a depth of 3 levels.
tfdf.model_plotter.plot_model_in_colab(model_1, tree_idx=0, max_depth=3)
# + [markdown] id="cPcL_hDnY7Zy"
# The root node on the left contains the first condition (`bill_depth_mm >= 16.55`), number of examples (240) and label distribution (the red-blue-green bar).
#
# Examples that evaluates true to `bill_depth_mm >= 16.55` are branched to the green path. The other ones are branched to the red path.
#
# The deeper the node, the more `pure` they become i.e. the label distribution is biased toward a subset of classes.
#
# **Note:** Hover the mouse over the plot for details.
# + [markdown] id="-ob3ovQ2seVY"
# ## Model structure and feature importance
#
# The overall structure of the model is shown with `.summary()`. You will see:
#
# - **Type**: The learning algorithm used to train the model (`Random Forest` in
# our case).
# - **Task**: The problem solved by the model (`Classification` in our case).
# - **Input Features**: The input features of the model.
# - **Variable Importance**: Different measures of the importance of each
# feature for the model.
# - **Out-of-bag evaluation**: The out-of-bag evaluation of the model. This is a
# cheap and efficient alternative to cross-validation.
# - **Number of {trees, nodes} and other metrics**: Statistics about the
# structure of the decisions forests.
#
# **Remark:** The summary's content depends on the learning algorithm (e.g.
# Out-of-bag is only available for Random Forest) and the hyper-parameters (e.g.
# the *mean-decrease-in-accuracy* variable importance can be disabled in the
# hyper-parameters).
# + id="kzXME28Lq7Il"
# Print the overall structure of the model: type, task, input features,
# variable importances, out-of-bag evaluation and tree statistics.
# %set_cell_height 300
model_1.summary()
# + [markdown] id="d4ApRpUm02zU"
# The information in ``summary`` is all available programmatically using the model inspector:
# + id="G3xuB3jN1Cww"
# The input features, as detected by the model.
model_1.make_inspector().features()
# + id="BZ2RBbU51L6s"
# The feature importances (same data as in the summary, but programmatic).
model_1.make_inspector().variable_importances()
# + [markdown] id="0zvyRJVk1aEk"
# The content of the summary and the inspector depends on the learning algorithm (`tfdf.keras.RandomForestModel` in this case) and its hyper-parameters (e.g. `compute_oob_variable_importances=True` will trigger the computation of Out-of-bag variable importances for the Random Forest learner).
# + [markdown] id="tFVmrHtWXYKY"
# ## Model Self Evaluation
#
# During training TFDF models can self evaluate even if no validation dataset is provided to the `fit()` method. The exact logic depends on the model. For example, Random Forest will use Out-of-bag evaluation while Gradient Boosted Trees will use internal train-validation.
#
# **Note:** While this evaluation is computed during training, it is NOT computed on the training dataset and can be used as a low quality evaluation.
#
# The model self evaluation is available with the inspector's `evaluation()`:
# + id="BZPzyIMmYmsI"
# The model self-evaluation (out-of-bag evaluation for a Random Forest),
# computed during training without needing a separate validation dataset.
model_1.make_inspector().evaluation()
# + [markdown] id="vBSz-jE0Qss_"
# ## Plotting the training logs
#
# The training logs show the quality of the model (e.g. accuracy evaluated on the out-of-bag or validation dataset) according to the number of trees in the model. These logs are helpful to study the balance between model size and model quality.
#
# The logs are available in multiple ways:
#
# 1. Displayed during training if `fit()` is wrapped in `with sys_pipes():` (see example above).
# 1. At the end of the model summary i.e. `model.summary()` (see example above).
# 1. Programmatically, using the model inspector i.e. `model.make_inspector().training_logs()`.
# 1. Using [TensorBoard](https://www.tensorflow.org/tensorboard)
#
# Let's try the options 2 and 3:
#
# + id="ZbRk7xvpTKQG"
# %set_cell_height 150
# The per-tree training logs (accuracy / loss vs. number of trees).
model_1.make_inspector().training_logs()
# + [markdown] id="WynFJCEbhuF_"
# Let's plot it:
# + id="xzPH7Gggh0g1"
# Plot the out-of-bag accuracy and logloss as a function of the number of
# trees in the model.
import matplotlib.pyplot as plt
logs = model_1.make_inspector().training_logs()

# The x axis (number of trees) is shared by both subplots.
tree_counts = [log.num_trees for log in logs]

plt.figure(figsize=(12, 4))

plt.subplot(1, 2, 1)
plt.plot(tree_counts, [log.evaluation.accuracy for log in logs])
plt.xlabel("Number of trees")
plt.ylabel("Accuracy (out-of-bag)")

plt.subplot(1, 2, 2)
plt.plot(tree_counts, [log.evaluation.loss for log in logs])
plt.xlabel("Number of trees")
plt.ylabel("Logloss (out-of-bag)")

plt.show()
# + [markdown] id="w1xzugBRhwuN"
# This dataset is small. You can see the model converging almost immediately.
#
# Let's use TensorBoard:
# + id="5R_m-JmvU9tu"
# This cell starts TensorBoard, which can be slow.
# Load the TensorBoard notebook extension
# %load_ext tensorboard
# Google internal version
# # %load_ext google3.learning.brain.tensorboard.notebook.extension
# + id="j6mp7K6HWwqQ"
# Clear existing results (if any)
# !rm -fr "/tmp/tensorboard_logs"
# + id="16NbLILYo124"
# Export the model meta-data (training logs) to tensorboard.
model_1.make_inspector().export_to_tensorboard("/tmp/tensorboard_logs")
# + id="TSsN6aTXW0LJ"
# docs_infra: no_execute
# Start a tensorboard instance pointed at the exported logs.
# %tensorboard --logdir "/tmp/tensorboard_logs"
# + [markdown] id="r_tlSccjZ8kE"
# <!-- <img class="tfo-display-only-on-site" src="images/beginner_tensorboard.png"/> -->
#
# + [markdown] id="phTUr6F1t-_E"
# ## Re-train the model with a different learning algorithm
#
# The learning algorithm is defined by the model class. For
# example, `tfdf.keras.RandomForestModel()` trains a Random Forest, while
# `tfdf.keras.GradientBoostedTreesModel()` trains a Gradient Boosted Decision
# Trees.
#
# The learning algorithms are listed by calling `tfdf.keras.get_all_models()` or in the
# [learner list](https://github.com/google/yggdrasil-decision-forests/manual/learners).
# + id="OwEAAzUZq2m8"
# List all the available learning algorithms (model classes).
tfdf.keras.get_all_models()
# + [markdown] id="xmzvuI78voD4"
# The description of the learning algorithms and their hyper-parameters are also available in the [API reference](https://www.tensorflow.org/decision_forests/api_docs/python/tfdf) and builtin help:
# + id="2hONToBav4DE"
# help works anywhere.
help(tfdf.keras.RandomForestModel)
# # ? only works in ipython or notebooks, it usually opens on a separate panel.
# tfdf.keras.RandomForestModel?
# + [markdown] id="PuWEYvXaiwhk"
# ## Using a subset of features
#
# The previous example did not specify the features, so all the columns were used
# as input feature (except for the label). The following example shows how to
# specify input features.
# + id="sgn_LnRz3M7z"
# Restrict the model to two input features.
feature_1 = tfdf.keras.FeatureUsage(name="bill_length_mm")
feature_2 = tfdf.keras.FeatureUsage(name="island")

all_features = [feature_1, feature_2]

# Note: This model is only trained with two features. It will not be as good as
# the one trained on all features.
model_2 = tfdf.keras.GradientBoostedTreesModel(
    features=all_features, exclude_non_specified_features=True)
model_2.compile(metrics=["accuracy"])

model_2.fit(x=train_ds, validation_data=test_ds)
print(model_2.evaluate(test_ds, return_dict=True))
# + [markdown] id="zvM84cgCmbUR"
# **Note:** As expected, the accuracy is lower than previously.
# + [markdown] id="MFmqpivc7x7p"
# **TF-DF** attaches a **semantics** to each feature. This semantics controls how
# the feature is used by the model. The following semantics are currently supported:
#
# - **Numerical**: Generally for quantities or counts with full ordering. For
# example, the age of a person, or the number of items in a bag. Can be a
# float or an integer. Missing values are represented with float(Nan) or with
# an empty sparse tensor.
# - **Categorical**: Generally for a type/class in finite set of possible values
# without ordering. For example, the color RED in the set {RED, BLUE, GREEN}.
# Can be a string or an integer. Missing values are represented as "" (empty
# string), value -2 or with an empty sparse tensor.
# - **Categorical-Set**: A set of categorical values. Great to represent
# tokenized text. Can be a string or an integer in a sparse tensor or a
# ragged tensor (recommended). The order/index of each item doesn't matter.
#
# If not specified, the semantics is inferred from the representation type and shown in the training logs:
#
# - int, float (dense or sparse) → Numerical semantics.
# - str (dense or sparse) → Categorical semantics
# - int, str (ragged) → Categorical-Set semantics
#
# In some cases, the inferred semantics is incorrect. For example: An Enum stored as an integer is semantically categorical, but it will be detected as numerical. In this case, you should specify the semantic argument in the input. The `education_num` field of the Adult dataset is classical example.
#
# This dataset doesn't contain such a feature. However, for the demonstration, we will make the model treat the `year` as a categorical feature:
# + id="RNRIwLYC8zrp"
# Define the features; force the "year" column to be treated as CATEGORICAL
# even though it is stored as an integer (which would otherwise be inferred
# as Numerical).
# %set_cell_height 300
feature_1 = tfdf.keras.FeatureUsage(name="year", semantic=tfdf.keras.FeatureSemantic.CATEGORICAL)
feature_2 = tfdf.keras.FeatureUsage(name="bill_length_mm")
feature_3 = tfdf.keras.FeatureUsage(name="sex")
all_features = [feature_1, feature_2, feature_3]

model_3 = tfdf.keras.GradientBoostedTreesModel(features=all_features, exclude_non_specified_features=True)
model_3.compile( metrics=["accuracy"])

with sys_pipes():
    model_3.fit(x=train_ds, validation_data=test_ds)
# + [markdown] id="2AQaNwihcpP7"
# Note that `year` is in the list of CATEGORICAL features (unlike the first run).
# + [markdown] id="GYrw7nKN40Vm"
# ## Hyper-parameters
#
# **Hyper-parameters** are parameters of the training algorithm that impact
# the quality of the final model. They are specified in the model class
# constructor. The list of hyper-parameters is visible with the *question mark* colab command (e.g. `?tfdf.keras.GradientBoostedTreesModel`).
#
# Alternatively, you can find them on the [TensorFlow Decision Forest Github](https://github.com/tensorflow/decision-forests/keras/wrappers_pre_generated.py) or the [Yggdrasil Decision Forest documentation](https://github.com/google/yggdrasil_decision_forests/documentation/learners).
#
# The default hyper-parameters of each algorithm match approximately the initial publication paper. To ensure consistency, new features and their matching hyper-parameters are always disabled by default. That's why it is a good idea to tune your hyper-parameters.
# + id="vHgPr4Pt43hv"
# A classical but slightly more complex model: deeper trees grown best-first.
model_6 = tfdf.keras.GradientBoostedTreesModel(
    num_trees=500, growing_strategy="BEST_FIRST_GLOBAL", max_depth=8)
model_6.fit(x=train_ds)
# + id="uECgPGDc2P4p"
# A more complex, but possibly, more accurate model: additionally uses
# sparse oblique splits and random categorical splits.
model_7 = tfdf.keras.GradientBoostedTreesModel(
    num_trees=500,
    growing_strategy="BEST_FIRST_GLOBAL",
    max_depth=8,
    split_axis="SPARSE_OBLIQUE",
    categorical_algorithm="RANDOM",
)
model_7.fit(x=train_ds)
# + [markdown] id="Xk7wEmUZu3V0"
# As new training methods are published and implemented, combinations of hyper-parameters can emerge as good or almost-always-better than the default parameters. To avoid changing the default hyper-parameter values these good combinations are indexed and available as hyper-parameter templates.
#
# For example, the `benchmark_rank1` template is the best combination on our internal benchmarks. Those templates are versioned to allow training configuration stability e.g. `benchmark_rank1@v1`.
# + id="LtrRhMhj3hSu"
# Train with a predefined, versioned hyper-parameter template.
model_8 = tfdf.keras.GradientBoostedTreesModel(hyperparameter_template="benchmark_rank1")
model_8.fit(x=train_ds)
# + [markdown] id="FSDXcKXB3u6M"
# The available templates are listed by `predefined_hyperparameters`. Note that different learning algorithms have different templates, even if the name is similar.
# + id="MQrWI2iv37Bo"
# The hyper-parameter templates of the Gradient Boosted Tree model.
print(tfdf.keras.GradientBoostedTreesModel.predefined_hyperparameters())
# + [markdown] id="gcX4tov1_lwp"
# ## Feature Preprocessing
#
# Pre-processing features is sometimes necessary to consume signals with complex
# structures, to regularize the model or to apply transfer learning.
# Pre-processing can be done in one of three ways:
#
# 1. Preprocessing on the Pandas dataframe. This solution is easy to implement
# and generally suitable for experimentation. However, the
# pre-processing logic will not be exported in the model by `model.save()`.
#
# 2. [Keras Preprocessing](https://keras.io/guides/preprocessing_layers/): While
# more complex than the previous solution, Keras Preprocessing is packaged in
# the model.
#
# 3. [TensorFlow Feature Columns](https://www.tensorflow.org/tutorials/structured_data/feature_columns):
# This API is part of the TF Estimator library (!= Keras) and planned for
# deprecation. This solution is interesting when using existing preprocessing
# code.
#
# Note: Using [TensorFlow Hub](https://www.tensorflow.org/hub)
# pre-trained embedding is often, a great way to consume text and image with
# TF-DF. For example, `hub.KerasLayer("https://tfhub.dev/google/nnlm-en-dim128/2")`. See the [Intermediate tutorial](intermediate_colab.ipynb) for more details.
#
# In the next example, pre-process the `body_mass_g` feature into `body_mass_kg = body_mass_g / 1000`. The `bill_length_mm` is consumed without pre-processing. Note that such
# monotonic transformations have generally no impact on decision forest models.
# + id="tGcIvTeKAApp"
# %set_cell_height 300

# Keras functional pre-processing: convert body_mass_g (grams) to kilograms.
body_mass_g = tf.keras.layers.Input(shape=(1,), name="body_mass_g")
body_mass_kg = body_mass_g / 1000.0

# bill_length_mm is passed through unchanged.
bill_length_mm = tf.keras.layers.Input(shape=(1,), name="bill_length_mm")

raw_inputs = {"body_mass_g": body_mass_g, "bill_length_mm": bill_length_mm}
processed_inputs = {"body_mass_kg": body_mass_kg, "bill_length_mm": bill_length_mm}

# "preprocessor" contains the preprocessing logic.
preprocessor = tf.keras.Model(inputs=raw_inputs, outputs=processed_inputs)

# "model_4" contains both the pre-processing logic and the decision forest.
model_4 = tfdf.keras.RandomForestModel(preprocessing=preprocessor)
model_4.fit(x=train_ds)

model_4.summary()
# + [markdown] id="h1Bx3Feyjb2o"
# The following example re-implements the same logic using TensorFlow Feature
# Columns.
# + id="fnwe3sBt-yJk"
def g_to_kg(x):
    """Converts grams to kilograms."""
    return x / 1000

# Same preprocessing as above, expressed with TF Feature Columns
# (part of the TF Estimator library, planned for deprecation).
feature_columns = [
    tf.feature_column.numeric_column("body_mass_g", normalizer_fn=g_to_kg),
    tf.feature_column.numeric_column("bill_length_mm"),
]

preprocessing = tf.keras.layers.DenseFeatures(feature_columns)

model_5 = tfdf.keras.RandomForestModel(preprocessing=preprocessing)
model_5.compile(metrics=["accuracy"])
model_5.fit(x=train_ds)
# + [markdown] id="9vif6gsAjfzv"
# ## Training a regression model
#
# The previous example trains a classification model (TF-DF does not differentiate
# between binary classification and multi-class classification). In the next
# example, train a regression model on the
# [Abalone dataset](https://archive.ics.uci.edu/ml/datasets/abalone). The
# objective of this dataset is to predict the number of shell's rings of an
# abalone.
#
# **Note:** The csv file is assembled by appending UCI's header and data files. No preprocessing was applied.
#
# <center>
# <img src="https://upload.wikimedia.org/wikipedia/commons/thumb/3/33/LivingAbalone.JPG/800px-LivingAbalone.JPG" width="200"/></center>
# + id="0uKI_Uy7RyWN"
# Download the abalone dataset and load it with Pandas.
# !gsutil cp gs://cloud-training/mlongcp/v3.0_MLonGC/toy_data/abalone_raw_toy.csv /tmp/abalone.csv
dataset_df = pd.read_csv("/tmp/abalone.csv")
print(dataset_df.head(3))
# + id="_gjrquQySU7Q"
# Split the abalone dataset into a training and testing dataset.
train_ds_pd, test_ds_pd = split_dataset(dataset_df)
print("{} examples in training, {} examples for testing.".format(
    len(train_ds_pd), len(test_ds_pd)))

# Name of the label column (number of rings, the regression target).
label = "Rings"

# Bug fix: the test dataset must be built from test_ds_pd. It previously
# reused train_ds_pd, so the model was "evaluated" on its own training
# examples and the test metrics were meaningless.
train_ds = tfdf.keras.pd_dataframe_to_tf_dataset(train_ds_pd, label=label, task=tfdf.keras.Task.REGRESSION)
test_ds = tfdf.keras.pd_dataframe_to_tf_dataset(test_ds_pd, label=label, task=tfdf.keras.Task.REGRESSION)
# + id="t8fUhQKISqYT"
# %set_cell_height 300

# Configure the regression model.
# NOTE(review): this rebinds the name "model_7" (previously a classification
# GBT above) — confirm the shadowing is intentional.
model_7 = tfdf.keras.RandomForestModel(task = tfdf.keras.Task.REGRESSION)

# Optional.
model_7.compile(metrics=["mse"])

# Train the model.
with sys_pipes():
    model_7.fit(x=train_ds)
# + id="aSriIAaMSzwA"
# Evaluate the regression model on the test dataset and report MSE / RMSE.
evaluation = model_7.evaluate(test_ds, return_dict=True)

print(evaluation)
print()
print(f"MSE: {evaluation['mse']}")
print(f"RMSE: {math.sqrt(evaluation['mse'])}")
# + [markdown] id="S54mR6i9jkhp"
# ## Training a ranking model
#
# Finally, after having trained a classification and a regression model, train a [ranking](https://en.wikipedia.org/wiki/Learning_to_rank) model.
#
# The goal of a ranking is to **order** items by importance. The "value" of
# relevance does not matter directly. Ranking a set of *documents* with regard to
# a user *query* is an example of ranking problem: It is only important to get the right order, where the top documents matter more.
#
# TF-DF expects for ranking datasets to be presented in a "flat" format. A
# document+query dataset might look like that:
#
# query | document_id | feature_1 | feature_2 | relevance/label
# ----- | ----------- | --------- | --------- | ---------------
# cat | 1 | 0.1 | blue | 4
# cat | 2 | 0.5 | green | 1
# cat | 3 | 0.2 | red | 2
# dog | 4 | NA | red | 0
# dog | 5 | 0.2 | red | 1
# dog | 6 | 0.6 | green | 1
#
# The *relevance/label* is a floating point numerical value between 0 and 5
# (generally between 0 and 4) where 0 means "completely unrelated", 4 means "very
# relevant" and 5 means "the same as the query".
#
# Interestingly, decision forests are often good rankers, and many
# state-of-the-art ranking models are decision forests.
#
# In this example, use a sample of the
# [LETOR3](https://www.microsoft.com/en-us/research/project/letor-learning-rank-information-retrieval/#!letor-3-0)
# dataset. More precisely, we want to download the `OHSUMED.zip` from [the LETOR3 repo](https://onedrive.live.com/?authkey=%21ACnoZZSZVfHPJd0&id=8FEADC23D838BDA8%21107&cid=8FEADC23D838BDA8). This dataset is stored in the
# libsvm format, so we will need to convert it to csv.
# + id="axD6x1ZivHCS"
# %set_cell_height 200

# Download and extract the LETOR3 archive (libsvm-formatted ranking data).
archive_path = tf.keras.utils.get_file("letor.zip",
    "https://download.microsoft.com/download/E/7/E/E7EABEF1-4C7B-4E31-ACE5-73927950ED5E/Letor.zip",
    extract=True)

# Path to the train and test dataset using libsvm format.
raw_dataset_path = os.path.join(os.path.dirname(archive_path),"OHSUMED/Data/All/OHSUMED.txt")
# + [markdown] id="rcManr98ZGID"
# The dataset is stored as a .txt file in a specific format, so first convert it into a csv file.
# + id="mkiM9HJox-e8"
def convert_libsvm_to_csv(src_path, dst_path):
    """Converts a libsvm ranking dataset into a flat csv file.

    The csv header ("relevance,group,f_<id>,...") is derived from the feature
    ids of the first input row; every row is written as
    "<relevance>,g_<group>,<value>,...".

    Note: This code is specific to the LETOR3 dataset.

    Args:
        src_path: Path to the libsvm-formatted input file.
        dst_path: Path of the csv file to write (overwritten if it exists).
    """
    # Context managers close both files even on error; the original leaked
    # the input handle and never closed the output on exceptions.
    with open(src_path, "r") as src_handle, open(dst_path, "w") as dst_handle:
        first_line = True
        for src_line in src_handle:
            # Note: The last 3 space-separated items are comments.
            items = src_line.split(" ")[:-3]
            relevance = items[0]
            group = items[1].split(":")[1]
            features = [ item.split(":") for item in items[2:]]
            if first_line:
                # Csv header
                dst_handle.write("relevance,group," + ",".join(["f_" + feature[0] for feature in features]) + "\n")
                first_line = False
            dst_handle.write(relevance + ",g_" + group + "," + (",".join([feature[1] for feature in features])) + "\n")
# Convert the dataset.
# !gsutil cp gs://cloud-training/mlongcp/v3.0_MLonGC/toy_data/ohsumed_toy.csv /tmp/ohsumed.csv
csv_dataset_path="/tmp/ohsumed.csv"
# NOTE(review): this conversion overwrites the toy csv downloaded just above
# with a conversion of the full raw file — confirm this is intended.
convert_libsvm_to_csv(raw_dataset_path, csv_dataset_path)

# Load a dataset into a Pandas Dataframe.
dataset_df = pd.read_csv(csv_dataset_path)

# Display the first 3 examples.
dataset_df.head(3)
# + id="wB7bWAja1G-o"
# Split the ranking dataframe into training and testing sets.
train_ds_pd, test_ds_pd = split_dataset(dataset_df)
print("{} examples in training, {} examples for testing.".format(
    len(train_ds_pd), len(test_ds_pd)))

# Display the first 3 examples of the training dataset.
train_ds_pd.head(3)
# + [markdown] id="YQKqN9zN4L00"
# In this dataset, the `relevance` defines the ground-truth rank among rows of the same `group`.
# + id="5QMbBkCEXxu_"
# Name of the relevance and grouping columns.
relevance = "relevance"

# Bug fix: the test dataset must be built from test_ds_pd. It previously
# reused train_ds_pd, so any evaluation on ranking_test_ds would have been
# performed on training examples.
ranking_train_ds = tfdf.keras.pd_dataframe_to_tf_dataset(train_ds_pd, label=relevance, task=tfdf.keras.Task.RANKING)
ranking_test_ds = tfdf.keras.pd_dataframe_to_tf_dataset(test_ds_pd, label=relevance, task=tfdf.keras.Task.RANKING)
# + id="Ba1gb75SX1rr"
# %set_cell_height 400

# Define the ranking model: a Gradient Boosted Trees model trained with a
# ranking loss, grouping rows by the "group" (query) column.
model_8 = tfdf.keras.GradientBoostedTreesModel(
    task=tfdf.keras.Task.RANKING,
    ranking_group="group",
    num_trees=50)

with sys_pipes():
    model_8.fit(x=ranking_train_ds)
# + [markdown] id="spZCfxfR3VK0"
# At this point, keras does not propose any ranking metrics. Instead, the training and validation (a GBDT uses a validation dataset) are shown in the training
# logs. In this case the loss is `LAMBDA_MART_NDCG5`, and the final (i.e. at
# the end of the training) NDCG (normalized discounted cumulative gain) is `0.510136` (see line `Final model valid-loss: -0.510136`).
#
# Note that the NDCG is a value between 0 and 1. The larger the NDCG, the better
# the model. For this reason, the loss is set to -NDCG.
#
# As before, the model can be analysed:
# + id="L4N1R8fM4jFh"
# Print the summary of the ranking model.
# %set_cell_height 400
model_8.summary()
# -
|
courses/machine_learning/deepdive2/introduction_to_tensorflow/solutions/train_models_with_tensorFlow_decision_forests.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
The Sparks Foundation – GRIP – Data Science and Business Analytics
TASK 1: Prediction using supervised ML
Author: <NAME>
Dataset used: Student Scores
# +
import pandas as pd
import numpy as np
import lux #new data visualization Library
import matplotlib.pyplot as plt
# %matplotlib inline
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LinearRegression
# -
# Load the student-scores dataset (hours studied vs. percentage scored).
path= "http://bit.ly/w-data"
s_data = pd.read_csv(path)
print("The given dataset: ")
s_data.head(25)
# Plotting the given data in 2-D to understand the distribution of data and
# the relation between scores and study hours.
s_data.plot(x='Hours', y='Scores', style='o')
plt.title('Hours vs Percentage')
plt.xlabel('Hours Studied(hr)-->')
plt.ylabel('Percentage Score(%)-->')
plt.show()
# Select features (all columns but the last: Hours) and target (Scores).
X = s_data.iloc[:, :-1].values
y = s_data.iloc[:, 1].values
# Splitting the data values into training (80%) and testing (20%) samples,
# with a fixed random_state for reproducibility.
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y,test_size=0.2, random_state=0)
# +
# Fit an ordinary least-squares linear regression on the training samples.
from sklearn.linear_model import LinearRegression
regressor = LinearRegression()
regressor.fit(X_train, y_train)

print("Training complete.")
# +
# Plotting the regression line: y = coef * x + intercept.
line = regressor.coef_*X+regressor.intercept_

# Plot the fitted line over the full dataset.
plt.scatter(X, y)
plt.plot(X, line);
plt.show()
# -
print(X_test) # Testing data - In Hours
y_pred = regressor.predict(X_test) # Predicting the scores

# Comparing Actual vs Predicted scores side by side.
df = pd.DataFrame({'Actual': y_test, 'Predicted': y_pred})
df
# Plot actual vs. predicted percentage for each test student.
df.plot(kind= "line")
plt.title("Graph for Actual Percentage vs Predicted Percentage")
plt.xlabel("Student No. -->")
plt.ylabel("Percentage Scored -->")
plt.show()
# Predict the score for a given amount of study time.
hours= 9.25
# Fix: use the "hours" variable instead of a duplicated hard-coded 9.25, so
# the prediction stays consistent if the input value is changed.
fin_pred= round(regressor.predict([[hours]])[0],2)
print("No. of hours studied= {}".format(hours))
print("Predicted score= {}".format(fin_pred))
from sklearn import metrics
# Bug fix: the mean absolute error must compare the true test labels
# (y_test) with the predictions — not the input features (X_test), which
# produced a meaningless value.
print('Mean Absolute Error:', metrics.mean_absolute_error(y_test, y_pred))
|
Simple Linear Regression task1.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Lecture 4: Optimization Using Python - SciPy
# In this lecture / tutorial, we will learn how to solve some simple optimization problems using Python, focusing on the specific optimization library ```scipy.optimize```.
#
# ***
# ## Learning goals
# - Obtain an overview of optimization problems that can be easily solved using SciPy.
# - Learn the syntax to solve some simple optimization problems using SciPy
# - Test your understanding by solving a few of the practice problems in each section.
# ***
# # Prerequisites for running this notebook
#
# You should have Python 3.6 installed on your computer, with all necessary packages installed.
#
# We recommend that you install Anaconda (Python 3.6 version) from the following links depending on your OS:
# - For Windows: https://www.anaconda.com/download/#windows
# - For macOS: https://www.anaconda.com/download/#macos
# - For Linux: https://www.anaconda.com/download/#linux
#
# **If you are not using Anaconda, it is your responsibility to make sure that Python and all necessary packages are correctly installed and configured to be able to run this notebook.**
#
# ***
#
# Once Anaconda is installed, open a **Terminal** (if you are using macOS / Linux), or **Anaconda Prompt** (if you are using Windows), and then create a new Python environment called **cme193**, by running the following command:<br>
# > ```conda create -n cme193 python=3.6```
#
# Next, change to the newly created virtual environment by running the command:
#
# On Windows
# > ```activate cme193``` <br>
#
# On macOS or Linux
# > ```source activate cme193```
#
# Next install all the necessary packages by running the following commands:
#
# > ```conda install nb_conda``` <br>
# > ```conda install -c anaconda scipy``` <br>
# > ```conda install -c conda-forge matplotlib``` <br>
#
# Now navigate to the directory containing this .ipynb file, from inside the terminal, and start jupyter notebook by typing the following command:
# > ```jupyter notebook```
#
# You should now be able to launch the .ipynb file from the browser. For more information on jupyter notebooks, read the <a href="https://jupyter-notebook.readthedocs.io/en/stable/notebook.html" style="text-decoration: none;">user documentation</a>.
# ***
# # 1. Introduction to scipy.optimize
#
# In this section we will learn how to solve some simple optimization problems using ```scipy```. The ```scipy.optimize``` package already gives us a lot of basic tools to solve a wide variety of important optimization problems. For more information please read the <a href="https://docs.scipy.org/doc/scipy/reference/tutorial/optimize.html" style="text-decoration: none;">documentation</a>.
#
# We can import the module as follows (henceforth to be referred to as ```sciopt```). We also import some other modules we will use in this notebook.
import scipy.optimize as sciopt
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from matplotlib import cm
from matplotlib.ticker import LinearLocator, FormatStrFormatter
# ***
# ## 1.1 Solving a linear program
#
# The first example we will look at is that of solving a **linear program (LP)**. A linear program is any optimization problem of the following form:
#
# $$
# \begin{equation}
# \begin{split}
# \text{minimize} \;\; & c^{T}x \\
# \text{subject to} \;\; & A_{ub}x \leq b_{ub} \\
# & A_{eq}x = b_{eq}
# \end{split}
# \end{equation}
# $$
#
# where $c, x \in \mathbb{R}^n$, $A_{ub} \in \mathbb{R}^{m \times n}$, $A_{eq} \in \mathbb{R}^{p \times n}$, $b_{ub} \in \mathbb{R}^{m}$, and $b_{eq} \in \mathbb{R}^{p}$. It should be noted that all LP can be put in this form.
#
# ```scipy.optimize``` provides a simple function ```scipy.optimize.linprog``` to solve such problems, which is documented <a href="https://docs.scipy.org/doc/scipy-0.15.1/reference/generated/scipy.optimize.linprog.html#scipy.optimize.linprog" style="text-decoration: none;">here</a>. Currently, the only algorithms that are implemented are the **simplex method** and the **interior point method**. We will demonstrate its usage using a few examples.
# ***
# ### Example 1.1.1
# Let us consider the problem
#
# $$
# \begin{equation}
# \begin{split}
# \text{minimize} \;\; & x_1 + 2 x_2 \\
# \text{subject to} \;\; & x_1 \leq 1 \\
# & 5 x_1 + x_2 \geq 0
# \end{split}
# \end{equation}
# $$
#
# In order to solve it, we first need to transform it to the form that ```scipy.optimize.linprog``` requires. The problem is clearly equivalent to
#
# $$
# \begin{equation}
# \begin{split}
# \text{minimize} \;\; & x_1 + 2 x_2 \\
# \text{subject to} \;\; & x_1 \leq 1 \\
# & -5 x_1 - x_2 \leq 0
# \end{split}
# \end{equation}
# $$
#
# The following Python code then solves this problem.
# Problem data for Example 1.1.1: minimize c^T x  s.t.  A_ub @ x <= b_ub.
c = np.asarray([1, 2])                 # objective coefficients
A_ub = np.asarray([[1, 0], [-5, -1]])  # rows: x1 <= 1 and -5*x1 - x2 <= 0
b_ub = np.asarray([1, 0])              # inequality right-hand sides
bounds = ((None, None), (None, None))  # both variables are free (no sign constraint)
# Solve and let the notebook echo the returned OptimizeResult.
sciopt.linprog(c=c, A_ub=A_ub, b_ub=b_ub, bounds=bounds)
# +
# Visualize the objective of Example 1.1.1 over its feasible region.
x1 = np.linspace(-10, 10, 100)
x2 = np.linspace(-10, 10, 100)
x1_grid, x2_grid = np.meshgrid(x1, x2)

def f(x):
    """Objective of Example 1.1.1: f(x) = x1 + 2*x2 (x is a pair of grids)."""
    return x[0] + 2 * x[1]

y_grid = f((x1_grid, x2_grid))
# Feasible region: x1 <= 1 and -5*x1 - x2 <= 0.
mask = ((x1_grid <= 1) & (-5 * x1_grid - x2_grid <= 0))
# Blank out infeasible points. Use np.nan explicitly rather than None
# (assigning None relies on NumPy's implicit None -> nan conversion for
# float arrays and is easy to misread).
y_grid[~mask] = np.nan
plt.imshow(y_grid, extent=(-10, 10, 10, -10))
plt.colorbar()
# +
# Problem data: minimize x1 + 2*x2  s.t.  x1 <= 1,  -5*x1 - x2 <= 0.
c = [1, 2]
A_ub = [[1, 0], [-5, -1]]
b_ub = [1, 0]
bounds = ((None, None), (None, None))  # both variables unbounded
# Solve the LP with the default solver and keep the full result object.
result = sciopt.linprog(c=c, A_ub=A_ub, b_ub=b_ub, bounds=bounds)
# -
# Display the full OptimizeResult.
print(result)
# ***
# Notice that we must explicitly set the ```bounds``` parameter in the above problem. If we don't pass this parameter, the default assumption is that the variables are non-negative.
#
# You can additionally pass the parameter ```options={"disp": True}``` to print convergence messages from the solver. **Solver method specific parameters can also be passed as optional parameters in** ```options```.
# Re-solve the LP, this time asking the solver to print convergence messages.
result = sciopt.linprog(c=c, A_ub=A_ub, b_ub=b_ub, bounds=bounds, options={"disp": True})
# Pull the optimal objective value and minimizer out of the OptimizeResult
# (attribute access is equivalent to the dict-style lookup).
obj_optimal = result.fun
x = result.x
print("Optimal solution: x1 = ", x[0], ", x2 = ", x[1])
print("Optimal value = ", obj_optimal)
# ***
# ### Example 1.1.2
# Let us change the problem by adding an equality constraint
#
# $$
# \begin{equation}
# \begin{split}
# \text{minimize} \;\; & x_1 + 2 x_2 \\
# \text{subject to} \;\; & x_1 \leq 1 \\
# & 5 x_1 + x_2 \geq 0 \\
# & x_1 + x_2 = 3.
# \end{split}
# \end{equation}
# $$
#
# In order to solve it, we first need to transform it to the form that ```scipy.optimize.linprog``` requires. The problem is clearly equivalent to
#
# $$
# \begin{equation}
# \begin{split}
# \text{minimize} \;\; & x_1 + 2 x_2 \\
# \text{subject to} \;\; & x_1 \leq 1 \\
# & -5 x_1 - x_2 \leq 0 \\
# & x_1 + x_2 = 3.
# \end{split}
# \end{equation}
# $$
#
# The following Python code then solves this problem.
# +
# Define problem parameters for Example 1.1.2:
#   minimize x1 + 2*x2  s.t.  x1 <= 1,  -5*x1 - x2 <= 0,  x1 + x2 = 3.
c = [1, 2]
A_ub = [[1, 0], [-5, -1]]
b_ub = [1, 0]
A_eq = [[1, 1]]
b_eq = [3]
bounds = ((None, None), (None, None))
# Solve the LP.
# NOTE: method="interior-point" was deprecated in SciPy 1.9 and removed in
# SciPy 1.11, so the call now uses the default "highs" solver; for this LP
# both return the same unique optimum.
print("Solving the LP")
result = sciopt.linprog(c=c, A_ub=A_ub, b_ub=b_ub, A_eq=A_eq, b_eq=b_eq, bounds=bounds, options={"disp": True})
# Extract the solution and print it.
obj_optimal = result['fun']
x = result['x']
print("\n")
print("Optimal solution: x1 = ", x[0], ", x2 = ", x[1])
print("Optimal value = ", obj_optimal)
# -
# #### Alternate way of solving the problem
# Notice that the inequality constraint ```x1 <= 1``` is a **bound constraint**. Hence, an alternate way to solve **Example 1.1.2** is as follows:
# +
# Same LP as Example 1.1.2, but the constraint x1 <= 1 is expressed as a
# variable bound instead of a row of A_ub.
c = [1, 2]
A_ub = [[-5, -1]]    # only -5*x1 - x2 <= 0 remains a general inequality
b_ub = [0]
A_eq = [[1, 1]]      # equality constraint x1 + x2 = 3
b_eq = [3]
bounds = ((None, 1), (None, None))  # x1 <= 1 moved into the bounds
# Solve the LP
print("Solving the LP")
result = sciopt.linprog(c=c, A_ub=A_ub, b_ub=b_ub, A_eq=A_eq, b_eq=b_eq, bounds=bounds, options={"disp": True})
# Report the minimizer and the optimal objective value.
obj_optimal = result.fun
x = result.x
print("\n")
print("Optimal solution: x1 = ", x[0], ", x2 = ", x[1])
print("Optimal value = ", obj_optimal)
# -
# ***
# ### InClass Exercise 1
# Here is another interesting example. Consider the following optimization problem
#
# $$
# \begin{equation}
# \begin{split}
# \text{minimize} \;\; & \max \{|x_1|, |x_2|, |x_3|\} \\
# \text{subject to} \;\; & x_1 + x_2 + x_3 \geq 1.
# \end{split}
# \end{equation}
# $$
#
# It is easy to show that this problem is equivalent to the problem (this is called the **epigraph form** of the problem)
#
# $$
# \begin{equation}
# \begin{split}
# \text{minimize} \;\; & s \\
# \text{subject to} \;\; & |x_1| \leq s \\
# & |x_2| \leq s \\
# & |x_3| \leq s \\
# & s \geq 0 \\
# & x_1 + x_2 + x_3 \geq 1
# \end{split}
# \end{equation}
# $$
#
# where the minimization is now over the variables $x_1, x_2, x_3,$ and $s$.
#
# As before we need to change this problem into a form that is suitable for ```scipy.optimize.linprog```. The problem can be written equivalently as
#
# $$
# \begin{equation}
# \begin{split}
# \text{minimize} \;\; & s \\
# \text{subject to} \;\; & x_1 - s \leq 0 \\
# & x_2 - s \leq 0 \\
# & x_3 - s \leq 0 \\
# & - x_1 - s \leq 0 \\
# & - x_2 - s \leq 0 \\
# & - x_3 - s \leq 0 \\
# & - x_1 - x_2 - x_3 \leq -1 \\
# & s \geq 0 .
# \end{split}
# \end{equation}
# $$
# +
# Write your solution here
# -
# ***
# ## 1.2 Minimum weight matching in bipartite graphs
#
# Given an (undirected) **complete bipartite graph** $G = (V_1, V_2, E)$, with an edge cost function $C : E \rightarrow \mathbb{R}$, the goal is to find a minimum weight **matching** $M \subset E$ that covers the smaller of the two sets $V_1$ or $V_2$. Thus $V_1$ and $V_2$ need not be of the same sizes. $G$ being complete bipartite graph means that there is an edge $e \in E$ between every pair of vertices $v_1 \in V_1$, and $v_2 \in V_2$. A matching refers to a selection of edges such that no vertex is covered more than once. This problem is also known as the **linear sum assignment** problem.
#
# Let $|V_1| = N_1$, and $|V_2| = N_2$, and without loss of generality assume that $N_1 \leq N_2$. If we index the vertices in $V_1$ by $i$, and those in $V_2$ by $j$, then $e_{ij}$ will refer to the edge between $i$ and $j$, and similarly $C_{ij}$ will refer to the cost of the edge $e_{ij}$. Let $X_{ij}$ be a boolean $\{0,1\}$ variable that indicates whether edge $e_{ij}$ is selected or not. Then our goals can be represented by the following optimization problem:
#
# $$
# \begin{equation}
# \begin{split}
# \text{minimize} \;\; & \sum_{i=1}^{N_1} \sum_{j=1}^{N_2} C_{ij} X_{ij} \\
# \text{subject to} \;\; & X_{ij} \in \{0, 1\}, \;\; \forall \;\; i, j \\
# & \sum_{j=1}^{N_2} X_{ij} = 1, \;\; \forall \;\; i \\
# & \sum_{i=1}^{N_1} X_{ij} \leq 1, \;\; \forall \;\; j.
# \end{split}
# \end{equation}
# $$
#
# ```scipy.optimize``` provides an inbuilt function ```scipy.optimize.linear_sum_assignment``` that solves exactly this problem, which is documented <a href="https://docs.scipy.org/doc/scipy/reference/generated/scipy.optimize.linear_sum_assignment.html#scipy.optimize.linear_sum_assignment" style="text-decoration: none;">here</a>. The algorithm used to solve this problem is the famous **Hungarian algorithm**, also known as the **Kuhn-Munkres algorithm**, although it was discovered in 2006 that <a href="https://en.wikipedia.org/wiki/Carl_Gustav_Jacob_Jacobi" style="text-decoration: none;">Carl Gustav Jacob Jacobi</a> had solved the problem in the 1840s (published only posthumously in 1890).
# ***
# Let us see an example.
#
# ### Example 1.2.1
# Consider the following $C$ matrix
#
# $$
# C =
# \begin{bmatrix}
# 2 & 1 & -1 & 1 \\
# 4 & 5 & -2 & -3 \\
# 1 & 2 & -1 & 5 \\
# -2 & 3 & 4 & 0
# \end{bmatrix}
# \;\;.
# $$
#
# This problem is easily solved using the following Python code.
# +
# Cost matrix C for the assignment problem of Example 1.2.1.
cost_matrix = [[2, 1, -1, 1], [4, 5, -2, -3], [1, 2, -1, 5], [-2, 3, 4, 0]]
# Solve with the Hungarian algorithm; row_ind[k] is matched to col_ind[k].
print("Solving the linear sum assignment problem")
row_ind, col_ind = sciopt.linear_sum_assignment(cost_matrix=cost_matrix)
# Show the raw index arrays.
print("\n")
print("Row index : ", row_ind)
print("Col index : ", col_ind)
# Walk the matched pairs, accumulating the total cost as we print each edge.
print("\n")
print("The selected edges in the optimal assignment and their costs are:")
cost_opt = 0
for row, col in zip(row_ind, col_ind):
    cost_opt += cost_matrix[row][col]
    print("Edge (" + str(row) + "," + str(col) + ") , Cost = " + str(cost_matrix[row][col]))
# Report the optimal total cost.
print("\n")
print("The optimal cost is : ", cost_opt)
# -
# ***
# ### InClass Exercise 2
#
# Consider the following $C$ matrix
#
# $$
# C =
# \begin{bmatrix}
# 2 & 1 & -1 & 1 \\
# 4 & 5 & -2 & -3 \\
# 1 & 2 & -1 & 5
# \end{bmatrix}
# \;\;.
# $$
#
# Solve the minimum weight matching problem for this matrix.
# +
# Write your code here
# -
# ***
# ## 1.4 Root finding problems - multivariate rootfinding
#
# We now turn to the much harder problem of finding zeros of functions of the form $f : \mathbb{R}^m \rightarrow \mathbb{R}^n$. ```scipy.optimize``` provides a single function ```scipy.optimize.root```, through which all the other functions listed in the <a href="https://docs.scipy.org/doc/scipy/reference/optimize.html#module-scipy.optimize" style="text-decoration: none;">documentation page</a> for multivariate root finding are accessible. All the algorithms require an initial guess (or starting point) $x_0$. The syntax for the function ```scipy.optimize.root``` can be found <a href="https://docs.scipy.org/doc/scipy/reference/generated/scipy.optimize.root.html#scipy.optimize.root" style="text-decoration: none;">here</a>.
#
# The important parameters that this function accepts, and about which you should be aware of are:
# - ```fun```: A function that implements $f$. The function can optionally return the Jacobian as well.
# - ```x0```: Initial guess.
# - ```method```: The type of solver to use. Options include ```hybr```, ```krylov```, ```broyden1``` etc.
# - ```jac```: Either a ```bool```, or a callable function that returns the Jacobian. In this case, it must accept the same arguments as fun.
# - ```options```: A dictionary with optional arguments for the solver ```method```.
#
# **Note:** If ```jac``` is a Boolean and is True, ```fun``` is assumed to return the value of Jacobian along with the objective function. If False, the Jacobian will be estimated numerically. Also one should be aware that many methods do not need the Jacobian implemented; they approximate the Jacobian internally.
#
# We will learn to use some of the features of ```scipy.optimize.root``` using an example.
# ***
# ### Example 1.4.1
# Consider the function $f : \mathbb{R}^2 \rightarrow \mathbb{R}^2$ defined as
#
# $$
# f(x,y) = ((x - x_t)^2 - (y - y_t)^2, 2(x - x_t)(y - y_t)),
# $$
#
# for some $(x_t, y_t) \in \mathbb{R}^2$.
#
# Alternatively you can also think of this function as $f : \mathbb{C} \rightarrow \mathbb{C}$, defined as $f(z) = (z - z_t)^2$, where $z = x + i y$, and $z_t = x_t + i y_t$. Clearly this function has only one root $z = z_t$, i.e. $(x, y) = (x_t, y_t)$.
#
# Let us code up the function and its Jacobian. The Jacobian is given by
#
# $$
# J(x,y) =
# \begin{bmatrix}
# 2(x - x_t) & 2(y - y_t) \\
# -2(y - y_t) & 2(x - x_t)
# \end{bmatrix}
# .
# $$
#
# Set $x_t = 1, y_t = 1$.
# +
# Target root (xt, yt); f has its only zero there.
xt = 1
yt = 1
# f viewed as the complex map z -> (z - zt)^2, split into real/imaginary parts.
def fun(x):
    dx, dy = x[0] - xt, x[1] - yt
    return [dx ** 2 - dy ** 2, 2 * dx * dy]
# Analytic Jacobian of fun.
def jac(x):
    dx, dy = x[0] - xt, x[1] - yt
    return [[2 * dx, 2 * dy], [-2 * dy, 2 * dx]]
# Variant returning (f(x), J(x)) in one call, for use with root(..., jac=True).
def fun1(x):
    return (fun(x), jac(x))
# -
# Define a starting guess of the root $(x_0, y_0) = (0.5, 0.5)$, and lets demonstrate how the Jacobian can be passed.
# +
# Starting guess for the root finder.
x0 = [0.5, 0.5]
# Three equivalent ways to supply derivative information to sciopt.root:
#   Method 1: fun returns (f, J) and jac=True
#   Method 2: no Jacobian at all (finite-difference approximation)
#   Method 3: a separate callable jac
for label, f_arg, j_arg in [("Method 1", fun1, True),
                            ("Method 2", fun, False),
                            ("Method 3", fun, jac)]:
    print(label, "\n")
    sol = sciopt.root(fun=f_arg, x0=x0, jac=j_arg, method='hybr')
    print(sol, "\n")
    print("Solution : x = ", sol.x[0], ", y = ", sol.x[1], "\n\n\n")
# -
# ***
# ### InClass Exercise 3
# 1. Try different values of the starting guess $(x_0, y_0)$, and see the impact on performance, as measured by the number of function and Jacobian evaluations.
# 2. Repeat the experiment with different values of $(x_t, y_t)$. What happens as you approach $x_t = 0, y_t = 0$?
# +
# Write your code here
# -
# ***
# ### Example 1.4.2
# Consider the following system of nonlinear equations
#
# $$
# \begin{split}
# x + \frac{(x - y)^3}{2} - 1 &= 0 \\
# \frac{(y - x)^3}{2} + y &= 0 \;.
# \end{split}
# $$
#
# We can try to solve this system by trying to find the roots of the function $f : \mathbb{R}^2 \rightarrow \mathbb{R}^2$ defined as
#
# $$
# f(x,y) = \left( x + \frac{(x - y)^3}{2} - 1, \frac{(y - x)^3}{2} + y \right).
# $$
#
# We code up the function, its Jacobian, and solve the problem using a few different methods.
# +
# The system of Example 1.4.2 written as a root-finding problem f(x, y) = 0.
def fun_nonlinear_eq(x):
    u = x[0] - x[1]
    v = x[1] - x[0]
    return [x[0] + 0.5 * (u ** 3) - 1, 0.5 * (v ** 3) + x[1]]
# Analytic Jacobian of fun_nonlinear_eq.
def jac_nonlinear_eq(x):
    u2 = (x[0] - x[1]) ** 2
    v2 = (x[1] - x[0]) ** 2
    return [
        [1 + 1.5 * u2, -1.5 * v2],
        [-1.5 * u2, 1 + 1.5 * v2]
    ]
# Starting guess shared by every solver below.
x0 = [1, 1]
# 'hybr' and 'lm' can exploit the analytic Jacobian.
for name in ['hybr', 'lm']:
    print("Method " + name, "\n")
    sol = sciopt.root(fun=fun_nonlinear_eq, x0=x0, jac=jac_nonlinear_eq, method=name)
    print(sol, "\n")
    print("Solution : x = ", sol.x[0], ", y = ", sol.x[1], "\n\n\n")
# The methods below do not use the Jacobian -- passing one should trigger a warning.
# 'broyden1' is deliberately handed the Jacobian to demonstrate that warning.
name = 'broyden1'
print("Method " + name, "\n")
sol = sciopt.root(fun=fun_nonlinear_eq, x0=x0, jac=jac_nonlinear_eq, method=name)
print(sol, "\n")
print("Solution : x = ", sol.x[0], ", y = ", sol.x[1], "\n\n\n")
# 'anderson' and 'krylov' are called without derivative information.
for name in ['anderson', 'krylov']:
    print("Method " + name, "\n")
    sol = sciopt.root(fun=fun_nonlinear_eq, x0=x0, method=name)
    print(sol, "\n")
    print("Solution : x = ", sol.x[0], ", y = ", sol.x[1], "\n\n\n")
# -
# ***
# ### InClass Exercise 4
# 1. Increase the maximum number of iterations for the 'krylov' method and see if there is an impact on the solution.
# 2. Try different starting guesses for $(x_0, y_0)$, for e.g. try $(0.8, 0.2)$ for the 'krylov' method. Does it help?
# +
# Write your code here
# -
# ***
# # 2. Local optimization using ```scipy.optimize```
# We will now consider more general optimization problems that arise in many science and engineering applications, which can be instances of both convex and non-convex optimization problems. Typically we will be given a twice continuously differentiable function $f: \mathbb{R}^n \rightarrow \mathbb{R}$, with additional constraints that define a region $\Omega \subset \mathbb{R}^n$, and the goal is to minimize $f$ (or equivalently maximize $-f$) over $\Omega$, i.e. we want to find a point $x^{\ast} \in \Omega$ such that $f(x^{\ast}) \leq f(y)$ for all $y \in \Omega$.
# We start by giving examples of a convex function $f(x) = x^2$, and a non-convex function $f(x) = x^4 - x^2 + \frac{x}{5}$, both of which are graphed below.
# +
# %matplotlib inline
# Sample both example functions on a uniform grid over [-1, 1].
step = 0.01
max_x = 1
x = np.arange(-max_x, max_x + step, step)
convex_vals = x ** 2                       # convex example
nonconvex_vals = x ** 4 - x ** 2 + x / 5.0  # non-convex example (two local minima)
# Overlay the two curves with labels for the legend.
plt.plot(x, convex_vals, "-r", label="$x^2$")
plt.plot(x, nonconvex_vals, "-b", label="$x^4 - x^2 + x / 5$")
plt.grid()
plt.xlabel("x", fontsize=16)
plt.ylabel("$f(x)$", fontsize=16)
plt.title("Examples of convex and non-convex functions", fontsize=16)
plt.legend(fontsize=16)
# -
# The basic structure of the optimization problem that we want to solve can be written down in the following form:
#
# $$
# \begin{equation}
# \begin{split}
# \text{minimize} \;\; & f(x) \\
# \text{subject to} \;\; & h_{i} (x) = 0,\;\; \forall \;\; 1 \leq i \leq m \;\;\;\;\;\; \text{(equality constraints)}\\
# & g_{j} (x) \geq 0,\;\; \forall \;\; 1 \leq j \leq p, \;\;\;\; \text{(inequality constraints)}
# \end{split}
# \end{equation}
# $$
#
# where $x \in \mathbb{R}^n,\;\;$ $f,\; h_i,\; g_j : \mathbb{R}^n \rightarrow \mathbb{R}$ are twice continuously differentiable, for all $1 \leq i \leq m$, and $1 \leq j \leq p$.
#
# Only in some very special cases, for example if the function is convex, can one find a solution that is a global optimum to the optimization problem above. In most cases we will only be concerned with finding a local optimum, i.e. we want to find a point $x^{\ast} \in \Omega$, such that there exists a neighborhood $\mathcal{V} \subset \Omega$, $x^{\ast} \in \mathcal{V}$, such that $f(x^{\ast}) \leq f(y)$, for all $y \in \mathcal{V}$. Finding such a local optimum $x^{\ast}$ is the goal of **local optimization**. Throughout we will assume that a local optimum exists. Of course, this can only be done up to some accuracy on a finite precision computer.
#
# ```scipy.optimize``` provides two general methods for this task:
# - ```scipy.optimize.minimize_scalar``` for univariate optimization problems documented <a href="https://docs.scipy.org/doc/scipy/reference/generated/scipy.optimize.minimize_scalar.html#scipy.optimize.minimize_scalar" style="text-decoration: none;">here</a>.
# - ```scipy.optimize.minimize``` for multivariate optimization problems documented <a href="https://docs.scipy.org/doc/scipy/reference/generated/scipy.optimize.minimize.html#scipy.optimize.minimize" style="text-decoration: none;">here</a>.
#
# We will first study univariate minimization, as it is much simpler than the multivariate case. For the multivariate optimization case, the user needs to know how the objective function and the constraints need to be modeled using objects (or classes) that have a standardized interface.
# ***
# ## 2.1 Univariate minimization
# The one dimensional local optimization problem reads as the following:
#
# $$
# \begin{equation}
# \begin{split}
# \text{minimize} \;\; & f(x) \\
# \text{subject to} \;\; & b_{lb} \leq x \leq b_{ub}
# \end{split}
# \end{equation}
# $$
#
# where $f : \mathbb{R} \rightarrow \mathbb{R}$, and $x,\; b_{lb},\; b_{ub} \in \mathbb{R}$, and notice that the general case above reduces to the special case, as typically for univariate problems we do not have nonlinear constraints.
#
# We illustrate the usage of ```scipy.optimize.minimize_scalar``` below, but there are a few important arguments to this function that the user must be aware of:
# - ```fun```: This is just a function that accepts a scalar $x \in \mathbb{R}$ and returns the function value $f(x)$.
# - ```method```: There are three main optimization algorithms that are implemented - a) ```Brent```, b) ```Bounded```, and c) ```Golden```.
# - ```bracket```: This is required if the methods chosen are either ```Brent``` or ```Golden```.
# - ```bounds```: This is required if the method chosen is ```Bounded```.
#
# Detailed documentation of all the arguments to ```scipy.optimize.minimize_scalar``` can be found <a href="https://docs.scipy.org/doc/scipy/reference/generated/scipy.optimize.minimize_scalar.html#scipy.optimize.minimize_scalar" style="text-decoration: none;">here</a>.
#
# The method ```Golden``` refers to the **Golden-section** search method, as described <a href="https://en.wikipedia.org/wiki/Golden-section_search" style="text-decoration: none;">here</a>. The ```Brent``` method improves on golden-section search by using inverse parabolic interpolation to speed up convergence (more information <a href="https://en.wikipedia.org/wiki/Brent%27s_method" style="text-decoration: none;">here</a>), while the ```Bounded``` method is exactly the ```Brent``` method with the added guarantee that the solution always lies in an input interval.
#
# The original interfaces to these methods are given below and they can still be invoked, but now all of them are integrated through the method ```scipy.optimize.minimize_scalar```. However the documentation to these original interfaces to the three methods contain useful information and should be read by the user:
# - ```Brent```: <a href="https://docs.scipy.org/doc/scipy/reference/generated/scipy.optimize.brent.html#scipy.optimize.brent" style="text-decoration: none;">https://docs.scipy.org/doc/scipy/reference/generated/scipy.optimize.brent.html#scipy.optimize.brent</a>
# - ```Bounded```: <a href="https://docs.scipy.org/doc/scipy/reference/generated/scipy.optimize.fminbound.html#scipy.optimize.fminbound" style="text-decoration: none;">https://docs.scipy.org/doc/scipy/reference/generated/scipy.optimize.fminbound.html#scipy.optimize.fminbound</a>
# - ```Golden```: <a href="https://docs.scipy.org/doc/scipy/reference/generated/scipy.optimize.golden.html#scipy.optimize.golden" style="text-decoration: none;">https://docs.scipy.org/doc/scipy/reference/generated/scipy.optimize.golden.html#scipy.optimize.golden</a>
# ***
# ### 2.1.1 Bracket
# The parameter ```bracket``` deserves a brief discussion. This parameter can either be a tuple of the form $(a,c)$, or $(a,b,c)$, and achieves the same functionality as for the method <a href="https://docs.scipy.org/doc/scipy/reference/generated/scipy.optimize.bracket.html#scipy.optimize.bracket">```scipy.optimize.bracket```</a>. If a tuple $(a,b,c)$ is input then it is assumed that both $f(b) < f(a)$ and $f(b) < f(c)$. If a tuple $(a,b)$ is input then a new tuple $(a',b',c')$ is generated such that both $f(b') < f(a')$, and $f(b') < f(c')$, which is also called **auto bracketing**.
#
# We give an example below for the function $f(x) = x^4 - x^2 + \frac{x}{5}$ that we encountered before, demonstrating how auto bracketing works.
# +
# Quartic test objective used throughout section 2.1.
def func_local_1d_opt(x):
    """Return f(x) = x**4 - x**2 + x/5 (vectorizes over NumPy arrays)."""
    return x ** 4 - x ** 2 + x / 5.0

# Auto-bracket a minimum starting from the points 0 and 1
# (the inputs need not satisfy xa < xb).
bracket_result = sciopt.bracket(func=func_local_1d_opt, xa=0, xb=1)
xa, xb, xc, fa, fb, fc, fcalls = bracket_result
print("Bracketing interval:")
print("xa =", xa, ", xb =", xb, ", xc =", xc)
print("\n")
print("Function values:")
print("f(xa) =", fa, ", f(xb) =", fb, ", f(xc) =", fc)
print("\n")
print("Number of function calls:", fcalls)
# -
# ***
# ### Example 2.1.1
# Let us now solve the problem of minimizing the same function $f(x) = x^4 - x^2 + \frac{x}{5}$, which is graphed again below for convenience.
# +
# %matplotlib inline
# Re-plot f(x) = x^4 - x^2 + x/5, reusing the helper defined above.
step = 0.01
max_x = 1
x = np.arange(-max_x, max_x + step, step)
y1 = func_local_1d_opt(x)
plt.plot(x, y1, "-r")
plt.grid()
plt.xlabel("x", fontsize=16)
plt.ylabel("$x^4 - x^2 + x / 5$", fontsize=16)
# -
# Let us first see what happens if we try to use the method ```Golden``` in different ways.
# Golden-section search with neither bracket nor bounds supplied.
res = sciopt.minimize_scalar(method="Golden", fun=func_local_1d_opt)
print(res)
# Notice that specifying the ```bounds``` parameter has no impact.
# Golden-section search with a bounds argument.
# NOTE(review): the narrative says bounds are silently ignored for Golden;
# newer SciPy versions reject this combination -- confirm the installed version.
res = sciopt.minimize_scalar(method="Golden", fun=func_local_1d_opt, bounds=[0, 1])
print(res)
# But specifying ```bracket``` does have an impact.
# +
# Golden-section search with explicit bracketing.
# Usage 1: a full (a, b, c) bracket with f(b) < f(a) and f(b) < f(c).
print("Usage 1: Specifying exact brackets")
res = sciopt.minimize_scalar(method="Golden", fun=func_local_1d_opt, bracket=[0.9, 0.75, 0.1])
print(res)
print("\n")
# Usage 2: a two-point bracket triggers auto bracketing; note that the
# resulting search interval is not constrained to [0, 1].
print("Usage 2: Auto bracketing")
res = sciopt.minimize_scalar(method="Golden", fun=func_local_1d_opt, bracket=[1, 0])
print(res)
# -
# Let us next try the same set of experiments with the method ```Brent```.
# +
# Brent: default usage (auto bracket from a default interval).
res = sciopt.minimize_scalar(method="Brent", fun=func_local_1d_opt)
print("Default usage")
print(res)
print("\n")
# Brent with a bounds argument.
# NOTE(review): the narrative treats bounds as ignored for Brent; newer SciPy
# versions reject bounds together with method="Brent" -- confirm version.
print("Usage of bounds")
res = sciopt.minimize_scalar(method="Brent", fun=func_local_1d_opt, bounds=[0, 1])
print(res)
print("\n")
# Brent with an exact (a, b, c) bracket.
print("Usage of exact bracketing")
res = sciopt.minimize_scalar(method="Brent", fun=func_local_1d_opt, bracket=[1, 0.75, 0])
print(res)
print("\n")
# Brent with a two-point bracket (auto bracketing).
print("Usage of auto bracketing")
res = sciopt.minimize_scalar(method="Brent", fun=func_local_1d_opt, bracket=[1, 0])
print(res)
# -
# ***
# ### InClass Exercise 5
# You are left to try the method ```Bounded``` in this exercise.
#
# a) Repeat the 4 experiments as done before with the method ```Bounded```. In particular you should make sure that the parameter ```bounds``` is not optional.
#
# b) Try different intervals for the bounds and make sure that the solution lies within it. In particular, try the bound $(-0.25, 0.25)$.
#
# c) Compare the number of iterations required versus using the method ```Golden```.
# +
# Write your code here
# -
# ***
# ## 2.2 Multivariate minimization
# We now turn to the more general (and much more difficult) case of multivariate minimization using the method ```scipy.optimize.minimize```. This method supports a lot of varied usage and is the workhorse for solving local optimization problems using ```scipy.optimize```. We will only be able to provide a brief overview of all the functionalities, and the user should read the <a href="https://docs.scipy.org/doc/scipy/reference/generated/scipy.optimize.minimize.html#scipy.optimize.minimize" style="text-decoration: none;">documentation page</a> for more details.
#
# However before we look at specific examples, it is necessary to take a small detour and understand how we model constraints for such optimization problems.
# ***
# ### 2.2.1 Handling constraints
# There are three general classes that ```scipy``` provides that allow us to model constraints:
# - ```scipy.optimize.LinearConstraint```: This can be used to model both linear equality and inequality constraints.
# - ```scipy.optimize.Bounds```: This can be used only to model bound constraints.
# - ```scipy.optimize.NonlinearConstraint```: This can be used to model more general non-linear equality and inequality constraints.
#
# These three classes cover among them almost any constraint that you will write down for any optimization problem.
#
# #### ```scipy.optimize.LinearConstraint```
# This class is documented <a href="https://docs.scipy.org/doc/scipy/reference/generated/scipy.optimize.LinearConstraint.html#scipy.optimize.LinearConstraint" style="text-decoration: none;">here</a>. It allows us to implement linear equality and inequality constraints of the following form:
#
# $$
# \begin{equation}
# b_{lb} \leq Ax \leq b_{ub}
# \end{equation}
# $$
#
# where $A \in \mathbb{R}^{m \times n}$ is a matrix, and $b_{lb}, b_{ub} \in \mathbb{R}^{m}$. Any equality constraint can be modeled by setting the lower and upper bounds to be the same for the corresponding entries in the vectors $b_{lb}$ and $b_{ub}$. Any equation which does not have an upper bound can be modeled by setting the corresponding entry in $b_{ub}$ to ```np.inf```, and similarly any equation which does not have a lower bound can be modeled by setting the corresponding entry in $b_{lb}$ to ```-np.inf```.
#
# #### ```scipy.optimize.Bounds```
# This class is documented <a href="https://docs.scipy.org/doc/scipy/reference/generated/scipy.optimize.Bounds.html#scipy.optimize.Bounds" style="text-decoration: none;">here</a>. It allows us to implement only bound constraints, which is a very special kind of linear constraint:
#
# $$
# \begin{equation}
# b_{lb} \leq x \leq b_{ub},
# \end{equation}
# $$
#
# where $b_{lb}, b_{ub} \in \mathbb{R}^{n}$, i.e. has the same dimension as $x$. Any variable which does not have an upper bound can be modeled by setting the corresponding entry in $b_{ub}$ to ```np.inf```, and similarly any variable which does not have a lower bound can be modeled by setting the corresponding entry in $b_{lb}$ to ```-np.inf```. Thus for any optimization problem we solve, we will always assume that we have bound constraints on all the variables.
#
# #### ```scipy.optimize.NonlinearConstraint```
# This class is documented <a href="https://docs.scipy.org/doc/scipy/reference/generated/scipy.optimize.NonlinearConstraint.html#scipy.optimize.NonlinearConstraint" style="text-decoration: none;">here</a>. It allows us to implement general non-linear inequality and equality constraints of the form:
#
# $$
# \begin{equation}
# b_{lb} \leq c_i(x) \leq b_{ub}, \;\; \forall \;\; 1 \leq i \leq m.
# \end{equation}
# $$
#
# The treatment of equality constraints or lack of upper or lower bounds is the same as the other two classes. Among the arguments that the method ```scipy.optimize.NonlinearConstraint``` accepts, two of them deserve particular attention from the user: ```jac``` and ```hess```, which are the Jacobian and Hessians of the constraints respectively. They are **different** from the Jacobians and Hessians of the objective function being minimized, and is a source of a lot of confusion.
# ***
# ### 2.2.2 Description of the parameters for ```scipy.optimize.minimize```.
# Before we get to solving some real optimization problems, we need to understand some of the important parameters that the method ```scipy.optimize.minimize``` accepts. The detailed documentation can be found on the <a href="https://docs.scipy.org/doc/scipy/reference/generated/scipy.optimize.minimize.html#scipy.optimize.minimize" style="text-decoration: none;">documentation page</a> of the method. The important parameters that you must be aware of are:
#
# - ```fun```: A function that implements $f$. It accepts $x \in \mathbb{R}^n$ and returns $f(x) \in \mathbb{R}$. The function can optionally return the Jacobian as well.
# - ```x0```: Initial guess of a local minimizer.
# - ```method```: The type of solver to use, explained in the next section.
# - ```jac```: It can be a function that returns the Jacobian of $f$. In this case, it must accept the same arguments as fun. The Jacobian may also be estimated using finite difference schemes such as ```2-point```, ```3-point```, or ```cs```. If ```jac``` is a Boolean and is ```True```, ```fun``` is assumed to return the Jacobian along with the objective function. If ```False```, the gradient will be estimated using ```2-point``` finite difference estimation.
# - ```hess```: A function for computing the Hessian matrix of $f$. Similar to ```jac```, it can also be estimated using the same finite difference schemes. Read the documentation for more details.
# - ```hessp```: A function that implements the action of the Hessian of the objective function on an arbitrary vector. Note that only one of ```hess``` or ```hessp``` must be provided. If both are provided, this parameter is ignored.
# - ```bounds```: Must be an instance of the class ```scipy.optimize.Bounds``` as described in last section.
# - ```constraints```: This parameter is only used when the ```method``` is either ```trust-constr```, ```COBYLA``` or ```SLSQP```. For ```trust-constr```, the constraints must be specified as a list of objects of type ```scipy.optimize.LinearConstraint``` or ```scipy.optimize.NonlinearConstraint```. For the other two methods, the constraints must be specified as dictionaries, and you should read the documentation if you want to use them.
# - ```options```: A dictionary with optional arguments for the solver ```method```.
#
# One thing that is important to note here to avoid confusion is that the Jacobian of the function $f$ is nothing but the gradient of $f$, which is exactly what ```jac``` needs to calculate.
#
# **Note: One really important thing that the user should keep in mind is that not all of the above parameters can be chosen or activated independent of one another. You should always read the documentation first before you use any new option that you have not used before, and do not know how it works.**
# ***
# ### 2.2.3 Description of the ```methods``` parameter, and possible choices
# This section is intended to be a short guide to help the user at least get started with some of the basic usage of the function ```scipy.optimize.minimize```. Here we shed some light on the various options that can be chosen for the ```methods``` parameter. Very broadly the methods can be classified into three main categories by the kind of the optimization problems that they can solve:
#
# - **Unconstrained minimization methods**: ```Nelder-Mead```, ```Powell```, ```CG```, ```BFGS```, ```Newton-CG```, ```dogleg```, ```trust-ncg```, ```trust-krylov```, ```trust-exact```
# - **Constrained minimization methods (bound only constraints)**: ```L-BFGS-B```, ```TNC```
# - **Constrained minimization methods (general constraints)**: ```trust-constr```, ```COBYLA```, ```SLSQP```
#
# Among the unconstrained minimization methods, a further subdivision is possible based on whether the optimization algorithm is zero-order, first-order, or second-order, meaning that the algorithm only uses function values, or the function value and the gradient, or the function value, the gradient and the Hessian information respectively in the three cases. While Quasi-Newton methods estimate the Hessian from the gradient, and thus do not strictly need an implementation of the Hessian to be supplied, we will group them under second-order methods. However the trust region methods are truly different from all of them, and so are classified separately.
#
# - **Zero-order methods**: ```Nelder-Mead```, ```Powell```
# - **First-order methods**: ```CG```
# - **Second-order methods**: ```BFGS```, ```Newton-CG```
# - **Trust region methods**: ```dogleg```, ```trust-ncg```, ```trust-krylov```, ```trust-exact```
#
# The classification given above gives a rough guide as to which method is compatible with which other parameter choices for the other arguments of ```scipy.optimize.minimize```. We illustrate these aspects with some simple examples next.
# ***
# ### Example 2.2.1
# The first example that we consider is that of an unconstrained optimization problem.
# Consider the function
#
# $$
# f(x_1, x_2) = \sin (x_1 - a_1) + \sin (x_2 - a_2).
# $$
#
# It is graphed below for $a_1 = -2$, and $a_2 = -2$, and as you can see it has a lot of local minima.
# +
# %matplotlib inline
# Define function
def func_sines(x, a):
    """Separable sum of sines: sin(x1 - a1) + sin(x2 - a2)."""
    first_term = np.sin(x[0] - a[0])
    second_term = np.sin(x[1] - a[1])
    return first_term + second_term
# Define a
a = [-2, -2]
# Sample the function on a regular grid over [-5, 5) x [-5, 5).
X = np.arange(-5, 5, 0.25)
Y = np.arange(-5, 5, 0.25)
X, Y = np.meshgrid(X, Y)
Z = np.copy(X)
for i in range(Z.shape[0]):
    for j in range(Z.shape[1]):
        Z[i, j] = func_sines(x=[X[i, j], Y[i, j]], a=a)
# Plot the function
fig = plt.figure(figsize=[10, 6])
# BUG FIX: Figure.gca(projection='3d') was deprecated in matplotlib 3.4 and
# removed in 3.6; add_subplot(projection='3d') is the supported way to
# create a 3-D axes.
ax = fig.add_subplot(projection='3d')
# Plot the surface.
surf = ax.plot_surface(X, Y, Z, cmap=cm.coolwarm, linewidth=0, antialiased=False)
# Customize the z axis.
ax.zaxis.set_major_locator(LinearLocator(10))
ax.zaxis.set_major_formatter(FormatStrFormatter('%.02f'))
# Add a color bar which maps values to colors.
fig.colorbar(surf, shrink=0.65, aspect=5)
# Raw string so the LaTeX backslashes are not parsed as (invalid) escapes.
plt.title(r"$\sin (x_1 - a_1) + \sin (x_2 - a_2)$" + ", $a_1 = $" + str(a[0]) + ", $a_2 = $" + str(a[1]), fontsize=16)
plt.show()
# -
# Let us code up its Jacobian, Hessian, and Hessian action.
# +
# Define function Jacobian
def func_sines_jac(x, a):
    """Gradient of func_sines: elementwise cosine of the shifted coordinates."""
    components = [np.cos(x[k] - a[k]) for k in range(2)]
    return np.asarray(components)
# Define function Hessian
def func_sines_hess(x, a):
    """Hessian of func_sines: diagonal matrix of -sin of each shifted coordinate."""
    shifted = np.asarray([x[0] - a[0], x[1] - a[1]])
    return np.diag(-np.sin(shifted))
# Define function Hessian * p
def func_sines_hessp(x, p, a):
    """Hessian-vector product: the diagonal Hessian applied to p, elementwise."""
    first = -np.sin(x[0] - a[0]) * p[0]
    second = -np.sin(x[1] - a[1]) * p[1]
    return np.asarray([first, second])
# -
# Now let us try to find the local minima for this function by starting from $x_0 = (-2, -2)$, and for different choices of methods.
# +
# Define a
a = [-2, -2]
# Define starting point
x0 = [-2, -2]
# NOTE(review): args=(a) is NOT a one-element tuple -- the parentheses are
# just grouping. scipy.optimize.minimize normalizes a non-tuple `args` to
# (args,), so this works, but args=(a,) would be the unambiguous spelling.
# Optimize using Nelder-Mead (zero-order: uses function values only)
print("Optimization using Nelder-Mead:")
res = sciopt.minimize(
    fun=func_sines,
    x0=x0,
    args=(a),
    method="Nelder-Mead",
    options={"disp": True}
)
print("Optimal x =", res.x)
print("\n")
# Optimize using Powell (zero-order)
print("Optimization using Powell:")
res = sciopt.minimize(
    fun=func_sines,
    x0=x0,
    args=(a),
    method="Powell",
    options={"disp": True}
)
print("Optimal x =", res.x)
print("\n")
# Optimize using CG (first-order: requires the Jacobian)
print("Optimization using CG:")
res = sciopt.minimize(
    fun=func_sines,
    x0=x0,
    args=(a),
    method="CG",
    jac=func_sines_jac,
    options={"disp": True}
)
print("Optimal x =", res.x)
print("\n")
# Optimize using BFGS (quasi-Newton: builds a Hessian estimate from gradients)
print("Optimization using BFGS:")
res = sciopt.minimize(
    fun=func_sines,
    x0=x0,
    args=(a),
    method="BFGS",
    jac=func_sines_jac,
    options={"disp": True}
)
print("Optimal x =", res.x)
print("\n")
# Optimize using trust-ncg (trust region: takes the full analytic Hessian)
print("Optimization using trust-ncg:")
res = sciopt.minimize(
    fun=func_sines,
    x0=x0,
    args=(a),
    method="trust-ncg",
    jac=func_sines_jac,
    hess=func_sines_hess,
    options={"disp": True}
)
print("Optimal x =", res.x)
print("\n")
# Optimize using trust-krylov (trust region: a Hessian-vector product suffices)
print("Optimization using trust-krylov:")
res = sciopt.minimize(
    fun=func_sines,
    x0=x0,
    args=(a),
    method="trust-krylov",
    jac=func_sines_jac,
    hessp=func_sines_hessp,
    options={"disp": True}
)
print("Optimal x =", res.x)
print("\n")
# Optimize using trust-krylov again, from a different starting point, to
# show that the local minimum found depends on x0.
print("Optimization using trust-krylov:")
res = sciopt.minimize(
    fun=func_sines,
    x0=[-1, -1],
    args=(a),
    method="trust-krylov",
    jac=func_sines_jac,
    hessp=func_sines_hessp,
    options={"disp": True}
)
print("Optimal x =", res.x)
print("\n")
# -
# ***
# ### InClass Exercise 6
# a) Using the above code as a starting point, try different values of $x_0$, and compare the performance of the zero-order methods ```Nelder-Mead``` and ```Powell```.
#
# b) Using the above code as a starting point, try different values of $x_0$, and compare the performance of the methods ```CG``` and ```BFGS```.
#
# c) For any choice of $x_0$, solve the above problem using ```Newton-CG```. Compare the performance with ```trust-ncg```.
#
# d) Test the performance of choosing ```hess``` vs ```hessp``` as an input argument for the methods ```Newton-CG```, ```trust-ncg```, and ```trust-krylov```, for different values of $x_0$.
# +
# Write your code here
# -
# ***
# ### Example 2.2.2
# The next example that we consider is that of a constrained optimization problem with simple bound constraints. We consider the same function as before
#
# $$
# f(x_1, x_2) = \sin (x_1 - a_1) + \sin (x_2 - a_2).
# $$
#
# for $a_1 = -2$, and $a_2 = -2$. We will minimize this function in the region given by $-3 \leq x_1 \leq 3, \;\;-3 \leq x_2 \leq 3$. This leads to the following minimization problem:
#
# $$
# \begin{equation}
# \begin{split}
# \text{minimize} \;\; & \sin (x_1 - a_1) + \sin (x_2 - a_2) \\
# \text{subject to} \;\; & -3 \leq x_1 \leq 3 \\
# & -3 \leq x_2 \leq 3 . \\
# \end{split}
# \end{equation}
# $$
#
# The following Python code solves this problem using the methods ```L-BFGS-B``` and ```TNC```.
# +
def create_bounds(lb, ub):
    """Wrap lower/upper bound vectors in a scipy.optimize.Bounds object."""
    bounds_obj = sciopt.Bounds(lb=lb, ub=ub)
    return bounds_obj
def solve_bound_constrained_problem(method_name, bounds, x0, a):
    """Minimize func_sines under box constraints with a bound-capable solver.

    Only the 'L-BFGS-B' and 'TNC' methods are accepted here, since both
    support the `bounds` argument; anything else raises ValueError.
    """
    supported_methods = ("L-BFGS-B", "TNC")
    if method_name not in supported_methods:
        raise ValueError("Method must be either L-BFGS-B or TNC")
    print("Optimization using " + str(method_name) + ":")
    solver_kwargs = {
        "fun": func_sines,
        "x0": x0,
        "args": (a),
        "method": method_name,
        "jac": func_sines_jac,
        "bounds": bounds,
        "options": {"disp": True},
    }
    result = sciopt.minimize(**solver_kwargs)
    print("Optimal x =", result.x)
    print("\n")
if __name__ == "__main__":
    # Define a
    a = [-2, -2]
    # Define starting point
    x0 = [-2, -2]
    # Create bounds for the feasible box: -3 <= x1 <= 3, -3 <= x2 <= 3
    bounds = create_bounds(lb=[-3, -3], ub=[3, 3])
    # Solve using L-BFGS-B
    solve_bound_constrained_problem(method_name="L-BFGS-B", bounds=bounds, x0=x0, a=a)
    # Solve using TNC
    solve_bound_constrained_problem(method_name="TNC", bounds=bounds, x0=x0, a=a)
# -
# ***
# ### Example 2.2.3
# We now consider a general non-linear constraint. Consider now the following optimization problem:
#
# $$
# \begin{equation}
# \begin{split}
# \text{minimize} \;\; & \sin (x_1 - a_1) + \sin (x_2 - a_2) \\
# \text{subject to} \;\; & 3 \leq x_1^2 + x_2^2 \leq 20,
# \end{split}
# \end{equation}
# $$
#
# where again we take $a_1 = -2$, and $a_2 = -2$. The following Python code then solves this problem using the method ```trust-constr```.
# +
# Define constraint (hessian evaluated using finite difference)
def get_circle_constraint(r1, r2):
    """Build a NonlinearConstraint enforcing r1 <= x1^2 + x2^2 <= r2.

    The constraint function and its Jacobian are analytic closures; the
    constraint Hessian is approximated by finite differences ('2-point').
    """
    def squared_norm(x):
        return x.dot(x)

    def squared_norm_grad(x):
        return 2 * x

    return sciopt.NonlinearConstraint(
        fun=squared_norm,
        lb=r1,
        ub=r2,
        jac=squared_norm_grad,
        hess="2-point",
    )
def solve_circle_constrained_problem(constraints, x0, a):
    """Minimize func_sines subject to the given nonlinear constraints.

    Uses the trust-constr solver with an analytic Jacobian and a
    Hessian-vector product for the objective.
    """
    print("Optimization using trust-constr:")
    solver_kwargs = {
        "fun": func_sines,
        "x0": x0,
        "args": (a),
        "method": "trust-constr",
        "jac": func_sines_jac,
        "hessp": func_sines_hessp,
        "constraints": constraints,
        "options": {"disp": True},
    }
    result = sciopt.minimize(**solver_kwargs)
    print("Optimal x =", result.x)
    print("\n")
if __name__ == "__main__":
    # Define a
    a = [-2, -2]
    # Define starting point
    x0 = [-2, -2]
    # Create the annulus constraint 3 <= x1^2 + x2^2 <= 20
    r1 = 3
    r2 = 20
    constraints = get_circle_constraint(r1=r1, r2=r2)
    # Solve using trust-constr
    solve_circle_constrained_problem(constraints=constraints, x0=x0, a=a)
# -
# ***
# ### InClass Exercise 7
# Solve the following constrained optimization problem:
#
# $$
# \begin{equation}
# \begin{split}
# \text{minimize} \;\; & \sin (x_1 - a_1) + \sin (x_2 - a_2) \\
# \text{subject to} \;\; & 1 \leq \frac{x_1^2}{4} + \frac{x_2^2}{9} \leq 8,
# \end{split}
# \end{equation}
# $$
#
# for $a_1 = -2$, and $a_2 = -2$. Choose any appropriate $x_0$.
# +
# Write your code here
|
nb/2019_winter/Lecture4-Optimization-Using-Python-SciPy-InClass.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Can you tell the difference?
#
# [](https://github.com/Dragon1573/PyChallenge-Tips/blob/master/LICENSE)
# [](http://www.pythonchallenge.com/pc/return/balloons.html)
#
# <center><img src="../../resources/imgs/Quiz18-1.png" /></center>
#   这是由2张小图片拼接而成的组合图。先检查源代码,看看有没有什么提示。
from requests import get
import bs4
from re import search
# The page is behind HTTP basic auth; the header is the base64 of "huge:file".
response = get(
    'http://www.pythonchallenge.com/pc/return/balloons.html',
    headers={'Authorization': 'Basic aHVnZTpmaWxl'}
)
# NOTE(review): no parser is passed here, so BeautifulSoup emits a
# GuessedAtParserWarning and picks whichever parser is installed.
response = bs4.BeautifulSoup(response.text)
# Print the HTML comment hidden in the page source.
print(search(r'<!--(.*)-->', response.prettify()).group(1))
# "More obvious than you think" -- the most obvious difference between the
# two images is brightness, so try
# <http://www.pythonchallenge.com/pc/return/bright.html>.
response = get(
    'http://www.pythonchallenge.com/pc/return/bright.html',
    headers={'Authorization': 'Basic aHVnZTpmaWxl'}
)
print(response.text.rstrip())
# "ness" is a word suffix -- could the answer be
# <http://www.pythonchallenge.com/pc/return/brightness.html>?
response = get(
    'http://www.pythonchallenge.com/pc/return/brightness.html',
    headers={'Authorization': 'Basic aHVnZTpmaWxl'}
)
response = bs4.BeautifulSoup(response.text, features='html.parser')
print(response.img.next.next.strip())
# Follow the hint: download `deltas.gz` and decompress it.
import gzip
from io import BytesIO, StringIO
from difflib import Differ
from PIL import Image
response = get(
    'http://www.pythonchallenge.com/pc/return/deltas.gz',
    headers={'Authorization': 'Basic aHVnZTpmaWxl'}
)
response = gzip.open(BytesIO(response.content))
response = response.read().decode()
# The file holds two columns of hex dumps; split each line into the
# left-hand and right-hand column.
content = ([], [])
for line in StringIO(response):
    parts = line.split(' ')
    content[0].append(parts[0].strip() + '\n')
    content[1].append(parts[-1].strip() + '\n')
# Diff the two columns: lines unique to the right column ('+'), unique to
# the left column ('-'), and common lines each reassemble into an image.
compare = Differ().compare(content[0], content[1])
files = [bytes(), bytes(), bytes()]
for line in compare:
    # Strip the 2-char diff prefix, then parse the hex byte tokens.
    byte_str = bytes([int(i, 16) for i in line[2:].strip().split(' ') if i])
    if line[0] == '+':
        files[0] += byte_str
    elif line[0] == '-':
        files[1] += byte_str
    else:
        files[2] += byte_str
display(Image.open(BytesIO(files[0])))
display(Image.open(BytesIO(files[1])))
display(Image.open(BytesIO(files[2])))
# Now we have three images, and they tell us:
#
# - Username: `butter`
# - Password: `<PASSWORD>`
# - Next level link: <http://www.pythonchallenge.com/pc/hex/bin.html>
|
src/Part2/Quiz18.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 2
# language: python
# name: python2
# ---
# ## 1. Data processing
#
# Before we can begin analysing the data, we need to get it and "clean" it so that we can run computations on it.
# +
# %matplotlib inline
import ast
import csv
import numpy as np
from collections import Counter
import matplotlib
import matplotlib.pyplot as plt
# -
# pretty plotting
plt.style.use('ggplot')
plt.rcParams['figure.figsize'] = [20, 5]
# First let's parse the data.
# NOTE: this is Python 2 code (print statement, "rb" mode for csv.reader);
# the file handle is never closed, which is tolerable in a throwaway notebook.
reader = csv.reader(open("movie_recommendations.csv", "rb"), delimiter=",")
data = list(reader)
print data
# +
# The first row has header info and the second row is empty, so we can ignore them.
# Note: the data is stored as strings, so we need to process it some more
text_data = np.array(data[2:])
# Decode titles to unicode so non-ASCII movie names survive (Python 2).
movie_titles = [unicode(title, 'utf-8') for title in text_data[:,0]]
raw_movie_genres = text_data[:,1]
raw_omkar_ratings = text_data[:,2]
raw_imdb_ratings = text_data[:,3]
# -SOON->
# raw_meta_critic_ratings = result[:,4]
# raw_rotten_tomato_ratings = result[:,5]
# +
# Now lets normalize these ratings so they are between 0 and 1
from __future__ import division # so that python will evaluate 3/10 as a floating pt operation instead of an integer op
def string_to_numpy(string_arr):
    """Convert an array of rating strings (e.g. '7/10', '0.5') to a float array.

    'N/A' entries map to 0. Every other entry is evaluated as a Python
    expression, so fraction strings like '7/10' become floats.
    """
    tmp_list = []
    for string_val in string_arr:
        # BUG FIX: the original used `string_val is 'N/A'`, an identity
        # comparison that is False for strings read from a file, so 'N/A'
        # fell through to eval() and raised a NameError. Use equality.
        if string_val == 'N/A':
            tmp_list.append(0)
        else:
            # NOTE(review): eval() on file contents is unsafe for untrusted
            # input; acceptable here for a personal ratings CSV.
            tmp_list.append(eval(string_val))
    return np.asarray(tmp_list).astype("float")
# Numeric rating vectors, aligned element-for-element with movie_titles.
omkar_ratings = string_to_numpy(raw_omkar_ratings)
imdb_ratings = string_to_numpy(raw_imdb_ratings)
# -
# ## 2. Analysis
#
# Lets look at the raw data first:
assert len(imdb_ratings) == len(movie_titles)
# plt.xticks(range(len(imdb_ratings)), movie_titles, rotation=90) # <- too messy :(
# Remember, we scaled all scores to [0,1]!
plt.plot(imdb_ratings, alpha=0.5, label="IMDB rating")
plt.plot(omkar_ratings, alpha=1.0, label="Omkar's rating")
plt.legend()
plt.title('Plotting Omkar and IMDB ratings (scaled to [0,1])')
plt.show()
# _Phew!_ That's a pretty dense chart and on its own we can quickly surmise how closely related Omkar's ratings are w.r.t IMDB. For a single number statistic, let's look at [cross-correlation](https://en.wikipedia.org/wiki/Cross-correlation) between Omkar and IMDB:
print "Overall IMDB corellation: ",np.corrcoef(omkar_ratings, imdb_ratings)[0,1]
# On its own, the correlation doesn't tell us much. Let's look at where the largest difference between Omkar and IMDB come up:
def analyze_diff(diff_omkar_imdb, title):
    """Summarize a ratings-difference vector and plot it.

    Prints max/min/mean/std of the differences, bar-plots them colored by
    whether each movie lies more than one std dev above/below the mean,
    and lists the movie titles in those tails. Reads the module-level
    imdb_ratings and movie_titles for axis length and labels.

    Note: diff = Omkar - IMDB, so positive means Omkar rated higher.
    """
    print 'Max difference: ', diff_omkar_imdb.max()
    print 'Min difference: ', diff_omkar_imdb.min()
    print 'Mean: ', diff_omkar_imdb.mean()
    print 'Std dev: ', diff_omkar_imdb.std()
    # Boolean masks for movies more than one std dev below/above the mean.
    below_1_sigma = np.array(diff_omkar_imdb < (diff_omkar_imdb.mean() - diff_omkar_imdb.std()))
    above_1_sigma = np.array(diff_omkar_imdb > (diff_omkar_imdb.mean() + diff_omkar_imdb.std()))
    # everything that's not 1 sigma above/below the mean
    rest = np.logical_not(below_1_sigma) & np.logical_not(above_1_sigma)
    _x_axis = np.arange(len(imdb_ratings))
    plt.bar(_x_axis[above_1_sigma], diff_omkar_imdb[above_1_sigma], label='Above 1 $\sigma$')
    plt.bar(_x_axis[below_1_sigma], diff_omkar_imdb[below_1_sigma], label='Below 1 $\sigma$')
    # NOTE(review): this label likely should read "Within 1 $\sigma$" --
    # the mask covers everything within one std dev of the mean.
    plt.bar(_x_axis[rest], diff_omkar_imdb[rest], alpha=0.5, label='Within 2 $\sigma$')
    plt.legend()
    plt.title(title)
    high_positive_diff = []
    high_negative_diff = []
    for idx in range(len(movie_titles)):
        if above_1_sigma[idx]:
            high_positive_diff.append((movie_titles[idx], diff_omkar_imdb[idx]))
        if below_1_sigma[idx]:
            high_negative_diff.append((movie_titles[idx], diff_omkar_imdb[idx]))
    # Note: diff = Omkar - IMDB, so a positive score indicates Omkar rated a movie higher and vice versa
    print 'Movies that are above 1 sigma from the mean difference b/w Omkar and IMDB: (total: {})'.format(len(high_positive_diff))
    for movie_title, diff in high_positive_diff:
        print '\tMovie: {}, diff: {}'.format(movie_title.encode('utf-8'), diff)
    print 'Movies that are below 1 sigma from the mean difference b/w Omkar and IMDB: (total: {})'.format(len(high_negative_diff))
    for movie_title, diff in high_negative_diff:
        print '\tMovie: {}, diff: {}'.format(movie_title.encode('utf-8'), diff)
    return
analyze_diff(omkar_ratings - imdb_ratings, 'Difference b/w Omkar and IMDB (both of which were first scaled to [0,1])')
# This is interesting: on average, it looks like Omkar rates movies ~4% higher than IMDB. With a standard deviation of ~6%, we see that Omkar tends to generally be more generous with his ratings.
#
# Additionally, we can also look at the **absolute** difference b/w Omkar and IMDB in order to see which movies have very strong agreement b/w both datasets:
analyze_diff(np.abs(omkar_ratings - imdb_ratings), 'Absolute difference b/w Omkar and IMDB (both of which were first scaled to [0,1])')
# ### Genre-based analysis
# +
# Num unique genres: collect every genre tag, normalizing case and
# whitespace, and patching two known spelling mistakes in the source data.
all_genres = []
for raw_genres in raw_movie_genres:
    genres = raw_genres.split('/')
    for genre in genres:
        word = genre.lower().strip()
        # spelling mistakes
        if word == 'crme':
            word = 'crime'
        elif word == 'myster':
            word = 'mystery'
        all_genres.append(word)
unique_genres = sorted(set(all_genres))
counts = Counter(all_genres)
print unique_genres
print counts
# +
max_correlation = 0
max_corr_genre = 'N/A'
# For each genre, correlate Omkar's and IMDB's ratings over the movies
# tagged with that genre; skip genres with fewer than 3 data points since
# a correlation over 1-2 samples is meaningless.
for genre in unique_genres:
    use = []
    for raw_genres in raw_movie_genres:
        use.append(genre in raw_genres.lower())
    if sum(use) < 3:
        print '> Genre "{}" has too few examples ({})'.format(genre, counts[genre])
        continue
    correlation = np.corrcoef(omkar_ratings[use], imdb_ratings[use])[0, 1]
    print 'Genre: {}, Num. data pts: {}, Correlation: {}'.format(genre, counts[genre], correlation)
    # Track the genre with the strongest (positive) correlation.
    if correlation > max_correlation:
        max_correlation = correlation
        max_corr_genre = genre
# -
print "Max. correlated genre: {}, ({})".format(max_corr_genre, max_correlation)
|
analysis.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: My Hatchet Dev Kernel
# language: python
# name: hatchet-dev-kernel
# ---
import hatchet as ht
# # Basic Mock Example
#
# This basic example uses a mock GraphFrame taken from the testing directory in the Hatchet repo.
# ## Generate and Visualize Mock GraphFrame
# Copied from hatchet/hatchet/tests/conftest.py
def mock_graph_literal():
    """Return a hand-written call-graph literal for GraphFrame.from_literal.

    Each node is a dict with a "name", a "metrics" dict holding inclusive
    ("time (inc)") and exclusive ("time") times, and an optional
    "children" list of nodes in the same shape. Several names (bar, baz,
    grault, garply, waldo) deliberately appear in multiple places so the
    query examples below can match repeated subtrees.
    """
    graph_dict = [
        {
            "name": "foo",
            "metrics": {"time (inc)": 130.0, "time": 0.0},
            "children": [
                {
                    "name": "bar",
                    "metrics": {"time (inc)": 20.0, "time": 5.0},
                    "children": [
                        {"name": "baz", "metrics": {"time (inc)": 5.0, "time": 5.0}},
                        {
                            "name": "grault",
                            "metrics": {"time (inc)": 10.0, "time": 10.0},
                        },
                    ],
                },
                {
                    "name": "qux",
                    "metrics": {"time (inc)": 60.0, "time": 0.0},
                    "children": [
                        {
                            "name": "quux",
                            "metrics": {"time (inc)": 60.0, "time": 5.0},
                            "children": [
                                {
                                    "name": "corge",
                                    "metrics": {"time (inc)": 55.0, "time": 10.0},
                                    "children": [
                                        {
                                            "name": "bar",
                                            "metrics": {
                                                "time (inc)": 20.0,
                                                "time": 5.0,
                                            },
                                            "children": [
                                                {
                                                    "name": "baz",
                                                    "metrics": {
                                                        "time (inc)": 5.0,
                                                        "time": 5.0,
                                                    },
                                                },
                                                {
                                                    "name": "grault",
                                                    "metrics": {
                                                        "time (inc)": 10.0,
                                                        "time": 10.0,
                                                    },
                                                },
                                            ],
                                        },
                                        {
                                            "name": "grault",
                                            "metrics": {
                                                "time (inc)": 10.0,
                                                "time": 10.0,
                                            },
                                        },
                                        {
                                            "name": "garply",
                                            "metrics": {
                                                "time (inc)": 15.0,
                                                "time": 15.0,
                                            },
                                        },
                                    ],
                                }
                            ],
                        }
                    ],
                },
                {
                    "name": "waldo",
                    "metrics": {"time (inc)": 50.0, "time": 0.0},
                    "children": [
                        {
                            "name": "fred",
                            "metrics": {"time (inc)": 35.0, "time": 5.0},
                            "children": [
                                {
                                    "name": "plugh",
                                    "metrics": {"time (inc)": 5.0, "time": 5.0},
                                },
                                {
                                    "name": "xyzzy",
                                    "metrics": {"time (inc)": 25.0, "time": 5.0},
                                    "children": [
                                        {
                                            "name": "thud",
                                            "metrics": {
                                                "time (inc)": 25.0,
                                                "time": 5.0,
                                            },
                                            "children": [
                                                {
                                                    "name": "baz",
                                                    "metrics": {
                                                        "time (inc)": 5.0,
                                                        "time": 5.0,
                                                    },
                                                },
                                                {
                                                    "name": "garply",
                                                    "metrics": {
                                                        "time (inc)": 15.0,
                                                        "time": 15.0,
                                                    },
                                                },
                                            ],
                                        }
                                    ],
                                },
                            ],
                        },
                        {
                            "name": "garply",
                            "metrics": {"time (inc)": 15.0, "time": 15.0},
                        },
                    ],
                },
            ],
        },
        # A second root: "waldo" also appears as a top-level tree.
        {
            "name": "waldo",
            "metrics": {"time (inc)": 30.0, "time": 10.0},
            "children": [
                {
                    "name": "bar",
                    "metrics": {"time (inc)": 20.0, "time": 5.0},
                    "children": [
                        {"name": "baz", "metrics": {"time (inc)": 5.0, "time": 5.0}},
                        {
                            "name": "grault",
                            "metrics": {"time (inc)": 10.0, "time": 10.0},
                        },
                    ],
                }
            ],
        },
    ]
    return graph_dict
gf = ht.GraphFrame.from_literal(mock_graph_literal())
# Render the call tree colored by inclusive time.
print(gf.tree(color=True, metric="time (inc)"))
gf.dataframe
# ## Query 1
# This query matches the following:
# 1. A single node with name "qux"
# 2. 0 or more nodes with inclusive time greater than 10
# 3. A single node with name starting with "gr" and inclusive time less than or equal to 10
query = [
    {"name": "qux"},
    ("*", {"time (inc)": "> 10"}),
    {"name": "gr[a-z]+", "time (inc)": "<= 10"}
]
sgf = gf.filter(query, squash=True)
print(sgf.tree(color=True, metric="time (inc)"))
sgf.dataframe
# ## Query 2
# This query matches the following:
# 1. A single node with name "bar"
# 2. 0 or more nodes with inclusive time greater than 50
#    (the original prose said "greater than 10", but the query below uses "> 50")
# 3. A single node with name starting with "gr" and inclusive time less than or equal to 10
query = [
    {"name": "bar"},
    ("*", {"time (inc)": "> 50"}),
    {"name": "gr[a-z]+", "time (inc)": "<= 10"}
]
sgf = gf.filter(query, squash=True)
print(sgf.tree(color=True, metric="time (inc)"))
sgf.dataframe
# ## Query 3
#
# This query matches the following:
# 1. A single node with name "waldo"
# 2. 1 or more of any node
# 3. A single node with an inclusive time >= 20
# 4. 1 or more of any node
# 5. A single node with an exclusive and inclusive time equal to 5
query = [
    {"name": "waldo"},
    "+",
    {"time (inc)": ">= 20.0"},
    "+",
    {"time (inc)": 5.0, "time": 5.0}
]
sgf = gf.filter(query, squash=True)
print(sgf.tree(color=True, metric="time (inc)"))
sgf.dataframe
# ## Query 4
#
# This query matches the following:
# 1. A single node with name "waldo"
# 2. 1 or more of any node
# 3. A single node with an inclusive time >= 20
# 4. 1 or more of any node
# 5. A single node with an exclusive and inclusive time equal to 7.5
#
# This query does not match any node. It should raise an `EmptyFilter` exception.
query = [
    {"name": "waldo"},
    "+",
    {"time (inc)": ">= 20.0"},
    "+",
    {"time (inc)": 7.5, "time": 7.5}
]
sgf = gf.filter(query, squash=True)
print(sgf.tree(color=True, metric="time (inc)"))
sgf.dataframe
# # "Real-Life" Example
#
# This example uses the `asde/hatchet-sc19-datasets/kripke-mpi/mvapich2.3/hpctoolkit-kripke-database-2589460` database from the SC19 paper.
gf = ht.GraphFrame.from_hpctoolkit("../hatchet-sc19-datasets/kripke-mpi/mvapich2.3/hpctoolkit-kripke-database-2589460")
print(gf.tree(color=True, metric="time (inc)"))
gf.dataframe
# Collapse the per-rank/per-thread index levels so each node has one row.
gf.drop_index_levels()
gf.dataframe
isinstance(gf.dataframe.iloc[0]["name"], str)
gf.dataframe[gf.dataframe["name"].str.startswith("MPI")]
# ## Query 1
# Match every MPI call together with its full call path: any ancestors
# ("*" before) and any descendants ("*" after).
query = [
    "*",
    {"name": "MPI.*"},
    "*"
]
sgf = gf.filter(query, squash=True)
print(sgf.tree(color=True, metric="time (inc)"))
sgf.dataframe
|
docs/examples/tutorial/05_hatchet_query_examples.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # NYC RESTAURANT HEALTH INSPECTION VIOLATIONS ANALYSIS
# 
# ## What does an “A” rating actually mean?
#
# ### Created by <NAME> & <NAME>
#
# New York City is famous for its nearly endless restaurant options. At many of our favorite restaurants, we do not think about the cleanliness of the preparation of the food once we see that the restaurant has passed its health inspection. But what does an “A” rating actually mean? We came across a dataset that showed that many of the places one would assume to be safe from unclean conditions given their health inspection grade, were actually not as clean as we would have thought. Here we will be presenting and testing our hypotheses about restaurants’ health inspection data from the five boroughs. We will be analyzing to see if trends exist between health inspection grades, violation scores, critical violations, and boroughs.
#
# ### Where did we get the data?
# NYC Open Data is a website that aggregates public data generated by various New York City agencies and organizations. We were able to retrieve a dataset with information about the results of restaurants inspections compiled by the New York City Department of Health & Mental Hygiene ("DOHMH"). The dataset contains 437,119 rows and 18 columns. Columns include information such as the name of restaurants, the boroughs, cuisines served, violation grades, descriptions of violations, and other relevant information.
#
# ### Our hypotheses:
#
# #### "A" rated restaurants will have little to no "Critical" violations
# The data contains a column that flags violations as Critical, Not Critical, and Not Applicable. Not Applicable flags are only given to those restaurants that did not have a violation during their inspection. Our hypothesis is that as the Health Inspection Grade falls (from A to B), the number of restaurants with critical violations will increase. We also hypothesize that 0% to 10% of restaurants with an A rating will have critical violations.
#
# #### We expect Manhattan to be cleaner than the rest of the boroughs
#
# The data provides us with the location of each restaurant, including borough information. We can use this location data to see whether the restaurants around NYU are cleaner than those in other boroughs. We expect Manhattan to be cleaner than the average zip code in New York City. We hope to see which borough has the cleanest food preparation.
#
# #### We expect that as violation scores go up, restaurant grades go down at distinct score levels
#
# The data provides us with a "score" column. On the DOHMH website, they tell us that "each violation of a regulation gets a certain number of points. At the end of the inspection, the inspector totals the points, and this number is the restaurant's inspection score—the lower the score, the better the Grade." We decided to plot the series as a whole to see how many violations a typical restaurant in NYC gets. We expect to see clear cut off scores that go with the grade received.
#
# +
import sys # system module
import pandas as pd # data package
import matplotlib.pyplot as plt # graphics module
import datetime as dt # date and time module
import numpy as np # foundation for Pandas
import seaborn as sns # graphics
# %matplotlib inline
# check versions (helps reproduce the notebook environment later)
print('Python version:', sys.version)
print('Pandas version: ', pd.__version__)
print('Today: ', dt.date.today())
# -
# %cd /Users/Hubert/Downloads
project = pd.read_csv('DOHMH_New_York_City_Restaurant_Inspection_Results.csv')
# Keep only the columns used by the analysis below.
project = project[['DBA','BORO','ZIPCODE','CUISINE DESCRIPTION','CRITICAL FLAG','SCORE','GRADE']]
project = project.dropna()
# Drop pending ('Z', 'P') and ungraded inspections, rows with a missing
# borough, and Staten Island (deliberately excluded from this analysis).
project = project[project.GRADE != 'Z']
project = project[project.GRADE != 'P']
project = project[project.GRADE != 'Not Yet Graded']
project = project[project.BORO != 'Missing']
project = project[project.BORO != 'STATEN ISLAND']
project = project.reset_index()
# ## Here is what the Data looks like:
#
# We cleaned the data to include only the relevant information. The variables we decided to include are: name of the restaurant, Borough of the restaurant, zipcode of the restaurant, cuisine served, critical flag information, violation score, and grade received. We also cleaned the data to only show grades A, B, and C, eliminating pending and non-graded restaurants. We also decided to exclude Staten Island.
project.head()
# ## Our first plot of the data
#
# ## Boroughs and their grade distributions
#
# We plotted the grade information and separated the grades by borough. There are more restaurants in Manhattan than other boroughs, and therefore Manhattan has more A rated restaurants. This plot simply shows the amount of restaurants in each borough with each grade. We will analyze the percentage of A rated restaurants in each borough later. Manhattan has the most restaurants. Brooklyn and Queens have about the same number of restaurants. Bronx has the lowest amount of restaurants.
sns.countplot(y='GRADE',data=project,hue='BORO',palette='Greens_d')
plt.style.use("fivethirtyeight")
# Slice out the two columns needed for the critical-flag analysis.
criticalgraph = project[["CRITICAL FLAG", "GRADE"]]
critnona = criticalgraph.dropna()
# ## Grade distribution is roughly normal
#
# The grades seem to follow a normal distribution, meaning that it roughly fits a bell curve. Our data indicated that 79% of restaurants receive an A rating, 17% receive a B rating, and 4% receive a C rating. Our initial analysis suggests that there does not seem to be a correlation between grade scores and borough, even though we hypothesized that the Bronx would have lower grades than Manhattan.
critnona["GRADE"].value_counts().plot(kind="pie", title = "All Boros")
per = critnona["GRADE"].value_counts()
# NOTE(review): the denominator hard-codes the A + B + C counts from one
# particular run of the dataset; len(critnona) would track the data
# automatically. Verify these numbers before reusing the notebook.
per["per"] = critnona["GRADE"].value_counts()/(154922+32740+8261)
per
# Concatenate critical flag with grade (e.g. 'CriticalA') so each
# combination can be counted with a single value_counts().
critnona["combined"] = critnona["CRITICAL FLAG"] + critnona["GRADE"]
criticalandgrade = critnona["combined"].value_counts()
# NOTE(review): positional rows 1, 2, 4 are assumed to be the 'Critical'
# rows for grades A, B, C in this dataset's ordering -- fragile; confirm
# if the data changes.
dfcritgradeabc = criticalandgrade.iloc[[1,2,4]]
dfcritgradeabc = dfcritgradeabc.reset_index()
gradeseries = critnona["GRADE"].value_counts()
gradeseriesABC = gradeseries.head(3)
gradeseriesABC = gradeseriesABC.reset_index()
gradeseriesABC = gradeseriesABC.rename(columns={'index': 'Grade', 'GRADE': 'Number'})
# Critical counts divided by total counts per grade, as a percentage.
dfcrit = pd.concat([dfcritgradeabc,gradeseriesABC], axis=1)
dfcrit["Percentage With Critical Violation"] = dfcrit["combined"]/dfcrit["Number"]
dfcrit = dfcrit.set_index("index")
dfcrit["Percentage With Critical Violation"]=dfcrit["Percentage With Critical Violation"] * 100
dfcrit["Percentage With Critical Violation"]
# ## Almost 50% of A rated restaurants have one or more "critical" violations
#
# The figure below shows that 48.3% of A rated restaurants have one or more "critical" violations, while B and C rated restaurants have 68.3% and 67.5% chance of having one or more critical violations, respectively.
# +
fig, ax = plt.subplots()
xlabels=['A','B','C']
dfcrit["Percentage With Critical Violation"].plot(ax=ax,kind="bar", title = "Critical Violations per Grade (in %)")
ax.set_xticklabels(xlabels)
ax.set_xlabel("")
ax.set_ylabel("Percentage Points")
#bar chart with the percentage of Critical Violation given grade.
#~50% of A rated restaurants have critical violations
# -
borograph = project[["BORO", "GRADE"]]
borograph = borograph.dropna()
borograph["Boro Grades"] = borograph["BORO"] + borograph["GRADE"]
combinedboro = borograph["Boro Grades"].value_counts()
manhattangrades = combinedboro.iloc[[0,4,7]]
brooklyngrades = combinedboro.iloc[[1,5,10]]
queensgrades = combinedboro.iloc[[2,6,9]]
bronxgrades = combinedboro.iloc[[3,8,11]]
# ## All boroughs have roughly the same grade distribution
#
# All the boroughs are rated the same as one another. If DOHMH applies the same set of standards across all boroughs, we must reject our initial hypothesis that some boroughs are cleaner than others.
fig, ax = plt.subplots(2,2, figsize=(14,12))
queensgrades.plot(ax=ax[1,0],kind='pie',title='Queens Grades', labels = ["A","B","C"])
brooklyngrades.plot(ax=ax[0,1],kind="pie", title="Brooklyn Grades",labels = ["A","B","C"])
manhattangrades.plot(ax=ax[0,0],kind="pie",title="Manhattan Grades",labels = ["A","B","C"])
bronxgrades.plot(ax=ax[1,1],kind="pie",title="Bronx Grades",labels = ["A","B","C"])
ax[0,0].set_ylabel("")
ax[1,0].set_ylabel("")
ax[0,1].set_ylabel("")
ax[1,1].set_ylabel("")
# +
#project['SCOREvalue'] = if project['GRADE'] == 'A':
# -
project['SCORE'] = project['SCORE'].astype(int)
project['SCORE'].dtype
# ## Almost all restaurants receive less than 25 points in score
#
# The KDE plot below shows us that most restaurants have few health inspection violations.
# +
fig, ax = plt.subplots()
sns.kdeplot(project["SCORE"], shade=True, ax=ax)
ax.legend()
fig.suptitle("Health Inspection Violations Density")
plt.show()
# -
scoregraph = project[["SCORE", "GRADE"]]
scoregraph = scoregraph.dropna()
# These grade filters repeat the cleaning already applied to `project`
# above, so they are no-ops here; kept for safety.
scoregraph = scoregraph[scoregraph.GRADE != 'Z']
scoregraph = scoregraph[scoregraph.GRADE != 'P']
scoregraph = scoregraph[scoregraph.GRADE != 'Not Yet Graded']
scoregraph = scoregraph.reset_index()
ListOfGrades = []
# ### Data Manipulation:
#
# We are manipulating the data so that we can use functions that typically take numbers instead of qualitative data. We decided to convert the A grade into the integer 1, the B grade into the integer 2, and the C grade into the integer 3.
#
# `{'A':1, 'B':2, 'C':3}`
# Map each letter grade to a small integer so seaborn can treat the grade
# as an ordered axis. NOTE(review): the "NaN" branch can never fire --
# dropna() already removed missing grades, and a real NaN is not equal to
# the string "NaN" anyway.
for item in scoregraph["GRADE"]:
    if item == "A":
        ListOfGrades.append(1)
    if item == "B":
        ListOfGrades.append(2)
    if item == "C":
        ListOfGrades.append(3)
    if item == "NaN":
        ListOfGrades.append(5)
# Alignment relies on reset_index() above giving positions 0..n-1.
scoregraph["GradeIndex"] = pd.Series(ListOfGrades)
# ## The violin plot tells us that most C rated restaurants have more than 25 points
#
fig, ax = plt.subplots(figsize=(14,12))
sns.violinplot(x="GradeIndex", y="SCORE", data=scoregraph, linewidth = 2)
# NOTE: `xlabels` (['A','B','C']) was defined in an earlier cell.
ax.set_xticklabels(xlabels)
ax.set_ylim(-10,140)
ax.set_xlabel("")
ax.set_ylabel("Health Inspection Violations Given Grade")
#df.corr will give a correlation.
# ## Conclusions
#
# #### • 79% of restaurants have an A rating, 17% of restaurants have a B rating, and 4% of restaurants have a C rating.
#
# #### • Restaurant grade distributions remain constant for all four boroughs analyzed.
#
# #### • 48% of A rated restaurants have one or more critical violations.
#
# #### • Most C rated restaurants have more than 25 points in score, while most A rated restaurants have less than 15 points in score.
#
# We should not blindly trust regulators about how safe our restaurants actually are. One of the most shocking revelations was that so many A-rated restaurants have critical flags, since critical flags are given for serious violations, like the presence of rodent feces. In conclusion, an A rating does not mean much, and those who actually care about a restaurant's food safety should research its violation record specifically. We should learn to challenge the information presented around us, as the reality is in the data.
|
UG_F16/Lin-Sarinoglu-NYC Inspection Analysis.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: py36
# language: python
# name: py36
# ---
# ## Gameplan
# * mark out all of the coordinates that each wire has traveled in a set
# * take the intersection of the sets
# * calculate the manhatten distance from each coordinate to the origin.
# +
def parse(puzzle_input):
    """Parse the puzzle input into per-wire move lists.

    Each input line describes one wire as comma-separated moves such as
    'R75'; every move becomes a (direction, distance) tuple.
    """
    return [
        [(move[0], int(move[1:])) for move in line.split(',')]
        for line in puzzle_input.strip().split('\n')
    ]
def get_points(A):
    """Trace a wire's path starting from the origin.

    Returns (visited, step_counter): the set of grid points the wire
    touches, and a dict mapping each point to the step count at which
    it was most recently reached (later passes overwrite earlier ones,
    matching the original behaviour).
    """
    deltas = {'U': (0, 1), 'D': (0, -1), 'R': (1, 0), 'L': (-1, 0)}
    x = y = steps = 0
    visited = set()
    step_counter = {}
    for direction, distance in A:
        dx, dy = deltas[direction]
        for _ in range(distance):
            steps += 1
            x, y = x + dx, y + dy
            visited.add((x, y))
            step_counter[(x, y)] = steps
    return visited, step_counter
def intersections(A, B):
    """Return the set of grid points visited by both wire A and wire B."""
    visited_a, _ = get_points(A)
    visited_b, _ = get_points(B)
    return visited_a & visited_b
def min_dist_intersection(A, B):
    """Return the smallest Manhattan distance from the origin to any
    point where wires A and B cross."""
    return min(abs(px) + abs(py) for px, py in intersections(A, B))
# +
# Part 1 example from the puzzle statement: closest crossing is 159 away.
TEST = '''R75,D30,R83,U83,L12,D49,R71,U7,L72
U62,R66,U55,R34,D71,R55,D58,R83'''
A, B = parse(TEST)
assert min_dist_intersection(A, B) == 159
# +
# Part 1 answer for the real puzzle input.
with open('data/day03_input.txt') as f:
    A, B = parse(f.read())
min_dist_intersection(A, B)
# -
def min_step_intersection(A, B):
    """Return the fewest combined steps for the two wires to reach any
    point where they cross (Advent of Code day 3, part 2).

    Performance fix: the original re-ran get_points(A) and get_points(B)
    for every intersection point inside the comprehension, retracing both
    wires O(n) times; each wire is now traced exactly once.
    """
    steps_a = get_points(A)[1]
    steps_b = get_points(B)[1]
    return min(steps_a[p] + steps_b[p] for p in intersections(A, B))
# +
# Part 2 example: the best crossing costs 410 combined steps.
TEST2 = '''R98,U47,R26,D63,R33,U87,L62,D20,R33,U53,R51
U98,R91,D20,R16,D67,R40,U7,R15,U6,R7'''
# Bug fix: the original asserted on an undefined name `answers`; the
# part-2 helper defined above is min_step_intersection.
assert min_step_intersection(*parse(TEST2)) == 410
# -
# Part 2 answer for the puzzle input parsed above.
min_step_intersection(A, B)
|
day03.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python [conda env:cw-tiler]
# language: python
# name: conda-env-cw-tiler-py
# ---
# ## This notebook demonstrates the ability to create a SpaceNet CSV from GeoJSONs as ground truth
from spacenetutilities.labeltools import coreLabelTools as lT
import os
import glob
import argparse
# +
## Src Directory for a Group of Tiff Files
srcDirectory = '/raid/nfs/data/Datasets/CosmiQ_General_Study/AOI_6_Atlanta/processedDatav3/SpaceNet-Off-Nadir_Train/Atlanta_nadir10_catid_1030010003993E00/'
## GeoJSON Directory
geojsonDirectory = "/raid/nfs/data/Datasets/CosmiQ_General_Study/AOI_6_Atlanta/processedDatav3/SpaceNet-Off-Nadir_Train/geojson/spacenet-buildings/"
# +
## Get List of all Pan-Sharpen Tiffs in rasterList (Ensure Tiff is at resolutions required)
rasterList = glob.glob(os.path.join(srcDirectory, '**', 'Pan-Sharpen*.tif'), recursive=True)
geoJsonList = glob.glob(os.path.join(geojsonDirectory, '**', '*.geojson'), recursive=True)
# Use this flag to have the CSV be a prediction CSV with a Confidence Value Inserted; for Creating Ground Truth set to False
createProposalFile=False
chipSummaryList = []
# Set Pixel Precision (Round(pixelPrecision)): each pixel coordinate keeps only 3 decimals
pixelPrecision = 3
# Specify Competition type
compType = "buildings"
## Specify Output File
outputCSVFileName = "/raid/nfs/workingDir/cw-tiler/AOI_6_Atlanta_Train_v6.csv"
## Specify whether to honor occluded_flag; SpaceNet 4 added the occluded flag for Building footprints
occluded_flag=True
## Create Chip List: this loop matches a GeoTiff with a GeoJSON and then derives the imageId for the CSV
for rasterName in rasterList:
    rasterBase = os.path.basename(rasterName)
    # The last pieces of the '_'-separated name identify the tile.
    baseSplit = rasterBase.rsplit('_', 3)
    imageId = rasterBase.split('_',1)[1]
    geoVectorName = os.path.join(geojsonDirectory,
                                 "spacenet-buildings_{}_{}".format(baseSplit[2], baseSplit[3]).replace(".tif", ".geojson")
                                 )
    chipSummary = {'chipName': rasterName,
                   'geoVectorName': geoVectorName,
                   'imageId': imageId.replace(".tif", "")}
    chipSummaryList.append(chipSummary)
## Write the summary CSV pairing each image chip with its label file
lT.createCSVSummaryFile(chipSummaryList, outputCSVFileName,
                        replaceImageID="",
                        createProposalsFile=createProposalFile,
                        pixPrecision=pixelPrecision,
                        competitionType=compType,
                        occluded_flag=occluded_flag)
|
Build_Ground_TruthCSV.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/Nolanole/DS-Unit-2-Applied-Modeling/blob/master/module3/LS_DS_233_solution.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + [markdown] colab_type="text" id="nCc3XZEyG3XV"
# Lambda School Data Science
#
# *Unit 2, Sprint 3, Module 3*
#
# ---
#
#
# # Permutation & Boosting
#
# You will use your portfolio project dataset for all assignments this sprint.
#
# ## Assignment
#
# Complete these tasks for your project, and document your work.
#
# - [ ] If you haven't completed assignment #1, please do so first.
# - [ ] Continue to clean and explore your data. Make exploratory visualizations.
# - [ ] Fit a model. Does it beat your baseline?
# - [ ] Try xgboost.
# - [ ] Get your model's permutation importances.
#
# You should try to complete an initial model today, because the rest of the week, we're making model interpretation visualizations.
#
# But, if you aren't ready to try xgboost and permutation importances with your dataset today, that's okay. You can practice with another dataset instead. You may choose any dataset you've worked with previously.
#
# The data subdirectory includes the Titanic dataset for classification and the NYC apartments dataset for regression. You may want to choose one of these datasets, because example solutions will be available for each.
#
#
# ## Reading
#
# Top recommendations in _**bold italic:**_
#
# #### Permutation Importances
# - _**[Kaggle / <NAME>: Machine Learning Explainability](https://www.kaggle.com/dansbecker/permutation-importance)**_
# - [<NAME>: Interpretable Machine Learning](https://christophm.github.io/interpretable-ml-book/feature-importance.html)
#
# #### (Default) Feature Importances
# - [<NAME>: Selecting good features, Part 3, Random Forests](https://blog.datadive.net/selecting-good-features-part-iii-random-forests/)
# - [<NAME>, et al: Beware Default Random Forest Importances](https://explained.ai/rf-importance/index.html)
#
# #### Gradient Boosting
# - [A Gentle Introduction to the Gradient Boosting Algorithm for Machine Learning](https://machinelearningmastery.com/gentle-introduction-gradient-boosting-algorithm-machine-learning/)
# - _**[A Kaggle Master Explains Gradient Boosting](http://blog.kaggle.com/2017/01/23/a-kaggle-master-explains-gradient-boosting/)**_
# - [_An Introduction to Statistical Learning_](http://www-bcf.usc.edu/~gareth/ISL/ISLR%20Seventh%20Printing.pdf) Chapter 8
# - [Gradient Boosting Explained](http://arogozhnikov.github.io/2016/06/24/gradient_boosting_explained.html)
# - _**[Boosting](https://www.youtube.com/watch?v=GM3CDQfQ4sw) (2.5 minute video)**_
# + id="g-PL0pHg6ybp" colab_type="code" colab={}
# %%capture
import sys
# If you're on Colab:
if 'google.colab' in sys.modules:
    DATA_PATH = 'https://raw.githubusercontent.com/LambdaSchool/DS-Unit-2-Applied-Modeling/master/data/'
    # !pip install category_encoders==2.*
    # !pip install eli5
# If you're working locally:
else:
    DATA_PATH = '../data/'
# + [markdown] id="eNo22BM37BCN" colab_type="text"
# # Titanic (Classification)
# + id="SdP5CM2e6tg-" colab_type="code" colab={}
import pandas as pd
from sklearn.model_selection import train_test_split
# 75/25 train/validation split; fixed seed for reproducibility.
train, val = train_test_split(pd.read_csv(DATA_PATH+'titanic/train.csv'), random_state=42)
# + id="_bs09iwa780F" colab_type="code" colab={}
features = ['Sex', 'Pclass', 'Embarked', 'SibSp', 'Parch', 'Age']
target = 'Survived'
X_train = train[features]
y_train = train[target]
X_val = val[features]
y_val = val[target]
# + id="vKe0yCYD7sYd" colab_type="code" outputId="26fe06cd-dc1f-4680-f751-8b56ab3ea998" colab={"base_uri": "https://localhost:8080/", "height": 1000}
import category_encoders as ce
from sklearn.impute import SimpleImputer
from sklearn.pipeline import make_pipeline
from xgboost import XGBClassifier
# One-hot encode categoricals, then mean-impute missing values.
transformers = make_pipeline(ce.OneHotEncoder(use_cat_names=True), SimpleImputer())
X_train_transformed = transformers.fit_transform(X_train)
X_val_transformed = transformers.transform(X_val)
eval_set = [(X_train_transformed, y_train),
            (X_val_transformed, y_val)]
# Early stopping halts training once validation error fails to improve
# for 20 consecutive rounds.
model = XGBClassifier(n_estimators=1000, n_jobs=-1)
model.fit(X_train_transformed, y_train, eval_set=eval_set,
          eval_metric='error', early_stopping_rounds=20)
# + id="h970qtXl-i14" colab_type="code" outputId="5d668408-aca2-4295-d264-ccd250c9bc93" colab={"base_uri": "https://localhost:8080/", "height": 225}
import eli5
from eli5.sklearn import PermutationImportance
# Permutation importance: the drop in validation accuracy when each
# feature column is shuffled (3 repeats per feature).
permuter = PermutationImportance(model, scoring='accuracy', n_iter=3)
permuter.fit(X_val_transformed, y_val)
encoder = transformers.named_steps['onehotencoder']
feature_names = encoder.transform(X_val).columns.tolist()
eli5.show_weights(permuter, top=None, feature_names=feature_names)
# + [markdown] id="X4p1JrTe7PoX" colab_type="text"
# # NYC Apartments (Regression)
# + id="A0a01i2D7a2i" colab_type="code" colab={}
import numpy as np
import pandas as pd
# Read New York City apartment rental listing data
df = pd.read_csv(DATA_PATH+'apartments/renthop-nyc.csv')
assert df.shape == (49352, 34)
# Remove the most extreme 1% prices,
# the most extreme .1% latitudes, &
# the most extreme .1% longitudes
# NOTE(review): the latitude upper bound uses < while the other bounds
# use <= — confirm the asymmetry is intended.
df = df[(df['price'] >= np.percentile(df['price'], 0.5)) &
        (df['price'] <= np.percentile(df['price'], 99.5)) &
        (df['latitude'] >= np.percentile(df['latitude'], 0.05)) &
        (df['latitude'] < np.percentile(df['latitude'], 99.95)) &
        (df['longitude'] >= np.percentile(df['longitude'], 0.05)) &
        (df['longitude'] <= np.percentile(df['longitude'], 99.95))]
# + id="Vp8Y8j1KCAba" colab_type="code" colab={}
# Do train/val/test split
# Train on April 2016
# Validate on May 2016
# Test on June 2016
df['created'] = pd.to_datetime(df['created'], infer_datetime_format=True)
train = df[df.created.dt.month == 4]
val = df[df.created.dt.month == 5]
test = df[df.created.dt.month == 6]
# + id="AhIRPtb4CAsT" colab_type="code" colab={}
# Wrangle train, val, test sets in the same way
def engineer_features(df):
    """Add engineered features to an apartment-listings frame.

    Works on a copy (avoids SettingWithCopyWarning) and returns it with
    description-, perk-, pet-, room- and date-based features added; the
    raw 'created' timestamp column is dropped.
    """
    out = df.copy()

    # Description features: presence and length.
    out['description'] = out['description'].str.strip().fillna('')
    out['has_description'] = out['description'] != ''
    out['description_length'] = out['description'].str.len()

    # Count how many perks each listing advertises.
    perk_cols = ['elevator', 'cats_allowed', 'hardwood_floors', 'dogs_allowed',
                 'doorman', 'dishwasher', 'no_fee', 'laundry_in_building',
                 'fitness_center', 'pre-war', 'laundry_in_unit', 'roof_deck',
                 'outdoor_space', 'dining_room', 'high_speed_internet', 'balcony',
                 'swimming_pool', 'new_construction', 'exclusive', 'terrace',
                 'loft', 'garden_patio', 'common_outdoor_space',
                 'wheelchair_access']
    out['perk_count'] = out[perk_cols].sum(axis=1)

    # Pet-policy flags.
    cats = out['cats_allowed'] == 1
    dogs = out['dogs_allowed'] == 1
    out['cats_or_dogs'] = cats | dogs
    out['cats_and_dogs'] = cats & dogs

    # Total rooms and days elapsed in 2016; drop the raw timestamp.
    out['rooms'] = out['bedrooms'] + out['bathrooms']
    out['days'] = (out['created'] - pd.to_datetime('2016-01-01')).dt.days
    return out.drop(columns='created')
train = engineer_features(train)
val = engineer_features(val)
test = engineer_features(test)
# + id="9WqN9GGxBhlb" colab_type="code" colab={}
target = 'price'
# Every remaining column except the target is used as a feature.
features = train.columns.drop(target)
X_train = train[features]
y_train = train[target]
X_val = val[features]
y_val = val[target]
X_test = test[features]
y_test = test[target]
# + id="5j2tyn_GC8rB" colab_type="code" colab={}
import warnings
# Silence xgboost's FutureWarnings to keep the notebook output readable.
warnings.filterwarnings(action='ignore', category=FutureWarning, module='xgboost')
# + id="8H9WbCAiBU1p" colab_type="code" outputId="08d25174-c28b-48c2-9d3a-3f467031aa95" colab={"base_uri": "https://localhost:8080/", "height": 1000}
import category_encoders as ce
from sklearn.impute import SimpleImputer
from sklearn.pipeline import make_pipeline
from xgboost import XGBRegressor
# Ordinal-encode categoricals, then mean-impute missing values.
transformers = make_pipeline(ce.OrdinalEncoder(), SimpleImputer())
X_train_transformed = transformers.fit_transform(X_train)
X_val_transformed = transformers.transform(X_val)
eval_set = [(X_train_transformed, y_train),
            (X_val_transformed, y_val)]
model = XGBRegressor(
    n_estimators=1000,
    max_depth=10,
    objective='reg:squarederror',
    n_jobs=-1,
)
# Early stopping halts once validation MAE stops improving for 20 rounds.
model.fit(X_train_transformed, y_train, eval_set=eval_set,
          eval_metric='mae', early_stopping_rounds=20)
# + id="3mKAkfJSCgh0" colab_type="code" outputId="d260dde8-14b8-4312-aa2b-453141543bbe" colab={"base_uri": "https://localhost:8080/", "height": 708}
import eli5
from eli5.sklearn import PermutationImportance
# Importance measured as the drop in negative MAE when each feature is shuffled.
permuter = PermutationImportance(model, scoring='neg_mean_absolute_error', n_iter=3)
permuter.fit(X_val_transformed, y_val)
feature_names = X_val.columns.tolist()
eli5.show_weights(permuter, top=None, feature_names=feature_names)
|
module3/LS_DS_233_solution.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # OpenDataTools 支持城市空气质量数据
# +
# Import the AQI interface
from opendatatools import aqi
# Note: data sourced from the Ministry of Environmental Protection website
# -
# Fetch AQI readings for all major cities on a given day
df = aqi.get_daily_aqi('2018-01-01')
df.head(20)
# Fetch the daily AQI history for a single city (Beijing)
df = aqi.get_daily_aqi_onecity('北京市')
df.head(20)
# Fetch hourly AQI readings for one city on one day
aqi_hour = aqi.get_hour_aqi_onecity('北京市', '2018-06-19')
aqi_hour.set_index('time', inplace=True)
aqi_hour
# Fetch real-time hourly AQI readings
aqi_hour = aqi.get_hour_aqi()
aqi_hour.head(20)
# +
from pyecharts import Line
df_aqi = aqi.get_daily_aqi_onecity('北京市')
df_aqi.set_index('date', inplace=True)
df_aqi.sort_index(ascending=True, inplace=True)
# Keep only 2018 data for the trend chart.
df_aqi = df_aqi[df_aqi.index >= "2018-01-01"]
axis_x = df_aqi.index
axis_y = df_aqi['aqi']
line = Line("北京AQI趋势图")
line.add("aqi curve for beijing", axis_x, axis_y, mark_point=["average"])
line
# -
line
# +
from pyecharts import Geo
df_aqi = aqi.get_daily_aqi('2018-06-19')
# some cities cannot be processed by echarts
echart_unsupported_city = ["菏泽市", "襄阳市", "恩施州", "湘西州","阿坝州", "延边州",
                           "甘孜州", "凉山州", "黔西南州", "黔东南州", "黔南州", "普洱市", "楚雄州", "红河州",
                           "文山州", "西双版纳州", "大理州", "德宏州", "怒江州", "迪庆州", "昌都市", "山南市",
                           "林芝市", "临夏州", "甘南州", "海北州", "黄南州", "海南州", "果洛州", "玉树州", "海西州",
                           "昌吉州", "博州", "克州", "伊犁哈萨克州"]
data = []
for index, row in df_aqi.iterrows():
    city = row['city']
    # NOTE(review): rebinding `aqi` here shadows the imported module;
    # harmless only because the module is not used after this loop.
    aqi = row['aqi']
    if city in echart_unsupported_city:
        continue
    data.append( (city, aqi) )
geo = Geo("全国主要城市空气质量(AQI) - 2018-06-19", "数据来源于环保部网站",
          title_color="#fff",
          title_pos="center", width=1000,
          height=600, background_color='#404a59')
attr, value = geo.cast(data)
geo.add("", attr, value,
        visual_range=[0, 200], maptype='china',visual_text_color="#fff",
        symbol_size=10, is_visualmap=True,
        label_formatter='{b}', # show only the city name in the label
        tooltip_formatter='{c}', # format: longitude, latitude, value
        label_emphasis_textsize=15, # font size of a highlighted label
        label_emphasis_pos='right' # position of a highlighted label
        )
geo
# -
|
example/ipynb/opendatatools-aqi.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Project1 - Team 5, Studio 1
#
# <NAME>, <NAME>
# ### Question: What would the age demographics be in 15 years if the US implemented an one-child policy?
# +
# Configure Jupyter so figures appear in the notebook
# %matplotlib inline
# Configure Jupyter to display the assigned value after an assignment
# %config InteractiveShell.ast_node_interactivity='last_expr_or_assign'
# import functions from the modsim.py module
from modsim import *
# importing "copy" for copy operations
import copy
# -
# ### Initial Data Pulls
# #### Initial Population Data by Age Range and Gender (0-4, 5-9, etc)
pop_init_data = pd.read_csv('data/pop_2017_US.csv', index_col='age')
# Source: https://factfinder.census.gov/faces/tableservices/jsf/pages/productview.xhtml?src=bkmk
# #### Death Rate Data by Age Group and Gender (0, 1-4, 5-9, etc)
death_rate_data = pd.read_csv('data/age_death_rate.csv', index_col='age')
# Source: https://www.statista.com/statistics/241488/population-of-the-us-by-sex-and-age/
# #### Probability of Woman Ever Having a Child by Age Group (15-19, 20-24, etc)
child_ever_data = pd.read_csv('data/child_ever_born.csv', index_col='age')
# Source: Fertility of Women in the United States: 2012, <NAME> and <NAME>
# #### Probability of Woman Having First Child by Age
first_birth_data = pd.read_csv('data/first_birth_rate.csv', index_col='age')
# Source: https://www.cdc.gov/nchs/nvss/births.htm
# #### Adapting Initial Population Data into Age Groups
# +
# Expand the five-year census brackets into 90 single-year cohorts,
# assuming people are spread evenly within each bracket.
male_pop = linspace(0,0,90)
female_pop = linspace(0,0,90)
ident = 4
for i in range(90):
    if i>ident:
        ident += 5
    male_pop[i] = int(pop_init_data.male[ident]/5)
    female_pop[i] = int(pop_init_data.female[ident]/5)
# -
female_pop[0] #test
# #### Setting Initial Population State by Gender
init_population = State(male = male_pop, female = female_pop)
# #### Setting Parameters
system = System(male_death = death_rate_data.male,
                female_death = death_rate_data.female,
                child_ever = child_ever_data.percentage,
                first_rate = first_birth_data.percentage,
                t_0 = 2018,
                t_end = 2200,
                init_pop = init_population)
# #### Splitting Initial Female Population --> Female w/ no children, Female w/ children
population = State(male = copy.deepcopy(system.init_pop.male), female = copy.deepcopy(system.init_pop.female), female_w = linspace(0,0,90))
# #### Assigning Population Value to 2 Female Groups Above
# Split each fertile-age female cohort by the probability of ever having had a child.
# NOTE(review): range(15,49) stops at age 48 while update_func_birth uses
# range(15,50) — confirm the mismatch is intended.
ident = 19
for i in range(15,49):
    if i>ident:
        ident += 5
    population.female_w[i] = int(population.female[i]*system.child_ever[ident])
    population.female[i] = int(population.female[i]*(1-system.child_ever[ident]))
def update_func_death(state, system):
    """Apply one year of mortality to every age cohort, in place.

    Death rates are stored per five-year bracket, so each single-year
    age is mapped to its bracket key before the rate is applied; the
    female_w group has no age-0 cohort to decrement.
    Returns the mutated state.
    """
    state.male[0] = int(state.male[0] * (1 - system.male_death[0]))
    state.female[0] = int(state.female[0] * (1 - system.female_death[0]))
    bracket = 4
    for age in range(1, 90):
        if age > bracket:
            bracket += 5
        state.male[age] = int(state.male[age] * (1 - system.male_death[bracket]))
        state.female[age] = int(state.female[age] * (1 - system.female_death[bracket]))
        state.female_w[age] = int(state.female_w[age] * (1 - system.female_death[bracket]))
    return state
def update_func_birth(state, system):
    """Move first-time mothers (ages 15-49) into the with-child group.

    For each fertile age, the first-birth rate decides how many women
    have their first child this year; they leave state.female and join
    state.female_w. Mutates state in place.
    Returns the total number of babies born.
    """
    total = 0
    for age in range(15, 50):
        new_mothers = int(state.female[age] * system.first_rate[age])
        state.female[age] -= new_mothers
        state.female_w[age] += new_mothers
        total += new_mothers
    return total
def update_func_pop(baby, state, system):
    """Age every cohort by one year and add this year's newborns.

    Cohorts are shifted upward (the 90th-year cohort falls off the end)
    and the newborns are split evenly between the sexes. female_w[0] is
    deliberately left untouched, matching the original: the youngest
    cohort never has children, so it stays 0.
    Mutates and returns state.
    """
    for age in range(89, 0, -1):
        state.male[age] = state.male[age - 1]
        state.female[age] = state.female[age - 1]
        state.female_w[age] = state.female_w[age - 1]
    newborns_per_sex = int(baby / 2)
    state.male[0] = newborns_per_sex
    state.female[0] = newborns_per_sex
    return state
def addall(state):
    """Return the total population summed over the 90 age cohorts of
    males, females without children, and females with children."""
    return sum(
        state.male[age] + state.female[age] + state.female_w[age]
        for age in range(90)
    )
def run_simulation(system, state, update_func_death, update_func_birth, update_func_pop, addall):
    """Run the yearly population simulation from system.t_0 to system.t_end.

    Each simulated year applies deaths, then first births, then ages the
    population, and records the resulting total in a TimeSeries keyed by
    year t+1. The caller's `state` is not mutated: the simulation runs on
    a deep copy.
    """
    nstate = State(male = copy.deepcopy(state.male), female = copy.deepcopy(state.female), female_w = copy.deepcopy(state.female_w))
    results = TimeSeries()
    for t in linrange(system.t_0, system.t_end):
        nstate = update_func_death(nstate,system)
        baby = update_func_birth(nstate,system)
        nstate = update_func_pop(baby,nstate,system)
        totalpop = addall(nstate)
        results[t+1] = totalpop
    return results
# Total starting population (sanity check before running the simulation).
addall(population)
results = run_simulation(system, population, update_func_death, update_func_birth, update_func_pop, addall)
# Plot total population over time.
plot(results, ':')
# Methods
# Parameters:
# 1. Initial Population by age and gender group.
# 2. Death rate by age and gender group.
# 3. First birth rate for women by age group.
# 4. Probability of child birth gender (assume 50/50?)
# 5. Percentage of women with child ever born
#
# State:
# 1. Updated population by age and gender group
# 2. Time
#
# Limitations:
#
# 1. Not accounting for twins
# 2. birth/death rate assuming constant for next 15 years
# 3. Assumption of gender birth ratio
# 4. No immigrants
# 5. Data not the most recent
#
# Results
# 1. Compare with US Gov Data for projected future population number
# 2. Plot general population vs time (line graph)
# 3. Plot general populations (with subsections of age groups) vs time (bar graph)
#
# To Do List
# 1. 3 Update Functions
# 2. Assume 50/50 gender birth ratio
# 3. Graph (to test, after each updatefunc?)
#
#
|
code/Project1_summit.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] id="0OfDqwNP23u_"
# #BART Rap Generation
# Tutorial Here https://towardsdatascience.com/teaching-bart-to-rap-fine-tuning-hugging-faces-bart-model-41749d38f3ef
# + id="B7EVTfMVdv4Z"
# !pip install -q pytorch-lightning
# !pip install -q transformers
# + id="3fDnjiDoeZbK"
import transformers
from torch.utils.data import DataLoader, TensorDataset, random_split, RandomSampler, Dataset
import pandas as pd
import numpy as np
import torch.nn.functional as F
import pytorch_lightning as pl
import torch
from pytorch_lightning.callbacks import ModelCheckpoint
import math
import random
import re
import argparse
# + colab={"base_uri": "https://localhost:8080/"} id="Goj88fbbQ3Wo" outputId="118e30fd-f89b-43d6-ba49-188db0af0bbe"
# Mount Google Drive so model checkpoints can be written under BART/.
from google.colab import drive
drive.mount('/content/gdrive', force_remount=False)
root_dir = "/content/gdrive/My Drive/"
base_dir = root_dir + 'BART/'
# + [markdown] id="-YqJ68pEB-_g"
# # Use PyTorch Lightning for training: https://pytorch-lightning.readthedocs.io/en/latest/
# + id="cPxWynRmeGlL"
class LitModel(pl.LightningModule):
    """PyTorch-Lightning wrapper that fine-tunes a BART
    conditional-generation model with a cross-entropy LM loss."""

    # Instantiate the model
    def __init__(self, learning_rate, tokenizer, model, hparams):
        super().__init__()
        self.tokenizer = tokenizer
        self.model = model
        self.learning_rate = learning_rate
        self.hparams = hparams
        # Optionally freeze the encoder and/or the embeddings so only the
        # decoder is fine-tuned.
        if self.hparams.freeze_encoder:
            freeze_params(self.model.get_encoder())
        if self.hparams.freeze_embeds:
            self.freeze_embeds()

    def freeze_embeds(self):
        ''' freeze the positional embedding parameters of the model; adapted from finetune.py '''
        freeze_params(self.model.model.shared)
        for d in [self.model.model.encoder, self.model.model.decoder]:
            freeze_params(d.embed_positions)
            freeze_params(d.embed_tokens)

    # Do a forward pass through the model
    def forward(self, input_ids, **kwargs):
        return self.model(input_ids, **kwargs)

    def configure_optimizers(self):
        # Plain Adam over all (unfrozen) parameters.
        optimizer = torch.optim.Adam(self.parameters(), lr = self.learning_rate)
        return optimizer

    def training_step(self, batch, batch_idx):
        # Load the data into variables
        src_ids, src_mask = batch[0], batch[1]
        tgt_ids = batch[2]
        # Shift the decoder tokens right (but NOT the tgt_ids)
        # NOTE(review): uses the module-level `tokenizer` here rather than
        # self.tokenizer (they are the same object in this notebook).
        decoder_input_ids = shift_tokens_right(tgt_ids, tokenizer.pad_token_id)
        # Run the model and get the logits
        outputs = self(src_ids, attention_mask=src_mask, decoder_input_ids=decoder_input_ids, use_cache=False)
        lm_logits = outputs[0]
        # Create the loss function; pad positions do not contribute to the loss.
        ce_loss_fct = torch.nn.CrossEntropyLoss(ignore_index=self.tokenizer.pad_token_id)
        # Calculate the loss on the un-shifted tokens
        loss = ce_loss_fct(lm_logits.view(-1, lm_logits.shape[-1]), tgt_ids.view(-1))
        return {'loss':loss}

    def validation_step(self, batch, batch_idx):
        # Same computation as training_step, reported as validation loss.
        src_ids, src_mask = batch[0], batch[1]
        tgt_ids = batch[2]
        decoder_input_ids = shift_tokens_right(tgt_ids, tokenizer.pad_token_id)
        # Run the model and get the logits
        outputs = self(src_ids, attention_mask=src_mask, decoder_input_ids=decoder_input_ids, use_cache=False)
        lm_logits = outputs[0]
        ce_loss_fct = torch.nn.CrossEntropyLoss(ignore_index=self.tokenizer.pad_token_id)
        val_loss = ce_loss_fct(lm_logits.view(-1, lm_logits.shape[-1]), tgt_ids.view(-1))
        return {'loss': val_loss}

    # Method that generates text using the BartForConditionalGeneration's generate() method
    def generate_text(self, text, eval_beams, early_stopping = True, max_len = 40):
        ''' Function to generate text '''
        generated_ids = self.model.generate(
            text["input_ids"],
            attention_mask=text["attention_mask"],
            use_cache=True,
            decoder_start_token_id = self.tokenizer.pad_token_id,
            num_beams= eval_beams,
            max_length = max_len,
            early_stopping = early_stopping
        )
        return [self.tokenizer.decode(w, skip_special_tokens=True, clean_up_tokenization_spaces=True) for w in generated_ids]
def freeze_params(model):
''' Function that takes a model as input (or part of a model) and freezes the layers for faster training
adapted from finetune.py '''
for layer in model.parameters():
layer.requires_grade = False
# + id="3cUIEJBIjjNJ"
# Create a dataloading module as per the PyTorch Lightning Docs
class SummaryDataModule(pl.LightningDataModule):
    """LightningDataModule that reads a CSV of (source, target) sentence
    pairs, tokenizes them, and serves train/val/test DataLoaders."""

    def __init__(self, tokenizer, data_file, batch_size, num_examples = 20000):
        super().__init__()
        self.tokenizer = tokenizer
        self.data_file = data_file
        self.batch_size = batch_size
        self.num_examples = num_examples

    # Loads and splits the data into training, validation and test sets with a 60/20/20 split
    def prepare_data(self):
        self.data = pd.read_csv(self.data_file)[:self.num_examples]
        # sample(frac=1) shuffles before splitting.
        self.train, self.validate, self.test = np.split(self.data.sample(frac=1), [int(.6*len(self.data)), int(.8*len(self.data))])

    # encode the sentences using the tokenizer
    def setup(self, stage):
        self.train = encode_sentences(self.tokenizer, self.train['source'], self.train['target'])
        self.validate = encode_sentences(self.tokenizer, self.validate['source'], self.validate['target'])
        self.test = encode_sentences(self.tokenizer, self.test['source'], self.test['target'])

    # Load the training, validation and test sets in Pytorch Dataset objects
    def train_dataloader(self):
        dataset = TensorDataset(self.train['input_ids'], self.train['attention_mask'], self.train['labels'])
        train_data = DataLoader(dataset, sampler = RandomSampler(dataset), batch_size = self.batch_size)
        return train_data

    def val_dataloader(self):
        dataset = TensorDataset(self.validate['input_ids'], self.validate['attention_mask'], self.validate['labels'])
        val_data = DataLoader(dataset, batch_size = self.batch_size)
        return val_data

    def test_dataloader(self):
        dataset = TensorDataset(self.test['input_ids'], self.test['attention_mask'], self.test['labels'])
        test_data = DataLoader(dataset, batch_size = self.batch_size)
        return test_data
# + id="ibsTjaBjZ-OE"
# Hyperparameters consumed by LitModel: freeze the encoder and the
# embeddings during fine-tuning; use 4 beams for generation.
hparams = argparse.Namespace()
hparams.freeze_encoder = True
hparams.freeze_embeds = True
hparams.eval_beams = 4
# + id="no6DwOqaE9Jw"
def shift_tokens_right(input_ids, pad_token_id):
    """ Shift input ids one token to the right, wrapping the last non-pad
    token (usually <eos>) around to position 0.
    This is taken directly from modeling_bart.py.
    """
    # Index of the last non-pad token in each row.
    last_nonpad = (input_ids.ne(pad_token_id).sum(dim=1) - 1).unsqueeze(-1)
    wrapped = input_ids.gather(1, last_nonpad)
    # Prepend the wrapped token and drop the final column.
    return torch.cat([wrapped, input_ids[:, :-1]], dim=1)
def encode_sentences(tokenizer, source_sentences, target_sentences, max_length=32, pad_to_max_length=True, return_tensors="pt"):
    ''' Tokenize source and target sentences into batched tensors.

    Args: tokenizer - the BART tokenizer; source and target sentences are
        the source and target sentences.
    Returns: dictionary with keys input_ids, attention_mask, labels
        (the targets' attention masks are discarded, as before).
    '''
    def _encode(sentences):
        ids, masks = [], []
        for sentence in sentences:
            encoded = tokenizer(
                sentence,
                max_length=max_length,
                padding="max_length" if pad_to_max_length else None,
                truncation=True,
                return_tensors=return_tensors,
                add_prefix_space = True
            )
            ids.append(encoded['input_ids'])
            masks.append(encoded['attention_mask'])
        return torch.cat(ids, dim = 0), torch.cat(masks, dim = 0)

    input_ids, attention_masks = _encode(source_sentences)
    target_ids, _ = _encode(target_sentences)

    return {
        "input_ids": input_ids,
        "attention_mask": attention_masks,
        "labels": target_ids,
    }
def noise_sentence(sentence_, percent_words, replacement_token = "<mask>"):
    '''
    Noise a sentence by replacing a fraction of its words with <mask> tokens.
    Args: sentence_ - the sentence to noise
          percent_words - the fraction of words to replace; the count is rounded up using math.ceil
          replacement_token - the token substituted for each chosen word
    Returns a noised sentence string.
    '''
    words = sentence_.split(' ')
    num_words = math.ceil(len(words) * percent_words)
    # Candidate positions exclude the last word: in lyrics it usually carries
    # the rhyme and plays an important role in song construction.
    # Bug fix: random.sample() requires a sequence — sampling from a set
    # raises TypeError on Python 3.11+ — so sample from a list instead.
    candidate_positions = list(range(max(1, len(words) - 1)))
    words_to_noise = random.sample(candidate_positions, num_words)
    # Swap out words, but never full stops.
    noised = list(words)
    for pos in words_to_noise:
        if noised[pos] != '.':
            noised[pos] = replacement_token
    # Remove redundant spaces.
    sentence = re.sub(r' {2,5}', ' ', ' '.join(noised))
    # Combine consecutive <mask> tokens into one; two passes, as before.
    sentence = re.sub(r'<mask> <mask>', "<mask>", sentence)
    sentence = re.sub(r'<mask> <mask>', "<mask>", sentence)
    return sentence
# + [markdown] id="510B_rmRFAE8"
# #Load BART
# + id="sAKO4lHfiJMP"
# Load the model
from transformers import BartTokenizer, BartForConditionalGeneration, AdamW, BartConfig
tokenizer = BartTokenizer.from_pretrained('facebook/bart-base', add_prefix_space=True)
bart_model = BartForConditionalGeneration.from_pretrained(
"facebook/bart-base")
# + id="_h8QhLcyh9RJ"
# Load the data into the model for training
summary_data = SummaryDataModule(tokenizer, '/content/gdrive/My Drive/BART learns to rap/lyrics_simple_noised.csv',
batch_size = 16, num_examples = 140000)
# Load the model from a pre-saved checkpoint; alternatively use the code below to start training from scratch
# model = LitModel.load_from_checkpoint(base_dir + "checkpoint_files_2/8_ep_140k_simple_0210.ckpt",
# learning_rate = 2e-5, tokenizer = tokenizer, model = bart_model, hparams = hparams)
model = LitModel(learning_rate = 2e-5, tokenizer = tokenizer, model = bart_model, hparams = hparams)
# + [markdown] id="2xcEqNMdGa6i"
# #Training
# + colab={"base_uri": "https://localhost:8080/"} id="qAj9wgyRXbRG" outputId="011f13e4-8fc0-4774-98ee-1840f37e3615"
checkpoint = ModelCheckpoint(filepath=base_dir + 'checkpoint_files_2/')
trainer = pl.Trainer(gpus = 1,
max_epochs = 1,
min_epochs = 1,
auto_lr_find = False,
checkpoint_callback = checkpoint,
progress_bar_refresh_rate = 500)
# + colab={"base_uri": "https://localhost:8080/", "height": 182, "referenced_widgets": ["cb022c7476c54739a9cf1495d9104f78", "e4d60959a2aa4534933655a884fab7b7", "0a3dd8a3fd7a45368fb510f42f707848", "8d0ab078cd34499792764b7d1d57a3d6", "a40a914ea6594ea69ced9a9fdf95223f", "5e4fd82825a34b10a0761ab7bc262e58", "<KEY>", "<KEY>", "b2aad43e1cdf47a09d4ad27c8db23d4c", "3dc9ca0f775d4179a6c86a8928a83a41", "<KEY>", "e37872d765c0484ba2b06acad3fdaeb8", "ca55e49365e74f2dae286220cec83ff0", "77c29fed41f443278d0b7a759e8e536f", "<KEY>", "d62c66372c58441bb1e6f0a8f16573c8", "<KEY>", "637e0cc2c5aa435cad665317f4df065e", "8f4e5f0f9cb340408e1ec4c7b5657203", "<KEY>", "<KEY>", "fd3e6ee5734e46979874a7daa6dc0ddf", "8caa7c157fca44628659b74b7a9dcac4", "671ac164f26c42e2b52ddc055ee6c01a"]} id="iWccEjPWwhHW" outputId="f4e775a6-af3a-448b-b36b-92660b36ad14"
# Fit the instantiated model to the data
trainer.fit(model, summary_data)
# + [markdown] id="yTgrdG4eHMKV"
# #Rap Generation
# + id="xj6WYHbFO_9V"
def generate_lyrics(seed_line, num_lines, model_, noise_percent = 0.25, multiple_lines = False, max_line_history = 3):
    '''Generate rap lyrics line-by-line, conditioning each new line on previous ones.

    Args:
        seed_line: a line to start off the generator.
        num_lines: the number of lines to generate.
        model_: the model used to generate the text.
        noise_percent: fraction of each prompt line to noise before re-encoding.
        multiple_lines: whether the model generates based on multiple previous
            lines or just the past line.
        max_line_history: the maximum number of previous lines used in the
            current input when multiple_lines is True.

    Returns:
        A list containing the seed line followed by the generated lines.
    '''
    # Generate on CPU with the model in eval mode (disables dropout etc.).
    model_.to(torch.device('cpu'))
    model_.eval()
    lyrics = []
    lyrics.append(seed_line)
    prompt_line_tokens = tokenizer(noise_sentence(seed_line, 0.2), max_length = 32,
                                   return_tensors = "pt", truncation = True)
    # Loop through the number of lines, generating a new line based on the old.
    line = [seed_line]
    for i in range(num_lines):
        # Print out and record the most recent line before generating the next.
        print(line[0].strip())
        lyrics.append(line[0])
        # BUG FIX: generate with the model passed in as `model_`; the original
        # referenced the global `model`, silently ignoring the argument.
        line = model_.generate_text(prompt_line_tokens, eval_beams = 4)
        # Strip "SPEAKER:" prefixes — an artefact in the training data that
        # was not fully cleaned.
        if line[0].find(":") != -1:
            line[0] = re.sub(r'[A-Z]+: ', '', line[0])
        if multiple_lines:
            # Condition on up to max_line_history previous lines.
            start_line = np.maximum(0, i - max_line_history)
            end_line = i
            prompt_line = ' '.join(lyrics[start_line:end_line])  # end_line is non-inclusive
        else:
            prompt_line = lyrics[i]
        # Noise the prompt so generation does not simply copy the input.
        prompt_line_tokens = tokenizer(noise_sentence(prompt_line, noise_percent),
                                       max_length = 32, return_tensors = "pt", truncation = True)
    return lyrics
# + colab={"base_uri": "https://localhost:8080/"} id="OYC2VmEKJr0W" outputId="7a46c4d1-f3a8-4baa-f380-d09315e3b74a"
new_song = generate_lyrics(seed_line = "You and me forever cruising city lights", num_lines = 4, model_ = model,
noise_percent = 0.25, multiple_lines = False, max_line_history = 1)
|
BART.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Demo: Single Curvelet
import numpy as np
import matplotlib.pyplot as plt
from curvelops import FDCT2D
# ### Setup
# Image dimensions for the 2D curvelet transform.
m = 512
n = 512
# Start from an all-zero flattened image; curvelops operators act on vectors.
x = np.zeros((m * n))
# 2D fast discrete curvelet transform operator.
DCT = FDCT2D((m, n))
# ### Curvelet Domain
# +
# Forward transform (all-zero input gives all-zero coefficients).
y = DCT * x
# Convert to a curvelet struct indexed by
# [scale, wedge (angle), x, y]
y_reshape = DCT.struct(y)
# -
# Select single curvelet
s = 4  # scale index
w = 0  # wedge (angle) index
a, b = y_reshape[s][w].shape
# Activate a single coefficient at the centre of the chosen scale/wedge.
y_reshape[s][w][a//2, b//2] = 1
# Flatten the struct back into a coefficient vector.
y = DCT.vect(y_reshape)
# Perform adjoint transform and reshape
x = DCT.H * y
x = x.reshape(m, n)
# F-K domain
x_fk = np.fft.fft2(x)
x_fk = np.fft.fftshift(x_fk)
# Visualize
# Symmetric colour limits at 80% of the peak amplitude.
vmin, vmax = 0.8 * np.array([-1,1]) * np.abs(np.max(x))
fig, ax = plt.subplots(2,2, figsize=(8,8))
ax[0, 0].imshow(np.real(x.T), cmap='gray', vmin=vmin, vmax=vmax)
ax[0, 1].imshow(np.imag(x.T), cmap='gray', vmin=vmin, vmax=vmax)
ax[1, 0].imshow(np.abs(x_fk.T), cmap='gray', vmin=0)
ax[1, 1].imshow(np.angle(x_fk.T, deg=True), cmap='gray', vmin=-180, vmax=180)
# Zoom the space-domain panels in around the curvelet's centre.
ax[0, 0].set(xlim=(m//2 - 50, m//2 + 50), ylim=(n//2 - 50, n//2 + 50), title='Space domain (Real) magnified')
ax[0, 1].set(xlim=(m//2 - 50, m//2 + 50), ylim=(n//2 - 50, n//2 + 50), title='Space domain (Imag) magnified')
ax[1, 0].set(title='Frequency domain (Abs)')
ax[1, 1].set(title='Frequency domain (Phase)')
fig.tight_layout()
|
examples/Demo_Single_Curvelet.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3.7.0 64-bit ('.venv')
# metadata:
# interpreter:
# hash: d870b2e3edabf66c5b7e5d287c1252743d8d17b4ec9c8ad0dd3b83b06dc1fe69
# name: python3
# ---
import numpy as np
from sklearn.neighbors import NearestNeighbors
from scipy.spatial.distance import pdist, squareform
import pandas as pd
import pyod
df = pd.read_csv("data/bcw_data.csv")
df.columns
df.isna().sum()
df["diagnosis"].value_counts()
# B = 0
# M = 1
df.dtypes
df.drop(columns=["Unnamed: 32"], inplace=True)
def encode_target(label):
    """Encode a diagnosis label as a binary target: 1 for malignant ("M"), 0 otherwise."""
    return 1 if label == "M" else 0
df["diagnosis"] = df["diagnosis"].apply(lambda x: encode_target(x))
# Split benign (0) and malignant (1) cases.
b_df = df[df["diagnosis"] == 0]
m_df = df[df["diagnosis"] == 1]
# Keep all benign rows plus 6 malignant rows as planted outliers, then shuffle
# with a fixed seed for reproducibility.
# DataFrame.append was deprecated and removed in pandas 2.0; pd.concat is the
# supported, behaviourally-identical replacement.
out_df = pd.concat([b_df, m_df[:6]]).sample(frac=1, random_state=42).reset_index(drop=True)
out_df.head(3)
out_df.diagnosis.value_counts()
'''
Total data = 367
Outlier placed = 6
1.64% of outliers are now placed
'''
# Init RBDA
from rbda import RBOD
# Create the similarity matrix
X_train = out_df.iloc[:,2:].to_numpy()
# +
# Pairwise euclidean distance matrix over the synthetic dataset (not our
# similarity measure). scipy's pdist/squareform — already imported above —
# computes exactly the same values as the previous O(n^2) Python loop
# (np.linalg.norm of row differences), but vectorised in C.
C = squareform(pdist(X_train, metric="euclidean"))
C_df = pd.DataFrame(C)
# Prepend the sample ids so rows can be matched back to out_df.
C_df.insert(0, "id", out_df["id"])
# -
X_train.shape
#outlier_output_file = open("rbod_eval_outliers.csv", "w+")
#outlier_output_file.write("k,precision_n,roc_auc,algorithm\n")
ids = out_df["id"].to_numpy().reshape(len(out_df["id"]), 1)
X_train = np.hstack((ids, X_train))
y_train = out_df["diagnosis"].to_numpy()
# + tags=["outputPrepend"]
# Sweep odd neighbourhood sizes k = 3, 5, ..., 59 and score RBOD at each.
k_range = [_ for _ in range(3, 60, 2)]
# z-score threshold used to binarise the outlier scores below.
z_val = 2.5
for k in k_range:
    print("Value of k ", k)
    rbod = RBOD(C_df, kneighbors=k, z_val=z_val)
    combination_dict = {}
    # NOTE(review): this second construction overwrites the one above and does
    # NOT pass z_val, so the detector runs with RBOD's default z_val while the
    # thresholding below still uses 2.5 — confirm which was intended.
    rbod = RBOD(C_df, kneighbors=k)
    # detect() returns (id, score) pairs for each sample.
    combination_dict["outliers"] = rbod.detect(X_train)
    #To show labels for RBDA
    # This code based on numpy executions of precision_scoring
    rbod_decision_scores = np.asarray([val[1] for val in combination_dict["outliers"]])
    #threshold = np.percentile(rbod_decision_scores, 100 * (1 - contamination))
    threshold = z_val
    # Binary labels: scores above the z threshold are flagged as outliers.
    rbod_labels = (rbod_decision_scores > threshold).astype('int')
    print("Classifier RBDA Outlier labels are - {}".format(rbod_labels))
    from pyod.utils import evaluate_print
    from sklearn.metrics import roc_auc_score
    # AUC of the raw scores against the true diagnosis labels.
    roc_rbod = np.round(roc_auc_score(y_train,
                                      [val[1] for val in combination_dict["outliers"]]), decimals=4)
    print("AUC Score for k-{},{}".format(k, roc_rbod))
    #outlier_output_file.write("".join(str(k) + "," + str(prn_rbod) + "," + str(roc_rbod) + "," + "RBOD" + "\n"))
# +
#Finally close the file
#outlier_output_file.close()
# -
|
test_rbda_bcw.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# Blankenbach Benchmark Case 1
# ======
#
# Steady isoviscous thermal convection
# ----
#
# Two-dimensional, incompressible, bottom heated, steady isoviscous thermal convection in a 1 x 1 box, see case 1 of Blankenbach *et al.* 1989 for details.
#
# 
#
# **This example introduces:**
# 1. Loading/Saving variables to disk.
# 2. Defining analysis tools.
# 3. Finding a steady state.
#
# **Keywords:** Stokes system, advective diffusive systems, analysis tools
#
# **References**
#
# <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, <NAME> and <NAME>. A benchmark comparison for mantle convection codes. Geophysical Journal International, 98, 1, 23–38, 1989
# http://onlinelibrary.wiley.com/doi/10.1111/j.1365-246X.1989.tb05511.x/abstract
#
import underworld as uw
from underworld import function as fn
import glucifer
import math
import numpy as np
# Setup parameters
# -----
boxHeight = 1.0
boxLength = 1.0
# Set grid resolution.
res = 128
# Set max & min temperatures
tempMin = 0.0
tempMax = 1.0
# Choose which Rayleigh number, see case 1 of Blankenbach *et al.* 1989 for details.
case = "a"
if(case=="a"):
Ra=1.e4
eta0=1.e23
elif(case=="b"):
Ra=1.e5
eta0=1.e22
else:
Ra=1.e6
eta0=1.e21
# Set input and output file directory
inputPath = 'input/1_03_BlankenbachBenchmark/'
outputPath = 'output/'
# Make output directory if necessary.
if uw.mpi.rank==0:
import os
if not os.path.exists(outputPath):
os.makedirs(outputPath)
# Create mesh and variables
# ------
# +
mesh = uw.mesh.FeMesh_Cartesian( elementType = ("Q1/dQ0"),
elementRes = (res, res),
minCoord = (0., 0.),
maxCoord = (boxLength, boxHeight))
velocityField = mesh.add_variable( nodeDofCount=2 )
pressureField = mesh.subMesh.add_variable( nodeDofCount=1 )
temperatureField = mesh.add_variable( nodeDofCount=1 )
temperatureDotField = mesh.add_variable( nodeDofCount=1 )
# initialise velocity, pressure and temperatureDot field
velocityField.data[:] = [0.,0.]
pressureField.data[:] = 0.
temperatureField.data[:] = 0.
temperatureDotField.data[:] = 0.
# -
# Set up material parameters and functions
# -----
#
# Set values and functions for viscosity, density and buoyancy force.
# +
# Set a constant viscosity.
viscosity = 1.
# Create our density function.
densityFn = Ra * temperatureField
# Define our vertical unit vector using a python tuple (this will be automatically converted to a function).
z_hat = ( 0.0, 1.0 )
# A buoyancy function.
buoyancyFn = densityFn * z_hat
# -
# Set initial temperature field
# -----
# The initial temperature field can be loaded from a pre-run steady state data set ( ``LoadFromFile = True`` ) or set to a sinusodial perterbation ( ``LoadFromFile = False`` ).
# Steady state temperature field to be loaded from data file.
LoadFromFile = True
# **If loading steady state data set**
#
# Data is stored in h5 format from a 64\*64 grid resolution model. Data has been saved for 3 different Rayleigh numbers, $Ra = 10^4$, $10^5$ or $10^6$.
#
# Once loaded the data will need to be re-meshed onto a new grid, unless the new resolution is also 64\*64.
#
# For more information on using meshes see the user guide.
#
if(LoadFromFile == True):
# Setup mesh and temperature field for 64*64 data file.
mesh64 = uw.mesh.FeMesh_Cartesian( elementType = ("Q1/dQ0"),
elementRes = (64, 64),
minCoord = (0., 0.),
maxCoord = (boxLength, boxHeight),
partitioned = False )
temperatureField64 = mesh64.add_variable( nodeDofCount=1 )
# read in saved steady state temperature field data
if( case == "a" ):
temperatureField64.load(inputPath+'tempfield_inp_64_Ra1e4.h5')
print('Loading 64*64 for Ra = 1e4')
elif( case == "b" ):
temperatureField64.load(inputPath+'tempfield_inp_64_Ra1e5.h5')
print('Loading 64*64 for Ra = 1e5')
else:
temperatureField64.load(inputPath+'tempfield_inp_64_Ra1e6.h5')
print('Loading 64*64 for Ra = 1e6')
if( res==64 ): # no remeshing needed, copy directly
temperatureField.data[:] = temperatureField64.data[:]
else: # remeshing needed
temperatureField.data[:] = temperatureField64.evaluate(mesh)
# **If using sinusodial perturbation**
if(LoadFromFile == False):
temperatureField.data[:] = 0.
pertStrength = 0.1
deltaTemp = tempMax - tempMin
for index, coord in enumerate(mesh.data):
pertCoeff = math.cos( math.pi * coord[0]/boxLength ) * math.sin( math.pi * coord[1]/boxLength )
temperatureField.data[index] = tempMin + deltaTemp*(boxHeight - coord[1]) + pertStrength * pertCoeff
temperatureField.data[index] = max(tempMin, min(tempMax, temperatureField.data[index]))
# **Show initial temperature field**
#
fig = glucifer.Figure()
fig.append( glucifer.objects.Surface(mesh, temperatureField) )
fig.show()
# Create boundary conditions
# ----------
#
# Set temperature boundary conditions on the bottom ( ``MinJ`` ) and top ( ``MaxJ`` ).
for index in mesh.specialSets["MinJ_VertexSet"]:
temperatureField.data[index] = tempMax
for index in mesh.specialSets["MaxJ_VertexSet"]:
temperatureField.data[index] = tempMin
# Construct sets for the both horizontal and vertical walls. Combine the sets of vertices to make the ``I`` (left and right side walls) and ``J`` (top and bottom walls) sets.
# +
iWalls = mesh.specialSets["MinI_VertexSet"] + mesh.specialSets["MaxI_VertexSet"]
jWalls = mesh.specialSets["MinJ_VertexSet"] + mesh.specialSets["MaxJ_VertexSet"]
freeslipBC = uw.conditions.DirichletCondition( variable = velocityField,
indexSetsPerDof = (iWalls, jWalls) )
tempBC = uw.conditions.DirichletCondition( variable = temperatureField,
indexSetsPerDof = (jWalls,) )
# -
# System setup
# -----
#
# **Setup a Stokes system**
#
stokes = uw.systems.Stokes( velocityField = velocityField,
pressureField = pressureField,
conditions = [freeslipBC,],
fn_viscosity = viscosity,
fn_bodyforce = buoyancyFn )
# get the default stokes equation solver
solver = uw.systems.Solver( stokes )
# **Create an advection diffusion system**
#
advDiff = uw.systems.AdvectionDiffusion( phiField = temperatureField,
phiDotField = temperatureDotField,
velocityField = velocityField,
fn_diffusivity = 1.0,
conditions = [tempBC,] )
# Analysis tools
# -----
# **Nusselt number**
#
# The Nusselt number is the ratio between convective and conductive heat transfer
#
# \\[
# Nu = -h \frac{ \int_0^l \partial_z T (x, z=h) dx}{ \int_0^l T (x, z=0) dx}
# \\]
#
#
#
#
# +
nuTop = uw.utils.Integral( fn=temperatureField.fn_gradient[1],
mesh=mesh, integrationType='Surface',
surfaceIndexSet=mesh.specialSets["MaxJ_VertexSet"])
nuBottom = uw.utils.Integral( fn=temperatureField,
mesh=mesh, integrationType='Surface',
surfaceIndexSet=mesh.specialSets["MinJ_VertexSet"])
# -
nu = - nuTop.evaluate()[0]/nuBottom.evaluate()[0]
print('Nusselt number = {0:.6f}'.format(nu))
# **RMS velocity**
#
# The root mean squared velocity is defined by intergrating over the entire simulation domain via
#
# \\[
# \begin{aligned}
# v_{rms} = \sqrt{ \frac{ \int_V (\mathbf{v}.\mathbf{v}) dV } {\int_V dV} }
# \end{aligned}
# \\]
#
# where $V$ denotes the volume of the box.
# +
intVdotV = uw.utils.Integral( fn.math.dot( velocityField, velocityField ), mesh )
vrms = math.sqrt( intVdotV.evaluate()[0] )
print('Initial vrms = {0:.3f}'.format(vrms))
# -
# Main simulation loop
# -----
# If the initial conditions are loaded from file then this loop will only take a single step. If you would like to run the entire simulation from a small perturbation then change the ``LoadFromFile`` variable above to equal ``False``. Warning: the simulation will take a long time to get to steady state.
# +
#initialise time, step, output arrays
time = 0.
step = 0
timeVal = []
vrmsVal = []
# starting from steady state == True
if(LoadFromFile == True):
step_end = 1
else:
step_end = 5000
# output frequency
step_output = max(1,min(100, step_end/10))
epsilon = 1.e-8
velplotmax = 0.0
nuLast = -1.0
# -
# define an update function
def update():
    """Advance the advection-diffusion system by one (maximum stable) timestep.

    Reads the module-level `time` and `step` counters and returns their
    advanced values; the caller rebinds them.
    """
    # Determining the maximum timestep for advancing the a-d system.
    dt = advDiff.get_max_dt()
    # Advect using this timestep size.
    advDiff.integrate(dt)
    return time+dt, step+1
# Perform steps.
# Alternate Stokes solves and advection-diffusion updates until either
# step_end is reached or the Nusselt number converges (relative change
# below epsilon).
while step<=step_end:
    # Solving the Stokes system.
    solver.solve()
    # Calculate & store the RMS velocity and Nusselt number.
    vrms = math.sqrt( intVdotV.evaluate()[0] )
    nu = - nuTop.evaluate()[0]/nuBottom.evaluate()[0]
    vrmsVal.append(vrms)
    timeVal.append(time)
    # Track the largest vrms seen, used later to scale the vector plot.
    velplotmax = max(vrms, velplotmax)
    # print output statistics
    if step%(step_end/step_output) == 0:
        if(uw.mpi.rank==0):
            print('steps = {0:6d}; time = {1:.3e}; v_rms = {2:.3f}; Nu = {3:.3f}; Rel change = {4:.3e}'
                  .format(step, time, vrms, nu, abs((nu - nuLast)/nu)))
    # Check loop break conditions.
    # (nuLast starts at -1.0, so the first iteration never triggers the break.)
    if(abs((nu - nuLast)/nu) < epsilon):
        if(uw.mpi.rank==0):
            print('steps = {0:6d}; time = {1:.3e}; v_rms = {2:.3f}; Nu = {3:.3f}; Rel change = {4:.3e}'
                  .format(step, time, vrms, nu, abs((nu - nuLast)/nu)))
        break
    nuLast = nu
    # update
    time, step = update()
# Post analysis
# -----
#
# **Benchmark values**
#
# The time loop above outputs $v_{rms}$ and $Nu$ as general statistics for the system. For comparison, the benchmark values for the RMS velocity and Nusselt number are shown below for different Rayleigh numbers. All benchmark values shown below were determined in Blankenbach *et al.* 1989 by extrapolation of numerical results.
#
#
# | $Ra$ | $v_{rms}$ | $Nu$ | $q_1$ | $q_2$ |
# | ------------- |:-------------:|:-----:|:-----:|:-----:|
# | 10$^4$ | 42.865 | 4.884 | 8.059 | 0.589 |
# | 10$^5$ | 193.215 | 10.535 | 19.079 | 0.723 |
# | 10$^6$ | 833.990 | 21.972 | 45.964 | 0.877 |
#
# Let's add a test to ensure things are working as expected.
# Benchmark values for case "a" (Ra = 1e4) from Blankenbach et al. 1989.
if case == "a":
    if not np.isclose(nu, 4.884, rtol=1.e-2):
        raise RuntimeError("Model did not produce the expected Nusselt number.")
    if not np.isclose(vrms, 42.865, rtol=1.e-2):
        # BUG FIX: this branch checks vrms, not Nu — the original error
        # message wrongly said "Nusselt number".
        raise RuntimeError("Model did not produce the expected RMS velocity.")
# **Resulting pressure field**
#
# Use the same method as above to plot the new temperature field. This can also be used to plot the pressure field, or any other data structures of interest.
figtemp = glucifer.Figure()
figtemp.append( glucifer.objects.Surface( mesh, pressureField ) )
figtemp.show()
# **Plot the velocity vector field**
#
# For this example the velocity field is interesting to see. This is visualised in two ways, firstly plotting a surface colour map of the velocity magnitude, and secondly the velocity vectors at points on the mesh. For aesthetics the vector arrows are scaled by a little more than the maximum $v_{rms}$ value found in the time loop above.
fig2 = glucifer.Figure()
velmagfield = uw.function.math.sqrt( uw.function.math.dot( velocityField, velocityField ) )
fig2.append( glucifer.objects.VectorArrows(mesh, velocityField/(2.5*velplotmax), arrowHead=0.2, scaling=0.1) )
fig2.append( glucifer.objects.Surface(mesh, temperatureField) )
fig2.show()
# Parallel friendly post analysis
# ----
#
# When running underworld in parallel the data of each mesh variable is spread across all the processors. However often we will want to calculate a quantity based on data at specific points that may not all be on the same processor.
#
# A solution is presented here which consists of saving the data from all processors to file, then reloading the mesh variable data using a new non-partitioned mesh. This enables all the data to be available to each processor. We will then carry out the post analysis using the first processor.
#
# **Save temperature, pressure and velocity data**
#
# Save the basic mesh variable data to files using the HDF5 format. This is the same file type as is loaded above.
mesh.save(outputPath+"mesh.h5")
temperatureField.save(outputPath+'tempfield.h5')
pressureField.save(outputPath+'presfield.h5')
velocityField.save(outputPath+'velfield.h5')
# **Construct new mesh and variable on non-partitioned mesh**
#
# Read saved mesh variable data into a new mesh variable where the information is not partitioned across multiple processors. This means that we can use a single processor to access all the data and calculate some quantities of interest.
# +
# build a non-partitioned mesh with same box size
mesh0 = uw.mesh.FeMesh_Cartesian( elementType = ("Q1/dQ0"),
elementRes = (res, res),
minCoord = (0., 0.),
maxCoord = (boxLength, boxHeight),
partitioned = False )
# load previous mesh coordinate data onto new non-partitioned mesh
mesh0.load(outputPath+'mesh.h5')
# load T, P and V data onto the new mesh
# note that pressure is always on the submesh
temperatureField0 = mesh0.add_variable( nodeDofCount=1 )
pressureField0 = mesh0.subMesh.add_variable( nodeDofCount=1 )
velocityField0 = mesh0.add_variable( nodeDofCount=2 )
temperatureField0.load(outputPath+"tempfield.h5")
pressureField0.load(outputPath+"presfield.h5")
velocityField0.load(outputPath+"velfield.h5")
# -
# **Temperature gradient**
#
# The final benchmarks in the Blankenbach paper involve the temperature gradient in the vertical direction ($\frac{\partial T}{\partial z}$). This is easy to find using the underworld functions, as shown below.
#
if(uw.mpi.rank==0):
tempgradField = temperatureField0.fn_gradient
vertTGradField = - boxHeight * tempgradField[1] / tempMax # scaled for direct benchmarking below
# **More benchmark values**
#
#
# The vertical temperature gradient (above) is set up to be non-dimensional as per Blankenbach et al 1989. To compare to the benchmark values in their work the gradient is compared at the corners of the simulation box: $q_1$ at $x = 0$, $z = h$; $q_2$ at $x = l$, $z = h$; $q_3$ at $x = l$, $z = 0$; $q_4$ at $x = 0$, $z = 0$. Where $h$ = Box_Height and $l$ = Box_Length and the non-dimensional gradient field is given by
# \\[
# q = \frac{-h}{\Delta T} \left( \frac{\partial T}{\partial z} \right)
# \\]
#
# Provided the simulation is run to steady-state with sufficient resolution then the $q$ values should be close to the benchmark values given again below for different Rayleigh numbers.
#
# | $Ra$ | $q_1$ | $q_2$ |
# | ------------- |:-----:|:-----:|
# | 10$^4$ | 8.059 | 0.589 |
# | 10$^5$ | 19.079 | 0.723 |
# | 10$^6$ | 45.964 | 0.877 |
#
if(uw.mpi.rank==0):
q1 = vertTGradField.evaluate( (0., boxHeight))[0][0]
q2 = vertTGradField.evaluate( (boxLength, boxHeight))[0][0]
q3 = vertTGradField.evaluate( (boxLength, 0.))[0][0]
q4 = vertTGradField.evaluate( (0., 0.))[0][0]
print('Rayleigh number = {0:.1e}'.format(Ra))
print('q1 = {0:.3f}; q2 = {1:.3f}'.format(q1, q2))
print('q3 = {0:.3f}; q4 = {1:.3f}'.format(q3, q4))
# Let's add a test to ensure things are working as expected
if case == "a":
if not np.isclose(q1,8.020,rtol=1.e-2):
raise RuntimeError("Model did not produce the expected q1.")
if not np.isclose(q2,0.589,rtol=1.e-2):
raise RuntimeError("Model did not produce the expected q2.")
# **Save time and rms values**
#
# The following command uses the ``numpy`` package save to text file function to output all $v_{RMS}$ values as a function of time. This is particularly useful if you have run the simulation from the perturbed initial condition rather than the saved data file, as you can see the system coming to steady state.
#
# The format for this text file is:
#
# timeVal[0], vrmsVal[0]
# timeVal[1], vrmsVal[1]
# ...
# timeVal[N], vrmsVal[N]
if(uw.mpi.rank==0):
np.savetxt(outputPath+'vrms.txt', np.c_[timeVal, vrmsVal], header="Time, VRMS" )
# **Calculate stress values for benchmark comparison**
#
#
# Determine stress field for whole box in dimensionless units (King 2009)
# \begin{equation}
# \tau_{ij} = \eta \frac{1}{2} \left[ \frac{\partial v_j}{\partial x_i} + \frac{\partial v_i}{\partial x_j}\right]
# \end{equation}
# which for vertical normal stress becomes
# \begin{equation}
# \tau_{zz} = \eta \frac{1}{2} \left[ \frac{\partial v_z}{\partial z} + \frac{\partial v_z}{\partial z}\right] = \eta \frac{\partial v_z}{\partial z}
# \end{equation}
# which is implemented for the whole box in the functions defined below.
# +
# get topography from non-partitioned stress tensor
if(uw.mpi.rank==0):
stresstensorFn = 2.* stokes.fn_viscosity*fn.tensor.symmetric( velocityField0.fn_gradient ) - (1.,1.,0.)*pressureField0
verticalStressFn = stresstensorFn[1]
stress_zz_top = -verticalStressFn.evaluate(mesh0.specialSets["MaxJ_VertexSet"])
# subtract the average value for benchmark.
mean_sigma_zz_top = np.mean(stress_zz_top)
sigma_zz_top = stress_zz_top - mean_sigma_zz_top
# -
# Dimensionalise the stress from the vertical normal stress at the top of the box (King 2009)
#
# $$
# \sigma_{t} = \frac{\eta_0 \kappa}{\rho g h^2}\tau _{zz} \left( x, z=h\right)
# $$
#
# where all constants have been defined above. Finally calculate the topography, defined using $h = \sigma_{top} / (\rho g)$.
# +
# Set parameters in SI units
if(uw.mpi.rank==0):
grav = 10 # m.s^-2
height = 1.e6 # m
rho = 4.0e3 # g.m^-3
kappa = 1.0e-6 # m^2.s^-1
# dimensionalise
dim_sigma_zz_top = (eta0 * kappa / (height*height)) * sigma_zz_top
# find topography in [m]
topography = dim_sigma_zz_top / (rho * grav)
# -
# **Calculate x-coordinate at zero stress**
#
# Calculate the zero point for the stress along the x-axis at the top of the box using the **interpolation function** from ``numpy``. Note that ``numpy`` requires that the first array input for ``np.interp`` must be increasing, so the negative of the topography is used.
if(uw.mpi.rank==0):
xCoordFn = fn.input()[0]
x = xCoordFn.evaluate(mesh0.specialSets["MinJ_VertexSet"])
xIntercept = np.interp(0.0,-1.0*topography[:, 0],x[:, 0])
# **Topography comparison**
#
# Topography of the top boundary calculated in the left and right corners as given in Table 9 of Blankenbach et al 1989.
#
# | $Ra$ | $\xi_1$ | $\xi_2$ | $x$ ($\xi = 0$) |
# | ------------- |:-----------:|:--------:|:--------------:|
# | 10$^4$ | 2254.02 | -2903.23 | 0.539372 |
# | 10$^5$ | 1460.99 | -2004.20 | 0.529330 |
# | 10$^6$ | 931.96 | -1283.80 | 0.506490 |
#
if(uw.mpi.rank==0):
    # Topography at the left (x=0) and right (x=max) top corners.
    # Index the scalar explicitly: topography has shape (N, 1) (see the
    # topography[:, 0] use above) and float() on a size-1 ndarray is
    # deprecated in recent numpy.
    e1 = float(topography[0, 0])
    e2 = float(topography[-1, 0])
    print('Rayleigh number = {0:.1e}'.format(Ra))
    print('Topography[x=0],[x=max] = {0:.2f}, {1:.2f}'.format(e1, e2))
    print('x(topo=0) = {0:.6f}'.format(xIntercept))
    # output a summary file with benchmark values (useful for parallel runs)
    np.savetxt(outputPath+'summary.txt', [Ra, e1, e2, xIntercept, q1, q2, q3, q4])
# Let's add a test to ensure things are working as expected
if case == "a":
if not np.isclose(e1,2254.02,rtol=1.e-2):
raise RuntimeError("Model did not produce the expected xi1.")
if not np.isclose(e2,-2903.23,rtol=1.e-2):
raise RuntimeError("Model did not produce the expected xi2.")
|
docs/examples/03_BlankenbachBenchmark.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# This notebook was prepared by [<NAME>](https://github.com/donnemartin). Source and license info is on [GitHub](https://github.com/donnemartin/interactive-coding-challenges).
# # Solution Notebook
# ## Problem: Given sorted arrays A, B, merge B into A in sorted order.
#
# * [Constraints](#Constraints)
# * [Test Cases](#Test-Cases)
# * [Algorithm](#Algorithm)
# * [Code](#Code)
# * [Unit Test](#Unit-Test)
# ## Constraints
#
# * Does A have enough space for B?
# * Yes
# * Can the inputs have duplicate array items?
# * Yes
# * Can we assume the inputs are valid?
# * No
# * Does the inputs also include the actual size of A and B?
# * Yes
# * Can we assume this fits memory?
# * Yes
# ## Test Cases
#
# * A or B is None -> Exception
# * index of last A or B < 0 -> Exception
# * A or B is empty
# * General case
# * A = [1, 3, 5, 7, 9, None, None, None]
# * B = [4, 5, 6]
# * A = [1, 3, 4, 5, 5, 6, 7, 9]
# ## Algorithm
#
# <pre>
# i k
# A = [1, 3, 5, 7, 9, None, None, None]
# j
# B = [4, 5, 6]
#
# ---
#
# A[k] = max(A[i], B[j])
# i k
# A = [1, 3, 5, 7, 9, None, None, 9]
# j
# B = [4, 5, 6]
#
# ---
#
# A[k] = max(A[i], B[j])
# i k
# A = [1, 3, 5, 7, 9, None, 7, 9]
# j
# B = [4, 5, 6]
#
# ---
#
# A[k] = max(A[i], B[j])
# i k
# A = [1, 3, 5, 7, 9, 6, 7, 9]
# j
# B = [4, 5, 6]
#
# ---
#
# A[k] = max(A[i], B[j])
# i k
# A = [1, 3, 5, 7, 5, 6, 7, 9]
# j
# B = [4, 5, 6]
#
# ---
#
# A[k] = max(A[i], B[j])
# i k
# A = [1, 3, 5, 5, 5, 6, 7, 9]
# j
# B = [4, 5, 6]
#
# ---
#
# A[k] = max(A[i], B[j])
# i k
# A = [1, 3, 4, 5, 5, 6, 7, 9]
# j
# B = [4, 5, 6]
#
# ---
#
# A[k] = max(A[i], B[j])
# ik
# A = [1, 3, 4, 5, 5, 6, 7, 9]
#
# B = [4, 5, 6]
#
# ---
#
# A = [1, 3, 4, 5, 5, 6, 7, 9]
#
# </pre>
#
# Complexity:
# * Time: O(m + n)
# * Space: O(1)
# ## Code
class Array(object):
    """Merge a sorted array into another sorted array that has spare capacity."""

    def merge_into(self, source, dest, source_end_index, dest_end_index):
        """Merge dest[:dest_end_index] into source, keeping sorted order.

        Args:
            source: sorted list with at least dest_end_index spare slots at the end.
            dest: sorted list whose elements are merged into source.
            source_end_index: number of valid elements in source.
            dest_end_index: number of valid elements in dest.

        Returns:
            source, now containing all elements in sorted order (or dest/source
            unchanged when the other input is empty).

        Raises:
            TypeError: if source or dest is None.
            ValueError: if either end index is negative.
        """
        if source is None or dest is None:
            raise TypeError('source or dest cannot be None')
        if source_end_index < 0 or dest_end_index < 0:
            raise ValueError('end indices must be >= 0')
        if not source:
            return dest
        if not dest:
            return source
        source_index = source_end_index - 1
        dest_index = dest_end_index - 1
        insert_index = source_end_index + dest_end_index - 1
        # Fill from the back so no valid source element is overwritten.
        while dest_index >= 0:
            # BUG FIX: guard source_index >= 0. Once the source pointer is
            # exhausted, the original compared against source[-1] (Python
            # negative indexing) and copied wrong values instead of draining
            # the remaining dest elements.
            if source_index >= 0 and source[source_index] > dest[dest_index]:
                source[insert_index] = source[source_index]
                source_index -= 1
            else:
                source[insert_index] = dest[dest_index]
                dest_index -= 1
            insert_index -= 1
        return source
# ## Unit Test
# +
# %%writefile test_merge_into.py
from nose.tools import assert_equal, assert_raises


class TestArray(object):
    """Unit tests for Array.merge_into (expects Array in the notebook session)."""

    def test_merge_into(self):
        array = Array()
        # Invalid inputs must raise.
        assert_raises(TypeError, array.merge_into, None, None, None, None)
        assert_raises(ValueError, array.merge_into, [1], [2], -1, -1)
        # Merging an empty dest leaves source unchanged.
        a = [1, 2, 3]
        assert_equal(array.merge_into(a, [], len(a), 0), [1, 2, 3])
        # NOTE(review): this case duplicates the previous assertion verbatim —
        # possibly it was meant to exercise an empty *source* instead.
        a = [1, 2, 3]
        assert_equal(array.merge_into(a, [], len(a), 0), [1, 2, 3])
        # General case: 5 valid elements in a, capacity for 3 more from b.
        a = [1, 3, 5, 7, 9, None, None, None]
        b = [4, 5, 6]
        expected = [1, 3, 4, 5, 5, 6, 7, 9]
        assert_equal(array.merge_into(a, b, 5, len(b)), expected)
        print('Success: test_merge_into')


def main():
    test = TestArray()
    test.test_merge_into()


if __name__ == '__main__':
    main()
# -
# %run -i test_merge_into.py
|
sorting_searching/merge_into/merge_into_solution.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# %matplotlib inline
import matplotlib.pyplot as plt
import seaborn as sns; sns.set()
import numpy as np
def make_hello(N=1000, rseed=42):
    """Return an (N, 2) point cloud sampled from the pixels of the word "HELLO".

    Renders "HELLO" to hello.png as a side effect, reloads it, and
    rejection-samples random 2D points that land on dark (text) pixels.
    Points are returned sorted by their x coordinate.
    """
    # Make a plot with "HELLO" text; save as PNG
    fig, ax = plt.subplots(figsize=(4, 1))
    fig.subplots_adjust(left=0, right=1, bottom=0, top=1)
    ax.axis('off')
    ax.text(0.5, 0.4, 'HELLO', va='center', ha='center', weight='bold', size=85)
    fig.savefig('hello.png')
    plt.close(fig)
    # Open this PNG and draw random points from it
    from matplotlib.image import imread
    # Flip vertically and transpose so array indices line up with (x, y);
    # a single colour channel suffices for black-on-white text.
    data = imread('hello.png')[::-1, :, 0].T
    rng = np.random.RandomState(rseed)
    # Oversample (4N candidates) so enough points survive the mask below.
    X = rng.rand(4 * N, 2)
    i, j = (X * data.shape).astype(int).T
    # Keep only points landing on non-white pixels (the text strokes).
    mask = (data[i, j] < 1)
    X = X[mask]
    # Rescale x to preserve the image's aspect ratio.
    X[:, 0] *= (data.shape[0] / data.shape[1])
    X = X[:N]
    return X[np.argsort(X[:, 0])]
X = make_hello(1000)
colorize = dict(c=X[:,0], cmap = plt.cm.get_cmap('rainbow',5))
plt.scatter(X[:,0],X[:,1],**colorize)
plt.axis('equal');
# +
def rotate(X, angle):
    """Rotate 2-D row-vector points by `angle` degrees via a rotation matrix."""
    rad = np.deg2rad(angle)
    c, s = np.cos(rad), np.sin(rad)
    rotation = np.array([[c, s],
                         [-s, c]])
    return X @ rotation
X2 = rotate(X,30)+5
plt.scatter(X2[:,0],X2[:,1],**colorize)
plt.axis('equal');
# -
from sklearn.metrics import pairwise_distances
D = pairwise_distances(X)
D.shape
plt.imshow(D,zorder=2,cmap='Blues',interpolation='nearest')
plt.colorbar();
D2 = pairwise_distances(X2)
np.allclose(D,D2)
from sklearn.manifold import MDS
model = MDS(n_components=2,dissimilarity='precomputed',random_state=1)
out = model.fit_transform(D)
plt.scatter(out[:,0],out[:,1],**colorize)
plt.axis('equal')
|
manifold.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import pandas as pd
# Kenya emissions-factor table; later joined onto fuels to obtain CO2f
# coefficients per unit of fuel use.
emissions = pd.read_csv("/Users/alexanderkell/Documents/SGI/Projects/11-starter-kits/data/raw/starter-kits/kenya/Table7_KEN.csv")
emissions
# Rescale the raw values by 1e-6 — presumably a unit conversion
# (e.g. kg -> kt); TODO(review): confirm the source units.
emissions.Value *= 0.000001
emissions
# Reference CommIn table from the default MUSE gas sector, used as a
# template for the tables generated below.
CommIn = pd.read_csv("/Users/alexanderkell/Documents/SGI/Projects/11-starter-kits/data/external/muse_data/default/technodata/gas/CommIn.csv")
CommIn
# ## Generate Oil CommOut
oil_technodata = pd.read_csv("/Users/alexanderkell/Documents/SGI/Projects/11-starter-kits/data/interim/technodata/oil_technodata.csv")
oil_technodata
# Output shares per oil process: each refinery option produces 1 unit of
# oil per unit of activity and nothing else.
crude_oil_output = {
    "Crude Oil Refinery Option 1": {"oil": 1, "gas": 0, "electricity": 0, "CO2f": 0, "wind": 0},
    "Crude Oil Refinery Option 2": {"oil": 1, "gas": 0, "electricity": 0, "CO2f": 0, "wind": 0}
}
crude_oil_output
# Real process names — the row named "Unit" is a units header, not a process.
processes = list(pd.unique(oil_technodata[oil_technodata['ProcessName'] != "Unit"]['ProcessName']))
processes
# Units header row for the oil CommOut table (flows in PJ/PJ, CO2f in kt/PJ).
units = {
    "ProcessName": ["Unit"],
    "RegionName": ["-"],
    "Time": ["Year"],
    "Level": ["-"],
    "electricity": ["PJ/PJ"],
    "gas": ["PJ/PJ"],
    "oil": ["PJ/PJ"],
    "wind": ["PJ/PJ"],
    "CO2f": ["kt/PJ"],
}
# ## Generate Power CommOut
power_technodata = pd.read_csv("/Users/alexanderkell/Documents/SGI/Projects/11-starter-kits/data/interim/technodata/power_technodata.csv")
power_technodata
# Real power process names (skip the "Unit" header row).
list(pd.unique(power_technodata[power_technodata.ProcessName != "Unit"]['ProcessName']))
# Distinct input fuels consumed by the power processes.
power_fuels = list(pd.unique(power_technodata[power_technodata.ProcessName != "Unit"]['Fuel']))
power_fuels
# Example techno-economic table, kept for reference formatting.
example_technoeconomic = pd.read_csv("/Users/alexanderkell/Documents/SGI/Projects/11-starter-kits/data/external/example_model/Techno_Economic.csv")
example_technoeconomic
# One (process, fuel) pair per row; value=1 marks "this process consumes
# this fuel" for the pivot below.
power_types = power_technodata[power_technodata.ProcessName != "Unit"][["ProcessName", "Fuel"]].drop_duplicates()
power_types['value'] = 1
power_types
# +
# CommIn: processes as rows, fuels as columns (1 where consumed, 0
# otherwise), plus fixed region/time/level metadata columns.
power_comm_in = power_types.pivot(index='ProcessName', columns='Fuel', values="value").fillna(0)
power_comm_in.insert(0, "RegionName", "kenya")
power_comm_in.insert(1, "Time", 2020)
power_comm_in.insert(2, "Level", "fixed")
power_comm_in.insert(3, "electricity", 0)
power_comm_in['CO2f'] = 0
power_comm_in
# -
pd.unique(power_types.Fuel)
# Normalise emission-table fuel names so they join against the technodata
# fuel labels.
emissions.Fuel = emissions.Fuel.str.lower()
emissions.Fuel = emissions.Fuel.str.replace("natural gas", "gas")
emissions.Fuel = emissions.Fuel.str.replace("crude oil", "oil")
emissions
power_types = power_technodata[power_technodata.ProcessName != "Unit"][["ProcessName", "Fuel"]].drop_duplicates()
power_types
# Attach a CO2f emission factor to each (process, fuel) pair; fuels absent
# from the emissions table (e.g. renewables) default to 0.
power_types_emissions = power_types.merge(emissions.drop(columns="Parameter"), on="Fuel", how="left").fillna(0)
power_types_emissions = power_types_emissions.rename(columns = {"Value": "CO2f"})
power_types_emissions['value'] = 0
power_types_emissions
# +
# CommOut: every process emits CO2f and outputs 1 unit of electricity; the
# fuel columns are all 0. (pivot with a list index needs pandas >= 1.1)
power_comm_out = power_types_emissions.pivot(index=['ProcessName', "CO2f"], columns='Fuel', values="value").fillna(0).reset_index()
power_comm_out['electricity'] = 1
power_comm_out.insert(1, "RegionName", "kenya")
power_comm_out.insert(2, "Time", 2020)
power_comm_out.insert(3, "Level", "fixed")
power_comm_out
# -
# Replicate the 2020 rows for every later milestone year via a cross join,
# then keep the joined year column as "Time".
power_comm_out_merged = pd.merge(power_comm_out, pd.Series(list(pd.unique(power_technodata.Time))[1:], name="Time"), how="cross")
power_comm_out_merged = power_comm_out_merged.drop(columns="Time_x")
power_comm_out = power_comm_out_merged.rename(columns={"Time_y":"Time"})
power_comm_out
# +
# Units header row for the power CommOut table: commodity flows in PJ per
# PJ of activity, CO2f in kt/PJ.
units = {
    "ProcessName": ["Unit"],
    "RegionName": ["-"],
    "Time": ["Year"],
    "Level": ["-"],
    "CO2f": ["kt/PJ"],
    "biomass": ["PJ/PJ"],
    "coal": ["PJ/PJ"],
    "gas": ["PJ/PJ"],
    "geothermal": ["PJ/PJ"],
    "hydro": ["PJ/PJ"],
    "oil": ["PJ/PJ"],
    "solar": ["PJ/PJ"],
    "uranium": ["PJ/PJ"],
    "wind": ["PJ/PJ"],
    "electricity": ["PJ/PJ"],
}
units
# -
units_row = pd.DataFrame.from_dict(units, orient="columns")
units_row
# DataFrame.append was deprecated in pandas 1.4 and removed in 2.0;
# pd.concat is the supported equivalent and keeps the units header as the
# first row, exactly as append did.
power_comm_out = pd.concat([units_row, power_comm_out])
power_comm_out
# ## Generate CommOut
def generate_comm_out(units, processes, outputs):
    """Build a CommOut table from a per-process output dictionary.

    Parameters
    ----------
    units : dict
        Column name -> single-element list giving the units header row.
    processes : iterable of str
        Process names that must all be present as keys of ``outputs``.
    outputs : dict
        Process name -> {commodity: output share} mapping.

    Returns
    -------
    pandas.DataFrame
        Units header row followed by one row per process, with the
        notebook's default RegionName/Time/Level metadata filled in.

    Raises
    ------
    ValueError
        If any entry of ``processes`` is missing from ``outputs``.
    """
    # Report the processes that are actually absent; the original message
    # dumped the whole outputs dict, which hid the problem.
    missing = [process for process in processes if process not in outputs]
    if missing:
        raise ValueError("Missing values in outputs dictionary: {}".format(missing))
    comm_out = pd.DataFrame.from_dict(outputs, orient='index')
    comm_out.insert(0, "RegionName", "kenya")
    comm_out.insert(1, "Time", 2020)
    comm_out.insert(2, "Level", "fixed")
    comm_out = comm_out.reset_index()
    comm_out = comm_out.rename(columns={"index": "ProcessName"})
    units_row = pd.DataFrame.from_dict(units, orient="columns")
    # DataFrame.append was removed in pandas 2.0; concat keeps the units
    # header as the first row exactly as append did.
    comm_out = pd.concat([units_row, comm_out])
    return comm_out
generate_comm_out(units, processes, crude_oil_output)
|
notebooks/converting/.ipynb_checkpoints/4.0-ajmk-CommIn-CommOut-checkpoint.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Programmatically Access TCGA Data using the Seven Bridges Cancer Genomics Cloud via the Datasets API
#
# TCGA is one of the world’s largest cancer genomics data collections, including more than eleven thousand patients, representing 33 cancers, and over half a million total files. Seven Bridges has created a unified metadata ontology from the diverse cancer studies, made this data available, and provided compute infrastructure to facilitate customized analyses on the Cancer Genomics Cloud (the CGC). The CGC provides powerful methods to query and reproducibly analyze TCGA data - alone or in conjunction with your own data.
#
# We continue to develop new methods of interacting with data on the CGC, however, we also appreciate that sometimes it is useful to be able to analyze data locally, or in an AWS environment that you have configured yourself. While the CGC has undergone thorough testing and is certified as a FISMA-moderate system, if you wish to analyze data in alternative locations, you must take the appropriate steps to ensure your computing environment is secure and compliant with current best practices. If you plan to download large numbers of files for local analysis, we recommend using the download utilities available from the Genomic Data Commons which have been specifically optimized for this purpose.
#
# Below, we provide a tutorial showing how to find and access TCGA data using the Datasets API. Alternatively, you can try to query TCGA data [using a SPARQL query](access_TCGA_on_AWS.ipynb).
#
# ## Objective
# Let's conduct an investigation to look for female Breast Cancer patients who are alive. We want their associated files to be open data, provide gene expression, and be produced by an experimental strategy of RNA-seq.
#
# ## Overview
# In this tutorial, we will demonstrate how you can use the [Datasets API](http://docs.cancergenomicscloud.org/docs/datasets-api-overview) to find all open access gene expression files obtained from RNA-Seq analysis of living female Breast Cancer patients.
#
# This method allows you to query the following TCGA entities:
# * cases
# * analytes
# * radiation therapies
# * drug therapies
# * follow ups
# * portions
# * aliquots
# * samples
# * slides
# * new tumor events
# * files
#
# The Datasets API is well-suited for browsing TCGA data.
#
# ## Prerequisites
# Before you begin this tutorial, you should:
#
# 1. **Set up your CGC account.** If you haven't already done so, navigate to https://cgc.sbgenomics.com/ and follow these [directions](http://docs.cancergenomicscloud.org/docs/sign-up-for-the-cgc) to register for the CGC. This tutorial uses Open Data, which is available to all CGC users. The same approach can be used by approved researchers to access Controlled Data. Learn more about TCGA data access in the CGC Knowledge Center.
# 2. **Install the Seven Bridges' API Python library.** This tutorial uses the library sevenbridges-python. Learn how to [install it](http://docs.cancergenomicscloud.org/blog/programmatically-access-tcga-data-using-the-seven-bridges-cancer-genomics-cloud) before continuing.
# 3. **Obtain your authentication token.** You'll use your authentication token to encode your user credentials when interacting with the CGC programmatically. Learn how to [access your authentication token](http://docs.cancergenomicscloud.org/docs/get-your-authentication-token). It is important to store your authentication token in a safe place as it can be used to access your account. The time and location your token was last used is shown on the developer dashboard. If for any reason you believe your token has been compromised, you can regenerate it at any time.
# ## Query using the Datasets API
# The Datasets API is an API designed around the TCGA data structure and focused on search functionality. You can use the Datasets API to browse TCGA using API requests written in JSON. Queries made using the Datasets API return entities and are particularly suitable for browsing TCGA data.
#
# We'll write a Python script to issue our query into TCGA using the Datasets API. Since the Datasets API is not included in our Python library, sevenbridges-python, we will use two Python modules, json and requests, to interact with it instead. We'll use these modules to write a wrapper around the API request.
import json
from requests import request
# Below, we define a simple function to send and receive JSONs from the API using the correctly formatted HTTP calls. The necessary imports are handled above.
def api_call(path, method='GET', query=None, data=None, token=None):
    """Send a JSON request to the CGC Datasets API and return the decoded body.

    Parameters
    ----------
    path : str
        Endpoint path appended to the Datasets API base URL.
    method : str
        HTTP verb ('GET', 'POST', ...).
    query : dict, optional
        URL query parameters.
    data : dict or list, optional
        JSON-serialisable request body; any other value means "no body".
    token : str, optional
        CGC authentication token for the X-SBG-Auth-Token header.

    Raises
    ------
    Exception
        If the server responds with a non-2xx status code.
    """
    base_url = 'https://cgc-datasets-api.sbgenomics.com/datasets/tcga/v0/'
    # Only dict/list payloads are serialised to JSON.
    data = json.dumps(data) if isinstance(data, (dict, list)) else None
    headers = {
        'X-SBG-Auth-Token': token,
        'Accept': 'application/json',
        'Content-type': 'application/json',
    }
    response = request(method, base_url + path, params=query,
                       data=data, headers=headers)
    # Decode the body once; fall back to {} for empty or non-JSON bodies
    # (the original parsed twice and crashed on non-JSON responses).
    try:
        response_dict = response.json() or {}
    except ValueError:
        response_dict = {}
    # BUG FIX: the original used true division (status_code / 100 != 2),
    # which on Python 3 rejects every 2xx status except exactly 200
    # (e.g. 201 -> 2.01 != 2). Floor division accepts the whole 2xx family.
    if response.status_code // 100 != 2:
        print(response_dict)
        raise Exception('Server responded with status code %s.' % response.status_code)
    return response_dict
# Then, provide your authentication token, as shown below. Examples of proper coding of your auth\_token are available for [sevenbridges-python bindings](https://github.com/sbg/okAPI/blob/master/Recipes/CGC/Setup_API_environment.ipynb)
# Placeholder only — replace with your real CGC authentication token.
# (The original placeholder text was mangled by an automated secret-redaction pass.)
auth_token = 'insert your authentication token here'
# Now, we can define a query in JSON for TCGA data based on its [metadata](http://docs.cancergenomicscloud.org/docs/tcga-metadata).
#
# As specified above, we want to find **female**, **Breast Cancer** patients (**cases**) with a vital status (**alive**) and the associated **files** which are **open-access**, provide **Gene expression**, and came from the **experimental strategy** of **RNA-seq**. We will assign an exact value to the above properties.
#
# #### PROTIP:
# Extensive details of the Datasets API calls are available [here](http://docs.cancergenomicscloud.org/docs/query-via-the-datasets-api).
query_body = {
"entity": "files",
"hasAccessLevel" : "Open",
"hasDataType" : "Gene expression",
"hasExperimentalStrategy": "RNA-Seq",
"hasCase": {
"hasDiseaseType" : "Breast Invasive Carcinoma",
"hasGender" : "FEMALE",
"hasVitalStatus" : "Alive"
}
}
# The call below returns a dictionary containing the total number of records.
# Total number of files matching the query (dict with a 'total' key).
total = api_call(method='POST', path ='query/total', \
    token=auth_token, data=query_body)
print("There are {} files matching the query".format(total['total']))
# Now, we can create an initial list of all records, 100 at a time. In the example below, this list is named `files_in_query`. We'll use this initial list to catalogue the data returned by the query.
#
# #### PROTIP
# Working with pagination in the API is beautifully described <a href="http://docs.cancergenomicscloud.org/docs/the-cgc-api#section-response-pagination">here</a>.
# +
files_in_query = []
# Python 2 compatibility: make `/` true division so the percentage printed
# below is not truncated. NOTE(review): __future__ imports must be the first
# statement of a module — this only works because notebooks run cell by cell.
from __future__ import division
from math import ceil
# Number of 100-record pages needed to cover all matching files.
loops = int(ceil(total['total']/100))
for ii in range(0,loops):
    # Fetch one page of up to 100 records via the `offset` query parameter.
    files_in_query.append(api_call(method='POST', \
        path =("query?offset=%i" % (100*ii)), \
        token=auth_token, data=query_body))
    print("%3.1f percent of files added" % (100*(ii+1)/loops))
# NOTE: each item in files_in_query is one page holding up to 100 files from the query. Example below:
print('\n \n')
print(files_in_query[0]['_embedded']['files'][0])
print(files_in_query[1]['_embedded']['files'][0])
# -
# We've now successfully compiled a list of a catalogue of file ids! We'll access the data in the next step using the CGC API.
# ## Access TCGA data using the CGC API
# In this section, we will use the CGC API to access TCGA data. Since we are using the CGC API (as opposed to the Datasets API in the previous step), we will use the [sevenbridges-python bindings](http://sevenbridges-python.readthedocs.io/en/latest/). You should have already installed this library as described under the Prerequisites section. You may also wish to take a look at the library quickstart guide (http://sevenbridges-python.readthedocs.io/en/latest/quickstart/#authentication-and-configuration) before moving forward. Before initializing the library, we recommend creating a config file to store your authentication token for use by the CGC API.
#
# ### Set up your authentication token
# Since we're now using the CGC API, we need to provide our authentication credentials. Once you have installed sevenbridges-python, you can authenticate by storing your credentials in a config file, `.sbgrc`. Enter your credentials in the config file, as shown below, replacing the last line with your authentication token:
#
# ```bash
# [cgc]
# api-url = https://cgc-api.sbgenomics.com/v2
# auth-token = insert auth token here
# ```
#
# ### Initialize the sevenbridges-python library
# We've now installed sevenbridges-python and stored our credentials in a config file. Let's import the `api` class from the official `sevenbridges-python` bindings.
import sevenbridges as sbg
# Let's initialize the `api` object so the API knows our credentials.
# +
# [USER INPUT] specify platform {cgc, sbg}
prof = 'cgc'
# Read the API URL and auth token for that profile from the ~/.sbgrc file.
config_file = sbg.Config(profile=prof)
api = sbg.Api(config=config_file)
# -
# ### Access TCGA data
# Now, we loop through the first ten files in the first item of the `files_in_query` list from above using the `id` key.
#
# We will now do the following with these ids:
#
# 1. Create a list of files on the CGC. From this point, it would be possible to take action on the CGC. For instance, you can use a bioinformatics workflow or tool on these files and start an analysis.
# 2. (optional) Generate a list of access links.
# 3. Access each of the ten files in this list. They will be saved to the Downloads folder in your local directory.
# +
# 1) Generate a list of file objects from the first ten ids on the first
#    page of query results.
file_list = []
for f in files_in_query[0]['_embedded']['files'][0:10]:
    file_list.append(api.files.get(id=f['id']))
    print(file_list[-1].name)

# (BRANCH-POINT) Do something AWESOME with these files on the CGC

# 2) (optional) Generate a list of download links
dl_list = []
for f in file_list:
    dl_list.append(f.download_info())

# 3) Download each of the files in the list to a _downloads_ folder in your
#    local directory.
import os

dl_dir = 'downloads'
# exist_ok avoids the stat-then-mkdir race of the original and removes the
# bare `except:` that silently swallowed unrelated errors (permissions, ...).
os.makedirs(dl_dir, exist_ok=True)
for f in file_list:
    f.download(path="%s/%s" % (dl_dir, f.name))
# -
# ## Conclusion
# Congratulations! You've learned to query TCGA data hosted on AWS using the Datasets API and then access this data using the CGC API. You can learn more about [querying TCGA with the Datasets API](http://docs.cancergenomicscloud.org/v1.0/docs/query-via-the-datasets-api) on our Knowledge Center.
#
# The next move is yours: take the data to the analytical tool and environment of your choice.
|
Tutorials/CGC/access_TCGA_on_AWS_via_DatasetsAPI.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
from pathlib import Path
from sklearn.model_selection import train_test_split
import numpy as np
import pandas as pd
from livelossplot.keras import PlotLossesCallback
import matplotlib.pyplot as plt
import seaborn as sns
from keras.models import Sequential
from keras.utils import np_utils
from keras.preprocessing.image import ImageDataGenerator
from keras.layers import Dense, Activation, Flatten, Dropout, BatchNormalization, Conv2D, MaxPooling2D, Input
from keras.datasets import cifar10
from keras import regularizers
from keras.callbacks import LearningRateScheduler, Callback
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import keras
# -
# for training our model
# Load the heart-disease tables, keyed by patient id.
train_values = pd.read_csv('train_values.csv', index_col='patient_id')
train_labels = pd.read_csv('train_labels.csv', index_col='patient_id')
test_values = pd.read_csv('test_values.csv', index_col='patient_id')
# Class balance of the binary target.
train_labels.heart_disease_present.value_counts().plot.bar(title='Number with Heart Disease')
# Earlier, smaller feature set kept for reference:
#selected_features = ['age',
# 'sex',
# 'max_heart_rate_achieved',
# 'resting_blood_pressure']
# All twelve available clinical features are used for the final model.
selected_features =['slope_of_peak_exercise_st_segment',
    'resting_blood_pressure',
    'chest_pain_type',
    'num_major_vessels',
    'fasting_blood_sugar_gt_120_mg_per_dl',
    'resting_ekg_results',
    'serum_cholesterol_mg_per_dl',
    'oldpeak_eq_st_depression',
    'sex',
    'age',
    'max_heart_rate_achieved',
    'exercise_induced_angina']
train_values_subset = train_values[selected_features]
X_test = test_values[selected_features]
predictors =train_values_subset
target = train_labels.heart_disease_present
# 90/10 train/validation split with a fixed seed for reproducibility.
X_train,X_val,Y_train,Y_val = train_test_split(predictors,target,test_size=0.10,random_state=0)
# +
# network parameters
batch_size = 128 #128  # NOTE(review): unused — model.fit below hard-codes batch_size=256
hidden_units = 256  # NOTE(review): unused — layer widths are hard-coded below
dropout = 0.2
input_size=12  # must match len(selected_features)
weight_decay= 1e-4  # L2 penalty applied to every Dense layer
# +
# Fully connected binary classifier: 1024-512-512-256-256-1 with ReLU
# hidden activations and L2 regularisation throughout. Dropout is applied
# only before the output layer; the inner Dropout layers were disabled
# during experimentation and are kept commented out.
model = Sequential()
model.add(Dense(1024, input_dim=input_size,kernel_regularizer=regularizers.l2(weight_decay)))
model.add(Activation('relu'))
#model.add(Dropout(dropout))
model.add(Dense(512,kernel_regularizer=regularizers.l2(weight_decay)))
model.add(Activation('relu'))
#model.add(Dropout(dropout))
model.add(Dense(512,kernel_regularizer=regularizers.l2(weight_decay)))
model.add(Activation('relu'))
#model.add(Dropout(dropout))
model.add(Dense(256,kernel_regularizer=regularizers.l2(weight_decay)))
model.add(Activation('relu'))
#model.add(Dropout(dropout))
model.add(Dense(256,kernel_regularizer=regularizers.l2(weight_decay)))
model.add(Activation('relu'))
model.add(Dropout(dropout))
# Single sigmoid unit -> probability of heart disease.
model.add(Dense(1))
model.add(Activation('sigmoid'))
model.summary()
# -
model.compile(loss='binary_crossentropy', #
    optimizer='adam', #adam
    metrics=['accuracy'])
# 125 epochs with live loss plotting; note the hard-coded batch_size=256
# overrides the batch_size variable defined above.
model.fit(X_train, Y_train, validation_data=(X_val, Y_val), epochs=125, verbose=2, batch_size=256,
    callbacks=[PlotLossesCallback()])
# Predicted probabilities for the held-out test patients.
predictions=model.predict(X_test, batch_size=128)
# Wrap the raw predictions in the competition's required submission layout
# (same columns and patient-id index as the provided format file).
submission_format = pd.read_csv('submission_format.csv', index_col='patient_id')
my_submission = pd.DataFrame(data=predictions,
    columns=submission_format.columns,
    index=submission_format.index)
my_submission.head()
my_submission.to_csv('submission2.csv')
#validation log loss =0.54680
|
model2.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + _cell_guid="b1076dfc-b9ad-4769-8c92-a6c4dae69d19" _uuid="8f2839f25d086af736a60e9eeb907d3b93b6e0e5" papermill={"duration": 15.603022, "end_time": "2022-02-05T04:47:29.575897", "exception": false, "start_time": "2022-02-05T04:47:13.972875", "status": "completed"} tags=[]
# !pip install 'git+https://github.com/katsura-jp/pytorch-cosine-annealing-with-warmup'
# + [markdown] papermill={"duration": 0.012346, "end_time": "2022-02-05T04:47:29.601965", "exception": false, "start_time": "2022-02-05T04:47:29.589619", "status": "completed"} tags=[]
# # MAIN
# + papermill={"duration": 8.720961, "end_time": "2022-02-05T04:47:38.335456", "exception": false, "start_time": "2022-02-05T04:47:29.614495", "status": "completed"} tags=[]
import os
import gc
import copy
import time
import random
import string
import pickle
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
from pathlib import Path
from glob import glob
from tqdm.notebook import tqdm
from collections import defaultdict
from sklearn.metrics import mean_squared_error
from sklearn.model_selection import GroupKFold, KFold, StratifiedKFold
import torch
import torch.nn as nn
import torch.optim as optim
from torch.optim import lr_scheduler
from torch.utils.data import Dataset, DataLoader
from transformers import AutoTokenizer, AutoModel, AutoConfig, AdamW
from cosine_annealing_warmup import CosineAnnealingWarmupRestarts
def set_seed(seed=42):
    """Make runs reproducible: seed Python hashing, NumPy and PyTorch
    (CPU and CUDA), and pin cuDNN to deterministic kernels."""
    os.environ['PYTHONHASHSEED'] = str(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    torch.cuda.manual_seed(seed)
    # Trade autotuned kernel selection for bitwise-reproducible behaviour.
    torch.backends.cudnn.benchmark = False
    torch.backends.cudnn.deterministic = True
# + papermill={"duration": 7.378815, "end_time": "2022-02-05T04:47:45.727153", "exception": false, "start_time": "2022-02-05T04:47:38.348338", "status": "completed"} tags=[]
# Experiment configuration. All hyper-parameters live in this dict so the
# training/validation helpers below can stay generic.
CONFIG = {
    "seed": 42,
    "epochs": 3,
    "model_name": "unitary/multilingual-toxic-xlm-roberta",  # HF checkpoint
    "train_batch_size": 8,
    "valid_batch_size": 32,
    "max_length": 128,  # token truncation/padding length
    "learning_rate": 1e-5,
    "scheduler": 'CosineAnnealingWarmupRestarts', #'MultiStepLR',
    "min_lr": 1e-6,
    "T_mult": 1,  # cycle-length multiplier for warm restarts
    "warmup_steps": 10,
    "gamma": 1,  # LR decay per restart cycle (1 = no decay)
    "weight_decay": 2e-5,
    "n_fold": 5,
    "n_accumulate": 1,  # gradient-accumulation steps
    "num_classes": 1,  # one toxicity score per comment
    "margin": 0.5,  # MarginRankingLoss margin
    "device": torch.device("cuda:0" if torch.cuda.is_available() else "cpu"),
}
# The tokenizer is stored in CONFIG so datasets can be built from it directly.
CONFIG["tokenizer"] = AutoTokenizer.from_pretrained(CONFIG['model_name'])
CONFIG['group'] = CONFIG['model_name'] + '_Baseline'
set_seed(CONFIG['seed'])
# + papermill={"duration": 0.032434, "end_time": "2022-02-05T04:47:45.774776", "exception": false, "start_time": "2022-02-05T04:47:45.742342", "status": "completed"} tags=[]
class JigsawDataset(Dataset):
    """Pairwise toxicity dataset.

    Each row of *df* yields a tokenised (more_toxic, less_toxic) comment
    pair plus a constant ranking target of 1, as expected by
    nn.MarginRankingLoss (first score should exceed the second).
    """

    def __init__(self, df, tokenizer, max_length):
        self.df = df
        self.max_len = max_length
        self.tokenizer = tokenizer
        self.more_toxic = df['more_toxic'].values
        self.less_toxic = df['less_toxic'].values

    def __len__(self):
        return len(self.df)

    def _encode(self, text):
        # Tokenise one comment, truncated/padded to the fixed length.
        return self.tokenizer.encode_plus(
            text,
            truncation=True,
            add_special_tokens=True,
            max_length=self.max_len,
            padding='max_length'
        )

    def __getitem__(self, index):
        enc_more = self._encode(self.more_toxic[index])
        enc_less = self._encode(self.less_toxic[index])
        # target=1 tells MarginRankingLoss the first input ranks higher.
        return {
            'more_toxic_ids': torch.tensor(enc_more['input_ids'], dtype=torch.long),
            'more_toxic_mask': torch.tensor(enc_more['attention_mask'], dtype=torch.long),
            'less_toxic_ids': torch.tensor(enc_less['input_ids'], dtype=torch.long),
            'less_toxic_mask': torch.tensor(enc_less['attention_mask'], dtype=torch.long),
            'target': torch.tensor(1, dtype=torch.long)
        }
class JigsawModel(nn.Module):
    """Transformer backbone with a single linear head producing one
    toxicity score per comment.

    The pretrained model is loaded with its internal dropout disabled and
    hidden states exposed; the score is read from the first ([CLS])
    position of the final hidden layer.
    """

    def __init__(self, model_name):
        super(JigsawModel, self).__init__()
        backbone_config = AutoConfig.from_pretrained(model_name)
        # Expose hidden states and switch off dropout inside the backbone.
        backbone_config.update({
            "output_hidden_states": True,
            "hidden_dropout_prob": 0.0,
            "attention_probs_dropout_prob": 0.0,
        })
        self.model = AutoModel.from_pretrained(model_name, config=backbone_config)
        self.linear = nn.Linear(768, CONFIG['num_classes'])

    def forward(self, ids, mask):
        encoded = self.model(
            input_ids=ids,
            attention_mask=mask,
        )
        # Score from the first token of the last hidden state.
        return self.linear(encoded.last_hidden_state[:, 0, :])
# + papermill={"duration": 0.060468, "end_time": "2022-02-05T04:47:45.849859", "exception": false, "start_time": "2022-02-05T04:47:45.789391", "status": "completed"} tags=[]
def criterion(outputs1, outputs2, targets):
    """Margin ranking loss: penalise pairs where outputs1 does not exceed
    outputs2 by at least CONFIG['margin'] (targets is +1 throughout)."""
    loss_fn = nn.MarginRankingLoss(margin=CONFIG['margin'])
    return loss_fn(outputs1, outputs2, targets)
def train_one_epoch(model, optimizer, scheduler, dataloader, device, epoch):
    """Train for one epoch over `dataloader` and return the mean loss.

    Uses pairwise margin ranking: the model should score the more-toxic
    comment of each pair higher than the less-toxic one. Supports gradient
    accumulation via CONFIG['n_accumulate']; the scheduler (if any) is
    stepped once per optimizer step, i.e. batch-level scheduling.
    """
    model.train()
    dataset_size = 0
    running_loss = 0.0
    bar = tqdm(enumerate(dataloader), total=len(dataloader))
    for step, data in bar:
        more_toxic_ids = data['more_toxic_ids'].to(device, dtype = torch.long)
        more_toxic_mask = data['more_toxic_mask'].to(device, dtype = torch.long)
        less_toxic_ids = data['less_toxic_ids'].to(device, dtype = torch.long)
        less_toxic_mask = data['less_toxic_mask'].to(device, dtype = torch.long)
        targets = data['target'].to(device, dtype=torch.long)
        batch_size = more_toxic_ids.size(0)
        # Score both comments of each pair with the same model.
        more_toxic_outputs = model(more_toxic_ids, more_toxic_mask)
        less_toxic_outputs = model(less_toxic_ids, less_toxic_mask)
        loss = criterion(more_toxic_outputs, less_toxic_outputs, targets)
        # Scale down so accumulated gradients average over micro-batches.
        loss = loss / CONFIG['n_accumulate']
        loss.backward()
        if (step + 1) % CONFIG['n_accumulate'] == 0:
            optimizer.step()
            # zero the parameter gradients
            optimizer.zero_grad()
            if scheduler is not None:
                scheduler.step()
        # Running mean of the (scaled) loss, weighted by batch size.
        running_loss += (loss.item() * batch_size)
        dataset_size += batch_size
        epoch_loss = running_loss / dataset_size
        bar.set_postfix(
            Epoch=epoch,
            Train_Loss=epoch_loss,
            LR=optimizer.param_groups[0]['lr']
        )
    gc.collect()
    return epoch_loss
@torch.no_grad()
def valid_one_epoch(model, dataloader, device, epoch):
model.eval()
dataset_size = 0
running_loss = 0.0
bar = tqdm(enumerate(dataloader), total=len(dataloader))
for step, data in bar:
more_toxic_ids = data['more_toxic_ids'].to(device, dtype = torch.long)
more_toxic_mask = data['more_toxic_mask'].to(device, dtype = torch.long)
less_toxic_ids = data['less_toxic_ids'].to(device, dtype = torch.long)
less_toxic_mask = data['less_toxic_mask'].to(device, dtype = torch.long)
targets = data['target'].to(device, dtype=torch.long)
batch_size = more_toxic_ids.size(0)
more_toxic_outputs = model(more_toxic_ids, more_toxic_mask)
less_toxic_outputs = model(less_toxic_ids, less_toxic_mask)
loss = criterion(more_toxic_outputs, less_toxic_outputs, targets)
running_loss += (loss.item() * batch_size)
dataset_size += batch_size
epoch_loss = running_loss / dataset_size
bar.set_postfix(
Epoch=epoch,
Valid_Loss=epoch_loss,
LR=optimizer.param_groups[0]['lr']
)
gc.collect()
return epoch_loss
@torch.no_grad()
def valid_trainingdata(model, dataloader, device):
model.eval()
dataset_size = 0
running_loss = 0.0
MORE_TOXIC_PREDS = []
LESS_TOXIC_PREDS = []
bar = tqdm(enumerate(dataloader), total=len(dataloader))
for step, data in bar:
more_toxic_ids = data['more_toxic_ids'].to(device, dtype = torch.long)
more_toxic_mask = data['more_toxic_mask'].to(device, dtype = torch.long)
less_toxic_ids = data['less_toxic_ids'].to(device, dtype = torch.long)
less_toxic_mask = data['less_toxic_mask'].to(device, dtype = torch.long)
more_toxic_outputs = model(more_toxic_ids, more_toxic_mask)
less_toxic_outputs = model(less_toxic_ids, less_toxic_mask)
MORE_TOXIC_PREDS.append(more_toxic_outputs.view(-1).cpu().detach().numpy())
LESS_TOXIC_PREDS.append(less_toxic_outputs.view(-1).cpu().detach().numpy())
MORE_TOXIC_PREDS = np.concatenate(MORE_TOXIC_PREDS)
LESS_TOXIC_PREDS = np.concatenate(LESS_TOXIC_PREDS)
gc.collect()
return MORE_TOXIC_PREDS, LESS_TOXIC_PREDS
def run_training(model, optimizer, scheduler, device, num_epochs, fold):
    """Full train/validate loop for one CV fold.

    Tracks the best validation loss seen so far, checkpointing the model
    weights to 'Loss-Fold-{fold}.bin' whenever it improves, and reloads
    the best weights before returning (model, history).

    NOTE(review): `train_loader` and `valid_loader` are read from module
    globals set in the fold loop below rather than passed as arguments —
    confirm this is intended.
    """
    if torch.cuda.is_available():
        print("[INFO] Using GPU: {}\n".format(torch.cuda.get_device_name()))
    start = time.time()
    best_model_wts = copy.deepcopy(model.state_dict())
    best_epoch_loss = np.inf
    history = defaultdict(list)
    for epoch in range(1, num_epochs + 1):
        gc.collect()
        train_epoch_loss = train_one_epoch(
            model,
            optimizer,
            scheduler,
            dataloader=train_loader,
            device=CONFIG['device'],
            epoch=epoch
        )
        val_epoch_loss = valid_one_epoch(
            model,
            valid_loader,
            device=CONFIG['device'],
            epoch=epoch
        )
        history['Train Loss'].append(train_epoch_loss)
        history['Valid Loss'].append(val_epoch_loss)
        # deep copy the model
        # Checkpoint whenever validation loss matches or improves the best.
        if val_epoch_loss <= best_epoch_loss:
            print(f"Validation Loss Improved ({best_epoch_loss}->{val_epoch_loss})")
            best_epoch_loss = val_epoch_loss
            best_model_wts = copy.deepcopy(model.state_dict())
            PATH = f"Loss-Fold-{fold}.bin"
            torch.save(model.state_dict(), PATH)
            # Save a model file from the current directory
            print(f"Model Saved")
        print()
    end = time.time()
    time_elapsed = end - start
    print('Training complete in {:.0f}h {:.0f}m {:.0f}s'.format(
        time_elapsed // 3600, (time_elapsed % 3600) // 60, (time_elapsed % 3600) % 60
    ))
    print("Best Loss: {:.4f}".format(best_epoch_loss))
    # load best model weights
    model.load_state_dict(best_model_wts)
    return model, history
def prepare_loaders(df, fold):
    """Split *df* into train/valid by fold id and wrap both in DataLoaders.

    Rows with df.fold == fold become the validation split; all other rows
    are training data. Batch sizes and tokenisation come from CONFIG.
    """
    train_df = df[df.fold != fold].reset_index(drop=True)
    valid_df = df[df.fold == fold].reset_index(drop=True)

    def _make_dataset(frame):
        # Shared dataset construction for both splits.
        return JigsawDataset(
            frame,
            tokenizer=CONFIG['tokenizer'],
            max_length=CONFIG['max_length']
        )

    # Training loader shuffles and drops the ragged final batch; the
    # validation loader preserves order so predictions align with rows.
    train_loader = DataLoader(
        _make_dataset(train_df),
        batch_size=CONFIG['train_batch_size'],
        num_workers=2,
        shuffle=True,
        pin_memory=True,
        drop_last=True
    )
    valid_loader = DataLoader(
        _make_dataset(valid_df),
        batch_size=CONFIG['valid_batch_size'],
        num_workers=2,
        shuffle=False,
        pin_memory=True
    )
    return train_loader, valid_loader
def fetch_scheduler(optimizer, size=None):
    """Build the learning-rate scheduler named by CONFIG['scheduler'].

    Parameters
    ----------
    optimizer : torch.optim.Optimizer
        Optimizer whose learning rate will be scheduled.
    size : int, optional
        Steps (batches) per epoch; used as the cycle length for warm
        restarts and the milestone spacing for MultiStepLR.

    Returns
    -------
    A scheduler instance, or None when CONFIG['scheduler'] is None.

    Raises
    ------
    ValueError
        For an unrecognised scheduler name. (The original fell through
        and crashed with UnboundLocalError instead.)
    """
    name = CONFIG['scheduler']
    if name == 'CosineAnnealingLR':
        return lr_scheduler.CosineAnnealingLR(
            optimizer,
            T_max=CONFIG['T_max'],
            eta_min=CONFIG['min_lr']
        )
    if name == 'CosineAnnealingWarmupRestarts':
        # One cosine cycle per epoch (size batches) with linear warmup.
        return CosineAnnealingWarmupRestarts(
            optimizer,
            first_cycle_steps=size,
            cycle_mult=CONFIG['T_mult'],
            max_lr=CONFIG['learning_rate'],
            min_lr=CONFIG['min_lr'],
            warmup_steps=CONFIG['warmup_steps'],
            gamma=CONFIG['gamma']
        )
    if name == 'MultiStepLR':
        # Drop the LR by 10x at each epoch boundary.
        return lr_scheduler.MultiStepLR(
            optimizer,
            milestones=[size * i + 1 for i in range(1, CONFIG['epochs'])],
            gamma=0.1
        )
    if name is None:  # idiomatic None check (was `== None`)
        return None
    raise ValueError("Unknown scheduler: {!r}".format(name))
# + papermill={"duration": 0.841832, "end_time": "2022-02-05T04:47:46.706684", "exception": false, "start_time": "2022-02-05T04:47:45.864852", "status": "completed"} tags=[]
class UnionFind():
    """Disjoint-set forest with union by size and path compression.

    parents[x] holds the parent of x, or -(component size) when x is a
    root — the classic negative-size encoding.
    """

    def __init__(self, n):
        self.n = n
        self.parents = [-1] * n

    def find(self, x):
        """Return the root of x's component, compressing the path."""
        parent = self.parents[x]
        if parent < 0:
            return x
        root = self.find(parent)
        self.parents[x] = root
        return root

    def union(self, x, y):
        """Merge the components of x and y (no-op if already joined)."""
        x, y = self.find(x), self.find(y)
        if x == y:
            return
        # Attach the smaller tree under the larger one (sizes are stored
        # negated, so "greater" means "smaller component").
        if self.parents[x] > self.parents[y]:
            x, y = y, x
        self.parents[x] += self.parents[y]
        self.parents[y] = x
def get_group_unionfind(train: pd.DataFrame):
    """Assign a 'group' id to every row so comments linked through any
    (less_toxic, more_toxic) pair share a group — i.e. connected
    components of the pair graph. Used for leakage-free CV splitting.
    """
    texts = pd.concat([train['less_toxic'], train['more_toxic']]).unique()
    text_to_id = {text: idx for idx, text in enumerate(texts)}
    id_to_text = {idx: text for text, idx in text_to_id.items()}

    # Temporary integer columns so the union loop works on array rows.
    train['num_less_toxic'] = train['less_toxic'].map(text_to_id)
    train['num_more_toxic'] = train['more_toxic'].map(text_to_id)

    components = UnionFind(len(texts))
    for left, right in train[['num_less_toxic', 'num_more_toxic']].to_numpy():
        components.union(left, right)

    # Group id = the union-find root of each comment's integer id.
    text_to_group = {id_to_text[idx]: components.find(idx) for idx in range(len(texts))}
    train['group'] = train['less_toxic'].map(text_to_group)
    train = train.drop(columns=['num_less_toxic', 'num_more_toxic'])
    return train
# Jigsaw validation pairs: each row holds (less_toxic, more_toxic) comment text.
train = pd.read_csv("../input/jigsaw-toxic-severity-rating/validation_data.csv")
# get fold
# Group connected comments so the same comment never appears in both the
# train and validation split of a fold (leakage prevention), then assign
# folds with GroupKFold over those groups.
train = get_group_unionfind(train)
train['fold'] = -1
group_kfold = GroupKFold(n_splits=5)
for fold, (trn_idx, val_idx) in enumerate(group_kfold.split(train, train, train['group'])):
    train.loc[val_idx, "fold"] = fold
# + papermill={"duration": 12866.690937, "end_time": "2022-02-05T08:22:13.412741", "exception": false, "start_time": "2022-02-05T04:47:46.721804", "status": "completed"} tags=[]
# Cross-validated training: one model per fold; out-of-fold predictions are
# written back into `train` so a pairwise-accuracy CV score can be computed.
for fold in range(0, CONFIG['n_fold']):
    print(f"====== Fold: {fold} ======")
    # Create Dataloaders
    train_loader, valid_loader = prepare_loaders(train, fold=fold)
    size = train_loader.__len__()
    print('train num_batch:', size)
    model = JigsawModel(CONFIG['model_name'])
    model.to(CONFIG['device'])
    # Define Optimizer and Scheduler
    optimizer = AdamW(
        model.parameters(),
        lr=CONFIG['learning_rate'],
        weight_decay=CONFIG['weight_decay']
    )
    scheduler = fetch_scheduler(optimizer, size=size)
    model, history = run_training(
        model,
        optimizer,
        scheduler,
        device=CONFIG['device'],
        num_epochs=CONFIG['epochs'],
        fold=fold
    )
    # Out-of-fold scores for both comments of each validation pair.
    more_pred, less_pred = valid_trainingdata(
        model,
        valid_loader,
        device=CONFIG['device']
    )
    train.loc[train['fold']==fold, 'more_pred'] = more_pred
    train.loc[train['fold']==fold, 'less_pred'] = less_pred
    # Free memory before the next fold's model is built.
    del model, history, train_loader, valid_loader
    gc.collect()
# CV metric: fraction of pairs where the more-toxic comment scored higher.
score = round((train['more_pred'] > train['less_pred']).mean(), 5)
print(f'CV: {score}')
train.to_csv('train_pred.csv', index=False)
# + papermill={"duration": 0.044589, "end_time": "2022-02-05T08:22:13.503541", "exception": false, "start_time": "2022-02-05T08:22:13.458952", "status": "completed"} tags=[]
|
exp/jigsaw-exp019-toxic-xlm-roberta.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: 'Python 3.8.10 64-bit (''ML'': conda)'
# name: python3810jvsc74a57bd069fefe8a05ed067aa524d2e4687ce4fe98c67287ddfa6aea5f6acb986df8cc30
# ---
# +
# How to Normalize input so that each string will have same input
# 1- Using oov_token: Adds a token in place of a word that was not in the original corpus
# 2- Using padding: Pads all sentences such that all sentences match the length of the largest sentence
# +
# Demonstrate Keras text preprocessing: tokenization with an OOV token plus
# sequence padding so every input has the same length.
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras.preprocessing.text import Tokenizer
from tensorflow.keras.preprocessing.sequence import pad_sequences
# Tiny toy corpus the tokenizer is fit on.
sentences = [
    'I love my dog',
    'I love my cat',
    'You love my dog!',
    'Do you think my dog is amazing?'
]
# num_words = keep at most 100 tokens; oov_token stands in for unseen words.
tokenizer = Tokenizer(num_words = 100, oov_token="????")
tokenizer.fit_on_texts(sentences)
word_index = tokenizer.word_index  # word -> integer index (the OOV token gets index 1)
sequences = tokenizer.texts_to_sequences(sentences)
padded = pad_sequences(sequences)  # default: zero-pad at the front to the longest sentence
print("\nWord Index = " , word_index)
print("\nSequences = " , sequences)
print("\nPadded Sequences:")
print(padded)
# Try with words that the tokenizer wasn't fit to
test_data = [
    'i really love my dog',
    'my dog loves my manatee'
]
test_seq = tokenizer.texts_to_sequences(test_data)
print("\nTest Sequence = ", test_seq) # 1s are words not in the original corpus, replaced by the oov token '????'
padded = pad_sequences(test_seq, padding='post') # pad at the end; maxlen=10 would also cap the length
print("\nPadded Test Sequence: ")
print(padded)
# -
|
Extra Codes/Machine Learning/Padding.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 2
# language: python
# name: python2
# ---
# +
# %matplotlib inline
import numpy as np
from netCDF4 import Dataset
import matplotlib.pyplot as plt
from matplotlib.cm import get_cmap
import cartopy.crs as crs
from cartopy.feature import NaturalEarthFeature
from wrf import to_np, getvar, smooth2d, get_cartopy, cartopy_xlim, cartopy_ylim, latlon_coords
# Open the NetCDF file
ncfile = Dataset("/Users/ladwig/Documents/wrf_files/wrf_vortex_multi/wrfout_d01_2005-08-28_12:00:00")
# Cloud top temperature (fill_nocloud=False: cloud-free cells keep a value)
# and sea level pressure.
ctt = getvar(ncfile, "ctt", fill_nocloud=False)
slp = getvar(ncfile, "slp")
# Get the latitude and longitude points
lats, lons = latlon_coords(ctt)
# Get the cartopy mapping object
cart_proj = get_cartopy(ctt)
# Create a figure
fig = plt.figure(figsize=(12,9))
# Set the GeoAxes to the projection used by WRF
ax = plt.axes(projection=cart_proj)
# Download and add the states and coastlines
states = NaturalEarthFeature(category='cultural', scale='50m', facecolor='none',
                             name='admin_1_states_provinces_shp')
ax.add_feature(states, linewidth=.5)
ax.coastlines('50m', linewidth=0.8)
# SLP as 10 automatic line contours; CTT as filled contours every 5 degC.
levels = np.arange(-80, 20, 5)
plt.contour(to_np(lons), to_np(lats), to_np(slp), 10, colors="black",
            transform=crs.PlateCarree())
plt.contourf(to_np(lons), to_np(lats), to_np(ctt), levels=levels, transform=crs.PlateCarree(),
             cmap=get_cmap("Greys"))
# Add a color bar
plt.colorbar(ax=ax, shrink=.88)
# Set the map limits. Not really necessary, but used for demonstration.
ax.set_xlim(cartopy_xlim(ctt))
ax.set_ylim(cartopy_ylim(ctt))
# Add the gridlines
ax.gridlines(color="black", linestyle="dotted")
plt.title("Cloud Top Temperature (degC)")
plt.show()
# +
# %matplotlib inline
import numpy as np
from netCDF4 import Dataset
import matplotlib.pyplot as plt
from matplotlib.cm import get_cmap
import cartopy.crs as crs
from cartopy.feature import NaturalEarthFeature
from wrf import to_np, getvar, smooth2d, get_cartopy, cartopy_xlim, cartopy_ylim, latlon_coords
# Open the NetCDF file
ncfile = Dataset("/Users/ladwig/Documents/wrf_files/wrf_vortex_multi/wrfout_d01_2005-08-28_12:00:00")
# Same plot as above but with fill_nocloud=True, which masks cloud-free cells;
# opt_thresh is presumably the optical-depth threshold deciding "cloudy" —
# TODO confirm against the wrf-python getvar/ctt documentation.
ctt = getvar(ncfile, "ctt", fill_nocloud=True, opt_thresh=1.0)
slp = getvar(ncfile, "slp")
# Get the latitude and longitude points
lats, lons = latlon_coords(ctt)
# Get the cartopy mapping object
cart_proj = get_cartopy(ctt)
# Create a figure
fig = plt.figure(figsize=(12,9))
# Set the GeoAxes to the projection used by WRF
ax = plt.axes(projection=cart_proj)
# Download and add the states and coastlines
states = NaturalEarthFeature(category='cultural', scale='50m', facecolor='none',
                             name='admin_1_states_provinces_shp')
ax.add_feature(states, linewidth=.5)
ax.coastlines('50m', linewidth=0.8)
# SLP as 10 automatic line contours; CTT as filled contours every 5 degC.
levels = np.arange(-80, 20, 5)
plt.contour(to_np(lons), to_np(lats), to_np(slp), 10, colors="black",
            transform=crs.PlateCarree())
plt.contourf(to_np(lons), to_np(lats), to_np(ctt), levels=levels, transform=crs.PlateCarree(),
             cmap=get_cmap("Greys"))
# Add a color bar
plt.colorbar(ax=ax, shrink=.88)
# Set the map limits. Not really necessary, but used for demonstration.
ax.set_xlim(cartopy_xlim(ctt))
ax.set_ylim(cartopy_ylim(ctt))
# Add the gridlines
ax.gridlines(color="black", linestyle="dotted")
plt.title("Cloud Top Temperature (degC)")
plt.show()
# +
# %matplotlib inline
import numpy as np
from netCDF4 import Dataset
import matplotlib.pyplot as plt
from matplotlib.cm import get_cmap
import cartopy.crs as crs
from cartopy.feature import NaturalEarthFeature
from wrf import to_np, getvar, smooth2d, get_cartopy, cartopy_xlim, cartopy_ylim, latlon_coords
# Open the NetCDF file
ncfile = Dataset("/Users/ladwig/Documents/wrf_files/wrf_vortex_multi/wrfout_d01_2005-08-28_12:00:00")
# Cloud fraction at vertical level index 2, plus sea level pressure.
cfrac = getvar(ncfile, "cfrac")[2, :]
slp = getvar(ncfile, "slp")
# BUG FIX: the original called latlon_coords/get_cartopy/cartopy_xlim/ylim on
# `ctt`, a leftover variable from a previous cell, so this cell was not
# self-contained. Both fields live on the same WRF grid, so using `cfrac`
# here produces the same coordinates and projection.
lats, lons = latlon_coords(cfrac)
# Get the cartopy mapping object
cart_proj = get_cartopy(cfrac)
# Create a figure
fig = plt.figure(figsize=(12,9))
# Set the GeoAxes to the projection used by WRF
ax = plt.axes(projection=cart_proj)
# Download and add the states and coastlines
states = NaturalEarthFeature(category='cultural', scale='50m', facecolor='none',
                             name='admin_1_states_provinces_shp')
ax.add_feature(states, linewidth=.5)
ax.coastlines('50m', linewidth=0.8)
# SLP as line contours; cloud fraction (0..1) as filled contours in 0.1 steps.
levels = np.arange(0, 1.1, .1)
plt.contour(to_np(lons), to_np(lats), to_np(slp), 10, colors="black",
            transform=crs.PlateCarree())
plt.contourf(to_np(lons), to_np(lats), to_np(cfrac), levels=levels, transform=crs.PlateCarree(),
             cmap=get_cmap("Greys_r"))
# Add a color bar
plt.colorbar(ax=ax, shrink=.88)
# Set the map limits. Not really necessary, but used for demonstration.
ax.set_xlim(cartopy_xlim(cfrac))
ax.set_ylim(cartopy_ylim(cfrac))
# Add the gridlines
ax.gridlines(color="black", linestyle="dotted")
plt.title("Cloud Fraction")
plt.show()
|
test/ipynb/Test_CTT.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import decimal
# Constructing Decimal from a *float* captures the binary approximation of 0.1:
decimal.Decimal(0.1)
# Constructing from a *string* gives the exact decimal value:
decimal.Decimal('.1')
a = decimal.Decimal('.1')
b = decimal.Decimal('.2')
a + b  # exact: Decimal('0.3')
a = decimal.Decimal(.1)
b = decimal.Decimal(.2)
a + b  # sum of two binary approximations — not exactly 0.3
# Plain float sums: some display "cleanly", others expose representation error.
0.1 + 0.6
0.1 + 0.7
0.1 + 0.8
0.1 + 0.2  # the classic 0.30000000000000004
0.1 + 0.3
0.2 + 0.3
0.2 + 0.5
0.1
|
python-tuts/0-beginner/Crazee Math.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import scipy
import thinkbayes2 as thb
import thinkplot
import numpy as np
from matplotlib import pyplot as plt
class Coin(thb.Beta):
    """Beta-distributed coin-bias estimator that plots its posterior after each update."""

    def __init__(self, a, b):
        """Start from a Beta(a, b) prior and open one figure for all posterior traces."""
        plt.figure(figsize=(15,5))
        super().__init__(a, b)
        self.n = 0  # number of updates performed so far

    def Draw(self, label, steps=401):
        """Plot the current posterior PMF; later curves are drawn more opaque."""
        pmf = self.MakePmf(steps, label=label)
        opacity = (self.n + 1) / 10
        thinkplot.Pmf(pmf, color="gray", alpha=opacity)

    def Show(self):
        """Finalize the legend and display the accumulated posterior plots."""
        thinkplot.Config(legend=True)
        plt.show()

    def Update(self, data):
        """Fold a (heads, tails) observation into the posterior and draw the new curve."""
        self.n += 1
        super().Update(data)
        self.Draw(str(self.n))
# Sequentially update the posterior with (heads, tails) observations; each
# call draws an increasingly opaque posterior curve on the shared figure.
c = Coin(1,1)  # uniform Beta(1, 1) prior
c.Update((0,1))
c.Update((0,1))
c.Update((1,0))
c.Update((0,1))
c.Update((0,1))
c.Update((1,2))
c.Update((4,3)) # 6 in 15
c.Update((18, 17)) # 24 in 50
c.Update((218, 232)) # 242 in 500
c.Show()
# +
# Poisson pmfs for two rates, drawn as overlapping bar charts.
# BUG FIX: `import scipy` alone does not import the stats submodule; the
# original relied on another library having imported scipy.stats as a side
# effect. Import it explicitly.
import scipy.stats
x = np.arange(16)
lambda_ = [1.5, 4.25]
colors = ["#348ABD","#A60628"]
plt.figure(figsize=(12,4))
# Raw strings so "\l" in "\lambda" is not treated as a (deprecated) invalid
# string escape.
plt.bar(x, scipy.stats.poisson.pmf(x, lambda_[0]),
        color=colors[0], alpha=0.6, label=r"$\lambda={}$".format(lambda_[0]), lw="3")
plt.bar(x, scipy.stats.poisson.pmf(x, lambda_[1]),
        color=colors[1], alpha=0.6, label=r"$\lambda={}$".format(lambda_[1]), lw="3")
plt.xticks(x+0.4, x)  # shift tick labels toward the bar centers
plt.legend()
plt.show()
# +
# Exponential pdfs for two rates; note `colors` is reused from the previous
# cell, and scipy.stats is assumed to be importable — TODO confirm it is
# loaded (a bare `import scipy` does not guarantee the stats submodule).
a = np.linspace(0,4,100)
lambda_ = [0.5, 1.5]
plt.figure(figsize=(12,4))
for l,c in zip(lambda_, colors):
    # scale = 1/rate is scipy's parameterization of the exponential.
    plt.plot(a, scipy.stats.expon.pdf(a, scale=1/l), lw=3, color=c)
    plt.fill_between(a, scipy.stats.expon.pdf(a, scale=1/l), color=c, alpha=0.33)
plt.ylim(0,2)
plt.xlabel("$z$")
plt.ylabel("$z$ pdf")
plt.show()
|
code/0book2-ch1.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Using the Cost-Benefit Matrix Objective
#
# The Cost-Benefit Matrix (`CostBenefitMatrix`) objective is an objective that assigns costs to each of the quadrants of a confusion matrix to quantify the cost of being correct or incorrect.
# ## Confusion Matrix
# [Confusion matrices](https://en.wikipedia.org/wiki/Confusion_matrix) are tables that summarize the number of correct and incorrectly-classified predictions, broken down by each class. They allow us to quickly understand the performance of a classification model and where the model gets "confused" when it is making predictions. For the binary classification problem, there are four possible combinations of prediction and actual target values possible:
#
# - true positives (correct positive assignments)
# - true negatives (correct negative assignments)
# - false positives (incorrect positive assignments)
# - false negatives (incorrect negative assignments)
# An example of how to calculate a confusion matrix can be found [here](../user_guide/model_understanding.ipynb).
# ## Cost-Benefit Matrix
#
# Although the confusion matrix is an incredibly useful visual for understanding our model, each prediction that is correctly or incorrectly classified is treated equally. For example, for detecting breast cancer, the confusion matrix does not take into consideration that it could be much more costly to incorrectly classify a malignant tumor as benign than it is to incorrectly classify a benign tumor as malignant. This is where the cost-benefit matrix shines: it uses the cost of each of the four possible outcomes to weigh each outcome differently. By scoring using the cost-benefit matrix, we can measure the score of the model by a concrete unit that is more closely related to the goal of the model. In the below example, we will show how the cost-benefit matrix objective can be used, and how it can give us better real-world impact when compared to using other standard machine learning objectives.
# ## Customer Churn Example
# #### Data
#
# In this example, we will be using a customer churn data set taken from [Kaggle](https://www.kaggle.com/blastchar/telco-customer-churn?select=WA_Fn-UseC_-Telco-Customer-Churn.csv).
#
#
# This dataset includes records of over 7000 customers, and includes customer account information, demographic information, services they signed up for, and whether or not the customer "churned" or left within the last month.
#
# The target we want to predict is whether the customer churned ("Yes") or did not churn ("No"). In the dataset, approximately 73.5% of customers did not churn, and 26.5% did. We will refer to the customers who churned as the "positive" class and the customers who did not churn as the "negative" class.
# +
from rayml.demos.churn import load_churn
from rayml.preprocessing import split_data
# Load the Telco customer-churn demo dataset (features X, target y).
X, y = load_churn()
X.ww.set_types({'PaymentMethod':'Categorical', 'Contract': 'Categorical'}) # Update data types Woodwork did not correctly infer
# 70/30 train/holdout split with a fixed seed for reproducibility.
X_train, X_holdout, y_train, y_holdout = split_data(X, y, problem_type='binary', test_size=0.3, random_seed=0)
# -
# In this example, let's say that correctly identifying customers who will churn (true positive case) will give us a net profit of \\$400, because it allows us to intervene, incentivize the customer to stay, and sign a new contract. Incorrectly classifying customers who were not going to churn as customers who will churn (false positive case) will cost \\$100 to represent the marketing and effort used to try to retain the user. Not identifying customers who will churn (false negative case) will cost us \\$200 to represent the lost in revenue from losing a customer. Finally, correctly identifying customers who will not churn (true negative case) will not cost us anything (\\$0), as nothing needs to be done for that customer.
# We can represent these values in our `CostBenefitMatrix` objective, where a negative value represents a cost and a positive value represents a profit--note that this means that the greater the score, the more profit we will make.
from rayml.objectives import CostBenefitMatrix
# Dollar value of each confusion-matrix quadrant (negative = cost):
# retaining a correctly-flagged churner nets $400, chasing a non-churner
# costs $100, missing a churner costs $200, true negatives cost nothing.
cost_benefit_matrix = CostBenefitMatrix(true_positive=400,
                                        true_negative=0,
                                        false_positive=-100,
                                        false_negative=-200)
# #### AutoML Search with Log Loss
#
# First, let us run AutoML search to train pipelines using the default objective for binary classification (log loss).
# +
from rayml import AutoMLSearch
# Baseline search: optimize the default binary-classification objective (log loss).
automl = AutoMLSearch(X_train=X_train, y_train=y_train, problem_type='binary', objective='log loss binary',
                      max_iterations=5, verbose=True)
automl.search()
ll_pipeline = automl.best_pipeline
# Score the best pipeline on the holdout set with log loss.
ll_pipeline.score(X_holdout, y_holdout, ['log loss binary'])
# -
# When we train our pipelines using log loss as our primary objective, we try to find pipelines that minimize log loss. However, our ultimate goal in training models is to find a model that gives us the most profit, so let's score our pipeline on the cost benefit matrix (using the costs outlined above) to determine the profit we would earn from the predictions made by this model:
# Per-customer profit implied by the log-loss-optimized pipeline.
ll_pipeline_score = ll_pipeline.score(X_holdout, y_holdout, [cost_benefit_matrix])
print (ll_pipeline_score)
# Calculate total profit across all customers using pipeline optimized for Log Loss
total_profit_ll = ll_pipeline_score['Cost Benefit Matrix'] * len(X)
print (total_profit_ll)
# #### AutoML Search with Cost-Benefit Matrix
#
# Let's try rerunning our AutoML search, but this time using the cost-benefit matrix as our primary objective to optimize.
# +
# Re-run the search, this time optimizing the cost-benefit matrix directly.
automl = AutoMLSearch(X_train=X_train, y_train=y_train, problem_type='binary', objective=cost_benefit_matrix,
                      max_iterations=5, verbose=True)
automl.search()
cbm_pipeline = automl.best_pipeline
# -
# Now, if we calculate the cost-benefit matrix score on our best pipeline, we see that with this pipeline optimized for our cost-benefit matrix objective, we are able to generate more profit per customer. Across our 7043 customers, we generate much more profit using this best pipeline! Custom objectives like `CostBenefitMatrix` are just one example of how using rayml can help find pipelines that can perform better on real-world problems, rather than on arbitrary standard statistical metrics.
# Per-customer profit of the pipeline optimized for the cost-benefit matrix.
cbm_pipeline_score = cbm_pipeline.score(X_holdout, y_holdout, [cost_benefit_matrix])
print (cbm_pipeline_score)
# Calculate total profit across all customers using pipeline optimized for CostBenefitMatrix
total_profit_cbm = cbm_pipeline_score['Cost Benefit Matrix'] * len(X)
print (total_profit_cbm)
# Calculate difference in profit made using both pipelines
profit_diff = total_profit_cbm - total_profit_ll
print (profit_diff)
# Finally, we can graph the confusion matrices for both pipelines to better understand why the pipeline trained using the cost-benefit matrix is able to correctly classify more samples than the pipeline trained with log loss: we were able to correctly predict more cases where the customer would have churned (true positive), allowing us to intervene and prevent those customers from leaving.
# +
from rayml.model_understanding.graphs import graph_confusion_matrix
# Compare holdout confusion matrices of the two pipelines.
# pipeline trained with log loss
y_pred = ll_pipeline.predict(X_holdout)
graph_confusion_matrix(y_holdout, y_pred)
# -
# pipeline trained with cost-benefit matrix
y_pred = cbm_pipeline.predict(X_holdout)
graph_confusion_matrix(y_holdout, y_pred)
|
docs/source/demos/cost_benefit_matrix.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: py3
# language: python
# name: py3
# ---
# +
# Build a toy 3-class 2-D dataset: three normally-drawn point clouds, each
# min-max-normalized into its own unit square so the classes occupy
# different regions of the plane.
import numpy as np  # BUG FIX: np was used below but never imported in this notebook
import matplotlib.pyplot as plt
# %matplotlib inline
x1 = np.random.normal(loc=1, size=100)
x1 = (x1-min(x1))/(max(x1)-min(x1))        # class 0: [0, 1] x [0, 1]
x2 = np.random.normal(loc=1, size=100)
x2 = (x2-min(x2))/(max(x2)-min(x2))
y = np.ones(100)*0
x1a = np.random.normal(loc=1, size=100)
# BUG FIX: the original normalized x1/x2 again here (copy-paste), which made
# class 1 a deterministic shifted copy of class 0 and discarded the freshly
# drawn samples.
x1a = (x1a-min(x1a))/(max(x1a)-min(x1a)) -1   # class 1: [-1, 0] x [-1, 0]
x2a = np.random.normal(loc=1, size=100)
x2a = (x2a-min(x2a))/(max(x2a)-min(x2a)) -1
ya = np.ones(100)
x1b = np.random.normal(loc=1, size=100)
x1b = (x1b-min(x1b))/(max(x1b)-min(x1b)) -1   # class 2: [-1, 0] x [0, 1]
x2b = np.random.normal(loc=1, size=100)
x2b = (x2b-min(x2b))/(max(x2b)-min(x2b))
yb = np.ones(100)*2
plt.scatter(x1,x2)
plt.scatter(x1a,x2a)
plt.scatter(x1b,x2b)
plt.axvline(0)
plt.axhline(0)
# -
# BUG FIX: torch is first used here but only imported in a later cell;
# import it locally so this cell runs standalone.
import torch
# One-hot-style target construction with Tensor.scatter_: write 1.0 into
# columns 0, 2 and 4 of a zeros tensor shaped like `output`.
output = torch.tensor([[0.3416, 0.6336, 0.3775, 0.2556, 0.6288]])
target = torch.zeros_like(output)
target_indices = torch.tensor([[0,2,4]])
target.scatter_(1, target_indices, 1)
target
# Assemble the (300, 2) feature matrix and (300,) label vector from the
# three per-class coordinate arrays built above.
x_train1 = np.column_stack([x1, x2])
x_train2 = np.column_stack([x1a, x2a])
x_train3 = np.column_stack([x1b, x2b])
x_train = np.vstack([x_train1, x_train2, x_train3])
y_train = np.concatenate([y, ya, yb])
# +
import torch

# Teacher network: a deliberately over-parameterized MLP (2 -> 100 -> 50 -> 3)
# for the 3-class toy problem.
# https://pytorch.org/tutorials/beginner/pytorch_with_examples.html
teacher_model = torch.nn.Sequential(
    torch.nn.Linear(2, 100),
    torch.nn.ReLU(),
    torch.nn.Linear(100, 50),
    torch.nn.ReLU(),
    torch.nn.Linear(50, 3),
)


def _init_linear(layer):
    """Xavier-normal weights and standard-normal biases for one Linear layer."""
    torch.nn.init.xavier_normal_(layer.weight.data)
    if layer.bias is not None:
        torch.nn.init.normal_(layer.bias.data)


for layer in teacher_model.children():
    if isinstance(layer, torch.nn.Linear):
        _init_linear(layer)
# +
# Train the teacher on the full toy dataset.
teacher_model.train()
# BUG FIX: CrossEntropyLoss expects *raw logits* (it applies log-softmax
# internally); the original passed LogSoftmax output, double-softmaxing and
# distorting the loss surface.
loss_fn = torch.nn.CrossEntropyLoss()
lr = 1e-4
optimizer = torch.optim.Adam(teacher_model.parameters(), lr=lr)
y_train_ = torch.LongTensor(y_train)
x_train_ = torch.tensor(x_train, dtype=torch.float32)
for i in range(10000):
    y_pred = teacher_model(x_train_)  # logits, shape (N, 3)
    loss = loss_fn(y_pred, y_train_)
    optimizer.zero_grad()
    loss.backward()
    optimizer.step()
# Evaluate on the training data.
teacher_model.eval()
y_pred = teacher_model(x_train_)
loss = loss_fn(y_pred, y_train_)
print(loss)
# Training accuracy (%); argmax over logits equals argmax over (log-)softmax.
sum(torch.argmax(y_pred, dim=1) == y_train_).item()/len(y_train_) * 100
# -
# # KD
# - hyperparameters might matter
# - the student model by itself can converge on the training data when trained alone
# - but under KD, the student model's performance is poor.
# - a hyperparameter grid search seems to be required
# +
# Knowledge distillation: for each soft-loss weight lambda, train a fresh
# student against the hard labels plus the frozen teacher's soft targets.
import torch.nn.functional as F  # BUG FIX: F was used below but never imported

y_train_ = torch.LongTensor(y_train)
x_train_ = torch.tensor(x_train, dtype=torch.float32)
# Freeze the teacher; it only provides soft targets.
for p in teacher_model.parameters():
    p.requires_grad_(False)
teacher_model.eval()
loss_fn = torch.nn.CrossEntropyLoss()
lr = 1e-3
loss_ls_ls = []
for lambda_factor in np.linspace(1,10,10):
    # BUG FIX: the original (a) re-created the student inside the innermost
    # iteration, so it was reset to random weights every step and never
    # trained, and (b) built the optimizer over the *teacher's* (frozen)
    # parameters, so no student weight was ever updated. Create one student
    # per lambda and optimize *its* parameters.
    student_model = torch.nn.Sequential(
        torch.nn.Linear(2,50),
        torch.nn.ReLU(),
        torch.nn.Linear(50,25),
        torch.nn.ReLU(),
        torch.nn.Linear(25,3),
    )
    for m in student_model.children():
        if isinstance(m, torch.nn.Linear):
            torch.nn.init.xavier_normal_(m.weight.data)
            if m.bias is not None:
                torch.nn.init.normal_(m.bias.data)
    student_model.train()
    optimizer = torch.optim.Adam(student_model.parameters(), lr=lr)
    loss_ls = []
    # Anneal the distillation temperature t from 1 to 10 over training.
    for i, t in zip(range(10000), np.linspace(1, 10, 10000)):
        y_output = student_model(x_train_)
        y_lesson = teacher_model(x_train_)
        # KL(teacher || student) on temperature-scaled distributions.
        # BUG FIX: F.kl_div expects log-probabilities as input and
        # *probabilities* as target (unless log_target=True); the original
        # passed log-probabilities for both.
        l1 = F.log_softmax(y_output/t, dim=1)
        l2 = F.softmax(y_lesson/t, dim=1)
        kl_loss = F.kl_div(l1, l2, reduction='batchmean')
        # Hard-label loss on raw logits + lambda * t^2 * soft-target loss.
        loss = loss_fn(y_output, y_train_) + lambda_factor*(t**2)*kl_loss
        loss_ls.append(loss.detach().cpu().numpy().item())
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
    loss_ls_ls.append(loss_ls)
# Report training accuracy of the last student vs. the teacher.
student_model.eval()
student_output = student_model(x_train_)  # raw logits
loss = loss_fn(student_output, y_train_)
print(loss)
print('student model accuracy(%):', sum(torch.argmax(student_output, dim=1) == y_train_).item()/len(y_train_) * 100)
teacher_output = teacher_model(x_train_)
loss = loss_fn(teacher_output, y_train_)
print(loss)
print('teacher model accuracy(%):', sum(torch.argmax(teacher_output, dim=1) == y_train_).item()/len(y_train_) * 100)
print(torch.argmax(student_output, dim=1))
plt.plot(loss_ls)  # loss curve of the last lambda value
|
Knowledge_Distillation/kd_implement_code.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# Basic setup: configure logging, silence warnings, and import the Rasa
# NLU/Core stacks plus spaCy (used by the NLU pipeline below).
import logging, io, json, warnings
logging.basicConfig(level="INFO")
warnings.filterwarnings('ignore')
import rasa_nlu
import rasa_core
import spacy
# +
# NLU training data in Rasa Markdown format: example utterances per intent.
# [word](group:value) spans label the `group` entity used later to choose an
# animal picture. Misspellings ("cee you later", "extremly sad") are
# deliberate training examples and must not be "fixed".
nlu_md = """
## intent:greet
- hey
- hello there
- hi
- hello there
- good morning
- good evening
- moin
- hey there
- let's go
- hey dude
- goodmorning
- goodevening
- good afternoon
## intent:goodbye
- cu
- good by
- cee you later
- good night
- good afternoon
- bye
- goodbye
- have a nice day
- see you around
- bye bye
- see you later
## intent:mood_affirm
- yes
- indeed
- of course
- that sounds good
- correct
## intent:mood_deny
- no
- never
- I don't think so
- don't like that
- no way
- not really
## intent:mood_great
- perfect
- very good
- great
- amazing
- feeling like a king
- wonderful
- I am feeling very good
- I am great
- I am amazing
- I am going to save the world
- super
- extremely good
- so so perfect
- so good
- so perfect
## intent:mood_unhappy
- my day was horrible
- I am sad
- I don't feel very well
- I am disappointed
- super sad
- I'm so sad
- sad
- very sad
- unhappy
- bad
- very bad
- awful
- terrible
- not so good
- not very good
- extremly sad
- so saad
- Quite bad - can I get a cute picture of a [bird](group:birds), please?
- Really bad and only [doggo](group:shibes) pics and change that.
- Not good. The only thing that could make me fell better is a picture of a cute [kitten](group:cats).
- so sad. Only the picture of a [puppy](group:shibes) could make it better.
- I am very sad. I need a [cat](group:cats) picture.
- Extremely sad. Only the cute [doggo](group:shibes) pics can make me feel better.
- Bad. Please show me a [bird](group:birds) pic!
- Pretty bad to be honest. Can you show me a [puppy](group:shibes) picture to make me fell better?
## intent: inform
- A [dog](group:shibes)
- [dog](group:shibes)
- [bird](group:birds)
- a [cat](group:cats)
- [cat](group:cats)
- a [bird](group:birds)
- of a [dog](group:shibes)
- of a [cat](group:cats)
- a [bird](group:birds), please
- a [dog](group:shibes), please
"""
# %store nlu_md > nlu.md
# +
# NLU pipeline configuration: spaCy tokenizer/featurizer, CRF entity
# extraction, sklearn intent classifier, and synonym mapping.
config = """
language: "en"
pipeline:
- name: "nlp_spacy"
- name: "tokenizer_spacy"
- name: "ner_crf"
- name: "intent_featurizer_spacy"
- name: "intent_classifier_sklearn"
- name: "ner_synonyms"
"""
# %store config > config.yml
# +
from rasa_nlu.training_data import load_data
from rasa_nlu.config import RasaNLUModelConfig
from rasa_nlu.model import Trainer
from rasa_nlu import config
# loading the nlu training samples (written to nlu.md by the %store magic above)
training_data = load_data("nlu.md")
# trainer to educate our pipeline
trainer = Trainer(config.load("config.yml"))
# train the model!
interpreter = trainer.train(training_data)
# store it for future use
model_directory = trainer.persist("./models/nlu", fixed_model_name="current")
# +
def pprint(o):
    """Pretty-print an object (e.g. an NLU parse result) as indented JSON."""
    print(json.dumps(o, indent=2))

# BUG FIX: the original called plain print() here, leaving the pprint helper
# defined but unused and dumping the parse result as a one-line dict.
pprint(interpreter.parse("I am very sad. Could you send me a cat picture? "))
# -
from rasa_nlu.test import run_evaluation
# Sanity check: evaluate the trained interpreter against its own training data.
run_evaluation("nlu.md", model_directory)
# Training stories for the dialogue policy: intent -> action sequences.
# NOTE(review): Rasa stories normally indent action lines under their intent;
# the nesting indentation appears to have been lost in this copy — verify
# against the original notebook before writing stories.md.
stories_md = """
## happy path
* greet
- utter_greet
* mood_great
- utter_happy
* mood_affirm
- utter_happy
* mood_affirm
- action_default_ask_affirmation
## sad path
* greet
- utter_greet
* mood_unhappy
- action_revert_fallback_events
* inform{"animal":"dog"}
- action_default_ask_rephrase
- utter_did_that_help
* mood_affirm
- utter_happy
"""
# %store stories_md > stories.md
# +
# Dialogue domain: intents, the `group` slot/entity, available actions, and
# response templates. Written to domain.yml by the %store magic below.
# NOTE(review): valid YAML requires nested keys (group/type, utter_*/text) to
# be indented; the indentation appears to have been stripped in this copy —
# verify against the original notebook.
domain_yml="""
intents:
- greet
- goodbye
- mood_affirm
- mood_deny
- mood_great
- mood_unhappy
- inform
slots:
group:
type: text
entities:
- group
actions:
- utter_greet
- utter_did_that_help
- utter_happy
- utter_goodbye
- utter_unclear
- utter_ask_picture
- __main__.ApiAction
templates:
utter_greet:
- text: "Hey! How are you?"
utter_did_that_help:
- text: "Did that help you?"
utter_unclear:
- text: "I am not sure what you are aiming for."
utter_happy:
- text: "Great carry on!"
utter_goodbye:
- text: "Bye"
utter_ask_picture:
- text: "To cheer you up, I can show you a cute picture of a dog, cat or a bird. Which one do you choose?"
"""
# %store domain_yml > domain.yml
# +
from rasa_core.actions import Action
from rasa_core.events import SlotSet
from IPython.core.display import Image, display
import requests
class ApiAction(Action):
    """Custom Rasa action that fetches one animal-picture URL from shibe.online."""
    def name(self):
        # Name under which stories/the domain reference this action.
        return "action_retrieve_image"
    def run(self, dispatcher, tracker, domain):
        # The `group` slot holds the API category (e.g. shibes/cats/birds).
        group = tracker.get_slot('group')
        r = requests.get('http://shibe.online/api/{}?count=1&urls=true&httpsUrls=true'.format(group))
        response = r.content.decode()
        # The API returns a JSON-style list literal like ["<url>"]; strip the
        # surrounding brackets/quotes to leave the bare URL in `response`.
        response = response.replace('["',"")
        response = response.replace('"]',"")
        # NOTE(review): if re-enabled, this should pass `response` (the URL),
        # not `response[0]` (its first character).
        #display(Image(response[0], height=550, width=520))
# -
# +
from rasa_core.policies import FallbackPolicy, KerasPolicy, MemoizationPolicy
from rasa_core.agent import Agent
# NOTE(review): the two imports below duplicate names already imported above.
from rasa_core.policies.memoization import MemoizationPolicy
from rasa_core.policies.keras_policy import KerasPolicy
from rasa_core.agent import Agent
# The fallback action runs when intent-recognition confidence is below
# nlu_threshold, or when no dialogue policy predicts an action with
# confidence above core_threshold.
fallback = FallbackPolicy(fallback_action_name="utter_greet",core_threshold=0.2,nlu_threshold=0.1)
# Combine exact story recall (memoization), a Keras policy, and the fallback.
agent = Agent('domain.yml', policies=[MemoizationPolicy(), KerasPolicy(), fallback])
# loading our neatly defined training dialogues
training_data = agent.load_data('stories.md')
agent.train(training_data)
agent.persist('models/dialogue')  # save the trained dialogue model
# -
from rasa_core.agent import Agent
# Load the trained dialogue model together with the persisted NLU interpreter.
agent = Agent.load('models/dialogue', interpreter=model_directory)
print("Your bot is ready to talk! Type your messages here or send 'stop'")
# Simple console chat loop; type 'stop' to exit.
while True:
    a = input()
    if a == 'stop':
        break
    responses = agent.handle_text(a)
    for response in responses:
        print(response["text"])
|
Untitled.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] tags=["remove_cell"]
# # The Case for Quantum Computers
# + [markdown] tags=["contents"]
# ## Contents
#
# 1. [The Complexity of Adding](#adding)
# 2. [Big O Notation](#big-o)
# 3. [Complexity Theory](#complexity)
# 4. [Beyond Digital Computation](#beyond)
# 5. [When to Use a Quantum Computer](#when)
# 6. [References](#references)
# -
# ## 1. The Complexity of Adding <a id="adding"></a>
#
# The case for quantum computers, simply put, is that they can solve certain problems that no classical computer ever could. To understand why this is, we first need to consider how much computational effort is required to solve certain problems.
#
# To begin, we can revisit the algorithm considered in the first section: adding two numbers.
#
# ```
# 9213
# + 1854
# = ????
# ```
#
# Adding two $n$-digit numbers can be done with a set of simple operations, each of which consists of just adding two single-digit numbers. To analyze the complexity of the procedure, we can think about how many of these basic additions are required and how this number depends on $n$. We'll refer to this number as $c(n)$.
#
# In the easiest case, where we don't need to carry a 1 at any point, only $n$ basic additions are required. In the worst case, we will need to perform $n$ carry operations, each of which will require an extra basic addition. From these considerations, we can conclude that $n \leq c(n) \leq 2n$.
#
#
# ## 2. Big O Notation <a id="big-o"></a>
#
# We can summarize this result by saying that $c(n)$ grows linearly with $n$. More generally, we can say that a linear function of $n$ can be found which acts as an upper bound for $c(n)$ when $n$ is large. Since this is a long and wordy sentence, we won't actually want to say this very often. Instead, we can express it more compactly using 'big O notation'.
#
# <p>
# <details>
# <summary>Definition: Big O notation (Click to expand)</summary>
# For some example functions $f(x)$ and $g(x)$ and parameter $x$, the statement $f(x) = O(g(x))$ means that there exist some finite numbers $M>0$ and $x_0$ such that
# $$
# f(x) \leq M g(x) \quad \forall \, x > x_0.
# $$
# </details>
# </p>
#
# Big O notation is useful as it allows us to compare how the resources/runtime required by an algorithm scale with input size, independent of the specific platform and algorithm implementation under consideration. Below are examples of common scaling factors of a runtime $N$ as a function of input size $n$; it is clear that for a sufficiently large problem size the runtime of a $O(a^n)$ algorithm will exceed that of a $O(n^b)$ algorithm, where $a$ and $b$ are constants.
#
# <figure>
# <img src="images/1920px-Comparison_computational_complexity.png" alt="Drawing" style="max-width: 400px;"/>
# <figcaption>Comparisons of different time complexities. n is the number of input bits, and N is the number of operations required. [5]</figcaption>
# </figure>
#
# With this notation, the property described above is expressed simply as $c(n) = O(n)$. This captures the linear behavior without needing to dwell on the specifics. Therefore, independent of whether $c(n) = n$, $c(n) = 2n$, or something else, we can simply say that $c(n) = O(n)$.
#
# There is a hidden assumption in what we have considered so far. By talking about the number of digits, we have assumed the use of a specific number system. However, the number of digits will depend on which number system we are using, be it decimal, binary, or something else. For example, the number of bits $n_2$ required to express a number is related to the number of decimal digits $n_{10}$ required to express the same number by
#
# $n_2 = \left\lceil \frac{\log 10}{ \log 2} \, n_{10} \right\rceil \approx 3.3 \, n_{10}.$
#
# Since this too is a linear relationship, it does not change how we express the complexity using big O notation. We can equally say that $c(n_2) = O(n_2)$, $c(n_{10}) = O(n_{10})$, or even $c(n_{10}) = O(n_{2})$. It is for this reason that we can often simply speak of the number of digits, $n$, without needing to specify what number system is used.
#
#
# ## 3. Complexity Theory <a id="complexity"></a>
#
# Complexity theory is the study of the computational effort required to run any algorithm. By considering the best possible algorithm to solve a given problem, we can also study the computational effort inherent in solving this problem. For addition we already know the optimal algorithm, and so know that it is a problem with $O(n)$ complexity.
#
# Multiplication is not quite so simple. Algorithms you learned at school for multiplying two $n$-digit numbers will have required $O(n^2)$ basic operations, such as single-digit additions and multiplications. Though algorithms with lower asymptotic complexity have been found, it is widely regarded as impossible to perform multiplication with $O(n)$ complexity.
#
# Even so, multiplication is far from being the most complex problem. An example of a problem with far greater complexity is factorization: taking an $n$-digit number and finding its prime factors. The best known algorithm in this case has a complexity that is worse than $O\left(e^{n^{1/3}}\right)$. The exponential here means that the complexity grows very quickly and makes factorization a very hard problem to solve.
#
# To demonstrate this point using actual computation time, we can take a recent example.$^{1}$ Consider the following 829-digit number.
# RSA-250: the 829-digit RSA challenge number, factored in February 2020.
# Used here to illustrate how hard factorization is compared with multiplication.
rsa_250 = 2140324650240744961264423072839333563008614715144755017797754920881418023447140136643345519095804679610992851872470914587687396261921557363047454770520805119056493106687691590019759405693457452230589325976697471681738069364894699871578494975937497937
# If you try using your computer to add or multiply numbers of this size, you'll find that it can solve such problems very quickly. If you multiply the number of processors your computer has by the number of seconds it takes to get the number of core-seconds, you are sure to find that very much less than 1 core-second is required.
#
# However, performing factorization on this number requires a supercomputer and around 2700 core-years, which eventually yields the following two factors.
# The two prime factors produced by the RSA-250 factorization effort.
p = 64135289477071580278790190170577389084825014742943447208116859632024532344630238623598752668347708737661925585694639798853367
q = 33372027594978156556226010605355114227940760344767554666784520987023841729210037080257448673296881877565718986258036932062711
# Sanity check: multiplying the factors (fast!) reproduces rsa_250.
p*q
# For the factorization of larger numbers, we easily get to a point where a planet-sized supercomputer would need to run for the age of the universe. Clearly, any such problem is practically impossible.
#
# So far we have considered only mathematical operations on $n$-digit numbers, with the complexity expressed as the number of simple single-digit operations required. However, complexity theory can be used to analyze any computational method for any kind of problem, be it searching databases, rendering graphics, simulating dynamics, or traversing a dungeon in *Legend of Zelda*. In each case, we are able to find a parameter or set of parameters that serve as our input size and express the complexity in terms of this input size using big O notation. For searching a database of $N$ entries, for example, the complexity is $O(N)$.
#
# Formally, defining the complexity of an algorithm depends on the exact theoretical model for computation we are using. Each model has a set of basic operations, known as primitive operations, with which any algorithm can be expressed. For Boolean circuits, as we considered in the first section, the primitive operations are the logic gates. For Turing machines, a hypothetical form of computer proposed by <NAME>, we imagine a device stepping through and manipulating information stored on a tape. The RAM model has a more complex set of primitive operations and acts as an idealized form of the computers we use every day. All these are models of digital computation, based on discretized manipulations of discrete values. Different as they may seem from each other, it turns out that it is very easy for each of them to simulate the others. This means that in most cases the computational complexity does not significantly depend on which of these models is used. Rather than stating complexity specifically for the RAM model or Turing machines, we can therefore simply speak of the complexity for digital computers.
# ## 4. Beyond Digital Computation <a id="beyond"></a>
#
# Though digital computers are dominant now, they are not the only form of computation. Analog computers were also widely studied and used in the past. Unlike the discrete values of digital computers, these are based on precise manipulations of continuously varying parameters. It has sometimes been claimed that such devices could quickly solve problems that are intractable for digital computers. However, such claims have never been realized. A major stumbling block for analog computers is the inability to build devices with arbitrarily high precision. In digital computers, the discretization means that errors must be relatively large in order to be noticeable, and methods for detecting and correcting such errors can then be implemented. In analog computers, however, errors can be arbitrarily small and impossible to detect, but still their effects can build up to ruin a computation.
#
# If one were to propose an ideal model of computation, it might seek to combine the robustness of a digital computer with the subtle manipulations of an analog computer. To achieve this we can look to quantum mechanics. We have already seen that qubits are a system with discrete outputs `0` and `1`, and yet can exist in states that can only be described by continuous parameters. This is a particular instance of the well-known notion of 'wave-particle' duality that is typical of quantum systems. They cannot be fully described as either discrete or continuous, but rather a combination of the two. As Einstein said,$^{2}$
#
# > 'It seems as though we must use sometimes the one theory and sometimes the other, while at times we may use either. We are faced with a new kind of difficulty. We have two contradictory pictures of reality; separately neither of them fully explains the phenomena...but together they do.'
#
# A quantum computer, whose primitive operations are gates applied to qubits, is therefore neither analog nor digital, but something unique. In further chapters we will explore the consequences of this unique nature. We will see that quantum computers can solve problems with a radically different complexity to digital computers. In fact, quantum computing is the only known technology that can be exponentially faster than classical computers for certain tasks, potentially reducing calculation times from years to minutes. We will also explore how quantum error correction can remove the effects of any imperfections.
# ## 5. When to Use a Quantum Computer <a id="when"></a>
#
# With qubits and quantum gates, we can design novel algorithms that are fundamentally different from digital and analog classical ones. In this way, we hope to find solutions to problems that are intractable for classical computers.
#
# One way in which this can be done is when we have some function for which we want to determine a global property. For example, if we want to find the value of some parameter $x$ for which some function $f(x)$ is a minimum, or the period of the function if $f(x)$ is periodic. An algorithm on a digital computer might use a process in which $f(x)$ is computed for a variety of different inputs in order to get sufficient information about the global property. With a quantum computer, however, the fact that we can create superposition states means that the function can be applied to many possible inputs simultaneously. This does not mean that we can access all possible outputs since measurement of such a state simply gives us a single result. However, we can instead seek to induce a quantum interference effect, which will reveal the global property we require.
#
# This general description illustrates the workings of many of the quantum algorithms that have already been discovered. One prominent example is Grover's algorithm, which reduces the complexity of searching through $N$ items from $O(N)$ to $O(N^{1/2})$. This quadratic speedup could be useful in many applications with tasks that can be expressed as an unstructured search, such as optimization problems and machine learning.
# + tags=["remove_input"]
# This code is to create the interactive figure comparing the O(N) cost of
# classical search with the O(sqrt(N)) cost of quantum (Grover) search, with a
# slider controlling the assumed relative speed of the quantum computer.
from bokeh.layouts import column
from bokeh.models import ColumnDataSource, CustomJS, Slider
from bokeh.plotting import figure, show
from bokeh.embed import file_html
from bokeh.resources import CDN
import numpy as np
import IPython
# Problem sizes and the two cost curves; 7.5 matches the slider's default value.
x = np.arange(0,500)
y_linear = x
y_sqrt = 7.5*np.sqrt(x)
linear_source = ColumnDataSource(data=dict(x=x, y=y_linear))
sqrt_source = ColumnDataSource(data=dict(x=x, y=y_sqrt))
plot = figure(
plot_height=400,
plot_width=800,
sizing_mode="scale_width",
tools="reset,save",
x_range=[0, 500], y_range=[0, 500],
x_axis_label="Size of Problem",
y_axis_label="Time Taken to Find Solution")
plot.line('x', 'y', source=linear_source, line_width=3, line_alpha=0.6, color="blue", legend_label="Classical Search O(N)")
plot.line('x', 'y', source=sqrt_source, line_width=3, line_alpha=0.6, color="red", legend_label="Quantum Search O(√N)")
plot.legend.location = "top_left"
# Client-side JS callback: rescales the sqrt curve in the browser when the
# slider moves, so the exported static HTML stays interactive without Python.
callback = CustomJS(args=dict(source=sqrt_source), code="""
var data = source.data;
var f = (10-cb_obj.value)*2 + 3
var x = data['x']
var y = data['y']
for (var i = 0; i < x.length; i++) {
y[i] = f*Math.sqrt(x[i])
}
source.change.emit();
""")
speed_slider = Slider(title="Relative Speed of Quantum Computer", value=7.5, start=1.0, end=10.0, step=0.1, show_value=False)
speed_slider.js_on_change('value', callback)
layout = column(plot, speed_slider)
caption = """
Comparing performance of algorithms across different platforms is difficult. What we can tell (through big-O-notation) is
despite the difference in speeds between our classical and quantum computers, for a large enough problem, the quantum search
algorithm will always out-perform the classical search algorithm."""
# Embed the bokeh layout plus caption as inline HTML in the notebook output.
html_repr = file_html(layout, CDN)
html_fig = "<figure>{0}<figcaption>{1}</figcaption></figure>".format(html_repr, caption)
IPython.display.HTML(html_fig)
# -
# An even more impressive speedup is obtained with Shor's algorithm, which analyses periodic functions at the heart of the factorization problem. This allows a quantum solution for factoring $n$-digit numbers with complexity $O(n^3)$. This is a superpolynomial speedup compared with the complexity for digital computers, which is worse than $O\left(e^{n^{1/3}}\right)$.
#
# Another approach towards quantum algorithms is to use quantum computers to solve quantum problems. As we will see in the next chapter, expressing a quantum state requires an amount of information that scales exponentially with the number of qubits. Just writing down the state of $n$ qubits therefore becomes an intractable task for digital computers as $n$ increases. However, for a quantum computer we just need $n$ qubits to do the same job. This natural capability to express and manipulate quantum states allows us to study and better understand quantum systems of interest, such as molecules and fundamental particles.
#
# Applying and adapting quantum algorithms in different industries therefore has the promise of enabling disruptive use cases in business and science. These include breakthroughs in drug discovery, machine learning, materials discovery, option pricing, protein folding, and supply chain.$^{3}$ Particularly promising are those problems for which classical algorithms face inherent scaling limits and which do not require a large classical dataset to be loaded. For quantum advantage, a given problem's answers need to strongly depend on exponentially many entangled degrees of freedom with structure such that quantum mechanics evolves to a solution without having to go through all paths. Note, however, that the precise relationship between problems that are 'easy' for quantum computers (solvable in polynomial time) and other complexity-theoretic classes is still an open question.$^{4}$
#
# This is just a taste of how quantum algorithms can perform computation in a unique way. More details on these approaches can be found in later chapters. But first we need to look beyond the single qubit and invest some time into understanding the full set of quantum gates that we will need. This is the focus of the next chapter.
# ## 6. References <a id='references'></a>
#
# 1. https://lists.gforge.inria.fr/pipermail/cado-nfs-discuss/2020-February/001166.html
# 2. <NAME>, <NAME> (1938). The Evolution of Physics: The Growth of Ideas from Early Concepts to Relativity and Quanta. Cambridge University Press.
# 3. https://www.ibm.com/thought-leadership/institute-business-value/report/quantumstrategy
# 4. https://www.cs.virginia.edu/~robins/The_Limits_of_Quantum_Computers.pdf
# 5. Image: Cmglee / CC BY-SA (https://creativecommons.org/licenses/by-sa/4.0)
import qiskit.tools.jupyter
# %qiskit_version_table
|
content/ch-states/case-for-quantum.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # HELLO WORLD IN PYTHON
# ## 1. Python
# <b>Python es un lenguaje de programación de código abierto</b>, orientado a objetos, muy simple y fácil de entender. Tiene una sintaxis sencilla que cuenta con una vasta biblioteca de herramientas, que hacen de Python un lenguaje de programación único.
# ### Principales Usos
# - Python para la automatización de tareas
# - Python en Big Data
# - Python en Data Science
# - Python en Desarrollo Web
# - Python en la Inteligencia Artificial (AI)
# <h3>Lenguaje de programación interpretado</h3>
#
# Un lenguaje interpretado es un tipo de lenguaje de programación para el cual la mayoría de sus implementaciones ejecutan instrucciones directa y libremente, sin compilar previamente un programa en instrucciones de lenguaje máquina.
#
# <img src='https://www.cursosgis.com/wp-content/uploads/4-97.jpg' >
# #### Nota
# El tipo de extensión de un programa en python es .py
# ## <a href="https://jupyter.org/"> 2. Jupyter Notebook</a>
# Es un <b>entorno de trabajo interactivo que permite desarrollar código en Python</b> de manera dinámica, a la vez que integrar en un mismo documento tanto bloques de código como texto, gráficas o imágenes. Es un SaaS utilizado ampliamente en análisis numérico, estadística y machine learning, entre otros campos de la informática y las matemáticas.
# ctrl+Enter -- correr celda
# shift+Enter -- correr celda y dirigirse al siguiente bloque de código(siguiente celda)
print("h<NAME>")  # demo cell: run with Ctrl+Enter or Shift+Enter as noted above
# ## 3. Hello World in Python
print("<NAME>")  # the classic "hello world" program in Python
# # EJERCICIOS
# Escribir un programa que almacene la cadena ¡<NAME>! en una variable y luego muestre por pantalla el contenido de la variable.
# Exercise: store the greeting in a variable, then print the variable's content.
a = '<NAME>'
print(a)
|
Modulo1/.ipynb_checkpoints/2. Hello World Python-checkpoint.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# El amplificador puente es como se observa en la imagen.
import numpy as np
import sympy as sym
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
from IPython.display import Image
sym.init_printing()
# #%matplotlib widget
# %matplotlib inline
# Show the bridge-amplifier schematic for reference.
Image(filename='amp_puente.png',width=300)
# VERIFY THAT Vo SATISFIES THE FOLLOWING EQUATION
sym.var('Va, Vb, Vc, Vd, Vo, Vp')  # node voltages and the bridge excitation Vp
sym.var('R1, R2, R3, R4, R5, R6')  # bridge resistors (R1-R4) and amplifier resistors (R5, R6)
sym.var('Vo_')  # expected closed-form output voltage
display(sym.Eq(Vo_,sym.fu(((1+R6/R5)*(R2/(R1+R2)-R4/(R3+R4))*Vp))))
Vo_=sym.fu(((1+R6/R5)*(R2/(R1+R2)-R4/(R3+R4))*Vp))
find=sym.Matrix(([Va],[Vb],[Vc],[Vd],[Vo]))  # unknowns of the nodal system
# Write as many equations as there are nodes
ec_nodo_0=sym.Eq(Vd,0)
ec_nodo_1=sym.Eq(Vb-Vc,Vp)
ec_nodo_2=sym.Eq((Vb-Vd)/R3+(Vc-Vd)/R4,0)
ec_nodo_3=sym.Eq(Va/R5+(Va-Vo)/R6,0)
ec_nodo_4=sym.Eq((Vb-Va)/R1+(Vb-Vd)/R3,(Va-Vc)/R2+(Vd-Vc)/R4)# supernode special case
display(sym.Eq(Vo,sym.factor(sym.solve([ec_nodo_0,ec_nodo_1,ec_nodo_2,ec_nodo_3,ec_nodo_4],find)[Vo])))
Vo=sym.simplify(sym.factor(sym.solve([ec_nodo_0,ec_nodo_1,ec_nodo_2,ec_nodo_3,ec_nodo_4],find)[Vo]))
# Equation holds iff the derived and expected expressions differ by zero.
print('Se valida la ecuación?',np.invert(np.bool_(sym.simplify(Vo_-Vo))))
sym.simplify(Vo_-Vo)
# +
sym.var('Av,R, D, Vo_calc') # If Av is the gain: Av=(1+R6/R5); R1=R-D (contracts), R2=R+D, R1/R2=R4/R3
display(sym.Eq(Vo_calc,sym.simplify((Vo.subs({(R1,R-D),(R2,R+D),(R3,R+D),(R4,R-D),(R6,(Av-1)*R5)})))))
Vo_calc=sym.simplify((Vo.subs({(R1,R-D),(R2,R+D),(R3,R+D),(R4,R-D),(R6,(Av-1)*R5)})))
|
python/0/Amp_Puente.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] colab_type="text" id="WlEzVD-jJpW4"
# # Import Packages
# +
from __future__ import print_function
import warnings
warnings.filterwarnings("ignore")
import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
import gensim
import nltk
from time import time
from sklearn.model_selection import train_test_split,StratifiedKFold
from sklearn.metrics.pairwise import cosine_similarity
# Run it to obtain reproducible results across machines (from keras.io)
import tensorflow as tf
import random as rn
import os
from tensorflow.keras import optimizers,backend as K
os.environ['PYTHONHASHSEED'] = '0'
np.random.seed(42)
rn.seed(12345)
session_conf = tf.ConfigProto(intra_op_parallelism_threads=1, inter_op_parallelism_threads=1)
tf.set_random_seed(1234)
sess = tf.Session(graph=tf.get_default_graph(), config=session_conf)
K.set_session(sess)
import tensorflow
from tensorflow.keras.preprocessing.text import Tokenizer
from tensorflow.keras.preprocessing.sequence import pad_sequences
from tensorflow.keras.models import Sequential, Model
from tensorflow.keras.layers import Input,Dense, Dropout, Embedding,Conv1D,GlobalMaxPooling1D,Concatenate,GaussianNoise,LSTM,BatchNormalization
from tensorflow.keras import regularizers
# + [markdown] colab_type="text" id="D-V3m5YDKOjq"
# # Import Supporting Files
# + colab={"base_uri": "https://localhost:8080/", "height": 0} colab_type="code" executionInfo={"elapsed": 414466, "status": "ok", "timestamp": 1581683580430, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "06346350967386333294"}, "user_tz": -420} id="gKA3XWRuKRCr" outputId="31c1160a-0365-4107-8fbd-ec0f88e80a7d"
# Google Word2Vec pretrained model (300-dimensional vectors). Loaded once at
# module level; every model-building function below reads this global.
model_w2v = gensim.models.KeyedVectors.load_word2vec_format('C:/Users/<NAME>/Desktop/NLP_Stacking_Ensemble/Pre-Trained Word Embedding/GoogleNews-vectors-negative300.bin', binary=True)
# #GloVe Pretrained Model
# link ='https://drive.google.com/open?id=1p1IN9O_fpSQzPTFB5Y8CCOMXPIPU1WGV'
# fluff, id = link.split('=')
# print (id) # Verify that you have everything after '='
# downloaded = drive.CreateFile({'id':id})
# downloaded.GetContentFile('glove.twitter.27B.200d.txt')
# def loadGloveModel(gloveFilePath):
# print("Loading Glove Model")
# f = open(gloveFilePath,'r',encoding='utf-8')
# model = {}
# for line in f:
# splitLine = line.split()
# word = splitLine[0]
# embedding = np.array([float(val) for val in splitLine[1:]])
# model[word] = embedding
# print("Done.",len(model)," words loaded!")
# return model
# model_GloVe=loadGloveModel('glove.twitter.27B.200d.txt')
# + [markdown] colab_type="text" id="f1nR6qOIKTSd"
# # Import Data
# +
# Load the pre-processed train/test splits and drop the leftover index columns
# written by earlier reset_index/to_csv steps.
# Fix: pass `axis` by keyword — the positional form `drop([...], 1)` was
# deprecated in pandas 1.1 and removed in pandas 2.0.
df_train = pd.read_csv('C:/Users/<NAME>/Desktop/NLP_Stacking_Ensemble/df_train_final.csv')
df_train = df_train.drop(['Unnamed: 0', 'index'], axis=1)
df_test = pd.read_csv('C:/Users/<NAME>/Desktop/NLP_Stacking_Ensemble/df_test_final.csv')
df_test = df_test.drop(['Unnamed: 0', 'index'], axis=1)
# + [markdown] colab_type="text" id="XGLrSmEYTj4I"
# # Function Definition
# + colab={} colab_type="code" id="xrA9moSlTlzc"
def grid_model_W2V(df,input_noise,hidden_dropout,l2_regularizer,activation_conv_list,activation_list,hidden_nodes_1,hidden_nodes_2,optimizer,epochs,batch_size=32):
    '''
    Grid-search hyperparameter tuning for a multi-filter CNN regressor built
    on frozen Google Word2Vec embeddings.

    For every (convolutional activation, dense activation) pair, runs 10-fold
    stratified cross-validation (stratified on df['source']), plots the
    train/validation loss curve for each fold, and prints the mean
    cosine-proximity score across folds.

    Parameters
    ----------
    df : DataFrame with 'clean_text', 'source' and 'sentiment score' columns.
    input_noise : std of the GaussianNoise layer applied to the embeddings.
    hidden_dropout : dropout rate before the dense head.
    l2_regularizer : L2 factor used for kernel and activity regularization.
    activation_conv_list, activation_list : activation names to grid over.
    hidden_nodes_1, hidden_nodes_2 : widths of the two dense layers.
    optimizer, epochs, batch_size : usual Keras training settings.

    Relies on the module-level `model_w2v` KeyedVectors object.
    Returns nothing; results are printed/plotted.
    '''
    kfold = StratifiedKFold(n_splits=10, shuffle=True, random_state=500)
    X = df['clean_text']
    stratify = df['source']
    y = df['sentiment score']
    max_sequence_length = 50
    for act_conv in activation_conv_list:
        for act in activation_list:
            cvscores = []
            start = time()
            for train, val in kfold.split(X, stratify):
                X_train = X[train].tolist()
                y_train = np.array(y[train])
                X_val = X[val].tolist()
                y_val = np.array(y[val])
                # Fit the tokenizer on the training fold only (no leakage).
                t = Tokenizer()
                t.fit_on_texts(X_train)
                X_train = t.texts_to_sequences(X_train)
                X_val = t.texts_to_sequences(X_val)
                # Adding 1 because of the reserved 0 (padding) index.
                vocab_size = len(t.word_index) + 1
                X_train = pad_sequences(X_train, padding='post', maxlen=max_sequence_length)
                X_val = pad_sequences(X_val, padding='post', maxlen=max_sequence_length)
                embedding_size = 300
                # Weight matrix mapping each training word to its W2V vector;
                # rows stay zero for out-of-vocabulary words.
                W2V_embedding_matrix = np.zeros((vocab_size, embedding_size))
                for word, i in t.word_index.items():
                    if word in model_w2v:
                        W2V_embedding_matrix[i] = model_w2v[word]
                # One sub-model per kernel size; each sub-model is 25 parallel
                # single-filter Conv1D branches, global-max-pooled.
                submodels = []
                embedding_layer = Embedding(vocab_size, embedding_size, weights=[W2V_embedding_matrix], input_length=max_sequence_length, trainable=False)
                sequence_input = Input(shape=(max_sequence_length,), dtype='int32')
                for kw in (1, 2, 3, 4):  # kernel sizes
                    embedded_sequences = embedding_layer(sequence_input)
                    sub = GaussianNoise(input_noise)(embedded_sequences)
                    sub_submodels = []
                    for _ in range(25):
                        subsub = BatchNormalization()(sub)
                        # Bug fix: Conv1D previously consumed `sub` directly,
                        # silently discarding the BatchNormalization output.
                        subsub = Conv1D(1, kw, padding='valid', activation=act_conv, strides=1)(subsub)
                        subsub = GlobalMaxPooling1D()(subsub)
                        sub_submodels.append(Model(sequence_input, subsub))
                    sub_submodel_outputs = [m.output for m in sub_submodels]
                    sub_out = Concatenate(axis=1)(sub_submodel_outputs)
                    submodels.append(Model(sequence_input, sub_out))
                # Concatenate the four kernel-size branches and add the head.
                submodel_outputs = [submodel.output for submodel in submodels]
                out = Concatenate(axis=1)(submodel_outputs)
                out = Dropout(hidden_dropout)(out)
                out = Dense(hidden_nodes_1, activation=act, use_bias=True, bias_initializer='glorot_uniform',
                            kernel_regularizer=regularizers.l2(l2_regularizer), activity_regularizer=regularizers.l2(l2_regularizer))(out)
                out = Dense(hidden_nodes_2, activation='tanh', use_bias=True, bias_initializer='glorot_uniform',
                            kernel_regularizer=regularizers.l2(l2_regularizer), activity_regularizer=regularizers.l2(l2_regularizer))(out)
                out = Dense(1)(out)
                big_model = Model(sequence_input, out)
                big_model.compile(loss='MSE', optimizer=optimizer, metrics=['cosine_proximity'])
                history = big_model.fit(X_train, y_train,
                                        batch_size=batch_size, epochs=epochs,
                                        validation_data=(X_val, y_val), verbose=False)
                # Per-fold learning curves help spot over/under-fitting.
                plt.plot(history.history['loss'])
                plt.plot(history.history['val_loss'])
                plt.title('model loss')
                plt.ylabel('loss')
                plt.xlabel('epoch')
                plt.legend(['train', 'val'], loc='upper left')
                plt.show()
                loss, cos = big_model.evaluate(X_val, y_val, verbose=False)
                print('Cosine Similarity: {}\n'.format(cos))
                cvscores.append(cos)
            end = time()
            # Summary of this hyperparameter configuration across all folds.
            print('Time Elapsed: {}'.format(end - start))
            print('Epochs: {}'.format(epochs))
            print('Optimizer: {}'.format(optimizer))
            print('Convolutional Activation Function: {}'.format(act_conv))
            print('Dense Activation Function: {}'.format(act))
            print('Hidden Nodes 1: {}'.format(hidden_nodes_1))
            print('Hidden Nodes 2: {}'.format(hidden_nodes_2))
            print('Input Noise Std: {}'.format(input_noise))
            print('Hidden Dropout Rate: {}'.format(hidden_dropout))
            print('Stratified k-Fold Cosine Similarity Avg Score: {}, Std Score: {}'.format(np.mean(cvscores), np.std(cvscores)))
def model_W2V(df,input_noise,hidden_dropout,l2_regularizer,act_conv,act,hidden_nodes_1,hidden_nodes_2,optimizer,epochs):
    '''
    Train the multi-filter Word2Vec CNN on the full dataset and return the
    fitted Keras model (final training once hyperparameters have been chosen
    with grid_model_W2V).

    Parameters mirror grid_model_W2V, except a single convolutional
    activation (act_conv) and dense activation (act) are given instead of
    lists, and no validation split is made. Relies on the module-level
    `model_w2v` KeyedVectors object.

    Returns the compiled-and-fitted Keras Model.
    '''
    X_train = df['clean_text'].tolist()
    y_train = np.array(df['sentiment score'])
    max_sequence_length = 50
    # Tokenize and pad the full corpus.
    t = Tokenizer()
    t.fit_on_texts(X_train)
    X_train = t.texts_to_sequences(X_train)
    # Adding 1 because of the reserved 0 (padding) index.
    vocab_size = len(t.word_index) + 1
    X_train = pad_sequences(X_train, padding='post', maxlen=max_sequence_length)
    embedding_size = 300
    # Weight matrix mapping each word to its W2V vector; rows stay zero for
    # out-of-vocabulary words.
    W2V_embedding_matrix = np.zeros((vocab_size, embedding_size))
    for word, i in t.word_index.items():
        if word in model_w2v:
            W2V_embedding_matrix[i] = model_w2v[word]
    # One sub-model per kernel size; each sub-model is 25 parallel
    # single-filter Conv1D branches, global-max-pooled.
    submodels = []
    embedding_layer = Embedding(vocab_size, embedding_size, weights=[W2V_embedding_matrix], input_length=max_sequence_length, trainable=False)
    sequence_input = Input(shape=(max_sequence_length,), dtype='int32')
    for kw in (1, 2, 3, 4):  # kernel sizes
        embedded_sequences = embedding_layer(sequence_input)
        sub = GaussianNoise(input_noise)(embedded_sequences)
        sub_submodels = []
        for _ in range(25):
            subsub = BatchNormalization()(sub)
            # Bug fix: Conv1D previously consumed `sub` directly, silently
            # discarding the BatchNormalization output.
            subsub = Conv1D(1, kw, padding='valid', activation=act_conv, strides=1)(subsub)
            subsub = GlobalMaxPooling1D()(subsub)
            sub_submodels.append(Model(sequence_input, subsub))
        sub_submodel_outputs = [m.output for m in sub_submodels]
        sub_out = Concatenate(axis=1)(sub_submodel_outputs)
        submodels.append(Model(sequence_input, sub_out))
    # Concatenate the four kernel-size branches and add the dense head.
    submodel_outputs = [submodel.output for submodel in submodels]
    out = Concatenate(axis=1)(submodel_outputs)
    out = Dropout(hidden_dropout)(out)
    out = Dense(hidden_nodes_1, activation=act, use_bias=True, bias_initializer='glorot_uniform',
                kernel_regularizer=regularizers.l2(l2_regularizer), activity_regularizer=regularizers.l2(l2_regularizer))(out)
    out = Dense(hidden_nodes_2, activation='tanh', use_bias=True, bias_initializer='glorot_uniform',
                kernel_regularizer=regularizers.l2(l2_regularizer), activity_regularizer=regularizers.l2(l2_regularizer))(out)
    out = Dense(1)(out)
    big_model = Model(sequence_input, out)
    big_model.compile(loss='MSE', optimizer=optimizer, metrics=['cosine_proximity'])
    # print(big_model.summary())
    big_model.fit(X_train, y_train, batch_size=32, epochs=epochs, verbose=True)
    return big_model
def grid_model_W2V_lstm(df,input_noise,hidden_dropout,l2_regularizer,activation_list,hidden_nodes_1,hidden_nodes_2,lstm_nodes_1,lstm_nodes_2,optimizer,epochs,batch_size=32):
    '''
    Hyperparameter tuning for a stacked-LSTM regressor on frozen Word2Vec
    embeddings.

    For each dense activation in activation_list, runs 10-fold stratified
    cross-validation (stratified on df['source']), plots per-fold loss
    curves, and prints the mean cosine-proximity score across folds.
    Relies on the module-level `model_w2v` KeyedVectors object; the
    CuDNNLSTM layers require a GPU. Returns nothing (prints/plots only).
    '''
    kfold = StratifiedKFold(n_splits=10, shuffle=True, random_state=500)
    X=df['clean_text']
    stratify=df['source']
    y=df['sentiment score']
    max_sequence_length = 50
    for act in activation_list:
        cvscores = []
        start=time()
        for train,val in kfold.split(X,stratify):
            X_train=X[train].tolist()
            y_train=np.array(y[train])
            X_val=X[val].tolist()
            y_val=np.array(y[val])
            # prepare tokenizer -- fit on the training fold only, so the
            # validation fold never leaks into the vocabulary
            t = Tokenizer()
            t.fit_on_texts(X_train)
            X_train = t.texts_to_sequences(X_train)
            X_val = t.texts_to_sequences(X_val)
            # Adding 1 because of reserved 0 index
            vocab_size = len(t.word_index) + 1
            X_train = pad_sequences(X_train, padding='post', maxlen=max_sequence_length)
            X_val = pad_sequences(X_val, padding='post', maxlen=max_sequence_length)
            embedding_size=300
            # create a weight matrix for words in training docs
            # (rows stay zero for out-of-vocabulary words)
            W2V_embedding_matrix = np.zeros((vocab_size, embedding_size))
            for word, i in t.word_index.items():
                if word in model_w2v:
                    W2V_embedding_vector = model_w2v[word]
                    W2V_embedding_matrix[i] = W2V_embedding_vector
            # Define LSTM: frozen embedding -> Gaussian noise -> two stacked
            # CuDNNLSTM layers -> dropout -> two dense layers -> linear output
            sequence_input = Input(shape=(max_sequence_length,), dtype='int32')
            embedding_layer = Embedding(vocab_size, embedding_size, weights=[W2V_embedding_matrix], input_length=max_sequence_length, trainable=False)
            embedded_sequences = embedding_layer(sequence_input)
            out=GaussianNoise(input_noise)(embedded_sequences)
            # out=LSTM(100,return_sequences=True,dropout=hidden_dropout,recurrent_dropout=hidden_dropout)(out)
            # out=LSTM(70,dropout=hidden_dropout,recurrent_dropout=hidden_dropout)(out)
            out=tf.keras.layers.CuDNNLSTM(lstm_nodes_1,return_sequences=True)(out)
            out=tf.keras.layers.CuDNNLSTM(lstm_nodes_2)(out)
            out = Dropout(hidden_dropout)(out)
            out = Dense(hidden_nodes_1,activation=act,kernel_regularizer=regularizers.l2(l2_regularizer),activity_regularizer=regularizers.l2(l2_regularizer))(out)
            out = Dense(hidden_nodes_2,activation='tanh',kernel_regularizer=regularizers.l2(l2_regularizer),activity_regularizer=regularizers.l2(l2_regularizer))(out)
            out = Dense(1)(out)
            model = Model(sequence_input,out)
            model.compile(loss='MSE', optimizer=optimizer, metrics=['cosine_proximity'])
            # print(model.summary())
            history=model.fit(X_train,y_train, batch_size=batch_size,epochs=epochs, validation_data=(X_val, y_val),verbose=False)
            # Per-fold learning curves help spot over/under-fitting.
            plt.figure(figsize=(6,6))
            plt.plot(history.history['loss'])
            plt.plot(history.history['val_loss'])
            plt.title('model loss')
            plt.ylabel('loss')
            plt.xlabel('epoch')
            plt.ylim((0,0.25))
            plt.legend(['train', 'val'], loc='upper left')
            plt.show()
            loss, cos = model.evaluate(X_val, y_val, verbose=False)
            print('Cosine Similarity: {}\n'.format(cos))
            cvscores.append(cos)
        end=time()
        # Summary of this hyperparameter configuration across all folds.
        print('Time Elapsed: {}'.format(end-start))
        print('Epochs: {}'.format(epochs))
        print('Optimizer: {}'.format(optimizer))
        print('Dense Activation Function: {}'.format(act))
        print('LSTM Nodes 1: {}'.format(lstm_nodes_1))
        print('LSTM Nodes 2: {}'.format(lstm_nodes_2))
        print('Hidden Nodes 1: {}'.format(hidden_nodes_1))
        print('Hidden Nodes 2: {}'.format(hidden_nodes_2))
        print('Input Noise Std: {}'.format(input_noise))
        print('Hidden Dropout Rate: {}'.format(hidden_dropout))
        print('Stratified k-Fold Cosine Similarity Avg Score: {}, Std Score: {}'.format(np.mean(cvscores),np.std(cvscores)))
def model_W2V_lstm(df,input_noise,hidden_dropout,l2_regularizer,act,hidden_nodes_1,hidden_nodes_2,lstm_nodes_1,lstm_nodes_2,optimizer,epochs,batch_size=32):
    '''
    Train the stacked-LSTM Word2Vec model on the full dataset and return the
    fitted Keras model (final training once hyperparameters have been chosen
    with grid_model_W2V_lstm). Relies on the module-level `model_w2v`
    KeyedVectors object; the CuDNNLSTM layers require a GPU.
    '''
    X_train=df['clean_text'].tolist()
    y_train=np.array(df['sentiment score'])
    max_sequence_length = 50
    start=time()  # NOTE(review): start is never read afterwards (dead assignment)
    # prepare tokenizer on the full corpus
    t = Tokenizer()
    t.fit_on_texts(X_train)
    X_train = t.texts_to_sequences(X_train)
    # Adding 1 because of reserved 0 index
    vocab_size = len(t.word_index) + 1
    X_train = pad_sequences(X_train, padding='post', maxlen=max_sequence_length)
    embedding_size=300
    # create a weight matrix for words in training docs
    # (rows stay zero for out-of-vocabulary words)
    W2V_embedding_matrix = np.zeros((vocab_size, embedding_size))
    for word, i in t.word_index.items():
        if word in model_w2v:
            W2V_embedding_vector = model_w2v[word]
            W2V_embedding_matrix[i] = W2V_embedding_vector
    # Define LSTM: frozen embedding -> Gaussian noise -> two stacked
    # CuDNNLSTM layers -> dropout -> two dense layers -> linear output
    sequence_input = Input(shape=(max_sequence_length,), dtype='int32')
    embedding_layer = Embedding(vocab_size, embedding_size, weights=[W2V_embedding_matrix], input_length=max_sequence_length, trainable=False)
    embedded_sequences = embedding_layer(sequence_input)
    out=GaussianNoise(input_noise)(embedded_sequences)
    # out=LSTM(100,return_sequences=True,dropout=hidden_dropout,recurrent_dropout=hidden_dropout)(out)
    # out=LSTM(70,dropout=hidden_dropout,recurrent_dropout=hidden_dropout)(out)
    out=tf.keras.layers.CuDNNLSTM(lstm_nodes_1,return_sequences=True)(out)
    out=tf.keras.layers.CuDNNLSTM(lstm_nodes_2)(out)
    out = Dropout(hidden_dropout)(out)
    out = Dense(hidden_nodes_1,activation=act,kernel_regularizer=regularizers.l2(l2_regularizer),activity_regularizer=regularizers.l2(l2_regularizer))(out)
    out = Dense(hidden_nodes_2,activation='tanh',kernel_regularizer=regularizers.l2(l2_regularizer),activity_regularizer=regularizers.l2(l2_regularizer))(out)
    out = Dense(1)(out)
    model = Model(sequence_input,out)
    model.compile(loss='MSE', optimizer=optimizer, metrics=['cosine_proximity'])
    # print(model.summary())
    model.fit(X_train,y_train, batch_size=batch_size,epochs=epochs,verbose=True)
    return model
# + [markdown] colab_type="text" id="CyYmTK-7Kb3T"
# # CNN
# + colab={"base_uri": "https://localhost:8080/", "height": 101} colab_type="code" executionInfo={"elapsed": 405886, "status": "ok", "timestamp": 1581683580772, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "06346350967386333294"}, "user_tz": -420} id="9lg2QEagLQzJ" outputId="a3dd9c3a-e3bf-4e32-e81e-4992a1005338"
# Quick demonstration of Keras text preprocessing: fit a Tokenizer on a tiny
# corpus, index a new sentence, and zero-pad the resulting sequence.
t = Tokenizer()
text_lst=['Mathematics is Fun','Especially Linear Algebra']
print('Corpus: {}'.format(text_lst))
t.fit_on_texts(text_lst)
print('Word index: {}'.format(t.word_index))
new_text=['Linear algebra is Mathematics']
print('Input New text: {}'.format(new_text[0]))
a=t.texts_to_sequences(new_text)
print('New text index sequences: {}'.format(a))
# 'post' padding appends zeros on the right until maxlen is reached.
print('Padding with maximum length equal 10: {}'.format(pad_sequences(a, padding='post', maxlen=10)))
# + [markdown] colab_type="text" id="6eQu4tfqzZOv"
# ## Hyperparameter Tuning
# + colab={"base_uri": "https://localhost:8080/", "height": 0} colab_type="code" executionInfo={"elapsed": 1785795, "status": "ok", "timestamp": 1581685397188, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "06346350967386333294"}, "user_tz": -420} id="7qFVoaMPTbbe" outputId="0a655566-15f9-44ab-ac1f-22a82859bcbe"
# CNN hyperparameter search: Adam @ lr 5e-4, tanh activations, L2 = 0.005.
adam=tensorflow.keras.optimizers.Adam(learning_rate=0.0005)
grid_model_W2V(df_train,hidden_nodes_1=15,hidden_nodes_2=10,
               input_noise=0.01,hidden_dropout=0.45,l2_regularizer=0.005,
               activation_conv_list=['tanh'],activation_list=['tanh'],
               epochs=75,optimizer=adam)
# + colab={"base_uri": "https://localhost:8080/", "height": 0} colab_type="code" executionInfo={"elapsed": 3304747, "status": "ok", "timestamp": 1581691418045, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "06346350967386333294"}, "user_tz": -420} id="F5L21NXpsotl" outputId="427e9ab3-5b88-4c25-b20c-675327034a62"
# CNN hyperparameter search, second configuration: wider second dense layer
# (15 nodes) and stronger L2 regularization (0.008).
adam=tensorflow.keras.optimizers.Adam(learning_rate=0.0005)
grid_model_W2V(df_train,hidden_nodes_1=15,hidden_nodes_2=15,
               input_noise=0.01,hidden_dropout=0.45,l2_regularizer=0.008,
               activation_conv_list=['tanh'],activation_list=['tanh'],
               epochs=75,optimizer=adam)
# + [markdown] colab_type="text" id="ShK0Y-Jhzhom"
# ## Prediction
# + colab={"base_uri": "https://localhost:8080/", "height": 1000} colab_type="code" executionInfo={"elapsed": 1424569, "status": "ok", "timestamp": 1581693280308, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "06346350967386333294"}, "user_tz": -420} id="OkXIVNdBzXcO" outputId="f571be14-d38a-42e7-c723-bdcdd6587306"
# Train the final CNN model with the selected hyperparameters and save it
# to Google Drive for later reuse.
adam=tensorflow.keras.optimizers.Adam(learning_rate=0.0005)
final_model=model_W2V(df_train,hidden_nodes_1=15,hidden_nodes_2=15,
                      input_noise=0.01,hidden_dropout=0.45,l2_regularizer=0.008,
                      act_conv='tanh',act='tanh',
                      epochs=75,optimizer=adam)
final_model.save("gdrive/My Drive/TA/Seminar 1/Jupyter Notebook/model_CNN_W2V.h5")
# + [markdown] colab_type="text" id="xVj6byVJz1vM"
# # LSTM
# + [markdown] colab_type="text" id="SCeuIN9oPFtZ"
# ## Hyperparameter Tuning
# + colab={"base_uri": "https://localhost:8080/", "height": 1000} colab_type="code" executionInfo={"elapsed": 4464189, "status": "ok", "timestamp": 1581703752889, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "06346350967386333294"}, "user_tz": -420} id="MJdir71_RmGR" outputId="5aa168ff-4153-401d-f278-761b0a4a09d6"
# LSTM hyperparameter search: Adam @ lr 1e-4, relu dense activations,
# two 100-unit LSTM layers, 350 epochs.
adam=tensorflow.keras.optimizers.Adam(learning_rate=0.0001)
grid_model_W2V_lstm(df_train,
                    input_noise=0.1,hidden_dropout=0.3,l2_regularizer=0.05,
                    activation_list=['relu'],lstm_nodes_1=100,lstm_nodes_2=100,
                    hidden_nodes_1=50,hidden_nodes_2=10,
                    optimizer=adam,epochs=350,batch_size=32)
# + [markdown] colab_type="text" id="EL32pY1gPJWV"
# ## Prediction
# + colab={"base_uri": "https://localhost:8080/", "height": 1000} colab_type="code" executionInfo={"elapsed": 852561, "status": "ok", "timestamp": 1581705042273, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "06346350967386333294"}, "user_tz": -420} id="OUDc5JclQmcg" outputId="b732bd43-d7ac-4256-d56a-0a389700a386"
# Train the final LSTM model with the selected hyperparameters and save it
# to Google Drive for later reuse.
adam=tensorflow.keras.optimizers.Adam(learning_rate=0.0001)
final_model=model_W2V_lstm(df_train,
                           input_noise=0.1,hidden_dropout=0.3,l2_regularizer=0.05,
                           act='relu',lstm_nodes_1=100,lstm_nodes_2=100,
                           hidden_nodes_1=50,hidden_nodes_2=10,
                           optimizer=adam,epochs=350,batch_size=32)
final_model.save("gdrive/My Drive/TA/Seminar 1/Jupyter Notebook/model_LSTM_W2V.h5")
|
Model/CNN LSTM.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (Data Science)
# language: python
# name: python3__SAGEMAKER_INTERNAL__arn:aws:sagemaker:us-east-1:081325390199:image/datascience-1.0
# ---
# # Module 5: Batch ingestion via SageMaker Processing job (Sklearn)
#
# ---
# **Note:** Please set kernel to `Python 3 (Data Science)` and select instance to `ml.t3.medium`
#
# ## Contents
#
# 1. [Setup](#Setup)
# 1. [Create Sklearn SageMaker Processing script](#Create-Sklearn-SageMaker-Processing-script)
# 1. [Run batch ingestion processing job](#Run-batch-ingestion-processing-job)
# 1. [Verify processing job results](#Verify-processing-job-results)
# # Setup
# #### Imports
from sagemaker.processing import ProcessingInput, ProcessingOutput
from sagemaker.sklearn.processing import SKLearnProcessor
from sagemaker import get_execution_role
from random import randint
import sagemaker
import logging
import json
# Module logger echoing to stderr. Bug fix: the previous revision passed the
# literal string '__name__', which created a logger literally named
# "__name__" instead of one named after this module.
logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)
logger.addHandler(logging.StreamHandler())
logger.info(f'Using SageMaker version: {sagemaker.__version__}')
# #### Essentials
# IAM role and session objects used by every SageMaker call below.
role = get_execution_role()
logger.info(f'Role = {role}')
sagemaker_session = sagemaker.Session()
region = sagemaker_session.boto_region_name
# Runtime client used at the end of the notebook to read single records back
# from the online feature store.
featurestore_runtime_client = sagemaker_session.boto_session.client('sagemaker-featurestore-runtime', region_name=region)
default_bucket = sagemaker_session.default_bucket()
logger.info(f'Default bucket = {default_bucket}')
prefix = 'sagemaker-feature-store'
# #### Copy partitions data from local to S3
# !aws s3 cp .././data/partitions/ s3://{default_bucket}/{prefix}/partitions/ --recursive
# # Create Sklearn SageMaker Processing script
# +
# %%writefile ./scripts/batch_ingest_sm_sklearn.py
import subprocess
import sys
# Install the sagemaker SDK inside the processing container at runtime;
# the sklearn processing image does not bundle it.
subprocess.check_call([sys.executable, '-m', 'pip', 'install', 'sagemaker'])
from sagemaker.feature_store.feature_group import FeatureGroup
from sklearn.preprocessing import MinMaxScaler, LabelEncoder
from datetime import datetime, timezone, date
import pandas as pd
import sagemaker
import argparse
import logging
import time
import os
# Session, logger, and shared transformer globals for the processing script.
sagemaker_session = sagemaker.Session()
# Bug fix: the previous revision passed the literal string '__name__' to
# getLogger, creating a logger literally named "__name__".
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
logger.addHandler(logging.StreamHandler())
# sklearn transformers shared by apply_transforms(); n_cores is the default
# number of ingestion processes (one per vCPU of the processing node).
label_encoder = LabelEncoder()
min_max_scaler = MinMaxScaler()
n_cores = os.cpu_count()
def get_file_paths(directory, extension='.csv'):
    """Recursively collect paths of files under *directory* with a given suffix.

    Args:
        directory: root directory to walk.
        extension: file-name suffix to match; defaults to '.csv' so existing
            callers are unaffected (generalized from the hard-coded '.csv').

    Returns:
        List of matching file paths (os.walk order).
    """
    file_paths = []
    for root, _directories, files in os.walk(directory):
        for file_name in files:
            if file_name.endswith(extension):
                file_paths.append(os.path.join(root, file_name))
    return file_paths
def get_delta_in_days(date_time) -> int:
    """Return the number of whole days between *date_time*'s date and today."""
    return (date.today() - date_time.date()).days
def apply_transforms(df: pd.DataFrame) -> pd.DataFrame:
    """Prepare raw order rows for feature-store ingestion.

    Casts the boolean reorder flag to int, converts the purchase timestamp to
    a recency feature (days since purchase, min-max scaled to [0, 1]), and
    drops the raw timestamp column. Mutates *df* in place and returns it.
    """
    df['is_reordered'] = df['is_reordered'].astype(int)
    df['purchased_on'] = pd.to_datetime(df['purchased_on'], format='%Y-%m-%d %H:%M:%S')
    df['n_days_since_last_purchase'] = df['purchased_on'].apply(lambda x: get_delta_in_days(x))
    # NOTE(review): fit_transform re-fits the scaler on each instance's shard,
    # so the scaling is relative to that shard's min/max — confirm intended.
    df['n_days_since_last_purchase'] = min_max_scaler.fit_transform(df[['n_days_since_last_purchase']])
    df.drop('purchased_on', axis=1, inplace=True)
    return df
def ingest_data(args: argparse.Namespace) -> None:
    """Load all mounted CSV partitions, transform, and ingest into the feature group.

    Runs inside the SageMaker Processing container; inputs are mounted at
    /opt/ml/processing/input/ (sharded by S3 key across instances, so each
    instance ingests a disjoint subset of files).
    """
    files = get_file_paths('/opt/ml/processing/input/')
    logger.info(f'Files: {files}')
    df = pd.concat([pd.read_csv(file) for file in files], ignore_index=True)
    df = apply_transforms(df)
    logger.info(f'Ingesting a total of [{df.shape[0]}] rows from {len(files)} files')
    logger.info(f'Ingesting into feature group [{args.feature_group_name}] using {args.num_processes} processes and {args.num_workers} workers')
    fg = FeatureGroup(name=args.feature_group_name, sagemaker_session=sagemaker_session)
    # wait=True blocks until every ingestion worker has finished.
    response = fg.ingest(data_frame=df, max_processes=args.num_processes, max_workers=args.num_workers, wait=True)
    """
    The ingest call above returns an IngestionManagerPandas instance as a response. Zero based indices of rows
    that failed to be ingested are captured via failed_rows in this response. By asserting this count to be 0,
    we validated that all rows were successfully ingested without a failure.
    """
    assert len(response.failed_rows) == 0
def parse_args() -> argparse.Namespace:
    """Parse the known CLI arguments for the ingestion job.

    Bug fix: the return annotation was ``None`` although the function returns
    the parsed namespace.
    """
    parser = argparse.ArgumentParser()
    # if num_processes is not set, we set it to the number of vCPUs by default
    parser.add_argument('--num_processes', type=int, default=n_cores)
    # if num_workers is not set, we default it to 4 i.e., 4 threads per python process
    parser.add_argument('--num_workers', type=int, default=4)
    parser.add_argument('--feature_group_name', type=str)
    # parse_known_args ignores any extra arguments the container injects.
    args, _ = parser.parse_known_args()
    return args
if __name__ == '__main__':
    # Entry point when executed by the SageMaker Processing job.
    logger.info('BATCH INGESTION - STARTED')
    args = parse_args()
    ingest_data(args)
    logger.info('BATCH INGESTION - COMPLETED')
# -
# # Run batch ingestion processing job
# +
# %store -r orders_feature_group_name
# Source S3 prefix (populated by the copy step above) and the destination
# feature group for the ingestion job.
s3_uri_prefix = f's3://{default_bucket}/{prefix}/partitions/'
# REUSE orders feature group name from module 1
feature_group_name = orders_feature_group_name
# +
# %%time
# Two ml.m5.xlarge instances; ShardedByS3Key input distribution means each
# instance receives (and ingests) a disjoint subset of the partition files.
sklearn_processor = SKLearnProcessor(framework_version='0.20.0',
                                     role=role,
                                     instance_type='ml.m5.xlarge',
                                     instance_count=2,
                                     base_job_name='fscw-sm-processing-sklearn-fs-ingestion',
                                     env={'AWS_DEFAULT_REGION': region})
"""
Note: It is recommended to set the num_processes argument below to the total number of cores (vCPUs)in your
processing node and set the num_workers to 4. num_workers here denotes number of threads per python process.
In this example, since we are using instance_type=m1.m5.xlarge (set above) for our processing node, we have
set num_processes=4 (an m5.xlarge instance has 4 cores) and num_workers=4 below.
"""
# NOTE(review): the note above says "m1.m5.xlarge"; the instance type set
# above is "ml.m5.xlarge".
sklearn_processor.run(code='./scripts/batch_ingest_sm_sklearn.py',
                      arguments = ['--num_processes', '4',
                                   '--num_workers', '4',
                                   '--feature_group_name', feature_group_name],
                      inputs=[ProcessingInput(s3_data_type='S3Prefix',
                                              source=s3_uri_prefix,
                                              s3_data_distribution_type='ShardedByS3Key',
                                              destination='/opt/ml/processing/input')],
                      logs=False) # set logs=True to enable logging
# -
# # Verify processing job results
# +
# Spot-check: fetch one random order record from the online store to confirm
# the processing job actually ingested data.
order_id = f'O{randint(1, 100000)}'
logger.info(f'order_id={order_id}')
feature_record = featurestore_runtime_client.get_record(FeatureGroupName=feature_group_name,
                                                        RecordIdentifierValueAsString=order_id)
print(json.dumps(feature_record, indent=2))
# -
|
05-module-scalable-batch-ingestion/m5_nb1_sm_processing_sklearn.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: xpython
# language: python
# name: xpython
# ---
# + [markdown] deletable=false editable=false
# Copyright 2020 <NAME> and made available under [CC BY-SA](https://creativecommons.org/licenses/by-sa/4.0) for text and [Apache-2.0](http://www.apache.org/licenses/LICENSE-2.0) for code.
# -
# ## Background
#
# We will be working with data on graduate school admissions. The data has four variables:
#
# - `admit`: the admittance status (0=not admitted, 1=admitted)
# - `gre`: the student's GRE score
# - `gpa`: the student's GPA
# - `rank`: rank of the student's undergraduate institution (1=highest to 4=lowest prestige)
#
# In this session, you will solve several problems using this data.
# Import the `pandas` library, which lets us work with dataframes.
# +
import pandas as pd
#<xml xmlns="https://developers.google.com/blockly/xml"><variables><variable id="i!#]:2XI=^qLb$e.|iwo">pd</variable></variables><block type="importAs" id="8u3elQqk_!6!WoHrlj}e" x="73" y="63"><field name="libraryName">pandas</field><field name="libraryAlias" id="i!#]:2XI=^qLb$e.|iwo">pd</field></block></xml>
# -
# Load a dataframe with the data in "datasets/binary.csv" and display it
# +
# Load the graduate-admissions dataset and display it (cell generated via Blockly).
dataframe = pd.read_csv('datasets/binary.csv')
dataframe
#<xml xmlns="https://developers.google.com/blockly/xml"><variables><variable id="t[n^Fcp7,s93E17ZZ9J6">dataframe</variable><variable id="i!#]:2XI=^qLb$e.|iwo">pd</variable></variables><block type="variables_set" id="YMNSvjU:9aS0`rADBobh" x="29" y="215"><field name="VAR" id="t[n^Fcp7,s93E17ZZ9J6">dataframe</field><value name="VALUE"><block type="varDoMethod" id="h[BIiU^0[[vbD`zoBn6+"><field name="VAR" id="i!#]:2XI=^qLb$e.|iwo">pd</field><field name="MEMBER">read_csv</field><data>pd:read_csv</data><value name="INPUT"><block type="text" id="HyH?(x3/MuPXE`T5;)[@"><field name="TEXT">datasets/binary.csv</field></block></value></block></value></block><block type="variables_get" id="uS,Sc{|xLGBqCK`F3*-*" x="8" y="300"><field name="VAR" id="t[n^Fcp7,s93E17ZZ9J6">dataframe</field></block></xml>
# -
# Show the 3rd row to the final row (total 398 rows).
# +
# Rows from index 2 (the 3rd row) through the end of the frame.
dataframe[2 : ]
#<xml xmlns="https://developers.google.com/blockly/xml"><variables><variable id="t[n^Fcp7,s93E17ZZ9J6">dataframe</variable></variables><block type="lists_getSublist" id="5hFiqAq|QPxp%Xc:h/@A" x="8" y="518"><mutation at1="true" at2="false"></mutation><field name="WHERE1">FROM_START</field><field name="WHERE2">LAST</field><value name="LIST"><block type="variables_get" id="Y`nWHnCXvg4!GpZ![Z8S"><field name="VAR" id="t[n^Fcp7,s93E17ZZ9J6">dataframe</field></block></value><value name="AT1"><block type="math_number" id=":;n{~x;%6oD9(oxp_*Pt"><field name="NUM">3</field></block></value></block></xml>
# -
# Show the last two columns of the data.
# +
# Select the last two columns by name.
dataframe[['gpa', 'rank']]
#<xml xmlns="https://developers.google.com/blockly/xml"><variables><variable id="t[n^Fcp7,s93E17ZZ9J6">dataframe</variable></variables><block type="indexer" id="oVNd/g7vyxV[^cAbT$JX" x="8" y="300"><field name="VAR" id="t[n^Fcp7,s93E17ZZ9J6">dataframe</field><value name="INDEX"><block type="lists_create_with" id="^oGt7#=i@OMTRwaf:kdd"><mutation items="2"></mutation><value name="ADD0"><block type="text" id="=k__Y{.gxL5z.AJpll.A"><field name="TEXT">gpa</field></block></value><value name="ADD1"><block type="text" id="uQyKHkAg(%zJh!AAX=[#"><field name="TEXT">rank</field></block></value></block></value></block></xml>
# -
# Show the rows where `gpa` is less than 3.
# +
# Boolean-mask filter: rows whose GPA is below 3.
dataframe[(dataframe['gpa'] < 3)]
#<xml xmlns="https://developers.google.com/blockly/xml"><variables><variable id="t[n^Fcp7,s93E17ZZ9J6">dataframe</variable></variables><block type="indexer" id="Y*eD.jv!V*/5.i:+x([t" x="19" y="390"><field name="VAR" id="t[n^Fcp7,s93E17ZZ9J6">dataframe</field><value name="INDEX"><block type="logic_compare" id="FbNIQ=Q,*rY%1.XvHA^q"><field name="OP">LT</field><value name="A"><block type="indexer" id="d3u5qTTbrdfW-F2:-QR2"><field name="VAR" id="t[n^Fcp7,s93E17ZZ9J6">dataframe</field><value name="INDEX"><block type="text" id="rX][Xdf,hW{pZ4}3u$Sq"><field name="TEXT">gpa</field></block></value></block></value><value name="B"><block type="math_number" id="P*wB;;JrXkeRhwQyoDmg"><field name="NUM">3</field></block></value></block></value></block></xml>
# -
# <!-- -->
# <!-- -->
|
E1/solutions/ps-far-na-solution.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
pwd
# Dependencies and Setup
import pandas as pd
import pathlib
import csv
import scipy.stats as st
import numpy as np
# +
# Create paths for csv
# 2015 employment extracts for the three district types.
employment_unified_2015_data = "../Merged Employment/Resources/output_employment_data/Unified2015.csv"
employment_elementary_2015_data = "../Merged Employment/Resources/output_employment_data/Elementary2015.csv"
employment_secondary_2015_data = "../Merged Employment/Resources/output_employment_data/Secondary2015.csv"
# +
# UNIFIED 2015
employment_unified_2015 = pd.read_csv(employment_unified_2015_data)
# pd.DataFrame(employment_unified_2015)
# +
# ELEMENTARY 2015
employment_elementary_2015 = pd.read_csv(employment_elementary_2015_data)
# pd.DataFrame(employment_elementary_2015)
# +
# SECONDARY 2015
employment_secondary_2015 = pd.read_csv(employment_secondary_2015_data)
# pd.DataFrame(employment_secondary_2015)
# +
# Merge the three 2015 district-type files into one year-tagged dataset.
list_of_files = [
    "../Merged Employment/Resources/output_employment_data/Unified2015.csv",
    "../Merged Employment/Resources/output_employment_data/Elementary2015.csv",
    "../Merged Employment/Resources/output_employment_data/Secondary2015.csv"
]
# Format pd.read_csv(filename).assign(year=2015)
employment_2015_df = []
# Fix: the previous enumerate() index was never used.
for filename in list_of_files:
    X1 = pd.read_csv(filename)
    X1["year"] = 2015
    employment_2015_df.append(X1)
# Combine the data into a single dataset
employment_2015_data = pd.concat(employment_2015_df)
employment_2015_merged = pd.DataFrame(employment_2015_data)
# +
# Save csv file
employment_2015_merged.to_csv("../Merged Employment/Resources/merged_employment_yearly/Merged_Employment_2015.csv")
# -
employment_2015_merged
|
Employment Data Output/Data Cleaning/2015 Employment Merged.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
# # Programming with Python
# ## Basic data types
# Like most languages, Python has many basic types including Integers, Floats, Booleans, and Strings. These data types behave in ways that are like other programming languages (C/C++, Java, MATLAB).
# **Numbers**: Integers and floats work as you would expect from other languages
# Note that unlike many languages, Python does **not** have unary increment (`x++`) or decrement (`x--`) operators. “#” starts the comments inline.
# !which python
# --- Integers and floats: arithmetic, augmented assignment, type() ---
x = 3
print(type(x)) # Prints "<class 'int'>"
print(x) # Prints "3"
print(x + 1) # Addition; prints "4"
print(x - 1) # Subtraction; prints "2"
print(x * 2) # Multiplication; prints "6"
print(x ** 2) # Exponentiation; prints "9"
x += 1
print(x) # Prints "4"
x *= 2
print(x) # Prints "8"
y = 2.5
print(type(y)) # Prints "<class 'float'>"
print(y, y + 1, y * 2, y ** 2) # Prints "2.5 3.5 5.0 6.25"
# Python also has built-in types for complex numbers; you can find all the details in the [documentation](https://docs.python.org/3.6/library/stdtypes.html#numeric-types-int-float-complex).
# **Booleans**: Python implements all the usual operators for Boolean logic, but uses English words rather than symbols (&&, ||, etc.):
# --- Booleans: and / or / not, and != as XOR ---
t = True
f = False
print(type(t)) # Prints "<class 'bool'>"
print(t and f) # Logical AND; prints "False"
print(t or f) # Logical OR; prints "True"
print(not t) # Logical NOT; prints "False"
print(t != f) # Logical XOR; prints "True"
# **Strings**: Python has great support for strings:
# --- Strings: literals, len, concatenation, %-formatting ---
hello = 'hello' # String literals can use single quotes
world = "world" # or double quotes; it does not matter.
print(hello) # Prints "hello"
print(len(hello)) # String length; prints "5"
hw = hello + ' ' + world # String concatenation
print(hw) # prints "hello world"
hw12 = '%s %s %d' % (hello, world, 12) # sprintf style string formatting
print(hw12) # prints "hello world 12"
# You can find a list of all string methods in the [documentation](https://docs.python.org/3.6/library/stdtypes.html#string-methods).
# ## Containers
# Python includes several built-in container types: lists, dictionaries, sets, and tuples.
# ### Lists
# A list is the Python equivalent of an array, but is resizable and can contain elements of different types:
# Note: python starts index from “0”, for example, xs = [3,1,2], this first element of xs is xs[0] that is number 3.
#
# --- Lists: indexing, mutation, append/pop ---
xs = [3, 1, 2] # Create a list
print(xs, xs[2]) # Prints "[3, 1, 2] 2"
print(xs[-1]) # Negative indices count from the end of the list; prints "2"
xs[2] = 'foo' # Lists can contain elements of different types
print(xs) # Prints "[3, 1, 'foo']"
xs.append('bar') # Add a new element to the end of the list
print(xs) # Prints "[3, 1, 'foo', 'bar']"
x = xs.pop() # Remove and return the last element of the list
print(x, xs) # Prints "bar [3, 1, 'foo']"
# As usual, you can find all the gory details about lists in the [documentation](https://docs.python.org/3.6/tutorial/datastructures.html#more-on-lists).
# **Slicing**: In addition to accessing list elements one at a time, Python provides concise syntax to access sub-lists; this is known as slicing:
# --- Slicing: start/stop/negative indices and slice assignment ---
nums = list(range(5)) # range is a built-in function that creates a list of integers
print(nums) # Prints "[0, 1, 2, 3, 4]"
print(nums[2:4]) # Get a slice from index 2 to 4 (exclusive); prints "[2, 3]"
print(nums[2:]) # Get a slice from index 2 to the end; prints "[2, 3, 4]"
print(nums[:2]) # Get a slice from the start to index 2 (exclusive); prints "[0, 1]"
print(nums[:]) # Get a slice of the whole list; prints "[0, 1, 2, 3, 4]"
print(nums[:-1]) # Slice indices can be negative; prints "[0, 1, 2, 3]"
nums[2:4] = [8, 9] # Assign a new sublist to a slice
print(nums) # Prints "[0, 1, 8, 9, 4]"
# We will see slicing again in the context of numpy arrays from **Numpy** Packages later.
# **Loops**: You can loop over the elements of a list like this:
# --- Loops over lists, enumerate, and list comprehensions ---
animals = ['cat', 'dog', 'monkey']
for animal in animals:
    print(animal)
# Prints "cat", "dog", "monkey", each on its own line.
# If you want access to the index of each element within the body of a loop, use the built-in **enumerate** function:
# +
animals = ['cat', 'dog', 'monkey']
for idx, animal in enumerate(animals):
    print('#%d: %s' % (idx + 1, animal))
# Prints "#1: cat", "#2: dog", "#3: monkey", each on its own line
for idx, animal in enumerate(animals):
    print(f'#{idx}: {animal}')
# -
# **List comprehensions**: When programming, frequently we want to transform one type of data into another. As a simple example, consider the following code that computes square numbers:
nums = [0, 1, 2, 3, 4]
squares = []
for x in nums:
    squares.append(x ** 2)
print(squares) # Prints [0, 1, 4, 9, 16]
# You can make this code simpler using a **list comprehension**:
nums = [0, 1, 2, 3, 4]
squares = [x ** 2 for x in nums]
print(squares) # Prints [0, 1, 4, 9, 16]
# List comprehensions can also contain conditions:
nums = [0, 1, 2, 3, 4]
even_squares = [x ** 2 for x in nums if x % 2 == 0]
print(even_squares) # Prints "[0, 4, 16]"
# ### Dictionaries
# A dictionary stores (key, value) pairs, like a **Map** in **Java** or an **object** in **Javascript**. You can use it like this:
# --- Dictionaries: lookup, membership, get with default, deletion ---
d = {'cat': 'cute', 'dog': 'furry'} # Create a new dictionary with some data
print(d['cat']) # Get an entry from a dictionary; prints "cute"
print('cat' in d) # Check if a dictionary has a given key; prints "True"
d['fish'] = 'wet' # Set an entry in a dictionary
print(d['fish']) # Prints "wet"
# print(d['monkey']) # KeyError: 'monkey' not a key of d
print(d.get('monkey', 'N/A')) # Get an element with a default; prints "N/A"
print(d.get('fish', 'N/A')) # Get an element with a default; prints "wet"
del d['fish'] # Remove an element from a dictionary
print(d.get('fish', 'N/A')) # "fish" is no longer a key; prints "N/A"
# You can find all you need to know about dictionaries in the [documentation](https://docs.python.org/3.6/library/stdtypes.html#dict).
# **Loops**: It is easy to iterate over the keys in a dictionary:
d = {'person': 2, 'cat': 4, 'spider': 8}
for animal in d:
    legs = d[animal]
    print('A %s has %d legs' % (animal, legs))
# Prints "A person has 2 legs", "A cat has 4 legs", "A spider has 8 legs"
# If you want access to keys and their corresponding values, use the items method:
d = {'person': 2, 'cat': 4, 'spider': 8}
for animal, legs in d.items():
    print('A %s has %d legs' % (animal, legs))
# Prints "A person has 2 legs", "A cat has 4 legs", "A spider has 8 legs"
# **Dictionary comprehensions**: These are similar to list comprehensions, but allow you to easily construct dictionaries. For example:
nums = [0, 1, 2, 3, 4]
even_num_to_square = {x: x ** 2 for x in nums if x % 2 == 0}
print(even_num_to_square) # Prints "{0: 0, 2: 4, 4: 16}"
# ### Sets
# A set is an unordered collection of distinct elements. As a simple example, consider the following:
# --- Sets: membership, add/remove, unordered iteration ---
animals = {'cat', 'dog'}
print('cat' in animals) # Check if an element is in a set; prints "True"
print('fish' in animals) # prints "False"
animals.add('fish') # Add an element to a set
print('fish' in animals) # Prints "True"
print(len(animals)) # Number of elements in a set; prints "3"
animals.add('cat') # Adding an element that is already in the set does nothing
print(len(animals)) # Prints "3"
animals.remove('cat') # Remove an element from a set
print(len(animals)) # Prints "2"
# As usual, everything you want to know about sets can be found in the [documentation](https://docs.python.org/3.6/library/stdtypes.html#set).
# **Loops**: Iterating over a set has the same syntax as iterating over a list; however, since sets are unordered, you cannot make assumptions about the order in which you visit the elements of the set:
animals = {'cat', 'dog', 'fish'}
for idx, animal in enumerate(animals):
    print('#%d: %s' % (idx + 1, animal))
# Prints "#1: fish", "#2: dog", "#3: cat"
# **Set comprehensions**: Like lists and dictionaries, we can easily construct sets using set comprehensions:
from math import sqrt
nums = {int(sqrt(x)) for x in range(30)}
print(nums) # Prints "{0, 1, 2, 3, 4, 5}"
# ### Tuples
# A tuple is an (immutable) ordered list of values. A tuple is in many ways similar to a list; one of the most important differences is that tuples can be used as keys in dictionaries and as elements of sets, while lists cannot. Here is a trivial example:
#
# --- Tuples: immutable sequences usable as dictionary keys ---
d = {(x, x + 1): x for x in range(10)} # Create a dictionary with tuple keys
t = (5, 6) # Create a tuple
print(type(t)) # Prints "<class 'tuple'>"
print(d[t]) # Prints "5"
print(d[(1, 2)]) # Prints "1"
# The [documentation](https://docs.python.org/3.6/tutorial/datastructures.html#tuples-and-sequences) has more information about tuples.
# ## Functions
# Python functions are defined using the def keyword. For example:
#
# +
def sign(x):
    """Classify a number as 'positive', 'negative', or 'zero'."""
    if x == 0:
        return 'zero'
    return 'positive' if x > 0 else 'negative'

for value in [-1, 0, 1]:
    print(sign(value))
# Prints "negative", "zero", "positive"
# -
# We will often define functions to take optional keyword arguments, like this:
# +
def hello(name, loud=False):
    """Print a greeting for *name*; shout it when *loud* is true."""
    greeting = ('HELLO, %s!' % name.upper()) if loud else ('Hello, %s' % name)
    print(greeting)

hello('Bob') # Prints "Hello, Bob"
hello('Fred', loud=True) # Prints "HELLO, FRED!"
# -
# There is a lot more information about Python functions in the [documentation](https://docs.python.org/3.6/tutorial/controlflow.html#defining-functions).
# ## Classes
# The syntax for defining classes in Python is straightforward:
#
# +
class Greeter(object):
    """Minimal example class: stores a name and prints greetings for it."""

    def __init__(self, name):
        # Create an instance variable holding the greetee's name.
        self.name = name

    def greet(self, loud=False):
        """Print a greeting; uppercased and exclaimed when *loud* is true."""
        message = ('HELLO, %s!' % self.name.upper()) if loud else ('Hello, %s' % self.name)
        print(message)

g = Greeter('Fred') # Construct an instance of the Greeter class
g.greet() # Call an instance method; prints "Hello, Fred"
g.greet(loud=True) # Call an instance method; prints "HELLO, FRED!"
# -
# You can read a lot more about Python classes in the [documentation](https://docs.python.org/3.6/tutorial/classes.html).
|
01.Python/Python_01_Basic.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # 作業 : (Kaggle)鐵達尼生存預測
# ***
# - 分數以網站評分結果為準, 請同學實際將提交檔(*.csv)上傳試試看
# https://www.kaggle.com/c/titanic/submit
# # [作業目標]
# - 試著模仿範例寫法, 在鐵達尼生存預測中, 觀察觀查混合泛化 (Blending) 的寫法與效果
# # [作業重點]
# - 觀察混合泛化的準確度 (In[14]), 是否比單一模型準確度為高 (In[11~13])
# - 除了我們的權重, 同學也可以試著自行調整權重 (注意:權重和=1), 看看有什麼影響
# - Hint : 除了權重, 分類預測的調整, 還可以調整什麼地方?
# +
# 做完特徵工程前的所有準備 (與前範例相同)
import pandas as pd
import numpy as np
import copy, time
import warnings
warnings.filterwarnings('ignore')
from IPython.display import display
from sklearn.preprocessing import MinMaxScaler
from sklearn.model_selection import cross_val_score
from sklearn.linear_model import LogisticRegression
from sklearn.preprocessing import LabelEncoder
data_path = 'data/'
df_train = pd.read_csv(data_path + 'titanic_train.csv')
df_test = pd.read_csv(data_path + 'titanic_test.csv')
# Separate the target and the submission ids, then stack train+test so the
# feature engineering below is applied to both sets consistently.
train_Y = df_train['Survived']
ids = df_test['PassengerId']
df_train = df_train.drop(['PassengerId', 'Survived'] , axis=1)
df_test = df_test.drop(['PassengerId'] , axis=1)
df = pd.concat([df_train,df_test])
df.head()
# -
# 檢查 DataFrame 空缺值的狀態
def na_check(df_data):
    """Display the top-10 columns of *df_data* ranked by percent missing values."""
    na_ratio = (df_data.isnull().sum() / len(df_data)) * 100
    na_ratio = na_ratio.drop(na_ratio[na_ratio == 0].index).sort_values(ascending=False)
    display(pd.DataFrame({'Missing Ratio' : na_ratio}).head(10))
na_check(df)
# 以下 In[3]~In[10] 只是鐵達尼預測中的一組特徵工程, 並以此組特徵工程跑參數, 若更換其他特徵工程, In[10]的參數需要重新跑
# Sex : map directly to male=0, female=1
df["Sex"] = df["Sex"].map({"male": 0, "female":1})
# Fare : log-transform to reduce skew; keep 0 as 0
df["Fare"] = df["Fare"].map(lambda i: np.log(i) if i > 0 else 0)
# Age : fill missing values with the median
df["Age"] = df["Age"].fillna(df['Age'].median())
# Title feature engineering : bucket the honorifics by type, then one-hot encode
df_title = [i.split(",")[1].split(".")[0].strip() for i in df["Name"]]
df["Title"] = pd.Series(df_title)
df["Title"] = df["Title"].replace(['Lady', 'the Countess','Countess','Capt', 'Col','Don', 'Dr', 'Major', 'Rev', 'Sir', 'Jonkheer', 'Dona'], 'Rare')
df["Title"] = df["Title"].map({"Master":0, "Miss":1, "Ms" : 1 , "Mme":1, "Mlle":1, "Mrs":1, "Mr":2, "Rare":3})
df["Title"] = df["Title"].astype(int)
df = pd.get_dummies(df, columns = ["Title"])
# New feature : family size (Fsize), with separate indicator columns per size band
df["Fsize"] = df["SibSp"] + df["Parch"] + 1
df['Single'] = df['Fsize'].map(lambda s: 1 if s == 1 else 0)
df['SmallF'] = df['Fsize'].map(lambda s: 1 if s == 2 else 0)
df['MedF'] = df['Fsize'].map(lambda s: 1 if 3 <= s <= 4 else 0)
df['LargeF'] = df['Fsize'].map(lambda s: 1 if s >= 5 else 0)
# Ticket : if not purely numeric, take the prefix before the first space
# (dropping '.' and '/'); purely numeric tickets become 'X'; then one-hot encode
Ticket = []
for i in list(df.Ticket):
    if not i.isdigit() :
        Ticket.append(i.replace(".","").replace("/","").strip().split(' ')[0])
    else:
        Ticket.append("X")
df["Ticket"] = Ticket
df = pd.get_dummies(df, columns = ["Ticket"], prefix="T")
# Cabin : group by the first character, then one-hot encode
df["Cabin"] = pd.Series([i[0] if not pd.isnull(i) else 'X' for i in df['Cabin'] ])
df = pd.get_dummies(df, columns = ["Cabin"], prefix="Cabin")
# +
# Embarked, Pclass : one-hot encode
df = pd.get_dummies(df, columns = ["Embarked"], prefix="Em")
df["Pclass"] = df["Pclass"].astype("category")
df = pd.get_dummies(df, columns = ["Pclass"], prefix="Pc")
# Drop the Name column
df.drop(labels = ["Name"], axis = 1, inplace = True)
# -
# Check missing values and preview the current table
na_check(df)
df.head()
# +
# Min-max scale all features to [0, 1]
df = MinMaxScaler().fit_transform(df)
# Split the transformed data df back into train_X / test_X
train_num = train_Y.shape[0]
train_X = df[:train_num]
test_X = df[train_num:]
# Three models : logistic regression / gradient boosting / random forest,
# with hyperparameters found via Random Search
from sklearn.linear_model import LogisticRegression
from sklearn.ensemble import GradientBoostingClassifier, RandomForestClassifier
lr = LogisticRegression(tol=0.001, penalty='l2', fit_intercept=True, C=1.0)
gdbt = GradientBoostingClassifier(tol=100, subsample=0.75, n_estimators=250, max_features=20,
                                  max_depth=6, learning_rate=0.03)
rf = RandomForestClassifier(n_estimators=100, min_samples_split=2, min_samples_leaf=1,
                            max_features='sqrt', max_depth=6, bootstrap=True)
# -
# 線性迴歸預測檔 (結果有部分隨機, 請以 Kaggle 計算的得分為準, 以下模型同理)
lr.fit(train_X, train_Y)
lr_pred = lr.predict_proba(test_X)[:,1]
sub = pd.DataFrame({'PassengerId': ids, 'Survived': lr_pred})
sub['Survived'] = sub['Survived'].map(lambda x:1 if x>0.5 else 0)
sub.to_csv('titanic_lr.csv', index=False)
# 梯度提升機預測檔
gdbt.fit(train_X, train_Y)
gdbt_pred = gdbt.predict_proba(test_X)[:,1]
sub = pd.DataFrame({'PassengerId': ids, 'Survived': gdbt_pred})
sub['Survived'] = sub['Survived'].map(lambda x:1 if x>0.5 else 0)
sub.to_csv('titanic_gdbt.csv', index=False)
# 隨機森林預測檔
rf.fit(train_X, train_Y)
rf_pred = rf.predict_proba(test_X)[:,1]
sub = pd.DataFrame({'PassengerId': ids, 'Survived': rf_pred})
sub['Survived'] = sub['Survived'].map(lambda x:1 if x>0.5 else 0)
sub.to_csv('titanic_rf.csv', index=False)
# # Homework
# * Although this is still blending, classification differs from regression in
#   several ways: the Titanic target is 'survived/died', so the output is 0 or 1.
#   To blend with weights we must mix in probability form, which is why the
#   cells above already produced each model's predictions as probabilities.
#   (Complete the code below and submit the result to Kaggle.)
#
# * But is blending alone enough to beat the single models?
#   As a challenge, think about other ways to improve the blended result.
# Blended submission file: weighted average of the three probability vectors,
# then threshold at 0.5.
"""
Your Code Here
"""
blending_pred = lr_pred*0.1 + gdbt_pred*0.1 + rf_pred*0.8
sub = pd.DataFrame({'PassengerId': ids, 'Survived': blending_pred})
sub['Survived'] = sub['Survived'].map(lambda x:1 if x>0.5 else 0)
sub.to_csv('titanic_blending.csv', index=False)
|
HomeWork/Day_049_HW.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 2
# name: python2
# ---
# + [markdown] id="i96Gnqezd8an" colab_type="text"
# # MNIST - teaching computers to read
# + [markdown] id="BHrjJMAOmsu6" colab_type="text"
# First lets enable and check GPUs are working on this notebook. Should shave a few secs-mins off training time later
# + id="M2EwVrl3nlHp" colab_type="code" colab={"autoexec": {"startup": false, "wait_interval": 0}}
# GPU setup
import tensorflow as tf
import timeit
# See https://www.tensorflow.org/tutorials/using_gpu#allowing_gpu_memory_growth
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
with tf.device('/cpu:0'):
random_image_cpu = tf.random_normal((100, 100, 100, 3))
net_cpu = tf.layers.conv2d(random_image_cpu, 32, 7)
net_cpu = tf.reduce_sum(net_cpu)
with tf.device('/gpu:0'):
random_image_gpu = tf.random_normal((100, 100, 100, 3))
net_gpu = tf.layers.conv2d(random_image_gpu, 32, 7)
net_gpu = tf.reduce_sum(net_gpu)
sess = tf.Session(config=config)
# Test execution once to detect errors early.
try:
sess.run(tf.global_variables_initializer())
except tf.errors.InvalidArgumentError:
print(
'\n\nThis error most likely means that this notebook is not '
'configured to use a GPU. Change this in Notebook Settings via the '
'command palette (cmd/ctrl-shift-P) or the Edit menu.\n\n')
raise
def cpu():
sess.run(net_cpu)
def gpu():
sess.run(net_gpu)
# Runs the op several times.
# print('Time (s) to convolve 32x7x7x3 filter over random 100x100x100x3 images '
# '(batch x height x width x channel). Sum of ten runs.')
# print('CPU (s):')
# cpu_time = timeit.timeit('cpu()', number=10, setup="from __main__ import cpu")
# print(cpu_time)
# print('GPU (s):')
# gpu_time = timeit.timeit('gpu()', number=10, setup="from __main__ import gpu")
# print(gpu_time)
# print('GPU speedup over CPU: {}x'.format(int(cpu_time/gpu_time)))
# sess.close()
# + [markdown] id="IOMjcTxPeBpx" colab_type="text"
# Teaching computers to see numbers with fancy math.
#
# In tensorflow we first outline and create computational graphs and then execute them.
#
# Think of a written number input to a computer. It's 28x28 pixels. We can unroll this into a simpler vector which still retains identifying information, e.g. an 8 has more lines and dark spaces than a 0.
# + id="IvCYFOpveEoI" colab_type="code" colab={"autoexec": {"startup": false, "wait_interval": 0}, "output_extras": [{}], "base_uri": "https://localhost:8080/", "height": 241} outputId="8871edb3-89e2-4fc0-e7f7-1c325d7f4a3b" executionInfo={"status": "ok", "timestamp": 1520307796820, "user_tz": 300, "elapsed": 347, "user": {"displayName": "Kaust<NAME>", "photoUrl": "//lh6.googleusercontent.com/-SGJ1BLtauTk/AAAAAAAAAAI/AAAAAAAAAAA/4Cxjig_XDts/s50-c-k-no/photo.jpg", "userId": "105154027733464709839"}}
# Toy illustration: build a 10x10 random 0/1 "image" row by row, then show how
# the rows unroll into one flat vector (the way MNIST images are unrolled).
import random

total = []
for _ in range(10):
    row = [random.randint(0, 1) for _ in range(10)]
    total.extend(row)
    print(row)
print('Unrolled to:')
print(total)
# + [markdown] id="75A0REeCFW1m" colab_type="text"
# First we need to get the training data. MNIST is just a large set of handwritten numbers someone painstakingly labelled.
#
# Tensorflow provides some nice convenient builtins for this.
#
# Note: The input data comes in as one_hot vectors, i.e. they're large swaths of 0s with >0s where the black marks are for numbers. These are unrolled from their normal shape into a linear shape of 768pixels
# + id="hrIh3qM1CU7Y" colab_type="code" colab={"autoexec": {"startup": false, "wait_interval": 0}, "output_extras": [{}], "base_uri": "https://localhost:8080/", "height": 85} outputId="4bbbf57a-6d98-47f1-f24f-f344d475a289" executionInfo={"status": "ok", "timestamp": 1520307799442, "user_tz": 300, "elapsed": 795, "user": {"displayName": "<NAME>", "photoUrl": "//lh6.googleusercontent.com/-SGJ1BLtauTk/AAAAAAAAAAI/AAAAAAAAAAA/4Cxjig_XDts/s50-c-k-no/photo.jpg", "userId": "105154027733464709839"}}
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data
import matplotlib.pyplot as plt
import math
# Download/cache MNIST. Labels come back one-hot encoded (length-10 vectors);
# images are flattened 784-pixel row vectors.
DATA_DIR = '/tmp/data'
mnist = input_data.read_data_sets(DATA_DIR, one_hot=True)
# + [markdown] id="jH-5L_wdCmDa" colab_type="text"
# We create a placeholder to store our inputs, and a variable that will evolve with training to better predict the outcome
#
# We use a linear function to then map our simplified input into evidence.
#
# We also add a bias so we can say that some things are more independent of the input
#
# This is simply doing a matrix multiplication.
#
# + id="gqKVJb1r-9Rx" colab_type="code" colab={"autoexec": {"startup": false, "wait_interval": 0}}
# Model parameters: weights (784 inputs -> 10 classes) and per-class bias,
# zero-initialized; training evolves them.
W = tf.Variable(tf.zeros([784, 10]))
b = tf.Variable(tf.zeros([10]))
# Input placeholder: any number of flattened 28x28 images.
x = tf.placeholder(tf.float32, [None, 784])
# Linear model: per-class evidence (logits; softmax is applied inside the loss).
y = tf.matmul(x, W) + b
# + [markdown] id="GTdigRcGGxLQ" colab_type="text"
# Next lets set up a placeholder to hold data incoming from the labels that we can use to refine and train our model.
# For this we set up another placeholder with an unknown length but a shape of 10 since we have 10 distinct digits
# + id="vClPsDYpGvRW" colab_type="code" colab={"autoexec": {"startup": false, "wait_interval": 0}}
# Ground-truth labels, one-hot over the 10 digit classes.
y_ = tf.placeholder(tf.float32, [None, 10])
# + [markdown] id="JDSsTDJxGuzk" colab_type="text"
# The next step is to set up the training.
# We use softmax with logits to map our linear model into the shape we want, a probability distribution over 10 classes.
#
# Softmax is a generalized variant of logistic regression and instead of mapping to a binary output it maps to different classes
#
# A logit is an inverse sigmoid which maps from a linear 0/1 to a bernoulli probability distribtion.
#
# The next step is implementing the cross entropy function, which derives the differences between our model outputs and the ground truth. A lower cross entropy in this case means we are closer to the truth
# + id="5dfYJm5sHNzO" colab_type="code" colab={"autoexec": {"startup": false, "wait_interval": 0}}
# Cross-entropy loss between the model's logits and the one-hot labels,
# averaged over the batch.
cross_entropy = tf.reduce_mean(
    tf.nn.softmax_cross_entropy_with_logits(labels=y_, logits=y))
# + [markdown] id="JY5XkG2HI9Gg" colab_type="text"
# Now that we've defined the computational graph:
# input -> unroll -> matmul -> softmax -> output
#
# We can set up the training mechanism.
#
# For this we'll use gradient descent, optimizing for a reduction in cross entropy.
# GD is basically a glorified chain rule that works via backwards propagation instead of forward propagation due to mathematical efficiencies. This will walk our computational graph generating derivatives to track how every node affects one output.
#
# We use GD with a learning rate of 0.5 and tensorflow will slowly shift the variables towards the direction we want.
# A higher learning rate may make us overshoot our ideal weights, a lower one may leave us stuck in a local minimum or take forever.
# + id="Xqv9C_SAImhc" colab_type="code" colab={"autoexec": {"startup": false, "wait_interval": 0}}
gd_step = tf.train.GradientDescentOptimizer(0.5).minimize(cross_entropy)
# + [markdown] id="JzlEONgiK2Pd" colab_type="text"
# Now all that's left to do is create a tensorflow session and execute our graph.
#
# We apply parameters for the number of steps we want to use and the batch size for training i.e. how many random data points we get from our training set each step. Larger batch sizes and number of steps can lead to more accurate models
#
# + id="zNKPvMknwYuN" colab_type="code" colab={"autoexec": {"startup": false, "wait_interval": 0}}
def train_and_check(NUM_STEPS, MINIBATCH_SIZE):
    """Train the softmax-regression graph and return its MNIST test accuracy.

    Runs NUM_STEPS minibatch SGD steps of size MINIBATCH_SIZE, then evaluates
    accuracy on the full test set (printed and returned as a float).
    Relies on the module-level graph ops (gd_step, x, y, y_) and the mnist
    dataset loaded above.
    """
    with tf.device('/gpu:0'):
        sess = tf.InteractiveSession()
        tf.global_variables_initializer().run()
        for _ in range(NUM_STEPS):
            batch_xs, batch_ys = mnist.train.next_batch(MINIBATCH_SIZE)
            sess.run(gd_step, feed_dict={x: batch_xs, y_: batch_ys})
        correct_prediction = tf.equal(tf.argmax(y,1), tf.argmax(y_,1))
        accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
        res = accuracy.eval(feed_dict={x: mnist.test.images, y_: mnist.test.labels})
        # Fix: the original `print res` statement is Python-2-only syntax;
        # the parenthesized form behaves identically on Python 2 and 3.
        print(res)
        return res
# + id="HUs4yuMjK11r" colab_type="code" colab={"autoexec": {"startup": false, "wait_interval": 0}, "output_extras": [{}], "base_uri": "https://localhost:8080/", "height": 34} outputId="49def603-7e44-4f67-dcdb-42e40cfeee27" executionInfo={"status": "ok", "timestamp": 1520307817059, "user_tz": 300, "elapsed": 2637, "user": {"displayName": "<NAME>", "photoUrl": "//lh6.googleusercontent.com/-SGJ1BLtauTk/AAAAAAAAAAI/AAAAAAAAAAA/4Cxjig_XDts/s50-c-k-no/photo.jpg", "userId": "105154027733464709839"}}
# Run a single training configuration; the commented-out loop below was used
# to sweep batch sizes and collect accuracies into `data`.
NUM_STEPS = 3000
MINIBATCH_SIZE = 100
steps = 1000
batch = 116
res = []
data = {}
accuracy = train_and_check(steps, batch)
# for i in range(100, 120):
# print 'results for %d steps and %d batch size' % (NUM_STEPS, i)
# accuracy = train_and_check(stepsi, batchi)
# data.update({accuracy: {'steps': stepsi, 'batch': batchi}})
# + [markdown] id="T3fhJuJzLeTw" colab_type="text"
# Now lets see how we did. Probably 92%
# + [markdown] id="dt4FEgvP055h" colab_type="text"
# I wonder how model params affect accuracy
# TODO: plot
# + id="YBPnP1G5Lfr4" colab_type="code" colab={"autoexec": {"startup": false, "wait_interval": 0}, "output_extras": [{}, {}], "base_uri": "https://localhost:8080/", "height": 286} outputId="40d21ecb-6e31-4332-c601-ced78757b3dd" executionInfo={"status": "error", "timestamp": 1520306650374, "user_tz": 300, "elapsed": 309, "user": {"displayName": "<NAME>", "photoUrl": "//lh6.googleusercontent.com/-SGJ1BLtauTk/AAAAAAAAAAI/AAAAAAAAAAA/4Cxjig_XDts/s50-c-k-no/photo.jpg", "userId": "105154027733464709839"}}
# correct_prediction = tf.equal(tf.argmax(y,1), tf.argmax(y_,1))
# accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
# print(accuracy.eval(feed_dict={x: mnist.test.images, y_: mnist.test.labels}))
import matplotlib.pyplot as plt
# Fix: the bare `print '...'` statement form is Python-2-only syntax; the
# parenthesized call works identically under both Python 2 and 3.
print('Accuracy vs step size')
# print steps
# plt.plot([v['steps'] for v in data.values() if v['batch'] == 144], [k in data.keys()])
# plt.show()
# print('Accuracy vs batch size')
# plt.plot([v['batch'] for v in data.values() if v['steps'] == 900], data.keys())
# plt.show()
# plt.plot(data.values(), data.keys())
# plt.show()
# + [markdown] id="nrKKxUjUd4ev" colab_type="text"
# # Deep MNIST
# + [markdown] id="0yQX530BMi6_" colab_type="text"
# But the machine learning gods are displeased with 92%. And we don't want to upset our eventual AI overlords, so let's see if we can do better.
#
# We'll dive a bit deeper and explore a multilayer convolutional network.
#
# The human brain is a place of chaos and noise, and this keeps us sane. So we'll do the same here to prevent overfitting.
#
# We'll be using ReLU (rectified linear unit) neurons with a small amount of noise and a slight positive initial bias to make sure they're all alive and happy.
# + id="7KAsar95MaMb" colab_type="code" colab={"autoexec": {"startup": false, "wait_interval": 0}}
def weight_variable(shape):
    """Weight tensor initialized with small truncated-normal noise
    (stddev 0.1) to break symmetry between units."""
    initial = tf.truncated_normal(shape, stddev=0.1)
    return tf.Variable(initial)
def bias_variable(shape):
    """Bias tensor initialized slightly positive (0.1) so ReLU units
    start in their active region."""
    initial = tf.constant(0.1, shape=shape)
    return tf.Variable(initial)
# + [markdown] id="udgaG-H9PcbD" colab_type="text"
# Next up is convolution and pooling.
# A biologically inspired if naive way to think of convolutions is the effect of your environment on you.
# We can draw an example from neuroscience wherein the firing of a single neuron is affected by the activation and states of the neurons around it, with many stimuli converging to either create or inhibit a response.
#
# Alternatively it's the smoothing or smearing effect of one function on another.
#
# We convolute with a stride size of 1, 1 step at a time, and pad the output to maintain the same shape
# + id="rQyYdrlIPaIA" colab_type="code" colab={"autoexec": {"startup": false, "wait_interval": 0}}
def conv2d(x, W):
    """2-D convolution with stride 1 and SAME (zero) padding, so the output
    keeps the input's spatial dimensions."""
    return tf.nn.conv2d(x, W, strides=[1, 1, 1, 1], padding='SAME')
# + [markdown] id="rhWqJ-upWZkf" colab_type="text"
# Ah pooling, here's my naive implementation of maxpool: [maxpool.py](https://github.com/kaustavha/cs_homeschool/blob/master/learn_algos/maxpool.py)
#
# The gist of it is, given a large matrix we move across with a preset sliding window size, gathering the max value in each window, thereby reducing the matrix
# + id="4okj5NnoW162" colab_type="code" colab={"autoexec": {"startup": false, "wait_interval": 0}}
def max_pool_2x2(x):
    """2x2 max-pooling with stride 2: halves both spatial dimensions."""
    return tf.nn.max_pool(x, ksize=[1, 2, 2, 1],
                        strides=[1, 2, 2, 1], padding='SAME')
# + [markdown] id="vbblp7roXYf0" colab_type="text"
# Now we can start building our CNN layer 1.
# We setup our neurons weights and bias.
#
# This layer will compute 32 output features for each 5x5 patch of the incoming tensor. We'll have one input channel into the neuron, and output channels equal to the number of features we compute. You can think of features are important or distinguishing characteristics in the image.
#
# We also create a bias variable with the shape of our output feature set.
#
# I think of this similiar to a biological neuron convoluting incoming visual info and sending it forward to different neurons for different purposes. Someone with terrible handwriting will probably understand bad handwriting better, and neurons wired towards sex steroid generation will care about different things than those wired towards hunger centers.
#
# + id="a3UcNzUVZQki" colab_type="code" colab={"autoexec": {"startup": false, "wait_interval": 0}}
# Layer-1 parameters: 32 feature maps from 5x5 patches over 1 input channel.
W_conv1 = weight_variable([5, 5, 1, 32])
b_conv1 = bias_variable([32])
# + [markdown] id="Dwjb55ncXCSq" colab_type="text"
# Now to apply the layer we begin by reshaping our incoming image to a 4D vector, and add information regarding width, height, color channels.
#
# + id="zs4haTvpaYnd" colab_type="code" colab={"autoexec": {"startup": false, "wait_interval": 0}}
# Reshape flat 784-vectors back into 4-D NHWC images: (batch, 28, 28, 1).
x_image = tf.reshape(x, [-1,28,28,1])
# + [markdown] id="GiST5LtiXYG9" colab_type="text"
# Now we pass the image through our relu neurons with the pre-set weight and bias then run maxpool over the output.
# This will reduce the size of the matrix to 14x14 since we are taking 2x2 sized windows and concentrate information our NN cares about
# + id="9XHdKjHVa9dN" colab_type="code" colab={"autoexec": {"startup": false, "wait_interval": 0}}
# Conv + ReLU, then 2x2 max-pool: 28x28x32 -> 14x14x32.
h_conv1 = tf.nn.relu(conv2d(x_image, W_conv1) + b_conv1)
h_pool1 = max_pool_2x2(h_conv1)
# + [markdown] id="C7lUqCarbIQL" colab_type="text"
# Now we add a second layer. Similiar to the last one, further reducing our image size and increasing the number of generated features
# + id="q0GPDpfGbNvt" colab_type="code" colab={"autoexec": {"startup": false, "wait_interval": 0}}
# Layer 2: 64 feature maps from 5x5 patches; pooling gives 14x14x64 -> 7x7x64.
W_conv2 = weight_variable([5, 5, 32, 64])
b_conv2 = bias_variable([64])
h_conv2 = tf.nn.relu(conv2d(h_pool1, W_conv2) + b_conv2)
h_pool2 = max_pool_2x2(h_conv2)
# + [markdown] id="LOYfeCTbbeaL" colab_type="text"
# Now we add a slightly different special layer. A densely connected layer. With 1024 neurons this layer can process the whole image and benefit from the feature generation from previous layers.
#
# This is also in some ways biologically inspired, neuronal tracts exist that run the length of the brain connecting distal zones. Although we're not sure if they play a role in processing or merely ferrying.
#
# We have to once again reshape the incoming vector, apply our weights and bias and run it through the relu function
# + id="lsa7hY6zcB4Z" colab_type="code" colab={"autoexec": {"startup": false, "wait_interval": 0}}
# Densely connected layer: flatten the 7x7x64 maps and map to 1024 ReLU units.
W_fc1 = weight_variable([7 * 7 * 64, 1024])
b_fc1 = bias_variable([1024])
h_pool2_flat = tf.reshape(h_pool2, [-1, 7*7*64])
h_fc1 = tf.nn.relu(tf.matmul(h_pool2_flat, W_fc1) + b_fc1)
# + [markdown] id="JsM91qwEbM8s" colab_type="text"
# To reduce overfitting further from what we managed with our biases and weights, we apply a dropout layer.
#
# This layer will be turned off during testing but will run during training.
# Relaxed learning but concentrated focused testing.
#
# The paper behind dropout mentions inspiration from biological reproductive fitness and how genes are dropped when offspring are created.
# There's also neuroscientific inspiration wherein we see a reduction in neuronal density and connections from pruning as people age which results in improved performance over infantile over-activated states.
# + id="-paZlhqNcEUX" colab_type="code" colab={"autoexec": {"startup": false, "wait_interval": 0}}
# Dropout: keep_prob is fed at run time (0.5 while training, 1.0 at test).
keep_prob = tf.placeholder(tf.float32)
h_fc1_drop = tf.nn.dropout(h_fc1, keep_prob)
# + [markdown] id="E791Kau5dtVh" colab_type="text"
# Finally we add a readout layer. Similiar to the softmax layer which gathered the inputs and
# + id="fxD63jTEfUcQ" colab_type="code" colab={"autoexec": {"startup": false, "wait_interval": 0}}
# Readout layer: raw logits over the 10 digit classes (softmax is applied
# later, inside the loss).
W_fc2 = weight_variable([1024, 10])
b_fc2 = bias_variable([10])
y_conv = tf.matmul(h_fc1_drop, W_fc2) + b_fc2
# + [markdown] id="I0h_lsHzfYgU" colab_type="text"
# One main difference is that we'll be replacing the simpler gradient descent optimization technique with ADAM.
#
# Unlike GD, which maintains a constant learning rate, ADAM computes individual adaptive learning rates for different parameters from estimates of the exponential moving average of the gradient, the squared gradient, and parameters beta1 and beta2 — the first and second moments of the gradient.
#
# ADAM outperforms :
# RMSProp - does well on noisy problems
# AdaGrad - does well on NLP & CV problems
# + id="vGDtA_WggxDh" colab_type="code" colab={"autoexec": {"startup": false, "wait_interval": 0}, "output_extras": [{}], "base_uri": "https://localhost:8080/", "height": 544} outputId="f3bef7e3-9223-4125-8b59-ebdafecd9fc4" executionInfo={"status": "ok", "timestamp": 1520308138027, "user_tz": 300, "elapsed": 294880, "user": {"displayName": "<NAME>", "photoUrl": "//lh6.googleusercontent.com/-SGJ1BLtauTk/AAAAAAAAAAI/AAAAAAAAAAA/4Cxjig_XDts/s50-c-k-no/photo.jpg", "userId": "105154027733464709839"}}
with tf.device('/gpu:0'):
    # Loss, optimizer (Adam, lr 1e-4) and accuracy ops for the conv net.
    cross_entropy = tf.reduce_mean(
        tf.nn.softmax_cross_entropy_with_logits(labels=y_, logits=y_conv))
    train_step = tf.train.AdamOptimizer(1e-4).minimize(cross_entropy)
    correct_prediction = tf.equal(tf.argmax(y_conv,1), tf.argmax(y_,1))
    accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
    sess = tf.InteractiveSession()
    tf.global_variables_initializer().run()
    # sess.run(tf.global_variables_initializer())
    for i in range(30000):
        batch_xs, batch_ys = mnist.train.next_batch(64)
        if i%1000 == 0:
            # Report training-batch accuracy with dropout disabled (keep_prob=1).
            train_accuracy = accuracy.eval(feed_dict={
                x:batch_xs, y_: batch_ys, keep_prob: 1.0})
            print("step %d, training accuracy %g"%(i, train_accuracy))
        sess.run(train_step, feed_dict={x: batch_xs, y_: batch_ys, keep_prob: 0.5})
    # Final accuracy on the full test set, again without dropout.
    print("test accuracy %g"%accuracy.eval(feed_dict={
        x: mnist.test.images, y_: mnist.test.labels, keep_prob: 1.0}))
# + [markdown] id="DyzucOubsYuA" colab_type="text"
# We'll probably get around 99.2% accuracy. Up 7%
# + [markdown] id="t2Ced_PRnOE8" colab_type="text"
# # Slim and nielsen net
# + [markdown] id="9GpV9wfOnZnR" colab_type="text"
# Todo: Slim and Nielsen net explanation
# + id="mDs3bbdxnRyU" colab_type="code" colab={"autoexec": {"startup": false, "wait_interval": 0}}
import tensorflow.contrib.slim as slim
# Fix: numpy is used right below but (in the original cell order) was only
# imported in a *later* cell, so running cells in order raised NameError.
import numpy as np

# Per-pixel normalization statistics, computed once over the training images.
MEAN = np.mean(mnist.train.images)
STD = np.std(mnist.train.images)
# + id="OoLaxOnDn1Br" colab_type="code" colab={"autoexec": {"startup": false, "wait_interval": 0}}
# Convenience method for reshaping images. The included MNIST dataset stores images
# as Nx784 row vectors. This method reshapes the inputs into Nx28x28x1 images that are
# better suited for convolution operations and rescales the inputs so they have a
# mean of 0 and unit variance.
import numpy as np

def resize_images(images, mean=None, std=None):
    """Normalize and reshape flat MNIST row vectors into NHWC image tensors.

    The included MNIST dataset stores images as Nx784 row vectors; this
    rescales them (subtract mean, divide by std) and reshapes to Nx28x28x1,
    which suits convolution ops.

    Parameters
    ----------
    images : array-like of shape (N, 784)
    mean, std : float, optional
        Normalization statistics. Default to the module-level MEAN / STD
        computed over the training set (backward-compatible behavior);
        pass explicit values to normalize other data consistently.

    Returns
    -------
    numpy.ndarray of shape (N, 28, 28, 1)
    """
    if mean is None:
        mean = MEAN
    if std is None:
        std = STD
    reshaped = (images - mean) / std
    reshaped = np.reshape(reshaped, [-1, 28, 28, 1])
    # Sanity-check the expected MNIST geometry.
    assert reshaped.shape[1] == 28
    assert reshaped.shape[2] == 28
    assert reshaped.shape[3] == 1
    return reshaped
# + [markdown] id="T47u1J-jn7js" colab_type="text"
# Nielsen net:
# - 28x28 input
# - conv layer w/ 20 kernels, stride = 1, size=5
# - 2x2 maxpool
# - conv layer with 40 kernels, stride=1, size=5
# - 2x2 maxpool
# - fully connected layer w/ 1000 hidden units and dropout
# - 2nd fully connected layer, same as above
# - output layer of 10
# - Trained with momentumOptimizer
# + id="J9BQXEfqQGqT" colab_type="code" colab={"autoexec": {"startup": false, "wait_interval": 0}}
def nielsen_net(inputs, is_training, scope='NielsenNet'):
    """Nielsen-style convnet: two conv+pool groups, two 1000-unit
    fully-connected layers with dropout, and a 10-way output layer.

    inputs: NHWC tensor of 28x28x1 images; is_training toggles dropout.
    Returns the 10-class logits. NOTE(review): dropout is also applied to
    the output layer here — unusual, but kept as written.
    """
    with tf.variable_scope(scope, 'NielsenNet'):
        # First Group: Convolution + Pooling 28x28x1 => 28x28x20 => 14x14x20
        net = slim.conv2d(inputs, 20, [5, 5], padding='SAME', scope='layer1-conv')
        net = slim.max_pool2d(net, 2, stride=2, scope='layer2-max-pool')
        # Second Group: Convolution + Pooling 14x14x20 => 10x10x40 => 5x5x40
        net = slim.conv2d(net, 40, [5, 5], padding='VALID', scope='layer3-conv')
        net = slim.max_pool2d(net, 2, stride=2, scope='layer4-max-pool')
        # Reshape: 5x5x40 => 1000x1
        net = tf.reshape(net, [-1, 5*5*40])
        # Fully Connected Layer: 1000x1 => 1000x1
        net = slim.fully_connected(net, 1000, scope='layer5')
        net = slim.dropout(net, is_training=is_training, scope='layer5-dropout')
        # Second Fully Connected: 1000x1 => 1000x1
        net = slim.fully_connected(net, 1000, scope='layer6')
        net = slim.dropout(net, is_training=is_training, scope='layer6-dropout')
        # Output Layer: 1000x1 => 10x1
        net = slim.fully_connected(net, 10, scope='output')
        net = slim.dropout(net, is_training=is_training, scope='output-dropout')
        return net
# + id="GKEjkVnGQOhx" colab_type="code" colab={"autoexec": {"startup": false, "wait_interval": 0}}
with tf.device('/gpu:0'):
    tf.reset_default_graph()
    sess = tf.InteractiveSession()
    tf.global_variables_initializer().run()
    # Create the placeholder tensors for the input images (x), the training labels (y_actual)
    # and whether or not dropout is active (is_training)
    x = tf.placeholder(tf.float32, shape=[None, 28, 28, 1], name='Inputs')
    y_actual = tf.placeholder(tf.float32, shape=[None, 10], name='Labels')
    is_training = tf.placeholder(tf.bool, name='IsTraining')
    # Pass the inputs into nielsen_net, outputting the logits
    logits = nielsen_net(x, is_training, scope='NielsenNetTrain')
    # Use the logits to create four additional operations:
    #
    # 1: The cross entropy of the predictions vs. the actual labels
    # 2: The number of correct predictions
    # 3: The accuracy given the number of correct predictions
    # 4: The update step, using the MomentumOptimizer
    cross_entropy = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=logits, labels=y_actual))
    correct_prediction = tf.equal(tf.argmax(logits, 1), tf.argmax(y_actual, 1))
    accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
    train_step = tf.train.MomentumOptimizer(0.01, 0.5).minimize(cross_entropy)
    # To monitor our progress using tensorboard, create two summary operations
    # to track the loss and the accuracy
    loss_summary = tf.summary.scalar('loss', cross_entropy)
    accuracy_summary = tf.summary.scalar('accuracy', accuracy)
    sess.run(tf.global_variables_initializer())
    train_writer = tf.summary.FileWriter('/tmp/nielsen-net', sess.graph)
    # Held-out validation feed, evaluated every 1000 steps below.
    eval_data = {
        x: resize_images(mnist.validation.images),
        y_actual: mnist.validation.labels,
        is_training: False
    }
    # NOTE(review): xrange is Python-2-only; this notebook targets a
    # Python 2 kernel (see the kernelspec header).
    for i in xrange(100000):
        images, labels = mnist.train.next_batch(100)
        summary, _ = sess.run([loss_summary, train_step], feed_dict={x: resize_images(images), y_actual: labels, is_training: True})
        train_writer.add_summary(summary, i)
        if i % 1000 == 0:
            summary, acc = sess.run([accuracy_summary, accuracy], feed_dict=eval_data)
            train_writer.add_summary(summary, i)
            print("Step: %5d, Validation Accuracy = %5.2f%%" % (i, acc * 100))
    # Final accuracy on the held-out test set (dropout disabled).
    test_data = {
        x: resize_images(mnist.test.images),
        y_actual: mnist.test.labels,
        is_training: False
    }
    acc = sess.run(accuracy, feed_dict=test_data)
    print("Test Accuracy = %5.2f%%" % (100 * acc))
# + [markdown] id="6LWelzcsQOOh" colab_type="text"
#
# + [markdown] id="itwIRDXCQS8I" colab_type="text"
# # Nielsen net with more layers and ADAM, WIP
# + id="mPJuW0GKn21Z" colab_type="code" colab={"autoexec": {"startup": false, "wait_interval": 0}}
def nielsen_net(inputs, is_training, scope='NielsenNet'):
    """WIP variant of nielsen_net (redefines the function of the same name
    above). The commented lines sketch a deeper MLP head that was being
    experimented with. Returns the 10-class logits."""
    with tf.variable_scope(scope, 'NielsenNet'):
        # First Group: Convolution + Pooling 28x28x1 => 28x28x20 => 14x14x20
        net = slim.conv2d(inputs, 20, [5, 5], padding='SAME', scope='layer1-conv')
        net = slim.max_pool2d(net, 2, stride=2, scope='layer2-max-pool')
        # Second Group: Convolution + Pooling 14x14x20 => 10x10x40 => 5x5x40
        net = slim.conv2d(net, 40, [5, 5], padding='VALID', scope='layer3-conv')
        net = slim.max_pool2d(net, 2, stride=2, scope='layer4-max-pool')
        # Reshape: 5x5x40 => 1000x1
        net = tf.reshape(net, [-1, 5*5*40])
        # MLP
        # net = slim.stack(net, slim.fully_connected, [1000,1024,2048], scope='fc')
        # # Fully Connected Layer: 1000x1 => 1000x1
        net = slim.fully_connected(net, 1000, scope='layer5')
        net = slim.dropout(net, is_training=is_training, scope='layer5-dropout')
        # # Second Fully Connected: 1000x1 => 1000x1
        net = slim.fully_connected(net, 1000, scope='layer6')
        net = slim.dropout(net, is_training=is_training, scope='layer6-dropout')
        # # Second Fully Connected: 1000x1 => 1000x1
        # net = slim.fully_connected(net, 1000, scope='layer7')
        # net = slim.dropout(net, is_training=is_training, scope='layer7-dropout')
        # Output Layer: 1000x1 => 10x1
        net = slim.fully_connected(net, 10, scope='output')
        net = slim.dropout(net, is_training=is_training, scope='output-dropout')
        return net
# + id="EFC-w9JzXrq9" colab_type="code" colab={"autoexec": {"startup": false, "wait_interval": 0}}
def mcdnnSingle(inputs, is_training, scope='mcdnn'):
    """Single column of an MCDNN-style net: two conv+pool groups, then a
    150-unit fully-connected layer and a 10-way output.

    inputs: NHWC tensor of 28x28x1 images. is_training is accepted for
    interface parity with nielsen_net (no dropout is used here yet).
    Returns the 10-class logits.
    """
    with tf.variable_scope(scope, 'mcdnn'):
        net = slim.conv2d(inputs, 20, [4, 4], padding='SAME', scope='layer1-conv')
        net = slim.max_pool2d(net, 2, stride=2, scope='layer2-max-pool')
        # BUG FIX: the second conv previously read `inputs` again, silently
        # discarding the first conv/pool group; chain it from `net` instead.
        net = slim.conv2d(net, 40, [5, 5], padding='SAME', scope='layer3-conv')
        net = slim.max_pool2d(net, 3, stride=3, scope='layer4-max-pool')
        net = slim.fully_connected(net, 150, scope='layer5-fully-connected')
        net = slim.fully_connected(net, 10, scope='output')
        return net
# + [markdown] id="snEwmOEwXnIN" colab_type="text"
# def mcdnnSingle:
#
# + id="jL_QhoPLohGs" colab_type="code" colab={"autoexec": {"startup": false, "wait_interval": 0}, "output_extras": [{}], "base_uri": "https://localhost:8080/", "height": 1816} outputId="fb476651-95e4-412b-a374-05c0d64170a2" executionInfo={"status": "error", "timestamp": 1520307619683, "user_tz": 300, "elapsed": 1017, "user": {"displayName": "<NAME>", "photoUrl": "//lh6.googleusercontent.com/-SGJ1BLtauTk/AAAAAAAAAAI/AAAAAAAAAAA/4Cxjig_XDts/s50-c-k-no/photo.jpg", "userId": "105154027733464709839"}}
with tf.device('/gpu:0'):
    tf.reset_default_graph()
    sess = tf.InteractiveSession()
    tf.global_variables_initializer().run()
    # Create the placeholder tensors for the input images (x), the training labels (y_actual)
    # and whether or not dropout is active (is_training)
    x = tf.placeholder(tf.float32, shape=[None, 28, 28, 1], name='Inputs')
    y_actual = tf.placeholder(tf.float32, shape=[None, 10], name='Labels')
    is_training = tf.placeholder(tf.bool, name='IsTraining')
    # Pass the inputs into nielsen_net, outputting the logits
    # logits = nielsen_net(x, is_training, scope='NielsenNetTrain')
    logits = mcdnnSingle(x, is_training, scope='mcdnn')
    # Use the logits to create four additional operations:
    #
    # 1: The cross entropy of the predictions vs. the actual labels
    # 2: The number of correct predictions
    # 3: The accuracy given the number of correct predictions
    # 4: The update step, using the MomentumOptimizer
    cross_entropy = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=logits, labels=y_actual))
    correct_prediction = tf.equal(tf.argmax(logits, 1), tf.argmax(y_actual, 1))
    accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
    # swap put momentum for adam
    train_step = tf.train.AdamOptimizer(1e-4).minimize(cross_entropy)
    # train_step = tf.train.MomentumOptimizer(0.01, 0.5).minimize(cross_entropy)
    # To monitor our progress using tensorboard, create two summary operations
    # to track the loss and the accuracy
    loss_summary = tf.summary.scalar('loss', cross_entropy)
    accuracy_summary = tf.summary.scalar('accuracy', accuracy)
    sess.run(tf.global_variables_initializer())
    train_writer = tf.summary.FileWriter('/tmp/nielsen-net', sess.graph)
    # Held-out validation feed, evaluated every 1000 steps below.
    eval_data = {
        x: resize_images(mnist.validation.images),
        y_actual: mnist.validation.labels,
        is_training: False
    }
    steps = 900 # original is 100k
    batch = 112 # original is 100
    # NOTE(review): xrange is Python-2-only (this notebook targets a Py2 kernel).
    for i in xrange(steps):
        images, labels = mnist.train.next_batch(batch)
        summary, _ = sess.run([loss_summary, train_step], feed_dict={x: resize_images(images), y_actual: labels, is_training: True})
        train_writer.add_summary(summary, i)
        if i % 1000 == 0:
            summary, acc = sess.run([accuracy_summary, accuracy], feed_dict=eval_data)
            train_writer.add_summary(summary, i)
            print("Step: %5d, Validation Accuracy = %5.2f%%" % (i, acc * 100))
    # Final accuracy on the held-out test set.
    test_data = {
        x: resize_images(mnist.test.images),
        y_actual: mnist.test.labels,
        is_training: False
    }
    acc = sess.run(accuracy, feed_dict=test_data)
    print("Test Accuracy = %5.2f%%" % (100 * acc))
# + [markdown] id="q00WlzJyvFKr" colab_type="text"
# comes to 99.45%
#
# + [markdown] id="fNGDGrklnWS4" colab_type="text"
# TODO: try to use the trained model to read numbers stored in our local drive instance
#
# Input
# 25 4 2
# Expected Output
# THINK,OUTTHINK,THINK,THINK,THINK,OUTTHINK,19,18,17,OUT,15,14,13,OUTTHINK,11,10,9,OUT,7,6,5,OUT,3,THINK,1
#
# + [markdown] id="wAh_ao_hnFMg" colab_type="text"
# # Using the model to identify new input numbers
# + id="j92XR563xYRQ" colab_type="code" colab={"autoexec": {"startup": false, "wait_interval": 0}, "output_extras": [{}], "base_uri": "https://localhost:8080/", "height": 51} outputId="0646d17b-4dce-4fc9-fed8-b53aa6488061" executionInfo={"status": "ok", "timestamp": 1520213307417, "user_tz": 300, "elapsed": 2088, "user": {"displayName": "<NAME>", "photoUrl": "//lh6.googleusercontent.com/-SGJ1BLtauTk/AAAAAAAAAAI/AAAAAAAAAAA/4Cxjig_XDts/s50-c-k-no/photo.jpg", "userId": "105154027733464709839"}}
# !pip install -U -q PyDrive
from pydrive.auth import GoogleAuth
from pydrive.drive import GoogleDrive
from google.colab import auth
from oauth2client.client import GoogleCredentials
# Smoke test of Google Drive access from Colab: authenticate, upload a small
# text file, then read it back by id.
# 1. Authenticate and create the PyDrive client.
auth.authenticate_user()
gauth = GoogleAuth()
gauth.credentials = GoogleCredentials.get_application_default()
drive = GoogleDrive(gauth)
# PyDrive reference:
# https://googledrive.github.io/PyDrive/docs/build/html/index.html
# 2. Create & upload a text file.
uploaded = drive.CreateFile({'title': 'Sample upload.txt'})
uploaded.SetContentString('Sample upload file content')
uploaded.Upload()
print('Uploaded file with ID {}'.format(uploaded.get('id')))
# 3. Load a file by ID and print its contents.
downloaded = drive.CreateFile({'id': uploaded.get('id')})
print('Downloaded content "{}"'.format(downloaded.GetContentString()))
# + id="7U40qVIRxviq" colab_type="code" colab={"autoexec": {"startup": false, "wait_interval": 0}, "output_extras": [{"item_id": 1}], "base_uri": "https://localhost:8080/", "height": 347} outputId="d20dd1de-a4f4-4b47-9fe1-0d0fec134c81" executionInfo={"status": "ok", "timestamp": 1520356115322, "user_tz": 300, "elapsed": 1037, "user": {"displayName": "<NAME>", "photoUrl": "//lh6.googleusercontent.com/-SGJ1BLtauTk/AAAAAAAAAAI/AAAAAAAAAAA/4Cxjig_XDts/s50-c-k-no/photo.jpg", "userId": "105154027733464709839"}}
# Download a hand-written digit image from Google Drive by file ID and show a
# 28x28 thumbnail of it.
from google.colab import auth
auth.authenticate_user()
from googleapiclient.discovery import build
drive_service = build('drive', 'v3')
from PIL import Image
# Drive file ID of the digit photo to classify.
file_id = '13M-dLDt5hmG3bxmfBol_W5UnjyGU40lK'
import io
from googleapiclient.http import MediaIoBaseDownload
request = drive_service.files().get_media(fileId=file_id)
downloaded = io.BytesIO()
downloader = MediaIoBaseDownload(downloaded, request)
done = False
while done is False:
  # _ is a placeholder for a progress object that we ignore.
  # (Our file is small, so we skip reporting progress.)
  _, done = downloader.next_chunk()
downloaded.seek(0)
# Keep the raw bytes (used by cv2.imdecode later) and a fresh BytesIO for PIL.
img_str = downloaded.read()
dd = io.BytesIO(img_str)
# downloaded.seek(0)
# file = downloaded.read()
# import locale
# locale.getdefaultlocale()
# file1_open = open(file, encoding=locale.getdefaultlocale()[1])
# file1_content = file1_open.read()
# print('Downloaded file contents are: {}'.format(file1_content))
pil_im = Image.open(dd)
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
import numpy as np
# Shrink in place to at most 28x28 for a quick visual sanity check.
size = 28,28
pil_im.thumbnail(size, Image.ANTIALIAS)
imgplot = plt.imshow(pil_im)
# print('Downloaded file contents are: {}'.format(downloaded.read()))
# + id="PaSuGk_zM0DM" colab_type="code" colab={"autoexec": {"startup": false, "wait_interval": 0}}
# get opencv
# !apt-get -qq install -y libsm6 libxext6 && pip install -q -U opencv-python
import cv2
from scipy import ndimage
# + [markdown] id="14vH-1-jlGk1" colab_type="text"
# TODO: FIX
# https://medium.com/@o.kroeger/tensorflow-mnist-and-your-own-handwritten-digits-4d1cd32bbab4
# + id="IC_EBpv9UeL7" colab_type="code" colab={"autoexec": {"startup": false, "wait_interval": 0}, "output_extras": [{}, {}], "base_uri": "https://localhost:8080/", "height": 269} outputId="857bd333-7b16-44da-c22e-96c1c4500b28" executionInfo={"status": "error", "timestamp": 1520273109860, "user_tz": 300, "elapsed": 453, "user": {"displayName": "<NAME>", "photoUrl": "//lh6.googleusercontent.com/-SGJ1BLtauTk/AAAAAAAAAAI/AAAAAAAAAAA/4Cxjig_XDts/s50-c-k-no/photo.jpg", "userId": "105154027733464709839"}}
# image proc: decode the downloaded bytes and normalize the digit the way
# MNIST digits are normalized (28x28 frame, white-on-black, 20px digit box).
import math  # needed for the centering-padding computation below
# BUG FIX: np.fromstring on raw bytes has been deprecated for years;
# np.frombuffer is the supported equivalent and yields the same uint8 array.
nparr = np.frombuffer(img_str, np.uint8)
img = cv2.imdecode(nparr, cv2.IMREAD_GRAYSCALE)
# Invert (MNIST digits are white on black) and shrink to 28x28.
gray = cv2.resize(255-img, (28,28))
# Otsu threshold to a clean binary image.
(thresh, gray) = cv2.threshold(gray, 128, 255, cv2.THRESH_BINARY | cv2.THRESH_OTSU)
# Crop away all-black border rows/columns on every side.
while np.sum(gray[0]) == 0:
    gray = gray[1:]
while np.sum(gray[:,0]) == 0:
    gray = np.delete(gray,0,1)
while np.sum(gray[-1]) == 0:
    gray = gray[:-1]
while np.sum(gray[:,-1]) == 0:
    gray = np.delete(gray,-1,1)
# Rescale so the longer side is 20 pixels, preserving the aspect ratio
# (MNIST digits occupy a 20x20 box inside the 28x28 frame).
rows,cols = gray.shape
if rows > cols:
    factor = 20.0/rows
    rows = 20
    cols = int(round(cols*factor))
    gray = cv2.resize(gray, (cols,rows))
else:
    factor = 20.0/cols
    cols = 20
    rows = int(round(rows*factor))
    gray = cv2.resize(gray, (cols, rows))
# Pad back to exactly 28x28, splitting the margin as evenly as possible.
colsPadding = (int(math.ceil((28-cols)/2.0)),int(math.floor((28-cols)/2.0)))
rowsPadding = (int(math.ceil((28-rows)/2.0)),int(math.floor((28-rows)/2.0)))
gray = np.lib.pad(gray,(rowsPadding,colsPadding),'constant')
def getBestShift(img):
    """Return the integer (shiftx, shifty) translation that moves the image's
    center of mass onto the geometric center of the frame.

    img -- 2-D grayscale numpy array.
    Positive shiftx moves the content right; positive shifty moves it down.
    """
    # BUG FIX: ndimage.measurements.center_of_mass used the deprecated
    # scipy.ndimage.measurements namespace (removed in modern SciPy);
    # ndimage.center_of_mass is the same function at its supported location.
    # center_of_mass returns (row, col), i.e. (y, x) order.
    cy,cx = ndimage.center_of_mass(img)
    rows,cols = img.shape
    shiftx = np.round(cols/2.0-cx).astype(int)
    shifty = np.round(rows/2.0-cy).astype(int)
    return shiftx,shifty
def shift(img,sx,sy):
    """Translate `img` by (sx, sy) pixels using an affine warp.

    Pixels shifted in from the border are filled with 0 (black), which
    matches the MNIST background.
    """
    rows,cols = img.shape
    # 2x3 affine matrix encoding a pure translation.
    M = np.float32([[1,0,sx],[0,1,sy]])
    shifted = cv2.warpAffine(img,M,(cols,rows))
    return shifted
# BUG FIX: the original repeated `gray = np.lib.pad(gray,(rowsPadding,
# colsPadding),'constant')` here although the image was already padded to
# 28x28 in the cell above. The duplicate padding grew the array past 28x28,
# so the flattened vector no longer fit the 784-slot row below (this cell's
# recorded execution status was "error"). Pad once (above), then center.
shiftx,shifty = getBestShift(gray)
shifted = shift(gray,shiftx,shifty)
gray = shifted
# Scale pixels to [0, 1] and lay the image out as a 1x784 row vector,
# matching the network's input placeholder.
flatten = gray.flatten() / 255.0
img_asarr = np.zeros((1,784))
img_asarr[0] = flatten
# + id="G0KaZ0AvLWz_" colab_type="code" colab={"autoexec": {"startup": false, "wait_interval": 0}, "output_extras": [{}, {}], "base_uri": "https://localhost:8080/", "height": 105} outputId="b2f530ad-4ff6-4b3b-fdf2-631936bec59a" executionInfo={"status": "ok", "timestamp": 1520272703004, "user_tz": 300, "elapsed": 581, "user": {"displayName": "<NAME>", "photoUrl": "//lh6.googleusercontent.com/-SGJ1BLtauTk/AAAAAAAAAAI/AAAAAAAAAAA/4Cxjig_XDts/s50-c-k-no/photo.jpg", "userId": "105154027733464709839"}}
# gray = cv2.imread(dd, cv2.IMREAD_GRAYSCALE)
# file_bytes = np.asarray(bytearray(dd), dtype=np.uint8)
# nparr = np.fromstring(img_str, np.uint8)
# img = cv2.imdecode(nparr, cv2.IMREAD_GRAYSCALE)
# gray = cv2.resize(255-img, (28,28))
# flatten = gray.flatten() / 255.0
# img_asarr = np.zeros((1,784))
# img_asarr[0] = flatten
# corvals = np.zeros((1,10))
# corval = np.zeros((10))
# corval[6] = 1
# corvals[0] = corval
# print x, y_
# pred = tf.argmax(y,1)
tf.initialize_all_variables().run()
with tf.Session() as sess:
tf.initialize_all_variables().run()
label = sess.run(y,
feed_dict={x: img_asarr})
print label
# print sess.run(pred, feed_dict={x: img_asarr, y_: corvals})
# print sess.run(accuracy, feed_dict={x: img_asarr, y_: corvals})
# + id="xPKsX5qHuBO1" colab_type="code" colab={"autoexec": {"startup": false, "wait_interval": 0}, "output_extras": [{}, {}], "base_uri": "https://localhost:8080/", "height": 728} outputId="96e06a05-2d0d-4a86-c8c3-c563ee1e4ca0" executionInfo={"status": "error", "timestamp": 1520270066165, "user_tz": 300, "elapsed": 429, "user": {"displayName": "<NAME>", "photoUrl": "//lh6.googleusercontent.com/-SGJ1BLtauTk/AAAAAAAAAAI/AAAAAAAAAAA/4Cxjig_XDts/s50-c-k-no/photo.jpg", "userId": "105154027733464709839"}}
print len(mnist.test.images[0])
# x = tf.Variable(pil_im, name='x')
# print x
# x = tf.reshape(x, shape=[-1])
# print x
# im = double(rgb2gray(pil_im))
# im = im(:)
# im = im./max(im)
# x = im
# x = pil_im
# x = tf.image.encode_png(x)
# x = np.array(x)
# x = x.ravel()
# x = tf.placeholder(x)
from array import *
# data_image = array('B')
# pixel = pil_im.load()
# width, height = pil_im.size
# for x in range(0,width):
# for y in range(0,height):
# data_image.append(pixel[y,x])
# x = data_image
# imagedata = np.zeros((-1,28,28), dtype=np.uint8)
# import matplotlib.image as imagick
# x = imagick.inread(pil_im)
# x = x.shape
# x = np.array(x, dtype=np.uint8)
# x = x.shape
# x = x.ravel()
# x = hash(tuple(x))
# x = np.matrix(x)
with tf.Session() as sess:
new_image_label= sess.run(y,
feed_dict={x: dd})
print new_image_label
# + [markdown] id="ZdOrJXkoOPh-" colab_type="text"
# # Glossary and links
#
# Difference between trainers well explained
# https://stackoverflow.com/questions/36162180/gradient-descent-vs-adagrad-vs-momentum-in-tensorflow
#
# Source for the nielsen-net implementation
# https://medium.com/initialized-capital/we-need-to-go-deeper-a-practical-guide-to-tensorflow-and-inception-50e66281804f
# https://github.com/initialized/tensorflow-tutorial/blob/master/mnist-slim/MNIST%20Slim.ipynb
#
#
# TODO: figure out mcdnn
# https://arxiv.org/pdf/1202.2745.pdf
# https://stackoverflow.com/questions/41990014/load-multiple-models-in-tensorflow
|
learn_nn/MNIST_basic.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Python Refresher
#
# In this notebook, I would like to show you the top 5 things you will need to know in order for you to gain maximum benefit from this workshop.
# ## 0. Python is Zero-Indexed
# +
# If you have a list of numbers:
my_list = [1, 5, 2, 3, 9]
# What is the index of the number "3"?
x = 1 # change this line
my_list[x]
# -
# ## 1. Namespaces are a cool thing.
# +
# e.g. How do I use the numpy trigonometric functions?
import numpy as np # you can abbreviate a package import name in order to not use
np.tan(3) # change this function call to get the cosine of 3. Also, does "numpy.tan(3)" work
# -
# ## 2. Python "classes" are objects that have methods.
# +
from datetime import datetime
class Watch(object):
    """Workshop example: a class with instance state and a method."""
    def __init__(self):
        # Unused by current_time(); exists to demonstrate instance state.
        self.stopwatch_timer = 0
    def current_time(self):
        """Return the current wall-clock time as a string."""
        return str(datetime.now())
w = Watch()
# Write the function call that tells the current time on the watch.
# -
# ## 3. Numbers can be represented numerically or as strings
x = (3 == '3') # change this line to make x evaluate to True
x
# ## 4. Dictionaries are really useful
# +
from datetime import datetime # usually, I would opt to place all the import statements at the top.
useful_info = dict()
# Add the key-value pairs to the useful_info dictionary:
# - 'instructor_name':'<NAME>'
# - 'date_today':datetime.today()
# -
# ## 5. Write functions to encapsulate code for reuse.
# +
# Write a function that gives the sum of all numbers up to a passed in number.
# Hint: a for-loop is the easiest implementation.
# Hint: if you're feeling fancy, go ahead and use numpy.
def sum_up_to(x):
    """
    Sums up all of the numbers from 1 to x.
    """
    # Workshop exercise stub: fill in your implementation between the ###
    # markers. As written the function falls through and returns None.
    ###
    pass
    ###
# Then, write another function that uses that function to compute the sums for every even digit up to a number y.
def sums_for_even_digits(y):
    """
    Computes the sum_up_to(x) for every even number up to y.
    - If y is even, y is included.
    - If y is odd, y is not included.
    An example function call is as such:
    >>> sums_for_even_digits(10)
    [0, 3, 10, 21, 36, 55]
    """
    # Workshop exercise stub: implement in terms of sum_up_to() above.
    # As written the function falls through and returns None.
    ### Write your code below.
    pass
    ###
# -
|
01 Python Refresher.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
from local.torch_basics import *
from local.test import *
from local.core import *
from local.layers import *
from local.data.all import *
from local.notebook.showdoc import show_doc
from local.optimizer import *
from local.learner import *
from local.metrics import *
from local.text.core import *
from local.text.data import *
from local.text.models.core import *
from local.text.models.awdlstm import *
from local.callback.rnn import *
from local.callback.all import *
# # Integration test on Wikitext-2
#
# > Training a Language Model on WT2
# ## Data
path = untar_data(URLs.WIKITEXT_TINY)
# The dataset comes with all the articles concatenated. We split them to be able to shuffle at the beginning of each epoch.
# +
def istitle(line):
    """Return True if `line` is a WikiText section heading (" = Title = ")."""
    return re.search(r'^ = [^=]* = $', line) is not None
def read_file(filename):
    """Split a WikiText dump into a list of tokenized articles.

    Each article is a list of whitespace-separated tokens, with the corpus's
    literal '<unk>' marker replaced by the project's UNK token.
    NOTE(review): `L` and `UNK` come from the fastai `local` imports at the
    top of the notebook. The split relies on the WikiText layout where each
    new article is preceded by a ' \\n' line followed by a ' = Title = '
    heading — confirm this holds for all three splits.
    """
    articles = L()
    with open(filename, encoding='utf8') as f:
        lines = f.readlines()
    current_article = ''
    for i,line in enumerate(lines):
        current_article += line.replace('<unk>', UNK)
        # An article ends when the next line is ' \n' and the one after it
        # is a new title heading.
        if i < len(lines)-2 and lines[i+1] == ' \n' and istitle(lines[i+2]):
            articles.append(current_article.split(' '))
            current_article = ''
    # Flush the final article.
    articles.append(current_article.split(' '))
    return articles
# -
# Then we put our list of tokenized texts together in an `LM_Dataset`. It will return tuples of sequences of `seq_len`, with the second sequence being the first one shifted by one to the right.
trn_txt = read_file(path/'train.txt')
val_txt = read_file(path/'valid.txt')
tst_txt = read_file(path/'test.txt')
all_texts = np.concatenate([val_txt, trn_txt, tst_txt])
df = pd.DataFrame({'texts':all_texts})
df.head()
# +
#df_tok,count = tokenize_df(df, ['texts'])
# -
count = Counter([p for t in df["texts"].values for p in t])
vocab = make_vocab(count)
splits = [list(range(len(val_txt), len(df))), list(range(len(val_txt)))]
tfm = Numericalize(make_vocab(count))
dsrc = DataSource(df["texts"].values, [tfm], filts=splits)
bs,sl = 104,72
train_dl = LMDataLoader(dsrc.train, bs=bs, seq_len=sl, after_batch=[Cuda()], shuffle=True, num_workers=8)
valid_dl = LMDataLoader(dsrc.valid, bs=2*bs, seq_len=sl, after_batch=[Cuda()], num_workers=8)
dbch = DataBunch(train_dl, valid_dl)
dbch.show_batch()
# ## Model
config = awd_lstm_lm_config.copy()
config.update({'input_p': 0.6, 'output_p': 0.4, 'weight_p': 0.5, 'embed_p': 0.1, 'hidden_p': 0.2})
model = get_language_model(AWD_LSTM, len(vocab), config=config)
opt_func = partial(Adam, wd=0.1, eps=1e-7)
cb_funcs = [partial(MixedPrecision, clip=0.1), partial(RNNTrainer, alpha=2, beta=1)]
learn = Learner(model, dbch, loss_func=CrossEntropyLossFlat(), opt_func=opt_func, cb_funcs=cb_funcs, metrics=[accuracy, Perplexity()])
learn.fit_one_cycle(1, 5e-3, moms=(0.8,0.7,0.8), div=10)
# Full training
learn.fit_one_cycle(90, 5e-3, moms=(0.8,0.7,0.8), div=10)
|
dev/35_tutorial_wikitext.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: TensorFlow-GPU-2.2
# language: python
# name: tf-gpu
# ---
# ## Installation and Setup
#
# Installation is a two-step process. First, install spaCy using either conda or pip. Next, download the specific model you want, based on language.<br> For more info visit https://spacy.io/usage/
# #### 1. From the command line or terminal:
# > `conda install -c conda-forge spacy`
# > <br>*or*<br>
# > `pip install -U spacy`
# > ### Alternatively you can create a virtual environment:
# > `conda create -n spacyenv python=3 spacy=2`
# #### 2. Next, also from the command line (you must run this as admin or use sudo):
# > `python -m spacy download en`
# > ### If successful, you should see a message like:
# > **`Linking successful`**<br>
# > ` C:\Anaconda3\envs\spacyenv\lib\site-packages\en_core_web_sm -->`<br>
# > ` C:\Anaconda3\envs\spacyenv\lib\site-packages\spacy\data\en`<br>
# > ` `<br>
# > ` You can now load the model via spacy.load('en')`
# # Working with spaCy in Python
#
# This is a typical set of instructions for importing and working with spaCy. Don't be surprised if this takes awhile - spaCy has a fairly large library to load:
# +
# Import spaCy and load the language library
import spacy
nlp = spacy.load('en_core_web_sm') # This is an english model of spaCy
# Create a Doc object
doc = nlp(u'Tesla is looking at buying U.S. startup for $6 million')
# Print each token separately
for token in doc:
print(token.text, token.pos_, token.dep_)
# -
# This doesn't look very user-friendly, but right away we see some interesting things happen:
# 1. Tesla is recognized to be a Proper Noun, not just a word at the start of a sentence
# 2. U.S. is kept together as one entity (we call this a 'token')
#
# As we dive deeper into spaCy we'll see what each of these abbreviations mean and how they're derived. We'll also see how spaCy can interpret the last three tokens combined `$6 million` as referring to ***money***.
# ___
# # spaCy Objects
#
# After importing the spacy module in the cell above we loaded a **model** and named it `nlp`.<br>Next we created a **Doc** object by applying the model to our text, and named it `doc`.<br>spaCy also builds a companion **Vocab** object that we'll cover in later sections.<br>The **Doc** object that holds the processed text is our focus here.
# ___
# # Pipeline
# When we run `nlp`, our text enters a *processing pipeline* that first breaks down the text and then performs a series of operations to tag, parse and describe the data. Image source: https://spacy.io/usage/spacy-101#pipelines
# <img src="pipeline1.png" width="600">
# We can check to see what components currently live in the pipeline. In later sections we'll learn how to disable components and add new ones as needed.
# >**tagger** - Assign part-of-speech tags.
# >**parser** - Assigns Dependency Parser
# >**ner** - Detect and label named entities
nlp.pipeline
nlp.pipe_names
# ___
# ## Tokenization
# The first step in processing text is to split up all the component parts (words & punctuation) into "tokens". These tokens are annotated inside the Doc object to contain descriptive information. We'll go into much more detail on tokenization in an upcoming lecture. For now, let's look at another example:
doc2 = nlp(u"Tesla isn't looking into startups anymore.")
for token in doc2:
print(token.text, token.pos_, token.dep_)
# Notice how `isn't` has been split into two tokens. spaCy recognizes both the root verb `is` and the negation attached to it. Notice also that both the extended whitespace and the period at the end of the sentence are assigned their own tokens.
# It's important to note that even though `doc2` contains processed information about each token, it also retains the original text:
print(doc2)
print(doc2[0])
print(type(doc2))
# ___
# ## Part-of-Speech Tagging (POS)
# The next step after splitting the text up into tokens is to assign parts of speech. In the above example, `Tesla` was recognized to be a ***proper noun***. Here some statistical modeling is required. For example, words that follow "the" are typically nouns.
#
# For a full list of POS Tags visit https://spacy.io/api/annotation#pos-tagging
doc2[0].pos_
# ___
# ## Dependencies
# We also looked at the syntactic dependencies assigned to each token. `Tesla` is identified as an `nsubj` or the ***nominal subject*** of the sentence.
# For a full list of Syntactic Dependencies visit https://spacy.io/api/annotation#dependency-parsing
# <br>A good explanation of typed dependencies can be found [here](https://nlp.stanford.edu/software/dependencies_manual.pdf)
doc2[0].dep_
# To see the full name of a tag use `spacy.explain(tag)`
spacy.explain('PROPN')
spacy.explain('nsubj')
# ___
# ## Additional Token Attributes
# We'll see these again in upcoming lectures. For now we just want to illustrate some of the other information that spaCy assigns to tokens:
# |Tag|Description|doc2[0].tag|
# |:------|:------:|:------|
# |`.text`|The original word text<!-- .element: style="text-align:left;" -->|`Tesla`|
# |`.lemma_`|The base form of the word|`tesla`|
# |`.pos_`|The simple part-of-speech tag|`PROPN`/`proper noun`|
# |`.tag_`|The detailed part-of-speech tag|`NNP`/`noun, proper singular`|
# |`.shape_`|The word shape – capitalization, punctuation, digits|`Xxxxx`|
# |`.is_alpha`|Is the token an alpha character?|`True`|
# |`.is_stop`|Is the token part of a stop list, i.e. the most common words of the language?|`False`|
# Lemmas(the base form of the word):
print(doc2[4].text)
print(doc2[4].lemma_)
# Simple Parts-of-Speech & Detailed Tags:
print(doc2[4].pos_)
print(doc2[4].tag_ + ' / ' + spacy.explain(doc2[4].tag_))
# Word Shapes:
print(doc2[0].text+': '+doc2[0].shape_)
print(doc[5].text+' : '+doc[5].shape_)
# Boolean Values:
print(doc2[0].is_alpha)
print(doc2[0].is_stop)
# ___
# ## Spans
# Large Doc objects can be hard to work with at times. A **span** is a slice of Doc object in the form `Doc[start:stop]`.
doc3 = nlp(u'Although commmonly attributed to <NAME> from his song "Beautiful Boy", \
the phrase "Life is what happens to us while we are making other plans" was written by \
cartoonist <NAME> and published in Reader\'s Digest in 1957, when Lennon was 17.')
life_quote = doc3[16:30]
print(life_quote)
type(life_quote)
# ___
# ## Sentences
# Certain tokens inside a Doc object may also receive a "start of sentence" tag. While this doesn't immediately build a list of sentences, these tags enable the generation of sentence segments through `Doc.sents`. Later we'll write our own segmentation rules.
doc4 = nlp(u'This is the first sentence. This is another sentence. This is the last sentence.')
for sent in doc4.sents:
print(sent)
doc4[6].is_sent_start
# _______________________________________________________________________________________________________________________
# ### Using spaCy in a dataset
#
# The dataset is collection of tweets by <NAME> from November 16,2012 and September 29,2017
#
# In this Kernel, We will learn how to use spaCy in Python to perform a few things of NLP. This is just starter pack for analysis of tweets. There are no steps for cleaning the text
# +
import numpy as np
import pandas as pd
import os
#print(os.listdir("../input"))
import spacy
import random
from collections import Counter #for counting
import seaborn as sns #for visualization
os.getcwd()
# -
nlp = spacy.load('en')
#Reading tweets
tweets = pd.read_csv("data_elonmusk.csv",encoding='latin1')
tweets = tweets.assign(Time=pd.to_datetime(tweets.Time)).drop('row ID', axis='columns')
tweets.head(10)
tweets.info()
tweets.shape
# **Sampling the tweets text**
random.seed(123)
text = tweets.Tweet[random.sample(range(1,240),10)]
text
# #### Annotation
# Let us begin our NLP journey with Linguistic Annotation, which means marking each and every word with its linguistic type, like whether it's a NOUN, VERB and so on. This helps us in giving grammatical labels to our Text Corpus. The function nlp() takes only strings, so let us use str() to combine all our rows above into one long string.
text_combined = str(text)
doc = nlp(text_combined)
print(doc)
# #### Tokenization
# doc is the annotated text (that we produced using the loaded language model). Now, let us tokenize our text. Tokenization has been done along with the above process. We can now print the **chunks**. The tokenized parts are called chunks. As a naive description, tokenization is nothing but breaking the long sentences/text corpus into small chunks (mostly words).
for token in doc:
print(token)
# Perhaps, we don't want to see everything but just NOUNs. Below is the code how we can print only the nouns in the text.
nouns = list(doc.noun_chunks)
nouns
# Sometimes, we might need to tokenization based on sentences. Let's say we've got Chat Transcript from Customer Service and in that case we need to tokenize our transcript based on sentences.
list(doc.sents)
# #### Named Entity Recognition (NER)
# NER is the process of extracting Named Entities like Person, Organization, Location and other such information from our Text Corpus. spaCy also has an object displacy that lets us visualize our text with NER. We can display Named Entities using the following code:
for ent in doc.ents:
print(ent.text,ent.label_)
# **spaCy** also allows us to visualize Named Entities along with the Text Labels.
spacy.displacy.render(doc, style='ent',jupyter=True)
# #### Lemmatization
# Lemmatization is the process of retrieving the root word of the current word. Lemmatization is an essential process in NLP to bring different variants of a single word to one root word.
for token in doc:
print(token.text, token.lemma_)
# As you can see in the above output, words like aligning and values have been converted to their root words align and value.
# #### Dependency Parser Visualization
spacy.displacy.render(doc, style='dep',jupyter=True)
|
MAIN/2. Introduction to NLP with SpaCy.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Problem 1: BST Traversal
# This problem builds on Problem 1 of Homework 7 in which you wrote a binary search tree.
#
# ### Part 1
#
# As discussed in lecture, three different types to do a depth-first traversal are: preorder, inorder, and postorder. Here is a reference: [Tree Traversal](https://en.wikipedia.org/wiki/Tree_traversal#Depth-first_search).
#
# Write an iterator class called `DFSTraversal` with the following specifications:
#
# * `__init__(self, tree, traversalType)`: Constructor takes a `BinaryTree` object and one of the enums from `DFSTraversalTypes`
#
# ```python
# from enum import Enum
#
# class DFSTraversalTypes(Enum):
# PREORDER = 1
# INORDER = 2
# POSTORDER = 3
# ```
#
# * `changeTraversalType(self, traversalType)`: Change the traversal type
# * `__iter__(self)`: This is the initialization of an iterator
# * `__next__(self)`: This is called in the iterator for getting the next value
#
# Here's how you might use your `DFSTraversal` class:
#
# ```python
# input_array = [3, 9, 2, 11]
# bt = BinaryTree()
# for val in input_array:
# bt.insert(val)
# traversal = DFSTraversal(bt, DFSTraversalTypes.INORDER)
# for val in traversal:
# print(val)
# 2
# 3
# 9
# 11
# ```
#
# ### Part 2
# Put your `BinaryTree` class (from homework 7) and your `DFSTraversal` class (from Part 1 of this homework) in a file titled `TreeTraversal.py`.
# +
# from HW7
class TreeNode:
    """A single BST node; `parent` back-links are maintained by BinaryTree."""
    def __init__(self, val):
        self.left = None
        self.right = None
        self.val = val
        self.parent = None

class BinaryTree:
    """Binary search tree; duplicate values are inserted into the left subtree."""
    def __init__(self):
        self.root = None

    def insert(self, val):
        """Insert `val`, preserving BST ordering (val <= node goes left)."""
        if self.root is None:
            self.root = TreeNode(val)
        else:
            self._insert(val, self.root)

    def _insert(self, val, node):
        # Walk down until an empty child slot is found.
        if val <= node.val:
            if node.left is not None:
                self._insert(val, node.left)
            else:
                node.left = TreeNode(val)
                node.left.parent = node
        else:
            if node.right is not None:
                self._insert(val, node.right)
            else:
                node.right = TreeNode(val)
                node.right.parent = node

    def find(self, val):
        """Return the TreeNode holding `val`, or None if absent."""
        if self.root is None:
            return None
        return self._find(val, self.root)

    def _find(self, val, node):
        if val == node.val:
            return node
        if val < node.val and node.left is not None:
            return self._find(val, node.left)
        if val > node.val and node.right is not None:
            return self._find(val, node.right)
        return None

    def getValues(self, depth):
        """Return the node values at `depth` (root is depth 0), left to right,
        with None standing in for missing nodes."""
        if self.root is None:
            return []
        val_list = []
        self._getValues(depth, self.root, val_list)
        return val_list

    def _getValues(self, depth, node, vals=None):
        # BUG FIX: the original used a mutable default argument (vals=[]),
        # which is created once and shared across calls, so default-argument
        # invocations would accumulate values between calls. A None sentinel
        # gives each call its own fresh list.
        if vals is None:
            vals = []
        if depth == 0:
            vals.append(node.val)
        else:
            # A missing subtree contributes 2**(depth-1) Nones at this depth.
            if node.left is not None:
                self._getValues(depth-1, node.left, vals)
            else:
                vals.extend([None] * 2**(depth-1))
            if node.right is not None:
                self._getValues(depth-1, node.right, vals)
            else:
                vals.extend([None] * 2**(depth-1))
        return vals

    def max_depth(self, root):
        """Height of the subtree rooted at `root`; an empty tree has height 0."""
        if root is None:
            return 0
        return max(self.max_depth(root.left), self.max_depth(root.right)) + 1

    def __len__(self):
        # len(tree) reports the tree's height.
        return self.max_depth(self.root)
# +
from enum import Enum
class DFSTraversalTypes(Enum):
    """Enumeration of the supported depth-first traversal orders."""
    PREORDER = 1
    INORDER = 2
    POSTORDER = 3
# -
class DFSTraversal:
    """Iterable over a binary tree's values in a chosen depth-first order."""

    def __init__(self, tree, traversalType):
        # Keep the enum member's *name* (e.g. 'INORDER') so __iter__ can
        # dispatch on a plain string.
        self.traversalType = traversalType.name
        self.tree = tree

    def changeTraversalType(self, traversalType):
        """Switch to a different DFSTraversalTypes order for later iteration."""
        self.traversalType = traversalType.name

    def preorder(self, node):
        """Yield node value first, then the left and right subtrees."""
        if node is None:
            return
        yield node.val
        yield from self.preorder(node.left)
        yield from self.preorder(node.right)

    def inorder(self, node):
        """Yield left subtree, node value, right subtree (sorted for a BST)."""
        if node is None:
            return
        yield from self.inorder(node.left)
        yield node.val
        yield from self.inorder(node.right)

    def postorder(self, node):
        """Yield both subtrees before the node value."""
        if node is None:
            return
        yield from self.postorder(node.left)
        yield from self.postorder(node.right)
        yield node.val

    def __iter__(self):
        # Dispatch table instead of an if/elif chain; an unrecognized type
        # yields nothing, matching the original fall-through behavior.
        walkers = {
            'PREORDER': self.preorder,
            'INORDER': self.inorder,
            'POSTORDER': self.postorder,
        }
        walker = walkers.get(self.traversalType)
        if walker is not None:
            yield from walker(self.tree.root)
# +
input_array = [3, 9, 2, 11]
bt = BinaryTree()
for val in input_array:
bt.insert(val)
traversal = DFSTraversal(bt, DFSTraversalTypes.PREORDER)
for val in traversal:
print(val)
# -
traversal.changeTraversalType(DFSTraversalTypes.INORDER)
for val in traversal:
print(val)
traversal.changeTraversalType(DFSTraversalTypes.POSTORDER)
for val in traversal:
print(val)
traversal.changeTraversalType(DFSTraversalTypes.INORDER)
# traversal.inorder(bt.root)
for val in traversal:
print(val)
bt2 = BinaryTree()
arr = [20, 10, 17, 14, 3, 0]
for i in arr:
bt2.insert(i)
print("Height of binary tree is {}.\n".format(len(bt2)))
for i in range(len(bt2)):
print("Level {0} values: {1}".format(i, bt2.getValues(i)))
traversal2 = DFSTraversal(bt2, DFSTraversalTypes.INORDER)
for val in traversal2:
print(val)
# +
# %%file TreeTraversal.py
class TreeNode:
    """A single BST node; `parent` back-links are maintained by BinaryTree."""
    def __init__(self, val):
        self.left = None
        self.right = None
        self.val = val
        self.parent = None

class BinaryTree:
    """Binary search tree; duplicate values are inserted into the left subtree."""
    def __init__(self):
        self.root = None

    def insert(self, val):
        """Insert `val`, preserving BST ordering (val <= node goes left)."""
        if self.root is None:
            self.root = TreeNode(val)
        else:
            self._insert(val, self.root)

    def _insert(self, val, node):
        # Walk down until an empty child slot is found.
        if val <= node.val:
            if node.left is not None:
                self._insert(val, node.left)
            else:
                node.left = TreeNode(val)
                node.left.parent = node
        else:
            if node.right is not None:
                self._insert(val, node.right)
            else:
                node.right = TreeNode(val)
                node.right.parent = node

    def find(self, val):
        """Return the TreeNode holding `val`, or None if absent."""
        if self.root is None:
            return None
        return self._find(val, self.root)

    def _find(self, val, node):
        if val == node.val:
            return node
        if val < node.val and node.left is not None:
            return self._find(val, node.left)
        if val > node.val and node.right is not None:
            return self._find(val, node.right)
        return None

    def getValues(self, depth):
        """Return the node values at `depth` (root is depth 0), left to right,
        with None standing in for missing nodes."""
        if self.root is None:
            return []
        val_list = []
        self._getValues(depth, self.root, val_list)
        return val_list

    def _getValues(self, depth, node, vals=None):
        # BUG FIX: the original used a mutable default argument (vals=[]),
        # which is created once and shared across calls, so default-argument
        # invocations would accumulate values between calls. A None sentinel
        # gives each call its own fresh list.
        if vals is None:
            vals = []
        if depth == 0:
            vals.append(node.val)
        else:
            # A missing subtree contributes 2**(depth-1) Nones at this depth.
            if node.left is not None:
                self._getValues(depth-1, node.left, vals)
            else:
                vals.extend([None] * 2**(depth-1))
            if node.right is not None:
                self._getValues(depth-1, node.right, vals)
            else:
                vals.extend([None] * 2**(depth-1))
        return vals

    def max_depth(self, root):
        """Height of the subtree rooted at `root`; an empty tree has height 0."""
        if root is None:
            return 0
        return max(self.max_depth(root.left), self.max_depth(root.right)) + 1

    def __len__(self):
        # len(tree) reports the tree's height.
        return self.max_depth(self.root)
from enum import Enum
class DFSTraversalTypes(Enum):
    """Enumeration of the supported depth-first traversal orders."""
    PREORDER = 1
    INORDER = 2
    POSTORDER = 3
class DFSTraversal:
    """Iterable over `tree` values in the order given by `traversalType`."""
    def __init__(self, tree, traversalType):
        # Keep the enum member's *name* so __iter__ can dispatch on a string.
        self.traversalType = traversalType.name
        self.tree = tree
    def changeTraversalType(self, traversalType):
        """Switch the traversal order for subsequent iterations."""
        self.traversalType = traversalType.name
    def preorder(self, node):
        """Yield node value first, then the left and right subtrees."""
        if node is not None:
            yield node.val
            yield from self.preorder(node.left)
            yield from self.preorder(node.right)
    def inorder(self, node):
        """Yield left subtree, node value, right subtree (sorted for a BST)."""
        if node is not None:
            yield from self.inorder(node.left)
            yield node.val
            yield from self.inorder(node.right)
    def postorder(self, node):
        """Yield both subtrees before the node value."""
        if node is not None:
            yield from self.postorder(node.left)
            yield from self.postorder(node.right)
            yield node.val
    def __iter__(self):
        # Unrecognized traversal types yield nothing.
        if self.traversalType == 'PREORDER':
            yield from self.preorder(self.tree.root)
        elif self.traversalType == 'INORDER':
            yield from self.inorder(self.tree.root)
        elif self.traversalType == 'POSTORDER':
            yield from self.postorder(self.tree.root)
# -
# ## Problem 2: Markov Chains
#
# [Markov Chains](https://en.wikipedia.org/wiki/Markov_chain) are widely used to model and predict discrete events. Underlying Markov chains are Markov processes which make the assumption that the outcome of a future event only depends on the event immediately preceeding it. In this exercise, we will be assuming that weather has Markov properties (e.g. today's weather is dependent only on yesterday's weather). We will use the Markov assumption to create a basic model for predicting weather.
# To begin, let's categorize weather into 7 types: ['sunny', 'cloudy', 'rainy', 'snowy', 'windy', 'hailing'].
#
# In the `weather.csv` file accompanying this homework, each row corresponds to one type of weather (in the order given above) and each column is the probability of one type of weather occurring the following day (also in the order given above).
#
# The $ij$th element is the probability that the $j$th weather type occurs after the $i$th weather type. So for example, (1,2) is the probability a cloudy day occurs after a sunny day.
#
# Take a look at the data. Make sure you see how if the previous day was sunny, the following day will have a 0.4 probability of being sunny as well. If the previous day was raining (index $i = 3$), then the following day (index $j$) has a 0.05 probability of being windy ($j = 5$).
import pandas as pd
# Preview the raw 6x6 transition matrix (rows = previous day, columns = next day).
df = pd.read_csv('weather.csv', header=None)
df
# ### Part 1: Parse the `.csv` file into a `Numpy` array
#Load CSV file -- hint: you can use np.genfromtxt()
import numpy as np
# `data` is the same matrix as a plain ndarray; used by the Markov classes below.
data = np.genfromtxt("weather.csv", delimiter=",")
data
data.shape
# ### Part 2: Create a class called `Markov` that has the following methods:
#
# * `load_data(array)`: loads the Numpy 2D array and stores it as a class variable.
# * `get_prob(previous_day, following_day)`: returns the probability of `following_day` weather given `previous_day` weather.
#
# **Note:** `previous_day` and `following_day` should be passed in string form (e.g. "sunny"), as opposed to an index (e.g. 0).
class Markov:
    """Markov weather model backed by a 6x6 transition-probability matrix."""

    def __init__(self):
        # Map each weather name to its row/column index in the matrix.
        names = ["sunny", "cloudy", "rainy", "snowy", "windy", "hailing"]
        self.weather = {name: idx for idx, name in enumerate(names)}

    def load_data(self, array):
        """Store the 2D transition matrix (row = previous day, column = following day)."""
        self.array = array

    def get_prob(self, previous_day, following_day):
        """Return P(following_day | previous_day); both arguments are weather-name strings."""
        return self.array[self.weather[previous_day], self.weather[following_day]]
# Quick sanity checks against the CSV-derived matrix loaded above as `data`.
m = Markov()
m.load_data(data)
m.get_prob("sunny", "rainy")
m.get_prob("rainy", "windy")
# ## Problem 3: Iterators
# Iterators are a convenient way to walk along your Markov chain.
#
# #### Part 1: Using your `Markov` class from Problem 2, write `Markov` as an iterator by implementing the `__iter__()` and `__next__()` methods.
#
# Remember:
# * `__iter__()` should return the iterator object and should be implicitly called when the loop begins
# * The `__next__()` method should return the next value and is implicitly called at each step in the loop.
#
# Each 'next' step should be stochastic (i.e. randomly selected based on the relative probabilities of the following day weather types) and should return the next day's weather as a string (e.g. "sunny") rather than an index (e.g. 0).
# +
class MarkovIterator:
    """Iterator that walks a Markov weather chain one day at a time.

    Each call to ``next()`` returns the *current* day's weather as a string,
    then stochastically advances the chain to a next state sampled from the
    current state's row of transition probabilities.
    """

    def __init__(self, markov):
        self.markov = markov
        # Copy the model's current state so several iterators over the same
        # model advance independently.
        self.current_idx = self.markov.current_idx
        self.table = self.markov.array

    def __next__(self):
        rand_num = np.random.random()
        try:
            probs = self.table[self.current_idx]
        except IndexError:
            # Defensive: an out-of-range state index ends the iteration.
            raise StopIteration()
        # Sample the next state from the categorical distribution `probs`.
        # cumsum replaces the original hand-rolled CDF loop (which relied on
        # cdf[-1] being 0 on the first pass); searchsorted finds the first CDF
        # entry >= rand_num. Clamp to the last index so floating-point rounding
        # (cdf[-1] slightly below 1) falls back to the last category instead of
        # silently returning index 0 as the original did.
        cdf = np.cumsum(probs)
        next_idx = min(int(np.searchsorted(cdf, rand_num)), len(probs) - 1)
        current_str = self.markov.idx2str[self.current_idx]
        self.current_idx = next_idx
        return current_str

    def __iter__(self):
        return self
class Markov:
    """Markov weather model; iterating over it produces a stochastic weather walk."""

    def __init__(self, current_weather = "sunny"):
        # Index <-> name lookups for the six weather categories.
        self.idx2str = ["sunny", "cloudy", "rainy", "snowy", "windy", "hailing"]
        self.weather = {name: idx for idx, name in enumerate(self.idx2str)}
        self.current_idx = self.weather[current_weather]

    def load_data(self, array):
        """Store the 2D transition-probability matrix."""
        self.array = array

    def get_prob(self, previous_day, following_day):
        """Return P(following_day | previous_day) for weather-name strings."""
        return self.array[self.weather[previous_day], self.weather[following_day]]

    def set_current(self, current_weather):
        """Reset the chain's current state by weather name."""
        self.current_idx = self.weather[current_weather]

    def __iter__(self):
        # Each iterator carries an independent copy of the current state.
        return MarkovIterator(self)
# +
# Demonstrate the iterator protocol: two independent iterators over one model.
m2 = Markov()
m2.load_data(data)
print(m2.current_idx)
m2.set_current("hailing")
print(m2.current_idx)
iter1 = iter(m2)
iter2 = iter(m2)
# Each next() reports the iterator's current day, then advances its private state;
# iter2 is untouched by iter1's walk.
print(next(iter1))
print(next(iter1))
print(next(iter1))
print(next(iter1))
# print(next(iter(m2)))
# -
# #### Part 2: We want to predict what weather will be like in a week for 5 different cities.
#
# Now that we have our `Markov` iterator, we can try to predict what the weather will be like in seven days from now.
#
# Given each city's current weather in the dictionary `city_weather` (see below), simulate what the weather will be like in 7 days from now. Rather than just producing one prediction per city, simulate 100 such predictions per city and store the most commonly occurring prediction.
#
# In your submission, print a dictionary `city_weather_predictions` that has each city as a key and the most commonly predicted weather as the corresponding value.
#
# **Note**: Don't worry if your values don't seem to make intuitive sense. We made up the weather probabilities.
# Starting weather for each city. NOTE(review): the prompt says 5 cities but 7
# are listed, and "San Fransisco" is misspelled -- both kept as-is because the
# keys are runtime data.
city_weather = {
    'New York': 'rainy',
    'Chicago': 'snowy',
    'Seattle': 'rainy',
    'Boston': 'hailing',
    'Miami': 'windy',
    'Los Angeles': 'cloudy',
    'San Fransisco': 'windy'
}
# +
from collections import Counter
# For each city: run `nSimulations` independent 7-day walks of the chain, then
# take, for each day, the most common predicted weather across the runs.
total_predictions = {}
nPredictions = 7
nSimulations = 100
m1 = Markov()
m1.load_data(data)
for city, weather in city_weather.items():
    m1.set_current(weather)
    simulations = []
    for i in range(nSimulations):
        predictor = iter(m1)
        # Discard the first value: it is today's (known) weather, not a prediction.
        next(predictor)
        predictions = []
        for j in range(nPredictions):
            predictions.append(next(predictor))
        simulations.append(predictions)
    total_predictions[city] = simulations
city_weather_predictions = {}
for c, w in total_predictions.items():
    most_common = []
    for i in range(nPredictions):
        # All `nSimulations` simulated values for day i in city c.
        each_day = [item[i] for item in total_predictions[c]]
        most_common.append(Counter(each_day).most_common(1)[0][0])
    # Per-day modal forecast for the coming week.
    city_weather_predictions[c] = most_common
# -
city_weather_predictions
|
homeworks/HW8/HW8_final.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
import numpy as np
import pandas as pd
import matplotlib as mpl
import matplotlib.pyplot as plt
import seaborn as sns
# %matplotlib inline
import pooch
# Download and unzip the Argo float profile data; pooch caches the archive and
# returns the list of extracted file paths.
url = "https://www.ldeo.columbia.edu/~rpa/float_data_4901412.zip"
files = pooch.retrieve(url, processor=pooch.Unzip(), known_hash="2a703c720302c682f1662181d329c9f22f9f10e1539dc2d6082160a469165009")
files
import os

# Derive the extracted data directory from pooch's returned file list instead
# of hard-coding a per-user absolute cache path (which breaks on any other
# machine or cache location).
float_dir = os.path.dirname(next(f for f in files if os.path.basename(f) == 'T.npy'))
T = np.load(os.path.join(float_dir, 'T.npy'))        # temperature profiles
level = np.load(os.path.join(float_dir, 'levels.npy'))  # vertical level indices
S = np.load(os.path.join(float_dir, 'S.npy'))        # salinity profiles
P = np.load(os.path.join(float_dir, 'P.npy'))        # pressure profiles
date = np.load(os.path.join(float_dir, 'date.npy'))  # profile dates
lat = np.load(os.path.join(float_dir, 'lat.npy'))    # float latitudes
lon = np.load(os.path.join(float_dir, 'lon.npy'))    # float longitudes
# assumes rows of T/S/P are depth levels and columns are profiles -- consistent
# with the axis=1 statistics and plt.plot(T, level) below; TODO confirm.
T.shape, S.shape, P.shape
np.shape(level)
# ### basic plots
# plot T vs level
plt.plot(T, level);
# S vs level
plt.plot(S, level);
# P vs level (original comment said "S vs level" -- this plots pressure)
plt.plot(P, level);
T.shape, S.shape, P.shape, level.shape,
# +
# row(level) wise mean and standard deviation for temp
T_mean = np.nanmean(T, axis=1) # to deal with the NaN values generated.
T_std = np.nanstd(T, axis=1)
T_mean.shape, T_std.shape
# +
# row(level) wise mean and standard deviation for salinity
S_mean = np.nanmean(S, axis=1)
S_std = np.nanstd(S, axis=1)
S_mean.shape, S_std.shape
# +
# row(level) wise mean and standard deviation for pressure
P_mean = np.nanmean(P, axis=1)
P_std = np.nanstd(P, axis=1)
P_mean.shape, P_std.shape
# -
# plotting
sns.set()
# NOTE(review): the 'seaborn-whitegrid' style name was removed in matplotlib
# >= 3.6; newer versions expect 'seaborn-v0_8-whitegrid'. Confirm the pinned
# matplotlib version before changing.
plt.style.use('seaborn-whitegrid')
#plot T_mean with per-level standard deviation as horizontal error bars
#plt.plot(, level)
plt.errorbar(T_mean, level, yerr=T_std, ecolor='red');
# +
# plt S mean and errbar
plt.errorbar(S_mean, level, yerr=S_std, ecolor='red');
# +
# plt P mean and errbar
plt.errorbar(P_mean, level, yerr=P_std, ecolor='red' );
# -
# ### scatterplot of the lon - lat positions of the ARGO
# +
plt.scatter(lon, lat, color='black');
# -
|
simple_ARGO_analysis.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ### Using PyMC3 for Bayesian Analysis
# I will use PyMC3 to do some Bayesian analysis. I will follow the book by Davidson-Pilon.
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
import pandas as pd
# %matplotlib inline
# Daily text-message counts: one value per day.
txt_data = np.loadtxt('data/txtdata.csv')
n_txt_data = len(txt_data)
days = np.arange(n_txt_data)
# Bar chart of the raw counts to eyeball a possible behaviour change.
fig, ax = plt.subplots(1, 1, figsize = (25,9))
ax.bar(days, txt_data, color = 'xkcd:violet')
ax.set_xlabel(r'No. of Days', fontsize = 20)
ax.set_ylabel('No. of text messages', fontsize = 20)
ax.tick_params(axis = 'both', labelsize = 20 )
ax.set_facecolor('0.80')
ax.grid(which = 'both', axis = 'both', color = 'xkcd:white')
ax.set_axisbelow(True)
# We want to model the texting behaviour of the author over various days. Since we are counting text messages, an appropriate distribution function to consider would be the Poisson distribution. However since we want to check for texting behaviour we might want to consider two different Poisson distributions to get a better understanding.
#
# Let us denote the text message count on the $i^{th}$ day as $C_i$. Since we are counting here we can furthermore write:
#
# $C_i = Poisson\left(\lambda\right)$
#
# If we assume that $\lambda$ might change after some day, then we can assume two values $\lambda_1$ and say after $\tau$ days we have $\lambda_2$. The specific time $\tau$ is known as the *switchpoint*.
#
# $$
# \lambda =
# \begin{cases}
# \lambda_1 & \text{if } t \lt \tau \cr
# \lambda_2 & \text{if } t \ge \tau
# \end{cases}
# $$
#
# Now in order to estimate $\lambda_1$ and $\lambda_2$ using Bayesian inference we need to have a prior distribution. We know that we can use an exponential distribution function to describe non-discrete positive numbers. This exponential distribution function will have a parameter of its own. let us call this parameter $\alpha$
#
# \begin{align}
# &\lambda_1 \sim \text{Exp}( \alpha ) \\\
# &\lambda_2 \sim \text{Exp}( \alpha )
# \end{align}
#
#
# Since the data is noisy it would be hard to decide a prior for $\tau$. We can thus consider a uniform prior in this case:
#
# \begin{align}
# & \tau \sim \text{DiscreteUniform(1,70) }\\\\
# & \Rightarrow P( \tau = k ) = \frac{1}{70}
# \end{align}
#
# #### Using PyMC3
import pymc3 as pm
import theano.tensor as tt
with pm.Model() as model:
    # Prior rate: 1/mean of the counts, so the exponential priors on the
    # Poisson rates sit on the same scale as the observed data.
    alpha = 1.0/txt_data.mean() # Recall count_data is the
    # variable that holds our txt counts
    lambda_1 = pm.Exponential("lambda_1", alpha)
    lambda_2 = pm.Exponential("lambda_2", alpha)
    # Discrete-uniform prior over all observed days for the switchpoint tau.
    tau = pm.DiscreteUniform("tau", lower=0, upper=n_txt_data - 1)
# We can now create a function lambda_ which will allow us to switch between $\lambda_1$ and $\lambda_2$ after time $\tau$.
with model:
    idx = np.arange(n_txt_data) # Index
    # Per-day rate: lambda_1 before the switchpoint, lambda_2 from it onward.
    lambda_ = pm.math.switch(tau > idx, lambda_1, lambda_2)
# All the parameters $\lambda_1, \lambda_2, \text{ and } \tau$ are random number and so is lambda_ .
with model:
    # Likelihood: each day's observed count is Poisson with that day's rate.
    observation = pm.Poisson("obs", lambda_, observed=txt_data)
# The variable `observation` combines our data, `count_data`, with our proposed data-generation scheme, given by the variable `lambda_`, through the `observed` keyword.
#
# The code below will be explained in Chapter 3, but I show it here so you can see where our results come from. One can think of it as a *learning* step. The machinery being employed is called *Markov Chain Monte Carlo* (MCMC), which I also delay explaining until Chapter 3. This technique returns thousands of random variables from the posterior distributions of $\lambda_1, \lambda_2$ and $\tau$. We can plot a histogram of the random variables to see what the posterior distributions look like. Below, we collect the samples (called *traces* in the MCMC literature) into histograms.
with model:
    # Metropolis sampling: 10k kept draws after 5k tuning steps.
    step = pm.Metropolis()
    trace = pm.sample(10000, tune=5000,step=step)
lambda_1_samples = trace['lambda_1']
lambda_2_samples = trace['lambda_2']
tau_samples = trace['tau']
# Posterior histograms for the two Poisson rates and the switchpoint.
fig, (ax1, ax2, ax3) = plt.subplots(3, 1, figsize = (15, 15))
# `normed` was deprecated and then removed from matplotlib's hist();
# `density=True` is the equivalent normalized-histogram argument.
ax1.hist(lambda_1_samples, histtype='stepfilled', bins=30, alpha=0.6,
         label="posterior of $\lambda_1$", color="#A60628", density=True)
ax1.legend(loc = 'best')
ax1.set_xlim([15, 30])
ax1.set_xlabel("$\lambda_1$")
ax1.set_facecolor('0.80')
ax1.grid(which = 'both', axis = 'both', color = 'xkcd:white', linestyle='dashed')
ax1.set_axisbelow(True)
ax2.hist(lambda_2_samples, histtype='stepfilled', bins=30, alpha=0.5,
         label="posterior of $\lambda_2$", color="#7A68A6", density=True)
ax2.legend(loc='best')
ax2.set_xlim([15, 30])
ax2.set_xlabel("$\lambda_2$")
ax2.set_facecolor('0.80')
ax2.grid(which = 'both', axis = 'both', color = 'xkcd:white', linestyle='dashed')
ax2.set_axisbelow(True)
# Weight each tau sample by 1/N so the bar heights sum to 1 (a probability mass).
w = 1.0 / tau_samples.shape[0] * np.ones_like(tau_samples)
ax3.hist(tau_samples, bins=n_txt_data, alpha=1,
         label=r"posterior of $\tau$",
         color="#467821", weights=w, rwidth=2.)
ax3.legend(loc='best')
ax3.set_xlim([35, len(txt_data)-20])
ax3.set_xlabel("Days")
ax3.set_ylabel('Probability')
ax3.set_facecolor('0.80')
ax3.grid(which = 'both', axis = 'both', color = 'xkcd:white', linestyle='dashed')
ax3.set_axisbelow(True)
# We see that the values of $\lambda_1$ and $\lambda_2$ is around 18 and 23 respectively. Additionally we see that the change in text messages received takes place over a span of 4 days, with the likelihood that the change in the number of text takes place on the 45th day is around 50 %. We would like to see how this change of number of text messages received takes place over time.
# +
fig, ax = plt.subplots(1,1, figsize = (15, 5))
# tau_samples, lambda_1_samples, lambda_2_samples contain
# N samples from the corresponding posterior distribution
N = tau_samples.shape[0]
expected_texts_per_day = np.zeros(n_txt_data)
for day in range(n_txt_data):
    # ix is the boolean index of tau samples for which the switchpoint falls
    # AFTER `day` (i.e. day is still in the lambda_1 regime). The original
    # comment said "prior to", which is the opposite of day < tau.
    ix = day<tau_samples
    # Posterior-average the Poisson rate for this day: lambda_1 for samples
    # whose switchpoint is after `day`, lambda_2 for the rest. Since the
    # message count is Poisson(lambda), lambda is the expected count.
    expected_texts_per_day[day] = (lambda_1_samples[ix].sum()
                                   + lambda_2_samples[~ix].sum()) / N
ax.plot(range(n_txt_data), expected_texts_per_day, '-', color = 'xkcd:magenta', linewidth = 5, alpha = 0.7, label = 'Expected Text Per Day')
ax.bar(days, txt_data, color = 'xkcd:violet', label = 'Observed texts per day')
ax.legend(loc = 'best')
ax.set_xlabel(r'Days', fontsize = 20)
ax.set_ylabel('No. of text messages', fontsize = 20)
ax.tick_params(axis = 'both', labelsize = 20 )
ax.set_facecolor('0.80')
ax.grid(which = 'both', axis = 'both', color = 'xkcd:white')
ax.set_axisbelow(True)
|
Markov Chain Monte Carlo/.ipynb_checkpoints/Bayesian Statistics using PyMC3-checkpoint.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
# -*- coding: utf-8 -*-
# Fit a cubic polynomial y = a + b x + c x^2 + d x^3 to sin(x) on [-pi, pi]
# with manually-derived gradients and plain gradient descent.
import torch
import math
dtype = torch.float
device = torch.device("cpu")
# device = torch.device("cuda:0") # Uncomment this to run on GPU
# Create random input and output data
x = torch.linspace(-math.pi, math.pi, 2000, device=device, dtype=dtype)
y = torch.sin(x)
# Randomly initialize weights
a = torch.randn((), device=device, dtype=dtype)
b = torch.randn((), device=device, dtype=dtype)
c = torch.randn((), device=device, dtype=dtype)
d = torch.randn((), device=device, dtype=dtype)
learning_rate = 1e-6
for t in range(2000):
    # Forward pass: compute predicted y
    y_pred = a + b * x + c * x ** 2 + d * x ** 3
    # Compute the sum-of-squares loss; print it every 100 iterations
    loss = (y_pred - y).pow(2).sum().item()
    if t % 100 == 99:
        print(t, loss)
    # Backprop to compute gradients of a, b, c, d with respect to loss
    # (dLoss/dy_pred = 2*(y_pred - y), then chain rule per coefficient)
    grad_y_pred = 2.0 * (y_pred - y)
    grad_a = grad_y_pred.sum()
    grad_b = (grad_y_pred * x).sum()
    grad_c = (grad_y_pred * x ** 2).sum()
    grad_d = (grad_y_pred * x ** 3).sum()
    # Update weights using gradient descent
    a -= learning_rate * grad_a
    b -= learning_rate * grad_b
    c -= learning_rate * grad_c
    d -= learning_rate * grad_d
print(f'Result: y = {a.item()} + {b.item()} x + {c.item()} x^2 + {d.item()} x^3')
# Evaluate the fitted polynomial and compare its first value against sin(x).
test_y=a.item()+b.item()*x+c.item()*x**2+d.item()*x**3
print(test_y)
print(y[0])
# -
|
machine_learning_related/using_pytorch.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Setting Up Python For Scientific Computing
# (c) 2016 <NAME>. This work is licensed under a [Creative Commons Attribution License CC-BY 4.0](https://creativecommons.org/licenses/by/4.0/). All code contained herein is licensed under an [MIT license](https://opensource.org/licenses/MIT)
# ---
# In this tutorial, we will set up a scientific Python computing environment using the [Anaconda python distribution by Continuum Analytics](https://www.continuum.io/downloads).
# ## Why Python?
# As is true in human language, there are [hundreds of computer programming languages](https://en.wikipedia.org/wiki/List_of_programming_languages). While each has their own merit, the major languages for scientific computing are C, C++, R, MATLAB, Python, Java, and Fortran. [MATLAB](https://www.mathworks.com) and [Python](https://www.python.org) are similar in syntax and typically read as if it were written in plain english. This makes both languages a useful tool for teaching but they are also very powerful languages and are **very** actively used in real-life research. MATLAB is proprietary while Python is open source. A benefit of being open source is that anyone can write and release Python packages. For science, there are many wonderful community-driven packages such as [NumPy](http://www.numpy.org), [SciPy](http://www.scipy.org), [scikit-image](http://scikit-image.org), and [Pandas](http://pandas.pydata.org) just to name a few.
# ## Installing Python 3.5 with Anaconda
# ### Python 3.5 vs Python 2.7
# There are two dominant versions of Python used for scientific computing, Python 2.7.x and Python 3.5.x. We are at an interesting crossroads between these two versions. The most recent release (Python 3.5.0 as of December 2016) is not backwards compatible with previous versions of Python. While there are still some packages written for Python 2.7 that have not been modified for compatibility with Python 3.5, a large number have transitioned. As this will be the future for scientific computing with Python, we will use Python 3.5.0 for these tutorials.
# ### Anaconda
# There are several Python distributions available for MacOS, Windows, and Linux. The two most popular, [Enthought Canopy](https://www.enthought.com/products/canopy/) and [Anaconda](https://www.continuum.io/why-anaconda) are specifically designed for scientific computing and data science work. For this course, we will use the Anaconda Python 3.5 distribution. To install the correct version, follow the instructions below.
#
# 1. Navigate to [the Anaconda download page](https://www.continuum.io/downloads) and download the Python 3.5 graphical installer. You will be asked for your email address which you should provide. If you are affiliated with a university, you should use your `.edu` address as you will have access to some useful goodies unavailable to the public.
# 2. Launch the installer and follow the onscreen instructions.
# 3. Open the newly installed **Anaconda Navigator** application.
#
# Congratulations! You now have the beginnings of a scientific Python distribution.
# ### Launching an interpreter through Anaconda Navigator
# Unlike [MATLAB](https://www.mathworks.com), another popular scientific computing language, Python does not have an *official* graphical user interface (GUI). Rather, we will be writing Python scripts in a text editor and running them through the [IPython interpreter](https://ipython.org/project.html) (also referred to in Anaconda as the 'qtconsole'). Here we will be able to tell our computer to execute snippets of code and run Python scripts. To launch the IPython interpreter, open the Anaconda Navigator application and click on 'Launch' under the 'qtconsole', shown in the screenshot below.
# 
# You should now be greeted with a white window with some information about your IPython version and an input prompt reading `In[1]`. Before we begin coding in Python, we will need to install two packages.
# ### Installing extra packages using Conda
# With the Anaconda Python distribution, you can install verified packages (scientific and non-scientific) through the [Conda](http://conda.pydata.org/docs/) package manager. **Note that you do not have to download Conda separately. This comes packaged with Anaconda**. To install packages through Conda, we must manually enter their names on the command line. For the purposes of these tutorials, we will only need to install/upgrade two packages -- [Seaborn for plotting styling](http://seaborn.pydata.org) and an update IPython to [IPython 5.0](http://blog.jupyter.org/2016/07/08/ipython-5-0-released/). Rather than do this on the command line, we can install these directly from the IPython interpreter. In your IPython interpreter, type the following lines.
#
# ```
# In[1]: ! conda upgrade ipython --yes
# In[2]: ! conda install seaborn --yes
# ```
#
# Note that the flag `--yes` is telling Conda that you agree to upgrade the packages on your computer that might not be compatible with other Python packages. You can remove the `--yes` tag, but you will have to approve them manually.
#
# Once you have executed these commands, close the IPython interpreter window and open a new one.
# ## Installing Atom text editor
# While we now have everything we need to execute Python scripts, we need an editor to write them with. A particularly useful one is [Atom](https://atom.io), but any text editor should work. To install Atom on your machine, follow the instructions below.
#
# 1. [Navigate to the Atom](https://atom.io) homepage and follow the instructions for installation.
#
# 2. Once installed, launch Atom and navigate to `Packages -> Settings View -> Open` and scroll to the bottom of the page. Make sure the setting `Tab Length` is set to 4. Below that, make sure `Tab Type` is set to `soft`. This is important as indentation and white space is interpreted in Python.
#
# ### Setting up the directory structure
# For this course (and your coding in 'real life'), it will help if you follow a specific directory structure for your code and data. During this course, we will be writing a lot of Python scripts that will load in data. So you can directly follow along in class, it is important that you and the instructors have the same directory structure. To make this structure, open Atom and follow the instructions below.
#
# 1. Navigate to `File -> Add Project Folder` and make a new folder in your home directory. On MacOS and Linux, this will be in `/Users/YOUR_USERNAME/`. On Windows, this will be XXX.
#
# 2. Name this project `pboc`.
#
# 3. Now `pboc` should appear on the left-hand side of your editor. Right-click on `pboc` and make a new folder called `data`. This is where all of our data from the class will live.
#
# Now, if everything went well, your Atom editor window should look like this on the left-hand side.
# 
# ## Your first script and reading these tutorials
# This tutorial (as all others in this course) is written as a [Jupyter notebook]() — a document that contains cells for writing text and math as well as cells that contain and execute blocks of Python code. While we will be writing Python code in our Atom text editor, these tutorials will serve as a useful reference that not only shows the code and output, but also an explanation of the biological and physical principles behind it. For these tutorials, code and its output are rendered in two boxes as shown below.
# This is a comment and is not read by Python
print('Hello! This is the print function. Python will print this line below')
# The box with the gray background contains the python code while the output is in the box with the white background. When reading these tutorials, you may want to retype (or copy-and-paste) the code lines into Atom or in the IPython interpreter directly.
#
# If you have followed the steps above, we are finally ready to write our first Python script. In your Atom window, create a new file named `my_first_script.py` and save it within your `pboc` root directory (not in `data`). You can do this by going to `File -> New File` then `File -> Save` and navigate to your `pboc` folder. Now, in the `my_first_script.py` file, we'll generate a plot of one of my favorite functions. Type (or copy and paste) the following lines into you script file and save it.
#
# Now we are finally ready to write our first Python script. In your Atom window, create a new file named `my_first_script.py` and save it within your `pboc` root directory (not in `data`). You can do this by going to `File -> New File` then `File -> Save` and navigate to your `pboc` folder. Now, in the `my_first_script.py` file, we'll generate a plot of one of my favorite functions. Type (or copy and paste) the following lines into you script file and save it by going to `File -> Save`.
# +
# Import Python packages necessary for this script
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
# Generate a beautiful sinusoidal curve: nesting sin() three times
# flattens the peaks of the basic sine wave.
x = np.linspace(0, 2*np.pi, 500)
y = np.sin(2 * np.sin(2 * np.sin(2 * x)))
plt.plot(x, y)
plt.xlabel('$x$')
plt.ylabel('$y$')
plt.show()
# -
# Once you have this file saved, open a new IPython interpreter through the Anaconda Navigator window and type the following commands.
# ```
# In [1]: cd pboc
# In [2]: %matplotlib
# In [3]: %run my_first_script.py
# ```
#
# The first command navigates to the correct directory. The second command allows for us to keep typing while plots are being shown. The third command runs the script we just wrote through the IPython interpreter. The percentage signs for `In [2]:` and `In [3]:` are called Python magic fuctions and are [explained in the python syntax tutorial](). While just typing `matplotlib` and `run my_first_script.py` will work, it is better style to use these magic functions.
#
#
# If everything works as expected, you should see the plot below.
# These commands are for showing the plot in this notebook only.
# %matplotlib inline
plt.plot(x, y)
plt.xlabel('$x$')
plt.ylabel('$y$')
# With that, you are now set up to do some scientific computing in Python! For the rest of the course, we will be going through this same procedure to computationally explore principles of physical biology. To this end, our computer screens will typically look something like this:
#
# 
#
# although you can code however you feel comfortable!
# ## What is Jupyter?
# [Jupyter Notebooks](http://jupyter.org) are very useful tools for writing code, text, and math into a single document. In fact, this (and all other tutorials) were written in Jupyter notebooks. While we won't use them in this class, I strongly suggest you learn about them by following [this excellent tutorial](http://bebi103.caltech.edu/2016/tutorials/t0b_intro_to_jupyter_notebooks.html) written by a Caltech Professor of Biology and Biological Engineering, [<NAME>](http://www.bois.caltech.edu).
|
code/nb/setting_up_python.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] colab_type="text" id="vQUlhd9D9fgu"
# # Test endpoint
# +
import json
import requests
from captum.attr import visualization as viz
# +
# Placeholders: fill these in before running the notebook (left as-is on purpose,
# so the notebook fails loudly if run unconfigured).
url = <ENDPOINT URL>
token = <ENDPOINT TOKEN>
sentences = <LIST OF SENTENCES>
# +
headers = {
    "Content-Type": "application/json",
    "X-Bedrock-Api-Token": token,
}
# bool_xai=1 asks the endpoint to also return token attributions for explanations.
data = json.dumps({"sentences": sentences, "bool_xai": 1})
response = requests.post(url, headers=headers, data=data)
print(response.ok)
print(response.json())
# -
y_prob = response.json()["y_prob"]
attributes = response.json().get("attributes")
# +
i = 0 # select sentence
true_label = None
# Render the token-level attributions for sentence i with captum's visualizer.
result_vis = viz.VisualizationDataRecord(
    attributes[i]["attributions"],
    y_prob[i],
    (y_prob[i] > 0.5),
    true_label,
    "label",
    sum(attributes[i]["attributions"]),
    attributes[i]["tokens"],
    attributes[i]["delta"],
)
df = viz.visualize_text([result_vis])
# -
|
nlp/test_endpoint.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
# # Using SFRmaker with NHDPlus High Resolution
# This notebook demonstrates how to use `sfrmaker` to build an SFR package with an NHDPlus HR file geodatabase (or set of file geodatabases) obtained from the USGS National Map download client.
import os
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import matplotlib.patches as mpatches
from shapely.geometry import box
from flopy.discretization import StructuredGrid
import geopandas as gpd
import sfrmaker
sfrmaker.__version__
# In this demo, two HUC-4 file geodatabases (HUC0202 and HUC0204; each clipped to a reduced size) are used to make a single SFR network. NHDPlusHR networks for HUC-4 drainage basins are available from the national map downloader as file geodatabases: https://apps.nationalmap.gov/downloader/#/
# ## 1. Preview NHDPlusHR geodatabases
# +
# Paths to the two (clipped) HUC-4 NHDPlus HR file geodatabases used in this demo.
NHDPlusHR_paths = ['../neversink_rondout/NHDPLUS_HR_1.gdb', '../neversink_rondout/NHDPLUS_HR_2.gdb']
# first NHDPlus HR file geodatabase -- derived from a section of HUC_0202 for this demo
gdb1 = gpd.read_file(NHDPlusHR_paths[0], driver='OpenFileGDB', layer='NHDFlowline')
# second NHDPlus HR file geodatabase -- derived from a section of HUC_0204 for this demo
gdb2 = gpd.read_file(NHDPlusHR_paths[1], driver='OpenFileGDB', layer='NHDFlowline')
# -
# ### Plot the raw NHD HR lines from the two geodatabases
fig, ax = plt.subplots(figsize=(6,8))
gdb1.plot(ax=ax, color='red', label='NHDPLUS_HR_1.gdb')
gdb2.plot(ax=ax, color='blue', label='NHDPLUS_HR_2.gdb')
ax.legend()
plt.show()
# #### Note: NHDPlusHR fileGDBs have EPSG:4269 CRS
# Both geodatabases must share a CRS before they can be combined into one network.
assert gdb1.crs == gdb2.crs
nhdhr_epsg = gdb1.crs
print(nhdhr_epsg)
# ## 2. Filter network using shapefile boundary
# The network can be filtered using a shapefile of the model domain
# +
boundary_file = '../neversink_rondout/Model_Extent.shp'
model_boundary = gpd.read_file(boundary_file)
# +
fig, ax = plt.subplots(figsize=(8,10))
gdb1.plot(ax=ax, color='red', label='NHDPLUS_HR_1.gdb')
gdb2.plot(ax=ax, color='blue', label='NHDPLUS_HR_2.gdb')
model_boundary.plot(ax=ax, facecolor='lightgray', edgecolor='black', label='Model Boundary')
# plot boundary box that will be used for filtering
bbox_geometry = [box(x1, y1, x2, y2) for x1,y1,x2,y2 in model_boundary.bounds.values]
bbox = gpd.GeoDataFrame(geometry=bbox_geometry, crs=model_boundary.crs)
bbox.plot(ax=ax, facecolor='None', edgecolor='green', linestyle='--', label='Boundary bounding box')
# GeoDataFrame.plot() does not forward `label` to a legend, so build the
# legend entries by hand.
LegendElement = [
    mpatches.mlines.Line2D([], [], color='red', label='NHDPLUS_HR_1.gdb'),
    mpatches.mlines.Line2D([], [], color='blue', label='NHDPLUS_HR_2.gdb'),
    mpatches.Patch(facecolor='lightgray', edgecolor='black', label='Model Boundary'),
    mpatches.Patch(facecolor='None', edgecolor='green',
                   linestyle='--', label='Model boundary\nbounding box'),
]
ax.legend(handles=LegendElement, loc='best')
plt.show()
# -
# ## 3. Option to remove reaches with unwanted FCodes
# NHDPlusHR flowlines include feature codes (FCodes) that describe reach attributes. In certain cases, a user may not wish to include certain types of features present in the NHDPlusHR geodatabase in the SFR network. A complete list of FCodes is shown below:
# 
# **Look at FCodes in `gdb2`**
gdb2.FCode.unique().astype(int).tolist()
# **We can remove any segments that have FCodes we don't want to include in the final SFR network**
# In this case, we want to exclude any lines classified as aqueduct pipelines.
# +
drop_fcodes = [
    42803 # Aqueduct pipeline
]
# Preview the flowlines that will be dropped.
gdb2.loc[gdb2.FCode.isin(drop_fcodes)]
# -
fig, ax = plt.subplots(figsize=(6,8))
gdb2.plot(ax=ax, color='black', zorder=0, label='Lines to keep')
gdb2.loc[gdb2.FCode.isin(drop_fcodes)].plot(ax=ax, color='red', zorder=1, label='Line to remove\n(Pipeline)')
ax.legend()
plt.show()
# ## 4. Make an `sfrmaker.lines` instance using `from_nhdplus_hr`
# + tags=["{", "\"tags\":", "[", "\"hide-output\"", "]", "}"]
# Build the sfrmaker Lines from both geodatabases at once: flowlines are
# clipped to the boundary shapefile and the unwanted FCodes are dropped.
lines = sfrmaker.Lines.from_nhdplus_hr(NHDPlusHR_paths,
                                       filter=boundary_file,
                                       drop_fcodes=drop_fcodes,
                                       epsg=4269
                                       )
# -
# **Check out the `lines` DataFrame**
lines.df.head()
# ### visualize the SFR network
# The extent is limited to lines that intersect the bounding box of the supplied model boundary shapefile. Note: the aqueduct pipeline (highlighted above) in the north central part of the network was also removed.
# +
fig, ax = plt.subplots(figsize=(10,8))
lines.df.plot(ax=ax)
model_boundary.plot(ax=ax, facecolor='lightgray', edgecolor='black', label='Model Boundary')
bbox.plot(ax=ax, facecolor='None', edgecolor='green', linestyle='--', label='Boundary bounding box')
LegendElement = [
    mpatches.mlines.Line2D([], [], color='#1f77b4', label='sfrmaker lines'),
    mpatches.Patch(facecolor='lightgray', edgecolor='black', label='Model Boundary'),
    mpatches.Patch(facecolor='None', edgecolor='green',
                   linestyle='--', label='Model boundary\nbounding box'),
]
ax.legend(handles=LegendElement, loc='best')
plt.show()
# -
# ## 5. Create a streamflow routing dataset and write an SFR package input file for MODFLOW
# **This requires an `sfrmaker.lines` instance (created in step 4) and a `sfrmaker.grid` instance**
# we can start by building a synthetic Flopy structured grid
# Synthetic structured model grid: 160 rows x 220 columns of 100 m cells,
# with an EPSG:5070 (CONUS Albers) coordinate offset.
flopy_structuredgrid = StructuredGrid(delc=np.full(160,100.),
                                      delr=np.full(220,100.),
                                      epsg=5070,
                                      xoff=1742953.0226834335,
                                      yoff=2279064.250857591,
                                      lenuni='meters'
                                      )
# ...then pass it to sfrmaker, to create an sfrmaker model grid. The active area can be defined as the model boundary shapefile.
# + tags=["{", "\"tags\":", "[", "\"hide-output\"", "]", "}"]
grid = sfrmaker.StructuredGrid.from_modelgrid(flopy_structuredgrid,
                                              active_area=boundary_file)
# -
# **Note: The model grid is in a different CRS (EPSG=5070) from the NHD_HR dataset (EPSG=4269). sfrmaker reprojects the sfr lines to the modelgrid during the `to_sfr` method.**
sfrdata = lines.to_sfr(grid=grid)
# ### write the sfr data set to a MODFLOW SFR file
# Now, we can write the sfr data set to a MODFLOW SFR file. Normally, one likely would pass a Flopy model instance to `lines.to_sfr`. Here, no model instance is passed -- the package is written independent of a model for illustrative purposes.
# %%capture
sfrdata.write_package(filename='../neversink_rondout/nhd_hr_demo.sfr', version='mf6')
sfrdata.write_shapefiles('shps/nhd_hr_demo')
# ### Finally, we can review our completed SFR network
# below, we plot sfr routing and outlets
# +
routing = gpd.read_file('shps/nhd_hr_demo_sfr_routing.shp')
outlets = gpd.read_file('shps/nhd_hr_demo_sfr_outlets.shp')
# reproject the boundary into the model CRS so everything plots in one coordinate system
model_boundary_5070 = model_boundary.to_crs(epsg=5070)
fig, ax = plt.subplots(figsize=(10,8))
routing.plot(ax=ax, zorder=1)
outlets.plot(ax=ax, c='red', zorder=2, label='outlets')
model_boundary_5070.plot(ax=ax, facecolor='None',
                         edgecolor='gray',
                         zorder=0
                         )
LegendElement = [
    mpatches.mlines.Line2D([], [], color='red', linewidth=0., marker='o', label='sfr outlet'),
    mpatches.mlines.Line2D([], [], color='#1f77b4', label='sfr routing'),
    mpatches.Patch(facecolor='None', edgecolor='gray', label='Model Boundary\n(active area)')
]
ax.legend(handles=LegendElement, loc='best')
plt.show()
# -
|
examples/Notebooks/lines_from_NHDPlusHR_demo.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="Kh29EEg2I9qu"
# Generate figures for Lecture 5.
# + id="QTTO1Po7I_7k" executionInfo={"status": "ok", "timestamp": 1640885786807, "user_tz": 480, "elapsed": 229, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GhwJbFKBBICkDJdVudgsTuWkkyr0jrw5PxmRvic=s64", "userId": "09527353465813384085"}}
import numpy as np
import matplotlib.pyplot as plt
import pywt
# + colab={"base_uri": "https://localhost:8080/", "height": 279} id="2fxCXRgaJIei" executionInfo={"status": "ok", "timestamp": 1640885787078, "user_tz": 480, "elapsed": 279, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GhwJbFKBBICkDJdVudgsTuWkkyr0jrw5PxmRvic=s64", "userId": "09527353465813384085"}} outputId="b2aa8a3e-a950-4184-c922-e8284be23c4b"
L = 1  # signal length
N_grid = 2**10  # number of sample points
grid = np.linspace(0, 1, N_grid)
# A pure sinusoid vs. the same sinusoid under a Gaussian (Gabor) window centred at t=0.5.
fourier_ex = np.sin(10*np.pi*grid)
gabor_ex = np.exp( - ( (1/(2*0.025))*(grid- 0.5)**2 ) )*fourier_ex
plt.plot(grid, fourier_ex, grid, gabor_ex)
plt.xlabel('t')
plt.show()
# + [markdown] id="R1b_j-k6bmjh"
# Example scalogram computed on the [El Nino dataset](http://paos.colorado.edu/research/wavelets/wave_idl) of quarterly sea surface measurements from 1871 to 1991.
# + id="slBpXxcYc2ec" executionInfo={"status": "ok", "timestamp": 1640885798149, "user_tz": 480, "elapsed": 253, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GhwJbFKBBICkDJdVudgsTuWkkyr0jrw5PxmRvic=s64", "userId": "09527353465813384085"}}
import pandas as pd
# + colab={"base_uri": "https://localhost:8080/", "height": 614} id="Y1tgVAM5Jf8h" executionInfo={"status": "ok", "timestamp": 1640885802199, "user_tz": 480, "elapsed": 1394, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GhwJbFKBBICkDJdVudgsTuWkkyr0jrw5PxmRvic=s64", "userId": "09527353465813384085"}} outputId="68e23a14-18cc-46e6-b5e3-7e0b9b700ead"
# Quarterly sea-surface temperatures, 1871-1991 (El Nino 3 region).
dataset = "http://paos.colorado.edu/research/wavelets/wave_idl/sst_nino3.dat"
df_nino = pd.read_table(dataset)
N = df_nino.shape[0]
t0=1871 # initial year
dt=0.25 # data measured 4 times each year
time = np.arange(0, N) * dt + t0
data = df_nino.values.squeeze()
fig, ax = plt.subplots(figsize=(15, 10))
ax.plot(time, data)
ax.set_xlabel('Year', fontsize=18)
ax.set_ylabel('Temperature', fontsize=18)
plt.show()
# + colab={"base_uri": "https://localhost:8080/", "height": 635} id="sUzDHnd1br9b" executionInfo={"status": "ok", "timestamp": 1640885804636, "user_tz": 480, "elapsed": 653, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GhwJbFKBBICkDJdVudgsTuWkkyr0jrw5PxmRvic=s64", "userId": "09527353465813384085"}} outputId="a6b52f4c-ffc1-4a9c-c08d-9d5508e849d2"
# Continuous wavelet transform of the El Nino series; plot log2(power)
# against log2(period) so octaves appear evenly spaced.
scales = np.arange(1, 128)
[wc, freq] = pywt.cwt(data, scales, 'gaus3', dt)
power = wc**2
period = 1. / freq
levels = [0.0625, 0.125, 0.25, 0.5, 1, 2, 4, 8]
contourlevels = np.log2(levels)
fig, ax = plt.subplots(figsize=(15, 10))
im = ax.contourf(time, np.log2(period), np.log2(power), contourlevels, extend='both')
ax.set_title('Power Spectrum of Wavelet Transform', fontsize=20)
ax.set_xlabel('Time', fontsize=18)
ax.set_ylabel('Period (Years)', fontsize=18)
# Label the log-scaled y axis with the corresponding periods in years.
yticks = 2**np.arange(np.ceil(np.log2(period.min())), np.ceil(np.log2(period.max())))
ax.set_yticks(np.log2(yticks))
ax.set_yticklabels(yticks)
cbar_ax = fig.add_axes([0.95, 0.5, 0.03, 0.25])
fig.colorbar(im, cax=cbar_ax, orientation="vertical")
plt.show()
# + [markdown] id="UTJ8ghX3oxP7"
# A little interactive demo to play around and get a feel for how the wavelet transform sees different scales.
# + colab={"base_uri": "https://localhost:8080/", "height": 614} id="ZWWNzRoWo4sY" executionInfo={"status": "ok", "timestamp": 1640886807540, "user_tz": 480, "elapsed": 803, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GhwJbFKBBICkDJdVudgsTuWkkyr0jrw5PxmRvic=s64", "userId": "09527353465813384085"}} outputId="dc397368-73c1-46aa-fb7a-10fb1301566a"
def g(x, kk, ss, tt):
    """Gabor-style atom: a Gaussian envelope (centre tt, width ss) times sin(kk*pi*x).

    Works elementwise on scalars or numpy arrays.
    """
    envelope = np.exp(-((x - tt) ** 2) / (2.0 * ss ** 2))
    return envelope * np.sin(kk * np.pi * x)
# Parameters of the demo atom: frequency k, envelope width s, centre t.
k = 300
s = 0.05
t = 0.25
g_vals = g(grid, k, s, t) # + g(grid, k/2, s/2, t/2)
fig, ax = plt.subplots(figsize=(15, 10))
ax.plot(grid, g_vals)
ax.set_xlabel('t', fontsize=18)
plt.show()
# + colab={"base_uri": "https://localhost:8080/"} id="86RUSgguqR2P" executionInfo={"status": "ok", "timestamp": 1640885905119, "user_tz": 480, "elapsed": 211, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GhwJbFKBBICkDJdVudgsTuWkkyr0jrw5PxmRvic=s64", "userId": "09527353465813384085"}} outputId="8e92c63f-f88f-458e-d5c5-43f0f027dedc"
print(pywt.wavelist(kind='continuous')) # in case you want to change the wavelet
# + colab={"base_uri": "https://localhost:8080/"} id="gZriaU7tpkG7" executionInfo={"status": "ok", "timestamp": 1640886812844, "user_tz": 480, "elapsed": 700, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GhwJbFKBBICkDJdVudgsTuWkkyr0jrw5PxmRvic=s64", "userId": "09527353465813384085"}} outputId="55c3e155-795c-4209-f645-97e15fe7bbfe"
# Scalogram of the synthetic atom g_vals defined above.
g_scales = np.arange(1, 64)
# NOTE: rebinds dt (0.25 for the El Nino data above) to this signal's sample spacing.
dt = L/N_grid
# Bug fix: g_scales was defined but never used -- the CWT was previously computed
# with `scales` (1..127) left over from the El Nino example above.
gwc, gwf = pywt.cwt(g_vals, g_scales, 'cmor', sampling_period=dt)
fig, ax = plt.subplots(figsize=(15, 10))
ax.contourf(grid, gwf, np.abs(gwc), extend='both')
ax.set_xlabel('t', fontsize=18)
ax.set_ylabel('freq (Hz)', fontsize=18)
# + id="NOecsRzYqXKH" executionInfo={"status": "aborted", "timestamp": 1640885787483, "user_tz": 480, "elapsed": 53, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GhwJbFKBBICkDJdVudgsTuWkkyr0jrw5PxmRvic=s64", "userId": "09527353465813384085"}}
|
course_notes/AMATH582-Lec5.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + _cell_guid="b1076dfc-b9ad-4769-8c92-a6c4dae69d19" _uuid="8f2839f25d086af736a60e9eeb907d3b93b6e0e5" papermill={"duration": 0.044322, "end_time": "2021-11-16T09:22:33.717723", "exception": false, "start_time": "2021-11-16T09:22:33.673401", "status": "completed"} tags=[]
# This Python 3 environment comes with many helpful analytics libraries installed
# It is defined by the kaggle/python Docker image: https://github.com/kaggle/docker-python
# For example, here's several helpful packages to load
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
# List every input file Kaggle has mounted under the read-only /kaggle/input tree.
for dirname, _, filenames in os.walk('/kaggle/input'):
    for filename in filenames:
        print(os.path.join(dirname, filename))
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
# + [markdown] papermill={"duration": 0.018578, "end_time": "2021-11-16T09:22:33.755925", "exception": false, "start_time": "2021-11-16T09:22:33.737347", "status": "completed"} tags=[]
# # Please UPVOTE if you like my Notebook !!
# + [markdown] papermill={"duration": 0.018776, "end_time": "2021-11-16T09:22:33.793690", "exception": false, "start_time": "2021-11-16T09:22:33.774914", "status": "completed"} tags=[]
# # Import Dependencies
# + papermill={"duration": 0.889648, "end_time": "2021-11-16T09:22:34.702311", "exception": false, "start_time": "2021-11-16T09:22:33.812663", "status": "completed"} tags=[]
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
# %matplotlib inline
import seaborn as sns
import datetime
# + papermill={"duration": 0.133399, "end_time": "2021-11-16T09:22:34.855933", "exception": false, "start_time": "2021-11-16T09:22:34.722534", "status": "completed"} tags=[]
# Load the monthly live-birth counts dataset.
df = pd.read_csv('/kaggle/input/seasonal-variation-in-births/BirthMonthsData.csv')
# + papermill={"duration": 0.052139, "end_time": "2021-11-16T09:22:34.928347", "exception": false, "start_time": "2021-11-16T09:22:34.876208", "status": "completed"} tags=[]
df.sample(5)
# + papermill={"duration": 0.031262, "end_time": "2021-11-16T09:22:34.979241", "exception": false, "start_time": "2021-11-16T09:22:34.947979", "status": "completed"} tags=[]
df.shape
# + papermill={"duration": 0.056382, "end_time": "2021-11-16T09:22:35.055436", "exception": false, "start_time": "2021-11-16T09:22:34.999054", "status": "completed"} tags=[]
df.info()
# + papermill={"duration": 0.028757, "end_time": "2021-11-16T09:22:35.105342", "exception": false, "start_time": "2021-11-16T09:22:35.076585", "status": "completed"} tags=[]
# Store the counts as integers (they load as a wider/looser dtype).
df['Number_of_Births'] = df['Number_of_Births'].astype(int)
# + papermill={"duration": 0.046977, "end_time": "2021-11-16T09:22:35.172347", "exception": false, "start_time": "2021-11-16T09:22:35.125370", "status": "completed"} tags=[]
df.info()
# + [markdown] papermill={"duration": 0.020024, "end_time": "2021-11-16T09:22:35.213044", "exception": false, "start_time": "2021-11-16T09:22:35.193020", "status": "completed"} tags=[]
# # Number of Birth Yearwise
# + papermill={"duration": 0.044038, "end_time": "2021-11-16T09:22:35.277393", "exception": false, "start_time": "2021-11-16T09:22:35.233355", "status": "completed"} tags=[]
df.isnull().sum()
# + papermill={"duration": 0.033477, "end_time": "2021-11-16T09:22:35.331252", "exception": false, "start_time": "2021-11-16T09:22:35.297775", "status": "completed"} tags=[]
# Group rows by calendar year.
df1 = df.groupby('Year')
# + papermill={"duration": 0.047918, "end_time": "2021-11-16T09:22:35.405047", "exception": false, "start_time": "2021-11-16T09:22:35.357129", "status": "completed"} tags=[]
df1.head()
# + papermill={"duration": 0.037736, "end_time": "2021-11-16T09:22:35.466358", "exception": false, "start_time": "2021-11-16T09:22:35.428622", "status": "completed"} tags=[]
df1['Number_of_Births'].sum().reset_index()
# + papermill={"duration": 0.032159, "end_time": "2021-11-16T09:22:35.521489", "exception": false, "start_time": "2021-11-16T09:22:35.489330", "status": "completed"} tags=[]
# Yearly totals with friendlier column names for plotting.
df2 = df1['Number_of_Births'].sum().reset_index()
df2.columns = ['Year','Total_Birth']
# + papermill={"duration": 1.602255, "end_time": "2021-11-16T09:22:37.148213", "exception": false, "start_time": "2021-11-16T09:22:35.545958", "status": "completed"} tags=[]
ax=sns.catplot(x = "Year", y="Total_Birth", kind ='bar', data =df2, height = 8 , aspect = 3,legend = True)
plt.title(" Number of birth Year Wise ")
plt.xlabel('Year')
plt.ylabel('Total number of Births')
plt.grid(True)
# + [markdown] papermill={"duration": 0.022375, "end_time": "2021-11-16T09:22:37.193388", "exception": false, "start_time": "2021-11-16T09:22:37.171013", "status": "completed"} tags=[]
# # Data Cleaning
# + papermill={"duration": 0.034584, "end_time": "2021-11-16T09:22:37.250711", "exception": false, "start_time": "2021-11-16T09:22:37.216127", "status": "completed"} tags=[]
df['Month'].value_counts()
# + papermill={"duration": 0.029947, "end_time": "2021-11-16T09:22:37.303622", "exception": false, "start_time": "2021-11-16T09:22:37.273675", "status": "completed"} tags=[]
# Work on a second name; the drops below rebind df1, so df itself stays intact.
df1 = df
# + papermill={"duration": 0.041368, "end_time": "2021-11-16T09:22:37.368382", "exception": false, "start_time": "2021-11-16T09:22:37.327014", "status": "completed"} tags=[]
# Remove the yearly 'Total' rows so only individual months remain.
df1 = df1.drop(df1[df1['Month'] == 'Total'].index)
# + papermill={"duration": 0.073216, "end_time": "2021-11-16T09:22:37.465146", "exception": false, "start_time": "2021-11-16T09:22:37.391930", "status": "completed"} tags=[]
# Remove the quarterly aggregates and unknown months.
df1 = df1.drop(df1[df1['Month'] == 'January - March'].index)
df1 = df1.drop(df1[df1['Month'] == 'April - June'].index)
df1 = df1.drop(df1[df1['Month'] == 'July - September'].index)
df1 = df1.drop(df1[df1['Month'] == 'October - December'].index)
df1 = df1.drop(df1[df1['Month'] == 'Unknown'].index)
# + papermill={"duration": 0.037917, "end_time": "2021-11-16T09:22:37.527272", "exception": false, "start_time": "2021-11-16T09:22:37.489355", "status": "completed"} tags=[]
df1 = df1.drop(df1[df1['Record Type'] == 'Vital statistics from census'].index)
# + [markdown] papermill={"duration": 0.023062, "end_time": "2021-11-16T09:22:37.573871", "exception": false, "start_time": "2021-11-16T09:22:37.550809", "status": "completed"} tags=[]
# # Month-wise Record Type
# + papermill={"duration": 0.037203, "end_time": "2021-11-16T09:22:37.634276", "exception": false, "start_time": "2021-11-16T09:22:37.597073", "status": "completed"} tags=[]
df['Record Type'].value_counts()
# + papermill={"duration": 0.640744, "end_time": "2021-11-16T09:22:38.298438", "exception": false, "start_time": "2021-11-16T09:22:37.657694", "status": "completed"} tags=[]
# Note: counts per month are taken from the uncleaned df, split by record type.
plt.figure(figsize= (16,8))
sns.countplot(df['Month'], hue = df['Record Type'])
plt.tight_layout()
plt.legend(
    title ="Record Type",
    bbox_to_anchor =(0.85, 0,0, 1))
plt.grid(True)
plt.show()
# + [markdown] papermill={"duration": 0.024886, "end_time": "2021-11-16T09:22:38.348548", "exception": false, "start_time": "2021-11-16T09:22:38.323662", "status": "completed"} tags=[]
# # Top 25 Countries with the maximum number of Births
# + papermill={"duration": 0.042192, "end_time": "2021-11-16T09:22:38.415432", "exception": false, "start_time": "2021-11-16T09:22:38.373240", "status": "completed"} tags=[]
# Countries/areas with the most rows in the dataset (top 25 by row count).
raw_company_list = df["Country or Area"].value_counts()[0:25].reset_index()
# NOTE(review): the reset_index() column is named 'index' on older pandas;
# newer pandas names it after the original column -- verify against the runtime.
company_list = raw_company_list['index'].values.tolist()
data = df[df['Country or Area'].isin(company_list)]
# + papermill={"duration": 0.405461, "end_time": "2021-11-16T09:22:38.845347", "exception": false, "start_time": "2021-11-16T09:22:38.439886", "status": "completed"} tags=[]
plt.figure(figsize = (30,8))
sns.countplot(data = data, x = 'Country or Area')
plt.grid(True)
plt.xticks(rotation=45)
plt.xlabel("Country")
plt.ylabel(" Total Number of Birth")
plt.title('Top 25 Country having maximun number of Birth')
plt.show()
# + [markdown] papermill={"duration": 0.028272, "end_time": "2021-11-16T09:22:38.901046", "exception": false, "start_time": "2021-11-16T09:22:38.872774", "status": "completed"} tags=[]
# # Reliability Distribution
# + papermill={"duration": 0.036355, "end_time": "2021-11-16T09:22:38.963852", "exception": false, "start_time": "2021-11-16T09:22:38.927497", "status": "completed"} tags=[]
# Counts per reliability category (overwrites the cleaned df1 from above).
df1= df['Reliability'].value_counts()
# + papermill={"duration": 0.038454, "end_time": "2021-11-16T09:22:39.029030", "exception": false, "start_time": "2021-11-16T09:22:38.990576", "status": "completed"} tags=[]
df['Reliability'].value_counts()
# + papermill={"duration": 0.340899, "end_time": "2021-11-16T09:22:39.398033", "exception": false, "start_time": "2021-11-16T09:22:39.057134", "status": "completed"} tags=[]
# NOTE(review): labels are hard-coded; they must stay in the same order as
# value_counts() returns the categories -- verify against the data.
plt.pie(df1,autopct='%.0f%%',labels=['Final figure, complete ', 'Provisional figure','Other estimate'],radius=2,explode = (0.3,0.5, 0.5),shadow=True)
plt.legend(
    title ="Reliability",
    bbox_to_anchor =(1, 0.7,0.6, 1))
plt.show()
# + [markdown] papermill={"duration": 0.028436, "end_time": "2021-11-16T09:22:39.455369", "exception": false, "start_time": "2021-11-16T09:22:39.426933", "status": "completed"} tags=[]
# # Thank you !!
# Please upvote if you liked my work :):)
|
seasonal-birth-data-visualization.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ## Chapter 1: TensorFlow 2: eager variables and useful functions
# ### Importing and checking TensorFlow
import tensorflow as tf
# Report TF/Keras versions and confirm eager execution (the TF2 default).
print("TensorFlow version: {}".format(tf.__version__))
print("Eager execution is: {}".format(tf.executing_eagerly()))
print("Keras version: {}".format(tf.keras.__version__))
# #### Detect GPU:
# +
var = tf.Variable([3, 3])
if tf.test.is_gpu_available():
    print('Running on GPU')
    print('GPU #0?')
    # var.device is the placement string, e.g. '/job:localhost/.../GPU:0'
    print(var.device.endswith('GPU:0'))
else:
    print('Running on CPU')
# -
# ### TensorFlow variables
# #### Tensor Flow supports all the data types you would expect: tf.int32 , tf.float64 and tf.complex64 for example.
# #### For a full list, please see: https://www.tensorflow.org/api_docs/python/tf/dtypes/DType
# #### The default int type is tf.int32 and the default float type is tf.float32
# #### Eager execution is the default
t0 = 24 # python variable
t1 = tf.Variable(42) # tensor variable (rank 0)
t2 = tf.Variable([ [ [0., 1., 2.], [3., 4., 5.] ], [ [6., 7., 8.], [9., 10., 11.] ] ]) # tensor variable (rank 3)
t0, t1, t2
f1 = tf.Variable(89.)
f1
# Variables are mutated in place with assign(); '=' would rebind the Python name.
f1.assign(98.)
f1
f64 = tf.Variable(89, dtype = tf.float64)
f64.dtype
# #### TensorFlow constants
m_o_l = tf.constant(42) # constants are immutable, unlike Variables
m_o_l
m_o_l.numpy()
unit = tf.constant(1, dtype = tf.int64)
unit
# #### The rank (dimensions) of a tensor
tf.rank(t2)
tf.rank(t2).numpy()
# #### Notice the shape of a tensor, i.e. the number of elements in each dimension
t2 = tf.Variable([ [ [0., 1., 2.], [3., 4., 5.] ], [ [6., 7., 8.], [9., 10., 11.] ] ]) # tensor variable
print(t2.shape)
# #### Tensors may be reshaped, preserving the total size, and retain the same values, as is often required for constructing neural networks.
r1 = tf.reshape(t2,[2,6]) # 2 rows 6 cols
r2 = tf.reshape(t2,[1,12]) # 1 row 12 cols
r1
r2
# #### Specifying an element of a tensor
t3 = t2[1, 0, 2] # slice 1, row 0, column 2
t3
# #### Find the size (total number of elements) of a tensor
s = tf.size(input=t2).numpy()
s
# #### Casting tensor to numpy variable
# +
print(t2.numpy())
print(t2[1, 0, 2].numpy())
# -
# #### Find data type of a tensor
t3.dtype
# #### element wise multiplication with overloaded operator
t2*t2
# #### Tensorflow variables support broadcasting:
t4 = t2*4
print(t4)
# #### TensorFlow constants, transpose of a matrix and matrix multiplication, eagerly.
u = tf.constant([[3,4,3]])
v = tf.constant([[1,2,1]])
tf.matmul(u, tf.transpose(a=v))
# #### Casting a tensor to another datatype ...
t1
i = tf.cast(t1, dtype=tf.float32)
i
# #### ... with truncation (4.9 -> 4: the cast truncates, it does not round)
j = tf.cast(tf.constant(4.9), dtype=tf.int32)
j
# +
# adding tensors
i = tf.Variable(66)
j = tf.Variable(33)
tf.add(i,j)
# -
# #### Ragged Tensors
# +
# Ragged tensors hold rows of differing lengths.
ragged =tf.ragged.constant([[5, 2, 6, 1], [], [4, 10, 7], [8], [6,7]])
print(ragged)
print(ragged[0,:])
print(ragged[1,:])
print(ragged[2,:])
print(ragged[3,:])
print(ragged[4,:])
# -
# row_splits give the boundaries of each row within the flat values list.
print(tf.RaggedTensor.from_row_splits(values=[5, 2, 6, 1, 4, 10, 7, 8, 6, 7],row_splits=[0, 4, 4, 7, 8, 10]))
# + active=""
#
# -
# ### Let's now take a look at some useful TensorFlow functions
x = [1,3,5,7,11]
y = 5
s = tf.math.squared_difference( x, y) # (x-y)*(x-y) with broadcasting
s
# ### tf.reduce_mean(input_tensor, axis=None, keepdims=None, name=None)
# #### Note that this is equivalent to np.mean, except that it infers the return data type from the input tensor whereas np.mean allows you to specify the output type, (defaulting to float64)
numbers = tf.constant([[4., 5.], [7., 3.]])
# #### Find mean across all axes
tf.reduce_mean(input_tensor=numbers) # ( 4. + 5. + 7. + 3.)/4 = 4.75
# #### Find mean across columns (i.e. reduce rows)
tf.reduce_mean(input_tensor=numbers, axis=0) # [ (4. + 7. )/2 , (5. + 3.)/2 ].
tf.reduce_mean(input_tensor=numbers, axis=0, keepdims=True)
# #### Find mean across rows (i.e. reduce columns)
tf.reduce_mean(input_tensor=numbers, axis=1) # [ (4. + 5. )/2 , (7. + 3. )/2]
tf.reduce_mean(input_tensor=numbers, axis=1, keepdims=True)
# #### Output a tensor of the given shape filled with values from a normal distribution.
tf.random.normal(shape = (3,2), mean=10, stddev=2, dtype=tf.float32, seed=None, name=None)
# #### Example
ran = tf.random.normal(shape = (3,2), mean=10.0, stddev=2.0)
print(ran)
tf.random.uniform(shape = (2,4), minval=0, maxval=None, dtype=tf.float32, seed=None, name=None)
# #### Output a tensor of the given shape filled with values from a uniform distribution.
# #### Example
ran = tf.random.uniform(shape = (2,2), maxval=20, dtype=tf.int32)
print(ran)
# #### To output tensors with repeatable values over runs, set a seed in tf.set_random_seeds()
# +
# Re-seeding with the same value reproduces the same sequence of random tensors.
tf.random.set_seed(11)
ran1 = tf.random.uniform(shape = (2,2), maxval=10, dtype = tf.int32)
ran2 = tf.random.uniform(shape = (2,2), maxval=10, dtype = tf.int32)
print(ran1) #Call 1
print(ran2)
tf.random.set_seed(11) #same seed
ran1 = tf.random.uniform(shape = (2,2), maxval=10, dtype = tf.int32)
ran2 = tf.random.uniform(shape = (2,2), maxval=10, dtype = tf.int32)
print(ran1)
print(ran2)
# -
# #### Example
# #### Simulate 10 throws of two six-sided dice. Store the results
# #### in a 10x3 matrix.
# #### adapted for eager execution from:
# #### https://colab.research.google.com/notebooks/mlcc/creating_and_manipulating_tensors.ipynb#scrollTo=iFIOcnfz_Oqw
# #### We're going to place dice throws inside two separate
# #### 10x1 matrices. We could have placed dice throws inside
# #### a single 10x2 matrix, but adding different columns of
# #### the same matrix is tricky. We also could have placed
# #### dice throws inside two 1-D tensors (vectors); doing so
# #### would require transposing the result.
#
# +
# Two columns of ten dice throws each (values 1..6; maxval is exclusive).
dice1 = tf.Variable(tf.random.uniform([10, 1], minval=1, maxval=7, dtype=tf.int32))
dice2 = tf.Variable(tf.random.uniform([10, 1], minval=1, maxval=7, dtype=tf.int32))
# We may add dice1 and dice2 since they share the same shape and size.
dice_sum = dice1 + dice2
# We've got three separate 10x1 matrices. To produce a single
# 10x3 matrix, we'll concatenate them along dimension 1.
resulting_matrix = tf.concat(values=[dice1, dice2, dice_sum], axis=1) # join as columns
print(resulting_matrix)
# -
# #### To output the index of the element with the largest value across the axes of a tensor.
# +
# 1-D tensor
t5 = tf.constant([2, 11, 5, 42, 7, 19, -6, -11, 29])
print(t5)
i = tf.argmax(input=t5)
print('index of max; ', i)
print('Max element: ',t5[i].numpy())
i = tf.argmin(input=t5,axis=0).numpy()
print('index of min: ', i)
print('Min element: ',t5[i].numpy())
# Reshape to 3x3 to demonstrate argmax/argmin along each axis.
t6 = tf.reshape(t5, [3,3])
print(t6)
i = tf.argmax(input=t6,axis=0).numpy() # max arg down rows
print('indices of max down rows; ', i)
i = tf.argmin(input=t6,axis=0).numpy() # min arg down rows
print('indices of min down rows ; ',i)
print(t6)
i = tf.argmax(input=t6,axis=1).numpy() # max arg across cols
print('indices of max across cols: ',i)
i = tf.argmin(input=t6,axis=1).numpy() # min arg across cols
print('indices of min across cols: ',i)
# +
#################BEGIN CODE INCOMPATIBLE WITH TF2 because tf.contrib has gone ##################
################# need to locate integrate.odeint and fix #############################
# -
# #### integration: tf.contrib.integrate.odeint(func, y0, t, rtol=1e-06, atol = 1e-12, method=None, options=None, full_output=False, name=None)
# #### examples:
# solve dy/dt = 1/y # this integrates to y^2 = 2*t + 1 with the given boundary condition of y0 =1
# NOTE(review): tf.contrib was removed in TF2, so both calls below fail there --
# this cell sits inside the section explicitly marked incompatible above.
func = lambda y,_: 1/y
tf.contrib .integrate.odeint(func, 1., [0, 1, 2],full_output=True)
# solve `dy/dt = -y`, this integrates to y = e^-t (exp(-t)) with the given boundary condition of y0 =1
func = lambda y, _: -y #i.e. y = exp(-t)
tf.contrib.integrate.odeint(func, 1., [0, 1, 2]) # i.e 1, e^-1, e^-2
# +
#### longer example
# -
# #### The Lorenz system is a system of ordinary differential equations.
# #### The Lorenz attractor is a set of chaotic solutions of the Lorenz system which,
# #### when plotted, resemble a butterfly or figure eight.
# +
# eager version, adapted from https://www.tensorflow.org/versions/r1.4/api_guides/python/contrib.integrate
# restart kernel before executing this cell
import tensorflow as tf
#import tensorflow.contrib.eager as tfe
import numpy as np
import matplotlib.pyplot as plt
# %matplotlib inline
# Lorenz system parameters (the classic chaotic regime).
rho = 28.0
sigma = 10.0
beta = 8.0/3.0
def lorenz_equation(state, t):
    """Right-hand side of the Lorenz ODEs for odeint; state holds (x, y, z)."""
    x, y, z = tf.unstack(state) # pop parameters off the stack
    dx = sigma * (y - x)
    dy = x * (rho - z) - y
    dz = x * y - beta * z
    return tf.stack([dx, dy, dz]) # push parameters onto a stack
init_state = tf.constant([0, 2, 20], dtype=tf.float64)
time_intervals = np.linspace(0, 50, num=5000)
# NOTE(review): tf.integrate.odeint does not exist in TF2 (the TF1 API was
# tf.contrib.integrate.odeint) -- this cell is in the section marked incompatible.
tensor_state = tf.integrate.odeint(lorenz_equation, init_state, time_intervals, full_output=False)
x, y, z = tf.transpose(a=tensor_state)
plt.plot(x, z)
# +
#################### END OF INCOMPATIBLE CODE ######################
# -
# #### Saving and restoring variables
#
# Save a Variable to a checkpoint, clobber it, then restore its saved value.
variable = tf.Variable([[1,3,5,7],[11,13,17,19]])
checkpoint= tf.train.Checkpoint(var=variable)
save_path = checkpoint.save('./vars')
variable.assign([[0,0,0,0],[0,0,0,0]])
variable
checkpoint.restore(save_path)
# Bug fix: this cell previously displayed `var` -- an unrelated Variable created
# in the GPU-detection cell near the top of the notebook -- instead of the
# restored `variable`, so the restore was never actually demonstrated.
variable
# #### Creating a callable TensorFlow graph from a Python function.
# +
def f1(x, y):
    """Mean over all elements of 5*x**2 + y**2."""
    combined = tf.multiply(x ** 2, 5) + y ** 2
    return tf.reduce_mean(input_tensor=combined)

# Wrapping the Python function turns it into a callable TensorFlow graph.
f2 = tf.function(f1)
x = tf.constant([4., -5.])
y = tf.constant([2., 3.])
# Eager f1 and graph-mode f2 must agree on the result.
assert f1(x,y).numpy() == f2(x,y).numpy()
# -
# +
# -
|
Chapter01/Chapter1_TF2_alpha.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/itz-kin/BSCpE-1-2-CPEN21A/blob/main/Loop_Statement.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + [markdown] id="fkstaI_UKMns"
# ##For Loop
# + colab={"base_uri": "https://localhost:8080/"} id="zEnNsNFUJTa2" outputId="e2498f46-c8b5-470a-9839-3f6eeb3092fb"
# Iterate over every item of a list.
week = ["Sunday","Monday","Tuesday","Wednesday","Thursday","Friday","Saturday"]
for x in week:
    print(x)
# + [markdown] id="EyQJzY21LNi3"
# The break statement
# + colab={"base_uri": "https://localhost:8080/"} id="pQ1fBuJ1K3lT" outputId="1ba4cd1b-a4ae-40bb-d1ab-4aa54416b299"
# break after printing: the matching day ("Thursday") IS printed.
for x in week:
    print(x)
    if x=="Thursday":
        break
# + colab={"base_uri": "https://localhost:8080/"} id="Y57zcEpELt_0" outputId="42300a98-dcd0-4f71-a913-bfa542b98988"
#The break statement
for x in week:
if x=="Thursday":
break
print(x)
# + [markdown] id="hNBxwzSYMP-J"
# Looping through string
# + colab={"base_uri": "https://localhost:8080/"} id="oPz5JbxsMHvG" outputId="d41c475d-23d4-4922-93e1-7693a1f171f6"
for x in "Programming with Python":
print(x)
# + [markdown] id="FnBsAS8TMclY"
# The range() Function
# + colab={"base_uri": "https://localhost:8080/"} id="_IwESaMWMhar" outputId="f8d5aad9-b319-4bc9-b5e1-785f8b2dbeb7"
for x in range(10):
print(x)
# + [markdown] id="8JmLa69mM_Ze"
# Nested Loops
# + colab={"base_uri": "https://localhost:8080/"} id="FqXbKamVNB15" outputId="f62c76d3-d8d6-43e5-c794-c10988cdff0a"
adjective=["red","big","tasty"]
fruits=["apple","banana","cherry"]
for x in adjective:
for y in fruits:
print(x,y)
# + [markdown] id="RHmHppw5OXku"
# While Loop
# + [markdown] id="hkrSkBPsPFEa"
# The break statement
# + colab={"base_uri": "https://localhost:8080/"} id="V5PhIY-_XKd5" outputId="7fa27043-a3ea-47a0-e4dd-05176a5cddcd"
i=10
while i>6:
print(i)
i-=1 #Assignment operator for subtraction
# + colab={"base_uri": "https://localhost:8080/"} id="t_5yaJwFPEiF" outputId="da3ad620-8794-49f2-8fd0-ba91ffd00bc9"
i=10
while i>6:
print(i)
if i==8:
break
i-=1
# + [markdown] id="qOORlR_oQRuZ"
# The continue Statement
# + colab={"base_uri": "https://localhost:8080/"} id="E33GutomSNct" outputId="67423f97-b22d-4f59-e4a6-017a91007613"
i=10
while i>6:
i=i-1
if i==8:
continue
print(i)
i-=1
# + [markdown] id="mnU8EIRrRPJE"
# The else statement
# + colab={"base_uri": "https://localhost:8080/"} id="wKtR-_pzRY0r" outputId="888c523b-bf72-4d99-c151-7b38463c4d9a"
i=10
while i>6:
i=i-1
print(i)
else:
print("i is no longer greater than 6")
# + [markdown] id="XNIcbpxvR62x"
# Let's try
# + [markdown] id="6dx4qSNaTHh1"
# Application 1
# + colab={"base_uri": "https://localhost:8080/"} id="QnLfUpZ2SyTh" outputId="4397bd47-44c4-48aa-8730-a3ec028aed5e"
# Labelled values printed one per line.
# BUGFIX: element 4 was misspelled "Vaue 4" in the output text.
value = ["Value 0", "Value 1", "Value 2", "Value 3", "Value 4", "Value 5",
         "Value 6", "Value 7", "Value 8", "Value 9", "Value 10"]
for x in value:
    print(x)
# + [markdown] id="saE7IgYxTDXM"
# Application 2
# + colab={"base_uri": "https://localhost:8080/"} id="BMbdjIWvR8yE" outputId="dd8b33e6-bc5b-4863-fe87-42e3a9672194"
i=20
while i>4:
i=i-1
print(i)
else:
print("i is no longer greater than 3")
|
Loop_Statement.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import plotting
# ## Time to solution of the hybrid implementations as a function of the number of pixels
n_pxs = [2073600, 3686400, 8294400, 58982400, 92160000, 144000000, 207360000, 368640000]
opti5_1 = [5.829926, 5.650608, 5.630285, 5.864720, 6.126829, 5.538630, 7.750466, 8.004335]
opti5IO_1 = [5.040886, 4.792057, 5.338017, 4.936353, 5.175358, 5.444855, 5.411343, 7.152145]
opti5_2 = [12.982650, 11.044830, 11.009116, 12.281082, 12.312272, 12.941025, 11.823999, 13.001319]
opti5IO_2 = [11.363304, 10.163350, 10.130225, 10.525090, 11.651807, 11.780566, 11.787382, 12.071465]
all_optis = [opti5_1, opti5IO_1, opti5_2, opti5IO_2]
all_labels = ['opti5_1gpu_per_proc', 'opti5-IO_1gpu_per_proc', 'opti5_2gpus_per_proc', 'opti5-IO_2gpus_per_proc']
plotting.plot_tts(n_pxs, all_optis, xscale='log', yscale='log',
labels=all_labels, legend_title='Hybrid implementation',
xlabel='Number of pixels', ylabel='Time to solution [s]')
plotting.plot_tts(n_pxs, all_optis, xscale='log', yscale='log',
labels=all_labels, legend_title='Hybrid implementation',
xlabel='Number of pixels', ylabel='Time to solution [s]',
filename='hybrid-optis-px', saveas='svg')
# ## Time to solution of the hybrid implementations as a function of max_iteration
n_iters = [1000, 10000, 100000, 1000000, 10000000]
opti5_1 = [6.835836, 7.149354, 9.049461, 28.296037, 213.494676]
opti5IO_1 = [5.134905, 6.206310, 7.030240, 27.620681, 212.990762]
opti5_2 = [13.778960, 13.163628, 12.919661, 17.671077, 78.764135]
opti5IO_2 = [13.163628, 12.813159, 12.776552, 12.719661, 76.724623]
all_optis = [opti5_1, opti5IO_1, opti5_2, opti5IO_2]
all_labels = ['opti5_1gpu_per_proc', 'opti5-IO_1gpu_per_proc', 'opti5_2gpus_per_proc', 'opti5-IO_2gpus_per_proc']
plotting.plot_tts(n_iters, all_optis, xscale='log', yscale='log',
labels=all_labels, legend_title='Hybrid implementation',
xlabel='max_iteration', ylabel='Time to solution [s]')
plotting.plot_tts(n_iters, all_optis, xscale='log', yscale='log',
labels=all_labels, legend_title='Hybrid implementation',
xlabel='max_iteration', ylabel='Time to solution [s]',
filename='hybrid-optis-maxiter', saveas='svg')
|
plots/HybridOptis.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
def convert(imgf, labelf, outf, n):
    """Convert MNIST idx-format image/label files to a CSV file.

    Each output row is: label, pixel_0, ..., pixel_783 (28x28 images,
    row-major, one byte per pixel).

    Parameters
    ----------
    imgf : str
        Path to the idx3-ubyte image file.
    labelf : str
        Path to the idx1-ubyte label file.
    outf : str
        Path of the CSV file to write (no header row).
    n : int
        Number of images/labels to convert.
    """
    # Context managers guarantee all three files are closed even if a
    # read/write fails part-way through (the original leaked on error).
    with open(imgf, "rb") as f, open(labelf, "rb") as l, open(outf, "w") as o:
        f.read(16)  # skip idx3 header (magic, count, rows, cols)
        l.read(8)   # skip idx1 header (magic, count)
        for _ in range(n):
            # One row: label byte first, then the 28*28 pixel bytes.
            row = [ord(l.read(1))]
            row.extend(ord(f.read(1)) for _ in range(28 * 28))
            o.write(",".join(str(pix) for pix in row) + "\n")
convert("train-images-idx3-ubyte", "train-labels-idx1-ubyte",
"mnist_train.csv", 60000)
convert("t10k-images-idx3-ubyte", "t10k-labels-idx1-ubyte",
"mnist_test.csv", 10000)
import numpy as np
import pandas as pd

# Load the CSV files produced by convert() above.
df_orig_train = pd.read_csv('mnist_train.csv')
df_orig_test = pd.read_csv('mnist_test.csv')

# NOTE(review): convert() writes no header row, so read_csv promotes the first
# data row to column names; '5' / '7' are presumably the first labels in each
# file.  Renaming recovers a 'label' column but silently drops that first
# sample from each set — confirm this is intended.
df_orig_train.rename(columns={'5':'label'}, inplace=True)
df_orig_test.rename(columns={'7':'label'}, inplace=True)

df_orig_train.head()
df_orig_test.head()

# Persist the relabelled frames without the pandas index.
df_orig_train.to_csv('mnist_train_final.csv', index=False)
df_orig_test.to_csv('mnist_test_final.csv', index=False)
|
MNIST-to-CSV.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# <div style="width: 100%; overflow: hidden;">
# <div style="width: 150px; float: left;"> <img src="data/D4Sci_logo_ball.png" alt="Data For Science, Inc" align="left" border="0"> </div>
# <div style="float: left; margin-left: 10px;">
# <h1>Natural Language Processing For Everyone</h1>
# <h1>Text Representation</h1>
# <p><NAME><br/>
# <a href="http://www.data4sci.com/">www.data4sci.com</a><br/>
# @bgoncalves, @data4sci</p></div>
# </div>
# In this lesson we will see in some details how we can best represent text in our application. Let's start by importing the modules we will be using:
# +
import string
from collections import Counter
from pprint import pprint
import gzip
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
import watermark
# %matplotlib inline
# %load_ext watermark
# -
# List out the versions of all loaded libraries
# %watermark -n -v -m -g -iv
# Set the default style
plt.style.use('./d4sci.mplstyle')
# We choose a well known nursery rhyme, that has the added distinction of having been the first audio ever recorded, to be the short snippet of text that we will use in our examples:
text = """Mary had a little lamb, little lamb,
little lamb. 'Mary' had a little lamb
whose fleece was white as snow.
And everywhere that Mary went
Mary went, MARY went. Everywhere
that mary went,
The lamb was sure to go"""
# ## Tokenization
#
# The first step in any analysis is to tokenize the text. What this means is that we will extract all the individual words in the text. For the sake of simplicity, we will assume that our text is well formed and that our words are delimited either by white space or punctuation characters.
print(string.punctuation)
def extract_words(text):
    """Tokenize *text* into a list of lowercase words.

    The text is split on whitespace and punctuation characters are
    stripped from both ends of each token.  Tokens consisting entirely
    of punctuation (e.g. "--") are discarded; the original crashed on
    such tokens with an IndexError after stripping them to "".

    Parameters
    ----------
    text : str

    Returns
    -------
    list of str
        Lowercase words in order of appearance.
    """
    text_words = []
    for word in text.split():
        # str.strip with a character set removes any leading/trailing
        # punctuation in a single pass (same effect as the two loops).
        word = word.strip(string.punctuation)
        if word:  # skip tokens that were pure punctuation
            text_words.append(word.lower())
    return text_words
# After this step we now have our text represented as an array of individual, lowercase, words:
text_words = extract_words(text)
print(text_words)
# As we saw during the video, this is a wasteful way to represent text. We can be much more efficient by representing each word by a number
# +
# Assign an integer id to every distinct word and re-encode the text as ids.
word_dict = {}        # word -> integer id
word_list = []        # id -> word (inverse mapping)
vocabulary_size = 0   # next id to assign == number of distinct words seen
text_tokens = []      # the text re-encoded as a list of integer ids
for word in text_words:
    # If we are seeing this word for the first time, create an id for it
    # and add it to our word dictionary.
    if word not in word_dict:
        word_dict[word] = vocabulary_size
        word_list.append(word)
        vocabulary_size += 1
    # Add the token corresponding to the current word to the tokenized text.
    text_tokens.append(word_dict[word])
# -
# When we were tokenizing our text, we also generated a dictionary **word_dict** that maps words to integers and a **word_list** that maps each integer to the corresponding word.
print("Word list:", word_list, "\n\n Word dictionary:")
pprint(word_dict)
# These two datastructures already proved their usefulness when we converted our text to a list of tokens.
print(text_tokens)
# Unfortunately, while this representation is convenient for memory reasons it has some severe limitations. Perhaps the most important of which is the fact that computers naturally assume that numbers can be operated on mathematically (by addition, subtraction, etc) in a way that doesn't match our understanding of words.
#
# ## One-hot encoding
#
# One typical way of overcoming this difficulty is to represent each word by a one-hot encoded vector where every element is zero except the one corresponding to a specific word.
def one_hot(word, word_dict):
    """Return the one-hot encoded vector for *word*.

    The vector has one float entry per vocabulary word, all zero except
    the position *word_dict* assigns to *word*, which is 1.
    """
    target = word_dict[word]
    return np.array([1.0 if i == target else 0.0 for i in range(len(word_dict))])
# So, for example, the word "fleece" would be represented by:
print(vocabulary_size)
print(len(word_dict))
fleece_hot = one_hot("fleece", word_dict)
print(fleece_hot)
# This vector has every element set to zero, except element 6, since:
print(word_dict["fleece"])
fleece_hot[6] == 1
print(fleece_hot.sum())
# ## Bag of words
#
# We can now use the one-hot encoded vector for each word to produce a vector representation of our original text, by simply adding up all the one-hot encoded vectors:
# +
text_vector1 = np.zeros(vocabulary_size)
for word in text_words:
hot_word = one_hot(word, word_dict)
text_vector1 += hot_word
print(text_vector1)
# -
# In practice, we can also easily skip the encoding step at the word level by using the *word_dict* defined above:
# +
text_vector = np.zeros(vocabulary_size)
for word in text_words:
text_vector[word_dict[word]] += 1
print(text_vector)
# -
# Naturally, this approach is completely equivalent to the previous one and has the added advantage of being more efficient in terms of both speed and memory requirements.
#
# This is known as the __bag of words__ representation of the text. It should be noted that these vectors simply contains the number of times each word appears in our document, so we can easily tell that the word *mary* appears exactly 6 times in our little nursery rhyme.
text_vector[word_dict["mary"]]
# A more pythonic (and efficient) way of producing the same result is to use the standard __Counter__ module:
word_counts = Counter(text_words)
pprint(word_counts)
# From which we can easily generate the __text_vector__ and __word_dict__ data structures:
# +
items = list(word_counts.items())
# Extract word dictionary and vector representation
word_dict2 = dict([[items[i][0], i] for i in range(len(items))])
text_vector2 = [items[i][1] for i in range(len(items))]
# -
word_counts['mary']
# And let's take a look at them:
text_vector
print("Text vector:", text_vector2, "\n\nWord dictionary:")
pprint(word_dict2)
# The results using this approach are slightly different than the previous ones, because the words are mapped to different integer ids but the corresponding values are the same:
for word in word_dict.keys():
if text_vector[word_dict[word]] != text_vector2[word_dict2[word]]:
print("Error!")
# As expected, there are no differences!
# ## Term Frequency
# The bag of words vector representation introduced above relies simply on the frequency of occurrence of each word. Following a long tradition of giving fancy names to simple ideas, this is known as __Term Frequency__.
# Intuitively, we expect that the frequency with which a given word is mentioned should correspond to the relevance of that word for the piece of text we are considering. For example, **Mary** is a pretty important word in our little nursery rhyme and indeed it is the one that occurs the most often:
sorted(items, key=lambda x:x[1], reverse=True)
# However, it's hard to draw conclusions from such a small piece of text. Let us consider a significantly larger piece of text, the first 100 MB of the english Wikipedia from: http://mattmahoney.net/dc/textdata. For the sake of convenience, text8.gz has been included in this repository in the **data/** directory. We start by loading it's contents into memory as an array of words:
# +
data = []
for line in gzip.open("data/text8.gz", 'rt'):
data.extend(line.strip().split())
# -
# Now let's take a look at the first 50 words in this large corpus:
data[:50]
# And the top 10 most common words
# +
counts = Counter(data)
sorted_counts = sorted(list(counts.items()), key=lambda x: x[1], reverse=True)
for word, count in sorted_counts[:10]:
print(word, count)
# -
# Surprisingly, we find that the most common words are not particularly meaningful. Indeed, this is a common occurrence in Natural Language Processing. The most frequent words are typically auxiliaries required due to grammatical rules.
#
# On the other hand, there is also a large number of words that occur very infrequently, as can be easily seen by glancing at the word frequency distribution.
# +
dist = Counter(counts.values())
dist = list(dist.items())
dist.sort(key=lambda x:x[0])
dist = np.array(dist)
norm = np.dot(dist.T[0], dist.T[1])
plt.loglog(dist.T[0], dist.T[1]/norm)
plt.xlabel("count")
plt.ylabel("P(count)")
plt.title("Word frequency distribution")
plt.gcf().set_size_inches(11, 8)
# -
# ## Stopwords
# One common technique to simplify NLP tasks is to remove what are known as Stopwords, words that are very frequent but not meaningful. If we simply remove the most common 100 words, we significantly reduce the amount of data we have to consider while losing little information.
# +
stopwords = set([word for word, count in sorted_counts[:100]])
clean_data = []
for word in data:
if word not in stopwords:
clean_data.append(word)
print("Original size:", len(data))
print("Clean size:", len(clean_data))
print("Reduction:", 1-len(clean_data)/len(data))
# -
clean_data[:50]
# Wow, our dataset size was reduced almost in half!
# In practice, we don't simply remove the most common words in our corpus but rather use a manually curated list of stopwords. Lists for dozens of languages and applications can easily be found online.
# ## Term Frequency/Inverse Document Frequency
# One way of determining of the relative importance of a word is to see how often it appears across multiple documents. Words that are relevant to a specific topic are more likely to appear in documents about that topic and much less in documents about other topics. On the other hand, less meaningful words (like **the**) will be common across documents about any subject.
# To measure the document frequency of a word we will need to have multiple documents. For the sake of simplicity, we will treat each sentence of our nursery rhyme as an individual document:
print(text)
# +
corpus_text = text.split('.')
corpus_words = []
for document in corpus_text:
doc_words = extract_words(document)
corpus_words.append(doc_words)
# -
# Now our corpus is represented as a list of word lists, where each list is just the word representation of the corresponding sentence:
print(len(corpus_words))
pprint(corpus_words)
# Let us now calculate the number of documents in which each word appears:
# +
document_count = {}
for document in corpus_words:
word_set = set(document)
for word in word_set:
document_count[word] = document_count.get(word, 0) + 1
pprint(document_count)
# -
# As we can see, the word __Mary__ appears in all 4 of our documents, making it useless when it comes to distinguishing between the different sentences. On the other hand, words like __white__ which appear in only one document are very discriminative. Using this approach we can define a new quantity, the __Inverse Document Frequency__, that tells us how frequent a word is across the documents in a specific corpus:
def inv_doc_freq(corpus_words):
    """Compute the (log) inverse document frequency of every word.

    Parameters
    ----------
    corpus_words : list of list of str
        The corpus, one tokenized document per entry.

    Returns
    -------
    dict
        Maps each word to log(N_docs / N_docs_containing_word); words
        appearing in every document get weight 0.
    """
    number_docs = len(corpus_words)
    # For each word, count the number of documents that contain it
    # (a set collapses repeated occurrences within one document).
    doc_freq = {}
    for document in corpus_words:
        for word in set(document):
            doc_freq[word] = doc_freq.get(word, 0) + 1
    # The logarithm keeps the weights in a numerically convenient range.
    return {word: np.log(number_docs / count) for word, count in doc_freq.items()}
# Where we followed the convention of using the logarithm of the inverse document frequency. This has the numerical advantage of avoiding to have to handle small fractional numbers.
#
# We can easily see that the IDF gives a smaller weight to the most common words and a higher weight to the less frequent:
corpus_words
# +
IDF = inv_doc_freq(corpus_words)
pprint(IDF)
# -
# As expected **Mary** has the smallest weight of all words 0, meaning that it is effectively removed from the dataset. You can consider this as a way of implicitly identify and remove stopwords. In case you do want to keep even the words that appear in every document, you can just add a 1. to the argument of the logarithm above:
#
# \begin{equation}
# \log\left[1+\frac{N_d}{N_d\left(w\right)}\right]
# \end{equation}
#
# When we multiply the term frequency of each word by it's inverse document frequency, we have a good way of quantifying how relevant a word is to understand the meaning of a specific document.
def tf_idf(corpus_words):
    """Compute TF-IDF weights for every document in the corpus.

    Each document's raw term frequencies are scaled by the corpus-wide
    inverse document frequency of the corresponding word.

    Returns
    -------
    list of Counter
        One Counter per document, mapping word -> TF-IDF weight.
    """
    IDF = inv_doc_freq(corpus_words)
    # Start from the raw term-frequency counts of each document.
    TFIDF = [Counter(document) for document in corpus_words]
    # Scale every count by the word's corpus-level IDF weight.
    for document in TFIDF:
        for word in document:
            document[word] *= IDF[word]
    return TFIDF
tf_idf(corpus_words)
# Now we finally have a vector representation of each of our documents that takes the informational contributions of each word into account. Each of these vectors provides us with a unique representation of each document, in the context (corpus) in which it occurs, making it possible to define the similarity of two documents, etc.
# ## Porter Stemmer
# There is still, however, one issue with our approach to representing text. Since we treat each word as a unique token and completely independently from all others, for large documents we will end up with many variations of the same word such as verb conjugations, the corresponding adverbs and nouns, etc.
#
# One way around this difficulty is to use stemming algorithm to reduce words to their root (or stem) version. The most famous Stemming algorithm is known as the **Porter Stemmer** and was introduced by <NAME> in 1980 [Program 14, 130 (1980)](https://dl.acm.org/citation.cfm?id=275705)
#
# The algorithm starts by defining consonants (C) and vowels (V):
# Character classes used by the Porter Stemmer: vowels and consonants.
# NOTE(review): 'y' is treated as a vowel unconditionally here; Porter's
# original definition makes 'y' context-dependent — confirm this
# simplification is intended for the examples below.
V = set('aeiouy')
C = set('bcdfghjklmnpqrstvwxz')
# The stem of a word is what is left of that word after a specific ending has been removed. A function to do this is easy to implement:
def get_stem(suffix, word):
    """
    Return *word* without *suffix*, or None if *word* does not end with it.

    The suffix comparison is case-insensitive.
    """
    # Guard clause: no match means no stem.
    if not word.lower().endswith(suffix.lower()):
        return None
    return word[:-len(suffix)]
# It also defines words (or stems) to be sequences of vowels and consonants of the form:
# \begin{equation}
# [C](VC)^m[V]
# \end{equation}
# where $m$ is called the **measure** of the word and [] represent optional sections.
def measure(orig_word):
    """
    Calculate the "measure" m of a word or stem, according to the Porter
    Stemmer algorithm.

    A word is viewed as [C](VC)^m[V], where C and V are maximal consonant
    and vowel runs and [] marks optional sections; m counts the complete
    VC pairs.  Relies on the module-level V and C character sets.
    """
    word = orig_word.lower()
    optV = False   # True while the last run matched was a (dangling) vowel run
    m = 0          # number of complete VC pairs seen so far
    pos = 0
    # Simple finite state machine: alternately consume vowel and consonant
    # runs while counting complete VC sequences.
    # (The original also tracked optC and VC flags that were never read;
    # they have been removed.)
    # Gobble up the optional consonant run at the beginning of the word.
    while pos < len(word) and word[pos] in C:
        pos += 1
    while pos < len(word):
        # Consume a vowel run — the V of a potential VC pair.
        while pos < len(word) and word[pos] in V:
            pos += 1
            optV = True
        # Followed by a consonant run.
        while pos < len(word) and word[pos] in C:
            pos += 1
            optV = False
        # If a consonant was found we matched a full VC pair, so increment
        # m.  Otherwise optV stayed True and the word ended in a dangling
        # vowel run (the optional trailing [V]).
        if not optV:
            m += 1
    return m
# Let's consider a simple example. The word __crepusculars__ should have measure 4:
#
# [cr] (ep) (usc) (ul) (ars)
#
# and indeed it does.
word = "crepusculars"
print(measure(word))
# (agr) = (VC)
word = "agr"
print(measure(word))
# The Porter algorithm sequentially applies a series of transformation rules over a series of 5 steps (step 1 is divided in 3 substeps and step 5 in 2). The rules are only applied if a certain condition is true.
#
# In addition to possibily specifying a requirement on the measure of a word, conditions can make use of different boolean functions as well:
# +
def ends_with(char, stem):
    """
    Return True if *stem* ends with *char*.

    Uses str.endswith so that an empty stem safely yields False; the
    original indexed stem[-1] and raised IndexError on "".
    """
    return stem.endswith(char)
def double_consonant(stem):
    """
    Checks the ending of a word for a double consonant
    """
    # A single boolean expression replaces the original if-ladder:
    # at least two characters, last one a consonant, last two equal.
    return len(stem) >= 2 and stem[-1] in C and stem[-1] == stem[-2]
def contains_vowel(stem):
    """
    Checks if a word contains a vowel or not
    """
    # Short-circuits on the first vowel instead of building a set intersection.
    return any(ch in V for ch in stem)
# -
# Finally, we define a function to apply a specific rule to a word or stem:
def apply_rule(condition, suffix, replacement, word):
    """
    Apply Porter Stemmer rule.
    if "condition" is True replace "suffix" by "replacement" in "word"
    """
    stem = get_stem(suffix, word)
    # Guard clause: the word is returned untouched unless the suffix
    # matched AND the rule's condition held.
    if stem is None or condition is not True:
        return word
    if replacement is None:
        return stem
    return stem + replacement
# Now we can see how rules can be applied. For example, this rule, from step 1b, is successfully applied to __plastered__:
word = "plastered"
suffix = "ed"
stem = get_stem(suffix, word)
apply_rule(contains_vowel(stem), suffix, None, word)
stem
contains_vowel(stem)
# While try applying the same rule to **bled** will fail to pass the condition resulting in no change.
word = "bled"
suffix = "ed"
stem = get_stem(suffix, word)
apply_rule(contains_vowel(stem), suffix, None, word)
stem
contains_vowel(stem)
# For a more complex example, we have, in Step 4:
word = "adoption"
suffix = "ion"
stem = get_stem(suffix, word)
apply_rule(measure(stem) > 1 and (ends_with("s", stem) or ends_with("t", stem)), suffix, None, word)
ends_with("t", stem)
ends_with("s", stem)
measure(stem)
# In total, the Porter Stemmer algorithm (for the English language) applies several dozen rules (see https://tartarus.org/martin/PorterStemmer/def.txt for a complete list). Implementing all of them is both tedious and error prone, so we abstain from providing a full implementation of the algorithm here. High quality implementations can be found in all major NLP libraries such as [NLTK](http://www.nltk.org/howto/stem.html).
#
# The difficulties of defining matching rules for arbitrary text cannot be fully resolved without the use of Regular Expressions (typically implemented as Finite State Machines like our __measure__ implementation above), a more advanced topic that is beyond the scope of this course.
# <div style="width: 100%; overflow: hidden;">
# <img src="data/D4Sci_logo_full.png" alt="Data For Science, Inc" align="center" border="0" width=300px>
# </div>
|
1. Text Representation.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Playing Atari games with DQN
#
#
# Now, let's learn how to create a deep Q network to play Atari games with stable baselines.
#
# First, let's import the necessary modules:
from stable_baselines import DQN
# Since we are dealing with Atari games we can use a convolutional neural network instead
# of a vanilla neural network. So, we use CnnPolicy:
from stable_baselines.deepq.policies import CnnPolicy
# We learned that we preprocess the game screen before feeding it to the agent. With
# baselines, we don't have to preprocess manually, instead, we can make use of make_atari
# module which takes care of preprocessing the game screen:
from stable_baselines.common.atari_wrappers import make_atari
# Now, let's create an Atari game environment. Let's create the Ice Hockey game
# environment:
env = make_atari('IceHockeyNoFrameskip-v4')
# Instantiate the agent:
agent = DQN(CnnPolicy, env, verbose=1)
# Train the agent:
agent.learn(total_timesteps=25000)
# After training the agent, we can have a look at how our trained agent performs in the
# environment:
# Run the trained agent in the environment indefinitely, rendering each frame.
state = env.reset()
while True:
    action, _ = agent.predict(state)
    state, reward, done, info = env.step(action)
    env.render()
    # BUGFIX: the original never reset after an episode terminated; stepping
    # a finished Atari environment yields undefined observations.  Start a
    # fresh episode instead.
    if done:
        state = env.reset()
|
14. Deep Reinforcement Learning with Stable Baselines/14.04. Playing Atari games with DQN and its variants.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + id="xKPj4b4J1dk4"
from google.colab import drive
drive.mount('/content/gdrive')
# + id="i54ofDXg1eo-"
# %cd /content/gdrive/My Drive/Optimizasyon
# + id="pzuMsETE1eid"
# !chmod +x Miniconda3-py37_4.8.3-Linux-x86_64.sh
# !time bash ./Miniconda3-py37_4.8.3-Linux-x86_64.sh -b -f -p /usr/local
# !time conda config --set always_yes yes --set changeps1 no
# !time conda install -q -y -c conda-forge python=3.7
# !time conda install -q -y -c conda-forge rdkit==2020.09.2
import sys
sys.path.append('/usr/local/lib/python3.7/site-packages/')
# + id="0asQbeSvIqpT"
import rdkit
from rdkit import Chem
# + id="jgQ93LYf1i6W"
# !pip install tensorflow==1.15
# !pip install torch==1.6.0 torchvision==0.7.0
# !pip install scipy
# !pip install keras==2.3.1
# !pip install sklearn
# !pip install 'h5py==2.10.0' --force-reinstall
# + id="d4T9JPnK7Go3"
from keras.layers import Dense, Dropout, Activation, BatchNormalization, Input
from keras.models import Model
from keras.optimizers import SGD
from keras.models import load_model
from keras import backend as K
from sklearn.utils import class_weight
from sklearn.metrics import roc_auc_score
from sklearn.preprocessing import StandardScaler
from sklearn.model_selection import train_test_split
from sklearn.metrics import log_loss
from get_data import GetData
import json
import pandas as pd
import numpy as np
import torch
import sys
import copy
import tensorflow as tf
sys.path.append('/content/gdrive/My Drive/Optimizasyon')
from jtnn import *
sys.path.append('/content/gdrive/My Drive/Optimizasyon')
vocab = [x.strip("\r\n ") for x in open("/content/gdrive/My Drive/Optimizasyon/unique_canonical_train_vocab.txt")]
vocab = Vocab(vocab)
hidden_size = 450
latent_size = 56
depth = 3
stereo = True
model_jtvae = JTNNVAE(vocab, hidden_size, latent_size, depth, stereo=stereo)
model_jtvae.load_state_dict(torch.load("/content/gdrive/My Drive/Optimizasyon/model.iter-9-6000", map_location=torch.device('cpu'))) # opts.model_path
with open('./L1000CDS_subset.json', 'r') as f:
L = json.load(f)
obj = GetData(L=L, cell_line='MCF7', descriptor='jtvae', n_fold=5, random_state=42, random_genes=False, csv_file='JTVAE_Representations.csv')
x, y, folds = obj.get_up_genes()
trn_x = x.drop(['SMILES'], axis=1).values.astype('float')
scaler = StandardScaler()
scaler.fit(trn_x)
# + id="iQPpQKnZAOv4"
def get_loss(predictions, targets):  # cross-entropy
    """Mean binary cross-entropy between *predictions* and *targets*.

    Both arguments may be lists or numpy arrays of probabilities /
    binary targets; the mean is taken over the first axis.
    """
    # np.asarray is a no-op for arrays and converts lists, matching the
    # original isinstance checks.
    targets = np.asarray(targets)
    predictions = np.asarray(predictions)
    n_samples = predictions.shape[0]
    # Standard BCE: -[t*log(p) + (1-t)*log(1-p)], summed then averaged.
    log_likelihood = targets * np.log(predictions) + (1 - targets) * np.log(1 - predictions)
    return -np.sum(log_likelihood) / n_samples
def optimize(initial_list, model_up, model_dn, target_up, scaler, target_dn, h=0.00001, lr=0.01, early_stop=5):
    """Gradient-descend latent vectors so the models' predictions match the targets.

    Numerically estimates the gradient of the summed cross-entropy loss of
    model_up/model_dn with respect to each of the 56 latent dimensions via
    finite differences, and updates all rows of *initial_list* jointly.

    Assumed from usage — confirm against callers:
      initial_list: array-like of shape (n_samples, 56) latent vectors.
      model_up, model_dn: Keras multi-output models; .predict returns a list
        of per-output arrays.
      target_up, target_dn: binary target vectors shared by every sample.
      scaler: fitted StandardScaler; optimization runs in normalized space.
      h: finite-difference step; lr: learning rate; early_stop: patience.

    Returns the best (lowest-loss) latent vectors seen, inverse-transformed
    back to the original feature space.
    """
    # Work in the scaler's normalized space.
    initial = scaler.transform(np.asarray(initial_list))
    temp = initial
    min_loss_updated = copy.deepcopy(temp)
    min_ = 999          # best loss so far (999 acts as a +infinity sentinel)
    cnt = 0             # iterations since the loss last improved
    gradient = np.zeros((initial.shape[0], 56))
    print('h:', h, 'lr:', lr)
    # Every sample shares the same up/dn targets.
    target_up_list = []
    target_dn_list = []
    for _ in range(initial.shape[0]):
        target_up_list.append(target_up)
        target_dn_list.append(target_dn)
    print('Up target:', target_up_list[0])
    print('Dn target:', target_dn_list[0])
    for k in range(0, 1000):
        for idx in range(56):
            # Perturb only dimension idx of x_1.
            # NOTE(review): x_2 is never perturbed, so this computes a forward
            # difference (L(x+h) - L(x)) / h.  The symmetric x_2 copy suggests
            # a central difference may have been intended — confirm.  Also
            # x_2's predictions are identical for every idx and could be
            # hoisted out of this loop.
            x_1 = copy.deepcopy(temp)
            x_2 = copy.deepcopy(temp)
            x_1[:, idx] = x_1[:, idx] + h
            # Stack the per-output predictions into (n_samples, n_outputs) arrays.
            pred_list = [x for x in model_up.predict(x_1)]
            y_1_up = np.zeros((pred_list[0].shape[0], len(pred_list)))
            for i in range(len(pred_list)):
                y_1_up[:, i] = pred_list[i].flatten()
            pred_list = [x for x in model_up.predict(x_2)]
            y_2_up = np.zeros((pred_list[0].shape[0], len(pred_list)))
            for i in range(len(pred_list)):
                y_2_up[:, i] = pred_list[i].flatten()
            pred_list = [x for x in model_dn.predict(x_1)]
            y_1_dn = np.zeros((pred_list[0].shape[0], len(pred_list)))
            for i in range(len(pred_list)):
                y_1_dn[:, i] = pred_list[i].flatten()
            pred_list = [x for x in model_dn.predict(x_2)]
            y_2_dn = np.zeros((pred_list[0].shape[0], len(pred_list)))
            for i in range(len(pred_list)):
                y_2_dn[:, i] = pred_list[i].flatten()
            # Per-sample finite-difference gradient of the combined loss.
            for j in range(initial.shape[0]):
                loss_1_up = get_loss(predictions=y_1_up[j], targets=target_up_list[j])
                loss_2_up = get_loss(predictions=y_2_up[j], targets=target_up_list[j])
                loss_1_dn = get_loss(predictions=y_1_dn[j], targets=target_dn_list[j])
                loss_2_dn = get_loss(predictions=y_2_dn[j], targets=target_dn_list[j])
                gradient[j, idx] = ((loss_1_up + loss_1_dn) - (loss_2_up + loss_2_dn)) / h
        # Gradient-descent step over all samples and dimensions at once.
        temp = temp - lr*gradient
        # Evaluate the combined loss at the updated point.
        pred_list = [x for x in model_up.predict(temp)]
        y_up = np.zeros((pred_list[0].shape[0], len(pred_list)))
        for i in range(len(pred_list)):
            y_up[:, i] = pred_list[i].flatten()
        pred_list = [x for x in model_dn.predict(temp)]
        y_dn = np.zeros((pred_list[0].shape[0], len(pred_list)))
        for i in range(len(pred_list)):
            y_dn[:, i] = pred_list[i].flatten()
        loss_up = get_loss(predictions=y_up, targets=target_up_list)
        loss_dn = get_loss(predictions=y_dn, targets=target_dn_list)
        loss = loss_up + loss_dn
        print('Iter:', k+1, 'Loss:', loss)
        # Track the best point seen and count non-improving iterations.
        if min_ > loss:
            min_ = loss
            min_loss_updated = copy.deepcopy(temp)
            cnt = 0
        else:
            cnt += 1
        # Stop after `early_stop` consecutive iterations without improvement.
        if cnt == early_stop:
            print("Early stopped.", 'Loss:' + str(loss))
            break
    return scaler.inverse_transform(min_loss_updated)
# + id="nyvw_9gbMoOL"
cell_line = 'MCF7'
gene_target_up = pd.read_csv('harmonizome_dn_binarized_use_for_up_model.csv')
gene_target_dn = pd.read_csv('harmonizome_up_binarized_use_for_dn_model.csv')
gene_target_up = gene_target_up[gene_target_up['disease2'] == 'Breast Cancer_3744']
gene_target_dn = gene_target_dn[gene_target_dn['disease2'] == 'Breast Cancer_3744']
dis_df = pd.read_csv('approved_drug_for_breast_cancer_smiles_jtvae.csv') # MCF7
model_up = load_model(cell_line + '_multi_task_model_up.h5')
model_dn = load_model(cell_line + '_multi_task_model_dn.h5')
file_name_up = cell_line + '_multi_task_gene_list_up.txt'
f = open(file_name_up, 'r')
lines = f.readlines()
gene_list_up = [line.strip() for line in lines]
gene_target_up = gene_target_up[gene_list_up].values
file_name_dn = cell_line + '_multi_task_gene_list_dn.txt'
f = open(file_name_dn, 'r')
lines = f.readlines()
gene_list_dn = [line.strip() for line in lines]
gene_target_dn = gene_target_dn[gene_list_dn].values
# + id="Bj8cfYKoMv_P"
cikarilan_smi_lst = []
baslangic_smi_lst = []
baslangic_features = []
distance_lst = []
optimized_smi_lst = []
for idx in range(len(dis_df)):
if (dis_df.values[idx, 2] in cikarilan_smi_lst) or (len(dis_df.values[idx, 2]) > 105):
continue
arr = np.random.uniform(low=-1.5, high=1.5, size=(56,))
baslangic_feat = dis_df.values[idx, 5:].astype('float') + arr
dec_smiles = model_jtvae.reconstruct2(torch.from_numpy(np.asarray([baslangic_feat[0:28]])).float(),
torch.from_numpy(np.asarray([baslangic_feat[28:56]])).float())
cikarilan_smi = dis_df.values[idx, 2]
baslangic_smi = dec_smiles
distance = np.linalg.norm(dis_df.values[idx, 5:].astype('float') - baslangic_feat)
optimize_edilen_smi = ""
print('Cikarilan Smiles:', cikarilan_smi, 'Baslangic Smiles:', baslangic_smi, 'Distance:', distance)
cikarilan_smi_lst.append(cikarilan_smi)
baslangic_smi_lst.append(baslangic_smi)
baslangic_features.append(baslangic_feat)
distance_lst.append(distance)
updated_feat = optimize(initial_list=baslangic_features,
model_up=model_up, model_dn=model_dn,
target_up=gene_target_up[0],
target_dn=gene_target_dn[0],
scaler=scaler)
for feature in updated_feat:
optimize_edilen_smi = model_jtvae.reconstruct2(torch.from_numpy(np.asarray([feature[0:28]])).float(),
torch.from_numpy(np.asarray([feature[28:56]])).float())
optimized_smi_lst.append(optimize_edilen_smi)
print('optimized_smiles:', optimize_edilen_smi)
# + id="oCAwujtOM5yb"
|
Colab/multi-task_model_opt_with_listed_input.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
import pandas as pd
import numpy as np
import missingno as msno
import matplotlib.pyplot as plt

# Exploratory analysis of the 2021 member ("asociado") dataset.
df = pd.read_excel('./../data/2021_analisisasociado.xlsx')
df
df.info()
# Normalize column names (lowercase + underscores) with pyjanitor.
from janitor import clean_names
df = clean_names(df)
df.info()
# memory_usage='deep' also measures the payload of object (string) columns.
df.info(memory_usage='deep')
df.shape
# Use the member number as the row index.
df.set_index('no_asociado', inplace=True)
df
df.columns.values
df.columns.to_list()
df.dtypes
# Summary statistics of the object (categorical) columns.
df.describe(include='O').T
df.duplicated().sum()
df.tipo_contrato.mode()
df.isna().sum().sort_values()
# Range and central tendency of the arrears ("mora") column.
print(df['mora'].max())
print(df['mora'].min())
print(df['mora'].mean())
print(df['mora'].median())
missing_values_count = df.isna().sum()
missing_values_count
# + tags=[]
# Overall share of missing cells in the table (percent).
# np.prod replaces the np.product alias, deprecated/removed in NumPy >= 1.25.
total_cell = np.prod(df.shape)
total_missing = missing_values_count.sum()
(total_missing/total_cell)*100
# percentage of null cells
# -
msno.bar(df);
# The missingno library offers a very nice way to visualise the distribution of NaN values.
# missingno is a Python library compatible with pandas.
# import missingno as msno
# Chart that shows populated cells per column and discounts the nulls.
msno.matrix(df);
# Rows that contain at least one missing value.
# The axis must be passed by keyword: positional any(1) was deprecated and
# removed in pandas 2.0.
df[df.isna().any(axis=1)]
# +
# how many active members are there?
df.count()['activos']
# -
# maximum and minimum tenure
print(df['antiguedad'].max())
print(df['antiguedad'].min())
print(df['antiguedad'].mean())
# maximum and minimum age
print(df['edad'].max())
print(df['edad'].min())
print(df['edad'].mean())
# (A stray un-called `df.describe` no-op expression was removed here.)
df.describe()
|
notebooks/analisisasociados.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + colab={"base_uri": "https://localhost:8080/"} id="kDj8H2gkIh-j" outputId="4fad2af5-08e0-4da7-9f5e-ec867971571f"
from google.colab import drive
# Mount Google Drive so the sentiment CSVs under /content/drive are reachable.
drive.mount('/content/drive')
# + id="2ddgx3z2HhxF"
import os
import pandas as pd
import numpy as np
import spacy
import string
import matplotlib.pyplot as plt
import seaborn as sns
import timeit
import collections
from IPython.display import HTML, IFrame
from textblob import TextBlob
from wordcloud import WordCloud
from tqdm import tqdm_notebook
from torchtext import data
import torch
# + colab={"base_uri": "https://localhost:8080/"} id="VnhJr26lH4mF" outputId="2adb045c-47f0-40ad-e9a0-636d71647326"
# !pip install transformers
# + colab={"base_uri": "https://localhost:8080/", "height": 67, "referenced_widgets": ["78c0d046aa75419ebdcf42dbd44dba3f", "9d1204f7e4434b37b7bb70167a32f860", "607f91b4efa14425879b2e6f08685d31", "5789b10cb6214254a229d520166473ce", "ac3d9da708f04d938907b8df1ea72d9f", "062c426c007a4266905376a406304de5", "8c86251ee82945e0b0e2f1057b2a01ee", "135a67702a97408c8045d03609c6c233"]} id="ifaZjmF8HhxI" outputId="b900e0a7-968b-4938-a5cb-11fa0a8e67f9"
from transformers import BertTokenizer
# Download/load the lower-cased WordPiece tokenizer matching bert-base-uncased.
tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')
# + colab={"base_uri": "https://localhost:8080/"} id="_qua8IjpHhxJ" outputId="56d89da0-e554-463d-b25c-593542af3687"
len(tokenizer.vocab)
# + colab={"base_uri": "https://localhost:8080/"} id="ayfJVCJZHhxJ" outputId="5200dbb6-a30b-4253-84fc-a639721bf361"
# BERT's tokenizer lower-cases the text and splits it into WordPiece sub-words.
tokens = tokenizer.tokenize('Hello WORLD how ARE yoU?')
print(tokens)
# Map each sub-word token to its vocabulary id.
indexes = tokenizer.convert_tokens_to_ids(tokens)
print(indexes)
# + colab={"base_uri": "https://localhost:8080/"} id="Nmj20ua6HhxJ" outputId="651e2196-5b96-41e6-a62c-048ce0b1e700"
# Special tokens: [CLS] sequence start, [SEP] sequence end, [PAD], [UNK].
init_token = tokenizer.cls_token
eos_token = tokenizer.sep_token
pad_token = tokenizer.pad_token
unk_token = tokenizer.unk_token
print(init_token, eos_token, pad_token, unk_token)
# + colab={"base_uri": "https://localhost:8080/"} id="JkdEYnrXHhxJ" outputId="3137618b-b337-49e6-ea70-ce6da8c920e5"
# Look up the ids of the special tokens explicitly ...
init_token_idx = tokenizer.convert_tokens_to_ids(init_token)
eos_token_idx = tokenizer.convert_tokens_to_ids(eos_token)
pad_token_idx = tokenizer.convert_tokens_to_ids(pad_token)
unk_token_idx = tokenizer.convert_tokens_to_ids(unk_token)
print(init_token_idx, eos_token_idx, pad_token_idx, unk_token_idx)
# + colab={"base_uri": "https://localhost:8080/"} id="desZKyoTHhxK" outputId="8c948d68-16c5-4fc3-ba76-6b9f6b18f327"
# ... or via the tokenizer's convenience attributes (same values).
init_token_idx = tokenizer.cls_token_id
eos_token_idx = tokenizer.sep_token_id
pad_token_idx = tokenizer.pad_token_id
unk_token_idx = tokenizer.unk_token_id
print(init_token_idx, eos_token_idx, pad_token_idx, unk_token_idx)
# + colab={"base_uri": "https://localhost:8080/"} id="Nvxvpg9fHhxK" outputId="6f36cf8f-f74e-4690-ed98-5ef0e4bacae0"
# Maximum sequence length the pretrained model accepts (512 for BERT base).
max_input_length = tokenizer.max_model_input_sizes['bert-base-uncased']
print(max_input_length)
# + id="KHM9VH6eHhxK"
def tokenize_and_cut(sentence):
    """Tokenize with the BERT tokenizer and truncate so that the [CLS] and
    [SEP] tokens added later still fit within max_input_length."""
    return tokenizer.tokenize(sentence)[:max_input_length - 2]
# + colab={"base_uri": "https://localhost:8080/"} id="q1HdgElMHhxK" outputId="d646d449-e8ad-4d46-a105-e856074e7292"
from torchtext import data
# Legacy torchtext Field that delegates tokenization to the BERT tokenizer.
# use_vocab=False because convert_tokens_to_ids already yields vocabulary ids;
# the special-token ids computed above wrap/pad each sequence.
# NOTE(review): this Field API was removed in torchtext 0.12+.
TEXT = data.Field(batch_first = True,
                  use_vocab = False,
                  tokenize = tokenize_and_cut,
                  preprocessing = tokenizer.convert_tokens_to_ids,
                  init_token = init_token_idx,
                  eos_token = eos_token_idx,
                  pad_token = pad_token_idx,
                  unk_token = unk_token_idx)
LABEL = data.LabelField(dtype = torch.float)
print("loading from csv ...")
# Column -> Field mapping; the CSVs must contain (review, label) columns.
tv_datafields = [("review", TEXT), ("label", LABEL)]
# Step two: construct our dataset.
train, valid, test = data.TabularDataset.splits(path='/content/drive/MyDrive/SENTIMENT',
                                                train="train.csv", validation="valid.csv",
                                                test="test_dataset.csv", format="csv",
                                                skip_header=True, fields=tv_datafields)
print(train[0].__dict__.keys())
# + colab={"base_uri": "https://localhost:8080/"} id="KapzTqfcHhxK" outputId="cba494dd-d303-4774-9787-6b067ae3f71b"
print(f"Number of training examples: {len(train)}")
print(f"Number of validation examples: {len(valid)}")
print(f"Number of testing examples: {len(test)}")
# + colab={"base_uri": "https://localhost:8080/"} id="RuPuP95JHhxK" outputId="83377aad-5cf7-45d5-8374-bd73688cc8a1"
print(vars(train.examples[6]))
# + colab={"base_uri": "https://localhost:8080/"} id="3IIm8qbfHhxK" outputId="cac3f520-ce81-44eb-85b3-9abe1a61d346"
# Round-trip a stored example's ids back to tokens for inspection.
tokens = tokenizer.convert_ids_to_tokens(vars(train.examples[6])['review'])
print(tokens)
# + id="tVYPIW8uHhxL"
# Build the label vocabulary (string label -> integer index).
LABEL.build_vocab(train)
# + colab={"base_uri": "https://localhost:8080/"} id="IsVGRlgAHhxL" outputId="b8cb1ef0-d7c3-4ce0-f262-64c77f4ac92e"
print(LABEL.vocab.stoi)
# + id="l1iqNGeUHhxL"
# NOTE(review): BATCH_SIZE is defined but the iterators below are built with
# batch_sizes=(32,32,32) — confirm which batch size is intended.
BATCH_SIZE = 128
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
# Bucket batches by review length to minimise padding inside each batch.
train_iter , valid_iter , test_iter = data.BucketIterator.splits((train,valid,test), device=device,
                                                                 batch_sizes=(32,32,32), sort_key=lambda x: len(x.review),
                                                                 sort_within_batch=True, repeat=False)
# + colab={"base_uri": "https://localhost:8080/", "height": 117, "referenced_widgets": ["3d0b2571250b45dea274fc40c94025d9", "3df9fbc059ad437489a57068d6935e7a", "52bd85cc28a7457abb151a39874e0ae9", "da622f4cd7014e689c0ab7e08e63f808", "0379d9d0410f442f8ae206ef836288b7", "4de716557af04ce7b90d8166f1a17c43", "7acbd2d616fd49539ca48e9efadfd59b", "627424240cbe4c4ea3b5fad1cbdb9216", "a8a3d3c6e27c47138eaac74632d84395", "<KEY>", "<KEY>", "83624591a4e1417daa40ee22c54ded26", "<KEY>", "6f1f917fbe15457aba8109adc1d53c79", "83562a52b4ba4a328fe07de382a3afa7", "<KEY>"]} id="m-B0kviVHhxL" outputId="c7b9c672-f18a-41d8-a0ca-241ced7cde10"
from transformers import BertTokenizer, BertModel
# Load the pretrained BERT encoder; its hidden states feed the GRU head below.
bert = BertModel.from_pretrained('bert-base-uncased')
# + id="M2Nkxr3PHhxL"
import torch.nn as nn
class BERTGRUSentiment(nn.Module):
    """Sentiment classifier: a (frozen-at-forward) BERT encoder followed by a
    multi-layer GRU and a linear output head.

    Args:
        bert: pretrained encoder; must expose ``config.to_dict()['hidden_size']``
            and return the token embeddings as the first element of its output.
        hidden_dim: GRU hidden size.
        output_dim: number of output logits (1 for binary sentiment).
        n_layers: number of stacked GRU layers.
        bidirectional: run the GRU in both directions if True.
        dropout: dropout probability for the GRU (multi-layer only) and head.
    """

    def __init__(self, bert, hidden_dim, output_dim, n_layers, bidirectional, dropout):
        super().__init__()
        self.bert = bert
        emb_dim = bert.config.to_dict()['hidden_size']
        self.rnn = nn.GRU(
            emb_dim,
            hidden_dim,
            num_layers=n_layers,
            bidirectional=bidirectional,
            batch_first=True,
            # Inter-layer dropout is only meaningful with >= 2 GRU layers.
            dropout=dropout if n_layers >= 2 else 0,
        )
        head_in = hidden_dim * 2 if bidirectional else hidden_dim
        self.out = nn.Linear(head_in, output_dim)
        self.dropout = nn.Dropout(dropout)

    def forward(self, text):
        """Map token-id batch [batch, seq len] to logits [batch, output_dim]."""
        # BERT is used as a fixed feature extractor: no gradients through it.
        with torch.no_grad():
            embedded = self.bert(text)[0]  # [batch, seq len, emb dim]
        _, hidden = self.rnn(embedded)     # [layers * directions, batch, hid dim]
        if self.rnn.bidirectional:
            # Concatenate the final forward and backward hidden states.
            summary = torch.cat((hidden[-2, :, :], hidden[-1, :, :]), dim=1)
        else:
            summary = hidden[-1, :, :]
        return self.out(self.dropout(summary))
# + id="LhP390xoHhxL"
# Classifier-head hyperparameters.
HIDDEN_DIM = 256
OUTPUT_DIM = 1      # single logit for binary sentiment
N_LAYERS = 2
BIDIRECTIONAL = True
DROPOUT = 0.25
model = BERTGRUSentiment(bert,
                         HIDDEN_DIM,
                         OUTPUT_DIM,
                         N_LAYERS,
                         BIDIRECTIONAL,
                         DROPOUT)
# + colab={"base_uri": "https://localhost:8080/"} id="lGZ40U-NHhxL" outputId="54a2912a-2ff4-4d22-a752-f5cdcea2773c"
def count_parameters(model):
    """Number of trainable (requires_grad) parameters in the model."""
    return sum(p.numel() for p in model.parameters() if p.requires_grad)
print(f'The model has {count_parameters(model):,} trainable parameters')
# + id="x1Aey13DHhxL"
# Freeze every BERT weight: only the GRU and the linear head will be trained.
for name, param in model.named_parameters():
    if name.startswith('bert'):
        param.requires_grad = False
# + colab={"base_uri": "https://localhost:8080/"} id="MTKmAAo7HhxL" outputId="0c75f2fa-502d-4ff5-c34f-6d37f2466d3a"
def count_parameters(model):
    """Count the trainable (requires_grad) parameters of *model*."""
    total = 0
    for param in model.parameters():
        if param.requires_grad:
            total += param.numel()
    return total
print(f'The model has {count_parameters(model):,} trainable parameters')
# + colab={"base_uri": "https://localhost:8080/"} id="7T1tvg20HhxM" outputId="3335e4d9-c268-42b3-8531-4f74d8c13fab"
# Sanity check: list which parameters remain trainable (GRU + output head).
for name, param in model.named_parameters():
    if param.requires_grad:
        print(name)
# + id="UFhpCSCkHhxM"
import torch.optim as optim
optimizer = optim.Adam(model.parameters())
# + id="o27WJB8sHhxM"
# Binary cross-entropy on raw logits (numerically stabler than sigmoid + BCE).
criterion = nn.BCEWithLogitsLoss()
# + id="-tt98ElhHhxM"
# Move model and loss to the GPU when available.
model = model.to(device)
criterion = criterion.to(device)
# + id="01tw3ILJHhxM"
def binary_accuracy(preds, y):
    """
    Mean per-batch accuracy in [0, 1] — e.g. 8/10 correct gives 0.8, not 8.
    """
    # Threshold the sigmoid probabilities at 0.5.
    hard_preds = torch.round(torch.sigmoid(preds))
    hits = (hard_preds == y).float()  # bool -> float so we can average
    return hits.sum() / len(hits)
# + id="jPlUALFUHhxM"
def train(model, iterator, optimizer, criterion):
    """Run one training epoch; return (mean loss, mean accuracy) over batches."""
    running_loss = 0.0
    running_acc = 0.0
    model.train()
    for batch in iterator:
        optimizer.zero_grad()
        # (batch, 1) logits -> (batch,) to match the label vector.
        logits = model(batch.review).squeeze(1)
        batch_loss = criterion(logits, batch.label)
        batch_acc = binary_accuracy(logits, batch.label)
        batch_loss.backward()
        optimizer.step()
        running_loss += batch_loss.item()
        running_acc += batch_acc.item()
    n_batches = len(iterator)
    return running_loss / n_batches, running_acc / n_batches
# + id="KrF6Wd97HhxM"
def evaluate(model, iterator, criterion):
    """Compute (mean loss, mean accuracy) over *iterator* without weight updates."""
    running_loss = 0.0
    running_acc = 0.0
    model.eval()
    with torch.no_grad():
        for batch in iterator:
            logits = model(batch.review).squeeze(1)
            running_loss += criterion(logits, batch.label).item()
            running_acc += binary_accuracy(logits, batch.label).item()
    n_batches = len(iterator)
    return running_loss / n_batches, running_acc / n_batches
# + id="HBIBeSTXHhxM"
import time
def epoch_time(start_time, end_time):
    """Split an elapsed wall-clock interval into whole (minutes, seconds)."""
    elapsed = end_time - start_time
    minutes = int(elapsed / 60)
    seconds = int(elapsed - minutes * 60)
    return minutes, seconds
# + colab={"base_uri": "https://localhost:8080/"} id="QbkAA4_iHhxN" outputId="27824a07-3d83-4a1e-9b8d-dcfbfb506d89"
N_EPOCHS = 5
best_valid_loss = float('inf')
for epoch in range(N_EPOCHS):
    start_time = time.time()
    train_loss, train_acc = train(model, train_iter, optimizer, criterion)
    valid_loss, valid_acc = evaluate(model, valid_iter, criterion)
    end_time = time.time()
    epoch_mins, epoch_secs = epoch_time(start_time, end_time)
    # Checkpoint whenever the validation loss improves.
    if valid_loss < best_valid_loss:
        best_valid_loss = valid_loss
        torch.save(model.state_dict(), 'tut6-model.pt')
    print(f'Epoch: {epoch+1:02} | Epoch Time: {epoch_mins}m {epoch_secs}s')
    print(f'\tTrain Loss: {train_loss:.3f} | Train Acc: {train_acc*100:.2f}%')
    print(f'\t Val. Loss: {valid_loss:.3f} | Val. Acc: {valid_acc*100:.2f}%')
# + colab={"base_uri": "https://localhost:8080/"} id="dEXxuyosHhxN" outputId="ea305e42-456f-4938-e613-e828c395089b"
# Restore the best checkpoint and report held-out test performance.
model.load_state_dict(torch.load('tut6-model.pt'))
test_loss, test_acc = evaluate(model, test_iter, criterion)
print(f'Test Loss: {test_loss:.3f} | Test Acc: {test_acc*100:.2f}%')
# + id="7PBI5sCNHhxN"
def predict_sentiment(model, tokenizer, sentence):
    """Return the model's sigmoid score (probability of the positive class)."""
    model.eval()
    # Truncate so the [CLS]/[SEP] ids still fit within the model's max length.
    pieces = tokenizer.tokenize(sentence)[:max_input_length - 2]
    ids = [init_token_idx] + tokenizer.convert_tokens_to_ids(pieces) + [eos_token_idx]
    # Build a (1, seq_len) batch on the training device.
    batch = torch.LongTensor(ids).to(device).unsqueeze(0)
    score = torch.sigmoid(model(batch))
    return score.item()
# + colab={"base_uri": "https://localhost:8080/"} id="ZiPSvBd_HhxN" outputId="4cb1f734-9eca-4d3a-f880-ca2ed5328175"
# Clearly negative example — expect a score near 0.
predict_sentiment(model, tokenizer, "This film is terrible")
# + colab={"base_uri": "https://localhost:8080/"} id="068HMaayHhxN" outputId="094ee237-084f-4c78-c14e-d33b43d600f1"
# Negation ("not bad") is a classic hard case for sentiment models.
predict_sentiment(model, tokenizer, "This film is not bad")
|
sentiment analysis/BERTGRU.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] colab_type="text" id="1GWJAmQDX7TJ"
# # CNN for CIFAR-10 with Dropout Layers
# + colab={} colab_type="code" id="dkwUnEgrX8qC"
import torch
import torch.nn.functional as F
from torch import nn, optim
from torchvision import datasets, transforms
# + colab={} colab_type="code" id="KoMN_RT7X932"
import numpy as np
# + colab={} colab_type="code" id="KbgspWmOYE8J"
# %matplotlib inline
from matplotlib import pyplot as plt
# + colab={"base_uri": "https://localhost:8080/", "height": 34} colab_type="code" id="FVgAoW8roYHJ" outputId="73e4a6c4-c484-470e-cf8a-16f0694abdbf"
# Fix both RNG seeds for reproducible weight init and sampling.
np.random.seed(42)
torch.manual_seed(42)
# + [markdown] colab_type="text" id="qhRY36ErC1fh"
# ### Description
# + [markdown] colab_type="text" id="XIWWNhFEC6YW"
#
#
# Classification of the CIFAR-10 dataset using using a convolutional neural network (CNN) with and without dropout layers.
# + [markdown] colab_type="text" id="_ZQv9h8AYQmb"
# ### GPU
# + [markdown] colab_type="text" id="Q-2bttuHYatC"
# CNNs run very efficiently on a GPU. If a GPU is available we definitely want to take advantage of it. We can automatically set a `device` variable which will define a GPU if it's available and otherwise fallback to a CPU:
# + colab={"base_uri": "https://localhost:8080/", "height": 34} colab_type="code" id="2yrySYBEYJaF" outputId="277b6c13-c514-446a-b8db-603d568dd5dd"
# Prefer the GPU when available, otherwise fall back to the CPU.
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
print(device)
# + [markdown] colab_type="text" id="JmToWYgFvzfs"
# ## Data Set
# + [markdown] colab_type="text" id="YJjbE50fv1FW"
# ### CIFAR-10
# + [markdown] colab_type="text" id="rPiOVwktlOR6"
# The CIFAR-10 dataset is a benchmark dataset for computer vision containing 60000 images divided into 10 classes. The dataset is composed of different photographs of the following classes: airplane, automobile, bird, cat, deer, dog, frog, horse, ship, truck. The images are $32 \times 32$ pixels in size and are composed of three channels (RGB).
# + [markdown] colab_type="text" id="8-3I1kM7v2rH"
# ### Loading the Dataset
# + [markdown] colab_type="text" id="8z1Yk-EqsTYf"
# As usual, we want to load the data into`torch.tensor`s and apply a transformation so that the data is normalised in order to reduce data skewness. We use the `transforms.Normalize` function in order to normalise our data.
# + colab={"base_uri": "https://localhost:8080/", "height": 101, "referenced_widgets": ["60369ee21aec4c258d17488e231046ac", "bdc1e9cab37b444db21bbe898b0bd1c2", "1362b036e21143ebbca42f6c17aa4cd4", "<KEY>", "<KEY>", "dfd4177ac2e5456c98d0374c3d4b3905", "<KEY>", "3cf5b6f8b23b4fb6b35e99e7da66b4ce"]} colab_type="code" id="SXDl5KTIcRd3" outputId="0869a8da-5365-49ad-89b1-89b78b3dfcdb"
# Define usual transformation to tensor with normalisation
transform = transforms.Compose(
    [
        # Transform to tensor and normalise
        transforms.ToTensor(),
        # Shift each RGB channel from [0, 1] to [-1, 1].
        transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)),
    ]
)
# Download and load the training dataset
trainset = datasets.CIFAR10("data", download=True, train=True, transform=transform)
# Download and load the test dataset
testset = datasets.CIFAR10("data", download=True, train=False, transform=transform)
# + [markdown] colab_type="text" id="yz7B9L8q0Lbf"
#
# Finally we can define our `DataLoader` and also get some iterators for the datasets:
# + colab={} colab_type="code" id="QF6O-pNly9am"
# Create DataLoader with batches.
# drop_last=True keeps every batch at exactly 64 images.
trainloader = torch.utils.data.DataLoader(trainset, batch_size=64, drop_last=True)
testloader = torch.utils.data.DataLoader(testset, batch_size=64, drop_last=True)
# Create iterators over the loaders (used for one-off batch inspection below).
trainiter = iter(trainloader)
testiter = iter(testloader)
# + [markdown] colab_type="text" id="GwtCZcXr15Hz"
# #### Visualizing Images and Labels
# + [markdown] colab_type="text" id="8EnlxbtI2ekB"
# Let's define a dictionary mapping labels (numbers from 0 to 9 denoting one of the 10 CIFAR-10 classes) to the actual class names:
# + colab={"base_uri": "https://localhost:8080/", "height": 54} colab_type="code" id="9-sD0b91158E" outputId="a45bf217-1e69-457d-8a12-0f93487358da"
# Map numeric CIFAR-10 labels (0-9) to human-readable class names.
label_to_name = {
    i : name
    for i, name in enumerate(["airplane", "automobile", "bird", "cat", "deer", "dog", "frog", "horse", "ship", "truck"])
}
print(label_to_name)
# + [markdown] colab_type="text" id="nXghpfSIdiah"
# We can visualise one batch of images from the training set:
# + colab={"base_uri": "https://localhost:8080/", "height": 873} colab_type="code" id="jv4sS5_x3HEI" outputId="085d771f-8486-43fd-88f6-439a1e03fd99"
images, labels = next(trainiter)
fig = plt.figure(figsize=(12,12))
# Show all 64 images of one training batch in an 8x8 grid.
for idx in range(64):
    ax = fig.add_subplot(8, 8, idx + 1, xticks=[], yticks=[])
    # Un-normalize image (inverse of Normalize((0.5,...), (0.5,...))).
    img = images[idx].numpy() * 0.5 + 0.5
    # Channels-first (C, H, W) -> channels-last (H, W, C) for matplotlib.
    plt.imshow(np.transpose(img, (1, 2, 0)))
    name = label_to_name[labels[idx].item()]
    ax.set_title(name, fontdict={"fontsize": 12})
plt.tight_layout()
plt.show()
# + [markdown] colab_type="text" id="JuPRGLlc4792"
# ## CNN with Dropout Layers
# + [markdown] colab_type="text" id="MqQmePT_m3ss"
# We can now define a CNN with three convolutional and max pooling layers and two fully connected layers. We also add two `nn.Dropout` layers between the last convolutional layer and the first fully connected layer, and between the two fully connected layers. The dropout layers will randomly drop some nodes of the neural network and therefore act as a regularization technique.
# + colab={} colab_type="code" id="InHsaVLi47LB"
class CNN(nn.Module):
    """CIFAR-10 classifier: three conv/ReLU/max-pool stages followed by a
    two-layer fully connected head with optional dropout.

    Args:
        dropoutp: dropout probability used before each linear layer
            (0.0 disables dropout, reproducing the unregularized model).

    The head ends in LogSoftmax, so the output contains log-probabilities
    (pair it with nn.NLLLoss).
    """

    def __init__(self, dropoutp = 0.0):
        super().__init__()
        # Feature extractor: 3x32x32 -> 16x16x16 -> 32x8x8 -> 64x4x4.
        self.conv = nn.Sequential(
            nn.Conv2d(3, 16, 3, padding=1),
            nn.ReLU(),
            nn.MaxPool2d(2, 2),
            nn.Conv2d(16, 32, 3, padding=1),
            nn.ReLU(),
            nn.MaxPool2d(2, 2),
            nn.Conv2d(32, 64, 3, padding=1),
            nn.ReLU(),
            nn.MaxPool2d(2, 2),
        )
        # Classifier head over the flattened 64*4*4 feature map.
        self.fc = nn.Sequential(
            nn.Dropout(dropoutp),
            nn.Linear(64 * 4 * 4, 512),
            nn.ReLU(),
            nn.Dropout(dropoutp),
            nn.Linear(512, 10),
            nn.LogSoftmax(dim=1),
        )

    def forward(self, x):
        """Return (batch, 10) log-probabilities for a (batch, 3, 32, 32) input."""
        # Generalized: the original asserted a hard-coded batch size of 64,
        # which broke for any other batch and forced drop_last=True loaders.
        # Only the per-image shape is checked now.
        assert x.shape[1:] == (3, 32, 32)
        x = self.conv(x)
        # Flatten features for the fully connected layers (keep batch dim).
        x = x.view(x.size(0), -1)
        x = self.fc(x)
        return x
# + [markdown] colab_type="text" id="_D97DhPlvi4s"
# _Note_: Here we define the convolutional part (feature detection) and the fully connected part (classification) of the CNN in two `nn.Sequential` models. This will allow to print out the actual architecture with `print(model)`.
# + [markdown] colab_type="text" id="9v3wlK1ynuVK"
# Once the model is defined we can instantiate it and move it to the `device`:
# + colab={"base_uri": "https://localhost:8080/", "height": 382} colab_type="code" id="mrbSV4b08yUB" outputId="167fefaa-2b32-4945-d68d-dc02dc4980bc"
# Throw-away model (default: no dropout) for the forward-pass smoke test.
modeltest = CNN()
modeltest.to(device)
# + [markdown] colab_type="text" id="rpS3wR1hn7Rm"
# ### Test Forward Pass
# + [markdown] colab_type="text" id="jz3ixf2noDHx"
# In order to check that our model definition works correctly, we can try to propagate one batch of images through the network. It is important to notice that the data from the `DataLoader` has to be moved to the `device` as well (since the model is now working on the `device`). We can also plot the class probabilities of this untrained model:
# + colab={"base_uri": "https://localhost:8080/", "height": 52} colab_type="code" id="xE5qywnu9zo4" outputId="f90c128f-44b2-413a-93df-80622c06c632"
images, labels = next(trainiter)
# Move images and labels to the GPU
images, labels = images.to(device), labels.to(device)
modeltest.eval()
with torch.no_grad(): # Do not track gradients
    # Perform forward pass
    out = modeltest(images)
    # Compute class probabilities
    # The model returns log-probabilities
    p = torch.exp(out)
# Mean probability per class; ~0.1 everywhere is expected for random weights.
print(torch.mean(p, dim=0))
# + [markdown] colab_type="text" id="OjakN1U6oiXY"
# As expected for a model with random weights, the class probabilities for each class are close to $1/10$.
# + [markdown] colab_type="text" id="JV0MVFt0_YPH"
# ### Training
# + [markdown] colab_type="text" id="8RxOwQhZpjma"
# We can now define our training loop including validation, where we also save the parameters of the model leading to the lowest loss on the validation set, which will be useful later. We make use of `model.train()` and `model.eval()` to specify when the model is training and when the model is being evaluated (validated).
# + [markdown] colab_type="text" id="9kuOxN0fFHLO"
# _Note_: The `nn.Dropout` layers have different behaviours during training (where nodes should be dropped) and evaluation (where nothing should happen). `model.train()` and `model.eval()` are used to make sure that the layers behave correctly.
# + colab={} colab_type="code" id="Ht5oyxmh_bwB"
def train(model, optimizer, loss_function, trainloader, testloader, paramsfile="model.pth", epochs=30, device=device):
    """
    Train a model by optimizing a loss_function using the given optimizer.
    Model and data are moved to the set device.

    Returns (train_losses, valid_losses, accuracies) with one entry per epoch;
    the state dict with the lowest validation loss is saved to paramsfile.
    NOTE: the `device` default is bound to the global `device` at definition time.
    """
    import time
    import os
    # Create directory for parameters file
    savedir = os.path.dirname(paramsfile)
    try:
        os.mkdir(savedir)
    except FileExistsError:
        pass
    # Move model to device
    model.to(device)
    train_losses, valid_losses, accuracies = [], [], []
    min_valid_loss = np.Inf
    # Loop over epochs
    for epoch in range(epochs):
        # Start epoch timer
        time_init = time.time()
        # Initialize total epoch loss
        epoch_loss = 0
        # Training
        model.train()
        for images, labels in trainloader:
            # Move data to GPU
            images, labels = images.to(device), labels.to(device)
            # Initialize the gradients to zero
            optimizer.zero_grad()
            # Perform forward pass
            output = model(images)
            # Compute the loss
            loss = loss_function(output, labels)
            # Perform backpropagation
            loss.backward()
            # Update the weights
            optimizer.step()
            # Accumulate total epoch loss
            epoch_loss += loss.item()
        else:
            # for/else idiom: this branch runs once after the training loop
            # completes (there is no break); it holds the validation pass.
            valid_loss = 0
            accuracy = 0
            # Validation
            # No need to track gradients during validation
            with torch.no_grad():
                model.eval()
                for images, labels in testloader:
                    # Move data to GPU
                    images, labels = images.to(device), labels.to(device)
                    # Perform forward pass
                    output = model(images)
                    # Compute class probabilities
                    p = torch.exp(output)
                    # Compute the loss
                    valid_loss += loss_function(output, labels).item()
                    # Get class of top prediction
                    _, top_c = p.topk(1, dim=1)
                    # Get truth table for the predicted classes
                    equals = (top_c == labels.view_as(top_c)).type(torch.FloatTensor)
                    # Compute accuracy
                    accuracy += torch.mean(equals)
            # Store per-epoch mean losses and accuracy (in percent)
            train_losses.append(epoch_loss/len(trainloader))
            valid_losses.append(valid_loss/len(testloader))
            accuracies.append(accuracy.item()/len(testloader)*100)
            # Stop epoch timer
            time_end = time.time()
            # Save model whenever the validation loss improves
            if valid_losses[-1] < min_valid_loss:
                torch.save(model.state_dict(), paramsfile)
                min_valid_loss = valid_losses[-1]
            # Report progress every 5 epochs
            if (epoch + 1) % 5 == 0:
                print(f"--- Epoch {epoch+1:2} ---")
                print(f"Train Loss: {train_losses[-1]:.5f}")
                print(f"Valid Loss: {valid_losses[-1]:.5f}")
                print(f"Valid Accuracy: {accuracies[-1]:.2f}%")
                print(f"Time: {time_end - time_init:.3f} s")
    return train_losses, valid_losses, accuracies
# + colab={} colab_type="code" id="rWt2XL0FkaNh"
# Loss function (for both models).
# NLLLoss pairs with the LogSoftmax output layer of CNN.
nll_loss = nn.NLLLoss()
# + [markdown] colab_type="text" id="qEA35M1OxS11"
# We can now define the model with a dropout probability of `0.5` and train it:
# + colab={"base_uri": "https://localhost:8080/", "height": 885} colab_type="code" id="ip3eie4Mk8W_" outputId="c3dd853c-ac03-4f42-8fc8-b67a992e4971"
# Create and train the model WITH dropout (p=0.5).
# NOTE(review): the original "data augmentation" comments and the
# "augmodel.pth" filename look copy-pasted from another notebook; this
# experiment is about dropout.
dmodel = CNN(dropoutp=0.5)
# Optimizer for the dropout model
doptimizer = optim.SGD(dmodel.parameters(), lr=0.05)
dtrainloss, dvalidloss, daccuracy = train(dmodel,
                                          doptimizer,
                                          nll_loss,
                                          trainloader,
                                          testloader,
                                          "models/augmodel.pth",
                                          epochs=50)
# + [markdown] colab_type="text" id="QwIxKczns_IL"
# For comparison, we can also train a model without dropout layers (i.e. with a dropout probability of `0.0`):
# + colab={"base_uri": "https://localhost:8080/", "height": 885} colab_type="code" id="z38OQq0UPJOn" outputId="3d3c9537-8dbe-4367-b65c-c2b748967da6"
# Create and train the baseline model WITHOUT dropout (dropoutp defaults to 0).
model = CNN()
# Optimizer for the baseline model
optimizer = optim.SGD(model.parameters(), lr=0.05)
trainloss, validloss, accuracy = train(model,
                                       optimizer,
                                       nll_loss,
                                       trainloader,
                                       testloader,
                                       "models/model.pth",
                                       epochs=50)
# + [markdown] colab_type="text" id="SniHP5dIFv4r"
# After training the model with and without dropouts, we can evaluate the impact of such regularisation technique by plotting the training and validation losses:
# + colab={"base_uri": "https://localhost:8080/", "height": 350} colab_type="code" id="-jjQTJ6G_oPn" outputId="3aedf840-6f80-47c4-afa9-b7a1f089b4ff"
# Side-by-side train/validation loss curves for the two models.
fig = plt.figure(figsize=(10,5))
ax1 = fig.add_subplot(1, 2, 1)
ax1.plot(trainloss, "o-", label="Train Loss")
ax1.plot(validloss, "o-", label="Valid Loss")
# Fixed: titles previously read "(No) Data Augmentation" — copy-pasted from a
# different notebook. This comparison is with vs. without dropout.
ax1.set_title("Without Dropout")
ax1.set_xlabel("Epoch")
ax1.set_ylabel("Loss")
ax1.legend()
# Share the y axis so the two panels are directly comparable.
ax2 = fig.add_subplot(1, 2, 2, sharey=ax1)
ax2.plot(dtrainloss, "o-", label="Train Loss")
ax2.plot(dvalidloss, "o-", label="Valid Loss")
ax2.set_title("With Dropout")
ax2.set_xlabel("Epoch")
ax2.legend()
plt.show()
# + [markdown] colab_type="text" id="H2WdG8FIKx4o"
# We see that for the model without dropout layers, the validation loss start increasing after a few epochs. This is a clear symptom of overfitting. In contrast, the model with dropout layers allows the validation loss to decrease further, along with the training loss.
# + [markdown] colab_type="text" id="PF056lL4GK8_"
# We can also look at what happens to the total accuracy on the validation set:
# + colab={"base_uri": "https://localhost:8080/", "height": 281} colab_type="code" id="yYQjxBZ6iu4h" outputId="d31461cc-6dc3-457e-f7d9-70d72c642e8d"
plt.figure()
# Validation accuracy per epoch for both models.
plt.plot(daccuracy, "o-", label="w/ Dropout")
plt.plot(accuracy, "o-", label="w/o Dropout")
plt.legend()
plt.title("Total Accuracy")
plt.show()
# + [markdown] colab_type="text" id="cSkmmk9BGPtL"
# We see that the model with dropout layers is able to reach an higher overall accuracy.
# + [markdown] colab_type="text" id="4ideIGVxLTKt"
# Finally, we can look at the accuracy of the predictions:
#
# + colab={} colab_type="code" id="VeEiOmlHLMct"
from collections import defaultdict
def printaccuracy(model, testloader):
    """
    Print per-class and overall test accuracy of *model* on *testloader*.
    """
    # No gradients needed: pure evaluation.
    with torch.no_grad():
        model.eval()
        # Per-class counters: correct predictions and total examples seen.
        n_correct = defaultdict(int)
        n_seen = defaultdict(int)
        for images, labels in testloader:
            # Move the batch to the compute device.
            images, labels = images.to(device), labels.to(device)
            # Model emits log-probabilities; exponentiate to probabilities.
            probs = torch.exp(model(images))
            # Class with the highest probability for each example.
            _, predicted = probs.topk(1, dim=1)
            hits = (predicted == labels.view_as(predicted))
            # Tally correct/total per true class.
            for lbl, hit in zip(labels, hits):
                n_correct[lbl.item()] += hit.item()
                n_seen[lbl.item()] += 1
        for i in range(10):
            correct, total = n_correct[i], n_seen[i]
            print(f"Test accuracy of {label_to_name[i]:10}: {correct/total * 100:.2f}% ({correct}/{total})")
        print(f"Test accuracy: {sum(n_correct.values())/sum(n_seen.values()) * 100:.2f}%")
# + colab={"base_uri": "https://localhost:8080/", "height": 208} colab_type="code" id="NHaAdf_jQpW-" outputId="2a3b27af-40a2-4f0b-cf38-f0e806b4491a"
# Per-class accuracy of the dropout model ...
printaccuracy(dmodel, testloader)
# + colab={"base_uri": "https://localhost:8080/", "height": 208} colab_type="code" id="_DewOzTORmLz" outputId="2401ad7e-d7c5-44dc-cc75-a906179e46b1"
# ... and of the baseline model without dropout.
printaccuracy(model, testloader)
|
00_Deep_Learning_PyTorch/06_cnn/03_CIFAR10-CNN-dropout.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Lecture 3.8: Learning Better Pt.2
#
# [**Lecture Slides**](https://docs.google.com/presentation/d/1Gb_jGPy0YIH8qDi2AgBQEwaoT8uPbyG_5N3dHPlthrM/edit?usp=sharing)
#
# This lecture, we are going to regularize a model to improve its generalization.
#
# **Learning goals:**
#
# - overfit a polynomial regression model
# - use ridge regularization to prevent overfitting
# - analyse the effect of the regularization weight on model parameter sizes
# - train a regularized logistic regression model with polynomial features
# ## 1. Introduction
#
# This is part 2 of "Learning Better". We'll be using the same dataset and trying to solve the same task as notebook 3.7. If you haven't been through notebook 3.7, please start there.
# ## 2. High Polynomial Regression
#
# Let's load our instagram planning dataset from csv into a `DataFrame`:
# +
import pandas as pd
# Load the instagram planning dataset (planned vs actual minutes online).
df = pd.read_csv('instagram_planning.csv')
df.head()
# -
# To refresh our minds since last lecture, let's visualize the dataset again:
# +
import matplotlib.pyplot as plt
import seaborn as sns
sns.set()
fig = plt.figure()
ax = fig.add_subplot(111)
# Scatter of planned vs actual minutes spent online.
ax.scatter(df['planned_minutes'], df['actual_minutes'], alpha=0.6)
ax.set_xlabel('planned online time (min)')
ax.set_ylabel('time spent online (min) ')
ax.set_title('Online Procrastination');
# -
# Remember the single feature is the number of minutes we _plan_ to spend online, and the numerical label is the _actual_ amount of time we spend online. 📱
#
# In notebook 3.7, we had success adding polynomial features to our linear regression model. The resulting non-linear hypothesis was a better fit to the labels. We are therefore tempted to power up the model even more... let's turn it up all the way to 11! 🎸
#
# Let's create our feature matrix and label vector:
# Single-feature matrix (n, 1) and label vector (n,).
X = df['planned_minutes'].values.reshape(-1, 1)
y = df['actual_minutes'].values
# Then add the polynomial features of degree 11, and standardize them using sklearn preprocessors:
# +
from sklearn.preprocessing import PolynomialFeatures, StandardScaler
from sklearn.linear_model import LinearRegression
# Expand x into [x, x^2, ..., x^11]; include_bias=False because
# LinearRegression fits its own intercept term.
poly = PolynomialFeatures(11, include_bias=False)
poly = poly.fit(X)
X_poly = poly.transform(X)
# Standardize each polynomial feature to zero mean / unit variance.
scaler = StandardScaler()
scaler = scaler.fit(X_poly)
X_poly_scaled = scaler.transform(X_poly)
# -
# Finally, we can train our superpowered polynomial regression:
reg = LinearRegression().fit(X_poly_scaled, y)
# Just like notebook 3.7, we can visualize the optimized hypothesis by using `.predict()`. Let's put this code in a function, so that we may reuse it later:
# +
import numpy as np
def plot_polynomial_regression(X, y, reg, poly, scaler):
    """Scatter the training examples and overlay the fitted hypothesis curve.

    X, y: raw feature matrix and labels; reg: fitted regressor; poly/scaler:
    the fitted feature transformers used at training time.
    """
    fig = plt.figure()
    ax = fig.add_subplot(111)
    # Scatter the examples first so the axis limits adapt to the data.
    ax.scatter(X, y, alpha=0.6)
    # Build a dense grid of raw feature values spanning the current x-range.
    lo, hi = ax.get_xlim()
    grid = np.linspace(lo, hi, 30).reshape(-1, 1)
    # Push the grid through the same transformations as training data, then predict.
    predictions = reg.predict(scaler.transform(poly.transform(grid)))
    # Draw the hypothesis on top of the scatter plot.
    ax.plot(grid, predictions, c='g', linewidth=3)
    # Restore the limits (plotting the curve may have widened them) and label.
    ax.set_xlim(lo, hi)
    ax.set_xlabel('planned online time (min)')
    ax.set_ylabel('time spent online (min)')
    ax.set_title('Online Procrastination');
plot_polynomial_regression(X, y, reg, poly, scaler)
# -
# ... what happened? We were expecting awesome modeling power, but this hypothesis looks all crooked! The values are shooting up for small values of `planned_minutes`, and dropping for large values of the feature, both of which make no sense. 😜
#
# This is a typical example of _overfitting_. The model is too powerful and tries to fit the noise rather than the underlying pattern.
#
# One way of preventing this from happening, is to chill out with the degree polynomial features and maybe keep it to 2 like last lecture. 💆♂️ However sometimes it is difficult to know which exact polynomial degree, or which model "power", to use. Therefore, a more robust way of preventing overfitting is _regularization_.
#
# ## 3. Ridge Regression
#
# Overfitting typically happens when some of the model parameters become very large. Large $\theta$ suggests "abrupt" variations in the hypothesis, and is a sign that the model is trying to fit noise.
#
# We can check the model parameters inside the fitted `LinearRegression` instance:
theta = np.append(reg.intercept_, reg.coef_)
theta
# Recall that we have added polynomial features of degree 11. As expected, $\theta$ is a vector of length 12: one $\theta_{i}$ per feature, plus one intercept term.
#
# Some of these $\theta$ values are _very_ large! We're seeing quite a few values in the tens of millions. There is no universal guideline about an "appropriate" range for $\theta$ values, but keep in mind that our features are standardized, and our labels are in the range of hundreds. Thus, there is no reason for $\theta_{i} > 1000000$. 🚓
#
# Ridge regularization penalizes large $\theta_{i}$ values by adding a regularization term to the cost function.
#
# $$
# J(\theta) = \frac{1}{m}\sum_{i=1}^{m}(h_{\theta}(x^{(i)}) - y^{(i)})^{2} + \alpha \sum_{j=1}^{n}\theta_{j}^{2}
# $$
#
# Hopefully this can help keep our model parameters under control!
#
# sklearn directly integrates ridge regularization into the model api. Therefore all we have to do is switch the vanilla `LinearRegression` model with a [`Ridge`](https://scikit-learn.org/stable/modules/generated/sklearn.linear_model.Ridge.html) instance:
# +
from sklearn.linear_model import Ridge
reg = Ridge(alpha=1, random_state=0).fit(X_poly_scaled, y)
# -
# It's that easy! sklearn takes care of choosing the correct regularized cost function, and optimizing it using gradient descent. If you need a refresher on what these steps are, check out lecture 3.5
#
# We can now check if our model parameters are better behaved:
theta = np.append(reg.intercept_, reg.coef_)
theta
# All $\theta_{i}$ are in the range of tens, so the ridge regularization worked! Our model is less likely to be overfit. We can verify this graphically:
plot_polynomial_regression(X, y, reg, poly, scaler)
# The hypothesis no longer seems to model noisy data points. Instead it was "smoothed" by our ridge regularization, and we can expect it to generalize better.
#
# ℹ️ Note how the hypothesis is very similar to the polynomial regression of degree 4 we have trained in the final task of last lecture. This agrees with the fact that model parameter values beyond $\theta_{4}$ are quite small compared to the rest. Small $\theta_{i}$ values mean that the features don't contribute much to the overall hypothesis. This is like saying that we gave extra "polynomial power" to the regression model, but the optimization procedure found this extra power unnecessary. 💁♂️This is an indication that polynomial features of degree 11 might have been overkill here!
# ## 4. Analysis
# We haven't talked about the `Ridge` argument: `alpha`. The regularization term added to the MSE cost function is multiplied by a _regularization weight_ , $\alpha$. This allows us to control the strength of the regularization penalty. Let's investigate what is the effect of $\alpha$ on our model parameter values $\theta$.
#
# We'll train 100 different `Ridge` regression models, each with a different value of $\alpha$. We can then plot the values of $\theta$ and observe their relationship:
#
# +
def train_ridge(alpha, X, y):
    """Fit a Ridge model with regularization weight *alpha* and return its
    parameter vector (intercept followed by the coefficients).

    NOTE(review): the X parameter is ignored — the model is fit on the
    notebook-global X_poly_scaled instead. Callers below pass the raw X, so
    making the function use its parameter would change the alpha-sweep plots;
    confirm the intended input before refactoring.
    """
    reg = Ridge(alpha=alpha).fit(X_poly_scaled, y)
    return np.append(reg.intercept_, reg.coef_)
alphas = np.linspace(0.000001, 1, 100)
thetas = [train_ridge(alpha, X, y) for alpha in alphas]
thetas = np.array(thetas)
fig = plt.figure()
ax = fig.add_subplot(111)
for i in range(0, thetas.shape[1]):
theta = thetas[:, i]
ax.plot(alphas, theta)
ax.set_xlabel('α')
ax.set_ylabel('θ')
ax.set_title('Model Parameters vs Regularization Weight');
# -
# Each colored line is a parameter $\theta_{i}$. We can spot a reduction in the absolute value of the model parameters, but it happens all scrunched up for very small values of $\alpha$. Let's "zoom in" a little:
# +
alphas = np.linspace(0.000001, 0.0001, 100)
thetas = [train_ridge(alpha, X, y) for alpha in alphas]
thetas = np.array(thetas)
fig = plt.figure()
ax = fig.add_subplot(111)
for i in range(0, thetas.shape[1]):
theta = thetas[:, i]
ax.plot(alphas, theta)
ax.set_xlabel('α')
ax.set_ylabel('θ')
ax.set_title('Model Parameters vs Regularization Weight');
# -
# Here we can clearly see the effect of $\alpha$ on the model parameters. For big $\alpha$, the effect of the regularization is stronger, and the large values of $\theta$ are more penalized. This trend slows down and the $\theta$ values converge around $\alpha \approx 0.0001$. This means that for this regression model, there is no additional regularization provided by $\alpha > 0.0001$
#
# ℹ️ Notice that this $\alpha$ threshold of 0.0001 is much smaller than the default 1.0 used in the [section above](#3.-Ridge-Regression). If a regularized model is _underfit_ , it can be helpful to lower $\alpha$ in order to increase modeling power.
# ## 5. Ridge Classification
#
# The penalty term added to the cost function in ridge regularization isn't specific to the MSE cost function... it can also be applied to our logistic regression's cross-entropy cost function!
#
# You are going to learn how to apply ridge regularization in classification task _all on your own_. No need to worry, this is very similar to the sections above since sklearn has a model api class especially dedicated to ridge regularization for logistic regression: [`RidgeClassifier`](https://scikit-learn.org/stable/modules/generated/sklearn.linear_model.RidgeClassifier.html).
#
# 💪💪💪 Train a ridge regularized polynomial logistic regression classifier. And then tell your friends, because that's a badass model name. 😎Here's a few steps to help out:
# - Load the `wine_quality.csv` dataset
# - use the `sulphates` and `alcohol` columns as features, and the `tasty` column as categorical label
# - store the feature matrix and label vector in two NumPy `ndarray`s, `X` and `y`
# - use a polynomial feature preprocessor named `poly` and a feature scaler called `scaler`
# - name your `RidgeClassifier` instance `clf` (for classifier)
# - use polynomial degree 5, and default regularization weight 1.0
#
# You are only expected to _fit_ your model. You can then run the two cells below to unit test your code, and visualize the non-linear decision boundary.
# +
import pandas as pd
import numpy as np
from sklearn.linear_model import RidgeClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.preprocessing import PolynomialFeatures
from sklearn.preprocessing import StandardScaler
df = pd.read_csv('wine_quality.csv')
# Feature matrix (sulphates, alcohol) and binary label (tasty) as NumPy arrays
X = np.asarray(df[['sulphates', 'alcohol']])
y = np.asarray(df['tasty'])
# Degree-5 polynomial feature expansion (no bias column: the model adds its own intercept)
poly = PolynomialFeatures(5, include_bias=False)
poly = poly.fit(X)
X_poly = poly.transform(X)
# Standardize the expanded features to zero mean and unit variance
scaler = StandardScaler()
scaler = scaler.fit(X_poly)
X_poly_scaled = scaler.transform(X_poly)
# Ridge-regularized linear classifier, default regularization weight alpha=1.0
clf = RidgeClassifier(alpha = 1.0).fit(X_poly_scaled,y)
# +
def plot_decision_boundary(ax, X, poly, scaler, clf):
    """Draw the classifier's decision boundary as contour lines on *ax*.

    X is the raw 2-column feature matrix; poly/scaler are the fitted feature
    transformers; clf is the fitted classifier.
    """
    step = .02  # step size in the mesh
    # Pad the data range by 1 on each side so the boundary is not clipped,
    # then build a dense 2-D mesh over that rectangle.
    axis1 = np.arange(X[:, 0].min() - 1, X[:, 0].max() + 1, step)
    axis2 = np.arange(X[:, 1].min() - 1, X[:, 1].max() + 1, step)
    xx1, xx2 = np.meshgrid(axis1, axis2)
    # Classify every mesh point, applying the training-time feature pipeline.
    mesh_points = np.c_[xx1.ravel(), xx2.ravel()]
    Z = clf.predict(scaler.transform(poly.transform(mesh_points)))
    # Contour lines separate regions with different predicted classes.
    ax.contour(xx1, xx2, Z.reshape(xx1.shape), cmap=plt.cm.Greens)
def plot_classification(X, y, poly, scaler, clf):
    """Scatter the wine examples colored by tastiness, then overlay the
    classifier's decision boundary."""
    palette = sns.color_palette('husl').as_hex()
    fig = plt.figure(dpi=120)
    ax = fig.add_subplot(111)
    # One scatter call per class: positives first, then negatives (legend order).
    for label_value, color, label in ((1, palette[0], 'tasty'), (0, palette[4], 'not tasty')):
        subset = X[y == label_value]
        ax.scatter(subset[:, 0], subset[:, 1], alpha=0.4, s=8, c=color, label=label)
    # Overlay the non-linear decision boundary of the fitted classifier.
    plot_decision_boundary(ax, X, poly, scaler, clf)
    # Fixed limits keep the view comparable across reruns of the notebook.
    ax.set_ylim((-2, 4))
    ax.set_xlim((-2, 4))
    ax.set_xlabel('Sulphates (norm)')
    ax.set_ylabel('Alcohol (norm)')
    ax.legend()
    ax.set_title('Wine Tastiness vs Sulphates & Alcohol Contents');
# +
import math
def test_classification():
    """Smoke-test the fitted wine pipeline (notebook globals poly, scaler, clf).

    Pushes one fixed point through the same transform chain used at training
    time and checks the decision-function value against a reference number.
    """
    X_predict = np.array([1, -1]).reshape(1, 2)  # one sample: sulphates=1, alcohol=-1
    X_predict_poly = poly.transform(X_predict)
    X_predict_poly_scaled = scaler.transform(X_predict_poly)
    # Signed distance to the decision boundary; negative -> predicted "not tasty"
    y_predict = clf.decision_function(X_predict_poly_scaled)
    assert math.isclose(y_predict, -0.05693, rel_tol=1e-4)
    print('Success! 🎉')
test_classification()
plot_classification(X, y, poly, scaler, clf)
# -
# Congrats on training your first model from scratch! 🎊
#
# 🧠 Can you tell which sides of the decision boundary are positive vs negative predictions?
#
# 🧠 Are there graphical signs that the regularization worked?
# ## 6. Summary
#
# Today, we learned about **regularization**. First we defined **overfitting**, and how avoiding it promises a better model **generalization**. We then explained how to avoid overfitting, by preventing model parameters values from getting too big. We understood that this can be implemented by adding an extra term to the model's **cost-function**. We also highlighted how a data scientist must find the right **balance** between underfit and overfit models. We tested the **ridge** regularization technique on a polynomial regression model of degree 11. We then analysed the influence of the **regularization weight**, $\alpha$, before implementing our own **regularized classifier**.
#
# # Resources
#
# ## Core Resources
#
# - [**Slides**](https://docs.google.com/presentation/d/1Gb_jGPy0YIH8qDi2AgBQEwaoT8uPbyG_5N3dHPlthrM/edit?usp=sharing)
# - [Regularization in ML](https://towardsdatascience.com/regularization-in-machine-learning-76441ddcf99a)
# Comprehensive list of cost-function regularization methods
#
# ## Additional Resources
#
# - [Visualizing Ridge Regression](https://xavierbourretsicotte.github.io/animation_ridge.html)
|
learning_better_pt.2.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
# +
import pandas as pd
import json
import os
import matplotlib.pyplot as plt
import seaborn as sb
from steves_utils.summary_utils import (
get_experiments_from_path
)
from steves_utils.utils_v2 import (
get_experiments_base_path
)
# +
# Load every trial of the seed-variation experiment.
trials = get_experiments_from_path(
    os.path.join(get_experiments_base_path(), "hyperparameters_1/seeds/oracle.run2")
)
metric_columns = [
    "source_val_label_accuracy",
    "source_val_label_loss",
    "target_val_label_accuracy",
    "target_val_label_loss",
    "total_epochs_trained",
    "total_experiment_time_secs",
]
# Results are repeated once per domain within a trial, so keep only the first row
# (`.iloc[[0]]` keeps it as a one-row DataFrame).
# Collect the rows and concatenate once: DataFrame.append is deprecated (removed
# in pandas 2.0) and appending inside a loop is quadratic.
rows = [pd.DataFrame(trial["results"])[metric_columns].iloc[[0]] for trial in trials]
all_trials = pd.concat(rows, ignore_index=True) if rows else pd.DataFrame(columns=metric_columns)
all_trials
# -
m = all_trials[["source_val_label_accuracy", "target_val_label_accuracy"]].mean()
m
s = all_trials[["source_val_label_accuracy", "target_val_label_accuracy"]].std()
s
ax = m.plot(kind = "bar", y = "mean", legend = False, title = "Average accuracy for varying seed", xerr = s)
ax.set_ylim(0,1)
ax = sb.boxplot(x=all_trials["source_val_label_accuracy"])
ax = sb.boxplot(x=all_trials["target_val_label_accuracy"])
# ### We conclude that varying the seed does not have a meaningful impact on accuracy
# +
# Load every trial of the dataset-seed-variation experiment.
trials = get_experiments_from_path(
    os.path.join(get_experiments_base_path(), "hyperparameters_1/dataset_seeds/oracle.run2")
)
metric_columns = [
    "source_val_label_accuracy",
    "source_val_label_loss",
    "target_val_label_accuracy",
    "target_val_label_loss",
    "total_epochs_trained",
    "total_experiment_time_secs",
]
# Results are repeated once per domain within a trial, so keep only the first row
# (`.iloc[[0]]` keeps it as a one-row DataFrame).
# Collect the rows and concatenate once: DataFrame.append is deprecated (removed
# in pandas 2.0) and appending inside a loop is quadratic.
rows = [pd.DataFrame(trial["results"])[metric_columns].iloc[[0]] for trial in trials]
all_trials = pd.concat(rows, ignore_index=True) if rows else pd.DataFrame(columns=metric_columns)
all_trials
# -
ax = sb.boxplot(x=all_trials["source_val_label_accuracy"])
ax = sb.boxplot(x=all_trials["target_val_label_accuracy"])
# ### We conclude that varying the dataset_seed does not have a meaningful impact on accuracy
|
analysis/hyperparameters_1/summary.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Getting started
#
# The main thesis behind this published resource is that children have adverse events after drug exposure not just from the disease process but also from the biological processes from their growth and development. The National Institute of Child Health and Human Development defines 7 stages of child development for which we estimate signals: term neonate (0-27 days), infancy (28 days-1 year), toddler (1 year-2 years), early childhood (2-5 years), middle childhood (6-11 years), early adolescence 12-17 years, and late adolescence (18-21 years). The database contains tables of drug safety signals with drug, adverse event, and biological ontologies to facilitate teasing apart influences and contributions adverse drug events during childhood.
# ## An Example
#
# The package helps with identifying, retrieving, and loading the database.
#
# Retrieve the database:
# !curl -o ../data/effect_peds_19q2_v0.3_20211119.sqlite https://pds-database.s3.amazonaws.com/effect_peds_19q2_v0.3_20211119.sqlite
# Connect to the database:
import sqlite3
db_file="../data/effect_peds_19q2_v0.3_20211119.sqlite"
con = sqlite3.connect(db_file)
# Do something:
#https://www.sqlitetutorial.net/sqlite-python/sqlite-python-select/
def select_all_table(conn, table=""):
    """Return all rows of *table*, with the column names as the first element.

    :param conn: an open sqlite3 Connection object
    :param table: name of the table to dump (must be a plain identifier)
    :return: list whose first element is the list of column names, followed
             by one tuple per row
    :raises ValueError: if *table* is not a valid identifier (table names
             cannot be bound as SQL parameters, so reject anything that could
             smuggle SQL into the statement)
    """
    if not table.isidentifier():
        raise ValueError("invalid table name: {!r}".format(table))
    cur = conn.cursor()
    # Quote the (validated) identifier for safety.
    cur.execute('SELECT * FROM "{}"'.format(table))
    names = [d[0] for d in cur.description]
    return [names] + cur.fetchall()
import pandas as pd
# +
tmp = select_all_table(con,table="ade")
ade_table = pd.DataFrame(tmp[1:],columns=tmp[0])
display(ade_table.head())
print(ade_table.shape)
tmp = select_all_table(con,table="ade_nichd")
ade_nichd_table = pd.DataFrame(tmp[1:],columns=tmp[0])
display(ade_nichd_table.head())
print(ade_nichd_table.shape)
# -
plot_dat = pd.merge(
ade_table.loc[:,['ade','cluster_id']],
ade_nichd_table.loc[:,['ade','nichd','norm','gam_score_90mse']],
on="ade")
plot_dat.head()
plot_dat.cluster_id = pd.Categorical(plot_dat.cluster_id,ordered=True)
import matplotlib.pyplot as plt
import seaborn as sns
# +
fig,ax = plt.subplots(dpi=300)
sns.lineplot(x='nichd',y='norm',
ci='sd',
hue='cluster_id',
data=plot_dat,
ax=ax
)
plt.legend(bbox_to_anchor=(1, 1),
loc='upper left',
title='Signal pattern',
title_fontsize=14,
labels=['Plateau','Increase','Inverse Plateau',"Decrease"],
ncol=1)
plt.xticks(rotation = 45)
plt.xlabel('')
plt.ylabel('Normalized drug-event signal')
# -
# Closing the database
con.close()
|
vignettes/Getting_Started.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .r
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Ruby 2.5.0
# language: ruby
# name: ruby
# ---
# +
# Create a Charty plotter backed by matplotlib and enable inline rendering in IRuby.
require 'charty'
charty = Charty::Plotter.new(:matplot)
Charty::Matplot.activate_iruby_integration
# -
# Build a small Daru data frame and hand it to Charty as the data source.
require 'daru'
df = Daru::DataFrame.new({'a':[1,2,3,4], 'b':[4,5,6,7], 'c':[8, 9, 10, 11]})
charty.table = df
# NOTE(review): the frame's columns are :a, :b and :c, but every chart call below
# references :price, :sales and :id -- presumably copied from another example.
# Verify these symbols against the data frame's actual vector names.
bar = charty.to_bar(:price, :sales)
bar.render('sample')
boxplot = charty.to_boxplot(:price, :sales)
boxplot.render('sample')
bubble = charty.to_bubble(:price, :sales, :id)
bubble.render('sample')
curve = charty.to_curve(:price, :sales)
curve.render('sample')
scatter = charty.to_scatter(:price, :sales)
scatter.render('sample')
errorbar = charty.to_errorbar(:price, :sales)
errorbar.render('sample')
# NOTE(review): `to_hst` looks like it may be a typo for a histogram helper --
# confirm the method name against the charty API.
hst= charty.to_hst(:price, :sales)
hst.render('sample')
|
examples/daru.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # DAT257x: Reinforcement Learning Explained
#
# ## Lab 5: Temporal Difference Learning
#
# ### Exercise 5.2: SARSA Agent
# +
import numpy as np
import sys
from collections import defaultdict
if "../" not in sys.path:
sys.path.append("../")
from lib.envs.simple_rooms import SimpleRoomsEnv
from lib.envs.windy_gridworld import WindyGridworldEnv
from lib.envs.cliff_walking import CliffWalkingEnv
from lib.simulation import Experiment
# -
class Agent(object):
    """Abstract base class for agents: stores the action set and defines the
    policy interface that subclasses must implement."""

    def __init__(self, actions):
        # Remember the available actions and cache how many there are.
        self.actions = actions
        self.num_actions = len(actions)

    def act(self, state):
        """Return the action chosen for *state*; subclasses must override."""
        raise NotImplementedError
class SarsaAgent(Agent):
    """Tabular SARSA agent with an epsilon-greedy behavior policy.

    Update rule: Q(s,a) <- Q(s,a) + alpha * (r + gamma * Q(s',a') - Q(s,a)).
    """

    def __init__(self, actions, epsilon=0.01, alpha=0.5, gamma=1):
        super(SarsaAgent, self).__init__(actions)
        # Q-table: state string -> per-action value array, zero-initialized
        # on first access via the defaultdict factory.
        self.Q = defaultdict(lambda: np.zeros(self.num_actions))
        self.epsilon = epsilon  # exploration rate
        self.alpha = alpha      # learning rate
        self.gamma = gamma      # discount factor

    def stateToString(self, state):
        """Serialize a state (scalar or sequence of digits) into a string key."""
        mystring = ""
        if np.isscalar(state):
            mystring = str(state)
        else:
            for digit in state:
                mystring += str(digit)
        return mystring

    def act(self, state):
        """Sample an action from the epsilon-greedy policy for *state*.

        (A leftover `np.random.randint` draw that was immediately overwritten
        has been removed: it was dead code and consumed an RNG draw per call.)
        """
        stateStr = self.stateToString(state)
        # Give every action the exploration mass epsilon / num_actions ...
        A = np.ones(self.num_actions, dtype=float) * self.epsilon / self.num_actions
        # ... and split the remaining (1 - epsilon) among all maximal actions,
        # so ties share the greedy probability equally.
        best_actions = np.argwhere(self.Q[stateStr] == np.amax(self.Q[stateStr])).flatten()
        for i in best_actions:
            A[i] += (1.0 - self.epsilon)/best_actions.size
        return np.random.choice(np.arange(self.num_actions), p=A)

    def learn(self, state1, action1, reward, state2, action2):
        """Apply the SARSA update for transition (s, a, r, s', a').

        td_target = r + gamma * Q(s', a'); the no-op string literal that
        documented this in the original body is folded into this docstring.
        """
        state1Str = self.stateToString(state1)
        state2Str = self.stateToString(state2)
        td_target = reward + self.gamma * self.Q[state2Str][action2]
        td_delta = td_target - self.Q[state1Str][action1]
        self.Q[state1Str][action1] += self.alpha * td_delta
interactive = True
# %matplotlib nbagg
env = SimpleRoomsEnv()
agent = SarsaAgent(range(env.action_space.n))
experiment = Experiment(env, agent)
experiment.run_sarsa(10, interactive)
interactive = False
# %matplotlib inline
env = SimpleRoomsEnv()
agent = SarsaAgent(range(env.action_space.n))
experiment = Experiment(env, agent)
experiment.run_sarsa(50, interactive)
interactive = True
# %matplotlib nbagg
env = CliffWalkingEnv()
agent = SarsaAgent(range(env.action_space.n))
experiment = Experiment(env, agent)
experiment.run_sarsa(10, interactive)
interactive = False
# %matplotlib inline
env = CliffWalkingEnv()
agent = SarsaAgent(range(env.action_space.n))
experiment = Experiment(env, agent)
experiment.run_sarsa(100, interactive)
interactive = False
# %matplotlib inline
env = WindyGridworldEnv()
agent = SarsaAgent(range(env.action_space.n))
experiment = Experiment(env, agent)
experiment.run_sarsa(50, interactive)
|
ReinforcementLearning/DAT257x/library/LabFiles/Module 5/Ex 5.2 SARSA Agent.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/MGokcayK/AdvancedProbabilityofDiscereSystems/blob/master/Homework1.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + [markdown] id="UHttCYIc7cf7" colab_type="text"
# # TR
# ##Ödev : Herhangi bir permutasyon problemi tanımlayın ve Python codu ile çözün.
#
# ### Soru :
# 3,4,6,8,9 rakamlarını kullanarak her rakamın birden fazla kez kullanmamak şartıyla oluşturacağımız rakamların kaç tanesi 1000'den büyüktür?
#
# ### Çözüm :
# Eğer 5 rakam arasından 3 ya da daha az sayıda rakam seçersek bu sayılar 1000'den küçük olacaktır. Dolayısıyla 5 rakam arasından en az 4 rakam seçmemiz gerekir.
#
# $$ P^{5}_{4} + P^{5}_{5} = \frac{5!}{(5-4)!} + \frac{5!}{(5-5)!} $$
# $$ = \frac{5!}{(1)!} + \frac{5!}{(0)!} $$
# $$ = 120 + 120 = 240 $$
#
#
# -------------------------------------------------------------------------------
# # EN
# ##Homework : Define any permutation problem and solve it with Python code.
#
# ### Question :
# How many numbers greater than 1000 can be formed with the digits 3,4,6,8,9 if a digit cannot occur more than once in a number?
#
# ### Solution :
#
# If we select 3 or less digits from 5 digits, it will be less than 1000. Therefore, we need to select at least 4 digits from 5 digits set. So;
#
# $$ P^{5}_{4} + P^{5}_{5} = \frac{5!}{(5-4)!} + \frac{5!}{(5-5)!} $$
# $$ = \frac{5!}{(1)!} + \frac{5!}{(0)!} $$
# $$ = 120 + 120 = 240 $$
# + id="Ele9cjlz9jS_" colab_type="code" outputId="fef29908-0391-4dc5-f6b9-25c8ef72abd4" colab={"base_uri": "https://localhost:8080/", "height": 1000}
from pylab import *
from itertools import permutations
def factorial(n):
    '''
    Factorial = n * (n-1) * (n-2) * ... * 3 * 2 * 1
    Return n! for a non-negative integer n (0! == 1, since the loop is empty).
    '''
    result = 1
    for k in range(2, n + 1):
        result *= k
    return result
def ordered_permutation(n, r):
    '''
    Permutation = n! / (n-r)!
    Number of ordered arrangements of r objects taken from n.

    Uses the standard library's math.factorial instead of the hand-rolled
    helper; keeps the original float return (true division), so callers that
    expect e.g. 120.0 continue to work.
    '''
    import math  # local import keeps this notebook cell self-contained
    return math.factorial(n) / math.factorial(n - r)
def show_digists(digits, r=None, show_elements_num = False):
    '''
    Print every r-length permutation of *digits*, one per line.
    If show_elements_num is true, also print how many permutations there are
    (r=None means full-length permutations, as with itertools.permutations).
    '''
    all_perms = list(permutations(digits, r))  # built-in itertools helper
    for p in all_perms:
        print(p)
    if show_elements_num:
        total = len(all_perms)
        print("TR; Eleman sayısı : {}, EN; Element number : {}".format(total, total))
if __name__ == '__main__':
    # How many numbers > 1000 can be formed from {3,4,6,8,9} without repeating
    # a digit: choose 4 of 5 or all 5, order matters -> P(5,4) + P(5,5).
    P_5_4 = ordered_permutation(5,4)  # permutations of 5 objects taken 4 at a time
    P_5_5 = ordered_permutation(5,5)  # permutations of 5 objects taken 5 at a time
    # Renamed from `sum`, which shadowed the built-in sum().
    total = P_5_4 + P_5_5
    print("TR; Toplam eleman sayısı.: {}, \nEN; Sum of element number. : {}".format(total, total))
    digits = [3,4,6,8,9]
    show_digists(digits, 4, True)
    show_digists(digits, 5, True)
|
Homework1.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .jl
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Julia 1.0.3
# language: julia
# name: julia-1.0
# ---
# + [markdown] deletable=true editable=true
# # Analyze LDL by thresholding at 145mg/dL
# + deletable=true editable=true
using Distributed
addprocs(30)
nprocs()
# + deletable=true editable=true
using MendelIHT
using SnpArrays
using DataFrames
using Distributions
using DelimitedFiles
using BenchmarkTools
using Random
using LinearAlgebra
using GLM
using CSV
using Dates
# + [markdown] deletable=true editable=true
# # Import data
#
# # + ** For description of what each phenotype column means**, see here:
# https://www.ncbi.nlm.nih.gov/projects/gap/cgi-bin/dataset.cgi?study_id=phs000276.v2.p1&phv=129612&phd=&pha=2896&pht=2005&phvf=&phdf=&phaf=&phtf=&dssp=1&consent=&temp=1
#
# # + **We have already matched up phenotype with genotypes** in the files below
# + deletable=true editable=true
# import full genotype data
kevin_stampeed = SnpArray("../kevin_imputed.bed")
# import full phenotype data
phenotype_data = CSV.read("full_phenotype_sorted", delim=',', header=true)
# + [markdown] deletable=true editable=true
# # Filtered phenotype and genotype data for LDL (FS_KOL_L)
# + deletable=true editable=true
# exclude samples without HDL measurements
LDL = phenotype_data[:FS_KOL_L]
missing_LDL_data = LDL .== "X"
# exclude people that are fasting
fasting_data = phenotype_data[:FASTING_STATUS]
contains_nonfasting_blood = (fasting_data .== "1") .+ (fasting_data .== "X")
# exlucde people on diabetes medication
diabetes_med = phenotype_data[:a10atc]
contains_diabetes_medication = (diabetes_med .== "1") .+ (diabetes_med .== "X")
# exclude SNPs with maf < 0.01 and SNPs with HWE p-value < 0.00001
rowmask, snps_to_keep = SnpArrays.filter(kevin_stampeed, min_success_rate_per_row=1.0,
min_success_rate_per_col=1.0, min_maf=0.01, min_hwe_pval=1e-5)
# combine
samples_to_exclude = missing_LDL_data .+ contains_nonfasting_blood .+ contains_diabetes_medication .+ rowmask
samples_to_keep = samples_to_exclude .== 0
@show count(snps_to_keep)
@show count(samples_to_keep)
# + deletable=true editable=true
SnpArrays.filter("../kevin_imputed", samples_to_keep, snps_to_keep, des="kevin_imputed_filtered_LDL")
# + [markdown] deletable=true editable=true
# ## Compute top 2 principal components on resulting file using plink2
#
# The following command was executed:
# ```
# ./plink2 --bfile kevin_imputed_filtered_LDL --pca 2
# ```
# + [markdown] deletable=true editable=true
# # Begin analysis
#
# Here we truncate the LDL level at 145 mg/dL, which is the mid-range for [borderline-high LDL cholesterol category](https://medlineplus.gov/ldlthebadcholesterol.html).
# + deletable=true editable=true
# first check genotype and phenotype files actually match
genotype_order = CSV.read("kevin_imputed_filtered_LDL.fam", delim=' ', header=false)[:, 1]
phenotype_order = phenotype_data[samples_to_keep, 2]
all(phenotype_order .== genotype_order)
# + deletable=true editable=true
# Dichotomize LDL at 145 mg/dL (mid-range of the borderline-high LDL category).
cutoff = 145
molecular_weight = 386.654 #g/mol -- presumably the molar mass of cholesterol; confirm source
# convert LDL from mmol/L to mg/dL: mg/dL = mmol/L * (g/mol) / 10
LDL_converted = parse.(Float64, LDL[samples_to_keep]) .* molecular_weight ./ 10
# truncate: 1.0 if LDL >= cutoff, else 0.0 (binary response for the IHT run)
LDL_truncated = LDL_converted .>= cutoff
y = convert(Vector{Float64}, LDL_truncated)
# number of cases (samples at or above the cutoff)
@show count(!iszero, y)
# check truncation: show the binary label next to the continuous value
[y LDL_converted]
# + deletable=true editable=true
x = SnpArray("kevin_imputed_filtered_LDL.bed")
# + [markdown] deletable=true editable=true
# # add non-genetic covariates
# + deletable=true editable=true
n, p = size(x)
z = zeros(n, 4)
# add intercept
z[:, 1] .= ones(n)
# add sexOCPG
for i in 1:n
if phenotype_data[:SEX][i] == 1 #males
z[i, 2] = 0.0
else
# oral contraceptive: 0 = no, 1 = yes, X = unknown
# pregnancy: 1 = yes, 2 = no, 3 and X = unknown
my_covariates = (phenotype_data[:ZP4202U][i], phenotype_data[:ZT20][i])
my_covariates == ("0", "1") && (z[i, 2] = 1)
my_covariates == ("0", "2") && (z[i, 2] = 2)
my_covariates == ("0", "3") && (z[i, 2] = 3)
my_covariates == ("0", "X") && (z[i, 2] = 3)
my_covariates == ("1", "1") && (z[i, 2] = 4)
my_covariates == ("1", "2") && (z[i, 2] = 5)
my_covariates == ("1", "3") && (z[i, 2] = 6)
my_covariates == ("1", "X") && (z[i, 2] = 6)
my_covariates == ("X", "1") && (z[i, 2] = 7)
my_covariates == ("X", "2") && (z[i, 2] = 8)
my_covariates == ("X", "3") && (z[i, 2] = 9)
my_covariates == ("X", "X") && (z[i, 2] = 9)
end
end
# add first 2 principal components
pc = CSV.read("kevin_imputed_filtered_LDL.eigenvec", delim="\t", header=true)
pc1 = pc[:, 3]
pc2 = pc[:, 4]
# standardize all covariates
mean_sexOCPG = mean(z[:, 2])
mean_pc1 = mean(pc1)
mean_pc2 = mean(pc2)
std_pc1 = std(pc1)
std_pc2 = std(pc2)
std_sexOCPG = std(z[:, 2])
z[:, 2] .= (z[:, 2] .- mean_sexOCPG) ./ std_sexOCPG
z[:, 3] .= (pc1 .- mean_pc1) ./ std_pc1
z[:, 4] .= (pc2 .- mean_pc2) ./ std_pc2
# + [markdown] deletable=true editable=true
# # Run Cross validation
# + deletable=true editable=true
#set seed
Random.seed!(2019)
d = Bernoulli
l = canonicallink(d())
path = collect(1:20)
num_folds = 5
folds = rand(1:num_folds, size(x, 1))
# + deletable=true editable=true
println("start time = " * string(Dates.format(now(), "HH:MM")))
mses = cv_iht(d(), l, x, z, y, 1, path, folds, num_folds, debias=false, parallel=true)
println("end time = " * string(Dates.format(now(), "HH:MM")))
# + [markdown] deletable=true editable=true
# # Run with full dataset on best $k$
# + deletable=true editable=true
xbm = SnpBitMatrix{Float64}(x, model=ADDITIVE_MODEL, center=true, scale=true);
@show k = argmin(mses)
d = Normal
l = canonicallink(d())
result = L0_reg(x, xbm, z, y, 1, k, d(), l, debias=true)
# + deletable=true editable=true
estimated_b = result.beta
position = findall(!iszero, estimated_b)
found_snps = CSV.read("kevin_imputed_filtered_LDL.bim", delim='\t', header=false)[position, :]
# + deletable=true editable=true
xbm = SnpBitMatrix{Float64}(x, model=ADDITIVE_MODEL, center=true, scale=true);
@show k = argmin(mses)
d = Normal
l = canonicallink(d())
result = L0_reg(x, xbm, z, y, 1, k, d(), l, debias=false)
# + deletable=true editable=true
estimated_b = result.beta
position = findall(!iszero, estimated_b)
found_snps = CSV.read("kevin_imputed_filtered_LDL.bim", delim='\t', header=false)[position, :]
# + deletable=true editable=true
|
figures/stampeed/LDL/LDL_thresholded_145.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3.7.6 64-bit (conda)
# name: python_defaultSpec_1598588341112
# ---
# # Python Programming Tutorials (Computer Science)
#
# The 🦉 [Socratica](https://www.youtube.com/channel/UCW6TXMZ5Pq6yL6_k5NZ2e0Q) YouTube Channel has a 33-video [playlist](https://www.youtube.com/playlist?list=PLi01XoE8jYohWFPpC17Z-wWhPOSuh8Er-) devoted to the introduction of Python.
#
# ## #5 Numbers in Python Version 3
# %run video-00.py
# +
from IPython import display
video = display.YouTubeVideo('_87ASgggEg0')
video
display.HTML(f'<a href="{video.src}">link</a>')
# -
# For Python version 3.x we have:
#
# - integers
# - floats
# - complex numbers
a = 28 # The integer literal will allow Python to infer the type.
type(a)
# To assign a float, write a number with a decimal point:
e = 2.718281828
type(e)
# To assign a complex number, add an integer to a number with a decimal point, followed by a _j_:
z = 3 + 5.7j
type(z)
# note that in Python:
#
# $$j = \sqrt{-1}$$
# Decompose this imaginary number with the `real` and `imag` properties of the complex type:
z.real
z.imag
|
socratica/video-05.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
from scipy.stats import uniform, expon, norm
import numpy as np
# Wahrscheinlichkeitsdichtefunktion für Uniform [1, 10] an der Stelle 5
# print(uniform.pdf(x=5, loc=1, scale=9))
# Dieser Wert ist keine Wahrscheinlichkeit sondern nur der Wert der Wahrscheinlichkeitsdichtefunktion. Die Dichte der Wahrscheinlichkeiten hat diesen Wert
[print("{}\that Wert {}".format(x, uniform.pdf(x=x, loc=1, scale=9))) for x in range(0, 12)]
# Das ist die funktion für das Bestimmte Integral --> CDF = Cumulative density function Wahrscheinlichkeitsdichtefunktion
[print("{}\that Wert {}".format(x, uniform.cdf(x=x, loc=1, scale=9))) for x in range(0, 12)]
# Berechnung der Wahrscheinlichkeitsdichte
uniform.cdf(x=4.8, loc=1, scale=9) - uniform.cdf(x=1.2, loc=1, scale=9)
# Wichtig ist auch die generierung von Zufallsvariablen
uniform.rvs(loc=1, scale=9, size=5)
# Gibt 5 Zufallsvariablen im gegenen Range
y= 1
[(y* np.e**(-y*x)) for x in range(0, 12)]
expon.cdf(x=4, scale=1/3)
# Wahrscheinlichkeit, dass jemand einen IQ von > 130 hat
1 - norm.cdf(x=130, loc=100, scale=15)
# loc = mittelpunkt; scale = quardatwurzel von standardabweichung
# welchen IQ haben 95% von allen Menschen?
# mit ppf werden die quantile berechnet
"95% der Menschen haben einen IQ von {} bis {}".format(norm.ppf(q=0.025, loc=100, scale=15),norm.ppf(q=0.975, loc=100, scale=15))
# Der Anhang ppf(...) zum Befehl norm ist die Abkürzung für probability
# point function. Diese Funktion ist die Umkehrung der cdf()-Funktion. Sie
# bestimmt aus einer Wahrscheinlichkeit einen Wert (das Quantil).
norm.cdf(x=115, loc=100, scale=15) - norm.cdf(x=85, loc=100, scale=15)
# D.h., etwa 2/3 der Bevölkerung haben einen IQ zwischen 85 und 115.
# hier kommt die Standardnormalverteilung --> µ = 0 und σ^2 = 1
norm.cdf(x=1.13)
#Für welchen Wert von z ist Φ(z) = 0.7910? (zu R)
#Anders gefragt: Was ist Φ−1(0.7910)?
norm.ppf(q=0.7910)
# Was ist P(Z ≤ −0.2)?
# Weil die Standardnormalverteilung um null herum symmetrisch ist, ist die Fläche links von −0.2 wegen der Symmetrie genau so gross wie die Fläche rechts von 0.2. D.h.:
# P(Z ≤ −0.2) = P(Z ≥ 0.2) = 1 − P(Z ≤ 0.2)
# Dies überprüfen wir leicht mit Python (zu R)
print(norm.cdf(x=-0.2))
print(1 - norm.cdf(x=0.2))
|
Semester/SW03/.ipynb_checkpoints/Skript-checkpoint.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Dynamic vs Static Typing
#
# Static typing languages do not allow variables to change the type of value being assigned
#
# - c
# - c++
# - java
#
# Dynamic typing languages can be assigned any type of value
#
# - python
# # type function
#
# - a built-in function to determine the type of the object referenced by a variable
a = "hello"
type(a)
a = 10
type(a)
a = lambda x: x**2
type(a)
a = 3 + 4j
type(a)
|
python-deepdive/deepdive1/section03/section_03_18.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
# # Beginning of Life
#
# The purpose of this notebook is to establish basic beginning of life parameters of this concept including $k_{eff}$ and neutron energy spectrum.
# +
import openmc
import openmc.deplete
import matplotlib.pyplot as plt
# %matplotlib inline
import numpy as np
# +
fuel = openmc.Material(name="uo2")
fuel.add_element("U", 1, percent_type="ao", enrichment=15.5)
fuel.add_element("O", 2)
fuel.set_density("g/cc", 10.5)
clad = openmc.Material(name='clad');
clad.add_element("Zr",1);
clad.set_density('g/cc',6.0);
water = openmc.Material(name='water');
water.add_element('O',1);
water.add_element('H',2)
water.set_density('g/cc',0.712); # high temperature density
water.add_s_alpha_beta('c_H_in_H2O');
materials = openmc.Materials([fuel,clad,water]);
# -
# ## Pin Cell Geometry
#
# The goal is to establish a pin-cell to provide a simplified representation of the basic criticality and (especially) neutron energy spectrum for this concept.
# +
h_core = 300.;
h_fuel = 200.;
r_fuel = 0.42;
r_pin = 0.45;
P_D = 1.15;
pitch = P_D*(2*r_pin);
fuel_temp = 900; # K, representative fuel temperature
mod_temp = 600; # K, representative moderator temperature
# fuel cylinder:
fuel_cyl = openmc.model.RightCircularCylinder([0.,0.,-h_fuel/2.],
h_fuel, r_fuel);
fuel.volume = np.pi*(r_fuel**2)*h_fuel;
# pin cylinder
pin_cyl = openmc.model.RightCircularCylinder([0.,0.,-(h_fuel+(r_pin-r_fuel))/2.],
h_fuel+(r_pin-r_fuel)*2.,r_pin);
# pin cell container
core_cell = openmc.model.RectangularParallelepiped(-pitch/2.,pitch/2.,
-pitch/2.,pitch/2.,
-h_core/2.,h_core/2.,
boundary_type='reflective');
fuel_cell = openmc.Cell();
fuel_cell.region = -fuel_cyl
fuel_cell.fill = fuel;
fuel_cell.temperature = fuel_temp;
clad_cell = openmc.Cell();
clad_cell.region = +fuel_cyl & -pin_cyl;
clad_cell.fill = clad;
mod_cell = openmc.Cell();
mod_cell.region = +pin_cyl & -core_cell;
mod_cell.fill = water
root_univ = openmc.Universe();
root_univ.add_cells([fuel_cell,clad_cell,mod_cell]);
geometry = openmc.Geometry();
geometry.root_universe = root_univ;
materials.export_to_xml();
geometry.export_to_xml();
# +
settings = openmc.Settings();
settings.run_mode = 'eigenvalue';
settings.particles = 20000;
settings.batches = 200;
settings.inactive = 50
box = openmc.stats.Box(lower_left = (-r_fuel,-r_fuel,-h_fuel/2.),
upper_right = (r_fuel,r_fuel,h_fuel/2.),
only_fissionable=True);
src = openmc.Source(space=box);
settings.source = src;
settings.temperature['method']='interpolation';
settings.export_to_xml();
# -
root_univ.plot(width=(pitch,pitch));
# +
# get flux energy spectrum in the fuel
# filter for the fuel cell
cell_filter = openmc.CellFilter(fuel_cell)
# set up energy bins
N = 1001
energy_bins = np.logspace(-3,7,num=N)
energy_filter = openmc.EnergyFilter(values=energy_bins)
# establish flux tally
flux = openmc.Tally(name='flux')
flux.scores = ['flux']
flux.filters = [cell_filter,energy_filter]
tallies = openmc.Tallies([flux])
tallies.export_to_xml()
# +
operator = openmc.deplete.Operator(geometry,settings,"chain_endfb71_pwr.xml")
power = 1e4;
days = 24*3600;
time_steps = [0.1*days, 0.1*days,0.3*days,0.5*days,1.*days,30.*days,30.*days,100.*days,365.*days,365.*days,365.*days,730.*days,730.*days]
integrator = openmc.deplete.PredictorIntegrator(operator,time_steps,power=power);
integrator.integrate()
# -
# Load the depletion results written by the integrator above.
results = openmc.deplete.ResultsList.from_hdf5('./depletion_results.h5')
# +
# Plot k_eff (with 1-sigma error bars) versus burnup time in days.
time, k = results.get_eigenvalue()
time /= (24 * 60 * 60)  # seconds -> days
plt.errorbar(time, k[:, 0], yerr=k[:, 1])
plt.title('Burnup Result for LEU Pincell')
plt.xlabel('Time [d]')
# Original label was '%k_{eff} \pm \sigma$' -- a stray '%' where the opening
# '$' belongs, so matplotlib never rendered the mathtext. Use a raw string so
# '\p' / '\s' are not treated as (invalid) string escapes.
plt.ylabel(r'$k_{eff} \pm \sigma$')
plt.grid()
|
examples/micro_reactor/haleu_pwr/burnup/.ipynb_checkpoints/haleu_burnup-checkpoint.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ## Tracking multi faces
import cv2
import dlib
import face_recognition
import matplotlib.pyplot as plt
import numpy as np
import os
import tensorflow as tf
def load_data(path):
    """Load known face encodings and their names from .npy files under *path*.

    Parameters
    ----------
    path : str
        Directory prefix (must end with a path separator) containing
        "known_face_encoding.npy" and "known_face_names.npy".

    Returns
    -------
    tuple
        (known_face_encodings_list, known_face_names): a list of 128-d
        encoding vectors, one per known face, and the parallel array of
        face names.
    """
    known_face_encodings_array = np.load(path + "known_face_encoding.npy")
    known_face_names = np.load(path + "known_face_names.npy")
    # One 128-d encoding per known name; the file stores them flattened.
    len_face = len(known_face_names)
    known_face_encodings_array = known_face_encodings_array.reshape(len_face, 128)
    # face_recognition.face_distance expects a list of 1-D arrays, so split
    # the 2-D array into rows. (The original built this with a manual append
    # loop and also created two unused accumulator lists.)
    known_face_encodings_list = list(known_face_encodings_array)
    return known_face_encodings_list, known_face_names
from threading import Thread
import cv2
import imutils
class WebcamVideoStream:
    """Threaded webcam reader.

    A background thread continuously grabs frames from the capture device so
    that read() always returns the most recent frame without blocking the
    caller on I/O.
    """

    def __init__(self, src=0):
        # src: camera index (or source) handed to cv2.VideoCapture.
        self.stream = cv2.VideoCapture(src)
        # Property ids 3 and 4 are CAP_PROP_FRAME_WIDTH / CAP_PROP_FRAME_HEIGHT:
        # request 640x480 capture.
        self.stream.set(3, 640)
        self.stream.set(4, 480)
        # Prime with one frame so read() has something to return even before
        # start() is called.
        (self.grabbed, self.frame) = self.stream.read()
        # Flag polled by the background loop in update().
        self.stopped = False

    def start(self):
        # Start the thread to read frames from the video stream.
        # Returns self so callers can chain: WebcamVideoStream().start().
        Thread(target=self.update, args=()).start()
        return self

    def update(self):
        """Background loop: keep overwriting self.frame until stop() is called."""
        while True:
            if self.stopped:
                return
            (self.grabbed, self.frame) = self.stream.read()

    def read(self):
        """Return the most recently grabbed frame (the same frame may be
        returned repeatedly between grabs)."""
        return self.frame

    def stop(self):
        """Signal the background loop to exit.

        NOTE(review): the VideoCapture is not released here -- presumably the
        caller owns cleanup; confirm before relying on it.
        """
        self.stopped = True
known_face_ecodings_list, known_face_names = load_data("../data/numpy/")
tracker = cv2.MultiTracker_create()
def computer_area(temp_box, list_box):
    """Return the intersection area of *temp_box* with each box in *list_box*.

    Boxes are (top, left, bottom, right) tuples, matching the face-location
    convention used by the commented tracking loop below.

    NOTE(review): the original body was truncated and referenced an undefined
    name (`dx = min(a.xmax,)`), so it raised NameError on any call. This
    implementation completes the apparent intent -- overlap areas used to
    match tracked boxes against newly detected faces -- TODO confirm against
    the caller once it is uncommented.

    Parameters
    ----------
    temp_box : tuple
        (top, left, bottom, right) of the reference box.
    list_box : iterable of tuple
        Candidate boxes in the same format.

    Returns
    -------
    list
        Intersection area with temp_box for each candidate (0 if disjoint).
    """
    top, left, bottom, right = temp_box
    areas = []
    for other in list_box:
        o_top, o_left, o_bottom, o_right = other
        dx = min(right, o_right) - max(left, o_left)
        dy = min(bottom, o_bottom) - max(top, o_top)
        areas.append(dx * dy if dx > 0 and dy > 0 else 0)
    return areas
# +
# video_capture = WebcamVideoStream(src=0).start()
# number_frame = 0
# font = cv2.FONT_HERSHEY_SIMPLEX
# tracker = cv2.MultiTracker_create()
# init_once = False
# name_boxes = []
# boxes_known = []
# name_known = []
# while True:
# frame = video_capture.read()
# # rgb_frame = frame[:, :, ::-1]
# frame_process = frame
# if (number_frame % 20 == 0):
# # Delete tracker and initi tracker
# face_locations = face_recognition.face_locations(frame)
# face_encodings = face_recognition.face_encodings(frame, face_locations)
# if init_once == True:
# tracker.clear()
# tracker = cv2.MultiTracker_create()
# # Create tracker with name known from before frame, and init name_boxes with people known
# #
# name_boxes_temp = [] # name of people
# boxes_known = [] # box of people known
# for newbow, name in zip(boxes, name_boxes):
# if name != "Unknown":
# ok = tracker.add(cv2.TrackerKCF_create(), frame, (newbow[0], newbow[1] , newbow[2], newbow[3]))
# name_boxes_temp.append(name)
# boxes_known.append(newbow)
# print(name_boxes_temp)
# # With people known, continue face recognition in next frame
# # And only change name unknown if (area between unknown and predict > 90 %)
# name_boxes = name_boxes_temp
# for (top, right, bottom, left), face_encoding in zip(face_locations, face_encodings):
# if len(known_face_ecodings_list) > 0:
# distance = face_recognition.face_distance(known_face_ecodings_list, face_encoding)
# point = np.min(distance)
# index_point_min = np.argmin(distance)
# # print(top, right, bottom, left)
# # left top,
# # ok = tracker.add(cv2.TrackerMIL_create(), frame, (139, 510, 325, 324))
# temp_box = (top, left, bottom, right)
# if point > 0.4:
# name = "Unknown"
# ok = tracker.add(cv2.TrackerKCF_create(), frame, (left, top, (right-left), (bottom-top)))
# cv2.rectangle(frame, (left, top), (right, bottom), (0, 127, 0), 3)
# cv2.putText(frame, name, (left + 6, top - 6), font, 1.0, (0, 0, 255), 1)
# name_boxes.append(name)
# elif point <= 0.4:
# name = known_face_names[index_point_min]
# print(name)
# if name not in name_boxes:
# ok = tracker.add(cv2.TrackerKCF_create(), frame, (left, top, (right-left), (bottom-top)))
# cv2.rectangle(frame, (left, top), (right, bottom), (255, 0, 0), 3)
# cv2.putText(frame, name, (left + 6, top - 6), font, 1.0, (0, 0, 255), 1)
# # boxes_known.append((left, top, (right-left), ()))
# name_boxes.append(name)
# else:
# ok, boxes = tracker.update(frame)
# print(tracker)
# print(boxes)
# init_once = True
# for newbox, name in zip(boxes, name_boxes):
# p1 = (int(newbox[0]), int(newbox[1]))
# p2 = (int(newbox[0] + newbox[2]), int(newbox[1] + newbox[3]))
# cv2.rectangle(frame, p1, p2, (0, 0, 255))
# cv2.putText(frame, name, p1, font, 1.0, (0, 0, 255), 1)
# cv2.imshow("frame", frame)
# number_frame += 1
# # Press Q on keyboard to stop recording
# if cv2.waitKey(1) & 0xFF == ord('q'):
# break
# -
video_capture = WebcamVideoStream(src=0).start()
|
src/Webcam_Face_Recognition_Tracking_Reload.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Encodings
#
# Encodings are a set of rules mapping string characters to their binary representations. Python supports dozens of different encoding as seen here in [this link](https://docs.python.org/3/library/codecs.html#standard-encodings). Because the web was originally in English, the first encoding rules mapped binary code to the English alphabet.
#
# The English alphabet has only 26 letters. But other languages have many more characters including accents, tildes and umlauts. As time went on, more encodings were invented to deal with languages other than English. The utf-8 standard tries to provide a single encoding schema that can encompass all text.
#
# The problem is that it's difficult to know what encoding rules were used to make a file unless somebody tells you. The most common encoding by far is utf-8. Pandas will assume that files are utf-8 when you read them in or write them out.
#
# Run the code cell below to read in the population data set.
import pandas as pd
df = pd.read_csv('../data/population_data.csv', skiprows=4)
# Pandas should have been able to read in this data set without any issues. Next, run the code cell below to read in the 'mystery.csv' file.
df.head()
import pandas as pd
df = pd.read_csv('mystery.csv')
# You should have gotten an error: **UnicodeDecodeError: 'utf-8' codec can't decode byte 0xff in position 0: invalid start byte**. This means pandas assumed the file had a utf-8 encoding but had trouble reading in the data file.
#
# Your job in the next cell is to figure out the encoding for the mystery.csv file.
# +
# TODO: Figure out what the encoding is of the mystery.csv file
# HINT: pd.read_csv('mystery.csv', encoding=?) where ? is the string for an encoding like 'ascii'
# HINT: This link has a list of encodings that Python recognizes https://docs.python.org/3/library/codecs.html#standard-encodings
# Python has a file containing a dictionary of encoding names and associated aliases
# This line imports the dictionary and then creates a set of all available encodings
# You can use this set of encodings to search for the correct encoding
# If you'd like to see what this file looks like, execute the following Python code to see where the file is located
# from encodings import aliases
# aliases.__file__
from encodings.aliases import aliases
alias_values = set(aliases.values())
# TODO: iterate through the alias_values list trying out the different encodings to see which one or ones work
# HINT: Use a try - except statement. Otherwise your code will produce an error when reading in the csv file
# with the wrong encoding.
# HINT: In the try statement, print out the encoding name so that you know which one(s) worked.
# Try every known codec alias against mystery.csv and print only the ones
# that decode (and parse) the file cleanly.
for encoding_name in alias_values:
    try:
        pd.read_csv('mystery.csv', encoding=encoding_name)
        print(encoding_name)
    except (UnicodeError, LookupError, pd.errors.ParserError):
        # Wrong or unrecognized codec for this file -- skip it. The original
        # used a bare `except:` and printed a hard-coded 'UnicodeDecodeError'
        # label for *every* failure, mislabeling LookupError/parse failures
        # and swallowing unrelated bugs (even KeyboardInterrupt).
        pass
# -
# # Conclusion
#
# There are dozens of encodings that Python can handle; however, Pandas assumes a utf-8 encoding. This makes sense since utf-8 is very common. However, you will sometimes come across files with other encodings. If you don't know the encoding, you have to search for it.
#
# Note, as always, there is a solution file for this exercise. Go to File->Open.
#
# There is a Python library that can be of some help when you don't know an encoding: chardet. Run the code cells below to see how it works.
#
# install the chardet library
# !pip install chardet
# +
# import the chardet library
import chardet
# use the detect method to find the encoding
# 'rb' means read in the file as binary
with open("mystery.csv", 'rb') as file:
print(chardet.detect(file.read()))
|
Data_Engineering/ETL Pipelines/9_encodings_exercise/9_encodings_exercise.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + id="lh6u3ytiPtZP" outputId="4301863a-f912-4cf2-9bef-dd2c2172d620" colab={"base_uri": "https://localhost:8080/"}
# !pip install -U git+https://github.com/d2l-ai/d2l-en.git@master
# + id="DcWTJw9CPKm5"
from d2l import torch as d2l
import torch
from torch import nn
# + id="2twJjMR3PeFW"
class Seq2SeqAttentionDecoder(d2l.Decoder):
    """Seq2seq decoder whose GRU input is, at each step, the token embedding
    concatenated with an MLP-attention context over the encoder outputs."""

    def __init__(self, vocab_size, embed_size, num_hiddens, num_layers,
                 dropout=0, **kwargs):
        super(Seq2SeqAttentionDecoder, self).__init__(**kwargs)
        # Attention with num_hiddens-sized keys/queries/values scored by an MLP.
        self.attention_cell = d2l.MLPAttention(
            num_hiddens, num_hiddens, num_hiddens, dropout)
        self.embedding = nn.Embedding(vocab_size, embed_size)
        # GRU consumes embedding + attention context concatenated, hence the
        # embed_size + num_hiddens input width.
        self.rnn = nn.GRU(
            embed_size + num_hiddens, num_hiddens, num_layers,
            dropout=dropout)
        # Projects the GRU hidden output to vocabulary logits.
        self.dense = nn.Linear(num_hiddens, vocab_size)

    def init_state(self, enc_outputs, enc_valid_len, *args):
        """Build the decoder state from the encoder's (outputs, hidden_state).

        enc_valid_len masks padded encoder positions inside the attention cell.
        """
        outputs, hidden_state = enc_outputs
        # Transpose outputs to (batch_size, seq_len, num_hiddens)
        return (outputs.permute(1, 0, 2), hidden_state, enc_valid_len)

    def forward(self, X, state):
        """Decode token ids X given state; returns (logits, updated state)."""
        enc_outputs, hidden_state, enc_valid_len = state
        # Embed and switch to time-major so the loop below walks one step at a
        # time: (seq_len, batch_size, embed_size).
        X = self.embedding(X).permute(1, 0, 2)
        outputs = []
        for x in X:
            # query shape: (batch_size, 1, num_hiddens) -- the top-layer
            # hidden state of the previous step.
            query = torch.unsqueeze(hidden_state[-1], dim=1)
            # context has same shape as query
            context = self.attention_cell(
                query, enc_outputs, enc_outputs, enc_valid_len)
            # Concatenate on the feature dimension
            x = torch.cat((context, torch.unsqueeze(x, dim=1)), dim=-1)
            # Reshape x to (1, batch_size, embed_size + num_hiddens)
            out, hidden_state = self.rnn(x.permute(1, 0, 2), hidden_state)
            outputs.append(out)
        # Stack per-step outputs along time, then project to vocab logits.
        outputs = self.dense(torch.cat(outputs, dim=0))
        # Return batch-major logits and the carried-forward state triple.
        return outputs.permute(1, 0, 2), [enc_outputs, hidden_state,
                                          enc_valid_len]
# + id="nmGBLUoEPfdd" outputId="9f9ac8af-04a6-4152-e290-30a6aeae26f7" colab={"base_uri": "https://localhost:8080/"}
encoder = d2l.Seq2SeqEncoder(vocab_size=10, embed_size=8,
num_hiddens=16, num_layers=2)
encoder.eval()
decoder = Seq2SeqAttentionDecoder(vocab_size=10, embed_size=8,
num_hiddens=16, num_layers=2)
decoder.eval()
X = torch.zeros((4, 7), dtype=torch.long)
state = decoder.init_state(encoder(X), None)
out, state = decoder(X, state)
out.shape, len(state), state[0].shape, len(state[1]), state[1][0].shape
# + id="PGVf59y9PhBw" outputId="004ab030-b7b7-4d85-8819-8ed2c4ccac03" colab={"base_uri": "https://localhost:8080/", "height": 279}
embed_size, num_hiddens, num_layers, dropout = 32, 32, 2, 0.1
batch_size, num_steps = 64, 10
lr, num_epochs, device = 0.005, 250, d2l.try_gpu()
train_iter, src_vocab, tgt_vocab = d2l.load_data_nmt(batch_size, num_steps)
encoder = d2l.Seq2SeqEncoder(
len(src_vocab), embed_size, num_hiddens, num_layers, dropout)
decoder = Seq2SeqAttentionDecoder(
len(tgt_vocab), embed_size, num_hiddens, num_layers, dropout)
model = d2l.EncoderDecoder(encoder, decoder)
d2l.train_s2s_ch9(model, train_iter, lr, num_epochs, tgt_vocab, device)
# + id="Fmp-KUjkPje4" outputId="0f70c03d-d78f-4a49-e694-cf3d3778c8a7" colab={"base_uri": "https://localhost:8080/"}
engs = ['go .', "i lost .", 'i\'m home .', 'he\'s calm .']
fras = ['va !', 'j\'ai perdu .', 'je suis chez moi .', 'il est calme .']
d2l.translate(engs, fras, model, src_vocab, tgt_vocab, num_steps, device)
|
Ch10_Attention_Mechanisms/seq2seq_attention.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# <h1> Time series prediction, end-to-end </h1>
#
# This notebook illustrates several models to find the next value of a time-series:
# <ol>
# <li> Linear
# <li> DNN
# <li> CNN
# <li> RNN
# </ol>
PROJECT = "qwiklabs-gcp-636667ae83e902b6" # Replace with your PROJECT
BUCKET = "qwiklabs-gcp-636667ae83e902b6_al" # Replace with your BUCKET
REGION = "us-east1" # Choose an available region for AI Platform
TFVERSION = "1.13" # TF version for AI Platform
SEQ_LEN = 50
import os
os.environ['BUCKET'] = BUCKET
os.environ['PROJECT'] = PROJECT
os.environ['REGION'] = REGION
os.environ['SEQ_LEN'] = str(SEQ_LEN)
os.environ['TFVERSION'] = '1.13'
# <h3> Simulate some time-series data </h3>
#
# Essentially a set of sinusoids with random amplitudes and frequencies.
import tensorflow as tf
print(tf.__version__)
# +
import numpy as np
import seaborn as sns
def create_time_series():
    """Simulate one noisy sinusoid of length SEQ_LEN (module-level constant).

    Returns
    -------
    numpy.ndarray
        sin(t * freq) * ampl + noise for t = 0..SEQ_LEN-1, with freq drawn
        from [0.1, 0.6), ampl from [0.5, 1.5) and i.i.d. uniform noise.
    """
    freq = (np.random.random() * 0.5) + 0.1  # 0.1 to 0.6
    ampl = np.random.random() + 0.5          # 0.5 to 1.5
    # Uniform noise in [-0.3, 0.3). The original computed random()*0.3, which
    # yields [0, 0.3) -- contradicting its own comment and biasing every
    # series upward by +0.15 on average.
    noise = np.random.uniform(-0.3, 0.3, SEQ_LEN)
    x = np.sin(np.arange(0, SEQ_LEN) * freq) * ampl + noise
    return x
flatui = ["#9b59b6", "#3498db", "#95a5a6", "#e74c3c", "#34495e", "#2ecc71"]
for i in range(0, 5):
sns.tsplot( create_time_series(), color=flatui[i%len(flatui)] ); # 5 series
# +
def to_csv(filename, N):
    """Write N simulated time series to *filename*, one comma-separated
    series per line."""
    with open(filename, 'w') as out_file:
        for _ in range(0, N):
            series = create_time_series()
            out_file.write(",".join(map(str, series)) + '\n')
import os
try:
os.makedirs('data/sines/')
except OSError:
pass
to_csv('data/sines/train-1.csv', 1000) # 1000 sequences
to_csv('data/sines/valid-1.csv', 250)
# -
# !head -5 data/sines/*-1.csv
# <h3> Train model locally </h3>
#
# Make sure the code works as intended.
#
# The `model.py` and `task.py` containing the model code is in <a href="sinemodel">sinemodel/</a>
#
# **Complete the TODOs in `model.py` before proceeding!**
#
# Once you've completed the TODOs, set `--model` below to the appropriate model (linear,dnn,cnn,rnn,rnn2 or rnnN) and run it locally for a few steps to test the code.
# + language="bash"
# DATADIR=$(pwd)/data/sines
# OUTDIR=$(pwd)/trained/sines
# rm -rf $OUTDIR
# gcloud ai-platform local train \
# --module-name=sinemodel.task \
# --package-path=${PWD}/sinemodel \
# -- \
# --train_data_path="${DATADIR}/train-1.csv" \
# --eval_data_path="${DATADIR}/valid-1.csv" \
# --output_dir=${OUTDIR} \
# --model=rnn --train_steps=1000 --sequence_length=$SEQ_LEN
# -
# Ten steps:
# * Linear: RMSE = 0.86
# * DNN: RMSE = 0.44
# * RNN: RMSE = 0.46
#
# 1000 steps:
# * Linear:
# * DNN:
# * RNN:
#
#
# <h3> Cloud ML Engine </h3>
#
# Now to train on Cloud ML Engine with more data.
import shutil
shutil.rmtree('data/sines', ignore_errors=True)
os.makedirs('data/sines/')
for i in range(0,10):
to_csv('data/sines/train-{}.csv'.format(i), 1000) # 1000 sequences
to_csv('data/sines/valid-{}.csv'.format(i), 250)
# + language="bash"
# gsutil -m rm -rf gs://${BUCKET}/sines/*
# gsutil -m cp data/sines/*.csv gs://${BUCKET}/sines
# + language="bash"
# for MODEL in linear dnn cnn rnn rnn2 rnnN; do
# OUTDIR=gs://${BUCKET}/sinewaves/${MODEL}
# JOBNAME=sines_${MODEL}_$(date -u +%y%m%d_%H%M%S)
# gsutil -m rm -rf $OUTDIR
# gcloud ml-engine jobs submit training $JOBNAME \
# --region=$REGION \
# --module-name=sinemodel.task \
# --package-path=${PWD}/sinemodel \
# --job-dir=$OUTDIR \
# --staging-bucket=gs://$BUCKET \
# --scale-tier=BASIC_GPU \
# --runtime-version=$TFVERSION \
# -- \
# --train_data_path="gs://${BUCKET}/sines/train*.csv" \
# --eval_data_path="gs://${BUCKET}/sines/valid*.csv" \
# --output_dir=$OUTDIR \
# --train_steps=3000 --sequence_length=$SEQ_LEN --model=$MODEL
# done
# -
# ## Monitor training with TensorBoard
#
# Use this cell to launch tensorboard. If tensorboard appears blank try refreshing after 5 minutes
from google.datalab.ml import TensorBoard
TensorBoard().start('gs://{}/sinewaves'.format(BUCKET))
for pid in TensorBoard.list()['pid']:
TensorBoard().stop(pid)
print('Stopped TensorBoard with pid {}'.format(pid))
# ## Results
#
# Complete the below table with your own results! Then compare your results to the results in the solution notebook.
#
# | Model | Sequence length | # of steps | Minutes | RMSE |
# | --- | ----| --- | --- | --- |
# | linear | 50 | 3000 | - | - |
# | dnn | 50 | 3000 | - | - |
# | cnn | 50 | 3000 | - | - |
# | rnn | 50 | 3000 | - | - |
# | rnn2 | 50 | 3000 | - | - |
# | rnnN | 50 | 3000 | - | - |
# Copyright 2017 Google Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License
|
courses/machine_learning/deepdive/09_sequence/labs/sinewaves.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 2
# language: python
# name: python2
# ---
# # Begee Database
# +
# The usual preamble
import pandas as pd
import matplotlib.pyplot as plt
# %matplotlib inline
# Make the graphs a bit prettier, and bigger
pd.set_option('display.mpl_style', 'default')
import numpy as np
import IPython
import seaborn
import os
# -
def groupby_counts(df, columns):
    """Group dataframe by *columns* and return the number of rows per group.

    Parameters
    ----------
    df : pandas.DataFrame
    columns : list of str
        Column names to group by.

    Returns
    -------
    pandas.DataFrame
        One row per group: the grouping columns plus a 'len' count column,
        sorted by 'len' in descending order.
    """
    grouped = df.groupby(columns)
    # Any column works for counting rows per group; use the first grouping key.
    counts = grouped[columns[0]].agg([len])
    counts = counts.reset_index()
    # DataFrame.sort() was removed in pandas 0.20; sort_values() is the
    # supported equivalent.
    counts = counts.sort_values('len', ascending=False)
    return counts
def GetFiles(location):
    """Read every .tsv file in *location* into one concatenated DataFrame.

    Each row is tagged with its source file name ('File') and the species
    name parsed from that file name ('SP', e.g. 'Homo_sapiens').

    Parameters
    ----------
    location : str
        Directory path; expected to end with a path separator, because file
        names are appended to it directly (original behavior, preserved).

    Returns
    -------
    pandas.DataFrame
        Concatenation of all .tsv files found.
    """
    # List the directory directly instead of os.chdir() + os.listdir("."):
    # the original mutated the process-wide working directory as a side
    # effect of what is otherwise a pure read.
    FileNames = [f for f in os.listdir(location) if f.endswith(".tsv")]
    df_list = []
    for fname in FileNames:
        loc_file = location + fname
        # read_table is deprecated; read_csv(sep='\t') is the equivalent.
        df = pd.read_csv(loc_file, sep='\t')
        # Tag record to file name
        df['File'] = fname
        # Extract species name: the leading 'Genus_species' part of the file
        # name. expand=False keeps a Series (modern pandas returns a
        # DataFrame by default for extract()).
        df['SP'] = df.File.str.extract(r'(^[A-Za-z]+_[A-Za-z]+)', expand=False)
        df_list.append(df)
    return pd.concat(df_list)
# ## Header of present/absent file
# Your path will be different, please modify the path below.
location = r'/Users/csanchez/Dropbox/Proyectos_Activos/bgee/jupyter/Complete/'
present_df = GetFiles(location)
present_df.head()
# ## Header of over/under expression across anatomy files
# Your path will be different, please modify the path below.
location = r'/Users/csanchez/Dropbox/Proyectos_Activos/bgee/jupyter/Anatomy/'
anatomy_df = GetFiles(location)
anatomy_df.head()
# ## Header of over/under expression across life stages files
# Your path will be different, please modify the path below.
location = r'/Users/csanchez/Dropbox/Proyectos_Activos/bgee/jupyter/Life/'
life_df = GetFiles(location)
life_df.head()
# ## Total Number of genes
# get the total number of unique genes in bgee database
present_genes_id = present_df['Gene ID'].unique()
anatomy_genes_id = anatomy_df['Gene ID'].unique()
life_genes_id = life_df['Gene ID'].unique()
genes_id_df = pd.DataFrame({'files_group' : pd.Series(['Presence/Absence of expression', 'Over/Under-expression across anatomy', 'Over-/Under-expression across life stages']),
'gene_number' : pd.Series([len(present_genes_id), len(anatomy_genes_id), len(life_genes_id)])})
genes_id_df.index = genes_id_df['files_group']
genes_id_df.plot(kind='bar')
# ## Number of genes per species
grouped_present_df = groupby_counts(present_df, ['SP', 'Gene ID'])
grouped_anatomy_df = groupby_counts(anatomy_df, ['SP', 'Gene ID'])
grouped_life_df = groupby_counts(life_df, ['SP', 'Gene ID'])
df1 = pd.DataFrame(grouped_present_df['SP'].value_counts(), columns=['present'])
df2 = pd.DataFrame(grouped_anatomy_df['SP'].value_counts(), columns=['anatomy'])
df3 = pd.DataFrame(grouped_life_df['SP'].value_counts(), columns=['life'])
df = pd.concat([df1, df2, df3], axis = 1).fillna(0)
df['order'] = df['present'] + df['anatomy'] + df['life']
df = df.sort('order', ascending=False)
IPython.core.pylabtools.figsize(3, 5)
cmap = seaborn.cubehelix_palette(15, start=2, rot=0, gamma=1, hue=1, light=0.95, dark=0.3, as_cmap=True)
seaborn.heatmap(df.drop(['order'], axis=1), cmap=cmap)
# ## Number of anatomical structure per species
grouped_present_df = groupby_counts(present_df, ['SP', 'Anatomical entity ID'])
grouped_anatomy_df = groupby_counts(anatomy_df, ['SP', 'Anatomical entity ID'])
grouped_life_df = groupby_counts(life_df, ['SP', 'Anatomical entity ID'])
df1 = pd.DataFrame(grouped_present_df['SP'].value_counts(), columns=['present'])
df2 = pd.DataFrame(grouped_anatomy_df['SP'].value_counts(), columns=['anatomy'])
df3 = pd.DataFrame(grouped_life_df['SP'].value_counts(), columns=['life'])
df = pd.concat([df1, df2, df3], axis = 1).fillna(0)
df['order'] = df['present'] + df['anatomy'] + df['life']
df = df.sort('order', ascending=False)
IPython.core.pylabtools.figsize(3, 5)
cmap = seaborn.cubehelix_palette(15, start=2, rot=0, gamma=1, hue=1, light=0.95, dark=0.3, as_cmap=True)
seaborn.heatmap(df.drop(['order'], axis=1), cmap=cmap)
# ## Number of develomental stages per species
grouped_present_df = groupby_counts(present_df, ['SP', 'Developmental stage ID'])
grouped_anatomy_df = groupby_counts(anatomy_df, ['SP', 'Developmental stage ID'])
grouped_life_df = groupby_counts(life_df, ['SP', 'Developmental stage ID'])
df1 = pd.DataFrame(grouped_present_df['SP'].value_counts(), columns=['present'])
df2 = pd.DataFrame(grouped_anatomy_df['SP'].value_counts(), columns=['anatomy'])
df3 = pd.DataFrame(grouped_life_df['SP'].value_counts(), columns=['life'])
df = pd.concat([df1, df2, df3], axis = 1).fillna(0)
df['order'] = df['present'] + df['anatomy'] + df['life']
df = df.sort('order', ascending=False)
IPython.core.pylabtools.figsize(3, 5)
cmap = seaborn.cubehelix_palette(15, start=2, rot=0, gamma=1, hue=1, light=0.95, dark=0.3, as_cmap=True)
seaborn.heatmap(df.drop(['order'], axis=1), cmap=cmap)
# ## Number of genes with diferent kind of expresion per species
# +
grouped_df = groupby_counts(present_df, ['SP', 'Expression', 'Gene ID'])
grouped_df = groupby_counts(grouped_df, ['SP', 'Expression'])
df1 = grouped_df.pivot('SP', 'Expression', 'len').fillna(0)
grouped_df = groupby_counts(anatomy_df, ['SP', 'Differential expression', 'Gene ID'])
grouped_df = groupby_counts(grouped_df, ['SP', 'Differential expression'])
df2 = grouped_df.pivot('SP', 'Differential expression', 'len').fillna(0)
grouped_df = groupby_counts(life_df, ['SP', 'Differential expression', 'Gene ID'])
grouped_df = groupby_counts(grouped_df, ['SP', 'Differential expression'])
df3 = grouped_df.pivot('SP', 'Differential expression', 'len').fillna(0)
df = pd.concat([df1, df2, df3], axis=1, keys=['Presents Files', 'Anatomy Files', 'Life Files']).fillna(0)
df['order'] = df['Presents Files']['absent'] + df['Presents Files']['present'] + df['Anatomy Files']['no diff expression'] + df['Anatomy Files']['over-expression'] + df['Anatomy Files']['under-expression'] + df['Life Files']['no diff expression'] + df['Life Files']['over-expression'] + df['Life Files']['under-expression']
df = df.sort('order', ascending=False)
df = df.drop([('Presents Files', 'high ambiguity'), ('Presents Files', 'low ambiguity')], axis=1)
df = df.drop([('Anatomy Files', 'high ambiguity'), ('Anatomy Files', 'low ambiguity')], axis=1)
#df = df.drop([('Life Files', 'high ambiguity'), ('Life Files', 'low ambiguity')], axis=1)
IPython.core.pylabtools.figsize(5, 5)
cmap = seaborn.cubehelix_palette(15, start=2, rot=0, gamma=1, hue=1, light=0.95, dark=0.3, as_cmap=True)
seaborn.heatmap(df.drop(['order'], axis=1), cmap=cmap);
# -
# ## Number of conditions per species
# +
grouped_df = groupby_counts(present_df, ['SP', 'Developmental stage name', 'Anatomical entity name'])
df1 = pd.DataFrame(grouped_df['SP'].value_counts(), columns=['present'])
grouped_df = groupby_counts(anatomy_df, ['SP', 'Developmental stage name', 'Anatomical entity name'])
df2 = pd.DataFrame(grouped_df['SP'].value_counts(), columns=['anatomy'])
grouped_df = groupby_counts(life_df, ['SP', 'Developmental stage name', 'Anatomical entity name'])
df3 = pd.DataFrame(grouped_df['SP'].value_counts(), columns=['life'])
df = pd.concat([df1, df2, df3], axis = 1).fillna(0)
df['order'] = df['present'] + df['anatomy'] + df['life']
df = df.sort('order', ascending=False)
IPython.core.pylabtools.figsize(3, 5)
cmap = seaborn.cubehelix_palette(15, start=2, rot=0, gamma=1, hue=1, light=0.95, dark=0.3, as_cmap=True)
seaborn.heatmap(df.drop(['order'], axis=1), cmap=cmap)
|
pipeline/download_files/analyze_all_files.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# import dependencies for sql
import sqlalchemy
from sqlalchemy.ext.automap import automap_base
from sqlalchemy.orm import Session
from sqlalchemy import create_engine, inspect

# NOTE(review): the leading dot in '.hawaii.sqlite' looks unintentional —
# confirm the database file is not simply 'hawaii.sqlite'.
engine = create_engine("sqlite:///.hawaii.sqlite")
Base = automap_base()
Base.prepare(engine, reflect=True)
import pandas as pd
Base.classes.keys()
measurements_df = pd.read_csv('clean_measurements.csv')
stations_df = pd.read_csv('clean_stations.csv')
measurements_df['date'] = pd.to_datetime(measurements_df['date'],format="%Y-%m-%d")
measurements_df.info()
# The reflected table is spelled 'measurments' (sic) in the database schema.
Measurements = Base.classes.measurments
session = Session(engine)
measurements_df['tobs'][0:5]
# +
# Insert every measurement row; a single commit after the loop keeps this fast.
for i in range(len(measurements_df['station'])):
    measure = Measurements(station=measurements_df['station'][i], date=measurements_df['date'][i],
                           prcp=measurements_df['prcp'][i], tobs=measurements_df['tobs'][i])
    session.add(measure)
session.commit()
# -
# BUG FIX: the table name was written 'measurment' here, missing the trailing
# 's' used everywhere else in this notebook.
engine.execute('select * from measurments').fetchall()
session.new
Station = Base.classes.station
stations_df.info()
# +
# BUG FIX: latitude was previously filled from the 'longitude' column and the
# longitude field was never populated at all.
for i in range(len(stations_df['station'])):
    station = Station(station=stations_df['station'][i], name=stations_df['name'][i],
                      latitude=stations_df['latitude'][i], longitude=stations_df['longitude'][i],
                      elevation=stations_df['elevation'][i])
    session.add(station)
session.commit()
# -
query = "SELECT * FROM measurments"
query_df = pd.read_sql_query(query, session.bind)
query_df
query = "select date, max(prcp) from measurments group by date order by date desc limit 365"
precipitation_df = pd.read_sql_query(query, session.bind)
precipitation_df.head()
import numpy as np
import matplotlib.pyplot as plt
plt.bar(range(365), precipitation_df['max(prcp)'])
plt.xticks(range(365), precipitation_df['date'], rotation=45)
# BUG FIX: corrected the 'Percipitation' spelling in the displayed title.
plt.title('Precipitation From the Last Year')
plt.xlabel('Date')
plt.ylabel('Inches')
plt.savefig('Year_Prcp')
query = "select * from station"
query_df = pd.read_sql_query(query, session.bind)
query_df
query = "select count(station) from station"
query_df = pd.read_sql_query(query, session.bind)
query_df
print(query_df)
query = "select station.station, count(*)\
from measurments join station\
on measurments.station = station.station\
group by station.station order by count(*) desc"
query_df = pd.read_sql_query(query, session.bind)
query_df
|
.ipynb_checkpoints/climate_analysis-checkpoint.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
from multiprocessing import cpu_count
from pathlib import Path
from warnings import warn
import matplotlib as mpl
import matplotlib.pyplot as plt
import numpy as np
from mpl_toolkits.axes_grid1.inset_locator import InsetPosition, inset_axes, mark_inset
from scipy.interpolate import RegularGridInterpolator, UnivariateSpline
from scipy.optimize import fmin
from tqdm.auto import tqdm
from bounded_rand_walkers.cpp import (
bound_map,
freehand,
generate_data,
get_binned_2D,
get_binned_data,
get_cached_filename,
get_max_step,
)
from bounded_rand_walkers.shaper_generation import gen_rad_shaper_exact
from bounded_rand_walkers.utils import cache_dir, get_centres, match_ref, normalise
# Apply the repository's Matplotlib style and make sure the directory for
# saved figures exists before any plots are written.
mpl.rc_file("matplotlibrc")
Path("plots").mkdir(exist_ok=True)
# +
# Generate a single set of data.
# Keyword arguments forwarded to the 'freehand' step-size pdf.
pdf_kwargs = dict(width=2.0)
def get_raw_f_i(r):
    """Calculate f_i at given radii.

    Evaluates the 'freehand' pdf along the positive x-axis (y = 0), one
    value per radius in ``r``.
    """
    samples = []
    for radius in r:
        samples.append(freehand([radius, 0], **pdf_kwargs))
    return np.array(samples)
# Name of the bounding region used throughout this notebook.
bound_name = "circle"
n_bins = 300 # Nr. of x, y bins.
n_raw_shaper = 10000 # Number of data points used to construct the shaper spline.
n_rad = 200 # Number of radial bins.
# Arguments forwarded to the data generation / caching helpers below.
data_kwargs = dict(
    cache_dir=cache_dir,
    samples=int(1e7),
    seed=np.arange(10),
    blocks=2,
    bound_name=bound_name,
    pdf_name="freehand",
    **pdf_kwargs
)
# -
# #### Use the analytical f_t distribution to inform the bin sizes.
#
# Bins will be created from 0 all the way up to the maximum expected bin size.
# +
max_step = get_max_step(bound_name)
# Analytical f_i cdf spline.
cdf_f_i_radii = np.linspace(0, 2 * max_step, 1000000)
cdf_f_i_analytical = np.cumsum(get_raw_f_i(cdf_f_i_radii))
# Normalise so the cdf ends at exactly 1.
cdf_f_i_analytical /= cdf_f_i_analytical[-1]
cdf_f_i_spl = UnivariateSpline(cdf_f_i_radii, cdf_f_i_analytical, s=0, ext="const")
# 1D shaper calculation.
shaper_radii = np.linspace(0, max_step, n_raw_shaper)
raw_r_shaper = gen_rad_shaper_exact(shaper_radii, vertices=bound_name)
# Using a spline, calculate the shaper at further points.
# s=0 interpolates through every sample; ext="zeros" returns 0 outside the domain.
shaper_spl = UnivariateSpline(shaper_radii, raw_r_shaper, s=0, ext="zeros")
# Generate a spline for the 'cumulative' shaper to estimate average values across bins.
cumsum_shaper = np.cumsum(raw_r_shaper)
cumsum_shaper /= cumsum_shaper[-1]
cum_shaper_spl = UnivariateSpline(shaper_radii, cumsum_shaper, s=0, ext="const")
# +
n_shaper_spl = 1000 # Number of points for the spline plots.
fig, axes = plt.subplots(1, 2, figsize=(6, 2.52))
# Left panel: sampled shaper values vs. the interpolating spline.
ax = axes[0]
ax.plot(shaper_radii, raw_r_shaper, marker="x", label="sampled", alpha=0.6)
spl_shaper_radii = np.linspace(0, max_step, n_shaper_spl)
ax.plot(spl_shaper_radii, shaper_spl(spl_shaper_radii), label="spline")
ax.set_xlabel(r"$\ell$")
ax.set_ylabel("shaper")
# Inset axes zooming in on the tail just below the maximum step size.
ax2 = plt.axes([0, 0, 1, 1])
# Start of the inset region (and up until the maximum possible step size).
x_start = 1.995
ax2.set_xticks([])
ax2.set_yticks([])
sel = (x_start <= shaper_radii) & (shaper_radii <= max_step)
ax2.plot(shaper_radii[sel], raw_r_shaper[sel], marker="x")
sel_rad = np.linspace(x_start, max_step, 20)
ax2.plot(sel_rad, shaper_spl(sel_rad))
ip = InsetPosition(ax, [0.45, 0.525, 0.51, 0.43])
ax2.set_axes_locator(ip)
mark_inset(ax, ax2, loc1=3, loc2=4, fc="none", ec="0.5")
ax.legend(loc=3)
# Right panel: cumulative shaper.
ax = axes[1]
ax.plot(spl_shaper_radii, cum_shaper_spl(spl_shaper_radii))
ax.set_ylabel("cumulative shaper")
ax.set_xlabel(r"$\ell$")
plt.subplots_adjust(wspace=0.3)
# -
# #### Approximate the CDF
# +
# Create a function to calculate the analytical transformed f_t.
def get_f_t_r_analytical(r):
    """Analytical transformed distribution f_t evaluated at radii ``r``."""
    radial_weight = shaper_spl(r) * r
    return get_raw_f_i(r) * radial_weight
# Take many samples in order to calculate the cdf.
cdf_r = np.linspace(0, max_step, 1000000)
cdf_t_r_analytical = np.cumsum(get_f_t_r_analytical(cdf_r))
# Normalise so the cdf ends at exactly 1.
cdf_t_r_analytical /= cdf_t_r_analytical[-1]
# Use spline interpolation to calculate the CDF at arbitrary positions based on the above.
cdf_f_t_spl = UnivariateSpline(cdf_r, cdf_t_r_analytical, s=0, ext="const")
# Plot the transformed pdf (left) and its CDF (right).
fig, axes = plt.subplots(1, 2, figsize=(6, 2.5))
plot_ls = np.linspace(0, max_step, 1000)
axes[0].plot(plot_ls, get_f_t_r_analytical(plot_ls))
axes[0].set_xlabel(r"$\ell$")
axes[0].set_ylabel(r"$f_t$")
axes[1].plot(plot_ls, cdf_f_t_spl(plot_ls))
axes[1].set_xlabel(r"$\ell$")
axes[1].set_ylabel(r"CDF ($f_t$)")
fig.tight_layout()
# -
# #### Generate numerics
# +
# Reuse cached simulation output when present; otherwise run the (expensive)
# data generation across all available CPU cores.
filenames = get_cached_filename(squeeze=False, **data_kwargs)
if not all(p.is_file() for p in filenames):
    generate_data(
        squeeze=False, max_workers=cpu_count(), cache_only=True, **data_kwargs
    )
(
    g_x_edges,
    g_y_edges,
    g_x_centres,
    g_y_centres,
    f_t_x_edges,
    f_t_y_edges,
    f_t_x_centres,
    f_t_y_centres,
    f_t_r_edges,
    f_t_r_centres,
    g_numerical,
    f_t_numerical,
    f_t_r_numerical,
) = get_binned_data(
    filenames=filenames,
    n_bins=n_bins,
    g_bounds=(-0.5, 0.5),
    f_bounds=(-1, 1),
    f_t_r_edges=np.linspace(0, max_step, n_rad),
)
# +
# Select valid elements.
# Only radial bins where the shaper is strictly positive can be divided by below.
r_shaper = np.diff(cum_shaper_spl(f_t_r_edges))
valid_r = r_shaper > 0
f_i_r_num_radii = f_t_r_centres[valid_r].copy()
# Analytical calculations.
f_i_r_analytical = np.diff(cdf_f_i_spl(f_t_r_edges))
# 1D analytical transformed distribution.
f_t_r_analytical = f_i_r_analytical * r_shaper
# Analytical transformed.
f_i_r_analytical_trans = f_t_r_analytical[valid_r] / r_shaper[valid_r]
# NOTE(review): f_t_norm appears to be unused; the identical expression is
# recomputed as f_t_r_analytical_norm just below.
f_t_norm = normalise(f_t_r_edges, f_t_r_analytical * f_t_r_centres)
# Numerics calculations.
# 1D reconstruction of the intrinsic pdf as a function of step length only.
f_i_r_num = f_t_r_numerical[valid_r] / r_shaper[valid_r]
# +
# Analytical normed f_t.
# Reference for all f_t plots.
f_t_r_analytical_norm = normalise(f_t_r_edges, f_t_r_analytical * f_t_r_centres)
f_t_num_factor = match_ref(
    x=f_t_r_centres,
    y=f_t_r_numerical,
    ref_x=f_t_r_centres,
    ref_y=f_t_r_analytical_norm,
    return_factor=True,
)
# +
# Analytical transformed, normed f_i.
# This is the reference for all f_i plots (albeit over a restricted interval).
f_i_r_analytical_trans_norm = normalise(
    f_i_r_num_radii, f_i_r_analytical_trans * f_i_r_num_radii
)
# Restrict the reference to radii below the point where the f_t CDF reaches 0.8
# (located numerically via fmin).
f_i_ref_radii_sel = f_i_r_num_radii < fmin(
    lambda x: (cdf_f_t_spl(x[0]) - 0.8) ** 2, [0.7 * max_step], disp=False
)
f_i_ref_radii = f_i_r_num_radii[f_i_ref_radii_sel]
f_i_ref = f_i_r_analytical_trans_norm[f_i_ref_radii_sel]
# Higher samples numerical scaling factor.
f_i_num_factor = match_ref(
    x=f_i_r_num_radii,
    y=f_i_r_num,
    ref_x=f_i_ref_radii,
    ref_y=f_i_ref,
    return_factor=True,
)
# Analytical scaling factor.
f_i_analytical_factor = match_ref(
    x=f_t_r_centres,
    y=f_i_r_analytical * f_t_r_centres,
    ref_x=f_i_ref_radii,
    ref_y=f_i_ref,
    return_factor=True,
)
# +
# Final figure: f_t (left) and the reconstructed f_i (right), comparing
# analytical and numerical results.
bin_width = np.mean(np.diff(f_t_r_edges))
analytical_f_i_edges = np.arange(0, 1.05 * max_step + bin_width, bin_width)
analytical_f_i_centres = get_centres(analytical_f_i_edges)
analytical_f_i_args = (
    analytical_f_i_centres,
    f_i_analytical_factor
    * np.diff(cdf_f_i_spl(analytical_f_i_edges))
    * analytical_f_i_centres,
)
analytical_f_i_kwargs = dict(
    label=r"Analyt. $\tilde{f}_i(\ell)$",
    zorder=1,
    linestyle="-",
    c="C2",
)
# Plot f_t and f_i.
fig, axes = plt.subplots(1, 2, figsize=(6.3, 2.52))
# Plot f_t.
ax = axes[0]
ax.plot(
    f_t_r_centres,
    f_t_r_analytical_norm,
    label="Analytical",
    zorder=1,
)
ax.plot(
    f_t_r_centres,
    f_t_num_factor * f_t_r_numerical,
    label="Numerical",
    zorder=2,
    linestyle="--",
    c="C1",
)
# Plot f_i.
ax = axes[1]
(t1,) = ax.plot(
    f_i_r_num_radii,
    f_i_r_analytical_trans_norm,
    # label="Transformed Analytical",
    zorder=2,
    linestyle="--",
    c="C0",
)
(t2,) = ax.plot(
    *analytical_f_i_args,
    **analytical_f_i_kwargs,
)
(b1,) = ax.plot(
    f_i_r_num_radii,
    f_i_num_factor * f_i_r_num,
    # label="Numerical",
    zorder=1,
    c="C1",
    linestyle="--",
)
# Fix y-axis limits so they do not blow up due to the variability of the below.
ylim = ax.get_ylim()
ax.set_ylim(*ylim)
ax.autoscale(False)
# Labels.
axes[0].set_xlabel(r"$\ell$")
axes[0].set_ylabel(r"$\tilde{f}_t(\ell)$")
axes[1].set_xlabel(r"$\ell$")
axes[1].set_ylabel(r"$\tilde{f}_i(\ell)$", labelpad=-18)
# Grids and titles.
for ax, title in zip(axes, ["(a)", "(b)"]):
    ax.text(0, 1.04, title, transform=ax.transAxes)
ylim = (-0.01, 1.5)
# Add vertical lines to show the maximum step size.
max_step_kwargs = dict(
    linestyle="--",
    alpha=0.6,
    color="k",
    # Uncomment to show legend item for this line.
    # label=r"Max. $\ell\ (\sqrt{2})$",
)
for ax in axes:
    ax.vlines(max_step, -1, 3, **max_step_kwargs)
# Give both panels identical x limits with a small margin.
x_extent = np.max(analytical_f_i_edges)
margin = x_extent * 0.015
for ax in axes:
    ax.set_ylim(*ylim)
    ax.set_xlim(-margin, x_extent + margin)
axes[1].set_yticks(np.arange(0, 1.2, 0.2))
axes[1].set_yticklabels(["0.0", "", "", "", "", "1.0"])
# Legends.
axes[0].legend(loc="upper left")
l1 = axes[1].legend([t1, t2], ["Transf. Analytical", "Analytical"], loc=(0.34, 0.03))
l2 = axes[1].legend([b1], ["Numerical"], loc="upper left")
axes[1].add_artist(l1)
# Move the two subplots closer to each other.
fig.tight_layout()
# Finally, save into the 'plots' directory.
fig.savefig((Path("plots") / f"{bound_name}_reconstruction").with_suffix(".png"))
|
examples/reconstruction_circle.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
# + pycharm={"is_executing": false}
from bertviz import model_view
from transformers import DistilBertModel, DistilBertTokenizer, utils
# -
def show_model_view(model, tokenizer, text):
    """Render BertViz's interactive model view for *text*.

    Tokenizes the input, runs the model, and visualises the per-layer,
    per-head attention weights returned as the model's last output.
    """
    encoded = tokenizer.encode_plus(text, return_tensors='pt', add_special_tokens=True)
    token_ids = encoded['input_ids']
    # Attentions are the final element of the model output because the model
    # was loaded with output_attentions=True.
    attention = model(token_ids)[-1]
    readable_tokens = tokenizer.convert_ids_to_tokens(token_ids[0].tolist())
    model_view(attention, readable_tokens)
# + pycharm={"is_executing": false}
utils.logging.set_verbosity_error()  # Remove line to see warnings
model_version = 'distilbert-base-uncased'
do_lower_case = True
# output_attentions=True makes the model return attention weights, which
# show_model_view() above relies on.
model = DistilBertModel.from_pretrained(model_version, output_attentions=True)
tokenizer = DistilBertTokenizer.from_pretrained(model_version, do_lower_case=do_lower_case)
text = "The quick brown fox jumps over the lazy dogs"
show_model_view(model, tokenizer, text)
|
notebooks/model_view_distilbert.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Predicting Boston Housing Prices
#
# ## Updating a model using SageMaker
#
# _Deep Learning Nanodegree Program | Deployment_
#
# ---
#
# In this notebook, we will continue working with the [Boston Housing Dataset](https://www.cs.toronto.edu/~delve/data/boston/bostonDetail.html). Our goal in this notebook will be to train two different models and to use SageMaker to switch a deployed endpoint from using one model to the other. One of the benefits of using SageMaker to do this is that we can make the change without interrupting service. What this means is that we can continue sending data to the endpoint and at no point will that endpoint disappear.
#
# ## General Outline
#
# Typically, when using a notebook instance with SageMaker, you will proceed through the following steps. Of course, not every step will need to be done with each project. Also, there is quite a lot of room for variation in many of the steps, as you will see throughout these lessons.
#
# 1. Download or otherwise retrieve the data.
# 2. Process / Prepare the data.
# 3. Upload the processed data to S3.
# 4. Train a chosen model.
# 5. Test the trained model (typically using a batch transform job).
# 6. Deploy the trained model.
# 7. Use the deployed model.
#
# In this notebook we will be skipping step 5, testing the model. In addition, we will perform steps 4, 6 and 7 multiple times with different models.
# ## Step 0: Setting up the notebook
#
# We begin by setting up all of the necessary bits required to run our notebook. To start that means loading all of the Python modules we will need.
# +
# %matplotlib inline
import os
import numpy as np
import pandas as pd
from pprint import pprint
import matplotlib.pyplot as plt
from time import gmtime, strftime
from sklearn.datasets import load_boston
import sklearn.model_selection
# -
# In addition to the modules above, we need to import the various bits of SageMaker that we will be using.
# +
import sagemaker
from sagemaker import get_execution_role
from sagemaker.amazon.amazon_estimator import get_image_uri
from sagemaker.predictor import csv_serializer
# This is an object that represents the SageMaker session that we are currently operating in. This
# object contains some useful information that we will need to access later such as our region.
session = sagemaker.Session()
# This is an object that represents the IAM role that we are currently assigned. When we construct
# and launch the training job later we will need to tell it what IAM role it should have. Since our
# use case is relatively simple we will simply assign the training job the role we currently have.
role = get_execution_role()
# -
# ## Step 1: Downloading the data
#
# Fortunately, this dataset can be retrieved using sklearn and so this step is relatively straightforward.
# NOTE(review): sklearn.datasets.load_boston was deprecated in scikit-learn 1.0
# and removed in 1.2 -- running this cell requires an older scikit-learn.
boston = load_boston()
# ## Step 2: Preparing and splitting the data
#
# Given that this is clean tabular data, we don't need to do any processing. However, we do need to split the rows in the dataset up into train, test and validation sets.
# +
# First we package up the input data and the target variable (the median value) as pandas dataframes. This
# will make saving the data to a file a little easier later on.
X_bos_pd = pd.DataFrame(boston.data, columns=boston.feature_names)
Y_bos_pd = pd.DataFrame(boston.target)
# We split the dataset into 2/3 training and 1/3 testing sets.
X_train, X_test, Y_train, Y_test = sklearn.model_selection.train_test_split(X_bos_pd, Y_bos_pd, test_size=0.33)
# Then we split the training set further into 2/3 training and 1/3 validation sets.
X_train, X_val, Y_train, Y_val = sklearn.model_selection.train_test_split(X_train, Y_train, test_size=0.33)
# -
# ## Step 3: Uploading the training and validation files to S3
#
# When a training job is constructed using SageMaker, a container is executed which performs the training operation. This container is given access to data that is stored in S3. This means that we need to upload the data we want to use for training to S3. We can use the SageMaker API to do this and hide some of the details.
#
# ### Save the data locally
#
# First we need to create the train and validation csv files which we will then upload to S3.
# This is our local data directory. We need to make sure that it exists.
# Local directory where the train/validation CSVs are written before upload.
data_dir = '../data/boston'
# makedirs(..., exist_ok=True) is idempotent and avoids the check-then-create
# race of testing os.path.exists() first.
os.makedirs(data_dir, exist_ok=True)
# +
# We use pandas to save our train and validation data to csv files. Note that we make sure not to include header
# information or an index as this is required by the built in algorithms provided by Amazon. Also, it is assumed
# that the first entry in each row is the target variable.
pd.concat([Y_val, X_val], axis=1).to_csv(os.path.join(data_dir, 'validation.csv'), header=False, index=False)
pd.concat([Y_train, X_train], axis=1).to_csv(os.path.join(data_dir, 'train.csv'), header=False, index=False)
# -
# ### Upload to S3
#
# Since we are currently running inside of a SageMaker session, we can use the object which represents this session to upload our data to the 'default' S3 bucket. Note that it is good practice to provide a custom prefix (essentially an S3 folder) to make sure that you don't accidentally interfere with data uploaded from some other notebook or project.
# +
# S3 key prefix under which both csv files are stored in the default bucket.
prefix = 'boston-update-endpoints'
val_location = session.upload_data(os.path.join(data_dir, 'validation.csv'), key_prefix=prefix)
train_location = session.upload_data(os.path.join(data_dir, 'train.csv'), key_prefix=prefix)
# -
# -
# ## Step 4 (A): Train the XGBoost model
#
# Now that we have the training and validation data uploaded to S3, we can construct our XGBoost model and train it. We will be making use of the high level SageMaker API to do this which will make the resulting code a little easier to read at the cost of some flexibility.
#
# To construct an estimator, the object which we wish to train, we need to provide the location of a container which contains the training code. Since we are using a built in algorithm this container is provided by Amazon. However, the full name of the container is a bit lengthy and depends on the region that we are operating in. Fortunately, SageMaker provides a useful utility method called `get_image_uri` that constructs the image name for us.
#
# To use the `get_image_uri` method we need to provide it with our current region, which can be obtained from the session object, and the name of the algorithm we wish to use. In this notebook we will be using XGBoost however you could try another algorithm if you wish. The list of built in algorithms can be found in the list of [Common Parameters](https://docs.aws.amazon.com/sagemaker/latest/dg/sagemaker-algo-docker-registry-paths.html).
# +
# As stated above, we use this utility method to construct the image name for the training container.
# NOTE(review): get_image_uri and the train_instance_* parameters below are
# SageMaker Python SDK v1 APIs; SDK v2 renamed them (image_uris.retrieve,
# instance_count, instance_type, ...).
xgb_container = get_image_uri(session.boto_region_name, 'xgboost')
# Now that we know which container to use, we can construct the estimator object.
xgb = sagemaker.estimator.Estimator(xgb_container, # The name of the training container
                                    role,      # The IAM role to use (our current role in this case)
                                    train_instance_count=1, # The number of instances to use for training
                                    train_instance_type='ml.m4.xlarge', # The type of instance to use for training
                                    output_path='s3://{}/{}/output'.format(session.default_bucket(), prefix),
                                    # Where to save the output (the model artifacts)
                                    sagemaker_session=session) # The current SageMaker session
# -
# Before asking SageMaker to begin the training job, we should probably set any model specific hyperparameters. There are quite a few that can be set when using the XGBoost algorithm, below are just a few of them. If you would like to change the hyperparameters below or modify additional ones you can find additional information on the [XGBoost hyperparameter page](https://docs.aws.amazon.com/sagemaker/latest/dg/xgboost_hyperparameters.html)
xgb.set_hyperparameters(max_depth=5,
                        eta=0.2,
                        gamma=4,
                        min_child_weight=6,
                        subsample=0.8,
                        # NOTE(review): 'reg:linear' was renamed 'reg:squarederror'
                        # in newer XGBoost releases -- confirm the container's
                        # XGBoost version still accepts the old alias.
                        objective='reg:linear',
                        early_stopping_rounds=10,
                        num_round=200)
# Now that we have our estimator object completely set up, it is time to train it. To do this we make sure that SageMaker knows our input data is in csv format and then execute the `fit` method.
# +
# This is a wrapper around the location of our train and validation data, to make sure that SageMaker
# knows our data is in csv format.
s3_input_train = sagemaker.s3_input(s3_data=train_location, content_type='text/csv')
s3_input_validation = sagemaker.s3_input(s3_data=val_location, content_type='text/csv')
xgb.fit({'train': s3_input_train, 'validation': s3_input_validation})
# -
# ## Step 5: Test the trained model
#
# We will be skipping this step for now.
#
#
# ## Step 6 (A): Deploy the trained model
#
# Even though we used the high level approach to construct and train the XGBoost model, we will be using the lower level approach to deploy it. One of the reasons for this is so that we have additional control over how the endpoint is constructed. This will be a little more clear later on when we construct more advanced endpoints.
#
# ### Build the model
#
# Of course, before we can deploy the model, we need to first create it. The `fit` method that we used earlier created some model artifacts and we can use these to construct a model object.
# +
# Remember that a model needs to have a unique name
# NOTE(review): unlike the endpoint names below, there is no '-' separator
# before the timestamp here, giving names like 'boston-update-xgboost-model2023-...'.
xgb_model_name = "boston-update-xgboost-model" + strftime("%Y-%m-%d-%H-%M-%S", gmtime())
# We also need to tell SageMaker which container should be used for inference and where it should
# retrieve the model artifacts from. In our case, the xgboost container that we used for training
# can also be used for inference and the model artifacts come from the previous call to fit.
xgb_primary_container = {
    "Image": xgb_container,
    "ModelDataUrl": xgb.model_data
}
# And lastly we construct the SageMaker model
xgb_model_info = session.sagemaker_client.create_model(
    ModelName = xgb_model_name,
    ExecutionRoleArn = role,
    PrimaryContainer = xgb_primary_container)
# -
# ### Create the endpoint configuration
#
# Once we have a model we can start putting together the endpoint. Recall that to do this we need to first create an endpoint configuration, essentially the blueprint that SageMaker will use to build the endpoint itself.
# +
# As before, we need to give our endpoint configuration a name which should be unique
xgb_endpoint_config_name = "boston-update-xgboost-endpoint-config-" + strftime("%Y-%m-%d-%H-%M-%S", gmtime())
# And then we ask SageMaker to construct the endpoint configuration
xgb_endpoint_config_info = session.sagemaker_client.create_endpoint_config(
    EndpointConfigName = xgb_endpoint_config_name,
    ProductionVariants = [{
        "InstanceType": "ml.m4.xlarge",
        "InitialVariantWeight": 1,
        "InitialInstanceCount": 1,
        "ModelName": xgb_model_name,
        "VariantName": "XGB-Model"
    }])
# -
# ### Deploy the endpoint
#
# Now that the endpoint configuration has been created, we can ask SageMaker to build our endpoint.
#
# **Note:** This is a friendly (repeated) reminder that you are about to deploy an endpoint. Make sure that you shut it down once you've finished with it!
# +
# Again, we need a unique name for our endpoint
endpoint_name = "boston-update-endpoint-" + strftime("%Y-%m-%d-%H-%M-%S", gmtime())
# And then we can deploy our endpoint
endpoint_info = session.sagemaker_client.create_endpoint(
    EndpointName = endpoint_name,
    EndpointConfigName = xgb_endpoint_config_name)
# -
# wait_for_endpoint blocks until the endpoint leaves the 'Creating' state.
endpoint_dec = session.wait_for_endpoint(endpoint_name)
# ## Step 7 (A): Use the model
#
# Now that our model is trained and deployed we can send some test data to it and evaluate the results.
# The endpoint expects a single comma-separated feature row in the request body.
response = session.sagemaker_runtime_client.invoke_endpoint(
    EndpointName = endpoint_name,
    ContentType = 'text/csv',
    Body = ','.join(map(str, X_test.values[0])))
pprint(response)
result = response['Body'].read().decode("utf-8")
pprint(result)
Y_test.values[0]
# ## Shut down the endpoint
#
# Now that we know that the XGBoost endpoint works, we can shut it down. We will make use of it again later.
session.sagemaker_client.delete_endpoint(EndpointName = endpoint_name)
# ## Step 4 (B): Train the Linear model
#
# Suppose we are working in an environment where the XGBoost model that we trained earlier is becoming too costly. Perhaps the number of calls to our endpoint has increased and the length of time it takes to perform inference with the XGBoost model is becoming problematic.
#
# A possible solution might be to train a simpler model to see if it performs nearly as well. In our case, we will construct a linear model. The process of doing this is the same as for creating the XGBoost model that we created earlier, although there are different hyperparameters that we need to set.
# +
# Similar to the XGBoost model, we will use the utility method to construct the image name for the training container.
linear_container = get_image_uri(session.boto_region_name, 'linear-learner')
# Now that we know which container to use, we can construct the estimator object.
linear = sagemaker.estimator.Estimator(linear_container, # The name of the training container
                                       role, # The IAM role to use (our current role in this case)
                                       train_instance_count=1, # The number of instances to use for training
                                       train_instance_type='ml.m4.xlarge', # The type of instance to use for training
                                       output_path='s3://{}/{}/output'.format(session.default_bucket(), prefix),
                                       # Where to save the output (the model artifacts)
                                       sagemaker_session=session) # The current SageMaker session
# -
# Before asking SageMaker to train our model, we need to set some hyperparameters. In this case we will be using a linear model so the number of hyperparameters we need to set is much fewer. For more details see the [Linear model hyperparameter page](https://docs.aws.amazon.com/sagemaker/latest/dg/ll_hyperparameters.html)
linear.set_hyperparameters(feature_dim=13, # Our data has 13 feature columns
                           predictor_type='regressor', # We wish to create a regression model
                           mini_batch_size=200) # Here we set how many samples to look at in each iteration
# Now that the hyperparameters have been set, we can ask SageMaker to fit the linear model to our data.
# Reuses the same csv-wrapped S3 inputs created for the XGBoost training job.
linear.fit({'train': s3_input_train, 'validation': s3_input_validation})
# ## Step 6 (B): Deploy the trained model
#
# Similar to the XGBoost model, now that we've fit the model we need to deploy it. Also like the XGBoost model, we will use the lower level approach so that we have more control over the endpoint that gets created.
#
# ### Build the model
#
# Of course, before we can deploy the model, we need to first create it. The `fit` method that we used earlier created some model artifacts and we can use these to construct a model object.
# +
# First, we create a unique model name
linear_model_name = "boston-update-linear-model" + strftime("%Y-%m-%d-%H-%M-%S", gmtime())
# We also need to tell SageMaker which container should be used for inference and where it should
# retrieve the model artifacts from. In our case, the linear-learner container that we used for training
# can also be used for inference.
linear_primary_container = {
    "Image": linear_container,
    "ModelDataUrl": linear.model_data
}
# And lastly we construct the SageMaker model
linear_model_info = session.sagemaker_client.create_model(
    ModelName = linear_model_name,
    ExecutionRoleArn = role,
    PrimaryContainer = linear_primary_container)
# -
# ### Create the endpoint configuration
#
# Once we have the model we can start putting together the endpoint by creating an endpoint configuration.
# +
# As before, we need to give our endpoint configuration a name which should be unique
linear_endpoint_config_name = "boston-linear-endpoint-config-" + strftime("%Y-%m-%d-%H-%M-%S", gmtime())
# And then we ask SageMaker to construct the endpoint configuration
linear_endpoint_config_info = session.sagemaker_client.create_endpoint_config(
    EndpointConfigName = linear_endpoint_config_name,
    ProductionVariants = [{
        "InstanceType": "ml.m4.xlarge",
        "InitialVariantWeight": 1,
        "InitialInstanceCount": 1,
        "ModelName": linear_model_name,
        "VariantName": "Linear-Model"
    }])
# -
# ### Deploy the endpoint
#
# Now that the endpoint configuration has been created, we can ask SageMaker to build our endpoint.
#
# **Note:** This is a friendly (repeated) reminder that you are about to deploy an endpoint. Make sure that you shut it down once you've finished with it!
# +
# Again, we need a unique name for our endpoint
endpoint_name = "boston-update-endpoint-" + strftime("%Y-%m-%d-%H-%M-%S", gmtime())
# And then we can deploy our endpoint
endpoint_info = session.sagemaker_client.create_endpoint(
    EndpointName = endpoint_name,
    EndpointConfigName = linear_endpoint_config_name)
# -
# wait_for_endpoint blocks until the endpoint leaves the 'Creating' state.
endpoint_dec = session.wait_for_endpoint(endpoint_name)
# ## Step 7 (B): Use the model
#
# Just like with the XGBoost model, we will send some data to our endpoint to make sure that it is working properly. An important note is that the output format for the linear model is different from the XGBoost model.
response = session.sagemaker_runtime_client.invoke_endpoint(
    EndpointName = endpoint_name,
    ContentType = 'text/csv',
    Body = ','.join(map(str, X_test.values[0])))
pprint(response)
result = response['Body'].read().decode("utf-8")
pprint(result)
Y_test.values[0]
# ## Shut down the endpoint
#
# Now that we know that the Linear model's endpoint works, we can shut it down.
session.sagemaker_client.delete_endpoint(EndpointName = endpoint_name)
# ## Step 6 (C): Deploy a combined model
#
# So far we've constructed two separate models which we could deploy and use. Before we talk about how we can change a deployed endpoint from one configuration to another, let's consider a slightly different situation. Suppose that before we switch from using only the XGBoost model to only the Linear model, we first want to do something like an A-B test, where we send some of the incoming data to the XGBoost model and some of the data to the Linear model.
#
# Fortunately, SageMaker provides this functionality. And to actually get SageMaker to do this for us is not too different from deploying a model in the way that we've already done. The only difference is that we need to list more than one model in the production variants parameter of the endpoint configuration.
#
# A reasonable question to ask is, how much data is sent to each of the models that I list in the production variants parameter? The answer is that it depends on the weight set for each model.
#
# Suppose that we have $k$ models listed in the production variants and that each model $i$ is assigned the weight $w_i$. Then each model $i$ will receive $w_i / W$ of the traffic where $W = \sum_{i} w_i$.
#
# In our case, since we have two models, the linear model and the XGBoost model, and each model has weight 1, we see that each model will get 1 / (1 + 1) = 1/2 of the data sent to the endpoint.
# +
# As before, we need to give our endpoint configuration a name which should be
# unique; the timestamp suffix keeps repeated runs from colliding.
combined_endpoint_config_name = "boston-combined-endpoint-config-" + strftime("%Y-%m-%d-%H-%M-%S", gmtime())
# And then we ask SageMaker to construct the endpoint configuration.
# Both variants carry InitialVariantWeight=1, so incoming traffic is split
# 50/50 between the linear model and the XGBoost model (w_i / sum(w)).
combined_endpoint_config_info = session.sagemaker_client.create_endpoint_config(
    EndpointConfigName = combined_endpoint_config_name,
    ProductionVariants = [
        { # First we include the linear model
            "InstanceType": "ml.m4.xlarge",
            "InitialVariantWeight": 1,
            "InitialInstanceCount": 1,
            "ModelName": linear_model_name,
            "VariantName": "Linear-Model"
        }, { # And next we include the xgb model
            "InstanceType": "ml.m4.xlarge",
            "InitialVariantWeight": 1,
            "InitialInstanceCount": 1,
            "ModelName": xgb_model_name,
            "VariantName": "XGB-Model"
        }])
# -
# Now that we've created the endpoint configuration, we can ask SageMaker to construct the endpoint.
#
# **Note:** This is a friendly (repeated) reminder that you are about to deploy an endpoint. Make sure that you shut it down once you've finished with it!
# +
# Again, we need a unique name for our endpoint (timestamp avoids collisions).
endpoint_name = "boston-update-endpoint-" + strftime("%Y-%m-%d-%H-%M-%S", gmtime())
# And then we can deploy our endpoint using the combined (A-B) configuration.
endpoint_info = session.sagemaker_client.create_endpoint(
    EndpointName = endpoint_name,
    EndpointConfigName = combined_endpoint_config_name)
# -
# Block until SageMaker reports the endpoint 'InService' (can take minutes).
endpoint_dec = session.wait_for_endpoint(endpoint_name)
# ## Step 7 (C): Use the model
#
# Now that we've constructed an endpoint which sends data to both the XGBoost model and the linear model we can send some data to the endpoint and see what sort of results we get back.
# Send a single test row (CSV-encoded) to the combined endpoint and inspect
# the raw response object.
payload = ','.join(map(str, X_test.values[0]))
response = session.sagemaker_runtime_client.invoke_endpoint(
    EndpointName=endpoint_name,
    ContentType='text/csv',
    Body=payload)
pprint(response)
# One response doesn't reveal the 50/50 traffic split, so fire several
# requests and print each prediction next to its ground-truth value.
for rec in range(10):
    payload = ','.join(str(v) for v in X_test.values[rec])
    response = session.sagemaker_runtime_client.invoke_endpoint(
        EndpointName=endpoint_name,
        ContentType='text/csv',
        Body=payload)
    pprint(response)
    result = response['Body'].read().decode("utf-8")
    print(result)
    print(Y_test.values[rec])
# If at some point we aren't sure about the properties of a deployed endpoint, we can use the `describe_endpoint` function to get SageMaker to return a description of the deployed endpoint.
# Confirm the deployment: ProductionVariants should list both models.
pprint(session.sagemaker_client.describe_endpoint(EndpointName=endpoint_name))
# ## Updating an Endpoint
#
# Now suppose that we've done our A-B test and the new linear model is working well enough. What we'd like to do now is to switch our endpoint from sending data to both the XGBoost model and the linear model to sending data only to the linear model.
#
# Of course, we don't really want to shut down the endpoint to do this as doing so would interrupt service to whoever depends on our endpoint. Instead, we can ask SageMaker to **update** an endpoint to a new endpoint configuration.
#
# What is actually happening is that SageMaker will set up a new endpoint with the new characteristics. Once this new endpoint is running, SageMaker will switch the old endpoint so that it now points at the newly deployed model, making sure that this happens seamlessly in the background.
# Switch in place to the linear-only configuration; the old configuration
# keeps serving until the new one is ready (zero downtime).
session.sagemaker_client.update_endpoint(EndpointName=endpoint_name, EndpointConfigName=linear_endpoint_config_name)
# To get a glimpse at what is going on, we can ask SageMaker to describe our in-use endpoint now, before the update process has completed. When we do so, we can see that the in-use endpoint still has the same characteristics it had before.
pprint(session.sagemaker_client.describe_endpoint(EndpointName=endpoint_name))
# If we now wait for the update process to complete, and then ask SageMaker to describe the endpoint, it will return the characteristics of the new endpoint configuration.
endpoint_dec = session.wait_for_endpoint(endpoint_name)
pprint(session.sagemaker_client.describe_endpoint(EndpointName=endpoint_name))
# ## Shut down the endpoint
#
# Now that we've finished, we need to make sure to shut down the endpoint.
# Delete the endpoint so we stop incurring hosting costs.
session.sagemaker_client.delete_endpoint(EndpointName = endpoint_name)
# ## Optional: Clean up
#
# The default notebook instance on SageMaker doesn't have a lot of excess disk space available. As you continue to complete and execute notebooks you will eventually fill up this disk space, leading to errors which can be difficult to diagnose. Once you are completely finished using a notebook it is a good idea to remove the files that you created along the way. Of course, you can do this from the terminal or from the notebook hub if you would like. The cell below contains some commands to clean up the created files from within the notebook.
# +
# First we will remove all of the files contained in the data_dir directory
# !rm $data_dir/*
# And then we delete the directory itself
# !rmdir $data_dir
# -
|
Tutorials/Boston Housing - Updating an Endpoint.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Introduction
#
# This project is intended to research college admission statistics and determine whether there is a correlation between selectivity and graduate earnings. This may potentially be useful for parents and students, as they will get a better understanding of which college to choose.
#
#
#
#
#
# # Background and Related Work
#
# 1. This article explores the correlation between selectivity and graduate earnings after 10 years. It concludes that graduates from selective colleges earn more, but the school alone is not the only factor determining income:
# https://www.insidehighered.com/news/2016/08/22/study-finds-graduates-most-selective-colleges-enjoy-earnings-payoff#:~:text=The%20results%20show%20that%2C%20after,and%2019%20percent%20more%20than
# # Research Questions
#
# I want to learn whether colleges with stricter admission metrics report higher student earnings after graduation.
#
# Hypotheses:
# 1. Private nonprofit colleges report higher student earnings after graduation.
# 2. Colleges with a lower admission rate report higher student earnings after graduation.
# 3. Colleges with a higher SAT average report higher student earnings after graduation.
# # Methodology
#
# I will be using the Pearson correlation coefficient and linear regression to test for linear correlation between pairs of variables (columns), for example acceptance rate and earnings. Linear regression is the easiest method when we want to measure the correlation between two quantitative variables. To test the validity of the analysis I will find p-values and make sure they are very small. To visualize the data I will produce a heatmap of Pearson correlation coefficients, and I will also plot the data points in a scatterplot with a line of best fit. In the scatterplot, the explanatory (independent) variable, e.g. acceptance rate, is on the x-axis, and the response (dependent) variable, e.g. earnings, is on the y-axis.
# # Data selected for analysis:
# The dataset that was taken from here: https://www.arcgis.com/home/item.html?id=912cf0e0028c4e26a23daf51443ef9fa, csv version included in the repo. The dataset includes information about ~ 6500 US colleges. The information includes name of the college, different metrics for location, type, number of students, SAT scores, tuition costs, financial aid, avg graduate earnings , etc.
# # Results
# Below are all the libraries we will need for this analysis.
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from sklearn.linear_model import LinearRegression
import seaborn
import statsmodels.api as s
# # 1. Preparation of a Dataset
#
# Access the csv file and make a subset of a dataframe with the columns we want to include.
# + tags=[]
# Path to the College Scorecard export (a CSV despite the variable name).
excel_file_path = 'College_Scorecard_Data.csv'
df = pd.read_csv(excel_file_path, na_filter=True)
# Keep only the columns used in the analysis below.
df=df[[ 'NAME', 'STATE', 'TYPE', 'REGION',
        'ADM_RATE', 'SAT_AVG', 'COST', 'EARNINGS']]
df[:10]
# -
# Add a new column for Return of Investment (ROI) by dividing earnings after
# graduation by the cost of attendance.
# + tags=[]
# NOTE(review): when EARNINGS < 1 this sets ROI to the raw COST instead of a
# ratio (or NaN) -- that looks suspicious; confirm the intended fallback.
df['ROI'] = np.where(df['EARNINGS'] < 1, df['COST'], df['EARNINGS']/df['COST'])
# # 2. Analysis of the Dataset
#
# 1. Begin with descriptive statistics.
# Summary statistics for every column (include='all' covers string columns).
df.describe(include='all')
# 2. Find top 10 colleges in Washington State by earnings after graduation.
df[df['STATE'] == "WA"].nlargest(10, 'EARNINGS')[['NAME', 'EARNINGS']]
# 3. Next find top 10 colleges in WA by the Return of Investment.
df[df['STATE'] == "WA"].nlargest(10, 'ROI')[['NAME', 'EARNINGS','ROI']]
# This information may be usefull to families that can't afford expensive colleges but want to maximize return on investment.
# # 3. Analysis - Type of College vs Earnings
#
# 1. Find median earnings for every type of college.
# Median earnings per college type. Uses the built-in GroupBy.median()
# instead of .agg(np.median): identical result, and passing NumPy callables
# to .agg is deprecated in recent pandas (and slower).
median = df.groupby('TYPE')['EARNINGS'].median()
median
# 2. Visualize it using a horizontal bar graph.
median.plot(kind='barh')
plt.xlabel('Earnings',fontsize=18)
plt.title('Type of College vs Earnings',fontsize=15)
plt.ylabel('')
# We can see that Private Nonprofit colleges earn the most on average.
# # 4. Analysis - Linear Correlations
#
# 1. Find Pearson correlation between the variables.
# + tags=[]
# Pairwise Pearson correlations of the numeric columns.
# NOTE(review): pandas >= 2.0 raises on non-numeric columns here; this relies
# on older pandas silently dropping NAME/STATE/TYPE/REGION. Consider passing
# numeric_only=True -- confirm the pandas version in use.
pearsoncorr = df.corr(method='pearson', min_periods=0)
pearsoncorr
# -
# 2. Visualize the correlation matrix as a heatmap.
plt.figure(figsize=(8,8))
seaborn.heatmap(df.corr(), annot=True, cmap="coolwarm")
# We see that SAT average has the biggest linear correlation with earnings, correlation between cost and earnings is also big, corelation between admission rate and earnings is moderate negative.
# # Correlation - Admission Rate vs Earnings
#
# 1. Create a subset with two variables of interest and drop na values, this will allow us to perform regression analysis later, since we need paired data.
# 1. Subset the two variables of interest and drop missing values so the
#    regression below sees paired data only.
df1=df[['ADM_RATE', 'EARNINGS']].dropna()
df1[:10]
# 2. Check that we have enough rows for the analysis
# + tags=[]
len(df1)
# + [markdown] tags=[]
# 3. Find the p-value and make sure its value is small. Find the r coefficient.
#
# -
from scipy.stats import pearsonr
# Pearson r and two-sided p-value for admission rate vs earnings.
pearsonr(df1['ADM_RATE'], df1['EARNINGS'])
# The p-value is small and the Pearson correlation is -0.284. It's a weak-moderate negative linear correlation.
# 4. Fit an ordinary least-squares line (used for the plot below).
from scipy.stats import linregress  # NOTE(review): imported but never used
X=df1[['ADM_RATE']]
y=df1[['EARNINGS']]
regressor = LinearRegression()
regressor.fit(X, y)
y_pred = regressor.predict(X)
# 5. Scatterplot with the fitted line to visualize the correlation.
plt.scatter(X,y,s=5)
plt.plot(X,y_pred, color='red')
plt.title('Admission Rate vs Earnings')
plt.xlabel('Admission Rate')
plt.ylabel('Earnings')
plt.show()  # fixed: was `plt.show` (attribute access, never actually called)
# From the plot it's hard to conclude whether this relationship is linear. The correlation may actually be monotonic but non-linear (e.g. exponential): as the admission rate falls below 0.2, the earnings begin a steep ascent. More testing is needed to validate the relationship.
# # 5. Correlation - SAT vs Earnings
#
# Here we perform the same analysis as for the Admission rate vs Earnings. (I'm not sure how to reduce the redundancy)
#
# 1. Create a subset with two variables of interest and drop na values, this will allow us to perform regression analysis later.
# 1. Subset the two variables of interest and drop missing values so the
#    regression below sees paired data only.
df2=df[['SAT_AVG', 'EARNINGS']].dropna()
df2[:10]
# 2. Find the p-value and make sure its value is small. Find the r coefficient.
#
from scipy.stats import pearsonr
pearsonr(df2['SAT_AVG'], df2['EARNINGS'])
# The p value is small, the r coefficient is 0.696, which is a strong positive correlation.
# 3. Fit an ordinary least-squares line (used for the plot below).
from scipy.stats import linregress  # NOTE(review): imported but never used
X=df2[['SAT_AVG']]
y=df2[['EARNINGS']]
regressor = LinearRegression()
regressor.fit(X, y)
y_pred = regressor.predict(X)
# 4. Scatterplot with the fitted line to visualize the correlation.
plt.scatter(X,y,s=5)
plt.plot(X,y_pred, color='red')
plt.title('SAT Scores vs Earnings')
plt.xlabel('SAT Scores')
plt.ylabel('Earnings')
plt.show()  # fixed: was `plt.show` (attribute access, never actually called)
# This is a monotonic relationship, where Earnings increase as SAT scores increase. This looks like a linear relationship,
# but there is still a hint to an exponential relationship, since the Earnings begin a steep ascend after SAT scores
# get to 1400. More testing is needed to validate the relationship.
# # Limitations
#
# This dataset is most suitable for comparing universities with each other. It is not sufficient for comparing student performance and correlating it with future earnings, because each college in this dataset only provides average values for every variable, so we can't compare student performance within colleges.
#
# The earnings are also averaged and not separated into majors within the colleges, which may sway the results, since technical colleges will earn more than art colleges and there is no way to separate them apart.
#
# This dataset provides us only with graduate earnings straight out of the college. If we were provided with more earning checkpoints such as at 5, 10 and 20 years after graduation, we would be more confident in our findings.
#
#
# # Conclusion
#
# We can conclude that colleges with a lower admission rate, a higher average SAT score, and a higher cost of attendance report higher graduate earnings straight out of college. On top of that, college type also plays a role in future earnings: private nonprofit colleges report the highest student earnings after graduation, private for-profit colleges report the lowest, and public colleges are in between.
|
.ipynb_checkpoints/Analysis-checkpoint.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Convert Colored Images to Gray Scale Images and analyze the results for time taken
import cv2
import shutil
import os
import time
from matplotlib import pyplot as plt
# ### Generate Images 100,200,300
# Create five folders ("1KImages" .. "5KImages") holding 100, 200, ..., 500
# copies of the sample image. NOTE: despite the "K" in the folder names the
# counts are hundreds, matching the original notebook's behaviour; the
# markdown header above ("100,200,300") also undercounts.
for i in range(5):
    # makedirs with exist_ok=True so re-running the cell doesn't crash with
    # FileExistsError (os.mkdir did); existing copies are simply overwritten.
    os.makedirs(f"{i+1}KImages", exist_ok=True)
    for j in range(100*(i+1)):
        shutil.copy("./FolderWithImages/check.jpg",dst=f"{i+1}KImages/copy{j}.jpg")
# ## Convert Images to Gray Scale and Note the time taken
# Map folder name -> seconds taken to convert all of its images to grayscale.
data = {}
for i in range(5):
    start = time.time()
    images = os.listdir(f'{i+1}KImages')
    for image in images:
        # Read in colour, convert BGR -> grayscale, then replace the file.
        check = cv2.imread(f'./{i+1}KImages/{image}')
        gray = cv2.cvtColor(check,cv2.COLOR_BGR2GRAY)
        os.remove(f'./{i+1}KImages/{image}')
        cv2.imwrite(f'./{i+1}KImages/{image}',gray)
    end = time.time()
    # NOTE: the timing includes disk I/O (read/remove/write), not just the
    # colour conversion itself.
    data[f'{i+1}KImages'] = end - start
# ## Plot the results
# Line plot of conversion time per folder: folder name on x, seconds on y.
folder_names = [key for key in data]
elapsed = [data[key] for key in folder_names]
plt.plot(folder_names, elapsed)
plt.title("Time Taken to Convert Images to GrayScale")
plt.xlabel("Number of Files(K)")
plt.ylabel("Time Taken")
plt.show()
|
Convert Color to GrayScale/.ipynb_checkpoints/CtoBGresults-checkpoint.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# Scratch notebook: interactive experiments with basic Python types.
# NOTE(review): run top-to-bottom this raises -- several cells reference
# names before assignment or perform invalid operations (flagged inline).
print("hi")
print('hi')
# integer and float arithmetic; int + float promotes to float
a=45
b=5.6
print(a+b)
type(a)
type(b)
name         # NameError here: 'name' is assigned only two lines below
print(name)  # NameError as well
name="divya"
name1="varshni"
print(name)
name+"space"+name1
type(name)
# Lists: heterogeneous and nestable
lst=["nivi",1,546,89.9,[1,2]]
print(lst)
lst
lst[1]
lst[0]
lst[4][0]    # first element of the nested list
lst.append("divya")
lst
lst.index(546)
lst
lst.index(546)
lst.index(-1)  # ValueError: -1 is not in the list
lst(-1)        # TypeError: a list is not callable (lst[-1] was meant)
lst[-1]
lst[-3]
# Dictionaries
dit={"name":"sai","age":15}
dit
dit.get(name)  # looks up the *variable* name ("divya"), not the key "name" -> None
dit
dit.get('name')
dit['name']
dit.items()
dit.keys()
dit.pop('name')
dit
dit["scl"]="kvs"
dit
# Sets: unordered, duplicates collapse (3 appears once)
st={'s','a',1,2,3,3,6}
st
st1={"s",1}
st1.issubset(st)
# Tuples: immutable sequences
tup=("hi","@","hi","s")
tup
tup.count("hi")
tup.count("@")
abc=True
abc
|
note.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import pandas as pd
import datetime
import math
import numpy

# Load the scraped Burpple reviews for Tanjong Pagar cafes (2016 onwards).
Burpple = pd.read_excel(r"TanjReviews2016Onwards.xlsx")
# (fixed: was `Dates = Dates = ...`, a duplicated assignment)
Dates = Burpple['Date'].tolist()
# Raw date strings arrive in three formats:
#   "<n> days ago"          -> relative; pinned to the scrape month (May 2018)
#   "<Mon> <day> at <time>" -> current year (assumed to be 2018)
#   "<Mon> <day>, <year>"   -> fully specified
Form = []  # normalised "<day> <Mon> <year>" strings
why = []   # review year as a string, used later for the per-year split
for i in Dates:
    condition = i.split()[2]
    if condition == "ago":
        day = i.split()[0]
        Form.append(day + " " + "May 2018")
        why.append("2018")
    elif condition == "at":
        mon = i.split()[0]
        day = i.split()[1]
        Form.append(day + " " + mon + " 2018")
        why.append("2018")
    else:
        day = i.split()[1].replace(',', '')
        mon = i.split()[0]
        year = i.split()[2]
        Form.append(day + " " + mon + " " + year)
        why.append(year)

num = len(Burpple)
NewDate = []  # parsed datetime objects
Quart = []    # calendar quarter (1-4) per review
for i in range(0, num):
    # Parse once. The original parsed, reformatted to '%d/%m/%Y', then
    # re-parsed the same value -- the round trip was a no-op.
    Date = datetime.datetime.strptime(Form[i], '%d %b %Y')
    NewDate.append(Date)
    Quart.append(math.ceil(Date.month / 3.))

FormatDate = pd.Series(NewDate)
Burpple['Formated_Date'] = FormatDate.values
Quarter = pd.Series(Quart)
Burpple['Quarter'] = Quarter.values
Year = pd.Series(why)
Burpple['Year'] = Year.values

# Write the enriched table. A context manager guarantees the writer is closed
# and the file actually flushed to disk (the original never closed the writer
# and passed an `options=` keyword that current pandas rejects).
with pd.ExcelWriter('(TP) Quarters.xlsx') as ex:
    Burpple.to_excel(ex, 'Data', index=False)

### sorting the data by Years: boolean masks replace the original O(n^2)
### row-by-row transpose-and-concat loop; the selected rows are the same.
y2016 = Burpple[Burpple['Year'] == '2016'].reset_index(drop = True)
y2017 = Burpple[Burpple['Year'] == '2017'].reset_index(drop = True)
y2018 = Burpple[~Burpple['Year'].isin(['2016', '2017'])].reset_index(drop = True)
# next decide how you want to split the data.
# separate each year into diff files? How about the average sentiment?
### Restaurant   Avg_sentiment   Quarter_1   Quarter_2   Quarter_3   Quarter_4
###    xxx            0.01          0.2        0.021       0.45        0.46
# -
Burpple
|
Sentimental Analysis/Cafes in Tanjong Pagar/Codes/(Tanjong Pagar) Quarters.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Fit halo mass to shear profile: 3. accounting for the redshift distribution of source galaxies
#
# _the LSST-DESC CLMM team_
#
#
# This notebook demonstrates how to use `clmm` to estimate a WL halo mass from observations of a galaxy cluster when source galaxies follow a given distribution (the Chang. (2013) implemented in `clmm`). It uses several functionalities of the support `mock_data` module to produce mock datasets.
#
# - Setting things up, with the proper imports.
# - Generating 2 datasets: an ideal datasetwith source galaxies following the Chang et al. (2013) redshift distribution; a noisy dataset where photoz errors and shape noise are also included.
# - Computing the binned reduced tangential shear profile, for the 2 datasets, using logarithmic binning.
# - Setting up the "single source plane" model (model1) and a model accounting for the redshift distribution (model2). As already seen in Example2, model1 will yield a bias mass reconstruction. Accounting for the redshift distribution in the model (model2) solves that issue.
# - Perform a simple fit using `scipy.optimize.curve_fit` and visualize the results.
# ## Setup
# First, we import some standard packages.
# +
# Import clmm; on a fresh environment, fall back to the helper script that
# installs the CLMM pipeline, then retry the import.
try: import clmm
except:
    import notebook_install
    notebook_install.install_clmm_pipeline(upgrade=False)
    import clmm
import matplotlib.pyplot as plt
import numpy as np
from astropy import units
# Prefer Gotham-style fonts, falling back through the list to 'serif'.
plt.rcParams['font.family']=['gothambook','gotham','gotham-book','serif']
clmm.__version__
# -
# Next, we import `clmm`'s core modules.
# clmm core modules: data operations, cluster container, theory predictions.
import clmm.dataops as da
import clmm.galaxycluster as gc
import clmm.theory as theory
from clmm import Cosmology
# We then import a support modules for a specific data sets.
# `clmm` includes support modules that enable the user to generate mock data in a format compatible with `clmm`.
# ## Making mock data
from clmm.support import mock_data as mock
# For reproducibility:
np.random.seed(11)
# To create mock data, we need to define a true cosmology, which is currently done with [`astropy`'s cosmology library](http://docs.astropy.org/en/stable/cosmology/index.html).
# Flat LCDM truth cosmology; Omega_dm0 is Omega_m0 (0.27) minus Omega_b0.
cosmo = Cosmology(H0 = 70.0, Omega_dm0 = 0.27 - 0.045, Omega_b0 = 0.045, Omega_k0 = 0.0)
# We now set some parameters for a mock galaxy cluster:
# True cluster parameters used to generate the mock catalogs.
cluster_m = 1.e15 # M200,m [Msun]
logm = np.log(cluster_m)/np.log(10)  # log10 of the mass (== np.log10(cluster_m))
concentration = 4
cluster_ra = 0.
cluster_dec = 0.
cluster_z = 0.4
ngals = 10000  # number of background source galaxies to draw
# Then we use the `mock_data` support module to generate 2 galaxy catalogs:
# - `ideal_data`: galaxies distributed according to the Chang et al. (2013) redshift distribution.
# - `noisy_data`: `ideal_data` + photoz errors + shape noise
#
# (Galaxies have a minimum redshift defined as $z_{cluster} + 0.1$)
ideal_data = mock.generate_galaxy_catalog(cluster_m, cluster_z, concentration, cosmo, 'chang13', zsrc_min = cluster_z + 0.1, ngals=ngals)
noisy_data = mock.generate_galaxy_catalog(cluster_m, cluster_z, concentration, cosmo, 'chang13', zsrc_min = cluster_z + 0.1,
                                          shapenoise=0.05, photoz_sigma_unscaled=0.05, ngals=ngals)
# The galaxy catalogs are converted to a `clmm.GalaxyCluster` object and may be saved for later use.
# +
# Wrap each mock catalog in a GalaxyCluster object and pickle it for reuse.
cluster_id = "CL_ideal"
gc_object = clmm.GalaxyCluster(cluster_id, cluster_ra, cluster_dec,
                               cluster_z, ideal_data)
gc_object.save('ideal_GC.pkl')
cluster_id = "CL_noisy"
gc_object = clmm.GalaxyCluster(cluster_id, cluster_ra, cluster_dec,
                               cluster_z, noisy_data)
gc_object.save('noisy_GC.pkl')
# -
# Any saved clmm.GalaxyCluster object may be read in for analysis.
cl_ideal = clmm.GalaxyCluster.load('ideal_GC.pkl') # background galaxies distributed according to Chang et al. (2013)
cl_noisy = clmm.GalaxyCluster.load('noisy_GC.pkl') # Chang et al. (2013) + shapenoise + photozerror
# ### Redshifts of the galaxies generated by mock data follow the Chang et al. (2013) redshift distribution.
# Source-redshift histogram: should follow the Chang et al. (2013)
# distribution, truncated below z_cluster + 0.1 by construction.
plt.figure(figsize=(7,5))
plt.hist(cl_ideal.galcat['z'], density = True, bins = 50)
plt.axvline(x = cluster_z, color='orange', label = 'cluster redshift')
plt.xlabel(r'$z_{src}$', fontsize = 20)
plt.ylabel(r'$N(z$)', fontsize = 20)
plt.legend()
plt.xlim(0,5)
# ## Deriving observables
# ### Computing shear
# `clmm.GalaxyCluster.compute_tangential_and_cross_components` calculates the tangential and cross shears for each source galaxy in the cluster.
# Per-galaxy tangential and cross ellipticity components (flat-sky geometry).
cl_ideal.compute_tangential_and_cross_components(geometry="flat")
cl_noisy.compute_tangential_and_cross_components(geometry="flat")
pass  # suppress notebook echo of the last return value
# ### Radially binning the data
# 15 logarithmically spaced bins between 0.2 and 4 Mpc.
bin_edges = da.make_bins(0.2, 4, 15, method='evenlog10width')
# `clmm.GalaxyCluster.make_radial_profile` evaluates the average shear of the galaxy catalog in bins of radius.
# gal_ids_in_bins=True stores per-bin galaxy ids -- required by the
# redshift-distribution model defined below.
cl_ideal.make_radial_profile("Mpc", bins=bin_edges, cosmo=cosmo, gal_ids_in_bins=True)
cl_noisy.make_radial_profile("Mpc", bins=bin_edges,cosmo=cosmo, gal_ids_in_bins=True)
pass
# After this, the object acquires the `clmm.GalaxyCluster.profile` attribute.
# ## Create the reduced tangential shear models
# We consider two options:
# - First, the naive and *wrong* approach: the reduced tangential shear in a given radial bin $j$ is given by $g_t(\theta_j, \langle z_s \rangle)$, where $\langle z_s \rangle$ is the average redshift in the bin. In that case, the corresponding model is simply given by the fucntion below:
def model_reduced_tangential_shear_singlez(r, logm, z_src):
    """Reduced tangential shear for an NFW halo with sources at one redshift.

    Parameters: r (radii, Mpc), logm (log10 of the halo mass in Msun),
    z_src (source redshift, scalar or one value per radius). Relies on the
    notebook-level `concentration`, `cluster_z` and `cosmo`.
    """
    halo_mass = 10. ** logm
    return clmm.compute_reduced_tangential_shear(
        r, halo_mass, concentration,
        cluster_z, z_src, cosmo,
        delta_mdef=200,
        halo_profile_model='nfw')
# - Second, the reduced tangential shear in a given radial bin accounts properly for the redshift distribution in the bin as $\langle g_t(\theta_j, z_s)\rangle \neq g_t(\theta_j, \langle z_s \rangle$). Formally, the reduced tangential shear that corresponds to a continuous distribution of source galaxy redshift $N(z)$ can be expressed as:
# $$
# g_t(\theta) = \langle g_t(\theta, z_s)\rangle_{z_{cluster}} = \int_{z_{cluster}}^{+\infty}dz_sN(z_s)g_t(\theta, z_s)
# $$
# If the inidividual redshifts of the background galaxies are known, we can directly build a model based on data, such that in the bin $j$:
# $$
# g_t(\theta_j) = \frac{1}{N(\theta_j)}\sum\limits_{i = 1}^{N(\theta)}g_t(\theta_j, z_i)
# $$
# where $N(\theta_j)$ is the number of galaxies in bin $j$. The corresponding model is given below.
def model_reduced_tangential_shear_zdistrib(radius, logm, data, catalog, profile):
    """Reduced tangential shear averaged over each bin's source redshifts.

    For every radial bin, evaluates the shear at the individual redshifts of
    the galaxies assigned to that bin and returns the per-bin mean, i.e.
    <g_t(theta_j, z_s)> rather than g_t(theta_j, <z_s>).

    Parameters
    ----------
    radius : array-like -- used only for its length (the number of bins).
    logm : float -- log10 of the halo mass [Msun].
    data : unused; kept so existing call sites remain valid.
    catalog : clmm.GalaxyCluster with per-galaxy redshifts in galcat['z'].
    profile : binned profile table with 'radius' and 'gal_id' columns.

    Returns
    -------
    list of float -- mean reduced tangential shear per radial bin.
    """
    m = 10**logm
    gt_model = []
    for i in range(len(radius)):
        # Redshifts of the galaxies that fell into bin i.
        z_list = catalog.galcat['z'][profile['gal_id'][i]]
        shear = clmm.compute_reduced_tangential_shear(
            profile['radius'][i], m, concentration,
            cluster_z, z_list, cosmo, delta_mdef=200,
            halo_profile_model='nfw')
        gt_model.append(np.mean(shear))
    return gt_model
# ### Before fitting, let's first vizualise these models using the known true mass
# Evaluate both models at the *true* input mass, for comparison with data.
logm = np.log10(cluster_m)
# NOTE(review): r is taken from the ideal profile but reused for the noisy
# models; both profiles use identical bin edges, so this should match --
# confirm if the binning ever diverges.
r = cl_ideal.profile['radius']
gt_model_ideal_singlez = model_reduced_tangential_shear_singlez(r, logm, cl_ideal.profile['z'])
gt_model_ideal_zdistrib = model_reduced_tangential_shear_zdistrib(r,logm, ideal_data, cl_ideal, cl_ideal.profile)
gt_model_noisy_singlez = model_reduced_tangential_shear_singlez(r,logm, cl_noisy.profile['z'])
gt_model_noisy_zdistrib = model_reduced_tangential_shear_zdistrib(r,logm, noisy_data, cl_noisy, cl_noisy.profile)
# +
# Data vs the two models, both evaluated at the true input mass.
plt.figure(figsize=(20,8))
plt.subplot(1,2,1)
plt.title('ideal data', fontsize=20)
plt.errorbar(r,cl_ideal.profile['gt'],cl_ideal.profile['gt_err'],c='k',linestyle='',
             marker='o', label=r'ideal data, $M_{input}$ = %.2e Msun' % cluster_m)
plt.loglog(r,gt_model_ideal_zdistrib,'b', label=r'model w/ zdistrib, $M_{input}$ = %.2e Msun' % cluster_m)
plt.loglog(r,gt_model_ideal_singlez,'-y', label=r'model w/o zdistrib, $M_{input}$ = %.2e Msun' % cluster_m)
plt.xlabel('r [Mpc]', fontsize = 20)
plt.ylabel(r'$g_t$', fontsize = 20)
plt.xlim(min(cl_ideal.profile['radius']), max(cl_ideal.profile['radius']))
plt.legend(fontsize = 15)
plt.subplot(1,2,2)
plt.title('noisy data', fontsize=20)
plt.errorbar(r,cl_noisy.profile['gt'],cl_noisy.profile['gt_err'],c='k',linestyle='',
             marker='o',label=r'noisy data, $M_{input}$ = %.2e Msun' % cluster_m)
plt.loglog(r,gt_model_noisy_zdistrib,'-b', label=r'model w/ zdistrib, $M_{input}$ = %.2e Msun' % cluster_m)
plt.loglog(r,gt_model_noisy_singlez,'-y', label=r'model w/o zdistrib, $M_{input}$ = %.2e Msun' % cluster_m)
plt.xlabel('r [Mpc]', fontsize = 20)
plt.ylabel(r'$g_t$', fontsize = 20)
plt.xlim(min(cl_noisy.profile['radius']), max(cl_noisy.profile['radius']))
plt.legend(fontsize = 15)
# -
# The naive model that uses the average redshift in the bin clearly does not give the right description of the ideal data (left panel), and will yield biased mass results if used for fitting (see below). For ideal data, the model that accounts for the redshift distribution is, by construction, an excellent description of the data (solid blue line). The same is true for noisy data (right panel), although the noise make the naive model appear "less biased".
# ## Mass fitting
# We estimate the best-fit mass using `scipy.optimize.curve_fit`. We compare estimated mass for noisy and ideal data, using both models described above (naive with average redshift or the model taking into account the redshift distribution). The choice of fitting $\log_{10} M$ instead of $M$ lowers the range of pre-defined fitting bounds from several order of magnitude for the mass to unity. From the associated error $\Delta (\log_{10}M)$ we calculate the error to mass as $\Delta M = M_{fit}\log(10)\Delta (\log_{10}M)$.
from clmm.support.sampler import fitters
# +
# Fit log10(M) with curve_fit (via clmm's fitters helper), weighting by the
# per-bin shear errors. Mass errors follow from dM = M ln(10) d(log10 M).
# Ideal data, model with the full redshift distribution:
popt,pcov = fitters['curve_fit'](lambda r, logm:model_reduced_tangential_shear_zdistrib(r, logm, ideal_data, cl_ideal, cl_ideal.profile),
                                 cl_ideal.profile['radius'],
                                 cl_ideal.profile['gt'],
                                 cl_ideal.profile['gt_err'], bounds=[10.,16.])
m_est_ideal_zdistrib = 10.**popt[0]
m_est_err_ideal_zdistrib = m_est_ideal_zdistrib * np.sqrt(pcov[0][0]) * np.log(10)
# Ideal data, single-redshift model (expected to be biased).
# NOTE(review): upper bound is 17 here vs 16 in the other three fits --
# confirm whether that is intentional.
popt,pcov = fitters['curve_fit'](lambda r, logm:model_reduced_tangential_shear_singlez(r, logm, cl_ideal.profile['z']),
                                 cl_ideal.profile['radius'],
                                 cl_ideal.profile['gt'],
                                 cl_ideal.profile['gt_err'], bounds=[10.,17.])
m_est_ideal_singlez = 10.**popt[0]
m_est_err_ideal_singlez = m_est_ideal_singlez * np.sqrt(pcov[0][0]) * np.log(10)
# Noisy data, model with the full redshift distribution:
popt,pcov = fitters['curve_fit'](lambda r, logm:model_reduced_tangential_shear_zdistrib(r, logm, noisy_data, cl_noisy, cl_noisy.profile),
                                 cl_noisy.profile['radius'],
                                 cl_noisy.profile['gt'],
                                 cl_noisy.profile['gt_err'], bounds=[10.,16.])
m_est_noisy_zdistrib = 10.**popt[0]
m_est_err_noisy_zdistrib = m_est_noisy_zdistrib * np.sqrt(pcov[0][0]) * np.log(10)
# Noisy data, single-redshift model:
popt,pcov = fitters['curve_fit'](lambda r, logm:model_reduced_tangential_shear_singlez(r, logm, cl_noisy.profile['z']),
                                 cl_noisy.profile['radius'],
                                 cl_noisy.profile['gt'],
                                 cl_noisy.profile['gt_err'], bounds=[10.,16.])
m_est_noisy_singlez = 10.**popt[0]
m_est_err_noisy_singlez = m_est_noisy_singlez * np.sqrt(pcov[0][0]) * np.log(10)
# +
# Summary: fitted masses vs the input mass for each model/dataset combination.
print(f'The input mass = {cluster_m:.2e} Msun\n')
print("Without accounting for the redshift distribution in the model\n")
print(f'Best fit mass for ideal data = {m_est_ideal_singlez:.2e} +/- {m_est_err_ideal_singlez:.2e} Msun')
print(f'Best fit mass for noisy data = {m_est_noisy_singlez:.2e} +/- {m_est_err_noisy_singlez:.2e} Msun\n')
print("Accounting for the redshift distribution in the model\n")
print(f'Best fit mass for ideal data = {m_est_ideal_zdistrib:.2e} +/- {m_est_err_ideal_zdistrib:.2e} Msun')
print(f'Best fit mass for noisy data = {m_est_noisy_zdistrib:.2e} +/- {m_est_err_noisy_zdistrib:.2e} Msun')
# -
# As expected, the reconstructed mass is biased when the redshift distribution is not accounted for in the model
# ## Visualization of the results
# For visualization purpose, we calculate the reduced tangential shear predicted by the model with estimated masses for noisy and ideal data.
# Model predictions at the best-fit masses, for the plots below.
# (np.log(x)/np.log(10) is just log10(x), matching np.log10 used earlier.)
gt_est_ideal_zdistrib = model_reduced_tangential_shear_zdistrib(r,np.log(m_est_ideal_zdistrib)/np.log(10), ideal_data, cl_ideal, cl_ideal.profile)
gt_est_noisy_zdistrib = model_reduced_tangential_shear_zdistrib(r,np.log(m_est_noisy_zdistrib)/np.log(10), noisy_data, cl_noisy, cl_noisy.profile)
gt_est_ideal_singlez = model_reduced_tangential_shear_singlez(r,np.log(m_est_ideal_singlez)/np.log(10), cl_ideal.profile['z'])
gt_est_noisy_singlez = model_reduced_tangential_shear_singlez(r,np.log(m_est_noisy_singlez)/np.log(10), cl_noisy.profile['z'])
# We compare to tangential shear obtained with theoretical mass. We plot the reduced tangential shear models first when redshift distribution is accounted for in the model then for the naive approach, with respective best-fit masses.
# +
# Data vs models evaluated at the *fitted* masses (ideal left, noisy right).
plt.figure(figsize=( 20 , 6 ))
plt.subplot( 1 , 2 , 1 )
plt.title(r'tangential shear $g_t$ (ideal data)', fontsize=20)
plt.errorbar(r,cl_ideal.profile['gt'],cl_ideal.profile['gt_err'],c='k',linestyle='',
             marker='o', label=r'ideal data, $M_{input}$ = %.1e Msun' % cluster_m)
plt.loglog(r,gt_est_ideal_zdistrib,'-b',
           label=fr'model w/ zdistrib, M_fit = {m_est_ideal_zdistrib:.2e} $\pm$ {m_est_err_ideal_zdistrib:.2e} Msun')
plt.loglog(r,gt_est_ideal_singlez,'-y',
           label=fr'model w/o zdistrib, M_fit = {m_est_ideal_singlez:.2e} $\pm$ {m_est_err_ideal_singlez:.2e} Msun')
plt.xlabel('r [Mpc]', fontsize = 20)
plt.ylabel(r'$g_t$', fontsize = 20)
plt.xlim(min(cl_ideal.profile['radius']), max(cl_ideal.profile['radius']))
plt.legend(fontsize = 15)
plt.subplot( 1 , 2 , 2 )
plt.title(r'tangential shear $g_t$ (noisy data)', fontsize=20)
plt.errorbar(r,cl_noisy.profile['gt'],cl_noisy.profile['gt_err'],c='k',linestyle='', marker='o', label=r'noisy data, $M_{input}$ = %.1e Msun' % cluster_m)
#plt.loglog(r,gt_model_noisy,'-r', label='model, $M_{input}$ = %.3e Msun' % cluster_m)
plt.loglog(r,gt_est_noisy_zdistrib,'-b',
           label=fr'model w/ zdistrib, M_fit = {m_est_noisy_zdistrib:.2e} $\pm$ {m_est_err_noisy_zdistrib:.2e} Msun')
plt.loglog(r,gt_est_noisy_singlez,'-y',
           label=fr'model w/o zdistrib, M_fit = {m_est_noisy_singlez:.2e} $\pm$ {m_est_err_noisy_singlez:.2e} Msun')
plt.xlabel('r [Mpc]', fontsize = 20)
plt.ylabel(r'$g_t$', fontsize = 20)
plt.xlim(min(cl_noisy.profile['radius']), max(cl_noisy.profile['radius']))
plt.legend(fontsize = 15)
# -
# We note that the reconstructed mass is biased when the redshift distribution is not accounted for in the model, and is smaller than the input mass. This is associated with the increase of the reduced tangential shear with the source redshift $z_s$ at a given radius $r$.
|
examples/Example3_Fit_Halo_Mass_to_Shear_Catalog.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Introduction to Object-Oriented Programming
#
# In this lecture we will start looking at object-oriented programming (OOP). This is one of the major components of IN1910. In this lecture we will start to look at what OOP is, and how to program object-oriented in Python. In later lectures we will cover some more theory of object-oriented programming and also touch on it in C++.
# #### Other references
#
# Object-oriented programming in Python is fairly well covered by *Langtangen*. Especially so in:
# * **Chapter 7: Introduction to Classes** Gives a good introduction to defining classes and Python syntax.
# * **Chapter 9: Object-Oriented Programming** Gives a better overview of theory and gives more technical examples.
#
# If you want a different introduction to OOP in Python, these two lectures from MIT Open courseware might be a good place to start
# * [Lecture 8: Object-Oriented Programming](https://youtu.be/-DP1i2ZU9gk)
# * [Lecture 9: Python Classes and Inheritance](https://www.youtube.com/watch?v=FlGjISF3l78)
# ### What is Object-Oriented Programming?
#
# Object-oriented programming (OOP for short) is a [*programming paradigm*](https://en.wikipedia.org/wiki/Programming_paradigm). Put simply, a paradigm is a way to think about, organize, and write code. Paradigms are used to classify and describe languages and programming styles. Many other paradigms exist, and you might have heard of some of them. Some examples are: procedural programming, functional programming, imperative programming, and symbolic programming. A programming language or piece of software doesn't necessarily belong to a single paradigm, and there aren't hard lines between the different paradigms either.
#
# Object-oriented programming is a way of solving problems, or building software, by defining data as *objects*, and specifying how those objects behave and interact. It is quite a popular way of developing software, because when done correctly it leads to code that is easy to understand, use and extend.
#
# #### The origins of OOP
#
# Object-oriented programming was invented in Norway by <NAME> and <NAME> in 1967, they called their new language, the first object-oriented language, *Simula*. While Simula was a revolutionary and historically important language, it didn't see widespread adoption. Inspired by Simula, the language *Smalltalk* came on the scene in 1972 and saw a wider use, pushing the use of OOP. Smalltalk still sees use today, mainly as a means of teaching OOP and introductory programming. In 1983 *C++* arrived on the scene, and can be described as the first "industrial" object-oriented language. C++ was invented by <NAME>. He thought Simula had some nice features for large software development, but considered it to be too slow to be practical. He therefore started developing *"C with classes"*, which would eventually become C++.
#
# Most popular modern computer languages are *multi-paradigm*, meaning they support multiple possible paradigms. Some popular modern languages that support object-orientation are Python, Java, C++, C#, PHP, and Ruby.
# <img src="fig/oop_drake_meme.png" width=300>
# <center>**Figure:** You might have thought the ostehøvel was the greatest Norwegian invention, but Simula is where it's at.</center>
# #### What are objects?
#
# In Python, all variables are *objects*. Each object is of a given *type* (string, list, dictionary, etc) and it has some data stored in it. In addition, the objects have some defined behavior for how we interact with them, and how they interact with other objects. We can for example append new elements to a list, or add lists together.
#
# One of the reasons OOP is so intuitive is that it mirrors real life. To make sense of the world around us when we think or talk we tend to divide everything into specific concepts or *objects*. Cars are objects; buildings and people are objects too, in the software sense. It isn't only physical things that are objects either, we split time into hours, days, and years. And these are objects too. We can think of a "job" as an object. In this sense, an object is a data abstraction that attempts to capture some important property or feature.
#
# Everything in Python is an object, so when you are programming in Python, you are defining and using objects all the time. Does this mean you are programming object-oriented? Not necessarily. As we mentioned in the introduction the important thing is *thinking* and *solving problems* in an object-oriented manner. Meaning to do OOP properly one should ask what kind of objects are best for organizing a program and solving problems. Probably the most important task of programming object-oriented is therefore to define new data types, and defining how objects of that type should be represented and interact.
#
# ##### Example: A contact list
#
# Think about the contact list on your phone. For each contact, you can store information of different types: name, different phone numbers, emails, etc. Some contacts you have many pieces of information about, some you only have a single piece of information.
#
# Let us say we want to implement such a contact list for a new phone system we are making. We might start thinking how to solve this problem. It makes sense to think of each individual contact as an individual *object*. Each piece of information belongs to a given contact, so it should be part of that object in the code.
#
# To begin with, we only want to store information about each contact, and so dictionaries would be a natural choice for this. We can use key-value pairs to store the information we want, and ignore the pieces of information we don't want. Other parts of the software system can then go in and access information from the contact dictionary as needed
# +
# Store one contact as a plain dictionary of key/value pairs.
contact = dict(
    name="Lisa",
    email="<EMAIL>",
    mobile_number="767828292",
)
# Individual pieces of information are looked up by key.
print(contact["email"])
# -
# However, our dictionary can only store information on the contact, we cannot add new functionality. A more general way to do this would instead be to define a custom Contact-*class*. Classes are way to define new data types. That way we can create variables that are of type *contact*, instead of dictionary. The upside is that we can choose exactly how our contact datatype should behave.
class Contact:
    """One entry in a contact list.

    Only the name is required; email and cellphone default to None when
    that piece of information is not known.
    """

    def __init__(self, name, email=None, cellphone=None):
        self.name = name
        self.email = email
        self.cellphone = cellphone
# With this class defined, we can now create new contact object as follows
#
# Create a Contact instance; keyword arguments fill the optional fields.
contact = Contact("Lisa", email="<EMAIL>", cellphone="767828292")
print(contact.email)  # attribute access instead of a dictionary lookup
# Our contact object is no longer a dictionary, but a `Contact`-object, a new data type we defined ourself. We can check this by writing out the type:
print(type(contact))  # the object's type is now our own Contact class
# So far we have only added the same information as the dictionary, but we could now go ahead and change how this object behaves. We could for example add a method that starts a new call to the person, so that we could do `contact.initiate_call()`, or a button to start an email to the person, and so on.
# ### Attributes, Fields, Methods
#
# Classes are custom data types, and are extremely central to object-oriented programming. When you are developing object-oriented, most of your development will go into defining classes and how they interact.
#
# When defining a new class, you need to specify what data an object of that type should contain, and what functionality it has. We call these the *attributes* of the class/object, and these can be split into two groupings:
# * fields are pieces of data stored in the object
# * methods are functions we can call
#
# #### Class vs Instance
#
# When we define a class, we are defining a new data type. To actually use it, we have to define a new object of that type, we call this an *instance* of the class. In our contact-list example, the `Contact` class is the general class we defined, then we can implement specific instances of the class, one for "Lisa", one for "Frank" and so on. The class is thus the abstract *concept* of a contact, and objects are the specific cases of that type. A different example: A Nissan leaf is a given type of car, and can be represented as a class. A specific car with the license plate "EM93277" (courtesy of random.org) is an *instance* of the Nissan leaf class. The specific car is an object of the *type* Nissan leaf. In this sense, the class is like the blueprint, it specifies how objects of that type should be built.
#
# #### Naming Conventions
#
# According to the [PEP8](https://www.python.org/dev/peps/pep-0008/?#class-names) style-guide (more on this next week), class names should use the CapWords convention (also known as CamelCase). This means every word in the class name should be capitalized, and the words should **not** be separated by underscores.
#
# Some examples: `Person`, `Polynomial`, `Vector3D`, `BankAccount`, `FileReader`.
#
# Specific instances of a class however, should always be lowercase. So when we define a variable of a given class, we would write for example:
# ```
# u = Vector3D()
# poly = Polynomial()
# acc9302100 = BankAccount()
# ```
# You should be strict about this rule, as it makes it much easier to differentiate the *classes* themselves (the general datatype) from instances of that class (the specific objects) in your code.
# #### Example: A deck of cards
#
# Time for another example. Let us say we are implementing some card games. Now, we can easily represent a deck of cards using lists of strings in Python, no problem. However, for most card games, we need to use the exact same operations: get a new deck, shuffle the deck, draw cards, etc. It would be useful to implement the code to do this one place, and then just use that functionality each time we implement a new card game.
#
# We do this by implementing a `Deck` class. After having implemented our class, our game code itself will be much simpler, because we can now simply call `deck.shuffle()` or `deck.draw(5)` and so on. This adds a layer of abstraction to our code, and hides away the ugly implementation details "behind the scenes". This is called *encapsulation*, and is one of the motivations for using OOP. We can say we hide the implementation, but expose an easy-to-use and easy-to-understand *interface* we, or others, can use to achieve what we want.
# +
import random
class Deck:
    """A standard 52-card deck supporting shuffling and drawing.

    Cards are (suit, value) tuples with suits 'D', 'H', 'C', 'S'
    and values 1 through 13.
    """

    def __init__(self):
        # Build all 52 cards, suit by suit, in a fixed order.
        self.cards = [(suit, value)
                      for suit in ('D', 'H', 'C', 'S')
                      for value in range(1, 14)]

    def shuffle(self):
        """Shuffle the remaining cards in place."""
        random.shuffle(self.cards)

    def draw(self, n=1):
        """Remove and return the top n cards as a list."""
        drawn = []
        for _ in range(n):
            drawn.append(self.cards.pop())
        return drawn

    def shuffle_into_deck(self, cards):
        """Return the given cards to the deck and reshuffle."""
        self.cards.extend(cards)
        self.shuffle()
# -
# Here we implement a class that has four methods. The first method is the constructor (called `__init__`, we explain why later), this is run everytime we create a new object of the class. In this case we initialize a deck of cards by adding all the cards in the deck using a double-loop. We also add a `shuffle` method that shuffles the deck, a `draw` method that draws $n$ cards from the deck, and a `shuffle_into_deck` for when we want to shuffle cards back into the deck. These are just simple examples of functionality that makes sense for a deck of cards, and we could add plenty more.
# Once we have taken the time to build the `Deck`-class, we can easily create decks of cards, and use them, without having to think too much about the underlying details of the implementation. Implementing a class therefore represents adding a new layer of abstraction to our code.
# +
# Typical use: create a deck, shuffle it, and draw a five-card hand.
deck = Deck()
deck.shuffle()
hand = deck.draw(5)
print(hand)  # five (suit, value) tuples
# -
# ### The use of `self`
#
# In our class examples so far, we have simply used the `self`-syntax without explaining it. It is a normal point of confusion for those learning Python, and even those who learn how to define the classes and method using `self` might not really understand *why* it works like it does.
#
# When we call a method on a given instance of a class, like for example
# ```
# deck.shuffle()
# ```
# This is interpreted behind the scenes by Python as
# ```
# Deck.shuffle(deck)
# ```
# And so the method `Deck.shuffle`, even though it is written inside the class, actually just acts like a stand-alone function, the object is simply passed in and manipulated. And so when we define the function we call the first input-argument `self`, because this is the *object itself* (in this case `self = deck`). Note that you can, technically, call this argument something other than `self`, it is just a function argument, like any other. However, it is considered good code style to always use `self` and it will be confusing for others if you use something else. (Most languages use either "self" or "this".)
# ### Interfaces
#
# In our deck of cards example, we have specific methods the user can use to interact with the deck of cards, and we can call this the interface of the class. To put it simply, the interface is what is "visible" or usable from outside the class. If you build a large and complex class, all anyone really needs to interact with it is a good understanding of its interface. In this sense, the interface itself is an abstraction tool. Take a car for example, the interface used for driving that car is the steering wheel, the pedals and the gear stick. However, under the hood there is an engine and lots of complicated machinery.
#
# The act of hiding dirty implementation or technical details under the hood is called *encapsulation*. Encapsulation is useful for keeping code nice and tidy and user friendly. Also, it means that the details under the hood can change without issue, as long as the external interface remains the same. A mechanic can replace the engine in your car, but you will still be able to drive it as before, because the interface is the same.
#
# In some languages, like Java and C++, you can explicitly define interfaces that define what a class must contain. These are often described as "contracts", as they state what a given class that follows that interface must contain.
#
# ## Special Methods
#
# In Python, defining classes often involves implementing *special* methods, also known as *magic* methods. These are methods we do not expect to call explicitly. Rather, we expect Python to call them for us as needed, depending on the situation. They are extremely useful to learn how to use, and they lead to elegant, understandable and usable code. Becoming adept at special methods is crucial if you want to make good Pythonic code.
#
# Special methods have specific names, so Python knows what to call. They all use *dunders*, meaning they have to leading and trailing underscores. You have already seen an example of this, the constructor: `__init__`.
# ### The constructor (`__init__`)
#
# You have already seen the most important example of special methods, the constructor, or initialization special method (`__init__`). This function is called every time you define a new instance of a class.
# +
from math import pi
class Sphere:
    """Sphere with area and volume computed once, in the constructor."""

    def __init__(self, radius):
        r = radius
        self.radius = r
        self.area = 4*pi*r**2
        self.volume = 4*pi*r**3/3
# __init__ ran during construction, so area and volume are already computed.
football = Sphere(11)
print(football.area)
print(football.volume)
# -
# Once we define a new instance of the class, the `__init__`-method is called behind the scenes. As usual, the first argument is called "self", because this will be the object itself. In addition we send in any additional arguments we need to define an object of the given class. In this example we only use radius, but we could add density, color, and so on. In our constructor we also choose to compute the area and volume of the sphere. This happens automatically when the object is created, so we can immediately read out the computed values from our object.
# ### Printing out instances of custom classes
#
# If you attempt to call `print` on a custom object, you'll find that it doesn't produce output that is super helpful.
print(football)  # default output: the type and a memory address, not very readable
# Reading the output we get out the type, in this case `__main__.Sphere` (main because we defined it in our main script, and didn't import it), it tells us it is an object, and then it specifies where in memory the object is stored.
#
# To fix this we have to add the string special method (`__str__`). This special method is called when we try to turn our custom object into a string by writing `str(u)`. When calling `print(u)`, Python actually first converts the object to a string behind the scenes before printing it (unlike for example `sys.stdout.write`, where you have to do it manually).
#
# This means: if we implement a `__str__` that returns a nicely formatted string, we can print our object exactly as we want it.
# +
from math import pi
class Sphere:
    """Sphere with radius, surface area, and volume computed at construction.

    Implements `__str__`, so `print(sphere)` shows a readable description
    instead of the default `<__main__.Sphere object at 0x...>`.
    """

    def __init__(self, radius):
        self.radius = radius
        self.area = 4*pi*radius**2
        self.volume = 4*pi*radius**3/3

    def __str__(self):
        # `return` is a statement, not a function call -- no parentheses needed.
        return f"Sphere({self.radius})"
football = Sphere(11)
print(football)  # print() now goes through Sphere.__str__
# -
# A different special method than the string method is the representation special method (`__repr__`). The main difference is that `__str__` is meant to be human readable, so it works with `print()` and such. This means that `__str__` can be informal, and ambiguous, as long as it is somewhat informative. On the other hand we have `__repr__`, which is meant to be unambiguous and more formal. It should be a unique representation of the object so that it could be used to recreate that object later. When calling `repr(u)` we should get a string that we for example can write to a file, then later load in and execute that statement to get the object back. So `eval(repr(u))` should give `u`.
#
# Repr is also called when printing out elements inside a list for example, so if we write out our football inside a list, the output will be as before, despite having implemented a str-method. To fix this we would need to add a repr instead of, or in addition to, the str-method.
print([football])  # printing inside a list uses __repr__, not __str__
# ## Making callable objects
#
# Another important special method is the *call* method (`__call__`). If a class has an implemented call method, we can call a given object as though it were a function. This is because Python will use the call-method behind the scenes. This is extremely useful when we want to create objects that represent mathematical functions.
#
# Say for example we want to make a class for quadratic functions, which can be written as
# $$f(x) = ax^2 + bx + c.$$
# Here, there are three free parameters, the coefficients $a$, $b$, and $c$. We therefore let the constructor take these in. We then implement the mathematical function itself as the call special method:
# +
class Quadratic:
    """Callable object representing the quadratic f(x) = a*x^2 + b*x + c."""

    def __init__(self, a, b, c):
        # Keep all three coefficients together in a single tuple.
        self.coefficients = a, b, c

    def __call__(self, x):
        """Evaluate the quadratic at x (elementwise for numpy arrays)."""
        quad, lin, const = self.coefficients
        return quad*x**2 + lin*x + const
f = Quadratic(1, 2, 1)  # the object represents f(x) = x**2 + 2x + 1
print(f(4))  # calling it invokes Quadratic.__call__ behind the scenes
# -
# Here, when we assign `f = Quadratic(1, 2, 1)`, we are creating a new object of the class that represents the function $f(x) = x^2 + 2x + 1$. It isn't actually a Python function, but it is *callable*. We can therefore use it as if it were a normal function and call it by writing `f(4)`. When we write this, Python interprets it behind-the-scenes as
# ```Python
# Quadratic.__call__(f, 4)
# ```
#
# We can now easily create quadratic functions and use them as normal functions:
# +
import numpy as np
import matplotlib.pyplot as plt
# Three different quadratics, each a callable object.
f = Quadratic(1, 2, 1)
g = Quadratic(-1, 4, 4)
h = Quadratic(0, -2, 3)
# Evaluate each one over [-4, 4]; f(x) applies elementwise to the array.
x = np.linspace(-4, 4, 101)
plt.plot(x, f(x))
plt.plot(x, g(x))
plt.plot(x, h(x))
plt.show()
# -
# There are several advantages to implementing these quadratic functions as actual Quadratic-objects, rather than as normal Python functions. For one, they now have a custom type, which we can check using `isinstance(f, Quadratic)`. Thus, other parts of our code *knows* that the function is a quadratic function. We can also extend the class adding plenty of useful functionality. We could for example add functionality for adding or subtracting functions, producing new Quadratic-objects. This cannot be done with normal functions. Or we could add a method for returning the derivative. If you do this week's exercises, you will get the chance to implement all of these for a general degree polynomial.
# The fact that we can use callable objects as if they were functions is very useful in Python, and it is an example of [*polymorphism*](https://en.wikipedia.org/wiki/Polymorphism_(computer_science)). Polymorphism is another of the pillars of OOP, but the concept is a bit tricky to define or understand. The term itself comes from greek and means "many forms". It is a means of generalizing code, in that we can write different code that behaves the same under given circumstances, and thus we can use those objects as long as they have a given property we need. In this case, we can create objects that act and feel like functions, and we can use them for any purpose where we need a Python function.
#
# This is an example of Python's [*duck typing*](https://en.wikipedia.org/wiki/Duck_typing). This term comes from the saying: *"If it walks like a duck and it quacks like a duck"*. What we mean by this is that what *type* a given object is isn't that important, what is important is its behavior. In Python we therefore shouldn't check whether a given input is a Python function, instead we should check whether it is *callable*. This can be done with the built-in function:
# ```Python
# callable(f)
# ```
#
# #### Example: The derivative class
#
# Let's say we are given a function `newton(f, dfdx, x0)` that implements Newton's method. We want to use this function to find the roots of a function $f(x)$, however, we don't know the derivative $f'(x)$, that must also be sent in and we cannot find it analytically. Now, we know how to differentiate the function numerically, but we must send in a *callable* function as an argument, we cannot send in an array of values of the derivative.
#
# We can circumvent this by creating a `Derivative` class that creates a callable object of `f`:
class Derivative:
    """Wrap a function f as a callable approximating its derivative.

    Uses the central difference (f(x+h) - f(x-h)) / (2h) with step h.
    """

    def __init__(self, f, h=1e-6):
        self.f = f
        self.h = h

    def __call__(self, x):
        """Return the numerical approximation of f'(x)."""
        func, step = self.f, self.h
        return (func(x + step) - func(x - step))/(2*step)
# And using this class, we can implement the derivative as `dfdx = Derivative(f)`. The Newton's method function is now happy, as it has an object it can call to gain a specific value for any $x$. As an example, and test, let us check a trig function:
# +
x = 0.8281
f = np.sin
dfdx = Derivative(f)
print(dfdx(x))    # central-difference approximation of sin'(x)
print(np.cos(x))  # exact derivative, for comparison
# -
# ### More Special Methods
#
# We have shown just some special methods, and we will look at more examples in the next lecture. For a more comprehensive list and examples, see for example [this site](http://www.diveintopython3.net/special-method-names.html).
# ## Properties
#
# Earlier, we had an example for a class representing a sphere, let us repeat it here:
class Sphere:
    """Sphere storing radius, area and volume as plain data fields.

    NOTE: deliberately simplistic version used for the discussion below --
    area and volume are computed only in the constructor, so they go stale
    if radius is changed afterwards.
    """

    def __init__(self, radius):
        self.radius = radius
        self.area = 4*pi*radius**2
        self.volume = 4*pi*radius**3/3
# While this works, it might not be that good in practice. For one thing, there is nothing to prevent us from going in and changing one of these fields, without changing the others.
# +
football = Sphere(11)
print(football.area)
football.radius = 8   # changing the radius...
print(football.area)  # ...does NOT change the stored area (stale value)
# -
# When we change the radius, the area doesn't change! A normal user might expect that they can change the radius and that this should work, and so our code is not that user friendly, and definitely not foolproof.
#
#
#
# One work around we might think of is simply to make the area and the volume into methods to be called, instead of data fields:
class Sphere:
    """Sphere where area and volume are methods, recomputed on every call."""

    def __init__(self, radius):
        self.radius = radius

    def area(self):
        """Return the surface area for the current radius."""
        r = self.radius
        return 4*pi*r**2

    def volume(self):
        """Return the volume for the current radius."""
        r = self.radius
        return 4*pi*r**3/3
# +
football = Sphere(11)
print(football.area())   # area is a method now, so it must be called
football.radius = 8
print(football.area())   # recomputed from the updated radius
# -
# Now the code works as expected. However, it might be confusing to the user that the `radius` is a data field, while `area` and `volume` are methods, and thus need to be called. Just writing `football.area` would mean the *method* itself, and not the result of the calculation. This confusion could lead to some weird behavior that might be tricky to catch.
#
# One way to fix this is to implement the `area` and `volume` as properties in Python. To do this we use the *decorator* `@property`. A decorater is something you place outside a function/method definition and it changes the functionality of that function in some way. If we add the decorator to these methods, then we don't have to call them, but can treat them as normal data fields:
# +
class Sphere:
    """Sphere exposing area and volume as read-only computed properties."""

    def __init__(self, radius):
        self.radius = radius

    @property
    def area(self):
        """Surface area, recomputed from the current radius on each access."""
        r = self.radius
        return 4*pi*r**2

    @property
    def volume(self):
        """Volume, recomputed from the current radius on each access."""
        r = self.radius
        return 4*pi*r**3/3
football = Sphere(11)
print(football.area)        # no parentheses needed -- @property handles it
print(type(football.area))  # the result is a plain float
# -
# Note that we can access `football.area` without calling the method, we simply treat it as a data field. Checking the type confirms that it does indeed behave as a `float`!
# Using the property decorator allows us to implement a method that compiles or computes the data behind the scenes in some way, but allows the user to treat that property as a data field on the outside. We can now change the radius, and the `area` and `volume` properties will also change.
#
# However, our code still is a bit weird, as we can only change the radius, and the other two follow; we cannot specify the area or volume, and get the radius to change automatically. If you were to try to change the area or volume property directly:
# ```Python
# football.area = 1200
# ```
# You would get the following error:
# ```
# AttributeError: can't set attribute
# ```
# However, we *can* define this behavior into our class by defining a *setter*-method for our property. This is also done using a decorator:
# +
from math import pi, sqrt
class Sphere:
    """Sphere whose surface area is a settable, derived property."""

    def __init__(self, radius):
        self.radius = radius

    @property
    def area(self):
        """Surface area computed from the current radius."""
        return 4*pi*self.radius**2

    @area.setter
    def area(self, area):
        """Setting the area adjusts the radius to match; rejects negatives."""
        if area < 0:
            raise ValueError("Area cannot be negative")
        # Invert A = 4*pi*r**2 to find the new radius.
        self.radius = sqrt(area/(4*pi))
# -
# We can now change the radius data field, and the area will change automatically, or we can set the area, and the radius will change automatically (we skipped the volume method now, but we could add this as well). This is very useful if we want to have some control over the value that is set. For example, it should not be allowed to set a negative area.
# +
football = Sphere(11)
# Header row, then a radius/area pair after each change.
print(f"{'Radius':>10} | {'Area':<10}")
print(f"{football.radius:>10.1f} | {football.area:<10.1f}")
football.radius = 10  # setting the radius: the area property recomputes
print(f"{football.radius:>10.1f} | {football.area:<10.1f}")
football.area = 1200  # setting the area: the setter updates the radius
print(f"{football.radius:>10.1f} | {football.area:<10.1f}")
# -
# And if we try to set a negative area, we will get an error:
football.area = -1  # the area setter raises ValueError for negative values
# ### Avoiding repeated computations
#
# Our sphere class now works quite well and is user friendly. They can access the radius, surface area and volume as floats, and can set them directly and the others change as a result. However, this implementation might not be that efficient, as every time the user accesses `.area` or `.volume`, the actual values are computed from scratch. For such a simple computation, this isn't a big deal, but say this class was more complicated and involved a computation taking a minute to complete. It would be annoying to repeat this computation again and again. To get around this, we would store the computed values as *internal* data in the class, that the user should not interact with from outside the class. These are designated by a leading underscore in Python
class Sphere:
    """Sphere caching both radius and area so reads never recompute.

    `radius` and `area` are public properties backed by the private fields
    `_radius` and `_area`; setting either one updates both so they stay
    consistent. Negative values are rejected with a ValueError.
    """

    def __init__(self, radius):
        self._radius = radius
        self._area = 4*pi*radius**2

    @property
    def radius(self):
        """The sphere's radius."""
        return self._radius

    @radius.setter
    def radius(self, value):
        if value < 0:
            raise ValueError("Radius cannot be negative")
        self._radius = value
        # Recompute the cached area once, here, instead of on every read.
        self._area = 4*pi*value**2

    @property
    def area(self):
        """The sphere's surface area (cached)."""
        return self._area

    @area.setter
    def area(self, value):
        if value < 0:
            raise ValueError("Area cannot be negative")
        # Invert A = 4*pi*r**2 so the radius stays consistent.
        self._radius = sqrt(value/(4*pi))
        self._area = value
# Now, there is only a computation carried out every time any variable changes. This is more efficient in terms of computations, but less in terms of memory, as we store more data in the class.
# ### Private and Public variables
#
# In our final example, the user has the properties `football.radius` and `football.area` they can interact with, while the class itself stores the internal data in the fields `_radius` and `_area`. We call the first two *public* properties or variables. Here, public means that they are accessible from outside the class. The latter two variables are *private*, meaning they should only be accessed from inside the class and are not meant to be used from outside.
#
# Note that we give the private variables leading underscores in their name, which indicates they are private. In Python, there is no way to enforce private variables however, and the leading underscore is just a convention. Thus the user can go in and change these directly: `football._radius = 9`; however, this is breaking the convention that one should not change a private variable directly, and if this breaks the object, it is the user's fault. Other languages, such as Java and C++ however, *do* enforce private and public variables. If you attempt to access a private variable from outside, an error is thrown.
#
|
book/docs/lectures/python/intro_to_oop.ipynb
|