markdown stringlengths 0 1.02M | code stringlengths 0 832k | output stringlengths 0 1.02M | license stringlengths 3 36 | path stringlengths 6 265 | repo_name stringlengths 6 127 |
|---|---|---|---|---|---|
Сами вектора весов не совпали, но значения оптимизируемой функции близки, так что будем считать, что все ок. Изучаем скорость сходимости для $\lambda = 0.001$: | orac = make_oracle('breast-cancer_scale.txt', penalty='l1', reg=0.001)
# Fit the L1-regularized model and plot convergence of the error against
# three budgets: wall-clock time, oracle calls, and iteration count.
point = optimizer(orac, w0)
errs = optimizer.errs
title = 'lambda = 0.001'
# Fixed typo in the axis label: 'вермя' -> 'время' (Russian for "time").
convergence_plot(optimizer.times, errs, 'время работы, с', title)
convergence_plot(optimizer.orac_calls, errs, 'кол-во вызовов оракула', title)
convergence_plot(list(range(1, optimizer.n_iter + 1)), errs, 'кол-во итераций', title) | _____no_output_____ | Apache-2.0 | HW_exam/.ipynb_checkpoints/Exam_Prazdnichnykh-checkpoint.ipynb | AntonPrazdnichnykh/HSE.optimization |
Кажется, что скорость сходимости опять линейная Изучаем зависимость скорости сходимости и количества ненулевых компонент в решении от $\lambda$ | lambdas = [10**(-i) for i in range(8, 0, -1)]
non_zeros = []
# For each regularization strength, re-fit the model, plot per-iteration
# convergence, and record how many weight components are non-zero in the
# solution (L1 sparsity).  Loop-body indentation restored — it was lost in
# the notebook export and the cell would not parse as written.
for reg in lambdas:
    orac = make_oracle('breast-cancer_scale.txt', penalty='l1', reg=reg)
    point = optimizer(orac, w0)
    convergence_plot(list(range(1, optimizer.n_iter + 1)), optimizer.errs, 'кол-во итераций',
                     f"lambda = {reg}")
    non_zeros.append(len(np.nonzero(point)[0]))
plot(lambdas, non_zeros, '# nonzero components') | _____no_output_____ | Apache-2.0 | HW_exam/.ipynb_checkpoints/Exam_Prazdnichnykh-checkpoint.ipynb | AntonPrazdnichnykh/HSE.optimization |
Делаем те же выводы Построим напоследок графики для значений оптимизируемой функции и критерия остановки (ещё разок) в зависимости от итерации ($\lambda = 0.001$) | orac = make_oracle('breast-cancer_scale.txt', penalty='l1', reg=0.001)
# Re-fit at lambda = 0.001 and plot, per iteration, both the objective
# value and the stopping-criterion value.
point = optimizer(orac, w0)
title = 'lambda = 0.001'
# x-axis label 'кол-во итераций' = "number of iterations".
value_plot(list(range(1, optimizer.n_iter + 1)), optimizer.values, 'кол-во итераций', title)
convergence_plot(list(range(1, optimizer.n_iter + 1)), optimizer.errs, 'кол-во итераций', title)
Implementing BERT with SNGP | !pip install tensorflow_text==2.7.3
!pip install -U tf-models-official==2.7.0
import matplotlib.pyplot as plt
import matplotlib.colors as colors
import sklearn.metrics
import sklearn.calibration
import tensorflow_hub as hub
import tensorflow_datasets as tfds
import numpy as np
import tensorflow as tf
import pandas as pd
import json
import official.nlp.modeling.layers as layers
import official.nlp.optimization as optimization | _____no_output_____ | MIT | sngp_with_bert_aws.ipynb | tejashrigadre/Anomaly-detection-for-chat-bots |
Implement a standard BERT classifier that classifies text | gpus = tf.config.list_physical_devices('GPU')
gpus
# Standard BERT model
PREPROCESS_HANDLE = 'https://tfhub.dev/tensorflow/bert_en_uncased_preprocess/3'
MODEL_HANDLE = 'https://tfhub.dev/tensorflow/bert_en_uncased_L-12_H-768_A-12/3'
class BertClassifier(tf.keras.Model):
    """Standard BERT text classifier: preprocessing + BERT encoder + dense head.

    Indentation restored — the notebook export had flattened the class body,
    which would not parse as written.  Behavior is unchanged.
    """

    def __init__(self,
                 num_classes=150, inner_dim=768, dropout_rate=0.1,
                 **classifier_kwargs):
        """Builds the preprocessor, encoder and classification head.

        Args:
          num_classes: number of output classes (default 150 intents).
          inner_dim: hidden size of the classification head.
          dropout_rate: dropout rate inside the classification head.
          **classifier_kwargs: extra kwargs forwarded to the head layer.
        """
        super().__init__()
        self.classifier_kwargs = classifier_kwargs

        # Initiate the BERT encoder components (TF-Hub preprocessing and
        # encoder layers; handles are module-level constants).
        self.bert_preprocessor = hub.KerasLayer(PREPROCESS_HANDLE, name='preprocessing')
        self.bert_hidden_layer = hub.KerasLayer(MODEL_HANDLE, trainable=True, name='bert_encoder')

        # Defines the encoder and classification layers.
        self.bert_encoder = self.make_bert_encoder()
        self.classifier = self.make_classification_head(num_classes, inner_dim, dropout_rate)

    def make_bert_encoder(self):
        """Returns a Model mapping raw string inputs to BERT encoder outputs."""
        text_inputs = tf.keras.layers.Input(shape=(), dtype=tf.string, name='text')
        encoder_inputs = self.bert_preprocessor(text_inputs)
        encoder_outputs = self.bert_hidden_layer(encoder_inputs)
        return tf.keras.Model(text_inputs, encoder_outputs)

    def make_classification_head(self, num_classes, inner_dim, dropout_rate):
        """Builds the dense classification head; overridden by the SNGP subclass."""
        return layers.ClassificationHead(
            num_classes=num_classes,
            inner_dim=inner_dim,
            dropout_rate=dropout_rate,
            **self.classifier_kwargs)

    def call(self, inputs, **kwargs):
        """Forward pass: encode text, then classify the token-level sequence output."""
        encoder_outputs = self.bert_encoder(inputs)
        classifier_inputs = encoder_outputs['sequence_output']
        return self.classifier(classifier_inputs, **kwargs)
| _____no_output_____ | MIT | sngp_with_bert_aws.ipynb | tejashrigadre/Anomaly-detection-for-chat-bots |
Build the SNGP model: implement a BERT-SNGP model as designed by Google researchers | class ResetCovarianceCallback(tf.keras.callbacks.Callback):
def on_epoch_begin(self, epoch, logs=None):
    """Resets the GP head's covariance matrix at the beginning of each epoch.

    No-op at epoch 0 (nothing has been accumulated yet).  Indentation
    restored and docstring typo ("begining") fixed; behavior unchanged.
    """
    if epoch > 0:
        self.model.classifier.reset_covariance_matrix()
# SNGP variant of the BERT classifier: replaces the dense classification
# head with a Gaussian-process head for distance-aware uncertainty.
# (Lines kept byte-identical to the export; only comments added.)
class SNGPBertClassifier(BertClassifier):
# Override: build a GP classification head instead of a dense one.
def make_classification_head(self, num_classes, inner_dim, dropout_rate):
return layers.GaussianProcessClassificationHead(
num_classes=num_classes,
inner_dim=inner_dim,
dropout_rate=dropout_rate,
# gp_cov_momentum=-1 presumably selects the exact (non-momentum)
# covariance update — TODO confirm against the layer's API docs.
gp_cov_momentum=-1,
temperature=30.,
**self.classifier_kwargs)
# Override fit() so the covariance matrix is reset every epoch.
def fit(self, *args, **kwargs):
"""Adds ResetCovarianceCallback to model callbacks."""
kwargs['callbacks'] = list(kwargs.get('callbacks', []))
kwargs['callbacks'].append(ResetCovarianceCallback())
return super().fit(*args, **kwargs) | _____no_output_____ | MIT | sngp_with_bert_aws.ipynb | tejashrigadre/Anomaly-detection-for-chat-bots |
Load train and test datasets | is_train = pd.read_json('is_train.json')
is_train.columns = ['question','intent']
is_test = pd.read_json('is_test.json')
is_test.columns = ['question','intent']
oos_test = pd.read_json('oos_test.json')
oos_test.columns = ['question','intent']
is_test.shape | _____no_output_____ | MIT | sngp_with_bert_aws.ipynb | tejashrigadre/Anomaly-detection-for-chat-bots |
Make the train and test data. | #Generate codes
# Generate integer category codes for the intent labels over the combined
# train+test data so both splits share a single label encoding.
# NOTE: DataFrame.append was removed in pandas 2.0; pd.concat is the
# supported equivalent and stacks the rows identically.
is_data = pd.concat([is_train, is_test])
is_data.intent = pd.Categorical(is_data.intent)
is_data['code'] = is_data.intent.cat.codes

# In-scope evaluation data: the last 4500 rows (the original test split).
is_test = is_data[15000:19500]
is_test_queries = is_test.question
is_test_labels = is_test.intent
is_test_codes = is_test.code
is_eval_data = (tf.convert_to_tensor(is_test_queries), tf.convert_to_tensor(is_test_codes))

# Training data: the first 15000 rows (the original train split).
is_train = is_data[0:15000]
is_train_queries = is_train.question
is_train_labels = is_train.intent
is_train_codes = is_train.code
training_ds_queries = tf.convert_to_tensor(is_train_queries)
training_ds_labels = tf.convert_to_tensor(is_train_codes)
is_test.shape | _____no_output_____ | MIT | sngp_with_bert_aws.ipynb | tejashrigadre/Anomaly-detection-for-chat-bots |
Create a OOD evaluation dataset. For this, combine the in-scope test data 'is_test' and out-of-scope 'oos_test' data. Assign label 0 for in-scope and label 1 for out-of-scope data | train_size = len(is_train)
test_size = len(is_test)
oos_size = len(oos_test)
# Combines the in-domain and out-of-domain test examples.
oos_queries= tf.concat([is_test['question'], oos_test['question']], axis=0)
# Label 0 = in-scope (in-domain), label 1 = out-of-scope.
oos_labels = tf.constant([0] * test_size + [1] * oos_size)
# Converts into a TF dataset.
oos_eval_dataset = tf.data.Dataset.from_tensor_slices(
{"text": oos_queries, "label": oos_labels}) | _____no_output_____ | MIT | sngp_with_bert_aws.ipynb | tejashrigadre/Anomaly-detection-for-chat-bots |
Train and evaluate | TRAIN_EPOCHS = 4
TRAIN_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 256
#@title
def bert_optimizer(learning_rate,
                   batch_size=TRAIN_BATCH_SIZE, epochs=TRAIN_EPOCHS,
                   warmup_rate=0.1):
    """Creates an AdamWeightDecay optimizer with a linear-decay LR schedule.

    Indentation restored — the notebook export had flattened the function
    body.  Behavior is unchanged.

    Args:
      learning_rate: peak learning rate at the start of the decay.
      batch_size: training batch size, used to derive steps per epoch.
      epochs: number of training epochs.
      warmup_rate: fraction of total steps intended for warmup.

    Returns:
      An optimization.AdamWeightDecay optimizer instance.
    """
    # `train_size` is the module-level count of training examples.
    train_data_size = train_size
    steps_per_epoch = int(train_data_size / batch_size)
    num_train_steps = steps_per_epoch * epochs
    # NOTE(review): computed but never passed to the schedule/optimizer in
    # this cell — confirm whether warmup was meant to be wired in.
    num_warmup_steps = int(warmup_rate * num_train_steps)

    # Creates learning schedule: linear decay from learning_rate down to 0.
    lr_schedule = tf.keras.optimizers.schedules.PolynomialDecay(
        initial_learning_rate=learning_rate,
        decay_steps=num_train_steps,
        end_learning_rate=0.0)

    return optimization.AdamWeightDecay(
        learning_rate=lr_schedule,
        weight_decay_rate=0.01,
        epsilon=1e-6,
        exclude_from_weight_decay=['LayerNorm', 'layer_norm', 'bias'])
optimizer = bert_optimizer(learning_rate=1e-4)
loss = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True)
metrics = tf.metrics.SparseCategoricalAccuracy()
fit_configs = dict(batch_size=TRAIN_BATCH_SIZE,
epochs=TRAIN_EPOCHS,
validation_batch_size=EVAL_BATCH_SIZE,
validation_data=is_eval_data) | _____no_output_____ | MIT | sngp_with_bert_aws.ipynb | tejashrigadre/Anomaly-detection-for-chat-bots |
Model 1 - Batch size of 32 & 3 epochs | sngp_model = SNGPBertClassifier()
sngp_model.compile(optimizer=optimizer, loss=loss, metrics=metrics)
sngp_model.fit(training_ds_queries, training_ds_labels, **fit_configs) | Epoch 1/2
938/938 [==============================] - 481s 494ms/step - loss: 0.8704 - sparse_categorical_accuracy: 0.8241 - val_loss: 0.2888 - val_sparse_categorical_accuracy: 0.9473
Epoch 2/2
938/938 [==============================] - 464s 495ms/step - loss: 0.0647 - sparse_categorical_accuracy: 0.9853 - val_loss: 0.1979 - val_sparse_categorical_accuracy: 0.9598
| MIT | sngp_with_bert_aws.ipynb | tejashrigadre/Anomaly-detection-for-chat-bots |
Model 2 - Batch size of 16 & 2 epochs | sngp_model2 = SNGPBertClassifier()
sngp_model2.compile(optimizer=optimizer, loss=loss, metrics=metrics)
sngp_model2.fit(training_ds_queries, training_ds_labels, **fit_configs) | Epoch 1/3
938/938 [==============================] - 480s 495ms/step - loss: 0.9506 - sparse_categorical_accuracy: 0.8029 - val_loss: 0.3883 - val_sparse_categorical_accuracy: 0.9376
Epoch 2/3
938/938 [==============================] - 462s 493ms/step - loss: 0.0989 - sparse_categorical_accuracy: 0.9769 - val_loss: 0.2342 - val_sparse_categorical_accuracy: 0.9522
Epoch 3/3
938/938 [==============================] - 462s 493ms/step - loss: 0.0272 - sparse_categorical_accuracy: 0.9939 - val_loss: 0.2013 - val_sparse_categorical_accuracy: 0.9598
| MIT | sngp_with_bert_aws.ipynb | tejashrigadre/Anomaly-detection-for-chat-bots |
Model 3 - Batch size of 16 & 4 epochs | sngp_model3 = SNGPBertClassifier()
sngp_model3.compile(optimizer=optimizer, loss=loss, metrics=metrics)
sngp_model3.fit(training_ds_queries, training_ds_labels, **fit_configs) | Epoch 1/4
938/938 [==============================] - 477s 493ms/step - loss: 0.9459 - sparse_categorical_accuracy: 0.8066 - val_loss: 0.3804 - val_sparse_categorical_accuracy: 0.9393
Epoch 2/4
938/938 [==============================] - 465s 496ms/step - loss: 0.1192 - sparse_categorical_accuracy: 0.9730 - val_loss: 0.2526 - val_sparse_categorical_accuracy: 0.9511
Epoch 3/4
938/938 [==============================] - 466s 497ms/step - loss: 0.0372 - sparse_categorical_accuracy: 0.9917 - val_loss: 0.2169 - val_sparse_categorical_accuracy: 0.9564
Epoch 4/4
938/938 [==============================] - 465s 496ms/step - loss: 0.0135 - sparse_categorical_accuracy: 0.9974 - val_loss: 0.1992 - val_sparse_categorical_accuracy: 0.9629
| MIT | sngp_with_bert_aws.ipynb | tejashrigadre/Anomaly-detection-for-chat-bots |
Evaluate OOD performance Evaluate how well the model can detect the unfamiliar out-of-domain queries. |
# Runs `model` over the OOD evaluation dataset in batches and collects, per
# example, the maximum softmax probability (the model's confidence) and the
# ground-truth OOD label.  (Lines kept byte-identical to the export; only
# comments added.  The final `return oos_probs, oos_labels` sits on the
# following, metadata-mangled line.)
def oos_predict(model, ood_eval_dataset, **model_kwargs):
oos_labels = []
oos_probs = []
ood_eval_dataset = ood_eval_dataset.batch(EVAL_BATCH_SIZE)
for oos_batch in ood_eval_dataset:
oos_text_batch = oos_batch["text"]
oos_label_batch = oos_batch["label"]
# Forward pass; logits -> softmax -> confidence = max class probability.
pred_logits = model(oos_text_batch, **model_kwargs)
pred_probs_all = tf.nn.softmax(pred_logits, axis=-1)
pred_probs = tf.reduce_max(pred_probs_all, axis=-1)
oos_labels.append(oos_label_batch)
oos_probs.append(pred_probs)
# Concatenate the per-batch tensors into flat, dataset-length tensors.
oos_probs = tf.concat(oos_probs, axis=0)
oos_labels = tf.concat(oos_labels, axis=0)
return oos_probs, oos_labels | _____no_output_____ | MIT | sngp_with_bert_aws.ipynb | tejashrigadre/Anomaly-detection-for-chat-bots |
Computes the OOD probabilities as $1 - p(x)$, where $p(x)=softmax(logit(x))$ is the predictive probability. | sngp_probs, ood_labels = oos_predict(sngp_model, oos_eval_dataset)
sngp_probs2, ood_labels2 = oos_predict(sngp_model2, oos_eval_dataset)
sngp_probs3, ood_labels3 = oos_predict(sngp_model3, oos_eval_dataset)
# OOD probability = 1 - max softmax confidence, per model.
ood_probs = 1 - sngp_probs
ood_probs2 = 1 - sngp_probs2
ood_probs3 = 1 - sngp_probs3
plt.rcParams['figure.dpi'] = 140
# NOTE(review): the DEFAULT_* plotting constants below do not appear to be
# used anywhere in this notebook — candidates for removal.
DEFAULT_X_RANGE = (-3.5, 3.5)
DEFAULT_Y_RANGE = (-2.5, 2.5)
DEFAULT_CMAP = colors.ListedColormap(["#377eb8", "#ff7f00"])
DEFAULT_NORM = colors.Normalize(vmin=0, vmax=1,)
DEFAULT_N_GRID = 100
# Predictive uncertainty p*(1-p); maximal at p = 0.5.
ood_uncertainty = ood_probs * (1 - ood_probs)
ood_uncertainty2 = ood_probs2 * (1 - ood_probs2)
ood_uncertainty3 = ood_probs3 * (1 - ood_probs3)
s1 = np.array(sngp_probs.numpy())
print(s1[3000])
s2 = np.array(sngp_probs2.numpy())
print(s2[2000])
s3 = np.array(sngp_probs3.numpy())
print(s3[1000]) | 0.9983203
| MIT | sngp_with_bert_aws.ipynb | tejashrigadre/Anomaly-detection-for-chat-bots |
Compute the Area under precision-recall curve (AUPRC) for OOD probability v.s. OOD detection accuracy. | precision, recall, _ = sklearn.metrics.precision_recall_curve(ood_labels, ood_probs)
precision2, recall2, _ = sklearn.metrics.precision_recall_curve(ood_labels2, ood_probs2)
precision3, recall3, _ = sklearn.metrics.precision_recall_curve(ood_labels3, ood_probs3)
print((precision3)
print(recall3) | _____no_output_____ | MIT | sngp_with_bert_aws.ipynb | tejashrigadre/Anomaly-detection-for-chat-bots |
[0.23380874 0.23362956 0.23368421 ... 1. 1. 1. ][1. 0.999 0.999 ... 0.002 0.001 0. ] | sklearn.metrics.recall_score(oos_labels, ood_labels3, average='weighted')
# NOTE(review): the original call compared the ground-truth OOD labels
# against themselves (precision_score(oos_labels, ood_labels3)), which is
# always 1.0 and meaningless.  Score the thresholded OOD predictions
# (OOD prob > 0.5) against the true labels instead — TODO confirm the
# intended decision threshold.
sklearn.metrics.precision_score(ood_labels3, np.asarray(ood_probs3 > 0.5, dtype=int), average='weighted')
auprc = sklearn.metrics.auc(recall, precision)
print(f'SNGP AUPRC: {auprc:.4f}')
auprc2 = sklearn.metrics.auc(recall2, precision2)
print(f'SNGP AUPRC 2: {auprc2:.4f}')
auprc3 = sklearn.metrics.auc(recall3, precision3)
print(f'SNGP AUPRC 3: {auprc3:.4f}')
prob_true, prob_pred = sklearn.calibration.calibration_curve(
ood_labels, ood_probs, n_bins=10, strategy='quantile')
prob_true2, prob_pred2 = sklearn.calibration.calibration_curve(
ood_labels2, ood_probs2, n_bins=10, strategy='quantile')
prob_true3, prob_pred3 = sklearn.calibration.calibration_curve(
ood_labels3, ood_probs3, n_bins=10, strategy='quantile')
plt.plot(prob_pred, prob_true)
plt.plot([0., 1.], [0., 1.], c='k', linestyle="--")
plt.xlabel('Predictive Probability')
plt.ylabel('Predictive Accuracy')
plt.title('Calibration Plots, SNGP')
plt.show()
plt.plot(prob_pred2, prob_true2)
plt.plot([0., 1.], [0., 1.], c='k', linestyle="--")
plt.xlabel('Predictive Probability')
plt.ylabel('Predictive Accuracy')
plt.title('Calibration Plots, SNGP')
plt.show()
plt.plot(prob_pred3, prob_true3)
plt.plot([0., 1.], [0., 1.], c='k', linestyle="--")
plt.xlabel('Predictive Probability')
plt.ylabel('Predictive Accuracy')
plt.title('Calibration Plots, SNGP')
plt.show()
# The names `roc_auc_score`, `roc_curve` and `pyplot` are used below but
# were never imported in this notebook (NameError as written); bind them
# here so the cell — and the `pyplot.show()` on the following line — runs.
from sklearn.metrics import roc_auc_score, roc_curve
from matplotlib import pyplot

# calculate scores: ROC AUC of true OOD labels vs predicted OOD probability
auc1 = roc_auc_score(oos_labels, ood_probs)
auc2 = roc_auc_score(oos_labels, ood_probs2)
auc3 = roc_auc_score(oos_labels, ood_probs3)
# summarize scores
print('SNGP Model 1: ROC AUC=%.3f' % (auc1))
print('SNGP Model 2: ROC AUC=%.3f' % (auc2))
print('SNGP Model 3: ROC AUC=%.3f' % (auc3))
# calculate roc curves
fpr1, tpr1, _ = roc_curve(oos_labels, ood_probs)
fpr2, tpr2, _ = roc_curve(oos_labels, ood_probs2)
fpr3, tpr3, _ = roc_curve(oos_labels, ood_probs3)
# plot the roc curve for each model
pyplot.plot(fpr1, tpr1, marker='.', label='SNGP Model 1')
pyplot.plot(fpr2, tpr2, marker='*', label='SNGP Model 2')
pyplot.plot(fpr3, tpr3, marker='+', label='SNGP Model 3')
# axis labels — fixed: the x-axis of a ROC curve is the false positive
# rate, which is not precision; TPR does equal recall.
pyplot.xlabel('False Positive Rate')
pyplot.ylabel('True Positive Rate (Recall)')
# show the legend
pyplot.legend()
# show the plot
pyplot.show() | SNGP Model 1: ROC AUC=0.972
SNGP Model 2: ROC AUC=0.973
SNGP Model 3: ROC AUC=0.973
| MIT | sngp_with_bert_aws.ipynb | tejashrigadre/Anomaly-detection-for-chat-bots |
T81-558: Applications of Deep Neural Networks**Module 2: Python for Machine Learning*** Instructor: [Jeff Heaton](https://sites.wustl.edu/jeffheaton/), McKelvey School of Engineering, [Washington University in St. Louis](https://engineering.wustl.edu/Programs/Pages/default.aspx)* For more information visit the [class website](https://sites.wustl.edu/jeffheaton/t81-558/). Module 2 MaterialMain video lecture:* Part 2.1: Introduction to Pandas [[Video]](https://www.youtube.com/watch?v=bN4UuCBdpZc&list=PLjy4p-07OYzulelvJ5KVaT2pDlxivl_BN) [[Notebook]](t81_558_class_02_1_python_pandas.ipynb)* Part 2.2: Categorical Values [[Video]](https://www.youtube.com/watch?v=4a1odDpG0Ho&list=PLjy4p-07OYzulelvJ5KVaT2pDlxivl_BN) [[Notebook]](t81_558_class_02_2_pandas_cat.ipynb)* Part 2.3: Grouping, Sorting, and Shuffling in Python Pandas [[Video]](https://www.youtube.com/watch?v=YS4wm5gD8DM&list=PLjy4p-07OYzulelvJ5KVaT2pDlxivl_BN) [[Notebook]](t81_558_class_02_3_pandas_grouping.ipynb)* **Part 2.4: Using Apply and Map in Pandas for Keras** [[Video]](https://www.youtube.com/watch?v=XNCEZ4WaPBY&list=PLjy4p-07OYzulelvJ5KVaT2pDlxivl_BN) [[Notebook]](t81_558_class_02_4_pandas_functional.ipynb)* Part 2.5: Feature Engineering in Pandas for Deep Learning in Keras [[Video]](https://www.youtube.com/watch?v=BWPTj4_Mi9E&list=PLjy4p-07OYzulelvJ5KVaT2pDlxivl_BN) [[Notebook]](t81_558_class_02_5_pandas_features.ipynb) Google CoLab InstructionsThe following code ensures that Google CoLab is running the correct version of TensorFlow. | try:
%tensorflow_version 2.x
COLAB = True
print("Note: using Google CoLab")
except:
print("Note: not using Google CoLab")
COLAB = False | Note: not using Google CoLab
| Apache-2.0 | t81_558_class_02_4_pandas_functional.ipynb | AritraJana1810/t81_558_deep_learning |
Part 2.4: Apply and Map If you've ever worked with Big Data or functional programming languages before, you've likely heard of map/reduce. Map and reduce are two functions that apply a task that you create to a data frame. Pandas supports functional programming techniques that allow you to use functions across en entire data frame. In addition to functions that you write, Pandas also provides several standard functions for use with data frames. Using Map with DataframesThe map function allows you to transform a column by mapping certain values in that column to other values. Consider the Auto MPG data set that contains a field **origin_name** that holds a value between one and three that indicates the geographic origin of each car. We can see how to use the map function to transform this numeric origin into the textual name of each origin.We will begin by loading the Auto MPG data set. | import os
import pandas as pd
import numpy as np
df = pd.read_csv(
"https://data.heatonresearch.com/data/t81-558/auto-mpg.csv",
na_values=['NA', '?'])
pd.set_option('display.max_columns', 7)
pd.set_option('display.max_rows', 5)
display(df) | _____no_output_____ | Apache-2.0 | t81_558_class_02_4_pandas_functional.ipynb | AritraJana1810/t81_558_deep_learning |
The **map** method in Pandas operates on a single column. You provide **map** with a dictionary of values to transform the target column. The map keys specify what values in the target column should be turned into values specified by those keys. The following code shows how the map function can transform the numeric values of 1, 2, and 3 into the string values of North America, Europe and Asia. | # Apply the map
# Apply the map: translate the numeric origin codes (1/2/3) into
# human-readable region names in a new column.
df['origin_name'] = df['origin'].map(
    {1: 'North America', 2: 'Europe', 3: 'Asia'})

# Shuffle the data, so that we hopefully see
# more regions.
df = df.reindex(np.random.permutation(df.index))

# Display (limit columns/rows shown by pandas).
pd.set_option('display.max_columns', 7)
pd.set_option('display.max_rows', 10)
display(df) | _____no_output_____ | Apache-2.0 | t81_558_class_02_4_pandas_functional.ipynb | AritraJana1810/t81_558_deep_learning |
Using Apply with DataframesThe **apply** function of the data frame can run a function over the entire data frame. You can use either be a traditional named function or a lambda function. Python will execute the provided function against each of the rows or columns in the data frame. The **axis** parameter specifies of the function is run across rows or columns. For axis = 1, rows are used. The following code calculates a series called **efficiency** that is the **displacement** divided by **horsepower**. | efficiency = df.apply(lambda x: x['displacement']/x['horsepower'], axis=1)
display(efficiency[0:10]) | _____no_output_____ | Apache-2.0 | t81_558_class_02_4_pandas_functional.ipynb | AritraJana1810/t81_558_deep_learning |
You can now insert this series into the data frame, either as a new column or to replace an existing column. The following code inserts this new series into the data frame. | df['efficiency'] = efficiency | _____no_output_____ | Apache-2.0 | t81_558_class_02_4_pandas_functional.ipynb | AritraJana1810/t81_558_deep_learning |
Feature Engineering with Apply and Map In this section, we will see how to calculate a complex feature using map, apply, and grouping. The data set is the following CSV:* https://www.irs.gov/pub/irs-soi/16zpallagi.csv This URL contains US Government public data for "SOI Tax Stats - Individual Income Tax Statistics." The entry point to the website is here:* https://www.irs.gov/statistics/soi-tax-stats-individual-income-tax-statistics-2016-zip-code-data-soi Documentation describing this data is at the above link.For this feature, we will attempt to estimate the adjusted gross income (AGI) for each of the zip codes. The data file contains many columns; however, you will only use the following:* STATE - The state (e.g., MO)* zipcode - The zipcode (e.g. 63017)* agi_stub - Six different brackets of annual income (1 through 6) * N1 - The number of tax returns for each of the agi_stubsNote, the file will have six rows for each zip code, for each of the agi_stub brackets. You can skip zip codes with 0 or 99999.We will create an output CSV with these columns; however, only one row per zip code. Calculate a weighted average of the income brackets. For example, the following six rows are present for 63017:|zipcode |agi_stub | N1 ||--|--|-- ||63017 |1 | 4710 ||63017 |2 | 2780 ||63017 |3 | 2130 ||63017 |4 | 2010 ||63017 |5 | 5240 ||63017 |6 | 3510 |We must combine these six rows into one. For privacy reasons, AGI's are broken out into 6 buckets. We need to combine the buckets and estimate the actual AGI of a zipcode. 
To do this, consider the values for N1:* 1 = 1 to 25,000* 2 = 25,000 to 50,000* 3 = 50,000 to 75,000* 4 = 75,000 to 100,000* 5 = 100,000 to 200,000* 6 = 200,000 or moreThe median of each of these ranges is approximately:* 1 = 12,500* 2 = 37,500* 3 = 62,500 * 4 = 87,500* 5 = 112,500* 6 = 212,500Using this you can estimate 63017's average AGI as:```>>> totalCount = 4710 + 2780 + 2130 + 2010 + 5240 + 3510>>> totalAGI = 4710 * 12500 + 2780 * 37500 + 2130 * 62500 + 2010 * 87500 + 5240 * 112500 + 3510 * 212500>>> print(totalAGI / totalCount)88689.89205103042```We begin by reading in the government data. | import pandas as pd
df=pd.read_csv('https://www.irs.gov/pub/irs-soi/16zpallagi.csv') | _____no_output_____ | Apache-2.0 | t81_558_class_02_4_pandas_functional.ipynb | AritraJana1810/t81_558_deep_learning |
First, we trim all zip codes that are either 0 or 99999. We also select the three fields that we need. | df=df.loc[(df['zipcode']!=0) & (df['zipcode']!=99999),
['STATE','zipcode','agi_stub','N1']]
pd.set_option('display.max_columns', 0)
pd.set_option('display.max_rows', 10)
display(df) | _____no_output_____ | Apache-2.0 | t81_558_class_02_4_pandas_functional.ipynb | AritraJana1810/t81_558_deep_learning |
We replace all of the **agi_stub** values with the correct median values with the **map** function. | medians = {1:12500,2:37500,3:62500,4:87500,5:112500,6:212500}
df['agi_stub']=df.agi_stub.map(medians)
pd.set_option('display.max_columns', 0)
pd.set_option('display.max_rows', 10)
display(df) | _____no_output_____ | Apache-2.0 | t81_558_class_02_4_pandas_functional.ipynb | AritraJana1810/t81_558_deep_learning |
Next, we group the data frame by zip code. | groups = df.groupby(by='zipcode') | _____no_output_____ | Apache-2.0 | t81_558_class_02_4_pandas_functional.ipynb | AritraJana1810/t81_558_deep_learning |
The program applies a lambda is applied across the groups, and then calculates the AGI estimate. | df = pd.DataFrame(groups.apply(
lambda x:sum(x['N1']*x['agi_stub'])/sum(x['N1']))) \
.reset_index()
pd.set_option('display.max_columns', 0)
pd.set_option('display.max_rows', 10)
display(df) | _____no_output_____ | Apache-2.0 | t81_558_class_02_4_pandas_functional.ipynb | AritraJana1810/t81_558_deep_learning |
We can now rename the new agi_estimate column. | df.columns = ['zipcode','agi_estimate']
pd.set_option('display.max_columns', 0)
pd.set_option('display.max_rows', 10)
display(df) | _____no_output_____ | Apache-2.0 | t81_558_class_02_4_pandas_functional.ipynb | AritraJana1810/t81_558_deep_learning |
Finally, we check to see that our zip code of 63017 got the correct value. | df[ df['zipcode']==63017 ] | _____no_output_____ | Apache-2.0 | t81_558_class_02_4_pandas_functional.ipynb | AritraJana1810/t81_558_deep_learning |
Charting a path into the data science field This project attempts to shed light on the path or paths to becoming a data science professional in the United States.Data science is a rapidly growing field, and the demand for data scientists is outpacing supply. In the past, most Data Scientist positions went to people with PhDs in Computer Science. I wanted to know if that is changing in light of both the increased job openings and the expanding definition of data science that has come with more companies realizing the wealth of raw data they have available for analysis, and how that can help to grow and refine their businesses. Business Questions1. Do you need a a formal degree?2. What programming language(s) do data science professionals need to know?3. What are the preferred online learning platforms to gain data science knowledge and skills? DataSince 2017, Kaggle ('The world's largest data science community') has annually surveyed its users on demographics, practices, and preferences. This notebook explores the data from Kaggle's 2020 Machine Learning and Data Science survey. A caveat: Kaggle is heavy on Machine Learning and competitions, and while it claims over 8 million users the group may not be representative of the overall data science community. Additionally,survey respondents are self-selected, so we can't extrapolate any findings to the data science community as a whole, but the trends and demographics amongst Kaggle survey takers may still offer insights about data science professionals. The first step is importing the necessary libraries and data. | import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import textwrap
%matplotlib inline
from matplotlib.ticker import PercentFormatter
import warnings
warnings.filterwarnings('ignore')
df = pd.read_csv('./kaggle_survey_2020_responses.csv')
low_memory = False | _____no_output_____ | CNRI-Python | Charting a path into the data science field.ipynb | khiara/DSND_Kaggle_2020_Survey |
Initial data exploration and cleaningLet's take a look at the survey data. | # Let's look at the first 5 rows of the dataset
df.head() | _____no_output_____ | CNRI-Python | Charting a path into the data science field.ipynb | khiara/DSND_Kaggle_2020_Survey |
One thing we can see from this: some questions are tied to a single column, with a number of answers possible; these questions only allowed survey respondents to choose one answer from among the options. Other questions take up multiple columns, with each column tied to a specific answer; these were questions that allowed users to choose more than one option as the answer ('select all that apply'). The two types of questions will require different approaches to data preparation. But first, we'll do some cleaning. The top row of data contains the question titles. We'll remove that, as well as the first column of survey completion time values. | # Removing the first column and the first row
df.drop(['Time from Start to Finish (seconds)'], axis=1, inplace=True)
df = df.loc[1:, :]
df.head()
df.shape | _____no_output_____ | CNRI-Python | Charting a path into the data science field.ipynb | khiara/DSND_Kaggle_2020_Survey |
There are over 20,000 responses, with 354 answer fields. Data preparation and filtering To improve readability of visualizations, we'll aggregate some fields, shorten some labels, and re-order categories. | # Aggregating the nonbinary answers
df.loc[(df.Q2 == 'Prefer not to say'), 'Q2'] = 'Other Response'
df.loc[(df.Q2 == 'Prefer to self-describe'),'Q2'] = 'Other Response'
df.loc[(df.Q2 == 'Nonbinary'), 'Q2'] = 'Other Response'
# Abbreviating country name
df.loc[(df.Q3 == 'United States of America'),'Q3']='USA'
# Shortening education level descriptions
df.loc[(df.Q4 == 'Doctoral degree'),'Q4']='PhD'
df.loc[(df.Q4 == 'Master’s degree'),'Q4']='Master’s'
df.loc[(df.Q4 == 'Bachelor’s degree'),'Q4']='Bachelor’s'
df.loc[(df.Q4 == "Some college/university study without earning a bachelor’s degree"), 'Q4']='Some college/university'
df.loc[(df.Q4 == 'No formal education past high school'), 'Q4']='High school'
df.loc[(df.Q4 == 'I prefer not to answer'), 'Q4']='Prefer not to answer'
# Ordering education levels by reverse typical chronological completion
q4_order = [
'PhD',
'Master’s',
'Professional degree',
'Bachelor’s',
'Some college/university',
'High school',
'Prefer not to answer']
# Putting coding experience answers in order from shortest time to longest
q6_order = [
'I have never written code',
'< 1 years',
'1-2 years',
'3-5 years',
'5-10 years',
'10-20 years',
'20+ years']
df.loc[(df.Q37_Part_9 == 'Cloud-certification programs (direct from AWS, Azure, GCP, or similar)'), 'Q37_Part_9']='Cloud-certification programs'
df.loc[(df.Q37_Part_10 == 'University Courses (resulting in a university degree)'), 'Q37_Part_10']='University Courses resulting in a degree' | _____no_output_____ | CNRI-Python | Charting a path into the data science field.ipynb | khiara/DSND_Kaggle_2020_Survey |
We're going to focus on the US answers from currently employed Kagglers. | # Filtering for just US responses
us_df = df[df['Q3'] == 'USA']
# Filtering to only include currently employed Kagglers
q5_order = [
'Data Scientist',
'Software Engineer',
'Data Analyst',
'Research Scientist',
'Product/Project Manager',
'Business Analyst',
'Machine Learning Engineer',
'Data Engineer',
'Statistician',
'DBA/Database Engineer',
'Other']
us_df = us_df[us_df['Q5'].isin(q5_order)] | _____no_output_____ | CNRI-Python | Charting a path into the data science field.ipynb | khiara/DSND_Kaggle_2020_Survey |
We're interested in the demographic questions at the beginning, plus coding experience, coding languages used, and online learning platforms used. | # Filtering to only include specific question columns
us_df = us_df.loc[:, ['Q1', 'Q2', 'Q3', 'Q4', 'Q5', 'Q6', 'Q7_Part_1', 'Q7_Part_2','Q7_Part_3','Q7_Part_4','Q7_Part_5',
'Q7_Part_6', 'Q7_Part_7','Q7_Part_8','Q7_Part_9','Q7_Part_10','Q7_Part_11', 'Q7_Part_12', 'Q7_OTHER',
'Q37_Part_1', 'Q37_Part_2', 'Q37_Part_3', 'Q37_Part_4', 'Q37_Part_5', 'Q37_Part_6', 'Q37_Part_7',
'Q37_Part_8', 'Q37_Part_9', 'Q37_Part_10','Q37_Part_11', 'Q37_OTHER']]
us_df.isna().sum() | _____no_output_____ | CNRI-Python | Charting a path into the data science field.ipynb | khiara/DSND_Kaggle_2020_Survey |
Not much in the way of missing values in the first 6 questions; that changes for the multiple-column questions, as expected, since users only filled in the column when they were choosing that particular option. We'll address that by converting the missing values to zeros in the helper functions. | us_df.shape | _____no_output_____ | CNRI-Python | Charting a path into the data science field.ipynb | khiara/DSND_Kaggle_2020_Survey |
This will be the data for our analysis -- covering 1680 currently employed Kagglers in the US. Helper functions A few functions to help with data visualizations. The first two plot a barchart with a corresponding list of the counts and percentages for the values; one handles single-column questions and the other handles multiple-column questions. The third and fourth are heatmap functions -- one for single-column questions, and one for multiple-column questions. | def list_and_bar(qnum, q_order, title):
'''
INPUT:
qnum - the y-axis variable, a single-column question
q_order - the order to display responses on the barchart
title - the title of the barchart
OUTPUT:
1. A list of responses to the selected question, in descending order
2. A horizontal barchart showing the values, in sorted order
'''
# creating a dataframe of values to include both raw counts and percentages
val_list = pd.DataFrame()
val_list['Count'] = us_df[qnum].value_counts()
pct = round(val_list * 100/us_df[qnum].count(),2)
val_list['Pct'] = pct
print(val_list)
fig, ax = plt.subplots(1, 1, figsize=(12,6))
ax = us_df[qnum].value_counts()[q_order].plot(kind='barh')
# reversing the order of y axis --
# the horizontal barchart displays values in the reverse order of a regular barchart (i.e., where the barchart might show
# a - b - c left to right, the corresponding horizontal barchart would show c at the top, and a at the bottom)
ax.invert_yaxis()
plt.title(title, fontsize = 14, fontweight = 'bold')
plt.show()
def list_and_bar_mc(mc_df, title):
    """Show a multiple-column question: print its table, then bar-plot it.

    mc_df - dataframe for one multiple-column question (has a 'Count' column)
    title - the title of the barchart
    """
    print(mc_df)
    _, axis = plt.subplots(1, 1, figsize=(12, 6))
    # ascending sort so the biggest bar ends up at the top of the barh plot
    counts_ascending = mc_df['Count'].sort_values()
    counts_ascending.plot(kind='barh')
    plt.title(title, fontsize=14, fontweight='bold')
    plt.show()
def heatmap(qnum_a, qnum_b, title, order_rows, columns):
    '''
    Heatmap of row-wise percentages between two single-column questions.

    INPUT:
    qnum_a - the x-axis variable, a single-column question
    qnum_b - the y-axis variable, a single-column question
    title - the title of the heatmap, describing the variables in the visualization
    order_rows - sorted order for the y-axis
    columns - sorted order for the x-axis
    OUTPUT:
    A heatmap showing the correlation between the two chosen variables
    '''
    # cross-tabulate: rows = qnum_b answers, columns = qnum_a answers
    vals = us_df[[qnum_a, qnum_b]].groupby(qnum_b)[qnum_a].value_counts().unstack()
    # prepend an 'All' row with the column totals so overall shares are visible
    vals_rowsums = pd.DataFrame([vals.sum(axis=0).tolist()], columns=vals.columns, index=['All'])
    vals = pd.concat([vals_rowsums, vals], axis=0)
    # convert each row to %; the + 0.001 guards against division by zero
    vals = ((vals.T / (vals.sum(axis=1) + 0.001)).T) * 100
    # fix: use the parameters directly — the original re-assigned them into
    # throwaway locals (`order = order_rows`, `columns = columns`), dead code
    vals = vals.reindex(order_rows).reindex(columns=columns)
    fig, ax = plt.subplots(1, 1, figsize=[12, 6])
    ax = sns.heatmap(ax=ax, data=vals, cmap='GnBu', cbar_kws={'format': '%.0f%%'})
    plt.title(title, fontsize=14, fontweight='bold')
    ax.set_xlabel('')
    ax.set_ylabel('')
    plt.show()
def heatmap_mc(qnum, qnum_mc, title, columns, order_rows):
    '''
    Heatmap of a single-column question against a multiple-column question.

    INPUT:
    qnum - the y-axis variable, a single-column question
    qnum_mc - the x-axis variable, the prefix of a multiple-column question (e.g. 'Q7')
    title - the title of the heatmap, describing the variables in the visualization
    columns - a list of column names, representing the multiple-column answer options, ordered
    order_rows - sorted order for the y-axis
    OUTPUT:
    1. A heatmap showing the correlation between the two specified variables
    2. Prints the average number of answer options chosen (excluding 'None')
    '''
    # single-column question as a one-column dataframe
    df_qnum = us_df[qnum]
    df_qnum = pd.DataFrame(df_qnum)
    # all columns belonging to the multiple-column question; fix: copy() so the
    # column rename below does not operate on (and warn about) a view of us_df
    cols_mc = [col for col in us_df if col.startswith(qnum_mc)]
    df_mc = us_df[cols_mc].copy()
    df_mc.columns = columns
    # binary-encode each option: 1 if the user chose that answer, 0 if not
    df_mc = df_mc.notnull().astype(int)
    # join the single-column answers onto the binary option matrix
    df_join = df_qnum.join(df_mc)
    # aggregate counts per answer option and re-order the rows
    df_agg = df_join.groupby([qnum]).agg('sum')
    df_agg = df_agg.reindex(order_rows)
    df_agg['users'] = df_join.groupby(qnum)[qnum].count()
    # convert counts to per-user rates, then drop the helper column
    df_agg = df_agg.div(df_agg.loc[:, 'users'], axis=0)
    df_agg.drop(columns='users', inplace=True)
    fig, ax = plt.subplots(1, 1, figsize=(12, 6))
    ax = sns.heatmap(ax=ax, data=df_agg, cmap='GnBu')
    cbar = ax.collections[0].colorbar
    cbar.ax.yaxis.set_major_formatter(PercentFormatter(1, 0))
    plt.title(title, fontsize=14, fontweight='bold')
    ax.set_xlabel('')
    ax.set_ylabel('')
    plt.show()
    # average number of options chosen, minus tabulations for 'None';
    # fix: drop on a copy with errors='ignore' — the original dropped in place
    # (mutating df_join) and raised KeyError when no 'None' column existed
    df_temp = df_join.drop(columns='None', errors='ignore')
    rowsums = df_temp.sum(axis=1)
    avg_num = round(rowsums.mean(), 2)
    print('Average number of options chosen by survey respondents: ' + str(avg_num) + '.')
| _____no_output_____ | CNRI-Python | Charting a path into the data science field.ipynb | khiara/DSND_Kaggle_2020_Survey |
Analysis and visualizations We'll start by looking at the age and gender distribution, just to get an overview of the response community. | plt.figure(figsize=[12,6])
# sorted age buckets (Q1) fix the x-axis order; bars are split by gender (Q2)
us_ages = us_df['Q1'].value_counts().sort_index()
sns.countplot(data = us_df, x = 'Q1', hue = 'Q2', order = us_ages.index)
plt.title('Age and Gender Distribution') | _____no_output_____ | CNRI-Python | Charting a path into the data science field.ipynb | khiara/DSND_Kaggle_2020_Survey |
The survey response pool skews heavily male, with most US Kagglers between the ages of 25 and 45. | list_and_bar('Q6', q6_order, 'Years of Coding Experience') | Count Pct
3-5 years 367 22.00
20+ years 349 20.92
5-10 years 334 20.02
10-20 years 288 17.27
1-2 years 171 10.25
< 1 years 104 6.24
I have never written code 55 3.30
| CNRI-Python | Charting a path into the data science field.ipynb | khiara/DSND_Kaggle_2020_Survey |
Around 80 percent of those responding have 3 or more years experience coding. 1. Do you need a formal degree to become a data science professional? Let's look at formal education, and how it correlates with job title. | list_and_bar('Q4', q4_order, 'Highest Level of Education Attained')
list_and_bar('Q5', q5_order, 'Current Job Title')
heatmap('Q4', 'Q5', 'Roles by Education Level', q5_order, q4_order) | _____no_output_____ | CNRI-Python | Charting a path into the data science field.ipynb | khiara/DSND_Kaggle_2020_Survey |
Question 1 analysis With almost 49% of the responses, a Master's degree was by far the most common level of education listed, more than double the next most popular answer. Other notable observations: * Sixty-eight percent of US Kagglers hold a Master's Degree or higher. * Research scientists and statisticians are most likely to hold PhDs, followed by Data Scientists. * Relatively few survey respondents (around 5%) indicate they do not have at least a Bachelor's degree. * Only 23% of those responding hold the title of Data Scientist, but it is nonetheless the title with the highest count. Arguably anyone who is active on Kaggle and who would complete their survey considers themself to be either in, or interested in, the data science field, if not actively working as a Data Scientist. Question 2. What programming language(s) do Data Scientists need to know? Now we'll turn to programming languages used. As this is a "Select all that apply" question, with each language option appearing as a separate column, we need to do some processing to get the data into a format for easier graphing and analysis. | # creating a dataframe of the language options and the number of times each language was selected
# tally, for each Q7_* column, how many respondents selected that language
languages = pd.DataFrame()
for col in us_df.columns:
    if(col.startswith('Q7_')):
        # each Q7_* column holds a single language label, so value_counts() has one row
        language = us_df[col].value_counts()
        # NOTE(review): DataFrame.append is removed in pandas >= 2.0; pd.concat is the replacement
        languages = languages.append({'Language':language.index[0], 'Count':language[0]}, ignore_index=True)
languages = languages.set_index('Language')
languages = languages.sort_values(by = 'Count', ascending = False)
# share of all language *selections* (multi-select: sums to 100% of picks, not of users)
languages_tot = sum(languages.Count)
languages['Pct'] = round((languages['Count'] * 100 / languages_tot), 2)
list_and_bar_mc(languages, 'Programming Languages Used')
heatmap_mc('Q5', 'Q7', 'Language Use by Role', languages.index, q5_order)
heatmap_mc('Q4', 'Q7','Language Use by Education Level', languages.index, q4_order)
heatmap_mc('Q6', 'Q7', 'Language Use by Years Coding', languages.index, q6_order) | _____no_output_____ | CNRI-Python | Charting a path into the data science field.ipynb | khiara/DSND_Kaggle_2020_Survey |
Question 2 analysis Python was the most widely used language, followed by SQL and R. Python held the top spot across almost all job roles -- only Statisticians listed another language (SQL) higher -- and for all education levels and coding experience. R enjoys widespread popularity across education level and years coding as well; SQL shows a high number of users overall, but they are more concentrated in people holding Master's or PhD degrees, working as Statisticians, Data Scientists and Data Analysts. Kagglers reported using 2-3 languages on a regular basis. 3. What are the preferred online learning platforms to gain data science knowledge and skills? Regarding online learning, Kaggle's survey asked, "On which platforms have you begun or completed data science courses? (Select all that apply)." We'll handle the answers similarly to the language data. | # creating a dataframe of online course providers and the number of times each was selected by users
# tally, for each Q37_* column, how many respondents used that learning platform
platforms = pd.DataFrame()
for col in us_df.columns:
    if(col.startswith('Q37_')):
        # each Q37_* column holds a single platform label, so value_counts() has one row
        platform = us_df[col].value_counts()
        # NOTE(review): DataFrame.append is removed in pandas >= 2.0; pd.concat is the replacement
        platforms = platforms.append({'Platform':platform.index[0], 'Count':platform[0]}, ignore_index=True)
platforms = platforms.set_index('Platform')
platforms = platforms.sort_values(by = 'Count', ascending=False)
# share of all platform selections (multi-select question)
platforms_tot = sum(platforms.Count)
platforms['Pct'] = round((platforms['Count'] * 100 / platforms_tot), 2)
list_and_bar_mc(platforms, 'Learning Platforms Used')
heatmap_mc('Q5', 'Q37', 'Learning Platform Use by Role', platforms.index, q5_order)
heatmap_mc('Q4', 'Q37', 'Learning Platform Use by Education Level', platforms.index, q4_order) | _____no_output_____ | CNRI-Python | Charting a path into the data science field.ipynb | khiara/DSND_Kaggle_2020_Survey |
基本程序设计- 一切代码输入,请使用英文输入法 | print('hello word')
print 'hello' | _____no_output_____ | Apache-2.0 | 7.16.ipynb | zhayanqi/mysql |
编写一个简单的程序- 圆公式面积: area = radius \* radius \* 3.1415 | radius = 1.0
area = radius * radius * 3.14 # assign the result of the right-hand expression to the variable `area`
# a variable must be given an initial value before it is used!!!
# radius: variable. area: variable!
# int type  (NOTE(review): the expression actually yields a float)
print(area) | 3.14
| Apache-2.0 | 7.16.ipynb | zhayanqi/mysql |
在Python里面不需要定义数据的类型 控制台的读取与输入- input 输入进去的是字符串- eval | radius = input('请输入半径') # input得到的结果是字符串类型
radius = float(radius)  # input() returns a string; convert before arithmetic
area = radius * radius * 3.14  # circle area with pi approximated as 3.14
print('面积为:',area) | 请输入半径10
面积为: 314.0
| Apache-2.0 | 7.16.ipynb | zhayanqi/mysql |
- 在jupyter用shift + tab 键可以跳出解释文档 变量命名的规范- 由字母、数字、下划线构成- 不能以数字开头 \*- 标识符不能是关键词(实际上是可以强制改变的,但是对于代码规范而言是极其不适合)- 可以是任意长度- 驼峰式命名 变量、赋值语句和赋值表达式- 变量: 通俗理解为可以变化的量- x = 2 \* x + 1 在数学中是一个方程,而在语言中它是一个表达式- test = test + 1 \* 变量在赋值之前必须有值 同时赋值var1, var2,var3... = exp1,exp2,exp3... 定义常量- 常量:表示一种定值标识符,适合于多次使用的场景。比如PI- 注意:在其他低级语言中如果定义了常量,那么,该常量是不可以被改变的,但是在Python中一切皆对象,常量也是可以被改变的 数值数据类型和运算符- 在Python中有两种数值类型(int 和 float)适用于加减乘除、模、幂次 运算符 /、//、** 运算符 % EP:- 25/4 多少,如果要将其转变为整数该怎么改写- 输入一个数字判断是奇数还是偶数- 进阶: 输入一个秒,数,写一个程序将其转换成分和秒:例如500秒等于8分20秒- 进阶: 如果今天是星期六,那么10天以后是星期几? 提示:每个星期的第0天是星期天 | day = eval(input('week'))
# NOTE(review): eval() on raw user input executes arbitrary code — unsafe outside a teaching notebook
plus_day = eval(input('plus'))
| _____no_output_____ | Apache-2.0 | 7.16.ipynb | zhayanqi/mysql |
计算表达式和运算优先级 增强型赋值运算 类型转换- float -> int- 四舍五入 round EP:- 如果一个年营业税为0.06%,那么对于197.55e+2的年收入,需要交税为多少?(结果保留2为小数)- 必须使用科学计数法 Project- 用Python写一个贷款计算器程序:输入的是月供(monthlyPayment) 输出的是总还款数(totalpayment) Homework- 1 | celsius = input('请输入温度')
celsius = float(celsius)  # input() returns a string; convert before arithmetic
fahrenheit = (9/5) * celsius + 32  # Celsius -> Fahrenheit conversion
print(celsius,'Celsius is',fahrenheit,'Fahrenheit') | 请输入温度43
43.0 Celsius is 109.4 Fahrenheit
| Apache-2.0 | 7.16.ipynb | zhayanqi/mysql |
- 2 | radius = input('请输入半径')
length = input('请输入高')  # cylinder height, read as a string
radius = float(radius)
length = float(length)
area = radius * radius * 3.14  # base-circle area (pi ~= 3.14)
volume = area * length  # cylinder volume = base area * height
print('The area is',area)
print('The volume is',volume) | 请输入半径5.5
请输入高12
The area is 94.985
The volume is 1139.82
| Apache-2.0 | 7.16.ipynb | zhayanqi/mysql |
- 3 | feet = input('请输入英尺')
feet = float(feet)  # input() returns a string; convert before arithmetic
meter = feet * 0.305  # feet -> meters (1 ft = 0.305 m)
print(feet,'feet is',meter,'meters') | 请输入英尺16.5
16.5 feet is 5.0325 meters
| Apache-2.0 | 7.16.ipynb | zhayanqi/mysql |
- 4 | M = input('请输入水量')
# read the initial/final temperatures, then compute the energy to heat M kg of water
initial = input('请输入初始温度')
final = input('请输入最终温度')
M = float(M)
initial = float(initial)
final = float(final)
Q = M * (final - initial) * 4184  # energy (J) = mass * deltaT * 4184 (specific heat of water)
print('The energy needed is ',Q) | 请输入水量55.5
请输入初始温度3.5
请输入最终温度10.5
The energy needed is 1625484.0
| Apache-2.0 | 7.16.ipynb | zhayanqi/mysql |
- 5 | cha = input('请输入差额')
rate = input('请输入年利率')  # annual interest rate, read as a string
cha = float(cha)
rate = float(rate)
interest = cha * (rate/1200)  # monthly interest: annual percentage rate / 12 / 100
print(interest) | 请输入差额1000
请输入年利率3.5
2.916666666666667
| Apache-2.0 | 7.16.ipynb | zhayanqi/mysql |
- 6 | start = input('请输入初始速度')
end = input('请输入末速度')  # final velocity, read as a string
time = input('请输入时间')  # elapsed time, read as a string
start = float(start)
end =float(end)
time = float(time)
a = (end - start)/time  # average acceleration = delta v / delta t
print(a) | 请输入初始速度5.5
请输入末速度50.9
请输入时间4.5
10.088888888888889
| Apache-2.0 | 7.16.ipynb | zhayanqi/mysql |
- 7 进阶 - 8 进阶 | a,b = eval(input('>>'))
print(a,b)
print(type(a),type(b))
# NOTE(review): eval() on raw user input executes arbitrary code — unsafe outside a teaching notebook
a = eval(input('>>'))
print(a) | >>1,2,3,4,5,6
(1, 2, 3, 4, 5, 6)
| Apache-2.0 | 7.16.ipynb | zhayanqi/mysql |
Part 1: Initialize Plot Agent | plot_agent = ThreeBar(big_traj_folder, data_folder) | _____no_output_____ | MIT | notebooks/sum_backbone_stack_hb_0.ipynb | yizaochen/enmspring |
Part 2: Make/Read DataFrame | makedf = False
# rebuild the per-host dataframes only when makedf is set; otherwise they are read from disk
if makedf:
    plot_agent.ini_b_agent()
    plot_agent.ini_s_agent()
    plot_agent.ini_h_agent()
    plot_agent.make_df_for_all_host()
plot_agent.read_df_for_all_host() | _____no_output_____ | MIT | notebooks/sum_backbone_stack_hb_0.ipynb | yizaochen/enmspring |
Part 2: Bar Plot | figsize = (1.817, 1.487)
hspace = 0  # no vertical gap between subplots
plot_agent.plot_main(figsize, hspace)
# export the figure as SVG into the drawzone folder
svg_out = path.join(drawzone_folder, 'sum_bb_st_hb.svg')
plt.savefig(svg_out, dpi=200)
plt.show()
from enmspring.graphs_bigtraj import BackboneMeanModeAgent
host = 'a_tract_21mer'
interval_time = 500  # window length for the mean-mode analysis
b_agent = BackboneMeanModeAgent(host, big_traj_folder, interval_time)
b_agent.preprocess_all_small_agents()
# bare expressions below are notebook display statements (show the value inline)
b_agent.d_smallagents[(0,500)].laplacian_mat
b_agent.initialize_all_maps()
b_agent.n_window
from enmspring.hb_k import HBResidPlotV1
# input/output locations for the hydrogen-bond K plots
bigtraj_folder = '/home/ytcdata/bigtraj_fluctmatch'
df_folder = '/home/yizaochen/Documents/dna_2021_drawzone/local_hb'
interval_time = 500
plot_agent = HBResidPlotV1(bigtraj_folder, interval_time, df_folder)
plot_agent.read_mean_std_df()
plot_agent.df_mean
plot_agent.df_std | _____no_output_____ | MIT | notebooks/sum_backbone_stack_hb_0.ipynb | yizaochen/enmspring |
Load & Preprocess Data Cornell Movie Dialogues Corpus | corpus_name = "cornell movie-dialogs corpus"
corpus = os.path.join("data", corpus_name)
# Preview helper: print the first n raw (bytes) lines of a file
def printLines(file, n=10):
    with open(file, 'rb') as datafile:
        lines = datafile.readlines()
    # whole file is read, then only the first n entries are shown
    for line in lines[:n]:
        print(line)
printLines(os.path.join(corpus, "movie_lines.txt"))
# Splits each line of the file into a dictionary of fields
def loadLines(fileName, fields):
    """Parse a movie_lines.txt-style file into {lineID: {field: value}}.

    Each row holds values separated by " +++$+++ ", matched positionally
    against *fields*; the trailing newline stays attached to the last value.
    """
    parsed = {}
    with open(fileName, 'r', encoding='iso-8859-1') as f:
        for raw in f:
            values = raw.split(" +++$+++ ")
            record = {field: values[i] for i, field in enumerate(fields)}
            parsed[record['lineID']] = record
    return parsed
# Groups fields of lines from `loadLines` into conversations based on *movie_conversations.txt*
def loadConversations(fileName, lines, fields):
    """Group line records from *lines* into conversation dicts.

    Each row of *fileName* is split on " +++$+++ " into *fields*; the line IDs
    are pulled out of the "utteranceIDs" field with a regex and resolved
    against the *lines* dict produced by loadLines.
    """
    # utteranceIDs looks like "['L598485', 'L598486', ...]"
    line_id_pattern = re.compile('L[0-9]+')
    conversations = []
    with open(fileName, 'r', encoding='iso-8859-1') as f:
        for raw in f:
            values = raw.split(" +++$+++ ")
            convObj = {field: values[i] for i, field in enumerate(fields)}
            lineIds = line_id_pattern.findall(convObj["utteranceIDs"])
            # reassemble the referenced line records in order
            convObj["lines"] = [lines[line_id] for line_id in lineIds]
            conversations.append(convObj)
    return conversations
# Extracts pairs of sentences from conversations
def extractSentencePairs(conversations):
    """Turn consecutive conversation lines into [query, reply] pairs.

    Pairs where either side is empty after stripping are filtered out.
    """
    qa_pairs = []
    for conversation in conversations:
        texts = [entry["text"].strip() for entry in conversation["lines"]]
        # pair each utterance with the one that answers it; the final line
        # has no answer, so zip with the shifted list drops it naturally
        for inputLine, targetLine in zip(texts, texts[1:]):
            if inputLine and targetLine:
                qa_pairs.append([inputLine, targetLine])
    return qa_pairs
# Define path to new file
datafile = os.path.join(corpus, "formatted_movie_lines.txt")
delimiter = '\t'
# Unescape the delimiter
delimiter = str(codecs.decode(delimiter, "unicode_escape"))
# Initialize lines dict, conversations list, and field ids
lines = {}
conversations = []
MOVIE_LINES_FIELDS = ["lineID", "characterID", "movieID", "character", "text"]
MOVIE_CONVERSATIONS_FIELDS = ["character1ID", "character2ID", "movieID", "utteranceIDs"]
# Load lines and process conversations
print("\nProcessing corpus...")
lines = loadLines(os.path.join(corpus, "movie_lines.txt"), MOVIE_LINES_FIELDS)
print("\nLoading conversations...")
conversations = loadConversations(os.path.join(corpus, "movie_conversations.txt"),
                                  lines, MOVIE_CONVERSATIONS_FIELDS)
# Write new csv file: one tab-separated query/response pair per row
print("\nWriting newly formatted file...")
with open(datafile, 'w', encoding='utf-8') as outputfile:
    writer = csv.writer(outputfile, delimiter=delimiter, lineterminator='\n')
    for pair in extractSentencePairs(conversations):
        writer.writerow(pair)
# Print a sample of lines
print("\nSample lines from file:")
printLines(datafile)
# Default word tokens
PAD_token = 0  # Used for padding short sentences
SOS_token = 1  # Start-of-sentence token
EOS_token = 2  # End-of-sentence token


class Voc:
    """Vocabulary: bidirectional word <-> index mapping with usage counts."""

    def __init__(self, name):
        self.name = name
        self.trimmed = False  # whether trim() has already been applied
        self.word2index = {}
        self.word2count = {}
        self.index2word = {PAD_token: "PAD", SOS_token: "SOS", EOS_token: "EOS"}
        self.num_words = 3  # Count SOS, EOS, PAD

    def addSentence(self, sentence):
        """Register every space-separated token of *sentence*."""
        for token in sentence.split(' '):
            self.addWord(token)

    def addWord(self, word):
        """Add *word* to the vocabulary, or bump its count if already known."""
        if word in self.word2index:
            self.word2count[word] += 1
        else:
            self.word2index[word] = self.num_words
            self.word2count[word] = 1
            self.index2word[self.num_words] = word
            self.num_words += 1

    def trim(self, min_count):
        """Drop words seen fewer than *min_count* times (no-op if already trimmed)."""
        if self.trimmed:
            return
        self.trimmed = True
        keep_words = [w for w, c in self.word2count.items() if c >= min_count]
        print('keep_words {} / {} = {:.4f}'.format(
            len(keep_words), len(self.word2index), len(keep_words) / len(self.word2index)
        ))
        # Rebuild the mappings from scratch with only the kept words
        self.word2index = {}
        self.word2count = {}
        self.index2word = {PAD_token: "PAD", SOS_token: "SOS", EOS_token: "EOS"}
        self.num_words = 3
        for w in keep_words:
            self.addWord(w)
MAX_LENGTH = 10  # Maximum sentence length to consider


# Strip diacritics: decompose to NFD and drop the combining marks.
# Thanks to https://stackoverflow.com/a/518232/2809427
def unicodeToAscii(s):
    """Turn a Unicode string into plain ASCII by removing combining marks."""
    decomposed = unicodedata.normalize('NFD', s)
    return ''.join(ch for ch in decomposed if unicodedata.category(ch) != 'Mn')


def normalizeString(s):
    """Lowercase, trim, pad punctuation with spaces, and drop non-letter chars."""
    text = unicodeToAscii(s.lower().strip())
    text = re.sub(r"([.!?])", r" \1", text)
    text = re.sub(r"[^a-zA-Z.!?']+", r" ", text)
    return re.sub(r"\s+", r" ", text).strip()
# Read query/response pairs and return a voc object
def readVocs(datafile, corpus_name):
    """Read tab-separated query/response lines and return (empty Voc, pairs).

    datafile - path to the formatted_movie_lines.txt produced earlier
    corpus_name - name used to label the new Voc object
    """
    print("Reading lines...")
    # Read the file and split into lines.
    # fix: use a context manager — the original called open() without ever
    # closing the handle, leaking it until garbage collection
    with open(datafile, encoding='utf-8') as f:
        lines = f.read().strip().split('\n')
    # Split every line into pairs and normalize
    pairs = [[normalizeString(s) for s in l.split('\t')] for l in lines]
    voc = Voc(corpus_name)
    return voc, pairs
# Returns True iff both sentences in a pair 'p' are under the MAX_LENGTH threshold
def filterPair(p):
    """True iff both sentences leave room for the EOS token (< MAX_LENGTH words)."""
    query_len = len(p[0].split(' '))
    reply_len = len(p[1].split(' '))
    return query_len < MAX_LENGTH and reply_len < MAX_LENGTH


def filterPairs(pairs):
    """Keep only the pairs accepted by filterPair."""
    return [pair for pair in pairs if filterPair(pair)]
# Using the functions defined above, return a populated voc object and pairs list
def loadPrepareData(corpus, corpus_name, datafile, save_dir):
    """Build the vocabulary and the filtered sentence-pair list.

    Reads the formatted datafile, drops over-long pairs, and registers every
    remaining word in the Voc. (save_dir is accepted for interface parity
    but not used here.)
    """
    print("Start preparing training data ...")
    voc, pairs = readVocs(datafile, corpus_name)
    print("Read {!s} sentence pairs".format(len(pairs)))
    pairs = filterPairs(pairs)
    print("Trimmed to {!s} sentence pairs".format(len(pairs)))
    print("Counting words...")
    for pair in pairs:
        voc.addSentence(pair[0])
        voc.addSentence(pair[1])
    print("Counted words:", voc.num_words)
    return voc, pairs
# Load/Assemble voc and pairs
# Load/Assemble voc and pairs from the formatted corpus
save_dir = os.path.join("data", "save")
voc, pairs = loadPrepareData(corpus, corpus_name, datafile, save_dir)
# Print some pairs to validate
print("\npairs:")
for pair in pairs[:10]:
    print(pair)
MIN_COUNT = 3    # Minimum word count threshold for trimming


def trimRareWords(voc, pairs, MIN_COUNT):
    """Trim rare words out of *voc*, then drop every pair that still contains
    a word no longer present in the vocabulary. Returns the kept pairs."""
    voc.trim(MIN_COUNT)
    keep_pairs = []
    for pair in pairs:
        words_in = pair[0].split(' ')
        words_out = pair[1].split(' ')
        # a pair survives only if every word on both sides is still known
        keep = (all(w in voc.word2index for w in words_in)
                and all(w in voc.word2index for w in words_out))
        if keep:
            keep_pairs.append(pair)
    print("Trimmed from {} pairs to {}, {:.4f} of total".format(len(pairs), len(keep_pairs), len(keep_pairs) / len(pairs)))
    return keep_pairs
# Trim voc and pairs
pairs = trimRareWords(voc, pairs, MIN_COUNT) | keep_words 8610 / 20279 = 0.4246
Trimmed from 70086 pairs to 57379, 0.8187 of total
| MIT | chatbot.ipynb | Kevinz930/Alexiri-chatbot- |
Prepare Data for Models | def indexesFromSentence(voc, sentence):
    # Word -> index for every space-separated token, terminated with the EOS marker
    return [voc.word2index[word] for word in sentence.split(' ')] + [EOS_token]
def zeroPadding(l, fillvalue=PAD_token):
    # Transpose a batch of index lists to (max_len, batch) tuples, padding short sequences
    return list(itertools.zip_longest(*l, fillvalue=fillvalue))
def binaryMatrix(l, value=PAD_token):
    """Return a 0/1 mask for *l*: 0 where the token equals *value*, else 1.

    fix: the original compared every token against the global PAD_token and
    silently ignored the *value* parameter; comparing against *value* keeps
    the default behavior while honoring the argument.
    """
    m = []
    for i, seq in enumerate(l):
        m.append([])
        for token in seq:
            if token == value:
                m[i].append(0)
            else:
                m[i].append(1)
    return m
# Returns padded input sequence tensor and lengths
def inputVar(l, voc):
    """Tensorize a batch of input sentences.

    Returns (padded LongTensor of shape (max_len, batch), lengths tensor).
    """
    indexes_batch = [indexesFromSentence(voc, sentence) for sentence in l]
    lengths = torch.tensor([len(seq) for seq in indexes_batch])
    padVar = torch.LongTensor(zeroPadding(indexes_batch))
    return padVar, lengths
# Returns padded target sequence tensor, padding mask, and max target length
def outputVar(l, voc):
    """Tensorize a batch of target sentences.

    Returns (padded LongTensor, boolean padding mask, max target length).
    """
    indexes_batch = [indexesFromSentence(voc, sentence) for sentence in l]
    max_target_len = max(len(seq) for seq in indexes_batch)
    padList = zeroPadding(indexes_batch)
    mask = torch.BoolTensor(binaryMatrix(padList))
    padVar = torch.LongTensor(padList)
    return padVar, mask, max_target_len
# Returns all items for a given batch of pairs
def batch2TrainData(voc, pair_batch):
    """Sort a batch by input length (desc, as pack_padded_sequence expects)
    and tensorize inputs and targets.

    Returns (inp, lengths, output, mask, max_target_len).
    """
    pair_batch.sort(key=lambda x: len(x[0].split(" ")), reverse=True)
    input_batch = [pair[0] for pair in pair_batch]
    output_batch = [pair[1] for pair in pair_batch]
    inp, lengths = inputVar(input_batch, voc)
    output, mask, max_target_len = outputVar(output_batch, voc)
    return inp, lengths, output, mask, max_target_len
# Example for validation
# Example for validation: tensorize a random mini-batch and inspect its parts
small_batch_size = 5
batches = batch2TrainData(voc, [random.choice(pairs) for _ in range(small_batch_size)])
input_variable, lengths, target_variable, mask, max_target_len = batches
print("input_variable:", input_variable)
print("lengths:", lengths)
print("target_variable:", target_variable)
print("mask:", mask)
print("max_target_len:", max_target_len) | input_variable: tensor([[ 33, 42, 83, 181, 279],
[ 97, 67, 59, 341, 31],
[ 32, 1089, 735, 33, 10],
[ 10, 260, 112, 32, 2],
[ 563, 33, 16, 15, 0],
[ 46, 121, 15, 2, 0],
[ 82, 1727, 2, 0, 0],
[ 10, 10, 0, 0, 0],
[ 2, 2, 0, 0, 0]])
lengths: tensor([9, 9, 7, 6, 4])
target_variable: tensor([[ 56, 125, 5, 616, 22],
[ 53, 548, 68, 175, 73],
[ 33, 10, 10, 59, 7],
[ 47, 2, 33, 1905, 3516],
[ 15, 0, 32, 10, 4119],
[ 2, 0, 204, 2, 10],
[ 0, 0, 10, 0, 2],
[ 0, 0, 2, 0, 0]])
mask: tensor([[ True, True, True, True, True],
[ True, True, True, True, True],
[ True, True, True, True, True],
[ True, True, True, True, True],
[ True, False, True, True, True],
[ True, False, True, True, True],
[False, False, True, False, True],
[False, False, True, False, False]])
max_target_len: 8
| MIT | chatbot.ipynb | Kevinz930/Alexiri-chatbot- |
Encoder | class EncoderRNN(nn.Module):
    def __init__(self, hidden_size, embedding, n_layers=1, dropout=0):
        """Bidirectional GRU encoder.

        hidden_size: GRU hidden size (must match the embedding dimension,
                     since the embedding output feeds the GRU directly)
        embedding:   shared nn.Embedding layer
        n_layers:    number of stacked GRU layers
        dropout:     inter-layer dropout (forced to 0 when n_layers == 1)
        """
        super(EncoderRNN, self).__init__()
        self.n_layers = n_layers
        self.hidden_size = hidden_size
        self.embedding = embedding
        # Initialize GRU; the input_size and hidden_size params are both set to 'hidden_size'
        # because our input size is a word embedding with number of features == hidden_size
        self.gru = nn.GRU(hidden_size, hidden_size, n_layers,
                          dropout=(0 if n_layers == 1 else dropout), bidirectional=True)
def forward(self, input_seq, input_lengths, hidden=None):
# Convert word indexes to embeddings
embedded = self.embedding(input_seq)
# Pack padded batch of sequences for RNN module
packed = nn.utils.rnn.pack_padded_sequence(embedded, input_lengths)
# Forward pass through GRU
outputs, hidden = self.gru(packed, hidden)
# Unpack padding
outputs, _ = nn.utils.rnn.pad_packed_sequence(outputs)
# Sum bidirectional GRU outputs
outputs = outputs[:, :, :self.hidden_size] + outputs[:, : ,self.hidden_size:]
# Return output and final hidden state
return outputs, hidden | _____no_output_____ | MIT | chatbot.ipynb | Kevinz930/Alexiri-chatbot- |
Decoder | # Luong attention layer
class Attn(nn.Module):
def __init__(self, method, hidden_size):
super(Attn, self).__init__()
self.method = method
if self.method not in ['dot', 'general', 'concat']:
raise ValueError(self.method, "is not an appropriate attention method.")
self.hidden_size = hidden_size
if self.method == 'general':
self.attn = nn.Linear(self.hidden_size, hidden_size)
elif self.method == 'concat':
self.attn = nn.Linear(self.hidden_size * 2, hidden_size)
self.v = nn.Parameter(torch.FloatTensor(hidden_size))
def dot_score(self, hidden, encoder_output):
return torch.sum(hidden * encoder_output, dim=2)
def general_score(self, hidden, encoder_output):
energy = self.attn(encoder_output)
return torch.sum(hidden * energy, dim=2)
def concat_score(self, hidden, encoder_output):
energy = self.attn(torch.cat((hidden.expand(encoder_output.size(0), -1, -1), encoder_output), 2)).tanh()
return torch.sum(self.v * energy, dim=2)
def forward(self, hidden, encoder_outputs):
# Calculate the attention weights (energies) based on the given method
if self.method == 'general':
attn_energies = self.general_score(hidden, encoder_outputs)
elif self.method == 'concat':
attn_energies = self.concat_score(hidden, encoder_outputs)
elif self.method == 'dot':
attn_energies = self.dot_score(hidden, encoder_outputs)
# Transpose max_length and batch_size dimensions
attn_energies = attn_energies.t()
# Return the softmax normalized probability scores (with added dimension)
return F.softmax(attn_energies, dim=1).unsqueeze(1)
class LuongAttnDecoderRNN(nn.Module):
    """Single-step GRU decoder with Luong attention over encoder outputs."""

    def __init__(self, attn_model, embedding, hidden_size, output_size, n_layers=1, dropout=0.1):
        """attn_model: 'dot' | 'general' | 'concat' scoring method name.

        embedding:   shared nn.Embedding layer
        hidden_size: GRU hidden size (equals the embedding dimension)
        output_size: vocabulary size of the output distribution
        n_layers:    stacked GRU layers; dropout forced to 0 when 1
        dropout:     dropout rate for embeddings and between GRU layers
        """
        super(LuongAttnDecoderRNN, self).__init__()
        # Keep for reference
        self.attn_model = attn_model
        self.hidden_size = hidden_size
        self.output_size = output_size
        self.n_layers = n_layers
        self.dropout = dropout
        # Define layers
        self.embedding = embedding
        self.embedding_dropout = nn.Dropout(dropout)
        self.gru = nn.GRU(hidden_size, hidden_size, n_layers, dropout=(0 if n_layers == 1 else dropout))
        self.concat = nn.Linear(hidden_size * 2, hidden_size)
        self.out = nn.Linear(hidden_size, output_size)
        self.attn = Attn(attn_model, hidden_size)

    def forward(self, input_step, last_hidden, encoder_outputs):
        """Decode ONE time step.

        input_step:      (1, batch) word indices for this step
        last_hidden:     previous decoder hidden state
        encoder_outputs: (max_len, batch, hidden) encoder states
        Returns (softmax distribution over the vocab, new hidden state).
        """
        # Note: we run this one step (word) at a time
        # Get embedding of current input word
        embedded = self.embedding(input_step)
        embedded = self.embedding_dropout(embedded)
        # Forward through unidirectional GRU
        rnn_output, hidden = self.gru(embedded, last_hidden)
        # Calculate attention weights from the current GRU output
        attn_weights = self.attn(rnn_output, encoder_outputs)
        # Multiply attention weights to encoder outputs to get new "weighted sum" context vector
        context = attn_weights.bmm(encoder_outputs.transpose(0, 1))
        # Concatenate weighted context vector and GRU output using Luong eq. 5
        rnn_output = rnn_output.squeeze(0)
        context = context.squeeze(1)
        concat_input = torch.cat((rnn_output, context), 1)
        concat_output = torch.tanh(self.concat(concat_input))
        # Predict next word using Luong eq. 6
        output = self.out(concat_output)
        output = F.softmax(output, dim=1)
        # Return output and final hidden state
        return output, hidden
Training Procedure | def maskNLLLoss(inp, target, mask):
nTotal = mask.sum()
crossEntropy = -torch.log(torch.gather(inp, 1, target.view(-1, 1)).squeeze(1))
loss = crossEntropy.masked_select(mask).mean()
loss = loss.to(device)
return loss, nTotal.item()
def train(input_variable, lengths, target_variable, mask, max_target_len, encoder, decoder, embedding,
          encoder_optimizer, decoder_optimizer, batch_size, clip, max_length=MAX_LENGTH):
    """Run ONE training iteration on a single batch and return its average loss.

    Relies on module-level globals: `device`, `teacher_forcing_ratio`,
    `SOS_token`, and `maskNLLLoss`. NOTE(review): the `max_length` parameter
    is accepted but never used in this body.
    """
    # Zero gradients
    encoder_optimizer.zero_grad()
    decoder_optimizer.zero_grad()
    # Set device options
    input_variable = input_variable.to(device)
    target_variable = target_variable.to(device)
    mask = mask.to(device)
    # Lengths for rnn packing should always be on the cpu
    lengths = lengths.to("cpu")
    # Initialize variables
    loss = 0
    print_losses = []
    n_totals = 0
    # Forward pass through encoder
    encoder_outputs, encoder_hidden = encoder(input_variable, lengths)
    # Create initial decoder input (start with SOS tokens for each sentence)
    decoder_input = torch.LongTensor([[SOS_token for _ in range(batch_size)]])
    decoder_input = decoder_input.to(device)
    # Set initial decoder hidden state to the encoder's final hidden state
    decoder_hidden = encoder_hidden[:decoder.n_layers]
    # Determine if we are using teacher forcing this iteration
    use_teacher_forcing = True if random.random() < teacher_forcing_ratio else False
    # Forward batch of sequences through decoder one time step at a time
    if use_teacher_forcing:
        for t in range(max_target_len):
            decoder_output, decoder_hidden = decoder(
                decoder_input, decoder_hidden, encoder_outputs
            )
            # Teacher forcing: next input is current target
            decoder_input = target_variable[t].view(1, -1)
            # Calculate and accumulate loss
            mask_loss, nTotal = maskNLLLoss(decoder_output, target_variable[t], mask[t])
            loss += mask_loss
            print_losses.append(mask_loss.item() * nTotal)
            n_totals += nTotal
    else:
        for t in range(max_target_len):
            decoder_output, decoder_hidden = decoder(
                decoder_input, decoder_hidden, encoder_outputs
            )
            # No teacher forcing: next input is decoder's own current output
            _, topi = decoder_output.topk(1)
            decoder_input = torch.LongTensor([[topi[i][0] for i in range(batch_size)]])
            decoder_input = decoder_input.to(device)
            # Calculate and accumulate loss
            mask_loss, nTotal = maskNLLLoss(decoder_output, target_variable[t], mask[t])
            loss += mask_loss
            print_losses.append(mask_loss.item() * nTotal)
            n_totals += nTotal
    # Perform backpropagation
    loss.backward()
    # Clip gradients: gradients are modified in place
    _ = nn.utils.clip_grad_norm_(encoder.parameters(), clip)
    _ = nn.utils.clip_grad_norm_(decoder.parameters(), clip)
    # Adjust model weights
    encoder_optimizer.step()
    decoder_optimizer.step()
    # Average loss per (unmasked) token over the whole batch
    return sum(print_losses) / n_totals
def trainIters(model_name, voc, pairs, encoder, decoder, encoder_optimizer, decoder_optimizer, embedding, encoder_n_layers, decoder_n_layers, save_dir, n_iteration, batch_size, print_every, save_every, clip, corpus_name, loadFilename):
    """Full training loop: pre-samples n_iteration random batches, trains on
    each, prints progress every `print_every` iterations, and checkpoints
    every `save_every` iterations.

    NOTE(review): relies on module-level globals `hidden_size` (for the
    checkpoint directory name) and `checkpoint` (when resuming from
    loadFilename) — confirm both are defined before calling.
    """
    # Load batches for each iteration
    training_batches = [batch2TrainData(voc, [random.choice(pairs) for _ in range(batch_size)])
                        for _ in range(n_iteration)]
    # Initializations
    print('Initializing ...')
    start_iteration = 1
    print_loss = 0
    if loadFilename:
        # resume from a previously saved checkpoint
        start_iteration = checkpoint['iteration'] + 1
    # Training loop
    print("Training...")
    for iteration in range(start_iteration, n_iteration + 1):
        training_batch = training_batches[iteration - 1]
        # Extract fields from batch
        input_variable, lengths, target_variable, mask, max_target_len = training_batch
        # Run a training iteration with batch
        loss = train(input_variable, lengths, target_variable, mask, max_target_len, encoder,
                     decoder, embedding, encoder_optimizer, decoder_optimizer, batch_size, clip)
        print_loss += loss
        # Print progress
        if iteration % print_every == 0:
            print_loss_avg = print_loss / print_every
            print("Iteration: {}; Percent complete: {:.1f}%; Average loss: {:.4f}".format(iteration, iteration / n_iteration * 100, print_loss_avg))
            print_loss = 0
        # Save checkpoint
        if (iteration % save_every == 0):
            directory = os.path.join(save_dir, model_name, corpus_name, '{}-{}_{}'.format(encoder_n_layers, decoder_n_layers, hidden_size))
            if not os.path.exists(directory):
                os.makedirs(directory)
            torch.save({
                'iteration': iteration,
                'en': encoder.state_dict(),
                'de': decoder.state_dict(),
                'en_opt': encoder_optimizer.state_dict(),
                'de_opt': decoder_optimizer.state_dict(),
                'loss': loss,
                'voc_dict': voc.__dict__,
                'embedding': embedding.state_dict()
            }, os.path.join(directory, '{}_{}.tar'.format(iteration, 'checkpoint')))
Evaluation | class GreedySearchDecoder(nn.Module):
def __init__(self, encoder, decoder, voc):
    """Wrap a trained encoder/decoder pair for greedy decoding."""
    super(GreedySearchDecoder, self).__init__()
    self.encoder = encoder  # seq2seq encoder module
    self.decoder = decoder  # attention decoder module
    self.voc = voc          # vocabulary object (provides word2index / index2word)
def forward(self, input_seq, input_length, max_length):
    """Greedily decode up to `max_length` tokens for one input sequence.

    Returns (all_tokens, all_scores): the chosen token ids and their scores.
    """
    # Forward input through encoder model
    encoder_outputs, encoder_hidden = self.encoder(input_seq, input_length)
    # Prepare encoder's final hidden layer to be first hidden input to the decoder.
    # FIX: use self.decoder — the original read the module-level `decoder` global,
    # which only worked by accident when a global of that name existed.
    decoder_hidden = encoder_hidden[:self.decoder.n_layers]
    # Initialize decoder input with SOS_token
    decoder_input = torch.ones(1, 1, device=device, dtype=torch.long) * SOS_token
    # Initialize tensors to append decoded words to
    all_tokens = torch.zeros([0], device=device, dtype=torch.long)
    all_scores = torch.zeros([0], device=device)
    # Iteratively decode one word token at a time
    for _ in range(max_length):
        # Forward pass through decoder
        decoder_output, decoder_hidden = self.decoder(decoder_input, decoder_hidden, encoder_outputs)
        # Obtain most likely word token and its softmax score
        decoder_scores, decoder_input = torch.max(decoder_output, dim=1)
        # Debug trace of the sentence decoded so far.
        # FIX: use self.voc — the original read the module-level `voc` global.
        print('all tokens words', [self.voc.index2word[token.item()] for token in all_tokens])
        # If the top choice is '.', fall back to the second-best token so the
        # reply does not terminate immediately after the first word.
        if all_tokens.nelement() > 0 and int(decoder_input[0]) == self.voc.word2index['.']:
            decoder_scores, decoder_input = torch.kthvalue(decoder_output, 2)
        # Record token and score
        all_tokens = torch.cat((all_tokens, decoder_input), dim=0)
        all_scores = torch.cat((all_scores, decoder_scores), dim=0)
        # Add a batch dimension so the token can be fed back as the next decoder input
        decoder_input = torch.unsqueeze(decoder_input, 0)
    # Return collections of word tokens and scores
    return all_tokens, all_scores
def evaluate(encoder, decoder, searcher, voc, sentence, max_length=MAX_LENGTH):
    """Turn a normalized input sentence into the bot's reply as a list of words."""
    # Encode the sentence as a single-element batch of word indexes.
    batch = [indexesFromSentence(voc, sentence)]
    seq_lengths = torch.tensor([len(seq) for seq in batch])
    # Models expect shape (max_seq_len, batch_size), hence the transpose;
    # the batch goes to the compute device, the lengths stay on CPU.
    batch_tensor = torch.LongTensor(batch).transpose(0, 1).to(device)
    seq_lengths = seq_lengths.to("cpu")
    # Greedy-decode with the searcher, then map token ids back to words.
    tokens, scores = searcher(batch_tensor, seq_lengths, max_length)
    return [voc.index2word[t.item()] for t in tokens]
def evaluateInput(encoder, decoder, searcher, voc):
    """Interactive chat loop: prompt the user, print the bot's answer, stop on 'q'/'quit'."""
    input_sentence = ''
    while True:
        try:
            # Get input sentence
            input_sentence = input('> ')
            # Exit commands
            if input_sentence in ('q', 'quit'):
                break
            # Normalize, then run the full evaluation pipeline.
            input_sentence = normalizeString(input_sentence)
            output_words = evaluate(encoder, decoder, searcher, voc, input_sentence)
            # Strip padding / end-of-sentence markers before printing.
            cleaned = [w for w in output_words if w not in ('EOS', 'PAD')]
            print('human:', input_sentence)
            print('Bot:', ' '.join(cleaned))
        except KeyError:
            # Raised when a word is missing from the vocabulary.
            print("Error: Encountered unknown word.")
Embeddings | # load pre-trained word2Vec model
# Download Google News word2vec (300-d) via the gensim downloader API.
import gensim.downloader as api
model = api.load('word2vec-google-news-300')
weights_w2v = torch.FloatTensor(model.vectors)
# Load the pre-trained GloVe 42B-300d vectors (converted to word2vec format).
# model = gensim.models.KeyedVectors.load_word2vec_format('glove.42B.300d.w2vformat.txt')
corpus = os.path.join("glove", "glove.42B.300d.w2vformat.txt")
# NOTE(review): relies on a module-level `import gensim` done elsewhere — TODO confirm
model = gensim.models.KeyedVectors.load_word2vec_format(corpus)
weights_42b = torch.FloatTensor(model.vectors)
# Load the pre-trained GloVe 6B-300d vectors (converted to word2vec format).
corpus = os.path.join("glove", "glove.6B.300d.w2vformat.txt")
model = gensim.models.KeyedVectors.load_word2vec_format(corpus)
weights_6b = torch.FloatTensor(model.vectors)
# Configure models
model_name = 'cb_model'
# attn_model = 'dot'
#attn_model = 'general'
attn_model = 'concat'
hidden_size = 300 # 500 -> 300 to match the 300-d pre-trained embeddings
encoder_n_layers = 3 # 2 -> 3
decoder_n_layers = 3 # 2 -> 3
dropout = 0.1
batch_size = 64
# Set checkpoint to load from; set to None if starting from scratch
loadFilename = None
checkpoint_iter = 5000
# loadFilename = os.path.join(save_dir, model_name, corpus_name,
#                            '{}-{}_{}'.format(encoder_n_layers, decoder_n_layers, hidden_size),
#                            '{}_checkpoint.tar'.format(checkpoint_iter))
# Load model if a loadFilename is provided
if loadFilename:
    # If loading on same machine the model was trained on
    checkpoint = torch.load(loadFilename)
    # If loading a model trained on GPU to CPU
    #checkpoint = torch.load(loadFilename, map_location=torch.device('cpu'))
    encoder_sd = checkpoint['en']
    decoder_sd = checkpoint['de']
    encoder_optimizer_sd = checkpoint['en_opt']
    decoder_optimizer_sd = checkpoint['de_opt']
    embedding_sd = checkpoint['embedding']
    voc.__dict__ = checkpoint['voc_dict']
print('Building encoder and decoder ...')
# Initialize word embeddings from the pre-trained word2vec weights
# (swap weights_w2v for weights_42b / weights_6b to change embedding model).
# embedding = nn.Embedding(voc.num_words, hidden_size)
embedding = nn.Embedding.from_pretrained(weights_w2v) # Choose embedding model
if loadFilename:
    embedding.load_state_dict(embedding_sd)
# Initialize encoder & decoder models
encoder = EncoderRNN(hidden_size, embedding, encoder_n_layers, dropout)
decoder = LuongAttnDecoderRNN(attn_model, embedding, hidden_size, voc.num_words, decoder_n_layers, dropout)
if loadFilename:
    encoder.load_state_dict(encoder_sd)
    decoder.load_state_dict(decoder_sd)
# Use appropriate device
encoder = encoder.to(device)
decoder = decoder.to(device)
print('Models built and ready to go!') | Building encoder and decoder ...
Models built and ready to go!
| MIT | chatbot.ipynb | Kevinz930/Alexiri-chatbot- |
Run Model Training | # Configure training/optimization
clip = 50.0  # gradient clipping threshold
teacher_forcing_ratio = 1.0
learning_rate = 0.0001
decoder_learning_ratio = 6.0 # decoder LR multiplier (comment said 5.0 -> 4.0, but the value in use is 6.0)
n_iteration = 5000 # 4000 -> 5000
print_every = 1
save_every = 500
# Ensure dropout layers are in train mode
encoder.train()
decoder.train()
# Initialize optimizers (decoder runs at learning_rate * decoder_learning_ratio)
print('Building optimizers ...')
encoder_optimizer = optim.Adam(encoder.parameters(), lr=learning_rate)
decoder_optimizer = optim.Adam(decoder.parameters(), lr=learning_rate * decoder_learning_ratio)
if loadFilename:
    encoder_optimizer.load_state_dict(encoder_optimizer_sd)
    decoder_optimizer.load_state_dict(decoder_optimizer_sd)
# If you have cuda, move any restored optimizer state tensors onto the GPU
for state in encoder_optimizer.state.values():
    for k, v in state.items():
        if isinstance(v, torch.Tensor):
            state[k] = v.cuda()
for state in decoder_optimizer.state.values():
    for k, v in state.items():
        if isinstance(v, torch.Tensor):
            state[k] = v.cuda()
# Run training iterations
print("Starting Training!")
trainIters(model_name, voc, pairs, encoder, decoder, encoder_optimizer, decoder_optimizer,
           embedding, encoder_n_layers, decoder_n_layers, save_dir, n_iteration, batch_size,
           print_every, save_every, clip, corpus_name, loadFilename)
Evaluation | # Set dropout layers to eval mode
# Put both models in eval mode (disables dropout for inference).
encoder.eval()
decoder.eval()
# Initialize the greedy-search decoding module used by the chat loop.
searcher = GreedySearchDecoder(encoder, decoder, voc)
evaluateInput(encoder, decoder, searcher, voc) | > hey
all tokens words []
all tokens words ['i']
all tokens words ['i', "don't"]
all tokens words ['i', "don't", 'bacon']
all tokens words ['i', "don't", 'bacon', 'sandwich']
all tokens words ['i', "don't", 'bacon', 'sandwich', 'sandwich']
all tokens words ['i', "don't", 'bacon', 'sandwich', 'sandwich', 'bacon']
all tokens words ['i', "don't", 'bacon', 'sandwich', 'sandwich', 'bacon', 'sandwich']
all tokens words ['i', "don't", 'bacon', 'sandwich', 'sandwich', 'bacon', 'sandwich', 'bacon']
all tokens words ['i', "don't", 'bacon', 'sandwich', 'sandwich', 'bacon', 'sandwich', 'bacon', 'sandwich']
human: hey
Bot: i don't bacon sandwich sandwich bacon sandwich bacon sandwich
| MIT | chatbot.ipynb | Kevinz930/Alexiri-chatbot- |
Data UnderstandingIn order to get a better understanding of the busiest times in seattle, we will take a look at the dataset. Access & ExploreFirst, let's read and explore the data | import pandas as pd
import matplotlib.pyplot as plt
# Import the calendar dataset; thousands=',' parses "1,234"-style numbers.
df_cal=pd.read_csv('calendar.csv', thousands=',')
pd.set_option("display.max_columns", None)
df_cal.head()
# Check whether the price column has any empty records.
df_cal['price'].isnull().value_counts()
Data Preparation & AnalysisNow we will prepare the data and make some conversions to prepare the data for visualization Wrangle and Clean | #Convert price to numerical value
# Strip the '$' and thousands separators, then cast price to float.
# FIX: pass regex=True explicitly — '[$,]' is a regex character class, and the
# default of Series.str.replace changed to regex=False in pandas 2.0, which
# would leave the '$' in place and make astype(float) fail.
df_cal["price"] = df_cal["price"].str.replace('[$,]', "", regex=True).astype(float)
# Impute missing prices with the column mean (reassignment instead of
# chained inplace fillna, which is deprecated on a column view).
df_cal['price'] = df_cal['price'].fillna(df_cal['price'].mean())
# New feature: calendar month extracted from the date column.
df_cal['month'] = pd.DatetimeIndex(df_cal['date']).month
df_cal.head()
Data VisualizationNow we will visualize our dataset to get the required answer for the main question that which time is the busiest in seattle all over the year and its reflection on price | #Plot the busiest seattle time of the year
# Plot the busiest Seattle time of the year (mean price per month).
busytime=df_cal.groupby(['month']).price.mean()
busytime.plot(kind = 'bar', title="BusyTime")
# Plot the price range across the year, sorted by mean price.
busytime_price=df_cal.groupby(['month']).mean()['price'].sort_values().dropna()
busytime_price.plot(kind="bar");
plt.title("Price Trend over year");
0.0. IMPORTS | import math
import pandas as pd
import inflection
import numpy as np
import seaborn as sns
import matplotlib as plt
import datetime
from IPython.display import Image | _____no_output_____ | MIT | m03_v01_store_sales_prediction.ipynb | luana-afonso/DataScience-Em-Producao |
0.1. Helper Functions 0.2. Loading Data | # read_csv é um metodo da classe Pandas
# Does the zip need extracting first? No — pandas read_csv reads it directly.
# low_memory controls whether pandas reads the file at once (False) or in
# chunks (True); pandas itself warns which is better for the file.
df_sales_raw = pd.read_csv("data/train.csv.zip", low_memory=False)
df_store_raw = pd.read_csv("data/store.csv", low_memory=False)
# merge(reference df, df to attach, join type, key column present in both);
# merge is also a pandas method.
df_raw = pd.merge( df_sales_raw, df_store_raw, how="left", on="Store" )
df_sales_raw.head()
df_store_raw.head()
# Show one random row with sample() to check the merge worked.
df_raw.sample()
1.0. STEP 01 - DATA DESCRIPTION | df1 = df_raw.copy() | _____no_output_____ | MIT | m03_v01_store_sales_prediction.ipynb | luana-afonso/DataScience-Em-Producao |
1.1. Rename Columns Para ganhar velocidade no desenvolvimento! | df_raw.columns
# The names are already fairly tidy (CamelCase), but in the real world they
# can be much messier; renaming to snake_case speeds up development.
cols_old = ['Store', 'DayOfWeek', 'Date', 'Sales', 'Customers', 'Open', 'Promo',
            'StateHoliday', 'SchoolHoliday', 'StoreType', 'Assortment',
            'CompetitionDistance', 'CompetitionOpenSinceMonth',
            'CompetitionOpenSinceYear', 'Promo2', 'Promo2SinceWeek',
            'Promo2SinceYear', 'PromoInterval']
snakecase = lambda x: inflection.underscore( x )
cols_new = list( map( snakecase, cols_old) )
# Rename
df1.columns = cols_new
df1.columns
1.2. Data Dimensions Saber qual a quantidade de linhas e colunas do dataset | # O shape printa linhas e colunas do dataframe em que primeiro elemento são as rows
# The {} braces are str.format placeholders, filled by the shape values.
print( "Number of Rows: {}".format( df1.shape[0] ) )
print( "Number of Cols: {}".format( df1.shape[1] ) )
Number of Cols: 18
| MIT | m03_v01_store_sales_prediction.ipynb | luana-afonso/DataScience-Em-Producao |
1.3. Data Types | # Atente que não usamos os parênteses aqui. Isso pq estamos vendo uma propriedade e não usando um método?
# Pandas defaults everything non-numeric to dtype `object` (its string type).
# Note that `date` is still object — it must be converted to datetime.
df1.dtypes
df1["date"] = pd.to_datetime( df1["date"] )
df1.dtypes
1.4. Check NA | # O método isna vai mostrar todas as linhas que tem pelo menos uma coluna com um NA (vazia)
# isna() flags cells that are NA; summing gives the NA count per column.
df1.isna().sum()
# These NAs must be treated. There are basically three ways:
# 1. Drop the rows (easy and fast, but throws data away)
# 2. Impute with ML-style rules, e.g. replace empty cells with the column's
#    own behaviour (median, mean, ...)
# 3. Use business understanding to fill in values and recover the data.
1.5. Fillout NA | df1["competition_distance"].max()
# competition_distance: distance in meters to the nearest competitor store.
# A missing value most likely means the competitor is geographically very far
# away, so imputing a value much larger than the observed maximum keeps the
# "no competitor nearby" signal instead of dropping the rows.
# apply() runs the lambda over this single column; the result overwrites the
# original column.
df1["competition_distance"] = df1["competition_distance"].apply( lambda x: 200000.0 if math.isnan( x ) else x)
# competition_open_since_month - approximate month the nearest competitor opened.
# PREMISE: when missing, copy the month of the sale date. Time-derived
# features such as "how long since the event happened" matter later in
# feature engineering, and nearby-competition info strongly influences sales,
# so we avoid discarding these rows. axis=1 is required because the lambda
# reads more than one column (it was not needed for competition_distance,
# which used a single column).
df1["competition_open_since_month"] = df1.apply( lambda x: x["date"].month if math.isnan( x["competition_open_since_month"] ) else x["competition_open_since_month"] , axis=1)
# competition_open_since_year - same logic as above, but with the year.
df1["competition_open_since_year"] = df1.apply( lambda x: x["date"].year if math.isnan( x["competition_open_since_year"] ) else x["competition_open_since_year"] , axis=1)
# promo2: continuing promotion flag (0 = not participating, 1 = participating).
# promo2_since_week: year/week the store joined Promo2. NA here means the
# store never joined, so fall back to the week of the sale date (same idea).
df1["promo2_since_week"] = df1.apply( lambda x: x["date"].week if math.isnan( x["promo2_since_week"] ) else x["promo2_since_week"] , axis=1)
# promo2_since_year
df1["promo2_since_year"] = df1.apply( lambda x: x["date"].year if math.isnan( x["promo2_since_year"] ) else x["promo2_since_year"] , axis=1)
# promo_interval: months in which Promo2 restarts, e.g. "Feb,May,Aug,Nov".
# Strategy: split it into a list and flag rows whose sale month is in it.
# Helper mapping from month number to the abbreviations used in promo_interval.
month_map = {1: "Jan",2: "Feb",3: "Mar",4: "Apr",5: "May",6: "Jun",7: "Jul",8: "Aug",9: "Sep",10: "Oct",11: "Nov",12: "Dec"}
# NA in promo_interval means the store never joined Promo2 -> replace with 0;
# inplace=True mutates the column directly instead of returning a copy.
df1["promo_interval"].fillna(0, inplace=True)
# month_map column: month abbreviation of each sale date (Series.map suffices
# here because the lookup uses a single column).
df1["month_map"] = df1["date"].dt.month.map( month_map )
# is_promo: 1 when the sale month falls inside the store's Promo2 months,
# 0 otherwise (including stores that never joined, where promo_interval == 0).
# Filtering to the two needed columns keeps the row-wise apply simpler.
df1["is_promo"] = df1[["promo_interval","month_map"]].apply(lambda x: 0 if x["promo_interval"] == 0 else 1 if x["month_map"] in x["promo_interval"].split( "," ) else 0, axis=1 )
df1.isna().sum()
# competition_distance now has no NAs and its maximum is the imputed 200000.
df1["competition_distance"].max()
# Random rows (transposed with .T) for a quick sanity check.
df1.sample(5).T
1.6. Change Types | # Importante checar se alguma operação feita na etapa anterior alterou algum dado anterior
# dtypes again, to check the previous step did not corrupt any column.
# competition_open_since_month float64
# competition_open_since_year float64
# promo2_since_week float64
# promo2_since_year float64
# These four should really be integers (months, weeks and years).
df1.dtypes
# astype(int) casts the column and the result is saved back over it.
df1["competition_open_since_month"] = df1["competition_open_since_month"].astype(int)
df1["competition_open_since_year"] = df1["competition_open_since_year"].astype(int)
df1["promo2_since_week"] = df1["promo2_since_week"].astype(int)
df1["promo2_since_year"] = df1["promo2_since_year"].astype(int)
df1.dtypes
1.7. Descriptive Statistics Ganhar conhecimento de negócio e detectar alguns erros | # Central Tendency = mean, median
# Dispersion = std, min, max, range, skew, kurtosis
# Split the variables into numerical and categorical: descriptive statistics
# apply to both kinds, but they are built differently for each.
# select_dtypes takes a list of dtypes to include/exclude;
# datetime64[ns] is the timestamp dtype (the `date` column).
num_attributes = df1.select_dtypes( include=["int64","int32","float64"] )
cat_attributes = df1.select_dtypes( exclude=["int64", "float64","int32","datetime64[ns]"] )
num_attributes.sample(2)
cat_attributes.sample(2)
1.7.1 Numerical Attributes | # Apply para aplicar uma operação em todas as colunas e transformar num dataframe pra facilitar a visualização
# Transposed so that metrics become columns and features become rows.
# central tendency
ct1 = pd.DataFrame( num_attributes.apply ( np.mean) ).T
ct2 = pd.DataFrame( num_attributes.apply ( np.median ) ).T
# dispersion
d1 = pd.DataFrame( num_attributes.apply( np.std )).T
d2 = pd.DataFrame( num_attributes.apply( min )).T
d3 = pd.DataFrame( num_attributes.apply( max )).T
d4 = pd.DataFrame( num_attributes.apply( lambda x: x.max() - x.min() )).T
d5 = pd.DataFrame( num_attributes.apply( lambda x: x.skew() )).T
d6 = pd.DataFrame( num_attributes.apply( lambda x: x.kurtosis() )).T
# Concatenate the metrics in the display order we want (pandas concat),
# then transpose and reset the index so each row is one attribute.
m = pd.concat([d2,d3,d4,ct1,ct2,d1,d5,d6]).T.reset_index()
# Name the columns so the default integer headers are replaced.
m.columns = ["attributes","min","max","range","mean","median","std","skew","kurtosis"]
m
# Example reading for sales: min 0, max ~41k; mean and median are close, so
# there is no large shift away from a Normal shape.
# Skew close to 0 -> very close to a Normal distribution.
# Kurtosis close to 1 -> no extremely tall peak.
# Plot the sales distribution; height/aspect control the figure size
# (see the seaborn displot documentation).
# sns.displot(df1['sales'], height=8, aspect=2)
sns.displot( df1["sales"], height=8, aspect=2)
# High skew: values concentrated near the origin — competitors are very close.
sns.displot( df1["competition_distance"])
1.7.2 Categorical Attributes Vai de boxplot! | # ??? No do Meigarom só apareceu os: state_holiday, store_type, assortment, promo_interval e month_map
# Number of distinct levels per categorical column (int32 columns were also
# excluded from the categorical set).
cat_attributes.apply( lambda x: x.unique().shape[0] )
# Boxplot of sales by state holiday:
# sns.boxplot(x=, y=, data=) — x is the reference category on the axis,
# y is the measure being compared (sales here).
sns.boxplot( x="state_holiday", y="sales", data=df1 )
# Plotted this way nothing is visible (variables with very different ranges),
# so filter the data before plotting.
# Note: "0" in state_holiday is stored as a string, not a number.
aux1 = df1[(df1["state_holiday"] != "0") & (df1["sales"] > 0)]
# subplot(rows, cols, index) places the three boxplots side by side.
plt.pyplot.subplot( 1, 3, 1)
sns.boxplot( x="state_holiday", y="sales", data=aux1)
plt.pyplot.subplot( 1, 3, 2)
sns.boxplot( x="store_type", y="sales", data=aux1)
plt.pyplot.subplot( 1, 3, 3)
sns.boxplot( x="assortment", y="sales", data=aux1)
# Reading a boxplot:
# - middle line: the median (half the values, by position, lie below it)
# - box bottom/top: the 25th and 75th percentiles
# - whiskers: min and max; points beyond the top whisker are outliers
#   (by convention beyond 1.5x the interquartile range, not 3 std devs)
# assortment = the store's product mix.
2.0. STEP 02 - FEATURE ENGINEERING Para quê fazer a Feature Engineering? Para ter as variáveis DISPONÍVEIS para ESTUDO durante a Análise Exploratória dos Dados. Pra não ter bagunça, crie as variáveis ANTES na análise exploratória!!! Vou usar uma classe Image para colocar a imagem do mapa mental: | df2 = df1.copy() | _____no_output_____ | MIT | m03_v01_store_sales_prediction.ipynb | luana-afonso/DataScience-Em-Producao |
2.1. Hypothesis Mind Map | Image ("img/mind-map-hypothesis.png") | _____no_output_____ | MIT | m03_v01_store_sales_prediction.ipynb | luana-afonso/DataScience-Em-Producao |
2.2. Hypothesis Creation 2.2.1 Store Hypothesis 1. Stores with greater number of employees should sell more. 2. Stores with greater stock size should sell more. 3. Stores with bigger size should sell more. 4. Stores with smaller size should sell less. 5. Stores with greater assortment should sell more. 6. Stores with more competitors nearby should sell less. 7. Stores with competitors for longer should sell more. 2.2.2 Product Hypothesis 1. Stores with more marketing should sell more. 2. Stores that exhibit more products in the showcase sell more. 3. Stores that have lower prices on products should sell more. 4. Stores that have lower prices for longer on products should sell more. 5. Stores with more consecutive sales should sell more. 2.2.3Time-based Hypothesis 1. Stores with more days in holidays should sell less. 2. Stores that open in the first 6 months should sell more. 3. Stores that open on weekends should sell more. 2.3. Final Hypothesis List As hipóteses das quais temos os dados, vão para a lista final de hipóteses. 1. Stores with greater assortment should sell more.2. Stores with more competitors nearby should sell less.3. Stores with competitors for longer should sell more. 4. Stores with active sales for longer should sell more.5. Stores with more days on sale should sell more.7. Stores with more consecutive sales should sell more.8. Stores opened during the Christmas holiday should sell more.9. Stores should sell more over the years.10. Stores should sell more in the second half of the year.11. Stores should sell more after the 10th of each month.12. Stores should sell less on weekends.13. Stores should sell less during school holidays. 2.4. Feature Engineering | # year
df2['year'] = df2['date'].dt.year
# month
df2['month'] = df2['date'].dt.month
# day
df2['day'] = df2['date'].dt.day
# week of year
df2['week_of_year'] = df2['date'].dt.isocalendar().week
# year week
# No date arithmetic here — just reformat the date as a "year-week" string
# (strftime format codes).
df2['year_week'] = df2['date'].dt.strftime( '%Y-%W' )
# week of year
# ps: Series.dt.weekofyear / Series.dt.week are deprecated in favour of
# Series.dt.isocalendar().week (FutureWarning in newer pandas).
# df2["week_of_year"] = df2["date"].dt.weekofyear
df2["week_of_year"] = df2["date"].dt.isocalendar().week
# NOTE(review): week_of_year (ISO week) differs from the %W week embedded in
# year_week — %W counts weeks starting at the year's first Monday.
df2.sample(10).T
# competition since
# We already have `date` to compare against, but the competitor-opening info
# is split across a year column and a month column; join them into one date
# and subtract. datetime.datetime(year=, month=, day=) builds the date;
# day=1 because the day is unknown. Applied row by row with a lambda, and
# axis=1 is required since the lambda reads two columns.
df2["competition_since"] = df2.apply(lambda x: datetime.datetime( year= x["competition_open_since_year"], month= x["competition_open_since_month"], day= 1), axis=1 )
# competition_since now looks like 2008-09-01 00:00:00. The elapsed time is
# (date - competition_since) / 30 to express it in ~months; .days extracts
# the day count from each timedelta and the result is stored as int.
df2['competition_time_month'] = ( ( df2['date'] - df2['competition_since'] )/30 ).apply( lambda x: x.days ).astype( int )
df2.head().T
# promo since: same strategy as above, but promotions only carry year and
# week information (no month), so first join the two columns as a
# "year-week" string — astype(str) so '+' concatenates, '-' to keep the
# year-week format. At this point promo_since is still a string, not a date.
df2['promo_since'] = df2['promo2_since_year'].astype( str ) + '-' + df2['promo2_since_week'].astype( str )
# Trick to convert the string above into a date: strptime(value, format).
# Appending '-1' pins the weekday (Monday) so '%Y-%W-%w' parses
# unambiguously; applied to every row, then one week is subtracted.
df2['promo_since'] = df2['promo_since'].apply( lambda x: datetime.datetime.strptime( x + '-1', '%Y-%W-%w' ) - datetime.timedelta( days=7 ) )
# With two dates available, subtract and divide by 7 to express the elapsed
# time in weeks.
df2['promo_time_week'] = ( ( df2['date'] - df2['promo_since'] )/7 ).apply( lambda x: x.days ).astype( int )
#Obs:
# %W Week number of the year (Monday as the first day of the week).
# All days in a new year preceding the first Monday are considered to be in week 0
# %w Weekday as a decimal number.
# assortment (describes an assortment level: a = basic, b = extra, c = extended)
# Replace the letters by their meaning for readability. The lambda chains
# conditional expressions; no axis is needed since only the "assortment"
# column is used.
# assortment
df2['assortment'] = df2['assortment'].apply( lambda x: 'basic' if x == 'a' else 'extra' if x == 'b' else 'extended' )
# Same idea for state_holiday (a = public, b = easter, c = christmas).
# state holiday
df2['state_holiday'] = df2['state_holiday'].apply( lambda x: 'public_holiday' if x == 'a' else 'easter_holiday' if x == 'b' else 'christmas' if x == 'c' else 'regular_day' )
df2.head().T
3.0. STEP 03 - VARIABLES FILTERING | # Antes de qualquer coisa, ao começar um novo passo, copia o dataset do passo anterior e passa a trabalhar com um novo
# At the start of each new step, copy the previous step's dataframe and work
# on the copy from then on.
df3 = df2.copy()
df3.head()
3.1. ROWS FILTERING | # "open" != 0 & "sales" > 0
# Keep only open stores ("open" != 0) that had actual sales ("sales" > 0).
df3 = df3[(df3["open"] != 0) & (df3["sales"] > 0)]
3.2. COLUMNS SELECTION | # Vamos "dropar" as colunas que não queremos
# "open" is dropped because, after the row filter above removed the rows
# where it was 0, only the value 1 remains — a useless column.
cols_drop = ['customers', 'open', 'promo_interval', 'month_map']
# drop is a pandas method (which labels, and the direction);
# axis=0 drops rows, axis=1 drops columns.
df3 = df3.drop( cols_drop, axis=1 )
df3.columns
Dependencies | import os
import cv2
import shutil
import random
import warnings
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
from tensorflow import set_random_seed
from sklearn.utils import class_weight
from sklearn.model_selection import train_test_split
from sklearn.metrics import confusion_matrix, cohen_kappa_score
from keras import backend as K
from keras.models import Model
from keras.utils import to_categorical
from keras import optimizers, applications
from keras.preprocessing.image import ImageDataGenerator
from keras.callbacks import EarlyStopping, ReduceLROnPlateau, Callback, LearningRateScheduler
from keras.layers import Dense, Dropout, GlobalAveragePooling2D, Input
# Set seeds to make the experiment more reproducible.
def seed_everything(seed=0):
    """Seed Python's `random`, hash randomization, NumPy and TensorFlow RNGs."""
    random.seed(seed)
    os.environ['PYTHONHASHSEED'] = str(seed)
    np.random.seed(seed)
    # Bug fix: was set_random_seed(0), which silently ignored the `seed` argument.
    set_random_seed(seed)
seed = 0
seed_everything(seed)
%matplotlib inline
sns.set(style="whitegrid")
warnings.filterwarnings("ignore") | Using TensorFlow backend.
| MIT | Model backlog/DenseNet169/133 - DenseNet169 - Classification - Refactor.ipynb | ThinkBricks/APTOS2019BlindnessDetection |
Load data | hold_out_set = pd.read_csv('../input/aptos-data-split/hold-out.csv')
# Split the hold-out CSV into train/validation subsets by its 'set' column.
X_train = hold_out_set[hold_out_set['set'] == 'train']
X_val = hold_out_set[hold_out_set['set'] == 'validation']
test = pd.read_csv('../input/aptos2019-blindness-detection/test.csv')
print('Number of train samples: ', X_train.shape[0])
print('Number of validation samples: ', X_val.shape[0])
print('Number of test samples: ', test.shape[0])
# Preprocess data: append the image-file extension so id_code matches the filenames
# on disk, and cast labels to strings as flow_from_dataframe's categorical mode expects.
X_train["id_code"] = X_train["id_code"].apply(lambda x: x + ".png")
X_val["id_code"] = X_val["id_code"].apply(lambda x: x + ".png")
test["id_code"] = test["id_code"].apply(lambda x: x + ".png")
X_train['diagnosis'] = X_train['diagnosis'].astype('str')
X_val['diagnosis'] = X_val['diagnosis'].astype('str')
display(X_train.head()) | Number of train samples: 2929
Number of validation samples: 733
Number of test samples: 1928
| MIT | Model backlog/DenseNet169/133 - DenseNet169 - Classification - Refactor.ipynb | ThinkBricks/APTOS2019BlindnessDetection |
Model parameters | # Model parameters
N_CLASSES = X_train['diagnosis'].nunique()  # number of distinct diagnosis grades
BATCH_SIZE = 16
EPOCHS = 40  # maximum epochs for the fine-tuning phase
WARMUP_EPOCHS = 5  # epochs spent training only the new head layers
LEARNING_RATE = 1e-4  # fine-tuning learning rate
WARMUP_LEARNING_RATE = 1e-3  # higher rate while the backbone is frozen
HEIGHT = 320  # input image height fed to the network
WIDTH = 320  # input image width fed to the network
CHANNELS = 3
ES_PATIENCE = 5  # EarlyStopping patience (epochs without val_loss improvement)
RLROP_PATIENCE = 3  # ReduceLROnPlateau patience (epochs)
DECAY_DROP = 0.5  # learning-rate multiplier applied on plateau
def kappa(y_true, y_pred, n_classes=5):
    """Simplified kappa-style Keras metric on one-hot labels.

    Penalizes the squared total class distance between predicted and true
    grades, normalized by the worst possible distance for the batch.
    """
    true_cls = K.cast(K.argmax(y_true), K.floatx())
    pred_cls = K.cast(K.argmax(y_pred), K.floatx())
    batch_size = K.cast(K.shape(y_true)[0], K.floatx())
    total_distance = K.sum(K.abs(true_cls - pred_cls))
    max_distance = n_classes - 1
    return 1 - ((total_distance**2) / (batch_size * (max_distance**2)))
def step_decay(epoch):
    """Piecewise-constant learning-rate schedule for LearningRateScheduler.

    Returns 30e-5 for epochs 0-3, then 15e-5, 7.5e-5, 3e-5 and finally
    1e-5, stepping down after epochs 3, 7, 11 and 15.
    """
    # (last epoch of the previous bracket, rate), checked from latest to earliest.
    for boundary, rate in ((15, 1e-5), (11, 3e-5), (7, 7.5e-5), (3, 15e-5)):
        if epoch > boundary:
            return rate
    return 30e-5
def focal_loss(y_true, y_pred):
    """Multi-class focal loss (gamma = 2) over one-hot targets.

    Down-weights well-classified examples so training focuses on hard ones.
    """
    gamma = 2.0
    eps = K.epsilon()
    # p_t: probability assigned to the correct side, clipped for log stability.
    p_t = K.clip(y_true * y_pred + (1 - y_true) * (1 - y_pred), eps, 1 - eps)
    cross_entropy = -K.log(p_t)
    modulating_factor = K.pow(1 - p_t, gamma)
    loss = K.sum(modulating_factor * cross_entropy, axis=1)
return loss | _____no_output_____ | MIT | Model backlog/DenseNet169/133 - DenseNet169 - Classification - Refactor.ipynb | ThinkBricks/APTOS2019BlindnessDetection |
Pre-procecess images | train_base_path = '../input/aptos2019-blindness-detection/train_images/'
test_base_path = '../input/aptos2019-blindness-detection/test_images/'
# Local working directories that will hold the preprocessed copies of each split.
train_dest_path = 'base_dir/train_images/'
validation_dest_path = 'base_dir/validation_images/'
test_dest_path = 'base_dir/test_images/'
# Making sure directories don't exist (drop leftovers from a previous run)
if os.path.exists(train_dest_path):
    shutil.rmtree(train_dest_path)
if os.path.exists(validation_dest_path):
    shutil.rmtree(validation_dest_path)
if os.path.exists(test_dest_path):
    shutil.rmtree(test_dest_path)
# Creating train, validation and test directories
os.makedirs(train_dest_path)
os.makedirs(validation_dest_path)
os.makedirs(test_dest_path)
def crop_image(img, tol=7):
    """Trim near-black borders: keep only rows/columns containing a pixel brighter than `tol`.

    Handles grayscale (2-D) and RGB (3-D) arrays. Returns the input unchanged
    when the brightness mask would crop away the whole image.
    """
    if img.ndim == 2:
        bright = img > tol
        return img[np.ix_(bright.any(1), bright.any(0))]
    elif img.ndim == 3:
        bright = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY) > tol
        rows, cols = bright.any(1), bright.any(0)
        if img[:, :, 0][np.ix_(rows, cols)].shape[0] == 0:
            # Image is too dark: the crop would remove everything, so keep the original.
            return img
        channels = [img[:, :, c][np.ix_(rows, cols)] for c in range(3)]
        return np.stack(channels, axis=-1)
def circle_crop(img):
    """Crop borders, resize to a square, black out everything outside the
    inscribed circle, then crop the borders once more."""
    img = crop_image(img)
    h, w, _ = img.shape
    side = np.max((h, w))
    img = cv2.resize(img, (side, side))
    h, w, _ = img.shape
    cx = w // 2
    cy = h // 2
    radius = np.amin((cx, cy))
    # Binary mask with a filled circle centered on the image.
    mask = np.zeros((h, w), np.uint8)
    cv2.circle(mask, (cx, cy), int(radius), 1, thickness=-1)
    img = crop_image(cv2.bitwise_and(img, img, mask=mask))
    return img
def preprocess_image(base_path, save_path, image_id, HEIGHT, WIDTH, sigmaX=10):
    """Read an image, circle-crop it, resize to (HEIGHT, WIDTH), apply a
    Gaussian-blur subtraction for contrast enhancement, and save the result."""
    img = cv2.cvtColor(cv2.imread(base_path + image_id), cv2.COLOR_BGR2RGB)
    img = cv2.resize(circle_crop(img), (HEIGHT, WIDTH))
    blurred = cv2.GaussianBlur(img, (0, 0), sigmaX)
    img = cv2.addWeighted(img, 4, blurred, -4, 128)
    cv2.imwrite(save_path + image_id, img)
# Pre-procecss train set
for i, image_id in enumerate(X_train['id_code']):
preprocess_image(train_base_path, train_dest_path, image_id, HEIGHT, WIDTH)
# Pre-procecss validation set
for i, image_id in enumerate(X_val['id_code']):
preprocess_image(train_base_path, validation_dest_path, image_id, HEIGHT, WIDTH)
# Pre-procecss test set
for i, image_id in enumerate(test['id_code']):
preprocess_image(test_base_path, test_dest_path, image_id, HEIGHT, WIDTH) | _____no_output_____ | MIT | Model backlog/DenseNet169/133 - DenseNet169 - Classification - Refactor.ipynb | ThinkBricks/APTOS2019BlindnessDetection |
Data generator | train_datagen=ImageDataGenerator(rescale=1./255,
rotation_range=360,
horizontal_flip=True,
vertical_flip=True)
valid_datagen=ImageDataGenerator(rescale=1./255)
train_generator=train_datagen.flow_from_dataframe(
dataframe=X_train,
directory=train_dest_path,
x_col="id_code",
y_col="diagnosis",
class_mode="categorical",
batch_size=BATCH_SIZE,
target_size=(HEIGHT, WIDTH),
seed=seed)
valid_generator=valid_datagen.flow_from_dataframe(
dataframe=X_val,
directory=validation_dest_path,
x_col="id_code",
y_col="diagnosis",
class_mode="categorical",
batch_size=BATCH_SIZE,
target_size=(HEIGHT, WIDTH),
seed=seed)
test_generator=valid_datagen.flow_from_dataframe(
dataframe=test,
directory=test_dest_path,
x_col="id_code",
batch_size=1,
class_mode=None,
shuffle=False,
target_size=(HEIGHT, WIDTH),
seed=seed) | Found 2929 validated image filenames belonging to 5 classes.
Found 733 validated image filenames belonging to 5 classes.
Found 1928 validated image filenames.
| MIT | Model backlog/DenseNet169/133 - DenseNet169 - Classification - Refactor.ipynb | ThinkBricks/APTOS2019BlindnessDetection |
Model | def create_model(input_shape, n_out):
input_tensor = Input(shape=input_shape)
base_model = applications.DenseNet169(weights=None,
include_top=False,
input_tensor=input_tensor)
base_model.load_weights('../input/keras-notop/densenet169_weights_tf_dim_ordering_tf_kernels_notop.h5')
x = GlobalAveragePooling2D()(base_model.output)
x = Dropout(0.5)(x)
x = Dense(2048, activation='relu')(x)
x = Dropout(0.5)(x)
final_output = Dense(n_out, activation='softmax', name='final_output')(x)
model = Model(input_tensor, final_output)
return model | _____no_output_____ | MIT | Model backlog/DenseNet169/133 - DenseNet169 - Classification - Refactor.ipynb | ThinkBricks/APTOS2019BlindnessDetection |
Train top layers | model = create_model(input_shape=(HEIGHT, WIDTH, CHANNELS), n_out=N_CLASSES)
# Warm-up: freeze the pretrained backbone and train only the last 5 layers (the new head).
for layer in model.layers:
    layer.trainable = False
for i in range(-5, 0):
    model.layers[i].trainable = True
# Balance the loss against the skewed diagnosis-class distribution.
class_weights = class_weight.compute_class_weight('balanced', np.unique(X_train['diagnosis'].astype('int').values), X_train['diagnosis'].astype('int').values)
metric_list = ["accuracy", kappa]
optimizer = optimizers.Adam(lr=WARMUP_LEARNING_RATE)
model.compile(optimizer=optimizer, loss='categorical_crossentropy', metrics=metric_list)
model.summary()
# Steps per epoch = floor(samples / batch size).
STEP_SIZE_TRAIN = train_generator.n//train_generator.batch_size
STEP_SIZE_VALID = valid_generator.n//valid_generator.batch_size
history_warmup = model.fit_generator(generator=train_generator,
                                     steps_per_epoch=STEP_SIZE_TRAIN,
                                     validation_data=valid_generator,
                                     validation_steps=STEP_SIZE_VALID,
                                     epochs=WARMUP_EPOCHS,
                                     class_weight=class_weights,
verbose=1).history | Epoch 1/5
183/183 [==============================] - 81s 445ms/step - loss: 1.3357 - acc: 0.5731 - kappa: 0.3848 - val_loss: 1.0849 - val_acc: 0.5083 - val_kappa: -0.2198
Epoch 2/5
183/183 [==============================] - 68s 373ms/step - loss: 0.9705 - acc: 0.6499 - kappa: 0.6185 - val_loss: 1.0448 - val_acc: 0.5760 - val_kappa: 0.1622
Epoch 3/5
183/183 [==============================] - 69s 379ms/step - loss: 0.9260 - acc: 0.6571 - kappa: 0.6398 - val_loss: 1.2030 - val_acc: 0.4881 - val_kappa: -0.4510
Epoch 4/5
183/183 [==============================] - 69s 378ms/step - loss: 0.8650 - acc: 0.6837 - kappa: 0.6950 - val_loss: 1.0301 - val_acc: 0.5425 - val_kappa: 0.0034
Epoch 5/5
183/183 [==============================] - 69s 377ms/step - loss: 0.8863 - acc: 0.6640 - kappa: 0.6651 - val_loss: 0.9225 - val_acc: 0.6444 - val_kappa: 0.5296
| MIT | Model backlog/DenseNet169/133 - DenseNet169 - Classification - Refactor.ipynb | ThinkBricks/APTOS2019BlindnessDetection |
Fine-tune the complete model | for layer in model.layers:
    layer.trainable = True  # unfreeze the entire network for fine-tuning
# lrstep = LearningRateScheduler(step_decay)
# Stop on stalled val_loss (restoring the best weights) and halve the LR on plateaus.
es = EarlyStopping(monitor='val_loss', mode='min', patience=ES_PATIENCE, restore_best_weights=True, verbose=1)
rlrop = ReduceLROnPlateau(monitor='val_loss', mode='min', patience=RLROP_PATIENCE, factor=DECAY_DROP, min_lr=1e-6, verbose=1)
callback_list = [es, rlrop]
# Re-compile with the lower fine-tuning learning rate.
optimizer = optimizers.Adam(lr=LEARNING_RATE)
model.compile(optimizer=optimizer, loss='categorical_crossentropy', metrics=metric_list)
model.summary()
history = model.fit_generator(generator=train_generator,
                              steps_per_epoch=STEP_SIZE_TRAIN,
                              validation_data=valid_generator,
                              validation_steps=STEP_SIZE_VALID,
                              epochs=EPOCHS,
                              callbacks=callback_list,
                              class_weight=class_weights,
verbose=1).history | Epoch 1/40
183/183 [==============================] - 139s 757ms/step - loss: 0.6850 - acc: 0.7466 - kappa: 0.8268 - val_loss: 0.5695 - val_acc: 0.7908 - val_kappa: 0.8843
Epoch 2/40
183/183 [==============================] - 87s 478ms/step - loss: 0.5764 - acc: 0.7828 - kappa: 0.8835 - val_loss: 0.5638 - val_acc: 0.7880 - val_kappa: 0.8543
Epoch 3/40
183/183 [==============================] - 89s 487ms/step - loss: 0.5302 - acc: 0.7968 - kappa: 0.8996 - val_loss: 0.4854 - val_acc: 0.8298 - val_kappa: 0.9267
Epoch 4/40
183/183 [==============================] - 90s 493ms/step - loss: 0.4941 - acc: 0.8060 - kappa: 0.9220 - val_loss: 0.5247 - val_acc: 0.8061 - val_kappa: 0.9171
Epoch 5/40
183/183 [==============================] - 89s 487ms/step - loss: 0.4654 - acc: 0.8279 - kappa: 0.9285 - val_loss: 0.4637 - val_acc: 0.8145 - val_kappa: 0.9086
Epoch 6/40
183/183 [==============================] - 90s 491ms/step - loss: 0.4864 - acc: 0.8170 - kappa: 0.9225 - val_loss: 0.4663 - val_acc: 0.8326 - val_kappa: 0.9399
Epoch 7/40
183/183 [==============================] - 90s 493ms/step - loss: 0.4761 - acc: 0.8265 - kappa: 0.9363 - val_loss: 0.6075 - val_acc: 0.8006 - val_kappa: 0.8896
Epoch 8/40
183/183 [==============================] - 90s 494ms/step - loss: 0.4110 - acc: 0.8473 - kappa: 0.9440 - val_loss: 0.5248 - val_acc: 0.8229 - val_kappa: 0.9262
Epoch 00008: ReduceLROnPlateau reducing learning rate to 4.999999873689376e-05.
Epoch 9/40
183/183 [==============================] - 89s 486ms/step - loss: 0.4127 - acc: 0.8477 - kappa: 0.9442 - val_loss: 0.4522 - val_acc: 0.8187 - val_kappa: 0.9232
Epoch 10/40
183/183 [==============================] - 91s 498ms/step - loss: 0.4236 - acc: 0.8498 - kappa: 0.9455 - val_loss: 0.4969 - val_acc: 0.8173 - val_kappa: 0.9069
Epoch 11/40
183/183 [==============================] - 92s 503ms/step - loss: 0.3767 - acc: 0.8562 - kappa: 0.9504 - val_loss: 0.5195 - val_acc: 0.7950 - val_kappa: 0.8966
Epoch 12/40
183/183 [==============================] - 93s 509ms/step - loss: 0.3427 - acc: 0.8696 - kappa: 0.9628 - val_loss: 0.5767 - val_acc: 0.8131 - val_kappa: 0.9236
Epoch 00012: ReduceLROnPlateau reducing learning rate to 2.499999936844688e-05.
Epoch 13/40
183/183 [==============================] - 92s 505ms/step - loss: 0.2877 - acc: 0.8839 - kappa: 0.9645 - val_loss: 0.4223 - val_acc: 0.8424 - val_kappa: 0.9401
Epoch 14/40
183/183 [==============================] - 93s 510ms/step - loss: 0.2880 - acc: 0.8910 - kappa: 0.9704 - val_loss: 0.4906 - val_acc: 0.8103 - val_kappa: 0.9350
Epoch 15/40
183/183 [==============================] - 92s 505ms/step - loss: 0.2696 - acc: 0.9003 - kappa: 0.9719 - val_loss: 0.4484 - val_acc: 0.8271 - val_kappa: 0.9320
Epoch 16/40
183/183 [==============================] - 93s 509ms/step - loss: 0.2698 - acc: 0.8996 - kappa: 0.9774 - val_loss: 0.4540 - val_acc: 0.8229 - val_kappa: 0.9406
Epoch 00016: ReduceLROnPlateau reducing learning rate to 1.249999968422344e-05.
Epoch 17/40
183/183 [==============================] - 92s 504ms/step - loss: 0.2323 - acc: 0.9197 - kappa: 0.9798 - val_loss: 0.5455 - val_acc: 0.7894 - val_kappa: 0.8988
Epoch 18/40
183/183 [==============================] - 94s 515ms/step - loss: 0.2399 - acc: 0.9132 - kappa: 0.9767 - val_loss: 0.4185 - val_acc: 0.8508 - val_kappa: 0.9487
Epoch 19/40
183/183 [==============================] - 93s 507ms/step - loss: 0.2322 - acc: 0.9157 - kappa: 0.9791 - val_loss: 0.5034 - val_acc: 0.8061 - val_kappa: 0.9174
Epoch 20/40
183/183 [==============================] - 93s 508ms/step - loss: 0.2174 - acc: 0.9167 - kappa: 0.9826 - val_loss: 0.4698 - val_acc: 0.8452 - val_kappa: 0.9419
Epoch 21/40
183/183 [==============================] - 93s 507ms/step - loss: 0.2468 - acc: 0.9157 - kappa: 0.9800 - val_loss: 0.5091 - val_acc: 0.8131 - val_kappa: 0.9259
Epoch 00021: ReduceLROnPlateau reducing learning rate to 6.24999984211172e-06.
Epoch 22/40
183/183 [==============================] - 92s 501ms/step - loss: 0.1998 - acc: 0.9276 - kappa: 0.9841 - val_loss: 0.4864 - val_acc: 0.8285 - val_kappa: 0.9446
Epoch 23/40
183/183 [==============================] - 93s 507ms/step - loss: 0.2131 - acc: 0.9232 - kappa: 0.9844 - val_loss: 0.4938 - val_acc: 0.8173 - val_kappa: 0.9299
Restoring model weights from the end of the best epoch
Epoch 00023: early stopping
| MIT | Model backlog/DenseNet169/133 - DenseNet169 - Classification - Refactor.ipynb | ThinkBricks/APTOS2019BlindnessDetection |
Model loss graph | sns.set_style("whitegrid")
fig, (ax1, ax2, ax3) = plt.subplots(3, 1, sharex='col', figsize=(20, 18))
ax1.plot(history['loss'], label='Train loss')
ax1.plot(history['val_loss'], label='Validation loss')
ax1.legend(loc='best')
ax1.set_title('Loss')
ax2.plot(history['acc'], label='Train accuracy')
ax2.plot(history['val_acc'], label='Validation accuracy')
ax2.legend(loc='best')
ax2.set_title('Accuracy')
ax3.plot(history['kappa'], label='Train kappa')
ax3.plot(history['val_kappa'], label='Validation kappa')
ax3.legend(loc='best')
ax3.set_title('Kappa')
plt.xlabel('Epochs')
sns.despine()
plt.show()
# Create empty arrays to keep the predictions and labels
lastFullTrainPred = np.empty((0, N_CLASSES))
lastFullTrainLabels = np.empty((0, N_CLASSES))
lastFullValPred = np.empty((0, N_CLASSES))
lastFullValLabels = np.empty((0, N_CLASSES))
# Add train predictions and labels; the generators shuffle, so labels are
# captured batch-by-batch alongside the predictions. The +1 covers the
# final partial batch left over by integer division.
for i in range(STEP_SIZE_TRAIN+1):
    im, lbl = next(train_generator)
    scores = model.predict(im, batch_size=train_generator.batch_size)
    lastFullTrainPred = np.append(lastFullTrainPred, scores, axis=0)
    lastFullTrainLabels = np.append(lastFullTrainLabels, lbl, axis=0)
# Add validation predictions and labels
for i in range(STEP_SIZE_VALID+1):
    im, lbl = next(valid_generator)
    scores = model.predict(im, batch_size=valid_generator.batch_size)
    lastFullValPred = np.append(lastFullValPred, scores, axis=0)
    lastFullValLabels = np.append(lastFullValLabels, lbl, axis=0)
# Combined (train + validation) arrays, and argmax to collapse one-hot/softmax
# vectors into integer class ids.
lastFullComPred = np.concatenate((lastFullTrainPred, lastFullValPred))
lastFullComLabels = np.concatenate((lastFullTrainLabels, lastFullValLabels))
train_preds = [np.argmax(pred) for pred in lastFullTrainPred]
train_labels = [np.argmax(label) for label in lastFullTrainLabels]
validation_preds = [np.argmax(pred) for pred in lastFullValPred]
validation_labels = [np.argmax(label) for label in lastFullValLabels]
complete_labels = [np.argmax(label) for label in lastFullComLabels] | _____no_output_____ | MIT | Model backlog/DenseNet169/133 - DenseNet169 - Classification - Refactor.ipynb | ThinkBricks/APTOS2019BlindnessDetection |
Model Evaluation Confusion Matrix Original thresholds | labels = ['0 - No DR', '1 - Mild', '2 - Moderate', '3 - Severe', '4 - Proliferative DR']
def plot_confusion_matrix(train, validation, labels=labels):
    """Plot row-normalized confusion matrices for the train and validation
    splits side by side. Each of `train`/`validation` is a (labels, preds) pair."""
    fig, axes = plt.subplots(1, 2, sharex='col', figsize=(24, 7))
    panels = [
        (train, axes[0], "Blues", 'Train'),
        (validation, axes[1], sns.cubehelix_palette(8), 'Validation'),
    ]
    for (y_true, y_pred), ax, cmap, title in panels:
        cnf_matrix = confusion_matrix(y_true, y_pred)
        # Normalize each row so cells show the fraction of that true class.
        cnf_norm = cnf_matrix.astype('float') / cnf_matrix.sum(axis=1)[:, np.newaxis]
        df_cm = pd.DataFrame(cnf_norm, index=labels, columns=labels)
        sns.heatmap(df_cm, annot=True, fmt='.2f', cmap=cmap, ax=ax).set_title(title)
    plt.show()
plot_confusion_matrix((train_labels, train_preds), (validation_labels, validation_preds)) | _____no_output_____ | MIT | Model backlog/DenseNet169/133 - DenseNet169 - Classification - Refactor.ipynb | ThinkBricks/APTOS2019BlindnessDetection |
Quadratic Weighted Kappa | def evaluate_model(train, validation):
train_labels, train_preds = train
validation_labels, validation_preds = validation
print("Train Cohen Kappa score: %.3f" % cohen_kappa_score(train_preds, train_labels, weights='quadratic'))
print("Validation Cohen Kappa score: %.3f" % cohen_kappa_score(validation_preds, validation_labels, weights='quadratic'))
print("Complete set Cohen Kappa score: %.3f" % cohen_kappa_score(train_preds+validation_preds, train_labels+validation_labels, weights='quadratic'))
evaluate_model((train_preds, train_labels), (validation_preds, validation_labels)) | Train Cohen Kappa score: 0.962
Validation Cohen Kappa score: 0.900
Complete set Cohen Kappa score: 0.950
| MIT | Model backlog/DenseNet169/133 - DenseNet169 - Classification - Refactor.ipynb | ThinkBricks/APTOS2019BlindnessDetection |
Apply model to test set and output predictions | step_size = test_generator.n//test_generator.batch_size
test_generator.reset()
# batch_size=1 on the test generator, so step_size equals the sample count.
preds = model.predict_generator(test_generator, steps=step_size)
predictions = np.argmax(preds, axis=1)
results = pd.DataFrame({'id_code':test['id_code'], 'diagnosis':predictions})
# Strip the ".png" suffix that was appended during preprocessing.
results['id_code'] = results['id_code'].map(lambda x: str(x)[:-4])
# Cleaning created directories
if os.path.exists(train_dest_path):
    shutil.rmtree(train_dest_path)
if os.path.exists(validation_dest_path):
    shutil.rmtree(validation_dest_path)
if os.path.exists(test_dest_path):
shutil.rmtree(test_dest_path) | _____no_output_____ | MIT | Model backlog/DenseNet169/133 - DenseNet169 - Classification - Refactor.ipynb | ThinkBricks/APTOS2019BlindnessDetection |
Predictions class distribution | fig = plt.subplots(sharex='col', figsize=(24, 8.7))
sns.countplot(x="diagnosis", data=results, palette="GnBu_d").set_title('Test')
sns.despine()
plt.show()
results.to_csv('submission.csv', index=False)
display(results.head()) | _____no_output_____ | MIT | Model backlog/DenseNet169/133 - DenseNet169 - Classification - Refactor.ipynb | ThinkBricks/APTOS2019BlindnessDetection |
Introduction Now that you've seen the layers a convnet uses to extract features, it's time to put them together and build a network of your own! Simple to Refined In the last three lessons, we saw how convolutional networks perform **feature extraction** through three operations: **filter**, **detect**, and **condense**. A single round of feature extraction can only extract relatively simple features from an image, things like simple lines or contrasts. These are too simple to solve most classification problems. Instead, convnets will repeat this extraction over and over, so that the features become more complex and refined as they travel deeper into the network. Convolutional Blocks The network does this by passing the features through long chains of **convolutional blocks** which perform this extraction. These convolutional blocks are stacks of `Conv2D` and `MaxPool2D` layers, whose role in feature extraction we learned about in the last few lessons. Each block represents a round of extraction, and by composing these blocks the convnet can combine and recombine the features produced, growing them and shaping them to better fit the problem at hand. The deep structure of modern convnets is what allows this sophisticated feature engineering and has been largely responsible for their superior performance. Example - Design a Convnet Let's see how to define a deep convolutional network capable of engineering complex features. In this example, we'll create a Keras `Sequence` model and then train it on our Cars dataset. Step 1 - Load Data This hidden cell loads the data. | #$HIDE_INPUT$
# Imports
import os, warnings
import matplotlib.pyplot as plt
from matplotlib import gridspec
import numpy as np
import tensorflow as tf
from tensorflow.keras.preprocessing import image_dataset_from_directory
# Reproducability
def set_seed(seed=31415):
    """Seed NumPy and TensorFlow RNGs and request deterministic TF ops
    so runs are reproducible."""
    os.environ['PYTHONHASHSEED'] = str(seed)
    os.environ['TF_DETERMINISTIC_OPS'] = '1'
    np.random.seed(seed)
    tf.random.set_seed(seed)
set_seed()
# Set Matplotlib defaults
plt.rc('figure', autolayout=True)
plt.rc('axes', labelweight='bold', labelsize='large',
titleweight='bold', titlesize=18, titlepad=10)
plt.rc('image', cmap='magma')
warnings.filterwarnings("ignore") # to clean up output cells
# Load training and validation sets
ds_train_ = image_dataset_from_directory(
'../input/car-or-truck/train',
labels='inferred',
label_mode='binary',
image_size=[128, 128],
interpolation='nearest',
batch_size=64,
shuffle=True,
)
ds_valid_ = image_dataset_from_directory(
'../input/car-or-truck/valid',
labels='inferred',
label_mode='binary',
image_size=[128, 128],
interpolation='nearest',
batch_size=64,
shuffle=False,
)
# Data Pipeline
def convert_to_float(image, label):
    """Cast image pixels to float32 (tf.image.convert_image_dtype also
    rescales integer inputs to [0, 1]); labels pass through unchanged."""
    image = tf.image.convert_image_dtype(image, dtype=tf.float32)
    return image, label
# Let tf.data tune buffer sizes dynamically.
AUTOTUNE = tf.data.experimental.AUTOTUNE
# Input pipelines: convert to float, cache decoded images, and prefetch
# upcoming batches while the model trains on the current one.
ds_train = (
    ds_train_
    .map(convert_to_float)
    .cache()
    .prefetch(buffer_size=AUTOTUNE)
)
ds_valid = (
    ds_valid_
    .map(convert_to_float)
    .cache()
    .prefetch(buffer_size=AUTOTUNE)
)
| _____no_output_____ | Apache-2.0 | notebooks/computer_vision/raw/tut5.ipynb | guesswhohaha/learntools |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.