code
stringlengths
2.5k
150k
kind
stringclasses
1 value
#### Copyright 2017 Google LLC. ``` # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. ``` # 로지스틱 회귀 **학습 목표:** * 이전 실습의 주택 가격 중간값 예측 모델을 이진 분류 모델로 재편한다 * 이진 분류 문제에서 로지스틱 회귀와 선형 회귀의 효과를 비교한다 이전 실습과 동일하게 캘리포니아 주택 데이터 세트를 사용하되, 이번에는 특정 지역의 거주 비용이 높은지 여부를 예측하는 이진 분류 문제로 바꿔 보겠습니다. 또한 기본 특성으로 일단 되돌리겠습니다. ## 이진 분류 문제로 전환 데이터 세트의 타겟은 숫자(연속 값) 특성인 `median_house_value`입니다. 이 연속 값에 임계값을 적용하여 부울 라벨을 만들 수 있습니다. 특정 지역을 나타내는 특성이 주어질 때 거주 비용이 높은 지역인지를 예측하려고 합니다. 데이터 학습 및 평가를 위한 타겟을 준비하기 위해, 분류 임계값을 주택 가격 중앙값에 대한 75번째 백분위수(약 265,000)로 정의하겠습니다. 주택 가격이 임계값보다 높으면 라벨이 `1`로, 그렇지 않으면 라벨이 `0`으로 지정됩니다. ## 설정 아래 셀을 실행하여 데이터를 로드하고 입력 특성 및 타겟을 준비합니다. ``` from __future__ import print_function import math from IPython import display from matplotlib import cm from matplotlib import gridspec from matplotlib import pyplot as plt import numpy as np import pandas as pd from sklearn import metrics import tensorflow as tf from tensorflow.python.data import Dataset tf.logging.set_verbosity(tf.logging.ERROR) pd.options.display.max_rows = 10 pd.options.display.float_format = '{:.1f}'.format california_housing_dataframe = pd.read_csv("https://download.mlcc.google.com/mledu-datasets/california_housing_train.csv", sep=",") california_housing_dataframe = california_housing_dataframe.reindex( np.random.permutation(california_housing_dataframe.index)) ``` 아래 코드가 이전 실습에 비해 다른 점을 확인하세요. `median_house_value`를 타겟으로 사용하는 대신 `median_house_value_is_high`라는 이진 타겟을 새로 만들고 있습니다. 
``` def preprocess_features(california_housing_dataframe): """Prepares input features from California housing data set. Args: california_housing_dataframe: A Pandas DataFrame expected to contain data from the California housing data set. Returns: A DataFrame that contains the features to be used for the model, including synthetic features. """ selected_features = california_housing_dataframe[ ["latitude", "longitude", "housing_median_age", "total_rooms", "total_bedrooms", "population", "households", "median_income"]] processed_features = selected_features.copy() # Create a synthetic feature. processed_features["rooms_per_person"] = ( california_housing_dataframe["total_rooms"] / california_housing_dataframe["population"]) return processed_features def preprocess_targets(california_housing_dataframe): """Prepares target features (i.e., labels) from California housing data set. Args: california_housing_dataframe: A Pandas DataFrame expected to contain data from the California housing data set. Returns: A DataFrame that contains the target feature. """ output_targets = pd.DataFrame() # Create a boolean categorical feature representing whether the # median_house_value is above a set threshold. output_targets["median_house_value_is_high"] = ( california_housing_dataframe["median_house_value"] > 265000).astype(float) return output_targets # Choose the first 12000 (out of 17000) examples for training. training_examples = preprocess_features(california_housing_dataframe.head(12000)) training_targets = preprocess_targets(california_housing_dataframe.head(12000)) # Choose the last 5000 (out of 17000) examples for validation. validation_examples = preprocess_features(california_housing_dataframe.tail(5000)) validation_targets = preprocess_targets(california_housing_dataframe.tail(5000)) # Double-check that we've done the right thing. 
print("Training examples summary:") display.display(training_examples.describe()) print("Validation examples summary:") display.display(validation_examples.describe()) print("Training targets summary:") display.display(training_targets.describe()) print("Validation targets summary:") display.display(validation_targets.describe()) ``` ## 선형 회귀의 성능 측정 로지스틱 회귀가 효과적인 이유를 확인하기 위해, 우선 선형 회귀를 사용하는 단순 모델을 학습시켜 보겠습니다. 이 모델에서는 `{0, 1}` 집합에 속하는 값을 갖는 라벨을 사용하며 `0` 또는 `1`에 최대한 가까운 연속 값을 예측하려고 시도합니다. 또한 출력을 확률로 해석하려고 하므로 `(0, 1)` 범위 내에서 출력되는 것이 이상적입니다. 그런 다음 임계값 `0.5`를 적용하여 라벨을 결정합니다. 아래 셀을 실행하여 [LinearRegressor](https://www.tensorflow.org/api_docs/python/tf/estimator/LinearRegressor)로 선형 회귀 모델을 학습시킵니다. ``` def construct_feature_columns(input_features): """Construct the TensorFlow Feature Columns. Args: input_features: The names of the numerical input features to use. Returns: A set of feature columns """ return set([tf.feature_column.numeric_column(my_feature) for my_feature in input_features]) def my_input_fn(features, targets, batch_size=1, shuffle=True, num_epochs=None): """Trains a linear regression model. Args: features: pandas DataFrame of features targets: pandas DataFrame of targets batch_size: Size of batches to be passed to the model shuffle: True or False. Whether to shuffle the data. num_epochs: Number of epochs for which data should be repeated. None = repeat indefinitely Returns: Tuple of (features, labels) for next data batch """ # Convert pandas data into a dict of np arrays. features = {key:np.array(value) for key,value in dict(features).items()} # Construct a dataset, and configure batching/repeating. ds = Dataset.from_tensor_slices((features,targets)) # warning: 2GB limit ds = ds.batch(batch_size).repeat(num_epochs) # Shuffle the data, if specified. if shuffle: ds = ds.shuffle(10000) # Return the next batch of data. 
features, labels = ds.make_one_shot_iterator().get_next() return features, labels def train_linear_regressor_model( learning_rate, steps, batch_size, training_examples, training_targets, validation_examples, validation_targets): """Trains a linear regression model. In addition to training, this function also prints training progress information, as well as a plot of the training and validation loss over time. Args: learning_rate: A `float`, the learning rate. steps: A non-zero `int`, the total number of training steps. A training step consists of a forward and backward pass using a single batch. batch_size: A non-zero `int`, the batch size. training_examples: A `DataFrame` containing one or more columns from `california_housing_dataframe` to use as input features for training. training_targets: A `DataFrame` containing exactly one column from `california_housing_dataframe` to use as target for training. validation_examples: A `DataFrame` containing one or more columns from `california_housing_dataframe` to use as input features for validation. validation_targets: A `DataFrame` containing exactly one column from `california_housing_dataframe` to use as target for validation. Returns: A `LinearRegressor` object trained on the training data. """ periods = 10 steps_per_period = steps / periods # Create a linear regressor object. my_optimizer = tf.train.GradientDescentOptimizer(learning_rate=learning_rate) my_optimizer = tf.contrib.estimator.clip_gradients_by_norm(my_optimizer, 5.0) linear_regressor = tf.estimator.LinearRegressor( feature_columns=construct_feature_columns(training_examples), optimizer=my_optimizer ) # Create input functions. 
training_input_fn = lambda: my_input_fn(training_examples, training_targets["median_house_value_is_high"], batch_size=batch_size) predict_training_input_fn = lambda: my_input_fn(training_examples, training_targets["median_house_value_is_high"], num_epochs=1, shuffle=False) predict_validation_input_fn = lambda: my_input_fn(validation_examples, validation_targets["median_house_value_is_high"], num_epochs=1, shuffle=False) # Train the model, but do so inside a loop so that we can periodically assess # loss metrics. print("Training model...") print("RMSE (on training data):") training_rmse = [] validation_rmse = [] for period in range (0, periods): # Train the model, starting from the prior state. linear_regressor.train( input_fn=training_input_fn, steps=steps_per_period ) # Take a break and compute predictions. training_predictions = linear_regressor.predict(input_fn=predict_training_input_fn) training_predictions = np.array([item['predictions'][0] for item in training_predictions]) validation_predictions = linear_regressor.predict(input_fn=predict_validation_input_fn) validation_predictions = np.array([item['predictions'][0] for item in validation_predictions]) # Compute training and validation loss. training_root_mean_squared_error = math.sqrt( metrics.mean_squared_error(training_predictions, training_targets)) validation_root_mean_squared_error = math.sqrt( metrics.mean_squared_error(validation_predictions, validation_targets)) # Occasionally print the current loss. print(" period %02d : %0.2f" % (period, training_root_mean_squared_error)) # Add the loss metrics from this period to our list. training_rmse.append(training_root_mean_squared_error) validation_rmse.append(validation_root_mean_squared_error) print("Model training finished.") # Output a graph of loss metrics over periods. plt.ylabel("RMSE") plt.xlabel("Periods") plt.title("Root Mean Squared Error vs. 
Periods") plt.tight_layout() plt.plot(training_rmse, label="training") plt.plot(validation_rmse, label="validation") plt.legend() return linear_regressor linear_regressor = train_linear_regressor_model( learning_rate=0.000001, steps=200, batch_size=20, training_examples=training_examples, training_targets=training_targets, validation_examples=validation_examples, validation_targets=validation_targets) ``` ## 작업 1: 예측의 LogLoss 계산 가능성 확인 **예측을 조사하여 LogLoss를 계산하는 데 사용될 수 있는지 확인합니다.** `LinearRegressor`는 L2 손실을 사용하므로 출력이 확률로 해석되는 경우 오분류에 효과적으로 페널티를 부여하지 못합니다. 예를 들어 음성 예가 양성으로 분류되는 확률이 0.9인 경우와 0.9999인 경우에는 커다란 차이가 있어야 하지만 L2 손실은 두 경우를 분명하게 구분하지 않습니다. 반면, `LogLoss`는 이러한 "신뢰 오차"에 훨씬 큰 페널티를 부여합니다. `LogLoss`는 다음과 같이 정의됩니다. $$Log Loss = \sum_{(x,y)\in D} -y \cdot log(y_{pred}) - (1 - y) \cdot log(1 - y_{pred})$$ 하지만 우선 예측 값을 가져오는 것이 먼저입니다. `LinearRegressor.predict`를 사용하여 값을 가져올 수 있습니다. 예측과 타겟이 주어지면 `LogLoss`를 계산할 수 있는지 확인해 보세요. ### 해결 방법 해결 방법을 보려면 아래를 클릭하세요. ``` predict_validation_input_fn = lambda: my_input_fn(validation_examples, validation_targets["median_house_value_is_high"], num_epochs=1, shuffle=False) validation_predictions = linear_regressor.predict(input_fn=predict_validation_input_fn) validation_predictions = np.array([item['predictions'][0] for item in validation_predictions]) _ = plt.hist(validation_predictions) ``` ## 작업 2: 로지스틱 회귀 모델을 학습시키고 검증세트로 LogLoss 계산 로지스틱 회귀를 사용하려면 `LinearRegressor` 대신 [LinearClassifier](https://www.tensorflow.org/api_docs/python/tf/estimator/LinearClassifier)를 사용합니다. 아래 코드를 완성하세요. **NOTE**: `LinearClassifier` 모델에서 `train()` 및 `predict()`를 실행할 때는 반환된 dict의 `"probabilities"` 키를 통해 예측된 실수값 확률에 액세스할 수 있습니다(예: `predictions["probabilities"]`). Sklearn의 [log_loss](http://scikit-learn.org/stable/modules/generated/sklearn.metrics.log_loss.html) 함수를 사용하면 이러한 확률로 LogLoss를 편리하게 계산할 수 있습니다. 
``` def train_linear_classifier_model( learning_rate, steps, batch_size, training_examples, training_targets, validation_examples, validation_targets): """Trains a linear classification model. In addition to training, this function also prints training progress information, as well as a plot of the training and validation loss over time. Args: learning_rate: A `float`, the learning rate. steps: A non-zero `int`, the total number of training steps. A training step consists of a forward and backward pass using a single batch. batch_size: A non-zero `int`, the batch size. training_examples: A `DataFrame` containing one or more columns from `california_housing_dataframe` to use as input features for training. training_targets: A `DataFrame` containing exactly one column from `california_housing_dataframe` to use as target for training. validation_examples: A `DataFrame` containing one or more columns from `california_housing_dataframe` to use as input features for validation. validation_targets: A `DataFrame` containing exactly one column from `california_housing_dataframe` to use as target for validation. Returns: A `LinearClassifier` object trained on the training data. """ periods = 10 steps_per_period = steps / periods # Create a linear classifier object. my_optimizer = tf.train.GradientDescentOptimizer(learning_rate=learning_rate) my_optimizer = tf.contrib.estimator.clip_gradients_by_norm(my_optimizer, 5.0) linear_classifier = # YOUR CODE HERE: Construct the linear classifier. # Create input functions. 
training_input_fn = lambda: my_input_fn(training_examples, training_targets["median_house_value_is_high"], batch_size=batch_size) predict_training_input_fn = lambda: my_input_fn(training_examples, training_targets["median_house_value_is_high"], num_epochs=1, shuffle=False) predict_validation_input_fn = lambda: my_input_fn(validation_examples, validation_targets["median_house_value_is_high"], num_epochs=1, shuffle=False) # Train the model, but do so inside a loop so that we can periodically assess # loss metrics. print("Training model...") print("LogLoss (on training data):") training_log_losses = [] validation_log_losses = [] for period in range (0, periods): # Train the model, starting from the prior state. linear_classifier.train( input_fn=training_input_fn, steps=steps_per_period ) # Take a break and compute predictions. training_probabilities = linear_classifier.predict(input_fn=predict_training_input_fn) training_probabilities = np.array([item['probabilities'] for item in training_probabilities]) validation_probabilities = linear_classifier.predict(input_fn=predict_validation_input_fn) validation_probabilities = np.array([item['probabilities'] for item in validation_probabilities]) training_log_loss = metrics.log_loss(training_targets, training_probabilities) validation_log_loss = metrics.log_loss(validation_targets, validation_probabilities) # Occasionally print the current loss. print(" period %02d : %0.2f" % (period, training_log_loss)) # Add the loss metrics from this period to our list. training_log_losses.append(training_log_loss) validation_log_losses.append(validation_log_loss) print("Model training finished.") # Output a graph of loss metrics over periods. plt.ylabel("LogLoss") plt.xlabel("Periods") plt.title("LogLoss vs. 
Periods") plt.tight_layout() plt.plot(training_log_losses, label="training") plt.plot(validation_log_losses, label="validation") plt.legend() return linear_classifier linear_classifier = train_linear_classifier_model( learning_rate=0.000005, steps=500, batch_size=20, training_examples=training_examples, training_targets=training_targets, validation_examples=validation_examples, validation_targets=validation_targets) ``` ### 해결 방법 해결 방법을 보려면 아래를 클릭하세요. ``` def train_linear_classifier_model( learning_rate, steps, batch_size, training_examples, training_targets, validation_examples, validation_targets): """Trains a linear classification model. In addition to training, this function also prints training progress information, as well as a plot of the training and validation loss over time. Args: learning_rate: A `float`, the learning rate. steps: A non-zero `int`, the total number of training steps. A training step consists of a forward and backward pass using a single batch. batch_size: A non-zero `int`, the batch size. training_examples: A `DataFrame` containing one or more columns from `california_housing_dataframe` to use as input features for training. training_targets: A `DataFrame` containing exactly one column from `california_housing_dataframe` to use as target for training. validation_examples: A `DataFrame` containing one or more columns from `california_housing_dataframe` to use as input features for validation. validation_targets: A `DataFrame` containing exactly one column from `california_housing_dataframe` to use as target for validation. Returns: A `LinearClassifier` object trained on the training data. """ periods = 10 steps_per_period = steps / periods # Create a linear classifier object. 
my_optimizer = tf.train.GradientDescentOptimizer(learning_rate=learning_rate) my_optimizer = tf.contrib.estimator.clip_gradients_by_norm(my_optimizer, 5.0) linear_classifier = tf.estimator.LinearClassifier( feature_columns=construct_feature_columns(training_examples), optimizer=my_optimizer ) # Create input functions. training_input_fn = lambda: my_input_fn(training_examples, training_targets["median_house_value_is_high"], batch_size=batch_size) predict_training_input_fn = lambda: my_input_fn(training_examples, training_targets["median_house_value_is_high"], num_epochs=1, shuffle=False) predict_validation_input_fn = lambda: my_input_fn(validation_examples, validation_targets["median_house_value_is_high"], num_epochs=1, shuffle=False) # Train the model, but do so inside a loop so that we can periodically assess # loss metrics. print("Training model...") print("LogLoss (on training data):") training_log_losses = [] validation_log_losses = [] for period in range (0, periods): # Train the model, starting from the prior state. linear_classifier.train( input_fn=training_input_fn, steps=steps_per_period ) # Take a break and compute predictions. training_probabilities = linear_classifier.predict(input_fn=predict_training_input_fn) training_probabilities = np.array([item['probabilities'] for item in training_probabilities]) validation_probabilities = linear_classifier.predict(input_fn=predict_validation_input_fn) validation_probabilities = np.array([item['probabilities'] for item in validation_probabilities]) training_log_loss = metrics.log_loss(training_targets, training_probabilities) validation_log_loss = metrics.log_loss(validation_targets, validation_probabilities) # Occasionally print the current loss. print(" period %02d : %0.2f" % (period, training_log_loss)) # Add the loss metrics from this period to our list. 
training_log_losses.append(training_log_loss) validation_log_losses.append(validation_log_loss) print("Model training finished.") # Output a graph of loss metrics over periods. plt.ylabel("LogLoss") plt.xlabel("Periods") plt.title("LogLoss vs. Periods") plt.tight_layout() plt.plot(training_log_losses, label="training") plt.plot(validation_log_losses, label="validation") plt.legend() return linear_classifier linear_classifier = train_linear_classifier_model( learning_rate=0.000005, steps=500, batch_size=20, training_examples=training_examples, training_targets=training_targets, validation_examples=validation_examples, validation_targets=validation_targets) ``` ## 작업 3: 검증 세트로 정확성 계산 및 ROC 곡선 도식화 분류에 유용한 몇 가지 측정항목은 모델 [정확성](https://en.wikipedia.org/wiki/Accuracy_and_precision#In_binary_classification), [ROC 곡선](https://en.wikipedia.org/wiki/Receiver_operating_characteristic) 및 AUC(ROC 곡선 아래 영역)입니다. 이러한 측정항목을 조사해 보겠습니다. `LinearClassifier.evaluate`는 정확성 및 AUC 등의 유용한 측정항목을 계산합니다. ``` evaluation_metrics = linear_classifier.evaluate(input_fn=predict_validation_input_fn) print("AUC on the validation set: %0.2f" % evaluation_metrics['auc']) print("Accuracy on the validation set: %0.2f" % evaluation_metrics['accuracy']) ``` `LinearClassifier.predict` 및 Sklearn의 [roc_curve](http://scikit-learn.org/stable/modules/model_evaluation.html#roc-metrics) 등으로 계산되는 클래스 확률을 사용하여 ROC 곡선을 도식화하는 데 필요한 참양성률 및 거짓양성률을 가져올 수 있습니다. ``` validation_probabilities = linear_classifier.predict(input_fn=predict_validation_input_fn) # Get just the probabilities for the positive class. 
validation_probabilities = np.array([item['probabilities'][1] for item in validation_probabilities]) false_positive_rate, true_positive_rate, thresholds = metrics.roc_curve( validation_targets, validation_probabilities) plt.plot(false_positive_rate, true_positive_rate, label="our model") plt.plot([0, 1], [0, 1], label="random classifier") _ = plt.legend(loc=2) ``` **작업 2에서 학습시킨 모델의 학습 설정을 조정하여 AUC를 개선할 수 있는지 확인해 보세요.** 어떤 측정항목을 개선하면 다른 측정항목이 악화되는 경우가 종종 나타나므로, 적절하게 균형이 맞는 설정을 찾아야 합니다. **모든 측정항목이 동시에 개선되는지 확인해 보세요.** ``` # TUNE THE SETTINGS BELOW TO IMPROVE AUC linear_classifier = train_linear_classifier_model( learning_rate=0.000005, steps=500, batch_size=20, training_examples=training_examples, training_targets=training_targets, validation_examples=validation_examples, validation_targets=validation_targets) evaluation_metrics = linear_classifier.evaluate(input_fn=predict_validation_input_fn) print("AUC on the validation set: %0.2f" % evaluation_metrics['auc']) print("Accuracy on the validation set: %0.2f" % evaluation_metrics['accuracy']) ``` ### 해결 방법 가능한 해결 방법을 보려면 아래를 클릭하세요. 효과적인 해결 방법 중 하나는 과적합이 나타나지 않는 범위 내에서 더 오랫동안 학습하는 것입니다. 이렇게 하려면 단계 수, 배치 크기 또는 둘 모두를 늘리면 됩니다. 모든 측정항목이 동시에 개선되므로 손실 측정항목은 AUC와 정확성 모두를 적절히 대변합니다. AUC를 몇 단위만 개선하려 해도 굉장히 많은 추가 반복이 필요합니다. 이는 흔히 나타나는 상황이지만, 이렇게 작은 개선이라도 비용을 투자할 가치는 충분합니다. ``` linear_classifier = train_linear_classifier_model( learning_rate=0.000003, steps=20000, batch_size=500, training_examples=training_examples, training_targets=training_targets, validation_examples=validation_examples, validation_targets=validation_targets) evaluation_metrics = linear_classifier.evaluate(input_fn=predict_validation_input_fn) print("AUC on the validation set: %0.2f" % evaluation_metrics['auc']) print("Accuracy on the validation set: %0.2f" % evaluation_metrics['accuracy']) ```
github_jupyter
# Dynamic programming - 세부 계산으로 나뉘어 지는 하나의 큰 문제를 세부 계산 결과를 미리 구해서 저장한 후 큰 계산의 결과를 빠르게 도출하는 문제 해결 기법 ### fibonacci - 피보나치의 수학적 개념 ``` for Natural number n, a(n) = 1 (n<=2) a(n) = a(n-1) + a(n-2) # use recursive methos def fibo(n): if n <2: return 1 return fibo(n-1)+fibo(n-2) def memoize(func): memo = {} def wrapped(n): if n in memo: return memo[n] else: result = func(n) memo[n] = result return result return wrapped @memoize def fibonacci(n): return fibonacci(n-1) + fibonacci(n-2) ``` #### memoization + recursive - 함수의 외부에 계산 결과를 저장하는 해시테이블을 하나 준비하고 계산 결과를 캐시하도록 함 - 메모이제이션은 이랍ㄴ적인 알고리즘이므로 데코레이터로 일반화 할 수 있음 ``` %timeit -n 1 -r 10 -o fibo(10) memo = {0:1,1:1} def fib_m(n): if n in memo: return memo[n] else: result = fib_m(n-1) + fib_m(n-2) memo[n] = result return result %timeit -n 1 -r 10 -o fib_m(10) def memoize(f): cache = {} def decorated_function(*args): if args in cache: return cache[args] else: cache[args] = f(*args) return cache[args] return decorated_function # decorates the function to use a cache @memoize def fib_d(n): if n == 0: return 0 if n <= 2: return 1 return fib_d(n-1) + fib_d(n-2) # %timeit -n 1 -r 10 -o fib_d(10) print("평범한 recursive fibonacci :") %timeit -n 10 -r 10 -o fibo(10) print() print("그냥 메모이제이션 recursive fibonacci :") %timeit -n 10 -r 10 -o fib_m(10) print() print("데코레이터를 넣은 메모이제이션 recursive fibonacci :") %timeit -n 10 -r 10 -o fib_d(10) ``` - dynamic programming은 이러한 종류의 문제를 풀어내는 강력한 기술 - 접근은 단순한 아이디어이며 코딩 역시 단순한 편 - 알고리즘이라기보다 패러다임에 가까움 1. 탑-다운 + 메모이제이션이라고 부르기도 함 + 큰 계산을 작은 계산으로 나누고 작은 계산의 결과를 저장 재사용 2. 
바텀-업 + 문제를 분석하여 작은 문제들이 풀려나가는 순서를 확인한 후 사소한 문제부터 풀어나간 + 이 과정에서 작은 문제들은 전체 문제가 풀리기 전에 모두 풀리는것이 보장됨 + 이것이 동적 계획법 - divide&concour와는 차이가 있음 - 분할정복에서는 서브 문제들이 오버래핑 되지 않음 - 예를 들어 병합정렬이나 퀸소드 정렬 등 * recursive는 탑다운 방식의 해결에 자주 쓰임 - 즉 큰 문제를 작은 문제들로 나나ㅜ고 작은 문제는 같은 방식(더 작은 문제로 나눔)으로 계산 - 이 접근법에서는 CPU 시간이 점점 많이 쓰이고 시간 복잡도가 증가함 - 반면 동적 계획법에서는 작은 문제를 여러번 풀어지지 않으며 한번 풀린 서브 문제의 답은 전체 문제를 최적화하는데 사용됨 - 동적 계획의 문제는 중간과정에서 불필요한 서브 문제의 해도 모두 구한다는 것 - 반면 재귀는 필요한 계산만 함 - 따라서 동적 계획으로 문제를 풀때는 이런 부작용을 최소화할 필요가 있음 10 -> 1 로 가는 최단 경로 구하는 법 => 1+min(Pn-1, Pn/2, Pn/3) - 중첩되는 서브 문제의성질이라고 볼 수 있음 - 주어진 수에 대한 최적해는 주어진 수에 대한 서브 문제(n보다 작은 양의 정수들의 경로의 길이)에 의존 - 따라서 memoization이나 bottom-up으로 풀어 갈 수 있음 ``` @memoization def path_memo(n): if n<2: return 0 else: test = [path_memo(n-1)] if n%2 == 0: test.append(path_memo(n/2)) elif n%3 == 0: test.append(path_memo(n/3)) result = 1+min(test) return result path_memo(10) def path_dp(n): paths = [0,0] + [0]*n # 값을 담아 둘 공간인가? intervals = [1,2,3] # min을 구하기 위해 연산에 사용될 파라미터 들 for i in range(2, n+1): paths[i] = paths[i-1] + 1 if i%2 == 0: paths[i] = min((paths[i], 1+paths[i/2])) elif i%3 == 0: paths[i] = min((paths[i], 1+paths[i/3])) return paths[n] # path_dp(10) ## 번외 30명 중 같은 생일이 있는게 두 명이상일 확률? 
# 365!/((365-30+1)!*265^30) from random import randint def f(k=10000): r = 0 for _ in range(k): s = set() for _ in range(30): s.add(randint(1, 365)) r += 1 if len(s) == 30 else 0 return r/k for _ in range(10): print(f"p={f():.2%}") import random test_arr = random.sample(range(10), 5) def recur_test_5(a, left, right, check): # print("i=",left,"j=",right) n = len(a) # print('a',a) # print() if left < n-1: # left는 index로 쓸거임 맨마지막 j와 비교위해 n-1보다 작아야함 if check: # right = left+1 if right < n: # right 역시 마찬가지 index니까 n보다 작아야함 print(a[left], a[right]) if a[left] > a[right]: a[left], a[right] = a[right],a[left] recur_test_5(a, left, right+1, None) # 조건문에 걸리지 않으면 여기서 계속 recursive else: # min을 맨앞으로 보내고 다음 인덱스 recur_test_5(a, left+1, right+2, 'check') return a test_arr = random.sample(range(10), 5) print(test_arr) recur_test_5(test_arr,0,1,'check') ```
github_jupyter
``` pwd import numpy as np import pandas as pd import seaborn as sns %matplotlib inline ``` ## Import Data-Set ``` data = pd.read_csv('House_Price.csv', header= 0) data.head() data.shape ``` ## Adding sold column where 0 denotes not sold and 1 denotes sold ``` data['sold'] = np.random.randint(0,2, size = (len(data),1)) data.head() #data[data['sold'] == 0] ##data['sold'].unique() ``` ## EDD ``` data.info() data.describe() ## Data Pattern sns.boxplot(y= 'n_hot_rooms', data= data) ## Data Is skewed sns.jointplot(x='rainfall', y= 'sold', data=data) ## has outliers data.info() ## sns.countplot(x='airport', data=data) ## OKK ## sns.countplot(x= 'waterbody', data= data) ## OKK sns.countplot(x= 'bus_ter', data= data) ## taking only one values ``` # Observations: ### 1. crime_rate has max of 86 but mean of 3.6 : OUTLIER ### 2. dist 1-4 (shows same data): merge into one var to show all columns ### 3. n_hos_rooms has outliers {Boxplot} ### 4. rainfall ranges from 3-60: potential outliers ### 5. bus_ter takes only yes value: Variable is categorical with constant value : remove ``` ## outliers using Flooring and capping ## 99% Upp_Lvl = np.percentile(data.n_hot_rooms, [99])[0] data[(data.n_hot_rooms) > Upp_Lvl] data.n_hot_rooms[(data.n_hot_rooms > 3* Upp_Lvl)] = 3*Upp_Lvl ## 1% low_lvl = np.percentile(data.rainfall, [1])[0] data.rainfall[(data.rainfall < 0.3 * low_lvl)] = 0.3*low_lvl data.describe() ``` ## outliers are removed ## Missing Values ``` ## Missing Values were seen in n_hos_beds data.info() data.n_hos_beds = data.n_hos_beds.fillna(data.n_hos_beds.mean()) data.info() ``` ### Missing values taken care of: OKK ``` ## Variable Transformation ## Average Distance as a representative for all other dist data['avg_dist'] = (data.dist1 + data.dist2 + data.dist3 + data.dist4)/4 data = data.drop(['dist1', 'dist2', 'dist3', 'dist4'], axis= 1) data = data.drop(['crime_rate', 'bus_ter'], axis= 1) data = pd.get_dummies(data) data = data.drop(['airport_NO', 'waterbody_None'], axis=1) 
data.describe() ``` Now the data looks much cleaned so we can attempt to git into classification logistic model ``` ## Y= sold ## x= price x = data[['price']] y = data[['sold']] ## Using[['sold']] because we want 2 dimension from sklearn.linear_model import LogisticRegression ## # steps clf_lrs = LogisticRegression() #1 clf_lrs.fit(x,y) # 2 clf_lrs.coef_ ##3 clf_lrs.intercept_ ##3 ## we can also use statsmodels.api ## Drawback : we need to add constant term import statsmodels.api as sn x_const = sn.add_constant(x) ##x_const.head() import statsmodels.discrete.discrete_model as sm logit = sm.Logit(y,x_const).fit() logit.summary() ``` ## Multiple predictors ``` X = data.drop(['sold'], axis= 1) Y = data['sold'] #X.head() ``` ## SKlearn ``` model1 = LogisticRegression() model1.fit(X,Y) model1.coef_ model1.intercept_ ``` ## Statsmodels ``` X_const1 = sn.add_constant(X) logit_model2 = sm.Logit(Y, X_const1).fit() logit_model2.summary() model1.predict_proba(X) Y_pred = model1.predict(X) Y_pred ## By default takes threshold as 0.5 i,e > 0.5 -- yes ||| < 0.5 -- No Y_pred1 = (model1.predict_proba(X)[:, 1] > 0.4) Y_pred1 from sklearn.metrics import confusion_matrix confusion_matrix(Y, Y_pred1) from sklearn.metrics import precision_score, recall_score from sklearn.metrics import roc_auc_score precision_score(Y, Y_pred) recall_score(Y,Y_pred) precision_score(Y, Y_pred1) recall_score(Y, Y_pred1) roc_auc_score(Y, Y_pred) roc_auc_score(Y,Y_pred1) ``` ### Both 0.5 and 0.4 threshold gives me good accuracy ## Test-Train Split ``` from sklearn.model_selection import train_test_split from sklearn.metrics import accuracy_score X_train, X_test, Y_train, Y_test = train_test_split(X, Y, test_size = 0.2, random_state = 0) clf_LR = LogisticRegression() clf_LR.fit(X_train, Y_train) Y_test_predict = clf_LR.predict(X_test) confusion_matrix(Y_test, Y_test_predict) accuracy_score(Y_test, Y_test_predict) ```
github_jupyter
``` """ - This notebook is used to move files for images - Do it on all dataset - Need: meta-data merge (of copper and non-copper) - Image folder - Logic: move copper / non-copper images based on meta-data (find the date Y of x% copper images, and use that Y to separate train / test) # add function: if we've already separate them once, when we want to recut x% --> merge copper_train, copper_test into a folde -> redo the cutting again """ """ Testing Block """ xxx = glob.glob("/data/put_data/seanyu/ccp/Raw/Non_Copper/*.png") xxx = [os.path.basename(i) for i in xxx] arr = [] for i in xxx: if i in list(df_non_copper.pid): pass else: arr.append(i) """ for i in arr: shutil.move(src='/data/put_data/seanyu/ccp/Raw/Copper/' + i, dst='/data/put_data/seanyu/ccp/Raw/i_am_test/' + i) """ #a = pd.DataFrame({'pid' : arr}) #a.to_csv("missed_copper.csv") len(arr) df_copper.shape import os import glob import numpy as np import scipy as sp import pandas as pd import shutil import datetime import time # meta-data of all images f_meta_copper = '/data/put_data/seanyu/ccp/Copper_meta_revised_0809.csv' f_meta_non_copper = '/data/put_data/seanyu/ccp/Non_copper_meta_revised_0809.csv' # source of all images copper_img_src = '/data/put_data/seanyu/ccp/Raw/Copper/' non_copper_img_src = '/data/put_data/seanyu/ccp/Raw/Non_Copper/' # where to put split images d_base = '/data/put_data/seanyu/ccp/clean_date_cut/' # how many percentage to become testing set thres = 0.5 # to kill old images? 
kill_old = False #----------------------# # start check folder path if not os.path.exists(d_base): os.mkdir(d_base) else: print('base dir has already exist') if not os.path.exists(d_base + '/thres' + str(int(thres * 100))): os.mkdir(d_base + '/thres' + str(int(thres * 100))) else: print('base dir lv2 has already exist') # define target folders dst_copper_train = d_base + '/thres' + str(int(thres * 100)) + '/copper_train/' dst_copper_test = d_base + '/thres' + str(int(thres * 100)) + '/copper_test/' dst_non_copper_train = d_base + '/thres' + str(int(thres * 100)) + '/non_copper_train/' dst_non_copper_test = d_base + '/thres' + str(int(thres * 100)) + '/non_copper_test/' if kill_old: shutil.rmtree(dst_copper_train) shutil.rmtree(dst_copper_test) shutil.rmtree(dst_non_copper_train) shutil.rmtree(dst_non_copper_test) else: pass # generate folder os.mkdir(dst_copper_train) if not os.path.exists(dst_copper_train) else 'NO' os.mkdir(dst_copper_test) if not os.path.exists(dst_copper_test) else 'NO' os.mkdir(dst_non_copper_train) if not os.path.exists(dst_non_copper_train) else 'NO' os.mkdir(dst_non_copper_test) if not os.path.exists(dst_non_copper_test) else 'NO' # read meta-data df_copper = pd.read_csv(f_meta_copper) df_non_copper = pd.read_csv(f_meta_non_copper) # check duplicate print(df_copper[df_copper.duplicated('pid')]) print('------') print(df_non_copper[df_non_copper.duplicated('pid')]) df_copper['date'] = [time.strptime(i.split(" ")[0], '%Y/%m/%d') for i in list(df_copper.datetime)] df_non_copper['date'] = [time.strptime(j.split(" ")[0], '%Y/%m/%d') for j in list(df_non_copper.datetime)] # sort by date and reset the index df_copper = df_copper.sort_values('date', ascending=False) df_non_copper = df_non_copper.sort_values('date', ascending=False) df_copper = df_copper.reset_index(drop = True) df_non_copper = df_non_copper.reset_index(drop = True) #df_copper['date'][0] < time.strptime('2016/03/01', '%Y/%m/%d') cut_date = df_copper.iloc[int(len(df_copper) * 
thres)]['date'] df_copper_test = df_copper[df_copper['date'] >= cut_date] df_copper_train = df_copper[df_copper['date'] < cut_date] df_non_copper_test = df_non_copper[df_non_copper['date'] >= cut_date] df_non_copper_train = df_non_copper[df_non_copper['date'] < cut_date] # check ratio of data set print('ratio of copper train/test: ' + str(len(df_copper_train)/np.float(len(df_copper)) ) + '/' + str(len(df_copper_test)/np.float(len(df_copper)) )) print('ratio of non-copper train/test: ' + str(len(df_non_copper_train)/np.float(len(df_non_copper)) ) + '/' + str(len(df_non_copper_test)/np.float(len(df_non_copper)) )) # don't save meta-data --> force use taking data from dir def copy_images(dir_src, dir_dst, df_go): im_list = list(df_go.pid) n_fail = 0 for i in im_list: im_src = dir_src + '/' + i im_dst = dir_dst + '/' + i try: shutil.copyfile(src= im_src, dst= im_dst) except IOError: n_fail += 1 print('done, numbers of transfer failed: ' + str(n_fail)) copy_images(dir_src = copper_img_src, dir_dst = dst_copper_train, df_go = df_copper_train) copy_images(dir_src = copper_img_src, dir_dst = dst_copper_test, df_go = df_copper_test) copy_images(dir_src = non_copper_img_src, dir_dst = dst_non_copper_train, df_go = df_non_copper_train) copy_images(dir_src = non_copper_img_src, dir_dst = dst_non_copper_test, df_go = df_non_copper_test) ### non_copper_img_src im_non_copper_all = glob.glob(non_copper_img_src + '/*.png') print(len(im_non_copper_all)) print(df_non_copper.shape) im_non_copper_all = [os.path.basename(i) for i in im_non_copper_all] x = list(set(list(df_non_copper.pid) ) - set(im_non_copper_all)) ind = 0 for i in x: if os.path.exists(copper_img_src + i): ind += 1 ind ## 139 images should modify the meta-data --> change the is_copper_defect to 'N' (path is correct, current in the non-copper) x ```
github_jupyter
``` from openrtdynamics2.dsp import * import math import numpy as np import openrtdynamics2.lang, openrtdynamics2.dsp as dy import openrtdynamics2.py_execute as dyexe import openrtdynamics2.targets as tg import os import matplotlib.pyplot as plt #%matplotlib widget # https://github.com/matplotlib/ipympl from vehicle_lib.vehicle_lib import * system = dy.enter_system() velocity = dy.system_input( dy.DataTypeFloat64(1), name='velocity', default_value=5.0, value_range=[0, 25], title="vehicle velocity [m/s]") s1 = dy.system_input( dy.DataTypeFloat64(1), name='s1', default_value=0.0, value_range=[-0.05, 0.05], title="steering rate 1 [rad/s]") s2 = dy.system_input( dy.DataTypeFloat64(1), name='s2', default_value=0.0, value_range=[-0.05, 0.05], title="steering rate 2 [rad/s]") s3 = dy.system_input( dy.DataTypeFloat64(1), name='s3', default_value=0.0, value_range=[-0.05, 0.05], title="steering rate 3 [rad/s]") s4 = dy.system_input( dy.DataTypeFloat64(1), name='s4', default_value=0.0, value_range=[-0.05, 0.05], title="steering rate 4 [rad/s]") s5 = dy.system_input( dy.DataTypeFloat64(1), name='s5', default_value=0.0, value_range=[-0.05, 0.05], title="steering rate 5 [rad/s]") initial_steering = dy.system_input( dy.DataTypeFloat64(1), name='initial_steering', default_value=-0.0, value_range=[-40, 40], title="initial steering angle [degrees]") * dy.float64(math.pi / 180.0) initial_orientation = dy.system_input( dy.DataTypeFloat64(1), name='initial_orientation', default_value=0.0, value_range=[-360, 360], title="initial orientation angle [degrees]") * dy.float64(math.pi / 180.0) # parameters wheelbase = 3.0 # sampling time Ts = 0.01 steering_rate = dy.float64(0) cnt = dy.counter() steering_rate = dy.conditional_overwrite(steering_rate, condition= cnt >= dy.int32(200), new_value=s1 ) steering_rate = dy.conditional_overwrite(steering_rate, condition= cnt >= dy.int32(400), new_value=s2 ) steering_rate = dy.conditional_overwrite(steering_rate, condition= cnt >= dy.int32(600), 
new_value=s3 ) steering_rate = dy.conditional_overwrite(steering_rate, condition= cnt >= dy.int32(800), new_value=s4 ) steering_rate = dy.conditional_overwrite(steering_rate, condition= cnt >= dy.int32(1000), new_value=s5 ) # linearly increasing steering angle delta = dy.euler_integrator( steering_rate, Ts, initial_state=initial_steering ) delta = dy.saturate(u=delta, lower_limit=-math.pi/2.0, upper_limit=math.pi/2.0) # the model of the vehicle x, y, psi, x_dot, y_dot, psi_dot = discrete_time_bicycle_model(delta, velocity, Ts, wheelbase, psi0=initial_orientation) # # outputs: these are available for visualization in the html set-up # dy.append_output(x, 'x') dy.append_output(y, 'y') dy.append_output(psi, 'psi') dy.append_output(delta, 'steering') # generate code for Web Assembly (wasm), requires emcc (emscripten) to build code_gen_results = dy.generate_code(template=tg.TargetCppWASM(), folder="generated/trajectory_generation", build=True) # dy.clear() from IPython.display import JSON JSON(code_gen_results['manifest']) compiled_system = dyexe.CompiledCode(code_gen_results) testsim = dyexe.SystemInstance(compiled_system) N=3000 input_data = {'initial_steering': 0.0, 's1' : 0.1, 's2' : -0.1 } # sim_results = run_batch_simulation(testsim, input_data, N ) sim_results = dyexe.run_batch_simulation(testsim, input_data, N, output_keys=['x', 'y', 'steering'] ) sim_results['y'] plt.figure() plt.plot(sim_results['x'], sim_results['y']) plt.show() plt.figure() plt.plot( sim_results['steering']) testsim = dyexe.SystemInstance(compiled_system) N=600 plt.figure() for s2 in np.linspace(-0.0,-0.2,5): input_data = {'initial_steering': 0.0, 's1' : 0.1, 's2' : s2 } sim_results = dyexe.run_batch_simulation(testsim, input_data, N, output_keys=['x', 'y', 'steering'] ) plt.plot(sim_results['x'], sim_results['y']) testsim = dyexe.SystemInstance(compiled_system) N=600+200 plt.figure() for s2 in np.linspace(-0.0,-0.2,5): for s3 in np.linspace(-0.0,-0.2,5): input_data = {'initial_steering': 
0.0, 's1' : 0.1, 's2' : s2, 's3' : s3 } sim_results = dyexe.run_batch_simulation(testsim, input_data, N, output_keys=['x', 'y', 'steering'] ) plt.plot(sim_results['x'], sim_results['y']) testsim = dyexe.SystemInstance(compiled_system) N=600+200+200 plt.figure() for s2 in np.linspace(-0.0,-0.2,5): for s3 in np.linspace(-0.0,-0.1,5): for s4 in np.linspace(0.1,+0.2,4): input_data = {'initial_steering': 0.0, 's1' : 0.1, 's2' : s2, 's3' : s3, 's4' : s4 } sim_results = dyexe.run_batch_simulation(testsim, input_data, N, output_keys=['x', 'y', 'steering'] ) plt.plot(sim_results['x'], sim_results['y']) testsim = dyexe.SystemInstance(compiled_system) N=600+200+200+200 plt.figure() for s2 in np.linspace(-0.0,-0.2,5): for s3 in np.linspace(-0.0,-0.1,5): for s4 in np.linspace(0.1,+0.2,4): for s5 in np.linspace(-0.1,+0.1,4): input_data = {'initial_steering': 0.0, 's1' : 0.1, 's2' : s2, 's3' : s3, 's4' : s4, 's5' : s5 } sim_results = dyexe.run_batch_simulation(testsim, input_data, N, output_keys=['x', 'y', 'steering'] ) plt.plot(sim_results['x'], sim_results['y']) ```
github_jupyter
# NumPy numpy is python's package for doing math that is more advanced than +-*/ This includes special functions like cosine, exponential, sqrt, ... On top of this we can use numpy to generate samples from many types of random variables numpy also has a powerful data type to define vectors, matrices, and tensors With these data types numpy also allows us to do linear algebra - matrix multiplication and matrix-vector solutions ``` # the first step of using numpy is to tell python to use it import numpy as np print(np.cos(np.pi)) print(np.sqrt(1.21)) print(np.log(np.exp(5.2))) # we can create numpy arrays by converting lists # this is a vector vec = np.array([1,2,3]) print(vec) # we can create matrices by converting lists of lists mat = np.array([[1,2,1],[4,5,9],[1,8,9]]) print('') print(mat) print('') print(mat.T) # there are lots of other ways to create numpy arrays vec2 = np.arange(0,15) print(vec2) print('') vec3 = np.arange(3,21,6) print(vec3) vec4 = np.linspace(0,5,10) print(vec4) print('') print(vec4.reshape(5,2)) vec4_reshaped = vec4.reshape(5,2) print(vec4_reshaped) print(vec4) mat2 = np.zeros([5,3]) print(mat2) mat3 = np.ones((3,5)) print('') print(mat3) mat4 = np.eye(5) print('') print(mat4) # we can +-*/ arrays together if they're the right size vec5 = np.arange(1,6) vec6 = np.arange(3,8) print(vec5) print(vec6) print(vec5+vec6) print(vec5*vec6) print(1/vec5) print(np.sqrt(vec6)) # we can do matrix multiplication print(mat) print('') print(vec) print() product = np.matmul(mat,vec) print(product) print(np.linalg.solve(mat,product)) print('') print(np.linalg.inv(mat)) # we can find the unique values in an array vec7 = np.array(['blue','red','orange','purple','purple','orange','Red',6]) print(vec7) print(np.unique(vec7)) # we can also use numpy to generate samples of a random variable rand_mat = np.random.rand(5,5) # uniform random variable print(rand_mat) rand_mat2 = np.random.randn(10,5) # standard normal random variable print('') print(rand_mat2) # we can 
also use numpy for statistical tools on arrays print(np.mean(rand_mat)) print(np.std(rand_mat2)) print(np.min(rand_mat)) print(np.max(rand_mat2)) # break here for next video! # how do we access entries in a numpy vector rand_vec = np.random.randn(19) print(rand_vec) print(rand_vec[6]) # we can access multiple entries at once using : print(rand_vec[4:9]) # we can also access multiple non-consecutive entries using np.arange print(np.arange(0,15,3)) print(rand_vec[np.arange(0,15,3)]) # what about matrices print(rand_mat) print(rand_mat[1][2]) print(rand_mat[1,2]) print(rand_mat[0:2,1:3]) # let's change some values in an array! print(rand_vec) rand_vec[3:5] = 4 print('') print(rand_vec) rand_vec[3:5] = [1,2] print('') print(rand_vec) print(rand_mat) rand_mat[1:3,3:5] = 0 print('') print(rand_mat) sub_mat = rand_mat[0:2,0:3] print(sub_mat) sub_mat[:] = 3 print(sub_mat) print(rand_mat) sub_mat2 = rand_mat[0:2,0:3].copy() sub_mat2[:] = 99 print(sub_mat2) print(rand_mat) # break here for next video # we can also access entries with logicals rand_vec = np.random.randn(15) print(rand_vec) print(rand_vec>0) print(rand_vec[rand_vec>0]) print(rand_mat2) print(rand_mat2[rand_mat2>0]) print(rand_vec) print('') rand_vec[rand_vec>0.5] = -5 print(rand_vec) # let's save some arrays on the disk for use later! np.save('saved_file_name',rand_mat2) np.savez('zipped_file_name',rand_mat=rand_mat,rand_mat2=rand_mat2) # now let's load it loaded_vec = np.load('saved_file_name.npy') loaded_zip = np.load('zipped_file_name.npz') print(loaded_vec) print('') print(loaded_zip) print(loaded_zip['rand_mat']) print('') print(loaded_zip['rand_mat2']) new_array = loaded_zip['rand_mat'] print(new_array) # we can also save/load as text files...but only single variables np.savetxt('text_file_name.txt',rand_mat,delimiter=',') rand_mat_txt = np.loadtxt('text_file_name.txt',delimiter=',') print(rand_mat) print('') print(rand_mat_txt) ```
github_jupyter
<a href="https://colab.research.google.com/github/mtoce/DS-Unit-2-Kaggle-Challenge/blob/master/module3-cross-validation/Assig_3_LS_DS_223.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a> Lambda School Data Science *Unit 2, Sprint 2, Module 3* --- # Cross-Validation ## Assignment - [ ] [Review requirements for your portfolio project](https://lambdaschool.github.io/ds/unit2), then submit your dataset. - [ ] Continue to participate in our Kaggle challenge. - [ ] Use scikit-learn for hyperparameter optimization with RandomizedSearchCV. - [ ] Submit your predictions to our Kaggle competition. (Go to our Kaggle InClass competition webpage. Use the blue **Submit Predictions** button to upload your CSV file. Or you can use the Kaggle API to submit your predictions.) - [ ] Commit your notebook to your fork of the GitHub repo. You won't be able to just copy from the lesson notebook to this assignment. - Because the lesson was ***regression***, but the assignment is ***classification.*** - Because the lesson used [TargetEncoder](https://contrib.scikit-learn.org/categorical-encoding/targetencoder.html), which doesn't work as-is for _multi-class_ classification. So you will have to adapt the example, which is good real-world practice. 1. Use a model for classification, such as [RandomForestClassifier](https://scikit-learn.org/stable/modules/generated/sklearn.ensemble.RandomForestClassifier.html) 2. Use hyperparameters that match the classifier, such as `randomforestclassifier__ ...` 3. Use a metric for classification, such as [`scoring='accuracy'`](https://scikit-learn.org/stable/modules/model_evaluation.html#common-cases-predefined-values) 4. 
If you’re doing a multi-class classification problem — such as whether a waterpump is functional, functional needs repair, or nonfunctional — then use a categorical encoding that works for multi-class classification, such as [OrdinalEncoder](https://contrib.scikit-learn.org/categorical-encoding/ordinal.html) (not [TargetEncoder](https://contrib.scikit-learn.org/categorical-encoding/targetencoder.html)) ## Stretch Goals ### Reading - Jake VanderPlas, [Python Data Science Handbook, Chapter 5.3](https://jakevdp.github.io/PythonDataScienceHandbook/05.03-hyperparameters-and-model-validation.html), Hyperparameters and Model Validation - Jake VanderPlas, [Statistics for Hackers](https://speakerdeck.com/jakevdp/statistics-for-hackers?slide=107) - Ron Zacharski, [A Programmer's Guide to Data Mining, Chapter 5](http://guidetodatamining.com/chapter5/), 10-fold cross validation - Sebastian Raschka, [A Basic Pipeline and Grid Search Setup](https://github.com/rasbt/python-machine-learning-book/blob/master/code/bonus/svm_iris_pipeline_and_gridsearch.ipynb) - Peter Worcester, [A Comparison of Grid Search and Randomized Search Using Scikit Learn](https://blog.usejournal.com/a-comparison-of-grid-search-and-randomized-search-using-scikit-learn-29823179bc85) ### Doing - Add your own stretch goals! - Try other [categorical encodings](https://contrib.scikit-learn.org/categorical-encoding/). See the previous assignment notebook for details. - In additon to `RandomizedSearchCV`, scikit-learn has [`GridSearchCV`](https://scikit-learn.org/stable/modules/generated/sklearn.model_selection.GridSearchCV.html). Another library called scikit-optimize has [`BayesSearchCV`](https://scikit-optimize.github.io/notebooks/sklearn-gridsearchcv-replacement.html). Experiment with these alternatives. 
- _[Introduction to Machine Learning with Python](http://shop.oreilly.com/product/0636920030515.do)_ discusses options for "Grid-Searching Which Model To Use" in Chapter 6: > You can even go further in combining GridSearchCV and Pipeline: it is also possible to search over the actual steps being performed in the pipeline (say whether to use StandardScaler or MinMaxScaler). This leads to an even bigger search space and should be considered carefully. Trying all possible solutions is usually not a viable machine learning strategy. However, here is an example comparing a RandomForestClassifier and an SVC ... The example is shown in [the accompanying notebook](https://github.com/amueller/introduction_to_ml_with_python/blob/master/06-algorithm-chains-and-pipelines.ipynb), code cells 35-37. Could you apply this concept to your own pipelines? # Pre-Model Prep ### Setup You can work locally (follow the [local setup instructions](https://lambdaschool.github.io/ds/unit2/local/)) or on Colab (run the code cell below). 
``` %%capture import sys # If you're on Colab: if 'google.colab' in sys.modules: DATA_PATH = 'https://raw.githubusercontent.com/LambdaSchool/DS-Unit-2-Kaggle-Challenge/master/data/' !pip install category_encoders==2.* # If you're working locally: else: DATA_PATH = '../data/' # New code for Pandas Profiling version 2.4 # from pandas_profiling import ProfileReport # profile = ProfileReport(train, minimal=True).to_notebook_iframe() # profile ``` ## Train / Validate / Test Split ``` import pandas as pd from sklearn.model_selection import train_test_split train = pd.merge(pd.read_csv(DATA_PATH+'waterpumps/train_features.csv'), pd.read_csv(DATA_PATH+'waterpumps/train_labels.csv')) test = pd.read_csv(DATA_PATH+'waterpumps/test_features.csv') sample_submission = pd.read_csv(DATA_PATH+'waterpumps/sample_submission.csv') train.shape, test.shape train, val = train_test_split(train, train_size=0.80, test_size=0.20, stratify=train['status_group'], random_state=17) train.shape, val.shape, test.shape ``` ## Wrangle Function ``` import numpy as np def wrangle(X): """Wrangle train, validate, and test sets in the same way""" # Prevent SettingWithCopyWarning X = X.copy() # About 3% of the time, latitude has small values near zero, # outside Tanzania, so we'll treat these values like zero. X['latitude'] = X['latitude'].replace(-2e-08, 0) # When columns have zeros and shouldn't, they are like null values. # So we will replace the zeros with nulls, and impute missing values later. # Also create a "missing indicator" column, because the fact that # values are missing may be a predictive signal. 
cols_with_zeros = ['longitude', 'latitude', 'construction_year', 'gps_height', 'population'] for col in cols_with_zeros: X[col] = X[col].replace(0, np.nan) X[col+'_MISSING'] = X[col].isnull() # Drop duplicate columns duplicates = ['quantity_group', 'payment_type'] X = X.drop(columns=duplicates) # Drop recorded_by (never varies) and id (always varies, random) unusable_variance = ['recorded_by', 'id'] X = X.drop(columns=unusable_variance) # replace population=1 with np.NaN X['population'] = X['population'].replace({1: np.NaN}) # return the wrangled dataframe return X train = wrangle(train) val = wrangle(val) test = wrangle(test) #train.dtypes train.head() # cardinality ``` ## Reduce Cardinality for HC Features ``` def ReduceCardinality(X): '''Reduces Cardinality for features to make them model-relevant X: input DF n: cardinality of features for model ''' X = X.copy() # Reduce cardinality of specific features # date_recorded: change to just year X['date_recorded'] = pd.to_datetime(X['date_recorded']).dt.year # Quickly impute just construction_year column since I we need for new features # X['construction_year'] = X['construction_year'].fillna(value=X['construction_year'].median()) #------------------------------------------------------------------------------- # Feature Engineering X['time_til_inspection'] = X['date_recorded'] - X['construction_year'] #------------------------------------------------------------------------------- # keep top 24 values and replace 25th with other # df.apply(lambda x: x == 'OTHER' if x.name == ['subvillage', 'funder', 'installer', 'wpt_name', 'lga', 'scheme_name'] else ) # subvillage: keep top 24 top1 = X['subvillage'].value_counts()[:24].index X.loc[~X['subvillage'].isin(top1), 'subvillage'] = 'OTHER' # funder: keep top 25 top2 = X['funder'].value_counts()[:24].index X.loc[~X['funder'].isin(top2), 'funder'] = 'OTHER' # installer: keep top 25 top3 = X['installer'].value_counts()[:24].index X.loc[~X['installer'].isin(top3), 'installer'] 
= 'OTHER' # wpt_name: keep top 15 top4 = X['wpt_name'].value_counts()[:24].index X.loc[~X['wpt_name'].isin(top4), 'wpt_name'] = 'OTHER' # lga: keep top 25 top5 = X['lga'].value_counts()[:24].index X.loc[~X['lga'].isin(top5), 'lga'] = 'OTHER' # ward: keep top 25 top6 = X['ward'].value_counts()[:24].index X.loc[~X['ward'].isin(top6), 'ward'] = 'OTHER' # scheme_name: keep top 25 top7 = X['scheme_name'].value_counts()[:24].index X.loc[~X['scheme_name'].isin(top7), 'scheme_name'] = 'OTHER' return X # train = ReduceCardinality(train) # val = ReduceCardinality(val) # test = ReduceCardinality(test) ``` ## Manual Impute Function ``` def ComplexImputer(X): '''Imputes NaN values in multiple different ways depending on the column''' X = X.copy() # Column lists for different imputer methods cols_mean = ['gps_height', 'latitude', 'longitude'] cols_median = ['construction_year', 'population', 'public_meeting'] cols_mode = ['permit', 'scheme_management'] # replace these columns' NaNs with the mean of column X[cols_mean] = X[cols_mean].fillna(value=X[cols_mean].mean()) # replace these columns' NaNs with median X[cols_median] = X[cols_median].fillna(value=X[cols_median].median()) # replace these columns' NaNs with mode X['permit'] = X['permit'].fillna(value=True) X['scheme_management'] = X['scheme_management'].fillna(value='VWC') return X ``` # Final Function to Run on Data before we send into Pipeline ``` def Wrangle2(X): """Wrangle train, validate, and test sets in the same way""" X = X.copy() # run our manual imputer & cardinality functions X = ComplexImputer(X) X = ReduceCardinality(X) return X train = Wrangle2(train) val = Wrangle2(val) test = Wrangle2(test) train.isnull().sum() ``` ## Get Baseline ``` # define target target = 'status_group' # select baseline - categorical - which one is the mode? 
functional: 54% is the baseline train['status_group'].value_counts(normalize=True) train_features = train.drop([target], axis=1) numeric_features = train_features.select_dtypes(include='number').columns.tolist() cardinality = train_features.select_dtypes(exclude='number').nunique() categorical_features = cardinality[cardinality <= 25].index.tolist() features = numeric_features + categorical_features # Arrange data into X features matrix and y target vector X_train = train[features] y_train = train[target] X_val = val[features] y_val = val[target] X_test = test[features] ``` # Make Pipeline ``` import category_encoders as ce from sklearn.impute import SimpleImputer from sklearn.tree import DecisionTreeClassifier from sklearn.ensemble import RandomForestClassifier from sklearn.pipeline import make_pipeline from sklearn.preprocessing import StandardScaler from sklearn.model_selection import RandomizedSearchCV import matplotlib.pyplot as plt %%time pipeline = make_pipeline( ce.OrdinalEncoder(), StandardScaler(), RandomForestClassifier(n_jobs=-1, random_state=17) ) # run pipeline fit pipeline.fit(X_train, y_train) # Predict on test y_pred = pipeline.predict(X_test) print('Train Accuracy: ', pipeline.score(X_train, y_train)) print('Validation Accuracy: ', pipeline.score(X_val, y_val)) # declare parameter distributions and run randomized search cross-validation param_distributions = { 'randomforestclassifier__n_estimators': [50, 75, 100, 125, 150, 175, 200], 'randomforestclassifier__max_depth': [16, 18, 20, 22, 24, 26], 'randomforestclassifier__min_samples_split': [None, 2, 4, 6, 8], 'randomforestclassifier__criterion': ['gini', 'entropy'], } search = RandomizedSearchCV( pipeline, param_distributions=param_distributions, n_iter=10, cv=5, scoring='accuracy', verbose=10, return_train_score=True, n_jobs=-1 ) search.fit(X_train, y_train) print('Best hyperparameters: ', search.best_params_) print('Cross-validation Accuracy: ', search.best_score_) #n_estimators=100, 
min_samples_split=4, criterion='entropy' pipeline_best = search.best_estimator_ pipeline_best.fit(X_train, y_train) y_pred = pipeline_best.predict(X_test) print('Pipeline-best Train Accuracy: ', pipeline_best.score(X_train, y_train)) print('Pipeline-best Validation Accuracy: ', pipeline_best.score(X_val, y_val)) ``` ## Plot Feature Importances ``` # model = pipeline.named_steps['randomforestclassifier'] # encoder = pipeline.named_steps['ordinalencoder'] # encoded_columns = encoder.transform(X_val).columns # importances = pd.Series(model.feature_importances_, encoded_columns) # plt.figure(figsize=(10,30)) # importances.sort_values().plot.barh(); ``` ## Save to .csv and submit to Kaggle ``` sample_submission = pd.read_csv('sample_submission.csv') submission = sample_submission.copy() submission['status_group'] = y_pred submission.to_csv('submission.csv', index=False) ```
github_jupyter
# Experimenting With APIs In this notebook, we will be experimenting with the APIs and create helper functions. ``` from tqdm.auto import tqdm import itertools import os # my imports from helpers import kube from helpers import workload from helpers import util from helpers import request_funcs # experiment utility functionality used in experiments from exp_util import * ``` ## Kubernetes API We use the kubernetes API to fetch the current status of each deployment and see how many instances they have. ``` # print details of a deployment get_knative_watch_info('bench1') ``` ## Deploying On Knative In this section, we will develop the functionality to deploy a given workload to **knative** using the `kn` CLI. The other options were using [CRDs](https://stackoverflow.com/questions/61384188/how-to-deploy-a-knative-service-with-kubernetes-python-client-library), or using [kubectl apply](https://developers.redhat.com/coderland/serverless/deploying-serverless-knative#invoking_your_service_from_the_command_line) but `kn` seems to be more powerful. 
``` # making sure kn is set up correctly !kn service ls # EXP_CONFIG_NAME_DEFAULT = 'bench1_sleep_rand2_1000_200' EXP_CONFIG_NAME_DEFAULT = 'bench1_cpu_io_rps' exp_config_name = os.getenv("EXP_CONFIG_NAME", EXP_CONFIG_NAME_DEFAULT) exp_file = f"configs/{exp_config_name}.json" workload_spec = util.load_json_file(exp_file) kn_command = kube.get_kn_command(**workload_spec) print(kn_command) # to run the command, we can simply use: # !{kn_command} ``` # Workload Specification ``` # get user workload function from the file user_workload_func = request_funcs.workload_funcs[workload_spec['request_func']] # get ready count callback get_ready_cb = lambda: get_knative_watch_info(workload_spec['name'])['ready_replicas'] print('ready callback:', get_ready_cb()) # create logger and check one execution of workload func wlogger = workload.WorkloadLogger(get_ready_cb=get_ready_cb) simple_worker_func = lambda: wlogger.worker_func(user_workload_func) # add worker func to workload spec workload_spec['simple_worker_func'] = simple_worker_func simple_worker_func() # wlogger.monitoring_thread.start() # wlogger.record_conc_loop() # wlogger.monitor_conc_loop() wlogger.start_capturing() time.sleep(7) wlogger.stop_capturing() wlogger.get_recorded_data() ``` # Performing A Series of Experiments ``` target_list = workload_spec['exp_spec']['target_list'] rps_list = workload_spec['exp_spec']['rps_list'] overall_results = [] total_configs = list(itertools.product(target_list, rps_list)) for target, rps in tqdm(total_configs): single_res_name = perform_experiment(rps=rps, target=target, base_workload_spec=workload_spec, wlogger=wlogger) overall_results.append({ 'target': target, 'rps': rps, 'res_name': single_res_name, 'request_func': workload_spec['request_func'], 'workload_name': workload_spec['name'], 'is_rps': workload_spec['is_rps'], 'exp_time_mins': workload_spec['exp_spec']['time_mins'], }) now = get_time_with_tz() res_name = 'overview_' + now.strftime('res-%Y-%m-%d_%H-%M-%S') 
res_file_name = res_name + ".csv" res_folder = f'results/{workload_spec["exp_spec"]["name"]}/' df = pd.DataFrame(data=overall_results) df.to_csv(os.path.join(res_folder, res_file_name)) df ```
github_jupyter
# Exercise 02 - Functions and Getting Help!

## 1. Complete Your Very First Function

Complete the body of the following function according to its docstring.

*HINT*: Python has a builtin function `round`

```
def round_to_two_places(num):
    """Return num rounded to 2 decimal places.

    >>> round_to_two_places(3.14159)
    3.14
    """
    # the original body called round() but dropped the result;
    # the rounded value must be returned to the caller
    return round(num, 2)
```

## 2. Explore the Built-in Function

The help for `round` says that `ndigits` (the second argument) may be negative.
What do you think will happen when it is? Try some examples in the following cell?

Can you think of a case where this would be useful?

```
num = 75.5
round(num, -2)
# Useful when you want to round on the left side of the decimal point,
# e.g. round(75.5, -2) -> 100.0 (nearest hundred)
```

## 3. More Function

Given the problem of candy-sharing: friends Alice, Bob and Carol tried to split candies evenly. For the sake of their friendship, any candies left over would be smashed. For example, if they collectively bring home 91 candies, they will take 30 each and smash 1.

Below is a simple function that will calculate the number of candies to smash for *any* number of total candies.

**Your task**:
- Modify it so that it optionally takes a second argument representing the number of friends the candies are being split between. If no second argument is provided, it should assume 3 friends, as before.
- Update the docstring to reflect this new behaviour.

```
def to_smash(total_candies, num=3):
    """Return the number of leftover candies that must be smashed
    after distributing total_candies evenly between num friends.

    num is optional and defaults to 3 (Alice, Bob and Carol), as before.

    >>> to_smash(91)
    1
    """
    # 'null' does not exist in Python; a default argument value replaces
    # the broken `if(num==null)` check and makes the argument optional
    return total_candies % num
```

## 4. Taste some Errors

It may not be fun, but reading and understanding **error messages** will help you improve your problem-solving skills.

Each code cell below contains some commented-out buggy code. For each cell...

1. Read the code and predict what you think will happen when it's run.
2. Then uncomment the code and run it to see what happens.
*(**Tips**: In the kernel editor, you can highlight several lines and press `ctrl`+`/` to toggle commenting.)* 3. Fix the code (so that it accomplishes its intended purpose without throwing an exception) <!-- TODO: should this be autochecked? Delta is probably pretty small. --> ``` round_to_two_places(9.9999) x = -10 y = 5 # Which of the two variables above has the smallest absolute value? smallest_abs = min(abs(x),abs(y)) def f(x): y = abs(x) return y print(f(5)) ``` ## 5. More and more Functions For this question, we'll be using two functions imported from Python's `time` module. ### Time Function The [time](https://docs.python.org/3/library/time.html#time.time) function returns the number of seconds that have passed since the Epoch (aka [Unix time](https://en.wikipedia.org/wiki/Unix_time)). <!-- We've provided a function called `seconds_since_epoch` which returns the number of seconds that have passed since the Epoch (aka [Unix time](https://en.wikipedia.org/wiki/Unix_time)). --> Try it out below. Each time you run it, you should get a slightly larger number. ``` # Importing the function 'time' from the module of the same name. # (We'll discuss imports in more depth later) from time import time t = time() print(t, "seconds since the Epoch") ``` ### Sleep Function We'll also be using a function called [sleep](https://docs.python.org/3/library/time.html#time.sleep), which makes us wait some number of seconds while it does nothing particular. (Sounds useful, right?) You can see it in action by running the cell below: ``` from time import sleep duration = 5 print("Getting sleepy. See you in", duration, "seconds") sleep(duration) print("I'm back. What did I miss?") ``` ### Your Own Function With the help of these functions, complete the function **`time_call`** below according to its docstring. <!-- (The sleep function will be useful for testing here since we have a pretty good idea of what something like `time_call(sleep, 1)` should return.) 
-->

```
def time_call(fn, arg):
    """Return the number of seconds it takes to call fn(arg)."""
    import time
    start_time = time.time()
    fn(arg)
    # start_time is already a float; the original wrote `start_time()`,
    # which tries to call the float and raises TypeError
    return time.time() - start_time
```

How would you verify that `time_call` is working correctly? Think about it...

## 6. 🌶️ Reuse your Function

*Note: this question depends on a working solution to the previous question.*

Complete the function below according to its docstring.

```
def slowest_call(fn, arg1, arg2, arg3):
    """Return the amount of time taken by the slowest of the calls
    fn(arg1), fn(arg2) and fn(arg3).
    """
    # time each call exactly once and take the maximum; the original
    # re-invoked time_call inside each comparison, running (and timing)
    # some of the calls twice
    return max(time_call(fn, arg1), time_call(fn, arg2), time_call(fn, arg3))
```

# Keep Going
github_jupyter
```
# GPU / session configuration cell.
# NOTE(review): a TF session is configured twice in this cell (once via
# keras.backend.tensorflow_backend.set_session, once more after closing the
# first session below); only the last configuration is in effect -- confirm
# which one is intended.
import tensorflow as tf
from keras.backend.tensorflow_backend import set_session

# Earlier experiment with the BFC allocator, kept for reference:
# config = tf.ConfigProto()
# config.gpu_options.allocator_type = 'BFC' #A "Best-fit with coalescing" algorithm, simplified from a version of dlmalloc.
# config.gpu_options.per_process_gpu_memory_fraction = 0.3
# config.gpu_options.allow_growth = True
# set_session(tf.Session(config=config))

## LIMIT GPU USAGE
config = tf.ConfigProto(log_device_placement=True)
config.gpu_options.allow_growth = True  # don't pre-allocate memory; allocate as-needed
config.gpu_options.per_process_gpu_memory_fraction = 0.95  # limit memory to be allocated
set_session(tf.Session(config=config))  # create sess w/ above settings

print(tf.test.is_built_with_cuda())

# Report whether a GPU device is visible to TensorFlow.
import tensorflow as tf
if tf.test.gpu_device_name():
    print('Default GPU Device: {}'.format(tf.test.gpu_device_name()))
else:
    print("Please install GPU version of TF")

# keras example imports
from keras.models import load_model
## extra imports to set GPU options
import tensorflow as tf
from keras import backend as k

# Close the session created above and replace it with a freshly configured one.
k.get_session().close()
###################################
# TensorFlow wizardry
config = tf.ConfigProto()
# Don't pre-allocate memory; allocate as-needed
config.gpu_options.allow_growth = True
# Only allow a total of half the GPU memory to be allocated
config.gpu_options.per_process_gpu_memory_fraction = 0.95
# Create a session with the above options specified.
k.tensorflow_backend.set_session(tf.Session(config=config))
###################################
print(tf.test.is_built_with_cuda())

from math import sqrt
from numpy import concatenate
from matplotlib import pyplot
from pandas import read_csv
from pandas import DataFrame
from pandas import concat
from sklearn.metrics import mean_squared_error
from keras.models import Sequential
from keras.layers import Dense
from keras.layers import LSTM
import os
import numpy as np
from tabulate import tabulate
# NOTE(review): the name `mean` imported here is rebound to a float by the
# experiment loop later in this notebook; the import is effectively unused.
from statistics import mean

# this function is to get the time string like h:m:s
#========================================================================================
def getTime(time):
    """Format a duration in seconds as an ``H:M:S`` string.

    Wraps at 24 hours and truncates fractional seconds via ``int()``.
    The parameter shadows the ``time`` module name, but that module is not
    used inside this function.
    """
    time=time%(24*3600)
    hours=time//3600
    time%=3600
    minutes=time//60
    time%=60
    seconds=time
    periods=[('hours',int(hours)),('minutes',int(minutes)),('seconds',int(seconds))]
    time_string=':'.join('{}'.format(value) for name,value in periods)
    return time_string
#========================================================================================

# Write the CSV header for the confidence-interval results file.
import csv
dirpath = os.getcwd()
path = dirpath + '/LSTMExperimentResults_AfterDefense/1%/RMSE_Confidence_Interval (epoch= 10, batch = 20 , neurons = 10).csv'
myfile1 = open(path,'w', newline='')
writer1 = csv.writer(myfile1)
heading =['Samples','Number of Observations','RMSEValues','Mean','Standard Errors','Upper Bound','Lower Bound','Execution Time']
writer1.writerow(heading)
myfile1.close()
```
https://machinelearningmastery.com/multivariate-time-series-forecasting-lstms-keras/
https://machinelearningmastery.com/time-series-forecasting-long-short-term-memory-network-python/
https://machinelearningmastery.com/multi-step-time-series-forecasting-long-short-term-memory-networks-python/
https://machinelearningmastery.com/models-sequence-prediction-recurrent-neural-networks/
https://machinelearningmastery.com/how-to-develop-rnn-models-for-human-activity-recognition-time-series-classification/
```
# convert series to supervised learning
def
series_to_supervised(data, n_in=1, n_out=1, dropnan=True):
    """Frame a time series as a supervised-learning dataset.

    Args:
        data: sequence of observations (list or 2D NumPy array).
        n_in: number of lag observations (t-n .. t-1) used as input.
        n_out: number of future observations (t .. t+n) used as output.
        dropnan: whether to drop rows containing NaNs produced by shifting.

    Returns:
        A pandas DataFrame of the series framed for supervised learning.
    """
    n_vars = 1 if type(data) is list else data.shape[1]
    df = DataFrame(data)
    cols, names = list(), list()
    # input sequence (t-n, ... t-1)
    for i in range(n_in, 0, -1):
        cols.append(df.shift(i))
        names += [('var%d(t-%d)' % (j+1, i)) for j in range(n_vars)]
    # forecast sequence (t, t+1, ... t+n)
    for i in range(0, n_out):
        cols.append(df.shift(-i))
        if i == 0:
            names += [('var%d(t)' % (j+1)) for j in range(n_vars)]
        else:
            names += [('var%d(t+%d)' % (j+1, i)) for j in range(n_vars)]
    # put it all together
    agg = concat(cols, axis=1)
    agg.columns = names
    # drop rows with NaN values
    if dropnan:
        agg.dropna(inplace=True)
    return agg

# specify the number of lag hours
list_n_mins = [1,5,10,15,30,60]
#percentage = ['5%','10%','15%','20%','25%','30%','35%','40%','45%','50%']
percentage = ['1%']

# One full train/evaluate cycle per (lag-window, sample-percentage) pair.
for n_mins in list_n_mins:
    for percent in percentage:
        import time
        start_time = time.time()
        # load dataset
        dirpath = os.getcwd()
        dataset = read_csv(dirpath + '/datasetForLSTM/60_'+percent+'_Allcombine.csv', header=0, index_col=0)
        values = dataset.values
        # Hyperparameters for this run.
        epochs = 10
        batch_size = 20
        neurons = 10
        n_features = 6
        # frame as supervised learning
        reframed = series_to_supervised(values, n_mins, 1)
        print(reframed.head())
        # drop columns we don't want to predict
        # for number of mins = 1, drop columns ==> [6,7,8,9,10] , index 11 for gridlock
        # for number of mins = 5, drop columns ==> [30,31,32,33,34] , index 35 for gridlock
        # for number of mins = 10, drop columns ==> [60,61,62,63,64] , index 65 for gridlock
        # for number of mins = 15, drop columns ==> [90,91,92,93,94] , index 95 for gridlock
        # for number of mins = 30, drop columns ==> [180,181,182,183,184] , index 95 for gridlock
        # for number of mins = 60, drop columns ==> [360,361,362,363,364] , index 365 for gridlock
        # for number of mins = 90, drop columns ==> [540,541,542,543,544] , index 545 for gridlock
        if n_mins == 1:
            reframed.drop(reframed.columns[[6,7,8,9,10]], axis=1, inplace=True)
        if n_mins == 5:
            reframed.drop(reframed.columns[[30,31,32,33,34]], axis=1, inplace=True)
        if n_mins == 10:
            reframed.drop(reframed.columns[[60,61,62,63,64]], axis=1, inplace=True)
        if n_mins == 15:
            reframed.drop(reframed.columns[[90,91,92,93,94]], axis=1, inplace=True)
        if n_mins == 30:
            reframed.drop(reframed.columns[[180,181,182,183,184]], axis=1, inplace=True)
        if n_mins == 60:
            reframed.drop(reframed.columns[[360,361,362,363,364]], axis=1, inplace=True)
        # reframed.to_csv(dirpath + '/datasetForLSTM/60_5%_Allcombine_reframed.csv',index=False )
        #print(reframed.head())
        reframed.columns
        # split into train and test sets
        values = reframed.values
        n_train_mins = 80 * 181
        train = values[:n_train_mins, :]
        test = values[n_train_mins:, :]
        # split into input and outputs
        n_obs = n_mins * n_features
        train_X, train_y = train[:, :n_obs], train[:, -1]
        test_X, test_y = test[:, :n_obs], test[:, -1]
        print(train_X.shape, len(train_X), train_y.shape)
        # reshape input to be 3D [samples, timesteps, features]
        train_X = train_X.reshape((train_X.shape[0], n_mins, n_features))
        test_X = test_X.reshape((test_X.shape[0], n_mins, n_features))
        print(train_X.shape, train_y.shape, test_X.shape, test_y.shape)
        # design network
        model = Sequential()
        model.add(LSTM(neurons, input_shape=(train_X.shape[1], train_X.shape[2])))
        model.add(Dense(1))
        model.compile(loss='mean_squared_error', optimizer='adam')
        # fit network
        history = model.fit(train_X, train_y, epochs=epochs, batch_size=batch_size, validation_data=(test_X, test_y), verbose=0, shuffle=False)
        # plot history
        pyplot.plot(history.history['loss'], label='train')
        pyplot.plot(history.history['val_loss'], label='test')
        pyplot.xlabel('epoch')
        pyplot.ylabel('loss')
        pyplot.legend()
        pyplot.savefig(dirpath + '/LSTMExperimentResults_AfterDefense/1%/'+percent+'_'+str(n_mins)+'min_'+str(epochs)+'epochs_'+str(batch_size)+'batch_size_'+str(neurons)+'neurons.png')
        pyplot.clf()
        # pyplot.show()
        # make a prediction
        yhat = model.predict(test_X)
        # temp_yhat = yhat
        # temp_yhat = [np.round(num) for num in yhat]
        # pyplot.plot(test_y, 'r-',label='actual')
        # pyplot.show()
        # pyplot.plot(temp_yhat, 'b-',label='predict')
        # pyplot.show()
        test_X.shape
        repeats = 10
        rmse_list = list()
        # (name is a historical typo for "actual_predicted_df")
        acutal_predicted_df = DataFrame()
        acutal_predicted_df['actual'] = test_y
        acutal_predicted_df['predicted'] = yhat
        acutal_predicted_df.to_csv(dirpath + '/LSTMExperimentResults_AfterDefense/1%/'+percent+'_'+str(n_mins)+'min_'+str(epochs)+'epochs_'+str(batch_size)+'batch_size_'+str(neurons)+'neurons.csv', index=False)
        # NOTE(review): nothing changes between iterations of this loop (the
        # model is not retrained and predict() is deterministic), so all
        # `repeats` RMSE values appear to be identical, which makes the
        # confidence interval below degenerate (se == 0) -- confirm intent.
        for r in range(repeats):
            # make a prediction
            test_X, test_y = test[:, :n_obs], test[:, -1]
            test_X = test_X.reshape((test_X.shape[0], n_mins, n_features))
            yhat = model.predict(test_X)
            yhat.shape
            test_X = test_X.reshape((test_X.shape[0], test_X.shape[1]*test_X.shape[2]))
            test_y = test_y.reshape((len(test_y), 1))
            # invert scaling for forecast
            # NOTE(review): the concatenated arrays differ only in their first
            # column (prediction vs. target); the RMSE below therefore averages
            # over many identical feature columns, diluting the error -- the
            # referenced tutorial computes RMSE on the inverse-scaled first
            # column only. Confirm which is intended.
            inv_yhat = concatenate((yhat, test_X), axis=1)
            #print(tabulate(inv_yhat, headers=['inv_yhat'], tablefmt='orgtbl'))
            inv_y = concatenate((test_y, test_X), axis=1)
            #print(tabulate(inv_y, headers=['inv_y'], tablefmt='orgtbl'))
            # calculate RMSE
            rmse = sqrt(mean_squared_error(inv_y, inv_yhat))
            rmse_list.append(rmse)
        # 95% confidence interval over the repeated RMSE measurements.
        import numpy as np
        import scipy.stats
        import csv
        a = 1.0 * np.array(rmse_list)
        n = len(a)
        mean, se = np.mean(a), scipy.stats.sem(a)
        h = se * scipy.stats.t.ppf((1 + 0.95) / 2., n-1)
        elapsed_time = getTime(time.time() - start_time)
        # Append this run's summary row to the shared results CSV.
        myfile = open(dirpath + '/LSTMExperimentResults_AfterDefense/1%/RMSE_Confidence_Interval (epoch= 10, batch = 20 , neurons = 10).csv', 'a', newline='')
        writer = csv.writer(myfile)
        with myfile:
            writer.writerow([percent, n_mins, rmse_list, mean, se, mean-h, mean+h, elapsed_time])
```
github_jupyter
```
%matplotlib nbagg
import numpy as np
import strid
import matplotlib.pyplot as plt
import scipy.signal
```
# System identification with Poly reference Least Squares Complex Frequency-domain estimator

This notebook shows how `strid` can be used to obtain the modal properties from measurements of a system subjected to stochastic loading with the Poly reference Least Squares Complex Frequency-domain (pLSCF) estimator.

First we load the data from a shear frame with known modal properties, when we perform the actual system identification.
```
# Load measured outputs plus the reference ("true") modal properties.
data = np.load("results/data-stochastic.npz")
y = data["y"]
fs = data["fs"]
true_f = data["true_frequencies"]
true_xi = data["true_damping"]
true_modeshapes = data["true_modeshapes"]
```
## System identification

Use the measured outputs $y$ to determine the system properties of the shear frame. pLSCF is a frequency domain identification method and first we estimate the positive lag PSD matrix as a representation of the system in the frequency domain. Then use the `PolyReferenceLSCF` instance with the PSD matrix to identify the system properties.
```
# Positive-lag PSD matrix of the outputs and matching frequency vector.
Syyp = strid.find_positive_psd_matrix(y, y, nfft=2**11)
f = strid.get_frequency_vector(fs, Syyp.shape[2])

fig, ax = plt.subplots()
ax.semilogy(f, np.abs(np.trace(Syyp)))
ax.set(xlabel="Frequency (Hz)", ylabel="Positive lag PSD")

fdid = strid.PolyReferenceLSCF(Syyp, fs)
```
We will use a stabilization diagram to determine the physical modes (poles), first we perform system identification from the data with model order between 5 and 50.
```
# Identify modes for every model order in [5, nmax]; keyed by order.
nmax = 50
modes = {}
for i, order in enumerate(range(5, nmax+1)):
    N, D = fdid.perform(order, nmax)
    modes[order] = strid.Mode.find_modes_from_rmfd(N, D, fdid.fs)
```
and then we plot the identified modes in the stabilization diagram and we pick the stable modes.
```
stabdiag = strid.StabilizationDiagram()
stabdiag.plot(modes)
# NOTE(review): this overlay uses trace(abs(.)) while the plot above uses
# abs(trace(.)) -- the two are not identical in general; confirm which is
# intended.
stabdiag.axes_psd.semilogy(f, np.trace(np.abs(Syyp)), color=(0., 0., 0., .5), lw=.3)
```
Finally, we can access the picked modes from the `picked_modes` property of the stabilization diagram object, see below.
```
modes = stabdiag.picked_modes
```
# Comparison between estimated and true modes

Below, we compare the identified modes against the exact solution.
```
fig = plt.figure("Damping estimate")
axd = fig.add_axes((0.1, 0.1, .8, .8))
axd.set(xlabel='Frequency', ylabel='Damping ratio',
        title='Estimated and true frequency and damping',
        ylim=(0, .10)
        )

figmodes, axes = plt.subplots(ncols=3, nrows=3, dpi=144)
res = []
for n in range(true_f.size):
    ax = axes.flatten()[n]
    un = true_modeshapes[n]
    fn = true_f[n]
    xin = true_xi[n]
    # Match the picked mode with the highest MAC against the true shape.
    nmax = np.argmax([strid.modal_assurance_criterion(mode.v, un) for mode in modes])
    mode = modes[nmax]
    line, = axd.plot(mode.f, mode.xi, 'x')
    line, = axd.plot(fn, xin, 'o', mec=line.get_color(), mfc=(0, 0, 0, 0))
    # Relative errors (%) and MAC for this mode pair.
    ferr = (mode.f-fn)/fn * 100
    xierr = (mode.xi-xin)/xin*100
    mac = strid.modal_assurance_criterion(un, mode.v)
    res.append([n, ferr, xierr, mac*100,])
    # Prepend the fixed base DOF (0.) and scale the estimate onto the true shape.
    v_true = np.r_[0., un]
    v = np.r_[0, mode.v]
    v = strid.modal_scale_factor(v, v_true)*v
    z = np.arange(v.size)
    ax.plot(v_true, z, label='True')
    ax.plot(v.real, z, label='Estimated')
    if n == 2:
        ax.legend(bbox_to_anchor=(.5, 1.20), loc='lower center', ncol=2)
        axd.legend(['Estimated', 'True'],)
    ax.axvline(0., color=(0, 0, 0, .3))
    ax.set_title(f"Mode {n}")
    ax.axis('off')
    ax.set_xlim(-.5, .5)
```
We can also save the saved modes for further analysis, see below.
```
import pickle

with open("results/modes.pkl", 'wb') as fout:
    pickle.dump(modes, fout)
```
See how you can plot and animate the mode shapes identified here with the `strid` package and also other options in the example [03-visualization-of-mode-shapes](03-visualization-of-mode-shapes.ipynb)
github_jupyter
``` import random import os from PIL import Image import numpy as np import matplotlib.pyplot as plt import copy import pickle #generate stats from collections import defaultdict from collections import Counter %matplotlib inline import json #load categories yolo and coco names import numpy as np x = np.loadtxt('categories/9k.names',delimiter='\n',dtype=str) yolo=x.tolist() x = np.loadtxt('categories/coco.names',delimiter='\n',dtype=str) coco=x.tolist() wordtocnt = ['zero','one','two','three','four','five','six','seven','eight','nine', 'ten','eleven','twelve','thirteen','fourteen','fifteen'] def getnounsimple(question): newtxt = question.replace('?','') newtxt = newtxt.replace('How many','') newtxt = newtxt.replace('how many','') tokens = nltk.word_tokenize(newtxt) for lookup in ['can','are','does','is','giraffe','giraffes','zebra']: try: noun = tokens[0:tokens.index(lookup)+1] break except: noun = '' return ' '.join(noun) import nltk lemmatizer = nltk.stem.WordNetLemmatizer() def getnoun(sentence): tokens = nltk.word_tokenize(sentence) tagged = nltk.pos_tag(tokens) tag = tagged[2:] nouns = [] for i,j in enumerate(tag): if j[1] =='NNS' or j[1] =='NNP': nouns = tag[0:i+1] break #print (nouns) if len(nouns) == 0: return getnounsimple(sentence) return ' '.join([lemmatizer.lemmatize(noun[0]) for noun in nouns]) def issimple(noun): noun = noun.strip(" ") split = noun.split(" ") if len(split) == 1: return True return False print (issimple('black dogs')) v7wtell = Visual7wTell('visual7w/dataset_v7w_telling.json') v7wtell.info() from vqatools.visual7w import Visual7wPoint,Visual7wTell v7wpoint = Visual7wPoint('visual7w/dataset_v7w_pointing.json') v7wpoint.info() v7wpoint.boxidtobox[794494] v7wpoint.showQA(265555) v7wpoint.qidtoqa[265555] # howques = getQuesIds(quesType='how') countques = v7wtell.getCountquesids() print ("ther are %d count questions" %(len(countques))) v7wtell.showQA(random.choice(countques)) countans =defaultdict(int) for qid in countques: qa= 
v7wtell.qidtoqa[qid] countans[qa.get('answer')] +=1 prec = list(countans.keys()) prec nounstats = defaultdict(list) blank=0 for qid in countques: qa= v7wtell.qidtoqa[qid] noun = getnoun(qa.get('question')) if noun == '': print (qa.get('question')) blank+=1 nounstats[noun].append(qid) print ("Noun: ",noun) simple=[] commplex=[] simpleyolo=[] simplecoco=[] for noun in nounstats.keys(): if issimple(noun): simple.append(noun) else: commplex.append(noun) if noun in yolo: simpleyolo.append(noun) if noun in coco: simplecoco.append(noun) simplequest= [ len(nounstats[noun]) for noun in simple] print ("no of simple questions:",sum(simplequest)) complexquest= [ len(nounstats[noun]) for noun in commplex] print ("no of complex questions:",sum(complexquest)) simplequest= [ len(nounstats[noun]) for noun in simpleyolo] print ("no of simple questions in yolo:",sum(simplequest)) simplequest= [ len(nounstats[noun]) for noun in simplecoco] print ("no of simple questions in coco:",sum(simplequest)) ```
github_jupyter
# WeatherPy ---- #### Note * Instructions have been included for each segment. You do not have to follow them exactly, but they are included to help you think through the steps. ``` # Dependencies and Setup import matplotlib.pyplot as plt import pandas as pd import numpy as np import requests import time # Import API key api_key = "2b5d420394a2cb8a1fed6e5574203e5f" #from api_keys import api_key # Incorporated citipy to determine city based on latitude and longitude from citipy import citipy # Output File (CSV) output_data_file = "output_data/cities.csv" # Range of latitudes and longitudes lat_range = (-90, 90) lng_range = (-180, 180) ``` ## Generate Cities List ``` # List for holding lat_lngs and cities lat_lngs = [] cities = [] # Create a set of random lat and lng combinations lats = np.random.uniform(low=-90.000, high=90.000, size=1500) lngs = np.random.uniform(low=-180.000, high=180.000, size=1500) lat_lngs = zip(lats, lngs) # Identify nearest city for each lat, lng combination for lat_lng in lat_lngs: city = citipy.nearest_city(lat_lng[0], lat_lng[1]).city_name # If the city is unique, then add it to a our cities list if city not in cities: cities.append(city) # Print the city count to confirm sufficient count len(cities) # URL url = "http://api.openweathermap.org/data/2.5/weather?units=Imperial&APPID=" + api_key response = requests.get(f"{url}&q={city}").json() response ``` ### Perform API Calls * Perform a weather check on each city using a series of successive API calls. * Include a print log of each city as it'sbeing processed (with the city number and city name). 
``` # Hold info city_name = [] cloudiness = [] country = [] date = [] humidity = [] lat = [] lng = [] max_temp = [] wind_speed = [] record = 1 print(f"Beginning Data Retrieval") print(f"-----------------------------") #Loop through the cities in the city list for city in cities: try: response = requests.get(f"{url}&q={city}").json() city_name.append(response["name"]) cloudiness.append(response["clouds"]["all"]) country.append(response["sys"]["country"]) date.append(response["dt"]) humidity.append(response["main"]["humidity"]) max_temp.append(response["main"]["temp_max"]) lat.append(response["coord"]["lat"]) lng.append(response["coord"]["lon"]) wind_speed.append(response["wind"]["speed"]) city_record = response["name"] print(f"Processing Record {record} | {city_record}") record= record + 1 time.sleep(1.00) # If no record found "skip" to next call except: print(f"{city_record} not found. Skipping...") continue print ("Process Completed") ``` ### Convert Raw Data to DataFrame * Export the city data into a .csv. * Display the DataFrame ``` # Collect Retrieved Information weatherpy_info = { "City": city_name, "Cloudiness":cloudiness, "Country":country, "Date":date, "Humidity": humidity, "Lat":lat, "Lng":lng, "Max Temp": max_temp, "Wind Speed":wind_speed } # Convert to PD.DataFrame weather_data = pd.DataFrame(weatherpy_info) # Display count of weather data values weather_data.count() # Save data frame to CSV weather_data.to_csv('openweathermap_data.csv') weather_data.head() ``` ### Plotting the Data * Use proper labeling of the plots using plot titles (including date of analysis) and axes labels. * Save the plotted figures as .pngs. #### Latitude vs. Temperature Plot ``` # Display scatter graph plt.scatter(weather_data["Lat"], weather_data["Max Temp"], marker="o", s=10) plt.title("City Latitude vs. Max Temperature(12/01/19)") plt.ylabel("Max. 
Temperature (F)") plt.xlabel("Latitude") plt.grid(True) # Save the figure plt.savefig("LatitudeVsTemperaturePlot.png") # Show plot plt.show() ``` #### Latitude vs. Humidity Plot ``` # Display scatter graph plt.scatter(weather_data["Lat"], weather_data["Humidity"], marker="o", s=10) plt.title("City Latitude vs. Humidity(12/01/19)") plt.ylabel("Humidity (%)") plt.xlabel("Latitude") plt.grid(True) # Save the figure plt.savefig("LatitudeVsHumidityPlot.png") # Show plot plt.show() ``` #### Latitude vs. Cloudiness Plot ``` # Display scatter graph plt.scatter(weather_data["Lat"], weather_data["Cloudiness"], marker="o", s=10) plt.title("City Latitude vs. Cloudiness(12/01/19)") plt.ylabel("Cloudiness (%)") plt.xlabel("Latitude") plt.grid(True) # Save the figure plt.savefig("LatitudeVsCloudiness.png") # Show plot plt.show() ``` #### Latitude vs. Wind Speed Plot ``` # Display scatter graph plt.scatter(weather_data["Lat"], weather_data["Wind Speed"], marker="o", s=10) plt.title("City Latitude vs. Wind Speed(12/01/19)") plt.ylabel("Wind Speed (mph)") plt.xlabel("Latitude") plt.grid(True) # Save the figure plt.savefig("Output_Plots/Wind_Speed_vs_Latitude.png") # Show plot plt.show() ```
github_jupyter
## Demo Tracking pose and face ## Use input size : - file cfg - yolo3_weight.h5 when convert - file train.py ``` import colorsys import cv2 import dlib import face_recognition from keras import backend as K from keras.utils import multi_gpu_model from keras.layers import Input from keras.models import load_model from imutils.video import WebcamVideoStream import numpy as np import os from PIL import Image, ImageFont, ImageDraw from timeit import default_timer as timer from scipy.spatial import distance from yolo3.model import yolo_eval, yolo_body, tiny_yolo_body from yolo3.utils import letterbox_image # from threading import Thread # import cv2 # import imutils # class WebcamVideoStream: # def __init__(self, src=0): # self.stream = cv2.VideoCapture(src) # self.stream.set(3, 800) # self.stream.set(4, 600) # (self.grabbed, self.frame) = self.stream.read() # self.stopped = False # def start(self): # # Start the thread to read frames from the video stream # Thread(target=self.update, args=()).start() # return self # def update(self): # while True: # if self.stopped: # return # (self.grabbed, self.frame) = self.stream.read() # def read(self): # return self.frame # def stop(self): # self.stopped = True os.environ['CUDA_VISIBLE_DEVICES'] = '0' gpu_num = 1 model_path = '../logs/human_pose_dataset_1400_416_yolo/trained_weights_final.h5' anchors_path = '../model_data/yolo_anchors.txt' classes_path = '../model_data/human_pose.txt' score = 0.7 iou = 0.25 model_image_size = (416, 416) sess = K.get_session() # Get class classes_path = os.path.expanduser(classes_path) with open(classes_path) as f: class_names = f.readlines() class_names = [c.strip() for c in class_names] # Anchors anchors_path = os.path.expanduser(anchors_path) with open(anchors_path) as f: anchors = f.readline() anchors = [float(x) for x in anchors.split(',')] anchors = np.array(anchors).reshape(-1, 2) # Load model model_path = os.path.expanduser(model_path) assert model_path.endswith('.h5'), 'Keras model end 
with file .h5' num_anchors = len(anchors) num_classes = len(class_names) is_tiny_version = num_anchors==6 try: yolo_model = load_model(model_path, compile=False) except: if is_tiny_version: yolo_model = tiny_yolo_body(Input(shape=(None, None, 3)), num_anchors//2, num_classes) else: yolo_model = yolo_body(Input(shape=(None, None, 3)), num_anchors//3, num_classes) yolo_model.load_weights(model_path) else: yolo_model.layers[-1].output_shape[-1] == num_anchors/len(yolo_model.output) * (num_classes + 5), 'Mismatch between model and given anchor and class sizes' print("{} model, anchors, and classes loaded.".format(model_path)) centerpoints = [] namefromcenterpoint = [] face_encodings_in_room = [] face_names_in_room = [] known_face_encodings_array = np.load("../data/numpy/known_face_encoding.npy") known_face_names = np.load("../data/numpy/known_face_names.npy") # Convert nparray -> List to face_encoding len_of_array_known_face_names = len(known_face_names) known_face_encodings_array = known_face_encodings_array.reshape(len_of_array_known_face_names, 128) known_face_encodings = [] for i in range(len_of_array_known_face_names): known_face_encodings.append(known_face_encodings_array[i]) def compare_points(centerpoints, point): distance_centerpoint = [distance.euclidean(centerpoints[i], point) for i in range(len(centerpoints))] index_distance_min = np.argmin(distance_centerpoint) distance_min = np.min(distance_centerpoint) return index_distance_min, distance_min # Use face recognition def detect_name(frame, face_locations, face_encodings, known_face_encodings, box, label): top, left, bottom, right = box for (top1, right1, bottom1, left1), face_encoding in zip(face_locations, face_encodings): distance = face_recognition.face_distance(known_face_encodings, face_encoding) min_distance = np.min(distance) index_point_min = np.argmin(distance) if min_distance < 0.5: name = known_face_names[index_point_min] print(name) # cv2.rectangle(frame, (left, top), (right, bottom), (255, 0, 
0), 3) label = name + ": " + label cv2.putText(frame, label, (left + 6, top + 20), font, 1.0, (0, 0, 255), 1) else: label = "unknown" + ": " + label cv2.putText(frame, label, (left + 6, top + 20), font, 1.0, (0, 0, 255), 1) return frame, name input_image_shape = K.placeholder(shape=(2, )) boxes, scores, classes = yolo_eval(yolo_model.output, anchors, len(class_names), input_image_shape, score_threshold=score, iou_threshold=iou) num_frame = 0 font = cv2.FONT_HERSHEY_DUPLEX centerpoints = [] namefromcenterpoint = [] # Video capture video_capture = WebcamVideoStream(src=0).start() while True: num_frame += 1 # Read video frame and flip camera frame = video_capture.read() frame = cv2.flip(frame, 1) frame_process = np.copy(frame) # Detect state standing and sleeping and sitting image = Image.fromarray(frame_process) # Process detect hand and recognition furniture boxed_image = letterbox_image(image, tuple(reversed(model_image_size))) image_data = np.array(boxed_image, dtype='float32') image_data /= 255. image_data = np.expand_dims(image_data, 0) # Rim keras backend tensorflow forward neural network out_boxes, out_scores, out_classes = sess.run([boxes, scores, classes], feed_dict={ yolo_model.input: image_data, input_image_shape: [image.size[1], image.size[0]], K.learning_phase(): 0 }) for i, c in reversed(list(enumerate(out_classes))): predicted_class = class_names[c] box = out_boxes[i] score = out_scores[i] label = '{} {:.2f}'.format(predicted_class, score) top, left, bottom, right = box print(type(top)) top = int(top) left = int(left) bottom = int(bottom) right = int(right) cv2.rectangle(frame, (left, top), (right, bottom), (255, 0, 0), 3) # Tracking object use center point x_point_new = (top + bottom) / 2. y_point_new = (left + right) / 2. 
point = np.asarray([x_point_new, y_point_new]) if centerpoints: index_distance_min, distance_min = compare_points(centerpoints, point) # Compare distance min with (bottom - top) / 4 if distance_min < (bottom - top) / 4.: # point new same name index distance min name = namefromcenterpoint[index_distance_min] label = name + ": " + label + "don't compute" cv2.putText(frame, label, (left + 6, top + 20), font, 1.0, (0, 0, 255), 1) # Update center point centerpoints[index_distance_min] = point else: #-------------------------------------------------------# # Face recognition crop_img = frame_process[top:bottom, left:right] # Convert the image from BGR color to RGB to face_recognition use rgb_frame = crop_img[:, :, ::-1] # Find all the faces and face encodings in the current frame of video face_locations = face_recognition.face_locations(rgb_frame) face_encodings = face_recognition.face_encodings(rgb_frame, face_locations) if not face_encodings: cv2.putText(frame, label, (left + 6, top + 20), font, 1.0, (0, 0, 255), 1) else: frame, name = detect_name(frame, face_locations, face_encodings, known_face_encodings, known_face_names, (top, left, bottom, right), label) centerpoints.append(point) namefromcenterpoint.append(name) else: # Face recognition crop_img = frame_process[top:bottom, left:right] # Convert the image from BGR color to RGB to face_recognition use rgb_frame = crop_img[:, :, ::-1] # Find all the faces and face encodings in the current frame of video face_locations = face_recognition.face_locations(rgb_frame) face_encodings = face_recognition.face_encodings(rgb_frame, face_locations) if not face_encodings: cv2.putText(frame, label, (left + 6, top + 20), font, 1.0, (0, 0, 255), 1) else: frame, name = detect_name(frame, face_locations, face_encodings, known_face_encodings, known_face_names, (top, left, bottom, right), label) centerpoints.append(point) namefromcenterpoint.append(name) # #-------------------------------------------------------# cv2.imshow("Frame", 
frame) # if cv2.waitKey(1) & 0xFF == ord('q'): break input_image_shape = K.placeholder(shape=(2, )) boxes, scores, classes = yolo_eval(yolo_model.output, anchors, len(class_names), input_image_shape, score_threshold=score, iou_threshold=iou) num_frame = 0 font = cv2.FONT_HERSHEY_DUPLEX center_points_id = {} # Video capture video_capture = WebcamVideoStream(src=0).start() while True: num_frame += 1 # Read video frame and flip camera frame = video_capture.read() frame = cv2.flip(frame, 1) frame_process = np.copy(frame) # #-------------------------------------------------------# # # Face recognition # # Convert the image from BGR color to RGB to face_recognition use # rgb_frame = frame_process[:, :, ::-1] # # Find all the faces and face encodings in the current frame of video # face_locations = face_recognition.face_locations(rgb_frame) # face_encodings = face_recognition.face_encodings(rgb_frame, face_locations) # for (top, right, bottom, left), face_encoding in zip(face_locations, face_encodings): # distance = face_recognition.face_distance(known_face_encodings, face_encoding) # min_distance = np.min(distance) # index_point_min = np.argmin(distance) # if min_distance < 0.5: # name = known_face_names[index_point_min] # print(name) # cv2.rectangle(frame, (left, top), (right, bottom), (0, 0, 255), 3) # cv2.putText(frame, name, (left + 6, top - 6), font, 1.0, (255, 255, 0), 1) # #-----------------------------------------------------------# # Detect state standing and sleeping and sitting image = Image.fromarray(frame_process) # Process detect hand and recognition furniture boxed_image = letterbox_image(image, tuple(reversed(model_image_size))) image_data = np.array(boxed_image, dtype='float32') image_data /= 255. 
image_data = np.expand_dims(image_data, 0) out_boxes, out_scores, out_classes = sess.run([boxes, scores, classes], feed_dict={ yolo_model.input: image_data, input_image_shape: [image.size[1], image.size[0]], K.learning_phase(): 0 }) for i, c in reversed(list(enumerate(out_classes))): predicted_class = class_names[c] box = out_boxes[i] score = out_scores[i] label = '{} {:.2f}'.format(predicted_class, score) top, left, bottom, right = box top = max(0, np.floor(top + 0.5).astype('int32')) left = max(0, np.floor(left + 0.5).astype('int32')) bottom = min(image.size[1], np.floor(bottom + 0.5).astype('int32')) right = min(image.size[0], np.floor(right + 0.5).astype('int32')) cv2.rectangle(frame, (left, top), (right, bottom), (255, 0, 0), 3) # cv2.putText(frame, label, (left + 6, top - 6), font, 1.0, (255, 0, 255), 1) # Tracking object use center point x_point_new = (top + bottom) / 2. y_point_new = (left + right) / 2. point = np.asarray([x_point_new, y_point_new]) distance_centerpoint = [distance.euclidean(centerpoints[i], point) for i in range(len(centerpoints))] index_distance_min = np.argmin(distance_centerpoint) distance_min = np.min(distance_centerpoint) if distance_centerpoint < (bottom - top) / 10.: name = namefromcenterpoint[index_distance_min] label = name + ": " + label + "don't compute" cv2.putText(frame, label, (left + 6, top + 20), font, 1.0, (0, 0, 255), 1) else: #-------------------------------------------------------# # Face recognition crop_img = frame_process[top:bottom, left:right] # Convert the image from BGR color to RGB to face_recognition use rgb_frame = crop_img[:, :, ::-1] # Find all the faces and face encodings in the current frame of video face_locations = face_recognition.face_locations(rgb_frame) face_encodings = face_recognition.face_encodings(rgb_frame, face_locations) if not face_encodings: cv2.putText(frame, label, (left + 6, top + 20), font, 1.0, (0, 0, 255), 1) else: frame, name = detect_name(frame, face_locations, face_encodings, 
known_face_encodings, known_face_names, box, label) centerpoints.append(point) namefromcenterpoint.append(name) # for (top1, right1, bottom1, left1), face_encoding in zip(face_locations, face_encodings): # distance = face_recognition.face_distance(known_face_encodings, face_encoding) # min_distance = np.min(distance) # index_point_min = np.argmin(distance) # if min_distance < 0.5: # name = known_face_names[index_point_min] # print(name) # # cv2.rectangle(frame, (left, top), (right, bottom), (255, 0, 0), 3) # label = name + ": " + label # cv2.putText(frame, label, (left + 6, top + 20), font, 1.0, (0, 0, 255), 1) # #-------------------------------------------------------# cv2.imshow("Frame", frame) # if cv2.waitKey(1) & 0xFF == ord('q'): break x, y = (2, 3) x ``` ## Test ``` import cv2 import matplotlib.pyplot as plt img = cv2.imread("data/image/hanoi.jpg") plt.imshow(img) img_crop = img[0:800, 1000:1400] plt.imshow(img_crop) a = [] if not a: print("true") else: print("False") a = "dam_van_tai" a.split() import os os.path.split(a) ```
github_jupyter
<a href="https://colab.research.google.com/github/moh2236945/Natural-language-processing/blob/master/Apply%20features%20extrating%20and%20text%20normalization.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a> ``` import re import pandas as pd import matplotlib.pyplot as plt import seaborn as sns import string import nltk import numpy as np %matplotlib inline train=pd.read_csv('/content/train_E6oV3lV.csv') test=pd.read_csv('/content/test_tweets_anuFYb8.csv') train.head() ``` data has 3 columns id, label, and tweet. ***label*** is the binary target variable and ***tweet*** contains the tweets that we will clean and preprocess. ``` #Removing @ to do this we sure to combine train and test together fires combi=train.append(test,ignore_index=True) combi.shape def remove_pattern(input_text,pattern): r=re.findall(pattern,input_text) for i in r: input_text=re.sub(i,'',input_text) return input_text ``` create a new column tidy_tweet, it contain the cleaned and processed tweets. **Note** that we have passed “@[\w]*” as the pattern to the remove_pattern function. It is actually a regular expression which will pick any word starting with ‘@’. ``` combi['tidy_tweet']=np.vectorize(remove_pattern)(combi['tweet'],"@[\w]*") combi.head() #removing Punction,Number&Special chars combi['tidy_tweet']=combi['tidy_tweet'].str.replace('[^a-zA-Z#]', "") combi.head() ``` Removing Short Words ``` combi['tidy_tweet'] = combi['tidy_tweet'].apply(lambda x: ' '.join([w for w in x.split() if len(w)>3])) ``` Text Normalization Steps: Tokenization > Normalization ``` tokenized_tweet = combi['tidy_tweet'].apply(lambda x: x.split()) # tokenizing tokenized_tweet.head() from nltk.stem.porter import * stemmer = PorterStemmer() tokenized_tweet = tokenized_tweet.apply(lambda x: [stemmer.stem(i) for i in x]) # stemming #stitch these tokens back together. 
def hashtag_extract(x):
    """Collect the hashtags of every tweet in `x`.

    Returns a list parallel to `x`: each element is the list of hashtag
    bodies (the text after '#') found in the corresponding tweet.
    """
    return [re.findall(r"#(\w+)", tweet) for tweet in x]
def word_vector(tokens, size):
    """Return the mean Word2Vec embedding of `tokens` as a (1, size) array.

    Tokens absent from the model vocabulary are skipped; if none of the
    tokens are present, the zero vector is returned.
    """
    total = np.zeros((1, size))
    found = 0
    for token in tokens:
        try:
            embedding = model_w2v[token].reshape((1, size))
        except KeyError:
            # token not in the Word2Vec vocabulary -- ignore it
            continue
        total += embedding
        found += 1
    return total / found if found else total
github_jupyter
``` from db_module import GetEvents import pandas as pd from pathlib import Path import mplfinance as mpf df = pd.read_csv(Path("./Resources/BTC-5YRS-D.csv"), index_col="Date", parse_dates=True, infer_datetime_format=True) df.drop(['Unnamed: 0'], axis=1, inplace=True) display(df.head()) display(df.tail()) df = df.dropna() mpf.plot(df, type="candle", style="yahoo", volume=True) events = db_strat.GetEvents(df) new_df = events.Output() new_df.head() class GetEvents: def __init__(self, df): self.df = df # Check if there is a previous bar def update_cert(self, cert, index, df): # Create certifying candle # count number of null values to see if is first row null_count = 0 for value in cert.values(): if value == None: null_count += 1 # update prev dictionary if all null values # if first row it will update with those values if null_count == 5: cert['t'] = index cert['o'] = float(df.loc[index]['Open']) cert['c'] = float(df.loc[index]['Close']) cert['h'] = float(df.loc[index]['High']) cert['l'] = float(df.loc[index]['Low']) # return the certying candle dictionary return cert # update prev with new prev values (new prev is old curr) else: if index + datetime.timedelta(days=1): cert['t'] = (index) cert['o'] = float(df.loc[index]['Open']) cert['c'] = float(df.loc[index]['Close']) cert['h'] = float(df.loc[index]['High']) cert['l'] = float(df.loc[index]['Low']) # return the certying candle dictionary return cert def update_curr(self, curr, index, df): # Create current dictionary of current candle idx = df.index # format datetime index dt_object = (index - datetime.timedelta(days=1)).to_datetime64() if sum(df.index == dt_object) > 0: index -= datetime.timedelta(days=1) curr['t'] = (index) curr['o'] = float(df.loc[index]['Open']) curr['c'] = float(df.loc[index]['Close']) curr['h'] = float(df.loc[index]['High']) curr['l'] = float(df.loc[index]['Low']) # return the current candle dictionary return curr else: pass def update_prev(self, prev, index, df): # Create the previous candle 
idx = df.index dt_object = (index - datetime.timedelta(days=2)).to_datetime64() if sum(df.index == dt_object) > 0: index -= datetime.timedelta(days=2) prev['t'] = (index) prev['o'] = float(df.loc[index]['Open']) prev['c'] = float(df.loc[index]['Close']) prev['h'] = float(df.loc[index]['High']) prev['l'] = float(df.loc[index]['Low']) # return the previous candle dictionary return prev else: pass def find_max(self, cert_candle, curr_candle, prev_candle): # get local max max_dict = {} if curr_candle and prev_candle: if (curr_candle['c'] > cert_candle['c']) & (curr_candle['c'] > prev_candle['c']): max_dict = curr_candle return max_dict else: return None def find_min(self, cert_candle, curr_candle, prev_candle): # get local min min_dict = {} if curr_candle and prev_candle: if (curr_candle['c'] < cert_candle['c']) & (curr_candle['c'] < prev_candle['c']): min_dict = curr_candle return min_dict else: return None def create_dataframe(self, df): """ Create DataFrame with local minimums and maximums to create signals for double bottom creation Accepts DataFrames with columns formatted as 'Open', 'Close', 'High', 'Low'. More columns can be added. 
""" # # store previous candle prev = { 't' : None, 'o' : None, 'c' : None, 'h' : None, 'l' : None } # store current candle curr = { 't' : None, 'o' : None, 'c' : None, 'h' : None, 'l' : None } # store certifying candle (certifies if the max/min is created) cert = { 't' : None, 'o' : None, 'c' : None, 'h' : None, 'l' : None } # Init lists for max and min events max_ = [] min_ = [] # Iterrate through dataframes indexes for index in df.index: cert_candle = update_cert(cert, index, df) curr_candle = update_curr(curr, index, df) prev_candle = update_prev(prev, index, df) # pont f arbitrary - 5 periods out # when finished will have 6 values mx = find_max(cert_candle, curr_candle, prev_candle) mn = find_min(cert_candle, curr_candle, prev_candle) # max stores close and high # min stores close and low if mx == None: max_.append(0) else: max_.append(1) if mn == None: min_.append(0) else: min_.append(1) # Adding min/max events to original dataframe then creating a copy with columns for double bottom events max_min_df = pd.DataFrame({'Max': max_, 'Min': min_}) max_min_df = max_min_df[['Max', 'Min']].shift(-1) max_min_df['Date'] = df.index max_min_df.set_index('Date', inplace=True) df_features = pd.concat([df, max_min_df], axis=1) df_features[['A', 'B', 'C', 'D', 'E', 'F', 'DB']] = 0.0 return df_features def get_events(self, new_df): """Returns dataframe with binary encoded events""" new_df['Events'] = 0 events_list = [None, None, None, None, None] search_c = {} buy = None sell = None total_events = 0 for index in new_df.index: # Start search for A if A is none if events_list[0] == None: if new_df.loc[index]['Max'] == 1: events_list[0] = index new_df.loc[index]['A'] = 1.0 # Show that A exists elif events_list != None: new_df.loc[index]['A'] = 1.0 # Reset if does not exist else: new_df.loc[index]['A'] = 0 # Start search for B if B is none if (events_list[1] == None) & (events_list[0] != None): if new_df.loc[index]['Min'] == 1: if new_df.loc[index]['Close'] < 
new_df.loc[events_list[0]]['Close']: events_list[1] = index new_df.loc[index]['B'] = 1.0 # Show that B exists elif events_list[1] != None: new_df.loc[index]['B'] = 1.0 # Reset if does not exist else: new_df.loc[index]['B'] = 0 # Start search for C if (events_list[2] == None) & (events_list[1] != None) & (events_list[0] != None): if new_df.loc[index]['Max'] == 1: # Establish conditions condition_1 = (new_df.loc[index]['Close'] > new_df.loc[events_list[1]]['Close']) condition_2 = (new_df.loc[index]['Close'] < new_df.loc[events_list[0]]['Close']) # if C is in range between B and A if condition_1 & condition_2: events_list[2] = index new_df.loc[index]['C'] = 1.0 # if C is higher than A elif new_df.loc[index]['Close'] > new_df.loc[events_list[0]]['Close']: events_list = [events_list[0], None, None, None, None] # Show if C exists elif events_list[2] != None: new_df.loc[index]['C'] = 1.0 # Reset if does not exist else: new_df.loc[index]['C'] = 0 # Start search for D if (events_list[3] == None) & (events_list[2] != None) & (events_list[1] != None) & (events_list[0] != None): if new_df.loc[index]['Min'] == 1: # Establish conditions condition_1 = (new_df.loc[index]['Close'] < new_df.loc[events_list[1]]['Close']) condition_2 = (new_df.loc[index]['Close'] > new_df.loc[events_list[1]]['Low']) condition_3 = (new_df.loc[index]['Low'] < new_df.loc[events_list[1]]['Close']) condition_4 = (new_df.loc[index]['Low'] > new_df.loc[events_list[1]]['Low']) # if D is in the range of B's low and close if condition_1 & condition_2 | condition_3 & condition_4: events_list[3] = index new_df.loc[index]['D'] = 1 # Search between B and D for highest max for index in new_df.loc[events_list[1]:events_list[3]].index: if new_df.loc[index]['Max'] == 1: search_c[index] = new_df.loc[index]['Close'] # find max c value and add it to the events list max_c = max(search_c, key = lambda x: search_c[x]) events_list[2] = max_c max_c = max_c.to_datetime64() new_df.loc[max_c]['C'] = 1 # if D is less than B's low 
elif (new_df.loc[index]['Close'] < new_df.loc[events_list[1]]['Low']): events_list = [max_c, None, None, None, None] search_c = {} # Show if D exists elif events_list[3] != None: new_df.loc[index]['D'] = 1.0 # Reset if does not exist else: new_df.loc[index]['D'] = 0 # start search E trade (entry) if (events_list[4] == None) & (events_list[3] != None) & (events_list[2] != None) & (events_list[1] != None) & (events_list[0] != None): if new_df.loc[index]['Close'] > new_df.loc[events_list[2]]['Close']: events_list[4] = index new_df.loc[index]['E'] = 1 new_df.loc[index]['DB'] = 1 if events_list[4] != None: events_list = [None, None, None, None, None] # restart if close below D elif new_df.loc[index]['Close'] < new_df.loc[events_list[3]]['Low']: display(events_list) new_a = events_list[2] events_list = [new_a, None, None, None, None] search_c = {} if new_df.loc[index]['DB'] == 1: buy = index sell = (index + datetime.timedelta(days=5)).to_datetime64() total_events += 1 new_df.loc[index]['Events'] = total_events if sum(new_df.index == sell) > 0: new_df.loc[buy]['F'] = 0 new_df.loc[sell]['F'] = float(new_df.loc[sell]['Close'] - new_df.loc[buy]['Close']) new_df.loc[sell]['DB'] = -1 print(events_list) print(new_df.loc[index]) return new_df def output(self): return self.get_events(self.create_dataframe(self.df)) ```
github_jupyter
``` # This cell is added by sphinx-gallery !pip install mrsimulator --quiet %matplotlib inline import mrsimulator print(f'You are using mrsimulator v{mrsimulator.__version__}') ``` # Czjzek distribution, ²⁷Al (I=5/2) 3QMAS ²⁷Al (I=5/2) 3QMAS simulation of amorphous material. In this section, we illustrate the simulation of a quadrupolar MQMAS spectrum arising from a distribution of the electric field gradient (EFG) tensors from amorphous material. We proceed by employing the Czjzek distribution model. ``` import numpy as np import matplotlib.pyplot as plt from scipy.stats import multivariate_normal from mrsimulator import Simulator from mrsimulator.methods import ThreeQ_VAS from mrsimulator.models import CzjzekDistribution from mrsimulator.utils.collection import single_site_system_generator ``` ## Generate probability distribution ``` # The range of isotropic chemical shifts, the quadrupolar coupling constant, and # asymmetry parameters used in generating a 3D grid. iso_r = np.arange(101) / 1.5 + 30 # in ppm Cq_r = np.arange(100) / 4 # in MHz eta_r = np.arange(10) / 9 # The 3D mesh grid over which the distribution amplitudes are evaluated. iso, Cq, eta = np.meshgrid(iso_r, Cq_r, eta_r, indexing="ij") # The 2D amplitude grid of Cq and eta is sampled from the Czjzek distribution model. Cq_dist, e_dist, amp = CzjzekDistribution(sigma=1).pdf(pos=[Cq_r, eta_r]) # The 1D amplitude grid of isotropic chemical shifts is sampled from a Gaussian model. iso_amp = multivariate_normal(mean=58, cov=[4]).pdf(iso_r) # The 3D amplitude grid is generated as an uncorrelated distribution of the above two # distribution, which is the product of the two distributions. pdf = np.repeat(amp, iso_r.size).reshape(eta_r.size, Cq_r.size, iso_r.size) pdf *= iso_amp pdf = pdf.T ``` The two-dimensional projections from this three-dimensional distribution are shown below. ``` _, ax = plt.subplots(1, 3, figsize=(9, 3)) # isotropic shift v.s. 
quadrupolar coupling constant ax[0].contourf(Cq_r, iso_r, pdf.sum(axis=2)) ax[0].set_xlabel("Cq / MHz") ax[0].set_ylabel("isotropic chemical shift / ppm") # isotropic shift v.s. quadrupolar asymmetry ax[1].contourf(eta_r, iso_r, pdf.sum(axis=1)) ax[1].set_xlabel(r"quadrupolar asymmetry, $\eta$") ax[1].set_ylabel("isotropic chemical shift / ppm") # quadrupolar coupling constant v.s. quadrupolar asymmetry ax[2].contourf(eta_r, Cq_r, pdf.sum(axis=0)) ax[2].set_xlabel(r"quadrupolar asymmetry, $\eta$") ax[2].set_ylabel("Cq / MHz") plt.tight_layout() plt.show() ``` ## Simulation setup Let's create the site and spin system objects from these parameters. Use the :func:`~mrsimulator.utils.collection.single_site_system_generator` utility function to generate single-site spin systems. ``` spin_systems = single_site_system_generator( isotope="27Al", isotropic_chemical_shift=iso, quadrupolar={"Cq": Cq * 1e6, "eta": eta}, # Cq in Hz abundance=pdf, ) len(spin_systems) ``` Simulate a $^{27}\text{Al}$ 3Q-MAS spectrum by using the `ThreeQ_MAS` method. ``` mqvas = ThreeQ_VAS( channels=["27Al"], spectral_dimensions=[ { "count": 512, "spectral_width": 26718.475776, # in Hz "reference_offset": -4174.76184, # in Hz "label": "Isotropic dimension", }, { "count": 512, "spectral_width": 2e4, # in Hz "reference_offset": 2e3, # in Hz "label": "MAS dimension", }, ], ) ``` Create the simulator object, add the spin systems and method, and run the simulation. ``` sim = Simulator() sim.spin_systems = spin_systems # add the spin systems sim.methods = [mqvas] # add the method sim.config.number_of_sidebands = 1 sim.run() data = sim.methods[0].simulation ``` The plot of the corresponding spectrum. ``` plt.figure(figsize=(4.25, 3.0)) ax = plt.subplot(projection="csdm") cb = ax.imshow(data / data.max(), cmap="gist_ncar_r", aspect="auto") plt.colorbar(cb) ax.set_ylim(-20, -50) ax.set_xlim(80, 20) plt.tight_layout() plt.show() ```
github_jupyter
``` #Mounting colab to read the data from google drive from google.colab import drive drive.mount("/content/drive") #imports import matplotlib.pyplot as plt %matplotlib inline import seaborn as sns import pandas as pd import numpy as np from datetime import datetime from plotly.subplots import make_subplots from sklearn.linear_model import LogisticRegression from sklearn.metrics import classification_report from sklearn.metrics import confusion_matrix from sklearn.ensemble import RandomForestClassifier from sklearn.metrics import plot_confusion_matrix from sklearn.datasets import make_classification from collections import Counter from xgboost import XGBClassifier from sklearn.metrics import confusion_matrix, precision_score, recall_score, f1_score from imblearn.over_sampling import SMOTE from sklearn.model_selection import train_test_split ``` # Load data ``` hospital_data = pd.read_csv("/content/drive/MyDrive/hospital_mortality.csv") hospital_data.head(5) ``` ### How many patients have 20 or more missing feature values? **Explanation:** To get this value, I first checked the basic structure of the dataset and the column names. After that I counted the numbers of missing features for each row and filtered the patient_IDs with missing features equal to or greater than 20. #### Patient ID's with 20 or more missing values: 210 ``` print("Dimension of Dataset:", hospital_data.shape ) print("Column names:", hospital_data.columns) lst = [] for i in range(0,len(hospital_data.index)): lst.append([i, hospital_data.iloc[i].isnull().sum()]) missingfeature = pd.DataFrame(lst,columns = ["patient_index", "missing_feature_count"]) missingfeature = missingfeature[missingfeature.missing_feature_count >= 20] print("Patients with 20 or more missing values:", missingfeature.shape[0]) ``` ### What’s the difference in means of HR_min for patients that died vs survived (not counting patients with HR_min of 0)? 
**Explanation**:To calculate this, I first filtered the rows which do not have HR_min = 0. Then I filtered the mean of HR_min for patients who died and patients who survived and took the difference of both the values. #### Difference in means of HR_min for patients that died vs survived: 1.6746 ``` ## Your code here hospital_data = hospital_data[hospital_data.HR_Min != 0] hospital_data_died = hospital_data[hospital_data["In-hospital_death"] == 1] died_mean = hospital_data_died.HR_Min.mean() hospital_data_survived = hospital_data[hospital_data["In-hospital_death"] == 0] survived_mean = hospital_data_survived.HR_Min.mean() print("Difference in means of HR_min for patients that died vs survived:", round((died_mean - survived_mean),4)) ``` ### What is the median maximum heart rate for patients whose maximum temperature was 2 degrees Celsius higher than their minimum temperature? **Explanation** : For this Question, I filter the rows with patients with difference in max and min temperature to 2 degree celsius. I then calculated the median of Maximum heart rate. #### Median maximum heart rate for patients whose maximum temperature was 2 degrees Celsius higher than their minimum temperature: 109.0 ``` ## Your code here hospital_data_new= hospital_data[(hospital_data.Temp_Max - hospital_data.Temp_Min) == 2] print("Median maximum heart rate for patients whose maximum temperature was 2 degrees \ Celsius higher than their minimum temperature:",hospital_data_new.HR_Max.median()) ``` ### Create a plot to analyze the relationship between median temperature, outcome (died vs. survived), and age. **Explanation:** We can observe that our Age and Temp_Median are both continous variables. I used KBinsDiscretizer to discritize the Age variable into 4 bin. The discretization is done on the quantiled values, which means that each bin has approximately the same number of samples and plotted it with respect to the Temp_median for for survived and dead patients. 
Here 0 label is for survived patients and 1 is for died patients. We can observe that for all the bins, our distribution of median temperatures for survived case are centered around 37 degree celsius with less variation in survivors as compared to the dead patients. We can also observe that the first two bins have higher variation along with outliers in median temperature as compared to 3rd and the 4th bin. ``` from sklearn.preprocessing import KBinsDiscretizer as KD plt.figure(figsize=(14,8)) sns.set_theme(style="darkgrid") bin = KD(n_bins=4, encode='ordinal', strategy='quantile') transBinned = np.array(hospital_data['Age']).reshape(-1,1) bin.fit(transBinned) hospital_data['Age'] = bin.transform(transBinned) ax = sns.boxplot(x="Age", y="Temp_Median", hue="In-hospital_death", data=hospital_data, palette="Set3") ``` ### Create a plot to analyze the relationship between median temperature, outcome (died vs. survived), and gender. **Explanation:** I first checked the unique values in the gender feature and I observed, there were 3 rows with -1 values as gender which is not possible. - Gender 0: Females - Gender 1: Males Therefore, I dropped those rows with -1 as gender and then plotted the gender with the median temperature keeping In-hospital deaths as hue to check for both the cases. We can observe that for both males and females the mean is same both male and female survivor as well as male and female deid in hospital. From the plot, we are not able to obverse much difference for both males and females in both survived and died-in-hopital case. ``` ## Your code here plt.figure(figsize=(16,8)) hospital_data_gender = hospital_data[hospital_data.Gender != -1] ax = sns.boxplot(x="Gender", y="Temp_Median", hue="In-hospital_death", data=hospital_data_gender, palette="Set3") ``` ### Build and summarize the results of a machine learning model that predicts whether a patient dies or survives in the hospital. 
(We do not expect a perfect or highest-accuracy solution; this is primarily an opportunity for you to show your machine learning project workflow and to document your thought process, approach to solving problems, and interpretation of results.) ##### Checking the datatypes of columns and missing values **Observation:** We can observe that out of 111 featues 100 features have missing values. All the features are continous. ``` #checking datatype hospital_data.dtypes.unique() #checking missing value counts missing_values = [] missing_values = [[feature,hospital_data[feature].isnull().sum(), np.round(hospital_data[feature].isnull().mean()*100,4)] for feature in hospital_data] df = pd.DataFrame(missing_values, columns=["Features", "Count","Missing_value_percent"]) df = df[df.Count >0] df #Count of unique values in the categorical features summarystats_categorical_feat = [] categorical_feature = [feature for feature in hospital_data.columns if hospital_data[feature].dtype == 'O'] summarystats_categorical_feat = [[feature, len(hospital_data[feature].unique())] for feature in categorical_feature] df = pd.DataFrame(summarystats_categorical_feat, columns=["Features", "Unique Categories"]) df.shape[0] ``` ##### **Handling missing values** In our dataset the missing values present are less than 30% in all the features with missing values. In this case imputing missing values with mean or median is the best option on the basis of the distribution of the data. Since most features has normal distribution, I am using mean to impute the missing values. ``` #Imputing missing data with mean hospital_data=hospital_data.fillna(hospital_data.mean()) ``` ##### **Handling unrealistic data** We can observe from the describe funtion that few of the featues have negative values too which is not possible in a lot of cases. Next, I checked the featues with negative values to identify the realisticness of the data or to impute those data points with relevant data. 
I found that 'Gender', 'Height','Temp_Min','Weight_Min','Weight_Max','Weight_Mean','Weight_Median' have negative values. I performed the following imputions: **Gender:** I imputed the Gender values having -1 with higher mode which is for label 1. **Height:** We can observe that we have few rows with negative height (-1) which is not possible. I imputed the height with th mean value. **Min temp:** We can observe that we have min temperatues [-17.8 -12.5]. For dead patient case negative temperature could be possible but it is not possible for the survived patients. I imputed survived patient's temperature (-12.5) with mean min temperature. **Weight Min:** We can observe that negative min weight is -1 which is not possible so I replaced the values with mean value. **Weight Max:** We can observe that negative max weight is -1 which is not possible so I replaced the values with mean value. **Weight Mean:** We can observe that negative mean weight is -1 which is not possible so I replaced the values with mean value. **Weight Max:** We can observe that negative median weight is -1 which is not possible so I replaced the values with mean value. 
``` #Description of dataset hospital_data.describe() #Columns with negative values print(hospital_data.columns[(hospital_data< 0).any()].tolist()) plt.figure(figsize=(16,8)) hospital_data.boxplot(column=['Gender', 'Height','Temp_Min','Weight_Min','Weight_Max','Weight_Mean','Weight_Median']) ``` ###### **Gender** ``` #Imputing negative gender values print("Gender with their counts:",hospital_data.groupby(by = 'Gender').size()) hospital_data['Gender'] = hospital_data['Gender'].replace(-1.0,1.0) ``` ###### **Height** ``` #Imputing negative height values print(hospital_data[hospital_data.Height < 0]['Height'].unique()) hospital_data['Height'] = hospital_data['Height'].replace(-1.0,hospital_data['Height'].mean()) ``` ###### **Min Temp** ``` #Imputing negative Min temperature values print("Patients with negative temperature:",hospital_data[(hospital_data.Temp_Min < 0)].shape[0]) print("Negative temperatuer values:",hospital_data[(hospital_data.Temp_Min < 0)]['Temp_Min'].unique()) print("Dead patients with negative temperature:",hospital_data[(hospital_data.Temp_Min < 0) & (hospital_data['In-hospital_death'] == 1)]['Temp_Min']) hospital_data['Temp_Min'] = hospital_data['Temp_Min'].replace(-12.5,hospital_data['Temp_Min'].mean()) ``` ###### **Weight Min** ``` #Imputing negative min weight values print(hospital_data[hospital_data['Weight_Min'] < 0]['Weight_Min'].unique()) hospital_data['Weight_Min'] = hospital_data['Weight_Min'].replace(-1.0,hospital_data['Weight_Min'].mean()) ``` ###### **Weight Max** ``` ##Imputing negative max weight values print(hospital_data[hospital_data['Weight_Max'] < 0]['Weight_Max'].unique()) hospital_data['Weight_Max'] = hospital_data['Weight_Max'].replace(-1.0,hospital_data['Weight_Max'].mean()) ``` ###### **Weight Mean** ``` #Imputing negative mean weight values print(hospital_data[hospital_data['Weight_Mean'] < 0]['Weight_Mean'].unique()) hospital_data['Weight_Mean'] = 
hospital_data['Weight_Mean'].replace(-1.0,hospital_data['Weight_Mean'].mean()) ``` ###### **Weight Median** ``` #Imputing negative median weight values hospital_data[hospital_data['Weight_Median'] < 0]['Weight_Median'].unique() hospital_data['Weight_Median'] = hospital_data['Weight_Median'].replace(-1.0,hospital_data['Weight_Median'].mean()) ``` ###### **Feature Selection** Feature selection is a very important step since we have **111 features** with only **3989 data points** thus our model might overfit and even a lot of features could be redundant with high correlation. I performed feature selection using correlation and p-value check to see if a particular feature is contributing to the prediction of target variable or not. **correlation** I removed the one of the features from the two highly correlated featues. After performing this we got **74 featuers** which are not highly correlated. **Feature Selection** Next we will be selecting the columns based on how they affect the p-value. We are the removing the target column because it is the column we are trying to predict.We assume to null hypothesis to be “The selected combination of dependent variables do not have any effect on the independent variable”. Then we build a small regression model and calculate the p values. If the p values is higher than the threshold, we discard that combination of features. We got to **23 features** after this step which contibutes the most to our target variable. ``` #shape of data hospital_data.shape #checking correlation between the features and removing one highly correlated featue. 
def backwardElimination(x, Y, sl, columns):
    """Backward feature elimination driven by OLS p-values.

    Repeatedly fits an OLS model and drops the single feature with the
    largest p-value, until every remaining p-value is <= `sl`.

    Args:
        x: 2-D feature matrix (numpy array), one column per candidate
            feature.
        Y: target vector.
        sl: significance level threshold (e.g. 0.05).
        columns: array of column names parallel to the columns of `x`.

    Returns:
        Tuple (x, columns): the pruned matrix and the matching names.
    """
    numVars = len(x[0])
    for i in range(0, numVars):
        regressor_OLS = sm.OLS(Y, x).fit()
        maxVar = max(regressor_OLS.pvalues).astype(float)
        if maxVar <= sl:
            # Every remaining feature is significant -- stop.  (The
            # original kept refitting the model numVars times regardless,
            # and ended with a discarded no-op `regressor_OLS.summary()`.)
            break
        for j in range(0, numVars - i):
            if regressor_OLS.pvalues[j].astype(float) == maxVar:
                x = np.delete(x, j, 1)
                columns = np.delete(columns, j)
                # Drop exactly one column per fit; continuing the scan
                # with stale p-value indices could delete the wrong
                # column when p-values tie.
                break
    return x, columns
``` #Checking the distribution of our target variable plt.figure(figsize=(8,6)) counts = data['In-hospital_death'].value_counts() print(counts/data.shape[0]) labels = ['Survivor', 'Died_in-hospital'] ax = sns.barplot(x=labels, y=counts, log=True, palette=['green', 'red']) ax.set(xlabel='Class', ylabel='Frequency', title='Class Distribution') #Seperating features and target column y = data.loc[:,'In-hospital_death'] data = data.drop(['In-hospital_death'], axis = 1) X = data #Train-test-split X_train, X_val, y_train, y_val = train_test_split(X, y, stratify= y, \ test_size=0.20, random_state=42) print(X_train.shape,y_train.shape) print(X_val.shape, y_val.shape) X_train = X_train.reset_index(drop =True) y_train = y_train.reset_index(drop = True) X_val = X_val.reset_index(drop = True) y_val = y_val.reset_index(drop = True) ``` #### **Modeling** **Classification: Baseline Models** I am using two standard baseline models to observe the model performance with the unbalanced dataset which we observed in the previous plot. Our aim is the predict whether patient dies or survives in the hospital from the models: 1. Logistic Regression 2. Random Forest I have chosen logistic regression and random forest because, logistic regression is the most basic and highly used classification model applicable. Random forest is an advance model which can handle data imbalance well thus taking random forest too in consideration. **Observations** From the model's results we can make following observations: - Logistic Regression: From the confusion matrix of logistic regression, we can say that we are getting a very good prediction for the negative class which is survivor class (99 % recall) but our model is also not able to predict the true positive i.e. died In-hospital class (12 %). we also have a high false negative (88 %) which is bad for our us. Our aim should be to increase True positives prediction and decrease false negatives. 
We can say our model is predicting the died-in-hospital as survived due to the data imbalance.

- Random Forest: Our random forest model gives similar results, with only 28% true positives and 95% true negatives, along with a very high false-negative rate, which should be lower.

```
# fit a logistic regression model to the data
def logisticRegression(train_x, train_y, test_x, test_y):
    """Fit a logistic regression model, print its classification report,
    confusion matrix, and a normalized confusion-matrix plot."""
    model = LogisticRegression(random_state=42)
    model.fit(train_x, train_y)
    print(model)
    # make predictions
    expected = test_y
    predicted = model.predict(test_x)
    # summarize the fit of the model
    print(classification_report(expected, predicted))
    print(confusion_matrix(expected, predicted))
    # NOTE(review): plot_confusion_matrix was removed in scikit-learn 1.2;
    # on newer versions use ConfusionMatrixDisplay.from_estimator instead.
    disp = plot_confusion_matrix(model, test_x, test_y,
                                 cmap=plt.cm.Blues, normalize='true')
    disp.ax_.set_title("Confusion Matrix")
    plt.figure(figsize=(10, 6))
    sns.set(font_scale=1.4)
    print("Confusion Matrix")
    plt.show()

# Predicting with logistic regression model.
# The function prints its own report and returns None, so call it directly
# instead of wrapping it in print(), which printed a spurious "None".
logisticRegression(X_train, y_train, X_val, y_val)

def randomForest(train_x, train_y, test_x, test_y):
    """Fit a random forest classifier, print its classification report,
    confusion matrix, and a normalized confusion-matrix plot."""
    clf = RandomForestClassifier(n_estimators=5, max_depth=35, random_state=0)
    clf.fit(train_x, train_y)
    # predict once and reuse, instead of calling predict() twice
    predicted = clf.predict(test_x)
    print(classification_report(test_y, predicted))
    print(confusion_matrix(test_y, predicted))
    # NOTE(review): see plot_confusion_matrix deprecation note above.
    disp = plot_confusion_matrix(clf, test_x, test_y,
                                 cmap=plt.cm.Blues, normalize='true')
    disp.ax_.set_title("Confusion Matrix")
    print("Confusion Matrix")
    plt.show()

# Predicting with Random Forest model (also returns None; call directly)
randomForest(X_train, y_train, X_val, y_val)
```

##### **Over Sampling**

We can further try an oversampling technique to handle the data imbalance. I am using SMOTE for oversampling. SMOTE (Synthetic Minority Oversampling Technique) is the most widely used approach to synthesizing new examples. SMOTE works by selecting examples that are close in the feature space, drawing a line between the examples in the feature space and drawing a new sample at a point along that line.
Synthetic over-sampling works to cause the classifier to build larger decision regions that contain nearby minority class points.

**Observation:**

I made the following observations for both the models:

- Logistic regression: From the confusion matrix of logistic regression, we can see that even though our true negatives decreased to 76%, our true positives — the more important class — increased to a very high value (72%), which is a very good result for our logistic regression model after oversampling. We can further perform feature engineering and use advanced models to increase this accuracy further.
- Random Forest: Even though logistic regression performed well with oversampling, random forest couldn't perform very well in this case, giving true positives of around 46% only. We can choose logistic regression as the final model in this case.

```
# Oversampling
# fit_sample was renamed to fit_resample in imbalanced-learn 0.4 and removed
# in 0.6; fit_resample is the supported API.
X_resampled, y_resampled = SMOTE().fit_resample(X_train, y_train)
counter = Counter(y_resampled)
print(counter)

# Predicting with logistic regression model
print(logisticRegression(X_resampled, y_resampled, X_val, y_val))

# Predicting with Random Forest model
print(randomForest(X_resampled, y_resampled, X_val, y_val))
```
github_jupyter
_Lambda School Data Science — Model Validation_ # Select models and parameters Objectives - Hyperparameter optimization - Model selection Today we'll use this process: ## "A universal workflow of machine learning" _Excerpt from Francois Chollet, [Deep Learning with Python](https://github.com/fchollet/deep-learning-with-python-notebooks/blob/master/README.md), Chapter 4: Fundamentals of machine learning_ **1. Define the problem at hand and the data on which you’ll train.** Collect this data, or annotate it with labels if need be. **2. Choose how you’ll measure success on your problem.** Which metrics will you monitor on your validation data? **3. Determine your evaluation protocol:** hold-out validation? K-fold validation? Which portion of the data should you use for validation? **4. Develop a first model that does better than a basic baseline:** a model with statistical power. **5. Develop a model that overfits.** The universal tension in machine learning is between optimization and generalization; the ideal model is one that stands right at the border between underfitting and overfitting; between undercapacity and overcapacity. To figure out where this border lies, first you must cross it. **6. Regularize your model and tune its hyperparameters, based on performance on the validation data.** Repeatedly modify your model, train it, evaluate on your validation data (not the test data, at this point), modify it again, and repeat, until the model is as good as it can get. Iterate on feature engineering: add new features, or remove features that don’t seem to be informative. Once you’ve developed a satisfactory model configuration, you can train your final production model on all the available data (training and validation) and evaluate it one last time on the test set. ## 1. 
Define the problem at hand and the data on which you'll train We'll apply the workflow to a [project from _Python Data Science Handbook_](https://jakevdp.github.io/PythonDataScienceHandbook/05.06-linear-regression.html#Example:-Predicting-Bicycle-Traffic) by Jake VanderPlas: > **Predicting Bicycle Traffic** > As an example, let's take a look at whether we can predict the number of bicycle trips across Seattle's Fremont Bridge based on weather, season, and other factors. > We will join the bike data with another dataset, and try to determine the extent to which weather and seasonal factors—temperature, precipitation, and daylight hours—affect the volume of bicycle traffic through this corridor. Fortunately, the NOAA makes available their daily [weather station data](http://www.ncdc.noaa.gov/cdo-web/search?datasetid=GHCND) (I used station ID USW00024233) and we can easily use Pandas to join the two data sources. > Let's start by loading the two datasets, indexing by date: So this is a regression problem, not a classification problem. We'll define the target, choose an evaluation metric, and choose models that are appropriate for regression problems. 
### Download data ``` !curl -o FremontBridge.csv https://data.seattle.gov/api/views/65db-xm6k/rows.csv?accessType=DOWNLOAD !wget https://raw.githubusercontent.com/jakevdp/PythonDataScienceHandbook/master/notebooks/data/BicycleWeather.csv ``` ### Load data ``` # Modified from cells 15, 16, and 20, at # https://jakevdp.github.io/PythonDataScienceHandbook/05.06-linear-regression.html#Example:-Predicting-Bicycle-Traffic import pandas as pd counts = pd.read_csv('FremontBridge.csv', index_col='Date', parse_dates=True, infer_datetime_format=True) weather = pd.read_csv('BicycleWeather.csv', index_col='DATE', parse_dates=True, infer_datetime_format=False) daily = counts.resample('d').sum() daily['Total'] = daily.sum(axis=1) daily = daily[['Total']] # remove other columns weather_columns = ['PRCP', 'SNOW', 'SNWD', 'TMAX', 'TMIN', 'AWND'] daily = daily.join(weather[weather_columns], how='inner') # Make a feature for yesterday's total daily['Total_yesterday'] = daily.Total.shift(1) daily = daily.drop(index=daily.index[0]) ``` ### First fast look at the data - What's the shape? - What's the date range? - What's the target and the features? ``` daily.shape daily.head() daily.tail() ``` Target - Total : Daily total number of bicycle trips across Seattle's Fremont Bridge Features - Date (index) : from 2012-10-04 to 2015-09-01 - Total_yesterday : Total trips yesterday - PRCP : Precipitation (1/10 mm) - SNOW : Snowfall (1/10 mm) - SNWD : Snow depth (1/10 mm) - TMAX : Maximum temperature (1/10 Celsius) - TMIN : Minimum temperature (1/10 Celsius) - AWND : Average daily wind speed (1/10 meters per second) ## 2. Choose how you’ll measure success on your problem. Which metrics will you monitor on your validation data? This is a regression problem, so we need to choose a regression [metric](https://scikit-learn.org/stable/modules/model_evaluation.html#common-cases-predefined-values). I'll choose mean absolute error. ``` from sklearn.metrics import mean_absolute_error ``` ## 3. 
Determine your evaluation protocol We're doing model selection, hyperparameter optimization, and performance estimation. So generally we have two ideal [options](https://sebastianraschka.com/images/blog/2018/model-evaluation-selection-part4/model-eval-conclusions.jpg) to choose from: - 3-way holdout method (train/validation/test split) - Cross-validation with independent test set I'll choose cross-validation with independent test set. Scikit-learn makes cross-validation convenient for us! Specifically, I will use random shuffled cross validation to train and validate, but I will hold out an "out-of-time" test set, from the last 100 days of data: ``` train = daily[:-100] test = daily[-100:] X_train = train.drop(columns = 'Total') y_train = train.Total X_test = test.drop(columns = 'Total') y_test = test.Total ``` ## 4. Develop a first model that does better than a basic baseline ### Look at the target's distribution and descriptive stats ``` %matplotlib inline import seaborn as sns sns.distplot(y_train); ``` ### Basic baseline 1 ``` import numpy as np y_pred = np.full(shape=y_train.shape, fill_value=y_train.mean()) mean_absolute_error(y_train, y_pred) ``` ### Basic baseline 2 ``` mean_absolute_error(y_train, X_train.Total_yesterday) ``` ### First model that does better than a basic baseline https://scikit-learn.org/stable/modules/generated/sklearn.model_selection.cross_validate.html ``` from sklearn.linear_model import LinearRegression from sklearn.model_selection import cross_validate scores = cross_validate(LinearRegression(), X_train, y_train, scoring='neg_mean_absolute_error', cv=3, return_train_score=True, return_estimator=True) pd.DataFrame(scores) scores['test_score'].mean() for i, model in enumerate(scores['estimator']): coefficients = model.coef_ intercept = model.intercept_ feature_names = X_train.columns print('Model from cross validation fold #' + str(i)) print('Intercept', model.intercept_) print(pd.Series(coefficients, feature_names).to_string()) 
print('\n') import statsmodels.api as sm model = sm.OLS(y_train, sm.add_constant(X_train)) print(model.fit().summary()) ``` ## 5. Develop a model that overfits. "The universal tension in machine learning is between optimization and generalization; the ideal model is one that stands right at the border between underfitting and overfitting; between undercapacity and overcapacity. To figure out where this border lies, first you must cross it." —Chollet <img src="https://jakevdp.github.io/PythonDataScienceHandbook/figures/05.03-validation-curve.png"> Diagram source: https://jakevdp.github.io/PythonDataScienceHandbook/05.03-hyperparameters-and-model-validation.html#Validation-curves-in-Scikit-Learn ### Polynomial Regression? ``` # Copied from cell 10 at # https://jakevdp.github.io/PythonDataScienceHandbook/05.03-hyperparameters-and-model-validation.html#Validation-curves-in-Scikit-Learn from sklearn.preprocessing import PolynomialFeatures from sklearn.linear_model import LinearRegression from sklearn.pipeline import make_pipeline def PolynomialRegression(degree = 2, **kwargs): return make_pipeline(PolynomialFeatures(degree), LinearRegression(**kwargs)) for degree in [0, 1, 2, 3]: features = PolynomialFeatures(degree).fit(X_train).get_feature_names(X_train.columns) print(f'{degree} degree polynomial has {len(features)} features') print(features) print('\n') ``` ### Validation curve (with Polynomial Regression) https://scikit-learn.org/stable/modules/generated/sklearn.model_selection.validation_curve.html > Validation curve. Determine training and test scores for varying parameter values. This is similar to grid search with one parameter. 
``` # Modified from cell 13 at # https://jakevdp.github.io/PythonDataScienceHandbook/05.03-hyperparameters-and-model-validation.html#Validation-curves-in-Scikit-Learn %matplotlib inline import matplotlib.pyplot as plt from sklearn.model_selection import validation_curve degree = [0, 1, 2] train_score, val_score = validation_curve( PolynomialRegression(), X_train, y_train, param_name='polynomialfeatures__degree', param_range=degree, scoring='neg_mean_absolute_error', cv=3) plt.plot(degree, np.median(train_score, 1), color='blue', label='training score') plt.plot(degree, np.median(val_score, 1), color='red', label='validation score') plt.legend(loc='best') plt.xlabel('degree'); ``` ### Grid Search (with Polynomial Regression) https://scikit-learn.org/stable/modules/generated/sklearn.model_selection.GridSearchCV.html https://scikit-learn.org/stable/modules/grid_search.html ``` from sklearn.model_selection import GridSearchCV param_grid = { 'polynomialfeatures__degree': [0, 1, 2, 3] } gridsearch = GridSearchCV(PolynomialRegression(), param_grid = param_grid, scoring = 'neg_mean_absolute_error', cv = 3, return_train_score = True, verbose = 10) gridsearch.fit(X_train, y_train) pd.DataFrame(gridsearch.cv_results_).sort_values(by = 'rank_test_score') ``` ### Random Forest? 
https://scikit-learn.org/stable/modules/generated/sklearn.ensemble.RandomForestRegressor.html ``` from sklearn.ensemble import RandomForestRegressor model = RandomForestRegressor(n_estimators = 100, max_depth = 20) scores = cross_validate(model, X_train, y_train, scoring='neg_mean_absolute_error', cv=3, return_train_score=True, return_estimator=True) pd.DataFrame(scores) scores['test_score'].mean() ``` ### Validation Curve (with Random Forest) ``` # Modified from cell 13 at # https://jakevdp.github.io/PythonDataScienceHandbook/05.03-hyperparameters-and-model-validation.html#Validation-curves-in-Scikit-Learn %matplotlib inline import matplotlib.pyplot as plt from sklearn.model_selection import validation_curve model = RandomForestRegressor(n_estimators = 100) depth = [2, 3, 4, 5, 6] train_score, val_score = validation_curve( model, X_train, y_train, param_name = 'max_depth', param_range = depth, scoring = 'neg_mean_absolute_error', cv = 3) plt.plot(depth, np.median(train_score, 1), color='blue', label='training score') plt.plot(depth, np.median(val_score, 1), color='red', label='validation score') plt.legend(loc='best') plt.xlabel('depth'); ``` ### Grid Search (with Random Forest) ``` %%time param_grid = { 'n_estimators': [100, 200], 'max_depth': [4, 5], 'criterion': ['mse', 'mae'] } gridsearch = GridSearchCV(RandomForestRegressor(), param_grid = param_grid, scoring = 'neg_mean_absolute_error', cv = 3, return_train_score = True, verbose = 10) gridsearch.fit(X_train, y_train) results = pd.DataFrame(gridsearch.cv_results_) print(f'Best result from grid search of {len(results)} parameter combinations') results.sort_values(by = 'rank_test_score').head(1) ``` ## FEATURE ENGINEERING! 
Jake VanderPlas demonstrates this feature engineering: https://jakevdp.github.io/PythonDataScienceHandbook/05.06-linear-regression.html#Example:-Predicting-Bicycle-Traffic

```
# Modified from code cells 17-21 at
# https://jakevdp.github.io/PythonDataScienceHandbook/05.06-linear-regression.html#Example:-Predicting-Bicycle-Traffic

# patterns of use generally vary from day to day;
# let's add binary columns that indicate the day of the week:
days = ['Mon', 'Tue', 'Wed', 'Thu', 'Fri', 'Sat', 'Sun']
for i, day in enumerate(days):
    X_train[day] = (X_train.index.dayofweek == i).astype(float)

# we might expect riders to behave differently on holidays;
# let's add an indicator of this as well:
from pandas.tseries.holiday import USFederalHolidayCalendar
cal = USFederalHolidayCalendar()
holidays = cal.holidays('2012', '2016')
X_train = X_train.join(pd.Series(1, index=holidays, name='holiday'))
X_train['holiday'].fillna(0, inplace=True)

# We also might suspect that the hours of daylight would affect
# how many people ride; let's use the standard astronomical calculation
# to add this information:
def hours_of_daylight(date, axis=23.44, latitude=47.61):
    """Compute the hours of daylight for the given date"""
    # pd.datetime was deprecated and removed in pandas 2.0;
    # pd.Timestamp is the supported equivalent.
    days = (date - pd.Timestamp(2000, 12, 21)).days
    m = (1. - np.tan(np.radians(latitude))
         * np.tan(np.radians(axis) * np.cos(days * 2 * np.pi / 365.25)))
    return 24. * np.degrees(np.arccos(1 - np.clip(m, 0, 2))) / 180.

X_train['daylight_hrs'] = list(map(hours_of_daylight, X_train.index))

# temperatures are in 1/10 deg C; convert to C
X_train['TMIN'] /= 10
X_train['TMAX'] /= 10

# We can also calculate the average temperature.
X_train['Temp (C)'] = 0.5 * (X_train['TMIN'] + X_train['TMAX']) # precip is in 1/10 mm; convert to inches X_train['PRCP'] /= 254 # In addition to the inches of precipitation, let's add a flag that # indicates whether a day is dry (has zero precipitation): X_train['dry day'] = (X_train['PRCP'] == 0).astype(int) # Let's add a counter that increases from day 1, and measures how many # years have passed. This will let us measure any observed annual increase # or decrease in daily crossings: X_train['annual'] = (X_train.index - X_train.index[0]).days / 365. ``` ### Linear Regression (with new features) ``` scores = cross_validate(LinearRegression(), X_train, y_train, scoring = 'neg_mean_absolute_error', cv = 3, return_train_score = True, return_estimator = True) pd.DataFrame(scores) ``` ### Random Forest (with new features) ``` %%time param_grid = { 'n_estimators': [100], 'max_depth': [5, 10, 15], 'criterion': ['mae'] } gridsearch = GridSearchCV(RandomForestRegressor(), param_grid = param_grid, scoring = 'neg_mean_absolute_error', cv = 3, return_train_score = True, verbose = 10) gridsearch.fit(X_train, y_train) results = pd.DataFrame(gridsearch.cv_results_) print(f'Best result from grid search of {len(results)} parameter combinations') results.sort_values(by='rank_test_score').head(1) results['mean_test_score'].max() ``` ### Ridge Regression (with new features) https://scikit-learn.org/stable/modules/generated/sklearn.linear_model.Ridge.html ``` from sklearn.linear_model import Ridge param_grid = { 'alpha': [0.1, 1.0, 10.] 
} gridsearch = GridSearchCV(Ridge(), param_grid = param_grid, scoring = 'neg_mean_absolute_error', cv = 3, return_train_score = True) gridsearch.fit(X_train, y_train) results = pd.DataFrame(gridsearch.cv_results_) print(f'Best result from grid search of {len(results)} parameter combinations') results.sort_values(by='rank_test_score').head(1) model = gridsearch.best_estimator_ coefficients = model.coef_ intercept = model.intercept_ feature_names = X_train.columns print('Best model from grid search cross validation') print('Intercept', intercept) print(pd.Series(coefficients, feature_names).to_string()) ``` ### Compare to statsmodels ``` import statsmodels.api as sm model = sm.OLS(y_train, sm.add_constant(X_train)) model.fit_regularized(alpha=1).params ``` ### Feature engineering, explained by Francois Chollet > _Feature engineering_ is the process of using your own knowledge about the data and about the machine learning algorithm at hand to make the algorithm work better by applying hardcoded (nonlearned) transformations to the data before it goes into the model. In many cases, it isn’t reasonable to expect a machine-learning model to be able to learn from completely arbitrary data. The data needs to be presented to the model in a way that will make the model’s job easier. > Let’s look at an intuitive example. Suppose you’re trying to develop a model that can take as input an image of a clock and can output the time of day. > If you choose to use the raw pixels of the image as input data, then you have a difficult machine-learning problem on your hands. You’ll need a convolutional neural network to solve it, and you’ll have to expend quite a bit of computational resources to train the network. 
> But if you already understand the problem at a high level (you understand how humans read time on a clock face), then you can come up with much better input features for a machine-learning algorithm: for instance, write a Python script to follow the black pixels of the clock hands and output the (x, y) coordinates of the tip of each hand. Then a simple machine-learning algorithm can learn to associate these coordinates with the appropriate time of day. > You can go even further: do a coordinate change, and express the (x, y) coordinates as polar coordinates with regard to the center of the image. Your input will become the angle theta of each clock hand. At this point, your features are making the problem so easy that no machine learning is required; a simple rounding operation and dictionary lookup are enough to recover the approximate time of day. > That’s the essence of feature engineering: making a problem easier by expressing it in a simpler way. It usually requires understanding the problem in depth. > Before convolutional neural networks became successful on the MNIST digit-classification problem, solutions were typically based on hardcoded features such as the number of loops in a digit image, the height of each digit in an image, a histogram of pixel values, and so on. > Neural networks are capable of automatically extracting useful features from raw data. Does this mean you don’t have to worry about feature engineering as long as you’re using deep neural networks? No, for two reasons: > - Good features still allow you to solve problems more elegantly while using fewer resources. For instance, it would be ridiculous to solve the problem of reading a clock face using a convolutional neural network. > - Good features let you solve a problem with far less data. 
The ability of deep-learning models to learn features on their own relies on having lots of training data available; if you have only a few samples, then the information value in their features becomes critical. # ASSIGNMENT ### Core assignment Complete the notebook cells that were originally commented **`TODO`**. Then, focus on feature engineering to improve your cross validation scores. Collaborate with your cohort on Slack. You could start with the ideas [Jake VanderPlas suggests:](https://jakevdp.github.io/PythonDataScienceHandbook/05.06-linear-regression.html#Example:-Predicting-Bicycle-Traffic) > Our model is almost certainly missing some relevant information. For example, nonlinear effects (such as effects of precipitation and cold temperature) and nonlinear trends within each variable (such as disinclination to ride at very cold and very hot temperatures) cannot be accounted for in this model. Additionally, we have thrown away some of the finer-grained information (such as the difference between a rainy morning and a rainy afternoon), and we have ignored correlations between days (such as the possible effect of a rainy Tuesday on Wednesday's numbers, or the effect of an unexpected sunny day after a streak of rainy days). These are all potentially interesting effects, and you now have the tools to begin exploring them if you wish! At the end of the day, take the last step in the "universal workflow of machine learning" — "You can train your final production model on all the available data (training and validation) and evaluate it one last time on the test set." See the [`GridSearchCV`](https://scikit-learn.org/stable/modules/generated/sklearn.model_selection.GridSearchCV.html) documentation for the `refit` parameter, `best_estimator_` attribute, and `predict` method: > **refit : boolean, or string, default=True** > Refit an estimator using the best found parameters on the whole dataset. 
> The refitted estimator is made available at the `best_estimator_` attribute and permits using `predict` directly on this `GridSearchCV` instance. ### More options **A.** Apply this lesson to other datasets. **B.** We predicted the number of bicycle trips based on that day's weather. But imagine you were asked to predict trips at the beginning of each day, based only on data known at the time of prediction or before — so you cannot use the current day's weather. How would you wrangle the features to handle this new requirement? How does this impact the predictive accuracy and coefficients of your models? **C.** In additon to `GridSearchCV`, scikit-learn has [`RandomizedSearchCV`](https://scikit-learn.org/stable/modules/generated/sklearn.model_selection.RandomizedSearchCV.html), which is sometimes even better. Another library called scikit-optimize has [`BayesSearchCV`](https://scikit-optimize.github.io/notebooks/sklearn-gridsearchcv-replacement.html). Experiment with these alternatives. **D.** _[Introduction to Machine Learning with Python](http://shop.oreilly.com/product/0636920030515.do)_ discusses options for "Grid-Searching Which Model To Use" in Chapter 6: > You can even go further in combining GridSearchCV and Pipeline: it is also possible to search over the actual steps being performed in the pipeline (say whether to use StandardScaler or MinMaxScaler). This leads to an even bigger search space and should be considered carefully. Trying all possible solutions is usually not a viable machine learning strategy. However, here is an example comparing a RandomForestClassifier and an SVC ... The example is shown in [the accompanying notebook](https://github.com/amueller/introduction_to_ml_with_python/blob/master/06-algorithm-chains-and-pipelines.ipynb), code cells 35-37. Could you apply this concept to your own pipelines?
github_jupyter
# T1046 - Network Service Scanning Adversaries may attempt to get a listing of services running on remote hosts, including those that may be vulnerable to remote software exploitation. Methods to acquire this information include port scans and vulnerability scans using tools that are brought onto a system. Within cloud environments, adversaries may attempt to discover services running on other cloud hosts. Additionally, if the cloud environment is connected to a on-premises environment, adversaries may be able to identify services running on non-cloud systems as well. ## Atomic Tests ``` #Import the Module before running the tests. # Checkout Jupyter Notebook at https://github.com/cyb3rbuff/TheAtomicPlaybook to run PS scripts. Import-Module /Users/0x6c/AtomicRedTeam/atomics/invoke-atomicredteam/Invoke-AtomicRedTeam.psd1 - Force ``` ### Atomic Test #1 - Port Scan Scan ports to check for listening ports. Upon successful execution, sh will perform a network connection against a single host (192.168.1.1) and determine what ports are open in the range of 1-65535. Results will be via stdout. **Supported Platforms:** linux, macos #### Attack Commands: Run with `sh` ```sh for port in {1..65535}; do echo >/dev/tcp/192.168.1.1/$port && echo "port $port is open" || echo "port $port is closed" : ; done ``` ``` Invoke-AtomicTest T1046 -TestNumbers 1 ``` ### Atomic Test #2 - Port Scan Nmap Scan ports to check for listening ports with Nmap. Upon successful execution, sh will utilize nmap, telnet, and nc to contact a single or range of adresseses on port 80 to determine if listening. Results will be via stdout. **Supported Platforms:** linux, macos #### Dependencies: Run with `sh`! 
##### Description: Check if nmap command exists on the machine ##### Check Prereq Commands: ```sh if [ -x "$(command -v nmap)" ]; then exit 0; else exit 1; fi; ``` ##### Get Prereq Commands: ```sh echo "Install nmap on the machine to run the test."; exit 1; ``` ``` Invoke-AtomicTest T1046 -TestNumbers 2 -GetPreReqs ``` #### Attack Commands: Run with `sh` ```sh nmap -sS 192.168.1.0/24 -p 80 telnet 192.168.1.1 80 nc -nv 192.168.1.1 80 ``` ``` Invoke-AtomicTest T1046 -TestNumbers 2 ``` ### Atomic Test #3 - Port Scan NMap for Windows Scan ports to check for listening ports for the local host 127.0.0.1 **Supported Platforms:** windows Elevation Required (e.g. root or admin) #### Dependencies: Run with `powershell`! ##### Description: NMap must be installed ##### Check Prereq Commands: ```powershell if (cmd /c "nmap 2>nul") {exit 0} else {exit 1} ``` ##### Get Prereq Commands: ```powershell Invoke-WebRequest -OutFile $env:temp\nmap-7.80-setup.exe https://nmap.org/dist/nmap-7.80-setup.exe Start-Process $env:temp\nmap-7.80-setup.exe /S ``` ``` Invoke-AtomicTest T1046 -TestNumbers 3 -GetPreReqs ``` #### Attack Commands: Run with `powershell` ```powershell nmap 127.0.0.1``` ``` Invoke-AtomicTest T1046 -TestNumbers 3 ``` ## Detection System and network discovery techniques normally occur throughout an operation as an adversary learns the environment. Data and events should not be viewed in isolation, but as part of a chain of behavior that could lead to other activities, such as Lateral Movement, based on the information obtained. Normal, benign system and network events from legitimate remote service scanning may be uncommon, depending on the environment and how they are used. Legitimate open port and vulnerability scanning may be conducted within the environment and will need to be deconflicted with any detection capabilities developed. Network intrusion detection systems can also be used to identify scanning activity. 
Monitor for process use of the networks and inspect intra-network flows to detect port scans. ## Shield Active Defense ### Software Manipulation Make changes to a system's software properties and functions to achieve a desired effect. Software Manipulation allows a defender to alter or replace elements of the operating system, file system, or any other software installed and executed on a system. #### Opportunity There is an opportunity for the defender to observe the adversary and control what they can see, what effects they can have, and/or what data they can access. #### Use Case A defender can change the output of a recon commands to hide simulation elements you don’t want attacked and present simulation elements you want the adversary to engage with. #### Procedures Hook the Win32 Sleep() function so that it always performs a Sleep(1) instead of the intended duration. This can increase the speed at which dynamic analysis can be performed when a normal malicious file sleeps for long periods before attempting additional capabilities. Hook the Win32 NetUserChangePassword() and modify it such that the new password is different from the one provided. The data passed into the function is encrypted along with the modified new password, then logged so a defender can get alerted about the change as well as decrypt the new password for use. Alter the output of an adversary's profiling commands to make newly-built systems look like the operating system was installed months earlier. Alter the output of adversary recon commands to not show important assets, such as a file server containing sensitive data.
github_jupyter
# 1-5.2 Python Intro
## conditionals, type, and mathematics extended
- conditionals: `elif`
- casting
- **basic math operators**

-----

><font size="5" color="#00A0B2" face="verdana"> <B>Student will be able to</B></font>
- code more than two choices using `elif`
- gather numeric input using type casting
- **perform subtraction, multiplication and division operations in code**

# &nbsp;
<font size="6" color="#00A0B2" face="verdana"> <B>Concepts</B></font>

## Math basic operators
### `+` addition
### `-` subtraction
### `*` multiplication
### `/` division

[![view video](https://iajupyterprodblobs.blob.core.windows.net/imagecontainer/common/play_video.png)]( http://edxinteractivepage.blob.core.windows.net/edxpages/f7cff1a7-5601-48a1-95a6-fd1fdfabd20e.html?details=[{"src":"http://jupyternootbookwams.streaming.mediaservices.windows.net/5bc97f7e-3015-4178-ac20-371a5302def1/Unit1_Section5.2-Math-operators.ism/manifest","type":"application/vnd.ms-sstr+xml"}],[{"src":"http://jupyternootbookwams.streaming.mediaservices.windows.net/5bc97f7e-3015-4178-ac20-371a5302def1/Unit1_Section5.2-Math-operators.vtt","srclang":"en","kind":"subtitles","label":"english"}])

# &nbsp;
<font size="6" color="#00A0B2" face="verdana"> <B>Examples</B></font>

```
# [ ] review and run example
print("3 + 5 =",3 + 5)
print("3 + 5 - 9 =", 3 + 5 - 9)
print("48/9 =", 48/9)
print("5*5 =", 5*5)
print("(14 - 8)*(19/4) =", (14 - 8)*(19/4))

# [ ] review and run example - 'million_maker'
def million_maker():
    make_big = input("enter a non-decimal number you wish were bigger: ")
    return int(make_big)*1000000

print("Now you have", million_maker())
```

# &nbsp;
<font size="6" color="#B24C00" face="verdana"> <B>Task 1</B></font>

## use math operators to solve the set of tasks below

```
# [ ] print the result of subtracting 15 from 43
# fix: the label says "43 - 15" but the expression computed 43 - 14
print("43 - 15 = ", 43 - 15)

# [ ] print the result of multiplying 15 and 43
print("15 * 43 = ", 15 * 43)

# [ ] print the result of dividing 156 by 12
print("156 / 12 = ", 156 / 12)

# [ ]
print the result of dividing 21 by 0.5 print("21 / 0.5 = ", 21 / 0.5) # [ ] print the result of adding 111 plus 84 and then subtracting 45 # fixed: the "- 45" step was missing from both the label and the expression print("111 + 84 - 45 = ", 111 + 84 - 45) # [ ] print the result of adding 21 and 4 and then multiplying that sum by 4 print("(21 + 4) * 4 = ", (21 + 4) * 4) ``` # &nbsp; <font size="6" color="#B24C00" face="verdana"> <B>Task 2</B></font> ## Program: Multiplying Calculator Function - define function **`multiply()`**, and within the function: - gets user input() of 2 *strings* made of whole numbers - cast the input to **`int()`** - multiply the integers and **return** the equation with result as a **`str()`** - **return** example ```python 9 * 13 = 117 ``` ``` # [ ] create and test multiply() function def multiply(): firstNumber = int(input("Enter a whole number: ")) secondNumber = int(input("Enter a second whole number: ")) output = firstNumber * secondNumber return str(output) # Test the multiply() function print("The answer is: ", multiply()) ``` # &nbsp; <font size="6" color="#B24C00" face="verdana"> <B>Task 3</B></font> ## Project: Improved Multiplying Calculator Function ### putting together conditionals, input casting and math - #### update the multiply() function to multiply or divide - single parameter is **`operator`** with arguments of **`*`** or **`/`** operator - default operator is "*" (multiply) - **return** the result of multiplication or division - if operator other than **`"*"`** or **`"/"`** then **` return "Invalid Operator"`** ``` # [ ] create improved multiply() function and test with /, no argument, and an invalid operator ($) def multiply(operator = "*"): # Check the operator if operator == "*": # get user input for two whole numbers firstNumber = int(input("Enter a whole number: ")) secondNumber = int(input("Enter a second whole number: ")) return str(firstNumber * secondNumber) elif operator == "/": # get user input for two whole numbers firstNumber = int(input("Enter a whole number: ")) secondNumber =
int(input("Enter a second whole number: ")) return str(firstNumber / secondNumber) else: # fixed: spec says return "Invalid Operator" (no trailing period) return "Invalid Operator" # Test with "/", with no argument (defaults to "*"), and with an invalid operator such as "$" # fixed: the operator prompt was commented out, so the "/" and "$" cases were never exercised testCalc = input("Enter an operator: ") print("The answer is:", multiply(testCalc)) ``` # &nbsp; <font size="6" color="#B24C00" face="verdana"> <B>Task 4</B></font> ## Fix the Errors ``` # Review, run, fix student_name = input("enter name: ").capitalize() if student_name.startswith("F"): print(student_name,"Congratulations, names starting with 'F' get to go first today!") elif student_name.startswith("G"): print(student_name,"Congratulations, names starting with 'G' get to go second today!") else: print(student_name, "please wait for students with names staring with 'F' and 'G' to go first today.") ``` [Terms of use](http://go.microsoft.com/fwlink/?LinkID=206977) &nbsp; [Privacy & cookies](https://go.microsoft.com/fwlink/?LinkId=521839) &nbsp; © 2017 Microsoft
github_jupyter
# Heart Disease Prediction Exercise: Predict whether a patient has a heart disease or not. We have a dataset which classifies whether patients have heart disease or not according to the features in it. We will try to use this data to create a model which tries to predict whether a patient has this disease or not. Dataset = https://archive.ics.uci.edu/ml/datasets/Heart+Disease ----- ``` import numpy as np import pandas as pd import matplotlib.pyplot as plt import seaborn as sns from math import pi %matplotlib inline %load_ext autoreload %autoreload 2 ``` ## Load and prepare data ``` import os os.path.abspath(os.getcwd()) data_path='./data/heart.csv' df = pd.read_csv(data_path) print(f"Dataframe shape: {df.shape}") df.head(10) ``` ## Understand the data ``` fig,ax = plt.subplots(figsize=(20, 20)) sns.heatmap(df.corr(), ax=ax, annot=True, linewidths=0.05, fmt= '.2f',cmap="magma") plt.show() print("People having heart diseace vs people who doesn't: \n", df.target.value_counts()) heart_disease = len(df[df['target']==1]) no_heart_disease = len(df[df['target']==0]) labels = ["Heart Diesease", "NO Heart Disease"] sizes = [heart_disease, no_heart_disease] colors = ['skyblue', 'yellowgreen'] plt.figure(figsize=(8,6)) plt.pie(sizes, labels=labels, colors=colors, autopct='%1.2f%%', shadow=True) plt.show() ``` ## Feature Engineering Dummy variables (cp, thal, slope) ``` cp = pd.get_dummies(df['cp'], prefix = "cp") thal = pd.get_dummies(df['thal'], prefix = "thal") slope = pd.get_dummies(df['slope'], prefix = "slope") frames = [df, cp, thal, slope] df = pd.concat(frames, axis = 1) to_drop = ['cp','thal','slope'] df = df.drop(to_drop, axis=1) df.head() df = (df - np.min(df)) / (np.max(df) - np.min(df)).values ``` Get features and target ``` features = df.drop('target',axis =1) targets = df.target.values ``` Split the dataset ``` from sklearn.model_selection import train_test_split x_train, x_test, y_train, y_test = train_test_split(features, targets, test_size = 0.20, random_state=42) ``` Visualize shape
of the dataset and info ``` print(x_train.shape) print(x_test.shape) print(y_train.shape) print(y_test.shape) ``` ## Build models ### First easy model ``` # Imports import numpy as np import tensorflow as tf from tensorflow import keras from tensorflow.keras.models import Sequential from tensorflow.keras.layers import Dense, Dropout, Activation from tensorflow.keras.optimizers import SGD ``` Build the model (4 layers) ``` model = Sequential() # input_shape is only needed on the first layer; Keras infers the rest model.add(Dense(128, activation='relu', input_shape=(x_train.shape[1],))) model.add(Dense(64, activation='relu')) model.add(Dense(32, activation='relu')) model.add(Dense(1, activation='sigmoid')) # fixed: this is a binary classifier with a sigmoid output, so use binary cross-entropy, # not mean squared error; 'accuracy' matches the "model accuracy vs val accuracy" cells below model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy']) model.summary() ``` Tensorboard configuration ``` from datetime import datetime logdir = 'logs/scalars/' + datetime.now().strftime("%d-%m-%Y-%H-%M-%S") tensorboard_callback = tf.keras.callbacks.TensorBoard(log_dir=logdir, histogram_freq=1) ``` Train the model ``` epochs_number = 100 history = model.fit(x_train, y_train, validation_split=0.2, epochs=epochs_number, batch_size=16, verbose=1, callbacks=[tensorboard_callback]) ``` ## Evaluate the model and results evaluate model loss with epochs on train and test Prediction vs original labels ### More complicated model Recreate the input using 2 dim on output ``` print(y_train.shape) print(x_train.shape) ``` Define the new model and compile Fit the model ``` # fit the model to the training data n_epochs = 200 n_batch = 10 ``` Print model accuracy vs val accuracy Print model loss vs val loss # MNIST Example https://keras.io/api/datasets/mnist/ Import the dataset Create the model Create a second more complicated model In this case we use dropout to avoid overfitting: https://machinelearningmastery.com/dropout-for-regularizing-deep-neural-networks/ Tensorboard configuration Train the model (and compile) Compare the results (val loss)
github_jupyter
# Fig.2 - Evolution of MDR Types (w/ 5x3 subpanels) Change plot's default size and font ``` import matplotlib.pyplot as plt plt.rcParams['figure.figsize'] = [20, 12] rc = {"font.family" : "sans-serif", "font.style" : "normal", "mathtext.fontset" : "dejavusans"} plt.rcParams.update(rc) plt.rcParams["font.sans-serif"] = ["Myriad Pro"] + plt.rcParams["font.sans-serif"] ``` You need to define these variables each run to reflect certain drug coverage settings. ``` file_path_cyc = 'raw_data/0p1/set7_c/monthly/set7cf_%smonthly_data_0.txt' file_path_mft = 'raw_data/0p1/set7_m/monthly/set7mf_%smonthly_data_0.txt' file_path_adpcyc = 'raw_data/0p1/set7_ac/monthly/set7acf_%smonthly_data_0.txt' plot_savepath = '' IQR_only=False ``` Load Computed NTF IQR Data ``` # Compute Median Value of NTF from plot_helper import NTF_IQR_compute MFT_ntf = NTF_IQR_compute('raw_data/0p1/set7_m/summary/set7mf_%ssummary_0.txt') Cyc_ntf = NTF_IQR_compute('raw_data/0p1/set7_c/summary/set7cf_%ssummary_0.txt') AC_ntf = NTF_IQR_compute('raw_data/0p1/set7_ac/summary/set7acf_%ssummary_0.txt') ``` Load output files to prepare `dflist` ``` from constant import REPORTDAYS, HEADER_NAME, COLUMNS_TO_DROP import pandas as pd dflist_cyc = [] dflist_mft = [] dflist_adpcyc = [] for i in range(1,101): dflist_cyc.append(pd.read_csv(file_path_cyc % i, index_col=False, \ names=HEADER_NAME, sep='\t').drop(columns=COLUMNS_TO_DROP)) dflist_mft.append(pd.read_csv(file_path_mft % i, index_col=False, \ names=HEADER_NAME, sep='\t').drop(columns=COLUMNS_TO_DROP)) dflist_adpcyc.append(pd.read_csv(file_path_adpcyc % i, index_col=False, \ names=HEADER_NAME, sep='\t').drop(columns=COLUMNS_TO_DROP)) ``` Test plot to get y-limit of each row (NO ANNOTATION) ``` # prepare plt for 5x3 subpanels from plot_helper import xaxis_label_ticker import matplotlib.ticker as ticker xlocator = 5*365 ticks_x = xaxis_label_ticker() fig, axs = plt.subplots(5, 3, sharex='col', sharey='row', gridspec_kw={'hspace': 0, 'wspace': 0}) 
fig.patch.set_facecolor('white') fig.suptitle('Evolution of Multiple-Drug-Resistant Types', y=0.92, fontweight='bold') (ax1, ax2, ax3), (ax4, ax5, ax6), (ax7, ax8, ax9), \ (ax10, ax11, ax12), (ax13, ax14, ax15) = axs # plot trends for most-dangerous-triple (2 types) from plotter import fig2_dangerous_triple, \ fig2_dangerous_double # MFT fig2_dangerous_triple(ax1, dflist_mft, 'TYY..Y2.', IQR_only) fig2_dangerous_triple(ax4, dflist_mft, 'KNF..Y2.', IQR_only) fig2_dangerous_double(ax7, dflist_mft, 'DHA-PPQ', IQR_only) fig2_dangerous_double(ax10, dflist_mft, 'ASAQ', IQR_only) fig2_dangerous_double(ax13, dflist_mft, 'AL', IQR_only) # Cycling fig2_dangerous_triple(ax2, dflist_cyc, 'TYY..Y2.', IQR_only) fig2_dangerous_triple(ax5, dflist_cyc, 'KNF..Y2.', IQR_only) fig2_dangerous_double(ax8, dflist_cyc, 'DHA-PPQ', IQR_only) fig2_dangerous_double(ax11, dflist_cyc, 'ASAQ', IQR_only) fig2_dangerous_double(ax14, dflist_cyc, 'AL', IQR_only) # Adaptive Cycling fig2_dangerous_triple(ax3, dflist_adpcyc, 'TYY..Y2.', IQR_only) fig2_dangerous_triple(ax6, dflist_adpcyc, 'KNF..Y2.', IQR_only) fig2_dangerous_double(ax9, dflist_adpcyc, 'DHA-PPQ', IQR_only) fig2_dangerous_double(ax12, dflist_adpcyc, 'ASAQ', IQR_only) fig2_dangerous_double(ax15, dflist_adpcyc, 'AL', IQR_only) # Labeling Strategies ax1.set_title('MFT') ax2.set_title('5-Year Cycling') ax3.set_title('Adaptive Cycling') ax13.xaxis.set_major_locator(ticker.MultipleLocator(xlocator)) ax13.xaxis.set_major_formatter(ticks_x) ax14.xaxis.set_major_locator(ticker.MultipleLocator(xlocator)) ax14.xaxis.set_major_formatter(ticks_x) ax15.xaxis.set_major_locator(ticker.MultipleLocator(xlocator)) ax15.xaxis.set_major_formatter(ticks_x) ax13.set_xlabel('Year') ax14.set_xlabel('Year') ax15.set_xlabel('Year') (_, row1ylim) = ax1.get_ylim() (_, row2ylim) = ax4.get_ylim() (_, row3ylim) = ax7.get_ylim() (_, row4ylim) = ax10.get_ylim() (_, row5ylim) = ax13.get_ylim() ``` Final Version of Plot ``` #upper_row_lim = max(row1ylim, row2ylim) 
lower_row_lim = max(row3ylim, row4ylim, row5ylim) TITLE_FONTSIZE = 20 XLABEL_FONTSIZE = 20 YLABEL_PADDING = 290 lower_row_lowlim = 0 - lower_row_lim * 0.05 fig, axs = plt.subplots(5, 3, sharex='col', sharey='row', gridspec_kw={'hspace': 0, 'wspace': 0}) fig.patch.set_facecolor('white') fig.suptitle('Evolution of Multiple-Drug-Resistant Types', y=0.95, fontweight='bold', fontsize=TITLE_FONTSIZE) (ax1, ax2, ax3), (ax4, ax5, ax6), (ax7, ax8, ax9), \ (ax10, ax11, ax12), (ax13, ax14, ax15) = axs ax7.set_ylim(lower_row_lowlim, lower_row_lim) ax8.set_ylim(lower_row_lowlim, lower_row_lim) ax9.set_ylim(lower_row_lowlim, lower_row_lim) ax10.set_ylim(lower_row_lowlim, lower_row_lim) ax11.set_ylim(lower_row_lowlim, lower_row_lim) ax12.set_ylim(lower_row_lowlim, lower_row_lim) ax13.set_ylim(lower_row_lowlim, lower_row_lim) ax14.set_ylim(lower_row_lowlim, lower_row_lim) ax15.set_ylim(lower_row_lowlim, lower_row_lim) # plot trends for most-dangerous-triple (2 types combined) from plotter import fig2_dangerous_triple, \ fig2_dangerous_double # MFT fig2_dangerous_triple(ax1, dflist_mft, 'TYY..Y2.', IQR_only, annoty=row1ylim, ntf=MFT_ntf) fig2_dangerous_triple(ax4, dflist_mft, 'KNF..Y2.', IQR_only, annoty=row2ylim) fig2_dangerous_double(ax7, dflist_mft, 'DHA-PPQ', IQR_only, annoty=lower_row_lim) fig2_dangerous_double(ax10, dflist_mft, 'ASAQ', IQR_only, annoty=lower_row_lim) fig2_dangerous_double(ax13, dflist_mft, 'AL', IQR_only, annoty=lower_row_lim) # Cycling fig2_dangerous_triple(ax2, dflist_cyc, 'TYY..Y2.', IQR_only, annoty=row1ylim, ntf=Cyc_ntf) fig2_dangerous_triple(ax5, dflist_cyc, 'KNF..Y2.', IQR_only, annoty=row2ylim) fig2_dangerous_double(ax8, dflist_cyc, 'DHA-PPQ', IQR_only, annoty=lower_row_lim) fig2_dangerous_double(ax11, dflist_cyc, 'ASAQ', IQR_only, annoty=lower_row_lim) fig2_dangerous_double(ax14, dflist_cyc, 'AL', IQR_only, annoty=lower_row_lim) # Adaptive Cycling fig2_dangerous_triple(ax3, dflist_adpcyc, 'TYY..Y2.', IQR_only, annoty=row1ylim, ntf=AC_ntf) 
fig2_dangerous_triple(ax6, dflist_adpcyc, 'KNF..Y2.', IQR_only, annoty=row2ylim) fig2_dangerous_double(ax9, dflist_adpcyc, 'DHA-PPQ', IQR_only, annoty=lower_row_lim) fig2_dangerous_double(ax12, dflist_adpcyc, 'ASAQ', IQR_only, annoty=lower_row_lim) fig2_dangerous_double(ax15, dflist_adpcyc, 'AL', IQR_only, annoty=lower_row_lim) # Labeling Strategies ax1.set_title('MFT', fontsize=TITLE_FONTSIZE) ax2.set_title('5-Year Cycling', fontsize=TITLE_FONTSIZE) ax3.set_title('Adaptive Cycling', fontsize=TITLE_FONTSIZE) ax13.set_xlabel('Year', fontsize=TITLE_FONTSIZE) ax14.set_xlabel('Year', fontsize=TITLE_FONTSIZE) ax15.set_xlabel('Year', fontsize=TITLE_FONTSIZE) ax13.xaxis.set_major_locator(ticker.MultipleLocator(xlocator)) ax13.xaxis.set_major_formatter(ticks_x) ax14.xaxis.set_major_locator(ticker.MultipleLocator(xlocator)) ax14.xaxis.set_major_formatter(ticks_x) ax15.xaxis.set_major_locator(ticker.MultipleLocator(xlocator)) ax15.xaxis.set_major_formatter(ticks_x) # ax1.set_ylabel('Triple-resistant \ngenotypes carrying \n76T, 86Y, Y184, 580Y, \nand double-copy $pfpm2/3$', # multialignment='left', # horizontalalignment='left', rotation=0, # fontsize=XLABEL_FONTSIZE, labelpad=YLABEL_PADDING) # ax4.set_ylabel('Triple-resistant \ngenotypes carrying \nK76, N86, 184F, 580Y, \nand double-copy $pfpm2/3$', # multialignment='left', # horizontalalignment='left', rotation=0, # fontsize=XLABEL_FONTSIZE, labelpad=YLABEL_PADDING) # ax7.set_ylabel('Double-resistant genotypes \nto DHA-PPQ', multialignment='left', # horizontalalignment='left', rotation=0, # fontsize=XLABEL_FONTSIZE, labelpad=YLABEL_PADDING) # ax10.set_ylabel('Double-resistant genotypes \nto ASAQ', multialignment='left', # horizontalalignment='left', rotation=0, # fontsize=XLABEL_FONTSIZE, labelpad=YLABEL_PADDING) # ax13.set_ylabel('Double-resistant genotypes \nto AL', multialignment='left', # horizontalalignment='left', rotation=0, # fontsize=XLABEL_FONTSIZE, labelpad=YLABEL_PADDING) fig.add_subplot(111, frameon=False) # 
hide tick and tick label of the big axis plt.tick_params(labelcolor='none', top=False, bottom=False, left=False, right=False) # add common x- and y-labels plt.ylabel('Genotype Frequency', fontsize=XLABEL_FONTSIZE) if plot_savepath != '': plt.savefig(fname=plot_savepath, format='svg') ```
github_jupyter
# Publications markdown generator for academicpages Takes a TSV of publications with metadata and converts them for use with [academicpages.github.io](academicpages.github.io). This is an interactive Jupyter notebook ([see more info here](http://jupyter-notebook-beginner-guide.readthedocs.io/en/latest/what_is_jupyter.html)). The core python code is also in `publications.py`. Run either from the `markdown_generator` folder after replacing `publications.tsv` with one containing your data. TODO: Make this work with BibTex and other databases of citations, rather than Stuart's non-standard TSV format and citation style. ## Data format The TSV needs to have the following columns: pub_date, title, venue, excerpt, citation, site_url, and paper_url, with a header at the top. - `excerpt` and `paper_url` can be blank, but the others must have values. - `pub_date` must be formatted as YYYY-MM-DD. - `url_slug` will be the descriptive part of the .md file and the permalink URL for the page about the paper. The .md file will be `YYYY-MM-DD-[url_slug].md` and the permalink will be `https://[yourdomain]/publications/YYYY-MM-DD-[url_slug]` This is how the raw file looks (it doesn't look pretty, use a spreadsheet or other program to edit and create). ``` !cat publications.tsv ``` ## Import pandas We are using the very handy pandas library for dataframes. ``` import pandas as pd ``` ## Import TSV Pandas makes this easy with the read_csv function. We are using a TSV, so we specify the separator as a tab, or `\t`. I found it important to put this data in a tab-separated values format, because there are a lot of commas in this kind of data and comma-separated values can get messed up. However, you can modify the import statement, as pandas also has read_excel(), read_json(), and others. 
``` publications = pd.read_csv("publications.tsv", sep="\t", header=0) publications ``` ## Escape special characters YAML is very picky about how it takes a valid string, so we are replacing single and double quotes (and ampersands) with their HTML encoded equivilents. This makes them look not so readable in raw format, but they are parsed and rendered nicely. ``` html_escape_table = { "&": "&amp;", '"': "&quot;", "'": "&apos;" } def html_escape(text): """Produce entities within text.""" return "".join(html_escape_table.get(c,c) for c in text) ``` ## Creating the markdown files This is where the heavy lifting is done. This loops through all the rows in the TSV dataframe, then starts to concatentate a big string (```md```) that contains the markdown for each type. It does the YAML metadata first, then does the description for the individual page. ``` import os for row, item in publications.iterrows(): md_filename = str(item.pub_date) + "-" + item.url_slug + ".md" html_filename = str(item.pub_date) + "-" + item.url_slug year = item.pub_date[:4] ## YAML variables md = "---\ntitle: \"" + item.title + '"\n' md += """collection: publications""" md += """\npermalink: /publication/""" + html_filename if len(str(item.excerpt)) > 5: md += "\nexcerpt: '" + html_escape(item.excerpt) + "'" md += "\ndate: " + str(item.pub_date) md += "\nvenue: '" + html_escape(item.venue) + "'" if len(str(item.paper_url)) > 5: md += "\npaperurl: '" + item.paper_url + "'" md += "\ncitation: '" + html_escape(item.citation) + "'" md += "\n---" ## Markdown description for individual page if len(str(item.excerpt)) > 5: md += "\n" + html_escape(item.excerpt) + "\n" if len(str(item.paper_url)) > 5: md += "\n[Download paper here](" + item.paper_url + ")\n" md += "\ncitation: " + item.citation md_filename = os.path.basename(md_filename) with open("../_publications/" + md_filename, 'w') as f: f.write(md) ``` These files are in the publications directory, one directory below where we're working from. 
``` !ls ../_publications/ !cat ../_publications/2009-10-01-paper-title-number-1.md ```
github_jupyter
<h1><center>Survey Improvement Analysis</center></h1> <center>Rock n Ribs</center> ``` import pandas as pd import numpy as np import matplotlib.pyplot as plt import random %matplotlib inline df_survey = pd.read_csv('./rnr_survey_master_table.csv') df_survey.drop(['Unnamed: 0'],axis=1, inplace=True) df_survey[0:3] df_survey.columns ``` According with our dataset, we can measure some metrics. On this notebook we are going to analyze this metrics from October 2019: 1. Franchise customer distribution 2. Recommendation rate 3. Food & Drink average 4. Price-Quality rate 5. Client retention 6. Advertising media effectiveness ``` # Firstly we must convert to datetime format the column date_created df_survey['date_created'] = pd.to_datetime(df_survey['date_created']) df_survey['date_created'][0] # Create October subset month_in = pd.to_datetime('2019-10-01 00:00:00') month_fn = pd.to_datetime('2019-10-31 23:59:59') df_oct_2019 = df_survey[(df_survey['date_created']>=month_in)&(df_survey['date_created']<=month_fn)] # Create November subset month_in = pd.to_datetime('2019-11-01 00:00:00') month_fn = pd.to_datetime('2019-11-30 23:59:59') df_nov_2019 = df_survey[(df_survey['date_created']>=month_in)&(df_survey['date_created']<=month_fn)] # Create December subset month_in = pd.to_datetime('2019-12-01 00:00:00') month_fn = pd.to_datetime('2019-12-31 23:59:59') df_dec_2019 = df_survey[(df_survey['date_created']>=month_in)&(df_survey['date_created']<=month_fn)] # Create January subset month_in = pd.to_datetime('2020-01-01 00:00:00') month_fn = pd.to_datetime('2020-01-31 23:59:59') df_jan_2020 = df_survey[(df_survey['date_created']>=month_in)&(df_survey['date_created']<=month_fn)] # Create February subset (fixed: 2020, not 2019; 2020 is a leap year so February ends on the 29th) month_in = pd.to_datetime('2020-02-01 00:00:00') month_fn = pd.to_datetime('2020-02-29 23:59:59') df_feb_2020 = df_survey[(df_survey['date_created']>=month_in)&(df_survey['date_created']<=month_fn)] # Create March subset (fixed: 2020, not 2019) month_in = pd.to_datetime('2020-03-01 00:00:00') month_fn = pd.to_datetime('2020-03-31 23:59:59') df_mar_2020 = df_survey[(df_survey['date_created']>=month_in)&(df_survey['date_created']<=month_fn)] ``` <h2>1. Franchise customer distribution</h2> ``` df_oct_2019.isna().sum() # Recomendacion is a column without NaN values so we must use to count customers # (fixed: count(), not sum() — sum() would add up the 0-10 scores instead of counting survey rows) df_oct_2019.groupby('Franquicia').count()['Recomendacion'] ber_cust = df_oct_2019.groupby('Franquicia').count()['Recomendacion'][0] jar_cust = df_oct_2019.groupby('Franquicia').count()['Recomendacion'][1] jur_cust = df_oct_2019.groupby('Franquicia').count()['Recomendacion'][2] ln_cust = df_oct_2019.groupby('Franquicia').count()['Recomendacion'][3] sjr_cust = df_oct_2019.groupby('Franquicia').count()['Recomendacion'][4] sen_cust = df_oct_2019.groupby('Franquicia').count()['Recomendacion'][5] # Franchise customer Pie chart fran = ['Bernardo Quintana, Qro.', 'Jardines de la Hacienda, Qro.', 'Juriquilla, Qro.', 'León, Gto.', 'San Juan del Río, Qro.', 'Sendero, Qro.'] customer_quan = [ber_cust, jar_cust, jur_cust, ln_cust, sjr_cust, sen_cust] colors = ['#00A0B0','#6A4A3C', '#CC333F', '#EB6841', '#EDC951', '#CC333F'] plt.figure(figsize=(10,8)) plt.pie(customer_quan, labels=fran, colors=colors, shadow=True, autopct='%1.1f%%') plt.title('October Customer Distribution') plt.show() for i in range(0, len(customer_quan)): print(f'{fran[i]}: {customer_quan[i]}') ``` <h2>2.
Recommendation rate</h2> ``` # Recomendation Histogram recom = np.array(df_oct_2019['Recomendacion']) bins = list(range(2, 12, 1)) plt.figure(figsize=(16,7)) plt.hist(recom, bins, histtype='bar', rwidth=0.8, color='#00A0B0') plt.title('Recomendation Histogram') plt.ylabel('Frequency') plt.xlabel('Average') plt.grid(linewidth =0.5, linestyle='--') plt.axvline(np.mean(recom), color='red', linestyle='--', label='Mean') plt.axvline(np.mean(recom) + np.std(recom), color='green', linestyle='--', label='+1 std') plt.axvline(np.mean(recom) - np.std(recom), color='green', linestyle='--', label='-1 std') plt.legend(loc='best') plt.show() df_oct_2019['Recomendacion'].describe() ``` According with the Empirical Rule, Rock n Ribs has 68% of his opinion range into a 8 to 10 average. This is a good average, but we need to know why the other customers rated us so low. ``` df_oct_2019[df_oct_2019['Recomendacion']<8].groupby('Franquicia').sum()['Recomendacion'] df_oct_2019.groupby('Franquicia').sum()['Recomendacion'] # Low recomendation rate per Franchise lowrec_ber = df_oct_2019[df_oct_2019['Recomendacion']<8].groupby('Franquicia').sum()['Recomendacion'][0] lowrec_jar = df_oct_2019[df_oct_2019['Recomendacion']<8].groupby('Franquicia').sum()['Recomendacion'][1] lowrec_jur = df_oct_2019[df_oct_2019['Recomendacion']<8].groupby('Franquicia').sum()['Recomendacion'][2] lowrec_ln = df_oct_2019[df_oct_2019['Recomendacion']<8].groupby('Franquicia').sum()['Recomendacion'][3] lowrec_sjr = df_oct_2019[df_oct_2019['Recomendacion']<8].groupby('Franquicia').sum()['Recomendacion'][4] lowrec_sen = df_oct_2019[df_oct_2019['Recomendacion']<8].groupby('Franquicia').sum()['Recomendacion'][5] total_ber = df_oct_2019.groupby('Franquicia').sum()['Recomendacion'][0] total_jar = df_oct_2019.groupby('Franquicia').sum()['Recomendacion'][1] total_jur = df_oct_2019.groupby('Franquicia').sum()['Recomendacion'][2] total_ln = df_oct_2019.groupby('Franquicia').sum()['Recomendacion'][3] total_sjr = 
df_oct_2019.groupby('Franquicia').sum()['Recomendacion'][4] total_sen = df_oct_2019.groupby('Franquicia').sum()['Recomendacion'][5] lowrate_ber = lowrec_ber/total_ber lowrate_jar = lowrec_jar/total_jar lowrate_jur = lowrec_jur/total_jur lowrate_ln = lowrec_ln/total_ln lowrate_sjr = lowrec_sjr/total_sjr lowrate_sen = lowrec_sen/total_sen # Configuring a bar graph x = ['BQ', 'JAR', 'JUR', 'LN', 'SJR', 'SEN'] y = [lowrate_ber, lowrate_jar, lowrate_jur, lowrate_ln, lowrate_sjr, lowrate_sen] plt.figure(figsize=(16,8)) plt.bar(x, y, width=0.8, color=colors) plt.title('Ratio de No Recomendación') plt.ylabel('Porcentaje') plt.show() for i in range(0, len(x)): print(f'{x[i]} : {round(y[i],4)}') ``` We can look that Bernardo Quintana and León franchises have worst recomendation rates, in spite of the mayority bad recomendations come from Sendero. So we must analyze the comments existing. ``` df_oct_2019[df_oct_2019['Recomendacion']<8].groupby('Franquicia')['Comentarios'].count() coms = list(df_oct_2019[df_oct_2019['Recomendacion']<8].groupby('Franquicia')['Comentarios']) bad_com_dict = {} for i in range(0, len(coms)): bad_com_dict[f'{coms[i][0]}'] = coms[i][1] # No recomendation comments from Bernardo Quintana for i in list(bad_com_dict['Bernardo Quintana, Qro.'].keys()): print(bad_com_dict['Bernardo Quintana, Qro.'][i]) # No recomendation comments from Jardines de la Hacienda for i in list(bad_com_dict['Jardines de la Hacienda, Qro.'].keys()): print(bad_com_dict['Jardines de la Hacienda, Qro.'][i]) # No recomendation comments from León for i in list(bad_com_dict['León, Gto.'].keys()): print(bad_com_dict['León, Gto.'][i]) # No recomendation comments from Sendero for i in list(bad_com_dict['Sendero, Qro.'].keys()): print(bad_com_dict['Sendero, Qro.'][i]) ``` <h4>Bernardo Quintana</h4> 1. Mesas sucias <br> <h4>Jardines de la Hacienda</h4> 1. Gerente Alejandra no compensó la falla <br>2. Ha bajado la calidad de carne para hamburguesa (2x) <br> <h4>León</h4> 1. 
Salsas de las alas muy saladas <br>2. Costra de las alas muy gruesa <br>3. Mejoren la calidad de la carne <br> <h4>Sendero</h4> 1. Agregar salsa picantes <br>2. Servicio muy lento (3x) <h2>3. Food and Drink Average</h2> ``` # Total Average Food Count df_food_ = list(df_oct_2019['Comida']) total_e_f = 0 total_b_f = 0 total_r_f = 0 total_m_f = 0 total_na_f = 0 for i in df_food_: if i == 'Excelente': total_e_f += 1 elif i == 'Bueno': total_b_f += 1 elif i == 'Regular': total_r_f += 1 elif i == 'Malo': total_m_f += 1 else: total_na_f += 1 # Total Average Drink Count df_drink_ = list(df_oct_2019['Bebida']) total_e_d = 0 total_b_d = 0 total_r_d = 0 total_m_d = 0 total_na_d = 0 for i in df_drink_: if i == 'Excelente': total_e_d += 1 elif i == 'Bueno': total_b_d += 1 elif i == 'Regular': total_r_d += 1 elif i == 'Malo': total_m_d += 1 else: total_na_d += 1 # Congigure the graph bar plt.figure(figsize=(16,5)) avg = ['Excelente', 'Bueno', 'Regular', 'Malo'] food_list = [total_e_f, total_b_f, total_r_f, total_m_f] drink_list = [total_e_d, total_b_d, total_r_d, total_m_d] plt.bar(avg, food_list, label='Comida', width=0.8, color = '#CC333F') plt.bar(avg, drink_list, label='Bebida', width=0.8, color = '#00A0B0', bottom = food_list) plt.title('Evaluación de Comida y Bebida') plt.ylabel('Clientes') plt.legend(loc='best') plt.show() ``` This month was very positive in terms of food and drink average. Only we need to look if REGULAR or MALO averages provided from all franchises (uniform distribution) or provided from few locals. 
``` # Bad rate from Food per Franchise df_food_outna = df_oct_2019[['Franquicia', 'Comida']].dropna() df_food_res = df_food_outna[(df_food_outna['Comida'] == 'Regular') | (df_food_outna['Comida'] == 'Malo')].groupby('Franquicia').count() df_food_res['Total'] = [ df_food_outna.groupby('Franquicia').count()['Comida'][0], df_food_outna.groupby('Franquicia').count()['Comida'][1], df_food_outna.groupby('Franquicia').count()['Comida'][3], df_food_outna.groupby('Franquicia').count()['Comida'][5] ] df_food_res df_food_res['Rate'] = [ df_food_res['Comida'][0]/df_food_outna.groupby('Franquicia').count()['Comida'][0], df_food_res['Comida'][1]/df_food_outna.groupby('Franquicia').count()['Comida'][1], df_food_res['Comida'][2]/df_food_outna.groupby('Franquicia').count()['Comida'][3], df_food_res['Comida'][3]/df_food_outna.groupby('Franquicia').count()['Comida'][5] ] df_food_res # Configuring a bar graph x_1 = ['BQ', 'JAR', 'LN', 'SEN'] y_1 = list(df_food_res['Rate']) plt.figure(figsize=(16,8)) plt.bar(x_1, y_1, width=0.8, color=colors) plt.title('Ratio de Disguto por Comida') plt.ylabel('Porcentaje') plt.show() for i in range(0, len(x_1)): print(f'{x_1[i]} : {round(y_1[i],4)}') ``` After normalizing the graph according with their total customers who answer this part of the survey, we can note yet that food in León on october 2019 was the worst of all. And if we take a look to customer comments, the events relationate as well. Virtually Leon and Sendero franchise comments ``` # Bad comments from León Franchise for i in list(bad_com_dict['León, Gto.'].keys()): print(bad_com_dict['León, Gto.'][i]) # Bad comments from Sendero Franchise for i in list(bad_com_dict['Sendero, Qro.'].keys()): print(bad_com_dict['Sendero, Qro.'][i]) ``` Other thing that could be important to highlight is that Bernardo Quintana has a hight no recomendation rate but also has a lower bad rate about food. So this could be indicate that customers were disgusted with the service more than the food quality. 
``` # Bad rate from Drink per Franchise df_drink_outna = df_oct_2019[['Franquicia', 'Bebida']].dropna() df_drink_res = df_drink_outna[(df_drink_outna['Bebida'] == 'Regular') | (df_drink_outna['Bebida'] == 'Malo')].groupby('Franquicia').count() df_drink_res['Total'] = [ df_drink_outna.groupby('Franquicia').count()['Bebida'][0], df_drink_outna.groupby('Franquicia').count()['Bebida'][2], df_drink_outna.groupby('Franquicia').count()['Bebida'][3], df_drink_outna.groupby('Franquicia').count()['Bebida'][4], df_drink_outna.groupby('Franquicia').count()['Bebida'][5] ] df_drink_res df_drink_res['Rate'] = [ df_drink_res['Bebida'][0]/df_drink_outna.groupby('Franquicia').count()['Bebida'][0], df_drink_res['Bebida'][1]/df_drink_outna.groupby('Franquicia').count()['Bebida'][2], df_drink_res['Bebida'][2]/df_drink_outna.groupby('Franquicia').count()['Bebida'][3], df_drink_res['Bebida'][3]/df_drink_outna.groupby('Franquicia').count()['Bebida'][4], df_drink_res['Bebida'][4]/df_drink_outna.groupby('Franquicia').count()['Bebida'][5] ] df_drink_res # Configuring a bar graph x_2 = ['BQ', 'JUR', 'LN', 'SJR', 'SEN'] y_2 = list(df_drink_res['Rate']) plt.figure(figsize=(16,8)) plt.bar(x_2, y_2, width=0.8, color=colors) plt.title('Ratio de Disguto por Bebida') plt.ylabel('Porcentaje') plt.show() for i in range(0, len(x_2)): print(f'{x_2[i]} : {round(y_2[i],4)}') #Visualizing in one graph to compare food vs drink dislike y_1 = list(df_food_res['Rate']) y_1.insert(2,0.0) y_1.insert(4,0.0) y_2_pos = list(df_drink_res['Rate']) y_2_pos.insert(1,0.0) y_2 = [] for i in y_2_pos: y_2.append(i*(-1)) x_3 = ['BQ', 'JAR', 'JUR', 'LN', 'SJR', 'SEN'] # Configuring a bar graph plt.figure(figsize=(16,8)) plt.bar(x_3, y_1, facecolor=colors[0], edgecolor='white', label='Comida') plt.bar(x_3, y_2, facecolor=colors[3], edgecolor='white', label='Bebida') for x, y in zip(x_3, y_1): plt.text(x, y + 0.005, '%.2f' % y, ha='center', va= 'bottom') for x, y in zip(x_3, y_2): plt.text(x, y - 0.018, '%.2f' % y, 
ha='center', va= 'bottom') plt.xlim(-.5, 5.5) plt.ylim(-0.25, 0.25) plt.legend(loc='best') plt.title('Ratio de Disgusto General') plt.show() ``` I conclude that Bernardo Quintana and Sendero must be append on yellow light list, but correction on food of Leon franchise is most relevant, because its averages are very lower. <h2>4. Price-Quality rate</h2> ``` df_pq_outna = df_oct_2019[['Franquicia', 'CalidadPrecio']].dropna() # Bernardo Quintana Price-Quality Rate pq_bq_sum = df_pq_outna[df_pq_outna['Franquicia']=='Bernardo Quintana, Qro.'].groupby('CalidadPrecio').count() # Jardines de la Hacienda Price-Quality Rate pq_jh_sum = df_pq_outna[df_pq_outna['Franquicia']=='Jardines de la Hacienda, Qro.'].groupby('CalidadPrecio').count() # Juriquilla Price-Quality Rate pq_jur_sum = df_pq_outna[df_pq_outna['Franquicia']=='Juriquilla, Qro.'].groupby('CalidadPrecio').count() # León Price-Quality Rate pq_ln_sum = df_pq_outna[df_pq_outna['Franquicia']=='León, Gto.'].groupby('CalidadPrecio').count() # San Juan del Río Price-Quality Rate pq_sjr_sum = df_pq_outna[df_pq_outna['Franquicia']=='San Juan del Río, Qro.'].groupby('CalidadPrecio').count() # Sendero Price-Quality Rate pq_sen_sum = df_pq_outna[df_pq_outna['Franquicia']=='Sendero, Qro.'].groupby('CalidadPrecio').count() # Price-Quality Rate per Franchise plt.figure(figsize=(16,8)) plt.subplot(2,3,1) plt.pie(list(pq_bq_sum['Franquicia']), labels=list(pq_bq_sum['Franquicia'].keys()), colors=colors, shadow=True, autopct='%1.1f%%') plt.title('Bernardo Quintana') plt.subplot(2,3,2) plt.pie(list(pq_jh_sum['Franquicia']), labels=list(pq_jh_sum['Franquicia'].keys()), colors=colors, shadow=True, autopct='%1.1f%%') plt.title('Jardines de la Hacienda') plt.subplot(2,3,3) plt.pie(list(pq_jur_sum['Franquicia']), labels=list(pq_jur_sum['Franquicia'].keys()), colors=colors, shadow=True, autopct='%1.1f%%') plt.title('Juriquilla') plt.subplot(2,3,4) plt.pie(list(pq_ln_sum['Franquicia']), labels=list(pq_ln_sum['Franquicia'].keys()), 
colors=colors, shadow=True, autopct='%1.1f%%') plt.title('León') plt.subplot(2,3,5) plt.pie(list(pq_sjr_sum['Franquicia']), labels=list(pq_sjr_sum['Franquicia'].keys()), colors=colors, shadow=True, autopct='%1.1f%%') plt.title('San Juan del Río') plt.subplot(2,3,6) plt.pie(list(pq_sen_sum['Franquicia']), labels=list(pq_sen_sum['Franquicia'].keys()), colors=colors, shadow=True, autopct='%1.1f%%') plt.title('Sendero') plt.show() ``` This metric is very dinamic month a month because the Management Team of Rock n Ribs change his prices of each franchise according to the season, the existing inventory or other events like soccer matchs with the aim to increase his revenue. Looking this graphs we can perceive that Juriquilla and León have the best scores to Price-Quality Rate, maybe because the mean cost of food in there cities is higher than the others franchise cities. <br>With the aim to clarify this hipothesys we take a look into https://www.inegi.org.mx/app/preciospromedio/ to understand the cost of food in these states. ``` # Download our dataset from INEGI.ORG.MX df_cost_food = pd.read_csv('./INP_PP.csv') df_cost_food[0:5] # Filtering according with supplies needed for cooking Rock n Ribs dishes df_cost_food[(df_cost_food['Generico']=='Pan de caja') | (df_cost_food['Generico']=='Carne de res') | (df_cost_food['Generico']=='Carne de cerdo') | (df_cost_food['Generico']=='Pollo') | (df_cost_food['Generico']=='Tocino') | (df_cost_food['Generico']=='Cerveza') | (df_cost_food['Generico']=='Ron') | (df_cost_food['Generico']=='Masa y harinas de maíz') | (df_cost_food['Generico']=='Masa y harinas de maíz')].groupby('Nombre ciudad').describe()['Precio promedio'] ``` Unfortunately does not exist crearly information about cost of food per each township, so we can get data only from principal cities on our states. Even so, we look that the diference in prices between both cities is not relevant. 
Possibly we could develop other analysis about our providers prices to compare with others. Also we could scrapping the costs of our restaurant rivals. <h2>5. Client Retention</h2> ``` # We take Recomendacion column like a reference to count easily without NaN values df_be_cust = pd.DataFrame(df_oct_2019[['Franquicia','TiempoSerCliente','Recomendacion']].groupby(['Franquicia', 'TiempoSerCliente']).count()) df_be_cust keys_be_cust = list(df_be_cust['Recomendacion'].keys()) params_dict = {} for i in range(0, len(keys_be_cust)-1): params_dict[f'{keys_be_cust[i][0]} , {keys_be_cust[i][1]}'] = df_be_cust['Recomendacion'][keys_be_cust[i][0]][keys_be_cust[i][1]] y_bq = [params_dict['Bernardo Quintana, Qro. , Esta es mi primera\xa0visita'], params_dict['Bernardo Quintana, Qro. , Seis meses a un año'], params_dict['Bernardo Quintana, Qro. , 1-2 años'], params_dict['Bernardo Quintana, Qro. , 3 años o más']] y_jh = [params_dict['Jardines de la Hacienda, Qro. , Esta es mi primera\xa0visita'], params_dict['Jardines de la Hacienda, Qro. , Seis meses a un año'], params_dict['Jardines de la Hacienda, Qro. , 1-2 años'], params_dict['Jardines de la Hacienda, Qro. , 3 años o más']] y_jur = [params_dict['Jardines de la Hacienda, Qro. , Esta es mi primera\xa0visita'], params_dict['Jardines de la Hacienda, Qro. , Seis meses a un año'], params_dict['Jardines de la Hacienda, Qro. , 1-2 años'], params_dict['Jardines de la Hacienda, Qro. , 3 años o más']] y_ln = [params_dict['León, Gto. , Esta es mi primera\xa0visita'], params_dict['León, Gto. , Seis meses a un año'], params_dict['León, Gto. , 1-2 años'], 0] y_sjr = [params_dict['San Juan del Río, Qro. , Esta es mi primera\xa0visita'], params_dict['San Juan del Río, Qro. , Seis meses a un año'], params_dict['San Juan del Río, Qro. , 1-2 años'], params_dict['San Juan del Río, Qro. , 3 años o más']] y_sen = [params_dict['Sendero, Qro. , Esta es mi primera\xa0visita'], 0, params_dict['Sendero, Qro. , 1-2 años'], params_dict['Sendero, Qro. 
, 3 años o más']] # Design the graph bar x = ['1st visit', '6m to 1y', '1y to 2y', '3y +'] plt.figure(figsize=(16,5)) plt.plot(x, y_bq, label='Bernardo Quintana', linewidth=2, color=colors[0]) plt.plot(x, y_jh, label='Jardines de la Hacienda', linewidth=2, color=colors[1]) plt.plot(x, y_jur, label='Juriquilla', linewidth=2, color='k') plt.plot(x, y_ln, label='León', linewidth=2, color=colors[3]) plt.plot(x, y_sjr, label='San Juan del Río', linewidth=2, color=colors[4]) plt.plot(x, y_sen, label='Sendero', linewidth=2, color=colors[5]) plt.gca().invert_xaxis() plt.title('Retención de los Clientes') plt.xlabel = ('Tiempo de Ser Cliente') plt.ylabel = ('Clientes') plt.grid(linewidth=0.8, linestyle=':') plt.legend(loc='best') plt.show() ``` Most customer quantity came from Sendero, in specific the customers whose first visit was 1 year to back. Sendero and León had an increment for new clients on October, but the other franchises had a decrease on this topic. The customer retention in general is uniform but partly decreasing, with an exception in Sendero, which retention was been very changeable, in this case we need to look up other retention rate months to have a best look for this event. <h2>6. Advertising media effectiveness</h2> ``` df_oct_2019.columns # Firstly we must to analyze if comments about Other Media Ways are relevant df_oct_2019['OtroMedCon'].dropna() ``` Live or work near to Rock n Ribs restaurants is not relevant; we assume this fact yet. The other comments could group on Recomendation subgroup. The most important comment is from friend of workers, is important because we can create comercial strategies to attract these potential clients. 
``` df_medcon_outna = df_oct_2019[['Franquicia','MedCon', 'Recomendacion']].dropna() df_medcon_outna.groupby(['Franquicia', 'MedCon']).count() df_medcon_outna.groupby(['Franquicia', 'MedCon']).count()['Recomendacion']['Bernardo Quintana, Qro.']['Lo ví'] keys_list_medcon = list(df_medcon_outna.groupby(['Franquicia', 'MedCon']).count()['Recomendacion'].keys()) medcon_dict = {} for key in keys_list_medcon: medcon_dict[f'{key[0]},{key[1]}'] = df_medcon_outna.groupby(['Franquicia', 'MedCon']).count()['Recomendacion'][key[0]][key[1]] bq_watch = medcon_dict['Bernardo Quintana, Qro.,Lo ví'] bq_adv = medcon_dict['Bernardo Quintana, Qro.,Publicidad'] bq_rec = medcon_dict['Bernardo Quintana, Qro.,Recomendación'] bq_sm = medcon_dict['Bernardo Quintana, Qro.,Redes sociales (facebook, twitter, instagram)'] bq_medcon_list = [bq_watch, bq_adv, bq_rec, bq_sm] jh_watch = medcon_dict['Jardines de la Hacienda, Qro.,Lo ví'] jh_adv = medcon_dict['Jardines de la Hacienda, Qro.,Publicidad'] jh_rec = medcon_dict['Jardines de la Hacienda, Qro.,Recomendación'] jh_sm = medcon_dict['Jardines de la Hacienda, Qro.,Redes sociales (facebook, twitter, instagram)'] jh_medcon_list = [jh_watch, jh_adv, jh_rec, jh_sm] jur_watch = medcon_dict['Juriquilla, Qro.,Lo ví'] jur_rec = medcon_dict['Juriquilla, Qro.,Recomendación'] jur_sm = medcon_dict['Juriquilla, Qro.,Redes sociales (facebook, twitter, instagram)'] jur_medcon_list = [jur_watch, jur_rec, jur_sm] ln_watch = medcon_dict['León, Gto.,Lo ví'] ln_adv = medcon_dict['León, Gto.,Publicidad'] ln_rec = medcon_dict['León, Gto.,Recomendación'] ln_sm = medcon_dict['León, Gto.,Redes sociales (facebook, twitter, instagram)'] ln_medcon_list = [ln_watch, ln_adv, ln_rec, ln_sm] sjr_watch = medcon_dict['San Juan del Río, Qro.,Lo ví'] sjr_adv = medcon_dict['San Juan del Río, Qro.,Publicidad'] sjr_rec = medcon_dict['San Juan del Río, Qro.,Recomendación'] sjr_sm = medcon_dict['San Juan del Río, Qro.,Redes sociales (facebook, twitter, instagram)'] sjr_medcon_list = 
[sjr_watch, sjr_adv, sjr_rec, sjr_sm] sen_watch = medcon_dict['Sendero, Qro.,Lo ví'] sen_adv = medcon_dict['Sendero, Qro.,Publicidad'] sen_rec = medcon_dict['Sendero, Qro.,Recomendación'] sen_sm = medcon_dict['Sendero, Qro.,Redes sociales (facebook, twitter, instagram)'] sen_web = medcon_dict['Sendero, Qro.,Página web'] sen_medcon_list = [sen_watch, sen_adv, sen_rec, sen_sm, sen_web] # Customer way to knew Rock n Ribs plt.figure(figsize=(16,8)) plt.subplot(2,3,1) plt.pie(bq_medcon_list , labels=['Lo ví', 'Publicidad', 'Recomendación', 'Social Media'], colors=colors, shadow=True, autopct='%1.1f%%') plt.title('Bernardo Quintana') plt.subplot(2,3,2) plt.pie(jh_medcon_list , labels=['Lo ví', 'Publicidad', 'Recomendación', 'Social Media'], colors=colors, shadow=True, autopct='%1.1f%%') plt.title('Jardines de la Hacienda') plt.subplot(2,3,3) plt.pie(jur_medcon_list , labels=['Lo ví', 'Recomendación', 'Social Media'], colors=colors, shadow=True, autopct='%1.1f%%') plt.title('Juriquilla') plt.subplot(2,3,4) plt.pie(ln_medcon_list , labels=['Lo ví', 'Publicidad', 'Recomendación', 'Social Media'], colors=colors, shadow=True, autopct='%1.1f%%') plt.title('León') plt.subplot(2,3,5) plt.pie(sjr_medcon_list , labels=['Lo ví', 'Publicidad', 'Recomendación', 'Social Media'], colors=colors, shadow=True, autopct='%1.1f%%') plt.title('San Juan del Río') plt.subplot(2,3,6) plt.pie(sen_medcon_list , labels=['Lo ví', 'Publicidad', 'Recomendación', 'Social Media', 'Web'], colors=colors, shadow=True, autopct='%1.1f%%') plt.title('Sendero') plt.show() ``` We observed that Recomendation and Watch on Street are our biggest source of customers. It's logical that Juriquilla had lower Watch on Street rate than the others franchises because it location does not the best. Sendero, instead has very good location on a fluent avenue. 
<br> <br>Social media and advertising campaigns are our second-biggest source of customers. In this analysis, however, the web source performed very poorly, which is a red flag for us: we need to improve the SEO and SEM of our web page to attract more customers from this channel.
github_jupyter
This notebook runs various solvers for a single step and dumps out some variables -- it is intended for unit testing. At the moment, it is required to be at the top-level `pyro/` directory, because not all the functions find pyro's home directory on their own. ``` from pyro import Pyro ``` ## advection ``` solver = "advection" problem_name = "smooth" param_file = "inputs.smooth" other_commands = ["driver.max_steps=1", "mesh.nx=8", "mesh.ny=8"] pyro_sim = Pyro(solver) pyro_sim.initialize_problem(problem_name, param_file, other_commands=other_commands) pyro_sim.run_sim() dens = pyro_sim.sim.cc_data.get_var("density") dens.pretty_print(show_ghost=False) ``` ## advection_nonuniform ``` solver = "advection_nonuniform" problem_name = "slotted" param_file = "inputs.slotted" other_commands = ["driver.max_steps=1", "mesh.nx=8", "mesh.ny=8"] pyro_sim = Pyro(solver) pyro_sim.initialize_problem(problem_name, param_file, other_commands=other_commands) pyro_sim.run_sim() dens = pyro_sim.sim.cc_data.get_var("density") dens.pretty_print(show_ghost=False) ``` ## advection_fv4 ``` solver = "advection_fv4" problem_name = "smooth" param_file = "inputs.smooth" other_commands = ["driver.max_steps=1", "mesh.nx=8", "mesh.ny=8"] pyro_sim = Pyro(solver) pyro_sim.initialize_problem(problem_name, param_file, other_commands=other_commands) pyro_sim.run_sim() dens = pyro_sim.sim.cc_data.get_var("density") dens.pretty_print(show_ghost=False) ``` ## advection_rk ``` solver = "advection_rk" problem_name = "tophat" param_file = "inputs.tophat" other_commands = ["driver.max_steps=1", "mesh.nx=8", "mesh.ny=8"] pyro_sim = Pyro(solver) pyro_sim.initialize_problem(problem_name, param_file, other_commands=other_commands) pyro_sim.run_sim() dens = pyro_sim.sim.cc_data.get_var("density") dens.pretty_print(show_ghost=False) ``` ## compressible ``` solver = "compressible" problem_name = "rt" param_file = "inputs.rt" other_commands = ["driver.max_steps=1", "mesh.nx=8", "mesh.ny=24", "driver.verbose=0", 
"compressible.riemann=CGF"] pyro_sim = Pyro(solver) pyro_sim.initialize_problem(problem_name, param_file, other_commands=other_commands) pyro_sim.run_sim() dens = pyro_sim.sim.cc_data.get_var("density") dens.pretty_print(show_ghost=False) ``` ## compressible_fv4 ``` solver = "compressible_fv4" problem_name = "kh" param_file = "inputs.kh" other_commands = ["driver.max_steps=1", "mesh.nx=8", "mesh.ny=8", "driver.verbose=0"] pyro_sim = Pyro(solver) pyro_sim.initialize_problem(problem_name, param_file, other_commands=other_commands) pyro_sim.run_sim() e = pyro_sim.sim.cc_data.get_var("eint") e.pretty_print(show_ghost=False) ``` ## compressible_rk ``` solver = "compressible_rk" problem_name = "quad" param_file = "inputs.quad" other_commands = ["driver.max_steps=1", "mesh.nx=16", "mesh.ny=16", "driver.verbose=0"] pyro_sim = Pyro(solver) pyro_sim.initialize_problem(problem_name, param_file, other_commands=other_commands) pyro_sim.run_sim() p = pyro_sim.sim.cc_data.get_var("pressure") p.pretty_print(show_ghost=False) ``` ## compressible_sdc ``` solver = "compressible_sdc" problem_name = "sod" param_file = "inputs.sod.y" other_commands = ["driver.max_steps=1", "mesh.nx=4", "mesh.ny=16", "driver.verbose=0"] pyro_sim = Pyro(solver) pyro_sim.initialize_problem(problem_name, param_file, other_commands=other_commands) pyro_sim.run_sim() p = pyro_sim.sim.cc_data.get_var("pressure") p.pretty_print(show_ghost=False) ``` ## diffusion ``` solver = "diffusion" problem_name = "gaussian" param_file = "inputs.gaussian" other_commands = ["driver.max_steps=1", "mesh.nx=16", "mesh.ny=16", "driver.verbose=0"] pyro_sim = Pyro(solver) pyro_sim.initialize_problem(problem_name, param_file, other_commands=other_commands) pyro_sim.run_sim() dens = pyro_sim.sim.cc_data.get_var("phi") dens.pretty_print(show_ghost=False) ``` ## incompressible ``` solver = "incompressible" problem_name = "shear" param_file = "inputs.shear" other_commands = ["driver.max_steps=1", "mesh.nx=8", "mesh.ny=8", 
"driver.verbose=0"] pyro_sim = Pyro(solver) pyro_sim.initialize_problem(problem_name, param_file, other_commands=other_commands) pyro_sim.run_sim() u = pyro_sim.sim.cc_data.get_var("x-velocity") u.pretty_print(show_ghost=False) ``` ## lm_atm ``` solver = "lm_atm" problem_name = "bubble" param_file = "inputs.bubble" other_commands = ["driver.max_steps=1", "mesh.nx=16", "mesh.ny=16", "driver.verbose=0"] pyro_sim = Pyro(solver) pyro_sim.initialize_problem(problem_name, param_file, other_commands=other_commands) pyro_sim.run_sim() v = pyro_sim.sim.cc_data.get_var("y-velocity") v.pretty_print(show_ghost=False, fmt="%10.3g") ```
github_jupyter
# A sample example to tuning the hyperparameters of Prophet classifier is shown as usecase. ``` from mango.tuner import Tuner from mango.domain.distribution import loguniform param_dict = {"changepoint_prior_scale": loguniform(-3, 4), 'seasonality_prior_scale' : loguniform(1, 2) } ``` # userObjective ``` from classifiers.prophet import Prophet from sklearn.metrics import mean_squared_error from sklearn.model_selection import cross_val_score import numpy as np model = Prophet() import os data_path = os.path.abspath('.')+'/classifiers/data/' X_train, y_train =model.load_train_dataset(data_path+"PJME/train_data") X_test, y_test = model.load_train_dataset(data_path+"PJME/test_data") X_validate, y_validate = model.load_train_dataset(data_path+"PJME/validate_data") count_called = 1 def objective_Prophet(args_list): global X_train, y_train,X_validate,y_validate, count_called print('count_called:',count_called) count_called = count_called + 1 hyper_evaluated = [] results = [] for hyper_par in args_list: clf = Prophet(**hyper_par) clf.fit(X_train, y_train.ravel()) y_pred = clf.predict(X_validate) mse = mean_squared_error(y_validate, y_pred) mse = mse/10e5 result = (-1.0) * mse results.append(result) hyper_evaluated.append(hyper_par) return hyper_evaluated, results conf_Dict = dict() conf_Dict['batch_size'] = 2 conf_Dict['num_iteration'] = 10 conf_Dict['initial_random'] = 5 #conf_Dict['domain_size'] = 10000 ``` # Defining Tuner ``` tuner_user = Tuner(param_dict, objective_Prophet,conf_Dict) tuner_user.getConf() import time start_time = time.clock() results = tuner_user.maximize() end_time = time.clock() print(end_time - start_time) ``` # Inspect the results ``` print('best hyper parameters:',results['best_params']) print('best objective:',results['best_objective']) print('Sample hyper parameters tried:',len(results['params_tried'])) print(results['params_tried'][:2]) print('Sample objective values',len(results['objective_values'])) print(results['objective_values'][:5]) ``` 
# Plotting the actual variation in objective values of the tried results ``` Size = 201 from matplotlib import pyplot as plt fig = plt.figure(figsize=(30,5)) plt.title('Variation of Objective',fontsize=20) plt.plot(results['objective_values'][:Size],lw=4,label='BL') plt.xlabel('Iterations', fontsize=25) plt.ylabel('objective_values',fontsize=25) plt.xticks(fontsize=20) plt.yticks(fontsize=20) plt.legend(prop={'size': 30}) plt.show() ``` # Plotting the variation of Max objective values of the tried results ``` Size = 201 import numpy as np results_obj = np.array(results['objective_values']) y_max=[] for i in range(results_obj.shape[0]): y_max.append(np.max(results_obj[:i+1])) from matplotlib import pyplot as plt fig = plt.figure(figsize=(30,5)) plt.title('Max variation of Objective',fontsize=20) plt.plot(y_max[:Size],lw=4,label='BL') plt.xlabel('Iterations', fontsize=25) plt.ylabel('objective_values',fontsize=25) plt.xticks(fontsize=20) plt.yticks(fontsize=20) plt.legend(prop={'size': 30}) plt.show() ``` # See the Result ``` import pprint pp = pprint.PrettyPrinter(indent=4) pp.pprint(results) ``` # See the learned classifier result on the test data ``` model = Prophet(**results['best_params']) model.fit(X_train, y_train.ravel()) y_pred = model.predict(X_test) from matplotlib import pyplot as plt fig = plt.figure(figsize=(30,10)) plt.rcParams.update({'font.size': 18}) plt.plot(X_test,y_test,label='Test') plt.plot(X_test,y_pred,label='Prediction') plt.title('Testing Data') plt.legend() plt.show() ``` # All the Data ``` from classifiers.prophet import Prophet model = Prophet() import os data_path = os.path.abspath('.')+'/classifiers/data/' X_train, y_train =model.load_train_dataset(data_path+"PJME/train_data") X_test, y_test = model.load_train_dataset(data_path+"PJME/test_data") X_validate, y_validate = model.load_train_dataset(data_path+"PJME/validate_data") from matplotlib import pyplot as plt fig = plt.figure(figsize=(30,10)) plt.rcParams.update({'font.size': 18}) 
plt.plot(X_train,y_train,label='Train') plt.plot(X_validate,y_validate,label='validate') plt.plot(X_test,y_test,label='Test') plt.title('All Data') plt.legend() plt.show() ```
github_jupyter
# 23mer Regression analysis ``` import numpy as np import pandas as pd import matplotlib.pyplot as plt from matplotlib import rc from itertools import cycle import pickle import sklearn.manifold from sklearn.metrics import roc_curve, auc, r2_score, mean_squared_error, make_scorer from sklearn.model_selection import train_test_split, StratifiedKFold, GridSearchCV, KFold, cross_validate, cross_val_score from sklearn.preprocessing import label_binarize from sklearn.svm import LinearSVR, SVR from sklearn.feature_selection import SelectFromModel, RFECV, VarianceThreshold from sklearn.linear_model import LinearRegression, Lasso, Ridge, ElasticNet from sklearn.ensemble import GradientBoostingRegressor, RandomForestRegressor from sklearn.preprocessing import StandardScaler, OneHotEncoder from sklearn import linear_model, decomposition from sklearn.pipeline import Pipeline, make_pipeline from yellowbrick.regressor import ResidualsPlot from yellowbrick.features import RFECV as yellowRFECV from sklearn.kernel_ridge import KernelRidge from math import log10, pow, log,sqrt from statistics import mean import scipy.stats as stats #Load data rownames = np.loadtxt(open("/home/pierre/Desktop/Position_Matrix/ML_REG_input23mer_7514.csv", "rb"),delimiter=',', dtype='str')[0,:] names = np.asarray([_ for _ in rownames]) df = np.loadtxt(open("/home/pierre/Desktop/Position_Matrix/ML_REG_input23mer_7514.csv", "rb"),delimiter=',', usecols=range(1,rownames.shape[0]), dtype='float', skiprows=1) sgRNA_seq = np.loadtxt(open("/home/pierre/Desktop/Position_Matrix/ML_REG_input23mer_7514.csv", "rb"),delimiter=',', usecols=0, dtype='str', skiprows=1) #sort by sequence (not by activity/efficiency which is the response variable) indx = sgRNA_seq.argsort() df = df[indx,:] #Standard scale non binary features sc = StandardScaler() df[:,1:26] = sc.fit_transform(df[:,1:26]) #Save standardized dataset pd.DataFrame(np.column_stack((sgRNA_seq, df)), 
columns=names).to_csv("/home/pierre/Desktop/Position_Matrix/ML_REG_input23mer_7514-scaled.csv",index=False) #Number of cpus to use for multithreading n_cpu = 2 ``` ### RFECV ``` #Recursive Feature Elimination with Cross-Validation to remove the irrelevant features modelnames = [ "Linear Regression", "Linear SVR", "l1 Linear Regression", "Gradient-Boosted Decision Tree", "Random Forest" ] model = [ LinearRegression(n_jobs=n_cpu), LinearSVR(random_state=0), Ridge(random_state=0), GradientBoostingRegressor(random_state=0), RandomForestRegressor(random_state=0) ] rfecv = dict() models=dict() datasets=dict() supports = dict() i=0 for name, clf in zip(modelnames, model): model = RFECV(estimator=clf, cv=KFold(n_splits=10, random_state=0), scoring='r2', n_jobs=n_cpu) rfecv[i] = model model.fit(df[:,1:], df[:,0]) X_new = model.transform(df[:,1:]) sup = model.get_support(True) #Insert Column names X_new = pd.DataFrame(X_new, columns=names[1:][sup]) print(X_new.shape) models[i] = model datasets[i] = X_new supports[i] = sup i+=1 # RFECV to plot modelnames = [ "Linear Regression", "Linear SVR", "l2 Linear Regression", "Gradient-Boosted Decision Tree", "Random Forest" ] n_groups = 5 # create plot fig, axes = plt.subplots(nrows=3,ncols=2,figsize=(25,20)) plt.figure() index = np.arange(n_groups) for ax, i in zip(axes.flatten(),range(n_groups)): rfecv_model = rfecv[i] X_newL1 = datasets[i] ax.set_xlabel("Number of features selected", fontsize=20) ax.set_ylabel("Cross validation score (R2 score)", fontsize=20) ax.set_title("%s - Optimal number of features : %d" % (modelnames[i], X_newL1.shape[1]), fontsize=20) ax.plot(range(1, len(rfecv_model.grid_scores_) + 1), rfecv_model.grid_scores_) plt.show() data = { 'rfecv': rfecv, 'datasets': datasets, 'supports': supports } pickle_out = open("23mer_RFECV.pickle","wb") pickle.dump(data, pickle_out) pickle_out.close() # # Uncomment to load the frecv and datasets # pickle_in = open("23mer_RFECV.pickle","rb") # p_load = pickle.load(pickle_in) 
# rfecv = p_load['rfecv'] # datasets = p_load['datasets'] # supports = p_load['supports'] ``` ## Hyperparameter Optimization & Performance Evaluation ``` # find the best hyperparameters for each model and evaluate its performance on the training set models = [LinearRegression(), LinearSVR(), Ridge(), GradientBoostingRegressor(), RandomForestRegressor() ] # set up params tuned_parameters = [{}, {'loss': ['epsilon_insensitive','squared_epsilon_insensitive'], 'epsilon': [0,0.001,0.01,0.1,1], 'C': [0.001,0.01,0.1,1,10,100,1000]}, {'alpha': np.logspace(log10(10e-5),log10(1.5e5),100)}, {'n_estimators':[50,100,150,200], 'max_depth':[2,4,6,8,10], 'min_samples_split':[2,4], 'min_samples_leaf':[1,2], 'max_features':['auto','sqrt','log2']}, {'n_estimators':[50,100,150,200], 'max_depth':[2,4,6,8,10], 'min_samples_split':[2,4], 'min_samples_leaf':[1,2], 'max_features':['auto','sqrt','log2']} ] grid = dict() for i in datasets : print(datasets[i].shape) grid[i] = dict() for j in range(len(models)): print(str(models[j])) print(str(tuned_parameters[j])) print() #Inner CV for parameter Optimization grid[i][j] = GridSearchCV(models[j], tuned_parameters[j], cv=KFold(n_splits=10, shuffle=True, random_state=i+j), scoring='r2',n_jobs=n_cpu).fit(datasets[i], df[:,0]) r2 = dict() r2_adj = dict() RMSE = dict() for i in grid : print(datasets[i].shape) n =(datasets[i].shape[0]) k =(datasets[i].shape[1]) r2[i] = dict() r2_adj[i] = dict() RMSE[i] = dict() for j in range(len(grid[i])): print(str(grid[i][j])) scoreR2 = np.mean(cross_val_score(grid[i][j].best_estimator_, datasets[i], df[:,0], cv=KFold(n_splits=10, shuffle=True, random_state=i+j), scoring='r2', n_jobs=n_cpu)) print('r2 = ',scoreR2) scoreR2_adj =1-(1-scoreR2)*(n-1)/(n-(k+1)) print('r2_adj = ',scoreR2_adj) scoreRMSE = np.mean(cross_val_score(grid[i][j].best_estimator_, datasets[i], df[:,0], cv=KFold(n_splits=10, shuffle=True, random_state=i+j), scoring='neg_mean_squared_error', n_jobs=n_cpu)) print('RMSE = ',sqrt(abs(scoreRMSE))) 
r2[i][j] = scoreR2 r2_adj[i][j] = scoreR2_adj RMSE[i][j] = sqrt(abs(scoreRMSE)) print('_____________') data = { 'grid': grid, 'r2': r2, 'r2_adj': r2_adj, 'RMSE': RMSE } pickle_out = open("23mer_GRID&Perf.pickle","wb") pickle.dump(data, pickle_out) pickle_out.close() # #Uncomment to load performance # pickle_in = open("23mer_GRID&Perf.pickle","rb") # p_load = pickle.load(pickle_in) # grid = p_load['grid'] # r2 = p_load['r2'] # r2_adj = p_load['r2_adj'] # RMSE = p_load['RMSE'] ``` # Plot the performance comparison on the training set ``` # data to plot n_groups = 5 metrics = {'r2':r2, 'r2_adj':r2_adj, 'RMSE':RMSE} # create plot fig, axes = plt.subplots(nrows=3,ncols=1,figsize=(15,20)) index = np.arange(n_groups) bar_width = 0.14 opacity = 0.8 i=0 for ax, v in zip(axes.flatten(),metrics.keys()): FS_LinReg = list() FS_LinSVR = list() FS_Ridge = list() FS_GBRT = list() FS_RF = list() for i in range(len(metrics[v])): FS_LinReg.append(metrics[v][i][0]) FS_LinSVR.append(metrics[v][i][1]) FS_Ridge.append(metrics[v][i][2]) FS_GBRT.append(metrics[v][i][3]) FS_RF.append(metrics[v][i][4]) FS_LinReg = tuple(FS_LinReg) FS_LinSVR = tuple(FS_LinSVR) FS_Ridge = tuple(FS_Ridge) FS_GBRT = tuple(FS_GBRT) FS_RF = tuple(FS_RF) rects1 = ax.bar(index, FS_LinReg, bar_width, align = 'center', alpha=opacity, color='blue', label='LinReg') rects2 = ax.bar(index + bar_width, FS_LinSVR, bar_width, align = 'center', alpha=opacity, color='red', label='LinSVR') rects5 = ax.bar(index + bar_width*2, FS_Ridge, bar_width, align = 'center', alpha=opacity, color='purple', label='Ridge') rects6 = ax.bar(index + bar_width*3, FS_GBRT, bar_width, align = 'center', alpha=opacity, color='orange', label='GBRT') rects7 = ax.bar(index + bar_width*4, FS_RF, bar_width, align = 'center', alpha=opacity, color='yellow', label='RF') ax.set_xlabel('Regression Models',fontsize=20) ax.set_ylabel(v,fontsize=20) ax.set_title('%s of different ML models' % v,fontsize=20) ax.set_xticks(index + bar_width*3) 
ax.set_xticklabels(['FS_LinReg %i ' % datasets[0].shape[1], 'FS_LinSVR %i ' % datasets[1].shape[1], 'FS_Ridge %i ' % datasets[2].shape[1], 'FS_GBRT %i ' % datasets[3].shape[1], 'FS_RF %i ' % datasets[4].shape[1]], fontsize=20) ax.legend(fontsize=15,loc='upper right', bbox_to_anchor=(1.12, 1), ncol=1) i+=1 plt.tight_layout() plt.show() ``` ## Save Best Model ``` data = { 'model': grid[2][2].best_estimator_, 'df_indexes': supports[2] } pickle_out = open("23mer_135FS_Ridge_REGmodel.pickle","wb") pickle.dump(data, pickle_out) pickle_out.close() # # Uncomment ot lead the best model # pickle_in = open("23mer_135FS_Ridge_REGmodel.pickle","rb") # p_load = pickle.load(pickle_in) # Model = p_load['model'] # idx = p_load['df_indexes'] ``` # Residual plot Analysis ``` # Create the train and test data X_train, X_test, y_train, y_test = train_test_split(datasets[2], df[:,0], test_size=0.2) model = Model visualizer = ResidualsPlot(model) visualizer.fit(X_train, y_train) # Fit the training data to the model visualizer.score(X_test, y_test) # Evaluate the model on the test data visualizer.poof() ``` ## feature importance ``` df_importance = pd.DataFrame(grid[2][3].best_estimator_.feature_importances_, datasets[2].columns) with pd.option_context('display.max_rows', None, 'display.max_columns', 3): print(df_importance) pos_indep_order1 = 0 print(pos_indep_order1) pos_dep_order1 = sum(df_importance.iloc[3:31][0]) print(pos_dep_order1) pos_indep_order2 = sum(df_importance.iloc[0:3][0]) print(pos_indep_order2) pos_dep_order2 = sum(df_importance.iloc[31:][0]) print(pos_dep_order2) #PAM_bounds = sum(df_importance.iloc[197:][0]) #print(PAM_bounds) print(sum((pos_indep_order1,pos_indep_order2,pos_dep_order1,pos_dep_order2 #,PAM_bounds ))) pos = np.arange(4) + .5 plt.subplot(1, 2, 2) plt.barh(pos,(pos_indep_order1,pos_indep_order2,pos_dep_order1,pos_dep_order2 #,PAM_bounds ), align='center') plt.yticks(pos, ('pos_indep_order1','pos_indep_order2','pos_dep_order1','pos_dep_order2' 
#,'PAM_bounds' )) plt.xlabel('Importance') plt.title('23mer GBRT Variable Importance') plt.show() ``` ## Guide efficiency prediction Performance ### Training set ``` scoredf = np.loadtxt(open("/home/pierre/Desktop/Position_Matrix/Analysis-23mer_sgRNA_7514predictions.csv", "rb"), delimiter=',', usecols=(1,2), dtype='float', skiprows=1) indx = scoredf[:,1].argsort() scoredf = scoredf[indx,:] dic_scores = dict() k=0 for i in np.arange(0,1,0.1): dic_scores[k]= scoredf[np.where((scoredf[:,1]>=i) & (scoredf[:,1]<i+0.1)),] k+=1 #print(dic_scores) npRed = list() npOrange = list() npYellow = list() npGreen = list() for i in dic_scores: npRed.append(len(np.where((dic_scores[i][0][:,0]>=0) & (dic_scores[i][0][:,0]<0.25))[0])/len(dic_scores[i][0][:,0])) npOrange.append(len(np.where((dic_scores[i][0][:,0]>=0.25) & (dic_scores[i][0][:,0]<0.5))[0])/len(dic_scores[i][0][:,0])) npYellow.append(len(np.where((dic_scores[i][0][:,0]>=0.5) & (dic_scores[i][0][:,0]<0.75))[0])/len(dic_scores[i][0][:,0])) npGreen.append(len(np.where((dic_scores[i][0][:,0]>=0.75) & (dic_scores[i][0][:,0]<1.0))[0])/len(dic_scores[i][0][:,0])) # Data r = [0,1,2,3,4,5,6,7,8,9] raw_data = {'greenBars': npGreen, 'yellowBars': npYellow, 'orangeBars': npOrange, 'redBars': npRed } df = pd.DataFrame(raw_data) # From raw value to percentage totals = [i+j+k+l for i,j,k,l in zip(df['greenBars'], df['yellowBars'], df['orangeBars'], df['redBars'])] greenBars = [i / j for i,j in zip(df['greenBars'], totals)] yellowBars = [i / j for i,j in zip(df['yellowBars'], totals)] orangeBars = [i / j for i,j in zip(df['orangeBars'], totals)] redBars = [i / j for i,j in zip(df['redBars'], totals)] # plot plt.figure(figsize=(20,10)) plt.rc('axes', titlesize=20) plt.rc('xtick', labelsize=20) plt.rc('ytick', labelsize=20) barWidth = 0.85 names = ('0.0-0.1\nn=%s' % len(dic_scores[0][0][:,0]), '0.1-0.2\nn=%s' % len(dic_scores[1][0][:,0]), '0.2-0.3\nn=%s' % len(dic_scores[2][0][:,0]), '0.3-0.4\nn=%s' % len(dic_scores[3][0][:,0]), 
'0.4-0.5\nn=%s' % len(dic_scores[4][0][:,0]), '0.5-0.6\nn=%s' % len(dic_scores[5][0][:,0]), '0.6-0.7\nn=%s' % len(dic_scores[6][0][:,0]), '0.7-0.8\nn=%s' % len(dic_scores[7][0][:,0]), '0.8-0.9\nn=%s' % len(dic_scores[8][0][:,0]), '0.9-1.0\nn=%s' % len(dic_scores[9][0][:,0])) # Create green Bars plt.bar(r, greenBars, color='g', edgecolor='black', width=barWidth) # Create yellow Bars plt.bar(r, yellowBars, bottom=greenBars, color='yellow', edgecolor='black', width=barWidth) # Create orange Bars plt.bar(r, orangeBars, bottom=[i+j for i,j in zip(greenBars, yellowBars)], color='orange', edgecolor='black', width=barWidth) # Create red Bars plt.bar(r, redBars, bottom=[i+j+k for i,j,k in zip(greenBars, yellowBars,orangeBars)], color='red', edgecolor='black', width=barWidth) # Custom x axis plt.xticks(r, names) plt.title("23mer efficiency on training set") plt.xlabel("dMel efficiency prediction", fontsize=20) plt.ylabel("True efficieny quartiles", fontsize=20) # Show graphic plt.show() ``` ### Testing set ``` scoredf = np.loadtxt(open("/home/pierre/Desktop/Position_Matrix/Analysis-Test_Set_Droso.csv", "rb"), delimiter=',', usecols=(4,6), dtype='float', skiprows=1) indx = scoredf[:,1].argsort() scoredf = scoredf[indx,:] dic_scores = dict() k=0 for i in np.arange(0.1,0.9,0.1): dic_scores[k]= scoredf[np.where((scoredf[:,1]>=i) & (scoredf[:,1]<i+0.1)),] k+=1 #print(dic_scores) npRed = list() npOrange = list() npYellow = list() npGreen = list() for i in dic_scores: npRed.append(len(np.where((dic_scores[i][0][:,0]>=0) & (dic_scores[i][0][:,0]<0.25))[0])/len(dic_scores[i][0][:,0])) npOrange.append(len(np.where((dic_scores[i][0][:,0]>=0.25) & (dic_scores[i][0][:,0]<0.5))[0])/len(dic_scores[i][0][:,0])) npYellow.append(len(np.where((dic_scores[i][0][:,0]>=0.5) & (dic_scores[i][0][:,0]<0.75))[0])/len(dic_scores[i][0][:,0])) npGreen.append(len(np.where((dic_scores[i][0][:,0]>=0.75) & (dic_scores[i][0][:,0]<1.0))[0])/len(dic_scores[i][0][:,0])) # Data r = [1,2,3,4,5,6,7,8] raw_data = 
{'greenBars': npGreen, 'yellowBars': npYellow, 'orangeBars': npOrange, 'redBars': npRed } df = pd.DataFrame(raw_data) # From raw value to percentage totals = [i+j+k+l for i,j,k,l in zip(df['greenBars'], df['yellowBars'], df['orangeBars'], df['redBars'])] greenBars = [i / j for i,j in zip(df['greenBars'], totals)] yellowBars = [i / j for i,j in zip(df['yellowBars'], totals)] orangeBars = [i / j for i,j in zip(df['orangeBars'], totals)] redBars = [i / j for i,j in zip(df['redBars'], totals)] # plot plt.figure(figsize=(20,10)) plt.rc('axes', titlesize=20) plt.rc('xtick', labelsize=20) plt.rc('ytick', labelsize=20) barWidth = 0.85 names = ( '0.1-0.2\nn=%s' % len(dic_scores[0][0][:,0]), '0.2-0.3\nn=%s' % len(dic_scores[1][0][:,0]), '0.3-0.4\nn=%s' % len(dic_scores[2][0][:,0]), '0.4-0.5\nn=%s' % len(dic_scores[3][0][:,0]), '0.5-0.6\nn=%s' % len(dic_scores[4][0][:,0]), '0.6-0.7\nn=%s' % len(dic_scores[5][0][:,0]), '0.7-0.8\nn=%s' % len(dic_scores[6][0][:,0]), '0.8-0.9\nn=%s' % len(dic_scores[7][0][:,0])) # Create green Bars plt.bar(r, greenBars, color='g', edgecolor='black', width=barWidth) # Create yellow Bars plt.bar(r, yellowBars, bottom=greenBars, color='yellow', edgecolor='black', width=barWidth) # Create orange Bars plt.bar(r, orangeBars, bottom=[i+j for i,j in zip(greenBars, yellowBars)], color='orange', edgecolor='black', width=barWidth) # Create red Bars plt.bar(r, redBars, bottom=[i+j+k for i,j,k in zip(greenBars, yellowBars,orangeBars)], color='red', edgecolor='black', width=barWidth) # Custom x axis plt.xticks(r, names) plt.title("23mer efficiency on testing set") plt.xlabel("dMel efficiency prediction", fontsize=20) plt.ylabel("True efficieny quartiles", fontsize=20) # Show graphic plt.show() ```
github_jupyter
## CSc 4222 - Cyber Security | Assignment 2 ### Bryan W. Nonni ### Password Salt System Implementation and Brutal Force Cracker ### 1. Implementation of the Password Salt System In this section, students are required to implement a password salt verification system. With the given UID and Hash files, students need to implement the verification system, such that the given example of the password and salt can match with the hash value in the `Hash.txt` file. For example, the first`UID` is `001`, the `password` is `0599`, the salt associated with the first `UID` is `054`. When applying the MD5 Hash Function with the encode format as `utf-8` as shown in the figure below, the expected output should be `4a1d6f102cd95fac33853e4d72fe1dc5`. It is worth to mention that, the concatenation between password and salt needs to be in the format of `(password||salt)`. For example, with the aforementioned input, the concatenation result will be `0599054`. 0 should not be omitted. __Requirement for the designed system:__ The designed verification system should be able to correctly verify the example shown above. When the input is correct, the system will output a String “The input password and salt matches the hash value in the database”. Otherwise, the output should be “The input password and salt does not match the hash value in the database”. ``` from hashlib import md5 import pandas as pd Hash = open('Hash.txt', 'r') UID = open('UID.txt', 'r') hash_dictionary = { 'uid': UID, 'hash': Hash } hash_df = pd.DataFrame(hash_dictionary).replace('\n', '', regex=True) hash_df.head(5) def computeMD5hash(pwsalt): m = md5() m.update(pwsalt.encode('utf-8')) return m.hexdigest() uid001_hash = '4a1d6f102cd95fac33853e4d72fe1dc5' compute_hash = computeMD5hash('0599 054') print(uid001_hash, "matches", compute_hash, "=>", True) if uid001_hash == compute_hash else print(False) ``` ### 2. 
Implementation of the Cracker System To reduce the complexity for cracking the password and salt, the passwords are randomly set in the range of `[0000, 1000]`, while the salt is randomly set in the range of `[000,100]` for each `UID`. One easy idea to implement a cracker system is to brute-forcely try all possible combinations of password and salt for one UID. As the `Hash.txt` and `UID.txt` files are given, students are requested to implement a cracker system which could find the correct password and salt for a specific `UID`. __Requirement for the designed system:__ For a specific `UID`, the cracker system can output the correct password and salt value. For example, when input the `UID` as `001`, the output should be `password: 0599; salt: 054`. __Demo and Report:__ __1)__ Each student is required to go to either TA or instructor to demo both systems. The TA or instructor will ask the students to run one or two specific UID(s) to check the corresponding password and salt. __2)__ The report should firstly describe how these two systems are designed; secondly, the report should also include the set of passwords and salts for ten different UIDs. <a href='./Report.txt'>Bryan's Report</a> __3)__ For undergraduate students, the verification and cracker systems can be designed separately. For graduate students, the cracker system should include the function of verification system. ``` salt = [f"{i:03}" for i in range(1000)] password = [f"{i:04}" for i in range(10000)] def getUidHash(UID): j = 0 Hash = hash_df.loc[hash_df.uid == UID, 'hash'].values Hash = Hash[-1] print('uid', UID, 'Hash:',Hash) while(j < len(hash_df)): for p in password: for s in salt: pass_salt = p + s #print(pass_salt) md5_hash = computeMD5hash(pass_salt) #print(md5_hash) if md5_hash == Hash: return 'Match! 
uid: {}; password: {}; salt: {}; hash: {}'.format(UID, p, s, Hash) else: pass j+=1 getUidHash('059') getUidHash('002') getUidHash('003') getUidHash('004') def executeBruteForceAttack(): i = 0 while(i < len(hash_df)): uid = hash_df['uid'][i] Hash = hash_df['hash'][i] print(Hash) for p in password: for s in salt: pass_salt = p + s md5_hash = computeMD5hash(pass_salt) if md5_hash == Hash: print("Match! uid: {}; password: {}; salt: {}; hash: {}\n".format(uid, p, s, Hash)) else: pass i+=1 executeBruteForceAttack() ```
github_jupyter
# CNN for Classification --- In this notebook, we define **and train** an CNN to classify images from the [Fashion-MNIST database](https://github.com/zalandoresearch/fashion-mnist). We are providing two solutions to show you how different network structures and training strategies can affect the performance and accuracy of a CNN. This second solution will be a CNN with two convolutional layers **and** additional fully-connected and dropout layers to avoid overfitting the data and gradient descent with momentum to avoid reaching a local minimum. The batch size and number of epochs to train are the same as in the first example solution so that you can see how the structure of the network and loss hyperparameters have affected the accuracy of the model! Again, this is just one possible solution out of many. ### Load the [data](http://pytorch.org/docs/master/torchvision/datasets.html) In this cell, we load in both **training and test** datasets from the FashionMNIST class. ``` # our basic libraries import torch import torchvision # data loading and transforming from torchvision.datasets import FashionMNIST from torch.utils.data import DataLoader from torchvision import transforms # The output of torchvision datasets are PILImage images of range [0, 1]. 
# We transform them to Tensors for input into a CNN ## Define a transform to read the data in as a tensor data_transform = transforms.ToTensor() # choose the training and test datasets train_data = FashionMNIST(root='./data', train=True, download=True, transform=data_transform) test_data = FashionMNIST(root='./data', train=False, download=True, transform=data_transform) # Print out some stats about the training and test data print('Train data, number of images: ', len(train_data)) print('Test data, number of images: ', len(test_data)) # prepare data loaders, set the batch_size ## TODO: you can try changing the batch_size to be larger or smaller ## when you get to training your network, see how batch_size affects the loss batch_size = 20 train_loader = DataLoader(train_data, batch_size=batch_size, shuffle=True) test_loader = DataLoader(test_data, batch_size=batch_size, shuffle=True) # specify the image classes classes = ['T-shirt/top', 'Trouser', 'Pullover', 'Dress', 'Coat', 'Sandal', 'Shirt', 'Sneaker', 'Bag', 'Ankle boot'] ``` ### Visualize some training data This cell iterates over the training dataset, loading a random batch of image/label data, using `dataiter.next()`. It then plots the batch of images and labels in a `2 x batch_size/2` grid. ``` import numpy as np import matplotlib.pyplot as plt %matplotlib inline # obtain one batch of training images dataiter = iter(train_loader) images, labels = dataiter.next() images = images.numpy() # plot the images in the batch, along with the corresponding labels fig = plt.figure(figsize=(25, 4)) for idx in np.arange(batch_size): ax = fig.add_subplot(2, batch_size/2, idx+1, xticks=[], yticks=[]) ax.imshow(np.squeeze(images[idx]), cmap='gray') ax.set_title(classes[labels[idx]]) ``` ### Define the network architecture The various layers that make up any neural network are documented, [here](http://pytorch.org/docs/master/nn.html). 
For a convolutional neural network, we'll use a simple series of layers: * Convolutional layers * Maxpooling layers * Fully-connected (linear) layers You are also encouraged to look at adding [dropout layers](http://pytorch.org/docs/stable/nn.html#dropout) to avoid overfitting this data. --- To define a neural network in PyTorch, you define the layers of a model in the function `__init__` and define the feedforward behavior of a network that employs those initialized layers in the function `forward`, which takes in an input image tensor, `x`. The structure of this Net class is shown below and left for you to fill in. Note: During training, PyTorch will be able to perform backpropagation by keeping track of the network's feedforward behavior and using autograd to calculate the update to the weights in the network. #### Define the Layers in ` __init__` As a reminder, a conv/pool layer may be defined like this (in `__init__`): ``` # 1 input image channel (for grayscale images), 32 output channels/feature maps, 3x3 square convolution kernel self.conv1 = nn.Conv2d(1, 32, 3) # maxpool that uses a square window of kernel_size=2, stride=2 self.pool = nn.MaxPool2d(2, 2) ``` #### Refer to Layers in `forward` Then referred to in the `forward` function like this, in which the conv1 layer has a ReLu activation applied to it before maxpooling is applied: ``` x = self.pool(F.relu(self.conv1(x))) ``` You must place any layers with trainable weights, such as convolutional layers, in the `__init__` function and refer to them in the `forward` function; any layers or functions that always behave in the same way, such as a pre-defined activation function, may appear *only* in the `forward` function. In practice, you'll often see conv/pool layers defined in `__init__` and activations defined in `forward`. 
#### Convolutional layer The first convolution layer has been defined for you, it takes in a 1 channel (grayscale) image and outputs 10 feature maps as output, after convolving the image with 3x3 filters. #### Flattening Recall that to move from the output of a convolutional/pooling layer to a linear layer, you must first flatten your extracted features into a vector. If you've used the deep learning library, Keras, you may have seen this done by `Flatten()`, and in PyTorch you can flatten an input `x` with `x = x.view(x.size(0), -1)`. ### TODO: Define the rest of the layers It will be up to you to define the other layers in this network; we have some recommendations, but you may change the architecture and parameters as you see fit. Recommendations/tips: * Use at least two convolutional layers * Your output must be a linear layer with 10 outputs (for the 10 classes of clothing) * Use a dropout layer to avoid overfitting ### A note on output size For any convolutional layer, the output feature maps will have the specified depth (a depth of 10 for 10 filters in a convolutional layer) and the dimensions of the produced feature maps (width/height) can be computed as the _input image_ width/height, W, minus the filter size, F, divided by the stride, S, all + 1. The equation looks like: `output_dim = (W-F)/S + 1`, for an assumed padding size of 0. You can find a derivation of this formula, [here](http://cs231n.github.io/convolutional-networks/#conv). For a pool layer with a size 2 and stride 2, the output dimension will be reduced by a factor of 2. Read the comments in the code below to see the output size for each layer. 
``` import torch.nn as nn import torch.nn.functional as F class Net(nn.Module): def __init__(self): super(Net, self).__init__() # 1 input image channel (grayscale), 10 output channels/feature maps # 3x3 square convolution kernel ## output size = (W-F)/S +1 = (28-3)/1 +1 = 26 # the output Tensor for one image, will have the dimensions: (10, 26, 26) # after one pool layer, this becomes (10, 13, 13) self.conv1 = nn.Conv2d(1, 10, 3) # maxpool layer # pool with kernel_size=2, stride=2 self.pool = nn.MaxPool2d(2, 2) # second conv layer: 10 inputs, 20 outputs, 3x3 conv ## output size = (W-F)/S +1 = (13-3)/1 +1 = 11 # the output tensor will have dimensions: (20, 11, 11) # after another pool layer this becomes (20, 5, 5); 5.5 is rounded down self.conv2 = nn.Conv2d(10, 20, 3) # 20 outputs * the 5*5 filtered/pooled map size self.fc1 = nn.Linear(20*5*5, 50) # dropout with p=0.4 self.fc1_drop = nn.Dropout(p=0.4) # finally, create 10 output channels (for the 10 classes) self.fc2 = nn.Linear(50, 10) # define the feedforward behavior def forward(self, x): # two conv/relu + pool layers x = self.pool(F.relu(self.conv1(x))) x = self.pool(F.relu(self.conv2(x))) # prep for linear layer # this line of code is the equivalent of Flatten in Keras x = x.view(x.size(0), -1) # two linear layers with dropout in between x = F.relu(self.fc1(x)) x = self.fc1_drop(x) x = self.fc2(x) # final output return x # instantiate and print your Net net = Net() print(net) ``` ### TODO: Specify the loss function and optimizer Learn more about [loss functions](http://pytorch.org/docs/master/nn.html#loss-functions) and [optimizers](http://pytorch.org/docs/master/optim.html) in the online documentation. Note that for a classification problem like this, one typically uses cross entropy loss, which can be defined in code like: `criterion = nn.CrossEntropyLoss()`. PyTorch also includes some standard stochastic optimizers like stochastic gradient descent and Adam. 
You're encouraged to try different optimizers and see how your model responds to these choices as it trains.

```
import torch.optim as optim

## TODO: specify loss function
# using cross entropy which combines softmax and NLL loss
criterion = nn.CrossEntropyLoss()

## TODO: specify optimizer
# stochastic gradient descent with a small learning rate AND some momentum
optimizer = optim.SGD(net.parameters(), lr=0.001, momentum=0.9)
```

### A note on accuracy

It's interesting to look at the accuracy of your network **before and after** training. This way you can really see that your network has learned something. In the next cell, let's see what the accuracy of an untrained network is (we expect it to be around 10% which is the same accuracy as just guessing for all 10 classes).

```
# Calculate accuracy before training
correct = 0
total = 0

# Iterate through test dataset
for images, labels in test_loader:

    # forward pass to get outputs
    # the outputs are a series of class scores
    outputs = net(images)

    # get the predicted class from the maximum value in the output-list of class scores
    _, predicted = torch.max(outputs.data, 1)

    # count up total number of correct labels
    # for which the predicted and true labels are equal
    total += labels.size(0)
    correct += (predicted == labels).sum()

# calculate the accuracy
# to convert `correct` from a Tensor into a scalar, use .item()
accuracy = 100.0 * correct.item() / total

# print it out!
print('Accuracy before training: ', accuracy)
```

### Train the Network

Below, we've defined a `train` function that takes in a number of epochs to train for.
* The number of epochs is how many times a network will cycle through the entire training dataset.
* Inside the epoch loop, we loop over the training dataset in batches; recording the loss every 1000 batches.

Here are the steps that this training function performs as it iterates over the training dataset:

1. Zero's the gradients to prepare for a forward pass
2.
Passes the input through the network (forward pass) 3. Computes the loss (how far is the predicted classes are from the correct labels) 4. Propagates gradients back into the network’s parameters (backward pass) 5. Updates the weights (parameter update) 6. Prints out the calculated loss ``` def train(n_epochs): loss_over_time = [] # to track the loss as the network trains for epoch in range(n_epochs): # loop over the dataset multiple times running_loss = 0.0 for batch_i, data in enumerate(train_loader): # get the input images and their corresponding labels inputs, labels = data # zero the parameter (weight) gradients optimizer.zero_grad() # forward pass to get outputs outputs = net(inputs) # calculate the loss loss = criterion(outputs, labels) # backward pass to calculate the parameter gradients loss.backward() # update the parameters optimizer.step() # print loss statistics # to convert loss into a scalar and add it to running_loss, we use .item() running_loss += loss.item() if batch_i % 1000 == 999: # print every 1000 batches avg_loss = running_loss/1000 # record and print the avg loss over the 1000 batches loss_over_time.append(avg_loss) print('Epoch: {}, Batch: {}, Avg. Loss: {}'.format(epoch + 1, batch_i+1, avg_loss)) running_loss = 0.0 print('Finished Training') return loss_over_time # define the number of epochs to train for n_epochs = 30 # start small to see if your model works, initially # call train training_loss = train(n_epochs) ``` ## Visualizing the loss A good indication of how much your network is learning as it trains is the loss over time. In this example, we printed and recorded the average loss for each 1000 batches and for each epoch. Let's plot it and see how the loss decreases (or doesn't) over time. In this case, you should see that the loss has an initially large decrease and even looks like it would decrease more (by some small, linear amount) if we let it train for more epochs. 
``` # visualize the loss as the network trained plt.plot(training_loss) plt.xlabel('1000\'s of batches') plt.ylabel('loss') plt.ylim(0, 2.5) # consistent scale plt.show() ``` ### Test the Trained Network Once you are satisfied with how the loss of your model has decreased, there is one last step: test! You must test your trained model on a previously unseen dataset to see if it generalizes well and can accurately classify this new dataset. For FashionMNIST, which contains many pre-processed training images, a good model should reach **greater than 85% accuracy** on this test dataset. If you are not reaching this value, try training for a larger number of epochs, tweaking your hyperparameters, or adding/subtracting layers from your CNN. ``` # initialize tensor and lists to monitor test loss and accuracy test_loss = torch.zeros(1) class_correct = list(0. for i in range(10)) class_total = list(0. for i in range(10)) # set the module to evaluation mode net.eval() for batch_i, data in enumerate(test_loader): # get the input images and their corresponding labels inputs, labels = data # forward pass to get outputs outputs = net(inputs) # calculate the loss loss = criterion(outputs, labels) # update average test loss test_loss = test_loss + ((torch.ones(1) / (batch_i + 1)) * (loss.data - test_loss)) # get the predicted class from the maximum value in the output-list of class scores _, predicted = torch.max(outputs.data, 1) # compare predictions to true label # this creates a `correct` Tensor that holds the number of correctly classified images in a batch correct = np.squeeze(predicted.eq(labels.data.view_as(predicted))) # calculate test accuracy for *each* object class # we get the scalar value of correct items for a class, by calling `correct[i].item()` for i in range(batch_size): label = labels.data[i] class_correct[label] += correct[i].item() class_total[label] += 1 print('Test Loss: {:.6f}\n'.format(test_loss.numpy()[0])) for i in range(10): if class_total[i] > 0: 
print('Test Accuracy of %5s: %2d%% (%2d/%2d)' % ( classes[i], 100 * class_correct[i] / class_total[i], np.sum(class_correct[i]), np.sum(class_total[i]))) else: print('Test Accuracy of %5s: N/A (no training examples)' % (classes[i])) print('\nTest Accuracy (Overall): %2d%% (%2d/%2d)' % ( 100. * np.sum(class_correct) / np.sum(class_total), np.sum(class_correct), np.sum(class_total))) ``` ### Visualize sample test results Format: predicted class (true class) ``` # obtain one batch of test images dataiter = iter(test_loader) images, labels = dataiter.next() # get predictions preds = np.squeeze(net(images).data.max(1, keepdim=True)[1].numpy()) images = images.numpy() # plot the images in the batch, along with predicted and true labels fig = plt.figure(figsize=(25, 4)) for idx in np.arange(batch_size): ax = fig.add_subplot(2, batch_size/2, idx+1, xticks=[], yticks=[]) ax.imshow(np.squeeze(images[idx]), cmap='gray') ax.set_title("{} ({})".format(classes[preds[idx]], classes[labels[idx]]), color=("green" if preds[idx]==labels[idx] else "red")) ``` ### Question: What are some weaknesses of your model? (And how might you improve these in future iterations.) **Answer**: Since t-shirts, shirts, and coats have a similar overall shape, my model has trouble distinguishing between those items. In fact, its lowest test class accuracy is: `Test Accuracy of Shirt`, which this model only gets right about 60% of the time . I suspect that this accuracy could be improved by doing some data augmentation with respect to these classes or even adding another convolutional layer to extract even higher level features. ``` # Saving the model model_dir = 'saved_models/' model_name = 'fashion_net_ex.pt' # after training, save your model parameters in the dir 'saved_models' # when you're ready, un-comment the line below torch.save(net.state_dict(), model_dir+model_name) ```
github_jupyter
``` from google.colab import drive drive.mount('/content/drive') !pip install picklable_itertools !pip install fuel !pip install foolbox %reload_ext autoreload %autoreload 2 %matplotlib inline import numpy as np PROJECT_DIR = "/content/drive/My Drive/2018/Colab_Deep_Learning/one_class_neural_networks/" import sys,os import numpy as np sys.path.append(PROJECT_DIR) ``` ##** MNIST 0 Vs All **## ``` ## Obtaining the training and testing data %reload_ext autoreload %autoreload 2 import numpy as np from src.config import Configuration as Cfg from src.models.svm import SVM DATASET = "mnist" ESTIMATORS = 100 MAX_SAMPLES = 250 CONTAMINATION = 0.1 MODEL_SAVE_PATH = PROJECT_DIR + "/models/mnist/ISO_FOREST/" REPORT_SAVE_PATH = PROJECT_DIR + "/reports/figures/mnist/ISO_FOREST/" PRETRAINED_WT_PATH = "" RANDOM_SEED = [42,56,81,67,33,25,90,77,15,11] AUC = [] ## Setting the required config values Cfg.out_frac = 0.1 Cfg.ad_experiment = 1 # 1 : yes # 0 : No Cfg.unit_norm_used = "l1" Cfg.gcn = 1 # 1 : yes # 0 : No Cfg.zca_whitening = 0 # 1 : yes # 0 : No Cfg.pca = 0 # 1 for yes # 0 : No Cfg.mnist_val_frac = 0.1 Cfg.mnist_normal = 0 Cfg.mnist_outlier = -1 # SVM parameters Cfg.svm_nu = 0.1 Cfg.svm_GridSearchCV = 1 for seed in RANDOM_SEED: # plot parameters # Cfg.xp_path = REPORT_SAVE_PATH # dataset Cfg.seed = seed # initialize OC-SVM ocsvm = SVM(loss="OneClassSVM", dataset=DATASET, kernel="rbf") # train OC-SVM model ocsvm.fit(GridSearch=Cfg.svm_GridSearchCV) # predict scores auc_roc = ocsvm.predict(which_set='test') print("========================================================================",) print("AUROC: ",auc_roc) print("========================================================================",) AUC.append(auc_roc) print("===========AURO Computed============================") print("AUROC computed ", AUC) auc_roc_mean = np.mean(np.asarray(AUC)) auc_roc_std = np.std(np.asarray(AUC)) print ("AUROC =====", auc_roc_mean*100 ,"+/-",auc_roc_std*100) 
print("========================================================================") ``` ##** MNIST 1 Vs All **## ``` ## Obtaining the training and testing data %reload_ext autoreload %autoreload 2 import numpy as np from src.config import Configuration as Cfg from src.models.svm import SVM DATASET = "mnist" ESTIMATORS = 100 MAX_SAMPLES = 250 CONTAMINATION = 0.1 MODEL_SAVE_PATH = PROJECT_DIR + "/models/mnist/ISO_FOREST/" REPORT_SAVE_PATH = PROJECT_DIR + "/reports/figures/mnist/ISO_FOREST/" PRETRAINED_WT_PATH = "" RANDOM_SEED = [42,56,81,67,33,25,90,77,15,11] AUC = [] ## Setting the required config values Cfg.out_frac = 0.1 Cfg.ad_experiment = 1 # 1 : yes # 0 : No Cfg.unit_norm_used = "l1" Cfg.gcn = 1 # 1 : yes # 0 : No Cfg.zca_whitening = 0 # 1 : yes # 0 : No Cfg.pca = 0 # 1 for yes # 0 : No Cfg.mnist_val_frac = 0.1 Cfg.mnist_normal = 1 Cfg.mnist_outlier = -1 # SVM parameters Cfg.svm_nu = 0.1 Cfg.svm_GridSearchCV = 1 for seed in RANDOM_SEED: # plot parameters # Cfg.xp_path = REPORT_SAVE_PATH # dataset Cfg.seed = seed # initialize OC-SVM ocsvm = SVM(loss="OneClassSVM", dataset=DATASET, kernel="rbf") # train OC-SVM model ocsvm.fit(GridSearch=Cfg.svm_GridSearchCV) # predict scores auc_roc = ocsvm.predict(which_set='test') print("========================================================================",) print("AUROC: ",auc_roc) print("========================================================================",) AUC.append(auc_roc) print("===========AURO Computed============================") print("AUROC computed ", AUC) auc_roc_mean = np.mean(np.asarray(AUC)) auc_roc_std = np.std(np.asarray(AUC)) print ("AUROC =====", auc_roc_mean*100 ,"+/-",auc_roc_std*100) print("========================================================================") ``` ##** MNIST 2 Vs All **## ``` ## Obtaining the training and testing data %reload_ext autoreload %autoreload 2 import numpy as np from src.config import Configuration as Cfg from src.models.svm import SVM DATASET = "mnist" ESTIMATORS = 
100 MAX_SAMPLES = 250 CONTAMINATION = 0.1 MODEL_SAVE_PATH = PROJECT_DIR + "/models/mnist/ISO_FOREST/" REPORT_SAVE_PATH = PROJECT_DIR + "/reports/figures/mnist/ISO_FOREST/" PRETRAINED_WT_PATH = "" RANDOM_SEED = [42,56,81,67,33,25,90,77,15,11] AUC = [] ## Setting the required config values Cfg.out_frac = 0.1 Cfg.ad_experiment = 1 # 1 : yes # 0 : No Cfg.unit_norm_used = "l1" Cfg.gcn = 1 # 1 : yes # 0 : No Cfg.zca_whitening = 0 # 1 : yes # 0 : No Cfg.pca = 0 # 1 for yes # 0 : No Cfg.mnist_val_frac = 0.1 Cfg.mnist_normal = 2 Cfg.mnist_outlier = -1 # SVM parameters Cfg.svm_nu = 0.1 Cfg.svm_GridSearchCV = 1 for seed in RANDOM_SEED: # plot parameters # Cfg.xp_path = REPORT_SAVE_PATH # dataset Cfg.seed = seed # initialize OC-SVM ocsvm = SVM(loss="OneClassSVM", dataset=DATASET, kernel="rbf") # train OC-SVM model ocsvm.fit(GridSearch=Cfg.svm_GridSearchCV) # predict scores auc_roc = ocsvm.predict(which_set='test') print("========================================================================",) print("AUROC: ",auc_roc) print("========================================================================",) AUC.append(auc_roc) print("===========AURO Computed============================") print("AUROC computed ", AUC) auc_roc_mean = np.mean(np.asarray(AUC)) auc_roc_std = np.std(np.asarray(AUC)) print ("AUROC =====", auc_roc_mean*100 ,"+/-",auc_roc_std*100) print("========================================================================") ``` ##** MNIST 3 Vs All **## ``` ## Obtaining the training and testing data %reload_ext autoreload %autoreload 2 import numpy as np from src.config import Configuration as Cfg from src.models.svm import SVM DATASET = "mnist" ESTIMATORS = 100 MAX_SAMPLES = 250 CONTAMINATION = 0.1 MODEL_SAVE_PATH = PROJECT_DIR + "/models/mnist/ISO_FOREST/" REPORT_SAVE_PATH = PROJECT_DIR + "/reports/figures/mnist/ISO_FOREST/" PRETRAINED_WT_PATH = "" RANDOM_SEED = [42,56,81,67,33,25,90,77,15,11] AUC = [] ## Setting the required config values Cfg.out_frac = 0.1 
Cfg.ad_experiment = 1 # 1 : yes # 0 : No Cfg.unit_norm_used = "l1" Cfg.gcn = 1 # 1 : yes # 0 : No Cfg.zca_whitening = 0 # 1 : yes # 0 : No Cfg.pca = 0 # 1 for yes # 0 : No Cfg.mnist_val_frac = 0.1 Cfg.mnist_normal = 3 Cfg.mnist_outlier = -1 # SVM parameters Cfg.svm_nu = 0.1 Cfg.svm_GridSearchCV = 1 for seed in RANDOM_SEED: # plot parameters # Cfg.xp_path = REPORT_SAVE_PATH # dataset Cfg.seed = seed # initialize OC-SVM ocsvm = SVM(loss="OneClassSVM", dataset=DATASET, kernel="rbf") # train OC-SVM model ocsvm.fit(GridSearch=Cfg.svm_GridSearchCV) # predict scores auc_roc = ocsvm.predict(which_set='test') print("========================================================================",) print("AUROC: ",auc_roc) print("========================================================================",) AUC.append(auc_roc) print("===========AURO Computed============================") print("AUROC computed ", AUC) auc_roc_mean = np.mean(np.asarray(AUC)) auc_roc_std = np.std(np.asarray(AUC)) print ("AUROC =====", auc_roc_mean*100 ,"+/-",auc_roc_std*100) print("========================================================================") ``` ##** MNIST 4 Vs All **## ``` ## Obtaining the training and testing data %reload_ext autoreload %autoreload 2 import numpy as np from src.config import Configuration as Cfg from src.models.svm import SVM DATASET = "mnist" ESTIMATORS = 100 MAX_SAMPLES = 250 CONTAMINATION = 0.1 MODEL_SAVE_PATH = PROJECT_DIR + "/models/mnist/ISO_FOREST/" REPORT_SAVE_PATH = PROJECT_DIR + "/reports/figures/mnist/ISO_FOREST/" PRETRAINED_WT_PATH = "" RANDOM_SEED = [42,56,81,67,33,25,90,77,15,11] AUC = [] ## Setting the required config values Cfg.out_frac = 0.1 Cfg.ad_experiment = 1 # 1 : yes # 0 : No Cfg.unit_norm_used = "l1" Cfg.gcn = 1 # 1 : yes # 0 : No Cfg.zca_whitening = 0 # 1 : yes # 0 : No Cfg.pca = 0 # 1 for yes # 0 : No Cfg.mnist_val_frac = 0.1 Cfg.mnist_normal = 4 Cfg.mnist_outlier = -1 # SVM parameters Cfg.svm_nu = 0.1 Cfg.svm_GridSearchCV = 1 for seed in 
RANDOM_SEED: # plot parameters # Cfg.xp_path = REPORT_SAVE_PATH # dataset Cfg.seed = seed # initialize OC-SVM ocsvm = SVM(loss="OneClassSVM", dataset=DATASET, kernel="rbf") # train OC-SVM model ocsvm.fit(GridSearch=Cfg.svm_GridSearchCV) # predict scores auc_roc = ocsvm.predict(which_set='test') print("========================================================================",) print("AUROC: ",auc_roc) print("========================================================================",) AUC.append(auc_roc) print("===========AURO Computed============================") print("AUROC computed ", AUC) auc_roc_mean = np.mean(np.asarray(AUC)) auc_roc_std = np.std(np.asarray(AUC)) print ("AUROC =====", auc_roc_mean*100 ,"+/-",auc_roc_std*100) print("========================================================================") ``` ##** MNIST 5 Vs All **## ``` ## Obtaining the training and testing data %reload_ext autoreload %autoreload 2 import numpy as np from src.config import Configuration as Cfg from src.models.svm import SVM DATASET = "mnist" ESTIMATORS = 100 MAX_SAMPLES = 250 CONTAMINATION = 0.1 MODEL_SAVE_PATH = PROJECT_DIR + "/models/mnist/ISO_FOREST/" REPORT_SAVE_PATH = PROJECT_DIR + "/reports/figures/mnist/ISO_FOREST/" PRETRAINED_WT_PATH = "" RANDOM_SEED = [42,56,81,67,33,25,90,77,15,11] AUC = [] ## Setting the required config values Cfg.out_frac = 0.1 Cfg.ad_experiment = 1 # 1 : yes # 0 : No Cfg.unit_norm_used = "l1" Cfg.gcn = 1 # 1 : yes # 0 : No Cfg.zca_whitening = 0 # 1 : yes # 0 : No Cfg.pca = 0 # 1 for yes # 0 : No Cfg.mnist_val_frac = 0.1 Cfg.mnist_normal = 5 Cfg.mnist_outlier = -1 # SVM parameters Cfg.svm_nu = 0.1 Cfg.svm_GridSearchCV = 1 for seed in RANDOM_SEED: # plot parameters # Cfg.xp_path = REPORT_SAVE_PATH # dataset Cfg.seed = seed # initialize OC-SVM ocsvm = SVM(loss="OneClassSVM", dataset=DATASET, kernel="rbf") # train OC-SVM model ocsvm.fit(GridSearch=Cfg.svm_GridSearchCV) # predict scores auc_roc = ocsvm.predict(which_set='test') 
print("========================================================================",) print("AUROC: ",auc_roc) print("========================================================================",) AUC.append(auc_roc) print("===========AURO Computed============================") print("AUROC computed ", AUC) auc_roc_mean = np.mean(np.asarray(AUC)) auc_roc_std = np.std(np.asarray(AUC)) print ("AUROC =====", auc_roc_mean*100 ,"+/-",auc_roc_std*100) print("========================================================================") ``` ##** MNIST 6 Vs All **## ``` ## Obtaining the training and testing data %reload_ext autoreload %autoreload 2 import numpy as np from src.config import Configuration as Cfg from src.models.svm import SVM DATASET = "mnist" ESTIMATORS = 100 MAX_SAMPLES = 250 CONTAMINATION = 0.1 MODEL_SAVE_PATH = PROJECT_DIR + "/models/mnist/ISO_FOREST/" REPORT_SAVE_PATH = PROJECT_DIR + "/reports/figures/mnist/ISO_FOREST/" PRETRAINED_WT_PATH = "" RANDOM_SEED = [42,56,81,67,33,25,90,77,15,11] AUC = [] ## Setting the required config values Cfg.out_frac = 0.1 Cfg.ad_experiment = 1 # 1 : yes # 0 : No Cfg.unit_norm_used = "l1" Cfg.gcn = 1 # 1 : yes # 0 : No Cfg.zca_whitening = 0 # 1 : yes # 0 : No Cfg.pca = 0 # 1 for yes # 0 : No Cfg.mnist_val_frac = 0.1 Cfg.mnist_normal = 6 Cfg.mnist_outlier = -1 # SVM parameters Cfg.svm_nu = 0.1 Cfg.svm_GridSearchCV = 1 for seed in RANDOM_SEED: # plot parameters # Cfg.xp_path = REPORT_SAVE_PATH # dataset Cfg.seed = seed # initialize OC-SVM ocsvm = SVM(loss="OneClassSVM", dataset=DATASET, kernel="rbf") # train OC-SVM model ocsvm.fit(GridSearch=Cfg.svm_GridSearchCV) # predict scores auc_roc = ocsvm.predict(which_set='test') print("========================================================================",) print("AUROC: ",auc_roc) print("========================================================================",) AUC.append(auc_roc) print("===========AURO Computed============================") print("AUROC computed ", AUC) auc_roc_mean = 
np.mean(np.asarray(AUC)) auc_roc_std = np.std(np.asarray(AUC)) print ("AUROC =====", auc_roc_mean*100 ,"+/-",auc_roc_std*100) print("========================================================================") ``` ##** MNIST 7 Vs All **## ``` ## Obtaining the training and testing data %reload_ext autoreload %autoreload 2 import numpy as np from src.config import Configuration as Cfg from src.models.svm import SVM DATASET = "mnist" ESTIMATORS = 100 MAX_SAMPLES = 250 CONTAMINATION = 0.1 MODEL_SAVE_PATH = PROJECT_DIR + "/models/mnist/ISO_FOREST/" REPORT_SAVE_PATH = PROJECT_DIR + "/reports/figures/mnist/ISO_FOREST/" PRETRAINED_WT_PATH = "" RANDOM_SEED = [42,56,81,67,33,25,90,77,15,11] AUC = [] ## Setting the required config values Cfg.out_frac = 0.1 Cfg.ad_experiment = 1 # 1 : yes # 0 : No Cfg.unit_norm_used = "l1" Cfg.gcn = 1 # 1 : yes # 0 : No Cfg.zca_whitening = 0 # 1 : yes # 0 : No Cfg.pca = 0 # 1 for yes # 0 : No Cfg.mnist_val_frac = 0.1 Cfg.mnist_normal = 7 Cfg.mnist_outlier = -1 # SVM parameters Cfg.svm_nu = 0.1 Cfg.svm_GridSearchCV = 1 for seed in RANDOM_SEED: # plot parameters # Cfg.xp_path = REPORT_SAVE_PATH # dataset Cfg.seed = seed # initialize OC-SVM ocsvm = SVM(loss="OneClassSVM", dataset=DATASET, kernel="rbf") # train OC-SVM model ocsvm.fit(GridSearch=Cfg.svm_GridSearchCV) # predict scores auc_roc = ocsvm.predict(which_set='test') print("========================================================================",) print("AUROC: ",auc_roc) print("========================================================================",) AUC.append(auc_roc) print("===========AURO Computed============================") print("AUROC computed ", AUC) auc_roc_mean = np.mean(np.asarray(AUC)) auc_roc_std = np.std(np.asarray(AUC)) print ("AUROC =====", auc_roc_mean*100 ,"+/-",auc_roc_std*100) print("========================================================================") ``` ##** MNIST 8 Vs All **## ``` ## Obtaining the training and testing data %reload_ext autoreload %autoreload 
2 import numpy as np from src.config import Configuration as Cfg from src.models.svm import SVM DATASET = "mnist" ESTIMATORS = 100 MAX_SAMPLES = 250 CONTAMINATION = 0.1 MODEL_SAVE_PATH = PROJECT_DIR + "/models/mnist/ISO_FOREST/" REPORT_SAVE_PATH = PROJECT_DIR + "/reports/figures/mnist/ISO_FOREST/" PRETRAINED_WT_PATH = "" RANDOM_SEED = [42,56,81,67,33,25,90,77,15,11] AUC = [] ## Setting the required config values Cfg.out_frac = 0.1 Cfg.ad_experiment = 1 # 1 : yes # 0 : No Cfg.unit_norm_used = "l1" Cfg.gcn = 1 # 1 : yes # 0 : No Cfg.zca_whitening = 0 # 1 : yes # 0 : No Cfg.pca = 0 # 1 for yes # 0 : No Cfg.mnist_val_frac = 0.1 Cfg.mnist_normal = 8 Cfg.mnist_outlier = -1 # SVM parameters Cfg.svm_nu = 0.1 Cfg.svm_GridSearchCV = 1 for seed in RANDOM_SEED: # plot parameters # Cfg.xp_path = REPORT_SAVE_PATH # dataset Cfg.seed = seed # initialize OC-SVM ocsvm = SVM(loss="OneClassSVM", dataset=DATASET, kernel="rbf") # train OC-SVM model ocsvm.fit(GridSearch=Cfg.svm_GridSearchCV) # predict scores auc_roc = ocsvm.predict(which_set='test') print("========================================================================",) print("AUROC: ",auc_roc) print("========================================================================",) AUC.append(auc_roc) print("===========AURO Computed============================") print("AUROC computed ", AUC) auc_roc_mean = np.mean(np.asarray(AUC)) auc_roc_std = np.std(np.asarray(AUC)) print ("AUROC =====", auc_roc_mean*100 ,"+/-",auc_roc_std*100) print("========================================================================") ``` ##** MNIST 9 Vs All **## ``` ## Obtaining the training and testing data %reload_ext autoreload %autoreload 2 import numpy as np from src.config import Configuration as Cfg from src.models.svm import SVM DATASET = "mnist" ESTIMATORS = 100 MAX_SAMPLES = 250 CONTAMINATION = 0.1 MODEL_SAVE_PATH = PROJECT_DIR + "/models/mnist/ISO_FOREST/" REPORT_SAVE_PATH = PROJECT_DIR + "/reports/figures/mnist/ISO_FOREST/" PRETRAINED_WT_PATH = 
"" RANDOM_SEED = [42,56,81,67,33,25,90,77,15,11] AUC = [] ## Setting the required config values Cfg.out_frac = 0.1 Cfg.ad_experiment = 1 # 1 : yes # 0 : No Cfg.unit_norm_used = "l1" Cfg.gcn = 1 # 1 : yes # 0 : No Cfg.zca_whitening = 0 # 1 : yes # 0 : No Cfg.pca = 0 # 1 for yes # 0 : No Cfg.mnist_val_frac = 0.1 Cfg.mnist_normal = 9 Cfg.mnist_outlier = -1 # SVM parameters Cfg.svm_nu = 0.1 Cfg.svm_GridSearchCV = 1 for seed in RANDOM_SEED: # plot parameters # Cfg.xp_path = REPORT_SAVE_PATH # dataset Cfg.seed = seed # initialize OC-SVM ocsvm = SVM(loss="OneClassSVM", dataset=DATASET, kernel="rbf") # train OC-SVM model ocsvm.fit(GridSearch=Cfg.svm_GridSearchCV) # predict scores auc_roc = ocsvm.predict(which_set='test') print("========================================================================",) print("AUROC: ",auc_roc) print("========================================================================",) AUC.append(auc_roc) print("===========AURO Computed============================") print("AUROC computed ", AUC) auc_roc_mean = np.mean(np.asarray(AUC)) auc_roc_std = np.std(np.asarray(AUC)) print ("AUROC =====", auc_roc_mean*100 ,"+/-",auc_roc_std*100) print("========================================================================") ```
github_jupyter
# Bag of Words ## Term Document Frequency ![](images/dtm.PNG) ## TF- IDF Term Frequency Inverse Document Frequency Words that appear in many documents are probably less meaningful We are interested in obtaining VXD type matrix where V is the vocabulary size and D is the Vector Dimensionality ![](images/vxd.PNG) # Word Embeddings Feature vector representing a word ## Word Analogies There is no concept of word analogies with algorithms like word2vec and Glove.They just suddenly emerge out of the model and training process. ![](images/word_analogies.PNG) ## Get Word Similarity ## Similarity Metric 1. Euclidean 2. Cosine # Glove Analogies Get glove.6B.50d file for word embeddings ``` import numpy as np from sklearn.metrics.pairwise import pairwise_distances #Euclidean def dist1(a, b): return np.linalg.norm(a - b) #Cosine def dist2(a, b): return 1 - a.dot(b) / (np.linalg.norm(a) * np.linalg.norm(b)) # pick a distance type dist, metric = dist2, 'cosine' def find_analogies(w1, w2, w3): for w in (w1, w2, w3): if w not in word2vec: print("%s not in dictionary" % w) return king = word2vec[w1] man = word2vec[w2] woman = word2vec[w3] v0 = king - man + woman distances = pairwise_distances(v0.reshape(1, D), embedding, metric=metric).reshape(V) idxs = distances.argsort()[:4] for idx in idxs: word = idx2word[idx] if word not in (w1, w2, w3): best_word = word break print(w1, "-", w2, "=", best_word, "-", w3) def nearest_neighbors(w, n=5): if w not in word2vec: print("%s not in dictionary:" % w) return v = word2vec[w] distances = pairwise_distances(v.reshape(1, D), embedding, metric=metric).reshape(V) idxs = distances.argsort()[1:n+1] print("neighbors of: %s" % w) for idx in idxs: print("\t%s" % idx2word[idx]) print('Loading word vectors...') word2vec = {} embedding = [] idx2word = [] with open('glove.6B.50d.txt', encoding='utf-8') as f: # is just a space-separated text file in the format: # word vec[0] vec[1] vec[2] ... 
for line in f: values = line.split() word = values[0] vec = np.asarray(values[1:], dtype='float32') word2vec[word] = vec embedding.append(vec) idx2word.append(word) print('Found %s word vectors.' % len(word2vec)) embedding = np.array(embedding) V, D = embedding.shape find_analogies('king', 'man', 'woman') find_analogies('france', 'paris', 'london') ``` # Word2Vec Analogies Get GoogleNews-vectors-negative300.bin file for word embedding ``` from gensim.models import KeyedVectors word_vectors = KeyedVectors.load_word2vec_format( 'GoogleNews-vectors-negative300.bin', binary=True ) def find_analogies(w1, w2, w3): r = word_vectors.most_similar(positive=[w1, w3], negative=[w2]) print("%s - %s = %s - %s" % (w1, w2, r[0][0], w3)) def nearest_neighbors(w): r = word_vectors.most_similar(positive=[w]) print("neighbors of: %s" % w) for word, score in r: print("\t%s" % word) ``` # Word Embeddings When you're dealing with words in text, you end up with tens of thousands of word classes to analyze; one for each word in a vocabulary. Trying to one-hot encode these words is massively inefficient because most values in a one-hot vector will be set to zero. So, the matrix multiplication that happens in between a one-hot input vector and a first, hidden layer will result in mostly zero-valued hidden outputs. To solve this problem and greatly increase the efficiency of our networks, we use what are called **embeddings**. Embeddings are just a fully connected layer like you've seen before. We call this layer the embedding layer and the weights are embedding weights. We skip the multiplication into the embedding layer by instead directly grabbing the hidden layer values from the weight matrix. We can do this because the multiplication of a one-hot encoded vector with a matrix returns the row of the matrix corresponding the index of the "on" input unit. ![](images/embedding.PNG) Instead of doing the matrix multiplication, we use the weight matrix as a lookup table. 
We encode the words as integers, for example "heart" is encoded as 958, "mind" as 18094. Then to get hidden layer values for "heart", you just take the 958th row of the embedding matrix. This process is called an **embedding lookup** and the number of hidden units is the **embedding dimension**. There is nothing magical going on here. The embedding lookup table is just a weight matrix. The embedding layer is just a hidden layer. The lookup is just a shortcut for the matrix multiplication. The lookup table is trained just like any weight matrix. Embeddings aren't only used for words of course. You can use them for any model where you have a massive number of classes. A particular type of model called **Word2Vec** uses the embedding layer to find vector representations of words that contain semantic meaning. ## Word2Vec [Word2Vec](https://arxiv.org/pdf/1310.4546.pdf) [Efficient Estimation of Word Representations in Vector Space](https://arxiv.org/abs/1301.3781) Word2Vec is one of the most popular technique to learn word embeddings using shallow neural network. We’re going to train a simple neural network with a single hidden layer to perform a certain task, but then we’re not actually going to use that neural network for the task we trained it on! Instead, the goal is actually just to learn the weights of the hidden layer–we’ll see that these weights are actually the “word vectors” that we’re trying to learn. Different model architectures that can be used with Word2Vec 1. CBOW Neural Network Model 2. Skipgram Neural Network Model ![](images/cbow_skipgram.PNG) Different ways to train Word2vec Model: 1. Heirarchial Softmax 2. Negative Sampling ### Skipgram ![](images/skipgram_g.PNG) **eg: The quick brown fox jumps over the lazy dog** ![](images/skipgram.PNG) We’re going to train the neural network to do the following. Given a specific word in the middle of a sentence (the input word), look at the words **nearby** and pick one at random. 
The network is going to tell us the probability for every word in our vocabulary of being the “nearby word” that we chose. **nearby** - there is actually a "window size" parameter to the algorithm. A typical window size might be 5, meaning 5 words behind and 5 words ahead (10 in total). We’ll train the neural network to do this by feeding it word pairs found in our training documents. The network is going to learn the statistics from the number of times each pairing shows up. So, for example, the network is probably going to get many more training samples of (“Soviet”, “Union”) than it is of (“Soviet”, “Sasquatch”). When the training is finished, if you give it the word “Soviet” as input, then it will output a much higher probability for “Union” or “Russia” than it will for “Sasquatch”. #### Skipgram Model Details Skipgram architecture consists of: 1. [Embedding layer / Hidden Layer](https://pytorch.org/docs/stable/nn.html#embedding) An Embedding layer takes in a number of inputs, importantly: * **num_embeddings** – the size of the dictionary of embeddings, or how many rows you'll want in the embedding weight matrix * **embedding_dim** – the size of each embedding vector; the embedding dimension.(300 features is what Google used in their published model trained on the Google news dataset (you can download it from here) 2. Softmax Output Layer<br> The output layer will have output neuron (one per word in our vocabulary) will produce an output between 0 and 1, and the sum of all these output values will add up to 1. ![](images/skip_gram_arch.PNG) **Working** 1. The input words are passed in as batches of input word tokens. 2. This will go into a hidden layer of linear units (our embedding layer). 3. Then, finally into a softmax output layer. We'll use the softmax layer to make a prediction about the context words by sampling, as usual. ### Training Word2Vec Model Word2Vec network is a huge network in terms of parameters. 
Say we had word vectors with 300 components, and a vocabulary of 10,000 words. Recall that the neural network had two weight matrices–a hidden layer and output layer. Both of these layers would have a weight matrix with 300 x 10,000 = 3 million weights each! Hence to reduce compute burden of the training process,the research paper proposed following two innovations: 1. **Subsampling frequent words** to decrease the number of training examples. 2. Modifying the optimization objective with a technique they called **Negative Sampling**,instead of normal cross entropy which causes each training sample to update only a small percentage of the model’s weights which makes training the network very inefficient. #### Subsampling Words that show up often such as "the", "of", and "for" don't provide much context to the nearby words. If we discard some of them, we can remove some of the noise from our data and in return get faster training and better representations. This process is called subsampling by Mikolov. For each word $w_i$ in the training set, we'll discard it with probability given by $$ P(w_i) = 1 - \sqrt{\frac{t}{f(w_i)}} $$ where $t$ is a threshold parameter and $f(w_i)$ is the frequency of word $w_i$ in the total dataset. #### Negative Sampling As discussed above,the the skip-gram neural network has a tremendous number of weights, all of which would be updated slightly by every one of our millions or billions of training samples! Negative sampling addresses this by having each training sample only modify a small percentage of the weights, rather than all of them. With negative sampling, we are instead going to randomly select just a small number of “negative” words (let’s say 5) to update the weights for. (In this context, a “negative” word is one for which we want the network to output a 0 for). We will also still update the weights for our “positive” word (which is the word “quick” in our current example). 
***The paper says that selecting 5-20 words works well for smaller datasets, and you can get away with only 2-5 words for large datasets.*** #### Selecting Negative Samples The “negative samples” (that is, the 5 output words that we’ll train to output 0) are selected using a “unigram distribution”, where more frequent words are more likely to be selected as negative samples. **Modification in Cost Function** We change the loss function to only care about correct example and a small amount of wrong examples. ![](https://render.githubusercontent.com/render/math?math=-%20%5Clarge%20%5Clog%7B%5Csigma%5Cleft%28u_%7Bw_O%7D%5Chspace%7B0.001em%7D%5E%5Ctop%20v_%7Bw_I%7D%5Cright%29%7D%20-%0A%5Csum_i%5EN%20%5Cmathbb%7BE%7D_%7Bw_i%20%5Csim%20P_n%28w%29%7D%5Clog%7B%5Csigma%5Cleft%28-u_%7Bw_i%7D%5Chspace%7B0.001em%7D%5E%5Ctop%20v_%7Bw_I%7D%5Cright%29%7D&mode=display) First part of the Loss function: ![](https://render.githubusercontent.com/render/math?math=%5Clarge%20%5Clog%7B%5Csigma%5Cleft%28u_%7Bw_O%7D%5Chspace%7B0.001em%7D%5E%5Ctop%20v_%7Bw_I%7D%5Cright%29%7D&mode=display) we take the log-sigmoid of the inner product of the output word vector and the input word vector. Second part of the Loss function: let's first look at $$\large \sum_i^N \mathbb{E}_{w_i \sim P_n(w)}$$ This means we're going to take a sum over words $w_i$ drawn from a noise distribution $w_i \sim P_n(w)$. The noise distribution is basically our vocabulary of words that aren't in the context of our input word. In effect, we can randomly sample words from our vocabulary to get these words. $P_n(w)$ is an arbitrary probability distribution though, which means we get to decide how to weight the words that we're sampling. This could be a uniform distribution, where we sample all words with equal probability. Or it could be according to the frequency that each word shows up in our text corpus, the unigram distribution $U(w)$. The authors found the best distribution to be $U(w)^{3/4}$, empirically. 
Raising the unigram distribution to the 3/4 power flattens it, so less frequent words are sampled relatively more often.
In **LSA** the high-dimensional matrix is reduced via **singular value decomposition (SVD).** ![](images/svd.JPG) ### Local Context Window Direct prediction 1. **Skip-gram model**<br> By passing a window over the corpus line-by-line and learning to predict either the surroundings of a given word 2. **Continuous Bag of Words Model (CBOW)**<br> Predict a word given its surroundings. Note the bag-of-words problem is often shortened to “CBOW”. **Skip-gram**: works well with small amount of the training data, represents well even rare words or phrases.<br> **CBOW**: several times faster to train than the skip-gram, slightly better accuracy for the frequent words. ![](images/cbow_skipgram.PNG) ### GloVe embedding generation algorithm GloVe technique improves on these previous methods by making changes in the following: 1. **Co-occurance Probabilities**<br> Instead of learning the raw co-occurrence probabilities, it may make more sense to learn ratios of these co-occurrence probabilities, which seem to better discriminate subtleties in term-term relevance. To illustrate this, we borrow an example from their paper: suppose we wish to study the relationship between two words, i = ice and j = steam. We’ll do this by examining the co-occurrence probabilities of these words with various “probe” words. Co-occurrence probability of an arbitrary word i with an arbitrary word j to be the probability that word j appears in the context of word i. ![](images/co-occurance_matrix.PNG) ***X_i*** is defined as the number of times any word appears in the context of word i, so it’s defined as the sum over all words k of the number of times word k occurs in the context of word i. Let us take few probe words and see how does the ratio appears: 1. If we choose a probe word k = solid which is closely related to i = ice but not to j = steam, we expect the ratio P_{ik}/P_{jk} of co-occurrence probabilities to be large 2. 
If we choose a probe word k = gas we would expect the same ratio to be small, since steam is more closely related to gas than ice is. 3. If we choose a probe word k = water , which are closely related to both ice and steam, but not more to one than the other ,we expect our ratio to be close to 1 since there shouldn’t be any bias to one of ice or steam 4. If we choose a probe word k = fashion ,which are not closely related to either of the words in question, we expect our ratio to be close to 1 since there shouldn’t be any bias to one of ice or steam ![](images/co-occurance_ex.PNG) Noting that the ratio P<sub>ik</sub> /P<sub>jk</sub> depends on three words i, j, and k, the most general model takes the form, ![](images/naive_form.PNG) ***In this equation, the right-hand side is extracted from the corpus, and F may depend on some as-of-yet unspecified parameters. The number of possibilities for F is vast, but by enforcing a few desiderata we can select a unique choice***. We have two word vectors which we’d like to discriminate between, and a context word vector which is used to this effect.So to encode information about the ratios between two words, the authors suggest using vector differences as inputs to our function **Vector Difference Model** ![](images/vector_difference_model.PNG) So now,vector difference of the two words **i** and **j** we’re comparing as an input instead of both of these words individually, since our output is a ratio between their co-occurrence probabilities with the context word. Now we have two arguments, the context word vector, and the vector difference of the two words we’re comparing. 
Since the authors wish to take scalar values to scalar values (note the ratio of probabilities is a scalar), the dot product of these two arguments is taken ![](images/scalar_input_model.PNG) Next, note that for word-word co-occurrence matrices, the distinction between a word and a context word is arbitrary and that we are free to exchange the two roles.Our final model should be invariant under this relabeling, but above equation is not. After applying Homomorphism condition: ![](images/homomorphism.PNG) we arrive at the equation as in the paper ![](images/glove_homo.PNG) # Playground ``` import numpy as np def logit(x): return np.log(x/(1-x)) def sigmoid(x): return 1/ (1+ np.exp(-x)) # p(y=1/x)= Sigmoid(logit(p)) sigmoid(logit(0.8)) #p(y=0/x) = sigmoid(-logit(p)) sigmoid(-logit(0.8)) # p(y=1/x)= Sigmoid(logit(p)) , p(y=0/x) = sigmoid(-logit(p)) # sigmoid(logit) + sigmoid(-logit) =1 #Inverse of logit is sigmoid i.e p = sigmoid(logit(p)) # sigmoid(-x)= 1-sigmoid(x) print(sigmoid(logit(0.8))) print(sigmoid(-logit(0.8))) print(sigmoid(logit(0.8))+sigmoid(-logit(0.8))) print(np.log(sigmoid(0.9))) print(np.log(sigmoid(-0.1))) print(-(np.log(sigmoid(0.8))+np.log(sigmoid(-0.1)))) ``` # Reference 1. [Bayes Rule](https://math.stackexchange.com/questions/1037327/extended-bayes-theorem-pa-b-c-d-constructing-a-bayesian-network) 2. [Word2Vec from Chris McCormick ](http://mccormickml.com/2016/04/19/word2vec-tutorial-the-skip-gram-model/) 3. [Word Embedding](https://monkeylearn.com/blog/word-embeddings-transform-text-numbers/) 4. [Word2Vec](https://p.migdal.pl/2017/01/06/king-man-woman-queen-why.html) 5. [First Word2Vec paper](https://arxiv.org/pdf/1301.3781.pdf) from Mikolov et al. 6. [Video: Intuition & Use-Cases of Embeddings in NLP & beyond](http://jalammar.github.io/) 7. 
[Neural Information Processing Systems, paper](http://papers.nips.cc/paper/5021-distributed-representations-of-words-and-phrases-and-their-compositionality.pdf) with improvements for Word2Vec also from Mikolov et al. 8. [Skipgram-Pytorch](https://github.com/udacity/deep-learning-v2-pytorch/blob/master/word2vec-embeddings/Negative_Sampling_Solution.ipynb) 9. [Word2Vec-Pytorch](https://github.com/dthiagarajan/word2vec-pytorch) 10. [GloVe](https://towardsdatascience.com/emnlp-what-is-glove-part-iv-e605a4c407c8) 11. [Glove](https://towardsdatascience.com/light-on-math-ml-intuitive-guide-to-understanding-glove-embeddings-b13b4f19c010)
github_jupyter
# Vector Norm ``` import numpy as np from scipy import signal from scipy.spatial import distance A = np.array([1+1j, 2+2j, 3+3j, 4+4j, 5+5j]) B = np.array([6-6j, 7-7j, 8-8j, 9-9j, 10-10j]) C = np.array([2,3,5,7,11]) Z = np.array([0,0,0,0,0]) D = np.array([A,B]) ``` For every complex inner product space V(-,-), we can define a norm or length which is a function defined as \begin{align} | |: V -> E \end{align} defined as \begin{align} |V| = |\sqrt{V . V}| \end{align} ``` [ np.linalg.norm(A) == np.abs(np.sqrt(np.dot(A,A))), np.linalg.norm(B) == np.abs(np.sqrt(np.dot(B,B))), np.linalg.norm(C) == np.abs(np.sqrt(np.dot(C,C))) ] [ np.linalg.norm(A), np.linalg.norm(B), np.linalg.norm(C), ] ``` # Vector Distance For every complex inner product space V(-,-), we can define a distance function \begin{align} d(,) : V x V -> E \end{align} where \begin{align} d(V1,V2) : |V1 - V2| = \sqrt{V1-V2, V1-V2} \end{align} ``` distance.euclidean(A, B) np.linalg.norm(A-B) == distance.euclidean(A, B) np.round( distance.euclidean(A, B), 10) == \ np.round( np.abs(np.sqrt(np.dot(A,A)-np.dot(B,B))), 10) ``` Distance is symmetric: d(V, W) = d(W, V) ``` distance.euclidean(A, B) == distance.euclidean(B, A) ``` Distance satisfies the triangle inequality: d(U, V) ≤ d(U, W) + d(W, V) ``` distance.euclidean(A, C), distance.euclidean(A, B) + distance.euclidean(B, C) distance.euclidean(A, C) <= distance.euclidean(A, B) + distance.euclidean(B, C) ``` Distance is nondegenerate: d(V, W) > 0 if V ≠ W and d(V, V) = 0. ``` distance.euclidean(Z,Z) distance.euclidean(A,Z), distance.euclidean(A,Z) > 0 ``` ## Orthogonal Vectors The dot product of orthogonal vectors is zero ``` X = np.array([1,0]) Y = np.array([0,1]) np.dot(X,Y) ``` ## Kronecker Delta δj,k is called the Kronecker delta function. 
δj,k = 1 (if j == k); 0 (if j != k); ``` # np.matrix is deprecated in NumPy; use a regular ndarray instead M = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]]) {"shape": M.shape, "size": M.size} def kronecker_delta(matrix): """Return a copy of `matrix` with every off-diagonal entry set to 0. Applies the Kronecker delta element-wise: entries whose row and column indices match are kept; all others become 0. """ output = np.copy(matrix) rows, cols = matrix.shape for i in range(rows): for j in range(cols): if i != j: # diagonal entries are left untouched output[i, j] = 0 return output kronecker_delta(M) ``` It is equivalent to element-wise multiplication by the identity matrix ``` np.multiply(M, np.identity(3)) kronecker_delta(M) == np.multiply(M, np.identity(M.shape[0])) ``` NOTE: np.kron is the Kronecker (tensor) product function, and not the Kronecker DELTA ``` np.kron(M, M) ```
github_jupyter
``` import numpy as np import pandas as pd from pathlib import Path import matplotlib.pyplot as plt %matplotlib inline import warnings warnings.simplefilter(action='ignore', category=FutureWarning) ``` # Return Forecasting: Read Historical Daily Yen Futures Data In this notebook, you will load historical Dollar-Yen exchange rate futures data and apply time series analysis and modeling to determine whether there is any predictable behavior. ``` # Futures contract on the Yen-dollar exchange rate: # This is the continuous chain of the futures contracts that are 1 month to expiration yen_futures = pd.read_csv( Path("yen.csv"), index_col="Date", infer_datetime_format=True, parse_dates=True ) yen_futures.head() # Trim the dataset to begin on January 1st, 1990 yen_futures = yen_futures.loc["1990-01-01":, :] yen_futures.head() ``` # Return Forecasting: Initial Time-Series Plotting Start by plotting the "Settle" price. Do you see any patterns, long-term and/or short? ``` yen_futures_settle= yen_futures['Settle'] #print(type(yen_futures_settle)) #print(yen_futures_settle) yen_futures_settle = yen_futures_settle.to_frame() yen_futures_settle.head() ``` #### make a copy for later ``` yen_futures_settle_only = yen_futures_settle.copy() yen_futures_settle_only.head() # Plot just the "Settle" column from the dataframe: # YOUR CODE HERE! yen_futures_settle.plot(y='Settle', title='Yen Futures Settle Prices', figsize=(20,10)) #ax.legend(['Settle prices']) ``` --- # Decomposition Using a Hodrick-Prescott Filter Using a Hodrick-Prescott Filter, decompose the Settle price into a trend and noise. ``` import statsmodels.api as sm # Apply the Hodrick-Prescott Filter by decomposing the "Settle" price into two separate series: # YOUR CODE HERE! 
#Hodrick-Prescott filter ts_noise, ts_trend = sm.tsa.filters.hpfilter(yen_futures_settle['Settle']) ``` #### Test the noise , trend datasets ``` print(ts_noise.head()) print(ts_noise[1]) print(ts_trend.head()) # Create a dataframe of just the settle price, and add columns for "noise" and "trend" series from above: # YOUR CODE HERE! yen_futures_settle['noise'] = ts_noise yen_futures_settle['trend'] = ts_trend yen_futures_settle.head() ``` #### Drop noise from data frame ``` yen_futures_settle_trend = yen_futures_settle.drop(columns=['noise']) yen_futures_settle_only.head() yen_futures_settle_only.tail() ``` #### filter 2015 to now ``` yen_futures_settle_trend2015 = yen_futures_settle_trend['2015':] # Plot the Settle Price vs. the Trend for 2015 to the present # YOUR CODE HERE! #yen_futures_settle_trend.plot(title='Yen Futures Settle vs Trend', figsize=(20,10)) yen_futures_settle_trend2015.plot(title='Yen Futures Settle vs Trend', figsize=(20,10)) # Plot the Settle Noise # YOUR CODE HERE! ts_noise.plot(title='Noise', figsize=(20,10)) ``` --- # Forecasting Returns using an ARMA Model Using futures Settle *Returns*, estimate an ARMA model 1. ARMA: Create an ARMA model and fit it to the returns data. Note: Set the AR and MA ("p" and "q") parameters to p=2 and q=1: order=(2, 1). 2. Output the ARMA summary table and take note of the p-values of the lags. Based on the p-values, is the model a good fit (p < 0.05)? 3. Plot the 5-day forecast of the forecasted returns (the results forecast from ARMA model) ``` # Create a series using "Settle" price percentage returns, drop any nan"s, and check the results: # (Make sure to multiply the pct_change() results by 100) # In this case, you may have to replace inf, -inf values with np.nan"s returns = (yen_futures[["Settle"]].pct_change() * 100) returns = returns.replace(-np.inf, np.nan).dropna() returns.tail() import statsmodels.api as sm # Estimate and ARMA model using statsmodels (use order=(2, 1)) # YOUR CODE HERE! 
from statsmodels.tsa.arima_model import ARMA # For the order parameter, the first 1 indicates the number of AR lags # For the order parameter, the second 1 indicates the number of MA lags model = ARMA(returns.values, order=(2,1)) # Fit the model and assign it to a variable called results # YOUR CODE HERE! results = model.fit() # Output model summary results: # YOUR CODE HERE! results.summary() # Plot the 5 Day Returns Forecast # YOUR CODE HERE! pd.DataFrame(results.forecast(steps=5)[0]).plot(title="5 Day Returns Forecast") pd.DataFrame(results.forecast(steps=5)[0]) ``` --- # Forecasting the Settle Price using an ARIMA Model 1. Using the *raw* Yen **Settle Price**, estimate an ARIMA model. 1. Set P=5, D=1, and Q=1 in the model (e.g., ARIMA(df, order=(5,1,1)) 2. P= # of Auto-Regressive Lags, D= # of Differences (this is usually =1), Q= # of Moving Average Lags 2. Output the ARIMA summary table and take note of the p-values of the lags. Based on the p-values, is the model a good fit (p < 0.05)? 3. Construct a 5 day forecast for the Settle Price. What does the model forecast will happen to the Japanese Yen in the near term? ``` from statsmodels.tsa.arima_model import ARIMA # Estimate and ARIMA Model: # Hint: ARIMA(df, order=(p, d, q)) # YOUR CODE HERE! model2 = ARIMA(yen_futures_settle['Settle'], order=(5, 1, 1)) # Fit the model # YOUR CODE HERE! res2 = model2.fit() # Output model summary results: res2.summary() # Plot the 5 Day Price Forecast # YOUR CODE HERE! pd.DataFrame(res2.forecast(steps=5)[0]).plot(title="5 Day Futures Price Forecast") pd.DataFrame(res2.forecast(steps=5)[0]) from statsmodels.graphics.tsaplots import plot_acf, plot_pacf plot_acf(yen_futures_settle['Settle'], lags=30, zero=False) plot_pacf(yen_futures_settle['Settle'], lags=30, zero=False) ``` --- # Volatility Forecasting with GARCH Rather than predicting returns, let's forecast near-term **volatility** of Japanese Yen futures returns. 
Using futures Settle *Returns*, estimate a GARCH model 1. GARCH: Create a GARCH model and fit it to the returns data.
github_jupyter
def getTCGA(disease):
    """Build the path of the TCGA count-matrix file for each disease code.

    Args:
        disease: iterable of TCGA disease abbreviations (e.g. 'BRCA').

    Returns:
        List of gzipped TSV file paths, one per code, in input order.
    """
    template = "/srv/nas/mk2/projects/pan-cancer/TCGA_CCLE_GCP/TCGA/TCGA_{}_counts.tsv.gz"
    paths = []
    for code in disease:
        paths.append(template.format(code))
    return paths
""" quantiles = df.quantile(q=q, axis=1) norm = df.divide(quantiles, axis=0) return norm def process_TCGA(disease=['BRCA', 'LUAD', 'KIRC', 'THCA', 'PRAD', 'SKCM']): base="/srv/nas/mk2/projects/pan-cancer/TCGA_CCLE_GCP" # get files tcga_files = getTCGA(disease) # read meta/data tcga_meta = pd.read_csv(os.path.join(base, "TCGA/TCGA_GDC_ID_MAP.tsv"), sep="\t") tcga_raw = readGCP(tcga_files, mean=True) # combine samples tcga_raw = pd.concat(tcga_raw.values()) # Upper quartile normalization tcga_raw = uq_norm(tcga_raw) # log norm tcga = tcga_raw.transform(np.log1p) return tcga, tcga_meta def generate_fsets(data, n_features, steps=5): r = np.linspace(start=1, stop=n_features, num=steps, dtype='int') idx = [np.random.choice(data.shape[1], size=i, replace=False) for i in r] return idx def feature_training(train_data, train_labels, test_data, test_labels, feature_idx, embedding, exp_dir, cuda=True): # Meta data meta_data = {"n_features":[], "model":[], "ANMI":[]} # Params batch_size = 8 kwargs = {'num_workers': 10, 'pin_memory': True} if cuda else {'num_workers': 10} # Feature Index for batch, feat in enumerate(feature_idx): print("Number features: {}\n".format(len(feat))) exp_data = {'feature_idx':feat} # Define data siamese_train_dataset = SiameseDataset(data=train_data.iloc[:,feat], labels=train_labels, train=True) siamese_test_dataset = SiameseDataset(data=test_data.iloc[:,feat], labels=test_labels, train=False) # Loaders siamese_train_loader = torch.utils.data.DataLoader(siamese_train_dataset, batch_size=batch_size, shuffle=True, **kwargs) siamese_test_loader = torch.utils.data.DataLoader(siamese_test_dataset, batch_size=batch_size, shuffle=False, **kwargs) # Instantiate model n_samples, n_features = siamese_train_dataset.train_data.shape for i in range(3): nmodel = 'model_{}'.format(i) print("\t{}".format(nmodel)) embedding_net = EmbeddingNet(n_features, embedding) model = SiameseNet(embedding_net) if cuda: model.cuda() # Parameters margin = 1. 
loss_fn = ContrastiveLoss(margin) lr = 1e-3 optimizer = optim.Adam(model.parameters(), lr=lr) scheduler = lr_scheduler.StepLR(optimizer, 8, gamma=0.1, last_epoch=-1) n_epochs = 10 log_interval = round(len(siamese_train_dataset)/1/batch_size) # Train train_loss, val_loss = fit(siamese_train_loader, siamese_test_loader, model, loss_fn, optimizer, scheduler, n_epochs, cuda, log_interval) # Test Embeddings val_embeddings_baseline, val_labels_baseline = vis.extract_embeddings(siamese_test_dataset.test_data, siamese_test_dataset.labels, model) # Evaluation n_clusters = len(np.unique(test_labels)) kmeans = KMeans(n_clusters=n_clusters) siamese_clusters = kmeans.fit_predict(val_embeddings_baseline) anmi = ANMI(siamese_clusters, val_labels_baseline) # Store meta_data['n_features'].append(len(feat)) meta_data['model'].append(nmodel) meta_data['ANMI'].append(anmi) exp_data[nmodel] = {'data': (val_embeddings_baseline, val_labels_baseline), 'loss': (train_loss, val_loss), 'ANMI': anmi} pd.to_pickle(exp_data, os.path.join(exp_dir, "model_{}.pkl".format(len(feat)))) pd.to_pickle(meta_data, os.path.join(exp_dir, "model_meta_data.pkl")) def main(disease, sample_type, **kwargs): # GPUs os.environ["CUDA_VISIBLE_DEVICES"] = kwargs['device'] cuda = torch.cuda.is_available() print("Cuda is available: {}".format(cuda)) # Read / write / process tcga, tcga_meta = process_TCGA(disease) # Feature design feature_idx = generate_fsets(tcga, n_features=kwargs['n_features'], steps=kwargs['steps']) # Experiment design hierarchy = OrderedDict({'Disease':disease, 'Sample Type':sample_type}) # Define experiment exp = Experiment(meta_data=tcga_meta, hierarchy=hierarchy, index='CGHubAnalysisID', cases='Case ID', min_samples=20) # Train / Test split exp.train_test_split(cases='Case ID') # Return data train_data, train_labels = exp.get_data(tcga, subset="train", dtype=np.float32) test_data, test_labels = exp.get_data(tcga, subset="test", dtype=np.float32) # randomize labels 
np.random.shuffle(train_labels) # Path *fix* dtime = datetime.datetime.today().strftime("%Y.%m.%d_%H:%M") exp_dir = "/srv/nas/mk2/projects/pan-cancer/experiments/feature_sel/{}_{}_{}_{}_{}-{}".format(dtime, kwargs['note'], len(exp.labels_dict), kwargs['embedding'], kwargs['n_features'], kwargs['steps']) pathlib.Path(exp_dir).mkdir(parents=True, exist_ok=False) print('Saving to: \n{}'.format(exp_dir)) # Meta data experiments = {'experiment': exp, 'train':(train_data, train_labels), 'test': (test_data, test_labels)} pd.to_pickle(experiments, os.path.join(exp_dir, "experiment_meta_data.pkl")) # Training feature_training(train_data, train_labels, test_data, test_labels, feature_idx, kwargs['embedding'], exp_dir) ``` ### Setup ``` disease = ['BRCA', 'LUAD', 'KIRC', 'THCA', 'PRAD', 'SKCM'] sample_type = ['Primary Tumor', 'Solid Tissue Normal'] params = {"device":"4", "note":"shuffle", "n_features":150, "steps":50, "embedding":2} main(disease=disease, sample_type=sample_type, **params) ```
github_jupyter
We can directly use the pretrained model to do drug repurposing.
``` import pandas as pd import numpy as np import sys import csv sys.path.insert(1, '../utils') from utils import download_and_extract download_and_extract() entity_idmap_file = '../data/drkg/embed/entities.tsv' relation_idmap_file = '../data/drkg/embed/relations.tsv' ``` ## Get embeddings for genes and drugs ``` # Get drugname/disease name to entity ID mappings entity_map = {} entity_id_map = {} relation_map = {} with open(entity_idmap_file, newline='', encoding='utf-8') as csvfile: reader = csv.DictReader(csvfile, delimiter='\t', fieldnames=['name','id']) for row_val in reader: entity_map[row_val['name']] = int(row_val['id']) entity_id_map[int(row_val['id'])] = row_val['name'] with open(relation_idmap_file, newline='', encoding='utf-8') as csvfile: reader = csv.DictReader(csvfile, delimiter='\t', fieldnames=['name','id']) for row_val in reader: relation_map[row_val['name']] = int(row_val['id']) # handle the ID mapping drug_ids = [] gene_ids = [] for drug in drug_list: drug_ids.append(entity_map[drug]) for gene in cov_related_genes: gene_ids.append(entity_map[gene]) treatment_rid = [relation_map[treat] for treat in treatment] # Load embeddings import torch as th entity_emb = np.load('../data/drkg/embed/DRKG_TransE_l2_entity.npy') rel_emb = np.load('../data/drkg/embed/DRKG_TransE_l2_relation.npy') drug_ids = th.tensor(drug_ids).long() gene_ids = th.tensor(gene_ids).long() treatment_rid = th.tensor(treatment_rid) drug_emb = th.tensor(entity_emb[drug_ids]) treatment_embs = [th.tensor(rel_emb[rid]) for rid in treatment_rid] ``` ## Drug Repurposing Based on Edge Score We use following algorithm to calculate the edge score. Note, here we use logsigmiod to make all scores < 0. The larger the score is, the stronger the $h$ will have $r$ with $t$. $\mathbf{d} = \gamma - ||\mathbf{h}+\mathbf{r}-\mathbf{t}||_{2}$ $\mathbf{score} = \log\left(\frac{1}{1+\exp(\mathbf{-d})}\right)$ When doing drug repurposing, we only use the treatment related relations. 
import torch.nn.functional as fn

# margin used when the TransE embeddings were trained
gamma = 12.0

def transE_l2(head, rel, tail):
    """TransE score with an L2 norm: gamma - ||head + rel - tail||_2.

    Larger scores mean the triple is more plausible; a perfect translation
    (head + rel == tail) scores exactly gamma.
    """
    translation_error = head + rel - tail
    return gamma - th.norm(translation_error, p=2, dim=-1)
``` maxhit=0 drugs_in_top_k={} drugsfr_in_top_k={} for i in range(len(scores_per_gene)): score=scores_per_gene[i] did=dids_per_gene[i] idx = th.flip(th.argsort(score), dims=[0]) score = score[idx].numpy() did = did[idx].numpy() #print(did) _, unique_indices = np.unique(did, return_index=True) topk=100 topk_indices = np.sort(unique_indices)[:topk] proposed_did = did[topk_indices] proposed_score = score[topk_indices] found_in_top_k=0 found_drugs="\n" for j in range(topk): drug = entity_id_map[int(proposed_did[j])][10:17] if clinical_drug_map.get(drug, None) is not None: found_in_top_k+=1 score = proposed_score[j] if drug in drugs_in_top_k: drugs_in_top_k[drug]+=1 drugsfr_in_top_k[drug]+=1/(j+1) else: drugs_in_top_k[drug]=1 drugsfr_in_top_k[drug]=1/(j+1) found_drugs+="[{}]{}\n".format(j, clinical_drug_map[drug]) #print("[{}]{}".format(j, clinical_drug_map[drug])) #print("{}\t{}".format(cov_related_genes[i], found_in_top_k)) if maxhit< found_in_top_k: maxhit=found_in_top_k maxgene=cov_related_genes[i] max_dugs=found_drugs print("{}\t{}\t{}".format(maxgene, maxhit,max_dugs)) res=[[drug, clinical_drug_map[drug] ,drugs_in_top_k[drug],drugsfr_in_top_k[drug]] for drug in drugs_in_top_k.keys()] res=reversed(sorted(res, key=lambda x : x[2])) for drug in res: print("{}\t{}\t{}\t{}".format(drug[0], drug[1] ,drug[2],drug[3])) ```
github_jupyter
``` !pip3 install palmerpenguins ``` ## Review Let's do some review with `pandas` and `dataframes` to ease everyone back in from spring break! We'll be looking at our friends the Palmer penguins. ``` from palmerpenguins import load_penguins import pandas as pd penguins = load_penguins() penguins.sample(5) ``` ## Filtering and Selecting from a Dataframe 1. For the following code cells, write code that will correctly filter and select from `penguins` ``` # All penguins with bills longer than 15mm penguins[penguins['bill_length_mm'] > 15] # All female penguins penguins[penguins['sex'] == 'female'] # All Gentoo penguins on Biscoe #penguins[(penguins['island'] == 'Biscoe') & (penguins['species'] == 'Gentoo')] penguins[(penguins['body_mass_g'] >= 4500) & (penguins['body_mass_g'] < 5000)].shape[0] # All penguins on Torgersen with body mass greater than 4000g penguins[(penguins['island'] == 'Torgersen') & (penguins['body_mass_g'] > 4000)] ``` 2. For the following code cells, find the correct values ``` # Descriptive statistics for the bill depth of Adelie penguins on Torgersen penguins.query("(island == 'Torgersen') & (species == 'Adelie')")['bill_depth_mm'].describe() # Number of each species of penguin on each island penguins.groupby('island')['species'].value_counts() # Correlation between flipper length and bill length for all penguins penguins['flipper_length_mm'].corr(penguins['bill_length_mm']) #penguins.corr() # Correlation between flipper length and bill length per species of penguin penguins.groupby('species')[['flipper_length_mm', 'bill_length_mm']].corr() penguins[['species', 'flipper_length_mm']].groupby('species') \ .corrwith(penguins['bill_length_mm']) \ .rename(columns={'flipper_length_mm': 'Correlation'}) ``` # Intro to Probability As we've discussed previously, one way of dealing with bias in statistics is through **randomness**, especially when thinking about sampling from a sample frame. 
{1, 2, 3, 4, 5, 6}
In other words, keep track of the number of 5s divided by how many rolls have been made so far. What do you estimate the final cumulative proportion to be? ``` from matplotlib import pyplot as plt import numpy as np import random num_fives = 0 num_rolls = 100000 cumulative_proportions = np.zeros((num_rolls,)) for i in range(1,num_rolls+1): roll = random.randint(1,6) if roll == 5: num_fives += 1 cumulative_proportions[i-1] = num_fives/i x_axis = np.arange(1,num_rolls+1) plt.xlabel("Number of rolls") plt.ylabel("Cumulative Proportion of Rolling a 5") plt.plot(x_axis, cumulative_proportions) ``` 11. Modify your code so that the die is rolled 1200 times, then 12000, then 120000. What happens to the final cumulative proportion as you increase the number of rolls? ## The Law of Large Numbers When we think about randomness in the context of *experimental probability*, the **probability** of an event is the number that the cumulative proportion approaches in the long run. ## Probability Practice Answer the following questions. You may add code cells and write code if that will help you answer the question. 12. Your friend flips a two-sided coin repeatedly to test whether the probability of heads is 1/2. The coin is flipped 10 times, and it lands on heads 2 times. Your friend comes to the conclusion that this coin isn't fair, since the probability of heads is 2/10 = 0.2 a. What's wrong with your friend's claim? not enough samples b. If the coin actually is fair, how would you prove it to your friend? flip it more times, law of large numbers 13. A researcher is conducting a study on the proportion of adults in the US who are in favor of mandatory covid vaccinations. The researcher comments that because of the law of large numbers, the sampling method doesn't matter as long as the sample size is large enough. Explain why this claim is incorrect. bias can come from other areas, such as really big sample from just one state 14. 
A market analyst is trying to model what will happen to a particular company's stock price. Let's say that we know for certain that each new day, the company's stock price has a 1/2 chance of going up, and a 1/2 chance of going down. Write code that models what happens to this company's stock price over the period of 75 days. What patterns do you notice? Would these patterns be the same if you ran the model again and again? it's totally random, but you'll have "trends" of going up or down, but never the same exact trend each time you run it ``` days = 7500 prices = np.zeros((days,)) price = 1000 for i in range(days): if random.randint(0,1) == 0: price += 25 else: price -= 25 prices[i] = price X = np.arange(days) plt.plot(X, prices) ``` 15. Your friend wants to make a bet with you: a fair coin will be flipped. If it lands on heads, you get a cup of coffee, but if it lands on tails, you lose a cup of coffee. The coin is flipped 2000 times. How many cups of coffee do you expect to have at the end? 0
github_jupyter
# Facial Keypoint Detection This project will be all about defining and training a convolutional neural network to perform facial keypoint detection, and using computer vision techniques to transform images of faces. The first step in any challenge like this will be to load and visualize the data you'll be working with. Let's take a look at some examples of images and corresponding facial keypoints. <img src='images/key_pts_example.png' width=50% height=50%/> Facial keypoints (also called facial landmarks) are the small magenta dots shown on each of the faces in the image above. In each training and test image, there is a single face and **68 keypoints, with coordinates (x, y), for that face**. These keypoints mark important areas of the face: the eyes, corners of the mouth, the nose, etc. These keypoints are relevant for a variety of tasks, such as face filters, emotion recognition, pose recognition, and so on. Here they are, numbered, and you can see that specific ranges of points match different portions of the face. <img src='images/landmarks_numbered.jpg' width=30% height=30%/> --- ## Load and Visualize Data The first step in working with any dataset is to become familiar with your data; you'll need to load in the images of faces and their keypoints and visualize them! This set of image data has been extracted from the [YouTube Faces Dataset](https://www.cs.tau.ac.il/~wolf/ytfaces/), which includes videos of people in YouTube videos. These videos have been fed through some processing steps and turned into sets of image frames containing one face and the associated keypoints. #### Training and Testing Data This facial keypoints dataset consists of 5770 color images. All of these images are separated into either a training or a test set of data. * 3462 of these images are training images, for you to use as you create a model to predict keypoints. * 2308 are test images, which will be used to test the accuracy of your model. 
Then, let's load in our training data and display some stats about that data to make sure it's been loaded in correctly!
def show_keypoints(image, key_pts):
    """Display an image with its facial keypoints overlaid.

    Args:
        image: array-like image accepted by ``plt.imshow``.
        key_pts: (N, 2) array of (x, y) keypoint coordinates.
    """
    xs = key_pts[:, 0]
    ys = key_pts[:, 1]
    plt.imshow(image)
    # small magenta dots, matching the example figures above
    plt.scatter(xs, ys, s=20, marker='.', c='m')
from torch.utils.data import Dataset, DataLoader


class FacialKeypointsDataset(Dataset):
    """Face Landmarks dataset.

    Reads the annotation CSV once up front, but loads each image lazily in
    ``__getitem__`` so the whole image set never has to fit in memory.
    A sample is a dict ``{'image': ndarray, 'keypoints': (N, 2) float array}``.
    """

    def __init__(self, csv_file, root_dir, transform=None):
        """
        Args:
            csv_file (string): Path to the csv file with annotations.
            root_dir (string): Directory with all the images.
            transform (callable, optional): Optional transform to be applied
                on a sample.
        """
        self.key_pts_frame = pd.read_csv(csv_file)
        self.root_dir = root_dir
        self.transform = transform

    def __len__(self):
        return len(self.key_pts_frame)

    def __getitem__(self, idx):
        # column 0 of the CSV holds the image file name
        image_name = os.path.join(self.root_dir,
                                  self.key_pts_frame.iloc[idx, 0])

        image = mpimg.imread(image_name)

        # if image has an alpha color channel, drop it and keep RGB only
        if image.shape[2] == 4:
            image = image[:, :, 0:3]

        # DataFrame.as_matrix() was removed in pandas 0.25; to_numpy() is the
        # supported equivalent and returns the same array here.
        key_pts = self.key_pts_frame.iloc[idx, 1:].to_numpy()
        key_pts = key_pts.astype('float').reshape(-1, 2)
        sample = {'image': image, 'keypoints': key_pts}

        if self.transform:
            sample = self.transform(sample)

        return sample
``` # Construct the dataset face_dataset = FacialKeypointsDataset(csv_file='/data/training_frames_keypoints.csv', root_dir='/data/training/') # print some stats about the dataset print('Length of dataset: ', len(face_dataset)) # Display a few of the images from the dataset num_to_display = 3 for i in range(num_to_display): # define the size of images fig = plt.figure(figsize=(20,10)) # randomly select a sample rand_i = np.random.randint(0, len(face_dataset)) sample = face_dataset[rand_i] # print the shape of the image and keypoints print(i, sample['image'].shape, sample['keypoints'].shape) ax = plt.subplot(1, num_to_display, i + 1) ax.set_title('Sample #{}'.format(i)) # Using the same display function, defined earlier show_keypoints(sample['image'], sample['keypoints']) ``` ## Transforms Now, the images above are not of the same size, and neural networks often expect images that are standardized; a fixed size, with a normalized range for color ranges and coordinates, and (for PyTorch) converted from numpy lists and arrays to Tensors. Therefore, we will need to write some pre-processing code. Let's create four transforms: - ``Normalize``: to convert a color image to grayscale values with a range of [0,1] and normalize the keypoints to be in a range of about [-1, 1] - ``Rescale``: to rescale an image to a desired size. - ``RandomCrop``: to crop an image randomly. - ``ToTensor``: to convert numpy images to torch images. We will write them as callable classes instead of simple functions so that parameters of the transform need not be passed everytime it's called. For this, we just need to implement ``__call__`` method and (if we require parameters to be passed in), the ``__init__`` method. We can then use a transform like this: tx = Transform(params) transformed_sample = tx(sample) Observe below how these transforms are generally applied to both the image and its keypoints. 
import torch
from torchvision import transforms, utils

# transforms

class Normalize(object):
    """Convert a color image to grayscale and normalize the color range to [0,1].

    Also rescales keypoint coordinates to roughly [-1, 1] using fixed
    constants (mean 100, spread 50) that approximate this dataset's
    keypoint statistics.
    """

    def __call__(self, sample):
        image, key_pts = sample['image'], sample['keypoints']
        
        image_copy = np.copy(image)
        key_pts_copy = np.copy(key_pts)

        # convert image to grayscale
        # NOTE(review): converts from `image`, not `image_copy` — cvtColor
        # returns a new array, so the earlier np.copy is redundant but harmless.
        image_copy = cv2.cvtColor(image, cv2.COLOR_RGB2GRAY)
        
        # scale color range from [0, 255] to [0, 1]
        image_copy=  image_copy/255.0
        
        # scale keypoints to be centered around 0 with a range of [-1, 1]
        # mean = 100, sqrt = 50, so, pts should be (pts - 100)/50
        key_pts_copy = (key_pts_copy - 100)/50.0

        return {'image': image_copy, 'keypoints': key_pts_copy}


class Rescale(object):
    """Rescale the image in a sample to a given size.

    Args:
        output_size (tuple or int): Desired output size. If tuple, output is
            matched to output_size. If int, smaller of image edges is matched
            to output_size keeping aspect ratio the same.
    """

    def __init__(self, output_size):
        assert isinstance(output_size, (int, tuple))
        self.output_size = output_size

    def __call__(self, sample):
        image, key_pts = sample['image'], sample['keypoints']

        h, w = image.shape[:2]
        if isinstance(self.output_size, int):
            # int target: scale the SHORTER edge to output_size, keep aspect ratio
            if h > w:
                new_h, new_w = self.output_size * h / w, self.output_size
            else:
                new_h, new_w = self.output_size, self.output_size * w / h
        else:
            new_h, new_w = self.output_size

        new_h, new_w = int(new_h), int(new_w)

        # cv2.resize takes the target size as (width, height)
        img = cv2.resize(image, (new_w, new_h))
        
        # scale the pts, too: keypoints are (x, y), so multiply by (w-ratio, h-ratio)
        key_pts = key_pts * [new_w / w, new_h / h]

        return {'image': img, 'keypoints': key_pts}
""" def __init__(self, output_size): assert isinstance(output_size, (int, tuple)) if isinstance(output_size, int): self.output_size = (output_size, output_size) else: assert len(output_size) == 2 self.output_size = output_size def __call__(self, sample): image, key_pts = sample['image'], sample['keypoints'] h, w = image.shape[:2] new_h, new_w = self.output_size top = np.random.randint(0, h - new_h) left = np.random.randint(0, w - new_w) image = image[top: top + new_h, left: left + new_w] key_pts = key_pts - [left, top] return {'image': image, 'keypoints': key_pts} class ToTensor(object): """Convert ndarrays in sample to Tensors.""" def __call__(self, sample): image, key_pts = sample['image'], sample['keypoints'] # if image has no grayscale color channel, add one if(len(image.shape) == 2): # add that third color dim image = image.reshape(image.shape[0], image.shape[1], 1) # swap color axis because # numpy image: H x W x C # torch image: C X H X W image = image.transpose((2, 0, 1)) return {'image': torch.from_numpy(image), 'keypoints': torch.from_numpy(key_pts)} ``` ## Test out the transforms Let's test these transforms out to make sure they behave as expected. As you look at each transform, note that, in this case, **order does matter**. For example, you cannot crop a image using a value smaller than the original image (and the orginal images vary in size!), but, if you first rescale the original image, you can then crop it to any size smaller than the rescaled size. 
``` # test out some of these transforms rescale = Rescale(100) crop = RandomCrop(50) composed = transforms.Compose([Rescale(250), RandomCrop(224)]) # apply the transforms to a sample image test_num = 500 sample = face_dataset[test_num] fig = plt.figure() for i, tx in enumerate([rescale, crop, composed]): transformed_sample = tx(sample) ax = plt.subplot(1, 3, i + 1) plt.tight_layout() ax.set_title(type(tx).__name__) show_keypoints(transformed_sample['image'], transformed_sample['keypoints']) plt.show() ``` ## Create the transformed dataset Apply the transforms in order to get grayscale images of the same shape. Verify that your transform works by printing out the shape of the resulting data (printing out a few examples should show you a consistent tensor size). ``` # define the data tranform # order matters! i.e. rescaling should come before a smaller crop data_transform = transforms.Compose([Rescale(250), RandomCrop(224), Normalize(), ToTensor()]) # create the transformed dataset transformed_dataset = FacialKeypointsDataset(csv_file='/data/training_frames_keypoints.csv', root_dir='/data/training/', transform=data_transform) # print some stats about the transformed data print('Number of images: ', len(transformed_dataset)) # make sure the sample tensors are the expected size for i in range(5): sample = transformed_dataset[i] print(i, sample['image'].size(), sample['keypoints'].size()) ``` ## Data Iteration and Batching Right now, we are iterating over this data using a ``for`` loop, but we are missing out on a lot of PyTorch's dataset capabilities, specifically the abilities to: - Batch the data - Shuffle the data - Load the data in parallel using ``multiprocessing`` workers. ``torch.utils.data.DataLoader`` is an iterator which provides all these features, and we'll see this in use in the *next* notebook, Notebook 2, when we load data in batches to train a neural network! --- ## Ready to Train! 
Now that you've seen how to load and transform our data, you're ready to build a neural network to train on this data. In the next notebook, you'll be tasked with creating a CNN for facial keypoint detection.
github_jupyter
# k-Nearest Neighbor (kNN) exercise *Complete and hand in this completed worksheet (including its outputs and any supporting code outside of the worksheet) with your assignment submission. For more details see the [assignments page](http://vision.stanford.edu/teaching/cs231n/assignments.html) on the course website.* The kNN classifier consists of two stages: - During training, the classifier takes the training data and simply remembers it - During testing, kNN classifies every test image by comparing to all training images and transfering the labels of the k most similar training examples - The value of k is cross-validated In this exercise you will implement these steps and understand the basic Image Classification pipeline, cross-validation, and gain proficiency in writing efficient, vectorized code. ``` # Run some setup code for this notebook. import random import numpy as np from cs231n.data_utils import load_CIFAR10 import matplotlib.pyplot as plt # This is a bit of magic to make matplotlib figures appear inline in the notebook # rather than in a new window. %matplotlib inline plt.rcParams['figure.figsize'] = (10.0, 8.0) # set default size of plots plt.rcParams['image.interpolation'] = 'nearest' plt.rcParams['image.cmap'] = 'gray' # Some more magic so that the notebook will reload external python modules; # see http://stackoverflow.com/questions/1907993/autoreload-of-modules-in-ipython %load_ext autoreload %autoreload 2 # Load the raw CIFAR-10 data. cifar10_dir = 'cs231n/datasets/cifar-10-batches-py' # Cleaning up variables to prevent loading data multiple times (which may cause memory issue) try: del X_train, y_train del X_test, y_test print('Clear previously loaded data.') except: pass X_train, y_train, X_test, y_test = load_CIFAR10(cifar10_dir) # As a sanity check, we print out the size of the training and test data. 
print('Training data shape: ', X_train.shape) print('Training labels shape: ', y_train.shape) print('Test data shape: ', X_test.shape) print('Test labels shape: ', y_test.shape) # Visualize some examples from the dataset. # We show a few examples of training images from each class. classes = ['plane', 'car', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse', 'ship', 'truck'] num_classes = len(classes) samples_per_class = 7 for y, cls in enumerate(classes): idxs = np.flatnonzero(y_train == y) idxs = np.random.choice(idxs, samples_per_class, replace=False) for i, idx in enumerate(idxs): plt_idx = i * num_classes + y + 1 plt.subplot(samples_per_class, num_classes, plt_idx) plt.imshow(X_train[idx].astype('uint8')) plt.axis('off') if i == 0: plt.title(cls) plt.show() # Subsample the data for more efficient code execution in this exercise num_training = 5000 mask = list(range(num_training)) X_train = X_train[mask] y_train = y_train[mask] num_test = 500 mask = list(range(num_test)) X_test = X_test[mask] y_test = y_test[mask] # Reshape the image data into rows X_train = np.reshape(X_train, (X_train.shape[0], -1)) X_test = np.reshape(X_test, (X_test.shape[0], -1)) print(X_train.shape, X_test.shape) from cs231n.classifiers import KNearestNeighbor # Create a kNN classifier instance. # Remember that training a kNN classifier is a noop: # the Classifier simply remembers the data and does no further processing classifier = KNearestNeighbor() classifier.train(X_train, y_train) ``` We would now like to classify the test data with the kNN classifier. Recall that we can break down this process into two steps: 1. First we must compute the distances between all test examples and all train examples. 2. Given these distances, for each test example we find the k nearest examples and have them vote for the label Lets begin with computing the distance matrix between all training and test examples. 
For example, if there are **Ntr** training examples and **Nte** test examples, this stage should result in a **Nte x Ntr** matrix where each element (i,j) is the distance between the i-th test and j-th train example. **Note: For the three distance computations that we require you to implement in this notebook, you may not use the np.linalg.norm() function that numpy provides.** First, open `cs231n/classifiers/k_nearest_neighbor.py` and implement the function `compute_distances_two_loops` that uses a (very inefficient) double loop over all pairs of (test, train) examples and computes the distance matrix one element at a time. ``` # Open cs231n/classifiers/k_nearest_neighbor.py and implement # compute_distances_two_loops. # Test your implementation: dists = classifier.compute_distances_two_loops(X_test) print(dists.shape) # We can visualize the distance matrix: each row is a single test example and # its distances to training examples plt.imshow(dists, interpolation='none') plt.show() ``` **Inline Question 1** Notice the structured patterns in the distance matrix, where some rows or columns are visible brighter. (Note that with the default color scheme black indicates low distances while white indicates high distances.) - What in the data is the cause behind the distinctly bright rows? - What causes the columns? $\color{blue}{\textit Your Answer:}$ *fill this in.* ``` # Now implement the function predict_labels and run the code below: # We use k = 1 (which is Nearest Neighbor). y_test_pred = classifier.predict_labels(dists, k=1) # Compute and print the fraction of correctly predicted examples num_correct = np.sum(y_test_pred == y_test) accuracy = float(num_correct) / num_test print('Got %d / %d correct => accuracy: %f' % (num_correct, num_test, accuracy)) ``` You should expect to see approximately `27%` accuracy. 
Now lets try out a larger `k`, say `k = 5`: ``` y_test_pred = classifier.predict_labels(dists, k=5) num_correct = np.sum(y_test_pred == y_test) accuracy = float(num_correct) / num_test print('Got %d / %d correct => accuracy: %f' % (num_correct, num_test, accuracy)) ``` You should expect to see a slightly better performance than with `k = 1`. **Inline Question 2** We can also use other distance metrics such as L1 distance. For pixel values $p_{ij}^{(k)}$ at location $(i,j)$ of some image $I_k$, the mean $\mu$ across all pixels over all images is $$\mu=\frac{1}{nhw}\sum_{k=1}^n\sum_{i=1}^{h}\sum_{j=1}^{w}p_{ij}^{(k)}$$ And the pixel-wise mean $\mu_{ij}$ across all images is $$\mu_{ij}=\frac{1}{n}\sum_{k=1}^np_{ij}^{(k)}.$$ The general standard deviation $\sigma$ and pixel-wise standard deviation $\sigma_{ij}$ is defined similarly. Which of the following preprocessing steps will not change the performance of a Nearest Neighbor classifier that uses L1 distance? Select all that apply. 1. Subtracting the mean $\mu$ ($\tilde{p}_{ij}^{(k)}=p_{ij}^{(k)}-\mu$.) 2. Subtracting the per pixel mean $\mu_{ij}$ ($\tilde{p}_{ij}^{(k)}=p_{ij}^{(k)}-\mu_{ij}$.) 3. Subtracting the mean $\mu$ and dividing by the standard deviation $\sigma$. 4. Subtracting the pixel-wise mean $\mu_{ij}$ and dividing by the pixel-wise standard deviation $\sigma_{ij}$. 5. Rotating the coordinate axes of the data. $\color{blue}{\textit Your Answer:}$ $\color{blue}{\textit Your Explanation:}$ ``` # Now lets speed up distance matrix computation by using partial vectorization # with one loop. Implement the function compute_distances_one_loop and run the # code below: dists_one = classifier.compute_distances_one_loop(X_test) # To ensure that our vectorized implementation is correct, we make sure that it # agrees with the naive implementation. There are many ways to decide whether # two matrices are similar; one of the simplest is the Frobenius norm. 
In case
# you haven't seen it before, the Frobenius norm of two matrices is the square
# root of the squared sum of differences of all elements; in other words, reshape
# the matrices into vectors and compute the Euclidean distance between them.
difference = np.linalg.norm(dists - dists_one, ord='fro')
print('One loop difference was: %f' % (difference, ))
# A tiny tolerance absorbs floating-point round-off between the two methods.
if difference < 0.001:
    print('Good! The distance matrices are the same')
else:
    print('Uh-oh! The distance matrices are different')

# Now implement the fully vectorized version inside compute_distances_no_loops
# and run the code
dists_two = classifier.compute_distances_no_loops(X_test)

# check that the distance matrix agrees with the one we computed before:
difference = np.linalg.norm(dists - dists_two, ord='fro')
print('No loop difference was: %f' % (difference, ))
if difference < 0.001:
    print('Good! The distance matrices are the same')
else:
    print('Uh-oh! The distance matrices are different')

# Let's compare how fast the implementations are
def time_function(f, *args):
    """
    Call a function f with args and return the time (in seconds)
    that it took to execute.

    Note: uses wall-clock time (time.time), so results include any
    OS scheduling noise; good enough for the coarse comparison below.
    """
    import time
    tic = time.time()
    f(*args)
    toc = time.time()
    return toc - tic

two_loop_time = time_function(classifier.compute_distances_two_loops, X_test)
print('Two loop version took %f seconds' % two_loop_time)

one_loop_time = time_function(classifier.compute_distances_one_loop, X_test)
print('One loop version took %f seconds' % one_loop_time)

no_loop_time = time_function(classifier.compute_distances_no_loops, X_test)
print('No loop version took %f seconds' % no_loop_time)

# You should see significantly faster performance with the fully vectorized implementation!

# NOTE: depending on what machine you're using,
# you might not see a speedup when you go from two loops to one loop,
# and might even see a slow-down.
```

### Cross-validation

We have implemented the k-Nearest Neighbor classifier but we set the value k = 5 arbitrarily.
We will now determine the best value of this hyperparameter with cross-validation. ``` num_folds = 5 k_choices = [1, 3, 5, 8, 10, 12, 15, 20, 50, 100] X_train_folds = [] y_train_folds = [] ################################################################################ # TODO: # # Split up the training data into folds. After splitting, X_train_folds and # # y_train_folds should each be lists of length num_folds, where # # y_train_folds[i] is the label vector for the points in X_train_folds[i]. # # Hint: Look up the numpy array_split function. # ################################################################################ # *****START OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)***** pass # *****END OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)***** # A dictionary holding the accuracies for different values of k that we find # when running cross-validation. After running cross-validation, # k_to_accuracies[k] should be a list of length num_folds giving the different # accuracy values that we found when using that value of k. k_to_accuracies = {} ################################################################################ # TODO: # # Perform k-fold cross validation to find the best value of k. For each # # possible value of k, run the k-nearest-neighbor algorithm num_folds times, # # where in each case you use all but one of the folds as training data and the # # last fold as a validation set. Store the accuracies for all fold and all # # values of k in the k_to_accuracies dictionary. 
# ################################################################################ # *****START OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)***** pass # *****END OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)***** # Print out the computed accuracies for k in sorted(k_to_accuracies): for accuracy in k_to_accuracies[k]: print('k = %d, accuracy = %f' % (k, accuracy)) # plot the raw observations for k in k_choices: accuracies = k_to_accuracies[k] plt.scatter([k] * len(accuracies), accuracies) # plot the trend line with error bars that correspond to standard deviation accuracies_mean = np.array([np.mean(v) for k,v in sorted(k_to_accuracies.items())]) accuracies_std = np.array([np.std(v) for k,v in sorted(k_to_accuracies.items())]) plt.errorbar(k_choices, accuracies_mean, yerr=accuracies_std) plt.title('Cross-validation on k') plt.xlabel('k') plt.ylabel('Cross-validation accuracy') plt.show() # Based on the cross-validation results above, choose the best value for k, # retrain the classifier using all the training data, and test it on the test # data. You should be able to get above 28% accuracy on the test data. best_k = 1 classifier = KNearestNeighbor() classifier.train(X_train, y_train) y_test_pred = classifier.predict(X_test, k=best_k) # Compute and display the accuracy num_correct = np.sum(y_test_pred == y_test) accuracy = float(num_correct) / num_test print('Got %d / %d correct => accuracy: %f' % (num_correct, num_test, accuracy)) ``` **Inline Question 3** Which of the following statements about $k$-Nearest Neighbor ($k$-NN) are true in a classification setting, and for all $k$? Select all that apply. 1. The decision boundary of the k-NN classifier is linear. 2. The training error of a 1-NN will always be lower than or equal to that of 5-NN. 3. The test error of a 1-NN will always be lower than that of a 5-NN. 4. The time needed to classify a test example with the k-NN classifier grows with the size of the training set. 5. None of the above. 
$\color{blue}{\textit Your Answer:}$ $\color{blue}{\textit Your Explanation:}$
github_jupyter
<a href="https://colab.research.google.com/github/VanessaMMH/EDA-B/blob/main/EDA_kdtree.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a> #Implementacion de KD-Tree ``` import math import numpy as np class kdTree_node: def __init__(self, x, y, split_along_x=True): self.x = x self.y = y self.xmax = math.inf self.ymax = math.inf self.xmin = -math.inf self.ymin = -math.inf self.split_along_x = split_along_x self.left = None self.right = None def __str__(self): return "(x="+str(self.x)+",y="+str(self.y)+")" class kdTree: def __init__(self, xs, ys): i_x_sort = np.argsort(xs) i_y_sort = np.argsort(ys) self.root = self.__buildTree(xs, ys, i_x_sort, i_y_sort, True) def print(self): self.__printSubtree(self.root) def __printSubtree(self, node): if node.left!=None: self.__printSubtree(node.left) print(node) if node.right!=None: self.__printSubtree(node.right) def __select(self, isorted, isecond): iy = np.array([]).astype(int) for i in isecond: r = (isorted==i) if r.any()==True: iy=np.append(iy,i) return iy def __buildTree(self, xs, ys, ix, iy, splitx=None, father=None): l = ix.shape[0] med = l//2 # Split along the xaxis if splitx: n = kdTree_node(xs[ix[med]], ys[ix[med]], True) if father != None: n.xmin = father.xmin n.xmax = father.xmax n.ymin = father.ymin n.ymax = father.ymax if n.y <= father.y: n.ymax = father.y else: n.ymin = father.y if med > 0: sub_iy = self.__select(ix[:med],iy) n.left = self.__buildTree(xs, ys, ix[:med], sub_iy, False, n) if med+1<l: sub_iy = self.__select(ix[med+1:], iy) n.right = self.__buildTree(xs, ys, ix[med+1:], sub_iy, False, n) # This node corresponds to a split of the data along y else: n = kdTree_node(xs[iy[med]], ys[iy[med]], False) if father != None: n.xmin = father.xmin n.xmax = father.xmax n.ymin = father.ymin n.ymax = father.ymax if n.x < father.x: n.xmax = father.x else: n.xmin = father.x if med > 0: sub_ix = self.__select(iy[:med],ix) n.left = self.__buildTree(xs, 
ys, sub_ix, iy[:med], True, n) if med+1<l: sub_ix = self.__select(iy[med+1:], ix) n.right = self.__buildTree(xs, ys, sub_ix, iy[med+1:], True, n) return n def is_fully_contained(self, node, r): if node: if r['xmin'] <= node.xmin and r['xmax'] >= node.xmax and r['ymin'] <= node.ymin and r['ymax'] >= node.ymax: return True return False def is_intersect(self, node, r): if node: if r['ymin'] > node.ymax or r['ymax'] < node.ymin or r['xmin'] > node.xmax or r['xmax'] < node.xmin: return False return True def range_search(self, node, r): results = [] if node == None: return results if node.left == None and node.right == None: if r['xmin'] <= node.x and r['xmax'] >= node.x and r['ymin'] <= node.y and r['ymax'] >= node.y: results.append(node) else: if r['xmin'] <= node.x and r['xmax'] >= node.x and r['ymin'] <= node.y and r['ymax'] >= node.y: results.append(node) if self.is_fully_contained(node.left, r): results += self.traverse(node.left) elif self.is_intersect(node.left, r): results += self.range_search(node.left, r) if self.is_fully_contained(node.right, r): results += self.traverse(node.right) elif self.is_intersect(node.right, r): results += self.range_search(node.right, r) return results def traverse(self, node): members = [] if node: members += self.traverse(node.left) members.append(node) members += self.traverse(node.right) return members from google.colab import drive drive.mount('/content/drive') ``` #Importando el archivo covid.xlsx ``` import pandas as pd import numpy as np file_loc = "/content/drive/MyDrive/EDA/covid_kaggle.xlsx" df = pd.read_excel(file_loc, index_col=None, na_values=['NA'], usecols="B,C,G,I,J,M,N,O,Q,S,AP") df.head(10) df = df.rename( columns = {'Patient age quantile':'Patient', 'SARS-Cov-2 exam result':'SARS-Cov-2', 'Proteina C reativa mg/dL':'Proteina_C'}) # Modificar el nombre de un índice individualmente df.head(10) ``` #Modificando columna SARS-cov ``` df['SARS-Cov-2'] = df['SARS-Cov-2'].replace('negative', 0) df['SARS-Cov-2'] = 
df['SARS-Cov-2'].replace('positive', 1) df def correlation_matrix(df): from matplotlib import pyplot as plt from matplotlib import cm as cm fig = plt.figure(figsize=(16,12)) ax1 = fig.add_subplot(111) cmap = cm.get_cmap('jet', 30) cax = ax1.imshow(df.corr(), interpolation="nearest", cmap=cmap) ax1.grid(True) plt.title('Covid data set features correlation\n',fontsize=15) labels=df.columns ax1.set_xticklabels(labels,fontsize=9) ax1.set_yticklabels(labels,fontsize=9) # Add colorbar, make sure to specify tick locations to match desired ticklabels fig.colorbar(cax, ticks=[0.1*i for i in range(-11,11)]) plt.show() correlation_matrix(df) df.iloc[:,:].describe() df2 = df.dropna() from sklearn.decomposition import PCA from sklearn.preprocessing import StandardScaler ss=StandardScaler() X_cols=["Patient","SARS-Cov-2","Hematocrit","Platelets","PlateletVolume ","MCHC","Leukocytes","Basophils","Eosinophils","Monocytes","Proteina_C"] df2[X_cols]=ss.fit_transform(df2[X_cols]) df2.head(10) pca2=PCA(n_components=2,random_state=42) pca_2=pca2.fit_transform(df2[X_cols]) pca_2[:5] #df_pca=pd.DataFrame({'PCA1':pca_2[:,0],'PCA2':pca_2[:1],'Class':df2['Class']}) df_pca= pd.DataFrame (data = pca_2 , columns = ['PCA1', 'PCA2']) df_pca.head(5) ``` #Representación del árbol kd ``` import pandas as pd import geopandas as gpd import numpy as np import math import matplotlib.pyplot as plt def drawSubtree(node,s, ax): if node.left!=None: drawSubtree(node.left,s+1, ax) # Draw the current node as a line segment width = 8-s if node.split_along_x==True: if (width>0): ax.plot([node.x,node.x],[max(-5,node.ymin),min(node.ymax,5)],linewidth=width) else: if (width>0): ax.plot([max(-5,node.xmin),min(node.xmax,5)],[node.y,node.y],linewidth=width) if node.right!=None: drawSubtree(node.right,s+1, ax) def draw(kdTree, ax): drawSubtree(kdTree.root,0, ax) if __name__=='__main__': kdt = kdTree(pca_2[:,0],pca_2[:,1]) # Plot the station positions and the number of slots fig, axes = plt.subplots(nrows=1, ncols=2, 
figsize=(16,8)) axes[0].scatter(pca_2[:,0],pca_2[:,1]) plt.gca().set_aspect('equal', adjustable='box') draw(kdt, axes[1]) plt.show() ``` #Buscar un rango en una región en particular ``` if __name__=='__main__': kdt = kdTree(pca_2[:,0],pca_2[:,1]) xmin = int(input("Enter min x: ")) xmax = int(input("Enter max x: ")) ymin = int(input("Enter min y: ")) ymax = int(input("Enter max y: ")) r = {'xmin':xmin ,'xmax':xmax, 'ymin':ymin, 'ymax': ymax} node = kdTree_node(xmax, ymin) results = kdt.range_search(kdt.root, r) x = [node.x for node in results] y = [node.y for node in results] print(pd.DataFrame({'x':x, 'y':y})) ```
github_jupyter
![BTS](https://github.com/vfp1/bts-mbds-data-science-foundations-2019/raw/master/sessions/img/Logo-BTS.jpg) # Session 06: Market Basket Analysis and Recommender Systems ### Victor F. Pajuelo Madrigal <victor.pajuelo@bts.tech> - Advanced Data Analysis (27-04-2020) Open this notebook in Google Colaboratory: [![Open in Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/vfp1/bts-advanced-data-analysis-2020/blob/master/S06_MBA/06_Advanced_Data_Analysis_MBA_TF_NOTsolved.ipynb) **Resources (code patched and updated from):** * O'Reilly library * fast.ai # Market Basket Analysis ## Simulation of Association Rule Metrics UUID - #S6C1 ``` from os.path import join import pandas as pd import numpy as np import seaborn as sns current_palette = sns.color_palette() %matplotlib inline import matplotlib.pyplot as plt from matplotlib_venn import venn2 from matplotlib import rcParams from matplotlib.ticker import FuncFormatter from scipy.stats import fisher_exact from ipywidgets import interact, IntSlider, FloatSlider total_widget = IntSlider(min=10, max=2000, step=10, value=500) antecedent_widget = IntSlider(min=5, max=2000, step=5, value=100) consequent_widget = IntSlider(min=5, max=2000, step=5, value=100) joint_widget = FloatSlider(min=.01, max=1.0, step=.01, value=.25) def plot_metrics(antecedent, consequent, joint_percent, total): """Interactive Venn Diagram of joint transactions and plot of support, confidence, and lift Slider Inputs: - total: total transactions for all itemsets - antecedent, consequent: all transactions involving either itemset - joint_percent: percentage of (smaller of) antecedent/consequent involving both itemsets Venn Diagram Calculations: - joint = joint_percent * min(antecedent, consequent) - antecedent, consequent: original values - joint transactions Metric Calculations: - Support Antecedent: antecedent/total - Support Consequent: Consequent/total - Support Joint Transactions: joint/total - Rule 
Confidence: Support Joint Transactions / total - Rule Lift: Support Joint Transactions / (Support Antecedent * Support Consequent) """ fig = plt.figure(figsize=(15, 8)) ax1 = plt.subplot2grid((2, 2), (0, 0)) ax2 = plt.subplot2grid((2, 2), (0, 1)) ax3 = plt.subplot2grid((2, 2), (1, 0)) ax4 = plt.subplot2grid((2, 2), (1, 1)) joint = int(joint_percent * min(antecedent, consequent)) contingency_table = [[joint, consequent - joint], [antecedent - joint, max(total - antecedent - consequent + joint, 0)]] contingency_df = pd.DataFrame(contingency_table, columns=['Consequent', 'Not Consequent'], index=['Antecedent', 'Not Antecedent']).astype(int) h = sns.heatmap(contingency_df, ax=ax1, annot=True, cmap='Blues', square=True, vmin=0, vmax=total, fmt='.0f') h.set_yticklabels(h.get_yticklabels(), rotation = 0) ax1.set_title('Contingency Table') v = venn2(subsets=(antecedent - joint, consequent - joint, joint), set_labels=['Antecedent', 'Consequent'], set_colors=current_palette[:2], ax=ax2) ax2.set_title("{} Transactions".format(total)) support_antecedent = antecedent / total support_consequent = consequent / total support = pd.Series({'Antecedent': support_antecedent, 'Consequent': support_consequent}) support.plot(kind='bar', ax=ax3, color=current_palette[:2], title='Support', ylim=(0, 1), rot=0) ax3.yaxis.set_major_formatter( FuncFormatter(lambda y, _: '{:.0%}'.format(y))) support_joint = joint / total confidence = support_joint / support_antecedent lift = support_joint / (support_antecedent * support_consequent) _, pvalue = fisher_exact(contingency_table, alternative='greater') metrics = pd.Series( {'Confidence': confidence, 'Lift': lift, 'p-Value': pvalue}) metrics.plot(kind='bar', ax=ax4, color=current_palette[2:5], rot=0, ylim=(0, 2)) ax3.yaxis.set_major_formatter( FuncFormatter(lambda y, _: '{:.0%}'.format(y))) for ax, series in {ax3: support, ax4: metrics}.items(): rects = ax.patches labels = ['{:.0%}'.format(x) for x in series.tolist()] for rect, label in zip(rects, 
labels): height = min(rect.get_height() + .01, 2.05) ax.text(rect.get_x() + rect.get_width() / 2, height, label, ha='center', va='bottom') plt.suptitle('Assocation Rule Analysis {Antecedent => Consequent}') plt.tight_layout() plt.subplots_adjust(top=0.9) plt.show() interact(plot_metrics, antecedent=antecedent_widget, consequent=consequent_widget, joint_percent=joint_widget, total=total_widget); ``` ## The apriori algorithm UUID - #S6C2 ### Apyori library Apyori is an easy and simple implementation of the apriori algorithm. It **does not incorporate the p-value thresholding**. Check the source code [here](https://github.com/ymoch/apyori/blob/master/apyori.py). ``` !pip install apyori import numpy as np import matplotlib.pyplot as plt import pandas as pd from apyori import apriori ``` #### Preprocessing the data from groceries ``` grocery_dataset = 'https://raw.githubusercontent.com/vfp1/bts-advanced-data-analysis-2020/master/S06_MBA/grocery_transactions.csv' baskets = pd.read_csv(grocery_dataset, header=None) baskets.iloc[:10, :10] # Let's replace empty values with 0 baskets.fillna(0,inplace=True) baskets.iloc[:10, :10] ``` Our `DataFrame` shape is important, since we need to know how many rows do we have ``` baskets.shape baskets.info() ``` #### Turning DataFrame into Apyori friendly format Ayori expects that our dataset is in the form of a list of lists. The whole dataset should be a big list and each transaction in the dataset is an inner list within it. So we need to convert our data into a list of lists. ``` records = [] for i in range(0, baskets.shape[0]): records.append([str(baskets.values[i,j]) for j in range(0, 20) if str(baskets.values[i,j])!='0']) records ``` #### Let's run Apyori Let's check the parameters involved in the apriori method. This is a copy of the GitHub repo containing the method. ``` # Do not run this! This comes from Apyori library. 
Just here for showing parameters and how do they work def apriori(transactions, **kwargs): """ Executes Apriori algorithm and returns a RelationRecord generator. Arguments: transactions -- A transaction iterable object (eg. [['A', 'B'], ['B', 'C']]). Keyword arguments: min_support -- The minimum support of relations (float). min_confidence -- The minimum confidence of relations (float). min_lift -- The minimum lift of relations (float). max_length -- The maximum length of the relation (integer). """ # Parse the arguments. min_support = kwargs.get('min_support', 0.1) min_confidence = kwargs.get('min_confidence', 0.0) min_lift = kwargs.get('min_lift', 0.0) max_length = kwargs.get('max_length', None) # Check arguments. if min_support <= 0: raise ValueError('minimum support must be > 0') # For testing. _gen_support_records = kwargs.get( '_gen_support_records', gen_support_records) _gen_ordered_statistics = kwargs.get( '_gen_ordered_statistics', gen_ordered_statistics) _filter_ordered_statistics = kwargs.get( '_filter_ordered_statistics', filter_ordered_statistics) # Calculate supports. transaction_manager = TransactionManager.create(transactions) support_records = _gen_support_records( transaction_manager, min_support, max_length=max_length) # Calculate ordered stats. 
for support_record in support_records: ordered_statistics = list( _filter_ordered_statistics( _gen_ordered_statistics(transaction_manager, support_record), min_confidence=min_confidence, min_lift=min_lift, ) ) if not ordered_statistics: continue yield RelationRecord( support_record.items, support_record.support, ordered_statistics) ``` #### Association Rules ``` association_rules = apriori(records, min_support=0.01, min_confidence=0.2, min_lift=1, max_length=3) association_results = list(association_rules) print(len(association_results)) print(association_results[0]) ``` ### MLXTEND Library [Mlxtend](http://rasbt.github.io/mlxtend/) (machine learning extensions) is a Python library of useful tools for the day-to-day data science tasks. Check it out, it has some cool plotting and helper functions ``` import pandas as pd from mlxtend.preprocessing import TransactionEncoder from mlxtend.frequent_patterns import apriori from mlxtend.frequent_patterns import association_rules records_mlx = records ``` #### Preprocessing for MLXTEND Mlxtend needs a DataFrame in certain manner. Our *records* input works fine, if we treat it properly. ``` te = TransactionEncoder() te_ary = te.fit(records_mlx).transform(records_mlx) df = pd.DataFrame(te_ary, columns=te.columns_) df ``` #### Running apriori on Mlxtend ``` frequent_itemsets = apriori(df, min_support=0.01, use_colnames=True) frequent_itemsets ``` #### Running Association Rules with Mlxtend The metrics you will get with this is **confidence** and **lift**. The `min_threshold` variable relates to the percentage above the level of **confidence**. **Conviction** is a metric introduced which its value means that the consequent is highly depending on the antecedent. For instance, in the case of a perfect confidence score, the denominator becomes 0 (due to 1 - 1) for which the conviction score is defined as 'inf'. Similar to lift, if items are independent, the conviction is 1. 
$\text{conviction}(A\rightarrow C) = \frac{1 - \text{support}(C)}{1 - \text{confidence}(A\rightarrow C)}, \;\;\; \text{range: } [0, \infty]$ ``` association_rules(frequent_itemsets, metric="confidence", min_threshold=0.4) ``` Filter by rules with other metrics ``` rules = association_rules(frequent_itemsets, metric="lift", min_threshold=1.2) rules ``` #### Filter by items in DataFrame ``` rules["antecedent_len"] = rules["antecedents"].apply(lambda x: len(x)) rules rules[ (rules['antecedent_len'] >= 1) & (rules['confidence'] > 0.3) & (rules['lift'] > 1.2) ] ``` #### Filter by entries based on antecedents or consequents ``` rules[rules['antecedents'] == {'tropical fruit', 'whole milk'}] ``` #EXERCISE 1: best products from large baskets UUID - #S6E1 Find the best products (those that has a consequent with high lift) from baskets larger than 4 items on the dataset given below. --- ***Part from Optional Exercise at home*** Also, and this is more complex, try to also threshold by pvalue. Apryori does not account for having a pvalue thresholding. Will you be able to filter results by pvalue? ``` exercise_csv = 'https://raw.githubusercontent.com/vfp1/bts-advanced-data-analysis-2020/master/S06_MBA/groceries_exercise.csv' ``` # Recommender systems Let's build a movie recommender system. Perhaps better than Netflix... 
``` import numpy as np import pandas as pd from pathlib import Path from sklearn.model_selection import train_test_split from keras.layers import Embedding, Reshape,dot,Input,Dense from keras.models import Sequential,Model from keras.callbacks import Callback, EarlyStopping, ModelCheckpoint from keras.utils.vis_utils import plot_model import matplotlib.pyplot as plt ``` ## MovieLens 100k Dataset UUID - #S6C3 ``` !wget 'http://files.grouplens.org/datasets/movielens/ml-latest-small.zip' ``` ### Unzipping downloaded data ``` import zipfile with zipfile.ZipFile('ml-latest-small.zip', 'r') as zip_ref: zip_ref.extractall('.') data_dir = 'ml-latest-small/' !ls -ltr !ls -ltr ml-latest-small ``` ### Data Preprocessing ``` %matplotlib inline import numpy as np import pandas as pd from sklearn.model_selection import train_test_split from sklearn.preprocessing import LabelEncoder movie_ratings = pd.read_csv(data_dir + 'ratings.csv') movie_ratings.head() movies = pd.read_csv(data_dir + 'movies.csv') movies.head() ``` #### Check what the data is about ``` # Groupby user ID and its number of ratings g = movie_ratings.groupby('userId')['rating'].count() # Sort by the users with the highest number of ratings top_users = g.sort_values(ascending=False)[:15] # Groupby movieId and its number of ratings g = movie_ratings.groupby('movieId')['rating'].count() # Sort by the movies with the highest number of ratings top_movies = g.sort_values(ascending=False)[:15] # Join tables and due a crosstab top_r = movie_ratings.join(top_users, rsuffix='_r', how='inner', on='userId') top_r = top_r.join(top_movies, rsuffix='_r', how='inner', on='movieId') pd.crosstab(top_r.userId, top_r.movieId, top_r.rating, aggfunc=np.sum) ``` #### Encode the labels ``` # Encode users user_enc = LabelEncoder() movie_ratings['user'] = user_enc.fit_transform(movie_ratings['userId'].values) # Get the total number of users n_users = movie_ratings['user'].nunique() # Encode movies item_enc = LabelEncoder() 
movie_ratings['movie'] = item_enc.fit_transform(movie_ratings['movieId'].values) # Get the number of movies n_movies = movie_ratings['movie'].nunique() # Create the minimum rating and the maximum rating movie_ratings['rating'] = movie_ratings['rating'].values.astype(np.float32) min_rating = min(movie_ratings['rating']) max_rating = max(movie_ratings['rating']) n_users, n_movies, min_rating, max_rating movie_ratings.describe() ``` #### Split data into XY ``` X = movie_ratings[['user', 'movie']].values y = movie_ratings['rating'].values X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.1, random_state=42) X_train.shape, X_test.shape, y_train.shape, y_test.shape ``` #### Decide on the number of factors Decide on number of factors and create X_train, X_test tuples. Tuples consist of user and movie. ``` n_factors = 180 X_train_array = [X_train[:, 0], X_train[:, 1]] X_test_array = [X_test[:, 0], X_test[:, 1]] from sklearn.preprocessing import StandardScaler #df_crosstab=pd.crosstab(top_r.userId, top_r.movieId, top_r.rating, aggfunc=np.sum) df_cross=movie_ratings.join(movies, rsuffix='_r', how='inner', on='movieId') df_crosstab =pd.crosstab(df_cross.userId, df_cross.movieId, df_cross.rating, aggfunc=np.sum) x = StandardScaler().fit_transform(df_crosstab) x from sklearn.impute import SimpleImputer imp = SimpleImputer(missing_values=np.nan, strategy='mean') imp.fit(x) x=imp.transform(x) from sklearn.decomposition import PCA pca = PCA(n_components=500) principalComponents = pca.fit_transform(x) principalDf = pd.DataFrame(data = principalComponents) # , columns = ['principal component 1', 'principal component 2']) x.shape #principalComponents print(pca.explained_variance_ratio_.cumsum()) ``` ### Define and train the model ``` from keras.models import Model from keras.layers import Input, Reshape, Dot from keras.layers.embeddings import Embedding from keras.optimizers import Adam from keras.regularizers import l2 from keras.layers import Add, Activation, 
class EmbeddingLayer:
    """Callable factory: an Embedding lookup followed by a flattening Reshape.

    Instances are applied to a Keras tensor holding integer ids and return a
    dense (n_factors,) vector per example.
    """

    def __init__(self, n_items, n_factors):
        self.n_items = n_items
        self.n_factors = n_factors

    def __call__(self, x):
        # L2-regularized embedding table, he_normal initialized.
        embedded = Embedding(
            self.n_items,
            self.n_factors,
            embeddings_initializer='he_normal',
            embeddings_regularizer=l2(1e-6),
        )(x)
        # Drop the length-1 sequence axis so downstream layers see (n_factors,).
        return Reshape((self.n_factors,))(embedded)
# Build, visualize, and train the neural-network recommender.
model_neural = Recommender_NN(n_users, n_movies, n_factors, min_rating, max_rating)
model_neural.summary()
plot_model(model_neural, to_file='model_plot.png', show_shapes=True, show_layer_names=True)
# Bug fix: this previously called ``model_activation.fit`` again, so the
# freshly built ``model_neural`` was never trained. Train the NN model.
history = model_neural.fit(x=X_train_array, y=y_train, batch_size=64, epochs=5,
                           verbose=1, validation_data=(X_test_array, y_test))
EXERCISE 2: adding more features UUID - #S6E2 Improve this recommendation system by adding movie genre and timestamp embeddings. As well, try to bring the cavalry with the MLP part, with all what you have learned. Also, try to use callbacks and TensorBoard for EarlyStopping and Visualization
github_jupyter
## Preparing Interface Data ``` import json as json import pandas as pd ``` - Upload CVD Cell2PMID data and OS Cell2PMID data ``` with open("../../caseolap-cvd/data/textcube_cell2pmid.json",'r') as f1: texcube_cell2pmid_cvd = json.load(f1) with open("../../caseolap-os/data/textcube_cell2pmid.json",'r') as f1: texcube_cell2pmid_os = json.load(f1) ``` - CVD and OS catagories ``` CVD = ["CM","ARR","CHD","VD","IHD","CCS","VOO","OHD"] OS = ["IOS", "ROS","OOS"] ``` #### Intersection of Documents - Find the intersection of the documents among the CVDs and OSs ``` DATA = [] for os,cell2pmid_os in zip(OS,texcube_cell2pmid_os): data = {"OS":os} for cvd,cell2pmid_cvd in zip(CVD,texcube_cell2pmid_cvd): '''Find the intersection of two sets''' ints = set(cell2pmid_os).intersection(set(cell2pmid_cvd)) data.update({cvd:len(ints)}) DATA.append(data) #print(os,cvd,len(ints)) #DATA df = pd.DataFrame(DATA) df df.to_csv("cvd-os-ints-docs.csv") ``` ### Intersection of Proteins ``` data_cvd = pd.read_csv("../../caseolap-cvd/result/caseolap.csv") data_cvd = data_cvd.set_index("protein") data_cvd.head() data_os = pd.read_csv("../../caseolap-os/result/caseolap.csv") data_os = data_os.set_index("protein") data_os.head() CVD = ["CM","ARR","CHD","VD","IHD","CCS","VOO","OHD"] OS = ["IOS", "ROS","OOS"] data_cvd.loc["P05556"]["CM"] DATA = [] for os in OS: data = {"OS":os} os_name = data_os.index d_os = data_os[os] scored_name_os = [] for osname,os_score in zip(os_name,d_os): if os_score > 0: scored_name_os.append(osname) for cvd in CVD: total_score = 0 d_cvd = data_cvd[cvd] cvd_name = data_cvd.index scored_name_cvd = [] for cvdname,cvd_score in zip(cvd_name,d_cvd): if cvd_score > 0: scored_name_cvd.append(cvdname) ints = set(scored_name_os).intersection(set(scored_name_cvd)) for p in list(ints): cvd_score = data_cvd.loc[p][cvd] os_score = data_os.loc[p][os] total_score += cvd_score+os_score data.update({cvd: total_score}) print(os,cvd,len(ints),total_score) DATA.append(data) df2 = 
# Copy each protein's CVD scores from the CaseOLAP results into DATA.
# Proteins absent from the results keep their 0.0 defaults and are
# collected in ``unscored`` for later inspection.
unscored = []
for key, value in DATA.items():
    try:
        idata = data_cvd.loc[key, :]
    except KeyError:
        unscored.append(key)
        # Bug fix: without this ``continue`` the update loop below ran with a
        # stale ``idata`` from a previous iteration (or NameError on the first),
        # assigning the wrong scores to unscored proteins.
        continue
    for cvd in CVD:
        DATA[key].update({cvd: idata[cvd]})
# Copy each protein's OS scores from the CaseOLAP results into DATA,
# mirroring the CVD update above.
unscored = []
for key, value in DATA.items():
    try:
        idata = data_os.loc[key, :]
    except KeyError:
        unscored.append(key)
        # Bug fix: skip proteins with no OS results; previously the loop
        # below reused a stale ``idata`` and recorded incorrect scores.
        continue
    for os in OS:
        DATA[key].update({os: idata[os]})
github_jupyter
[this doc on github](https://github.com/dotnet/interactive/tree/main/samples/notebooks/polyglot) # Visualizing data using d3js **This is a work in progress.** It doesn't work yet in [Binder](https://mybinder.org/v2/gh/dotnet/interactive/master?urlpath=lab) because it relies on HTTP communication between the kernel and the Jupyter frontend. This notebooks uses directly [d3.js](https://d3js.org/) library to perform custom data visualisation. ``` var rnd = new Random(); var a = Enumerable.Range(1,rnd.Next(4,12)).Select( t => rnd.Next(t, t*10)).ToArray(); ``` Using [RequireJS](https://requirejs.org/) we import [d3.js](https://d3js.org/). We setup the rendering code, some SVG filter inspiredy by [Visual Cinnamon](https://www.visualcinnamon.com/) article on [gooey effect](https://www.visualcinnamon.com/2016/06/fun-data-visualizations-svg-gooey-effect). Using `interactive.csharp.getVariable` we fetch the variable `a` value. ``` #!javascript if (typeof (notebookScope.interval) !== 'undefined') { clearInterval(notebookScope.interval); } notebookScope.plot = (sgvSelector, variableName) => { let dtreeLoader = interactive.configureRequire({ paths: { d3: "https://d3js.org/d3.v6.min" } }); dtreeLoader(["d3"], function (d3) { let svg = d3. 
select(sgvSelector); svg.selectAll("defs").remove(); svg.selectAll("g").remove(); let defs = svg.append("defs"); let filter = defs.append("filter").attr("id", "gooeyCodeFilter"); filter.append("feGaussianBlur") .attr("in", "SourceGraphic") .attr("stdDeviation", "10") .attr("color-interpolation-filters", "sRGB") .attr("result", "blur"); filter.append("feColorMatrix") .attr("in", "blur") .attr("mode", "matrix") .attr("values", "1 0 0 0 0 0 1 0 0 0 0 0 1 0 0 0 0 0 19 -9") .attr("result", "gooey"); let container = d3 .select(sgvSelector) .append("g") .style("filter", "url(#gooeyCodeFilter)"); console.log(container); let updateD3Rendering = () => interactive.csharp.getVariable(variableName) .then(data => { var i = 0; var p = container .selectAll(".points") .data(data, (d, i) => i); p.transition() .duration(2000) .style("fill", d => d3.interpolateTurbo(d / 80)) .attr("r", d => Math.max(0, d)); p.enter() .append("circle") .attr("class", "points") .attr("cy", 80) .attr("cx", (d,i) => ((i) + 1) * 60) .transition() .duration(2000) .style("fill", d => d3.interpolateTurbo(d / 80)) .ease(d3.easeElasticOut.period(1.00)) .attr("r", d => Math.max(0, d)), p.exit() .transition() .duration(1000) .attr("r", 0) .remove(); }); notebookScope.interval = setInterval(() => updateD3Rendering(), 3000); }); } ``` Notice the `setInterval` call near the end of the previous cell. This rechecks the data in the kernel and updates the plot. Back on the kernel, we can now update the data so that the kernel can see it. Yes, this is a contrived example, and we're planning to support true streaming data, but it's a start. ``` #!html <svg id="dataPlot1" width="100%" height=200></svg> #!js notebookScope.plot("svg#dataPlot1", "a"); #!csharp for(var i = 0; i < 10; i++){ await Task.Delay(1000); var limit = rnd.Next(4,12); a = Enumerable.Range(1,limit).Select( t => rnd.Next(30, 80)).ToArray(); } ```
github_jupyter
def correlations(d):
    """Remove features that are highly correlated with another feature.

    Computes the absolute pairwise correlation matrix of ``d`` and drops
    every column whose correlation with an earlier column exceeds 0.95.

    Args:
        d: Feature DataFrame (numeric columns).

    Returns:
        A new DataFrame without the highly correlated columns; ``d`` itself
        is not mutated.
    """
    corr_matrix = d.corr().abs()
    # Keep only the upper triangle so each correlated pair is considered once.
    # ``np.bool`` (removed in NumPy >= 1.24) is replaced by the builtin bool.
    upper = corr_matrix.where(
        np.triu(np.ones(corr_matrix.shape), k=1).astype(bool))
    # Columns correlated above 0.95 with some earlier column.
    to_drop = [column for column in upper.columns if any(upper[column] > 0.95)]
    # Bug fix: the original dropped from the *global* ``df`` (discarding the
    # result) and returned ``d`` unchanged, so nothing was ever removed.
    return d.drop(columns=to_drop)
y_hold = split_data(X, y, 0.3, y.detect_type) X_eval, X_test, y_eval, y_test = split_data(X_hold, y_hold, 0.7, y_hold.detect_type) X_train_oh = one_hot_encoding(X_train, ['protocol']) X_eval_oh = one_hot_encoding(X_eval, ['protocol']) X_test_oh = one_hot_encoding(X_test, ['protocol']) # X_train, X_hold, y_train, y_hold = train_test_split(X, y, test_size=0.3, stratify=y.detect_type) # X_eval, X_test, y_eval, y_test = train_test_split(X_hold, y_hold, test_size=0.6, stratify=y_hold.detect_type) # X_train_oh = pd.get_dummies(X_train, columns=['protocol']) # X_eval_oh = pd.get_dummies(X_eval, columns=['protocol']) # X_test_oh = pd.get_dummies(X_test, columns=['protocol']) scaled_data = scale_data(X_train_oh) X_train_oh = scaled_data.transform(X_train_oh) X_eval_oh = scaled_data.transform(X_eval_oh) X_test_oh = scaled_data.transform(X_test_oh) # rf = random_f(X_train_oh, y_train.detect_threat) # print_report('Train', rf, X_train_oh, y_train.detect_threat, rf.predict(X_train_oh)) # print_report('Eval', rf, X_eval_oh, y_eval.detect_threat, rf.predict(X_eval_oh), plot_pr=True) from sklearn.decomposition import PCA # Performing the principal component analysis. With just 19 components the variance ratio remains 99%, which is great. 
pca = PCA(0.99) pca.fit(X_train_oh) X_train_oh = pca.transform(X_train_oh) X_eval_oh = pca.transform(X_eval_oh) X_test_oh = pca.transform(X_test_oh) np.sum(pca.explained_variance_ratio_) pca.n_components_ # Create correlation matrix corr_matrix = df.corr().abs() # Select upper triangle of correlation matrix upper = corr_matrix.where(np.triu(np.ones(corr_matrix.shape), k=1).astype(np.bool)) # Find index of feature columns with correlation greater than 0.95 to_drop = [column for column in upper.columns if any(upper[column] > 0.95)] # Drop features df.drop(df[to_drop], axis=1) from imbalanced_ensemble.ensemble import SelfPacedEnsembleClassifier from sklearn.datasets import make_classification clf = SelfPacedEnsembleClassifier(random_state=0) clf.fit(X, y.detect_threat) clf.predict(X) ```
github_jupyter
# Working with Text ``` %matplotlib inline ``` Libraries for I/O ``` import os import glob ``` Libraries for numerics ``` import numpy as np import pandas as pd import scipy.stats as stats ``` Libraries for plotting ``` import matplotlib.pyplot as plt import seaborn as sns ``` Libraries for string manipulation ``` import string import re ``` Libraries for functional programming ``` from functools import reduce, partial import itertools as it import operator as op import toolz as tz import toolz.curried as c ``` ## String methods ``` s = " Avoid taking unnecessary gambles. Lucky numbers: 12, 15, 23, 28, 37\n" ``` ### Removing leading and trailing whitespace ``` s.strip() s.lstrip() s.rstrip() s = s.strip() ``` ### Changing case ``` s.lower() s.upper() s.title() ``` ### Checking conditions ``` s.startswith('Avoid') s.endswith('37') s.isalpha() s.isnumeric() s.isspace() s.isprintable() ``` ### Counting and indexing ``` s.count('a') s.count('gambles') s.find('gambles') s[27:] s.find('foobar') s.index('gambles') try: s.index('foobar') except ValueError as e: print(e) ``` ### Splitting and joining ``` s.split() s.split(':') '-'.join(s.split()) ``` ### Replacing ``` s.replace('gambles', 'risk') ``` ### Translating ``` table = str.maketrans(string.ascii_lowercase, string.ascii_uppercase, string.punctuation) s.translate(table) table = str.maketrans('', '', string.punctuation) s.translate(table) ``` **Exercise: Caesar Cipher** A Caesar cipher with offset $k$ converts a character into the character $k$ letters down, looping around if this goes past `z`. Non-characters (numbers, spaces, punctuation) are left intact. For instance, with offset=3, we get `abcXYZ` being coded as `defABC`. Write an function `encode(k, s)` where `k` is the offset and `s` the string to be coded. Write a `decode(k, s)` function that decodes encrypted ciphers. Test it out on the fortune. 
def encode(k, s):
    """Caesar-cipher ``s``: shift each ASCII letter ``k`` places, wrapping
    past 'z'/'Z'. Digits, spaces and punctuation pass through unchanged."""
    lower = string.ascii_lowercase
    upper = string.ascii_uppercase
    # Rotate both alphabets by k; slicing handles the wrap-around.
    rotated = lower[k:] + lower[:k] + upper[k:] + upper[:k]
    return s.translate(str.maketrans(lower + upper, rotated))
For one-hot encoding, we next convert this to the one-hot encoded matrix ```python array([[1, 0, 0], [0, 1, 0], [0, 0, 1], [1, 0, 0], [0, 1, 0], [0, 0, 1]]) ``` ``` idx = 0 index = {} for ch in s: if not ch in index: index[ch] = idx idx += 1 index ``` #### Categorical encoding ``` nchars = len(index) vs = np.array([index[ch] for ch in s]) vs ``` #### One-hot encoding ``` n = len(vs) p = len(index) m = np.zeros((n,p), dtype='int') i = np.arange(len(vs)) m[i, vs] = 1 m ``` #### Reverse index lookup ``` reverse_index = dict(zip(index.values(), index.keys())) ''.join(reverse_index[v] for v in vs) ``` ### One hot encoding for words. ``` words = ' '.join([s,s]).lower().translate(str.maketrans('', '', string.punctuation)).split() pos = 0 index = {} for word in words: if word not in index: index[word] = pos pos += 1 ``` #### Categorical encoding ``` ws = np.array([index[word] for word in words]) ws ``` #### One-hot encoding ``` n = len(ws) p = len(index) m = np.zeros((n,p), dtype='int') i = np.arange(len(ws)) m[i, ws] = 1 m ``` #### Reverse lookup ``` reverse_index = dict(zip(index.values(), index.keys())) ' '.join(reverse_index[w] for w in ws) ``` ## Regular expressions ``` s ``` ### Literal match ``` re.findall(r'gambles', s) ``` ### Quantifiers `.`, `{m,n}`, `+`, `*` ``` re.findall(r'gam.les', s) re.findall(r'g.*s', s) ``` ### Non-greedy quantifier. ``` re.findall(r'g.*?s', s) ``` ### Special characters ``` re.findall(r'\bg.*?s\b', s) re.findall(r'\b\w+?\b', s) re.findall(r'\b\d+?\b', s) re.findall(r'\b[a-zA-Z]+?\b', s) ``` ### Begin and end anchors ``` re.findall(r'\w+', s) re.findall(r'^\w+', s) re.findall(r'\w+$', s) ``` ### Capture groups ``` pat = r'\b(\d)(\d)?\b' re.findall(pat, s) ``` ### Using search and match objects ``` re.search(pat, s) m = re.search(pat, s) m.string m.group() m.groups() ``` ### Replacement using capture groups ``` rep = r'\2\1' re.sub(pat, rep, s) ``` ### Using compiled patterns ``` pat = re.compile(r'\b[a-zA-Z]+?\b') pat.findall(s) ```
github_jupyter
``` """ You can run either this notebook locally (if you have all the dependencies and a GPU) or on Google Colab. Instructions for setting up Colab are as follows: 1. Open a new Python 3 notebook. 2. Import this notebook from GitHub (File -> Upload Notebook -> "GITHUB" tab -> copy/paste GitHub URL) 3. Connect to an instance with a GPU (Runtime -> Change runtime type -> select "GPU" for hardware accelerator) 4. Run this cell to set up dependencies. """ # If you're using Google Colab and not running locally, run this cell # install NeMo BRANCH = 'main' !python -m pip install git+https://github.com/NVIDIA/NeMo.git@$BRANCH#egg=nemo_toolkit[nlp] # If you're not using Colab, you might need to upgrade jupyter notebook to avoid the following error: # 'ImportError: IProgress not found. Please update jupyter and ipywidgets.' ! pip install ipywidgets ! jupyter nbextension enable --py widgetsnbextension # Please restart the kernel after running this cell from nemo.collections import nlp as nemo_nlp from nemo.utils.exp_manager import exp_manager import os import wget import torch import pytorch_lightning as pl from omegaconf import OmegaConf ``` In this tutorial, we are going to describe how to finetune BioMegatron - a [BERT](https://arxiv.org/abs/1810.04805)-like [Megatron-LM](https://arxiv.org/pdf/1909.08053.pdf) model pre-trained on large biomedical text corpus ([PubMed](https://pubmed.ncbi.nlm.nih.gov/) abstracts and full-text commercial use collection) - on [RE: Text mining chemical-protein interactions (CHEMPROT)](https://biocreative.bioinformatics.udel.edu/tasks/biocreative-vi/track-5/). The model size of Megatron-LM can be larger than BERT, up to multi-billion parameters, compared to 345 million parameters of BERT-large. There are some alternatives of BioMegatron, most notably [BioBERT](https://arxiv.org/abs/1901.08746). Compared to BioBERT BioMegatron is larger by model size and pre-trained on larger text corpus. 
A more general tutorial of using BERT-based models, including Megatron-LM, for downstream natural language processing tasks can be found [here](https://github.com/NVIDIA/NeMo/blob/stable/tutorials/nlp/01_Pretrained_Language_Models_for_Downstream_Tasks.ipynb). # Task Description **Relation Extraction (RE)** can be regarded as a type of sentence classification. The task is to classify the relation of a [GENE] and [CHEMICAL] in a sentence, for example like the following: ```html 14967461.T1.T22 <@CHEMICAL$> inhibitors currently under investigation include the small molecules <@GENE$> (Iressa, ZD1839) and erlotinib (Tarceva, OSI-774), as well as monoclonal antibodies such as cetuximab (IMC-225, Erbitux). <CPR:4> 14967461.T2.T22 <@CHEMICAL$> inhibitors currently under investigation include the small molecules gefitinib (<@GENE$>, ZD1839) and erlotinib (Tarceva, OSI-774), as well as monoclonal antibodies such as cetuximab (IMC-225, Erbitux). <CPR:4> ``` to one of the following class: | Relation Class | Relations | | ----------- | ----------- | | CPR:3 | Upregulator and activator | | CPR:4 | Downregulator and inhibitor | | CPR:5 | Agonist | | CPR:6 | Antagonist | | CPR:9 | Substrate and product of | # Datasets Details of ChemProt Relation Extraction task and the original data can be found on the [BioCreative VI website](https://biocreative.bioinformatics.udel.edu/tasks/biocreative-vi/track-5/) ChemProt dataset pre-processed for easier consumption can be downloaded from [here](https://github.com/arwhirang/recursive_chemprot/blob/master/Demo/tree_LSTM/data/chemprot-data_treeLSTM.zip) or [here](https://github.com/ncbi-nlp/BLUE_Benchmark/releases/download/0.1/bert_data.zip) ``` TASK = 'ChemProt' DATA_DIR = os.path.join(os.getcwd(), 'DATA_DIR') RE_DATA_DIR = os.path.join(DATA_DIR, 'RE') WORK_DIR = os.path.join(os.getcwd(), 'WORK_DIR') MODEL_CONFIG = 'text_classification_config.yaml' os.makedirs(DATA_DIR, exist_ok=True) os.makedirs(os.path.join(DATA_DIR, 'RE'), exist_ok=True) 
os.makedirs(WORK_DIR, exist_ok=True) # download the dataset wget.download('https://github.com/arwhirang/recursive_chemprot/blob/master/Demo/tree_LSTM/data/chemprot-data_treeLSTM.zip?raw=true', os.path.join(DATA_DIR, 'data_re.zip')) !unzip -o {DATA_DIR}/data_re.zip -d {RE_DATA_DIR} ! ls -l $RE_DATA_DIR ``` ## Pre-process dataset Let's convert the dataset into the format that is compatible for [NeMo text-classification module](https://github.com/NVIDIA/NeMo/blob/stable/examples/nlp/text_classification/text_classification_with_bert.py). ``` wget.download(f'https://raw.githubusercontent.com/NVIDIA/NeMo/{BRANCH}/examples/nlp/text_classification/data/import_datasets.py') ! python import_datasets.py --dataset_name=chemprot --source_data_dir={RE_DATA_DIR} --target_data_dir={RE_DATA_DIR} # let's take a look at the training data ! head -n 5 {RE_DATA_DIR}/train.tsv # let's check the label mapping ! cat {RE_DATA_DIR}/label_mapping.tsv ``` It is not necessary to have the mapping exactly like this - it can be different. We use the same [mapping used by BioBERT](https://github.com/dmis-lab/biobert/blob/master/run_re.py#L438) so that comparison can be more straightforward. # Model configuration Now, let's take a closer look at the model's configuration and learn to train the model. The model is defined in a config file which declares multiple important sections. 
They are: - **model**: All arguments that are related to the Model - language model, a classifier, optimizer and schedulers, datasets and any other related information - **trainer**: Any argument to be passed to PyTorch Lightning ``` # download the model's configuration file config_dir = WORK_DIR + '/configs/' os.makedirs(config_dir, exist_ok=True) if not os.path.exists(config_dir + MODEL_CONFIG): print('Downloading config file...') wget.download(f'https://raw.githubusercontent.com/NVIDIA/NeMo/{BRANCH}/examples/nlp/text_classification/conf/' + MODEL_CONFIG, config_dir) else: print ('config file is already exists') # this line will print the entire config of the model config_path = f'{WORK_DIR}/configs/{MODEL_CONFIG}' print(config_path) config = OmegaConf.load(config_path) config.model.train_ds.file_path = os.path.join(RE_DATA_DIR, 'train.tsv') config.model.validation_ds.file_path = os.path.join(RE_DATA_DIR, 'dev.tsv') config.model.task_name = 'chemprot' # Note: these are small batch-sizes - increase as appropriate to available GPU capacity config.model.train_ds.batch_size=8 config.model.validation_ds.batch_size=8 config.model.dataset.num_classes=6 print(OmegaConf.to_yaml(config)) ``` # Model Training ## Setting up Data within the config Among other things, the config file contains dictionaries called **dataset**, **train_ds** and **validation_ds**. These are configurations used to setup the Dataset and DataLoaders of the corresponding config. We assume that both training and evaluation files are located in the same directory, and use the default names mentioned during the data download step. So, to start model training, we simply need to specify `model.dataset.data_dir`, like we are going to do below. Also notice that some config lines, including `model.dataset.data_dir`, have `???` in place of paths, this means that values for these fields are required to be specified by the user. 
Let's now add the data directory path, task name and output directory for saving predictions to the config. ``` config.model.task_name = TASK config.model.output_dir = WORK_DIR config.model.dataset.data_dir = RE_DATA_DIR ``` ## Building the PyTorch Lightning Trainer NeMo models are primarily PyTorch Lightning modules - and therefore are entirely compatible with the PyTorch Lightning ecosystem. Let's first instantiate a Trainer object ``` print("Trainer config - \n") print(OmegaConf.to_yaml(config.trainer)) # lets modify some trainer configs # checks if we have GPU available and uses it cuda = 1 if torch.cuda.is_available() else 0 config.trainer.gpus = cuda # for PyTorch Native AMP set precision=16 config.trainer.precision = 16 if torch.cuda.is_available() else 32 # remove distributed training flags config.trainer.accelerator = None trainer = pl.Trainer(**config.trainer) ``` ## Setting up a NeMo Experiment NeMo has an experiment manager that handles logging and checkpointing for us, so let's use it: ``` config.exp_manager.exp_dir = WORK_DIR exp_dir = exp_manager(trainer, config.get("exp_manager", None)) # the exp_dir provides a path to the current experiment for easy access exp_dir = str(exp_dir) exp_dir ``` Before initializing the model, we might want to modify some of the model configs. Here we are modifying it to use BioMegatron, [Megatron-LM BERT](https://arxiv.org/abs/1909.08053) pre-trained on [PubMed](https://pubmed.ncbi.nlm.nih.gov/) biomedical text corpus. ``` # complete list of supported BERT-like models print(nemo_nlp.modules.get_pretrained_lm_models_list()) # specify BERT-like model, you want to use, for example, "megatron-bert-345m-uncased" or 'bert-base-uncased' PRETRAINED_BERT_MODEL = "biomegatron-bert-345m-uncased" # add the specified above model parameters to the config config.model.language_model.pretrained_model_name = PRETRAINED_BERT_MODEL ``` Now, we are ready to initialize our model. 
During the model initialization call, the dataset and data loaders will be prepared for training and evaluation.
github_jupyter
### bigplanet Example David Fleming, Feb 2017 --- In this notebook, I'll run through the basic functionality of bigplanet, a package for data-processing, analysis, and plotting of data produced by VPLANET. ``` #Imports %matplotlib inline from __future__ import (print_function, division, absolute_import, unicode_literals) #Imports import numpy as np import os ``` # 0) Import bigplanet --- bigplanet, a dumb wordplay on ``big data`` and ``VPLANET``, is a suite of tools used to wrangle and analyze data produced by large-scale ``VPLANET`` simulation suites. It is currently under active development and any/all suggestions, bug discoveries, pull requests, etc are very much appreciated! bigplanet can be imported and used just like any other python module. Here, I'll just import the data extraction functionality. ``` from bigplanet import data_extraction as de ``` # 1) Load in Data --- Let's say we ran a simulation where we varied several body parameters in some grid over parameters such as binary semimajor axis, eccentricity and the same for a circumbinary planet (CBP). The suite of simulations, set up using ``vspace``, produced a lot of directories, one for each simulation, so in order to work with the data products, we need to traverse through each directory, extract the results, and transform them into a meaningful data structure. Currently, ``bigplanet`` supports extracting data into a hdf5 dataset. The hdf5 format is a versitile compressed file format that allows for quick access of data that cannot fit into memory. The hdf5 format stores array-like data in a POSIX-like file tree system that supports random-access by decompressing the given data in real-time that is only a factor of a few slower than if one was accessing the data from memory. Since VPLANET simulations have a simulation-body-variable hierarchy, it makes sense to use a hierarchical format. 
For more info on all things hdf5, check out these links: hdf5 with python: http://docs.h5py.org/en/latest/quick.html hdf5 general: https://en.wikipedia.org/wiki/Hierarchical_Data_Format (the h, d, and f from hdf5!) hdf5 group: https://www.hdfgroup.org/HDF5/ ** Tell bigplanet where the data is ** --- Here, we'll load the data into the variable data. When using the hdf5 format, data is actually an object that stores metadata about the hdf5 dataset for ease of manipulation. For this, we must make known the source directory where the simulations live, src, and the location of the hdf5 dataset, dataset. Other kwargs include remove_halts and skip_body. Remove_halts = True makes the function ignore all simulations that halted while skip_body tells the function to ignore all the output from the specified bodies. Cadence is an optional kwarg that has extract_data_hdf5 output which simulation it's on every cadence steps for integer cadence. ``` # Define root dirctory where all sim sub directories are located src = "/Users/dflemin3/Desktop/GM_run/Data" # Define location for hdf5 file to live hdf5_src = "/Users/dflemin3/Desktop/GM_run/Serial" # Path to the hdf5 dataset dataset = os.path.join(hdf5_src,"simulation") # How you wish the data to be ordered (grid for grid simulation suites) order = "none" # Format of the data (default) fmt = "hdf5" # Ignore simulations that halted at some point? remove_halts = False # Any bodies whose output you wish to ignore? skip_body = ["primary.in"] # Any parameters not in a body's .forward files that you want? var_from_log = {"secondary" : ["Mass"], "cbp" : ["Mass"]} # An optional kwarg that has extract_data_hdf5 output which simulation it's on # every cadence steps for int cadence cadence = 100 # Compression algorithm to use compression = None #"gzip" # Use all processors? Best if used on a cluster parallel = False # Any parameters to extract from input files? 
var_from_infile = {"cbp" : ["iBodyType"]} ``` ** Load in the data!** --- Here we actually get a data object to manipulate and work with. The core of this "function" occurs in the following line > data = de.extract_data_hdf5(src=src, dataset=dataset, ...) We pass the extract_data_hdf5 function the source directory src and the location of the dataset, dataset. If the dataset does not exist, i.e. all the data directories in source have not been traversed and parsed, then the function will traverse the simulation directories and store the data into an hdf5 Dataset object. On my slow laptop, this takes ~1.5 minutes for ~1000 simulations. If dataset does exist, it's quickly loaded and ready to go. For suites with ~10,000 simulations, it takes roughly 30 minutes - hour depending on how bad your computer is. For large number of simulations, I recommend setting ```parallel = True``` to utilize all cores on your machine for data extraction. ``` # Extract the data! data = de.extract_data_hdf5(src=src, dataset=dataset, order=order, remove_halts=remove_halts, skip_body=skip_body, compression=compression, var_from_infile=var_from_infile, cadence=cadence, parallel=parallel) print(data) ``` ** Dataset object methods ** --- The dataset object has some useful methods and attributes to keep track of and work with the data. ``` # Which input files do we have data for? data.input_files # What are the bodies and their corresponding variables? data.data_cols # Given a simulation number, what is the simulation's name? data.sim_name(25) # How is the data ordered? data.order # What is the size of the dataset (total number of simulations)? data.size # What's the path to the actual hdf5 file(s) where the simulations live? data.data ``` # 2) Data Processing --- The real power of this package is the easy of data access and manipulation. In this section, I'll show the user how to access the results of ``VPLANET`` simulations in a variety of ways. 
**Individual simulation data** --- Suppose you want to pull the binary (stored in the body "secondary") orbital eccentricity and the simulation time from the 2nd (index 1) simulation to plot how it varies as a function of time. Accessing it is as easy as the following: The get method for the data object takes the following arguments: ``` 1) Simulation number : int 2) body name : str 3) parameter or list of parameters : str, list of str ``` This method allows for easy data access for individual simulations and allows for one to iterate over an arbitrary number of simulations. This is useful if, for whatever reason, the user would want to plot binary eccentricity as a function of time from every simulation. ``` # Get the time, binary (stored in the body 'secondary') eccentricity data from simulation 400 time, ecc = data.get(400,"secondary",["Time","Eccentricity"]) print("Time:",time[0:10],"...") print("Ecc:",ecc[0:10],"...") # Let's plot it! import matplotlib.pyplot as plt plt.plot(time,ecc,lw=3) plt.xlabel("Time [Yr]") plt.ylabel("Eccentricity") ``` ** Documentation ** I forget stuff pretty quickly, so I've extensively documented the code. Read the doc strings and comments in the package for additional information on what a function does, what parameters it takes and so on. ``` # Check out the docs! de.data_from_dir_hdf5? ``` **Bulk Simulation Results** What if we don't want data from individual simulations, but rather some matrix-like structure that shows how one or many parameter changes as a function of initial simulation conditions. For example, what if one wants to see how the CBP eccentricity at some given time varies as a function of binary a, e and CBP e. 
To do this, we'll use another function, aggregate_data, that uses the following calling sequence: >de.aggregate_data(data, bodies=bodies, ind=0, funcs={"cbp" : {"Semim" : np.mean}}, > new_cols=new_cols,cache="cache.pkl",fmt=fmt,**kw) This function produces a pandas dataframe where the columns are the parameters (i.e. initial conditions) of the simulations and any other user-defined parameter you want. This function can get as complex as the user wants it to get, so we'll break it down by input parameter: --- 1) data ``` hdf5 Dataset object ``` 2) bodies ``` dictionary where the keys are body names and and the values are lists of the initial parameters that were varied for said object. In our typical example, binary eccentricity was varied in the vspace input file with some line like secondary.in dEcc [0.01,0.1,n5] dEcc In the secondary.in file, eccentricity is outputted by specifying "Eccentricity" in the saOutputOrder line. Hence to have that parameter in the final matrix, we would need to include in the bodies dict "secondary" : ['Eccentricity'] amongst other parameters of interest. In the cell below our simulation grid spanned binary a, e and the same for the cbp and our bodies dict reflects that. ``` 3) ind ``` Time index to extract for the given body's variable from each simulation, e.g. the binary's initial (time[ind = 0]) eccentricity. This makes it explicit that we want the matrix to span initial simulation conditions. I'm not sure why/if a user would have their grid over other time, but this variable allows for that functionality. ``` 4) funcs ``` Funcs is a nested dictionary of the form {body : {body_variable : function}, body2 : {...}, ...}. If something in funcs is specified for the variable of a given body, the funcs functionality supercedes the ind behavior and stores the result of funcs of that variable time series instead of the initial condition. 
In the example below, I use funcs to store the mean CBP semimajor axis instead of the initial condition. Typically a user won't use this, but it could be useful. ``` 5) new_cols ``` New_cols is another nest dictionary that is rather powerful and allows the user to apply any function to the simulation data to produce a new data product for each simulation. The effect of this is to add another column to the final dataframe. In the example below, I want to add the initial eccentricity again for a trivial example. new_cols = {"cbp" : {"InitEcc" : trivial}} This tells aggregate_data that for the cbp body, I want a new column, "InitEcc", added and it's computed by the trivial function. Obviously, this functionality becomes useful for non-trivial calculations like computing eccentricity damping times for the cbp. Note that any functions supplied to new_cols require the first 3 parameters to be data, sim, body and require a fmt kwarg. See the docs for more details. ``` 6) kw ``` A dictionary of any keyword arguments the functions in new_cols require. ``` 7) cache ``` Where to cache the output of this function into a pickle file. This is useful if any of your new_cols functions are computationaly expensive and the pickle file can be transfered and read by all sorts of machines and python distros. ``` 8) fmt ``` Tells the function whether you are using the hdf5 data format. ``` **Example** --- Ok, that's a lot of information, so let's see it in action. To show off functionality, below I wrote a trivial function that returns the initial eccentricity and takes a keyword argument. ``` # Trivial function to return initial eccentricity # This function assumes an hdf5 format def trivial(data,sim,body,key=None,fmt="hdf5"): # Check out the get function in action! 
return data.get(sim,body,key)[0] # Define the bodies and body variables to extract using a dictionary bodies = {'cbp': ['Eccentricity', 'SemimajorAxis'],'secondary': ['SemimajorAxis','Eccentricity']} # Define the new value (dataframe column) to produce for a given body. The new column # and how to calculate it are given as a dictionary for each body. # New column for the cbp is "InitEcc" and is calculated using the function "trivial" new_cols = {"cbp" : {"InitEcc" : trivial}} # Define any keyword arguments trivial might need kw = {"key" : "Eccentricity"} # Extract and save into a cache (or read from it if it already exists) # Note ind=0 makes it clear that we want initial conditions stored for all non-new_cols variables df = de.aggregate_data(data, bodies=bodies, ind=0, funcs={"cbp" : {"SemimajorAxis" : np.mean}}, new_cols=new_cols,cache=os.path.join(hdf5_src,"trivial_cache.pkl"), fmt=fmt,**kw) ``` ** What does df look like?** ``` # Dataframe with new column produced by user-defined function! df ``` ** Oh no! I forgot another column I wanted to add to the data frame. ** No problem, there's a function for that: add_column. For add_column, you need to pass it the dataframe you wish to add a column to, df, the path to a cache to re-cache the dataframe, and new_cols. Note that new_cols is the exact same as the one used in aggregate_data. Let's define the function that computes that simulation metric we forgot to include. ``` def mean_func(data,i,body,fmt="hdf5",**kwargs): return np.mean(data.get(i,body,"Eccentricity")) # new_cols, define similar to the one in aggregate_data new_cols = {"cbp" : {"MeanEcc" : mean_func, "MeanEcc_Again" : mean_func}} # Optional kwargs kwargs = {} df = de.add_column(data, df, new_cols=new_cols, cache=src+"trivial_cache.pkl", **kwargs) ``` ** Now what does the updated dataframe look like?** ``` df ```
github_jupyter
# Pandas - **Pandas** provides a complete framework for **effective data structure** and **fast numerical computations**. - **Pandas** is used with **numpy** and **scipy** libraries for **numerical computations**. - **Pandas** is used with **statsmodel** and **scikit-learn** libraries for **statistical analysis**. - **Pandas** is used with **matplotlib** for **data visualization**. - As **pandas** developed on top of **numpy**, pandas uses the same charactristics and data processing (**fast array-based computations**) **without** the need for **loops**. - **Pandas** has two **data structures**, namely, **series** and **dataframes**. ## 1) Series in Pandas - A **series** is a **one-dimensional array-like object** which contains a **sequence of values**. - It is **similar** to **numpy array** and the **differnce** is that it additionally has an associated **label** which is called **index**. ``` import pandas as pd ``` **pd.Series()**: To create a **pandas series** from a **list**. The function **Series()** begins with **capital s**. ``` s = pd.Series([11, 15, 18, 2, 9]) s ``` - The **column** on the **left** is the **index column** associated with values. - Every **data structure** in pandas **must** have an **index**. - You can **specify** the index by yourself, if you don't do that **pandas** will **generate** it **automatically**. **values**: To display the **data** of the **series** ``` s.values ``` **index**: To display the **index** of the **series** ``` s.index ``` When index is generated by pandas, it is integer and starts from zero. We can **create** the **index values** ourselves, using the **argument index**: ``` s = pd.Series([12, 23, 13, 44, 15], index=['a','b','c','d','e']) s s.index s['a'] ``` When you want to use **more than one** values, we need to use **double brackets**. 
``` s[['b','d']] ``` We can also use **boolean indexing** to slice the **series** like this: ``` s[s > 18] ``` Arithmetic operation can also be applied to the series: ``` s * 2 ``` A pandas **series** can be created from a **dictionary**, in that case, the **keys of the dictionary** will be the **index** and the **values of the dictionary** will be the **values of the series**. ``` dict = {'Sam': 23, 'Jone':41, 'Jake':26, 'Sally':29} s = pd.Series(dict) s s.index s.values ``` We can check if a **certain index** is present by using the keyword ( **in** ): ``` 'Jake' in s ``` **name**: To assign a **label** for the **index** ``` s.index.name = 'Names' s s.index = ['a','b','c','d'] s ``` ## 2) Dataframe in Pandas - A **dataframe** is like a **datasheet** which is a **table of data** with columns. - These **columns** can have **different data types**. - A **dataframe** has **two dimensions**, row and column. - The simplest way to create a **dataframe** is from a **dictionary**. ``` dict = {'name': ['bob', 'jake', 'sam', 'jone', 'sally','william'], 'age': [23,34,41,29,19, 34], 'income': [72,65,49,39,81,55]} ``` **pd.DataFrame()**: To convert a **dictionary** into a pandas **dataframe** ``` df = pd.DataFrame(dict) df ``` **set_index()**: To **set the index** to be any **coloumn** in the dataframe ``` df.set_index('name', inplace = True) df ``` **head()**: To display the **first five rows** ``` df.head() ``` We can aslo display **any number of the first rows** like this: ``` df.head(6) ``` **tail()**: To display the **last five rows** ``` df.tail() df.tail(6) ``` To display a **certain column**, we use this: ``` df.age ``` **Note**: This one **works only** if the name of the **column** is a **valid python variable name** with no spaces, ... 
``` df['age'] ``` **loc[ ]**: To display a **certain row** ``` df.loc['jake'] ``` We can **change** the **values** of a **certain column** by assigning a **new value**: ``` df['income'] = 100 df ``` To change a **certain single value**, we include the label of the **column** and the label of the **index**: ``` df['income']['jake'] = 50 df ``` The whole **column** can be **replaced** with a **series**, but in that case, **index must be specified** (The **index should not be in order**, pandas will align it in the right order.) ``` df['income'] = pd.Series([44,38,79,23,66,59], index=['william', 'jake', 'sam', 'jone', 'sally','bob']) df ``` If we use a **column name** that does **not exist** in the dataframe, pandas will **add** it as a **new column**: ``` height = [5.6, 7.1, 6.8, 5.9, 6.1, 5.8 ] df['heights'] = height df ``` We can **delete a column** using the **del** keyword: ``` del df['heights'] df ``` We can **transpose** a dataframe using the **attribute .T**, but the **origianl dataframe** does **not change**. ``` df.T df ``` **columns**: To get the **labels** of the **columns** ``` df.columns ``` **index**: To get the **index** ``` df.index ``` ## 3) Index Objects - **Series** and **dataframes** are always align with **index objects**. - **Index objects** are the keys for **data manipulations** in pandas. - **Index** can be any **array** or **sequence of objects**. - **Index** can be **numeric**, **string**, and even **booleans**. ``` labels = list('abdcde') s = pd.Series([32,45,23,37,65,55], index = labels) s s.index[0] ``` One characteristic of **index** is that it is **immutable** meaning that index **cannot be modified**. ``` s.index[0] = 'f' ``` **Index** affects **size array** meaing that if we **add data** with size **smaller** than the dataframe, pandas will keep the **size** of the **index fixed** and fill automatically the **missing values** with **NaN**. 
``` dict = {'name': ['bob', 'jake', 'sam', 'jone', 'sally'], 'age': [23,34,41,29,19], 'income': [72,65,49,39,81]} df= pd.DataFrame(dict) df df.set_index('name', inplace=True) df df['degree'] = pd.Series(['Yes', 'No', 'No'], index = ['jake', 'sam', 'sally']) df df.degree ``` **Index object** allows **duplicate entries**, consider this dataframe: ``` dict = {'name': ['bob', 'jake', 'bob', 'jone', 'sally'], 'age': [23,34,41,29,19], 'income': [72,65,49,39,81]} df= pd.DataFrame(dict) df.set_index('name', inplace = True) df ``` When we try to display the **index** with the **duplicate labels**, **all labels are displayed**: ``` df.loc['bob'] ``` ## 4) Reindexing in Series and Dataframes - **Reindexing** means creating a **new index objects** which replace the **old index**. - **Reindexing rearranges** the data according to the **new index**. ``` s = pd.Series([23, 45, 28, 37], index = ['b','c','a','e']) s ``` We can change the index of this series using the function **reindex()**, any index **without a values**, will be filled with **NaN**: ``` s = s.reindex(['a','b','c','d','e','f']) s ``` **method**: To change the way pandas **fill the missing values** ``` s = pd.Series(['red','green', 'yellow'], index = [0,2,4]) s s.reindex(range(6), method='ffill') ``` **ffill**: Which means **forward filling** meaning that the **empty values** will be **filled by previous value** In pandas we can **reindex** the **rows** as well as the **columns**, for example consider this dataframe: ``` dict = {'red': [33,22,55], 'green': [66,33,11], 'white':[66,44,22]} df = pd.DataFrame(dict, index = ['a', 'c','d']) df ``` We can **reindex** the **row index** like this: ``` df.reindex(['a','b','c','d']) ``` We can **reindex** the **columns labels** like this: ``` df.reindex(columns = ['red', 'white','brown','green']) ``` ## 5) Deleting Rows and Columns ``` import numpy as np s = pd.Series([11,22,33,44,55,66,77,88], index=list('abcdefgh')) s ``` **drop()**: To **delete** a **single entry** in 
**series** ``` s.drop('e') s ``` To **delete multiple entries**, we enclose them in **square brackets** [ ] as a list: ``` s.drop(['b', 'g']) ``` **Note**: The **drop** method returns a **new object** which contains the entries after drop; however, the **original series** is **not changed**. ``` s df = pd.DataFrame(np.arange(24).reshape((6,4)), index = list('abcdef'), columns= ['red', 'green', 'white', 'black']) df ``` **drop()**: To **delete** specific **rows** ``` df.drop(['b','e']) ``` To **delete columns**, we add the argument **axis = 1**: ``` df.drop('green', axis=1) df ``` **Note**: The **original dataframe** is **not changed**, to make the deletion **reflected** in the **original dataframe** we use the argument **inplace = True** ``` df.drop('red', axis=1, inplace=True) df ``` ## 6) Indexing, Slicing, and Filtering To **select** a **subset** of rows and/or columns from a dataframe. ``` s = pd.Series(np.arange(6), index=list('abcdef')) s s['c'] s['c':'e'] s[0] s[2:5] s[s > 3] df = pd.DataFrame(np.arange(24).reshape((6,4)), index=list('abcdef'), columns=['red', 'green', 'white', 'black']) df df['green'] df[['red','green']] ``` **loc[ ]**: To **select rows** with the **index label** ``` df.loc['a'] ``` To make the selection **looks like** a **dataframe**, we use **double brackets**: ``` df.loc[['a']] df.loc['b':'e'] ``` We can select a **subset** of **rows** and **columns** together like this: ``` df.loc[['c','d'], ['green', 'white']] df.loc['c':'e', 'green':'black'] ``` **iloc[ ]**: To **select rows** with the **digits** ``` df.iloc[2:5] df.iloc[2:5, 2:4] df.iloc[:, [1]] ``` **Boolean indexing** in **dataframe** can be used like this: ``` df[df['red'] > 10] df.iloc[:, 2:4][df['black'] > 12] ``` ## 7) Arithmetic with Dataframe ``` df = pd.DataFrame(np.arange(1,13).reshape((3,4)), columns = ['a', 'b','c','d']) df 1 / df df.div(2) df.add(2) df.sub(2) df.mul(5) df.pow(2) ``` We can apply **arithmetic operation** to a **single column** like this: ``` df 
df['a'].add(5) df.iloc[0].add(2) df['d'] - df['a'] df.min() df.mean() ``` **describe()**: To calculate a **group of statistics** ``` df.describe() df.max() - df.min() normalized_df = (df - df.mean()) / (df.min() - df.max()) normalized_df s = pd.Series(np.random.randint(1,50, size = 20)) s normalized_s = (s - s.min()) / (s.max() - s.min()) normalized_s ``` ## 8) Sorting Series and Dataframe ``` s = pd.Series(np.random.randint(50, size =10), index = list('jsgjhfsagb')) s ``` **sort_index()**: To **sort** the **index** of a series (The **original series** is **not changed**) ``` s.sort_index() s s.sort_index(inplace=True) s ``` **sort_values()**: To **sort** the **data** of the series ``` s.sort_values() df = pd.read_csv('data/wage.csv', index_col = 'year') df ``` **sort_index()**: To **sort** the **index** of dataframe (The **original series** is **not changed**) ``` df.sort_index() ``` We can also **sort** the **labels of the columns** by using the argument **axis = 1**: ``` df.sort_index(axis=1) ``` For **descending** ordering of the **index**, we use the argument **ascending = False**: ``` df.sort_index(ascending=False) ``` **sort_values**: To **sort the dataframe** by a **column** ``` df.sort_values(by='age') ``` We can even **sort the dataframe** by **multiple columns**, in this case, we pass the **columns** as a **list**. ``` df.sort_values(by=['age', 'wage']) ``` ## 9) Descriptive Statistics with Dataframe ``` df = pd.read_csv('data/wage.csv', index_col ='year') df.head() df.sum() ``` If we want to calculate **statistics** per **rows**, we use the argument **axis = 'columns'**: ``` df.sum(axis='columns') ``` The same for statistics like min(), max(), cumsum() To display the **index value** for the **minimum** and the **maximum** we use the functions **idxmax()** and **idxmin()**: ``` df.idxmax() df['age'].idxmax() df.describe() ``` ## 10) Correlation and Variance - **Correlation** is a measure of the **strength of a relationship** between **two variables**. 
- Correlation takes values from -1 to 1. - A **positive correlation** means that **both variables** move in the **same direction**. - A **negative correlation** means that when one **variable increases** the other **variable decreases**. - The correlation becomes **weaker** when **approaching zero**, and **stronger towards -1 or 1**. ``` df = pd.read_csv('data/advertising.csv') df.head() ``` **corr()**: To calculate the **correlation coefficient** between **two variables** ``` df['TV'].corr(df['Sales']) df.TV.corr(df.Sales) ``` We can calculate the **correlation matrix** for **all variables**. ``` df.corr() ``` - **Covariance** also examines the **relationship** between **two variables**. - **Covariance** measures the **extent** to which **two variables change with each other**. ``` df['TV'].cov(df['Newspaper']) df.TV.cov(df.Newspaper) df.cov() ``` ## 11) Reading Data in Text Format - Your first task when working with **datasets** in **python** is to **convert** them into **python friendly formats**, mainly **pandas series** and **dataframes**. **pd.read_csv()**: To **read** a **dataframe** from a **csv file** ``` df = pd.read_csv('data/auto1.csv') ``` **Note**: The path is from current working directory, you can also you pathlib. ``` df.head() ``` **pd.read_table()**: To **read** a **dataframe** from a **file** where the values are **separated** by **other symbols** ``` df = pd.read_table('data/auto1.csv', sep=',') df.head() ``` If the **data** has **no column labels** (**header**), we need to use an argument called **header = None** ``` df = pd.read_csv('data/auto2.csv', header=None) df.head() ``` If the **data** has **no column labels**, we can **assign labels** for the columns using the argument **names** ``` df = pd.read_csv('data/auto2.csv', names=['name', 'mpg', 'cylinders', 'displacement', 'horsepower']) df.head() ``` We can **define** the **row index** using an argument called **index_col**. 
``` df = pd.read_csv('data/auto1.csv', index_col='name') df.head() ``` We can also set the **index** to be **multiple columns**, we do it if we are interested in **data grouping**. ``` df = pd.read_csv('data/auto1.csv', index_col=['cylinders', 'mpg']) df.head(20) df = pd.read_csv('data/auto3.csv') df.head() ``` We can **skip certain rows** from the data file using the argument **skiprows** ``` df = pd.read_csv('data/auto3.csv', skiprows=[2, 3]) df.head() ``` **pd.isnull()**: To check for **missing data** and it returns a **boolean mask** for all dataframe where **True** means **missing value** ``` df = pd.read_csv('data/auto3.csv') pd.isnull(df) ``` In large datasets, it is difficult to go through all values, so we can add **any()** method and it tells us which columns have missing values. ``` pd.isnull(df).any() ``` ## 12) Writing Data in Text Format - How to write pandas dataframe in text format on your local hard disk. ``` df = pd.read_csv('data/population.csv', index_col='Country') df.head() df['Density'] = df['Population'] / df['Area'] df.head() ``` **to_csv()**: To **save** a **dataframe** in a **csv file**, We can also **save pandas series** as well. ``` df.to_csv('data/Population2.csv') import numpy as np s = pd.Series(np.arange(10), index=list('abcdefghij')) s s.to_csv('data/series1.csv') ``` ## 13) Reading Microsoft Excel Files - How to read microsoft excel files into pandas dataframes. - Install **openpyxl** in your environment **pd.read_excel()**: To **read** a **dataframe** from **Ms excel file** ``` credit = pd.read_excel('data/credit.xlsx') credit.head() ``` We can extract **specific columns** form a dataframe into a **new dataframe** like this: ``` df = pd.DataFrame(credit, columns= (['Income', 'Limit'])) df.head() ``` **to_excel()**: To **save** a **dataframe** into an **excel file** ``` df.to_excel('data/example1.xlsx') ```
github_jupyter
``` import matplotlib import matplotlib.pyplot as plt import numpy as np import pandas as pd matplotlib.__version__, np.__version__, pd.__version__ ``` ## 2 Plots side by side ``` plt.clf() # sample data x = np.linspace(0.0,100,50) y = np.random.uniform(low=0,high=10,size=50) # create figure and axes fig, axes = plt.subplots(1,2) ax1 = axes[0] ax2 = axes[1] # just plot things on each individual axes ax1.scatter(x,y,c='red',marker='+') ax2.bar(x,y) plt.gcf().set_size_inches(10,5) plt.show() ``` ## 2 plots one on top of the other ``` plt.clf() # sample data x = np.linspace(0.0,100,50) y = np.random.uniform(low=0,high=10,size=50) # create figure and axes fig, axes = plt.subplots(2,1) ax1 = axes[0] ax2 = axes[1] # just plot things on each individual axes ax1.scatter(x,y,c='red',marker='+') ax2.bar(x,y) plt.gcf().set_size_inches(5,5) plt.show() ``` ## 4 plots in a grid ``` import numpy as np import matplotlib.pyplot as plt x = np.linspace(0.0,100,50) y = np.random.uniform(low=0,high=10,size=50) # plt.subplots returns an array of arrays. We can # directly assign those to variables directly # like this fig, ((ax1,ax2),(ax3,ax4)) = plt.subplots(2,2) # just plot things on each individual axes ax1.scatter(x,y,c='red',marker='+') ax2.bar(x,y) ax3.scatter(x,y,marker='x') ax4.barh(x,y) plt.gcf().set_size_inches(5,5) plt.show() ``` ## Pandas plots ``` import matplotlib.pyplot as plt import pandas as pd df = pd.DataFrame({ 'string_col':['foo','bar','baz','quux'], 'x':[10,20,30,40], 'y':[1,2,3,4] }) df plt.clf() # plt.subplots returns an array of arrays. 
We can # directly assign those to variables directly fig, ((ax1,ax2),(ax3,ax4)) = plt.subplots(2,2) # bar plot for column 'x' df.plot(y='x', kind='bar', ax=ax1) ax1.set_xlabel('index') # horizontal bar plot for column 'y' df.plot(y='y', kind='bar', ax=ax2, color='orange') ax2.set_xlabel('index') # both columns in a scatter plot df.plot('x','y', kind='scatter', ax=ax3) # to have two lines, plot twice in the same axis df.plot(y='x', kind='line', ax=ax4) df.plot(y='y', kind='line', ax=ax4) ax4.set_xlabel('index') plt.subplots_adjust(wspace=0.3, hspace=0.5) plt.show() ``` ## Set subplot title ``` plt.clf() # plt.subplots returns an array of arrays. We can # directly assign those to variables directly fig, ((ax1,ax2),(ax3,ax4)) = plt.subplots(2,2) # sample data x = np.linspace(0.0,100,50) y = np.random.uniform(low=0,high=10,size=50) # plot individual subplots ax1.bar(x,y) ax2.bar(x,y) ax3.scatter(x,y) ax4.plot(x) ax4.set_title('This is Plot 4',size=14) plt.subplots_adjust(wspace=0.3, hspace=0.5) plt.show() ``` ## Padding ``` import numpy as np import matplotlib.pyplot as plt # sample data x = np.linspace(0.0,100,50) y = np.random.uniform(low=0,high=10,size=50) # plt.subplots returns an array of arrays. We can # directly assign those to variables directly fig, ((ax1,ax2),(ax3,ax4)) = plt.subplots(2,2) # just plot things on each individual axes ax1.scatter(x,y,c='red',marker='+') ax2.bar(x,y) ax3.scatter(x,y,marker='x') ax4.barh(x,y) # here, set the width and the height between the subplots # the default value is 0.2 for each plt.subplots_adjust(wspace=0.50, hspace=1.0) plt.show() ``` ## Align axes ``` import numpy as np import matplotlib.pyplot as plt plt.clf() # plt.subplots returns an array of arrays. 
We can # directly assign those to variables directly fig, ((ax1,ax2)) = plt.subplots(1,2) np.random.seed(42) x = np.linspace(0.0,100,50) # sample data in different magnitudes y1 = np.random.normal(loc=10, scale=2, size=10) y2 = np.random.normal(loc=20, scale=2, size=10) ax1.plot(y1) ax2.plot(y2) ax1.grid(True,alpha=0.3) ax2.grid(True,alpha=0.3) ax1.set_ylim(0,25) ax2.set_ylim(0,25) plt.subplots_adjust(wspace=0.3, hspace=0.5) plt.show() ```
github_jupyter
##### Copyright 2018 The TensorFlow Authors. ``` #@title Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. #@title MIT License # # Copyright (c) 2017 François Chollet # # Permission is hereby granted, free of charge, to any person obtaining a # copy of this software and associated documentation files (the "Software"), # to deal in the Software without restriction, including without limitation # the rights to use, copy, modify, merge, publish, distribute, sublicense, # and/or sell copies of the Software, and to permit persons to whom the # Software is furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in # all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL # THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING # FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER # DEALINGS IN THE SOFTWARE. 
``` # Train your first neural network <table align="left"><td> <a target="_blank" href="https://colab.sandbox.google.com/github/tensorflow/models/blob/master/samples/core/get_started/basic_classification.ipynb"> <img src="https://www.tensorflow.org/images/colab_logo_32px.png" />Run in Google Colab</a> </td><td> <a target="_blank" href="https://github.com/tensorflow/models/blob/master/samples/core/get_started/basic_classification.ipynb"><img width=32px src="https://www.tensorflow.org/images/GitHub-Mark-32px.png" />View source on Github</a></td></table> In this guide, we will train a neural network model to classify images of clothing, like sneakers and shirts. It's fine if you don't understand all the details, this is a fast-paced overview of a complete TensorFlow program with the details explained as we go. This guide uses [tf.keras](https://www.tensorflow.org/programmers_guide/keras), a high-level API to build and train models in TensorFlow. ``` # TensorFlow and tf.keras import tensorflow as tf from tensorflow import keras # Helper libraries import numpy as np import matplotlib.pyplot as plt print(tf.__version__) ``` ## Import the Fashion MNIST dataset This guide uses the [Fashion MNIST](https://github.com/zalandoresearch/fashion-mnist) dataset which contains 70,000 grayscale images in 10 categories. The images show individual articles of clothing at low resolution (28 by 28 pixels), as seen here: <table> <tr><td> <img src="https://tensorflow.org/images/fashion-mnist-sprite.png" alt="Fashion MNIST sprite" width="600"> </td></tr> <tr><td align="center"> <b>Figure 1.</b> <a href="https://github.com/zalandoresearch/fashion-mnist">Fashion-MNIST samples</a> (by Zalando, MIT License).<br/>&nbsp; </td></tr> </table> Fashion MNIST is intended as a drop-in replacement for the classic [MNIST](http://yann.lecun.com/exdb/mnist/) dataset—often used as the "Hello, World" of machine learning programs for computer vision. 
The MNIST dataset contains images of handwritten digits (0, 1, 2, etc) in an identical format to the articles of clothing we'll use here. This guide uses Fashion MNIST for variety, and because it's a slightly more challenging problem than regular MNIST. Both datasets are relatively small and are useful to verify that an algorithm works as expected. They're good starting points to test and debug code. We will use 60,000 images to train the network and 10,000 images to evaluate how accurately the network learned to classify images. You can access the Fashion MNIST directly from TensorFlow, just import and load the data: ``` fashion_mnist = keras.datasets.fashion_mnist (train_images, train_labels), (test_images, test_labels) = fashion_mnist.load_data() ``` Loading the dataset returns four NumPy arrays: * The `train_images` and `train_labels` arrays are the *training set*, this is the data the model uses to learn. * The model is tested against the *test set*, the `test_images` and `test_labels` arrays. The images are 28x28 numpy arrays, with pixel values ranging between 0 and 255. The *labels* are an array of integers, ranging from 0 to 9. These correspond to the *class* of clothing the image represents: <table> <tr> <th>Label</th> <th>Class</th> </tr> <tr> <td>0</td> <td>T-shirt/top</td> </tr> <tr> <td>1</td> <td>Trouser</td> </tr> <tr> <td>2</td> <td>Pullover</td> </tr> <tr> <td>3</td> <td>Dress</td> </tr> <tr> <td>4</td> <td>Coat</td> </tr> <tr> <td>5</td> <td>Sandal</td> </tr> <tr> <td>6</td> <td>Shirt</td> </tr> <tr> <td>7</td> <td>Sneaker</td> </tr> <tr> <td>8</td> <td>Bag</td> </tr> <tr> <td>9</td> <td>Ankle boot</td> </tr> </table> Each image is mapped to a single label. 
Since the *class names* are not included with the dataset, store them here to use later when plotting the images: ``` class_names = ['T-shirt/top', 'Trouser', 'Pullover', 'Dress', 'Coat', 'Sandal', 'Shirt', 'Sneaker', 'Bag', 'Ankle boot'] ``` ## Explore the data Let's explore the format of the dataset before training the model. The following shows there are 60,000 images in the training set, with each image represented as 28 x 28 pixels: ``` train_images.shape ``` Likewise, there are 60,000 labels in the training set: ``` len(train_labels) ``` Each label is an integer between 0 and 9: ``` train_labels ``` There are 10,000 images in the test set. Again, each image is represented as 28 x 28 pixels: ``` test_images.shape ``` And the test set contains 10,000 image labels: ``` len(test_labels) ``` ## Preprocess the data The data must be preprocessed before training the network. If you inspect the first image in the training set, you will see that the pixel values fall in the range of 0 to 255: ``` plt.figure() plt.imshow(train_images[0]) plt.colorbar() plt.gca().grid(False) ``` We will scale these values to a range of 0 to 1 before feeding to the neural network model. For this, cast the datatype of the image components from an integer to a float, and divide by 255. Here's the function to preprocess the images: It's important that the *training set* and the *testing set* are preprocessed in the same way: ``` train_images = train_images / 255.0 test_images = test_images / 255.0 ``` Display the first 25 images from the *training set* and display the class name below each image. Verify that the data is in the correct format and we're ready to build and train the network. 
``` import matplotlib.pyplot as plt %matplotlib inline plt.figure(figsize=(10,10)) for i in range(25): plt.subplot(5,5,i+1) plt.xticks([]) plt.yticks([]) plt.grid('off') plt.imshow(train_images[i], cmap=plt.cm.binary) plt.xlabel(class_names[train_labels[i]]) ``` ## Build the model Building the neural network requires configuring the layers of the model, then compiling the model. ### Setup the layers The basic building block of a neural network is the *layer*. Layers extract representations from the data fed into them. And, hopefully, these representations are more meaningful for the problem at hand. Most of deep learning consists of chaining together simple layers. Most layers, like `tf.keras.layers.Dense`, have parameters that are learned during training. ``` model = keras.Sequential([ keras.layers.Flatten(input_shape=(28, 28)), keras.layers.Dense(128, activation=tf.nn.relu), keras.layers.Dense(10, activation=tf.nn.softmax) ]) ``` The first layer in this network, `tf.keras.layers.Flatten`, transforms the format of the images from a 2d-array (of 28 by 28 pixels), to a 1d-array of 28 * 28 = 784 pixels. Think of this layer as unstacking rows of pixels in the image and lining them up. This layer has no parameters to learn, it only reformats the data. After the pixels are flattened, the network consists of a sequence of two `tf.keras.layers.Dense` layers. These are densely-connected, or fully-connected, neural layers. The first `Dense` layer has 128 nodes, or neurons. The second (and last) layer is a 10-node *softmax* layer—this returns an array of 10 probability scores that sum to 1. Each node contains a score that indicates the probability that the current image belongs to one of the 10 digit classes. ### Compile the model Before the model is ready for training, it needs a few more settings. These are added during the model's *compile* step: * *Loss function* —This measures how accurate the model is during training. 
We want to minimize this function to "steer" the model in the right direction. * *Optimizer* —This is how the model is updated based on the data it sees and its loss function. * *Metrics* —Used to monitor the training and testing steps. The following example uses *accuracy*, the fraction of the images that are correctly classified. ``` model.compile(optimizer=tf.train.AdamOptimizer(), loss='sparse_categorical_crossentropy', metrics=['accuracy']) ``` ## Train the model Training the neural network model requires the following steps: 1. Feed the training data to the model—in this example, the `train_images` and `train_labels` arrays. 2. The model learns to associate images and labels. 3. We ask the model to make predictions about a test set—in this example, the `test_images` array. We verify that the predictions match the labels from the `test_labels` array. To start training, call the `model.fit` method—the model is "fit" to the training data: ``` model.fit(train_images, train_labels, epochs=5) ``` As the model trains, the loss and accuracy metrics are displayed. This model reaches an accuracy of about 0.88 (or 88%) on the training data. ## Evaluate accuracy Next, compare how the model performs on the test dataset: ``` test_loss, test_acc = model.evaluate(test_images, test_labels) print('Test accuracy:', test_acc) ``` It turns out, the accuracy on the test dataset is a little less than the accuracy on the training dataset. This gap between training accuracy and test accuracy is an example of *overfitting*. Overfitting is when a machine learning model performs worse on new data than on its training data. ## Make predictions With the model trained, we can use it to make predictions about some images. ``` predictions = model.predict(test_images) ``` Here, the model has predicted the label for each image in the testing set. Let's take a look at the first prediction: ``` predictions[0] ``` A prediction is an array of 10 numbers. 
These describe the "confidence" of the model that the image corresponds to each of the 10 different articles of clothing. We can see which label has the highest confidence value: ``` np.argmax(predictions[0]) ``` So the model is most confident that this image is an ankle boot, or `class_names[9]`. And we can check the test label to see this is correct: ``` test_labels[0] ``` Let's plot several images with their predictions. Correct prediction labels are green and incorrect prediction labels are red. ``` # Plot the first 25 test images, their predicted label, and the true label # Color correct predictions in green, incorrect predictions in red plt.figure(figsize=(10,10)) for i in range(25): plt.subplot(5,5,i+1) plt.xticks([]) plt.yticks([]) plt.grid('off') plt.imshow(test_images[i], cmap=plt.cm.binary) predicted_label = np.argmax(predictions[i]) true_label = test_labels[i] if predicted_label == true_label: color = 'green' else: color = 'red' plt.xlabel("{} ({})".format(class_names[predicted_label], class_names[true_label]), color=color) ``` Finally, use the trained model to make a prediction about a single image. ``` # Grab an image from the test dataset img = test_images[0] print(img.shape) ``` `tf.keras` models are optimized to make predictions on a *batch*, or collection, of examples at once. So even though we're using a single image, we need to add it to a list: ``` # Add the image to a batch where it's the only member. img = (np.expand_dims(img,0)) print(img.shape) ``` Now predict the image: ``` predictions = model.predict(img) print(predictions) ``` `model.predict` returns a list of lists, one for each image in the batch of data. Grab the predictions for our (only) image in the batch: ``` prediction = predictions[0] np.argmax(prediction) ``` And, as before, the model predicts a label of 9.
github_jupyter
``` import pandas as pd import os import re from pathlib import Path from tqdm.auto import tqdm import numpy as np from scipy.stats import ttest_ind from statsmodels.stats.multitest import multipletests import re from collections import defaultdict pd.set_option('display.max_columns', None) folder = '/media/data_1/darius/models/finetuned' numFolds= 1000 sheets = ['overall', 'gender' ,'language', 'insurance', 'ethnicity'] allowed_models = {'overall': ['baseline'], 'gender': ['baseline'], 'language': ['baseline'], 'insurance': ['baseline'], 'ethnicity': ['baseline']} model = 'baseline' def gap_significant(lower, upper): return (((lower < 0) & (upper < 0)) | ((lower > 0) & (upper > 0))) def add_to_dict(gap_dict, model, sheet, name, num_sig, num_favor): if model not in gap_dict: gap_dict[model] = {} if sheet not in gap_dict[model]: gap_dict[model][sheet] = {} gap_dict[model][sheet][name] = [num_sig, num_favor] def add_gap(model, gap, sheet, res, gap_infos_naive): res[model+'_' + gap+'_sig'] = multipletests( res[model+'_'+gap + '_p'], alpha=0.05, method="fdr_bh")[0] add_to_dict(gap_infos_naive, model, sheet, gap, res[model+'_' + gap + 'naive_sig'].astype( int).sum(), (res.loc[res[model + '_' + gap + 'naive_sig'], model + '_' + gap] > 0).astype(int).sum()) def get_seeds(finetuned_dir): p = Path(folder) seeds = [f.name.split('_seed')[1] for f in p.glob('*_seed[0-9]*')] return seeds def get_target_name(mname): target = None if 'inhosp_mort' in mname: target = 'inhosp_mort' elif 'phenotype' in mname: mname = mname.split('seed')[0] name = re.findall(r'.*512_(?:lambda1_)*(.*)', mname)[0] if name.endswith('_gs'): name = name[:-3] name = name.replace('_', ' ') if 'phenotype_all' in mname: target = 'phenotype_all_%s' % name else: target = 'phenotype_first_%s' % name assert(target) return target def populate_df(*, df, res, idx, model, columns, multi=False): for i in columns: col = model + '_' + columns[i] res.loc[idx, col] = df.loc[i, 'avg'] res.loc[idx, col + '_p'] = df.loc[i, 
'p'] res.loc[idx, col + '_favor'] = df.loc[i, 'favor'] res.loc[idx, col + 'lowerCI'] = df.loc[i, '2.5%'] res.loc[idx, col + 'upperCI'] = df.loc[i, '97.5%'] if multi and ('dgap_' in col or 'egap_' in col): res.loc[idx, col + 'naive_sig'] = gap_significant(df.loc[i, '2.5%'], df.loc[i, '97.5%']) else: res.loc[idx, col + 'naive_sig'] = gap_significant(df.loc[i, '2.5%'], df.loc[i, '97.5%']) seeds = get_seeds(folder) dfs_list = [] gap_infos_naive_list = [] for seed in tqdm(seeds): dfs = {} gap_infos_naive = {} for sheet in sheets: res = pd.DataFrame() for root, dirs, files in os.walk(folder): for d in dirs: mname = d if f'_seed{seed}' not in mname: continue file = Path(root) / d / 'results.xlsx' if not file.is_file(): print(f'Cannot find {file.parents[0]} results file. Skipping...') continue # files = sorted([f for f in p.glob('*.*') if f.stem == 'results']) target = get_target_name(mname) if sheet == 'overall': df = pd.read_excel(os.path.join(root, file), index_col=0, sheet_name = 'all') columns = ['all_auroc', 'all_auprc', 'all_recall', 'all_class_true_count'] for i in columns: res.loc[f'{target}-{seed}', model + '_' +i] = df.loc[i, 'avg'] res.loc[f'{target}-{seed}', model + '_' +i+'lowerCI'] = df.loc[i, '2.5%'] res.loc[f'{target}-{seed}', model + '_' +i+'upperCI'] = df.loc[i, '97.5%'] elif sheet == 'gender': df = pd.read_excel(os.path.join(root, file), index_col=0, sheet_name = 'gender') columns = { 'gender=="M"_dgap_max':'Parity Gap (M-F)', 'gender=="M"_egap_positive_max':'Recall Gap', 'gender=="M"_egap_negative_max':'Specificity Gap' } populate_df(df=df, res=res, idx=target, model=model, columns=columns) elif sheet == 'language': df = pd.read_excel(os.path.join(root, file), index_col=0, sheet_name = 'language_to_use') columns = {'language_to_use=="English"_dgap_max' : 'Parity Gap (E-O)', 'language_to_use=="English"_egap_positive_max' : 'Recall Gap', 'language_to_use=="English"_egap_negative_max' : 'Specificity Gap' } populate_df(df=df, res=res, idx=target, 
model=model, columns=columns) elif sheet == 'insurance': df = pd.read_excel(os.path.join(root, file), index_col=0, sheet_name = 'insurance') columns = [] for i in ['Medicare', 'Private', 'Medicaid']: for j in ['dgap_max', 'egap_positive_max', 'egap_negative_max']: columns.append( 'insurance=="%s"_%s'%(i,j) ) columns = {k:k.replace('insurance==', '') for k in columns} populate_df(df=df, res=res, idx=target, model=model, columns=columns, multi=True) elif sheet == 'ethnicity': df = pd.read_excel(os.path.join(root, file), index_col=0, sheet_name = 'ethnicity_to_use') columns = [] for i in ['WHITE', 'BLACK', 'ASIAN', 'HISPANIC/LATINO', 'OTHER']: for j in ['dgap_max', 'egap_positive_max', 'egap_negative_max']: columns.append( 'ethnicity_to_use=="%s"_%s'%(i,j) ) columns = {k:k.replace('ethnicity_to_use==', '') for k in columns} populate_df(df=df, res=res, idx=target, model=model, columns=columns, multi=True) try: if sheet == 'gender': for m in allowed_models[sheet]: for i in ('Parity Gap (M-F)','Recall Gap','Specificity Gap'): add_gap(m, i, sheet, res, gap_infos_naive) if sheet == 'language': for m in allowed_models[sheet]: for i in ('Parity Gap (E-O)','Recall Gap','Specificity Gap'): add_gap(m, i, sheet, res, gap_infos_naive) if sheet == 'insurance': for m in allowed_models[sheet]: for g in ['Medicare', 'Private', 'Medicaid']: for i in ('"%s"_'%g + t for t in ['dgap_max','egap_positive_max','egap_negative_max']): add_gap(m, i, sheet, res, gap_infos_naive) if sheet == 'ethnicity': for m in allowed_models[sheet]: for g in ['WHITE', 'BLACK', 'ASIAN', 'HISPANIC/LATINO', 'OTHER']: for i in ('"%s"_'%g + t for t in ['dgap_max','egap_positive_max','egap_negative_max']): add_gap(m, i, sheet, res, gap_infos_naive) except: print(f'Error at Target {target} with seed {seed}') continue res = res.reset_index() dfs[sheet] = res.sort_index() dfs_list.append(dfs) gap_infos_naive_list.append(gap_infos_naive) def display_tables(df): for i in ['gender', 'language', 'ethnicity', 'insurance']: 
temp = df.T.xs(i, level = 1).dropna(axis = 1) temp = temp.apply(lambda x: x.apply(lambda y: str(y[0]) + ' (' + "{:.0%}".format(y[1]/y[0]) + ')'), axis = 0) if i in ['ethnicity', 'insurance']: temp = temp.T temp['Gap' ] = list(map(lambda x: list(reversed(re.split(r'"_', x)))[0][:-4], temp.index)) temp['Group' ] = list(map(lambda x: list(reversed(re.split(r'"_', x)))[1][1:].lower(), temp.index)) temp = temp.set_index(['Gap','Group']).sort_index() elif i == 'gender': columns = ['Recall Gap', 'Parity Gap (M-F)', 'Specificity Gap'] temp = temp[columns] elif i == 'language': columns = ['Recall Gap', 'Parity Gap (E-O)', 'Specificity Gap'] temp = temp[columns] yield(temp) if i in ['ethnicity', 'insurance']: temp = temp[['baseline']].reset_index() temp = temp.pivot_table(values = 'baseline', index = 'Group', columns = 'Gap', aggfunc = lambda x: x) temp = temp[['egap_positive', 'dgap', 'egap_negative']] if i =='ethnicity': temp = temp.loc[['white','black','hispanic/latino','asian', 'other']] elif i =='insurance': temp = temp.loc[['medicare', 'private', 'medicaid']] yield(temp) import random idx = random.randint(0,500) gap_infos_naive = gap_infos_naive_list[463] dict_of_df = {k: pd.DataFrame(v) for k, v in gap_infos_naive.items()} naive_df = pd.concat(dict_of_df, axis=1, sort=False) list((display_tables(naive_df))) ``` ### Naive ``` display_tables(naive_df) seeds = defaultdict(float) for sheet in sheets: for root, dirs, files in tqdm(os.walk(folder)): for d in dirs: if 'seed' not in d: continue for file in os.scandir(os.path.join(root, d)): if not(file.name.endswith('final_scores.txt')): continue df = pd.read_csv(file, ": ", header=None, index_col=0) df = pd.DataFrame(df.values.transpose(), columns=df.index) seeds[int(df.Seed.item())] = df.Accuracy.item() seeds = [(k,v) for k,v in seeds.items()] import matplotlib.pyplot as plt import matplotlib.ticker as plticker %matplotlib inline x = [seed[0] for seed in seeds] y = [seed[1] for seed in seeds] plt.scatter(x,y) folder = 
'/media/data_1/darius/models/finetuned' numFolds= 1000 sheets = ['overall', 'gender' ,'language', 'insurance', 'ethnicity'] model = 'baseline' target = 'inhosp_mort' dfs = {} gap_infos_hyp = {} gap_infos_naive = {} for sheet in sheets: res = pd.DataFrame() for root, dirs, files in tqdm(os.walk(folder)): for d in dirs: mname = d if 'seed' not in mname: continue files = sorted([os.path.join(root, d, f) for f in os.listdir(os.path.join(root, d)) if 'results' in f or 'final_scores' in f]) for file in files: if 'final_scores' in file: df = pd.read_csv(file, ": ", header=None, index_col=0) df = pd.DataFrame(df.values.transpose(), columns=df.index) seed = str(int(df.Seed.item())) continue print(f'Current finetuned model is {mname} with seed {seed}...') if sheet == 'overall': print(os.path.join(root, file)) df = pd.read_excel(os.path.join(root, file), index_col=0, sheet_name = 'all') column = 'all_auroc' res.loc[target, seed + '_' + column] = df.loc[column, 'avg'] res.loc[target, seed + '_' + column +'lowerCI'] = df.loc[column, '2.5%'] res.loc[target, seed + '_' + column +'upperCI'] = df.loc[column, '97.5%'] elif sheet == 'gender': df = pd.read_excel(os.path.join(root, file), index_col=0, sheet_name = 'gender') columns = ['gender=="M"_auroc', 'gender=="F"_auroc'] for i in columns: res.loc[target, seed + '_' + i] = df.loc[i, 'avg'] res.loc[target, seed + '_' + i +'lowerCI'] = df.loc[i, '2.5%'] res.loc[target, seed + '_' + i +'upperCI'] = df.loc[i, '97.5%'] elif sheet == 'language': df = pd.read_excel(os.path.join(root, file), index_col=0, sheet_name = 'language_to_use') columns = ['language_to_use=="English"_auroc', 'language_to_use=="Other"_auroc'] for i in columns: res.loc[target, seed + '_' + i] = df.loc[i, 'avg'] res.loc[target, seed + '_' + i +'lowerCI'] = df.loc[i, '2.5%'] res.loc[target, seed + '_' + i +'upperCI'] = df.loc[i, '97.5%'] elif sheet == 'insurance': df = pd.read_excel(os.path.join(root, file), index_col=0, sheet_name = 'insurance') columns = 
['insurance=="Medicare"_auroc', 'insurance=="Private"_auroc', 'insurance=="Medicaid"_auroc'] for i in columns: res.loc[target, seed + '_' + i] = df.loc[i, 'avg'] res.loc[target, seed + '_' + i +'lowerCI'] = df.loc[i, '2.5%'] res.loc[target, seed + '_' + i +'upperCI'] = df.loc[i, '97.5%'] elif sheet == 'ethnicity': df = pd.read_excel(os.path.join(root, file), index_col=0, sheet_name = 'ethnicity_to_use') columns = ['ethnicity_to_use=="WHITE"_auroc', 'ethnicity_to_use=="BLACK"_auroc', 'ethnicity_to_use=="ASIAN"_auroc', 'ethnicity_to_use=="HISPANIC/LATINO"_auroc', 'ethnicity_to_use=="OTHER"_auroc'] for i in columns: res.loc[target, seed + '_' + i] = df.loc[i, 'avg'] res.loc[target, seed + '_' + i +'lowerCI'] = df.loc[i, '2.5%'] res.loc[target, seed + '_' + i +'upperCI'] = df.loc[i, '97.5%'] # if sheet == 'gender': # for m in allowed_models[sheet]: # for i in ('Parity Gap (M-F)','Recall Gap','Specificity Gap'): # add_gap(m, i, sheet, res) # if sheet == 'language': # for m in allowed_models[sheet]: # for i in ('Parity Gap (E-O)','Recall Gap','Specificity Gap'): # add_gap(m, i, sheet, res) # if sheet == 'insurance': # for m in allowed_models[sheet]: # for g in ['Medicare', 'Private', 'Medicaid']: # for i in ('"%s"_'%g + t for t in ['dgap_max','egap_positive_max','egap_negative_max']): # add_gap(m, i, sheet, res) # if sheet == 'ethnicity': # for m in allowed_models[sheet]: # for g in ['WHITE', 'BLACK', 'ASIAN', 'HISPANIC/LATINO', 'OTHER']: # for i in ('"%s"_'%g + t for t in ['dgap_max','egap_positive_max','egap_negative_max']): # add_gap(m, i, sheet, res) dfs[sheet] = res.sort_index() def process_sheet(df): temp = defaultdict(dict) items = [(int(k.split('_')[0]), k.split('=="')[1].split('"_')[0], k.split('_')[-1], v.item()) for k, v in df.items()] for item in items: temp[(item[0], item[1])][item[2]] = item[3] rowlist = [] for k, v in temp.items(): row = {} row['seed'] = k[0] row['target'] = k[1] row['auroc'] = v['auroc'] row['lower'] = v['auroclowerCI'] row['upper'] = 
v['aurocupperCI'] rowlist.append(row) return pd.DataFrame(rowlist).sort_values(['seed', 'target']) df = process_sheet(dfs['gender']) ax = df[df.target == 'M'].plot.hist(x="seed", y="auroc", color="DarkGreen", label="M") df[df.target == 'F'].plot.hist(x="seed", y="auroc", color="DarkBlue", label="F", ax=ax, figsize=(16,10)) # start, end = ax.get_xlim() # ax.xaxis.set_ticks(np.arange(start, end, 5)) df = process_sheet(dfs['ethnicity']) ax = df[df.target == 'OTHER'].plot.hist(x="seed", y="auroc", color="yellow", label="Other") df[df.target == 'WHITE'].plot.hist(x="seed", y="auroc", color="red", label="White", ax=ax) df[df.target == 'HISPANIC/LATINO'].plot.hist(x="seed", y="auroc", color="darkviolet", label="Hispanic", ax=ax) df[df.target == 'BLACK'].plot.hist(x="seed", y="auroc", color="royalblue", label="Black", ax=ax) df[df.target == 'ASIAN'].plot.hist(x="seed", y="auroc", color="green", label="Asian", ax=ax, figsize=(20,10)) # start, end = ax.get_xlim() # ax.xaxis.set_ticks(np.arange(start, end, 1)) df = process_sheet(dfs['language']) ax = df[df.target == 'English'].plot.scatter(x="seed", y="auroc", color="DarkGreen", label="English") df[df.target == 'Other'].plot.scatter(x="seed", y="auroc", color="DarkBlue", label="Other", ax=ax) df = process_sheet(dfs['insurance']) ax = df[df.target == 'Medicare'].plot.scatter(x="seed", y="auroc", color="red", label="Medicare") df[df.target == 'Private'].plot.scatter(x="seed", y="auroc", color="royalblue", label="Private", ax=ax) df[df.target == 'Medicaid'].plot.scatter(x="seed", y="auroc", color="green", label="Medicaid", ax=ax) start, end = ax.get_xlim() ax.xaxis.set_ticks(np.arange(start, end, 1)) temp = defaultdict(dict) items = [(int(k.split('_')[0]), k.split('_')[-1], v.item()) for k, v in dfs['overall'].items()] for item in items: temp[item[0]][item[1]] = item[2] rowlist = [] for k, v in temp.items(): row = {} row['seed'] = k row['auroc'] = v['auroc'] row['lower'] = v['auroclowerCI'] row['upper'] = v['aurocupperCI'] 
rowlist.append(row) df = pd.DataFrame(rowlist) df.plot.scatter(x="seed", y="auroc") df = pd.concat([process_sheet(dfs[target]) for target in sheets if target != 'overall']) df.to_csv('/home/darius/subgroup_auroc.csv', index=False) df ```
github_jupyter
# NDBC Pipeline Buoy Data This file cleans and concatenates buoy data for the Pipeline Buoy from the National Data Buoy Center (NDBC). Each year of data has its own file, and missing values are expressed differently for each column.
- 'APD' -> 99., ..., '5.6', 'MM' **strings** - 'MWD' -> 999 ### Discard missing values ``` noaa = noaa[ (noaa["WVHT"] != 99.0) & (noaa["DPD"] != 99.0) & (noaa["APD"] != 99.0) & (noaa["APD"] != "...") & (noaa["APD"] != "MM") & (noaa["MWD"] != 999) ] noaa.info() ``` ### Fix dtypes APD needs to be float. MWD needs to be int. ``` noaa['APD'] = noaa['APD'].astype(float) noaa['MWD'] = noaa['MWD'].astype(int) # Sanity check noaa.info() ``` ### Human readable column names ``` # Rename columns for ease of use. noaa.rename( columns={ "WVHT": "wave_height", "DPD": "dominant_period", "APD": "avg_period", "MWD": "dominant_wave_direction", }, inplace=True, ) ``` ### Date conversions Convert date columns to strings and add '0' where necessary. ``` noaa['MM'].unique() noaa["MM"] = noaa["MM"].map(lambda x: "0" + str(x) if len(str(x)) == 1 else str(x)) noaa["MM"].unique() noaa["DD"] = noaa["DD"].map(lambda x: "0" + str(x) if len(str(x)) == 1 else str(x)) noaa['DD'].unique() noaa["hh"] = noaa["hh"].map(lambda x: "0" + str(x) if len(str(x)) == 1 else str(x)) noaa['hh'].unique() noaa["mm"] = noaa["mm"].map(lambda x: "0" + str(x) if len(str(x)) == 1 else str(x)) noaa['mm'].unique() ``` ### Create a date column with correct format from other cols. 
``` # Create a date column with the format '2018-10-26 12:00' noaa["date"] = ( "'" + noaa["#YY"].astype(str) + '-' + noaa["MM"].astype(str) + '-' + noaa["DD"].astype(str) + ' ' + noaa["hh"].astype(str) + ':' + noaa["mm"].astype(str) + "'" ) noaa.head() noaa.tail() ``` ### Convert date column to datetime format ``` # With help from https://stackoverflow.com/questions/32344533/how-do-i-round-datetime-column-to-nearest-quarter-hour # Round to nearest half hour # Subtract 10 hours to convert from UTC to Hawaii time noaa["date"] = pd.to_datetime(noaa["date"]).dt.round("30min") - pd.DateOffset(hours=10) noaa.drop(columns=["#YY", "MM", "DD", "hh", "mm"], inplace=True) noaa.head() noaa.tail() ``` ### Sort by date and move to index ``` noaa.sort_values(by='date', inplace=True) noaa.set_index('date', inplace=True) ``` ### Since we only have competition dates from 2014 on, keep those dates only. This also fixes the sampling frequency to ~30 min ``` noaa = noaa[noaa.index.year > 2013] ``` ### Visual check There are some gaps with no buoy data. ``` noaa['wave_height'].plot() noaa.info() ``` ### Replace wave direction with sin and cos columns for modeling ``` # With help from # https://dax-nb-preview-prod.s3.us.cloud-object-storage.appdomain.cloud/jfk-weather-data/Part%201%20-%20Data%20Cleaning.html#2.6-Feature-encoding noaa['dominant_wave_direction_sin'] = np.sin(noaa['dominant_wave_direction']*(2.*np.pi/360)) noaa['dominant_wave_direction_cos'] = np.cos(noaa['dominant_wave_direction']*(2.*np.pi/360)) # Drop original wave direction column noaa.drop(columns='dominant_wave_direction', inplace=True) ``` ### Create time shifts Wave conditions measured by the buoy create waves onshore hours after they pass the buoy. To take this into account when modeling, we create 3, 6, 9 and 12 hour shifted data. This is a big area of opportunity for further exploration, calculating the time to shore is a function of wave period and size. 
``` # 3 hour shift noaa.shift(6).rename(columns={'wave_height': 'wave_height_3_h'}) # Create sets of shifted data and concatenate them with our original data shifts = [2, 3, 6, 12, 18] df_list = [] for shift in shifts: noaa_shifted = noaa.shift(shift).rename( columns={ "wave_height": f"wave_height_{shift/2}_h", "dominant_period": f"dominant_period_{shift/2}_h", "avg_period": f"avg_period_{shift/2}_h", "dominant_wave_direction_sin": f"dominant_wave_direction_sin_{shift/2}_h", "dominant_wave_direction_cos": f"dominant_wave_direction_cos_{shift/2}_h", } ) df_list.append(noaa_shifted) noaa_shifted = pd.concat([noaa, pd.concat(df_list, axis=1)], axis=1) noaa_shifted ``` ## Save clean and merged buoy data to merge with scores for modeling ``` noaa_shifted.to_csv('../data/noaa/pipeline_buoy_full_shifted.csv') ```
github_jupyter
def makeHMMUnSupData(Input, colname, fipsname):
    """Convert a long-format frame into HMM-style unsupervised input.

    Returns a list with one entry per unique value of *fipsname*
    (in first-appearance order); each entry is the list of *colname*
    values for the rows matching that FIPS code.
    """
    return [
        list(Input.loc[Input[fipsname] == code, colname])
        for code in Input[fipsname].unique()
    ]
day values if FirstDay not in rows.date.unique(): NYT_daily_Warp = NYT_daily_Warp.append({'fips': fips, 'date': pd.to_datetime('2020-01-21'), \ 'cases': 0, 'deaths' : 0, 'id' : str(fips) + ', 2020-01-21'}, ignore_index=True) #making sure each entry has the final day values if LastDay not in rows.date.unique(): NYT_daily_Warp = NYT_daily_Warp[NYT_daily_Warp['fips'] != fips] NYT_daily_Warp = NYT_daily_Warp.sort_values(['fips','date']).reset_index(drop=True) NYT_daily_Warp.to_csv('NYT_daily_Warp.csv') NYT_daily_Warp_Death = makeHMMUnSupData(NYT_daily_Warp, 'deaths', 'fips') #This is a list of all the counties and dates County_List = list(NYT_daily.fips.unique()) Date_List = list(NYT_daily.date.unique()) #This creates a base dataframe that contains all pairs of FIPS codes with the valid dates given in Air_Qual CL, DL = pd.core.reshape.util.cartesian_product([County_List, Date_List]) BaseFrame = pd.DataFrame(dict(fips=CL, date=DL)).sort_values(['fips','date']).reset_index(drop=True) BaseFrame['id'] = BaseFrame.fips.astype(str).str.cat(BaseFrame.date.astype(str), sep=', ') #Making frame of all deaths at all dates to properly do DTW clustering NYT_daily_Filled = BaseFrame.join(NYT_daily.set_index('id'), on='id', how='outer', lsuffix='',rsuffix='_x').sort_values(['fips', 'date']).drop(columns=['fips_x','date_x']).fillna(0).drop_duplicates(subset=['fips','date']).reset_index(drop=True) NYT_daily_Filled.to_csv('NYT_daily_Filled.csv') #List of lists of daily death count for each county, starting 1/23/20, ending most recent date. 
def cumtoDaily(Data, colname):
    """Turn a cumulative per-FIPS column into daily changes.

    For each FIPS code (in order of first appearance) the column is
    differenced within that code's rows; the first row of every code
    becomes 0 instead of NaN.  Returns one flat list with the groups
    concatenated in FIPS order.
    """
    ls = []
    # Group once instead of re-filtering the whole frame for every FIPS
    # code (the original also bound an unused `column` local);
    # sort=False preserves the original first-appearance group order.
    for _, group in Data.groupby('FIPS', sort=False):
        ls.extend(group[colname].diff().fillna(0).tolist())
    return ls
'Recovered' : cumtoDaily(JHU_mono,'Recovered')} #Daily changing data based on monotonically transformed data JHU_daily = pd.DataFrame(data=d) JHU_daily.to_csv('JHU_Daily.csv') #List of lists of daily death count for each county, starting 3/23/20, ending most recent date. JHU_daily_death = makeHMMUnSupData(JHU_daily, 'Deaths', 'FIPS') #Our three types of death lists for DTW clusterings NYT_daily_Warp_Death NYT_daily_Death_Filled JHU_daily_death print(len(NYT_daily_Warp_Death)) print(np.mean([len(a) for a in NYT_daily_Warp_Death])) print(np.mean([sum(a) for a in NYT_daily_Warp_Death])) print(np.mean([np.mean(a) for a in NYT_daily_Warp_Death])) print(len(NYT_daily_Death_Filled)) print(np.mean([len(a) for a in NYT_daily_Death_Filled])) print(np.mean([sum(a) for a in NYT_daily_Death_Filled])) print(np.mean([np.mean(a) for a in NYT_daily_Death_Filled])) print(len(JHU_daily_death)) print(np.mean([len(a) for a in JHU_daily_death])) print(np.mean([sum(a) for a in JHU_daily_death])) print(np.mean([np.mean(a) for a in JHU_daily_death])) #Saving the death data filesw f = open('NYT_daily_Warp_Death.txt', 'w') simplejson.dump(NYT_daily_Warp_Death, f) f.close() g = open('NYT_daily_Death_Filled.txt', 'w') simplejson.dump(NYT_daily_Death_Filled, g) g.close() h = open('JHU_daily_death.txt', 'w') simplejson.dump(JHU_daily_death, h) h.close() ```
github_jupyter
``` import numpy as np import pandas as pd # Code to read csv file into colaboratory: !pip install -U -q PyDrive from pydrive.auth import GoogleAuth from pydrive.drive import GoogleDrive from google.colab import auth from oauth2client.client import GoogleCredentials auth.authenticate_user() gauth = GoogleAuth() gauth.credentials = GoogleCredentials.get_application_default() drive = GoogleDrive(gauth) ''' downloaded = drive.CreateFile({'id':'1fjM5LTtbHpkeI0CxnuMWWc0vC3_ldhw-'}) downloaded.GetContentFile('quora_questions.csv') quora= pd.read_csv("quora_questions.csv") quora.head() ''' ``` # Question and Answer Chat Bots ## Loading the Data We will be working with the Babi Data Set from Facebook Research. Full Details: https://research.fb.com/downloads/babi/ - Jason Weston, Antoine Bordes, Sumit Chopra, Tomas Mikolov, Alexander M. Rush, "Towards AI-Complete Question Answering: A Set of Prerequisite Toy Tasks", http://arxiv.org/abs/1502.05698 ``` downloaded = drive.CreateFile({'id':'1A7O67NVUsahGvcv497TdnD9LFk5BvDDI'}) downloaded.GetContentFile('train_qa.txt') downloaded = drive.CreateFile({'id':'1_G9c3NGMzENi1VGrInCZmOilAafFUhhE'}) downloaded.GetContentFile('test_qa.txt') import pickle import numpy as np with open("train_qa.txt", "rb") as fp: # Unpickling train_data = pickle.load(fp) with open("test_qa.txt", "rb") as fp: # Unpickling test_data = pickle.load(fp) type(test_data) type(train_data) len(test_data) len(train_data) train_data[0] ' '.join(train_data[0][0]) ' '.join(train_data[0][1]) train_data[0][2] ``` ----- ## Setting up Vocabulary of All Words ``` # Create a set that holds the vocab words vocab = set() all_data = test_data + train_data len(all_data) ``` A set in python is an unordered collection of unique elements ``` for story, question , answer in all_data: # In case you don't know what a union of sets is: # https://www.programiz.com/python-programming/methods/set/union # set of story is: set(train_data[0]0) vocab = vocab.union(set(story)) vocab = 
def vectorize_stories(data, word_index=tokenizer.word_index, max_story_len=max_story_len, max_question_len=max_question_len):
    """Vectorize (story, query, answer) triples into padded arrays.

    Every word is mapped to its tokenizer index, the answer becomes a
    one-hot vector over the vocabulary (slot 0 is reserved for the
    padding token), and the story/question index sequences are padded
    to the given maximum lengths so the RNN sees uniform shapes.

    Returns a tuple (stories, questions, answers).
    """
    stories, questions, answers = [], [], []
    for story, query, answer in data:
        # Word -> index for both the story and the question.
        stories.append([word_index[word.lower()] for word in story])
        questions.append([word_index[word.lower()] for word in query])
        # One-hot answer; +1 because index 0 is the padding slot.
        one_hot = np.zeros(len(word_index) + 1)
        one_hot[word_index[answer]] = 1
        answers.append(one_hot)
    return (
        pad_sequences(stories, maxlen=max_story_len),
        pad_sequences(questions, maxlen=max_question_len),
        np.array(answers),
    )
``` # placeholder shape= (max_story_len, batch_size) input_sequence = Input((max_story_len,)) # the input shape is a tuple question = Input((max_question_len,)) ``` ### Building the Networks To understand why we chose this setup, make sure to read the paper we are using: * Sainbayar Sukhbaatar, Arthur Szlam, Jason Weston, Rob Fergus, "End-To-End Memory Networks", http://arxiv.org/abs/1503.08895 ## Encoders ### Input Encoder m ``` # Input gets embedded to a sequence of vectors input_encoder_m = Sequential() input_encoder_m.add(Embedding(input_dim=vocab_size,output_dim=64)) # output dimensions from the paper= 64 input_encoder_m.add(Dropout(0.3)) # This encoder will output: # (samples, story_maxlen, embedding_dim) # Input Encoder c # embed the input into a sequence of vectors of size query_maxlen input_encoder_c = Sequential() input_encoder_c.add(Embedding(input_dim=vocab_size,output_dim=max_question_len)) input_encoder_c.add(Dropout(0.3)) # output: (samples, story_maxlen, query_maxlen) # Question Encoder # embed the question into a sequence of vectors question_encoder = Sequential() question_encoder.add(Embedding(input_dim=vocab_size, output_dim=64, input_length=max_question_len)) question_encoder.add(Dropout(0.3)) # output: (samples, query_maxlen, embedding_dim) # Encode the Sequences # encode input sequence and questions (which are indices) # to sequences of dense vectors input_encoded_m = input_encoder_m(input_sequence) input_encoded_c = input_encoder_c(input_sequence) question_encoded = question_encoder(question) # Use dot product to compute the match between first input vector seq and the query # shape: `(samples, story_maxlen, query_maxlen)` match = dot([input_encoded_m, question_encoded], axes=(2, 2)) match = Activation('softmax')(match) # Add this match matrix with the second input vector sequence # add the match matrix with the second input vector sequence response = add([match, input_encoded_c]) # (samples, story_maxlen, query_maxlen) response = Permute((2, 
1))(response) # (samples, query_maxlen, story_maxlen) # concatenate # concatenate the match matrix with the question vector sequence answer = concatenate([response, question_encoded]) answer # Reduce with RNN (LSTM) answer = LSTM(32)(answer) # (samples, 32) # Regularization with Dropout answer = Dropout(0.5)(answer) answer = Dense(vocab_size)(answer) # (samples, vocab_size) # we output a probability distribution over the vocabulary answer = Activation('softmax')(answer) # build the final model model = Model([input_sequence, question], answer) model.compile(optimizer='rmsprop', loss='categorical_crossentropy', metrics=['accuracy']) model.summary() # train history = model.fit([inputs_train, queries_train], answers_train,batch_size=32,epochs=10,validation_data=([inputs_test, queries_test], answers_test)) ### Saving the Model filename = 'chatbot_120_epochs.h5' model.save(filename) ``` ## Evaluating the Model ### Plotting Out Training History ``` import matplotlib.pyplot as plt %matplotlib inline print(history.history.keys()) # summarize history for accuracy plt.plot(history.history['accuracy']) plt.plot(history.history['val_accuracy']) plt.title('model accuracy') plt.ylabel('accuracy') plt.xlabel('epoch') plt.legend(['train', 'test'], loc='upper left') plt.show() ``` ### Evaluating on Given Test Set ``` model.load_weights(filename) pred_results = model.predict(([inputs_test, queries_test])) test_data[0][0] story =' '.join(word for word in test_data[0][0]) print(story) query = ' '.join(word for word in test_data[0][1]) print(query) print("True Test Answer from Data is:",test_data[0][2]) #Generate prediction from model val_max = np.argmax(pred_results[0]) for key, val in tokenizer.word_index.items(): if val == val_max: k = key print("Predicted answer is: ", k) print("Probability of certainty was: ", pred_results[0][val_max]) ``` ## Writing Your Own Stories and Questions Remember you can only use words from the existing vocab ``` vocab # Note the whitespace of the periods 
my_story = "John left the kitchen . Sandra dropped the football in the garden ." my_story.split() my_question = "Is the football in the garden ?" my_question.split() mydata = [(my_story.split(),my_question.split(),'yes')] my_story,my_ques,my_ans = vectorize_stories(mydata) pred_results = model.predict(([ my_story, my_ques])) #Generate prediction from model val_max = np.argmax(pred_results[0]) for key, val in tokenizer.word_index.items(): if val == val_max: k = key print("Predicted answer is: ", k) print("Probability of certainty was: ", pred_results[0][val_max]) ```
github_jupyter
class Agent:
    """Policy-gradient stock-trading agent built on TF1 graph mode.

    Actions are encoded as 0 = hold, 1 = buy one unit, 2 = sell one
    unit (FIFO inventory).  The state at time t is the vector of
    consecutive price differences over the trailing window.
    """

    LEARNING_RATE = 1e-4  # Adam step size
    LAYER_SIZE = 256      # hidden-layer width
    GAMMA = 0.9           # discount factor used by discount_rewards
    OUTPUT_SIZE = 3       # number of actions: hold / buy / sell

    def __init__(self, state_size, window_size, trend, skip):
        """Build the policy network and start an interactive session.

        trend is the price series (list); skip is the step taken when
        walking the series.  state_size must equal window_size because
        get_state() returns window_size price differences.
        """
        self.state_size = state_size
        self.window_size = window_size
        self.half_window = window_size // 2
        self.trend = trend
        self.skip = skip
        # Placeholders: batch of states, per-step rewards, taken actions.
        self.X = tf.placeholder(tf.float32, (None, self.state_size))
        self.REWARDS = tf.placeholder(tf.float32, (None))
        self.ACTIONS = tf.placeholder(tf.int32, (None))
        # One hidden layer; the output layer already applies softmax,
        # so self.logits actually holds action probabilities.
        feed_forward = tf.layers.dense(self.X, self.LAYER_SIZE, activation = tf.nn.relu)
        self.logits = tf.layers.dense(feed_forward, self.OUTPUT_SIZE, activation = tf.nn.softmax)
        input_y = tf.one_hot(self.ACTIONS, self.OUTPUT_SIZE)
        # Surrogate "log-likelihood": log(2 - p) for the taken action
        # and log(1 + p) for the others.  NOTE(review): this is not the
        # standard REINFORCE log pi(a|s); confirm it is intentional.
        loglike = tf.log((input_y * (input_y - self.logits) + (1 - input_y) * (input_y + self.logits)) + 1)
        # Broadcast each scalar reward across the three action slots.
        rewards = tf.tile(tf.reshape(self.REWARDS, (-1,1)), [1, self.OUTPUT_SIZE])
        self.cost = -tf.reduce_mean(loglike * (rewards + 1))
        self.optimizer = tf.train.AdamOptimizer(learning_rate = self.LEARNING_RATE).minimize(self.cost)
        self.sess = tf.InteractiveSession()
        self.sess.run(tf.global_variables_initializer())

    def predict(self, inputs):
        """Return action probabilities for a batch of states."""
        return self.sess.run(self.logits, feed_dict={self.X:inputs})

    def get_state(self, t):
        """State at time t: window_size consecutive price differences.

        When t is too close to the start, the window is front-padded
        with copies of the first price.
        """
        window_size = self.window_size + 1
        d = t - window_size + 1
        block = self.trend[d : t + 1] if d >= 0 else -d * [self.trend[0]] + self.trend[0 : t + 1]
        res = []
        for i in range(window_size - 1):
            res.append(block[i + 1] - block[i])
        return np.array([res])

    def discount_rewards(self, r):
        """Discounted cumulative sum of r (gamma = GAMMA), back-to-front."""
        discounted_r = np.zeros_like(r)
        running_add = 0
        for t in reversed(range(0, r.size)):
            running_add = running_add * self.GAMMA + r[t]
            discounted_r[t] = running_add
        return discounted_r

    def get_predicted_action(self, sequence):
        """Greedy action: argmax of the policy's probabilities."""
        prediction = self.predict(np.array(sequence))[0]
        return np.argmax(prediction)

    def buy(self, initial_money):
        """Greedy evaluation pass: trade the whole series once.

        Returns (buy days, sell days, total gain, percent return).
        NOTE(review): sell prices are read from the module-level
        `close` list rather than self.trend; this only works because
        the notebook constructs the agent with trend=close.
        """
        starting_money = initial_money
        states_sell = []
        states_buy = []
        inventory = []
        state = self.get_state(0)
        for t in range(0, len(self.trend) - 1, self.skip):
            action = self.get_predicted_action(state)
            next_state = self.get_state(t + 1)
            # Buy only if affordable and not too close to the series end.
            if action == 1 and initial_money >= self.trend[t] and t < (len(self.trend) - self.half_window):
                inventory.append(self.trend[t])
                initial_money -= self.trend[t]
                states_buy.append(t)
                print('day %d: buy 1 unit at price %f, total balance %f'% (t, self.trend[t], initial_money))
            elif action == 2 and len(inventory):
                bought_price = inventory.pop(0)  # FIFO: sell oldest lot
                initial_money += self.trend[t]
                states_sell.append(t)
                try:
                    invest = ((close[t] - bought_price) / bought_price) * 100
                except:
                    invest = 0
                print(
                    'day %d, sell 1 unit at price %f, investment %f %%, total balance %f,'
                    % (t, close[t], invest, initial_money)
                )
            state = next_state
        invest = ((initial_money - starting_money) / starting_money) * 100
        total_gains = initial_money - starting_money
        return states_buy, states_sell, total_gains, invest

    def train(self, iterations, checkpoint, initial_money):
        """Run `iterations` greedy episodes, updating after each one.

        The per-step "reward" stored in ep_history is the running cash
        balance, later replaced by its discounted return.  NOTE(review):
        the buy branch debits module-level `close[t]` while sells credit
        self.trend[t]; consistent only because trend is `close` here.
        """
        for i in range(iterations):
            ep_history = []
            total_profit = 0
            inventory = []
            state = self.get_state(0)
            starting_money = initial_money
            for t in range(0, len(self.trend) - 1, self.skip):
                action = self.get_predicted_action(state)
                next_state = self.get_state(t + 1)
                if action == 1 and starting_money >= self.trend[t] and t < (len(self.trend) - self.half_window):
                    inventory.append(self.trend[t])
                    starting_money -= close[t]
                elif action == 2 and len(inventory):
                    bought_price = inventory.pop(0)
                    total_profit += self.trend[t] - bought_price
                    starting_money += self.trend[t]
                # Record (state, action, balance-as-reward, next_state).
                ep_history.append([state,action,starting_money,next_state])
                state = next_state
            ep_history = np.array(ep_history)
            # Replace raw balances with their discounted returns.
            ep_history[:,2] = self.discount_rewards(ep_history[:,2])
            cost, _ = self.sess.run([self.cost, self.optimizer], feed_dict={self.X:np.vstack(ep_history[:,0]), self.REWARDS:ep_history[:,2], self.ACTIONS:ep_history[:,1]})
            if (i+1) % checkpoint == 0:
                print('epoch: %d, total rewards: %f.3, cost: %f, total money: %f'%(i + 1, total_profit, cost, starting_money))
github_jupyter
# Optimization Methods Until now, you've always used Gradient Descent to update the parameters and minimize the cost. In this notebook, you will learn more advanced optimization methods that can speed up learning and perhaps even get you to a better final value for the cost function. Having a good optimization algorithm can be the difference between waiting days vs. just a few hours to get a good result. Gradient descent goes "downhill" on a cost function $J$. Think of it as trying to do this: <img src="images/cost.jpg" style="width:650px;height:300px;"> <caption><center> <u> **Figure 1** </u>: **Minimizing the cost is like finding the lowest point in a hilly landscape**<br> At each step of the training, you update your parameters following a certain direction to try to get to the lowest possible point. </center></caption> **Notations**: As usual, $\frac{\partial J}{\partial a } = $ `da` for any variable `a`. To get started, run the following code to import the libraries you will need. ``` import numpy as np import matplotlib.pyplot as plt import scipy.io import math import sklearn import sklearn.datasets from opt_utils import load_params_and_grads, initialize_parameters, forward_propagation, backward_propagation from opt_utils import compute_cost, predict, predict_dec, plot_decision_boundary, load_dataset from testCases import * %matplotlib inline plt.rcParams['figure.figsize'] = (7.0, 4.0) # set default size of plots plt.rcParams['image.interpolation'] = 'nearest' plt.rcParams['image.cmap'] = 'gray' ``` ## 1 - Gradient Descent A simple optimization method in machine learning is gradient descent (GD). When you take gradient steps with respect to all $m$ examples on each step, it is also called Batch Gradient Descent. **Warm-up exercise**: Implement the gradient descent update rule. 
# GRADED FUNCTION: update_parameters_with_gd

def update_parameters_with_gd(parameters, grads, learning_rate):
    """
    Update parameters using one step of gradient descent

    Arguments:
    parameters -- python dictionary containing your parameters to be updated:
                    parameters['W' + str(l)] = Wl
                    parameters['b' + str(l)] = bl
    grads -- python dictionary containing your gradients to update each parameters:
                    grads['dW' + str(l)] = dWl
                    grads['db' + str(l)] = dbl
    learning_rate -- the learning rate, scalar.

    Returns:
    parameters -- python dictionary containing your updated parameters
    """
    num_layers = len(parameters) // 2  # two entries (W, b) per layer

    # Vanilla gradient step, theta <- theta - alpha * d(theta),
    # applied to both the weight matrix and the bias of every layer.
    for layer in range(1, num_layers + 1):
        for prefix in ("W", "b"):
            key = prefix + str(layer)
            parameters[key] = parameters[key] - learning_rate * grads["d" + key]

    return parameters
parameters = update_parameters(parameters, grads)
```

- **Stochastic Gradient Descent**:

```python
X = data_input
Y = labels
parameters = initialize_parameters(layers_dims)
for i in range(0, num_iterations):
    for j in range(0, m):
        # Forward propagation
        a, caches = forward_propagation(X[:,j], parameters)
        # Compute cost
        cost = compute_cost(a, Y[:,j])
        # Backward propagation
        grads = backward_propagation(a, caches, parameters)
        # Update parameters.
        parameters = update_parameters(parameters, grads)
```

In Stochastic Gradient Descent, you use only 1 training example before updating the gradients. When the training set is large, SGD can be faster. But the parameters will "oscillate" toward the minimum rather than converge smoothly. Here is an illustration of this:

<img src="images/kiank_sgd.png" style="width:750px;height:250px;">
<caption><center> <u> <font color='purple'> **Figure 1** </u><font color='purple'> : **SGD vs GD**<br> "+" denotes a minimum of the cost. SGD leads to many oscillations to reach convergence. But each step is a lot faster to compute for SGD than for GD, as it uses only one training example (vs. the whole batch for GD). </center></caption>

**Note** also that implementing SGD requires 3 for-loops in total:
1. Over the number of iterations
2. Over the $m$ training examples
3. Over the layers (to update all parameters, from $(W^{[1]},b^{[1]})$ to $(W^{[L]},b^{[L]})$)

In practice, you'll often get faster results if you use neither the whole training set nor only one training example to perform each update. Mini-batch gradient descent uses an intermediate number of examples for each step. With mini-batch gradient descent, you loop over the mini-batches instead of looping over individual training examples.

<img src="images/kiank_minibatch.png" style="width:750px;height:250px;">
<caption><center> <u> <font color='purple'> **Figure 2** </u>: <font color='purple'> **SGD vs Mini-Batch GD**<br> "+" denotes a minimum of the cost.
Using mini-batches in your optimization algorithm often leads to faster optimization. </center></caption>

<font color='blue'>
**What you should remember**:
- The difference between gradient descent, mini-batch gradient descent and stochastic gradient descent is the number of examples you use to perform one update step.
- You have to tune a learning rate hyperparameter $\alpha$.
- With a well-tuned mini-batch size, it usually outperforms either gradient descent or stochastic gradient descent (particularly when the training set is large).

## 2 - Mini-Batch Gradient descent

Let's learn how to build mini-batches from the training set (X, Y).

There are two steps:
- **Shuffle**: Create a shuffled version of the training set (X, Y) as shown below. Each column of X and Y represents a training example. Note that the random shuffling is done synchronously between X and Y, so that after the shuffling the $i^{th}$ column of X is the example corresponding to the $i^{th}$ label in Y. The shuffling step ensures that examples will be split randomly into different mini-batches.

<img src="images/kiank_shuffle.png" style="width:550px;height:300px;">

- **Partition**: Partition the shuffled (X, Y) into mini-batches of size `mini_batch_size` (here 64). Note that the number of training examples is not always divisible by `mini_batch_size`. The last mini batch might be smaller, but you don't need to worry about this. When the final mini-batch is smaller than the full `mini_batch_size`, it will look like this:

<img src="images/kiank_partition.png" style="width:550px;height:300px;">

**Exercise**: Implement `random_mini_batches`. We coded the shuffling part for you. To help you with the partitioning step, we give you the following code that selects the indexes for the $1^{st}$ and $2^{nd}$ mini-batches:
```python
first_mini_batch_X = shuffled_X[:, 0 : mini_batch_size]
second_mini_batch_X = shuffled_X[:, mini_batch_size : 2 * mini_batch_size]
...
```

Note that the last mini-batch might end up smaller than `mini_batch_size=64`. Let $\lfloor s \rfloor$ represent $s$ rounded down to the nearest integer (this is `math.floor(s)` in Python). If the total number of examples is not a multiple of `mini_batch_size=64` then there will be $\lfloor \frac{m}{mini\_batch\_size}\rfloor$ mini-batches with a full 64 examples, and the number of examples in the final mini-batch will be ($m - mini\_batch\_size \times \lfloor \frac{m}{mini\_batch\_size}\rfloor$).

```
# GRADED FUNCTION: random_mini_batches

def random_mini_batches(X, Y, mini_batch_size = 64, seed = 0):
    """
    Creates a list of random minibatches from (X, Y)
    
    Arguments:
    X -- input data, of shape (input size, number of examples)
    Y -- true "label" vector (1 for blue dot / 0 for red dot), of shape (1, number of examples)
    mini_batch_size -- size of the mini-batches, integer
    
    Returns:
    mini_batches -- list of synchronous (mini_batch_X, mini_batch_Y)
    """
    
    np.random.seed(seed)            # To make your "random" minibatches the same as ours
    m = X.shape[1]                  # number of training examples
    mini_batches = []
        
    # Step 1: Shuffle (X, Y)
    permutation = list(np.random.permutation(m))
    shuffled_X = X[:, permutation]
    shuffled_Y = Y[:, permutation].reshape((1,m))

    # Step 2: Partition (shuffled_X, shuffled_Y). Minus the end case.
    num_complete_minibatches = math.floor(m/mini_batch_size) # number of mini batches of size mini_batch_size in your partitionning
    for k in range(0, num_complete_minibatches):
        ### START CODE HERE ### (approx. 2 lines)
        mini_batch_X = shuffled_X[:, k*mini_batch_size : (k+1)*mini_batch_size]
        mini_batch_Y = shuffled_Y[:, k*mini_batch_size : (k+1)*mini_batch_size]
        ### END CODE HERE ###
        mini_batch = (mini_batch_X, mini_batch_Y)
        mini_batches.append(mini_batch)
    
    # Handling the end case (last mini-batch < mini_batch_size)
    if m % mini_batch_size != 0:
        ### START CODE HERE ### (approx.
2 lines) mini_batch_X = shuffled_X[:, num_complete_minibatches *mini_batch_size : ] mini_batch_Y = shuffled_Y[:, num_complete_minibatches *mini_batch_size : ] ### END CODE HERE ### mini_batch = (mini_batch_X, mini_batch_Y) mini_batches.append(mini_batch) return mini_batches X_assess, Y_assess, mini_batch_size = random_mini_batches_test_case() mini_batches = random_mini_batches(X_assess, Y_assess, mini_batch_size) print ("shape of the 1st mini_batch_X: " + str(mini_batches[0][0].shape)) print ("shape of the 2nd mini_batch_X: " + str(mini_batches[1][0].shape)) print ("shape of the 3rd mini_batch_X: " + str(mini_batches[2][0].shape)) print ("shape of the 1st mini_batch_Y: " + str(mini_batches[0][1].shape)) print ("shape of the 2nd mini_batch_Y: " + str(mini_batches[1][1].shape)) print ("shape of the 3rd mini_batch_Y: " + str(mini_batches[2][1].shape)) print ("mini batch sanity check: " + str(mini_batches[0][0][0][0:3])) ``` **Expected Output**: <table style="width:50%"> <tr> <td > **shape of the 1st mini_batch_X** </td> <td > (12288, 64) </td> </tr> <tr> <td > **shape of the 2nd mini_batch_X** </td> <td > (12288, 64) </td> </tr> <tr> <td > **shape of the 3rd mini_batch_X** </td> <td > (12288, 20) </td> </tr> <tr> <td > **shape of the 1st mini_batch_Y** </td> <td > (1, 64) </td> </tr> <tr> <td > **shape of the 2nd mini_batch_Y** </td> <td > (1, 64) </td> </tr> <tr> <td > **shape of the 3rd mini_batch_Y** </td> <td > (1, 20) </td> </tr> <tr> <td > **mini batch sanity check** </td> <td > [ 0.90085595 -0.7612069 0.2344157 ] </td> </tr> </table> <font color='blue'> **What you should remember**: - Shuffling and Partitioning are the two steps required to build mini-batches - Powers of two are often chosen to be the mini-batch size, e.g., 16, 32, 64, 128. 
## 3 - Momentum

Because mini-batch gradient descent makes a parameter update after seeing just a subset of examples, the direction of the update has some variance, and so the path taken by mini-batch gradient descent will "oscillate" toward convergence. Using momentum can reduce these oscillations.

Momentum takes into account the past gradients to smooth out the update. We will store the 'direction' of the previous gradients in the variable $v$. Formally, this will be the exponentially weighted average of the gradient on previous steps. You can also think of $v$ as the "velocity" of a ball rolling downhill, building up speed (and momentum) according to the direction of the gradient/slope of the hill.

<img src="images/opt_momentum.png" style="width:400px;height:250px;">
<caption><center> <u><font color='purple'>**Figure 3**</u><font color='purple'>: The red arrows show the direction taken by one step of mini-batch gradient descent with momentum. The blue points show the direction of the gradient (with respect to the current mini-batch) on each step. Rather than just following the gradient, we let the gradient influence $v$ and then take a step in the direction of $v$.<br> <font color='black'> </center>

**Exercise**: Initialize the velocity. The velocity, $v$, is a python dictionary that needs to be initialized with arrays of zeros. Its keys are the same as those in the `grads` dictionary, that is: for $l =1,...,L$:
```python
v["dW" + str(l+1)] = ... #(numpy array of zeros with the same shape as parameters["W" + str(l+1)])
v["db" + str(l+1)] = ... #(numpy array of zeros with the same shape as parameters["b" + str(l+1)])
```
**Note** that the iterator l starts at 0 in the for loop while the first parameters are v["dW1"] and v["db1"] (that's a "one" on the superscript). This is why we are shifting l to l+1 in the `for` loop.
``` # GRADED FUNCTION: initialize_velocity def initialize_velocity(parameters): """ Initializes the velocity as a python dictionary with: - keys: "dW1", "db1", ..., "dWL", "dbL" - values: numpy arrays of zeros of the same shape as the corresponding gradients/parameters. Arguments: parameters -- python dictionary containing your parameters. parameters['W' + str(l)] = Wl parameters['b' + str(l)] = bl Returns: v -- python dictionary containing the current velocity. v['dW' + str(l)] = velocity of dWl v['db' + str(l)] = velocity of dbl """ L = len(parameters) // 2 # number of layers in the neural networks v = {} # Initialize velocity for l in range(L): ### START CODE HERE ### (approx. 2 lines) v["dW" + str(l+1)] = np.zeros((parameters['W' + str(l+1)].shape[0],parameters['W' + str(l+1)].shape[1])) v["db" + str(l+1)] = np.zeros((parameters['b' + str(l+1)].shape[0],parameters['b' + str(l+1)].shape[1])) ### END CODE HERE ### return v parameters = initialize_velocity_test_case() v = initialize_velocity(parameters) print("v[\"dW1\"] = " + str(v["dW1"])) print("v[\"db1\"] = " + str(v["db1"])) print("v[\"dW2\"] = " + str(v["dW2"])) print("v[\"db2\"] = " + str(v["db2"])) ``` **Expected Output**: <table style="width:40%"> <tr> <td > **v["dW1"]** </td> <td > [[ 0. 0. 0.] [ 0. 0. 0.]] </td> </tr> <tr> <td > **v["db1"]** </td> <td > [[ 0.] [ 0.]] </td> </tr> <tr> <td > **v["dW2"]** </td> <td > [[ 0. 0. 0.] [ 0. 0. 0.] [ 0. 0. 0.]] </td> </tr> <tr> <td > **v["db2"]** </td> <td > [[ 0.] [ 0.] [ 0.]] </td> </tr> </table> **Exercise**: Now, implement the parameters update with momentum. 
The momentum update rule is, for $l = 1, ..., L$: $$ \begin{cases} v_{dW^{[l]}} = \beta v_{dW^{[l]}} + (1 - \beta) dW^{[l]} \\ W^{[l]} = W^{[l]} - \alpha v_{dW^{[l]}} \end{cases}\tag{3}$$ $$\begin{cases} v_{db^{[l]}} = \beta v_{db^{[l]}} + (1 - \beta) db^{[l]} \\ b^{[l]} = b^{[l]} - \alpha v_{db^{[l]}} \end{cases}\tag{4}$$ where L is the number of layers, $\beta$ is the momentum and $\alpha$ is the learning rate. All parameters should be stored in the `parameters` dictionary. Note that the iterator `l` starts at 0 in the `for` loop while the first parameters are $W^{[1]}$ and $b^{[1]}$ (that's a "one" on the superscript). So you will need to shift `l` to `l+1` when coding. ``` # GRADED FUNCTION: update_parameters_with_momentum def update_parameters_with_momentum(parameters, grads, v, beta, learning_rate): """ Update parameters using Momentum Arguments: parameters -- python dictionary containing your parameters: parameters['W' + str(l)] = Wl parameters['b' + str(l)] = bl grads -- python dictionary containing your gradients for each parameters: grads['dW' + str(l)] = dWl grads['db' + str(l)] = dbl v -- python dictionary containing the current velocity: v['dW' + str(l)] = ... v['db' + str(l)] = ... beta -- the momentum hyperparameter, scalar learning_rate -- the learning rate, scalar Returns: parameters -- python dictionary containing your updated parameters v -- python dictionary containing your updated velocities """ L = len(parameters) // 2 # number of layers in the neural networks # Momentum update for each parameter for l in range(L): ### START CODE HERE ### (approx. 
4 lines) # compute velocities v["dW" + str(l+1)] = beta*v["dW" + str(l+1)]+(1-beta)*grads['dW' + str(l+1)] v["db" + str(l+1)] = beta*v["db" + str(l+1)]+(1-beta)*grads['db' + str(l+1)] # update parameters parameters["W" + str(l+1)] = parameters["W" + str(l+1)]-learning_rate*v["dW" + str(l+1)] parameters["b" + str(l+1)] = parameters["b" + str(l+1)]-learning_rate*v["db" + str(l+1)] ### END CODE HERE ### return parameters, v parameters, grads, v = update_parameters_with_momentum_test_case() parameters, v = update_parameters_with_momentum(parameters, grads, v, beta = 0.9, learning_rate = 0.01) print("W1 = " + str(parameters["W1"])) print("b1 = " + str(parameters["b1"])) print("W2 = " + str(parameters["W2"])) print("b2 = " + str(parameters["b2"])) print("v[\"dW1\"] = " + str(v["dW1"])) print("v[\"db1\"] = " + str(v["db1"])) print("v[\"dW2\"] = " + str(v["dW2"])) print("v[\"db2\"] = " + str(v["db2"])) ``` **Expected Output**: <table style="width:90%"> <tr> <td > **W1** </td> <td > [[ 1.62544598 -0.61290114 -0.52907334] [-1.07347112 0.86450677 -2.30085497]] </td> </tr> <tr> <td > **b1** </td> <td > [[ 1.74493465] [-0.76027113]] </td> </tr> <tr> <td > **W2** </td> <td > [[ 0.31930698 -0.24990073 1.4627996 ] [-2.05974396 -0.32173003 -0.38320915] [ 1.13444069 -1.0998786 -0.1713109 ]] </td> </tr> <tr> <td > **b2** </td> <td > [[-0.87809283] [ 0.04055394] [ 0.58207317]] </td> </tr> <tr> <td > **v["dW1"]** </td> <td > [[-0.11006192 0.11447237 0.09015907] [ 0.05024943 0.09008559 -0.06837279]] </td> </tr> <tr> <td > **v["db1"]** </td> <td > [[-0.01228902] [-0.09357694]] </td> </tr> <tr> <td > **v["dW2"]** </td> <td > [[-0.02678881 0.05303555 -0.06916608] [-0.03967535 -0.06871727 -0.08452056] [-0.06712461 -0.00126646 -0.11173103]] </td> </tr> <tr> <td > **v["db2"]** </td> <td > [[ 0.02344157] [ 0.16598022] [ 0.07420442]]</td> </tr> </table> **Note** that: - The velocity is initialized with zeros. 
So the algorithm will take a few iterations to "build up" velocity and start to take bigger steps.
- If $\beta = 0$, then this just becomes standard gradient descent without momentum.

**How do you choose $\beta$?**

- The larger the momentum $\beta$ is, the smoother the update because the more we take the past gradients into account. But if $\beta$ is too big, it could also smooth out the updates too much.
- Common values for $\beta$ range from 0.8 to 0.999. If you don't feel inclined to tune this, $\beta = 0.9$ is often a reasonable default.
- Tuning the optimal $\beta$ for your model may require trying several values to see what works best in terms of reducing the value of the cost function $J$.

<font color='blue'>
**What you should remember**:
- Momentum takes past gradients into account to smooth out the steps of gradient descent. It can be applied with batch gradient descent, mini-batch gradient descent or stochastic gradient descent.
- You have to tune a momentum hyperparameter $\beta$ and a learning rate $\alpha$.

## 4 - Adam

Adam is one of the most effective optimization algorithms for training neural networks. It combines ideas from RMSProp (described in lecture) and Momentum.

**How does Adam work?**
1. It calculates an exponentially weighted average of past gradients, and stores it in variables $v$ (before bias correction) and $v^{corrected}$ (with bias correction).
2. It calculates an exponentially weighted average of the squares of the past gradients, and stores it in variables $s$ (before bias correction) and $s^{corrected}$ (with bias correction).
3. It updates parameters in a direction based on combining information from "1" and "2".
The update rule is, for $l = 1, ..., L$: 

$$\begin{cases} v_{dW^{[l]}} = \beta_1 v_{dW^{[l]}} + (1 - \beta_1) \frac{\partial \mathcal{J} }{ \partial W^{[l]} } \\ v^{corrected}_{dW^{[l]}} = \frac{v_{dW^{[l]}}}{1 - (\beta_1)^t} \\ s_{dW^{[l]}} = \beta_2 s_{dW^{[l]}} + (1 - \beta_2) (\frac{\partial \mathcal{J} }{\partial W^{[l]} })^2 \\ s^{corrected}_{dW^{[l]}} = \frac{s_{dW^{[l]}}}{1 - (\beta_2)^t} \\ W^{[l]} = W^{[l]} - \alpha \frac{v^{corrected}_{dW^{[l]}}}{\sqrt{s^{corrected}_{dW^{[l]}}} + \varepsilon} \end{cases}$$

where:
- t counts the number of steps taken of Adam 
- L is the number of layers
- $\beta_1$ and $\beta_2$ are hyperparameters that control the two exponentially weighted averages. 
- $\alpha$ is the learning rate
- $\varepsilon$ is a very small number to avoid dividing by zero

As usual, we will store all parameters in the `parameters` dictionary  

**Exercise**: Initialize the Adam variables $v, s$ which keep track of the past information.

**Instruction**: The variables $v, s$ are python dictionaries that need to be initialized with arrays of zeros. Their keys are the same as for `grads`, that is: for $l = 1, ..., L$:
```python
v["dW" + str(l+1)] = ... #(numpy array of zeros with the same shape as parameters["W" + str(l+1)])
v["db" + str(l+1)] = ... #(numpy array of zeros with the same shape as parameters["b" + str(l+1)])
s["dW" + str(l+1)] = ... #(numpy array of zeros with the same shape as parameters["W" + str(l+1)])
s["db" + str(l+1)] = ... #(numpy array of zeros with the same shape as parameters["b" + str(l+1)])

```

```
# GRADED FUNCTION: initialize_adam

def initialize_adam(parameters) :
    """
    Initializes v and s as two python dictionaries with:
                - keys: "dW1", "db1", ..., "dWL", "dbL" 
                - values: numpy arrays of zeros of the same shape as the corresponding gradients/parameters.
    
    Arguments:
    parameters -- python dictionary containing your parameters.
parameters["W" + str(l)] = Wl parameters["b" + str(l)] = bl Returns: v -- python dictionary that will contain the exponentially weighted average of the gradient. v["dW" + str(l)] = ... v["db" + str(l)] = ... s -- python dictionary that will contain the exponentially weighted average of the squared gradient. s["dW" + str(l)] = ... s["db" + str(l)] = ... """ L = len(parameters) // 2 # number of layers in the neural networks v = {} s = {} # Initialize v, s. Input: "parameters". Outputs: "v, s". for l in range(L): ### START CODE HERE ### (approx. 4 lines) v["dW" + str(l+1)] = np.zeros((parameters["W" + str(l+1)].shape[0],parameters["W" + str(l+1)].shape[1])) v["db" + str(l+1)] = np.zeros((parameters["b" + str(l+1)].shape[0],parameters["b" + str(l+1)].shape[1])) s["dW" + str(l+1)] = np.zeros((parameters["W" + str(l+1)].shape[0],parameters["W" + str(l+1)].shape[1])) s["db" + str(l+1)] = np.zeros((parameters["b" + str(l+1)].shape[0],parameters["b" + str(l+1)].shape[1])) ### END CODE HERE ### return v, s parameters = initialize_adam_test_case() v, s = initialize_adam(parameters) print("v[\"dW1\"] = " + str(v["dW1"])) print("v[\"db1\"] = " + str(v["db1"])) print("v[\"dW2\"] = " + str(v["dW2"])) print("v[\"db2\"] = " + str(v["db2"])) print("s[\"dW1\"] = " + str(s["dW1"])) print("s[\"db1\"] = " + str(s["db1"])) print("s[\"dW2\"] = " + str(s["dW2"])) print("s[\"db2\"] = " + str(s["db2"])) ``` **Expected Output**: <table style="width:40%"> <tr> <td > **v["dW1"]** </td> <td > [[ 0. 0. 0.] [ 0. 0. 0.]] </td> </tr> <tr> <td > **v["db1"]** </td> <td > [[ 0.] [ 0.]] </td> </tr> <tr> <td > **v["dW2"]** </td> <td > [[ 0. 0. 0.] [ 0. 0. 0.] [ 0. 0. 0.]] </td> </tr> <tr> <td > **v["db2"]** </td> <td > [[ 0.] [ 0.] [ 0.]] </td> </tr> <tr> <td > **s["dW1"]** </td> <td > [[ 0. 0. 0.] [ 0. 0. 0.]] </td> </tr> <tr> <td > **s["db1"]** </td> <td > [[ 0.] [ 0.]] </td> </tr> <tr> <td > **s["dW2"]** </td> <td > [[ 0. 0. 0.] [ 0. 0. 0.] [ 0. 0. 
0.]] </td> </tr> <tr> <td > **s["db2"]** </td> <td > [[ 0.] [ 0.] [ 0.]] </td> </tr> </table> **Exercise**: Now, implement the parameters update with Adam. Recall the general update rule is, for $l = 1, ..., L$: $$\begin{cases} v_{W^{[l]}} = \beta_1 v_{W^{[l]}} + (1 - \beta_1) \frac{\partial J }{ \partial W^{[l]} } \\ v^{corrected}_{W^{[l]}} = \frac{v_{W^{[l]}}}{1 - (\beta_1)^t} \\ s_{W^{[l]}} = \beta_2 s_{W^{[l]}} + (1 - \beta_2) (\frac{\partial J }{\partial W^{[l]} })^2 \\ s^{corrected}_{W^{[l]}} = \frac{s_{W^{[l]}}}{1 - (\beta_2)^t} \\ W^{[l]} = W^{[l]} - \alpha \frac{v^{corrected}_{W^{[l]}}}{\sqrt{s^{corrected}_{W^{[l]}}}+\varepsilon} \end{cases}$$ **Note** that the iterator `l` starts at 0 in the `for` loop while the first parameters are $W^{[1]}$ and $b^{[1]}$. You need to shift `l` to `l+1` when coding. ``` # GRADED FUNCTION: update_parameters_with_adam def update_parameters_with_adam(parameters, grads, v, s, t, learning_rate = 0.01, beta1 = 0.9, beta2 = 0.999, epsilon = 1e-8): """ Update parameters using Adam Arguments: parameters -- python dictionary containing your parameters: parameters['W' + str(l)] = Wl parameters['b' + str(l)] = bl grads -- python dictionary containing your gradients for each parameters: grads['dW' + str(l)] = dWl grads['db' + str(l)] = dbl v -- Adam variable, moving average of the first gradient, python dictionary s -- Adam variable, moving average of the squared gradient, python dictionary learning_rate -- the learning rate, scalar. 
beta1 -- Exponential decay hyperparameter for the first moment estimates beta2 -- Exponential decay hyperparameter for the second moment estimates epsilon -- hyperparameter preventing division by zero in Adam updates Returns: parameters -- python dictionary containing your updated parameters v -- Adam variable, moving average of the first gradient, python dictionary s -- Adam variable, moving average of the squared gradient, python dictionary """ L = len(parameters) // 2 # number of layers in the neural networks v_corrected = {} # Initializing first moment estimate, python dictionary s_corrected = {} # Initializing second moment estimate, python dictionary # Perform Adam update on all parameters for l in range(L): # Moving average of the gradients. Inputs: "v, grads, beta1". Output: "v". ### START CODE HERE ### (approx. 2 lines) v["dW" + str(l+1)] = beta1*v["dW" + str(l+1)]+(1-beta1)*grads['dW'+str(l+1)] v["db" + str(l+1)] = beta1*v["db" + str(l+1)]+(1-beta1)*grads['db'+str(l+1)] ### END CODE HERE ### # Compute bias-corrected first moment estimate. Inputs: "v, beta1, t". Output: "v_corrected". ### START CODE HERE ### (approx. 2 lines) v_corrected["dW" + str(l+1)] = v["dW" + str(l+1)]/(1-pow(beta1,t)) v_corrected["db" + str(l+1)] = v["db" + str(l+1)]/(1-pow(beta1,t)) ### END CODE HERE ### # Moving average of the squared gradients. Inputs: "s, grads, beta2". Output: "s". ### START CODE HERE ### (approx. 2 lines) s["dW" + str(l+1)] = beta2*s["dW" + str(l+1)]+(1-beta2)*np.power(grads['dW'+str(l+1)],2) s["db" + str(l+1)] = beta2*s["db" + str(l+1)]+(1-beta2)*np.power(grads['db'+str(l+1)],2) ### END CODE HERE ### # Compute bias-corrected second raw moment estimate. Inputs: "s, beta2, t". Output: "s_corrected". ### START CODE HERE ### (approx. 2 lines) s_corrected["dW" + str(l+1)] = s["dW" + str(l+1)]/(1-pow(beta2,t)) s_corrected["db" + str(l+1)] = s["db" + str(l+1)]/(1-pow(beta2,t)) ### END CODE HERE ### # Update parameters. 
Inputs: "parameters, learning_rate, v_corrected, s_corrected, epsilon". Output: "parameters". ### START CODE HERE ### (approx. 2 lines) parameters["W" + str(l+1)] = parameters["W" + str(l+1)]-learning_rate*np.divide(v_corrected["dW" + str(l+1)],np.sqrt(s_corrected["dW" + str(l+1)])+epsilon) parameters["b" + str(l+1)] = parameters["b" + str(l+1)]-learning_rate*np.divide(v_corrected["db" + str(l+1)],np.sqrt(s_corrected["db" + str(l+1)])+epsilon) ### END CODE HERE ### return parameters, v, s parameters, grads, v, s = update_parameters_with_adam_test_case() parameters, v, s = update_parameters_with_adam(parameters, grads, v, s, t = 2) print("W1 = " + str(parameters["W1"])) print("b1 = " + str(parameters["b1"])) print("W2 = " + str(parameters["W2"])) print("b2 = " + str(parameters["b2"])) print("v[\"dW1\"] = " + str(v["dW1"])) print("v[\"db1\"] = " + str(v["db1"])) print("v[\"dW2\"] = " + str(v["dW2"])) print("v[\"db2\"] = " + str(v["db2"])) print("s[\"dW1\"] = " + str(s["dW1"])) print("s[\"db1\"] = " + str(s["db1"])) print("s[\"dW2\"] = " + str(s["dW2"])) print("s[\"db2\"] = " + str(s["db2"])) ``` **Expected Output**: <table> <tr> <td > **W1** </td> <td > [[ 1.63178673 -0.61919778 -0.53561312] [-1.08040999 0.85796626 -2.29409733]] </td> </tr> <tr> <td > **b1** </td> <td > [[ 1.75225313] [-0.75376553]] </td> </tr> <tr> <td > **W2** </td> <td > [[ 0.32648046 -0.25681174 1.46954931] [-2.05269934 -0.31497584 -0.37661299] [ 1.14121081 -1.09245036 -0.16498684]] </td> </tr> <tr> <td > **b2** </td> <td > [[-0.88529978] [ 0.03477238] [ 0.57537385]] </td> </tr> <tr> <td > **v["dW1"]** </td> <td > [[-0.11006192 0.11447237 0.09015907] [ 0.05024943 0.09008559 -0.06837279]] </td> </tr> <tr> <td > **v["db1"]** </td> <td > [[-0.01228902] [-0.09357694]] </td> </tr> <tr> <td > **v["dW2"]** </td> <td > [[-0.02678881 0.05303555 -0.06916608] [-0.03967535 -0.06871727 -0.08452056] [-0.06712461 -0.00126646 -0.11173103]] </td> </tr> <tr> <td > **v["db2"]** </td> <td > [[ 0.02344157] [ 
0.16598022] [ 0.07420442]] </td> </tr> <tr> <td > **s["dW1"]** </td> <td > [[ 0.00121136 0.00131039 0.00081287] [ 0.0002525 0.00081154 0.00046748]] </td> </tr> <tr> <td > **s["db1"]** </td> <td > [[ 1.51020075e-05] [ 8.75664434e-04]] </td> </tr> <tr> <td > **s["dW2"]** </td> <td > [[ 7.17640232e-05 2.81276921e-04 4.78394595e-04] [ 1.57413361e-04 4.72206320e-04 7.14372576e-04] [ 4.50571368e-04 1.60392066e-07 1.24838242e-03]] </td> </tr> <tr> <td > **s["db2"]** </td> <td > [[ 5.49507194e-05] [ 2.75494327e-03] [ 5.50629536e-04]] </td> </tr> </table> You now have three working optimization algorithms (mini-batch gradient descent, Momentum, Adam). Let's implement a model with each of these optimizers and observe the difference. ## 5 - Model with different optimization algorithms Lets use the following "moons" dataset to test the different optimization methods. (The dataset is named "moons" because the data from each of the two classes looks a bit like a crescent-shaped moon.) ``` train_X, train_Y = load_dataset() ``` We have already implemented a 3-layer neural network. You will train it with: - Mini-batch **Gradient Descent**: it will call your function: - `update_parameters_with_gd()` - Mini-batch **Momentum**: it will call your functions: - `initialize_velocity()` and `update_parameters_with_momentum()` - Mini-batch **Adam**: it will call your functions: - `initialize_adam()` and `update_parameters_with_adam()` ``` def model(X, Y, layers_dims, optimizer, learning_rate = 0.0007, mini_batch_size = 64, beta = 0.9, beta1 = 0.9, beta2 = 0.999, epsilon = 1e-8, num_epochs = 10000, print_cost = True): """ 3-layer neural network model which can be run in different optimizer modes. Arguments: X -- input data, of shape (2, number of examples) Y -- true "label" vector (1 for blue dot / 0 for red dot), of shape (1, number of examples) layers_dims -- python list, containing the size of each layer learning_rate -- the learning rate, scalar. 
mini_batch_size -- the size of a mini batch beta -- Momentum hyperparameter beta1 -- Exponential decay hyperparameter for the past gradients estimates beta2 -- Exponential decay hyperparameter for the past squared gradients estimates epsilon -- hyperparameter preventing division by zero in Adam updates num_epochs -- number of epochs print_cost -- True to print the cost every 1000 epochs Returns: parameters -- python dictionary containing your updated parameters """ L = len(layers_dims) # number of layers in the neural networks costs = [] # to keep track of the cost t = 0 # initializing the counter required for Adam update seed = 10 # For grading purposes, so that your "random" minibatches are the same as ours # Initialize parameters parameters = initialize_parameters(layers_dims) # Initialize the optimizer if optimizer == "gd": pass # no initialization required for gradient descent elif optimizer == "momentum": v = initialize_velocity(parameters) elif optimizer == "adam": v, s = initialize_adam(parameters) # Optimization loop for i in range(num_epochs): # Define the random minibatches. 
We increment the seed to reshuffle differently the dataset after each epoch seed = seed + 1 minibatches = random_mini_batches(X, Y, mini_batch_size, seed) for minibatch in minibatches: # Select a minibatch (minibatch_X, minibatch_Y) = minibatch # Forward propagation a3, caches = forward_propagation(minibatch_X, parameters) # Compute cost cost = compute_cost(a3, minibatch_Y) # Backward propagation grads = backward_propagation(minibatch_X, minibatch_Y, caches) # Update parameters if optimizer == "gd": parameters = update_parameters_with_gd(parameters, grads, learning_rate) elif optimizer == "momentum": parameters, v = update_parameters_with_momentum(parameters, grads, v, beta, learning_rate) elif optimizer == "adam": t = t + 1 # Adam counter parameters, v, s = update_parameters_with_adam(parameters, grads, v, s, t, learning_rate, beta1, beta2, epsilon) # Print the cost every 1000 epoch if print_cost and i % 1000 == 0: print ("Cost after epoch %i: %f" %(i, cost)) if print_cost and i % 100 == 0: costs.append(cost) # plot the cost plt.plot(costs) plt.ylabel('cost') plt.xlabel('epochs (per 100)') plt.title("Learning rate = " + str(learning_rate)) plt.show() return parameters ``` You will now run this 3 layer neural network with each of the 3 optimization methods. ### 5.1 - Mini-batch Gradient descent Run the following code to see how the model does with mini-batch gradient descent. ``` # train 3-layer model layers_dims = [train_X.shape[0], 5, 2, 1] parameters = model(train_X, train_Y, layers_dims, optimizer = "gd") # Predict predictions = predict(train_X, train_Y, parameters) # Plot decision boundary plt.title("Model with Gradient Descent optimization") axes = plt.gca() axes.set_xlim([-1.5,2.5]) axes.set_ylim([-1,1.5]) plot_decision_boundary(lambda x: predict_dec(parameters, x.T), train_X, train_Y) ``` ### 5.2 - Mini-batch gradient descent with momentum Run the following code to see how the model does with momentum. 
Because this example is relatively simple, the gains from using momentum are small; but for more complex problems you might see bigger gains.

```
# train 3-layer model
layers_dims = [train_X.shape[0], 5, 2, 1]
parameters = model(train_X, train_Y, layers_dims, beta = 0.9, optimizer = "momentum")

# Predict
predictions = predict(train_X, train_Y, parameters)

# Plot decision boundary
plt.title("Model with Momentum optimization")
axes = plt.gca()
axes.set_xlim([-1.5,2.5])
axes.set_ylim([-1,1.5])
plot_decision_boundary(lambda x: predict_dec(parameters, x.T), train_X, train_Y)
```

### 5.3 - Mini-batch with Adam mode

Run the following code to see how the model does with Adam.

```
# train 3-layer model
layers_dims = [train_X.shape[0], 5, 2, 1]
parameters = model(train_X, train_Y, layers_dims, optimizer = "adam")

# Predict
predictions = predict(train_X, train_Y, parameters)

# Plot decision boundary
plt.title("Model with Adam optimization")
axes = plt.gca()
axes.set_xlim([-1.5,2.5])
axes.set_ylim([-1,1.5])
plot_decision_boundary(lambda x: predict_dec(parameters, x.T), train_X, train_Y)
```

### 5.4 - Summary

<table> 
    <tr>
        <td> **optimization method** </td> <td> **accuracy** </td> <td> **cost shape** </td>
    </tr>
        <td> Gradient descent </td> <td> 79.7% </td> <td> oscillations </td>
    <tr>
        <td> Momentum </td> <td> 79.7% </td> <td> oscillations </td>
    </tr>
    <tr>
        <td> Adam </td> <td> 94% </td> <td> smoother </td>
    </tr>
</table> 

Momentum usually helps, but given the small learning rate and the simplistic dataset, its impact is almost negligible. Also, the huge oscillations you see in the cost come from the fact that some minibatches are more difficult than others for the optimization algorithm.

Adam on the other hand, clearly outperforms mini-batch gradient descent and Momentum. If you run the model for more epochs on this simple dataset, all three methods will lead to very good results. However, you've seen that Adam converges a lot faster.
Some advantages of Adam include: - Relatively low memory requirements (though higher than gradient descent and gradient descent with momentum) - Usually works well even with little tuning of hyperparameters (except $\alpha$) **References**: - Adam paper: https://arxiv.org/pdf/1412.6980.pdf
github_jupyter
# Plagiarism Detection Model Now that you've created training and test data, you are ready to define and train a model. Your goal in this notebook, will be to train a binary classification model that learns to label an answer file as either plagiarized or not, based on the features you provide the model. This task will be broken down into a few discrete steps: * Upload your data to S3. * Define a binary classification model and a training script. * Train your model and deploy it. * Evaluate your deployed classifier and answer some questions about your approach. To complete this notebook, you'll have to complete all given exercises and answer all the questions in this notebook. > All your tasks will be clearly labeled **EXERCISE** and questions as **QUESTION**. It will be up to you to explore different classification models and decide on a model that gives you the best performance for this dataset. --- ## Load Data to S3 In the last notebook, you should have created two files: a `training.csv` and `test.csv` file with the features and class labels for the given corpus of plagiarized/non-plagiarized text data. >The below cells load in some AWS SageMaker libraries and creates a default bucket. After creating this bucket, you can upload your locally stored data to S3. Save your train and test `.csv` feature files, locally. To do this you can run the second notebook "2_Plagiarism_Feature_Engineering" in SageMaker or you can manually upload your files to this notebook using the upload icon in Jupyter Lab. Then you can upload local files to S3 by using `sagemaker_session.upload_data` and pointing directly to where the training data is saved. 
``` import pandas as pd import boto3 import sagemaker """ DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE """ # session and role sagemaker_session = sagemaker.Session() role = sagemaker.get_execution_role() # create an S3 bucket bucket = sagemaker_session.default_bucket() ``` ## EXERCISE: Upload your training data to S3 Specify the `data_dir` where you've saved your `train.csv` file. Decide on a descriptive `prefix` that defines where your data will be uploaded in the default S3 bucket. Finally, create a pointer to your training data by calling `sagemaker_session.upload_data` and passing in the required parameters. It may help to look at the [Session documentation](https://sagemaker.readthedocs.io/en/stable/session.html#sagemaker.session.Session.upload_data) or previous SageMaker code examples. You are expected to upload your entire directory. Later, the training script will only access the `train.csv` file. ``` # should be the name of directory you created to save your features data data_dir = 'plagiarism_data' # set prefix, a descriptive name for a directory prefix = 'plagiarism-model' # upload all data to S3 data = sagemaker_session.upload_data(path=data_dir, bucket=bucket, key_prefix=prefix) print(data) ``` ### Test cell Test that your data has been successfully uploaded. The below cell prints out the items in your S3 bucket and will throw an error if it is empty. You should see the contents of your `data_dir` and perhaps some checkpoints. If you see any other files listed, then you may have some old model files that you can delete via the S3 console (though, additional files shouldn't affect the performance of model developed in this notebook). ``` """ DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE """ # confirm that data is in S3 bucket empty_check = [] for obj in boto3.resource('s3').Bucket(bucket).objects.all(): empty_check.append(obj.key) print(obj.key) assert len(empty_check) !=0, 'S3 bucket is empty.' 
print('Test passed!') ``` --- # Modeling Now that you've uploaded your training data, it's time to define and train a model! The type of model you create is up to you. For a binary classification task, you can choose to go one of three routes: * Use a built-in classification algorithm, like LinearLearner. * Define a custom Scikit-learn classifier, a comparison of models can be found [here](https://scikit-learn.org/stable/auto_examples/classification/plot_classifier_comparison.html). * Define a custom PyTorch neural network classifier. It will be up to you to test out a variety of models and choose the best one. Your project will be graded on the accuracy of your final model. --- ## EXERCISE: Complete a training script To implement a custom classifier, you'll need to complete a `train.py` script. You've been given the folders `source_sklearn` and `source_pytorch` which hold starting code for a custom Scikit-learn model and a PyTorch model, respectively. Each directory has a `train.py` training script. To complete this project **you only need to complete one of these scripts**; the script that is responsible for training your final model. A typical training script: * Loads training data from a specified directory * Parses any training & model hyperparameters (ex. nodes in a neural network, training epochs, etc.) * Instantiates a model of your design, with any specified hyperparams * Trains that model * Finally, saves the model so that it can be hosted/deployed, later ### Defining and training a model Much of the training script code is provided for you. Almost all of your work will be done in the `if __name__ == '__main__':` section. To complete a `train.py` file, you will: 1. Import any extra libraries you need 2. Define any additional model training hyperparameters using `parser.add_argument` 2. Define a model in the `if __name__ == '__main__':` section 3. Train the model in that same section Below, you can use `!pygmentize` to display an existing `train.py` file. 
Read through the code; all of your tasks are marked with `TODO` comments. **Note: If you choose to create a custom PyTorch model, you will be responsible for defining the model in the `model.py` file,** and a `predict.py` file is provided. If you choose to use Scikit-learn, you only need a `train.py` file; you may import a classifier from the `sklearn` library. ``` # directory can be changed to: source_sklearn or source_pytorch !pygmentize source_sklearn/train.py ``` ### Provided code If you read the code above, you can see that the starter code includes a few things: * Model loading (`model_fn`) and saving code * Getting SageMaker's default hyperparameters * Loading the training data by name, `train.csv` and extracting the features and labels, `train_x`, and `train_y` If you'd like to read more about model saving with [joblib for sklearn](https://scikit-learn.org/stable/modules/model_persistence.html) or with [torch.save](https://pytorch.org/tutorials/beginner/saving_loading_models.html), click on the provided links. --- # Create an Estimator When a custom model is constructed in SageMaker, an entry point must be specified. This is the Python file which will be executed when the model is trained; the `train.py` function you specified above. To run a custom training script in SageMaker, construct an estimator, and fill in the appropriate constructor arguments: * **entry_point**: The path to the Python script SageMaker runs for training and prediction. * **source_dir**: The path to the training script directory `source_sklearn` OR `source_pytorch`. * **entry_point**: The path to the Python script SageMaker runs for training and prediction. * **source_dir**: The path to the training script directory `train_sklearn` OR `train_pytorch`. * **entry_point**: The path to the Python script SageMaker runs for training. * **source_dir**: The path to the training script directory `train_sklearn` OR `train_pytorch`. * **role**: Role ARN, which was specified, above. 
* **train_instance_count**: The number of training instances (should be left at 1). * **train_instance_type**: The type of SageMaker instance for training. Note: Because Scikit-learn does not natively support GPU training, Sagemaker Scikit-learn does not currently support training on GPU instance types. * **sagemaker_session**: The session used to train on Sagemaker. * **hyperparameters** (optional): A dictionary `{'name':value, ..}` passed to the train function as hyperparameters. Note: For a PyTorch model, there is another optional argument **framework_version**, which you can set to the latest version of PyTorch, `1.0`. ## EXERCISE: Define a Scikit-learn or PyTorch estimator To import your desired estimator, use one of the following lines: ``` from sagemaker.sklearn.estimator import SKLearn ``` ``` from sagemaker.pytorch import PyTorch ``` ``` # your import and estimator code, here from sagemaker.sklearn.estimator import SKLearn # instantiate a pytorch estimator estimator = SKLearn(entry_point="train.py", source_dir="source_sklearn", role=role, train_instance_count=1, sagemaker_session=sagemaker_session, train_instance_type='ml.c4.xlarge') ``` ## EXERCISE: Train the estimator Train your estimator on the training data stored in S3. This should create a training job that you can monitor in your SageMaker console. ``` %%time # Train your estimator on S3 training data estimator.fit({'train': data}) ``` ## EXERCISE: Deploy the trained model After training, deploy your model to create a `predictor`. If you're using a PyTorch model, you'll need to create a trained `PyTorchModel` that accepts the trained `<model>.model_data` as an input parameter and points to the provided `source_pytorch/predict.py` file as an entry point. To deploy a trained model, you'll use `<model>.deploy`, which takes in two arguments: * **initial_instance_count**: The number of deployed instances (1). * **instance_type**: The type of SageMaker instance for deployment. 
Note: If you run into an instance error, it may be because you chose the wrong training or deployment instance_type. It may help to refer to your previous exercise code to see which types of instances we used. ``` %%time # uncomment, if needed # from sagemaker.pytorch import PyTorchModel # deploy your model to create a predictor predictor = estimator.deploy(initial_instance_count=1, instance_type='ml.t2.medium') ``` --- # Evaluating Your Model Once your model is deployed, you can see how it performs when applied to our test data. The provided cell below, reads in the test data, assuming it is stored locally in `data_dir` and named `test.csv`. The labels and features are extracted from the `.csv` file. ``` """ DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE """ import os # read in test data, assuming it is stored locally test_data = pd.read_csv(os.path.join(data_dir, "test.csv"), header=None, names=None) # labels are in the first column test_y = test_data.iloc[:,0] test_x = test_data.iloc[:,1:] ``` ## EXERCISE: Determine the accuracy of your model Use your deployed `predictor` to generate predicted, class labels for the test data. Compare those to the *true* labels, `test_y`, and calculate the accuracy as a value between 0 and 1.0 that indicates the fraction of test data that your model classified correctly. You may use [sklearn.metrics](https://scikit-learn.org/stable/modules/classes.html#module-sklearn.metrics) for this calculation. **To pass this project, your model should get at least 90% test accuracy.** ``` # First: generate predicted, class labels test_y_preds = predictor.predict(test_x) """ DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE """ # test that your model generates the correct number of labels assert len(test_y_preds)==len(test_y), 'Unexpected number of predictions.' 
print('Test passed!') # Second: calculate the test accuracy from sklearn.metrics import accuracy_score accuracy = accuracy_score(test_y, test_y_preds) print(accuracy) ## print out the array of predicted and true labels, if you want print('\nPredicted class labels: ') print(test_y_preds) print('\nTrue class labels: ') print(test_y.values) # to calculate FP and FN TP = 0 FP = 0 TN = 0 FN = 0 for i in range(len(test_y_preds)): if test_y[i]==test_y_preds[i]==1: TP += 1 if test_y_preds[i]==1 and test_y[i]!=test_y_preds[i]: FP += 1 if test_y[i]==test_y_preds[i]==0: TN += 1 if test_y_preds[i]==0 and test_y[i]!=test_y_preds[i]: FN += 1 print("True Posiitve : ", TP) print("False Posiitve : ", FP) print("True Negative : ", TN) print("False Negative : ", FN) ``` ### Question 1: How many false positives and false negatives did your model produce, if any? And why do you think this is? ** Answer**: - Number of Flase positive = 0 - Number of False negative = 0 The model could give me 100% accuracy which I think worked well becasue of the little amount of data we are training and testing on. Another reason for 100% accuracy could be because the original text remains same, though we have split the testing and training data. so, the accuracy may not be 100% we were to test on a new original test against plagiarised tex ### Question 2: How did you decide on the type of model to use? ** Answer**: I first tried a neural network with 10 hidden hidden layers and 50 epcohs. the accuracy was 88%. Next, I tried with Sklearn's LinearSVC and it could give me 100% accuracy and the training was also much faster than a neural network. I think it is good enough for small amount of data. ---- ## EXERCISE: Clean up Resources After you're done evaluating your model, **delete your model endpoint**. You can do this with a call to `.delete_endpoint()`. You need to show, in this notebook, that the endpoint was deleted. 
Any other resources, you may delete from the AWS console, and you will find more instructions on cleaning up all your resources, below. ``` # uncomment and fill in the line below! predictor.delete_endpoint() ``` ### Deleting S3 bucket When you are *completely* done with training and testing models, you can also delete your entire S3 bucket. If you do this before you are done training your model, you'll have to recreate your S3 bucket and upload your training data again. ``` # deleting bucket, uncomment lines below bucket_to_delete = boto3.resource('s3').Bucket(bucket) bucket_to_delete.objects.all().delete() ``` ### Deleting all your models and instances When you are _completely_ done with this project and do **not** ever want to revisit this notebook, you can choose to delete all of your SageMaker notebook instances and models by following [these instructions](https://docs.aws.amazon.com/sagemaker/latest/dg/ex1-cleanup.html). Before you delete this notebook instance, I recommend at least downloading a copy and saving it, locally. --- ## Further Directions There are many ways to improve or add on to this project to expand your learning or make this more of a unique project for you. A few ideas are listed below: * Train a classifier to predict the *category* (1-3) of plagiarism and not just plagiarized (1) or not (0). * Utilize a different and larger dataset to see if this model can be extended to other types of plagiarism. * Use language or character-level analysis to find different (and more) similarity features. * Write a complete pipeline function that accepts a source text and submitted text file, and classifies the submitted text as plagiarized or not. * Use API Gateway and a lambda function to deploy your model to a web application. These are all just options for extending your work. If you've completed all the exercises in this notebook, you've completed a real-world application, and can proceed to submit your project. Great job!
github_jupyter
# Python basic built-in types Python comes with a lot of [built-in types](https://docs.python.org/3/library/stdtypes.html). For the time being, we will focus on the most common _individual_ ones: * bool * int * float * string ## Retrieving the type of an object The type of an object can be checked using the _type()_ function. To check if an object belongs to a specific type, use the _isinstance()_ function. ``` x = list() print("Type of x:", type(x)) print("Is x a list?", isinstance(x, list)) print("Is x a string?" , isinstance(x, str)) ``` ## Truth value testing: bool The _bool_ type can have only two values: _True_ or _False_. ``` print("Type of True?", type(True)) print("Type of False?", type(False)) ``` ## Numeric types: int/float Integer numbers have unlimited precision (not limited to 32/64 bits as in other languages). Float numbers are usually represented as C _doubles_ internally, and you can check the machine's internal representation using _sys.float_info_. Float numbers also accept the values _nan_, _-inf_, and _+inf_ for Not a Number, negative infinity, and positive infinity. ``` i = 123456789012345678901234567890 print("Type of i?", type(i)) f = 40.0 print("Type of f?", type(f)) pos_infinite = float("+inf") print("Type of pos_infinite?", type(pos_infinite)) print("Value of pos_infinite?", pos_infinite) ``` Python supports mixed arithmetic, and in this case the operand with the "narrower" type is widened to that of the other type. ### Numeric operations The usual operations are available: * sum: a+b * substraction: a-b * multiplication: a\*b * division: a/b * floored division: a//b * remainder: a%b * power: a\*\*b ``` print("4+3:", 4+3) print("4-3:", 4-3) print("4*3:", 4*3) print("4/3:", 4/3) print("4//3:", 4//3) # Floored division works with floats too print("4.0//3.0:", 4.0//3.0) print("4%3:", 4%3) print("4**3:", 4**3) ``` **Watch out!** Division works differently in Python2 and Python3. 
In Python2, if both operands are integers, the result will be an integer. However, in Python3 the result will be promoted to float.
``` a = "immutable" print("a:", a) print("a[0]:", a[0]) try: a[0] = 'I' except TypeError as type_error: print("TypeError:", type_error) b = a.capitalize() print("a:", a) print("a[0]:", a[0]) print("b:", b) print("b[0]:", b[0]) ``` There are several useful [string methods](https://docs.python.org/3/library/stdtypes.html#string-methods) in the standard library. Here are some that are worth highlighting: * **count**: count number of non-overlapping occurrences of substring * **find**: retrieve the index of the first appearance of a substring * **format**: perform string formatting * **strip**: remove characters * **startswith**: check if the beginning of the string matches a substring * **endswith**: check if the end of the string matches a substring * **join**: concatenate strings * **split**: split the string into chunks * **lower**: convert to lowercase * **upper**: convert to uppercase
github_jupyter
``` # default_exp learner ``` # Learner > This contains fastai Learner extensions. ``` #export from fastai.learner import * from fastai.vision.models.all import * from fastai.data.transforms import * from tsai.imports import * from tsai.data.core import * from tsai.data.validation import * from tsai.models.utils import * from tsai.models.InceptionTimePlus import * #export @patch def show_batch(self:Learner, **kwargs): self.dls.show_batch(**kwargs) #export @patch def one_batch(self:Learner, i, b): # this fixes a bug that will be managed in the next release of fastai self.iter = i # b_on_device = tuple( e.to(device=self.dls.device) for e in b if hasattr(e, "to")) if self.dls.device is not None else b b_on_device = to_device(b, device=self.dls.device) if self.dls.device is not None else b self._split(b_on_device) self._with_events(self._do_one_batch, 'batch', CancelBatchException) #export @patch def save_all(self:Learner, path='export', dls_fname='dls', model_fname='model', learner_fname='learner', verbose=False): path = Path(path) if not os.path.exists(path): os.makedirs(path) self.dls_type = self.dls.__class__.__name__ if self.dls_type == "MixedDataLoaders": self.n_loaders = (len(self.dls.loaders), len(self.dls.loaders[0].loaders)) dls_fnames = [] for i,dl in enumerate(self.dls.loaders): for j,l in enumerate(dl.loaders): l = l.new(num_workers=1) torch.save(l, path/f'{dls_fname}_{i}_{j}.pth') dls_fnames.append(f'{dls_fname}_{i}_{j}.pth') else: dls_fnames = [] self.n_loaders = len(self.dls.loaders) for i,dl in enumerate(self.dls): dl = dl.new(num_workers=1) torch.save(dl, path/f'{dls_fname}_{i}.pth') dls_fnames.append(f'{dls_fname}_{i}.pth') # Saves the model along with optimizer self.model_dir = path self.save(f'{model_fname}', with_opt=True) # Export learn without the items and the optimizer state for inference self.export(path/f'{learner_fname}.pkl') pv(f'Learner saved:', verbose) pv(f"path = '{path}'", verbose) pv(f"dls_fname = '{dls_fnames}'", verbose) 
pv(f"model_fname = '{model_fname}.pth'", verbose) pv(f"learner_fname = '{learner_fname}.pkl'", verbose) def load_all(path='export', dls_fname='dls', model_fname='model', learner_fname='learner', device=None, pickle_module=pickle, verbose=False): if isinstance(device, int): device = torch.device('cuda', device) elif device is None: device = default_device() if device == 'cpu': cpu = True else: cpu = None path = Path(path) learn = load_learner(path/f'{learner_fname}.pkl', cpu=cpu, pickle_module=pickle_module) learn.load(f'{model_fname}', with_opt=True, device=device) if learn.dls_type == "MixedDataLoaders": dls_fnames = [] _dls = [] for i in range(learn.n_loaders[0]): _dl = [] for j in range(learn.n_loaders[1]): l = torch.load(path/f'{dls_fname}_{i}_{j}.pth', map_location=device, pickle_module=pickle_module) l = l.new(num_workers=0) l.to(device) dls_fnames.append(f'{dls_fname}_{i}_{j}.pth') _dl.append(l) _dls.append(MixedDataLoader(*_dl, path=learn.dls.path, device=device, shuffle=l.shuffle)) learn.dls = MixedDataLoaders(*_dls, path=learn.dls.path, device=device) else: loaders = [] dls_fnames = [] for i in range(learn.n_loaders): dl = torch.load(path/f'{dls_fname}_{i}.pth', map_location=device, pickle_module=pickle_module) dl = dl.new(num_workers=0) dl.to(device) first(dl) loaders.append(dl) dls_fnames.append(f'{dls_fname}_{i}.pth') learn.dls = type(learn.dls)(*loaders, path=learn.dls.path, device=device) pv(f'Learner loaded:', verbose) pv(f"path = '{path}'", verbose) pv(f"dls_fname = '{dls_fnames}'", verbose) pv(f"model_fname = '{model_fname}.pth'", verbose) pv(f"learner_fname = '{learner_fname}.pkl'", verbose) return learn load_learner_all = load_all #export @patch @delegates(subplots) def plot_metrics(self: Recorder, nrows=None, ncols=None, figsize=None, final_losses=True, perc=.5, **kwargs): n_values = len(self.recorder.values) if n_values < 2: print('not enough values to plot a chart') return metrics = np.stack(self.values) n_metrics = metrics.shape[1] names = 
self.metric_names[1:n_metrics+1] if final_losses: sel_idxs = int(round(n_values * perc)) if sel_idxs >= 2: metrics = np.concatenate((metrics[:,:2], metrics), -1) names = names[:2] + names else: final_losses = False n = len(names) - 1 - final_losses if nrows is None and ncols is None: nrows = int(math.sqrt(n)) ncols = int(np.ceil(n / nrows)) elif nrows is None: nrows = int(np.ceil(n / ncols)) elif ncols is None: ncols = int(np.ceil(n / nrows)) figsize = figsize or (ncols * 6, nrows * 4) fig, axs = subplots(nrows, ncols, figsize=figsize, **kwargs) axs = [ax if i < n else ax.set_axis_off() for i, ax in enumerate(axs.flatten())][:n] axs = ([axs[0]]*2 + [axs[1]]*2 + axs[2:]) if final_losses else ([axs[0]]*2 + axs[1:]) for i, (name, ax) in enumerate(zip(names, axs)): if i in [0, 1]: ax.plot(metrics[:, i], color='#1f77b4' if i == 0 else '#ff7f0e', label='valid' if i == 1 else 'train') ax.set_title('losses') ax.set_xlim(0, len(metrics)-1) elif i in [2, 3] and final_losses: ax.plot(np.arange(len(metrics) - sel_idxs, len(metrics)), metrics[-sel_idxs:, i], color='#1f77b4' if i == 2 else '#ff7f0e', label='valid' if i == 3 else 'train') ax.set_title('final losses') ax.set_xlim(len(metrics) - sel_idxs, len(metrics)-1) # ax.set_xticks(np.arange(len(metrics) - sel_idxs, len(metrics))) else: ax.plot(metrics[:, i], color='#1f77b4' if i == 0 else '#ff7f0e', label='valid' if i > 0 else 'train') ax.set_title(name if i >= 2 * (1 + final_losses) else 'losses') ax.set_xlim(0, len(metrics)-1) ax.legend(loc='best') ax.grid(color='gainsboro', linewidth=.5) plt.show() @patch @delegates(subplots) def plot_metrics(self: Learner, **kwargs): self.recorder.plot_metrics(**kwargs) #export @patch @delegates(subplots) def show_probas(self:Learner, figsize=(6,6), ds_idx=1, dl=None, one_batch=False, max_n=None, **kwargs): recorder = copy(self.recorder) # This is to avoid loss of recorded values while generating preds if one_batch: dl = self.dls.one_batch() probas, targets = self.get_preds(ds_idx=ds_idx, 
dl=[dl] if dl is not None else None) if probas.ndim == 2 and probas.min() < 0 or probas.max() > 1: probas = nn.Softmax(-1)(probas) if not isinstance(targets[0].item(), Integral): return targets = targets.flatten() if max_n is not None: idxs = np.random.choice(len(probas), max_n, False) probas, targets = probas[idxs], targets[idxs] fig = plt.figure(figsize=figsize, **kwargs) classes = np.unique(targets) nclasses = len(classes) vals = np.linspace(.5, .5 + nclasses - 1, nclasses)[::-1] plt.vlines(.5, min(vals) - 1, max(vals), color='black', linewidth=.5) cm = plt.get_cmap('gist_rainbow') color = [cm(1.* c/nclasses) for c in range(1, nclasses + 1)][::-1] class_probas = np.array([probas[i,t] for i,t in enumerate(targets)]) for i, c in enumerate(classes): plt.scatter(class_probas[targets == c] if nclasses > 2 or i > 0 else 1 - class_probas[targets == c], targets[targets == c] + .5 * (np.random.rand((targets == c).sum()) - .5), color=color[i], edgecolor='black', alpha=.2, s=100) if nclasses > 2: plt.vlines((targets == c).float().mean(), i - .5, i + .5, color='r', linewidth=.5) plt.hlines(vals, 0, 1) plt.ylim(min(vals) - 1, max(vals)) plt.xlim(0,1) plt.xticks(np.linspace(0,1,11), fontsize=12) plt.yticks(classes, [self.dls.vocab[x] for x in classes], fontsize=12) plt.title('Predicted proba per true class' if nclasses > 2 else 'Predicted class 1 proba per true class', fontsize=14) plt.xlabel('Probability', fontsize=12) plt.ylabel('True class', fontsize=12) plt.grid(axis='x', color='gainsboro', linewidth=.2) plt.show() self.recorder = recorder #export @delegates(build_ts_model) def ts_learner(dls, arch=None, c_in=None, c_out=None, seq_len=None, d=None, splitter=trainable_params, # learner args loss_func=None, opt_func=Adam, lr=defaults.lr, cbs=None, metrics=None, path=None, model_dir='models', wd=None, wd_bn_bias=False, train_bn=True, moms=(0.95,0.85,0.95), # other model args **kwargs): if arch is None: arch = InceptionTimePlus model = build_ts_model(arch, dls=dls, c_in=c_in, 
c_out=c_out, seq_len=seq_len, d=d, **kwargs) try: model[0], model[1] subscriptable = True except: subscriptable = False if subscriptable: splitter = ts_splitter if loss_func is None: if hasattr(dls, 'loss_func'): loss_func = dls.loss_func elif hasattr(dls, 'train_ds') and hasattr(dls.train_ds, 'loss_func'): loss_func = dls.train_ds.loss_func elif hasattr(dls, 'cat') and not dls.cat: loss_func = MSELossFlat() learn = Learner(dls=dls, model=model, loss_func=loss_func, opt_func=opt_func, lr=lr, cbs=cbs, metrics=metrics, path=path, splitter=splitter, model_dir=model_dir, wd=wd, wd_bn_bias=wd_bn_bias, train_bn=train_bn, moms=moms, ) # keep track of args for loggers store_attr('arch', self=learn) return learn #export @delegates(build_tsimage_model) def tsimage_learner(dls, arch=None, pretrained=False, # learner args loss_func=None, opt_func=Adam, lr=defaults.lr, cbs=None, metrics=None, path=None, model_dir='models', wd=None, wd_bn_bias=False, train_bn=True, moms=(0.95,0.85,0.95), # other model args **kwargs): if arch is None: arch = xresnet34 model = build_tsimage_model(arch, dls=dls, pretrained=pretrained, **kwargs) learn = Learner(dls=dls, model=model, loss_func=loss_func, opt_func=opt_func, lr=lr, cbs=cbs, metrics=metrics, path=path, model_dir=model_dir, wd=wd, wd_bn_bias=wd_bn_bias, train_bn=train_bn, moms=moms) # keep track of args for loggers store_attr('arch', self=learn) return learn #export @patch def decoder(self:Learner, o): return L([self.dls.decodes(oi) for oi in o]) #export @patch @delegates(GatherPredsCallback.__init__) def get_X_preds(self:Learner, X, y=None, bs=64, with_input=False, with_decoded=True, with_loss=False, **kwargs): if with_loss and y is None: print('cannot find loss as y=None') with_loss=False pred_pos = 3 if with_input else 2 dl = self.dls.new_dl(X, y=y) dl.bs = bs output = list(self.get_preds(dl=dl, with_input=with_input, with_decoded=with_decoded, with_loss=with_loss)) if with_decoded and hasattr(self.dls, 'vocab'): output[2 + 
with_input] = L([self.dls.vocab[p] for p in output[2 + with_input]]) return tuple(output) from tsai.data.all import * from tsai.data.core import * from tsai.models.FCNPlus import * dsid = 'OliveOil' X, y, splits = get_UCR_data(dsid, verbose=True, split_data=False) tfms = [None, [Categorize()]] dls = get_ts_dls(X, y, splits=splits, tfms=tfms) learn = ts_learner(dls, FCNPlus) for p in learn.model.parameters(): p.requires_grad=False test_eq(count_parameters(learn.model), 0) learn.freeze() test_eq(count_parameters(learn.model), 1540) learn.unfreeze() test_eq(count_parameters(learn.model), 264580) learn.show_batch(); learn.fit_one_cycle(2, lr_max=1e-3) dsid = 'OliveOil' X, y, splits = get_UCR_data(dsid, split_data=False) tfms = [None, [Categorize()]] dls = get_ts_dls(X, y, tfms=tfms, splits=splits) learn = ts_learner(dls, FCNPlus, metrics=accuracy) learn.fit_one_cycle(2) learn.plot_metrics() learn.show_probas() learn.save_all() del learn learn = load_all() test_probas, test_targets, test_preds = learn.get_X_preds(X[0:10], with_decoded=True) test_probas, test_targets, test_preds learn.fit_one_cycle(1, lr_max=1e-3) #hide out = create_scripts(); beep(out) ```
github_jupyter
# EDA - EDA란? - EDA란 데이터 분석 전 데이터를 온전히 보는 태도이자 철학이다. - 데이터의 본질을 탐구하기 위한 자세이다. 인간의 강력한 패턴 인지능력 때문에 시각화 기법이 많이 사용된다. >EDA is an approach to data analysis that postpones the usual assumptions about what kind of model the data follow with the more direct approach of allowing the data itself to reveal its underlying structure and model. >EDA is not a mere collection of techniques. EDA is a philosophy as to how we dissect a data set; what we look for; how we look; and how we interpret. ## Before EDA ### EDA 전에 설명서를 잘 파악해야한다. - From Nuree Chung님의 Medium Blog, _EDA, __'데이터 설명서에서 시작하기'__ - EDA에 있어서, 데이터의 분포나 변수간 관계를 파악하기 위해 히스토그램, 산점도, 상관관계표 등 다양한 시각화 방법이 동원. - EDA전에 데이터의 변수 각각의 의미를 먼저 파악하는 것이 선행되야 할 것. - 이는 다른사람과 협업시에 '데이터 설명서'를 작성하는 것과 연관. - 데이터 꼼꼼히 들여다보며 각각의 변수가 어떤 의미인지, 어떤 방식으로 측정된 것인지, 명목형인지 수치형인지, 실제 의미대로 코딩되었는지 정확히 이해하는 이 과정은 데이터로부터 좋은 인사이트를 얻는 출발점 - 변수의 의미를 파악하고 그에 맞게 전처리할 때, 실제에 가까운 정보를 가진 데이터 셋이 구성된다. 기초공사의 개념. ### 데이터 분석에서 가장 중요한 것은??? - From '나자바바'님 Youtube. ![image.png](attachment:283d3e19-a2ad-4c7b-a264-746256382c98.png) - 데이터 분석에서 가장 중요한 것이 무엇인지를 묻는다면 '인사이트'의 도출 - 분석 방식과 분석 대상을 고려해야함 - 만약 내가 무엇을 찾아야하는지 알면 이미 인사이트는 도출된 것 -> '최적화'가 가장 중요 - 위의 매트릭스에 맞는 행동! -> ADSP에 나옴. > < 정리 > > 데이터 분석에서 가장 중요한 것은 인사이트의 발견이다. 허나 분석 대상과 분석 방식을 이미 알고 있다면 최적화가 더 중요하다. 인사이트는 EDA를 통해서 얻는다. > EDA -> 인사이트 / ML -> 최적화 __참고__ 1. https://medium.com/mighty-data-science-bootcamp/eda-%EB%8D%B0%EC%9D%B4%ED%84%B0-%EC%84%A4%EB%AA%85%EC%84%9C%EC%97%90%EC%84%9C-%EC%8B%9C%EC%9E%91%ED%95%98%EA%B8%B0-230060b9fc17 2. https://www.itl.nist.gov/div898/handbook/eda/section1/eda11.htm 3. https://eda-ai-lab.tistory.com/13 ``` 오늘 할일 : EDA에 대해 관련된 사이트 읽고 내 나름대로 정리. 내일 볼 EDA 커널 캐글에서 2개 정도 찾아놓기. ``` ### 상대 경로 원래는 부보나 조부모 경로로 이동불가 -> 라이브러리 사용해야... - 그럼 안할래~ ## From 'Team EDA'_EDA (Exploratory Data Analysis) - 개념 : 수집한 데이터가 들어왔을 때, 이를 다양한 각도에서 관찰하고 이해하는 과정입니다. > 일단 자료를 직관적으로 바라보는 과정 - 필요성 : 데이터의 특성, 관계, 현상을 더 잘 이해하고 분석 방향성을 결정할 수 있습니다. > 내가 생각하는 포인트 : __특성, 관계, 목적__ 을 파악하는 것. 
이것을 통해 '기존가설 수정, 추가 데이터 수집 등' 분석 방향을 수정, 결정 할 수 있다. > 여기서 '목적'이라 함은 문제정의 혹은 해결을 위한 질문의 답일 수도 있지만, 모집단과 모수에 대한 고려역시 포함이다. - 과정 : 문제정의 단계의 질문과 가설을 바탕으로 분석계획을 세우는 것에서 시작. - 분석 계획에는 어떤 속성 및 관계를 집중적으로 관찰해야 할지 , 최적 방법 무엇인지 포함되어야. - 분석 목적, 변수 확인 / 전체적 파악 (Data 완전성등 품질요소, 이상치) / 속성, 관계 - 특징 파악 - 기본적인 특성 파악 - 개별 데이터 관찰 - 통계값 활용 _ 기술통계 요약 - 시각화 - 머신러닝 기법_ 이상치확인에 유용 - - 속성간의 관계 분석 - 변수의 특성에 따라, 문제를 바라보는 방법에 따라 분류 - Categorical Variable (Qualitative) Nominal Data - Numeric Variable (Quantitative) ![image.png](attachment:6f5c80bd-3302-4079-80e6-04321f545039.png) # 유명 EDA 커널 ## 1. ## 2. EDA is Fun! https://www.kaggle.com/deffro/eda-is-fun
github_jupyter
# Fuzzing with Grammars In the chapter on ["Mutation-Based Fuzzing"](MutationFuzzer.ipynb), we have seen how to use extra hints – such as sample input files – to speed up test generation. In this chapter, we take this idea one step further, by providing a _specification_ of the legal inputs to a program. Specifying inputs via a _grammar_ allows for very systematic and efficient test generation, in particular for complex input formats. Grammars also serve as the base for configuration fuzzing, API fuzzing, GUI fuzzing, and many more. **Prerequisites** * You should know how basic fuzzing works, e.g. from the [Chapter introducing fuzzing](Fuzzer.ipynb). * Knowledge on [mutation-based fuzzing](MutationFuzzer.ipynb) and [coverage](Coverage.ipynb) is _not_ required yet, but still recommended. ``` import bookutils import Fuzzer ``` ## Synopsis <!-- Automatically generated. Do not edit. --> To [use the code provided in this chapter](Importing.ipynb), write ```python >>> from fuzzingbook.Grammars import <identifier> ``` and then make use of the following features. This chapter introduces _grammars_ as a simple means to specify input languages, and to use them for testing programs with syntactically valid inputs. A grammar is defined as a mapping of nonterminal symbols to lists of alternative expansions, as in the following example: ```python >>> US_PHONE_GRAMMAR = { >>> "<start>": ["<phone-number>"], >>> "<phone-number>": ["(<area>)<exchange>-<line>"], >>> "<area>": ["<lead-digit><digit><digit>"], >>> "<exchange>": ["<lead-digit><digit><digit>"], >>> "<line>": ["<digit><digit><digit><digit>"], >>> "<lead-digit>": ["2", "3", "4", "5", "6", "7", "8", "9"], >>> "<digit>": ["0", "1", "2", "3", "4", "5", "6", "7", "8", "9"] >>> } >>> >>> assert is_valid_grammar(US_PHONE_GRAMMAR) ``` Nonterminal symbols are enclosed in angle brackets (say, `<digit>`). 
To generate an input string from a grammar, a _producer_ starts with the start symbol (`<start>`) and randomly chooses a random expansion for this symbol. It continues the process until all nonterminal symbols are expanded. The function `simple_grammar_fuzzer()` does just that: ```python >>> [simple_grammar_fuzzer(US_PHONE_GRAMMAR) for i in range(5)] ``` In practice, though, instead of `simple_grammar_fuzzer()`, you should use [the `GrammarFuzzer` class](GrammarFuzzer.ipynb) or one of its [coverage-based](GrammarCoverageFuzzer.ipynb), [probabilistic-based](ProbabilisticGrammarFuzzer.ipynb), or [generator-based](GeneratorGrammarFuzzer.ipynb) derivatives; these are more efficient, protect against infinite growth, and provide several additional features. This chapter also introduces a [grammar toolbox](#A-Grammar-Toolbox) with several helper functions that ease the writing of grammars, such as using shortcut notations for character classes and repetitions, or extending grammars ## Input Languages All possible behaviors of a program can be triggered by its input. "Input" here can be a wide range of possible sources: We are talking about data that is read from files, from the environment, or over the network, data input by the user, or data acquired from interaction with other resources. The set of all these inputs determines how the program will behave – including its failures. When testing, it is thus very helpful to think about possible input sources, how to get them under control, and _how to systematically test them_. For the sake of simplicity, we will assume for now that the program has only one source of inputs; this is the same assumption we have been using in the previous chapters, too. The set of valid inputs to a program is called a _language_. Languages range from the simple to the complex: the CSV language denotes the set of valid comma-separated inputs, whereas the Python language denotes the set of valid Python programs. 
We commonly separate data languages and programming languages, although any program can also be treated as input data (say, to a compiler). The [Wikipedia page on file formats](https://en.wikipedia.org/wiki/List_of_file_formats) lists more than 1,000 different file formats, each of which is its own language. To formally describe languages, the field of *formal languages* has devised a number of *language specifications* that describe a language. *Regular expressions* represent the simplest class of these languages to denote sets of strings: The regular expression `[a-z]*`, for instance, denotes a (possibly empty) sequence of lowercase letters. *Automata theory* connects these languages to automata that accept these inputs; *finite state machines*, for instance, can be used to specify the language of regular expressions. Regular expressions are great for not-too-complex input formats, and the associated finite state machines have many properties that make them great for reasoning. To specify more complex inputs, though, they quickly encounter limitations. At the other end of the language spectrum, we have *universal grammars* that denote the language accepted by *Turing machines*. A Turing machine can compute anything that can be computed; and with Python being Turing-complete, this means that we can also use a Python program $p$ to specify or even enumerate legal inputs. But then, computer science theory also tells us that each such testing program has to be written specifically for the program to be tested, which is not the level of automation we want. ## Grammars The middle ground between regular expressions and Turing machines is covered by *grammars*. Grammars are among the most popular (and best understood) formalisms to formally specify input languages. Using a grammar, one can express a wide range of the properties of an input language. 
Grammars are particularly great for expressing the *syntactical structure* of an input, and are the formalism of choice to express nested or recursive inputs. The grammars we use are so-called *context-free grammars*, one of the easiest and most popular grammar formalisms. ### Rules and Expansions A grammar consists of a *start symbol* and a set of *expansion rules* (or simply *rules*) which indicate how the start symbol (and other symbols) can be expanded. As an example, consider the following grammar, denoting a sequence of two digits: ``` <start> ::= <digit><digit> <digit> ::= 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 ``` To read such a grammar, start with the start symbol (`<start>`). An expansion rule `<A> ::= <B>` means that the symbol on the left side (`<A>`) can be replaced by the string on the right side (`<B>`). In the above grammar, `<start>` would be replaced by `<digit><digit>`. In this string again, `<digit>` would be replaced by the string on the right side of the `<digit>` rule. The special operator `|` denotes *expansion alternatives* (or simply *alternatives*), meaning that any of the digits can be chosen for an expansion. Each `<digit>` thus would be expanded into one of the given digits, eventually yielding a string between `00` and `99`. There are no further expansions for `0` to `9`, so we are all set. The interesting thing about grammars is that they can be *recursive*. That is, expansions can make use of symbols expanded earlier – which would then be expanded again. As an example, consider a grammar that describes integers: ``` <start> ::= <integer> <integer> ::= <digit> | <digit><integer> <digit> ::= 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 ``` Here, a `<integer>` is either a single digit, or a digit followed by another integer. The number `1234` thus would be represented as a single digit `1`, followed by the integer `234`, which in turn is a digit `2`, followed by the integer `34`. 
If we wanted to express that an integer can be preceded by a sign (`+` or `-`), we would write the grammar as ``` <start> ::= <number> <number> ::= <integer> | +<integer> | -<integer> <integer> ::= <digit> | <digit><integer> <digit> ::= 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 ``` These rules formally define the language: Anything that can be derived from the start symbol is part of the language; anything that cannot is not. ### Arithmetic Expressions Let us expand our grammar to cover full *arithmetic expressions* – a poster child example for a grammar. We see that an expression (`<expr>`) is either a sum, or a difference, or a term; a term is either a product or a division, or a factor; and a factor is either a number or a parenthesized expression. Almost all rules can have recursion, and thus allow arbitrary complex expressions such as `(1 + 2) * (3.4 / 5.6 - 789)`. ``` <start> ::= <expr> <expr> ::= <term> + <expr> | <term> - <expr> | <term> <term> ::= <term> * <factor> | <term> / <factor> | <factor> <factor> ::= +<factor> | -<factor> | (<expr>) | <integer> | <integer>.<integer> <integer> ::= <digit><integer> | <digit> <digit> ::= 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 ``` In such a grammar, if we start with `<start>` and then expand one symbol after another, randomly choosing alternatives, we can quickly produce one valid arithmetic expression after another. Such *grammar fuzzing* is highly effective as it comes to produce complex inputs, and this is what we will implement in this chapter. ## Representing Grammars in Python Our first step in building a grammar fuzzer is to find an appropriate format for grammars. To make the writing of grammars as simple as possible, we use a format that is based on strings and lists. Our grammars in Python take the format of a _mapping_ between symbol names and expansions, where expansions are _lists_ of alternatives. 
A one-rule grammar for digits thus takes the form ``` DIGIT_GRAMMAR = { "<start>": ["0", "1", "2", "3", "4", "5", "6", "7", "8", "9"] } ``` whereas the full grammar for arithmetic expressions looks like this: ``` EXPR_GRAMMAR = { "<start>": ["<expr>"], "<expr>": ["<term> + <expr>", "<term> - <expr>", "<term>"], "<term>": ["<factor> * <term>", "<factor> / <term>", "<factor>"], "<factor>": ["+<factor>", "-<factor>", "(<expr>)", "<integer>.<integer>", "<integer>"], "<integer>": ["<digit><integer>", "<digit>"], "<digit>": ["0", "1", "2", "3", "4", "5", "6", "7", "8", "9"] } ``` In the grammar, every symbol can be defined exactly once. We can access any rule by its symbol... ``` EXPR_GRAMMAR["<digit>"] ``` ....and we can check whether a symbol is in the grammar: ``` "<identifier>" in EXPR_GRAMMAR ``` Note that we assume that on the left hand side of a rule (i.e., the key in the mapping) is always a single symbol. This is the property that gives our grammars the characterization of _context-free_. ## Some Definitions We assume that the canonical start symbol is `<start>`: ``` START_SYMBOL = "<start>" ``` The handy `nonterminals()` function extracts the list of nonterminal symbols (i.e., anything between `<` and `>`, except spaces) from an expansion. 
``` import re RE_NONTERMINAL = re.compile(r'(<[^<> ]*>)') def nonterminals(expansion): # In later chapters, we allow expansions to be tuples, # with the expansion being the first element if isinstance(expansion, tuple): expansion = expansion[0] return re.findall(RE_NONTERMINAL, expansion) assert nonterminals("<term> * <factor>") == ["<term>", "<factor>"] assert nonterminals("<digit><integer>") == ["<digit>", "<integer>"] assert nonterminals("1 < 3 > 2") == [] assert nonterminals("1 <3> 2") == ["<3>"] assert nonterminals("1 + 2") == [] assert nonterminals(("<1>", {'option': 'value'})) == ["<1>"] ``` Likewise, `is_nonterminal()` checks whether some symbol is a nonterminal: ``` def is_nonterminal(s): return re.match(RE_NONTERMINAL, s) assert is_nonterminal("<abc>") assert is_nonterminal("<symbol-1>") assert not is_nonterminal("+") ``` ## A Simple Grammar Fuzzer Let us now put the above grammars to use. We will build a very simple grammar fuzzer that starts with a start symbol (`<start>`) and then keeps on expanding it. To avoid expansion to infinite inputs, we place a limit (`max_nonterminals`) on the number of nonterminals. Furthermore, to avoid being stuck in a situation where we cannot reduce the number of symbols any further, we also limit the total number of expansion steps. 
``` import random class ExpansionError(Exception): pass def simple_grammar_fuzzer(grammar, start_symbol=START_SYMBOL, max_nonterminals=10, max_expansion_trials=100, log=False): term = start_symbol expansion_trials = 0 while len(nonterminals(term)) > 0: symbol_to_expand = random.choice(nonterminals(term)) expansions = grammar[symbol_to_expand] expansion = random.choice(expansions) new_term = term.replace(symbol_to_expand, expansion, 1) if len(nonterminals(new_term)) < max_nonterminals: term = new_term if log: print("%-40s" % (symbol_to_expand + " -> " + expansion), term) expansion_trials = 0 else: expansion_trials += 1 if expansion_trials >= max_expansion_trials: raise ExpansionError("Cannot expand " + repr(term)) return term ``` Let us see how this simple grammar fuzzer obtains an arithmetic expression from the start symbol: ``` simple_grammar_fuzzer(grammar=EXPR_GRAMMAR, max_nonterminals=3, log=True) ``` By increasing the limit of nonterminals, we can quickly get much longer productions: ``` for i in range(10): print(simple_grammar_fuzzer(grammar=EXPR_GRAMMAR, max_nonterminals=5)) ``` Note that this fuzzer is rather inefficient due to the large number of search and replace operations. On the other hand, the implementation is straightforward and does the job in most cases. For this chapter, we'll stick to it; in the [next chapter](GrammarFuzzer.ipynb), we'll show how to build a more efficient one. ## Visualizing Grammars as Railroad Diagrams With grammars, we can easily specify the format for several of the examples we discussed earlier. The above arithmetic expressions, for instance, can be directly sent into `bc` (or any other program that takes arithmetic expressions). Before we introduce a few additional grammars, let us give a means to _visualize_ them, giving an alternate view to aid their understanding. _Railroad diagrams_, also called _syntax diagrams_, are a graphical representation of context-free grammars. 
They are read left to right, following possible "rail" tracks; the sequence of symbols encountered on the track defines the language. We use [RailroadDiagrams](RailroadDiagrams.ipynb), an external library for visualization. ``` from RailroadDiagrams import NonTerminal, Terminal, Choice, HorizontalChoice, Sequence, Diagram, show_diagram from IPython.display import SVG, display ``` We first define the method `syntax_diagram_symbol()` to visualize a given symbol. Terminal symbols are denoted as ovals, whereas nonterminal symbols (such as `<term>`) are denoted as rectangles. ``` def syntax_diagram_symbol(symbol): if is_nonterminal(symbol): return NonTerminal(symbol[1:-1]) else: return Terminal(symbol) SVG(show_diagram(syntax_diagram_symbol('<term>'))) ``` We define `syntax_diagram_expr()` to visualize expansion alternatives. ``` def syntax_diagram_expr(expansion): # In later chapters, we allow expansions to be tuples, # with the expansion being the first element if isinstance(expansion, tuple): expansion = expansion[0] symbols = [sym for sym in re.split(RE_NONTERMINAL, expansion) if sym != ""] if len(symbols) == 0: symbols = [""] # special case: empty expansion return Sequence(*[syntax_diagram_symbol(sym) for sym in symbols]) SVG(show_diagram(syntax_diagram_expr(EXPR_GRAMMAR['<term>'][0]))) ``` This is the first alternative of `<term>` – a `<factor>` followed by `*` and a `<term>`. Next, we define `syntax_diagram_alt()` for displaying alternate expressions. 
``` from itertools import zip_longest def syntax_diagram_alt(alt): max_len = 5 alt_len = len(alt) if alt_len > max_len: iter_len = alt_len // max_len alts = list(zip_longest(*[alt[i::iter_len] for i in range(iter_len)])) exprs = [[syntax_diagram_expr(expr) for expr in alt if expr is not None] for alt in alts] choices = [Choice(len(expr) // 2, *expr) for expr in exprs] return HorizontalChoice(*choices) else: return Choice(alt_len // 2, *[syntax_diagram_expr(expr) for expr in alt]) SVG(show_diagram(syntax_diagram_alt(EXPR_GRAMMAR['<digit>']))) ``` We see that a `<digit>` can be any single digit from `0` to `9`. Finally, we define `syntax_diagram()` which given a grammar, displays the syntax diagram of its rules. ``` def syntax_diagram(grammar): from IPython.display import SVG, display for key in grammar: print("%s" % key[1:-1]) display(SVG(show_diagram(syntax_diagram_alt(grammar[key])))) syntax_diagram(EXPR_GRAMMAR) ``` This railroad representation will come in handy as it comes to visualizing the structure of grammars – especially for more complex grammars. ## Some Grammars Let us create (and visualize) some more grammars and use them for fuzzing. ### A CGI Grammar Here's a grammar for `cgi_decode()` introduced in the [chapter on coverage](Coverage.ipynb). 
``` CGI_GRAMMAR = { "<start>": ["<string>"], "<string>": ["<letter>", "<letter><string>"], "<letter>": ["<plus>", "<percent>", "<other>"], "<plus>": ["+"], "<percent>": ["%<hexdigit><hexdigit>"], "<hexdigit>": ["0", "1", "2", "3", "4", "5", "6", "7", "8", "9", "a", "b", "c", "d", "e", "f"], "<other>": # Actually, could be _all_ letters ["0", "1", "2", "3", "4", "5", "a", "b", "c", "d", "e", "-", "_"], } syntax_diagram(CGI_GRAMMAR) ``` In contrast to [basic fuzzing](Fuzzer.ipynb) or [mutation-based fuzzing](MutationFuzzer.ipynb), the grammar quickly produces all sorts of combinations: ``` for i in range(10): print(simple_grammar_fuzzer(grammar=CGI_GRAMMAR, max_nonterminals=10)) ``` ### A URL Grammar The same properties we have seen for CGI input also hold for more complex inputs. Let us use a grammar to produce a large number of valid URLs: ``` URL_GRAMMAR = { "<start>": ["<url>"], "<url>": ["<scheme>://<authority><path><query>"], "<scheme>": ["http", "https", "ftp", "ftps"], "<authority>": ["<host>", "<host>:<port>", "<userinfo>@<host>", "<userinfo>@<host>:<port>"], "<host>": # Just a few ["cispa.saarland", "www.google.com", "fuzzingbook.com"], "<port>": ["80", "8080", "<nat>"], "<nat>": ["<digit>", "<digit><digit>"], "<digit>": ["0", "1", "2", "3", "4", "5", "6", "7", "8", "9"], "<userinfo>": # Just one ["user:password"], "<path>": # Just a few ["", "/", "/<id>"], "<id>": # Just a few ["abc", "def", "x<digit><digit>"], "<query>": ["", "?<params>"], "<params>": ["<param>", "<param>&<params>"], "<param>": # Just a few ["<id>=<id>", "<id>=<nat>"], } syntax_diagram(URL_GRAMMAR) ``` Again, within milliseconds, we can produce plenty of valid inputs. ``` for i in range(10): print(simple_grammar_fuzzer(grammar=URL_GRAMMAR, max_nonterminals=10)) ``` ### A Natural Language Grammar Finally, grammars are not limited to *formal languages* such as computer inputs, but can also be used to produce *natural language*. 
This is the grammar we used to pick a title for this book: ``` TITLE_GRAMMAR = { "<start>": ["<title>"], "<title>": ["<topic>: <subtopic>"], "<topic>": ["Generating Software Tests", "<fuzzing-prefix>Fuzzing", "The Fuzzing Book"], "<fuzzing-prefix>": ["", "The Art of ", "The Joy of "], "<subtopic>": ["<subtopic-main>", "<subtopic-prefix><subtopic-main>", "<subtopic-main><subtopic-suffix>"], "<subtopic-main>": ["Breaking Software", "Generating Software Tests", "Principles, Techniques and Tools"], "<subtopic-prefix>": ["", "Tools and Techniques for "], "<subtopic-suffix>": [" for <reader-property> and <reader-property>", " for <software-property> and <software-property>"], "<reader-property>": ["Fun", "Profit"], "<software-property>": ["Robustness", "Reliability", "Security"], } syntax_diagram(TITLE_GRAMMAR) titles = set() while len(titles) < 10: titles.add(simple_grammar_fuzzer( grammar=TITLE_GRAMMAR, max_nonterminals=10)) titles ``` (If you find that there is redundancy ("Robustness and Robustness") in here: In [our chapter on coverage-based fuzzing](GrammarCoverageFuzzer.ipynb), we will show how to cover each expansion only once. And if you like some alternatives more than others, [probabilistic grammar fuzzing](ProbabilisticGrammarFuzzer.ipynb) will be there for you.) ## Grammars as Mutation Seeds One very useful property of grammars is that they produce mostly valid inputs. From a syntactical standpoint, the inputs are actually _always_ valid, as they satisfy the constraints of the given grammar. (Of course, one needs a valid grammar in the first place.) However, there are also _semantical_ properties that cannot be easily expressed in a grammar. If, say, for a URL, the port range is supposed to be between 1024 and 2048, this is hard to write in a grammar. If one has to satisfy more complex constraints, one quickly reaches the limits of what a grammar can express. 
One way around this is to attach constraints to grammars, as we will discuss [later in this book](ConstraintFuzzer.ipynb). Another possibility is to put together the strengths of grammar-based fuzzing and [mutation-based fuzzing](MutationFuzzer.ipynb). The idea is to use the grammar-generated inputs as *seeds* for further mutation-based fuzzing. This way, we can explore not only _valid_ inputs, but also check out the _boundaries_ between valid and invalid inputs. This is particularly interesting as slightly invalid inputs allow to find parser errors (which are often abundant). As with fuzzing in general, it is the unexpected which reveals errors in programs. To use our generated inputs as seeds, we can feed them directly into the mutation fuzzers introduced earlier: ``` from MutationFuzzer import MutationFuzzer # minor dependency number_of_seeds = 10 seeds = [ simple_grammar_fuzzer( grammar=URL_GRAMMAR, max_nonterminals=10) for i in range(number_of_seeds)] seeds m = MutationFuzzer(seeds) [m.fuzz() for i in range(20)] ``` While the first 10 `fuzz()` calls return the seeded inputs (as designed), the later ones again create arbitrary mutations. Using `MutationCoverageFuzzer` instead of `MutationFuzzer`, we could again have our search guided by coverage – and thus bring together the best of multiple worlds. ## A Grammar Toolbox Let us now introduce a few techniques that help us writing grammars. ### Escapes With `<` and `>` delimiting nonterminals in our grammars, how can we actually express that some input should contain `<` and `>`? The answer is simple: Just introduce a symbol for them. ``` simple_nonterminal_grammar = { "<start>": ["<nonterminal>"], "<nonterminal>": ["<left-angle><identifier><right-angle>"], "<left-angle>": ["<"], "<right-angle>": [">"], "<identifier>": ["id"] # for now } ``` In `simple_nonterminal_grammar`, neither the expansion for `<left-angle>` nor the expansion for `<right-angle>` can be mistaken as a nonterminal. 
Hence, we can produce as many as we want. ### Extending Grammars In the course of this book, we frequently run into the issue of creating a grammar by _extending_ an existing grammar with new features. Such an extension is very much like subclassing in object-oriented programming. To create a new grammar $g'$ from an existing grammar $g$, we first copy $g$ into $g'$, and then go and extend existing rules with new alternatives and/or add new symbols. Here's an example, extending the above `nonterminal` grammar with a better rule for identifiers: ``` import copy nonterminal_grammar = copy.deepcopy(simple_nonterminal_grammar) nonterminal_grammar["<identifier>"] = ["<idchar>", "<identifier><idchar>"] nonterminal_grammar["<idchar>"] = ['a', 'b', 'c', 'd'] # for now nonterminal_grammar ``` Since such an extension of grammars is a common operation, we introduce a custom function `extend_grammar()` which first copies the given grammar and then updates it from a dictionary, using the Python dictionary `update()` method: ``` def extend_grammar(grammar, extension={}): new_grammar = copy.deepcopy(grammar) new_grammar.update(extension) return new_grammar ``` This call to `extend_grammar()` extends `simple_nonterminal_grammar` to `nonterminal_grammar` just like the "manual" example above: ``` nonterminal_grammar = extend_grammar(simple_nonterminal_grammar, { "<identifier>": ["<idchar>", "<identifier><idchar>"], # for now "<idchar>": ['a', 'b', 'c', 'd'] } ) ``` ### Character Classes In the above `nonterminal_grammar`, we have enumerated only the first few letters; indeed, enumerating all letters or digits in a grammar manually, as in `<idchar> ::= 'a' | 'b' | 'c' ...` is a bit painful. However, remember that grammars are part of a program, and can thus also be constructed programmatically. 
We introduce a function `srange()` which constructs a list of characters in a string: ``` import string def srange(characters): """Construct a list with all characters in the string""" return [c for c in characters] ``` If we pass it the constant `string.ascii_letters`, which holds all ASCII letters, `srange()` returns a list of all ASCII letters: ``` string.ascii_letters srange(string.ascii_letters)[:10] ``` We can use such constants in our grammar to quickly define identifiers: ``` nonterminal_grammar = extend_grammar(nonterminal_grammar, { "<idchar>": srange(string.ascii_letters) + srange(string.digits) + srange("-_") } ) [simple_grammar_fuzzer(nonterminal_grammar, "<identifier>") for i in range(10)] ``` The shortcut `crange(start, end)` returns a list of all characters in the ASCII range of `start` to (including) `end`: ``` def crange(character_start, character_end): return [chr(i) for i in range(ord(character_start), ord(character_end) + 1)] ``` We can use this to express ranges of characters: ``` crange('0', '9') assert crange('a', 'z') == srange(string.ascii_lowercase) ``` ### Grammar Shortcuts In the above `nonterminal_grammar`, as in other grammars, we have to express repetitions of characters using _recursion_, that is, by referring to the original definition: ``` nonterminal_grammar["<identifier>"] ``` It could be a bit easier if we simply could state that a nonterminal should be a non-empty sequence of letters – for instance, as in ``` <identifier> = <idchar>+ ``` where `+` denotes a non-empty repetition of the symbol it follows. Operators such as `+` are frequently introduced as handy _shortcuts_ in grammars. Formally, our grammars come in the so-called [Backus-Naur form](https://en.wikipedia.org/wiki/Backus-Naur_form), or *BNF* for short. Operators _extend_ BNF to so-called _extended BNF*, or *EBNF* for short: * The form `<symbol>?` indicates that `<symbol>` is optional – that is, it can occur 0 or 1 times. 
* The form `<symbol>+` indicates that `<symbol>` can occur 1 or more times repeatedly. * The form `<symbol>*` indicates that `<symbol>` can occur 0 or more times. (In other words, it is an optional repetition.) To make matters even more interesting, we would like to use _parentheses_ with the above shortcuts. Thus, `(<foo><bar>)?` indicates that the sequence of `<foo>` and `<bar>` is optional. Using such operators, we can define the identifier rule in a simpler way. To this end, let us create a copy of the original grammar and modify the `<identifier>` rule: ``` nonterminal_ebnf_grammar = extend_grammar(nonterminal_grammar, { "<identifier>": ["<idchar>+"] } ) ``` Likewise, we can simplify the expression grammar. Consider how signs are optional, and how integers can be expressed as sequences of digits. ``` EXPR_EBNF_GRAMMAR = { "<start>": ["<expr>"], "<expr>": ["<term> + <expr>", "<term> - <expr>", "<term>"], "<term>": ["<factor> * <term>", "<factor> / <term>", "<factor>"], "<factor>": ["<sign>?<factor>", "(<expr>)", "<integer>(.<integer>)?"], "<sign>": ["+", "-"], "<integer>": ["<digit>+"], "<digit>": srange(string.digits) } ``` Our aim is to convert EBNF grammars such as the ones above into a regular BNF grammar. This is done by four rules: 1. An expression `(content)op`, where `op` is one of `?`, `+`, `*`, becomes `<new-symbol>op`, with a new rule `<new-symbol> ::= content`. 2. An expression `<symbol>?` becomes `<new-symbol>`, where `<new-symbol> ::= <empty> | <symbol>`. 3. An expression `<symbol>+` becomes `<new-symbol>`, where `<new-symbol> ::= <symbol> | <symbol><new-symbol>`. 4. An expression `<symbol>*` becomes `<new-symbol>`, where `<new-symbol> ::= <empty> | <symbol><new-symbol>`. Here, `<empty>` expands to the empty string, as in `<empty> ::= `. (This is also called an *epsilon expansion*.) 
If these operators remind you of _regular expressions_, this is not by accident: Actually, any basic regular expression can be converted into a grammar using the above rules (and character classes with `crange()`, as defined above). Applying these rules on the examples above yields the following results: * `<idchar>+` becomes `<idchar><new-symbol>` with `<new-symbol> ::= <idchar> | <idchar><new-symbol>`. * `<integer>(.<integer>)?` becomes `<integer><new-symbol>` with `<new-symbol> ::= <empty> | .<integer>`. Let us implement these rules in three steps. #### Creating New Symbols First, we need a mechanism to create new symbols. This is fairly straightforward. ``` def new_symbol(grammar, symbol_name="<symbol>"): """Return a new symbol for `grammar` based on `symbol_name`""" if symbol_name not in grammar: return symbol_name count = 1 while True: tentative_symbol_name = symbol_name[:-1] + "-" + repr(count) + ">" if tentative_symbol_name not in grammar: return tentative_symbol_name count += 1 assert new_symbol(EXPR_EBNF_GRAMMAR, '<expr>') == '<expr-1>' ``` #### Expanding Parenthesized Expressions Next, we need a means to extract parenthesized expressions from our expansions and expand them according to the rules above. Let's start with extracting expressions: ``` RE_PARENTHESIZED_EXPR = re.compile(r'\([^()]*\)[?+*]') def parenthesized_expressions(expansion): # In later chapters, we allow expansions to be tuples, # with the expansion being the first element if isinstance(expansion, tuple): expansion = expansion[0] return re.findall(RE_PARENTHESIZED_EXPR, expansion) assert parenthesized_expressions("(<foo>)* (<foo><bar>)+ (+<foo>)? <integer>(.<integer>)?") == [ '(<foo>)*', '(<foo><bar>)+', '(+<foo>)?', '(.<integer>)?'] ``` We can now use these to apply rule number 1, above, introducing new symbols for expressions in parentheses. 
``` def convert_ebnf_parentheses(ebnf_grammar): """Convert a grammar in extended BNF to BNF""" grammar = extend_grammar(ebnf_grammar) for nonterminal in ebnf_grammar: expansions = ebnf_grammar[nonterminal] for i in range(len(expansions)): expansion = expansions[i] while True: parenthesized_exprs = parenthesized_expressions(expansion) if len(parenthesized_exprs) == 0: break for expr in parenthesized_exprs: operator = expr[-1:] contents = expr[1:-2] new_sym = new_symbol(grammar) expansion = grammar[nonterminal][i].replace( expr, new_sym + operator, 1) grammar[nonterminal][i] = expansion grammar[new_sym] = [contents] return grammar ``` This does the conversion as sketched above: ``` convert_ebnf_parentheses({"<number>": ["<integer>(.<integer>)?"]}) ``` It even works for nested parenthesized expressions: ``` convert_ebnf_parentheses({"<foo>": ["((<foo>)?)+"]}) ``` #### Expanding Operators After expanding parenthesized expressions, we now need to take care of symbols followed by operators (`?`, `*`, `+`). As with `convert_ebnf_parentheses()`, above, we first extract all symbols followed by an operator. ``` RE_EXTENDED_NONTERMINAL = re.compile(r'(<[^<> ]*>[?+*])') def extended_nonterminals(expansion): # In later chapters, we allow expansions to be tuples, # with the expansion being the first element if isinstance(expansion, tuple): expansion = expansion[0] return re.findall(RE_EXTENDED_NONTERMINAL, expansion) assert extended_nonterminals( "<foo>* <bar>+ <elem>? <none>") == ['<foo>*', '<bar>+', '<elem>?'] ``` Our converter extracts the symbol and the operator, and adds new symbols according to the rules laid out above. 
``` def convert_ebnf_operators(ebnf_grammar): """Convert a grammar in extended BNF to BNF""" grammar = extend_grammar(ebnf_grammar) for nonterminal in ebnf_grammar: expansions = ebnf_grammar[nonterminal] for i in range(len(expansions)): expansion = expansions[i] extended_symbols = extended_nonterminals(expansion) for extended_symbol in extended_symbols: operator = extended_symbol[-1:] original_symbol = extended_symbol[:-1] assert original_symbol in ebnf_grammar, \ f"{original_symbol} is not defined in grammar" new_sym = new_symbol(grammar, original_symbol) grammar[nonterminal][i] = grammar[nonterminal][i].replace( extended_symbol, new_sym, 1) if operator == '?': grammar[new_sym] = ["", original_symbol] elif operator == '*': grammar[new_sym] = ["", original_symbol + new_sym] elif operator == '+': grammar[new_sym] = [ original_symbol, original_symbol + new_sym] return grammar convert_ebnf_operators({"<integer>": ["<digit>+"], "<digit>": ["0"]}) ``` #### All Together We can combine the two, first extending parentheses and then operators: ``` def convert_ebnf_grammar(ebnf_grammar): return convert_ebnf_operators(convert_ebnf_parentheses(ebnf_grammar)) convert_ebnf_grammar({"<authority>": ["(<userinfo>@)?<host>(:<port>)?"]}) expr_grammar = convert_ebnf_grammar(EXPR_EBNF_GRAMMAR) expr_grammar ``` Success! We have nicely converted the EBNF grammar into BNF. With character classes and EBNF grammar conversion, we have two powerful tools that make the writing of grammars easier. We will use these again and again as it comes to working with grammars. ### Grammar Extensions During the course of this book, we frequently want to specify _additional information_ for grammars, such as [_probabilities_](ProbabilisticGrammarFuzzer.ipynb) or [_constraints_](GeneratorGrammarFuzzer.ipynb). To support these extensions, as well as possibly others, we define an _annotation_ mechanism. Our concept for annotating grammars is to add _annotations_ to individual expansions. 
To this end, we allow that an expansion cannot only be a string, but also a _pair_ of a string and a set of attributes, as in ```python "<expr>": [("<term> + <expr>", opts(min_depth=10)), ("<term> - <expr>", opts(max_depth=2)), "<term>"] ``` Here, the `opts()` function would allow us to express annotations that apply to the individual expansions; in this case, the addition would be annotated with a `min_depth` value of 10, and the subtraction with a `max_depth` value of 2. The meaning of these annotations is left to the individual algorithms dealing with the grammars; the general idea, though, is that they can be ignored. Our `opts()` helper function returns a mapping of its arguments to values: ``` def opts(**kwargs): return kwargs opts(min_depth=10) ``` To deal with both expansion strings and pairs of expansions and annotations, we access the expansion string and the associated annotations via designated helper functions, `exp_string()` and `exp_opts()`: ``` def exp_string(expansion): """Return the string to be expanded""" if isinstance(expansion, str): return expansion return expansion[0] exp_string(("<term> + <expr>", opts(min_depth=10))) def exp_opts(expansion): """Return the options of an expansion. If options are not defined, return {}""" if isinstance(expansion, str): return {} return expansion[1] def exp_opt(expansion, attribute): """Return the given attribution of an expansion. 
If attribute is not defined, return None""" return exp_opts(expansion).get(attribute, None) exp_opts(("<term> + <expr>", opts(min_depth=10))) exp_opt(("<term> - <expr>", opts(max_depth=2)), 'max_depth') ``` Finally, we define a helper function that sets a particular option: ``` def set_opts(grammar, symbol, expansion, opts=None): """Set the options of the given expansion of grammar[symbol] to opts""" expansions = grammar[symbol] for i, exp in enumerate(expansions): if exp_string(exp) != exp_string(expansion): continue new_opts = exp_opts(exp) if opts is None or new_opts == {}: new_opts = opts else: for key in opts: new_opts[key] = opts[key] if new_opts == {}: grammar[symbol][i] = exp_string(exp) else: grammar[symbol][i] = (exp_string(exp), new_opts) return raise KeyError( "no expansion " + repr(symbol) + " -> " + repr( exp_string(expansion))) ``` ## Checking Grammars Since grammars are represented as strings, it is fairly easy to introduce errors. So let us introduce a helper function that checks a grammar for consistency. The helper function `is_valid_grammar()` iterates over a grammar to check whether all used symbols are defined, and vice versa, which is very useful for debugging; it also checks whether all symbols are reachable from the start symbol. You don't have to delve into details here, but as always, it is important to get the input data straight before we make use of it. 
``` import sys def def_used_nonterminals(grammar, start_symbol=START_SYMBOL): defined_nonterminals = set() used_nonterminals = {start_symbol} for defined_nonterminal in grammar: defined_nonterminals.add(defined_nonterminal) expansions = grammar[defined_nonterminal] if not isinstance(expansions, list): print(repr(defined_nonterminal) + ": expansion is not a list", file=sys.stderr) return None, None if len(expansions) == 0: print(repr(defined_nonterminal) + ": expansion list empty", file=sys.stderr) return None, None for expansion in expansions: if isinstance(expansion, tuple): expansion = expansion[0] if not isinstance(expansion, str): print(repr(defined_nonterminal) + ": " + repr(expansion) + ": not a string", file=sys.stderr) return None, None for used_nonterminal in nonterminals(expansion): used_nonterminals.add(used_nonterminal) return defined_nonterminals, used_nonterminals def reachable_nonterminals(grammar, start_symbol=START_SYMBOL): reachable = set() def _find_reachable_nonterminals(grammar, symbol): nonlocal reachable reachable.add(symbol) for expansion in grammar.get(symbol, []): for nonterminal in nonterminals(expansion): if nonterminal not in reachable: _find_reachable_nonterminals(grammar, nonterminal) _find_reachable_nonterminals(grammar, start_symbol) return reachable def unreachable_nonterminals(grammar, start_symbol=START_SYMBOL): return grammar.keys() - reachable_nonterminals(grammar, start_symbol) def opts_used(grammar): used_opts = set() for symbol in grammar: for expansion in grammar[symbol]: used_opts |= set(exp_opts(expansion).keys()) return used_opts def is_valid_grammar(grammar, start_symbol=START_SYMBOL, supported_opts=None): defined_nonterminals, used_nonterminals = \ def_used_nonterminals(grammar, start_symbol) if defined_nonterminals is None or used_nonterminals is None: return False # Do not complain about '<start>' being not used, # even if start_symbol is different if START_SYMBOL in grammar: used_nonterminals.add(START_SYMBOL) for 
unused_nonterminal in defined_nonterminals - used_nonterminals: print(repr(unused_nonterminal) + ": defined, but not used", file=sys.stderr) for undefined_nonterminal in used_nonterminals - defined_nonterminals: print(repr(undefined_nonterminal) + ": used, but not defined", file=sys.stderr) # Symbols must be reachable either from <start> or given start symbol unreachable = unreachable_nonterminals(grammar, start_symbol) msg_start_symbol = start_symbol if START_SYMBOL in grammar: unreachable = unreachable - \ reachable_nonterminals(grammar, START_SYMBOL) if start_symbol != START_SYMBOL: msg_start_symbol += " or " + START_SYMBOL for unreachable_nonterminal in unreachable: print(repr(unreachable_nonterminal) + ": unreachable from " + msg_start_symbol, file=sys.stderr) used_but_not_supported_opts = set() if supported_opts is not None: used_but_not_supported_opts = opts_used( grammar).difference(supported_opts) for opt in used_but_not_supported_opts: print( "warning: option " + repr(opt) + " is not supported", file=sys.stderr) return used_nonterminals == defined_nonterminals and len(unreachable) == 0 ``` Our grammars defined above pass the test: ``` assert is_valid_grammar(EXPR_GRAMMAR) assert is_valid_grammar(CGI_GRAMMAR) assert is_valid_grammar(URL_GRAMMAR) ``` The check can also be applied to EBNF grammars: ``` assert is_valid_grammar(EXPR_EBNF_GRAMMAR) ``` These ones do not pass the test, though: ``` assert not is_valid_grammar({"<start>": ["<x>"], "<y>": ["1"]}) assert not is_valid_grammar({"<start>": "123"}) assert not is_valid_grammar({"<start>": []}) assert not is_valid_grammar({"<start>": [1, 2, 3]}) ``` From here on, we will always use `is_valid_grammar()` when defining a grammar. ## Synopsis This chapter introduces _grammars_ as a simple means to specify input languages, and to use them for testing programs with syntactically valid inputs. 
A grammar is defined as a mapping of nonterminal symbols to lists of alternative expansions, as in the following example: ``` US_PHONE_GRAMMAR = { "<start>": ["<phone-number>"], "<phone-number>": ["(<area>)<exchange>-<line>"], "<area>": ["<lead-digit><digit><digit>"], "<exchange>": ["<lead-digit><digit><digit>"], "<line>": ["<digit><digit><digit><digit>"], "<lead-digit>": ["2", "3", "4", "5", "6", "7", "8", "9"], "<digit>": ["0", "1", "2", "3", "4", "5", "6", "7", "8", "9"] } assert is_valid_grammar(US_PHONE_GRAMMAR) ``` Nonterminal symbols are enclosed in angle brackets (say, `<digit>`). To generate an input string from a grammar, a _producer_ starts with the start symbol (`<start>`) and randomly chooses an expansion for this symbol. It continues the process until all nonterminal symbols are expanded. The function `simple_grammar_fuzzer()` does just that: ``` [simple_grammar_fuzzer(US_PHONE_GRAMMAR) for i in range(5)] ``` In practice, though, instead of `simple_grammar_fuzzer()`, you should use [the `GrammarFuzzer` class](GrammarFuzzer.ipynb) or one of its [coverage-based](GrammarCoverageFuzzer.ipynb), [probabilistic-based](ProbabilisticGrammarFuzzer.ipynb), or [generator-based](GeneratorGrammarFuzzer.ipynb) derivatives; these are more efficient, protect against infinite growth, and provide several additional features. This chapter also introduces a [grammar toolbox](#A-Grammar-Toolbox) with several helper functions that ease the writing of grammars, such as using shortcut notations for character classes and repetitions, or extending grammars. ## Lessons Learned * Grammars are powerful tools to express and produce syntactically valid inputs. * Inputs produced from grammars can be used as is, or used as seeds for mutation-based fuzzing. * Grammars can be extended with character classes and operators to make writing easier. ## Next Steps As they make a great foundation for generating software tests, we use grammars again and again in this book. 
As a sneak preview, we can use grammars to [fuzz configurations](ConfigurationFuzzer.ipynb): ``` <options> ::= <option>* <option> ::= -h | --version | -v | -d | -i | --global-config <filename> ``` We can use grammars for [fuzzing functions and APIs](APIFuzzer.ipynb) and [fuzzing graphical user interfaces](WebFuzzer.ipynb): ``` <call-sequence> ::= <call>* <call> ::= urlparse(<url>) | urlsplit(<url>) ``` We can assign [probabilities](ProbabilisticGrammarFuzzer.ipynb) and [constraints](GeneratorGrammarFuzzer.ipynb) to individual expansions: ``` <term>: 50% <factor> * <term> | 30% <factor> / <term> | 20% <factor> <integer>: <digit>+ { <integer> >= 100 } ``` All these extras become especially valuable as we can 1. _infer grammars automatically_, dropping the need to specify them manually, and 2. _guide them towards specific goals_ such as coverage or critical functions; which we also discuss for all techniques in this book. To get there, however, we still have a bit of homework to do. In particular, we first have to learn how to * [create an efficient grammar fuzzer](GrammarFuzzer.ipynb) ## Background As one of the foundations of human language, grammars have been around as long as human language has existed. The first _formalization_ of generative grammars was by Dakṣiputra Pāṇini in 350 BC \cite{Panini350bce}. As a general means to express formal languages for both data and programs, their role in computer science cannot be overstated. The seminal work by Chomsky \cite{Chomsky1956} introduced the central models of regular languages, context-free grammars, context-sensitive grammars, and universal grammars as they are used (and taught) in computer science as a means to specify input and programming languages ever since. The use of grammars for _producing_ test inputs goes back to Burkhardt \cite{Burkhardt1967}, to be later rediscovered and applied by Hanford \cite{Hanford1970} and Purdom \cite{Purdom1972}. 
The most important use of grammar testing since then has been *compiler testing*. Actually, grammar-based testing is one important reason why compilers and Web browsers work as they should: * The [CSmith](https://embed.cs.utah.edu/csmith/) tool \cite{Yang2011} specifically targets C programs, starting with a C grammar and then applying additional steps, such as referring to variables and functions defined earlier or ensuring integer and type safety. Their authors have used it "to find and report more than 400 previously unknown compiler bugs." * The [LangFuzz](http://issta2016.cispa.saarland/interview-with-christian-holler/) work \cite{Holler2012}, which shares two authors with this book, uses a generic grammar to produce outputs, and is used day and night to generate JavaScript programs and test their interpreters; as of today, it has found more than 2,600 bugs in browsers such as Mozilla Firefox, Google Chrome, and Microsoft Edge. * The [EMI Project](http://web.cs.ucdavis.edu/~su/emi-project/) \cite{Le2014} uses grammars to stress-test C compilers, transforming known tests into alternative programs that should be semantically equivalent over all inputs. Again, this has led to more than 100 bugs in C compilers being fixed. * [Grammarinator](https://github.com/renatahodovan/grammarinator) \cite{Hodovan2018} is an open-source grammar fuzzer (written in Python!), using the popular ANTLR format as grammar specification. Like LangFuzz, it uses the grammar for both parsing and producing, and has found more than 100 issues in the *JerryScript* lightweight JavaScript engine and an associated platform. * [Domato](https://github.com/googleprojectzero/domato) is a generic grammar generation engine that is specifically used for fuzzing DOM input. It has revealed a number of security issues in popular Web browsers. Compilers and Web browsers, of course, are not only domains where grammars are needed for testing, but also domains where grammars are well-known. 
Our claim in this book is that grammars can be used to generate almost _any_ input, and our aim is to empower you to do precisely that. ## Exercises ### Exercise 1: A JSON Grammar Take a look at the [JSON specification](http://www.json.org) and derive a grammar from it: * Use _character classes_ to express valid characters * Use EBNF to express repetitions and optional parts * Assume that - a string is a sequence of digits, ASCII letters, punctuation and space characters without quotes or escapes - whitespace is just a single space. * Use `is_valid_grammar()` to ensure the grammar is valid. Feed the grammar into `simple_grammar_fuzzer()`. Do you encounter any errors, and why? **Solution.** This is a fairly straightforward translation: ``` CHARACTERS_WITHOUT_QUOTE = (string.digits + string.ascii_letters + string.punctuation.replace('"', '').replace('\\', '') + ' ') JSON_EBNF_GRAMMAR = { "<start>": ["<json>"], "<json>": ["<element>"], "<element>": ["<ws><value><ws>"], "<value>": ["<object>", "<array>", "<string>", "<number>", "true", "false", "null", "'; DROP TABLE STUDENTS"], "<object>": ["{<ws>}", "{<members>}"], "<members>": ["<member>(,<members>)*"], "<member>": ["<ws><string><ws>:<element>"], "<array>": ["[<ws>]", "[<elements>]"], "<elements>": ["<element>(,<elements>)*"], "<element>": ["<ws><value><ws>"], "<string>": ['"' + "<characters>" + '"'], "<characters>": ["<character>*"], "<character>": srange(CHARACTERS_WITHOUT_QUOTE), "<number>": ["<int><frac><exp>"], "<int>": ["<digit>", "<onenine><digits>", "-<digits>", "-<onenine><digits>"], "<digits>": ["<digit>+"], "<digit>": ['0', "<onenine>"], "<onenine>": crange('1', '9'), "<frac>": ["", ".<digits>"], "<exp>": ["", "E<sign><digits>", "e<sign><digits>"], "<sign>": ["", '+', '-'], # "<ws>": srange(string.whitespace) "<ws>": [" "] } assert is_valid_grammar(JSON_EBNF_GRAMMAR) JSON_GRAMMAR = convert_ebnf_grammar(JSON_EBNF_GRAMMAR) from ExpectError import ExpectError for i in range(50): with ExpectError(): 
print(simple_grammar_fuzzer(JSON_GRAMMAR, '<object>')) ``` We get these errors because `simple_grammar_fuzzer()` first expands to a maximum number of elements, and then is limited because every further expansion would _increase_ the number of nonterminals, even though these may eventually reduce the string length. This issue is addressed in the [next chapter](GrammarFuzzer.ipynb), introducing a more solid algorithm for producing strings from grammars. ### Exercise 2: Finding Bugs The name `simple_grammar_fuzzer()` does not come by accident: The way it expands grammars is limited in several ways. What happens if you apply `simple_grammar_fuzzer()` on `nonterminal_grammar` and `expr_grammar`, as defined above, and why? **Solution**. `nonterminal_grammar` does not work because `simple_grammar_fuzzer()` eventually tries to expand the just generated nonterminal: ``` from ExpectError import ExpectError, ExpectTimeout with ExpectError(): simple_grammar_fuzzer(nonterminal_grammar, log=True) ``` For `expr_grammar`, things are even worse, as `simple_grammar_fuzzer()` can start a series of infinite expansions: ``` with ExpectTimeout(1): for i in range(10): print(simple_grammar_fuzzer(expr_grammar)) ``` Both issues are addressed and discussed in the [next chapter](GrammarFuzzer.ipynb), introducing a more solid algorithm for producing strings from grammars. ### Exercise 3: Grammars with Regular Expressions In a _grammar extended with regular expressions_, we can use the special form ``` /regex/ ``` to include regular expressions in expansions. For instance, we can have a rule ``` <integer> ::= /[+-]?[0-9]+/ ``` to quickly express that an integer is an optional sign, followed by a sequence of digits. #### Part 1: Convert regular expressions Write a converter `convert_regex(r)` that takes a regular expression `r` and creates an equivalent grammar. Support the following regular expression constructs: * `*`, `+`, `?`, `()` should work just in EBNFs, above. 
* `a|b` should translate into a list of alternatives `[a, b]`. * `.` should match any character except newline. * `[abc]` should translate into `srange("abc")` * `[^abc]` should translate into the set of ASCII characters _except_ `srange("abc")`. * `[a-b]` should translate into `crange(a, b)` * `[^a-b]` should translate into the set of ASCII characters _except_ `crange(a, b)`. Example: `convert_regex(r"[0-9]+")` should yield a grammar such as ```python { "<start>": ["<s1>"], "<s1>": [ "<s2>", "<s1><s2>" ], "<s2>": crange('0', '9') } ``` **Solution.** Left as exercise to the reader. #### Part 2: Identify and expand regular expressions Write a converter `convert_regex_grammar(g)` that takes a EBNF grammar `g` containing regular expressions in the form `/.../` and creates an equivalent BNF grammar. Support the regular expression constructs as above. Example: `convert_regex_grammar({ "<integer>" : "/[+-]?[0-9]+/" })` should yield a grammar such as ```python { "<integer>": ["<s1><s3>"], "<s1>": [ "", "<s2>" ], "<s2>": srange("+-"), "<s3>": [ "<s4>", "<s4><s3>" ], "<s4>": crange('0', '9') } ``` Optional: Support _escapes_ in regular expressions: `\c` translates to the literal character `c`; `\/` translates to `/` (and thus does not end the regular expression); `\\` translates to `\`. **Solution.** Left as exercise to the reader. ### Exercise 4: Defining Grammars as Functions (Advanced) To obtain a nicer syntax for specifying grammars, one can make use of Python constructs which then will be _parsed_ by an additional function. 
For instance, we can imagine a grammar definition which uses `|` as a means to separate alternatives: ``` def expression_grammar_fn(): start = "<expr>" expr = "<term> + <expr>" | "<term> - <expr>" term = "<factor> * <term>" | "<factor> / <term>" | "<factor>" factor = "+<factor>" | "-<factor>" | "(<expr>)" | "<integer>.<integer>" | "<integer>" integer = "<digit><integer>" | "<digit>" digit = '0' | '1' | '2' | '3' | '4' | '5' | '6' | '7' | '8' | '9' ``` If we execute `expression_grammar_fn()`, this will yield an error. Yet, the purpose of `expression_grammar_fn()` is not to be executed, but to be used as _data_ from which the grammar will be constructed. ``` with ExpectError(): expression_grammar_fn() ``` To this end, we make use of the `ast` (abstract syntax tree) and `inspect` (code inspection) modules. ``` import ast import inspect ``` First, we obtain the source code of `expression_grammar_fn()`... ``` source = inspect.getsource(expression_grammar_fn) source ``` ... which we then parse into an abstract syntax tree: ``` tree = ast.parse(source) ``` We can now parse the tree to find operators and alternatives. `get_alternatives()` iterates over all nodes `op` of the tree; If the node looks like a binary _or_ (`|` ) operation, we drill deeper and recurse. If not, we have reached a single production, and we try to get the expression from the production. We define the `to_expr` parameter depending on how we want to represent the production. In this case, we represent a single production by a single string. 
``` def get_alternatives(op, to_expr=lambda o: o.s): if isinstance(op, ast.BinOp) and isinstance(op.op, ast.BitOr): return get_alternatives(op.left, to_expr) + [to_expr(op.right)] return [to_expr(op)] ``` `funct_parser()` takes the abstract syntax tree of a function (say, `expression_grammar_fn()`) and iterates over all assignments: ``` def funct_parser(tree, to_expr=lambda o: o.s): return {assign.targets[0].id: get_alternatives(assign.value, to_expr) for assign in tree.body[0].body} ``` The result is a grammar in our regular format: ``` grammar = funct_parser(tree) for symbol in grammar: print(symbol, "::=", grammar[symbol]) ``` #### Part 1 (a): One Single Function Write a single function `define_grammar(fn)` that takes a grammar defined as function (such as `expression_grammar_fn()`) and returns a regular grammar. **Solution**. This is straightforward: ``` def define_grammar(fn, to_expr=lambda o: o.s): source = inspect.getsource(fn) tree = ast.parse(source) grammar = funct_parser(tree, to_expr) return grammar define_grammar(expression_grammar_fn) ``` **Note.** Python allows us to directly bind the generated grammar to the name `expression_grammar_fn` using function decorators. This can be used to ensure that we do not have a faulty function lying around: ```python @define_grammar def expression_grammar(): start = "<expr>" expr = "<term> + <expr>" | "<term> - <expr>" #... ``` #### Part 1 (b): Alternative representations We note that the grammar representation we designed previously does not allow simple generation of alternatives such as `srange()` and `crange()`. Further, one may find the string representation of expressions limiting. 
It turns out that it is simple to extend our grammar definition to support grammars such as below: ``` def define_name(o): return o.id if isinstance(o, ast.Name) else o.s def define_expr(op): if isinstance(op, ast.BinOp) and isinstance(op.op, ast.Add): return (*define_expr(op.left), define_name(op.right)) return (define_name(op),) def define_ex_grammar(fn): return define_grammar(fn, define_expr) ``` The grammar: ```python @define_ex_grammar def expression_grammar(): start = expr expr = (term + '+' + expr | term + '-' + expr) term = (factor + '*' + term | factor + '/' + term | factor) factor = ('+' + factor | '-' + factor | '(' + expr + ')' | integer + '.' + integer | integer) integer = (digit + integer | digit) digit = '0' | '1' | '2' | '3' | '4' | '5' | '6' | '7' | '8' | '9' for symbol in expression_grammar: print(symbol, "::=", expression_grammar[symbol]) ``` **Note.** The grammar data structure thus obtained is a little more detailed than the standard data structure. It represents each production as a tuple. We note that we have not enabled `srange()` or `crange()` in the above grammar. How would you go about adding these? (*Hint:* wrap `define_expr()` to look for `ast.Call`) #### Part 2: Extended Grammars Introduce an operator `*` that takes a pair `(min, max)` where `min` and `max` are the minimum and maximum number of repetitions, respectively. A missing value `min` stands for zero; a missing value `max` for infinity. ``` def identifier_grammar_fn(): identifier = idchar * (1,) ``` With the `*` operator, we can generalize the EBNF operators – `?` becomes (0,1), `*` becomes (0,), and `+` becomes (1,). Write a converter that takes an extended grammar defined using `*`, parse it, and convert it into BNF. **Solution.** No solution yet :-)
github_jupyter
## widgets.image_cleaner fastai offers several widgets to support the workflow of a deep learning practitioner. The purpose of the widgets is to help you organize, clean, and prepare your data for your model. Widgets are separated by data type. ``` from fastai.vision import * from fastai.widgets import DatasetFormatter, ImageCleaner, ImageDownloader, download_google_images from fastai.gen_doc.nbdoc import * %reload_ext autoreload %autoreload 2 path = untar_data(URLs.MNIST_SAMPLE) data = ImageDataBunch.from_folder(path) learn = create_cnn(data, models.resnet18, metrics=error_rate) learn.fit_one_cycle(2) learn.save('stage-1') ``` We create a databunch with all the data in the training set and no validation set (DatasetFormatter uses only the training set). ``` db = (ImageList.from_folder(path) .no_split() .label_from_folder() .databunch()) learn = create_cnn(db, models.resnet18, metrics=[accuracy]) learn.load('stage-1'); show_doc(DatasetFormatter) ``` The [`DatasetFormatter`](/widgets.image_cleaner.html#DatasetFormatter) class prepares your image dataset for widgets by returning a formatted [`DatasetTfm`](/vision.data.html#DatasetTfm) based on the [`DatasetType`](/basic_data.html#DatasetType) specified. Use `from_toplosses` to grab the most problematic images directly from your learner. Optionally, you can restrict the formatted dataset returned to `n_imgs`. ``` show_doc(DatasetFormatter.from_similars) from fastai.gen_doc.nbdoc import * from fastai.widgets.image_cleaner import * show_doc(DatasetFormatter.from_toplosses) show_doc(ImageCleaner) ``` [`ImageCleaner`](/widgets.image_cleaner.html#ImageCleaner) is for cleaning up images that don't belong in your dataset. It renders images in a row and gives you the opportunity to delete the file from your file system. To use [`ImageCleaner`](/widgets.image_cleaner.html#ImageCleaner) we must first use `DatasetFormatter().from_toplosses` to get the suggested indices for misclassified images. 
``` ds, idxs = DatasetFormatter().from_toplosses(learn) ImageCleaner(ds, idxs, path) ``` [`ImageCleaner`](/widgets.image_cleaner.html#ImageCleaner) does not change anything on disk (neither labels nor existence of images). Instead, it creates a 'cleaned.csv' file in your data path from which you need to load your new databunch for the changes to be applied. ``` df = pd.read_csv(path/'cleaned.csv', header='infer') # We create a databunch from our csv. We include the data in the training set and we don't use a validation set (DatasetFormatter uses only the training set) np.random.seed(42) db = (ImageList.from_df(df, path) .no_split() .label_from_df() .databunch(bs=64)) learn = create_cnn(db, models.resnet18, metrics=error_rate) learn = learn.load('stage-1') ``` You can then use [`ImageCleaner`](/widgets.image_cleaner.html#ImageCleaner) again to find duplicates in the dataset. To do this, you can specify `duplicates=True` while calling ImageCleaner after getting the indices and dataset from `.from_similars`. Note that if you are using a layer's output which has dimensions <code>(n_batches, n_features, 1, 1)</code> then you don't need any pooling (this is the case with the last layer). The suggested use of `.from_similars()` with resnets is using the last layer and no pooling, like in the following cell. ``` ds, idxs = DatasetFormatter().from_similars(learn, layer_ls=[0,7,1], pool=None) ImageCleaner(ds, idxs, path, duplicates=True) show_doc(ImageDownloader) ``` The [`ImageDownloader`](/widgets.image_downloader.html#ImageDownloader) widget gives you a way to quickly bootstrap your image dataset without leaving the notebook. It searches and downloads images that match the search criteria and resolution / quality requirements and stores them on your filesystem within the provided `path`. Images for each search query (or label) are stored in a separate folder within `path`. 
For example, if you populate `tiger` with a `path` setup to `./data`, you'll get a folder `./data/tiger/` with the tiger images in it. [`ImageDownloader`](/widgets.image_downloader.html#ImageDownloader) will automatically clean up and verify the downloaded images with [`verify_images()`](/vision.data.html#verify_images) after downloading them. ``` path = Config.data_path()/'image_downloader' os.makedirs(path, exist_ok=True) ImageDownloader(path) ``` #### Downloading images in python scripts outside Jupyter notebooks ``` path = Config.data_path()/'image_downloader' files = download_google_images(path, 'aussie shepherd', size='>1024*768', n_images=30) len(files) show_doc(download_google_images) ``` After populating images with [`ImageDownloader`](/widgets.image_downloader.html#ImageDownloader), you can get an [`ImageDataBunch`](/vision.data.html#ImageDataBunch) by calling `ImageDataBunch.from_folder(path, size=size)`, or using the data block API. ``` # Setup path and labels to search for path = Config.data_path()/'image_downloader' labels = ['boston terrier', 'french bulldog'] # Download images for label in labels: download_google_images(path, label, size='>400*300', n_images=50) # Build a databunch and train! src = (ImageList.from_folder(path) .random_split_by_pct() .label_from_folder() .transform(get_transforms(), size=224)) db = src.databunch(bs=16, num_workers=0) learn = create_cnn(db, models.resnet34, metrics=[accuracy]) learn.fit_one_cycle(3) ``` #### Downloading more than a hundred images To fetch more than a hundred images, [`ImageDownloader`](/widgets.image_downloader.html#ImageDownloader) uses `selenium` and `chromedriver` to scroll through the Google Images search results page and scrape image URLs. They're not required as dependencies by default. If you don't have them installed on your system, the widget will show you an error message. To install `selenium`, just `pip install selenium` in your fastai environment. 
**On a mac**, you can install `chromedriver` with `brew cask install chromedriver`. **On Ubuntu** Take a look at the latest Chromedriver version available, then something like: ``` wget https://chromedriver.storage.googleapis.com/2.45/chromedriver_linux64.zip unzip chromedriver_linux64.zip ``` Note that downloading under 100 images doesn't require any dependencies other than fastai itself, however downloading more than a hundred images [uses `selenium` and `chromedriver`](/widgets.image_cleaner.html#Downloading-more-than-a-hundred-images). `size` can be one of: ``` '>400*300' '>640*480' '>800*600' '>1024*768' '>2MP' '>4MP' '>6MP' '>8MP' '>10MP' '>12MP' '>15MP' '>20MP' '>40MP' '>70MP' ``` ## Methods ## Undocumented Methods - Methods moved below this line will intentionally be hidden ``` show_doc(ImageCleaner.make_dropdown_widget) show_doc(ImageCleaner.next_batch) show_doc(DatasetFormatter.sort_idxs) show_doc(ImageCleaner.make_vertical_box) show_doc(ImageCleaner.relabel) show_doc(DatasetFormatter.largest_indices) show_doc(ImageCleaner.delete_image) show_doc(ImageCleaner.empty) show_doc(ImageCleaner.empty_batch) show_doc(DatasetFormatter.comb_similarity) show_doc(ImageCleaner.get_widgets) show_doc(ImageCleaner.write_csv) show_doc(ImageCleaner.create_image_list) show_doc(ImageCleaner.render) show_doc(DatasetFormatter.get_similars_idxs) show_doc(ImageCleaner.on_delete) show_doc(ImageCleaner.make_button_widget) show_doc(ImageCleaner.make_img_widget) show_doc(DatasetFormatter.get_actns) show_doc(ImageCleaner.batch_contains_deleted) show_doc(ImageCleaner.make_horizontal_box) show_doc(DatasetFormatter.get_toplosses_idxs) show_doc(DatasetFormatter.padded_ds) ``` ## New Methods - Please document or move to the undocumented section
github_jupyter
# Hands-On #11 : Balancing the Cart Pole w/ DDPG! --- ### Goal: - Implement DDPG on the CartPole Environment * It is an overkill, but we will get a good understanding and also can compare against other algorithms ### Steps: 1. Program DDPG Algorithm 2. Run and Optimize 3. Plot Values, like we did in other exercises ### Notebook Organization #### The program has 3 parts : - Part 1 Defines the classes, initiates the environment and so forth. It sets up all the scaffolding needed - Part 2 Explore and Learn - it performs the DDPG Reinforcement Learning. It also saves the best model - Part 3 Run saved model ## Part 1 - Definitions & Setup ### 1.1. Install the required packages * No esoteric requirements * You can run them without docker * pip install -r requirements.txt * Requirements * python 3.6, pytorch, openAI gym, numpy, matplotlib * anaconda is easier but not needed * Miniconda works fine ### 1.2. Define imports python 3, numpy, matplotlib, torch ``` # General imports import numpy as np import random from collections import namedtuple, deque import copy import time from datetime import datetime, timedelta import matplotlib.pyplot as plt %matplotlib inline # torch imports import torch import torch.nn as nn import torch.nn.functional as F import torch.optim as optim # Constants Definitions BUFFER_SIZE = 4096 # 2048 # 512 # int(1e5) # int(1e6) # int(1e5) # replay buffer size ? 
BATCH_SIZE = 32 # 64 # 32 # 128 # 64 # 256 # minibatch size for training GAMMA = 0.99 # discount factor TAU = 0.05 # 1e-3 # for soft update of target parameters LR_ACTOR = 0.01 # 5e-4 # 1e-4 # 0.001 # 1e-4 # learning rate of the actor LR_CRITIC = 0.001 # 6e-4 # 3e-4 # 3e-3 # 0.001 # 3e-4 # learning rate of the critic 0.001 WEIGHT_DECAY = 0.0001 # L2 weight decay # Number of neurons in the layers of the Actor & Critic Networks FC_UNITS_ACTOR = [16,8] # [4,4] # [32,16] #[400,300] #[8,8] #[128,128] # [64,128] # [32,16] # [400,300] # [128,128] FC_UNITS_CRITIC = [16,8]# [4,4] # [32,16] #[400,300] #[8,8] #[128,128] # [64,128] # [32,16] # [400,300] # [128,128] # Store models flag. Store during calibration runs and do not store during hyperparameter search # Used in Part 3 to run a stored model STORE_MODELS = False # True - Turn it on when you are ready to do the calibration training # import gym, PIL # env = gym.make('CartPole-v0') # array = env.reset() # PIL.Image.fromarray(env.render(mode='rgb_array')) ``` ### 3.0 Create instance & Explore ``` import gym env = gym.make('CartPole-v0') env.seed(42) # array = env.reset() env.reset() # ** render doesn't work reliably on a server. Uncomment when running ** locally ** # env.render() #PIL.Image.fromarray(env.render(mode='rgb_array')) ``` ### This what it will look like ### We don't need the render(). We run it on headless mode and inspect the results <img src="CartPole_Render.jpg"> ### 3.1 Examine the State and Action Spaces * The state space is continuous, with an observation space of 4 * {x,$\dot{x}$,$\theta$, theta_dot} * Cart Position, Cart Velocity, Pole Angle, Pole Velocity at tip * The angle, probably, is in radians The action space, on the contrary is simple viz. 
0 = Left, 1 = Right ``` print(env.observation_space) print(env.action_space) act_space = [i for i in range(0,env.action_space.n)] print(act_space) # env.unwrapped.get_action_meanings() # AttributeError: 'FrozenLakeEnv' object has no attribute 'get_action_meanings' print('[ 0 = Left, 1 = Right ]') print(dir(env)) print(dir(env.unwrapped)) # To see what functions and variables are availabe print('States = ',env.unwrapped.observation_space) print('Actions = ',env.unwrapped.action_space) state_size = env.observation_space.shape[0] action_size = env.action_space.n ``` ### 4. Test the environment with Random Action ``` for i_episode in range(3): state = env.reset() tot_reward = 0 steps = 0 while True: action = env.action_space.sample() next_state, reward, done, info = env.step(action) print('[',state,']','->', action,' : [',next_state,']', 'R=',reward) # env.render() tot_reward += reward steps += 1 if done: print('Episode {:d} finished after {:d} steps with a Total Reward = {:.0f}'. format(i_episode+1,steps, tot_reward)) break else: state = next_state # Pole angle +/-12 degrees, Cart Pos +/- 2.4 or 200 steps # Cart Pos, Velocity, Pole Angle, Velocity # 12 degrees = .2094 radians device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu") print('Device = {}'.format(device)) ``` ## The Algorithm ### Learning Algorithm We are using the DDPG. I liked the simple systems diagram that Prof. Sergey Levine from UC Berkeley uses for his CS294 Deep Reinforcement Learning class[http://rail.eecs.berkeley.edu/deeprlcourse/] <img src="RL_Systems_Flow.png"> The major components of the algorithm are: 1. `Actor` implemented as a Deep Neural Network whih consists of fully connected layers. 2. `Critic` implemented as a Deep Neural Network which consists of fully connected networks 3. `Experience replay buffer` - in order to train the network we take actions and then store the results in the replay buffer. 
The replay buffer is a circular buffer and it has methods to sample a random batch 3. `The Agent` brings all of the above together. It interacts with the environment by taking actions based on a policy, collects rewards and the observation feedback, then stores the experience in the replay buffer and also initiates a learning step on the actor and critic networks * The agent has 3 main components viz: 1. The DDPG Orchestrator which interacts with the environment by taking actions and then unpacking the returned package to rewards, state space et al. 2. It also has to do housekeeping like tracking scores, store high performant models and check when the problem is solved 3. The 3rd component is the most interesting one - it gives the agent the capability to hunt for the right policy !! ### Pseudo Code <img src="DDPG_Alg.png"> ``` class Actor(nn.Module): """Actor (Policy) Model.""" def __init__(self, state_size=state_size, action_size=action_size, seed=42, fc_units=FC_UNITS_ACTOR): """Initialize parameters and build model. 
Params ====== state_size (int): Dimension of each state, defaults to the global state size from the env action_size (int): Dimension of each action, defaults to the global action size from the env seed (int): Random seed fc_units (list(int)): Number of nodes in the hidden layers as a list ** Hard coded as a 3 layer network """ super(Actor, self).__init__() self.seed = torch.manual_seed(seed) self.model = nn.Sequential( #nn.BatchNorm1d(state_size), nn.Linear(state_size,fc_units[0]), nn.ReLU(), #nn.BatchNorm1d(fc_units[0]), nn.Linear(fc_units[0],fc_units[1]), nn.ReLU(), #nn.BatchNorm1d(fc_units[1]), nn.Linear(fc_units[1],action_size), # nn.Tanh() # for continuous -1 to +1 nn.Softmax(dim=-1) # nn.Sigmoid() # for 0-1 ) self.model.apply(self.init_weights) def init_weights(self,m): if (type(m) == nn.Linear): nn.init.xavier_uniform_(m.weight) #nn.init.xavier_normal_(m.weight) # nn.init.kaiming_normal_(m.weight) m.bias.data.fill_(1.0) def forward(self, state): """Build an actor (policy) network that maps states -> actions.""" return self.model(state) class Critic(nn.Module): """Critic (Value) Model.""" def __init__(self, state_size = state_size, action_size = action_size, seed=42, fc_units=FC_UNITS_CRITIC): """Initialize parameters and build model. Params ====== state_size (int): Dimension of each state, defaults to the global state size from the env action_size (int): Dimension of each action, defaults to the global action size from the env seed (int): Random seed fc_units (list(int)): Number of nodes in the hidden layers as a list ** Hard coded as a 3 layer network """ super(Critic, self).__init__() self.seed = torch.manual_seed(seed) self.hc_1 = nn.Sequential( nn.Linear(state_size,fc_units[0]), nn.ReLU(), # leaky relu ? # nn.BatchNorm1d(fc_units[0]) ) self.hc_2 = nn.Sequential( nn.Linear(fc_units[0]+action_size,fc_units[1]), nn.ReLU(), # leaky relu ? 
nn.Linear(fc_units[1],1)
        )

        # Initialize the layers
        self.hc_1.apply(self.init_weights)
        self.hc_2.apply(self.init_weights)

    def forward(self, state, action):
        """Build a critic (value) network that maps (state, action) pairs -> Q-values."""
        # Embed the state first, then concatenate the action before the second
        # stack of layers — the standard DDPG critic wiring.
        xs = self.hc_1(state)
        x = torch.cat((xs, action), dim=1)
        x = self.hc_2(x)
        return (x)

    def init_weights(self,layer):
        # Xavier-uniform weights for every linear layer; biases start at 1.0.
        if (type(layer) == nn.Linear):
            nn.init.xavier_uniform_(layer.weight)
            layer.bias.data.fill_(1.0)

class OUNoise:
    """Ornstein-Uhlenbeck process.

    Temporally correlated exploration noise added to the actor's actions.
    """

    def __init__(self, size, seed, mu=0., theta=0.15, sigma=0.2):
        """Initialize parameters and noise process.

        Params
        ======
            size (int): dimension of the noise vector (the action size)
            seed (int): random seed
            mu (float): long-run mean the process reverts to
            theta (float): mean-reversion rate
            sigma (float): scale of the random perturbation
        """
        self.mu = mu * np.ones(size)
        self.theta = theta
        self.sigma = sigma
        self.seed = random.seed(seed)  # random.seed returns None; kept for parity with the other classes
        self.reset()

    def reset(self):
        """Reset the internal state (= noise) to mean (mu)."""
        self.state = copy.copy(self.mu)

    def sample(self):
        """Update internal state and return it as a noise sample."""
        x = self.state
        # NOTE(review): random.random() is uniform on [0, 1) (mean 0.5), not the
        # zero-mean Gaussian of the textbook OU formulation, so this noise has a
        # positive bias. Confirm this is intentional before reusing elsewhere.
        dx = self.theta * (self.mu - x) + self.sigma * np.array([random.random() for i in range(len(x))])
        self.state = x + dx
        return self.state

class ReplayBuffer:
    """Fixed-size buffer to store experience tuples."""

    def __init__(self, action_size, buffer_size, batch_size, seed):
        """Initialize a ReplayBuffer object.
Params ====== buffer_size (int): maximum size of buffer batch_size (int): size of each training batch """ self.action_size = action_size self.memory = deque(maxlen=buffer_size) # internal memory (deque) self.batch_size = batch_size self.experience = namedtuple("Experience", field_names=["state", "action", "reward", "next_state", "done"]) self.seed = random.seed(seed) def add(self, state, action, reward, next_state, done): """Add a new experience to memory.""" e = self.experience(state, action, reward, next_state, done) self.memory.append(e) def sample(self): """Randomly sample a batch of experiences from memory.""" experiences = random.sample(self.memory, k=self.batch_size) states = torch.from_numpy(np.vstack([e.state for e in experiences if e is not None])).float().to(device) actions = torch.from_numpy(np.vstack([e.action for e in experiences if e is not None])).float().to(device) rewards = torch.from_numpy(np.vstack([e.reward for e in experiences if e is not None])).float().to(device) next_states = torch.from_numpy(np.vstack([e.next_state for e in experiences if e is not None])).float().to(device) dones = torch.from_numpy(np.vstack([e.done for e in experiences if e is not None]).astype(np.uint8)).float().to(device) return (states, actions, rewards, next_states, dones) def __len__(self): """Return the current size of internal memory.""" return len(self.memory) class Agent(): """Interacts with and learns from the environment.""" def __init__(self, state_size, action_size, random_seed=42): """Initialize an Agent object. 
Params ====== state_size (int): dimension of each state action_size (int): dimension of each action random_seed (int): random seed """ self.state_size = state_size self.action_size = action_size self.seed = random.seed(random_seed) # Actor Network (w/ Target Network) self.actor_local = Actor(state_size, action_size, random_seed).to(device) self.actor_target = Actor(state_size, action_size, random_seed).to(device) self.actor_optimizer = optim.Adam(self.actor_local.parameters(), lr=LR_ACTOR) # Critic Network (w/ Target Network) self.critic_local = Critic(state_size, action_size, random_seed).to(device) self.critic_target = Critic(state_size, action_size, random_seed).to(device) self.critic_optimizer = optim.Adam(self.critic_local.parameters(), lr=LR_CRITIC, weight_decay=WEIGHT_DECAY) # Noise process self.noise = OUNoise(action_size, random_seed) # Replay memory self.memory = ReplayBuffer(action_size, BUFFER_SIZE, BATCH_SIZE, random_seed) def step(self, state, action, reward, next_state, done): """Save experience in replay memory, and use random sample from buffer to learn.""" # Save experience / reward self.memory.add(state, action, reward, next_state, done) # Learn, if enough samples are available in memory if len(self.memory) > BATCH_SIZE: experiences = self.memory.sample() self.learn(experiences, GAMMA) def act(self, state, add_noise=False): # True """Returns actions for given state as per current policy.""" state = torch.from_numpy(state).float().to(device) #print(state) self.actor_local.eval() with torch.no_grad(): action = self.actor_local(state).cpu().data.numpy() self.actor_local.train() if add_noise: action += self.noise.sample() return action # np.clip(action, -1, 1) def reset(self): self.noise.reset() def learn(self, experiences, gamma): """Update policy and value parameters using given batch of experience tuples. 
Q_targets = r + γ * critic_target(next_state, actor_target(next_state)) where: actor_target(state) -> action critic_target(state, action) -> Q-value Params ====== experiences (Tuple[torch.Tensor]): tuple of (s, a, r, s', done) tuples gamma (float): discount factor """ states, actions, rewards, next_states, dones = experiences # ---------------------------- update critic ---------------------------- # # Get predicted next-state actions and Q values from target models actions_next = self.actor_target(next_states) Q_targets_next = self.critic_target(next_states, actions_next) # Compute Q targets for current states (y_i) Q_targets = rewards + (gamma * Q_targets_next * (1 - dones)) # Compute critic loss Q_expected = self.critic_local(states, actions) critic_loss = F.mse_loss(Q_expected, Q_targets) # Minimize the loss self.critic_optimizer.zero_grad() critic_loss.backward() self.critic_optimizer.step() # ---------------------------- update actor ---------------------------- # # Compute actor loss actions_pred = self.actor_local(states) actor_loss = -self.critic_local(states, actions_pred).mean() # Minimize the loss self.actor_optimizer.zero_grad() actor_loss.backward() self.actor_optimizer.step() # ----------------------- update target networks ----------------------- # self.soft_update(self.critic_local, self.critic_target, TAU) self.soft_update(self.actor_local, self.actor_target, TAU) def soft_update(self, local_model, target_model, tau): """Soft update model parameters. θ_target = τ*θ_local + (1 - τ)*θ_target Params ====== local_model: PyTorch model (weights will be copied from) target_model: PyTorch model (weights will be copied to) tau (float): interpolation parameter """ for target_param, local_param in zip(target_model.parameters(), local_model.parameters()): target_param.data.copy_(tau*local_param.data + (1.0-tau)*target_param.data) ``` # 1.6. 
Instantiate an agent The state space and the action space dimensions come from the environment ``` agent = Agent(state_size=state_size, action_size=action_size, random_seed=42) print(agent.actor_local) print(agent.critic_local) ``` ## Part 2 - Learn & Train ----- ### 2.1. DDPG Algorithm Define the DDPG Algorithm. Once we have defined the foundations (network, buffer, actor, critic, agent and so forth), the DDPG is relatively easy. It has a few responsibilities: 1. Orchastrate the episodes calling the appropriate methods 2. Display a running commentry of the scores and episode count 3. Check the success criterion for solving the environment i.e. if running average is > 195 and print the episode count 4. Store the model with the maximum score 5. Keep track of the scores for analytics at the end of the run ``` def ddpg(n_episodes=1000): scores_window = deque(maxlen=100) scores = [] score = 0 max_score = -np.Inf has_seen_195 = False for i_episode in range(1, n_episodes+1): state = env.reset() # reset the environment agent.reset() score = 0 max_steps = 0 while True: action = agent.act(state) # treat as softmax probabilities act = int(np.random.choice(action_size, p=action)) # for Softmax # act = 1 if action > 0.5 else 0 # for Sigmoid next_state, reward, done, _ = env.step(act) agent.step(state, action, reward, next_state, done) state = next_state score += reward if done: break max_steps += 1 scores_window.append(score) scores.append(score) print('\rEpisode {}\tAverage Score: {:6.2f}\tScore: {:6.2f}\tMax_steps : {:3d}'.\ format(i_episode, np.mean(scores_window), score, max_steps), end="") if i_episode % 100 == 0: print('\rEpisode {}\tAverage Score: {:.2f}'.format(i_episode, np.mean(scores_window))) if (np.mean(scores_window) >= 195.0) and (not has_seen_195): print('\nEnvironment solved in {:d} episodes!\tAverage Score: {:5.2f}'.\ format(i_episode-100, np.mean(scores_window))) # torch.save(agent.qnetwork_local.state_dict(), 'checkpoint.pth') has_seen_195 = True break # 
Early stop # To see how far it can go : # comment the break out. The has_seen_195 will stop printing the "Environment Solved" every time # Store the best model if desired if STORE_MODELS: if np.mean(scores_window) > max_score: max_score = np.mean(scores_window) torch.save(agent, 'checkpoint.pth') # print(' .. Storing with score {}'.format(max_score)) return scores ``` ### 2.2. The actual training Run 1. Run the DDPG 2. Calculate and display end-of-run analytics viz. descriptive statistics and a plot of the scores ``` start_time = time.time() scores = ddpg(n_episodes=1500) # 2000,1500, 1000 ; For quick functional test use 100,500 env.close() # Close the environment print('Elapsed : {}'.format(timedelta(seconds=time.time() - start_time))) print(datetime.now()) # # plot the scores fig = plt.figure() ax = fig.add_subplot(111) plt.plot(np.arange(len(scores)), scores) plt.ylabel('Score') plt.xlabel('Episode #') plt.show() print(agent.actor_local) # print(agent.critic_local) print('Max Score {:.2f} at {}'.format(np.max(scores), np.argmax(scores))) print('Percentile [25,50,75] : {}'.format(np.percentile(scores,[25,50,75]))) print('Variance : {:.3f}'.format(np.var(scores))) ``` ### Sample Run #### _In 130 episodes !_ <img src="ddpg_run_02.png"> <img src="ddpg_run_01.png"> <img src="ddpg_run.png"> ### 2.3. Run logs & Notes 1. A place to keep the statistics and qualitative observations 2. It is easier to keep notes as soon as a run is done 3. Also a place to keep future explorations ### Logs #### 1/13/19 1. Tried Softmax for actor, tried Sigmoid (with action_size=1) - was tanh() * LR : Actor = 0.01, Critic = 0.001 * network 8 X 8 1. Doesn't go beyond 10-11 steps when episode = 1500, buffer = 512 2. No clue when it will wake up - episode = 15000; no dice so far 3. Buffer = 2048 no change 4. Took out batchnorm 5. Back to softmax 6. Took out clip(-1,1) * No change score ~10 7. tau = 0.05 - started learning - Am seeing the scores in the 20s! See if it passes nope stays at 20s 8. 
network 400 X 300 ! goes back to a score of 10
1. Network 36 X 8 ! Nope no good
1. Network 4 X 4 - very small !
1. Buffer = 4096, Batch = 32 meanders around a score of 10 for 10000 episodes !
1. softmax(-1) definite progress - goes up to 200, but then gets 10 as well ! After 1100, it is steady !
 * Solved in 1029 episodes !
 * Was about to throw in the towel ! Took me a day ! Was going to go back to A2C !
 * From 1300 onwards it gets perfect 200
 * __added noise and that saved the day !__
1. back to 16 X 8 Network, batch = 64, 1500 episodes (was 15,000 ! and didn't do any good. The progress of a learning network is evident)
 * would have solved under 300, but a few black-sheep episodes bring the average down ;o(
 * 860 episodes. Between 300 and 800, it went down and came back !
1. Cleaned up a little bit and ran again - 282 Episodes !

#### 2/2/19
1. Again got into a rut — nothing beyond a score of 11
2. Changed network to FC4-FC4
3. Better but - couldn't solve with 1000 episodes, learns and then unlearns
4. Increasing episodes back to 15000. Nope
5. Batch = 32; good steady progress. Would it make it ? Nope
6. Noise = True, was False for some reason. Won't work for softmax
7. Network FC16-FC8 - solved in 533 Steps !

### 2.4. Test Area

```
env.close()
```

## Part 3 : Run a stored Model or the learned model

### Note
Here we are saving and loading the whole model.
We cal also save & load the state dict https://pytorch.org/tutorials/beginner/saving_loading_models.html ``` # Load parameters from the saved file # The file has the parameters of the model that has the highest score during training # agent = torch.load('checkpoint.pth') start_time = time.time() scores=[] for i in range(10): # 10 episodes state = env.reset() # reset the environment & get the current state score = 0 # initialize the score steps = 0 # Keep track of the number of steps while True: action = agent.act(state) # select an action, treat as softmax probabilities act = int(np.random.choice(action_size, p=action)) # for Softmax next_state, reward, done, _ = env.step(act) # send the action to the environment score += reward # update the score state = next_state # roll over the state to next time step if done: # exit loop if episode finished break else: steps += 1 scores.append(score) print("Episode : {:2d} Score : {:5.2f} Steps : {}".format(i+1,score,steps)) # Print stats at the end the run print('Mean of {} episodes = {}'.format(i+1,np.mean(scores))) print('Elapsed : {}'.format(timedelta(seconds=time.time() - start_time))) print(datetime.now()) env.close() ``` ### _That's all Folks !!_
github_jupyter
``` %matplotlib inline from matplotlib import style style.use('fivethirtyeight') import matplotlib.pyplot as plt import numpy as np import pandas as pd import datetime as dt ``` # Reflect Tables into SQLAlchemy ORM ``` # Python SQL toolkit and Object Relational Mapper import sqlalchemy from sqlalchemy.ext.automap import automap_base from sqlalchemy.orm import Session from sqlalchemy import create_engine, func # create engine to hawaii.sqlite engine = create_engine("sqlite:///resources/hawaii.sqlite") # reflect an existing database into a new model Base = automap_base() # reflect the tables Base.prepare(engine, reflect = True) # View all of the classes that automap found Base.classes.keys() # Save references to each table Measurement = Base.classes.measurement Station = Base.classes.station # Create our session (link) from Python to the DB session = Session(engine) ``` # Exploratory Precipitation Analysis ``` # Find the most recent date in the data set. last_row = engine.execute('SELECT * FROM Measurement ORDER BY date DESC').fetchall()[0] last_row # Design a query to retrieve the last 12 months of precipitation data and plot the results. # Starting from the most recent data point in the database. # Calculate the date one year from the last date in data set. 
previous_year = dt.date(2017, 8, 23)-dt.timedelta(days=365) # Perform a query to retrieve the data and precipitation scores results = session.query(Measurement.date, Measurement.prcp).filter(Measurement.date >= previous_year).all() # # Save the query results as a Pandas DataFrame and set the index to the date column df = pd.DataFrame(results, columns = ['date', 'precipitation']) df.set_index(df['date'], inplace = True) # # Sort the dataframe by date df = df.sort_index() # Use Pandas Plotting with Matplotlib to plot the data df.plot(rot = 90) plt.ylabel('Inches') plt.tight_layout() plt.savefig("precipitation.png") plt.show() # Use Pandas to calcualte the summary statistics for the precipitation data prcp_summary = df.describe() prcp_summary ``` # Exploratory Station Analysis ``` # Design a query to calculate the total number stations in the dataset session.query(func.count(Station.station)).all() # Design a query to find the most active stations (i.e. what stations have the most rows?) # List the stations and the counts in descending order. session.query(Measurement.station, func.count(Measurement.station)).\ group_by(Measurement.station).order_by(func.count(Measurement.station).desc()).all() # Using the most active station id from the previous query, calculate the lowest, highest, and average temperature. 
session.query(func.min(Measurement.tobs), func.max(Measurement.tobs), func.avg(Measurement.tobs)).\ filter(Measurement.station == 'USC00519281').all() # Using the most active station id # Query the last 12 months of temperature observation data for this station and plot the results as a histogram import datetime as dt from pandas.plotting import table prev_year = dt.date(2017, 8, 23)-dt.timedelta(days=365) results = session.query(Measurement.tobs).\ filter(Measurement.station == 'USC00519281').\ filter(Measurement.date >= previous_year).all() df = pd.DataFrame(results,columns = ['tobs']) df.plot.hist(bins = 10) plt.tight_layout() plt.xlabel("Temperature") plt.tight_layout() plt.savefig("tobs.png") plt.show() ``` # Close session ``` # Close Session session.close() ```
github_jupyter
# WalMart Trip Type ``` import pandas as pd import numpy as np import scipy.stats as stats import matplotlib as mpl import matplotlib.pyplot as plt import seaborn as sns import statsmodels as sm import math import tools plt.rcParams["figure.figsize"] = (10, 8) mpl.style.use('bmh') %matplotlib inline df = pd.read_csv('input/train.csv') u = df.groupby('VisitNumber') ``` ## Look at a visit ``` u.get_group(8) ``` ## How many unique items of each column are there? ``` [(x, len(df[x].unique())) for x in ['TripType', 'Upc', 'Weekday', 'DepartmentDescription', 'FinelineNumber']] ``` ## What are the DepartmentDescription Factors? ``` dds = [repr(x) for x in list(set(df['DepartmentDescription']))] dds.sort() for d in dds: print(d) df['ScanCount'].describe() df['ScanCount'].hist(bins=100) ``` ## How many NA's are there by column? ``` df.isnull().sum() ``` ### What is the overlap between missing NAs in different columns? ``` len(df[df['DepartmentDescription'].isnull() & df['Upc'].isnull()]) len(df[df['DepartmentDescription'].isnull() & df['FinelineNumber'].notnull()]) len(df[df['FinelineNumber'].isnull() & df['Upc'].notnull()]) ``` ### When finelineNumber or Upc is NA, what departments do they come from (when not also NA)? ``` df[df['FinelineNumber'].isnull() & df['DepartmentDescription'].notnull()]['DepartmentDescription'].value_counts() df[df['Upc'].isnull() & df['DepartmentDescription'].notnull()]['DepartmentDescription'].value_counts() ``` ### When Upc is NA, what are the scan counts? 
```
# Scan counts for rows with a department but a missing Upc / FinelineNumber.
df[df['Upc'].isnull() & df['DepartmentDescription'].notnull()]['ScanCount'].value_counts()

df[df['FinelineNumber'].isnull() & df['DepartmentDescription'].notnull()]['ScanCount'].value_counts()
```

## TripType by FineLineNumber

```
pd.crosstab(index=df['FinelineNumber'], columns=df['TripType']).idxmax()
```

## Most common DepartmentDescription for each TripType

```
pd.crosstab(index=df['DepartmentDescription'], columns=df['TripType']).idxmax()
```

## Most common Weekday for each TripType

```
pd.crosstab(index=df['Weekday'], columns=df['TripType']).idxmax()
```

## Most common TripType for each weekday

```
pd.crosstab(index=df['TripType'], columns=df['Weekday']).hist(figsize=(20,10))
```

# Clean data

```
# Per-visit scan counts pivoted by department and by fineline number.
dd = (df.pivot_table('ScanCount', ['VisitNumber'], ['DepartmentDescription']))

fln = df.pivot_table('ScanCount', ['VisitNumber'], ['FinelineNumber'])

# Encode the weekday name as an integer index 0-6.
weekdays = ['Monday', 'Tuesday', 'Wednesday', 'Thursday', 'Friday', 'Saturday', 'Sunday']
wd = df[['VisitNumber', 'Weekday']].drop_duplicates(subset='VisitNumber')
wd['Weekday'] = wd['Weekday'].apply(lambda x: weekdays.index(x))

trip_type = df[['VisitNumber', 'TripType']].drop_duplicates(subset='VisitNumber')

# NOTE(review): this reassignment clobbers the department pivot table built
# above — the `dd` pivot is never used afterwards. Confirm which one is intended.
dd = df[['VisitNumber', 'TripType']].drop_duplicates()
dd['TripType'].value_counts()

result = trip_type.join(dd, on='VisitNumber')
result = result.join(fln, on='VisitNumber')
# NOTE(review): this assigns by index alignment, not by VisitNumber — verify the
# indices of `result` and `wd` actually line up.
result['Weekday'] = wd['Weekday']

result2 = result.fillna(0.0)
result2

# Flag returned items (negative scan counts) and total them per visit.
df['Returns'] = df['ScanCount'].apply(lambda x: 1 if x < 0 else 0)
rtns = df.pivot_table('Returns', ['VisitNumber'], aggfunc=sum)
# NOTE(review): the result of this apply is discarded — assign it if the
# binarised flag is meant to be kept.
rtns.apply(lambda x: 1 if x > 0 else 0)

# NOTE(review): `dd` is reassigned yet again here, now to a sorted list of
# department names.
dd = list(set(df['DepartmentDescription'].fillna('')))
dd.sort()
dd

# Print the 2000 most frequent Upc codes, comma-separated.
vcs = df['Upc'].value_counts()
for x in [int(x) for x in list(vcs.head(2000).index)]:
    print('{}, '.format(x))
```
github_jupyter
``` import os, uuid from dotenv import load_dotenv # To access environment variables from a .env file from pyairtable import Api, Base, metadata # To access Airtable import pandas as pd # To work with data from Airtable import azure_helpers # Helper functions from datetime import datetime, timedelta from azure.storage.blob import BlobServiceClient, BlobClient, ContainerClient, __version__ #to work with Azure # Get variables from .env file load_dotenv() AIRTABLE_API_KEY = os.environ.get('AIRTABLE_API_KEY') AIRTABLE_BASE_IDS = os.environ.get('AIRTABLE_BASE_IDS').split(",") CONNECT_STR = os.environ.get('AZURE_STORAGE_CONNECTION_STRING') ACCOUNT_NAME = os.environ.get('ACCOUNT_NAME') ACCOUNT_KEY = os.environ.get('ACCOUNT_KEY') CONTAINER_NAME = os.environ.get('CONTAINER_NAME') DIRECTORIES = os.environ.get('DIRECTORIES').split(",") # Load Azure Client blob_service_client = BlobServiceClient.from_connection_string(CONNECT_STR) container_client = container_client = blob_service_client.get_container_client(CONTAINER_NAME) for base_id in AIRTABLE_BASE_IDS: # Load Base api = Api(AIRTABLE_API_KEY) my_base = Base(AIRTABLE_API_KEY, base_id) base_name = base_id schema = metadata.get_base_schema(my_base) tables = schema['tables'] if not os.path.isdir("Bases"): os.makedirs('Bases') # Check if Each Folder Directory Exists, if not make one for dir in DIRECTORIES: path = f'Bases/{base_name}/{dir}' folderExists = os.path.isdir(path) if folderExists: pass else: os.makedirs(path) # For each table in a base for table in tables: # Get Table Data and create CSV from JSON table_name = table['name'] filename = f'Bases/{base_name}/Tables/{table_name}.csv' azure_filename = f'Bases/{base_name}/Tables/{table_name}.csv' print(f'Getting data for Table: {table_name}') # Format Table Data table_data_raw = my_base.all(table['id'], cell_format="string", user_locale='en-ie', time_zone='America/New_York') # Update Locale and Timezone table_data = azure_helpers.add_record_ids(table_data_raw) table_df = 
pd.DataFrame(table_data) table_df.columns = table_df.columns.str.replace(' ', '_') table_df.columns = table_df.columns.str.lower() table_csv = table_df.to_csv(filename, index=False) # Check Azure Container for contents and instance of CSV print('Checking Azure for existing CSV') check_azure = container_client.list_blobs() blob_names = [x.name for x in check_azure] blob_count = len(blob_names) no_content = blob_count == 0 file_exists = None blob_client = blob_service_client.get_blob_client(container=CONTAINER_NAME, blob=filename) # If there aren't any contents in the container skip to next part if no_content: file_exists = False # If there are contents in the container check to see if file exists else: local_filename = f'Bases/{base_name}/Staging/'+table['id']+'.csv' file_exists = filename in blob_names # If File exists, download file, append, and upload to container then remove from staging if file_exists: print('Existing file found, uploading new version') with open(azure_filename, "wb") as download_file: download_file.write(blob_client.download_blob().readall()) existing_df = pd.read_csv(azure_filename) frames = [existing_df, table_df] upload_df = pd.concat(frames).drop_duplicates(subset='airtable_id') upload_df.to_csv(filename, index=False) with open(filename, "rb") as upload_file: blob_client.upload_blob(data=upload_file,overwrite=True) # os.remove(local_filename) # If no file exists with that table id/name in container uplpad the CSV Upload File else: print('No existing file found, uploading a new file to Azure') with open(filename, "rb") as upload_file: blob_client.upload_blob(data=upload_file,overwrite=True) print('\n\n\n') ```
github_jupyter
AIX360: https://github.com/IBM/AIX360 ``` !git clone https://github.com/IBM/AIX360 cd AIX360 !pip install -e . from google.colab import drive drive.mount('/gdrive') %cd /gdrive attach_probe_checkpoint.print_layer_labels("/gdrive/My Drive/braxai/model.h5") tf.reset_default_graph() ``` # Introduction to Prof-Weight This method uses white-box access to the layers of a pre-trained complex neural network model, a training dataset and a training algorithm for a simple model (presumably with much smaller number of parameters compared to the complex model like Decision Tree, Neural Nets with very few layers) and produces a new model in the simple model hypothesis class. The aim is to make sure that this new simple model (trained with the help of a complex model) has a higher accuracy on the training dataset than the output of the simple model training algorithm working **only** with the training dataset. The reasons why one would prefer a simple model is because of resource constraints or because of better interpretability offered by the simple model. The main idea is the following: For every sample (x,y) in the training dataset, we would like to produce a sample weight w(x,y) which indicates how easy/hard the example is to learn. w(x,y) is higher if the example is easier to learn for the complex model. Prof-Weight obtains these weights as follows: a) Take the complex model layer L (flattened layer output). Create dataset consisting L(x),y. b) Train a logistic classifier (we call it the probe classifier) that uses the representation L(x) and predicts y using a linear model. Let the probabilistic confidences of the probe classifier model trained only on Layer L representation be p(x,y,L). c) Repeat steps a and b for the top K layers in the complex model. d) Now take every point (x,y) in the training dataset available for the simple model to train. Let w(x,y)= (\sum_{L} p(x,y,L))/ num of layers used. These are the new sample weights. 
e) Use the simple model training algorithm with w(x,y) as the sample weights. Intuition: Measure of hardness or easiness of the sample may not be indicated by the final layer confidences in a highly confident complex deep neural network model in a manner that is useful for training a much simpler model. So we track how easy is a sample to predict from lower level layers's representation. So the average of the probe classifier's predictions for top K layers being high means that that sample's prediction is confident from a much lower layer indicating easiness of the sample. For a hard example, only last few layer confidences will be higher. Earlier layer representations won't have enough distinguishability. We demonstrate this method where the complex model (an 18 layer Resnet) is trained on 30000 samples of the CIFAR-10 training dataset. The simple model is a Resnet with only one Resblock. The training dataset available is the rest of the 20000 samples in the standard CIFAR-10 dataset. The test dataset is the 10000 samples as in standard CIFAR-10 dataset. Demonstrations: a) We demonstrate how to attach probes to a specific layer on a complex model stored as a tensorflow checkpoint. b) How to evaulate the flattened layer output and then use it to train a logistic probe classifier to predict y. c) Use pre-stored probe classifiers' confidences for the top K layers (we dont show probe training on all the top K layers in the notebook. However, we do demonstrate probe classifier training on one of the layers) and form sample weights d) Train the simple model using these new sample weights using the Prof-Weight Explainer Class. Complex Model is trained on train1 (30000 samples), Simple Model is alwasy trained on train2 (20000) samples. Probe Classifiers are trained on layer representations of the complex model on train1 samples. However, probe confidences are evaluated on train2 (on which the simple model is also trained) to provide sample weights. 
References for this method: 1. Amit Dhurandhar, Karthikeyan Shanmugam, Ronny Luss, Peder Olsen. "Improving Simple Models with Confidence Profiles", NeurIPS 2018. # Evaluating a Given Layer of a Tensorflow Checkpoint ``` import sys sys.path.append("../../") from aix360.algorithms.profwt import train_probes from aix360.algorithms.profwt import attach_probe_checkpoint from aix360.datasets.cifar_dataset import CIFARDataset import json import numpy as np import tensorflow as tf import os #Obtain parent directory for acccessing various data files. parent_dir = '../../aix360/models/profwt' ``` ## Define a path for the tensorflow checkpoint of a pre-trained complex model This complex Resnet model has been trained using the model definitions obtained from: https://github.com/tensorflow/models/tree/master/research/resnet ``` checkpoint_path = os.path.join(parent_dir, "checkpoints/train_resnetmodel_new1_799.ckpt") ``` ## Load the Dataset on which Layer outputs need to be evaluated. ``` dataset_obj=CIFARDataset("../../aix360/data/cifar_data") x_train1 =dataset_obj.load_file('cifar-10-train1-image.json') y_train1=dataset_obj.load_file('cifar-10-train1-label.json') ``` ## Define a filename where you want layer output to be saved. ``` run=1 os.mkdir(parent_dir+'/data') to_save_filename=parent_dir+'/data/probe_run'+str(run)+'.npy' print(to_save_filename) ``` ## Print Names of all Layers from the model in the checkpoint ``` attach_probe_checkpoint.print_layer_labels(checkpoint_path) tf.reset_default_graph() ``` Identify tensor names corresponding to a) Layer whose output is of interest b) Input layer where the model takes in image/data sample c) Layer where model takes in the labels to fit. 
``` #Fixing a specific operation_name to define the layer output operation_name='unit_1_1/sub_add/add:0' # In this case the probe is intended to be after the second Resnet Block in 18 layer Resnet for CIFAR-10 input_features_name='Placeholder:0' label_name='Placeholder_1:0' #These two correspond to Placeholder tensors for Feature input and label ``` ## Tapping the Layer Output, Evaluating and Storing it in a File attach_probe_eval() function loads a tensorflow checkpoint from a path, takes these inputs: a) Layer name whose output it needs to evaluate) Placeholder Tensor name corresponding to feature input x and c) Placeholder Tensor name corresponding to Label y d) Data samples to evaulate the layer outputs on. ``` pr=attach_probe_checkpoint.attach_probe_eval(input_features_name,label_name,operation_name,x_train1,y_train1,checkpoint_path) np.save(to_save_filename,pr) ``` # Training a Logistic Probe Classifier based on Layer Outputs ## Load the Layer Output File for which Probe Classifier needs to be trained. ``` # In this script, samples for probe training and probe confidence evaluations are done # on the layer outputs obtained from the same dataset. Load the layer output values from the file. #In general, it can be made different by supplying a new y_train2 and probe_eval_input y_train2=y_train1 probe_train_input=np.load(parent_dir+'/data/probe_run1.npy') probe_eval_input=probe_train_input ``` ## Supply Filenames to save Probe Classifier Model, Model Confidences ``` run=1 num_classes=10 to_save_pred_filename=parent_dir+"/data/probe_pred_run"+str(run)+'.npy' to_save_probe_model_filename=parent_dir+"/data/probe_model_run"+str(run) ``` ## Train a Probe Classifier, Evaluate it on Layer Outputs from a Dataset, ## Store the probe confidences in a File. 
``` (log,pred)=train_probes.probe_train_eval(probe_train_input,y_train1,num_classes,probe_eval_input,y_train2,to_save_probe_model_filename) np.save(to_save_pred_filename,pred) ``` # Simple Model Training - Unweighted on the Dataset ``` import keras from keras.layers import Dense, Conv2D, BatchNormalization, Activation from keras.layers import AveragePooling2D, Input, Flatten from keras.optimizers import Adam from keras.callbacks import ModelCheckpoint, LearningRateScheduler from keras.callbacks import ReduceLROnPlateau from keras.preprocessing.image import ImageDataGenerator from keras.regularizers import l2 from keras import backend as K from keras.models import Model import os ``` Import the ProfWeight Explainer Class. ``` from aix360.algorithms.profwt.profwt import ProfweightExplainer from aix360.algorithms.profwt.resnet_keras_model import resnet_v1,lr_schedule,HParams ``` Open the file constaining the training dataset for training the simple model. This file could be (In this example it is different) different from the dataset used for training the complex model. ``` x_train2=dataset_obj.load_file('cifar-10-train2-image.json') y_train2=dataset_obj.load_file('cifar-10-train2-label.json') x_test=dataset_obj.load_file('cifar-10-test-image.json') y_test=dataset_obj.load_file('cifar-10-test-label.json') print('x_train shape:', x_train2.shape) print('y_train shape:', y_train2.shape) ``` Specify checkpoint to save the model after training the simple model on x_train2,y_train2 dataset. ``` save_dir = os.path.join(os.getcwd(), 'saved_models') model_name = 'resnet_target_model_unweighted.h5' if not os.path.isdir(save_dir): os.makedirs(save_dir) filepath = os.path.join(save_dir, model_name) ``` Specify Learning Rate Schedule and all the hyper parameters for training. In this example, these are recommended setting from a popular Keras implementation of resnet models for CIFAR-10. 
``` lr_scheduler = LearningRateScheduler(lr_schedule) lr_reducer = ReduceLROnPlateau(factor=np.sqrt(0.1),cooldown=0,patience=5,min_lr=0.5e-6) hps = HParams(lr_scheduler=lr_scheduler,lr_reducer=lr_reducer,batch_size=128,epochs=200,checkpoint_path=filepath,num_classes=10,complexity_param=1,optimizer=Adam(lr=lr_schedule(0))) ``` ProfWeightExplainer Class has a fit function that trains a simple model using a provided keras model that is built by calling the resnet_v1 function specified in the model file resnet_keras_model.py ``` a=ProfweightExplainer() m=a.fit(x_train2,y_train2,x_test,y_test,resnet_v1,hps,'neural_keras') print("Initial Simple Model Accuracy:",m[1]) ``` # Simple Model training with Prof Weight- Sample Weights Obtained from Probe Confidences of Various Layers. List of all filenames - each of which contains the probe confidences of a specific layer corresponding to the samples in x_train2,y_train2 dataset. This is assumed to have been obtained using functions in attach_probe_checkpoint.py and train_probes.py. ``` list_probe_filenames=[parent_dir+'/probe_output/probe_2_out_pred'+str(x)+'.npy' for x in range(10,17)] ``` Specify a new checkpoint for the simple model with Prof Weight + set identical hyper parameters for learning rate schedule and training. ``` save_dir = os.path.join(os.getcwd(), 'saved_models') model_name = 'resnet_target_model_weighted.h5' if not os.path.isdir(save_dir): os.makedirs(save_dir) filepath = os.path.join(save_dir, model_name) hps = HParams(lr_scheduler=lr_scheduler,lr_reducer=lr_reducer,batch_size=128,epochs=200,checkpoint_path=filepath,num_classes=10,complexity_param=1,optimizer=Adam(lr=lr_schedule(0))) ``` Call the ProfWeight Explainer Class's explain function - This is same as the fit function but additionally specifies list of probe filenames and start and end layer whose confidences need to be averaged to be used as the sample weights. 
This explain function also scores the new simple model obtained after weighted training on the test data set. ``` a.explain(x_train2,y_train2,x_test,y_test,resnet_v1,hps,list_probe_filenames,2,6,'neural_keras') ```
github_jupyter
Types of Classification 1. Binary Classification(Pizza and Not Pizza) 2. Multiclass Classification(Is it pizza, a dog, or chips) 3. Multilabel classification ## Creating data to view and fit ``` from sklearn.datasets import make_circles import numpy as np import tensorflow as tf import pandas as pd import matplotlib.pyplot as plt # Make 1000 examples n_samples = 1000 # Create circles x, y = make_circles(n_samples, noise=0.03, random_state=42) print(x[:10]) print(y[:10]) # Let's visualize the data circles = pd.DataFrame({"X0":x[:, 0], "X1":x[:, 1], "label":y}) circles plt.scatter(x[:, 0], x[:, 1], c=y, cmap=plt.cm.RdYlBu) ``` ## Input and output shapes ``` # Check the shapes or our features abd labels x.shape, y.shape # How many samples we're working with len(x), len(y) # View the first example of features and labels x[0] , y[0] # For the x, it is taking the first row(or major group if you will) ``` ## Steps in modeling ``` tf.random.set_seed(42) model_1 = tf.keras.Sequential([ tf.keras.layers.Dense(1) ]) model_1.compile(loss=tf.keras.losses.BinaryCrossentropy(), optimizer=tf.keras.optimizers.SGD(), metrics=["accuracy"]) model_1.fit(x, y, epochs=100) # Let's train for longer model_1.fit(x, y, epochs=200) model_1.evaluate(x, y) # Let's try adding another layer tf.random.set_seed(42) model_2 = tf.keras.Sequential([ tf.keras.layers.Dense(1), tf.keras.layers.Dense(1) ]) model_2.compile(loss=tf.losses.BinaryCrossentropy(), optimizer=tf.keras.optimizers.Adam(lr=0.001), metrics=["accuracy"]) model_2.fit(x, y, epochs=100) model_2.evaluate(x, y) ``` ## Improving our model -- Add layers, add neurons, change activation functions, change optimization functions, and change the learning rate ``` tf.random.set_seed(42) model_3 = tf.keras.Sequential([ tf.keras.layers.Dense(100), tf.keras.layers.Dense(10), tf.keras.layers.Dense(1) ]) model_3.compile(loss=tf.losses.BinaryCrossentropy(), optimizer=tf.keras.optimizers.Adam(), metrics=["accuracy"]) model_3.fit(x, y, epochs=100) 
model_3.evaluate(x, y) ``` ## To visualize our predictions make following function --It is going to take in x as inputs and y as labels --Create a meshgrid of the different x values --Make predictions across the meshgrid --Plot predictiosn and boundary between prediction and actual ``` def plot_decision_boundary(model, x, y): # Definet he axis boudnaries of the plot and create a meshgrid x_min, x_max = x[:, 0].min() - 0.1, x[:, 0].max() + 0.1 y_min, y_max = x[:, 1].min() - 0.1, x[:, 1].max() + 0.1 xx, yy = np.meshgrid(np.linspace(x_min, x_max, 100), np.linspace(y_min, y_max, 100)) #Create x value x_in = np.c_[xx.ravel(), yy.ravel()] # Stack 2D arrays together # Make predictions y_pred = model.predict(x_in) # Check for multi-class if len(y_pred[0]) > 1: print("doing multiclass classification") y_pred = np.argmax(y_pred, axis=1).reshape(xx.shape) else: print("doing binary classification") y_pred = np.round(y_pred).reshape(xx.shape) # Plot the decision boundary plt.coontourf(xx, yy, y_pred, cmap=) ```
github_jupyter
``` # First let's import # Let's do our imports import numpy as np import pandas as pd import matplotlib.pyplot as plt import math import pandas_profiling import seaborn as sns; sns.set() import category_encoders as ce from statistics import mode from sklearn.tree import DecisionTreeClassifier from sklearn.preprocessing import RobustScaler from sklearn.metrics import accuracy_score from sklearn.preprocessing import StandardScaler from sklearn.model_selection import train_test_split from sklearn.linear_model import LogisticRegression %matplotlib inline # Let's get our data Location = "../data/tanzania/" train = pd.merge(pd.read_csv(Location + 'train_features.csv'), pd.read_csv(Location + 'train_labels.csv')) test = pd.read_csv(Location + 'test_features.csv') sample_submission = pd.read_csv(Location + 'sample_submission.csv') # Now Let's do our test(val)-train split train, val = train_test_split(train, train_size=0.80, test_size=0.20, random_state=42, stratify=train['status_group']) train.shape, val.shape, test.shape train.sample(10) # We're familiar with this data # First let's engineer some of our features def organize(X): """Function will organize the features of train, validate and test sets in the same way""" X = X.copy() # We'll start with latitude and it's tiny values # First, we have to replace these values with 0 X['latitude'] = X['latitude'].replace(-2e-08, 0) # Now those values are the same as the incorrect null values in some of our other features colsw0s = ['longitude', 'latitude', 'population','construction_year'] #also construction year, but we'll get to that # We'll turn those 0's into np.nan the replace them with the mean of their columns for col in colsw0s: X[col] = X[col].replace(0, np.nan) X[col] = X[col].fillna(X[col].mean()) # For construction_year we'll replace with the mode # X['construction_year'] = X['construction_year'].replace(0, np.nan) # XnoNan = X['construction_year'].dropna() # md = XnoNan.mode() # X['construction_year'] = 
X['construction_year'].fillna(md) # Next we'll convert date_recorded to datetime X['date_recorded'] = pd.to_datetime(X['date_recorded'], infer_datetime_format=True) # And we'll extract year_recorded also X['year_recorded'] = X['date_recorded'].dt.year # And we'll drop the column quantity_group as it is a duplicate of quantity X = X.drop(columns='quantity_group') # And we'll fill the missing values for categorical features with 'MISSING' cats = X.select_dtypes(exclude='number').columns for col in cats: X[col] = X[col].fillna('MISSING') return X train = organize(train) val = organize(val) test = organize(test) train.sample(20) # Now we can work with our features # We'll set the target first target = 'status_group' # Now we'll set a df with all train features except for our target and the id column trainfeat = train.drop(columns=[target, 'id']) # We'll also separate the numeric features into one list... numfeat = trainfeat.select_dtypes(include='number').columns.tolist() # And we'll get the cardinality of the non-numeric features... 
cardinality = trainfeat.select_dtypes(exclude='number').nunique() # And then we'll get the list for categorical features with cardinality <= 50 catfeat = cardinality[cardinality <= 50].index.tolist() # Finally we'll combine those lists feats = numfeat + catfeat # Now let's take a look at cardinality cardinality.sort_values() # and also catfeat catfeat # and finally feats feats len(feats) # Now, let's choose some features to use for our decision tree features = ['source_type', 'latitude','longitude'] # Now we can encode, then scale our features # First we arrange our data into X features matrix and y target vector xtrain = train[features] ytrain = train[target] xval = val[features] yval = val[target] xtest = test[features] # Reminder-Encoder: fit_transform on train, transform on val & test encoder = ce.OneHotEncoder(use_cat_names=True) xtrain_encoded = encoder.fit_transform(xtrain) xval_encoded = encoder.transform(xval) xtest_encoded = encoder.transform(xtest) # Reminder-Scaler: fit_transform on train, transform on val & test scaler = RobustScaler() xtrain_scaled = scaler.fit_transform(xtrain_encoded) xval_scaled = scaler.transform(xval_encoded) xtest_scaled = scaler.transform(xtest_encoded) # We'll use logistic regression to get a baseline lr = LogisticRegression(solver='lbfgs', multi_class='auto', n_jobs=-1) lr.fit(xtrain_scaled, ytrain) print('Logistic Regression Model/Baseline') print(f'Train Acc: {lr.score(xtrain_scaled, ytrain)}') print(f'Val Acc: {lr.score(xval_scaled, yval)}') # Now let's try to use a decision tree Classifier dt = DecisionTreeClassifier(random_state=42) dt.fit(xtrain_scaled, ytrain) print('Decision Tree Model') print(f'Train Acc: {dt.score(xtrain_scaled, ytrain)}') print(f'Val Acc: {dt.score(xval_scaled, yval)}') # Big improvement from our baseline, but with some obvious overfitting, # Now let's try to use the same Decision tree Classifier, but with max_depth=10 maxdtdepth=10 dt = DecisionTreeClassifier(max_depth=maxdtdepth, random_state=42) 
dt.fit(xtrain_scaled, ytrain) print(f'Decision Tree Model: Max Depth = {maxdtdepth}') print(f'Train Acc: {dt.score(xtrain_scaled, ytrain)}') print(f'Val Acc: {dt.score(xval_scaled, yval)}') # We'll keep our max_depth at 10 to avoid overfitting # Let's pick some different features and try to get it all done in one workflow features = ['source_type', 'waterpoint_type', 'extraction_type_group', 'quantity', 'population', 'construction_year', 'latitude','longitude'] xtrain = train[features] ytrain = train[target] xval = val[features] yval = val[target] xtest = test[features] # Reminder-Encoder: fit_transform on train, transform on val & test encoder = ce.OneHotEncoder(use_cat_names=True) xtrain_encoded = encoder.fit_transform(xtrain) xval_encoded = encoder.transform(xval) xtest_encoded = encoder.transform(xtest) # Reminder-Scaler: fit_transform on train, transform on val & test scaler = RobustScaler() xtrain_scaled = scaler.fit_transform(xtrain_encoded) xval_scaled = scaler.transform(xval_encoded) xtest_scaled = scaler.transform(xtest_encoded) maxdtdepth=10 dt = DecisionTreeClassifier(max_depth=maxdtdepth, random_state=42) dt.fit(xtrain_scaled, ytrain) print(f'Decision Tree Model: Max Depth = {maxdtdepth}') print(f'Train Acc: {dt.score(xtrain_scaled, ytrain)}') print(f'Val Acc: {dt.score(xval_scaled, yval)}') # Ok, we have a pretty good validation score, so let's predict on xtest and submit ypred = dt.predict(xtest_scaled) submission = sample_submission.copy() submission['status_group'] = ypred submission.to_csv('dtsubmission-03.csv', index=False) ```
github_jupyter
# Day 4 -- Advent of Code 2021: Giant Squid (bingo)
import numpy as np


def _parse_bingo(file):
    """Parse a bingo input file.

    Returns:
        numbers: list[int] -- the numbers in draw order (first line, comma-separated).
        boards: list of 5x5 numpy int arrays (blank-line-separated blocks).
    """
    with open(file) as f:
        blocks = f.read().split('\n\n')
    numbers = [int(n) for n in blocks[0].split(',')]
    boards = []
    for block in blocks[1:]:
        rows = [[int(v) for v in line.split()]
                for line in block.split('\n') if line.strip()]
        if rows:  # tolerate a trailing empty block at end-of-file
            boards.append(np.array(rows))
    return numbers, boards


def _wins(board, drawn):
    """True if any full row OR column of `board` is contained in `drawn`."""
    marked = np.isin(board, list(drawn))
    return bool(marked.all(axis=0).any() or marked.all(axis=1).any())


def _score(board, drawn, last):
    """Bingo score: sum of the board's unmarked cells times the last number drawn."""
    unmarked = board[~np.isin(board, list(drawn))]
    return int(unmarked.sum()) * last


def day4(file):
    """Part 1: return the score of the FIRST board to win.

    Fixed relative to the original notebook cell: columns are now checked as
    well as rows (the puzzle counts a completed row OR column as a win; the
    original only scanned rows).
    """
    numbers, boards = _parse_bingo(file)
    drawn = set()
    for n in numbers:
        drawn.add(n)
        for board in boards:
            if _wins(board, drawn):
                return _score(board, drawn, n)
    raise ValueError("no board ever wins")


if __name__ == '__main__':
    print(day4('example_day4.txt'))
    print(day4('input_day4.txt'))


# PART 2

def day4_2(file):
    """Part 2: return the score of the LAST board to win.

    Each board's score is computed at the moment it wins, so the unmarked sum
    reflects only the numbers drawn up to that board's own bingo.
    """
    numbers, boards = _parse_bingo(file)
    drawn = set()
    remaining = list(range(len(boards)))  # indices of boards that have not won yet
    last_score = None
    for n in numbers:
        drawn.add(n)
        still_playing = []
        for i in remaining:
            if _wins(boards[i], drawn):
                last_score = _score(boards[i], drawn, n)
            else:
                still_playing.append(i)
        remaining = still_playing
        if not remaining:  # every board has won; the latest score is the answer
            break
    if last_score is None:
        raise ValueError("no board ever wins")
    return last_score


if __name__ == '__main__':
    print(day4_2('example_day4.txt'))
    print(day4_2('input_day4.txt'))
github_jupyter
# Case Study: Cricket Tournament A panel wants to select players for an upcoming league match based on their fitness. Players from all significant cricket clubs have participated in a practice match, and their data is collected. Let us now explore NumPy features using the player's data. #### Height of the payers is stored as a regular Python list: height_in. The height is expressed in inches. #### Weight of the payers is stored as a regular Python list: weight_lb. The weight is expressed in pounds. ``` heights_in = [74, 74, 72, 72, 73, 69, 69, 71, 76, 71, 73, 73, 74, 74, 69, 70, 73, 75, 78, 79, 76, 74, 76, 72, 71, 75, 77, 74, 73, 74, 78, 73, 75, 73, 75, 75, 74, 69, 71, 74, 73, 73, 76, 74, 74, 70, 72, 77, 74, 70, 73, 75, 76, 76, 78, 74, 74, 76, 77, 81, 78, 75, 77, 75, 76, 74, 72, 72, 75, 73, 73, 73, 70, 70, 70, 76, 68, 71, 72, 75, 75, 75, 75, 68, 74, 78, 71, 73, 76, 74, 74, 79, 75, 73, 76, 74, 74, 73, 72, 74, 73, 74, 72, 73, 69, 72, 73, 75, 75, 73, 72, 72, 76, 74, 72, 77, 74, 77, 75, 76, 80, 74, 74, 75, 78, 73, 73, 74, 75, 76, 71, 73, 74, 76, 76, 74, 73, 74, 70, 72, 73, 73, 73, 73, 71, 74, 74, 72, 74, 71, 74, 73, 75, 75, 79, 73, 75, 76, 74, 76, 78, 74, 76, 72, 74, 76, 74, 75, 78, 75, 72, 74, 72, 74, 70, 71, 70, 75, 71, 71, 73, 72, 71, 73, 72, 75, 74, 74, 75, 73, 77, 73, 76, 75, 74, 76, 75, 73, 71, 76, 75, 72, 71, 77, 73, 74, 71, 72, 74, 75, 73, 72, 75, 75, 74, 72, 74, 71, 70, 74, 77, 77, 75, 75, 78, 75, 76, 73, 75, 75, 79, 77, 76, 71, 75, 74, 69, 71, 76, 72, 72, 70, 72, 73, 71, 72, 71, 73, 72, 73, 74, 74, 72, 75, 74, 74, 77, 75, 73, 72, 71, 74, 77, 75, 75, 75, 78, 78, 74, 76, 78, 76, 70, 72, 80, 74, 74, 71, 70, 72, 71, 74, 71, 72, 71, 74, 69, 76, 75, 75, 76, 73, 76, 73, 77, 73, 72, 72, 77, 77, 71, 74, 74, 73, 78, 75, 73, 70, 74, 72, 73, 73, 75, 75, 74, 76, 73, 74, 75, 75, 72, 73, 73, 72, 74, 78, 76, 73, 74, 75, 70, 75, 71, 72, 78, 75, 73, 73, 71, 75, 77, 72, 69, 73, 74, 72, 70, 75, 70, 72, 72, 74, 73, 74, 76, 75, 80, 72, 75, 73, 74, 74, 73, 75, 75, 71, 73, 75, 74, 
74, 72, 74, 74, 74, 73, 76, 75, 72, 73, 73, 73, 72, 72, 72, 72, 71, 75, 75, 74, 73, 75, 79, 74, 76, 73, 74, 74, 72, 74, 74, 75, 78, 74, 74, 74, 77, 70, 73, 74, 73, 71, 75, 71, 72, 77, 74, 70, 77, 73, 72, 76, 71, 76, 78, 75, 73, 78, 74, 79, 75, 76, 72, 75, 75, 70, 72, 70, 74, 71, 76, 73, 76, 71, 69, 72, 72, 69, 73, 69, 73, 74, 74, 72, 71, 72, 72, 76, 76, 76, 74, 76, 75, 71, 72, 71, 73, 75, 76, 75, 71, 75, 74, 72, 73, 73, 73, 73, 76, 72, 76, 73, 73, 73, 75, 75, 77, 73, 72, 75, 70, 74, 72, 80, 71, 71, 74, 74, 73, 75, 76, 73, 77, 72, 73, 77, 76, 71, 75, 73, 74, 77, 71, 72, 73, 69, 73, 70, 74, 76, 73, 73, 75, 73, 79, 74, 73, 74, 77, 75, 74, 73, 77, 73, 77, 74, 74, 73, 77, 74, 77, 75, 77, 75, 71, 74, 70, 79, 72, 72, 70, 74, 74, 72, 73, 72, 74, 74, 76, 82, 74, 74, 70, 73, 73, 74, 77, 72, 76, 73, 73, 72, 74, 74, 71, 72, 75, 74, 74, 77, 70, 71, 73, 76, 71, 75, 74, 72, 76, 79, 76, 73, 76, 78, 75, 76, 72, 72, 73, 73, 75, 71, 76, 70, 75, 74, 75, 73, 71, 71, 72, 73, 73, 72, 69, 73, 78, 71, 73, 75, 76, 70, 74, 77, 75, 79, 72, 77, 73, 75, 75, 75, 73, 73, 76, 77, 75, 70, 71, 71, 75, 74, 69, 70, 75, 72, 75, 73, 72, 72, 72, 76, 75, 74, 69, 73, 72, 72, 75, 77, 76, 80, 77, 76, 79, 71, 75, 73, 76, 77, 73, 76, 70, 75, 73, 75, 70, 69, 71, 72, 72, 73, 70, 70, 73, 76, 75, 72, 73, 79, 71, 72, 74, 74, 74, 72, 76, 76, 72, 72, 71, 72, 72, 70, 77, 74, 72, 76, 71, 76, 71, 73, 70, 73, 73, 72, 71, 71, 71, 72, 72, 74, 74, 74, 71, 72, 75, 72, 71, 72, 72, 72, 72, 74, 74, 77, 75, 73, 75, 73, 76, 72, 77, 75, 72, 71, 71, 75, 72, 73, 73, 71, 70, 75, 71, 76, 73, 68, 71, 72, 74, 77, 72, 76, 78, 81, 72, 73, 76, 72, 72, 74, 76, 73, 76, 75, 70, 71, 74, 72, 73, 76, 76, 73, 71, 68, 71, 71, 74, 77, 69, 72, 76, 75, 76, 75, 76, 72, 74, 76, 74, 72, 75, 78, 77, 70, 72, 79, 74, 71, 68, 77, 75, 71, 72, 70, 72, 72, 73, 72, 74, 72, 72, 75, 72, 73, 74, 72, 78, 75, 72, 74, 75, 75, 76, 74, 74, 73, 74, 71, 74, 75, 76, 74, 76, 76, 73, 75, 75, 74, 68, 72, 75, 71, 70, 72, 73, 72, 75, 74, 70, 76, 71, 82, 72, 73, 74, 71, 75, 77, 
72, 74, 72, 73, 78, 77, 73, 73, 73, 73, 73, 76, 75, 70, 73, 72, 73, 75, 74, 73, 73, 76, 73, 75, 70, 77, 72, 77, 74, 75, 75, 75, 75, 72, 74, 71, 76, 71, 75, 76, 83, 75, 74, 76, 72, 72, 75, 75, 72, 77, 73, 72, 70, 74, 72, 74, 72, 71, 70, 71, 76, 74, 76, 74, 74, 74, 75, 75, 71, 71, 74, 77, 71, 74, 75, 77, 76, 74, 76, 72, 71, 72, 75, 73, 68, 72, 69, 73, 73, 75, 70, 70, 74, 75, 74, 74, 73, 74, 75, 77, 73, 74, 76, 74, 75, 73, 76, 78, 75, 73, 77, 74, 72, 74, 72, 71, 73, 75, 73, 67, 67, 76, 74, 73, 70, 75, 70, 72, 77, 79, 78, 74, 75, 75, 78, 76, 75, 69, 75, 72, 75, 73, 74, 75, 75, 73] weights_lb = [180, 215, 210, 210, 188, 176, 209, 200, 231, 180, 188, 180, 185, 160, 180, 185, 189, 185, 219, 230, 205, 230, 195, 180, 192, 225, 203, 195, 182, 188, 200, 180, 200, 200, 245, 240, 215, 185, 175, 199, 200, 215, 200, 205, 206, 186, 188, 220, 210, 195, 200, 200, 212, 224, 210, 205, 220, 195, 200, 260, 228, 270, 200, 210, 190, 220, 180, 205, 210, 220, 211, 200, 180, 190, 170, 230, 155, 185, 185, 200, 225, 225, 220, 160, 205, 235, 250, 210, 190, 160, 200, 205, 222, 195, 205, 220, 220, 170, 185, 195, 220, 230, 180, 220, 180, 180, 170, 210, 215, 200, 213, 180, 192, 235, 185, 235, 210, 222, 210, 230, 220, 180, 190, 200, 210, 194, 180, 190, 240, 200, 198, 200, 195, 210, 220, 190, 210, 225, 180, 185, 170, 185, 185, 180, 178, 175, 200, 204, 211, 190, 210, 190, 190, 185, 290, 175, 185, 200, 220, 170, 220, 190, 220, 205, 200, 250, 225, 215, 210, 215, 195, 200, 194, 220, 180, 180, 170, 195, 180, 170, 206, 205, 200, 225, 201, 225, 233, 180, 225, 180, 220, 180, 237, 215, 190, 235, 190, 180, 165, 195, 200, 190, 190, 185, 185, 205, 190, 205, 206, 220, 208, 170, 195, 210, 190, 211, 230, 170, 185, 185, 241, 225, 210, 175, 230, 200, 215, 198, 226, 278, 215, 230, 240, 184, 219, 170, 218, 190, 225, 220, 176, 190, 197, 204, 167, 180, 195, 220, 215, 185, 190, 205, 205, 200, 210, 215, 200, 205, 211, 190, 208, 200, 210, 232, 230, 210, 220, 210, 202, 212, 225, 170, 190, 200, 237, 220, 170, 193, 190, 150, 
220, 200, 190, 185, 185, 200, 172, 220, 225, 190, 195, 219, 190, 197, 200, 195, 210, 177, 220, 235, 180, 195, 195, 190, 230, 190, 200, 190, 190, 200, 200, 184, 200, 180, 219, 187, 200, 220, 205, 190, 170, 160, 215, 175, 205, 200, 214, 200, 190, 180, 205, 220, 190, 215, 235, 191, 200, 181, 200, 210, 240, 185, 165, 190, 185, 175, 155, 210, 170, 175, 220, 210, 205, 200, 205, 195, 240, 150, 200, 215, 202, 200, 190, 205, 190, 160, 215, 185, 200, 190, 210, 185, 220, 190, 202, 205, 220, 175, 160, 190, 200, 229, 206, 220, 180, 195, 175, 188, 230, 190, 200, 190, 219, 235, 180, 180, 180, 200, 234, 185, 220, 223, 200, 210, 200, 210, 190, 177, 227, 180, 195, 199, 175, 185, 240, 210, 180, 194, 225, 180, 205, 193, 230, 230, 220, 200, 249, 190, 208, 245, 250, 160, 192, 220, 170, 197, 155, 190, 200, 220, 210, 228, 190, 160, 184, 180, 180, 200, 176, 160, 222, 211, 195, 200, 175, 206, 240, 185, 260, 185, 221, 205, 200, 170, 201, 205, 185, 205, 245, 220, 210, 220, 185, 175, 170, 180, 200, 210, 175, 220, 206, 180, 210, 195, 200, 200, 164, 180, 220, 195, 205, 170, 240, 210, 195, 200, 205, 192, 190, 170, 240, 200, 205, 175, 250, 220, 224, 210, 195, 180, 245, 175, 180, 215, 175, 180, 195, 230, 230, 205, 215, 195, 180, 205, 180, 190, 180, 190, 190, 220, 210, 255, 190, 230, 200, 205, 210, 225, 215, 220, 205, 200, 220, 197, 225, 187, 245, 185, 185, 175, 200, 180, 188, 225, 200, 210, 245, 213, 231, 165, 228, 210, 250, 191, 190, 200, 215, 254, 232, 180, 215, 220, 180, 200, 170, 195, 210, 200, 220, 165, 180, 200, 200, 170, 224, 220, 180, 198, 240, 239, 185, 210, 220, 200, 195, 220, 230, 170, 220, 230, 165, 205, 192, 210, 205, 200, 210, 185, 195, 202, 205, 195, 180, 200, 185, 240, 185, 220, 205, 205, 180, 201, 190, 208, 240, 180, 230, 195, 215, 190, 195, 215, 215, 220, 220, 230, 195, 190, 195, 209, 204, 170, 185, 205, 175, 210, 190, 180, 180, 160, 235, 200, 210, 180, 190, 197, 203, 205, 170, 200, 250, 200, 220, 200, 190, 170, 190, 220, 215, 206, 215, 185, 235, 188, 230, 195, 168, 190, 160, 200, 
200, 189, 180, 190, 200, 220, 187, 240, 190, 180, 185, 210, 220, 219, 190, 193, 175, 180, 215, 210, 200, 190, 185, 220, 170, 195, 205, 195, 210, 190, 190, 180, 220, 190, 186, 185, 190, 180, 190, 170, 210, 240, 220, 180, 210, 210, 195, 160, 180, 205, 200, 185, 245, 190, 210, 200, 200, 222, 215, 240, 170, 220, 156, 190, 202, 221, 200, 190, 210, 190, 200, 165, 190, 185, 230, 208, 209, 175, 180, 200, 205, 200, 250, 210, 230, 244, 202, 240, 200, 215, 177, 210, 170, 215, 217, 198, 200, 220, 170, 200, 230, 231, 183, 192, 167, 190, 180, 180, 215, 160, 205, 223, 175, 170, 190, 240, 175, 230, 223, 196, 167, 195, 190, 250, 190, 190, 190, 170, 160, 150, 225, 220, 209, 210, 176, 260, 195, 190, 184, 180, 195, 195, 219, 225, 212, 202, 185, 200, 209, 200, 195, 228, 210, 190, 212, 190, 218, 220, 190, 235, 210, 200, 188, 210, 235, 188, 215, 216, 220, 180, 185, 200, 210, 220, 185, 231, 210, 195, 200, 205, 200, 190, 250, 185, 180, 170, 180, 208, 235, 215, 244, 220, 185, 230, 190, 200, 180, 190, 196, 180, 230, 224, 160, 178, 205, 185, 210, 180, 190, 200, 257, 190, 220, 165, 205, 200, 208, 185, 215, 170, 235, 210, 170, 180, 170, 190, 150, 230, 203, 260, 246, 186, 210, 198, 210, 215, 180, 200, 245, 200, 192, 192, 200, 192, 205, 190, 186, 170, 197, 219, 200, 220, 207, 225, 207, 212, 225, 170, 190, 210, 230, 210, 200, 238, 234, 222, 200, 190, 170, 220, 223, 210, 215, 196, 175, 175, 189, 205, 210, 180, 180, 197, 220, 228, 190, 204, 165, 216, 220, 208, 210, 215, 195, 200, 215, 229, 240, 207, 205, 208, 185, 190, 170, 208, 225, 190, 225, 185, 180, 165, 240, 220, 212, 163, 215, 175, 205, 210, 205, 208, 215, 180, 200, 230, 211, 230, 190, 220, 180, 205, 190, 180, 205, 190, 195] # Importing the numpy library # Convert the lists into NumPy arrays # Print the type of the created arrays # Print the length of the created arrays ``` ### Multiplication with a factor Now, let's convert the height and weight of the players into more generic units. 
* Height in metres * Weight in kilograms ``` # Convert the units of height and weight using appropriate factors (look in the video for the factors) heights_m = weights_kg = # Print the newly created arrays ``` As you can see, the NumPy way is clearly more concise. Even simple mathematical operations on lists required loops, unlike with arrays. ### Deriving new data from existing Now, let us try to calculate the Body Mass Index (BMI) value for the players. The formula for BMI is: $$ BMI = \frac{Weight\ of\ the\ individual}{(Height\ of\ the\ individual) ^ 2} $$ ``` # Calculate the bmi value based on the formula above bmi = # Check the newly created array 'bmi' ``` ### Indexing through arrays For **one-dimensional arrays**, indexing is **similar to Python lists** - indexing starts at 0. ``` # Obtain the 5th element from the array 'bmi' # Obtain the 2nd last element from the array 'bmi' # Obtain the first five elements from the array 'bmi' # Obtain the last three elements from the array 'bmi' ``` ### Subsets ``` # Check for the elements where bmi value is less than 21 # Filter the elements where bmi value is less than 21 # Count the number of elements where bmi value is less than 21 # Find the maximum bmi values among the players # Find the minimum bmi values among the players # Find the average bmi among the players ```
github_jupyter
# Masked vs cropped implementation for Gated PixelCNN

Hi all, in this notebook we will compare the masked implementation of the convolutions from the Gated PixelCNN versus the alternative suggested in the paper: the use of convolution operations with appropriate croppings and paddings to achieve the same result. Let's check it out!

First, we will check if both implementations create the same result. For this we will create a 5x5 matrix filled with ones as our input example.

```
import math

import numpy as np
import tensorflow as tf
from tensorflow import keras
from tensorflow import nn
from tensorflow.keras import initializers

test_ones_2d = np.ones([1, 5, 5, 1], dtype='float32')
print(test_ones_2d[0,:,:,0].squeeze())
```

Now, let's copy the masked implementation that we have been using for our Gated PixelCNN models.

# Masked convolutions

```
class MaskedConv2D(keras.layers.Layer):
    """Convolutional layers with masks extended to work with Gated PixelCNN.

    Convolutional layers with simple implementation of masks type A and B for
    autoregressive models. Extended version to work with the vertical and
    horizontal stacks from the Gated PixelCNN model.

    Arguments:
    mask_type: one of `"V"`, `"A"` or `"B".`
    filters: Integer, the dimensionality of the output space (i.e. the number of output filters in the convolution).
    kernel_size: An integer or tuple/list of 2 integers, specifying the height and width of the 2D convolution window. Can be a single integer to specify the same value for all spatial dimensions.
    strides: An integer or tuple/list of 2 integers, specifying the strides of the convolution along the height and width. Can be a single integer to specify the same value for all spatial dimensions. Specifying any stride value != 1 is incompatible with specifying any `dilation_rate` value != 1.
    padding: one of `"valid"` or `"same"` (case-insensitive).
    kernel_initializer: Initializer for the `kernel` weights matrix.
    bias_initializer: Initializer for the bias vector.
""" def __init__(self, mask_type, filters, kernel_size, strides=1, padding='same', kernel_initializer='glorot_uniform', bias_initializer='zeros'): super(MaskedConv2D, self).__init__() assert mask_type in {'A', 'B', 'V'} self.mask_type = mask_type self.filters = filters if isinstance(kernel_size, int): kernel_size = (kernel_size, kernel_size) self.kernel_size = kernel_size self.strides = strides self.padding = padding.upper() self.kernel_initializer = initializers.get(kernel_initializer) self.bias_initializer = initializers.get(bias_initializer) def build(self, input_shape): kernel_h, kernel_w = self.kernel_size self.kernel = self.add_weight('kernel', shape=(kernel_h, kernel_w, int(input_shape[-1]), self.filters), initializer=self.kernel_initializer, trainable=True) self.bias = self.add_weight('bias', shape=(self.filters,), initializer=self.bias_initializer, trainable=True) mask = np.ones(self.kernel.shape, dtype=np.float32) # Get centre of the filter for even or odd dimensions if kernel_h % 2 != 0: center_h = kernel_h // 2 else: center_h = (kernel_h - 1) // 2 if kernel_w % 2 != 0: center_w = kernel_w // 2 else: center_w = (kernel_w - 1) // 2 if self.mask_type == 'V': mask[center_h + 1:, :, :, :] = 0. else: mask[:center_h, :, :] = 0. mask[center_h, center_w + (self.mask_type == 'B'):, :, :] = 0. mask[center_h + 1:, :, :] = 0. self.mask = tf.constant(mask, dtype=tf.float32, name='mask') def call(self, input): masked_kernel = tf.math.multiply(self.mask, self.kernel) x = nn.conv2d(input, masked_kernel, strides=[1, self.strides, self.strides, 1], padding=self.padding) x = nn.bias_add(x, self.bias) return x ``` With this implementation, we will recreate all convolutional operation that occur inside of the Gated Block. 
These operations are: - Vertical stack - Vertical to horizontal stack - Horizontal stack - convolution layer with mask type "A" - Horizontal stack - convolution layer with mask type "B" IMAGE GATED BLOCK ## Vertical stack ``` mask_type = 'V' kernel_size = (3, 3) conv = MaskedConv2D(mask_type=mask_type, filters=1, kernel_size=kernel_size, padding='same', kernel_initializer='ones', bias_initializer='zeros') result_v = conv(test_ones_2d) print('MASK') print(conv.mask.numpy().squeeze()) print('') print('OUTPUT') print(result_v.numpy().squeeze()) ``` ## Vertical to horizontal stack ``` padding = keras.layers.ZeroPadding2D(padding=((1, 0), 0)) cropping = keras.layers.Cropping2D(cropping=((0, 1), 0)) x = padding(result_v) result = cropping(x) print('INPUT') print(result_v.numpy().squeeze()) print('') print('OUTPUT') print(result.numpy().squeeze()) ``` ## Horizontal stack - convolution layer with mask type "A" ``` mask_type = 'A' kernel_size = (1, 3) conv = MaskedConv2D(mask_type=mask_type, filters=1, kernel_size=kernel_size, padding='same', kernel_initializer='ones', bias_initializer='zeros') result = conv(test_ones_2d) print('MASK') print(conv.mask.numpy().squeeze()) print('') print('OUTPUT') print(result.numpy().squeeze()) ``` ## Horizontal stack - convolution layer with mask type "B" ``` mask_type = 'B' kernel_size = (1, 3) conv = MaskedConv2D(mask_type=mask_type, filters=1, kernel_size=kernel_size, padding='same', kernel_initializer='ones', bias_initializer='zeros') result = conv(test_ones_2d) print('MASK') print(conv.mask.numpy().squeeze()) print('') print('OUTPUT') print(result.numpy().squeeze()) ``` Using the results of the masked approach as reference, let's check the cropped method. # Cropped and padded convolutions ## Vertical stack First, let's checkout this operation that some strategic padding and applying the convolution in "valid" mode to achieve the same result from the masked version. 
``` kernel_h = 2 kernel_w = 3 kernel_size = (kernel_h, kernel_w) padding = keras.layers.ZeroPadding2D(padding=((kernel_h - 1, 0), (int((kernel_w - 1) / 2), int((kernel_w - 1) / 2)))) res = padding(test_ones_2d) conv = keras.layers.Conv2D(filters=1, kernel_size=kernel_size, strides=1, padding='valid', kernel_initializer='ones', bias_initializer='zeros') result_v = conv(res) print('INPUT') print(test_ones_2d.squeeze()) print('') print('PADDED INPUT') print(res.numpy().squeeze()) print('') print('CONV FILTER') print(conv.weights[0].numpy().squeeze()) print('') print('OUTPUT') print(result_v.numpy().squeeze()) ``` Now, let's implement a layer that we will include all the previous operations. ``` class VerticalConv2D(keras.layers.Conv2D): """https://github.com/JesseFarebro/PixelCNNPP/blob/master/layers/VerticalConv2D.py""" def __init__(self, filters, kernel_size, **kwargs): if not isinstance(kernel_size, tuple): kernel_size = (kernel_size // 2 + 1, kernel_size) super(VerticalConv2D, self).__init__(filters, kernel_size, **kwargs) self.pad = tf.keras.layers.ZeroPadding2D( ( (kernel_size[0] - 1, 0), # Top, Bottom (kernel_size[1] // 2, kernel_size[1] // 2), # Left, Right ) ) def call(self, inputs): inputs = self.pad(inputs) output = super(VerticalConv2D, self).call(inputs) return output kernel_h = 2 kernel_w = 3 kernel_size = (kernel_h, kernel_w) conv = VerticalConv2D(filters=1, kernel_size=kernel_size, strides=1, padding='valid', kernel_initializer='ones', bias_initializer='zeros') result_v = conv(test_ones_2d) print('INPUT') print(test_ones_2d.squeeze()) print('') print('CONV FILTER') print(conv.weights[0].numpy().squeeze()) print('') print('OUTPUT') print(result_v.numpy().squeeze()) ``` ## Vertical to horizontal stack In this operation, the implementation continue the same. 
``` padding = keras.layers.ZeroPadding2D(padding=((1, 0), 0)) cropping = keras.layers.Cropping2D(cropping=((0, 1), 0)) x = padding(result_v) result = cropping(x) print('INPUT') print(result_v.numpy().squeeze()) print('') print('OUTPUT') print(result.numpy().squeeze()) ``` ## Horizontal stack - convolution layer with mask type "A" Again, let's check each operation step by step. ``` kernel_size = (1, 1) conv = keras.layers.Conv2D(filters=1, kernel_size=kernel_size, strides=1, kernel_initializer='ones', bias_initializer='zeros') padding = keras.layers.ZeroPadding2D(padding=(0, (1, 0))) cropping = keras.layers.Cropping2D(cropping=(0, (0, 1))) res = conv(test_ones_2d) res_2 = padding(res) res_3 = cropping(res_2) print('INPUT') print(test_ones_2d.squeeze()) print('') print('CONV FILTER') print(conv.weights[0].numpy().squeeze()) print('') print('CONVOLUTION RESULT') print(res.numpy().squeeze()) print('') print('PADDED RESULT') print(res_2.numpy().squeeze()) print('') print('CROPPED RESULT') print(res_3.numpy().squeeze()) ``` Note: Since our input test just have one channel, the convolution 1x1 looks like did not perform any change. ## Horizontal stack - convolution layer with mask type "B" The step by step of the mask type "B" convolution layer is a little different. 
``` kernel_size = (1, 2) kernel_h, kernel_w = kernel_size padding = keras.layers.ZeroPadding2D(padding=((int((kernel_h - 1) / 2), int((kernel_h - 1) / 2)), (kernel_w - 1, 0))) conv = keras.layers.Conv2D(filters=1, kernel_size=kernel_size, strides=1, padding='valid', kernel_initializer='ones', bias_initializer='zeros') res = padding(test_ones_2d) result = conv(res) print('INPUT') print(test_ones_2d.squeeze()) print('') print('PADDED INPUT') print(res.numpy().squeeze()) print('') print('CONV FILTER') print(conv.weights[0].numpy().squeeze()) print('') print('RESULT') print(result.numpy().squeeze()) ``` In this case, we also implemented a layer version encapsulation these operations ``` class HorizontalConv2D(keras.layers.Conv2D): def __init__(self, filters, kernel_size, **kwargs): if not isinstance(kernel_size, tuple): kernel_size = (kernel_size // 2 + 1,) * 2 super(HorizontalConv2D, self).__init__(filters, kernel_size, **kwargs) self.pad = tf.keras.layers.ZeroPadding2D( ( (kernel_size[0] - 1, 0), # (Top, Bottom) (kernel_size[1] - 1, 0), # (Left, Right) ) ) def call(self, inputs): inputs = self.pad(inputs) outputs = super(HorizontalConv2D, self).call(inputs) return outputs kernel_size = (1, 2) conv = HorizontalConv2D(filters=1, kernel_size=kernel_size, strides=1, kernel_initializer='ones', bias_initializer='zeros') result = conv(test_ones_2d) print('INPUT') print(test_ones_2d.squeeze()) print('') print('CONV FILTER') print(conv.weights[0].numpy().squeeze()) print('') print('RESULT') print(result.numpy().squeeze()) ``` # Execution time Now we will compare the time that takes to perform each convolutional operation. 
``` import time def measure_time(conv_fn): exec_time = [] n_iter = 100 for _ in range(n_iter): test_input = np.random.rand(128, 256, 256, 1).astype('float32') start = time.time() conv_fn(test_input) exec_time.append(time.time() - start) exec_time = np.array(exec_time, dtype='float32') return exec_time.mean(), exec_time.std() ``` ## Vertical stack ``` mask_type = 'V' kernel_size = (3, 3) masked_conv = MaskedConv2D(mask_type=mask_type, filters=32, kernel_size=kernel_size, padding='same', kernel_initializer='ones', bias_initializer='zeros') @tf.function def test_masked_fn(x): _ = masked_conv(x) masked_time = measure_time(test_masked_fn) # ---------------------------------------------------------------- kernel_size = (2, 3) cropped_conv = VerticalConv2D(filters=32, kernel_size=kernel_size, strides=1, padding='valid', kernel_initializer='ones', bias_initializer='zeros') @tf.function def test_cropped_fn(x): _ = cropped_conv(x) cropped_time = measure_time(test_cropped_fn) # ---------------------------------------------------------------- print("Vertical stack") print(f"Masked convolution: {masked_time[0]:.8f} +- {masked_time[1]:.8f} seconds") print(f"Cropped padded convolution: {cropped_time[0]:.8f} +- {cropped_time[1]:.8f} seconds") ``` ## Horizontal stack - convolution layer with mask type "A" ``` mask_type = 'A' kernel_size = (1, 3) masked_conv = MaskedConv2D(mask_type=mask_type, filters=1, kernel_size=kernel_size, padding='same', kernel_initializer='ones', bias_initializer='zeros') @tf.function def test_masked_fn(x): _ = masked_conv(x) masked_time = measure_time(test_masked_fn) # ---------------------------------------------------------------- kernel_size = (1, 1) conv = keras.layers.Conv2D(filters=1, kernel_size=kernel_size, strides=1, kernel_initializer='ones', bias_initializer='zeros') padding = keras.layers.ZeroPadding2D(padding=(0, (1, 0))) cropping = keras.layers.Cropping2D(cropping=(0, (0, 1))) @tf.function def test_cropped_fn(x): x = conv(x) x = padding(x) x = 
cropping(x) cropped_time = measure_time(test_cropped_fn) # ---------------------------------------------------------------- print("Horizontal stack - convolution layer with mask type 'A'") print(f"Masked convolution: {masked_time[0]:.8f} +- {masked_time[1]:.8f} seconds") print(f"Cropped padded convolution: {cropped_time[0]:.8f} +- {cropped_time[1]:.8f} seconds") ``` ## Horizontal stack - convolution layer with mask type "B" ``` mask_type = 'B' kernel_size = (1, 3) masked_conv = MaskedConv2D(mask_type=mask_type, filters=1, kernel_size=kernel_size, padding='same', kernel_initializer='ones', bias_initializer='zeros') @tf.function def test_masked_fn(x): _ = masked_conv(x) masked_time = measure_time(test_masked_fn) # ---------------------------------------------------------------- kernel_size = (1, 2) cropped_conv = HorizontalConv2D(filters=1, kernel_size=kernel_size, strides=1, kernel_initializer='ones', bias_initializer='zeros') @tf.function def test_cropped_fn(x): _ = cropped_conv(x) cropped_time = measure_time(test_cropped_fn) # ---------------------------------------------------------------- print("Horizontal stack - convolution layer with mask type 'B'") print(f"Masked convolution: {masked_time[0]:.8f} +- {masked_time[1]:.8f} seconds") print(f"Cropped padded convolution: {cropped_time[0]:.8f} +- {cropped_time[1]:.8f} seconds") ``` Altough its looks like cropped is better in the vertical convolution, the difference does not to look very significant. # REFERENCES https://wiki.math.uwaterloo.ca/statwiki/index.php?title=STAT946F17/Conditional_Image_Generation_with_PixelCNN_Decoders#Gated_PixelCNN https://www.slideshare.net/suga93/conditional-image-generation-with-pixelcnn-decoders https://www.youtube.com/watch?v=1BURwCCYNEI
github_jupyter
# Comparing two Counters Today we will look at a way of scoring the significance of differences between frequency distributions, based on a method called "Fightin' Words" by Monroe, Colaresi, and Quinn. ``` import re, sys, glob, math import numpy from collections import Counter from matplotlib import pyplot ``` 1. What is the encoding of the files? How are they structured? What do we need to do to separate text from non-textual words like speakers and stage directions? 2. Look at the most frequent words in the counters for comedy and tragedy. What is different? Is this view informative about differences between these two genres? 3. There is a problem calculating `log_rank`. What is it, and how can we fix it? 4. What does the `generate_scores` function do? What is the effect of the `smoothing` parameter? 5. Look at the plot showing "Fightin' Words" scores for comedy vs. tragedy. What stands out? What does this tell you about these genres in Shakespeare? What if any changes might you make to how we tokenize or otherwise pre-process the documents? 6. Create the same plot for tragedy vs. history and comedy vs. history. What is different? What words would you want to look at in their original context and why? 
``` genre_directories = { "tragedy" : "shakespeare/tragedies", "comedy" : "shakespeare/comedies", "history" : "shakespeare/historical" } word_pattern = re.compile("\w[\w\-\'’]*\w|\w") # This counter will store the total frequency of each word type across all plays all_counts = Counter() # This dictionary will have one counter for each genre genre_counts = {} # This dictionary will have one dictionary for each genre, each containing one Counter for each play in that genre genre_play_counts = {} # Read the plays from files for genre in genre_directories.keys(): genre_play_counts[genre] = {} genre_counts[genre] = Counter() for filename in glob.glob("{}/*.txt".format(genre_directories[genre])): play_counter = Counter() genre_play_counts[genre][filename] = play_counter with open(filename, encoding="utf-8") as file: ## What encoding? ## This block reads a file line by line. for line in file: line = line.rstrip() tokens = word_pattern.findall(line) play_counter.update(tokens) genre_counts[genre] += play_counter all_counts += play_counter genre_counts.keys() genre_play_counts.keys() genre_play_counts["comedy"].keys() genre_play_counts["comedy"]["shakespeare/comedies/The Merry Wives of Windsor.txt"].most_common(30) genre_counts["comedy"].most_common(15) genre_counts["tragedy"].most_common(15) vocabulary = [w for w, c in all_counts.most_common()] vocabulary_size = len(vocabulary) total_word_counts = numpy.array([all_counts[w] for w in vocabulary]) log_counts = numpy.log(total_word_counts) word_ranks = numpy.arange(len(vocabulary)) log_ranks = numpy.log(word_ranks) genres = genre_play_counts.keys() pyplot.scatter(log_ranks, log_counts, alpha = 0.2) pyplot.show() def generate_scores(counter, smoothing = 0.0): scores = numpy.zeros(vocabulary_size) for word_id, word in enumerate(vocabulary): scores[word_id] = counter[word] + smoothing return scores def count_difference(counter_a, counter_b, smoothing): scores_a = generate_scores(counter_a, smoothing) scores_b = 
generate_scores(counter_b, smoothing) ratio_a = scores_a / (numpy.sum(scores_a) - scores_a) ratio_b = scores_b / (numpy.sum(scores_b) - scores_b) variances = (1.0/scores_a) + (1.0/scores_b) return numpy.log(ratio_a / ratio_b) / numpy.sqrt(variances) comedy_tragedy_scores = count_difference(genre_counts["comedy"], genre_counts["tragedy"], 0.0) sorted_words = sorted(zip(comedy_tragedy_scores, vocabulary)) print(sorted_words[:10]) print(sorted_words[-10:]) pyplot.figure(figsize=(20, 20)) pyplot.xlim(3, 11) pyplot.scatter(log_counts, comedy_tragedy_scores, alpha = 0.2) for word_id, word in enumerate(vocabulary): if numpy.abs(comedy_tragedy_scores[word_id]) + log_counts[word_id] > 7.5: pyplot.text(log_counts[word_id], comedy_tragedy_scores[word_id], word) pyplot.show() ```
github_jupyter
# 1-Getting Started Always run this statement first, when working with this book: ``` from scipy import * from IPython.core.interactiveshell import InteractiveShell InteractiveShell.ast_node_interactivity = "all" ``` ## Numbers ``` 2 ** (2 + 2) 1j ** 2 # A complex number 1. + 3.0j # Another complex number ``` ## Strings ``` 'valid string' "string with double quotes" "you shouldn't forget comments" 'these are double quotes: ".." ' """This is a long, long string""" ``` ## Variables ``` x = [3, 4] # a list object is created y = x # this object now has two labels: x and y del x # we delete one of the labels del y # both labels are removed: the object is deleted x = [3, 4] # a list object is created print(x) ``` ## Lists ``` L1 = [5, 6] L1[0] # 5 L1[1] # 6 L1[2] # raises IndexError L2 = ['a', 1, [3, 4]] L2[0] # 'a' L2[2][0] # 3 L2[-1] # last element: [3,4] L2[-2] # second to last: 1 print(list(range(5))) len(['a', 1, 2, 34]) L = ['a', 'b', 'c'] L[-1] # 'c' L.append('d') L # L is now ['a', 'b', 'c', 'd'] L[-1] # 'd' ``` ### Operations on Lists ``` L1 = [1, 2] L2 = [3, 4] L = L1 + L2 # [1, 2, 3, 4] L L = [1, 2] 3 * L # [1, 2, 1, 2, 1, 2] ``` ## Boolean Expressions ``` 2 >= 4 # False 2 < 3 < 4 # True 2 < 3 and 3 < 2 # False 2 != 3 < 4 or False # True 2 <= 2 and 2 >= 2 # True not 2 == 3 # True not False or True and False # True! 
``` ## Repeating statements by loops ``` L = [1, 2, 10] for s in L: print(s * 2) # output: 2 4 20 ``` ### Repeating a task ``` n = 30 k=0 for iteration in range(n): k+= iteration #do_something(this gets executed n times) k ``` ### Break and else ``` threshold=30 x_values=range(20) for x in x_values: if x > threshold: break print(x) for x in x_values: if x > threshold: break else: print("all the x are below the threshold") ``` ## Conditional Statements ``` # The absolute value x=-25 if x >= 0: print(x) else: print(-x) ``` ## Encapsulating code by functions Example: $$x \mapsto f(x) := 2x + 1$$ ``` def f(x): return 2*x + 1 ``` Calling this function: ``` f(2) # 5 f(1) # 3 ``` ## Scripts and modules ``` def f(x): return 2*x + 1 z = [] for x in range(10): if f(x) > pi: z.append(x) else: z.append(-1) print(z) exec(open('smartscript.py').read()) %run smartscript ``` ## Simple modules - collecting Functions For the next example to work, you need a file `smartfunctions.py`in the same folder as this notebook: ``` def f(x): return 2*x + 1 def g(x): return x**2 + 4*x - 5 def h(x): return 1/f(x) ``` ### Using modules and namespaces ``` import smartfunctions print(smartfunctions.f(2)) from smartfunctions import g #import just this one function print(g(1)) from smartfunctions import * #import all print(h(2)*f(2)) ``` ## Interpreter ``` def f(x): return y**2 a = 3 # here both a and f are defined f(2) # error, y is not defined ```
github_jupyter
# Road Follower - Train Model In this notebook we will train a neural network to take an input image, and output a set of x, y values corresponding to a target. We will be using PyTorch deep learning framework to train ResNet18 neural network architecture model for road follower application. ``` import torch import torch.optim as optim import torch.nn.functional as F import torchvision import torchvision.datasets as datasets import torchvision.models as models import torchvision.transforms as transforms import glob import PIL.Image import os import numpy as np ``` ### Download and extract data Before you start, you should upload the ``road_following_<Date&Time>.zip`` file that you created in the ``data_collection.ipynb`` notebook on the robot. > If you're training on the JetBot you collected data on, you can skip this! You should then extract this dataset by calling the command below: ``` !unzip -q road_following.zip ``` You should see a folder named ``dataset_all`` appear in the file browser. ### Create Dataset Instance Here we create a custom ``torch.utils.data.Dataset`` implementation, which implements the ``__len__`` and ``__getitem__`` functions. This class is responsible for loading images and parsing the x, y values from the image filenames. Because we implement the ``torch.utils.data.Dataset`` class, we can use all of the torch data utilities :) We hard coded some transformations (like color jitter) into our dataset. We made random horizontal flips optional (in case you want to follow a non-symmetric path, like a road where we need to 'stay right'). If it doesn't matter whether your robot follows some convention, you could enable flips to augment the dataset. 
``` def get_x(path): """Gets the x value from the image filename""" return (float(int(path[3:6])) - 50.0) / 50.0 def get_y(path): """Gets the y value from the image filename""" return (float(int(path[7:10])) - 50.0) / 50.0 class XYDataset(torch.utils.data.Dataset): def __init__(self, directory, random_hflips=False): self.directory = directory self.random_hflips = random_hflips self.image_paths = glob.glob(os.path.join(self.directory, '*.jpg')) self.color_jitter = transforms.ColorJitter(0.3, 0.3, 0.3, 0.3) def __len__(self): return len(self.image_paths) def __getitem__(self, idx): image_path = self.image_paths[idx] image = PIL.Image.open(image_path) x = float(get_x(os.path.basename(image_path))) y = float(get_y(os.path.basename(image_path))) if float(np.random.rand(1)) > 0.5: image = transforms.functional.hflip(image) x = -x image = self.color_jitter(image) image = transforms.functional.resize(image, (224, 224)) image = transforms.functional.to_tensor(image) image = image.numpy()[::-1].copy() image = torch.from_numpy(image) image = transforms.functional.normalize(image, [0.485, 0.456, 0.406], [0.229, 0.224, 0.225]) return image, torch.tensor([x, y]).float() dataset = XYDataset('dataset_xy', random_hflips=False) ``` ### Split dataset into train and test sets Once we read dataset, we will split data set in train and test sets. In this example we split train and test a 90%-10%. The test set will be used to verify the accuracy of the model we train. ``` test_percent = 0.1 num_test = int(test_percent * len(dataset)) train_dataset, test_dataset = torch.utils.data.random_split(dataset, [len(dataset) - num_test, num_test]) ``` ### Create data loaders to load data in batches We use ``DataLoader`` class to load data in batches, shuffle data and allow using multi-subprocesses. In this example we use batch size of 64. Batch size will be based on memory available with your GPU and it can impact accuracy of the model. 
```
train_loader = torch.utils.data.DataLoader(
    train_dataset,
    batch_size=8,
    shuffle=True,
    num_workers=0
)

test_loader = torch.utils.data.DataLoader(
    test_dataset,
    batch_size=8,
    shuffle=True,
    num_workers=0
)
```

### Define Neural Network Model

We use ResNet-18 model available on PyTorch TorchVision.

In a process called transfer learning, we can repurpose a pre-trained model (trained on millions of images) for a new task that has possibly much less data available.

More details on ResNet-18 : https://github.com/pytorch/vision/blob/master/torchvision/models/resnet.py

More Details on Transfer Learning: https://www.youtube.com/watch?v=yofjFQddwHE

```
model = models.resnet18(pretrained=True)
```

The ResNet model has a fully connected (fc) final layer with 512 as ``in_features``, and since we will be training a regression on the (x, y) target we set ``out_features`` to 2.

Finally, we transfer our model for execution on the GPU

```
model.fc = torch.nn.Linear(512, 2)
device = torch.device('cuda')
model = model.to(device)
```

### Train Regression:

We train for 70 epochs and save the best model if the loss is reduced.
``` NUM_EPOCHS = 70 BEST_MODEL_PATH = 'best_steering_model_xy.pth' best_loss = 1e9 optimizer = optim.Adam(model.parameters()) for epoch in range(NUM_EPOCHS): model.train() train_loss = 0.0 for images, labels in iter(train_loader): images = images.to(device) labels = labels.to(device) optimizer.zero_grad() outputs = model(images) loss = F.mse_loss(outputs, labels) train_loss += float(loss) loss.backward() optimizer.step() train_loss /= len(train_loader) model.eval() test_loss = 0.0 for images, labels in iter(test_loader): images = images.to(device) labels = labels.to(device) outputs = model(images) loss = F.mse_loss(outputs, labels) test_loss += float(loss) test_loss /= len(test_loader) print('%f, %f' % (train_loss, test_loss)) if test_loss < best_loss: torch.save(model.state_dict(), BEST_MODEL_PATH) best_loss = test_loss ``` Once the model is trained, it will generate ``best_steering_model_xy.pth`` file which you can use for inferencing in the live demo notebook. If you trained on a different machine other than JetBot, you'll need to upload this to the JetBot to the ``road_following`` example folder.
github_jupyter
<h1 align="center"> Logistic Regression (Preloaded Dataset) </h1> scikit-learn comes with a few small datasets that do not require to download any file from some external website. The digits dataset we will use is one of these small standard datasets. These datasets are useful to quickly illustrate the behavior of the various algorithms implemented in the scikit. They are however often too small to be representative of real world machine learning tasks. After learning the basics of logisitic regression, we will use the MNIST Handwritten digit database <b>Each datapoint is a 8x8 image of a digit.</b> Parameters | Number --- | --- Classes | 10 Samples per class | ~180 Samples total | 1797 Dimensionality | 64 Features | integers 0-16 ``` from sklearn.datasets import load_digits from sklearn.model_selection import train_test_split import numpy as np import matplotlib.pyplot as plt import seaborn as sns # Used for Confusion Matrix from sklearn import metrics %matplotlib inline digits = load_digits() digits.data.shape digits.target.shape ``` ## Showing the Images and Labels ``` plt.figure(figsize=(20,4)) for index, (image, label) in enumerate(zip(digits.data[0:5], digits.target[0:5])): plt.subplot(1, 5, index + 1) plt.imshow(np.reshape(image, (8,8)), cmap=plt.cm.gray) plt.title('Training: %i\n' % label, fontsize = 20) ``` ## Splitting Data into Training and Test Sets ``` # test_size: what proportion of original data is used for test set x_train, x_test, y_train, y_test = train_test_split( digits.data, digits.target, test_size=0.25, random_state=0) print(x_train.shape) print(y_train.shape) print(x_test.shape) print(y_test.shape) ``` ## Scikit-learn 4-Step Modeling Pattern <b>Step 1: </b> Import the model you want to use In sklearn, all machine learning models are implemented as Python classes ``` from sklearn.linear_model import LogisticRegression ``` <b>Step 2:</b> Make an instance of the Model ``` logisticRegr = LogisticRegression() ``` <b>Step 3:</b> Training the model 
on the data, storing the information learned from the data Model is learning the relationship between x (digits) and y (labels) ``` logisticRegr.fit(x_train, y_train) ``` <b>Step 4</b>: Predict the labels of new data (new images) Uses the information the model learned during the model training process ``` # Returns a NumPy Array # Predict for One Observation (image) logisticRegr.predict(x_test[0].reshape(1,-1)) # Predict for Multiple Observations (images) at Once logisticRegr.predict(x_test[0:10]) # Make predictions on entire test data predictions = logisticRegr.predict(x_test) predictions.shape ``` ## Measuring Model Performance accuracy (fraction of correct predictions): correct predictions / total number of data points Basically, how the model performs on new data (test set) ``` # Use score method to get accuracy of model score = logisticRegr.score(x_test, y_test) print(score) ``` ## Confusion Matrix (Matplotlib) A confusion matrix is a table that is often used to describe the performance of a classification model (or "classifier") on a set of test data for which the true values are known. 
``` def plot_confusion_matrix(cm, title='Confusion matrix', cmap='Pastel1'): plt.figure(figsize=(9,9)) plt.imshow(cm, interpolation='nearest', cmap=cmap) plt.title(title, size = 15) plt.colorbar() tick_marks = np.arange(10) plt.xticks(tick_marks, ["0", "1", "2", "3", "4", "5", "6", "7", "8", "9"], rotation=45, size = 10) plt.yticks(tick_marks, ["0", "1", "2", "3", "4", "5", "6", "7", "8", "9"], size = 10) plt.tight_layout() plt.ylabel('Actual label', size = 15) plt.xlabel('Predicted label', size = 15) width, height = cm.shape for x in xrange(width): for y in xrange(height): plt.annotate(str(cm[x][y]), xy=(y, x), horizontalalignment='center', verticalalignment='center') # confusion matrix confusion = metrics.confusion_matrix(y_test, predictions) print('Confusion matrix') print(confusion) plt.figure() plot_confusion_matrix(confusion); plt.show(); ``` ## Confusion Matrix (Seaborn) <b>Note: Seaborn needs to be installed for this portion </b> ``` # !conda install seaborn -y # Make predictions on test data predictions = logisticRegr.predict(x_test) cm = metrics.confusion_matrix(y_test, predictions) #cm_normalized = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis] plt.figure(figsize=(9,9)) sns.heatmap(cm, annot=True, fmt=".3f", linewidths=.5, square = True, cmap = 'Blues_r'); plt.ylabel('Actual label'); plt.xlabel('Predicted label'); all_sample_title = 'Accuracy Score: {0}'.format(score) plt.title(all_sample_title, size = 15); ``` ## Display Misclassified images with Predicted Labels ``` index = 0 misclassifiedIndex = [] for predict, actual in zip(predictions, y_test): if predict != actual: misclassifiedIndex.append(index) index +=1 plt.figure(figsize=(20,4)) for plotIndex, wrong in enumerate(misclassifiedIndex[10:15]): plt.subplot(1, 5, plotIndex + 1) plt.imshow(np.reshape(x_test[wrong], (8,8)), cmap=plt.cm.gray) plt.title('Predicted: {}, Actual: {}'.format(predictions[wrong], y_test[wrong]), fontsize = 20) ``` Part 2 of the tutorial is located here: [MNIST Logistic 
Regression](https://github.com/mGalarnyk/Python_Tutorials/blob/master/Sklearn/Logistic_Regression/LogisticRegression_MNIST.ipynb) <b>if this tutorial doesn't cover what you are looking for, please leave a comment on the youtube video and I will try to cover what you are interested in. </b> [youtube video](https://www.youtube.com/watch?v=71iXeuKFcQM)
github_jupyter
SD211 TP2: Régression logistique *<p>Author: Pengfei Mi</p>* *<p>Date: 12/05/2017</p>* ``` import numpy as np import matplotlib.pyplot as plt from cervicalcancerutils import load_cervical_cancer from scipy.optimize import check_grad from time import time from sklearn.metrics import classification_report ``` ## Partie 1: Régularisation de Tikhonov $\textbf{Question 1.1}\quad\text{Calculer le gradient et la matrice hessienne.}$ <div class="alert alert-success"> <p> Notons $\tilde{X} = (\tilde{\mathbf{x}}_1,...,\tilde{\mathbf{ x}}_n)^T$, où $\tilde{\mathbf{x}}_i = \begin{pmatrix}1\\ \mathbf{x}_i\end{pmatrix}\in \mathbb{R}^{p+1}$, $\tilde{\mathbf{\omega}} = \begin{pmatrix} \omega_0\\\mathbf{\omega}\end{pmatrix}\in \mathbb{R}^{p+1}$, et la matrice $$A = diag(0,1,...,1) = \begin{pmatrix} 0&0&\cdots&0\\ 0&1&&0\\ \vdots&&\ddots&\vdots\\ 0&0&\cdots&1 \end{pmatrix} $$ </p> <p> On a: $$ \begin{aligned} f_1(\omega_0, \omega) &= \frac{1}{n}\sum_{i=1}^{n}\text{log}\big(1+e^{-y_i(x_i^T\omega+\omega_0)}\big)+\frac{\rho}{2}\|\omega\|_2^2 \\ & = \frac{1}{n}\sum_{i=1}^{n}\text{log}\big(1+e^{-y_i\tilde x_i^T \tilde \omega}\big)+\frac{\rho}{2}\tilde{\omega}^TA\tilde{\omega} \end{aligned} $$ </p> <p> Ainsi on obtient le gradient: $$ \begin{aligned} \nabla{f_1}(\omega_0, \omega) &= \frac{1}{n}\sum_{i=1}^{n}\frac{-e^{-y_i\tilde x_i^T \tilde \omega}y_i\tilde{\mathbf{x}}_i}{1+e^{-y_i\tilde x_i^T \tilde \omega}} + \rho A\tilde{\mathbf{\omega}} \\ &= \frac{1}{n}\sum_{i=1}^{n}\frac{-y_i\tilde{\mathbf{x}}_i}{1+e^{y_i\tilde x_i^T \tilde \omega}} + \rho A\tilde{\mathbf{\omega}} \end{aligned} $$ </p> <p> et la matrice hessienne: $$ \begin{aligned} \mathbf{H} = \nabla^2f_1(\omega_0, \omega) &= \frac{1}{n}\sum_{i=1}^{n}\frac{e^{y_i\tilde x_i^T \tilde \omega}(y_i\tilde{\mathbf{x}}_i)(y_i\tilde{\mathbf{x}}_i)^T}{(1+e^{y_i\tilde x_i^T \tilde \omega})^2} + \rho A \\ & = \frac{1}{n}\sum_{i=1}^{n}\frac{(y_i\tilde{\mathbf{x}}_i)(y_i\tilde{\mathbf{x}}_i)^T}{(1+e^{y_i\tilde x_i^T \tilde 
\omega})(1+e^{-y_i\tilde x_i^T \tilde \omega})} + \rho A \end{aligned} $$ </p> </div> <div class="alert alert-success"> <p> Soient $\omega \in \mathbb{R}^{p+1}$, on a: $$ \begin{aligned} \omega^TH\omega &= \frac{1}{n}\sum_{i=1}^{n}\frac{\omega^T (y_i\tilde{\mathbf{x}}_i)(y_i\tilde{\mathbf{x}}_i)^T \omega}{(1+e^{y_i\tilde x_i^T \tilde \omega})(1+e^{-y_i\tilde x_i^T \tilde \omega})} + \rho \omega^T A \omega \\ &= \frac{1}{n}\sum_{i=1}^{n}\frac{(\omega^T y_i\tilde{\mathbf{x}}_i)(\omega^T y_i\tilde{\mathbf{x}}_i)^T}{(1+e^{y_i\tilde x_i^T \tilde \omega})(1+e^{-y_i\tilde x_i^T \tilde \omega})} + \rho \omega^T A^2 \omega \\ &= \frac{1}{n}\sum_{i=1}^{n}\frac{\|\omega^T y_i\tilde{\mathbf{x}}_i\|_2^2}{(1+e^{y_i\tilde x_i^T \tilde \omega})(1+e^{-y_i\tilde x_i^T \tilde \omega})} + \rho \|A\omega\|_2^2 \geq 0 \end{aligned} $$ </p> <p>Donc, la matrice hessienne est semi-définie positive, la fonction $f_1$ est convexe.</p> </div> $\textbf{Question 1.2}\quad\text{Coder une fonction qui retourne la valeur de la fonction, son gradient et sa hessienne.}$ <div class="alert alert-success"> <p>On insère une colonne de $1$ à gauche de $X$ pour simplifier le calcul.</p> </div> ``` X, y = load_cervical_cancer("riskfactorscervicalcancer.csv") print "Before the insertion:" print X.shape, y.shape n, p = X.shape X = np.c_[np.ones(n), X] print "After the insertion:" print X.shape, y.shape def objective(w_, X, y, rho, return_grad=True, return_H=True): """ X: matrix of size n*(p+1) y: vector of size n w0: real number w: vector of size p """ # Initialize elementary intermediate variables; n, p = X.shape w = w_[1:] y_x = np.array([y[i] * X[i, :] for i in range(n)]) yx_w = np.array([np.sum(y_x[i, :]*w_) for i in range(n)]) exp_yxw_1 = np.array([np.exp(yx_w[i]) for i in range(n)]) + 1 exp_neg_yxw_1 = np.array([np.exp(-yx_w[i]) for i in range(n)]) + 1 # Compute function value val = np.mean(np.log(exp_neg_yxw_1)) + np.sum(w**2)*rho/2. 
if return_grad == False: return val else: # Compute gradient grad = np.mean(-np.array([y_x[i]/exp_yxw_1[i] for i in range(n)]), axis=0) + rho*np.r_[0, w] if return_H == False: return val, grad else: # Compute the Hessian matrix H = np.mean(np.array([y_x[i].reshape(-1, 1).dot(y_x[i].reshape(1, -1) / (exp_yxw_1[i]*exp_neg_yxw_1[i])) for i in range(n)]), axis=0) + rho*np.diag(np.r_[0, np.ones(p-1)]) return val, grad, H def funcMask(w_, X, y, rho): val, grad = objective(w_, X, y, rho, return_H=False) return val def gradMask(w_, X, y, rho): val, grad = objective(w_, X, y, rho, return_H=False) return grad rho = 1./n t0 = time() print "The difference of gradient is: %0.12f" % check_grad(funcMask, gradMask, np.zeros(p+1), X, y, rho) print "Done in %0.3fs." % (time()-t0) def gradMask(w_, X, y, rho): val, grad = objective(w_, X, y, rho, return_H=False) return grad.sum() def hessianMask(w_, X, y, rho): val, grad, H = objective(w_, X, y, rho) return np.sum(H, axis=1) t0 = time() rho = 1./n print "The difference of Hessian matrix is: %0.12f" % check_grad(gradMask, hessianMask, np.zeros(p+1), X, y, rho) print "Done in %0.3fs." 
% (time()-t0) ``` <div class="alert alert-success"> <p>On a vérifié le calcul de gradient et de matrice hessienne.</p> </div> $\textbf{Question 1.3}\quad\text{Coder la méthode de Newton.}$ <div class="alert alert-success"> <p> Selon la définition de méthode de Newton, on a: $$\omega^{k+1} = \omega^k - (\nabla^2f_1(\omega^k))^{-1}\nabla f_1(\omega^k)$$ </p> </div> ``` def minimize_Newton(func, w_, X, y, rho, tol=1e-10): n, p = X.shape val, grad, H = func(w_, X, y, rho) grad_norm = np.sqrt(np.sum(grad**2)) norms = [grad_norm] cnt = 0 while (grad_norm > tol): w_ = w_ - np.linalg.solve(H, np.identity(p)).dot(grad) val, grad, H = func(w_, X, y, rho) grad_norm = np.sqrt(np.sum(grad**2)) norms.append(grad_norm) cnt = cnt + 1 return val, w_, cnt, norms t0 = time() rho = 1./n val, w, cnt, grad_norms = minimize_Newton(objective, np.zeros(p+1), X, y, rho, tol=1e-10) print "The value minimal of the objective function is: %0.12f" % val print "Done in %0.3fs, number of iterations: %d" % (time()-t0, cnt) print w plt.figure(1, figsize=(8,6)) plt.title("The norm of gradient, $\omega^0 = 0$") plt.semilogy(range(0, len(grad_norms)), grad_norms) plt.xlabel("Number of iteration") plt.ylabel("Norm of gradient") plt.xlim(0, len(grad_norms)) plt.show() ``` $\textbf{Question 1.4}\quad\text{Lancer avec comme condition initiale }(\omega_0^0,\omega^0) = 0.3e\text{, où }e_i=0\text{ pour tout }i.$ ``` t0 = time() val, grad, H, cnt, grad_norms = minimize_Newton(objective, 0.3*np.ones(p+1), X, y, rho, tol=1e-10) print "The value minimal of the objective function is: %0.12f" % val print "Done in %0.3fs, number of iterations: %d" % (time()-t0, cnt) ``` <div class="alert alert-success"> <p>On a vu que avec cette condition initiale, la fonction objectif ne converge pas. 
C'est parce que le point initial est hors du domaine de convergence.</p>
</div>

$\textbf{Question 1.5}\quad\text{Coder la méthode de recherche linéaire d'Armijo.}$

<div class="alert alert-success">
<p>Notons $\omega^+(\gamma_k)=\omega^k - \gamma_k(\nabla^2 f_1(\omega^k))^{-1}\nabla f_1(\omega^k)$, soient $a \in (0,1)$, $b>0$ et $\beta \in (0,1)$, on cherche le premier entier $l$ non-négatif tel que:</p>
$$f_1(\omega^+(ba^l)) \leq f_1(\omega^k) + \beta\langle\nabla_{f_1}(\omega^k),\,\omega^+(ba^l)-\omega^k\rangle$$
</div>

<div class="alert alert-success">
<p>Ici, on prend $\beta = 0.5$, ainsi la recherche linéaire d'Armijo devient équivalente à la recherche linéaire de Taylor.</p>
<p> On fixe $b_0 = 1$ et $b_k = 2\gamma_{k-1}$, c'est un choix classique.</p>
<p> On fixe $a = 0.5$, c'est pour faire un compromis entre la précision de recherche et la vitesse de convergence.</p>
</div>

```
def minimize_Newton_Armijo(func, w_, X, y, rho, a, b, beta, tol=1e-10, max_iter=500):
    n, p = X.shape
    val, grad, H = func(w_, X, y, rho)
    grad_norm = np.sqrt(np.sum(grad**2))
    norms = [grad_norm]
    d = np.linalg.solve(H, np.identity(p)).dot(grad)
    gamma = b / 2.
cnt = 0 while (grad_norm > tol and cnt < max_iter): gamma = 2*gamma val_ = func(w_ - gamma*d, X, y, rho, return_grad=False) while (val_ > val - beta*gamma*np.sum(d*grad)): gamma = gamma*a val_ = func(w_ - gamma*d, X, y, rho, return_grad=False) w_ = w_ - gamma*d val, grad, H = func(w_, X, y, rho) d = np.linalg.solve(H, np.identity(p)).dot(grad) grad_norm = np.sqrt(np.sum(grad**2)) norms.append(grad_norm) cnt = cnt + 1 return val, w_, cnt, norms t0 = time() rho = 1./n a = 0.5 b = 1 beta = 0.5 val_nls, w_nls, cnt_nls, grad_norms_nls = minimize_Newton_Armijo(objective, 0.3*np.ones(p+1), X, y, rho, a, b, beta, tol=1e-10, max_iter=500) print "The value minimal of the objective function is: %0.12f" % val_nls t_nls = time()-t0 print "Done in %0.3fs, number of iterations: %d" % (t_nls, cnt_nls) print w_nls plt.figure(2, figsize=(8,6)) plt.title("The norm of gradient by Newton with linear search") plt.semilogy(range(0, len(grad_norms_nls)), grad_norms_nls) plt.xlabel("Number of iteration") plt.ylabel("Norm of gradient") plt.xlim(0, len(grad_norms_nls)) plt.show() ``` ## Partie 2: Régularisation pour la parcimoine $\textbf{Question 2.1}\quad\text{Pourquoi ne peut-on pas utiliser la méthode de Newton pour résoudre ce problème?}$ <div class="alert alert-success"> <p>Parce que la fonction objectif ici n'est pas différentiable, on ne peut pas utiliser le gradient et la matrice hessienne.</p> </div> $\textbf{Question 2.2}\quad\text{Écrire la fonction objectif sous la forme }F_2 = f_2 + g_2\text{ où }f_2\text{ est dérivable et l’opérateur proximal de }g_2\text{ est simple.}$ <div class="alert alert-success"> <p> $$ \begin{aligned} F_2(\omega_0,\omega) &= \frac{1}{n}\sum_{i=1}^{n}\text{log}\big(1+e^{-y_i(x_i^T\omega+\omega_0)}\big)+\rho\|\omega\|_1 \\ &= f_2+g_2 \end{aligned} $$ où $f_2 = \frac{1}{n}\sum_{i=1}^{n}\text{log}\big(1+e^{-y_i(x_i^T\omega+\omega_0)}\big)$ est dérivable, $g_2 = \rho\|\omega\|_1$ de laquelle l'opérateur proximal est simple. 
</p> </div> <div class="alert alert-success"> <p> On a le gradient de $f_2$: $$ \begin{aligned} \nabla{f_2}(\omega_0, \omega) &= \frac{1}{n}\sum_{i=1}^{n}\frac{-e^{-y_i\tilde x_i^T \tilde \omega}y_i\tilde{\mathbf{x}}_i}{1+e^{-y_i\tilde x_i^T \tilde \omega}} \\ &= \frac{1}{n}\sum_{i=1}^{n}\frac{-y_i\tilde{\mathbf{x}}_i}{1+e^{y_i\tilde x_i^T \tilde \omega}} \end{aligned} $$ </p> <p> et l'opérateur proximal de $g_2$: $$ \begin{aligned} \text{prox}_{g_2}(x) &= \text{arg}\,\underset{y \in \mathbb{R}^p}{\text{min}}\, \big(g_2(y) + \frac{1}{2}\|y-x\|^2 \big) \\ &= \text{arg}\,\underset{y \in \mathbb{R}^p}{\text{min}}\, \big(\rho\|y\|_1 + \frac{1}{2}\|y-x\|^2 \big) \\ &= \text{arg}\,\underset{y \in \mathbb{R}^p}{\text{min}}\, \sum_{i=1}^{p}\big(\rho |y_i| + \frac{1}{2}(y_i-x_i)^2\big) \end{aligned} $$ </p> <p> pour $1 \leq i \leq n$, on obtient la solution: $$ y_i^* = \left\{ \begin{align} x_i - \rho, &\text{ si } x_i > \rho \\ x_i + \rho, &\text{ si } x_i < -\rho \\ 0, &\text{ si } -\rho \leq x_i \leq \rho \end{align} \right. 
$$ </p> </div> <div class="alert alert-success"> <p> $$ \begin{aligned} \mathbf{H_2} = \nabla^2f_2(\omega_0, \omega) &= \frac{1}{n}\sum_{i=1}^{n}\frac{e^{y_i\tilde x_i^T \tilde \omega}(y_i\tilde{\mathbf{x}}_i)(y_i\tilde{\mathbf{x}}_i)^T}{(1+e^{y_i\tilde x_i^T \tilde \omega})^2} \\ & = \frac{1}{n}\sum_{i=1}^{n}\frac{(y_i\tilde{\mathbf{x}}_i)(y_i\tilde{\mathbf{x}}_i)^T}{(1+e^{y_i\tilde x_i^T \tilde \omega})(1+e^{-y_i\tilde x_i^T \tilde \omega})} \end{aligned} $$ </p> <p> Soient $\omega \in \mathbb{R}^{p+1}$, on a: $$ \begin{aligned} \omega^TH_2\omega &= \frac{1}{n}\sum_{i=1}^{n}\frac{\omega^T (y_i\tilde{\mathbf{x}}_i)(y_i\tilde{\mathbf{x}}_i)^T \omega}{(1+e^{y_i\tilde x_i^T \tilde \omega})(1+e^{-y_i\tilde x_i^T \tilde \omega})} \\ &= \frac{1}{n}\sum_{i=1}^{n}\frac{(\omega^T y_i\tilde{\mathbf{x}}_i)(\omega^T y_i\tilde{\mathbf{x}}_i)^T}{(1+e^{y_i\tilde x_i^T \tilde \omega})(1+e^{-y_i\tilde x_i^T \tilde \omega})} \\ &= \frac{1}{n}\sum_{i=1}^{n}\frac{\|\omega^T y_i\tilde{\mathbf{x}}_i\|_2^2}{(1+e^{y_i\tilde x_i^T \tilde \omega})(1+e^{-y_i\tilde x_i^T \tilde \omega})} \geq 0 \end{aligned} $$ </p> <p>Donc, la matrice hessienne de $f_2$ est semi-définie positive, la fonction $f_2$ est convexe.</p> <p> $$ \begin{aligned} g_2(\omega_0, \omega) &= \rho\|\omega\|_1 \\ &= \rho \sum_{i=1}^{n}|\omega_i| \end{aligned} $$ </p> <p>La fonction de valeur absolue est convexe pour chaque élément de $\omega$, pour $\rho \geq 0$, $g_2$ est aussi convexe.</p> <p>Donc $F_2 = f_2 + g_2$ est convexe pour $\rho \geq 0$.</p> </div> $\textbf{Question 2.3}\quad\text{Coder le gradient proximal avec recherche linéaire.}$ <div class="alert alert-success"> <p>On rajoute la recherche linéaire de Taylor.</p> <p>On prend $a = 0.5$, $b_0 = 1b$ et $b = 2\gamma_{k-1}$. 
On cherche le premier entier $l$ non-négatif tel que:</p> $$f_2(\omega^+(ba^l)) \leq f_2(\omega^k) + \langle\nabla_{f_2}(\omega^k),\,\omega^+(ba^l)-\omega^k\rangle + \frac{1}{2ba^l}\|\omega^k - \omega^+(ba^l)\|^2$$ </div> <div class="alert alert-success"> On peut utiliser un seuillage pour la valeur de fonction objectif évaluée dans une itération comme test d'arrêt. </div> ``` def objective_proximal(w_, X, y, rho): """ X: matrix of size n*(p+1) y: vector of size n w0: real number w: vector of size p """ # Initialize elementary intermediate variables; n, p = X.shape w = w_[1:] y_x = np.array([y[i] * X[i, :] for i in range(n)]) yx_w = np.array([np.sum(y_x[i, :]*w_) for i in range(n)]) exp_neg_yxw_1 = np.array([np.exp(-yx_w[i]) for i in range(n)]) + 1 # Compute function value val = np.mean(np.log(exp_neg_yxw_1)) + rho*np.sum(np.fabs(w)) return val def f(w_, X, y, return_grad=True): """ X: matrix of size n*(p+1) y: vector of size n w0: real number w: vector of size p """ # Initialize elementary intermediate variables; n, p = X.shape w = w_[1:] y_x = np.array([y[i] * X[i, :] for i in range(n)]) yx_w = np.array([np.sum(y_x[i, :]*w_) for i in range(n)]) exp_yxw_1 = np.array([np.exp(yx_w[i]) for i in range(n)]) + 1 exp_neg_yxw_1 = np.array([np.exp(-yx_w[i]) for i in range(n)]) + 1 # Compute function value val = np.mean(np.log(exp_neg_yxw_1)) if return_grad == False: return val else: # Compute gradient grad = np.mean(-np.array([y_x[i]/exp_yxw_1[i] for i in range(n)]), axis=0) return val, grad def Soft_Threshold(w, rho): w_ = np.zeros_like(w) w_[w > rho] = w[w > rho] - rho w_[w < -rho] = w[w < -rho] + rho w_[0] = w[0] return w_ def minimize_prox_grad_Taylor(func, f, w_, X, y, rho, a, b, tol=1e-10, max_iter=500): n, p = X.shape val = func(w_, X, y, rho) val_f, grad_f = f(w_, X, y) gamma = b / 2. 
delta_val = tol*2 cnt = 0 while (delta_val > tol and cnt < max_iter): gamma = 2*gamma w_new = Soft_Threshold(w_ - gamma*grad_f, gamma*rho) val_f_ = f(w_new, X, y, return_grad=False) #while (val_f_ > val_f + beta*np.sum(grad_f*(w_new - w_))): while (val_f_ > val_f + np.sum(grad_f*(w_new-w_)) + np.sum((w_new-w_)**2)/gamma): #print val_ gamma = gamma*a w_new = Soft_Threshold(w_ - gamma*grad_f, gamma*rho) val_f_ = f(w_new, X, y, return_grad=False) w_ = w_new val_f, grad_f = f(w_, X, y) val_ = func(w_, X, y, rho) delta_val = val - val_ val = val_ cnt = cnt + 1 return func(w_, X, y, rho), w_, cnt t0 = time() rho = 0.1 a = 0.5 b = 1 val_pgls, w_pgls, cnt_pgls = minimize_prox_grad_Taylor(objective_proximal, f, 0.3*np.ones(p+1), X, y, rho, a, b, tol=1e-8, max_iter=500) print "The value minimal of the objective function is: %0.12f" % val_pgls t_pgls = time()-t0 print "Done in %0.3fs, number of iterations: %d" % (t_pgls, cnt_pgls) print w_pgls ``` ## Partie 3: Comparaison $\textbf{Question 3.1}\quad\text{Comparer les propriétés des deux problèmes d’optimisation.}$ <div class="alert alert-success"> <p>1. Toutes les deux fonctions objectifs sont convexes, laquelle de régularisation de Tikhonov est différentible, l'autre n'est pas différentiable.</p> <p>2. 
Selon les deux $\omega$ qu'on obtient, la régularisation de Tikhonov utilise toutes les variables explicatives, la régularisation pour la parcimonie en utilise une partie.</p>
</div>

$\textbf{Question 3.2}\quad\text{Comparer les solutions obtenues avec les deux types de régularisation.}$

```
y_pred_nls = np.sign(X.dot(w_nls))
y_pred_pgls = np.sign(X.dot(w_pgls))
print "The chance level is: %f" % max(np.mean(y == 1), 1-np.mean(y == 1))
print "The score by Newton method with line search is: %f" % np.mean(y == y_pred_nls)
print "The score by proximal gradient method with line search is: %f" % np.mean(y == y_pred_pgls)
print "-"*60
print "Classification report for Newton method"
print classification_report(y, y_pred_nls)
print "-"*60
print "Classification report for proximal gradient method"
print classification_report(y, y_pred_pgls)
```

<div class="alert alert-success">
<p>En comparant les scores et les rapports de classification:</p>
<p>1. Le score obtenu par la méthode de Newton est meilleur que celui de la méthode de gradient proximal.</p>
<p>2. Selon le f1-score, la méthode de Newton est aussi meilleure.</p>
<p>3. Dans la méthode de gradient proximal, la «precision» pour class 1 est 1.0, de plus, la «recall» est 0.1. On peut conclure que cette méthode avantage la classe 1.</p>
</div>
github_jupyter
# PyCity Schools Analysis * As a whole, schools with higher budgets, did not yield better test results. By contrast, schools with higher spending per student actually (\$645-675) underperformed compared to schools with smaller budgets (<\$585 per student). * As a whole, smaller and medium sized schools dramatically out-performed large sized schools on passing math performances (89-91% passing vs 67%). * As a whole, charter schools out-performed the public district schools across all metrics. However, more analysis will be required to glean if the effect is due to school practices or the fact that charter schools tend to serve smaller student populations per school. --- ``` # Dependencies and Setup import pandas as pd # File to Load (Remember to Change These) school_data_to_load = "Resources/schools_complete.csv" student_data_to_load = "Resources/students_complete.csv" # Read School and Student Data File and store into Pandas Data Frames school_data = pd.read_csv(school_data_to_load) student_data = pd.read_csv(student_data_to_load) # Combine the data into a single dataset school_data_complete = pd.merge(student_data, school_data, how="left", on=["school_name", "school_name"]) ``` ## District Summary ``` # Calculate the Totals (Schools and Students) school_count = len(school_data_complete["school_name"].unique()) student_count = school_data_complete["Student ID"].count() # Calculate the Total Budget total_budget = school_data["budget"].sum() # Calculate the Average Scores average_math_score = school_data_complete["math_score"].mean() average_reading_score = school_data_complete["reading_score"].mean() overall_passing_rate = (average_math_score + average_reading_score) / 2 # Calculate the Percentage Pass Rates passing_math_count = school_data_complete[(school_data_complete["math_score"] >= 70)].count()["student_name"] passing_math_percentage = passing_math_count / float(student_count) * 100 passing_reading_count = 
school_data_complete[(school_data_complete["reading_score"] >= 70)].count()["student_name"] passing_reading_percentage = passing_reading_count / float(student_count) * 100 # Minor Data Cleanup district_summary = pd.DataFrame({"Total Schools": [school_count], "Total Students": [student_count], "Total Budget": [total_budget], "Average Math Score": [average_math_score], "Average Reading Score": [average_reading_score], "% Passing Math": [passing_math_percentage], "% Passing Reading": [passing_reading_percentage], "% Overall Passing Rate": [overall_passing_rate]}) district_summary = district_summary[["Total Schools", "Total Students", "Total Budget", "Average Math Score", "Average Reading Score", "% Passing Math", "% Passing Reading", "% Overall Passing Rate"]] district_summary["Total Students"] = district_summary["Total Students"].map("{:,}".format) district_summary["Total Budget"] = district_summary["Total Budget"].map("${:,.2f}".format) # Display the data frame district_summary ``` ## School Summary ``` # Determine the School Type school_types = school_data.set_index(["school_name"])["type"] # Calculate the total student count per_school_counts = school_data_complete["school_name"].value_counts() # Calculate the total school budget and per capita spending # per_school_budget = school_data_complete.groupby(["school_name"]).mean()["budget"] per_school_budget = school_data_complete.groupby(["school_name"]).mean()["budget"] per_school_capita = per_school_budget / per_school_counts # Calculate the average test scores per_school_math = school_data_complete.groupby(["school_name"]).mean()["math_score"] per_school_reading = school_data_complete.groupby(["school_name"]).mean()["reading_score"] # Calculate the passing scores by creating a filtered data frame school_passing_math = school_data_complete[(school_data_complete["math_score"] >= 70)] school_passing_reading = school_data_complete[(school_data_complete["reading_score"] >= 70)] per_school_passing_math = 
school_passing_math.groupby(["school_name"]).count()["student_name"] / per_school_counts * 100 per_school_passing_reading = school_passing_reading.groupby(["school_name"]).count()["student_name"] / per_school_counts * 100 overall_passing_rate = (per_school_passing_math + per_school_passing_reading) / 2 # Convert to data frame per_school_summary = pd.DataFrame({"School Type": school_types, "Total Students": per_school_counts, "Total School Budget": per_school_budget, "Per Student Budget": per_school_capita, "Average Math Score": per_school_math, "Average Reading Score": per_school_reading, "% Passing Math": per_school_passing_math, "% Passing Reading": per_school_passing_reading, "% Overall Passing Rate": overall_passing_rate}) # Minor data munging per_school_summary = per_school_summary[["School Type", "Total Students", "Total School Budget", "Per Student Budget", "Average Math Score", "Average Reading Score", "% Passing Math", "% Passing Reading", "% Overall Passing Rate"]] per_school_summary["Total School Budget"] = per_school_summary["Total School Budget"].map("${:,.2f}".format) per_school_summary["Per Student Budget"] = per_school_summary["Per Student Budget"].map("${:,.2f}".format) # Display the data frame per_school_summary ``` ## Top Performing Schools (By Passing Rate) ``` # Sort and show top five schools top_schools = per_school_summary.sort_values(["% Overall Passing Rate"], ascending=False) top_schools.head(5) ``` ## Bottom Performing Schools (By Passing Rate) ``` # Sort and show bottom five schools bottom_schools = per_school_summary.sort_values(["% Overall Passing Rate"], ascending=True) bottom_schools.head(5) ``` ## Math Scores by Grade ``` # Create data series of scores by grade levels using conditionals ninth_graders = school_data_complete[(school_data_complete["grade"] == "9th")] tenth_graders = school_data_complete[(school_data_complete["grade"] == "10th")] eleventh_graders = school_data_complete[(school_data_complete["grade"] == "11th")] 
twelfth_graders = school_data_complete[(school_data_complete["grade"] == "12th")] # Group each by school name ninth_graders_scores = ninth_graders.groupby(["school_name"]).mean()["math_score"] tenth_graders_scores = tenth_graders.groupby(["school_name"]).mean()["math_score"] eleventh_graders_scores = eleventh_graders.groupby(["school_name"]).mean()["math_score"] twelfth_graders_scores = twelfth_graders.groupby(["school_name"]).mean()["math_score"] # Combine series into single data frame scores_by_grade = pd.DataFrame({"9th": ninth_graders_scores, "10th": tenth_graders_scores, "11th": eleventh_graders_scores, "12th": twelfth_graders_scores}) # Minor data munging scores_by_grade = scores_by_grade[["9th", "10th", "11th", "12th"]] scores_by_grade.index.name = None # Display the data frame scores_by_grade ``` ## Reading Score by Grade ``` # Create data series of scores by grade levels using conditionals ninth_graders = school_data_complete[(school_data_complete["grade"] == "9th")] tenth_graders = school_data_complete[(school_data_complete["grade"] == "10th")] eleventh_graders = school_data_complete[(school_data_complete["grade"] == "11th")] twelfth_graders = school_data_complete[(school_data_complete["grade"] == "12th")] # Group each by school name ninth_graders_scores = ninth_graders.groupby(["school_name"]).mean()["reading_score"] tenth_graders_scores = tenth_graders.groupby(["school_name"]).mean()["reading_score"] eleventh_graders_scores = eleventh_graders.groupby(["school_name"]).mean()["reading_score"] twelfth_graders_scores = twelfth_graders.groupby(["school_name"]).mean()["reading_score"] # Combine series into single data frame scores_by_grade = pd.DataFrame({"9th": ninth_graders_scores, "10th": tenth_graders_scores, "11th": eleventh_graders_scores, "12th": twelfth_graders_scores}) # Minor data munging scores_by_grade = scores_by_grade[["9th", "10th", "11th", "12th"]] scores_by_grade.index.name = None # Display the data frame scores_by_grade ``` ## Scores by 
School Spending ``` # Establish the bins spending_bins = [0, 585, 615, 645, 675] group_names = ["<$585", "$585-615", "$615-645", "$645-675"] # Categorize the spending based on the bins per_school_summary["Spending Ranges (Per Student)"] = pd.cut(per_school_capita, spending_bins, labels=group_names) spending_math_scores = per_school_summary.groupby(["Spending Ranges (Per Student)"]).mean()["Average Math Score"] spending_reading_scores = per_school_summary.groupby(["Spending Ranges (Per Student)"]).mean()["Average Reading Score"] spending_passing_math = per_school_summary.groupby(["Spending Ranges (Per Student)"]).mean()["% Passing Math"] spending_passing_reading = per_school_summary.groupby(["Spending Ranges (Per Student)"]).mean()["% Passing Reading"] overall_passing_rate = (spending_passing_math + spending_passing_reading) / 2 # Assemble into data frame spending_summary = pd.DataFrame({"Average Math Score" : spending_math_scores, "Average Reading Score": spending_reading_scores, "% Passing Math": spending_passing_math, "% Passing Reading": spending_passing_reading, "% Overall Passing Rate": overall_passing_rate}) # Minor data munging spending_summary = spending_summary[["Average Math Score", "Average Reading Score", "% Passing Math", "% Passing Reading", "% Overall Passing Rate"]] # Display results spending_summary ``` ## Scores by School Size ``` # Establish the bins size_bins = [0, 1000, 2000, 5000] group_names = ["Small (<1000)", "Medium (1000-2000)", "Large (2000-5000)"] # Categorize the spending based on the bins per_school_summary["School Size"] = pd.cut(per_school_summary["Total Students"], size_bins, labels=group_names) # Calculate the scores based on bins size_math_scores = per_school_summary.groupby(["School Size"]).mean()["Average Math Score"] size_reading_scores = per_school_summary.groupby(["School Size"]).mean()["Average Reading Score"] size_passing_math = per_school_summary.groupby(["School Size"]).mean()["% Passing Math"] size_passing_reading = 
per_school_summary.groupby(["School Size"]).mean()["% Passing Reading"] overall_passing_rate = (size_passing_math + size_passing_reading) / 2 # Assemble into data frame size_summary = pd.DataFrame({"Average Math Score" : size_math_scores, "Average Reading Score": size_reading_scores, "% Passing Math": size_passing_math, "% Passing Reading": size_passing_reading, "% Overall Passing Rate": overall_passing_rate}) # Minor data munging size_summary = size_summary[["Average Math Score", "Average Reading Score", "% Passing Math", "% Passing Reading", "% Overall Passing Rate"]] # Display results size_summary ``` ## Scores by School Type ``` # Type | Average Math Score | Average Reading Score | % Passing Math | % Passing Reading | % Overall Passing Rate type_math_scores = per_school_summary.groupby(["School Type"]).mean()["Average Math Score"] type_reading_scores = per_school_summary.groupby(["School Type"]).mean()["Average Reading Score"] type_passing_math = per_school_summary.groupby(["School Type"]).mean()["% Passing Math"] type_passing_reading = per_school_summary.groupby(["School Type"]).mean()["% Passing Reading"] overall_passing_rate = (type_passing_math + type_passing_reading) / 2 # Assemble into data frame type_summary = pd.DataFrame({"Average Math Score" : type_math_scores, "Average Reading Score": type_reading_scores, "% Passing Math": type_passing_math, "% Passing Reading": type_passing_reading, "% Overall Passing Rate": overall_passing_rate}) # Minor data munging type_summary = type_summary[["Average Math Score", "Average Reading Score", "% Passing Math", "% Passing Reading", "% Overall Passing Rate"]] # Display results type_summary ```
github_jupyter
### Combined-Data-PreProcessing 1. **Collecting the data -** data consists of budget text documents in the form of PDF files obtained from the following organizations: * [Guilford County](https://www.guilfordcountync.gov/our-county/budget-management-evaluation) * [Durham County](https://www.dconc.gov/government/departments-a-e/budget-management-services) * [City of Durham](https://durhamnc.gov/199/Budget-Management-Services) * [City of Charlotte](https://charlottenc.gov/budget/Pages/default.aspx) * [Mecklenburg County](https://www.mecknc.gov/CountyManagersOffice/OMB/Pages/Home.aspx) * [Wake County](http://www.wakegov.com/budget/Pages/default.aspx) * [City of Raleigh](https://www.raleighnc.gov/home/content/Departments/Articles/BudgetManagement.html) After the PDF files are collected, they are compressed to reduce the their sizes. Then, the files are tokenized, and converted into CSV files using an app developed by project mentor: **[Jason Jones](https://www.linkedin.com/in/jones-jason-adam/),** **click [here](https://jason-jones.shinyapps.io/Emotionizer/) for the App** 2. **Cleaning the data -** performing some popular text pre-processing techniques 3. 
**Organizing the data -** organizing the cleaned data into a way that is easy to input into other algorithms ``` import os import glob import nltk import pandas as pd import numpy as np # Change the current directory to read the data os.chdir(r"C:\Users\Sultan\Desktop\data\PreprocessedData") ``` ### 1- Obtaining the data #### Transforming the csv files into dataframes ``` FY13_df = pd.read_csv(r'DataFY13.csv', engine='python') FY14_df = pd.read_csv(r'DataFY14.csv', engine='python') FY15_df = pd.read_csv(r'DataFY15.csv', engine='python') FY16_df = pd.read_csv(r'DataFY16.csv', engine='python') FY17_df = pd.read_csv(r'DataFY17.csv', engine='python') FY18_df = pd.read_csv(r'DataFY18.csv', engine='python') FY19_df = pd.read_csv(r'DataFY19.csv', engine='python') FY20_df = pd.read_csv(r'DataFY20.csv', engine='python') # Combine all dataframes into a single dataframe using concat() function # Row lables are adjusted automaticlly by passing ignore_index=True df = pd.concat([FY13_df, FY14_df, FY15_df, FY16_df, FY17_df, FY18_df, FY19_df, FY20_df], ignore_index=True) df.head() ``` ### 2- Cleaning the Data ``` # listing columns in data frame list(df) ``` #### Dropping and reordering columns ``` # delete columns using the columns parameter of drop df = df.drop(columns="page_number") df.head() ``` #### Removing stop words ``` # Import stop words from nltk from nltk.corpus import stopwords # Define variable stop stop = stopwords.words('english') df['word'] = df['word'].apply(lambda w: " ".join(w for w in w.split() if w not in stop)) df.head(60) ``` #### Removing punctuations ``` df['word'] = df['word'].str.replace('[^\w\s]','') df.head(60) ``` #### Removing short words ``` df['word'] = df['word'].apply(lambda x: ' '.join([x for x in x.split() if len(x) > 3])) df.head(60) ``` #### Handling missing text data ``` # Replace any empty strings in the 'word' column with np.nan objects df['word'].replace('', np.nan, inplace=True) # Drop all NaN values df.dropna(subset=['word'], 
inplace=True) df.head(20) ``` #### Lowercasing ``` df['word'] = df['word'].str.lower() df.head() ``` ### 2- Organizing the Data We already created a corpus in an earlier step. The definition of a corpus is a collection of texts, and they are all put together neatly in a pandas dataframe here. ``` # Let's take a look at our dataframe df.head(60) ``` #### Dataframe to one single and clean csv file ``` # Change the dirctory for file to be exported to the proper folder os.chdir(r"C:\Users\Sultan\Desktop\data\PreprocessedData") # Export dataframe to csv df.to_csv(r"CombinedData.csv", index=False, encoding='utf-8-sig') ```
github_jupyter
# 可编程直流电供应器 62000H 操作与编程手册阅读笔记 ## 作用 - 作为一个直流电源 它可以模拟出太阳能电池板所产生的 I-V 曲线波形,在 Programming Mode 下还提供了编辑波形的途径。 > I-V Curve,也就是 I-V 曲线是太阳能电池板的一个重要参量,也叫做伏安特性曲线,可以反映出电源的性能好坏。 - 提供双回路电压控制 所谓双回路电压控制,即是在同一个电源中引出两条同等电压的线路,在其中一条线路故障时,另一条线路会立即启动,并投入运行。此设备还可以支持自定义在切换线路时,输出电压、电流的上升速率(Slew Rate)。 > 和双电源电路不同。双电源电路虽然也是引出两根线路,在出现问题时自动切换线路,但两路的电力来源是不同的。但这里的双回路控制不一定来自两个不同电源,甚至可能是来自同一个电源。 - 大功率 输出功率最大可以达到 15kW。 - 支持远程控制 可以通过 GPIB/Ethernet (局域网控制)、USB (通用串行总线接口)或者 APG 等方式进行设备的控制。 ## 操作 ### 操作面板 #### 前面板 前面板的通用部分有: - DISPLAY 显示屏 用于显示输入的数字型结果 - 0 - 9 数字键和 . 小数点键 用于输入通用的数字 - ENTER 键 用来确认之前输入或是滚轮滚出的的电压或电流结果。 - DEL 键 用来删除之前输入的数值。 - ROTARY 电流/电压旋钮 通过此二旋钮可以进行编程。 - 主电源开关 顾名思义。 关于电源电压的设定按钮: - VOLT 键 按下之后,才可以通过数字键盘或滚轮来输入设定的电压值。 - CURR 键 按下之后,才可以通过数字键盘或滚轮来输入设定的电流值。 设定波形的按钮: - PROG 键 按下之后,会被带到 Program Function Page 页面来进行波形的编辑。 其他按钮: - EXIT 键 按下之后会被带到上一层画面。在 Program Function Page 下的更改内容不会被储存。 - LOCK 键 在非锁定状态下,按下 LOCK 键会让设备进入 LOCK 模式。在 LOCK 模式下,所有按键和旋钮都将无效。 长按 LOCK 键退出 LOCK 模式。 - ON/OFF 键 按 ON/OFF 键在设备的开启和关闭之前切换。当设备处于 OFF 模式下时,不会对外输出电流或电压。 - CONF 键 按下以进入 Config Choose Page,在这个页面下可以对设备进行配置。 - SAVE 键 在 Program Function Page 页面下的更改内容必须通过按下 SAVE 键来手动保存。 状态灯: - SLAVE 机种灯号 用于指示当前设备的状态。 绿灯亮代表 POWER ON,黄灯亮代表资料传输中或是通讯处于正常状态,红灯亮代表产生了异常。 #### 后面板 后面板上主要有以下接口: - RS-232 / RS-485(1) 9 针 D 型接口,用于和远程计算机进行控制命令的传输。 - ANALOG INTERFACE 信号连接端口(2) 25 针接口,用于 APG 的输入、输出和系统状态传输。 - USB 连接端口(4) 远端控制器可以由此处连接到计算机以供操作。 - GPIB / Ethernet 端口(8) 连接到局域网以便用于远程操作。 - AC 电源连接端(9) 用交流电源给设备供电。 - 风扇通道出口(11) 散热使用,不可堵塞。 ### 安装 安装时需要进行的操作主要有: - 拆卸固定架(62020H-150S 无固定架) 如不拆除可能导致进风量不足,进而导致损坏设备。 - 接入电源 1. 拆下交流电源输入端口的安全壳。 2. 将电源线端 1cm 部分去皮裸露,使用 O 型端子压接。 (62020H-150S 机种裸露部分需要加锡。) 3. 使用十字螺丝刀将压接好的电源线和输入端口锁紧。 (建议的扭力在 30~40 kg-cm。) 4. 
将安全壳外盖上的卡榫锁紧,并锁上安全外盖。 - 接地 任何情况下都应该将设备的金属外壳接地。 ### 连线方式 由于线电阻的损耗,设备输出的电压不一定为机器设定的电压。因此需要回测输出的电压值(使用 Remote Sense 端口来感知),并对这部分电压进行补偿(最多 4%)。 不可以把 Remote Sense 接反,否则会带来不正确的反馈值,可能导致炸机(x) #### 操作 - 接上电源并按下前面板的电源开关后,直流电源供应器就会开始自检并准备启动。 - 自检通过后会进入 MAIN PAGE,会显示默认的 0.0000V / 0.0000A 输出。 #### 设定电压、电流 - 方式 1 - 按下 VOLT 键 - 利用 0~9 和小数点键输入所需要设定的电压值 - 按 ENTER 键完成设定 - 方式 2 - 按下 VOLT 键 - 转动 VOLTAGE 旋钮,直到显示数字达到需要的值 - 按 ENTER 键完成设定 - 方式 3 - 按下 VOLT 键 - 利用上下左右方向键定位到不同的位数,转动旋钮来改变某一位的值 - 按 ENTER 键完成设定 设定电流的方式类似。只需要改为按 CURR 键,转动 CURRENT 旋钮即可。 #### 系统设定(SYSTEM SETUP) - 设定 APG(Analog Programming Interface) APG 可以执行下面的功能:利用类比信号来控制面板的设定值;或是利用类比信号来表示面板的测量值。 前者在系统设定中表示为 APG VSET/APG ISET(分别对应电压和电流) 后者在系统设定中表示为 APG VMEAS / APG IMEAS。 - 这四个参数可以按照以下方式进行编程: - NONE:表示不用编程功能 - Vref(0~5 V)表示使用外部电压源作为依据。范围为 0 到 5 V。 - Vref(0~10V)同上。只是范围更宽 - Iref(4~20mA)表示使用外部电流源作为依据。 - Rref(0~5kOhm)表示使用外部电阻值作为依据。 (注意:MEAS 值不可以使用 Rref 进行编程。) 编程的结果就是:机器的输出会以输入的编程值按比例缩放,在 0~600V 和 0~25A 之间按比例输出。 - 编程完成之后,按 ENTER 键确认后按 EXIT 键回到主屏幕。 #### 输出设定 - 设定 VOLTAGE LIMIT SETTING - V LIMIT 存在两个参数:MAX 和 MIN。在正确设定的情况下,输出的电压值会在 MAX 和 MIN 两值中波动。
github_jupyter
def get_paths_csv(base_dir, name_dict, output_csv):
    """Write a CSV listing, per subject id, the file path for each image type.

    Args:
        base_dir: Directory whose immediate entries are subject-id folders.
        name_dict: Maps an image-type column name to a list of filename
            substrings identifying that type (e.g. {'t1': ['t1.nii.gz']}).
        output_csv: Path of the CSV file to write.
    """
    def _get_files(path):
        # Recursively collect every file path under `path`.
        files_list = []
        for root, _, files in os.walk(path, topdown=False):
            for name in files:
                files_list.append(os.path.join(root, name))
        return files_list

    cols = ['id'] + list(name_dict.keys())
    rows = []
    for subject_id in os.listdir(base_dir):
        # Fresh dict per subject: reusing one dict (as before) made a subject
        # with a missing image type silently inherit the previous subject's path.
        row_dict = dict.fromkeys(cols)
        row_dict['id'] = subject_id
        for file in _get_files(os.path.join(base_dir, subject_id)):
            for img_type, img_strings in name_dict.items():
                if any(s in file for s in img_strings):
                    row_dict[img_type] = file
        rows.append(row_dict)

    # DataFrame.append was removed in pandas 2.0; build all rows in one call.
    pd.DataFrame(rows, columns=cols).to_csv(output_csv, index=False)
def ReadMaskSITK(path, classes):
    """Load a segmentation mask and return it one-hot encoded.

    Args:
        path: Path to a mask image readable by SimpleITK.
        classes: Sequence of label values; one output channel per value.

    Returns:
        Array of shape mask.shape + (len(classes),) where channel i holds
        the map (as float) of voxels equal to classes[i].
    """
    raw = sitk.GetArrayFromImage(sitk.ReadImage(path))
    onehot = np.empty((*raw.shape, len(classes)))
    for channel, label in enumerate(classes):
        onehot[..., channel] = raw == label
    return onehot
# Name the slices and write them to disk slice_name = current_pat['id'] + '_' + str(k) + '.npy' img_slice_name = image_dest + slice_name mask_binary_slice_name = mask_dest + slice_name np.save(img_slice_name, img_slice) np.save(mask_binary_slice_name, mask_binary_slice) # Track slices with output dataframe output_df = output_df.append({'id': current_pat['id'], 'image': img_slice_name, 'mask': mask_binary_slice_name}, ignore_index = True) # Save dataframe to .csv and use the .csv for training the BraTS model output_df.to_csv(output_csv, index = False) ############## END OF FUNCTION ############## ``` ### Create file path CSVs for BraTS and NFBS datasets ``` ################# Create binary masks for BraTS ################# # Change this to the appropriate folder on your system brats_base_dir = '/rsrch1/ip/aecelaya/data/brats_2020/raw/train/' binarize_brats(brats_base_dir) ################# Create CSV with BraTS file paths ################# brats_names_dict = {'mask': ['seg_binary.nii.gz'], 't1': ['t1.nii.gz'], 't2': ['t2.nii.gz'], 'tc': ['t1ce.nii.gz'], 'fl': ['flair.nii.gz']} brats_output_csv = 'brats_paths.csv' get_paths_csv(brats_base_dir, brats_names_dict, brats_output_csv) ################# Create CSV with NFBS file paths ################# nfbs_names_dict = {'mask': ['brainmask.nii.gz'], 't1': ['T1w.nii.gz']} # Change this to the appropriate folder on your system nfbs_base_dir = '/rsrch1/ip/aecelaya/data/nfbs/raw/' nfbs_output_csv = 'nfbs_paths.csv' get_paths_csv(nfbs_base_dir, nfbs_names_dict, nfbs_output_csv) ``` ### Preprocess BraTS and NFBS and write slices to disk ``` ################# Preprocess and write BraTS slices to disk ################# brats_input_csv = 'brats_paths.csv' # Change these to the appropriate folder on your system brats_image_dest = '/rsrch1/ip/aecelaya/github/NecrosisRecurrence/pocketnet/brats/test/images/' brats_mask_dest = '/rsrch1/ip/aecelaya/github/NecrosisRecurrence/pocketnet/brats/test/masks/' brats_output_csv = 
'brats_slices_paths.csv' write_slices(brats_input_csv, brats_image_dest, brats_mask_dest, brats_output_csv, image_dims = (155, 240, 240)) ################# Preprocess and write NFBS slices to disk ################# nfbs_input_csv = 'nfbs_paths.csv' # Change these to the appropriate folder on your system nfbs_image_dest = '/rsrch1/ip/aecelaya/github/NecrosisRecurrence/pocketnet/brats/test2/images/' nfbs_mask_dest = '/rsrch1/ip/aecelaya/github/NecrosisRecurrence/pocketnet/brats/test2/masks/' nfbs_output_csv = 'nfbs_slices_paths.csv' write_slices(nfbs_input_csv, nfbs_image_dest, nfbs_mask_dest, nfbs_output_csv, image_dims = (192, 256, 256)) ``` ### Clean up file names for COVIDx8B ``` ''' Clean up the COVIDx dataset. There are a few glitches in it. This script corrects them. 1) Some files in the COVIDx training set are compressed (i.e., end with .gz). Keras can't read zipped files with its native image data generators. This script goes through each file and checks to see if its compressed and unzips it if it is. 2) The original train.csv file that comes with the COVIDx dataset has incorrect file names for rows 725 - 1667. These rows only contian numbers and not the name of an image. For example, row 725 has the entry 1 but it should be COVID1.png. Before running this, please change the file paths in this code to match your system. 
def get_files(dir_name):
    """Return the paths of every file found under *dir_name*, recursively."""
    collected = []
    for dirpath, _dirnames, filenames in os.walk(dir_name):
        for filename in filenames:
            collected.append(os.path.join(dirpath, filename))
    return collected
github_jupyter
# Koster data to excel The following scripts are set up to retrieve the annotations, comments and tags from the Koster seafloor observatory and translate them to excel-friendly format. # Requirements ### Install required packages We use the "panoptes_client" package to communicate with Zooniverse. If you don't have it installed, run the command below. ``` !pip install panoptes_client ``` ### Load required libraries ``` import io import zipfile import json import gzip import getpass import pandas as pd import numpy as np from google.colab import drive from datetime import date from panoptes_client import ( SubjectSet, Subject, Project, Panoptes, ) ``` ### Connect to Zooniverse ``` # Your user name and password for Zooniverse. zoo_user = getpass.getpass('Enter your Zooniverse user') zoo_pass = getpass.getpass('Enter your Zooniverse password') # Connect to Zooniverse with your username and password auth = Panoptes.connect(username=zoo_user, password=zoo_pass) if not auth.logged_in: raise AuthenticationError("Your credentials are invalid. 
def extract_metadata(subj_df):
    """Split the JSON 'metadata' column out of a subjects dataframe.

    Returns a tuple (subjects_without_metadata, flattened_metadata) whose
    rows are aligned positionally (index reset to 0..n-1).
    """
    # Re-number rows so the metadata frame lines up positionally; the second
    # reset_index() adds a temporary 'index' column that is dropped below.
    renumbered = subj_df.reset_index(drop=True).reset_index()
    parsed = renumbered.metadata.apply(json.loads)
    flattened = pd.json_normalize(parsed)
    stripped = renumbered.drop(columns=["metadata", "index"])
    return stripped, flattened
.reset_index() ) # Specify the type of subject man_clips_df["subject_type"] = "clip" # Extract metadata from manually uploaded clips man_clips_df, man_clips_meta = extract_metadata(man_clips_df) # Combine metadata info with the subjects df man_clips_df = pd.concat([man_clips_df, man_clips_meta], axis=1) # Select only relevant columns man_clips_df = man_clips_df[ ["subject_id", "retired_at", "subject_type"] ] # Combine all uploaded subjects subjects = pd.merge(man_clips_df, auto_subjects_df, how="outer") ``` # Explore workflow numbers and names ``` # Get workflow information from Zooniverse w_export = project.get_export("workflows") # Save the response as pandas data frame workflow_export = pd.read_csv( io.StringIO(w_export.content.decode("utf-8")), usecols=[ "workflow_id", "display_name", "version", "tasks", ], ) # Create a table of the different workflows and the most recent version of each workflow workflow_export.groupby(["workflow_id", "display_name"])["version"].max() ``` # Download Zooniverse classifications information ``` # Get classifications from Zooniverse export = project.get_export("classifications") # Save the response as pandas data frame class_df = pd.read_csv( io.StringIO(export.content.decode("utf-8")), usecols=[ "subject_ids", "classification_id", "workflow_id", "workflow_version", "annotations", "created_at", "user_name", ], ) ``` ## Specify the video and frame workflows ``` workflow_clip = 11767 workflow_clip_version = 227 workflow_frame = 12852 workflow_frame_version = 21.85 #Should this be 21.43? 
``` ### Format video annotations ``` # Filter clip classifications class_clip = class_df[ (class_df.workflow_id >= workflow_clip) & (class_df.workflow_version >= workflow_clip_version) ].reset_index() # Create an empty list rows_list = [] # Loop through each classification submitted by the users for index, row in class_clip.iterrows(): # Load annotations as json format annotations = json.loads(row["annotations"]) # Select the information from the species identification task for ann_i in annotations: if ann_i["task"] == "T4": # Select each species annotated and flatten the relevant answers for value_i in ann_i["value"]: choice_i = {} # If choice = 'nothing here', set follow-up answers to blank if value_i["choice"] == "NOTHINGHERE": f_time = "" inds = "" # If choice = species, flatten follow-up answers else: answers = value_i["answers"] for k in answers.keys(): if "FIRSTTIME" in k: f_time = answers[k].replace("S", "") if "INDIVIDUAL" in k: inds = answers[k] # Save the species of choice, class and subject id choice_i.update( { "classification_id": row["classification_id"], "label": value_i["choice"], "first_seen": f_time, "how_many": inds, } ) rows_list.append(choice_i) # Create a data frame with annotations as rows class_clips_df = pd.DataFrame( rows_list, columns=["classification_id", "label", "first_seen", "how_many"] ) # Specify the type of columns of the df class_clips_df["how_many"] = pd.to_numeric(class_clips_df["how_many"]) class_clips_df["first_seen"] = pd.to_numeric(class_clips_df["first_seen"]) # Add subject id to each annotation class_clips_df = pd.merge( class_clips_df, class_clip.drop(columns=["annotations"]), how="left", on="classification_id", ) ``` ## Format frame annotations ``` # Filter frame classifications class_frame = class_df[ (class_df.workflow_id >= workflow_frame) & (class_df.workflow_version >= workflow_frame_version) ].reset_index() # Create an empty list rows_list = [] # Loop through each classification submitted by the users for index, 
row in class_frame.iterrows(): # Load annotations as json format annotations = json.loads(row["annotations"]) # Select the information from each annotation for ann_i in annotations: choice_i = {} if not ann_i["value"]: # Save the annotation and class id choice_i.update( { "classification_id": row["classification_id"], "label": "no_coral", } ) else: # Save the annotation and class id choice_i.update( { "classification_id": row["classification_id"], "label": "coral", } ) rows_list.append(choice_i) # Create a data frame with annotations as rows class_frame_df = pd.DataFrame( rows_list, columns=["classification_id", "label"] ) # Add subject id to each annotation class_frame_df = pd.merge( class_frame_df, class_frame.drop(columns=["annotations"]), how="left", on="classification_id", ) ``` ## Combine classifications and subject information ``` # Combine video and frame classifications annot_df = pd.merge(class_clips_df, class_frame_df, how="outer") # Drop workflow and n_users columns annot_df = annot_df.drop(columns=["workflow_id", "workflow_version"]) # Rename the subject_id field annot_df = annot_df.rename( columns={"subject_ids": "subject_id"} ) # Add the subject information annot_df = pd.merge( annot_df, subjects, how="left", on="subject_id", ) # Select classifications with subject type information annot_df = annot_df[annot_df.subject_type.notnull()] ``` ## Save classifications as csv file ``` annot_df.to_csv('annotations_data.csv') ``` # Download Zooniverse comments ``` # Get comments from Zooniverse export = project.get_export('talk_comments') export = gzip.decompress(export.content) # Save the response as pandas data frame data = json.loads(export.decode('utf-8')[export.decode('utf-8').find('['):export.decode('utf-8').rfind(']')+1]) comment_df = pd.DataFrame(data)[[ "board_title", "comment_body", "comment_focus_id", "comment_id", "discussion_title", "comment_created_at", "comment_user_login", ]] ``` ## Combine comments and subject Information ``` # Rename the 
subject_id field comment_df = comment_df.rename( columns={"comment_focus_id": "subject_id"} ) # Add the subject information comment_df = pd.merge( comment_df, subjects, how="left", on="subject_id", ) # Remove comments from the Zooniverse team (i.e. non-user comments) comment_df = comment_df.dropna(subset=['subject_id']) ``` # Download Zooniverse tags ``` # Get comments from Zooniverse export = project.get_export('talk_tags') export = gzip.decompress(export.content) # Save the response as pandas data frame data = json.loads(export.decode('utf-8')[export.decode('utf-8').find('['):export.decode('utf-8').rfind(']')+1]) tag_df = pd.DataFrame(data)[["name", "comment_id"]] ``` ## Combine tags and comments information ``` # Add the comments information comment_df = pd.merge( comment_df, tag_df, how="left", on="comment_id", ) ``` ## Save comments as csv file ``` comment_df.to_csv('comments_data.csv') ``` Find out the period when the clip and frame workflows were active ``` # Filter only for subjects that are frames annot_frames = annot_df[(annot_df.subject_type == "frame")] # Select the first frame annotation first_day = annot_frames['created_at'].min() # Date when the last frame was retired last_day = annot_frames['retired_at'].max() # May 16-19 classifications (old subject set) class_df[(class_df.created_at < '2020-05-20') & (class_df.workflow_id >= 12852) & (class_df.workflow_version >= 21.43)]['created_at'].min() class_df[(class_df.created_at < '2020-05-20') & (class_df.workflow_id >= 12852) & (class_df.workflow_version >= 21.43)]['created_at'].max() # New subject set first_day last_day # END ```
github_jupyter
## Smithsonian OpenAccess Collection Data API Let's use requests to scrape some data from an API endpoint. In this case, we can use the Smithsonian's [Open Access API](https://edan.si.edu/openaccess/apidocs/#api-_), which is a REST API that responds to HTTP requests. See the documentation at [https://edan.si.edu/openaccess/apidocs/#api-_footer](https://edan.si.edu/openaccess/apidocs/#api-_footer) The documentation for requests can be found here: http://docs.python-requests.org/en/master/ The endpoint for the search query of the "content" API, which provides information for individual items is `https://api.si.edu/openaccess/api/v1.0/content/:id`. To use the Smithsonian APIs, you will need an API key from the data.gov API key generator. Register with [https://api.data.gov/signup/](https://api.data.gov/signup/) to get a key. ``` import requests statsEndpoint = 'https://api.si.edu/openaccess/api/v1.0/stats' API_Key = 'S26CqhCprwb819ULBJQG62Le5ySrxuCV5L3Ktiov' ``` The content API fetches metadata about objects in the Smithsonian's collections using the ID or URL of the object. For example, in this case to get information about an album in the Folkways Records Collection, we will use the id `edanmdm:siris_arc_231998`. To pass in the parameters, we can use a dictionary! Let's try using `params` ``` key = { 'api_key': API_Key } ``` First, let's try a basic call to the stats API, to see if things are working: ``` r = requests.get(statsEndpoint, params = key) print('You requested:',r.url) print('HTTP server response code:',r.status_code) print('HTTP response headers',r.headers) # notice that the headers method returns a dictionary, too? # We could ask what sort of content it's returning: print('\nYour request has this content type:\n',r.headers['content-type']) ``` So the request has returned a json object! Access the response using the `.text` method. ``` r.text[:500] type(r.text) ``` #### API Call question We want to make a request to the Smithsonian API. 
Can you fill in the following & explain the missing elements? ``` https://api.si.edu/openaccess/api/v1.0/content/:_____ ``` What other items might you use after the `?`... ## Object information Now, let's try using the "content" API to get information about individual objects: ``` contentEndpoint = 'https://api.si.edu/openaccess/api/v1.0/content/' object_id = 'edanmdm:siris_arc_231998' # Smithsonian Folkways Music of Hungary parameters = { 'api_key' : API_Key } requestURL = contentEndpoint + object_id r = requests.get(requestURL, params = parameters) print('You requested:',r.url) print('HTTP server response code:',r.status_code) print('HTTP response headers',r.headers) # notice that the headers method returns a dictionary, too? # We could ask what sort of content it's returning: print('\nYour request has this content type:\n',r.headers['content-type']) ``` Take a look at the response information: ``` r.text[:500] ``` Use the built-in `.json()` decoder in requests ``` object_json = r.json() for element in object_json['response']: print(element) object = object_json['response'] for k, v in object.items(): print(k,':',v) ``` #### Resources * [Real Python working with JSON data](https://realpython.com/python-json/) * [Python json module documentation](https://docs.python.org/3/library/json.html) ### Parsing the Data from the API using json module Now, we can get the response, let's save to a file. To do this, use the `json` module. ``` import json data = json.loads(r.text) # what are the keys? for element in data: print(element) for key, val in data['response'].items(): print(key,':',val) print(len(data['response'])) object_id = data['response']['id'] print(object_id) ``` Compare to the online display. See https://collections.si.edu/search/detail/edanmdm:siris_arc_231998 Is it possible to extract each result into its own file? 
``` # block testing an extaction of each result into a separate file data = json.loads(r.text) #grab the images into a list objectInfo = data['response'] print(len(objectInfo)) ## this is from Python 105a, TODO update fname = 'kitten-result-' format = '.json' n = 0 for item in kittensList: n = n + 1 file = fname + str(n) + format # print(item) with open(file, 'w') as f: f.write(json.dumps(item))#, f, encoding='utf-8', sort_keys=True) print('wrote',file) print('wrote',n,'files!') ``` How could we extract the image URLs? ``` for key in objectInfo['content']: print(key) for info in objectInfo['content']['indexedStructured']: print(info) # doesn't seem to be a image url list ... ``` --- This section explores using a different object to uncover other properties. Namely, Alexander Graham Bell's 1885 Mary Had a Little Lamb recording done at Volta Labs (`edanmdm:nmah_852778`). ``` object_id = 'edanmdm:nmah_852778' # Alexander Graham Bell's 1885 Mary Had a Little Lamb from Volta Labs parameters request_URL = contentEndpoint + object_id r = requests.get(request_URL, params=parameters) print(r.url, '\n', r.status_code, '\n', r.headers) for element in r.json(): print(element) for element in r.json()['response']: print(element) object_info = json.loads(r.text) print(json.dumps(object_info, indent=2)) object_url = 'https://collections.si.edu/search/detail/edanmdm:nmah_852778' # see the following for information on workign with the Image Delivery Service # https://sirismm.si.edu/siris/ImageDisplay.htm # possible to find slideshows? # eg https://edan.si.edu/slideshow/viewer/?damspath=/Public_Sets/NMAH/NMAH-AC/AC0300/S01 ```
github_jupyter
``` # %matplotlib widget from util import get_path import pandas as pd import networkx as nx import numpy as np import matplotlib.pyplot as plt from extract_graph import generate_nx_graph, transform_list, generate_skeleton, generate_nx_graph_from_skeleton, from_connection_tab from node_id import whole_movement_identification, second_identification import ast from plotutil import plot_t_tp1, compress_skeleton from scipy import sparse from sparse_util import dilate, zhangSuen from realign import realign from datetime import datetime,timedelta import cv2 import imageio import scipy.io as sio plate = 13 date_init = datetime(2020,7,1,19,57) dates_datetime = [date_init+timedelta(hours=4)*i for i in range(24)] dates = [f'{0 if date.month<10 else ""}{date.month}{0 if date.day<10 else ""}{date.day}_{0 if date.hour<10 else ""}{date.hour}{0 if date.minute<10 else ""}{date.minute}' for date in dates_datetime] tabs_labeled=[] for date in dates: tabs_labeled.append(pd.read_csv(get_path(date,plate,True,extension='_full_labeled.csv'), converters={'origin_pos' : transform_list,'end_pos' : transform_list,'pixel_list' : ast.literal_eval})) from_tip_growth_pattern=[] for date in dates[:-1]: from_tip_growth_pattern.append(from_connection_tab(pd.read_csv(get_path(date,plate,True,extension='_connection.csv')))) tabs=[] for date in dates: tabs.append(pd.read_csv(get_path(date,plate,True,extension='_full_labeled_matlab.csv'), converters={'origin_pos' : transform_list,'end_pos' : transform_list,'pixel_list' : ast.literal_eval})) tabs_raw=[] for date in dates: tabs_raw.append(pd.read_csv(get_path(date,plate,True,extension='_raw_aligned_skeleton.csv'), converters={'origin_pos' : transform_list,'end_pos' : transform_list,'pixel_list' : ast.literal_eval})) for i, date in enumerate(dates): tabs_labeled[i].to_csv(f'Data/graph_{date}_{plate}_full_labeled.csv') tabs[i].to_csv(f'Data/graph_{date}_{plate}_full_labeled_matlab.csv') 
tabs_raw[i].to_csv(f'Data/graph_{date}_{plate}_raw_aligned_skeleton.csv') sio.savemat(f'Data/graph_{date}_{plate}_full_labeled.mat', {name: col.values for name, col in tabs_labeled[i].items()}) from_tip_growth_pattern_tab=[] for date in dates[:-1]: from_tip_growth_pattern_tab.append(pd.read_csv(get_path(date,plate,True,extension='_connection.csv'))) for i, date in enumerate(dates[:-1]): from_tip_growth_pattern_tab[i].to_csv(f'Data/connection_{date}_{plate}.csv') # from_tip_growth_pattern=[] # for i in range(len(from_tip_growth_pattern_tab)): # from_tip_growth_pattern.append(from_connection_tab(from_tip_growth_pattern_tab[i])) tabs_labeled=[] for date in dates: tabs_labeled.append(pd.read_csv(f'Data/graph_{date}_{plate}_full_labeled.csv', converters={'origin_pos' : transform_list,'end_pos' : transform_list,'pixel_list' : ast.literal_eval})) nx_graphs=[] poss=[] for tab in tabs_labeled: nx_graph,pos=generate_nx_graph(tab,labeled=True) nx_graphs.append(nx_graph) poss.append(pos) nx_graph_clean=[] for graph in nx_graphs: S = [graph.subgraph(c).copy() for c in nx.connected_components(graph)] len_connected=[len(nx_graph.nodes) for nx_graph in S] nx_graph_clean.append(S[np.argmax(len_connected)]) skeletons=[] for nx_graph in nx_graph_clean: skeletons.append(generate_skeleton(nx_graph,dim=(20800, 46000))) factor = 5 final_pictures = [compress_skeleton(skeletons[i],factor) for i in range(len(skeletons))] connections = [c[0] for c in from_tip_growth_pattern] growth_patterns = [c[1] for c in from_tip_growth_pattern] growths = [{tip : sum([len(branch) for branch in growth_pattern[tip]]) for tip in growth_pattern.keys()} for growth_pattern in growth_patterns] def pinpoint_anastomosis(nx_graph_tm1,nx_grapht,from_tip): anastomosis=[] origins=[] tips = [node for node in nx_graph_tm1.nodes if nx_graph_tm1.degree(node)==1] def count_neighbors_is_from_root(equ_list,nx_graph,root): count=0 for neighbor in nx_graph.neighbors(root): if neighbor in equ_list: count+=1 return(count) for 
def find_origin_tip(node, from_tip):
    """Return the first tip whose node collection contains *node*.

    Returns None when no tip's collection contains the node (matching the
    original's implicit fall-through).
    """
    matches = (tip for tip, nodes in from_tip.items() if node in nodes)
    return next(matches, None)
growth>=min_growth: number_tips[x,y]+=1 final_picture[x,y]+=growth # print(growth,beginx,endx) # for x in range(shape[0]//factor): # if x%1==0: # print(x/2070) # for y in range(shape[1]//factor): # beginx = x*factor # endx=(x+1)*factor # beginy = y*factor # endy=(y+1)*factor # tips_in_frame = [tip for tip in pixels_from_tip.keys() if (beginx<pos[tip][0]<endx) and (beginy<pos[tip][1]<endy)] # #shouls be improved, len is not a good indicator of actual length... # growth_in_frame = [len(pixels_from_tip[tip]) for tip in tips_in_frame] # final_picture[x,y]=np.mean(growth_in_frame) if per_tip: return(final_picture/(number_tips+(number_tips==0).astype(np.int)),number_tips) else: return(final_picture,number_tips) final_pictures_growth = [np.log(make_growth_picture_per_tip(growths[i],poss[i],factor=500,max_growth=4000,per_tip=True,min_growth=0)[0]+1) for i in range (len(growths))] images = [] for i,picture in enumerate(final_pictures_growth): fig = plt.figure(figsize=(14,12)) ax = fig.add_subplot(111) ax.imshow(picture) bbox_time = dict(boxstyle="square", fc="black") ax.text(0.90, 0.90, f'{4*i}h', horizontalalignment='right', verticalalignment='bottom', transform=ax.transAxes,color='white',size=10*1.5,bbox=bbox_time) plt.savefig(f'Data/video_test/growth_timestep_{i}.png') plt.close(fig) images.append(imageio.imread(f'Data/video_test/growth_timestep_{i}.png')) imageio.mimsave('Data/video_test/movie_growth.gif', images,duration=1) paths=[] i=5 for node in origins[i]: node_interest=node pos_problem=poss[i][node_interest] xbegin=pos_problem[0]-500 ybegin=pos_problem[1]-500 xend=pos_problem[0]+500 yend=pos_problem[1]+500 kernel = np.ones((5,5),np.uint8) skeleton_small1=skeletons[i][xbegin:xend,ybegin:yend] skeleton_small1=cv2.dilate(skeleton_small1.todense().astype(np.uint8),kernel,iterations = 1) skeleton_small2=skeletons[i+1][xbegin:xend,ybegin:yend] skeleton_small2=cv2.dilate(skeleton_small2.todense().astype(np.uint8),kernel,iterations = 1) path = 
f'Data/video_test/network_timestep_{i}_{node}' pipeline.paths.append(path) plot_t_tp1(origins[i],anastomosiss[i],poss[i],poss[i+1],skeleton_small1,skeleton_small2, relabel_tp1=lambda node : find_origin_tip(node,connections[i]), shift=(xbegin,ybegin), save=path,time=f't={4*i}h') images = [] for path in paths: images.append(imageio.imread(path+'.png')) imageio.mimsave(f'Data/video_test/{plate}_anastomosi_movie{i}.gif', images,duration=2) node_interest=60 pos_problem=poss[0][node_interest] xbegin=pos_problem[0]-1500 ybegin=pos_problem[1]-1500 xend=pos_problem[0]+1500 yend=pos_problem[1]+1500 skeletons_small=[] for skeleton in skeletons: skeletons_small.append(skeleton[xbegin:xend,ybegin:yend]) node_smalls=[] for i,nx_graph in enumerate(nx_graph_clean): node_smalls.append([node for node in nx_graph.nodes if (xbegin<poss[i][node][0]<xend and ybegin<poss[i][node][1]<yend and nx_graph.degree(node)>=1)]) kernel = np.ones((5,5),np.uint8) skeletons_small_dilated=[cv2.dilate(skeleton.todense().astype(np.uint8),kernel,iterations = 1) for skeleton in skeletons_small] for tp1 in range(len(growths)): plot_t_tp1(node_smalls[tp1],node_smalls[tp1],poss[tp1],poss[tp1],skeletons_small_dilated[tp1],skeletons_small_dilated[tp1],shift=(xbegin,ybegin), save=f'Data/video_test/network_timestep_{tp1}',time=f't={4*tp1}h') images = [] for t in range(len(growths)): images.append(imageio.imread(f'Data/video_test/network_timestep_{t}.png')) imageio.mimsave(f'Data/video_test/{node_interest}movie.gif', images,duration=1) node_interest=60 pos_problem=[poss[i][node_interest] for i in range(len(poss))] xbegin=[pos_problem[i][0]-1500 for i in range(len(poss))] ybegin=[pos_problem[i][1]-1500 for i in range(len(poss))] xend=[pos_problem[i][0]+1500 for i in range(len(poss))] yend=[pos_problem[i][1]+1500 for i in range(len(poss))] skeletons_small=[] for i,skeleton in enumerate(skeletons): skeletons_small.append(skeleton[xbegin[i]:xend[i],ybegin[i]:yend[i]]) node_smalls=[] for i,nx_graph in 
enumerate(nx_graph_clean): node_smalls.append([node for node in nx_graph.nodes if (xbegin[i]<poss[i][node][0]<xend[i] and ybegin[i]<poss[i][node][1]<yend[i] and nx_graph.degree(node)>=1)]) kernel = np.ones((5,5),np.uint8) skeletons_small_dilated=[cv2.dilate(skeleton.todense().astype(np.uint8),kernel,iterations = 1) for skeleton in skeletons_small] for tp1 in range(len(growths)): plot_t_tp1(node_smalls[tp1],node_smalls[tp1],poss[tp1],poss[tp1],skeletons_small_dilated[tp1],skeletons_small_dilated[tp1],shift=(xbegin[tp1],ybegin[tp1]),save=f'Data/video_test/network_timestep_{tp1}',time=f't={4*tp1}h') images = [] for t in range(len(growths)): images.append(imageio.imread(f'Data/video_test/network_timestep_{t}.png')) imageio.mimsave(f'Data/video_test/{node_interest}movie_track.gif', images,duration=1) def plot_t_tp1(node_list_t,node_list_tp1,pos_t,pos_tp1,imt,imtp1,relabel_t=lambda x:x,relabel_tp1=lambda x:x, shift=(0,0),compress=1,save='',time=None): left, width = .25, .5 bottom, height = .25, .5 right = 0.90 top = 0.90 if len(save)>=1: fig=plt.figure(figsize=(14,12)) size = 10 else: fig = plt.figure() size = 5 ax = fig.add_subplot(111) ax.imshow(imtp1, cmap='gray',interpolation='none') ax.imshow(imt, cmap='jet', alpha=0.5,interpolation='none') bbox_time = dict(boxstyle="square", fc="black") bbox_props1 = dict(boxstyle="circle", fc="grey") bbox_props2 = dict(boxstyle="circle", fc="white") for node in node_list_t: t = ax.text((pos_t[node][1]-shift[1])//compress, (pos_t[node][0]-shift[0])//compress, str(relabel_t(node)), ha="center", va="center", size=size, bbox=bbox_props1) for node in node_list_tp1: if node in pos_tp1.keys(): t = ax.text((pos_tp1[node][1]-shift[1])//compress, (pos_tp1[node][0]-shift[0])//compress, str(relabel_tp1(node)), ha="center", va="center", size=size, bbox=bbox_props2) ax.text(right, top, time, horizontalalignment='right', verticalalignment='bottom', transform=ax.transAxes,color='white',size=size*1.5,bbox=bbox_time) if len(save)>=1: 
plt.savefig(save) plt.close(fig) else: plt.show() growths = [[np.log(len(growth)+1) for growth in growth_pat.values() if len(growth)+1>=10] for growth_pat in growth_pattern] fig=plt.figure() ax = fig.add_subplot(111) ax.hist(growths,10) ```
github_jupyter
``` import dask from dask.distributed import Client import dask_jobqueue import discretize from discretize.utils import mkvc # import deepdish as dd import h5py import json import matplotlib.pyplot as plt from matplotlib import cm as cmap from matplotlib.colors import LogNorm, Normalize import numpy as np import os import pandas as pd import scipy.sparse as sp import xarray as xr import zarr import casingSimulations as casing_sim from SimPEG import maps from SimPEG.electromagnetics import time_domain as tdem from pymatsolver import Pardiso np.random.seed(29) directory = "test" if not os.path.isdir(directory): os.makedirs(directory, exist_ok=True) from matplotlib import rcParams rcParams["font.size"] = 16 nsamples = 2 # set bounds for the distributions of sigma_background_bounds = np.r_[1e-4, 1] sigma_casing_bounds = np.r_[1e4, 1e7] d_casing_bounds = np.r_[5e-2, 30e-2] t_casing_bounds = np.r_[0.5e-2, 2e-2] l_casing_bounds = np.r_[20, 4e3] # constants sigma_air = 1e-4 sigma_inside = 1 # fluid inside the casing mur_casing = 1 # permeability is the same as free space src_a = np.r_[0., 0., 0.] 
# the radius will be updated to connect to the casing src_b = np.r_[1000., 0, 0] csz = 2.5 # cell-size in the z-direction hy = np.ones(12) hy = hy*2*np.pi / hy.sum() # areas to compare data z_compare = np.linspace(-100, 0, 128) def generate_random_variables(bounds, n_samples, sig_digs=None): min_value = bounds.min() max_value = bounds.max() v = np.random.rand(n_samples) v = min_value + (v*(max_value - min_value)) if sig_digs is not None: v = np.round((v*10**(sig_digs)))/10**(sig_digs) return v log10_sigma_background_dist = generate_random_variables(np.log10(sigma_background_bounds), nsamples, 2) log10_sigma_casing_dist = generate_random_variables(np.log10(sigma_casing_bounds), nsamples, 2) d_casing_dist = generate_random_variables(d_casing_bounds, nsamples, 2) t_casing_dist = generate_random_variables(t_casing_bounds, nsamples, 2) l_casing_dist = np.r_[1000, 1000] #generate_random_variables(l_casing_bounds/csz, nsamples, 0) * csz # generate by ncells parameters = { "log10_sigma_background":log10_sigma_background_dist, "log10_sigma_casing":log10_sigma_casing_dist, "d_casing":d_casing_dist, "t_casing":t_casing_dist, "l_casing":l_casing_dist, } df = pd.DataFrame(parameters) df df.to_hdf(f"{directory}/trial_data.h5", 'data') #for key in df.keys() fig, ax = plt.subplots(1,5, figsize=(20, 4)) for i, key in enumerate(parameters.keys()): ax[i].hist(df[key]) ax[i].set_title(f"{key}".replace("_", " ")) plt.tight_layout() time_steps = [ (1e-6, 20), (1e-5, 30), (3e-5, 30), (1e-4, 40), (3e-4, 30), (1e-3, 20), (1e-2, 15) ] df2 = pd.read_hdf(f"{directory}/trial_data.h5", 'data', start=1, stop=2) df2["log10_sigma_background"] i = 0 trial_directory = f"{directory}/trial_{i}/" if not os.path.isdir(trial_directory): os.makedirs(trial_directory, exist_ok=True) cd = parameters["d_casing"][i] ct = parameters["t_casing"][i] cl = parameters["l_casing"][i] sc = 10**(parameters["log10_sigma_casing"][i]) sb = 10**(parameters["log10_sigma_background"][i]) model = 
casing_sim.model.CasingInHalfspace( directory=trial_directory, casing_d = cd - ct, # I use diameter to the center of the casing wall casing_l = cl, casing_t = ct, mur_casing = mur_casing, sigma_air = sigma_air, sigma_casing = sc, sigma_back = sb, sigma_inside = sb, src_a = src_a, src_b = src_b, timeSteps = time_steps ) model.filename = "casing.json" np.sum(model.timeSteps) sigmaA = model.sigma_casing * (model.casing_b**2 - model.casing_a**2)/model.casing_b**2 print(f"The approximate conductivity of the solid we use is {sigmaA:1.1e}") model_approx_casing = model.copy() model_approx_casing.casing_t = cd / 2. model_approx_casing.casing_d = cd - model_approx_casing.casing_t model_approx_casing.sigma_inside = sigmaA model_approx_casing.sigma_casing = sigmaA model_approx_casing.filename = "approx_casing.json" def generate_mesh(model): csx1 = model.casing_t/4 csx2 = 100 csz = 2.5 # esure padding goes sufficiently far in the x direction pad_to = 1e4 npad_x = 0 npad_z = 0 padding_x = cl padding_z = cl pfx2 = 1.5 pfz = 1.5 # csx2 = 10 while padding_x < pad_to: npad_x += 1 padding_x = cl + np.sum((csx2 * (np.ones(npad_x)*pfx2)**np.arange(1, npad_x+1))) while padding_z < pad_to: npad_z += 1 padding_z = cl + np.sum((csz * (np.ones(npad_z)*pfz)**np.arange(1, npad_z+1))) meshGen = casing_sim.mesh.CasingMeshGenerator( modelParameters = model, csx1 = csx1, csx2 = csx2, domain_x = cl, hy = hy, npadx = npad_x, npadz = npad_z, csz = csz, _ncx1 = np.ceil(cd / csx1) ) mesh = meshGen.mesh return meshGen, mesh meshGen, mesh = generate_mesh(model) # meshGen_approx, mesh_approx = meshGen, mesh meshGen_approx, mesh_approx = generate_mesh(model_approx_casing) print(model.diffusion_distance(t=0.1)) ax = mesh.plotGrid() # ax[1].set_xlim([0, 1100]) ax2 = mesh_approx.plotGrid() print(mesh.nC, mesh_approx.nC) def get_source(model, mesh, meshGen): src_theta = np.pi/2. + mesh.hy[0]/2. 
model.src_a[1] = src_theta model.src_b[1] = src_theta src_top = casing_sim.sources.TopCasingSrc( modelParameters=model, meshGenerator=meshGen, src_a=model.src_a, src_b=model.src_b, physics="TDEM", filename="top_casing", ) source_list = src_top.srcList return source_list source_list = get_source(model, mesh, meshGen) source_list_approx = get_source(model_approx_casing, mesh_approx, meshGen_approx) physprops = casing_sim.model.PhysicalProperties(modelParameters=model, meshGenerator=meshGen) physprops_approx = casing_sim.model.PhysicalProperties(modelParameters=model_approx_casing, meshGenerator=meshGen_approx) model.casing_b, model_approx_casing.casing_b fig, ax = plt.subplots(1, 2, figsize=(12, 6)) xlim = 0.5 * np.r_[-1, 1] zlim = np.r_[-model.casing_l*1.1, 10] physprops.plot_sigma(ax=ax[0], pcolorOpts={'norm':LogNorm()}) physprops_approx.plot_sigma(ax=ax[1], pcolorOpts={'norm':LogNorm()}) for a in ax: a.set_xlim(xlim) a.set_ylim(zlim) plt.tight_layout() np.save(f"{trial_directory}casing.npy", model.sigma(mesh)) np.save(f"{trial_directory}approx_casing.npy", model_approx_casing.sigma(mesh_approx)) survey = tdem.Survey(source_list) survey_approx = tdem.Survey(source_list_approx) sim = tdem.Problem3D_j(mesh=mesh, survey=survey, solver=Pardiso, time_steps=time_steps) sim_approx = tdem.Problem3D_j(mesh=mesh_approx, survey=survey_approx, solver=Pardiso, time_steps=time_steps) with open(f"{trial_directory}simulation.json", 'w') as outfile: json.dump(sim.serialize(), outfile) with open(f"{trial_directory}simulation_approx.json", 'w') as outfile: json.dump(sim_approx.serialize(), outfile) def compute_fields(model, simulation, trial_directory): import deepdish as dd import discretize import casingSimulations as casing_sim from SimPEG.electromagnetics import time_domain as tdem from SimPEG import maps from pymatsolver import Pardiso # simulation_params = dd.io.load(f"{trial_directory}simulation.h5") # print(f"{trial_directory}simulation.json") with 
open(f"{trial_directory}{simulation}.json") as f: simulation_params = json.load(f) sim = tdem.Problem3D_j.deserialize(simulation_params, trusted=True) mesh = sim.mesh sim.solver = Pardiso sim.sigmaMap=maps.IdentityMap(mesh) sim.verbose=True m = np.load(f"{trial_directory}{model}.npy") fields = sim.fields(m) f = fields[:, '{}Solution'.format(sim._fieldType), :] filename = f"{model}_fields.npy" tosave = os.path.sep.join([trial_directory, filename]) print(f"saving {tosave}") np.save(tosave, f) return tosave cluster = dask_jobqueue.SLURMCluster( cores=nsamples, processes=nsamples*2, memory=f'{120*nsamples}GB', job_cpu=1, project="m3384", job_extra = ['--constraint=haswell', '--qos=debug',], death_timeout=360, ) print(cluster.job_script()) client = Client(cluster) client # client = Client(threads_per_worker=1, n_workers=2) # client f = {} for m, sim in zip(["casing", "approx_casing"], ["simulation", "simulation_approx"]): # f[m] = compute_fields(m, trial_directory) f[m] = dask.delayed(compute_fields)(m, sim, trial_directory) cluster.scale(1) fields_files = dask.compute(f)[0] ndata = 32 ntimes = 128 xsample = np.linspace(25, 1000, ndata) zsample = np.linspace(-cl, 0, ndata) xz_grid = discretize.utils.ndgrid(xsample, np.r_[0], zsample) tsample = np.logspace(-6, -2, 128) currents = {} for m in ["casing", "approx_casing"]: currents[m] = np.load(f"{trial_directory}{m}_fields.npy") def get_matching_indices(grid="x"): vnF = getattr(mesh, f"vnF{grid}") vnF_approx = getattr(mesh_approx, f"vnF{grid}") x0 = np.ones(vnF[0], dtype=bool) x0[:vnF[0] - vnF_approx[0]] = False return np.kron(np.ones(vnF[2], dtype=bool), np.kron(np.ones(vnF[1], dtype=bool), x0)) indsFx = get_matching_indices("x") indsFy = get_matching_indices("y") indsFz = get_matching_indices("z") inds = np.hstack([indsFx, indsFy, indsFz]) # compute jd jd = currents["casing"][inds] - currents["approx_casing"] jdx = mkvc(jd[:mesh_approx.vnF[0], :]).reshape(tuple(mesh_approx.vnFx)+(sim_approx.nT+1,), order="F") jdz = 
mkvc(jd[np.sum(mesh_approx.vnF[:2]):, :]).reshape(tuple(mesh_approx.vnFz)+(sim_approx.nT+1,), order="F") # take mean in theta-dimension jdx.mean(1) jdx = jdx.mean(1) jdz = jdz.mean(1) jdxz = np.hstack([mkvc(jdx), mkvc(jdz)]) hx1a = discretize.utils.meshTensor([(meshGen.csx1, meshGen.ncx1)]) # pad to second uniform region hx1b = discretize.utils.meshTensor([(meshGen.csx1, meshGen.npadx1, meshGen.pfx1)]) # scale padding so it matches cell size properly dx1 = np.sum(hx1a)+np.sum(hx1b) dx1 = 3 #np.floor(dx1/meshGen.csx2) hx1b *= (dx1*meshGen.csx2 - np.sum(hx1a))/np.sum(hx1b) # second uniform chunk of mesh ncx2 = np.ceil((meshGen.domain_x - dx1)/meshGen.csx2) hx2a = discretize.utils.meshTensor([(meshGen.csx2, ncx2)]) # pad to infinity hx2b = discretize.utils.meshTensor([(meshGen.csx2, meshGen.npadx, meshGen.pfx2)]) hx = np.hstack([hx1a, hx1b, hx2a, hx2b]) hx1a_a = discretize.utils.meshTensor([(meshGen_approx.csx1, meshGen_approx.ncx1)]) # pad to second uniform region hx1b_a = discretize.utils.meshTensor([(meshGen_approx.csx1, meshGen_approx.npadx1, meshGen_approx.pfx1)]) # scale padding so it matches cell size properly dx1_a = np.sum(hx1a_a)+np.sum(hx1b_a) dx1_a = 3 #np.floor(dx1_a/meshGen_approx.csx2) hx1b_a *= (dx1_a*meshGen_approx.csx2 - np.sum(hx1a_a))/np.sum(hx1b_a) # second uniform chunk of mesh ncx2_a = np.ceil((meshGen_approx.domain_x - dx1_a)/meshGen_approx.csx2) hx2a_a = discretize.utils.meshTensor([(meshGen_approx.csx2, ncx2_a)]) # pad to infinity hx2b_a = discretize.utils.meshTensor([(meshGen_approx.csx2, meshGen_approx.npadx, meshGen_approx.pfx2)]) hx2 = np.hstack([hx1a_a, hx1b_a, hx2a_a, hx2b_a]) x1 = np.cumsum(np.hstack([np.r_[0], hx])) x2 = np.cumsum(np.hstack([np.r_[0], hx2])) mesh.vectorNx[mesh.vectorNx > 25] mesh_approx.vectorNx[mesh_approx.vectorNx > 25] tind = 0 print(f"{sim_approx.timeMesh.vectorNx[tind]*1e3} ms") plt.colorbar(mesh2d.plotImage( # mesh2d.aveF2CCV * currents["approx_casing"], mesh2d.aveF2CCV * np.hstack([mkvc(jdx[:, :, tind]), 
mkvc(jdz[:, :, tind])]), view="vec", vType="CCv", range_x=np.r_[25, 100], range_y=[-200, 10], pcolorOpts={"norm": LogNorm()}, clim = np.r_[1e-10, 1e2], stream_threshold=1e-10, )[0]) # build projection matrices for data mesh2d = discretize.CylMesh([mesh_approx.hx, 1, mesh_approx.hz], x0=mesh_approx.x0) Px = mesh2d.getInterpolationMat(xz_grid, 'Fx') Pz = mesh2d.getInterpolationMat(xz_grid, 'Fz') Pt = sim_approx.time_mesh.getInterpolationMat(tsample, 'N') Pxt = sp.kron(Pt, Px) Pzt = sp.kron(Pt, Pz) P = sp.vstack([Pxt, Pzt]) jdata = P * jdxz np.save(f"{trial_directory}j_difference.npy", jdata) a = np.r_[0, 0.5, 1.] a.astype(bool) # compute current inside casing ind_casing_Fz = (mesh_approx.aveFz2CC.T * model_approx_casing.ind_casing(mesh_approx)).astype(bool) I = discretize.utils.sdiag(mesh_approx.area) * currents["approx_casing"] Iz = I[mesh_approx.vnF[:2].sum():, :] Iz[~ind_casing_Fz, :] = 0 Iz = Iz.reshape(tuple(mesh_approx.vnFz) + (sim_approx.nT+1,), order="F") Iz_casing = (Iz.sum(0)).sum(0) fig, ax = plt.subplots(1, 2, figsize=(12, 6)) cm = plt.get_cmap('viridis') c_norm = LogNorm(vmin=sim_approx.timeMesh.vectorCCx[0], vmax=sim_approx.timeMesh.vectorNx[-1]) scalar_map = cmap.ScalarMappable(norm=c_norm, cmap=cm) scalar_map.set_array([]) for i in range(sim_approx.nT): ax[0].plot( mesh_approx.vectorNz, -Iz_casing[:, i], color=scalar_map.to_rgba(sim_approx.timeMesh.vectorNx[i]+1e-7) ) ax[1].semilogy( mesh_approx.vectorNz, np.abs(-Iz_casing[:, i]), color=scalar_map.to_rgba(sim_approx.timeMesh.vectorNx[i]+1e-7) ) for a in ax: a.set_xlim([5., -1.25*model.casing_l]) a.grid(which="both", color="k", lw=0.4, alpha=0.4) ax[1].set_ylim([1e-8, 1]) cb = plt.colorbar(scalar_map) cb.set_label("time (s)") plt.tight_layout() fig, ax = plt.subplots(1, 2, figsize=(12, 6)) cm = plt.get_cmap('viridis') c_norm = Normalize(vmin=0, vmax=model.casing_l) scalar_map = cmap.ScalarMappable(norm=c_norm, cmap=cm) scalar_map.set_array([]) for i in range(mesh_approx.vnFz[2]): 
ax[0].semilogx(sim_approx.timeMesh.vectorNx+1e-7, -Iz_casing[i, :], color=scalar_map.to_rgba(-mesh_approx.vectorNz[i])) ax[1].loglog(sim_approx.timeMesh.vectorNx+1e-7, np.abs(-Iz_casing[i, :]), color=scalar_map.to_rgba(-mesh_approx.vectorNz[i])) for a in ax: # a.set_xlim([5., -1.25*model.casing_l]) a.grid(which="both", color="k", lw=0.4, alpha=0.4) ax[1].set_ylim([1e-8, 1]) cb=plt.colorbar(scalar_map) cb.set_label("depth (m)") n_z_currents = 128 z_sample = np.linspace(-model_approx_casing.casing_l, 0, n_z_currents) Pz_casing_currents = discretize.TensorMesh([mesh_approx.hz], [mesh_approx.x0[2]]).getInterpolationMat( z_sample, 'N' ) P_casing_currents = sp.kron(Pt, Pz_casing_currents) I_casing_data = -1*P_casing_currents*discretize.utils.mkvc(Iz_casing) np.save(f"{trial_directory}casing_currents.npy", I_casing_data) plt.plot(I_casing_data) ```
github_jupyter
# Cybersecurity

**CS1302 Introduction to Computer Programming**
___

Python is a popular tool among hackers and engineers. In this lab, you will learn Cryptology in cybersecurity, which covers

- [Cryptography](https://en.wikipedia.org/wiki/Cryptography): Encryption and decryption using a cipher.
- [Cryptanalysis](https://en.wikipedia.org/wiki/Cryptanalysis): Devising an attack to break a cipher.

## Caesar symmetric key cipher

We first implement a simple cipher called the [Caesar cipher](https://en.wikipedia.org/wiki/Caesar_cipher).

```
%%html
<iframe width="800" height="415" src="https://www.youtube.com/embed/sMOZf4GN3oc" frameborder="0" allow="accelerometer; autoplay; clipboard-write; encrypted-media; gyroscope; picture-in-picture" allowfullscreen></iframe>
```

### Encrypt/decrypt a character

**How to encrypt a character?**

The following code encrypts a character `char` using a non-negative integer `key`.

```
# Number of Unicode code points: valid codes are 0 to cc_n - 1, so all
# arithmetic on character codes is done modulo cc_n.
cc_n = 1114112


def cc_encrypt_character(char, key):
    """
    Return the encryption of a character by an integer key using Caesar cipher.

    Parameters
    ----------
    char: str
        a unicode (UTF-8) character to be encrypted.
    key: int
        secret key to encrypt char.
    """
    char_code = ord(char)
    shifted_char_code = (char_code + key) % cc_n
    encrypted_char = chr(shifted_char_code)
    return encrypted_char
```

For example, to encrypt the letter `'A'` using a secret key `5`:

```
cc_encrypt_character("A", 5)
```

The character `'A'` is encrypted to the character `'F'` as follows:

1. `ord(char)` returns the integer `65` that is the code point (integer representation) of the unicode of `'A'`.
2. `(char_code + key) % cc_n` cyclic shifts the code by the key `5`.
3. `chr(shifted_char_code)` converts the shifted code back to a character, which is `'F'`.

| Encryption | | | | | | | | |
| ------------------------------- | --- | ----- | --- | --- | --- | --- | --- | --- |
| `char` | ... | **A** | B | C | D | E | F | ... |
| `ord(char)` | ... | **65**| 66 | 67 | 68 | 69 | 70 | ...
|
| `(ord(char) + key) % cc_n` | ... | **70**| 71 | 72 | 73 | 74 | 75 | ... |
| `(chr(ord(char) + key) % cc_n)` | ... | **F** | G | H | I | J | K | ... |

You may learn more about `ord` and `chr` from their docstrings:

```
help(ord)
help(chr)
```

**How to decrypt a character?**

Mathematically, we define the encryption and decryption of a character for Caesar cipher as

$$
\begin{aligned}
E(x,k) &:= x + k \mod n & \text{(encryption)} \\
D(x,k) &:= x - k \mod n & \text{(decryption),}
\end{aligned}
$$

where $x$ is the character code in $\{0,1,\dots,n-1\}$ and $k$ is the secret key. The `mod` operator above is the modulo operator. In Mathematics, it has a lower precedence than addition and multiplication and is typeset with an extra space accordingly.

The encryption and decryption satisfy the recoverability condition

$$
D(E(x,k),k) = x
$$

so two people with a common secret key can encrypt and decrypt a character, but others not knowing the key cannot. This is a defining property of a [symmetric cipher](https://en.wikipedia.org/wiki/Symmetric-key_algorithm).

The following code decrypts a character using a key.

```
def cc_decrypt_character(char, key):
    """
    Return the decryption of a character by the key using Caesar cipher.

    Parameters
    ----------
    char: str
        a unicode (UTF-8) character to be decrypted.
    key: int
        secret key to decrypt char.
    """
    char_code = ord(char)
    # Subtracting the key reverses the cyclic shift done by encryption.
    shifted_char_code = (char_code - key) % cc_n
    decrypted_char = chr(shifted_char_code)
    return decrypted_char
```

For instance, to decrypt the letter `'F'` by the secret key `5`:

```
cc_decrypt_character("F", 5)
```

The character `'F'` is decrypted back to `'A'` because `(char_code - key) % cc_n` reverse cyclic shifts the code by the key `5`.

| Encryption | | | | | | | | | Decryption |
| ------------------------------- | --- | ----- | --- | --- | --- | --- | --- | --- | ------------------------------- |
| `char` | ... | **A** | B | C | D | E | F | ... | `(chr(ord(char) - key) % cc_n)` |
| `ord(char)` | ...
| **65**| 66 | 67 | 68 | 69 | 70 | ... | `(ord(char) - key) % cc_n` | | `(ord(char) + key) % cc_n` | ... | **70**| 71 | 72 | 73 | 74 | 75 | ... | `ord(char)` | | `(chr(ord(char) + key) % cc_n)` | ... | **F** | G | H | I | J | K | ... | `char` | **Exercise** Why did we set `cc_n = 1114112`? Explain whether the recoverability property may fail if we set `cc_n` to a bigger number or remove `% cc_n` for both `cc_encrypt_character` and `cc_decrypt_character`. YOUR ANSWER HERE ### Encrypt a plaintext and decrypt a ciphertext Of course, it is more interesting to encrypt a string instead of a character. The following code implements this in one line. ``` def cc_encrypt(plaintext, key): """ Return the ciphertext of a plaintext by the key using Caesar cipher. Parameters ---------- plaintext: str A unicode (UTF-8) message to be encrypted. public_key: int Public key to encrypt plaintext. """ return "".join([chr((ord(char) + key) % cc_n) for char in plaintext]) ``` The above function encrypts a message, referred to as the *plaintext*, by replacing each character with its encryption. This is referred to as a [*substitution cipher*](https://en.wikipedia.org/wiki/Substitution_cipher). **Exercise** Define a function `cc_decrypt` that - takes a string `ciphertext` and an integer `key`, and - returns the plaintext that encrypts to `ciphertext` by the key using Caesar cipher. ``` def cc_decrypt(ciphertext, key): """ Return the plaintext that encrypts to ciphertext by the key using Caesar cipher. Parameters ---------- ciphertext: str message to be decrypted. key: int secret key to decrypt the ciphertext. """ # YOUR CODE HERE raise NotImplementedError() # tests assert cc_decrypt(r"bcdefghijklmnopqrstuvwxyz{", 1) == "abcdefghijklmnopqrstuvwxyz" assert cc_decrypt(r"Mjqqt1%\twqi&", 5) == "Hello, World!" # hidden tests ``` ## Brute-force attack ### Create an English dictionary You will launch a brute-force attack to guess the key that encrypts an English text. 
The idea is simple: - You try decrypting the ciphertext with different keys, and - see which of the resulting plaintexts make most sense (most english-like). To check whether a plaintext is English-like, we need to have a list of English words. One way is to type them out but this is tedious. Alternatively, we can obtain the list from the *Natural Language Toolkit (NLTK)*: ``` import nltk nltk.download("words") from nltk.corpus import words ``` `words.words()` returns a list of words. We can check whether a string is in the list using the operator `in`. ``` for word in "Ada", "ada", "Hello", "hello": print("{!r} in dictionary? {}".format(word, word in words.words())) ``` However there are two issues: - Checking membership is slow for a long list. - Both 'Hello' and 'ada' are English-like but they are not in the words_list. **Exercise** Using the method `lower` of `str` and the constructor `set`, assign `dictionary` to a set of lowercase English words from `words.words()`. ``` # YOUR CODE HERE raise NotImplementedError() # tests assert isinstance(dictionary, set) and len(dictionary) == 234377 assert all(word in dictionary for word in ("ada", "hello")) assert all(word not in dictionary for word in ("Ada", "hola")) # hidden tests ### BEGIN TESTS assert "world" in dictionary assert not "mundo" in dictionary ### END TESTS ``` ### Identify English-like text To determine how English-like a text is, we calculate the following score: $$ \frac{\text{number of English words in the text}}{\text{number of tokens in the text}} $$ where tokens are substrings (not necessarily an English word) separated by white space characters in the text. 
```
def tokenizer(text):
    """Returns the list of whitespace-delimited tokens of the text."""
    return text.split()


def get_score(text):
    """Returns the fraction of tokens which appear in dictionary.

    Text with no tokens (empty or whitespace-only) gets a score of 0
    instead of raising ZeroDivisionError.
    """
    tokens = tokenizer(text)
    if not tokens:  # guard: avoid division by zero for token-less text
        return 0
    words = [token for token in tokens if token in dictionary]
    return len(words) / len(tokens)


# tests
get_score("hello world"), get_score("Hello, World!")
```

As shown in tests above, the code fails to handle text with punctuations and uppercase letters properly. In particular,

- while `get_score` recognizes `hello world` as English-like and returns the maximum score of 1,
- it fails to recognize `Hello, World!` as English-like and returns the minimum score of 0.

Why?

This is because every word in `dictionary`

- is in lowercase, and
- has no leading/trailing punctuations.

**Exercise** Define a function `tokenizer` that

- takes a string `text` as an argument, and
- returns a `list` of tokens obtained by
  1. splitting `text` into a list using `split()`;
  2. removing leading/trailing punctuations in `string.punctuation` using the `strip` method; and
  3. converting all items of the list to lowercase using `lower()`.
``` import string def tokenizer(text): """Returns the list of tokens of the text such that 1) each token has no leading or training spaces/punctuations, and 2) all letters in each tokens are in lowercase.""" # YOUR CODE HERE raise NotImplementedError() # tests assert tokenizer("Hello, World!") == ["hello", "world"] assert get_score("Hello, World!") >= 0.99999 assert tokenizer("Do you know Jean-Pierre?") == ["do", "you", "know", "jean-pierre"] assert get_score("Do you know Jean-Pierre?") >= 0.99999 # hidden tests ``` ### Launch a brute-force attack **Exercise** Define the function `cc_attack` that - takes as arguments - a string `ciphertext`, - a floating point number `threshold` in the interval $(0,1)$ with a default value of $0.6$, and - returns a generator that - generates one-by-one in ascending order guesses of the key that - decrypt `ciphertext` to texts with scores at least the `threshold`. ``` def cc_attack(ciphertext, threshold=0.6): """Returns a generator that generates the next guess of the key that decrypts the ciphertext to a text with get_score(text) at least the threshold. """ # YOUR CODE HERE raise NotImplementedError() # tests ciphertext = cc_encrypt("Hello, World!", 12345) key_generator = cc_attack(ciphertext) key_guess = next(key_generator) assert key_guess == 12345 text = cc_decrypt(ciphertext, key_guess) print( "guess of the key: {}\nscore: {}\ntext :{}".format(key_guess, get_score(text), text) ) # hidden tests ``` ## Challenge Another symmetric key cipher is [columnar transposition cipher](https://en.wikipedia.org/wiki/Transposition_cipher#Columnar_transposition). A transposition cipher encrypts a text by permuting instead of substituting characters. **Exercise** Study and implement the irregular case of the [columnar transposition cipher](https://en.wikipedia.org/wiki/Transposition_cipher#Columnar_transposition) as described in Wikipedia page. 
Define the functions - `ct_encrypt(plaintext, key)` for encryption, and - `ct_decrypt(ciphertext, key)` for decryption. You can assume the plaintext is in uppercase and has no spaces/punctuations. *Hints:* See the text cases for an example of `plaintext`, `key`, and the corresponding `ciphertext`. You can but are not required to follow the solution template below: ```Python def argsort(seq): '''A helper function that returns the tuple of indices that would sort the sequence seq.''' return tuple(x[0] for x in sorted(enumerate(seq), key=lambda x: x[1])) def ct_idx(length, key): '''A helper function that returns the tuple of indices that would permute the letters of a message according to the key using the irregular case of columnar transposition cipher.''' seq = tuple(range(length)) return [i for j in argsort(key) for i in _______________] def ct_encrypt(plaintext, key): """ Return the ciphertext of a plaintext by the key using the irregular case of columnar transposition cipher. Parameters ---------- plaintext: str a message in uppercase without punctuations/spaces. key: str secret key to encrypt plaintext. """ return ''.join([plaintext[i] for i in ct_idx(len(plaintext), key)]) def ct_decrypt(ciphertext, key): """ Return the plaintext of the ciphertext by the key using the irregular case of columnar transposition cipher. Parameters ---------- ciphertext: str a string in uppercase without punctuations/spaces. key: str secret key to decrypt ciphertext. """ return _______________________________________________________________________ ``` ``` # YOUR CODE HERE raise NotImplementedError() # tests key = "ZEBRAS" plaintext = "WEAREDISCOVEREDFLEEATONCE" ciphertext = "EVLNACDTESEAROFODEECWIREE" assert ct_encrypt(plaintext, key) == ciphertext assert ct_decrypt(ciphertext, key) == plaintext # hidden tests ```
github_jupyter
<a href="https://colab.research.google.com/github/milankansagara/darkcloudcover/blob/Testing-changes/TF_timeseries_tutorial.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a> ##### Copyright 2019 The TensorFlow Authors. #Thanks guys ``` #@title Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. ``` # Time series forecasting (But we need to use stock data) <table class="tfo-notebook-buttons" align="left"> <td> <a target="_blank" href="https://www.tensorflow.org/tutorials/structured_data/time_series"><img src="https://www.tensorflow.org/images/tf_logo_32px.png" />View on TensorFlow.org</a> </td> <td> <a target="_blank" href="https://colab.research.google.com/github/tensorflow/docs/blob/master/site/en/tutorials/structured_data/time_series.ipynb"><img src="https://www.tensorflow.org/images/colab_logo_32px.png" />Run in Google Colab</a> </td> <td> <a target="_blank" href="https://github.com/tensorflow/docs/blob/master/site/en/tutorials/structured_data/time_series.ipynb"><img src="https://www.tensorflow.org/images/GitHub-Mark-32px.png" />View source on GitHub</a> </td> <td> <a href="https://storage.googleapis.com/tensorflow_docs/docs/site/en/tutorials/structured_data/time_series.ipynb"><img src="https://www.tensorflow.org/images/download_logo_32px.png" />Download notebook</a> </td> </table> This tutorial is an introduction to time series forecasting using TensorFlow. 
It builds a few different styles of models including Convolutional and Recurrent Neural Networks (CNNs and RNNs). This is covered in two main parts, with subsections: * Forecast for a single timestep: * A single feature. * All features. * Forecast multiple steps: * Single-shot: Make the predictions all at once. * Autoregressive: Make one prediction at a time and feed the output back to the model. ## Setup ``` !pip install -q tf-nightly import os import datetime import IPython import IPython.display import matplotlib as mpl import matplotlib.pyplot as plt import numpy as np import pandas as pd import seaborn as sns import tensorflow as tf mpl.rcParams['figure.figsize'] = (8, 6) mpl.rcParams['axes.grid'] = False ``` ## The weather dataset This tutorial uses a <a href="https://www.bgc-jena.mpg.de/wetter/" class="external">weather time series dataset</a> recorded by the <a href="https://www.bgc-jena.mpg.de" class="external">Max Planck Institute for Biogeochemistry</a>. This dataset contains 14 different features such as air temperature, atmospheric pressure, and humidity. These were collected every 10 minutes, beginning in 2003. For efficiency, you will use only the data collected between 2009 and 2016. This section of the dataset was prepared by François Chollet for his book [Deep Learning with Python](https://www.manning.com/books/deep-learning-with-python). ``` zip_path = tf.keras.utils.get_file( origin='https://storage.googleapis.com/tensorflow/tf-keras-datasets/jena_climate_2009_2016.csv.zip', fname='jena_climate_2009_2016.csv.zip', extract=True) csv_path, _ = os.path.splitext(zip_path) ``` This tutorial will just deal with **hourly predictions**, so start by sub-sampling the data from 10 minute intervals to 1h: ``` df = pd.read_csv(csv_path) # slice [start:stop:step], starting from index 5 take every 6th record. df = df[5::6] date_time = pd.to_datetime(df.pop('Date Time'), format='%d.%m.%Y %H:%M:%S') ``` Let's take a glance at the data. 
Here are the first few rows: ``` df.head() ``` Here is the evolution of a few features over time. ``` plot_cols = ['T (degC)', 'p (mbar)', 'rho (g/m**3)'] plot_features = df[plot_cols] plot_features.index = date_time _ = plot_features.plot(subplots=True) plot_features = df[plot_cols][:480] plot_features.index = date_time[:480] _ = plot_features.plot(subplots=True) ``` ### Inspect and cleanup Next look at the statistics of the dataset: ``` df.describe().transpose() ``` #### Wind velocity One thing that should stand out is the `min` value of the wind velocity, `wv (m/s)` and `max. wv (m/s)` columns. This `-9999` is likely erroneous. There's a separate wind direction column, so the velocity should be `>=0`. Replace it with zeros: ``` wv = df['wv (m/s)'] bad_wv = wv == -9999.0 wv[bad_wv] = 0.0 max_wv = df['max. wv (m/s)'] bad_max_wv = max_wv == -9999.0 max_wv[bad_max_wv] = 0.0 # The above inplace edits are reflected in the DataFrame df['wv (m/s)'].min() ``` ### Feature engineering Before diving in to build a model it's important to understand your data, and be sure that you're passing the model appropriately formatted data. #### Wind The last column of the data, `wd (deg)`, gives the wind direction in units of degrees. Angles do not make good model inputs, 360° and 0° should be close to each other, and wrap around smoothly. Direction shouldn't matter if the wind is not blowing. Right now the distribution of wind data looks like this: ``` plt.hist2d(df['wd (deg)'], df['wv (m/s)'], bins=(50, 50), vmax=400) plt.colorbar() plt.xlabel('Wind Direction [deg]') plt.ylabel('Wind Velocity [m/s]') ``` But this will be easier for the model to interpret if you convert the wind direction and velocity columns to a wind **vector**: ``` wv = df.pop('wv (m/s)') max_wv = df.pop('max. wv (m/s)') # Convert to radians. wd_rad = df.pop('wd (deg)')*np.pi / 180 # Calculate the wind x and y components. 
df['Wx'] = wv*np.cos(wd_rad) df['Wy'] = wv*np.sin(wd_rad) # Calculate the max wind x and y components. df['max Wx'] = max_wv*np.cos(wd_rad) df['max Wy'] = max_wv*np.sin(wd_rad) ``` The distribution of wind vectors is much simpler for the model to correctly interpret. ``` plt.hist2d(df['Wx'], df['Wy'], bins=(50, 50), vmax=400) plt.colorbar() plt.xlabel('Wind X [m/s]') plt.ylabel('Wind Y [m/s]') ax = plt.gca() ax.axis('tight') ``` #### Time Similarly the `Date Time` column is very useful, but not in this string form. Start by converting it to seconds: ``` timestamp_s = date_time.map(datetime.datetime.timestamp) ``` Similar to the wind direction the time in seconds is not a useful model input. Being weather data it has clear daily and yearly periodicity. There are many ways you could deal with periodicity. A simple approach to convert it to a usable signal is to use `sin` and `cos` to convert the time to clear "Time of day" and "Time of year" signals: ``` day = 24*60*60 year = (365.2425)*day df['Day sin'] = np.sin(timestamp_s * (2 * np.pi / day)) df['Day cos'] = np.cos(timestamp_s * (2 * np.pi / day)) df['Year sin'] = np.sin(timestamp_s * (2 * np.pi / year)) df['Year cos'] = np.cos(timestamp_s * (2 * np.pi / year)) plt.plot(np.array(df['Day sin'])[:25]) plt.plot(np.array(df['Day cos'])[:25]) plt.xlabel('Time [h]') plt.title('Time of day signal') ``` This gives the model access to the most important frequency features. In this case we knew ahead of time which frequencies were important. If you didn't know, you can determine which frequencies are important using an `fft`. To check our assumptions, here is the `tf.signal.rfft` of the temperature over time. 
Note the obvious peaks at frequencies near `1/year` and `1/day`: ``` fft = tf.signal.rfft(df['T (degC)']) f_per_dataset = np.arange(0, len(fft)) n_samples_h = len(df['T (degC)']) hours_per_year = 24*365.2524 years_per_dataset = n_samples_h/(hours_per_year) f_per_year = f_per_dataset/years_per_dataset plt.step(f_per_year, np.abs(fft)) plt.xscale('log') plt.ylim(0, 400000) plt.xlim([0.1, max(plt.xlim())]) plt.xticks([1, 365.2524], labels=['1/Year', '1/day']) _ = plt.xlabel('Frequency (log scale)') ``` ### Split the data We'll use a `(70%, 20%, 10%)` split for the training, validation, and test sets. Note the data is **not** being randomly shuffled before splitting. This is for two reasons. 1. It ensures that chopping the data into windows of consecutive samples is still possible. 2. It ensures that the validation/test results are more realistic, being evaluated on data collected after the model was trained. ``` column_indices = {name: i for i, name in enumerate(df.columns)} n = len(df) train_df = df[0:int(n*0.7)] val_df = df[int(n*0.7):int(n*0.9)] test_df = df[int(n*0.9):] num_features = df.shape[1] ``` ### Normalize the data It is important to scale features before training a neural network. Normalization is a common way of doing this scaling. Subtract the mean and divide by the standard deviation of each feature. The mean and standard deviation should only be computed using the training data so that the models have no access to the values in the validation and test sets. It's also arguable that the model shouldn't have access to future values in the training set when training, and that this normalization should be done using moving averages. That's not the focus of this tutorial, and the validation and test sets ensure that we get (somewhat) honest metrics. So in the interest of simplicity this tutorial uses a simple average. 
``` train_mean = train_df.mean() train_std = train_df.std() train_df = (train_df - train_mean) / train_std val_df = (val_df - train_mean) / train_std test_df = (test_df - train_mean) / train_std ``` Now peek at the distribution of the features. Some features do have long tails, but there are no obvious errors like the `-9999` wind velocity value. ``` df_std = (df - train_mean) / train_std df_std = df_std.melt(var_name='Column', value_name='Normalized') plt.figure(figsize=(12, 6)) ax = sns.violinplot(x='Column', y='Normalized', data=df_std) _ = ax.set_xticklabels(df.keys(), rotation=90) ``` ## Data windowing The models in this tutorial will make a set of predictions based on a window of consecutive samples from the data. The main features of the input windows are: * The width (number of time steps) of the input and label windows * The time offset between them. * Which features are used as inputs, labels, or both. This tutorial builds a variety of models (including Linear, DNN, CNN and RNN models), and uses them for both: * *Single-output*, and *multi-output* predictions. * *Single-time-step* and *multi-time-step* predictions. This section focuses on implementing the data windowing so that it can be reused for all of those models. Depending on the task and type of model you may want to generate a variety of data windows. Here are some examples: 1. For example, to make a single prediction 24h into the future, given 24h of history you might define a window like this: ![One prediction 24h into the future.](https://github.com/milankansagara/darkcloudcover/blob/master/images/raw_window_24h.png?raw=1) 2. A model that makes a prediction 1h into the future, given 6h of history would need a window like this: ![One prediction 1h into the future.](https://github.com/milankansagara/darkcloudcover/blob/master/images/raw_window_1h.png?raw=1) The rest of this section defines a `WindowGenerator` class. This class can: 1. Handle the indexes and offsets as shown in the diagrams above. 
1. Split windows of features into a `(features, labels)` pairs. 2. Plot the content of the resulting windows. 3. Efficiently generate batches of these windows from the training, evaluation, and test data, using `tf.data.Dataset`s. ### 1. Indexes and offsets Start by creating the `WindowGenerator` class. The `__init__` method includes all the necessary logic for the input and label indices. It also takes the train, eval, and test dataframes as input. These will be converted to `tf.data.Dataset`s of windows later. ``` class WindowGenerator(): def __init__(self, input_width, label_width, shift, train_df=train_df, val_df=val_df, test_df=test_df, label_columns=None): # Store the raw data. self.train_df = train_df self.val_df = val_df self.test_df = test_df # Work out the label column indices. self.label_columns = label_columns if label_columns is not None: self.label_columns_indices = {name: i for i, name in enumerate(label_columns)} self.column_indices = {name: i for i, name in enumerate(train_df.columns)} # Work out the window parameters. self.input_width = input_width self.label_width = label_width self.shift = shift self.total_window_size = input_width + shift self.input_slice = slice(0, input_width) self.input_indices = np.arange(self.total_window_size)[self.input_slice] self.label_start = self.total_window_size - self.label_width self.labels_slice = slice(self.label_start, None) self.label_indices = np.arange(self.total_window_size)[self.labels_slice] def __repr__(self): return '\n'.join([ f'Total window size: {self.total_window_size}', f'Input indices: {self.input_indices}', f'Label indices: {self.label_indices}', f'Label column name(s): {self.label_columns}']) ``` Here is code to create the 2 windows shown in the diagrams at the start of this section: ``` w1 = WindowGenerator(input_width=24, label_width=1, shift=24, label_columns=['T (degC)']) w1 w2 = WindowGenerator(input_width=6, label_width=1, shift=1, label_columns=['T (degC)']) w2 ``` ### 2. 
Split

Given a list of consecutive inputs, the `split_window` method will convert them to a window of inputs and a window of labels.

The example `w2`, above, will be split like this:

![The initial window is all consecutive samples, this splits it into (inputs, labels) pairs](https://github.com/milankansagara/darkcloudcover/blob/master/images/split_window.png?raw=1)

This diagram doesn't show the `features` axis of the data, but this `split_window` function also handles the `label_columns` so it can be used for both the single output and multi-output examples.

```
def split_window(self, features):
  inputs = features[:, self.input_slice, :]
  labels = features[:, self.labels_slice, :]
  if self.label_columns is not None:
    labels = tf.stack(
        [labels[:, :, self.column_indices[name]] for name in self.label_columns],
        axis=-1)

  # Slicing doesn't preserve static shape information, so set the shapes
  # manually. This way the `tf.data.Datasets` are easier to inspect.
  inputs.set_shape([None, self.input_width, None])
  labels.set_shape([None, self.label_width, None])

  return inputs, labels

WindowGenerator.split_window = split_window
```

Try it out:

```
# Stack three slices, the length of the total window:
example_window = tf.stack([np.array(train_df[:w2.total_window_size]),
                           np.array(train_df[100:100+w2.total_window_size]),
                           np.array(train_df[200:200+w2.total_window_size])])

example_inputs, example_labels = w2.split_window(example_window)

print('All shapes are: (batch, time, features)')
print(f'Window shape: {example_window.shape}')
print(f'Inputs shape: {example_inputs.shape}')
print(f'labels shape: {example_labels.shape}')
```

Typically data in TensorFlow is packed into arrays where the outermost index is across examples (the "batch" dimension). The middle indices are the "time" or "space" (width, height) dimension(s). The innermost indices are the features.

The code above took a batch of 3, 7-timestep windows, with 19 features at each time step. 
It split them into a batch of 6-timestep, 19 feature inputs, and a 1-timestep 1-feature label. The label only has one feature because the `WindowGenerator` was initialized with `label_columns=['T (degC)']`. Initially this tutorial will build models that predict single output labels. ### 3. Plot Here is a plot method that allows a simple visualization of the split window: ``` w2.example = example_inputs, example_labels def plot(self, model=None, plot_col='T (degC)', max_subplots=3): inputs, labels = self.example plt.figure(figsize=(12, 8)) plot_col_index = self.column_indices[plot_col] max_n = min(max_subplots, len(inputs)) for n in range(max_n): plt.subplot(3, 1, n+1) plt.ylabel(f'{plot_col} [normed]') plt.plot(self.input_indices, inputs[n, :, plot_col_index], label='Inputs', marker='.', zorder=-10) if self.label_columns: label_col_index = self.label_columns_indices.get(plot_col, None) else: label_col_index = plot_col_index if label_col_index is None: continue plt.scatter(self.label_indices, labels[n, :, label_col_index], edgecolors='k', label='Labels', c='#2ca02c', s=64) if model is not None: predictions = model(inputs) plt.scatter(self.label_indices, predictions[n, :, label_col_index], marker='X', edgecolors='k', label='Predictions', c='#ff7f0e', s=64) if n == 0: plt.legend() plt.xlabel('Time [h]') WindowGenerator.plot = plot ``` This plot aligns inputs, labels, and (later) predictions based on the time that the item refers to: ``` w2.plot() ``` You can plot the other columns, but the example window `w2` configuration only has labels for the `T (degC)` column. ``` w2.plot(plot_col='p (mbar)') ``` ### 4. Create `tf.data.Dataset`s Finally this `make_dataset` method will take a time series `DataFrame` and convert it to a `tf.data.Dataset` of `(input_window, label_window)` pairs using the `preprocessing.timeseries_dataset_from_array` function. 
``` def make_dataset(self, data): data = np.array(data, dtype=np.float32) ds = tf.keras.preprocessing.timeseries_dataset_from_array( data=data, targets=None, sequence_length=self.total_window_size, sequence_stride=1, shuffle=True, batch_size=32,) ds = ds.map(self.split_window) return ds WindowGenerator.make_dataset = make_dataset ``` The `WindowGenerator` object holds training, validation and test data. Add properties for accessing them as `tf.data.Datasets` using the above `make_dataset` method. Also add a standard example batch for easy access and plotting: ``` @property def train(self): return self.make_dataset(self.train_df) @property def val(self): return self.make_dataset(self.val_df) @property def test(self): return self.make_dataset(self.test_df) @property def example(self): """Get and cache an example batch of `inputs, labels` for plotting.""" result = getattr(self, '_example', None) if result is None: # No example batch was found, so get one from the `.train` dataset result = next(iter(self.train)) # And cache it for next time self._example = result return result WindowGenerator.train = train WindowGenerator.val = val WindowGenerator.test = test WindowGenerator.example = example ``` Now the `WindowGenerator` object gives you access to the `tf.data.Dataset` objects, so you can easily iterate over the data. The `Dataset.element_spec` property tells you the structure, `dtypes` and shapes of the dataset elements. ``` # Each element is an (inputs, label) pair w2.train.element_spec ``` Iterating over a `Dataset` yields concrete batches: ``` for example_inputs, example_labels in w2.train.take(1): print(f'Inputs shape (batch, time, features): {example_inputs.shape}') print(f'Labels shape (batch, time, features): {example_labels.shape}') ``` ## Single step models The simplest model you can build on this sort of data is one that predicts a single feature's value, 1 timestep (1h) in the future based only on the current conditions. 
So start by building models to predict the `T (degC)` value 1h into the future. ![Predict the next time step](https://github.com/milankansagara/darkcloudcover/blob/master/images/narrow_window.png?raw=1) Configure a `WindowGenerator` object to produce these single-step `(input, label)` pairs: ``` single_step_window = WindowGenerator( input_width=1, label_width=1, shift=1, label_columns=['T (degC)']) single_step_window ``` The `window` object creates `tf.data.Datasets` from the training, validation, and test sets, allowing you to easily iterate over batches of data. ``` for example_inputs, example_labels in single_step_window.train.take(1): print(f'Inputs shape (batch, time, features): {example_inputs.shape}') print(f'Labels shape (batch, time, features): {example_labels.shape}') ``` ### Baseline Before building a trainable model it would be good to have a performance baseline as a point for comparison with the later more complicated models. This first task is to predict temperature 1h in the future given the current value of all features. The current values include the current temperature. So start with a model that just returns the current temperature as the prediction, predicting "No change". This is a reasonable baseline since temperature changes slowly. Of course, this baseline will work less well if you make a prediction further in the future. 
![Send the input to the output](https://github.com/milankansagara/darkcloudcover/blob/master/images/baseline.png?raw=1) ``` class Baseline(tf.keras.Model): def __init__(self, label_index=None): super().__init__() self.label_index = label_index def call(self, inputs): if self.label_index is None: return inputs result = inputs[:, :, self.label_index] return result[:, :, tf.newaxis] ``` Instantiate and evaluate this model: ``` baseline = Baseline(label_index=column_indices['T (degC)']) baseline.compile(loss=tf.losses.MeanSquaredError(), metrics=[tf.metrics.MeanAbsoluteError()]) val_performance = {} performance = {} val_performance['Baseline'] = baseline.evaluate(single_step_window.val) performance['Baseline'] = baseline.evaluate(single_step_window.test, verbose=0) ``` That printed some performance metrics, but those don't give you a feeling for how well the model is doing. The `WindowGenerator` has a plot method, but the plots won't be very interesting with only a single sample. So, create a wider `WindowGenerator` that generates windows 24h of consecutive inputs and labels at a time. The `wide_window` doesn't change the way the model operates. The model still makes predictions 1h into the future based on a single input time step. Here the `time` axis acts like the `batch` axis: Each prediction is made independently with no interaction between time steps. ``` wide_window = WindowGenerator( input_width=24, label_width=24, shift=1, label_columns=['T (degC)']) wide_window ``` This expanded window can be passed directly to the same `baseline` model without any code changes. 
This is possible because the inputs and labels have the same number of timesteps, and the baseline just forwards the input to the output:

![One prediction 1h into the future, every hour.](https://github.com/milankansagara/darkcloudcover/blob/master/images/last_window.png?raw=1)

```
print('Input shape:', single_step_window.example[0].shape)
print('Output shape:', baseline(single_step_window.example[0]).shape)
```

Plotting the baseline model's predictions you can see that it is simply the labels, shifted right by 1h.

```
wide_window.plot(baseline)
```

In the above plots of three examples the single step model is run over the course of 24h. This deserves some explanation:

* The blue "Inputs" line shows the input temperature at each time step. The model receives all features, this plot only shows the temperature.
* The green "Labels" dots show the target prediction value. These dots are shown at the prediction time, not the input time. That is why the range of labels is shifted 1 step relative to the inputs.
* The orange "Predictions" crosses are the model's predictions for each output time step. If the model were predicting perfectly the predictions would land directly on the "labels".

### Linear model

The simplest **trainable** model you can apply to this task is to insert a linear transformation between the input and output. In this case the output from a time step only depends on that step:

![A single step prediction](https://github.com/milankansagara/darkcloudcover/blob/master/images/narrow_window.png?raw=1)

A `layers.Dense` with no `activation` set is a linear model. The layer only transforms the last axis of the data from `(batch, time, inputs)` to `(batch, time, units)`, it is applied independently to every item across the `batch` and `time` axes. 
```
linear = tf.keras.Sequential([
    tf.keras.layers.Dense(units=1)
])

print('Input shape:', single_step_window.example[0].shape)
print('Output shape:', linear(single_step_window.example[0]).shape)
```

This tutorial trains many models, so package the training procedure into a function:

```
MAX_EPOCHS = 20

def compile_and_fit(model, window, patience=2):
  early_stopping = tf.keras.callbacks.EarlyStopping(monitor='val_loss',
                                                    patience=patience,
                                                    mode='min')

  model.compile(loss=tf.losses.MeanSquaredError(),
                optimizer=tf.optimizers.Adam(),
                metrics=[tf.metrics.MeanAbsoluteError()])

  history = model.fit(window.train, epochs=MAX_EPOCHS,
                      validation_data=window.val,
                      callbacks=[early_stopping])
  return history
```

Train the model and evaluate its performance:

```
history = compile_and_fit(linear, single_step_window)

val_performance['Linear'] = linear.evaluate(single_step_window.val)
performance['Linear'] = linear.evaluate(single_step_window.test, verbose=0)
```

Like the `baseline` model, the linear model can be called on batches of wide windows. Used this way the model makes a set of independent predictions on consecutive time steps. The `time` axis acts like another `batch` axis. There are no interactions between the predictions at each time step.

![A single step prediction](https://github.com/milankansagara/darkcloudcover/blob/master/images/wide_window.png?raw=1)

```
print('Input shape:', wide_window.example[0].shape)
print('Output shape:', baseline(wide_window.example[0]).shape)
```

Here is the plot of its example predictions on the `wide_window`, note how in many cases the prediction is clearly better than just returning the input temperature, but in a few cases it's worse:

```
wide_window.plot(linear)
```

One advantage to linear models is that they're relatively simple to interpret. 
You can pull out the layer's weights, and see the weight assigned to each input:

```
plt.bar(x = range(len(train_df.columns)),
        height=linear.layers[0].kernel[:,0].numpy())
axis = plt.gca()
axis.set_xticks(range(len(train_df.columns)))
_ = axis.set_xticklabels(train_df.columns, rotation=90)
```

Sometimes the model doesn't even place the most weight on the input `T (degC)`. This is one of the risks of random initialization.

### Dense

Before applying models that actually operate on multiple time-steps, it's worth checking the performance of deeper, more powerful, single input step models.

Here's a model similar to the `linear` model, except it stacks a few `Dense` layers between the input and the output:

```
dense = tf.keras.Sequential([
    tf.keras.layers.Dense(units=64, activation='relu'),
    tf.keras.layers.Dense(units=64, activation='relu'),
    tf.keras.layers.Dense(units=1)
])

history = compile_and_fit(dense, single_step_window)

val_performance['Dense'] = dense.evaluate(single_step_window.val)
performance['Dense'] = dense.evaluate(single_step_window.test, verbose=0)
```

### Multi-step dense

A single-time-step model has no context for the current values of its inputs. It can't see how the input features are changing over time. To address this issue the model needs access to multiple time steps when making predictions:

![Three time steps are used for each prediction.](https://github.com/milankansagara/darkcloudcover/blob/master/images/conv_window.png?raw=1)

The `baseline`, `linear` and `dense` models handled each time step independently. Here the model will take multiple time steps as input to produce a single output.

Create a `WindowGenerator` that will produce batches of 3h of inputs and 1h of labels:

Note that the `Window`'s `shift` parameter is relative to the end of the two windows. 
```
CONV_WIDTH = 3
conv_window = WindowGenerator(
    input_width=CONV_WIDTH,
    label_width=1,
    shift=1,
    label_columns=['T (degC)'])

conv_window

conv_window.plot()
plt.title("Given 3h as input, predict 1h into the future.")
```

You could train a `dense` model on a multiple-input-step window by adding a `layers.Flatten` as the first layer of the model:

```
multi_step_dense = tf.keras.Sequential([
    # Shape: (time, features) => (time*features)
    tf.keras.layers.Flatten(),
    tf.keras.layers.Dense(units=32, activation='relu'),
    tf.keras.layers.Dense(units=32, activation='relu'),
    tf.keras.layers.Dense(units=1),
    # Add back the time dimension.
    # Shape: (outputs) => (1, outputs)
    tf.keras.layers.Reshape([1, -1]),
])

print('Input shape:', conv_window.example[0].shape)
print('Output shape:', multi_step_dense(conv_window.example[0]).shape)

history = compile_and_fit(multi_step_dense, conv_window)

IPython.display.clear_output()
val_performance['Multi step dense'] = multi_step_dense.evaluate(conv_window.val)
performance['Multi step dense'] = multi_step_dense.evaluate(conv_window.test, verbose=0)

conv_window.plot(multi_step_dense)
```

The main down-side of this approach is that the resulting model can only be executed on input windows of exactly this shape.

```
print('Input shape:', wide_window.example[0].shape)
try:
  print('Output shape:', multi_step_dense(wide_window.example[0]).shape)
except Exception as e:
  print(f'\n{type(e).__name__}:{e}')
```

The convolutional models in the next section fix this problem.

### Convolution neural network

A convolution layer (`layers.Conv1D`) also takes multiple time steps as input to each prediction.

Below is the **same** model as `multi_step_dense`, re-written with a convolution.

Note the changes:
* The `layers.Flatten` and the first `layers.Dense` are replaced by a `layers.Conv1D`.
* The `layers.Reshape` is no longer necessary since the convolution keeps the time axis in its output. 
```
conv_model = tf.keras.Sequential([
    tf.keras.layers.Conv1D(filters=32,
                           kernel_size=(CONV_WIDTH,),
                           activation='relu'),
    tf.keras.layers.Dense(units=32, activation='relu'),
    tf.keras.layers.Dense(units=1),
])
```

Run it on an example batch to see that the model produces outputs with the expected shape:

```
print("Conv model on `conv_window`")
print('Input shape:', conv_window.example[0].shape)
print('Output shape:', conv_model(conv_window.example[0]).shape)
```

Train and evaluate it on the `conv_window` and it should give performance similar to the `multi_step_dense` model.

```
history = compile_and_fit(conv_model, conv_window)

IPython.display.clear_output()
val_performance['Conv'] = conv_model.evaluate(conv_window.val)
performance['Conv'] = conv_model.evaluate(conv_window.test, verbose=0)
```

The difference between this `conv_model` and the `multi_step_dense` model is that the `conv_model` can be run on inputs of any length. The convolutional layer is applied to a sliding window of inputs:

![Executing a convolutional model on a sequence](https://github.com/milankansagara/darkcloudcover/blob/master/images/wide_conv_window.png?raw=1)

If you run it on wider input, it produces wider output:

```
print("Wide window")
print('Input shape:', wide_window.example[0].shape)
print('Labels shape:', wide_window.example[1].shape)
print('Output shape:', conv_model(wide_window.example[0]).shape)
```

Note that the output is shorter than the input. To make training or plotting work, you need the labels and predictions to have the same length. 
So build a `WindowGenerator` to produce wide windows with a few extra input time steps so the label and prediction lengths match: ``` LABEL_WIDTH = 24 INPUT_WIDTH = LABEL_WIDTH + (CONV_WIDTH - 1) wide_conv_window = WindowGenerator( input_width=INPUT_WIDTH, label_width=LABEL_WIDTH, shift=1, label_columns=['T (degC)']) wide_conv_window print("Wide conv window") print('Input shape:', wide_conv_window.example[0].shape) print('Labels shape:', wide_conv_window.example[1].shape) print('Output shape:', conv_model(wide_conv_window.example[0]).shape) ``` Now you can plot the model's predictions on a wider window. Note the 3 input time steps before the first prediction. Every prediction here is based on the 3 preceding timesteps: ``` wide_conv_window.plot(conv_model) ``` ### Recurrent neural network A Recurrent Neural Network (RNN) is a type of neural network well-suited to time series data. RNNs process a time series step-by-step, maintaining an internal state from time-step to time-step. For more details, read the [text generation tutorial](https://www.tensorflow.org/tutorials/text/text_generation) or the [RNN guide](https://www.tensorflow.org/guide/keras/rnn). In this tutorial, you will use an RNN layer called Long Short Term Memory ([LSTM](https://www.tensorflow.org/versions/r2.0/api_docs/python/tf/keras/layers/LSTM)). An important constructor argument for all keras RNN layers is the `return_sequences` argument. This setting can configure the layer in one of two ways. 1. If `False`, the default, the layer only returns the output of the final timestep, giving the model time to warm up its internal state before making a single prediction: ![An lstm warming up and making a single prediction](https://github.com/milankansagara/darkcloudcover/blob/master/images/lstm_1_window.png?raw=1) 2. If `True` the layer returns an output for each input. This is useful for: * Stacking RNN layers. * Training a model on multiple timesteps simultaneously. 
![An lstm making a prediction after every timestep](https://github.com/milankansagara/darkcloudcover/blob/master/images/lstm_many_window.png?raw=1) ``` lstm_model = tf.keras.models.Sequential([ # Shape [batch, time, features] => [batch, time, lstm_units] tf.keras.layers.LSTM(32, return_sequences=True), # Shape => [batch, time, features] tf.keras.layers.Dense(units=1) ]) ``` With `return_sequences=True` the model can be trained on 24h of data at a time. Note: This will give a pessimistic view of the model's performance. On the first timestep the model has no access to previous steps, and so can't do any better than the simple `linear` and `dense` models shown earlier. ``` print('Input shape:', wide_window.example[0].shape) print('Output shape:', lstm_model(wide_window.example[0]).shape) history = compile_and_fit(lstm_model, wide_window) IPython.display.clear_output() val_performance['LSTM'] = lstm_model.evaluate(wide_window.val) performance['LSTM'] = lstm_model.evaluate(wide_window.test, verbose=0) wide_window.plot(lstm_model) ``` ### Performance With this dataset typically each of the models does slightly better than the one before it. ``` x = np.arange(len(performance)) width = 0.3 metric_name = 'mean_absolute_error' metric_index = lstm_model.metrics_names.index('mean_absolute_error') val_mae = [v[metric_index] for v in val_performance.values()] test_mae = [v[metric_index] for v in performance.values()] plt.ylabel('mean_absolute_error [T (degC), normalized]') plt.bar(x - 0.17, val_mae, width, label='Validation') plt.bar(x + 0.17, test_mae, width, label='Test') plt.xticks(ticks=x, labels=performance.keys(), rotation=45) _ = plt.legend() for name, value in performance.items(): print(f'{name:12s}: {value[1]:0.4f}') ``` ### Multi-output models The models so far all predicted a single output feature, `T (degC)`, for a single time step. 
All of these models can be converted to predict multiple features just by changing the number of units in the output layer and adjusting the training windows to include all features in the `labels`. ``` single_step_window = WindowGenerator( # `WindowGenerator` returns all features as labels if you # don't set the `label_columns` argument. input_width=1, label_width=1, shift=1) wide_window = WindowGenerator( input_width=24, label_width=24, shift=1) for example_inputs, example_labels in wide_window.train.take(1): print(f'Inputs shape (batch, time, features): {example_inputs.shape}') print(f'Labels shape (batch, time, features): {example_labels.shape}') ``` Note above that the `features` axis of the labels now has the same depth as the inputs, instead of 1. #### Baseline The same baseline model can be used here, but this time repeating all features instead of selecting a specific `label_index`. ``` baseline = Baseline() baseline.compile(loss=tf.losses.MeanSquaredError(), metrics=[tf.metrics.MeanAbsoluteError()]) val_performance = {} performance = {} val_performance['Baseline'] = baseline.evaluate(wide_window.val) performance['Baseline'] = baseline.evaluate(wide_window.test, verbose=0) ``` #### Dense ``` dense = tf.keras.Sequential([ tf.keras.layers.Dense(units=64, activation='relu'), tf.keras.layers.Dense(units=64, activation='relu'), tf.keras.layers.Dense(units=num_features) ]) history = compile_and_fit(dense, single_step_window) IPython.display.clear_output() val_performance['Dense'] = dense.evaluate(single_step_window.val) performance['Dense'] = dense.evaluate(single_step_window.test, verbose=0) ``` #### RNN ``` %%time wide_window = WindowGenerator( input_width=24, label_width=24, shift=1) lstm_model = tf.keras.models.Sequential([ # Shape [batch, time, features] => [batch, time, lstm_units] tf.keras.layers.LSTM(32, return_sequences=True), # Shape => [batch, time, features] tf.keras.layers.Dense(units=num_features) ]) history = compile_and_fit(lstm_model, 
wide_window) IPython.display.clear_output() val_performance['LSTM'] = lstm_model.evaluate( wide_window.val) performance['LSTM'] = lstm_model.evaluate( wide_window.test, verbose=0) print() ``` <a id="residual"></a> #### Advanced: Residual connections The `Baseline` model from earlier took advantage of the fact that the sequence doesn't change drastically from time step to time step. Every model trained in this tutorial so far was randomly initialized, and then had to learn that the output is a small change from the previous time step. While you can get around this issue with careful initialization, it's simpler to build this into the model structure. It's common in time series analysis to build models that instead of predicting the next value, predict how the value will change in the next timestep. Similarly, "Residual networks" or "ResNets" in deep learning refer to architectures where each layer adds to the model's accumulating result. That is how you take advantage of the knowledge that the change should be small. ![A model with a residual connection](https://github.com/milankansagara/darkcloudcover/blob/master/images/residual.png?raw=1) Essentially this initializes the model to match the `Baseline`. For this task it helps models converge faster, with slightly better performance. This approach can be used in conjunction with any model discussed in this tutorial. Here it is being applied to the LSTM model, note the use of the `tf.initializers.zeros` to ensure that the initial predicted changes are small, and don't overpower the residual connection. There are no symmetry-breaking concerns for the gradients here, since the `zeros` are only used on the last layer. 
``` class ResidualWrapper(tf.keras.Model): def __init__(self, model): super().__init__() self.model = model def call(self, inputs, *args, **kwargs): delta = self.model(inputs, *args, **kwargs) # The prediction for each timestep is the input # from the previous time step plus the delta # calculated by the model. return inputs + delta %%time residual_lstm = ResidualWrapper( tf.keras.Sequential([ tf.keras.layers.LSTM(32, return_sequences=True), tf.keras.layers.Dense( num_features, # The predicted deltas should start small # So initialize the output layer with zeros kernel_initializer=tf.initializers.zeros) ])) history = compile_and_fit(residual_lstm, wide_window) IPython.display.clear_output() val_performance['Residual LSTM'] = residual_lstm.evaluate(wide_window.val) performance['Residual LSTM'] = residual_lstm.evaluate(wide_window.test, verbose=0) print() ``` #### Performance Here is the overall performance for these multi-output models. ``` x = np.arange(len(performance)) width = 0.3 metric_name = 'mean_absolute_error' metric_index = lstm_model.metrics_names.index('mean_absolute_error') val_mae = [v[metric_index] for v in val_performance.values()] test_mae = [v[metric_index] for v in performance.values()] plt.bar(x - 0.17, val_mae, width, label='Validation') plt.bar(x + 0.17, test_mae, width, label='Test') plt.xticks(ticks=x, labels=performance.keys(), rotation=45) plt.ylabel('MAE (average over all outputs)') _ = plt.legend() for name, value in performance.items(): print(f'{name:15s}: {value[1]:0.4f}') ``` The above performances are averaged across all model outputs. ## Multi-step models Both the single-output and multiple-output models in the previous sections made **single time step predictions**, 1h into the future. This section looks at how to expand these models to make **multiple time step predictions**. In a multi-step prediction, the model needs to learn to predict a range of future values. 
Thus, unlike a single step model, where only a single future point is predicted, a multi-step model predicts a sequence of the future values. There are two rough approaches to this: 1. Single shot predictions where the entire time series is predicted at once. 2. Autoregressive predictions where the model only makes single step predictions and its output is fed back as its input. In this section all the models will predict **all the features across all output time steps**. For the multi-step model, the training data again consists of hourly samples. However, here, the models will learn to predict 24h of the future, given 24h of the past. Here is a `Window` object that generates these slices from the dataset: ``` OUT_STEPS = 24 multi_window = WindowGenerator(input_width=24, label_width=OUT_STEPS, shift=OUT_STEPS) multi_window.plot() multi_window ``` ### Baselines A simple baseline for this task is to repeat the last input time step for the required number of output timesteps: ![Repeat the last input, for each output step](https://github.com/milankansagara/darkcloudcover/blob/master/images/multistep_last.png?raw=1) ``` class MultiStepLastBaseline(tf.keras.Model): def call(self, inputs): return tf.tile(inputs[:, -1:, :], [1, OUT_STEPS, 1]) last_baseline = MultiStepLastBaseline() last_baseline.compile(loss=tf.losses.MeanSquaredError(), metrics=[tf.metrics.MeanAbsoluteError()]) multi_val_performance = {} multi_performance = {} multi_val_performance['Last'] = last_baseline.evaluate(multi_window.val) multi_performance['Last'] = last_baseline.evaluate(multi_window.test, verbose=0) multi_window.plot(last_baseline) ``` Since this task is to predict 24h given 24h, another simple approach is to repeat the previous day, assuming tomorrow will be similar: ![Repeat the previous day](https://github.com/milankansagara/darkcloudcover/blob/master/images/multistep_repeat.png?raw=1) ``` class RepeatBaseline(tf.keras.Model): def call(self, inputs): return inputs repeat_baseline = 
RepeatBaseline() repeat_baseline.compile(loss=tf.losses.MeanSquaredError(), metrics=[tf.metrics.MeanAbsoluteError()]) multi_val_performance['Repeat'] = repeat_baseline.evaluate(multi_window.val) multi_performance['Repeat'] = repeat_baseline.evaluate(multi_window.test, verbose=0) multi_window.plot(repeat_baseline) ``` ### Single-shot models One high level approach to this problem is to use a "single-shot" model, where the model makes the entire sequence prediction in a single step. This can be implemented efficiently as a `layers.Dense` with `OUT_STEPS*features` output units. The model just needs to reshape that output to the required `(OUTPUT_STEPS, features)`. #### Linear A simple linear model based on the last input time step does better than either baseline, but is underpowered. The model needs to predict `OUTPUT_STEPS` time steps, from a single input time step with a linear projection. It can only capture a low-dimensional slice of the behavior, likely based mainly on the time of day and time of year. ![Predict all timesteps from the last time-step](https://github.com/milankansagara/darkcloudcover/blob/master/images/multistep_dense.png?raw=1) ``` multi_linear_model = tf.keras.Sequential([ # Take the last time-step. 
# Shape [batch, time, features] => [batch, 1, features] tf.keras.layers.Lambda(lambda x: x[:, -1:, :]), # Shape => [batch, 1, out_steps*features] tf.keras.layers.Dense(OUT_STEPS*num_features, kernel_initializer=tf.initializers.zeros), # Shape => [batch, out_steps, features] tf.keras.layers.Reshape([OUT_STEPS, num_features]) ]) history = compile_and_fit(multi_linear_model, multi_window) IPython.display.clear_output() multi_val_performance['Linear'] = multi_linear_model.evaluate(multi_window.val) multi_performance['Linear'] = multi_linear_model.evaluate(multi_window.test, verbose=0) multi_window.plot(multi_linear_model) ``` #### Dense Adding a `layers.Dense` between the input and output gives the linear model more power, but is still only based on a single input timestep. ``` multi_dense_model = tf.keras.Sequential([ # Take the last time step. # Shape [batch, time, features] => [batch, 1, features] tf.keras.layers.Lambda(lambda x: x[:, -1:, :]), # Shape => [batch, 1, dense_units] tf.keras.layers.Dense(512, activation='relu'), # Shape => [batch, out_steps*features] tf.keras.layers.Dense(OUT_STEPS*num_features, kernel_initializer=tf.initializers.zeros), # Shape => [batch, out_steps, features] tf.keras.layers.Reshape([OUT_STEPS, num_features]) ]) history = compile_and_fit(multi_dense_model, multi_window) IPython.display.clear_output() multi_val_performance['Dense'] = multi_dense_model.evaluate(multi_window.val) multi_performance['Dense'] = multi_dense_model.evaluate(multi_window.test, verbose=0) multi_window.plot(multi_dense_model) ``` #### CNN A convolutional model makes predictions based on a fixed-width history, which may lead to better performance than the dense model since it can see how things are changing over time: ![A convolutional model sees how things change over time](https://github.com/milankansagara/darkcloudcover/blob/master/images/multistep_conv.png?raw=1) ``` CONV_WIDTH = 3 multi_conv_model = tf.keras.Sequential([ # Shape [batch, time, features] => 
# [batch, CONV_WIDTH, features] tf.keras.layers.Lambda(lambda x: x[:, -CONV_WIDTH:, :]), # Shape => [batch, 1, conv_units] tf.keras.layers.Conv1D(256, activation='relu', kernel_size=(CONV_WIDTH,)), # Shape => [batch, 1, out_steps*features] tf.keras.layers.Dense(OUT_STEPS*num_features, kernel_initializer=tf.initializers.zeros), # Shape => [batch, out_steps, features] tf.keras.layers.Reshape([OUT_STEPS, num_features]) ]) history = compile_and_fit(multi_conv_model, multi_window) IPython.display.clear_output() multi_val_performance['Conv'] = multi_conv_model.evaluate(multi_window.val) multi_performance['Conv'] = multi_conv_model.evaluate(multi_window.test, verbose=0) multi_window.plot(multi_conv_model) ``` #### RNN A recurrent model can learn to use a long history of inputs, if it's relevant to the predictions the model is making. Here the model will accumulate internal state for 24h, before making a single prediction for the next 24h. In this single-shot format, the LSTM only needs to produce an output at the last time step, so set `return_sequences=False`. ![The lstm accumulates state over the input window, and makes a single prediction for the next 24h](https://github.com/milankansagara/darkcloudcover/blob/master/images/multistep_lstm.png?raw=1) ``` multi_lstm_model = tf.keras.Sequential([ # Shape [batch, time, features] => [batch, lstm_units] # Adding more `lstm_units` just overfits more quickly. 
tf.keras.layers.LSTM(32, return_sequences=False), # Shape => [batch, out_steps*features] tf.keras.layers.Dense(OUT_STEPS*num_features, kernel_initializer=tf.initializers.zeros), # Shape => [batch, out_steps, features] tf.keras.layers.Reshape([OUT_STEPS, num_features]) ]) history = compile_and_fit(multi_lstm_model, multi_window) IPython.display.clear_output() multi_val_performance['LSTM'] = multi_lstm_model.evaluate(multi_window.val) multi_performance['LSTM'] = multi_lstm_model.evaluate(multi_window.test, verbose=0) multi_window.plot(multi_lstm_model) ``` ### Advanced: Autoregressive model The above models all predict the entire output sequence in a single step. In some cases it may be helpful for the model to decompose this prediction into individual time steps. Then each model's output can be fed back into itself at each step and predictions can be made conditioned on the previous one, like in the classic [Generating Sequences With Recurrent Neural Networks](https://arxiv.org/abs/1308.0850). One clear advantage to this style of model is that it can be set up to produce output with a varying length. You could take any of the single-step multi-output models trained in the first half of this tutorial and run them in an autoregressive feedback loop, but here we'll focus on building a model that's been explicitly trained to do that. ![Feedback a model's output to its input](https://github.com/milankansagara/darkcloudcover/blob/master/images/multistep_autoregressive.png?raw=1) #### RNN This tutorial only builds an autoregressive RNN model, but this pattern could be applied to any model that was designed to output a single timestep. The model will have the same basic form as the single-step `LSTM` models: An `LSTM` followed by a `layers.Dense` that converts the `LSTM` outputs to model predictions. 
A `layers.LSTM` is a `layers.LSTMCell` wrapped in the higher level `layers.RNN` that manages the state and sequence results for you (See [Keras RNNs](https://www.tensorflow.org/guide/keras/rnn) for details). In this case the model has to manually manage the inputs for each step so it uses `layers.LSTMCell` directly for the lower level, single time step interface. ``` class FeedBack(tf.keras.Model): def __init__(self, units, out_steps): super().__init__() self.out_steps = out_steps self.units = units self.lstm_cell = tf.keras.layers.LSTMCell(units) # Also wrap the LSTMCell in an RNN to simplify the `warmup` method. self.lstm_rnn = tf.keras.layers.RNN(self.lstm_cell, return_state=True) self.dense = tf.keras.layers.Dense(num_features) feedback_model = FeedBack(units=32, out_steps=OUT_STEPS) ``` The first method this model needs is a `warmup` method to initialize its internal state based on the inputs. Once trained, this state will capture the relevant parts of the input history. This is equivalent to the single-step `LSTM` model from earlier: ``` def warmup(self, inputs): # inputs.shape => (batch, time, features) # x.shape => (batch, lstm_units) x, *state = self.lstm_rnn(inputs) # predictions.shape => (batch, features) prediction = self.dense(x) return prediction, state FeedBack.warmup = warmup ``` This method returns a single time-step prediction, and the internal state of the LSTM: ``` prediction, state = feedback_model.warmup(multi_window.example[0]) prediction.shape ``` With the `RNN`'s state, and an initial prediction you can now continue iterating the model feeding the predictions at each step back as the input. The simplest approach to collecting the output predictions is to use a python list, and `tf.stack` after the loop. Note: Stacking a python list like this only works with eager-execution, using `Model.compile(..., run_eagerly=True)` for training, or with a fixed length output. 
For a dynamic output length you would need to use a `tf.TensorArray` instead of a python list, and `tf.range` instead of the python `range`. ``` def call(self, inputs, training=None): # Use a python list to capture the dynamically unrolled outputs. predictions = [] # Initialize the lstm state prediction, state = self.warmup(inputs) # Insert the first prediction predictions.append(prediction) # Run the rest of the prediction steps for n in range(1, self.out_steps): # Use the last prediction as input. x = prediction # Execute one lstm step. x, state = self.lstm_cell(x, states=state, training=training) # Convert the lstm output to a prediction. prediction = self.dense(x) # Add the prediction to the output predictions.append(prediction) # predictions.shape => (time, batch, features) predictions = tf.stack(predictions) # predictions.shape => (batch, time, features) predictions = tf.transpose(predictions, [1, 0, 2]) return predictions FeedBack.call = call ``` Test run this model on the example inputs: ``` print('Output shape (batch, time, features): ', feedback_model(multi_window.example[0]).shape) ``` Now train the model: ``` history = compile_and_fit(feedback_model, multi_window) IPython.display.clear_output() multi_val_performance['AR LSTM'] = feedback_model.evaluate(multi_window.val) multi_performance['AR LSTM'] = feedback_model.evaluate(multi_window.test, verbose=0) multi_window.plot(feedback_model) ``` ### Performance There are clearly diminishing returns as a function of model complexity on this problem. 
``` x = np.arange(len(multi_performance)) width = 0.3 metric_name = 'mean_absolute_error' metric_index = lstm_model.metrics_names.index('mean_absolute_error') val_mae = [v[metric_index] for v in multi_val_performance.values()] test_mae = [v[metric_index] for v in multi_performance.values()] plt.bar(x - 0.17, val_mae, width, label='Validation') plt.bar(x + 0.17, test_mae, width, label='Test') plt.xticks(ticks=x, labels=multi_performance.keys(), rotation=45) plt.ylabel(f'MAE (average over all times and outputs)') _ = plt.legend() ``` The metrics for the multi-output models in the first half of this tutorial show the performance averaged across all output features. These performances are similar, but also averaged across output timesteps. ``` for name, value in multi_performance.items(): print(f'{name:8s}: {value[1]:0.4f}') ``` The gains achieved going from a dense model to convolutional and recurrent models are only a few percent (if any), and the autoregressive model performed clearly worse. So these more complex approaches may not be worthwhile on **this** problem, but there was no way to know without trying, and these models could be helpful for **your** problem. ## Next steps This tutorial was a quick introduction to time series forecasting using TensorFlow. * For further understanding, see: * Chapter 15 of [Hands-on Machine Learning with Scikit-Learn, Keras, and TensorFlow](https://www.oreilly.com/library/view/hands-on-machine-learning/9781492032632/), 2nd Edition * Chapter 6 of [Deep Learning with Python](https://www.manning.com/books/deep-learning-with-python). 
* Lesson 8 of [Udacity's intro to TensorFlow for deep learning](https://www.udacity.com/course/intro-to-tensorflow-for-deep-learning--ud187), and the [exercise notebooks](https://github.com/tensorflow/examples/tree/master/courses/udacity_intro_to_tensorflow_for_deep_learning) * Also remember that you can implement any [classical time series model](https://otexts.com/fpp2/index.html) in TensorFlow; this tutorial just focuses on TensorFlow's built-in functionality.
github_jupyter
``` import pyscisci.all as pyscisci import numpy as np import matplotlib.pylab as plt try: import seaborn as sns sns.set_style('white') except: pass %matplotlib inline # set this path to where the DBLP database will be stored path2dblp = '/home/ajgates/DBLP' path2dblp = '/Volumes/GatesExpansionDrive/DataSets/DBLP' path2dblp = '/Users/ajgates/Documents/DataSets/DBLP' mydblp = pyscisci.DBLP(path2dblp, keep_in_memory=False) # set keep_in_memory=False if you want to load the database each time it's needed - good for when you # can't keep more than one DataFrame in memory at a time # otherwise keep_in_memory=True will keep each DataFrame in memory after it's loaded # only need to run this for the first time # usually takes about 1-2min to download mydblp.download_from_source() # before we can start running our analysis, we have to preprocess the raw data into # DataFrames that are more convenient to work with mydblp.preprocess() # DBLP creates three dataframes: # pub_df - keeps all of the publication information # columns : ['PublicationId', 'Title', 'Year', 'Month', 'Volume', 'Number', 'Pages', 'JournalId', 'EE', 'DocType', 'TeamSize'] # author_df - keeps all of the author information # columns : ['AuthorId', 'FullName', 'LastName', 'FirstName', 'MiddleName'] # author2pub_df - links the authors to their publications # columns : ['PublicationId', 'AuthorId', 'AuthorSequence'] # NOTE: DBLP does not contain citation information # let's plot the number of publications each year yearly_articles = pyscisci.groupby_count(df=mydblp.pub_df, colgroupby='Year', colcountby='PublicationId', count_unique=True) yearly_articles.sort_values(by='Year', inplace=True) yearly_articles = yearly_articles.loc[yearly_articles['Year'] > 0] fig, ax = plt.subplots(1,1,figsize=(8,5)) ax.plot(yearly_articles['Year'], yearly_articles['PublicationIdCount']) ax.set_xlabel('Year') ax.set_ylabel("# of publications") ax.set_yscale('log') plt.show() # now we can see the distribution of author productivity pa_df 
= mydblp.author2pub_df # publication author relationships author_prod = pyscisci.author_productivity(pa_df, colgroupby = 'AuthorId', colcountby = 'PublicationId') prodvalues, prodcounts = np.unique(author_prod['Productivity'].values, return_counts=True) fig, ax = plt.subplots(1,1,figsize=(8,5)) ax.scatter(prodvalues, prodcounts) ax.set_xlabel('Productivity') ax.set_ylabel("# of authors") ax.set_xscale('log') ax.set_yscale('log') plt.show() ```
github_jupyter
# Dictionaries and DataFrames Today we are going build dictionaries. Dictionaries are datastructures that do not assume an index value for the data stored in the structures. Dictionaries take the general form: > my_dictionary = {key:obj} To call the object that is linked to the key, > *my_dictionary[key]* will output the object, *obj*. ``` dct = {} dct dct = {"run":"to move swiftly by foot", "walk": "to move slowly or leisurely by foot"} dct dct = {"run":{}, "walk":{}} dct dct = {"run":{ "verb":"to move swiftly by foot", "noun":"a period of time while one was running"}, "walk":{ "verb":"to move slowly or leisurely by foot", "noun":"a period of time while one was walking"} } dct import pandas as pd df = pd.DataFrame(dct) df # call a column in the df df["run"] # call a row in the df df.loc["verb"] # call a particular cell df.loc["verb", "run"] dct = {"Caden":{"Age":19, "Interesting Fact":"Played hockey in highschool"}, "Jacob P":{"Age":21, "Interesting Fact":"Dr. Caton thought my last name was Keterson"}, "Finnian":{"Age":21, "Interesting Fact":"Wrestled in highschool"}, "Genesis":{"Age":20, "Interesting Fact":"Tore both ACLs"}, "Sam":{"Age":23, "Interesting Fact":"Favorite color beige"}, "Proma":{"Age":24, "Interesting Fact":"Learned classical dancing for 10 years"}, "Zach":{"Age":20, "Interesting Fact":"On the track and field team for long-distance"}, "Jacob R":{"Age":20, "Interesting Fact":"Plays classic rock on the guitar"}, "Brandon":{"Age":23, "Interesting Fact":"Used play baseball in highschool and other leagues"}, "Gabe":{"Age":23, "Interesting Fact":"A double major MIS and Accounting for undergrad"}, "Drew":{"Age":49, "Interesting Fact":"Was in the Air Force and freed Keiko (Free Willy)"}, "Isaac":{"Age":21, "Interesting Fact":"Traveling to Europe in the Summer 2022"}, "Kodjo":{"Age":30, "Interesting Fact":"Wife is a soldier!"}} dct # transpose a dataframe by calling df.T class_df = pd.DataFrame(dct).T class_df class_df.index class_df["Age"] 
class_df["Interesting Fact"] dct["Dr. Caton"] = {} dct["Dr. Caton"]["Interesting Fact"] = "I used to ride dirbikes" dct dct["Joe Biden"] = {"Age":78, "Interesting Fact":"Plays Mario Kart with his grandchildren", "Job":"President of the United States"} dct class_df = pd.DataFrame(dct).T class_df class_df.dtypes faculty_dict = {"William Nganje":{"Website":"https://www.ndsu.edu/agecon/faculty/william_nganje/#c622350", "Areas of Specialization":"Risk management; financial analysis; economics of obesity, food safety and food terrorism; experimental economics; and consumer choice theory", "Bio":"NA"}, "David Bullock": {"Website":"https://www.ndsu.edu/agecon/faculty/bullock/#c622728", "Areas of Specialization": "futures and options markets, over-the-counter derivatives, trading, risk management, agrifinance, Monte Carlo simulation, and Big Data", "Bio":"Dr. David W. Bullock is a Research Associate Professor affiliated with the Center for Trading and Risk at NDSU. His research interests include futures and options markets, over-the-counter derivatives, trading, risk management, agrifinance, Monte Carlo simulation, and Big Data applications in agriculture. His academic research in option portfolio theory has been published in both the Journal of Economics and Business and the International Review of Economics and Finance. Additionally, he was the primary contributor behind the AgriBank Insights publication series which won a National AgriMarketing Association (NAMA) award for the best company publication in 2016. Before coming to NDSU in January 2018, Dr. 
Bullock held numerous positions for over 25 years in the government and private sectors including the Senior Economist at AgriBank FCB – the regional Farm Credit System funding bank for the Upper Midwest region, Director of Research and Senior Foods Economist at Fortune 500 commodity risk management firm INTL FCStone Inc., the Senior Dairy Analyst at Informa Economics, a Risk Management Specialist with the Minnesota Department of Agriculture, and the Senior Economist at the Minneapolis Grain Exchange. David began his academic career as an Assistant Professor and Extension Marketing Economist at Montana State University after graduating from Iowa State University with a Ph.D. in agricultural economics with fields in agricultural price analysis and econometrics in 1989. Prior to entering ISU, he received bachelor’s (1982) and master’s (1984) degrees in agricultural economics from Northwest Missouri State University. Dr. Bullock is originally from the small northwestern Missouri farming community of Lathrop which is located 40 miles north of the Kansas City metropolitan area. While in high school, he served as a regional state Vice-President in the Future Farmers of America (FFA) during his senior year."}, "James Caton": {"Website":"https://www.ndsu.edu/centers/pcpe/about/directory/james_caton/", "Areas of Specialization": "Entrepreneurship, Institutions, Macroeconomics, Computation", "Bio":"James Caton is a faculty fellow at the NDSU Center for the Study of Public Choice and Private Enterprise (PCPE) and an assistant professor in the NDSU Department of Agribusiness and Applied Economics. He teaches undergraduate courses in the areas of macroeconomics, international trade, and computation. He specializes in research related to entrepreneurship, agent-based computational economics, market process theory, and monetary economics. 
His research has been published in the Southern Economic Journal, Erasmus Journal for Philosophy and Economics, Journal of Entrepreneurship and Public Policy and other academic publications. He co-edited Macroeconomics, a two volume set of essays and primary sources that represent the core of macroeconomic thought. He is also a regular contributor to the American Institute for Economic Research's Sound Money Project, which conducts research and promotes awareness about monetary stability and financial privacy. He resides in Fargo with his wife, Ingrid, and their children."}, "David Englund": {"Website":"https://www.ndsu.edu/agecon/faculty/englund/#c622903", "Areas of Specialization": "Teaches Economic Principles, Led NDSU NAMA to National Champions", "Bio":"David Englund is a lecturer in the department. He came to the department with 16 years of teaching experience, having taught Principles of Microeconomics, Principles of Macroeconomics, Money and Banking, Consumer Behavior, Selected Topics in Business, and several other classes. He also had 10 years’ experience advising student NAMA chapters, having been awarded the Outstanding Advisor of the Year for a Developing Chapter in 2002, and the Outstanding Advisor of the Year award in 2009.\nDavid primarily teaches Survey of Economics, Principles of Microeconomics, Skills for Academic Success, Agricultural Marketing, and NAMA (co-teaches). He joined the NAMA team in the 2014-2015 school year as a co-advisor and helped coach the student team to a 3rd place finish in the national student marketing plan competition at the national conference.\nSome of David’s outside interests are jogging, photography, and writing fiction novels. 
His latest release, Camouflaged Encounters has received positive reviews."}, "Erik Hanson": {"Website":"https://www.ndsu.edu/agecon/faculty/hanson/#c622905", "Areas of Specialization": "Ag Management, Ag Finance", "Bio":"Erik Hanson is an Assistant Professor in the Department of Agricultural and Applied Economics. He teaches courses on agribusiness management and agricultural finance. Erik completed his Ph.D. at the University of Minnesota in 2016. Prior to that, Erik completed a master’s degree at the University of Illinois (2013) and a bachelor’s degree at Minnesota State University Moorhead (2011)."}, "Ronald Haugen": {"Website":"https://www.ndsu.edu/agecon/about_us/faculty/ron_haugen/#c654700", "Areas of Specialization": "Farm management including: crop budgets, crop insurance, farm programs, custom farm rates, land rents, machinery economics, commodity price projections and agricultural income taxes. ", "Bio":"Ron Haugen is an Extension Farm Management Specialist. He has been in the department since 1991. He computes the North Dakota Land Valuation Model."}, "Robert Hearne": {"Website":"https://www.ndsu.edu/agecon/faculty/hearne/#c622909", "Areas of Specialization": "water resources management institutions, water markets, protected area management, and the economic valuation of environmental goods and services.", "Bio":"Dr. Bob Hearne has been in the Department of Agribusiness and Applied Economics since 2002. 
He has professional experience in Europe, Asia, Latin America, and Asia."}, "Jeremy Jackson": {"Website":"https://www.ndsu.edu/centers/pcpe/about/directory/jeremy_jackson/", "Areas of Specialization": "public choice and the political economy; the social consequences of economic freedom; happiness and well-being; and philanthropy and nonprofits.", "Bio":" Jeremy Jackson is director of the Center for the Study of Public Choice and Private Enterprise, scholar at the Challey Institute for Global Innovation and Growth, and professor of economics in the Department of Agribusiness and Applied Economics at North Dakota State University.. He teaches undergraduate and graduate courses in the areas of microeconomics, public economics, and game theory and strategy. His research has been published in Applied Economics, The Independent Review, Public Choice, Contemporary Economic Policy, Journal of Happiness Studies, and other refereed and non-refereed sources. "}, "Prithviraj Lakkakula":{"Website":"https://www.ndsu.edu/agecon/faculty/prithviraj_lakkakula/#c623441", "Areas of Specialization":"Blockchain, Agricultural Economics", "Bio":""}, "Siew Lim": {"Website":"https://www.ndsu.edu/agecon/faculty/lim/#c624837", "Areas of Specialization": "applied microeconomics, production economics, industrial organization, transportation and regional development, transportation and regional development", "Bio":"Siew Hoon Lim is an associate professor of economics."}, "Raymond March": {"Website":"https://www.ndsu.edu/centers/pcpe/about/directory/raymond_march/", "Areas of Specialization": "public and private provision and governance of health care in the United States, particularly in pharmaceutical markets", "Bio":"Raymond March is a scholar at the Challey Institute for Global Innovation and Growth with the Center for the Study of Public Choice and Private Enterprise and an assistant professor of economics in the Department of Agribusiness and Applied Economics at North Dakota State 
University. He teaches courses in microeconomics, the history of economic thought, and health economics."}, "Dragan Miljkovic": {"Website":"https://www.ndsu.edu/agecon/faculty/miljkovic/#c625001", "Areas of Specialization": "agricultural price analysis, international economics, and agricultural and food policy including human nutrition, obesity, and food safety", "Bio":"Dragan Miljkovic is professor of agricultural economics in the Department of Agribusiness & Applied Economics at North Dakota State University. Dr. Miljkovic holds B.S. and M.S. degrees in Economics from the University of Belgrade, and Ph.D. in Agricultural Economics from the University of Illinois at Urbana-Champaign. Dr. Miljkovic authored over sixty peer reviewed journal articles and book chapters, and edited three books. He has more than 60 selected and invited presentations at various domestic and international conferences and universities in North America, Europe, New Zealand, and Australia. Dr. Miljkovic teaches undergraduate class in agricultural prices and graduate advanced econometrics class. Dr. Miljkovic is the Founding Editor and Editor-In-Chief of the Journal of International Agricultural Trade and Development (JIATD), and has also served as the Associate Editor of the Journal of Agricultural and Applied Economics (JAAE). He is an active member of numerous professional organizations and associations including the International Agricultural Trade Research Consortium (IATRC), the AAEA, the SAEA, WAEA, NAREA, AARES, and regional projects NCCC-134, WERA-72, and S-1016."}, "Frayne Olson": {"Website":"https://www.ndsu.edu/agecon/faculty/olson/#c625016", "Areas of Specialization": " crop marketing strategies, crop outlook and price analysis, and the economics of crop contracting", "Bio":"Dr. Frayne Olson is the Crop Economist/Marketing Specialist with the North Dakota State University Extension and Director of the Quentin Burdick Center for Cooperatives. Dr. 
Olson conducts educational programs. As Director of the Center for Cooperatives, he teaches a senior level course on cooperative business management and coordinates the Center’s research and outreach activities. Dr. Olson received his PhD from the University of Missouri in Agricultural Economics, and his M.S. and B.S. in Agricultural Economics from North Dakota State University."}, "Bryon Parman": {"Website":"https://www.ndsu.edu/agecon/faculty/parman/#c654590", "Areas of Specialization": "", "Bio":""}, "Tim Petry": {"Website":"https://www.ndsu.edu/agecon/faculty/petry/#c625018", "Areas of Specialization": "Price Forecasting", "Bio":"Tim Petry was raised on a livestock ranch in Northwestern North Dakota. He graduated from North Dakota State University with a major in Agricultural Economics in 1969 and served two years in the US Army. Petry returned to NDSU and completed a Master’s Degree in Agricultural Economics with an agricultural marketing emphasis in 1973. He was a member of the teaching/research staff in the Department of Agricultural Economics for 30 years. During his teaching tenure, he taught many marketing courses including several livestock marketing classes, and a very popular introduction to agricultural marketing class. In 2002, Petry retired from teaching and joined the NDSU Extension Service as a Livestock Marketing Economist. He travels extensively in North Dakota and the surrounding area conducting meetings on livestock marketing educational topics. Petry writes a popular monthly “Market Advisor” column on current livestock marketing issues. Copies of his presentations, columns, and other current information affecting the livestock industry are available on his web site: www.ag.ndsu.edu/livestockeconomics. 
Tim Petry and his wife Shirley have three grown daughters."}, "Xudong Rao": {"Website":"https://www.ndsu.edu/agecon/faculty/rao/#c629066", "Areas of Specialization": "Farm and Agribusiness Management, Risk Analysis, Efficiency and Productivity, Technology Adoption, Food and Agricultural Policy, International Agricultural Development", "Bio":"Rao is an assistant professor of agricultural economics at NDSU"}, "Veeshan Rayamajhee": {"Website":"https://www.ndsu.edu/centers/pcpe/about/directory/veeshan_rayamajhee/", "Areas of Specialization": "Public Choice and New Institutional Economics", "Bio":"Veeshan Rayamajhee is a scholar at the Challey Institute for Global Innovation and Growth with the Center for the Study of Public Choice and Private Enterprise and an assistant professor of economics in the Department of Agribusiness and Applied Economics at North Dakota State University. His research combines insights from Public Choice and New Institutional Economics to understand individual and collective responses to covariate shocks. He uses a range of empirical tools to study issues related to disasters, climate adaptation, food and energy security, and environmental externalities. His research has appeared in peer-reviewed journals such as Journal of Institutional Economics, Disasters, Economics of Disasters and Climate Change, Journal of International Development, Food Security, and Renewable Energy. For updates on his research, please visit: veeshan.rayamajhee.com/research"}, "David Ripplinger": {"Website":"https://www.ndsu.edu/agecon/faculty/ripplinger/#c629078", "Areas of Specialization": "Bioenergy", "Bio":"David Ripplinger is an Associate Professor in the Department of Agribusiness and Applied Economics at North Dakota State University and bioproducts/bioenergy economics specialist with NDSU Extension. In these roles, David conducts research and provides support to farmers and the bioenergy industry. 
"}, "David Roberts": {"Website":"https://www.ndsu.edu/agecon/faculty/roberts/#c629137", "Areas of Specialization": "agricultural production methods on the environment and natural resources", "Bio":"David Roberts is an Assistant Professor of Agribusiness and Applied Economics at North Dakota State University. His research focuses on the impacts of agricultural production methods on the environment and natural resources. David is particularly interested in the economics of precision agriculture technologies and the response of cropping patterns and land use change to emerging biofuels policy at the Federal level. His doctoral dissertation research investigated the relative profitability of several different mid-season optimal nitrogen rate prediction systems for winter wheat in Oklahoma, and investigated how incorporation of uncertainty in estimated and predicted production functions can increase the profitability of the prediction systems. David’s MS thesis investigated the suitability of water quality trading as a policy instrument for dealing with nutrient runoff from agricultural operations in Tennessee. Results showed conditions in polluted watersheds in Tennessee likely will not support robust trading in water quality allowances or offsets."}, "Kristi Schweiss": {"Website":"https://www.ndsu.edu/agecon/faculty/schweiss/#c629139", "Areas of Specialization": "", "Bio":"Assistant Director, QBCC"}, "Anupa Sharma": {"Website":"https://www.ndsu.edu/agecon/faculty/sharma/#c629150", "Areas of Specialization": "International Trade Agreements, Trade Patterns, Distance and Missing Globalization, International Trade and Development", "Bio":"Anupa Sharma is an Assistant Professor in the Department of Agribusiness and Applied Economics at North Dakota State University. She also serves as the Assistant Director for the Center for Agricultural Policy and Trade Studies (CAPTS). 
She develops quantitative methods to address issues pertinent to International Trade."}, "Cheryl Wachenheim": {"Website":"https://www.ndsu.edu/agecon/faculty/wachenheim/#c629162", "Bio": "Cheryl Wachenheim is a Professor in the Department of Agribusiness and Applied Economics at North Dakota State University. She holds an undergraduate degree in animal sciences from the University of Minnesota, and a Master’s and doctorate in Agricultural Economics and an MBA from Michigan State University. She began her academic career at Illinois State University in Central Illinois and has been on the faculty at NDSU since 1998. She regularly teaches classes in economics, agricultural sales, agricultural finance, agricultural marketing, and strategic marketing and management. Research interests focus on eliciting perceptions and valuations from consumers, firms, students and other stakeholders and decision makers. Analysis then allows for identification of high-value marketing and management strategies. Cheryl has been a member of the MN Army National Guard since 1998. She is currently the Commander of the 204th Area Medical Support Company in Cottage Grove, Minnesota.", "Areas of Specialization":" eliciting perceptions and valuations from consumers, firms, students and other stakeholders and decision makers"}, "William Wilson": {"Website":"https://www.ndsu.edu/agecon/faculty/wilson/#c629178", "Bio": "Dr. William W. Wilson received his PhD in Agricultural Economics from the University of Manitoba in 1980. Since then he has been a Professor at North Dakota State University in Agribusiness and Applied Economics with periodic sabbaticals at Stanford University. Recently, he was named as a University Distinguished Professor at NDSU which an honorary position is, and a great achievement. And, in 2016 he was named the CHS Chair in Risk Management and Trading at NDSU which is an endowed position. 
In 2017 he was awarded the AAEA 2016 Distinguished Teaching Award (Chicago July 2017) His focus is risk and strategy as applied to agriculture and agribusiness with a particular focus on agtechnology development and commercialization, procurement, transportation and logistics, international marketing and competition. He teaches classes in Commodity Trading, Risk and AgriBusiness Strategy and has taught his Risk Class at Purdue University; and is a visiting scholar at Melbourne University where he visits 2 times/year and advises PhD students in risk and agbiotechnology. Finally, he has now created the NDSU Commodity Trading Room which is a state of art facility for teaching and research in commodity marketing, logistics and trading. He routinely has projects and/or overseas clients and travels internationally 1 week per month. He led a project for the United States on privatization of the grain marketing system in Russia in the early 1990’s. He currently has projects and/or clients in US, Russia, Ukraine, Mexico, Argentina and Australia. He regularly advises a number of large Agribusiness firms, several major railroads, and several major food and beverage companies and/or governments in other countries. He served as a Board member of the Minneapolis Grain Exchange for 12 years, on the FGIS Advisory Board, and currently serves as a Board member of several regional firms in agtechnology and venture capital (AMITY, BUSHEL), in addition to NCH Capital (New York City which is one of the largest investors in world agriculture). He regularly consults with major agribusiness firms on topics related to above and has worked extensively in the following industries: agtechnology, logistics, procurement strategy, railroads, barges, ocean shipping, elevators (shuttle development), and processed products (malting and beer, durum and pasta, wheat and bread). 
He was recognized as one of the top 10 Agricultural Economists in 1995 and more recently as one of the top 1% of agricultural economists by RePEc (Research Papers in Economics). Finally, he has students who are in senior positions in a number of the large agribusinesses including commodity companies, railroads and food and beverage companies.", "Areas of Specialization":"commodity marketing, logistics and trading"}, } faculty_dict ``` ## Getting Familiar with pandas DataFrames ``` faculty_df = pd.DataFrame(faculty_dict).T faculty_df.to_csv("facultyInfo.csv") faculty_df.loc["Jeremy Jackson"]["Bio"] faculty_df.index faculty_df.keys() faculty_df.loc["William Nganje"] names = faculty_df.index for name in names: print(name) for name in names: print(faculty_df.loc[name]) keys = faculty_df.keys() keys for name in names: print(name) for key in keys: print(key)#, faculty_df.loc[name][key]) print() for name in names: print(name) for key in keys: print(key+":", faculty_df.loc[name, key]) print() faculty_df[faculty_df["Areas of Specialization"].str.contains("Risk")] names = ["Jeremy Jackson", "Raymond March", "Veeshan Rayamajhee"] faculty_df[faculty_df.index.isin(names)] lst = ["a", "b", "c"] "a" in lst ```
github_jupyter
# Line chart ``` #from matplotlib import pyplot as plt import matplotlib.pyplot as plt plt.plot([1,2,3], [4,5,1]) plt.title("info") plt.xlabel("X Axis") plt.ylabel("Y Axis") plt.show() #line chart ``` # Double line chart ``` import matplotlib.pyplot as plt import matplotlib.style as st st.use("ggplot") x = [5,8,10] y = [12, 6, 6] x2 = [6,9,11] y2 = [6,15,7] plt.plot(x,y, "g", label="line one", linewidth = 5) plt.plot(x2,y2, "c", label="line two", linewidth = 5) plt.title("Epic Info") plt.xlabel("X Axis") plt.ylabel("Y Axis") plt.legend() plt.grid(True, color = "k") plt.show() ``` # Bar chart ``` #Bar chart import matplotlib.pyplot as plt plt.bar([1,3,5,7,9], [5,2,7,8,9], label="Example one") plt.bar([2,4,6,8,10], [8,6,2,5,6], label="Example two", color= "g") plt.legend() plt.xlabel("Bar Number") plt.ylabel("Bar Hight") plt.title("info") plt.show() ``` # Histogram chart ``` #Histogram import matplotlib.pyplot as plt population_ages = [22,55,62,45,21,22,34,42,42,4,99,102,110,120,121,122,130,111,115,112,80,75,65,54,55,44,43,43,42,48] bins = [0,10,20,30,40,50,60,70,80,90,100,110,120,130] plt.hist(population_ages, bins, histtype= "bar", rwidth =0.8) plt.xlabel("X Axis") plt.ylabel("Y Axis") plt.title("histogram") plt.show() ``` # Scatter chart ``` #Scatter chart import matplotlib.pyplot as plt x = [1,2,3,4,5,6,7,8] y = [5,2,4,2,1,4,5,2] plt.scatter(x,y, label = "python", color="r") plt.xlabel("X Axis") plt.ylabel("Y Axis") plt.legend() plt.show() ``` # Stack plot chart ``` #Stack plot days = [1,2,3,4,5] sleeping = [7,8,6,11,7] eating = [2,3,4,3,2] working = [7,8,7,2,2] playing = [8,5,7,8,13] plt.plot([],[], color = "m", label = "Sleeping", linewidth=5) plt.plot([],[], color = "c", label = "Eating ", linewidth=5) plt.plot([],[], color = "r", label = "Working", linewidth=5) plt.plot([],[], color = "k", label = "Playing", linewidth=5) plt.stackplot(days, sleeping, eating, working, playing, color = ["m","c","r","k"]) plt.xlabel("X Axis") plt.ylabel("Y Axis") plt.legend() 
plt.show() #pie plot slices=[7,2,2,13] activities = ["Sleeping", "Eating", "Working", "Playing"] cols = ["c", "m", "r", "b"] plt.pie(slices, labels = activities, colors=cols, startangle=90, shadow=True, explode=(0,0.1,0,0), autopct= "%1.1f%%") plt.title("pie plot") plt.show() import numpy as np def f(t): return np.exp(-t) * np.cos(2*np.pi*t) t1 = np.arange(0.0,5.0,0.1) t2 = np.arange(0.0,5.0,0.02) plt.subplot(211) plt.plot(t1, f(t1), "bo", t2, f(t2)) plt.subplot(212) plt.plot(t2, np.cos(2*np.pi*t2)) plt.show() ```
github_jupyter
# Aim of this notebook * To construct the singular curve of universal type to finalize the solution of the optimal control problem # Preamble ``` from sympy import * init_printing(use_latex='mathjax') # Plotting %matplotlib inline ## Make inline plots raster graphics from IPython.display import set_matplotlib_formats ## Import modules for plotting and data analysis import matplotlib.pyplot as plt from matplotlib import gridspec,rc,colors import matplotlib.ticker as plticker ## Parameters for seaborn plots import seaborn as sns sns.set(style='white',font_scale=1.25, rc={"xtick.major.size": 6, "ytick.major.size": 6, 'text.usetex': False, 'font.family': 'serif', 'font.serif': ['Times']}) import pandas as pd pd.set_option('mode.chained_assignment',None) import numpy as np from scipy.optimize import fsolve, root from scipy.integrate import ode backend = 'dopri5' import warnings # Timer import time from copy import deepcopy from itertools import cycle palette_size = 10; clrs = sns.color_palette("Reds",palette_size) iclrs = cycle(clrs) # iterated colors # Suppress warnings import warnings warnings.filterwarnings("ignore") ``` # Parameter values * Birth rate and cost of downregulation are defined below in order to fit some experimental
data ``` d = .13 # death rate c = .04 # cost of resistance α = .3 # low equilibrium point at expression of the main pathway (high equilibrium is at one) θ = .45 # threshold value for the expression of the main pathway κ = 40 # robustness parameter L = .2 # parameter used to model the effect of treatment (see the line below) ``` * Symbolic variables - the list includes μ & μbar, because they will be varied later ``` σ, φ0, φ, x, μ, μbar = symbols('sigma, phi0, phi, x, mu, mubar') ``` * Main functions ``` A = 1-σ*(1-θ)*(1-L) Θ = θ+σ*(1-θ)*L Eminus = (α*A-Θ)**2/2 ΔE = A*(1-α)*((1+α)*A/2-Θ) ΔEf = lambdify(σ,ΔE) ``` * Birth rate and cost of downregulation ``` b = (0.1*(exp(κ*(ΔEf(1)))+1)-0.14*(exp(κ*ΔEf(0))+1))/(exp(κ*ΔEf(1))-exp(κ*ΔEf(0))) # birth rate χ = 1-(0.14*(exp(κ*ΔEf(0))+1)-b*exp(κ*ΔEf(0)))/b b, χ ``` * Hamiltonian *H* and a part of it ρ that includes the control variable σ ``` h = b*(χ/(exp(κ*ΔE)+1)*(1-x)+c*x) H = -φ0 + φ*(b*(χ/(exp(κ*ΔE)+1)-c)*x*(1-x)+μ*(1-x)/(exp(κ*ΔE)+1)-μbar*exp(-κ*Eminus)*x) + h ρ = (φ*(b*χ*x+μ)+b*χ)/(exp(κ*ΔE)+1)*(1-x)-φ*μbar*exp(-κ*Eminus)*x H, ρ ``` * Same but for no treatment (σ = 0) ``` h0 = h.subs(σ,0) H0 = H.subs(σ,0) ρ0 = ρ.subs(σ,0) H0, ρ0 ``` * Machinery: definition of the Poisson brackets ``` PoissonBrackets = lambda H1, H2: diff(H1,x)*diff(H2,φ)-diff(H1,φ)*diff(H2,x) ``` * Necessary functions and defining the right hand side of dynamical equations ``` ρf = lambdify((x,φ,σ,μ,μbar),ρ) ρ0f = lambdify((x,φ,μ,μbar),ρ0) dxdτ = lambdify((x,φ,σ,μ,μbar),-diff(H,φ)) dφdτ = lambdify((x,φ,σ,μ,μbar),diff(H,x)) dVdτ = lambdify((x,σ),h) dρdσ = lambdify((σ,x,φ,μ,μbar),diff(ρ,σ)) dδρdτ = lambdify((x,φ,σ,μ,μbar),-PoissonBrackets(ρ0-ρ,H)) def ode_rhs(t,state,μ,μbar): x, φ, V, δρ = state σs = [0,1] if (dρdσ(1.,x,φ,μ,μbar)<0) and (dρdσ(θ,x,φ,μ,μbar)>0): σstar = fsolve(dρdσ,.8,args=(x,φ,μ,μbar,))[0] else: σstar = 1.; if ρf(x,φ,σstar,μ,μbar) < ρ0f(x,φ,μ,μbar): sgm = 0 else: sgm = σstar return
[dxdτ(x,φ,sgm,μ,μbar),dφdτ(x,φ,sgm,μ,μbar),dVdτ(x,sgm),dδρdτ(x,φ,σstar,μ,μbar)]

def σstarf(x,φ,μ,μbar):
    # Pointwise optimal control law (same logic as inside ode_rhs, exposed as a helper):
    # take the interior stationary point of ρ in σ when dρ/dσ changes sign on (θ, 1),
    # otherwise the boundary control σ = 1; fall back to no treatment (σ = 0) whenever
    # the treated value ρ does not beat the no-treatment value ρ0.
    if (dρdσ(1.,x,φ,μ,μbar)<0) and (dρdσ(θ,x,φ,μ,μbar)>0):
        σstar = fsolve(dρdσ,.8,args=(x,φ,μ,μbar,))[0]
    else:
        σstar = 1.;
    if ρf(x,φ,σstar,μ,μbar) < ρ0f(x,φ,μ,μbar):
        sgm = 0
    else:
        sgm = σstar
    return sgm

def get_primary_field(name, experiment,μ,μbar):
    # Integrate the characteristics of the Bellman equation forward from a fan of
    # initial resistant fractions experiment['x0'] (the "primary field"); each
    # trajectory is stored and then unpacked into named columns τ, x, φ, V, δρ.
    solutions = {}
    solver = ode(ode_rhs).set_integrator(backend)
    τ0 = experiment['τ0']
    # NOTE(review): num=1e3+1 is a float; recent NumPy requires an integer here.
    tms = np.linspace(τ0,experiment['T_end'],1e3+1)
    for x0 in experiment['x0']:
        # Initial gap δρ0 = ρ0 - ρ(σ=1) at φ = 0 for this x0 (sign selects the control regime).
        δρ0 = ρ0.subs(x,x0).subs(φ,0)-ρ.subs(x,x0).subs(φ,0).subs(σ,1.)
        solver.set_initial_value([x0,0,0,δρ0],0.).set_f_params(μ,μbar)
        sol = []; k = 0;
        # Stop at the horizon or as soon as x leaves the unit interval.
        while (solver.t < experiment['T_end']) and (solver.y[0]<=1.) and (solver.y[0]>=0.):
            solver.integrate(tms[k])
            sol.append([solver.t]+list(solver.y))
            k += 1
        solutions[x0] = {'solution': sol}
    for x0, entry in solutions.items():
        entry['τ'] = [entry['solution'][j][0] for j in range(len(entry['solution']))]
        entry['x'] = [entry['solution'][j][1] for j in range(len(entry['solution']))]
        entry['φ'] = [entry['solution'][j][2] for j in range(len(entry['solution']))]
        entry['V'] = [entry['solution'][j][3] for j in range(len(entry['solution']))]
        entry['δρ'] = [entry['solution'][j][4] for j in range(len(entry['solution']))]
    return solutions

def get_δρ_value(tme,x0,μ,μbar):
    # δρ component (state index 3) at time tme for the characteristic started at x0;
    # used by fsolve to locate points of the switching curve (δρ = 0).
    solver = ode(ode_rhs).set_integrator(backend)
    δρ0 = ρ0.subs(x,x0).subs(φ,0)-ρ.subs(x,x0).subs(φ,0).subs(σ,1.)
    solver.set_initial_value([x0,0,0,δρ0],0.).set_f_params(μ,μbar)
    while (solver.t < tme) and (solver.y[0]<=1.) and (solver.y[0]>=0.):
        solver.integrate(tme)
    sol = [solver.t]+list(solver.y)
    return solver.y[3]

def get_δρ_ending(params,μ,μbar):
    # Residual pair (δρ(tme), dδρ/dτ(tme)) for the characteristic started at x0.
    # Both components vanish at the ENDPOINT of the switching curve, so this is the
    # function handed to fsolve/root when locating that endpoint.
    tme, x0 = params
    solver = ode(ode_rhs).set_integrator(backend)
    δρ0 = ρ0.subs(x,x0).subs(φ,0)-ρ.subs(x,x0).subs(φ,0).subs(σ,1.)
    solver.set_initial_value([x0,0,0,δρ0],0.).set_f_params(μ,μbar)
    δτ = 1.0e-8; tms = [tme,tme+δτ]    # finite-difference step for the τ-derivative
    _k = 0; sol = []
    while (_k<len(tms)):# and (solver.y[0]<=1.) and (solver.y[0]>=0.):
        solver.integrate(tms[_k])
        sol.append(solver.y)
        _k += 1
    #print(sol)
    return(sol[0][3],(sol[1][3]-sol[0][3])/δτ)

def get_state(tme,x0,μ,μbar):
    # Full state [x, φ, V, δρ] at time tme, plus a finite-difference estimate of dδρ/dτ
    # appended as a fifth component.
    solver = ode(ode_rhs).set_integrator(backend)
    δρ0 = ρ0.subs(x,x0).subs(φ,0)-ρ.subs(x,x0).subs(φ,0).subs(σ,1.)
    solver.set_initial_value([x0,0,0,δρ0],0.).set_f_params(μ,μbar)
    δτ = 1.0e-8; tms = [tme,tme+δτ]
    _k = 0; sol = []
    while (solver.t < tms[-1]) and (solver.y[0]<=1.) and (solver.y[0]>=0.):
        solver.integrate(tms[_k])
        sol.append(solver.y)
        _k += 1
    return(list(sol[0])+[(sol[1][3]-sol[0][3])/δτ])
```
# Machinery for the universal line
* To find the universal singular curve we need to define two parameters
```
γ0 = PoissonBrackets(PoissonBrackets(H,H0),H)
γ1 = PoissonBrackets(PoissonBrackets(H0,H),H0)
```
* The dynamics
```
# Weighted combination of treated (H) and untreated (H0) dynamics with weights γ0, γ1;
# this is the standard construction of the singular (universal) arc.
dxdτSingExpr = -(γ0*diff(H0,φ)+γ1*diff(H,φ))/(γ0+γ1)
dφdτSingExpr = (γ0*diff(H0,x)+γ1*diff(H,x))/(γ0+γ1)
dVdτSingExpr = (γ0*h0+γ1*h)/(γ0+γ1)
σSingExpr = γ1*σ/(γ0+γ1)
```
* Machinery for Python: lambdify the functions above
```
dxdτSing = lambdify((x,φ,σ,μ,μbar),dxdτSingExpr)
dφdτSing = lambdify((x,φ,σ,μ,μbar),dφdτSingExpr)
dVdτSing = lambdify((x,φ,σ,μ,μbar),dVdτSingExpr)
σSing = lambdify((x,φ,σ,μ,μbar),σSingExpr)

def ode_rhs_Sing(t,state,μ,μbar):
    # Right-hand side along the singular (universal) arc; the state here is
    # [x, φ, V] (no δρ component — δρ stays identically zero on the arc).
    x, φ, V = state
    if (dρdσ(1.,x,φ,μ,μbar)<0) and (dρdσ(θ,x,φ,μ,μbar)>0):
        σstar = fsolve(dρdσ,.8,args=(x,φ,μ,μbar,))[0]
    else:
        σstar = 1.;
    #print([σstar,σSing(x,φ,σstar,μ,μbar)])
    return [dxdτSing(x,φ,σstar,μ,μbar),dφdτSing(x,φ,σstar,μ,μbar),dVdτSing(x,φ,σstar,μ,μbar)]

# def ode_rhs_Sing(t,state,μ,μbar):
#     x, φ, V = state
#     if (dρdσ(1.,x,φ,μ,μbar)<0) and (dρdσ(θ,x,φ,μ,μbar)>0):
#         σstar = fsolve(dρdσ,.8,args=(x,φ,μ,μbar,))[0]
#     else:
#         σstar = 1.;
#     σTrav = fsolve(lambda σ: dxdτ(x,φ,σ,μ,μbar)-dxdτSing(x,φ,σstar,μ,μbar),.6)[0]
#     print([σstar,σTrav])
#     return [dxdτSing(x,φ,σstar,μ,μbar),dφdτSing(x,φ,σstar,μ,μbar),dVdτ(x,σTrav)]

def get_universal_curve(end_point,tmax,Nsteps,μ,μbar):
    # Integrate the singular dynamics from the switching-curve endpoint
    # end_point = [τ, x, φ, V, ...] up to tmax on an Nsteps grid.
    tms = np.linspace(end_point[0],tmax,Nsteps); solver = ode(ode_rhs_Sing).set_integrator(backend)
    solver.set_initial_value(end_point[1:4],end_point[0]).set_f_params(μ,μbar)
    _k = 0; sol = []
    while (solver.t < tms[-1]):
        solver.integrate(tms[_k])
        sol.append([solver.t]+list(solver.y))
        _k += 1
    return sol

def get_σ_universal(tme,end_point,μ,μbar):
    # Recover the control on the universal arc at time tme by matching dx/dτ
    # against a finite difference of the integrated arc.
    # NOTE(review): sol rows are [t, x, φ, V], so sol[0][:2] yields (t, x) and
    # (sol[1][0]-sol[0][0])/δτ differentiates the TIME column — the intended
    # indices were probably sol[0][1:3] and column 1; confirm before relying on this.
    δτ = 1.0e-8; tms = [tme,tme+δτ]
    solver = ode(ode_rhs_Sing).set_integrator(backend)
    solver.set_initial_value(end_point[1:4],end_point[0]).set_f_params(μ,μbar)
    _k = 0; sol = []
    while (solver.t < tme+δτ):
        solver.integrate(tms[_k])
        sol.append([solver.t]+list(solver.y))
        _k += 1
    x, φ = sol[0][:2]
    sgm = fsolve(lambda σ: dxdτ(x,φ,σ,μ,μbar)-(sol[1][0]-sol[0][0])/δτ,θ/2)[0]
    return sgm

def get_state_universal(tme,end_point,μ,μbar):
    # State [τ, x, φ, V] on the universal arc at time tme.
    solver = ode(ode_rhs_Sing).set_integrator(backend)
    solver.set_initial_value(end_point[1:4],end_point[0]).set_f_params(μ,μbar)
    solver.integrate(tme)
    return [solver.t]+list(solver.y)

def ode_rhs_with_σstar(t,state,μ,μbar):
    # Regular (non-singular) dynamics driven by the pointwise-optimal control σ*.
    x, φ, V = state
    if (dρdσ(1.,x,φ,μ,μbar)<0) and (dρdσ(θ,x,φ,μ,μbar)>0):
        σ = fsolve(dρdσ,.8,args=(x,φ,μ,μbar,))[0]
    else:
        σ = 1.;
    return [dxdτ(x,φ,σ,μ,μbar),dφdτ(x,φ,σ,μ,μbar),dVdτ(x,σ)]

def ode_rhs_with_given_σ(t,state,σ,μ,μbar):
    # Same dynamics with a constant, externally supplied control σ.
    x, φ, V = state
    return [dxdτ(x,φ,σ,μ,μbar),dφdτ(x,φ,σ,μ,μbar),dVdτ(x,σ)]

def get_trajectory_with_σstar(starting_point,tmax,Nsteps,μ,μbar):
    # Characteristic leaving starting_point = [τ, x, φ, V, ...] under σ*,
    # sampled on an Nsteps grid, truncated if x exits [0, 1].
    tms = np.linspace(starting_point[0],tmax,Nsteps)
    solver = ode(ode_rhs_with_σstar).set_integrator(backend)
    solver.set_initial_value(starting_point[1:],starting_point[0]).set_f_params(μ,μbar)
    sol = []; _k = 0;
    while solver.t < max(tms) and (solver.y[0]<=1.) and (solver.y[0]>=0.):
        solver.integrate(tms[_k])
        sol.append([solver.t]+list(solver.y))
        _k += 1
    return sol

def get_trajectory_with_given_σ(starting_point,tmax,Nsteps,σ,μ,μbar):
    # Same as above with a fixed control σ.
    # NOTE(review): Nsteps is accepted but ignored — the grid is hard-coded to 100 points.
    tms = np.linspace(starting_point[0],tmax,100)
    solver = ode(ode_rhs_with_given_σ).set_integrator(backend)
    solver.set_initial_value(starting_point[1:],starting_point[0]).set_f_params(σ,μ,μbar)
    sol = []; _k = 0;
    while solver.t < max(tms) and (solver.y[0]<=1.) and (solver.y[0]>=0.):
        solver.integrate(tms[_k])
        sol.append([solver.t]+list(solver.y))
        _k += 1
    return sol

def get_state_with_σstar(tme,starting_point,μ,μbar):
    # Endpoint [τ, x, φ, V] at time tme of the σ*-driven characteristic from starting_point.
    solver = ode(ode_rhs_with_σstar).set_integrator(backend)
    solver.set_initial_value(starting_point[1:4],starting_point[0]).set_f_params(μ,μbar)
    solver.integrate(tme)
    return [solver.t]+list(solver.y)

def get_finalizing_point_from_universal_curve(tme,tmx,end_point,μ,μbar):
    # Leave the universal arc at time tme and follow σ* to the horizon tmx;
    # returns x(tmx). fsolve/root drive this value to zero to find the
    # departure time of the last characteristic.
    unv_point = get_state_universal(tme,end_point,μ,μbar)
    return get_state_with_σstar(tmx,unv_point,μ,μbar)[1]
```
# Field of optimal trajectories as the solution of the Bellman equation
* μ & μbar are varied by *T* and *T*bar ($\mu=1/T$ and $\bar\mu=1/\bar{T}$)
```
tmx = 720.
end_switching_curve = {'t': 24., 'x': .9/.8}
# for Τ, Τbar in zip([28]*5,[14,21,28,35,60]):
for Τ, Τbar in zip([28],[60]):
    μ = 1./Τ; μbar = 1./Τbar
    print("Parameters: μ = %.5f, μbar = %.5f"%(μ,μbar))
    # Locate the endpoint of the switching curve (both δρ and dδρ/dτ vanish there).
    end_switching_curve['t'], end_switching_curve['x'] = fsolve(get_δρ_ending,(end_switching_curve['t'],.8*end_switching_curve['x']),args=(μ,μbar),xtol=1.0e-12)
    end_point = [end_switching_curve['t']]+get_state(end_switching_curve['t'],end_switching_curve['x'],μ,μbar)
    print("Ending point for the switching line: τ = %.1f days, x = %.1f%%" % (end_point[0], end_point[1]*100))
    print("Checking the solution - should give zero values: ")
    print(get_δρ_ending([end_switching_curve['t'],end_switching_curve['x']],μ,μbar))
    print("* Constructing the primary field")
    # Fan of initial conditions split just below/above the switching endpoint x.
    experiments = {
        'sol1': {
            'T_end': tmx, 'τ0': 0.,
            'x0': list(np.linspace(0,end_switching_curve['x']-(1e-3),10))+
                  list(np.linspace(end_switching_curve['x']+(1e-6),1.,10))
        }
    }
    primary_field = []
    for name, values in experiments.items():
        primary_field.append(get_primary_field(name,values,μ,μbar))
    print("* Constructing the switching curve")
    switching_curve = []
    x0s = np.linspace(end_switching_curve['x'],1,21); _y = end_switching_curve['t']
    for x0 in x0s:
        # Warm-start each root solve from the previous switching time _y.
        tme = fsolve(get_δρ_value,_y,args=(x0,μ,μbar))[0]
        if (tme>0):
            switching_curve = switching_curve+[[tme,get_state(tme,x0,μ,μbar)[0]]]
            _y = tme
    print("* Constructing the universal curve")
    universal_curve = get_universal_curve(end_point,tmx,25,μ,μbar)
    print("* Finding the last characteristic")
    #time0 = time.time()
    tuniv = fsolve(get_finalizing_point_from_universal_curve,tmx-40.,args=(tmx,end_point,μ,μbar,))[0]
    #print("The proccess to find the last characteristic took %0.1f minutes" % ((time.time()-time0)/60.))
    univ_point = get_state_universal(tuniv,end_point,μ,μbar)
    print("The last point on the universal line:")
    print(univ_point)
    last_trajectory = get_trajectory_with_σstar(univ_point,tmx,50,μ,μbar)
    print("Final state:")
    final_state = get_state_with_σstar(tmx,univ_point,μ,μbar)
    print(final_state)
    print("Fold-change in tumor size: %.2f"%(exp((b-d)*tmx-final_state[-1])))
    # Plotting: primary field in greys/reds, switching + universal curves in red.
    plt.rcParams['figure.figsize'] = (6.75, 4)
    _k = 0
    for solutions in primary_field:
        for x0, entry in solutions.items():
            plt.plot(entry['τ'], entry['x'], 'k-', linewidth=.9, color=clrs[_k%palette_size])
            _k += 1
    plt.plot([x[0] for x in switching_curve],[x[1] for x in switching_curve],linewidth=2,color="red")
    plt.plot([end_point[0]],[end_point[1]],marker='o',color="red")
    plt.plot([x[0] for x in universal_curve],[x[1] for x in universal_curve],linewidth=2,color="red")
    plt.plot([x[0] for x in last_trajectory],[x[1] for x in last_trajectory],linewidth=.9,color="black")
    plt.xlim([0,tmx]); plt.ylim([0,1]); plt.xlabel("time, days"); plt.ylabel("fraction of resistant cells")
    plt.show()
    print()

import csv
from numpy.linalg import norm
# Sensitivity scan written to CSV.
# NOTE(review): the header has 11 columns but each row appends err_tuniv as a 12th value.
File = open("../figures/draft/sensitivity_mu-example.csv", 'w')
File.write("T,Tbar,mu,mubar,sw_end_t,sw_end_x,univ_point_t,univ_point_x,outcome,err_sw_t,err_sw_x\n")
writer = csv.writer(File,lineterminator='\n')
tmx = 720.
end_switching_curve0 = {'t': 23.36, 'x': .9592}
end_switching_curve_prev_t = end_switching_curve['t']
tuniv = tmx-30.
# Scan a grid of (T, Tbar): for each pair, retry the endpoint root-solve with a
# nudged initial guess (extrapolated along the previous step) until root() converges.
Τbars = np.arange(120,110,-2) #need to change here if more
for Τ in Τbars:
    μ = 1./Τ
    end_switching_curve = deepcopy(end_switching_curve0)
    for Τbar in Τbars:
        μbar = 1./Τbar
        print("* Parameters: T = %.1f, Tbar = %.1f (μ = %.5f, μbar = %.5f)"%(Τ,Τbar,μ,μbar))
        success = False; err = 1.
        while (not success)|(norm(err)>1e-6):
            # Extrapolate the guess along the last accepted step (minus a small bias).
            end_switching_curve = {'t': 2*end_switching_curve['t']-end_switching_curve_prev_t-.001, 'x': end_switching_curve['x']-0.002}
            sol = root(get_δρ_ending,(end_switching_curve['t'],end_switching_curve['x']),args=(μ,μbar))
            end_switching_curve_prev_t = end_switching_curve['t']
            end_switching_curve_prev_x = end_switching_curve['x']
            end_switching_curve['t'], end_switching_curve['x'] = sol.x
            success = sol.success
            err = get_δρ_ending([end_switching_curve['t'],end_switching_curve['x']],μ,μbar)
            if (not success):
                print("! Trying again...", sol.message)
            elif (norm(err)>1e-6):
                print("! Trying again... Convergence is not sufficient")
            else:
                end_point = [end_switching_curve['t']]+get_state(end_switching_curve['t'],end_switching_curve['x'],μ,μbar)
                print("Ending point: t = %.2f, x = %.2f%%"%(end_switching_curve['t'],100*end_switching_curve['x'])," Checking the solution:",err)
                universal_curve = get_universal_curve(end_point,tmx,25,μ,μbar)
                tuniv = root(get_finalizing_point_from_universal_curve,tuniv,args=(tmx,end_point,μ,μbar)).x
                err_tuniv = get_finalizing_point_from_universal_curve(tuniv,tmx,end_point,μ,μbar)
                univ_point = get_state_universal(tuniv,end_point,μ,μbar)
                print("tuniv = %.2f"%tuniv," Checking the solution: ",err_tuniv)
                final_state = get_state_with_σstar(tmx,univ_point,μ,μbar)
                outcome = exp((b-d)*tmx-final_state[-1])
                print("Fold-change in tumor size: %.2f"%(outcome))
                output = [Τ,Τbar,μ,μbar,end_switching_curve['t'],end_switching_curve['x']]+list(univ_point[0:2])+[outcome]+list(err)+[err_tuniv]
                writer.writerow(output)
        # Reuse the diagonal (Tbar == T) solution as the seed for the next T value.
        if (Τbar==Τ):
            end_switching_curve0 = deepcopy(end_switching_curve)
File.close()
```
* Here I investigate the dependence of $\mathrm{FoldChange}(T,\bar T)$. I fix $T$ at 15,30,45,60 days, and then I vary $\bar T$ between zero and $4T$. The example below is just a simulation for only one given value of $T$.
```
import csv
from numpy.linalg import norm
# Same scan with T fixed and Tbar varied from 4T down to 0 in steps of Τbars_step.
# NOTE(review): header again omits the err_tuniv column that rows append.
File = open("../results/sensitivity1.csv", 'w')
File.write("T,Tbar,mu,mubar,sw_end_t,sw_end_x,univ_point_t,univ_point_x,outcome,err_sw_t,err_sw_x\n")
writer = csv.writer(File,lineterminator='\n')
tmx = 720.
end_switching_curve = {'t': 23.36, 'x': .9592}
end_switching_curve_prev_t = end_switching_curve['t']
tuniv = tmx-30.
Τ = 15
Τbars_step = .5; Tbars = np.arange(Τ*4,0,-Τbars_step)
for Τbar in Tbars:
    μ = 1./Τ; μbar = 1./Τbar
    print("* Parameters: T = %.1f, Tbar = %.1f (μ = %.5f, μbar = %.5f)"%(Τ,Τbar,μ,μbar))
    success = False; err = 1.
    while (not success)|(norm(err)>1e-6):
        end_switching_curve = {'t': 2*end_switching_curve['t']-end_switching_curve_prev_t-.001, 'x': end_switching_curve['x']-0.002}
        sol = root(get_δρ_ending,(end_switching_curve['t'],end_switching_curve['x']),args=(μ,μbar))
        end_switching_curve_prev_t = end_switching_curve['t']
        end_switching_curve_prev_x = end_switching_curve['x']
        end_switching_curve['t'], end_switching_curve['x'] = sol.x
        success = sol.success
        err = get_δρ_ending([end_switching_curve['t'],end_switching_curve['x']],μ,μbar)
        if (not success):
            print("! Trying again...", sol.message)
        elif (norm(err)>1e-6):
            print("! Trying again... Convergence is not sufficient")
        else:
            end_point = [end_switching_curve['t']]+get_state(end_switching_curve['t'],end_switching_curve['x'],μ,μbar)
            print("Ending point: t = %.2f, x = %.2f%%"%(end_switching_curve['t'],100*end_switching_curve['x'])," Checking the solution:",err)
            universal_curve = get_universal_curve(end_point,tmx,25,μ,μbar)
            tuniv = root(get_finalizing_point_from_universal_curve,tuniv,args=(tmx,end_point,μ,μbar)).x
            err_tuniv = get_finalizing_point_from_universal_curve(tuniv,tmx,end_point,μ,μbar)
            univ_point = get_state_universal(tuniv,end_point,μ,μbar)
            print("tuniv = %.2f"%tuniv," Checking the solution: ",err_tuniv)
            final_state = get_state_with_σstar(tmx,univ_point,μ,μbar)
            outcome = exp((b-d)*tmx-final_state[-1])
            print("Fold-change in tumor size: %.2f"%(outcome))
            output = [Τ,Τbar,μ,μbar,end_switching_curve['t'],end_switching_curve['x']]+list(univ_point[0:2])+[outcome]+list(err)+[err_tuniv]
            writer.writerow(output)
File.close()
```
* The results are aggregated in a file **sensitivity1_agg.csv**.
```
# NOTE(review): pd.DataFrame.from_csv was deprecated and removed in pandas >= 1.0;
# the modern equivalent is pd.read_csv(path, index_col=0). Likewise drop(..., 1)
# now requires axis=1 as a keyword.
df = pd.DataFrame.from_csv("../figures/draft/sensitivity1_agg.csv").reset_index().drop(['err_sw_t','err_sw_x','err_tuniv'],1)
df['Tratio'] = df['Tbar']/df['T']
df.head()
```
github_jupyter
## Multicollinearity and Regression Analysis In this tutorial, we will be using a spatial dataset of county-level election and demographic statistics for the United States. This time, we'll explore different methods to diagnose and account for multicollinearity in our data. Specifically, we'll calculate variance inflation factor (VIF), and compare parameter estimates and model fit in a multivariate regression predicting 2016 county voting preferences using an OLS model, a ridge regression, a lasso regression, and an elastic net regression. Objectives: * ***Calculate a variance inflation factor to diagnose multicollinearity.*** * ***Use geographicall weighted regression to identify if the multicollinearity is scale dependent.*** * ***Interpret model summary statistics.*** * ***Describe how multicollinearity impacts stability in parameter esimates.*** * ***Explain the variance/bias tradeoff and describe how to use it to improve models*** * ***Draw a conclusion based on contrasting models.*** Review: * [Dormann, C. et al. (2013). Collinearity: a review of methods to deal with it and a simulation study evaluating their performance. 
Ecography, 36(1), 27-46.](https://onlinelibrary.wiley.com/doi/full/10.1111/j.1600-0587.2012.07348.x) ``` import numpy as np import geopandas as gpd import pandas as pd import matplotlib.pyplot as plt import seaborn as sns import statsmodels.api as sm from statsmodels.stats.outliers_influence import variance_inflation_factor from sklearn.model_selection import cross_val_score from sklearn.model_selection import RepeatedKFold from sklearn.linear_model import Ridge from sklearn.linear_model import Lasso from sklearn.linear_model import ElasticNet from numpy import mean from numpy import std from numpy import absolute from libpysal.weights.contiguity import Queen import libpysal from statsmodels.api import OLS sns.set_style('white') import os ``` First, we're going to load the 'Elections' dataset from the libpysal library, which is a very easy to use API that accesses the Geodata Center at the University of Chicago. * More on spatial data science resources from UC: https://spatial.uchicago.edu/ * A list of datasets available through lipysal: https://geodacenter.github.io/data-and-lab// ``` from libpysal.examples import load_example elections = load_example('Elections') #note the folder where your data now lives: #First, let's see what files are available in the 'Elections' data example elections.get_file_list() os.getcwd() os.chdir('H:\\EnvDatSci\\project') ``` When you are out in the world doing research, you often will not find a ready-made function to download your data. That's okay! You know how to get this dataset without using pysal! Do a quick internal review of online data formats and automatic data downloads. ### TASK 1: Use urllib functions to download this file directly from the internet to you H:/EnvDatSci folder (not your git repository). Extract the zipped file you've downloaded into a subfolder called H:/EnvDatSci/elections. 
``` # Task 1 code here: Automatic data downloads #import required function: #import urllib.request #define online filepath (aka url): #url='https://geodacenter.github.io/data-and-lab/data/election.zip' #define local filepath: #local = 'H:\\EnvDatSci\\project' #download elections data: #urllib.request.urlretrieve(url, local) #unzip file: see if google can help you figure this one out! #import shutil #shutil.unpack_archive(local, '../../../') #import required function: import urllib.request #define online filepath (aka url): url='https://geodacenter.github.io/data-and-lab/data/election.zip' #define local filepath: #local = '../../../' #local = 'c:\\users\marym' #local = 'H:\\EnvDatSci\\project' local = 'H:\\EnvDatSci\project\elections.zip' #download elections data: urllib.request.urlretrieve(url, local) #unzip file: see if google can help you figure this one out! import shutil extract_dir='H:\\envdatsci\project\election' archive_format = "zip" shutil.unpack_archive(local,extract_dir, archive_format) ``` ### TASK 2: Use geopandas to read in this shapefile. Call your geopandas.DataFrame "votes" ``` # TASK 2: Use geopandas to read in this shapefile. Call your geopandas.DataFrame "votes" #os.listdir('../../../election') votes = gpd.read_file('H:\\envdatsci\project\election\election\election.shp') os.listdir() ``` ### EXTRA CREDIT TASK (+2pts): use os to delete the elections data downloaded by pysal in your C: drive that you are no longer using. ``` # Extra credit task: #Let's view the shapefile to get a general idea of the geometry we're looking at: %matplotlib inline votes.plot() #View the first few line]s of the dataset votes.head() #Since there are too many columns for us to view on a signle page using "head", we can just print out the column names so we have them all listed for reference for col in votes.columns: print(col) ``` #### You can use pandas summary statistics to get an idea of how county-level data varies across the United States. 
### TASK 3: For example, how did the county mean percent Democratic vote change between 2012 (pct_dem_12) and 2016 (pct_dem_16)? Look here for more info on pandas summary statistics:https://www.earthdatascience.org/courses/intro-to-earth-data-science/scientific-data-structures-python/pandas-dataframes/run-calculations-summary-statistics-pandas-dataframes/ ``` #Task 3 print (votes.pct_dem_12.mean) print (votes.pct_dem_16.mean) ``` We can also plot histograms of the data. Below, smoothed histograms from the seaborn package (imported as sns) let us get an idea of the distribution of percent democratic votes in 2012 (left) and 2016 (right). ``` # Plot histograms: f,ax = plt.subplots(1,2, figsize=(2*3*1.6, 2)) for i,col in enumerate(['pct_dem_12','pct_dem_16']): sns.kdeplot(votes[col].values, shade=True, color='slategrey', ax=ax[i]) ax[i].set_title(col.split('_')[1]) # Plot spatial distribution of # dem vote in 2012 and 2016 with histogram. f,ax = plt.subplots(2,2, figsize=(1.6*6 + 1,2.4*3), gridspec_kw=dict(width_ratios=(6,1))) for i,col in enumerate(['pct_dem_12','pct_dem_16']): votes.plot(col, linewidth=.05, cmap='RdBu', ax=ax[i,0]) ax[i,0].set_title(['2012','2016'][i] + "% democratic vote") ax[i,0].set_xticklabels('') ax[i,0].set_yticklabels('') sns.kdeplot(votes[col].values, ax=ax[i,1], vertical=True, shade=True, color='slategrey') ax[i,1].set_xticklabels('') ax[i,1].set_ylim(-1,1) f.tight_layout() plt.show() ``` ### TASK 4: Make a new column on your geopandas dataframe called "pct_dem_change" and plot it using the syntax above. Explain the plot. 
``` # Task 4: add new column pct_dem_change to votes: votes["pct_dem_change"]=votes["pct_dem_16"]-votes.pct_dem_12 print (votes.pct_dem_change) #Task 4: plot your pct_dem_change variable on a map: f,ax = plt.subplots(3,2, figsize=(1.6*6 + 1,2.4*3), gridspec_kw=dict(width_ratios=(6,1))) for i,col in enumerate(['pct_dem_12','pct_dem_16', 'pct_dem_change']): votes.plot(col, linewidth=.05, cmap='RdBu', ax=ax[i,0]) ax[i,0].set_title(['2012','2016', 'Difference'][i] + "% democratic vote") ax[i,0].set_xticklabels('') ax[i,0].set_yticklabels('') sns.kdeplot(votes[col].values, ax=ax[i,1], vertical=True, shade=True, color='slategrey') ax[i,1].set_xticklabels('') ax[i,1].set_ylim(-1,1) f.tight_layout() plt.show() ``` Click on this url to learn more about the variables in this dataset: https://geodacenter.github.io/data-and-lab//county_election_2012_2016-variables/ As you can see, there are a lot of data values available in this dataset. Let's say we want to learn more about what county-level factors influence percent change in democratic vote between (pct_dem_change). Looking at the data description on the link above, you see that this is an exceptionally large dataset with many variables. During lecture, we discussed how there are two types of multicollinearity in our data: * *Intrinsic multicollinearity:* is an artifact of how we make observations. Often our measurements serve as proxies for some latent process (for example, we can measure percent silt, percent sand, and percent clay as proxies for the latent variable of soil texture). There will be slight variability in the information content between each proxy measurement, but they will not be independent of one another. * *Incidental collinearity:* is an artifact of how we sample complex populations. If we collect data from a subsample of the landscape where we don't see all combinations of our predictor variables (do not have good cross replication across our variables). 
We often induce collinearity in our data just because we are limited in our ability to sample the environment at the scale of temporal/spatial variability of our process of interest.
Pairwise plots of your variables, called scatterplots, can provide a lot of insight into the type of relationships you have between variables. A scatterplot matrix is a pairwise comparison of all variables in your dataset. ``` #Use seaborn.pairplot to plot a scatterplot matrix of you 10 variable subset: sns.pairplot(votes[my_list]) ``` ### TASK 7: Do you observe any collinearity in this dataset? How would you describe the relationship between your two "incidentally collinear" variables that you selected based on looking at variable descriptions? *Type answer here* ### TASK 8: What is plotted on the diagonal panels of the scatterplot matrix? *Type answer here* ## Diagnosing collinearity globally: During class, we discussed the Variance Inflation Factor, which describes the magnitude of variance inflation that can be expected in an OLS parameter estimate for a given variable *given pairwise collinearity between that variable and another variable*. ``` #VIF = 1/(1-R2) of a pairwise OLS regression between two predictor variables #We can use a built-in function "variance_inflation_factor" from statsmodel.api to calculate VIF #Learn more about the function ?variance_inflation_factor #Calculate VIFs on our dataset vif = pd.DataFrame() vif["VIF Factor"] = [variance_inflation_factor(votes[my_list[1:10]].values, i) for i in range(votes[my_list[1:10]].shape[1])] vif["features"] = votes[my_list[1:10]].columns vif.round() ``` ### Collinearity is always present in observational data. When is it a problem? Generally speaking, VIF > 10 are considered "too much" collinearity. But this value is somewhat arbitrary: the extent to which variance inflation will impact your analysis is highly context dependent. There are two primary contexts where variance inflation is problematic: 1\. 
**You are using your analysis to evaluate variable importance:** If you are using parameter estimates from your model to diagnose which observations have physically important relationships with your response variable, variance inflation can make an important predictor look unimportant, and parameter estimates will be highly leveraged by small changes in the data. 2\. **You want to use your model to make predictions in a situation where the specific structure of collinearity between variables may have shifted:** When training a model on collinear data, the model only applies to data with that exact structure of collinearity. ### Caluculate a linear regression on the global data: In this next step, we're going to calculate a linear regression in our data an determine whether there is a statistically significant relationship between per capita income and percent change in democratic vote. ``` #first, forumalate the model. See weather_trend.py in "Git_101" for a refresher on how. #extract variable that you want to use to "predict" X = np.array(votes[my_list[1:10]].values) #standardize data to assist in interpretation of coefficients X = (X - np.mean(X, axis=0)) / np.std(X, axis=0) #extract variable that we want to "predict" Y = np.array(votes['pct_dem_change'].values) #standardize data to assist in interpretation of coefficients Y = (Y - np.mean(X)) / np.std(Y) lm = OLS(Y,X) lm_results = OLS(Y,X).fit().summary() print(lm_results) ``` ### TASK 9: Answer: which coefficients indicate a statisticall significant relationship between parameter and pct_dem_change? What is your most important predictor variable? How can you tell? *Type answer here* ### TASK10: Are any of these parameters subject to variance inflation? How can you tell? *Type answer here* Now, let's plot our residuals to see if there are any spatial patterns in them. 
Remember: residuals = observed - fitted values
``` # when L2=0, Ridge equals OLS model = Ridge(alpha=1) # define model evaluation method cv = RepeatedKFold(n_splits=10, n_repeats=3, random_state=1) # evaluate model scores = cross_val_score(model, X, Y, scoring='neg_mean_absolute_error', cv=cv, n_jobs=-1) #force scores to be positive scores = absolute(scores) print('Mean MAE: %.3f (%.3f)' % (mean(scores), std(scores))) model.fit(X,Y) #Print out the model coefficients print(model.coef_) ``` ## Penalized regression: lasso penalty From https://www.analyticsvidhya.com/blog/2016/01/ridge-lasso-regression-python-complete-tutorial/ "LASSO stands for Least Absolute Shrinkage and Selection Operator. I know it doesn’t give much of an idea but there are 2 key words here – ‘absolute‘ and ‘selection‘. Lets consider the former first and worry about the latter later. Lasso regression performs L1 regularization, i.e. it adds a factor of sum of absolute value of coefficients in the optimization objective. Thus, lasso regression optimizes the following: **Objective = RSS + α * (sum of absolute value of coefficients)** Here, α (alpha) works similar to that of ridge and provides a trade-off between balancing RSS and magnitude of coefficients. Like that of ridge, α can take various values. Lets iterate it here briefly: * **α = 0:** Same coefficients as simple linear regression * **α = ∞:** All coefficients zero (same logic as before) * **0 < α < ∞:** coefficients between 0 and that of simple linear regression Yes its appearing to be very similar to Ridge till now. But just hang on with me and you’ll know the difference by the time we finish." In other words, the lasso penalty shrinks unimportant coefficients down towards zero, automatically "selecting" important predictor variables. But what if that shrunken coefficient is induced by incidental collinearity (i.e. is a feature of how we sampled our data)? 
``` # when L1=0, Lasso equals OLS model = Lasso(alpha=0) # define model evaluation method cv = RepeatedKFold(n_splits=10, n_repeats=3, random_state=1) # evaluate model scores = cross_val_score(model, X, Y, scoring='neg_mean_absolute_error', cv=cv, n_jobs=-1) #force scores to be positive scores = absolute(scores) print('Mean MAE: %.3f (%.3f)' % (mean(scores), std(scores))) model.fit(X,Y) #Print out the model coefficients print(model.coef_) #How do these compare with OLS coefficients above? # when L1 approaches infinity, certain coefficients will become exactly zero, and MAE equals the variance of our response variable: model = Lasso(alpha=10000000) # define model evaluation method cv = RepeatedKFold(n_splits=10, n_repeats=3, random_state=1) # evaluate model scores = cross_val_score(model, X, Y, scoring='neg_mean_absolute_error', cv=cv, n_jobs=-1) #force scores to be positive scores = absolute(scores) print('Mean MAE: %.3f (%.3f)' % (mean(scores), std(scores))) model.fit(X,Y) #Print out the model coefficients print(model.coef_) #How do these compare with OLS coefficients above? ``` ### Penalized regression: elastic net penalty In other words, the lasso penalty shrinks unimportant coefficients down towards zero, automatically "selecting" important predictor variables. The ridge penalty shrinks coefficients of collinear predictor variables nearer to each other, effectively partitioning the magnitude of response from the response variable between them, instead of "arbitrarily" partitioning it to one group. We can also run a regression with a linear combination of ridge and lasso, called the elastic net, that has a cool property called "group selection." The ridge penalty still works to distribute response variance equally between members of "groups" of collinear predictor variables. The lasso penalty still works to shrink certain coefficients to exactly zero so they can be ignored in model formulation. 
The elastic net produces models that are both sparse and stable under collinearity, by shrinking parameters of members of unimportant collinear predictor variables to exactly zero: ``` # when L1 approaches infinity, certain coefficients will become exactly zero, and MAE equals the variance of our response variable: model = ElasticNet(alpha=1, l1_ratio=0.2) # define model evaluation method cv = RepeatedKFold(n_splits=10, n_repeats=3, random_state=1) # evaluate model scores = cross_val_score(model, X, Y, scoring='neg_mean_absolute_error', cv=cv, n_jobs=-1) #force scores to be positive scores = absolute(scores) print('Mean MAE: %.3f (%.3f)' % (mean(scores), std(scores))) model.fit(X,Y) #Print out the model coefficients print(model.coef_) #How do these compare with OLS coefficients above? ``` ### TASK 11: Match these elastic net coefficients up with your original data. Do you see a logical grouping(s) between variables that have non-zero coefficients?Explain why or why not. *Type answer here* ``` # Task 11 scratch cell: ```
github_jupyter
# Logistic Regression (scikit-learn) with HDFS/Spark Data Versioning This example is based on our [basic census income classification example](census-end-to-end.ipynb), using local setups of ModelDB and its client, and [HDFS/Spark data versioning](https://docs.verta.ai/en/master/api/api/versioning.html#verta.dataset.HDFSPath). ``` !pip install /path/to/verta-0.15.10-py2.py3-none-any.whl HOST = "localhost:8080" PROJECT_NAME = "Census Income Classification - HDFS Data" EXPERIMENT_NAME = "Logistic Regression" ``` ## Imports ``` from __future__ import print_function import warnings from sklearn.exceptions import ConvergenceWarning warnings.filterwarnings("ignore", category=ConvergenceWarning) warnings.filterwarnings("ignore", category=FutureWarning) import itertools import os import numpy as np import pandas as pd import sklearn from sklearn import model_selection from sklearn import linear_model ``` --- # Log Workflow This section demonstrates logging model metadata and training artifacts to ModelDB. 
## Instantiate Client ``` from verta import Client from verta.utils import ModelAPI client = Client(HOST) proj = client.set_project(PROJECT_NAME) expt = client.set_experiment(EXPERIMENT_NAME) ``` <h2>Prepare Data</h2> ``` from pyspark import SparkContext sc = SparkContext("local") from verta.dataset import HDFSPath hdfs = "hdfs://HOST:PORT" dataset = client.set_dataset(name="Census Income S3") blob = HDFSPath.with_spark(sc, "{}/data/census/*".format(hdfs)) version = dataset.create_version(blob) version csv = sc.textFile("{}/data/census/census-train.csv".format(hdfs)).collect() from verta.external.six import StringIO df_train = pd.read_csv(StringIO('\n'.join(csv))) X_train = df_train.iloc[:,:-1] y_train = df_train.iloc[:, -1] df_train.head() ``` ## Prepare Hyperparameters ``` hyperparam_candidates = { 'C': [1e-6, 1e-4], 'solver': ['lbfgs'], 'max_iter': [15, 28], } hyperparam_sets = [dict(zip(hyperparam_candidates.keys(), values)) for values in itertools.product(*hyperparam_candidates.values())] ``` ## Train Models ``` def run_experiment(hyperparams): # create object to track experiment run run = client.set_experiment_run() # create validation split (X_val_train, X_val_test, y_val_train, y_val_test) = model_selection.train_test_split(X_train, y_train, test_size=0.2, shuffle=True) # log hyperparameters run.log_hyperparameters(hyperparams) print(hyperparams, end=' ') # create and train model model = linear_model.LogisticRegression(**hyperparams) model.fit(X_train, y_train) # calculate and log validation accuracy val_acc = model.score(X_val_test, y_val_test) run.log_metric("val_acc", val_acc) print("Validation accuracy: {:.4f}".format(val_acc)) # save and log model run.log_model(model) # log dataset snapshot as version run.log_dataset_version("train", version) for hyperparams in hyperparam_sets: run_experiment(hyperparams) ``` --- # Revisit Workflow This section demonstrates querying and retrieving runs via the Client. 
## Retrieve Best Run ``` best_run = expt.expt_runs.sort("metrics.val_acc", descending=True)[0] print("Validation Accuracy: {:.4f}".format(best_run.get_metric("val_acc"))) best_hyperparams = best_run.get_hyperparameters() print("Hyperparameters: {}".format(best_hyperparams)) ``` ## Train on Full Dataset ``` model = linear_model.LogisticRegression(multi_class='auto', **best_hyperparams) model.fit(X_train, y_train) ``` ## Calculate Accuracy on Full Training Set ``` train_acc = model.score(X_train, y_train) print("Training accuracy: {:.4f}".format(train_acc)) ``` ---
github_jupyter
## Contour deformation In the context of GW method, contour deformation (CD) technique is used in conjunction with resolution of identity (RI) to reduce the formal scaling of the self-energy calculation. Compared to widely used analytic continuation approach it provides a means to evaluate self-energy directly on the real axis without employing Pade approximants or non-linear least squares fit and potentially offering superior accuracy. Here, we provide a brief outline of the theory behind CD and give an example of the self-energy calculation within CD without invoking RI in order to facilitate comparison with the results prsented above. Detailed discussion of the CD can be found in the following papers: 1. Golze, D., Wilhelm, J., van Setten, M. J., & Rinke, P. (2018). Core-Level Binding Energies from GW : An Efficient Full-Frequency Approach within a Localized Basis. Journal of Chemical Theory and Computation, 14(9), 4856–4869. https://doi.org/10.1021/acs.jctc.8b00458 2. Giantomassi, M., Stankovski, M., Shaltaf, R., Grüning, M., Bruneval, F., Rinke, P., & Rignanese, G.-M. (2011). Electronic properties of interfaces and defects from many-body perturbation theory: Recent developments and applications. Physica Status Solidi (B), 248(2), 275–289. https://doi.org/10.1002/pssb.201046094 CD is used to recast the convolution in the GW expression of self-energy as a difference between two integrals, one which can be performed analytically whereas the other can be evaluated numerically on a relatively small grid. 
This is achieved by closing the integration contour as shown below [2]:
``` import psi4 import numpy as np import scipy as sp from matplotlib import pyplot as plt %matplotlib inline from IPython.core.display import display, HTML display(HTML("<style>.container {width:95% !important;}</style>")) psi4.set_options({'basis' : 'cc-pvdz', 'd_convergence' : 1e-7,'scf_type' : 'out_of_core', 'dft_spherical_points' : 974, 'dft_radial_points' : 150 }) c2h2 = psi4.geometry(""" C 0.0000 0.0000 0.6015 C 0.0000 0.0000 -0.6015 H 0.0000 0.0000 1.6615 H 0.0000 0.0000 -1.6615 symmetry c1 units angstrom """) psi4.set_output_file('c2h2_ccpvdz.out') scf_e, scf_wfn = psi4.energy('PBE', return_wfn=True) print("DFT energy is %16.10f" % scf_e) epsilon = np.asarray(scf_wfn.epsilon_a()) print(epsilon*psi4.constants.hartree2ev) ``` ``` SCF Total Energy (Ha): -77.2219432068 (MOLGW) ``` ``` import GW gw_par = {'no_qp' : 7, 'nv_qp' : 0, 'nomega_sigma' : 501, 'step_sigma' : 0.01, 'gl_npoint' : 200, 'low_mem' : True } gw_c2h2_dz_cd1 = GW.GW_DFT(scf_wfn, c2h2, gw_par) gw_c2h2_dz_cd1.print_summary() ``` ``` GW eigenvalues (eV) RI # E0 SigX-Vxc SigC Z E_qp^lin E_qp^graph 1 -269.503377 -35.463486 11.828217 0.724328 -286.623075 -326.542284 2 -269.449587 -35.412335 11.798952 0.725633 -286.584227 -326.514902 3 -18.425273 -9.085843 4.032739 0.740744 -22.168328 -21.438530 4 -13.915903 -6.453950 1.756727 0.797034 -17.659749 -17.729721 5 -11.997810 -5.869987 1.145594 0.873449 -16.124327 -15.984958 6 -6.915552 -3.811111 -0.355345 0.897341 -10.654285 -10.639366 7 -6.915552 -3.811111 -0.355345 0.897341 -10.654285 -10.639366 ``` ``` gw_par = {'no_qp' : 7, 'nv_qp' : 0, 'nomega_sigma' : 501, 'step_sigma' : 0.01, 'analytic_W': True, 'gl_npoint' : 200, 'debug' : False, 'low_mem' : False } gw_c2h2_dz_cd2 = GW.GW_DFT(scf_wfn, c2h2, gw_par) gw_c2h2_dz_cd2.print_summary() ``` ``` Analytic vs approximate W (contour deformation algorithm) Analytic E^lin, eV E^graph, eV Z -286.589767 -326.503147 0.724323 -286.550907 -326.475732 0.725630 -22.169264 -21.436806 0.740752 -17.660393 -17.728667 
0.797120 -16.125682 -15.984765 0.873439 -10.631926 -10.639259 0.897342 -10.680195 -10.639259 0.897342 Approximate E^lin, eV E^graph, eV Z -286.587831 -326.503140 0.724323 -286.548967 -326.475725 0.725630 -22.168472 -21.436808 0.740752 -17.660116 -17.728666 0.797120 -16.125265 -15.984765 0.873439 -10.631349 -10.639259 0.897342 -10.679617 -10.639259 0.897342 MOLGW reference GW eigenvalues (eV) # E0 SigX-Vxc SigC Z E_qp^lin E_qp^graph 1 -269.503377 -35.463486 11.828217 0.724328 -286.623075 -326.542284 2 -269.449587 -35.412335 11.798952 0.725633 -286.584227 -326.514902 3 -18.425273 -9.085843 4.032739 0.740744 -22.168328 -21.438530 4 -13.915903 -6.453950 1.756727 0.797034 -17.659749 -17.729721 5 -11.997810 -5.869987 1.145594 0.873449 -16.124327 -15.984958 6 -6.915552 -3.811111 -0.355345 0.897341 -10.654285 -10.639366 7 -6.915552 -3.811111 -0.355345 0.897341 -10.654285 -10.639366 ```
github_jupyter
<img align="center" style="max-width: 1000px" src="banner.png"> <img align="right" style="max-width: 200px; height: auto" src="hsg_logo.png"> ## Lab 04 - Artificial Neural Networks (ANNs) - Assignments EMBA 60 W10 / EMBA 61 W5: Coding und Künstliche Intelligenz, University of St. Gallen In the last lab we learned how to implement, train, and apply our first **Artificial Neural Network (ANN)** using a Python library named `PyTorch`. The `PyTorch` library is an open-source machine learning library for Python, used for a variety of applications such as image classification and natural language processing. In this lab, we aim to leverage that knowledge by applying it to a set of self-coding assignments. But before we do so let's start with a motivational video by NVIDIA: ``` from IPython.display import YouTubeVideo # NVIDIA: "The Deep Learning Revolution" YouTubeVideo('Dy0hJWltsyE', width=1000, height=500) ``` As always, pls. don't hesitate to ask all your questions either during the lab, post them in our CANVAS (StudyNet) forum (https://learning.unisg.ch), or send us an email (using the course email). ## 1. Assignment Objectives: Similar today's lab session, after today's self-coding assignments you should be able to: > 1. Understand the basic concepts, intuitions and major building blocks of **Artificial Neural Networks (ANNs)**. > 2. Know how to use Python's **PyTorch library** to train and evaluate neural network based models. > 3. Understand how to apply neural networks to **classify images** of handwritten digits. > 4. Know how to **interpret the detection results** of the network as well as its **reconstruction loss**. ## 2. Setup of the Jupyter Notebook Environment Similar to the previous labs, we need to import a couple of Python libraries that allow for data analysis and data visualization. 
We will mostly use the `PyTorch`, `Numpy`, `Sklearn`, `Matplotlib`, `Seaborn` and a few utility libraries throughout this lab: ``` # import standard python libraries import os, urllib, io from datetime import datetime import numpy as np ``` Import the Python machine / deep learning libraries: ``` # import the PyTorch deep learning libary import torch, torchvision import torch.nn.functional as F from torch import nn, optim ``` Import the sklearn classification metrics: ``` # import sklearn classification evaluation library from sklearn import metrics from sklearn.metrics import classification_report, confusion_matrix ``` Import Python plotting libraries: ``` # import matplotlib, seaborn, and PIL data visualization libary import matplotlib.pyplot as plt import seaborn as sns from PIL import Image ``` Enable notebook matplotlib inline plotting: ``` %matplotlib inline ``` Import Google's GDrive connector and mount your GDrive directories: ``` # import the Google Colab GDrive connector from google.colab import drive # mount GDrive inside the Colab notebook drive.mount('/content/drive') ``` Create a structure of Colab Notebook sub-directories inside of GDrive to store (1) the data as well as (2) the trained neural network models: ``` # create Colab Notebooks directory notebook_directory = '/content/drive/MyDrive/Colab Notebooks' if not os.path.exists(notebook_directory): os.makedirs(notebook_directory) # create data sub-directory inside the Colab Notebooks directory data_directory = '/content/drive/MyDrive/Colab Notebooks/data' if not os.path.exists(data_directory): os.makedirs(data_directory) # create models sub-directory inside the Colab Notebooks directory models_directory = '/content/drive/MyDrive/Colab Notebooks/models' if not os.path.exists(models_directory): os.makedirs(models_directory) ``` Set a random `seed` value to obtain reproducable results: ``` # init deterministic seed seed_value = 1234 np.random.seed(seed_value) # set numpy seed 
torch.manual_seed(seed_value) # set pytorch seed CPU
```

Google Colab provides the use of free GPUs for running notebooks. However, if you just execute this notebook as is, it will use your device's CPU. To run the lab on a GPU, go to `Runtime` > `Change runtime type` and set the Runtime type to `GPU` in the drop-down. Running this lab on a CPU is fine, but you will find that GPU computing is faster. *CUDA* indicates that the lab is being run on GPU.

Enable GPU computing by setting the device flag and init a CUDA seed:

```
# set cpu or gpu enabled device
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu').type

# init deterministic GPU seed
torch.cuda.manual_seed(seed_value)

# log type of device enabled
print('[LOG] notebook with {} computation enabled'.format(str(device)))
```

Let's determine if we have access to a GPU provided by e.g. Google's Colab environment:

```
!nvidia-smi
```

## 3. Artificial Neural Networks (ANNs) Assignments

### 3.1 Fashion MNIST Dataset Download and Data Assessment

The **Fashion-MNIST database** is a large database of Zalando articles that is commonly used for training various image processing systems. The database is widely used for training and testing in the field of machine learning.

Let's have a brief look into a couple of sample images contained in the dataset:

<img align="center" style="max-width: 700px; height: 300px" src="https://raw.githubusercontent.com/HSG-AIML-Teaching/EMBA2022-Lab/main/lab_04/FashionMNIST.png">

Source: https://www.kaggle.com/c/insar-fashion-mnist-challenge

Further details on the dataset can be obtained via Zalando research's [github page](https://github.com/zalandoresearch/fashion-mnist).

The **Fashion-MNIST database** is a dataset of Zalando's article images, consisting of a training set of 60,000 examples and a test set of 10,000 examples. Each example is a 28x28 grayscale image, associated with a label from 10 classes.
Zalando created this dataset with the intention of providing a replacement for the popular **MNIST** handwritten digits dataset. It is a useful addition as it is a bit more complex, but still very easy to use. It shares the same image size and train/test split structure as MNIST, and can therefore be used as a drop-in replacement. It requires minimal efforts on preprocessing and formatting the distinct images. Let's download, transform and inspect the training images of the dataset. Therefore, let's first define the directory in which we aim to store the training data: ``` train_path = data_directory + '/train_fashion_mnist' ``` Now, let's download the training data accordingly: ``` # define pytorch transformation into tensor format transf = torchvision.transforms.Compose([torchvision.transforms.ToTensor()]) # download and transform training images fashion_mnist_train_data = torchvision.datasets.FashionMNIST(root=train_path, train=True, transform=transf, download=True) ``` Verify the number of training images downloaded: ``` # determine the number of training data images len(fashion_mnist_train_data) ``` Next, we need to map each numerical label to its fashion item, which will be useful throughout the lab: ``` fashion_classes = {0: 'T-shirt/top', 1: 'Trouser', 2: 'Pullover', 3: 'Dress', 4: 'Coat', 5: 'Sandal', 6: 'Shirt', 7: 'Sneaker', 8: 'Bag', 9: 'Ankle boot'} ``` Let's now define the directory in which we aim to store the evaluation data: ``` eval_path = data_directory + '/eval_fashion_mnist' ``` And download the evaluation data accordingly: ``` # define pytorch transformation into tensor format transf = torchvision.transforms.Compose([torchvision.transforms.ToTensor()]) # download and transform training images fashion_mnist_eval_data = torchvision.datasets.FashionMNIST(root=eval_path, train=False, transform=transf, download=True) ``` Let's also verify the number of evaluation images downloaded: ``` # determine the number of evaluation data images 
len(fashion_mnist_eval_data) ``` ### 3.2 Artificial Neural Network (ANN) Model Training and Evaluation <img align="center" style="max-width: 1000px" src="https://raw.githubusercontent.com/HSG-AIML-Teaching/EMBA2022-Lab/main/lab_04/process.png"> We recommend you to try the following exercises as part of the self-coding session: **Exercise 1: Train the neural network architecture of the lab for less epochs and evaluate its prediction accuracy.** > Decrease the number of training epochs to **5 epochs** and re-run the network training. Load and evaluate the model exhibiting the lowest training loss. What kind of behaviour in terms of prediction accuracy can be observed with decreasing the number of training epochs? ``` # *************************************************** # INSERT YOUR SOLUTION/CODE HERE # *************************************************** # *************************************************** # Task 1: define and init neural network architecture # *************************************************** # implement the MNISTNet network architecture class FashionMNISTNet(nn.Module): # define the class constructor def __init__(self): # *************************************************** # insert the network architecture here # *************************************************** # define network forward pass def forward(self, images): # *************************************************** # insert the network forwad pass here # *************************************************** # return forward pass result return x # init the neural network model model = ??? # *************************************************** # Task 2: define loss, training hyperparameters and dataloader # *************************************************** # define the optimization criterion / loss function nll_loss = ??? # specify the training parameters num_epochs = ??? # number of training epochs mini_batch_size = ??? 
# size of the mini-batches # define learning rate and optimization strategy learning_rate = ??? optimizer = optim.SGD(params=model.parameters(), lr=learning_rate) # init the training data loader fashion_mnist_train_dataloader = torch.utils.data.DataLoader(fashion_mnist_train_data, batch_size=mini_batch_size, shuffle=True) # *************************************************** # Task 3: run model training # *************************************************** # init collection of training epoch losses train_epoch_losses = [] # set the model in training mode model.train() # train the MNISTNet model for epoch in range(num_epochs): # init collection of mini-batch losses train_mini_batch_losses = [] # iterate over all-mini batches for i, (images, labels) in enumerate(fashion_mnist_train_dataloader): # run forward pass through the network output = ??? # reset graph gradients model.zero_grad() # determine classification loss loss = ??? # run backward pass loss.backward() # update network paramaters optimizer.step() # collect mini-batch reconstruction loss train_mini_batch_losses.append(loss.data.item()) # determine mean min-batch loss of epoch train_epoch_loss = np.mean(train_mini_batch_losses) # print epoch loss now = datetime.utcnow().strftime("%Y%m%d-%H:%M:%S") print('[LOG {}] epoch: {} train-loss: {}'.format(str(now), str(epoch), str(train_epoch_loss))) # save model to local directory model_name = 'fashion_mnist_model_epoch_{}.pth'.format(str(epoch)) torch.save(model.state_dict(), os.path.join("./models", model_name)) # determine mean min-batch loss of epoch train_epoch_losses.append(train_epoch_loss) # *************************************************** # Task 4: run model evaluation # *************************************************** # determine model predictions predictions = torch.argmax(model(???, dim=1) # determine accuracy scores accuracy = metrics.accuracy_score(???, ???) 
# print the classification accuracy percentage print('Final FashionMNISTNet classification accuracy: {}%'.format(accuracy * 100)) ``` **Exercise 2: Evaluation of "shallow" vs. "deep" neural network architectures.** > In addition to the architecture of the lab notebook, evaluate further (more **shallow** as well as more **deep**) neural network architectures by (1) either **removing or adding** layers to the network and/or (2) increasing/decreasing the number of neurons per layer. Train a model (using the architectures you selected) for at least **20 training epochs**. Analyze the prediction performance of the trained models in terms of training time and prediction accuracy. ``` # *************************************************** # INSERT YOUR SOLUTION/CODE HERE # *************************************************** # *************************************************** # Task 1: define and init neural network architecture # *************************************************** # implement the MNISTNet network architecture class FashionMNISTNet(nn.Module): # define the class constructor def __init__(self): # *************************************************** # insert the network architecture here # *************************************************** # define network forward pass def forward(self, images): # *************************************************** # insert the network forwad pass here # *************************************************** # return forward pass result return x # init the neural network model model = ??? # *************************************************** # Task 2: define loss, training hyperparameters and dataloader # *************************************************** # define the optimization criterion / loss function nll_loss = ??? # specify the training parameters num_epochs = ??? # number of training epochs mini_batch_size = ??? # size of the mini-batches # define learning rate and optimization strategy learning_rate = ??? 
optimizer = optim.SGD(params=model.parameters(), lr=learning_rate) # init the training data loader fashion_mnist_train_dataloader = torch.utils.data.DataLoader(fashion_mnist_train_data, batch_size=mini_batch_size, shuffle=True) # *************************************************** # Task 3: run model training # *************************************************** # init collection of training epoch losses train_epoch_losses = [] # set the model in training mode model.train() # train the MNISTNet model for epoch in range(num_epochs): # init collection of mini-batch losses train_mini_batch_losses = [] # iterate over all-mini batches for i, (images, labels) in enumerate(fashion_mnist_train_dataloader): # run forward pass through the network output = ??? # reset graph gradients model.zero_grad() # determine classification loss loss = ??? # run backward pass loss.backward() # update network paramaters optimizer.step() # collect mini-batch reconstruction loss train_mini_batch_losses.append(loss.data.item()) # determine mean min-batch loss of epoch train_epoch_loss = np.mean(train_mini_batch_losses) # print epoch loss now = datetime.utcnow().strftime("%Y%m%d-%H:%M:%S") print('[LOG {}] epoch: {} train-loss: {}'.format(str(now), str(epoch), str(train_epoch_loss))) # save model to local directory model_name = 'fashion_mnist_model_epoch_{}.pth'.format(str(epoch)) torch.save(model.state_dict(), os.path.join("./models", model_name)) # determine mean min-batch loss of epoch train_epoch_losses.append(train_epoch_loss) # *************************************************** # Task 4: run model evaluation # *************************************************** # determine model predictions predictions = torch.argmax(model(???, dim=1) # determine accuracy scores accuracy = metrics.accuracy_score(???, ???) # print the classification accuracy percentage print('Final FashionMNISTNet classification accuracy: {}%'.format(accuracy * 100)) ```
github_jupyter
<p style='direction:rtl; text-align: right'>ابتدا باید کتابخانه های زیر را وارد کنیم: <ul style='direction:rtl; text-align: right'> <li>numpy: برای کار با ماتریس ها</li> <li>matplotlib: برای رسم نمودار</li> <li>PCA: برای کاهش بعد</li> <li>OpenCV: برای کار با عکس</li> <li>special_ortho_group: برای تولید پایه اورتونرمال </li> </ul> </p> </p> <p style='direction:rtl; text-align: right'>تذکر: اگر کتابخانه cv2 اجرا نشد باید آن را نصب کنید. در command prompt دستور زیر را اجرا کنید. </p> <p style='direction:rtl; text-align: right'> pip install opencv-python </p> ``` import numpy as np import matplotlib.pyplot as plt from sklearn.decomposition import PCA import cv2 from scipy.stats import special_ortho_group as sog ``` <h1 style='direction:rtl; text-align: right'> پروژه ۲: استفاده از کاهش بعد </h1> <h2 style='direction:rtl; text-align: right'> قسمت ۱.۱: تولید دیتا با استفاده از پایه اورتونرمال </h2> <p style='direction:rtl; text-align: right'> عملیات زیر را انجام دهید: <ul style='direction:rtl; text-align: right'> <li>ابتدا با استفاده از تابع np.zeros آلفا وکتور هایی با ابعاد dim و N بسازید.</li> <li>سعی کنید متغیر آلفا وکتور را طوری پر کنید که به ازای هر اندیس از بعد صفر آن، آرایه ای از توزیع نرمال با میانگین ۰ و انحراف معیار i+1 قرار گیرد.</li> <li> بردار پایه V را با استفاده از تابع special_ortho_group.rvs(dim) بسازید.</li> <li> مشخص کنید که در ده مولفه اول چند درصد دیتا برای هر کدام از ماتریس ها حفظ شده اند. </li> <li> حال بردار زیر را تولید کنید و در alpha_v قرار دهید. </li> $$\alpha_1 V_1 + \alpha_2 V_2 + ... 
+ \alpha_d V_d $$ </ul> ``` dim = 20 N = 1000 alpha_vectors = for i in range(dim): alpha_vectors[i] = V = alpha_v = ``` <h2 style='direction:rtl; text-align: right'> قسمت ۱.۲:استفاده از PCA برای کاهش بعد </h2> <p style='direction:rtl; text-align: right'> عملیات زیر را انجام دهید: <ul style='direction:rtl; text-align: right'> <li>ابتدا یک شیی از PCA بسازید.</li> <li>با استفاده از تابع fit موجود در شیی PCA عملیات pca را روی دیتا alpha_v انجام دهید.</li> <li> با استفاده از تابع components_ موجود در شیی pca بردار های تکین را مشاهده کنید.</li> <li> با استفاده از تابع explained_variance_ موجود در شیی pca مقدار های تکین را مشاهده کنید.</li> </ul> ``` pca = ``` <h2 style='direction:rtl; text-align: right'> قسمت ۱.۳: کاهش بعد به ۳ بعد </h2> <ul style='direction:rtl; text-align: right'> <li>ابتدا یک شیی از PCA با ورودی n_components=3 بسازید.</li> <li>با استفاده از تابع fit موجود در شیی PCA عملیات pca را روی دیتا alpha_v انجام دهید.</li> <li> تابع explained_variance_ratio_ موجود در شیی pca درصد حفظ دیتا به ازای هر کدام از بعد ها را می دهد.</li> <li>با کاهش بعد به ۳، چند درصد از اطلاعات حفظ می شود؟</li> </ul> ``` pca = ``` <p style='direction:rtl; text-align: right'> برای حفظ ۹۰ درصد از اطلاعات به چند بعد نیاز داریم؟ </p> ``` pca = ``` <h2 style='direction:rtl; text-align: right'> قسمت ۲.۱: خواندن فایل تصویر </h2> <p style='direction:rtl; text-align: right'>ابتدا فایل تصویری رنگی باکیفیتی را از گوگل دانلود کنید.</p> <p style='direction:rtl; text-align: right'>با استفاده از تابع imread موجود در کتابخانه <a href="https://www.geeksforgeeks.org/python-opencv-cv2-imread-method/">OpenCV</a> عکس مربوطه را فراخوانی کنید:</p> ``` image1 = cv2.imread("path") ``` <p style='direction:rtl; text-align: right'>عکس خوانده شده را به فرمت <a href="https://www.w3schools.com/colors/colors_rgb.asp">RGB</a> در می آوریم:</p> ``` image = cv2.cvtColor(image1, cv2.COLOR_BGR2RGB) ``` <p style='direction:rtl; text-align: right'> همانطور که می بینید عکس خوانده شده به ازای هر پیکسل ۳ عدد دارد: بنابراین 
برای هر عکس رنگی x*y یک آرایه x*y*3 خواهیم داشت.</p> ``` dim=image.shape print('Image shape =',dim) ``` <h2 style='direction:rtl; text-align: right'> قسمت ۲.۲: نمایش تصویر </h2> <p style='direction:rtl; text-align: right'>با استفاده از تابع imshow موجود در <a href="https://www.geeksforgeeks.org/matplotlib-pyplot-imshow-in-python/">matplotlib</a> تصویر خوانده شده را نمایش دهید:</p> ``` plt.imshow(image) plt.show() ``` <h2 style='direction:rtl; text-align: right'> قسمت ۲.۳: آماده سازی تصویر برای کاهش بعد </h2> <p style='direction:rtl; text-align: right'>سه ماتریس رنگ را در ماتریس های R,G,B ذخیره کنید:</p> ``` R= G= B= print(R.shape) print(G.shape) print(B.shape) ``` <h2 style='direction:rtl; text-align: right'> قسمت ۲.۴:استفاده از PCA برای کاهش بعد </h2> <p style='direction:rtl; text-align: right'> با استفاده از کلاس PCA در کتابخانه sklearn کاهش بعد را انجام میدهیم. عملیات زیر را انجام دهید: <a href="https://scikit-learn.org/stable/modules/generated/sklearn.decomposition.PCA.html">راهنمایی</a> <ul style='direction:rtl; text-align: right'> <li>برای هر یک از ماتریس های R,G,B یک شی PCA ایجاد کنید. تعداد مولفه ها را ۱۰ قرار دهید.</li> <li>با استفاده از تابع fit موجود در pca الگوریتم را روی ماتریس ها فیت کنید.</li> <li> با استفاده از دستور _explained_variance_ratio میتوانید ببینید هرکدام از مولفه ها چند درصد دیتای ماتریس را دارند. </li> <li> مشخص کنید که در ده مولفه اول چند درصد دیتا برای هر کدام از ماتریس ها حفظ شده اند. 
</li> <li> با استفاده از دستور bar مقادیر _explained_variance_ratio را رسم کنید </li> </ul> ``` k=10 ``` <p style='direction:rtl; text-align: right'>عملیات زیر را انجام دهید: <ul style='direction:rtl; text-align: right'> <li>با استفاده از تابع transform موجود در pca دیتا با بعد کمتر را تولید کنید</li> <li> با استفاده از تابع inverse_transform دیتا را به بعد اولیه برگردانید </li> </ul> </p> ``` # Embed the data to the first k components Transform_R= # use tranform to reduce data dimension Transform_B= Transform_G= Reduced_R= # use inverse_transform to return to initial dimension Reduced_G= Reduced_B= print('Transform Matrix Shape=',Transform_R.shape) print('Inverse Transform Matrix Shape=',Reduced_R.shape) ``` <p style='direction:rtl; text-align: right'>با استفاده از دستور concatenate سه ماتریس ً Reduced_R,Reduced_G,Reduced_B را کنار هم قرار دهید تا یک آرایه x*y*3 ایجاد شود. x , y همان ابعاد تصویر اولیه (image) هستند </p> <p style='direction:rtl; text-align: right'>با استفاده از دستور astype ماتریس بدست آمده را به عدد صحیح تبدیل کنید.</p> <p style='direction:rtl; text-align: right'>عکس بدست آمده را با imshow نمایش دهید.</p> ``` Reduced_R=Reduced_R.reshape(dim[0],dim[1],1) Reduced_G= Reduced_B= reduced_image = np.concatenate ... final_image=reduced_image.astype ... print('final_image shape=',final_image.shape) plt.imshow(final_image) plt.show() ``` <h2 style='direction:rtl; text-align: right'> قسمت ۲.۵:استفاده از PCA برای کاهش بعد و حفظ ۹۹ درصد داده ها </h2> <p style='direction:rtl; text-align: right'> کل قسمت ۲.۴ را مجددا اجرا کنید. این بار تعداد مولفه ها را عددی قرار دهید که در هر سه ماتریس R,G,B حداقل ۹۹ درصد داده ها حفظ شود. 
``` k= # Embed the data to the first k components Transform_R= # use tranform to reduce data dimension Transform_B= Transform_G= Reduced_R= # use inverse_transform to return to initial dimension Reduced_G= Reduced_B= print('Transform Matrix Shape=',Transform_R.shape) print('Inverse Transform Matrix Shape=',Reduced_R.shape) Reduced_R=Reduced_R.reshape(dim[0],dim[1],1) Reduced_G= Reduced_B= reduced_image = np.concatenate ... final_image=reduced_image.astype ... print('final_image shape=',final_image.shape) plt.imshow(final_image) plt.show() ```
github_jupyter
# Jones Vectors

This is a sample notebook to perform basic calculations on the "Jones Calculus". The basis is chosen so that a Jones vector is given by a <b>normalized</b> vector whose components represent the magnitude and phase of the $x$ and $y$ components of the electric field of a beam propagating along $\textbf{k} = k\hat{z}$:

\begin{equation}
\textbf{j} = \vert{\textbf{E}}\vert^{-1}
\left(
\begin{array}{c}
E_x \\
E_y
\end{array}
\right)
\end{equation}

The Jones vector is implemented as a class, and right now it has very limited functionality: you define the polarization manually:

- Horizontal = Jones(1,0)
- Vertical = Jones(0,1)
- $\pm45^\circ$ = Jones(1,$\pm$1)
- Right Circular = Jones(1,1j)
- Left Circular = Jones(1,-1j)

Rotating the polarization by angle 'theta' is accomplished by rotmat(theta), which applies the rotation matrix:

$$ R(\phi) \equiv \left(\begin{matrix} \cos\phi & -\sin\phi \\ \sin\phi & \cos\phi \end{matrix}\right) $$

Half-wave plates with fast axis horizontal are defined by

$$ H_0 \equiv e^{i\pi/2}\left(\begin{matrix} 1 & 0 \\ 0 & -1 \end{matrix}\right) $$

and quarter-wave plates as:

$$ Q_0 \equiv e^{i\pi/4}\left(\begin{matrix} 1 & 0 \\ 0 & -i \end{matrix}\right) $$

Wave plates at arbitrary angles are then applied by rotating the polarization into the basis defined by the wave plate, applying the in-axis wave-plate transformation, and rotating back into the "lab frame":

\begin{eqnarray}
Q(\theta) & = & R(\theta)Q_0R(-\theta) \\
H(\theta) & = & R(\theta)H_0R(-\theta)
\end{eqnarray}

There is also a primitive method to plot the polarization ellipses of an array of Jones vectors via:

    j1 = Jones(1,2)
    #j2 = ...
    # jvecs = [j1,j2,...]
    plot_pol(j1)      # plot a single Jones vector
    plot_pols(jvecs)  # plot an array of Jones vectors

This could easily be expanded upon.
``` %pylab inline class Jones: def __init__(self, horiz=1, vert=0): mg =sqrt(abs(horiz)**2 + abs(vert)**2) self.h = horiz/mg self.v = vert/mg def rotmat(self,theta): return Jones(self.h*cos(theta) - self.v*sin(theta), self.v*cos(theta) + self.h*sin(theta)) def hwp0(self): return Jones(self.h,-self.v) def qwp0(self): return Jones(self.h,1j*self.v) def hwp(self,theta): return self.rotmat(-theta).hwp0().rotmat(theta) def qwp(self,theta): return self.rotmat(-theta).qwp0().rotmat(theta) def plot_pol(j): figure(figsize=[5,5]) wt = linspace(0,2*pi,1e2) plot(real(j.h *exp(1j*wt)),real(j.v *exp(1j*wt))) xlim([-1,1]) ylim([-1,1]) grid(True) xlabel('$E_x$') ylabel('$E_y$') def plot_pols(jvex): figure(figsize=[5,5]) wt = linspace(0,2*pi,1e2) for j in jvex: plot(real(j.h *exp(1j*wt)),real(j.v *exp(1j*wt))) xlim([-1,1]) ylim([-1,1]) grid(True) xlabel('$E_x$') ylabel('$E_y$') # Example: action of half wave plate on linear polarization j_h = Jones(1,0) j_p = j_h.hwp(pi/8) plot_pols([j_h,j_p]) title('HWP at $\\theta$ rotates Linear Polarization by $2\\theta$') # Example: a field with some arbitrary Jones vector j = Jones(1,1+1j) plot_pol(j) title('Some Aribitrary Polarization') # Example: action of quarter wave plate on linear polarization j_h = Jones(1,0) plz = [] thz = linspace(0,pi/2,10) for t in thz: plz = append(plz,j_h.qwp(t)) plot_pols(plz) title('H Polarization Through QWP for Various $\\theta$') # Example: action of quarter wave plate on circular polarization j_c = Jones(1,1j) plz = [] thz = linspace(0,pi,10) for t in thz: plz = append(plz,j_c.qwp(t)) plot_pols(plz) title('R Polarization Through QWP for Various $\\theta$') ```
github_jupyter
# <center>Models and Pricing of Financial Derivativs HW_01</center> **<center>11510691 程远星</center>** ## Question 1 $\DeclareMathOperator*{\argmin}{argmin} \DeclareMathOperator*{\argmax}{argmax} \newcommand{\using}[1]{\stackrel{\mathrm{#1}}{=}} \newcommand{\ffrac}{\displaystyle \frac} \newcommand{\space}{\text{ }} \newcommand{\bspace}{\;\;\;\;} \newcommand{\QQQ}{\boxed{?\:}} \newcommand{\void}{\left.\right.} \newcommand{\CB}[1]{\left\{ #1 \right\}} \newcommand{\SB}[1]{\left[ #1 \right]} \newcommand{\P}[1]{\left( #1 \right)} \newcommand{\dd}{\mathrm{d}} \newcommand{\Tran}[1]{{#1}^{\mathrm{T}}} \newcommand{\d}[1]{\displaystyle{#1}} \newcommand{\EE}[2][\,\!]{\mathbb{E}_{#1}\left[#2\right]} \newcommand{\Var}[2][\,\!]{\mathrm{Var}_{#1}\left[#2\right]} \newcommand{\Cov}[2][\,\!]{\mathrm{Cov}_{#1}\left(#2\right)} \newcommand{\Corr}[2][\,\!]{\mathrm{Corr}_{#1}\left(#2\right)} \newcommand{\I}[1]{\mathrm{I}\left( #1 \right)} \newcommand{\N}[1]{\mathrm{N} \left( #1 \right)} \newcommand{\ow}{\text{otherwise}}\bspace$Selling a call option: As the writer of the call option, I give the holder the right to buy an asset at a specified time $T$ for a specified price $K$. My payoff would be $-\max\P{S_T - K,0}$ for european call options. If I sold an american call option, the holder can exercise at any time before $T$. $\bspace$Buying a put option: As the holder of the put option, I actually was granted the right to sell an asset at a specified time $T$ for a specified price $K$. My payoff would be $\max\P{K - S_T, 0}$ Or before $T$ if what I bought is an american put option. ## Question 2 $\bspace$We can write their profit function on the stock price $S_t$. - Stock: $100\P{S_t - 94}$ - Option: $2000\big(\max\P{S_t - 95,0} - 4.7\big)$ $\bspace$They intersect at two points, $\P{0,0}$ and $\P{100,600}$. 
It's generally acknowledge that it's of higher possibility that the stock price moves less than more, thus I personally think that holding the stocks rather than the options have a better chance to profit. $\bspace$As for the second question, since we've already acquire their intersection, we can say that when the stock price goes higher than $100$, options will win more. ## Question 3 $\bspace$The trader now in the call holder's position. He has paid $c$ to buy the right that he can use $K$ to buy the underlying asset as time $T$. Also in the put writer's position. He has received $p$ and given the right to someone else selling him the asset at price $K$ at time $T$. $\bspace$To let the prices equal, by the **Put-call parity**, we have $S_0 = Ke^{-rT}$, the time value of $K$ is equal to the initial price of the asset. ## Question 4 $\bspace$We first write its payoff function on the stock price: $$\begin{align} p &= 100\P{S_T - 40} + 100\SB{5 - \max\P{S_T - 50, 0}} + 100\SB{\max\P{30 - S_T,0} - 7}\\ &= \begin{cases} 800, &\text{if } S_T \geq 50 \\ 100S_T - 4200, &\text{if } 50 \geq S_T \geq 30 \\ -1200, &\text{if } 30 \geq S_T \geq 0 \end{cases} \end{align} $$ ![](../figs/HW_fig1.5A.png) After that, the payoff would change to: $$\begin{align} p &= 100\P{S_T - 40} + 200\SB{5 - \max\P{S_T - 50, 0}} + 200\SB{\max\P{30 - S_T,0} - 7}\\ &= \begin{cases} 5600 - 100S_T, &\text{if } S_T \geq 50 \\ 100S_T - 4400, &\text{if } 50 \geq S_T \geq 30 \\ 1600 - 100S_T, &\text{if } 30 \geq S_T \geq 0 \end{cases} \end{align} $$ ![](../figs/HW_fig1.5B.png) ## Question 5 $\bspace$The lower bound of the option can be obtained using the formula $\bspace\begin{align} c &\geq K e^{-rT} - S_0 \\ &= 15 \cdot e^{-6\% \times 1/12} - 12 \\ &\approx 2.93 \end{align}$ ## Qustion 6 $\bspace$The early exercise of an American put option is to sell the stock to the writer at the Strike price $K$ before the expiration date $T$. 
Suppose he exercised at time $t$ thus his time value of money is $Ke^{-r\P{T-t}}$. But then he can not sell the stock at $K$ at time $T$ any more. ## Question 7 $\bspace$By the put-call parity, we have: $1 + 20 \times e^{-4\% \times 0.25} = p + 19$ thus $p = 1.80$ ## Question 8 $\bspace$Based on the put-call parity, $c + Ke^{-rT} = S_0 + p \Longrightarrow c + 49.75 = 47+2.5$. Thus there's always a chance for arbitraging. He can use the same strategy that is to buy a stock and a put option using the borrowed money, $49.5$ with interest rate $6\%$. $\bspace$Then he can win $50 - 49.5e^{0.06\times 1/12} \approx 0.25 $ if the stock price goes lower than $50$ or more if the stock price goes higher than $50$. ## Question 9 $\P{1}$ $\bspace P \geq p = c + Ke^{-rT}-S_0 = C + Ke^{-rT} - S_0 = 4 + 30 e^{-8\%\times1/4} -31 \approx 2.41$ $\bspace$And to find something that keeps over an American Put, we can use $K$ cash at the beginning, thus $c + K \geq P+ S_0$ always holds. Therefore, $P \leq c + K - S_0 = C + K - S_0 = 4 + 30 - 31 = 3$ $\P{2}$ $\bspace$If the American Put price is greater than $3$ that is to say that $P \geq C + K - S_0$, then we write an American put option and sell it to somebody, then use the money to buy a American call option, to borrow a stock and sell it to gain $S_0$. Send $K$ cash to the bank. Then when the American put option holder want to exercise, we can instantly use $K = 30$ to buy the stock and return to the stock lender. Up to now, we start from nothing to a American call option and a positive payoff and some interest. ## Question 10 $\P{1}$ $\bspace$If not, then $2c_2 > c_1 + c_3$. So that we have the arbitrage chance. First write two call option with strike price $K_2$ and then use the money gained to buy two call option with strike price $K_1$ and $K_3$. We already have some money left now. 
$\bspace$Then, when the exercise time comes, we can exercise all three at the same time since $2K_2 = K_1 + K_3$, meaning that we gain money from nothing. Thus $2c_2 \leq c_1 + c_3$.

$\P{2}$

$$p_2 \leq 0.5\P{p_1 + p_3}$$

$\bspace$The proof is obvious, similar to the preceding one.
github_jupyter
<a href="https://colab.research.google.com/github/ferdouszislam/Weather-WaterLevel-Prediction-ML/blob/main/Notebooks/brri-dataset/experimentations/classification/selected_algorithms/knn_classification.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a> ``` #@title Default title text # # the imports in this cell are required when running on local device # import os, sys # sys.path.append(os.path.join('..', '..')) # from utils.applyML_util import train_classification, eval_classification # from utils.featureSelection_util import (pearson_correlation_fs, # seleckKBest_fs, selectSequential_fs) # the imports in this cell are required when running from Cloud (Colab/Kaggle) # before running on cloud you nee to upload the .py files # from 'Notebooks/utils' directory from applyML_util import train_classification, eval_classification, showEvalutationGraph_classification from featureSelection_util import (pearson_correlation_fs, seleckKBest_fs, selectSequential_fs) ``` **K Nearest Neighbors Documentation link:** https://scikit-learn.org/stable/modules/generated/sklearn.neighbors.KNeighborsClassifier.html ``` import pandas as pd import numpy as np import matplotlib.pyplot as plt from sklearn.neighbors import KNeighborsClassifier # global random seed RAND_SEED = 42 # initial model with only random seed and not any hyper-parametes initial_model = KNeighborsClassifier() # hyper-parameters n_neighbors = [x for x in range(1,41)] weights = ['uniform', 'distance'] p = [1, 2, 3, 4, 5] # dictonary of all hyperparameters param_grid = {'n_neighbors': n_neighbors, 'weights': weights, 'p': p} # variables needed for showEvalGraph_regression() function MODEL_CLASS = KNeighborsClassifier class_label = 'Rainfall' x_axis_param_name = 'n_neighbors' x_axis_param_vals = n_neighbors ``` ## 1. 
Experimentation on the Weather Daily Dataset ``` # Load the train dataset weather_daily_train_df = pd.read_csv('https://raw.githubusercontent.com/ferdouszislam/Weather-WaterLevel-Prediction-ML/main/Datasets/brri-datasets/final-dataset/train/brri-weather_train_classification.csv') # Load the test set weather_daily_test_df = pd.read_csv('https://raw.githubusercontent.com/ferdouszislam/Weather-WaterLevel-Prediction-ML/main/Datasets/brri-datasets/final-dataset/test/brri-weather_test_classification.csv') ``` ### 1.0 No technique ``` # train model model, selected_hyperparams, train_accuracy, train_f1 = train_classification(initial_model, param_grid, weather_daily_train_df, cls=class_label) print(f'Selected hyperparameters: {selected_hyperparams}') # performance on the train set print(f'Train set performance: accuracy={train_accuracy}, macro-f1={train_f1}') # graph on train set performance # hyper-parameters selected by GridSearchCV selected_model_params = selected_hyperparams showEvalutationGraph_classification(MODEL_CLASS, weather_daily_train_df, cls=class_label, x_axis_param_name=x_axis_param_name, x_axis_param_vals=x_axis_param_vals, selected_model_params=selected_model_params) # test model test_accuracy, test_f1, test_auc = eval_classification(model, weather_daily_test_df, cls=class_label) # performance on the test set print(f'Test set performance: accuracy={test_accuracy}, macro-f1={test_f1}, auc={test_auc}') ``` ### 1.1 Apply Pearson Feature Selection to Daily Weather Dataset ``` # select features from the train dataset weather_daily_fs1_train_df, cols_to_drop = pearson_correlation_fs(weather_daily_train_df, class_label) # keep only selected features on the test dataset weather_daily_fs1_test_df = weather_daily_test_df.drop(columns=cols_to_drop) # train model model, selected_hyperparams, train_accuracy, train_f1 = train_classification(initial_model, param_grid, weather_daily_fs1_train_df, cls=class_label) print(f'Selected hyperparameters: {selected_hyperparams}') # 
performance on the train set print(f'Train set performance: accuracy={train_accuracy}, macro-f1={train_f1}') # graph on train set performance # hyper-parameters selected by GridSearchCV selected_model_params = selected_hyperparams showEvalutationGraph_classification(MODEL_CLASS, weather_daily_fs1_train_df, cls=class_label, x_axis_param_name=x_axis_param_name, x_axis_param_vals=x_axis_param_vals, selected_model_params=selected_model_params) # test model test_accuracy, test_f1, test_auc = eval_classification(model, weather_daily_fs1_test_df, cls=class_label) # performance on the test set print(f'Test set performance: accuracy={test_accuracy}, macro-f1={test_f1}, auc={test_auc}') ``` ### 1.2 Apply SelectKBest Feature Selection to Daily Weather Dataset ``` # select features from the train dataset weather_daily_fs2_train_df, cols_to_drop = seleckKBest_fs(weather_daily_train_df, class_label, is_regression=False) print('features dropped:', cols_to_drop) # keep only selected features on the test dataset weather_daily_fs2_test_df = weather_daily_test_df.drop(columns=cols_to_drop) # train model model, selected_hyperparams, train_accuracy, train_f1 = train_classification(initial_model, param_grid, weather_daily_fs2_train_df, cls=class_label) print(f'Selected hyperparameters: {selected_hyperparams}') # performance on the train set print(f'Train set performance: accuracy={train_accuracy}, macro-f1={train_f1}') # r2-scores graph on the train set # hyper-parameters selected by GridSearchCV selected_model_params = selected_hyperparams showEvalutationGraph_classification(MODEL_CLASS, weather_daily_fs2_train_df, cls=class_label, x_axis_param_name=x_axis_param_name, x_axis_param_vals=x_axis_param_vals, selected_model_params=selected_model_params) # test model test_accuracy, test_f1, test_auc = eval_classification(model, weather_daily_fs2_test_df, cls=class_label) # performance on the test set print(f'Test set performance: accuracy={test_accuracy}, macro-f1={test_f1}, auc={test_auc}') 
``` ### 1.3 SMOTE on Daily Dataset ``` # train model model, selected_hyperparams, train_accuracy, train_f1 = train_classification(initial_model, param_grid, weather_daily_train_df, cls=class_label, sampling_technique='smote') print(f'Selected hyperparameters: {selected_hyperparams}') # performance on the train set print(f'Train set performance: accuracy={train_accuracy}, macro-f1={train_f1}') # r2-scores graph on the train set # hyper-parameters selected by GridSearchCV selected_model_params = selected_hyperparams showEvalutationGraph_classification(MODEL_CLASS, weather_daily_train_df, cls=class_label, x_axis_param_name=x_axis_param_name, x_axis_param_vals=x_axis_param_vals, selected_model_params=selected_model_params) # test model test_accuracy, test_f1, test_auc = eval_classification(model, weather_daily_test_df, cls=class_label) # performance on the test set print(f'Test set performance: accuracy={test_accuracy}, macro-f1={test_f1}, auc={test_auc}') ``` ### 1.4 Random Undersampling + SMOTE on Daily Dataset ``` # train model model, selected_hyperparams, train_accuracy, train_f1 = train_classification(initial_model, param_grid, weather_daily_train_df, cls=class_label, sampling_technique='hybrid') print(f'Selected hyperparameters: {selected_hyperparams}') # performance on the train set print(f'Train set performance: accuracy={train_accuracy}, macro-f1={train_f1}') # graph on train set performance # hyper-parameters selected by GridSearchCV selected_model_params = selected_hyperparams showEvalutationGraph_classification(MODEL_CLASS, weather_daily_train_df, cls=class_label, x_axis_param_name=x_axis_param_name, x_axis_param_vals=x_axis_param_vals, selected_model_params=selected_model_params) # test model test_accuracy, test_f1, test_auc = eval_classification(model, weather_daily_test_df, cls=class_label) # performance on the test set print(f'Test set performance: accuracy={test_accuracy}, macro-f1={test_f1}, auc={test_auc}') ``` ### 1.5 Pearson Feature Selection + 
Hybrid Sampling to Daily Weather Dataset ``` # train model model, selected_hyperparams, train_accuracy, train_f1 = train_classification(initial_model, param_grid, weather_daily_fs1_train_df, cls=class_label, sampling_technique='hybrid') print(f'Selected hyperparameters: {selected_hyperparams}') # performance on the train set print(f'Train set performance: accuracy={train_accuracy}, macro-f1={train_f1}') # graph on train set performance # hyper-parameters selected by GridSearchCV selected_model_params = selected_hyperparams showEvalutationGraph_classification(MODEL_CLASS, weather_daily_fs1_train_df, cls=class_label, x_axis_param_name=x_axis_param_name, x_axis_param_vals=x_axis_param_vals, selected_model_params=selected_model_params) # test model test_accuracy, test_f1, test_auc = eval_classification(model, weather_daily_fs1_test_df, cls=class_label) # performance on the test set print(f'Test set performance: accuracy={test_accuracy}, macro-f1={test_f1}, auc={test_auc}') ``` ### 1.6 SelecKBest Feature Selection + Hybrid Sampling to Daily Weather Dataset ``` # train model model, selected_hyperparams, train_accuracy, train_f1 = train_classification(initial_model, param_grid, weather_daily_fs2_train_df, cls=class_label, sampling_technique='hybrid') print(f'Selected hyperparameters: {selected_hyperparams}') # performance on the train set print(f'Train set performance: accuracy={train_accuracy}, macro-f1={train_f1}') # graph on train set performance # hyper-parameters selected by GridSearchCV selected_model_params = selected_hyperparams showEvalutationGraph_classification(MODEL_CLASS, weather_daily_fs2_train_df, cls=class_label, x_axis_param_name=x_axis_param_name, x_axis_param_vals=x_axis_param_vals, selected_model_params=selected_model_params) # test model test_accuracy, test_f1, test_auc = eval_classification(model, weather_daily_fs2_test_df, cls=class_label) # performance on the test set print(f'Test set performance: accuracy={test_accuracy}, macro-f1={test_f1}, 
auc={test_auc}') ```
github_jupyter
Welcome to exercise one of “Apache Spark for Scalable Machine Learning on BigData”. In this exercise you’ll apply the basics of functional and parallel programming. Let’s start with a simple example. Suppose you have a list of integers. Let’s find out what the size of this list is. Note that we already provide an RDD object, so please have a look at the RDD API in order to find out what function to use: https://spark.apache.org/docs/latest/api/python/pyspark.html#pyspark.RDD The following link contains additional documentation: https://spark.apache.org/docs/latest/rdd-programming-guide.html This notebook is designed to run in an IBM Watson Studio default runtime (NOT the Watson Studio Apache Spark Runtime), since the default runtime with 1 vCPU is free of charge. Therefore, we install Apache Spark in local mode for test purposes only. Please don't use it in production. In case you are facing issues, please read the following two documents first: https://github.com/IBM/skillsnetwork/wiki/Environment-Setup https://github.com/IBM/skillsnetwork/wiki/FAQ Then, please feel free to ask: https://coursera.org/learn/machine-learning-big-data-apache-spark/discussions/all Please make sure to follow the guidelines before asking a question: https://github.com/IBM/skillsnetwork/wiki/FAQ#im-feeling-lost-and-confused-please-help-me If running outside Watson Studio, this should work as well. In case you are running in an Apache Spark context outside Watson Studio, please remove the Apache Spark setup in the first notebook cells. ``` from IPython.display import Markdown, display def printmd(string): display(Markdown('# <span style="color:red">'+string+'</span>')) if ('sc' in locals() or 'sc' in globals()): printmd('<<<<<!!!!! It seems that you are running in a IBM Watson Studio Apache Spark Notebook. 
Please run it in an IBM Watson Studio Default Runtime (without Apache Spark) !!!!!>>>>>') !pip install pyspark==2.4.5 try: from pyspark import SparkContext, SparkConf from pyspark.sql import SparkSession except ImportError as e: printmd('<<<<<!!!!! Please restart your kernel after installing Apache Spark !!!!!>>>>>') sc = SparkContext.getOrCreate(SparkConf().setMaster("local[*]")) spark = SparkSession \ .builder \ .getOrCreate() rdd = sc.parallelize(range(100)) # please replace $$ with the correct characters rdd.c$$$t() ``` You should see "100" as the answer. Now we want to know the sum of all elements. Again, please have a look at the API documentation and complete the code below in order to get the sum. ``` rdd.s$$() ``` You should get "4950" as the answer.
github_jupyter