code stringlengths 38 801k | repo_path stringlengths 6 263 |
|---|---|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # 데이터 다루기
# - 이미지 데이터를 불러온 후 다뤄보기
# - 데이터를 준비하는 단계를 연습
# - dataloader를 바로 쓰지 않고 바닥부터 코딩
# - 데이터는 .png형식의 MNIST
# +
import os
from glob import glob
import numpy as np
from PIL import Image
import matplotlib.pyplot as plt
# %matplotlib inline
# -
# ### Check the working directory
# Quick sanity check that the MNIST png tree is laid out as expected.
os.listdir()  # NOTE(review): result is discarded — likely meant to be printed
print(os.listdir('data/mnist_png/'))
print(os.listdir('data/mnist_png/training'))
print(os.listdir('data/mnist_png/training/0')[:10])
# ### Collect only files matching a pattern (glob)
# +
train_path_list = glob('data/mnist_png/training/*/*.png') # only files with the .png extension
test_path_list = glob('data/mnist_png/testing/*/*.png') # '*' matches anything
image_path_list = glob('data/mnist_png/*/*/*.png')  # training + testing combined
print(train_path_list[:3])
print('\n')
print(test_path_list[:3])
print('\n')
print(len(train_path_list), len(test_path_list), len(image_path_list))
# -
# ### Number of images per label
# +
num_per_label = []
labels = []
# One sub-directory per digit label; count the PNG files in each.
for label in os.listdir('data/mnist_png/training/'):
    path = 'data/mnist_png/training/' + label + '/'
    labels.append(label)
    image_num = len(glob(path + '*.png'))
    num_per_label.append(image_num)
print(num_per_label)
plt.bar(labels, num_per_label)
plt.title('Number of image per label(training)')
# -
# ### Inspect one image
# +
image_path = 'data/mnist_png/training/0/1.png'
image = Image.open(image_path)
image_arr = np.array(image)
plt.imshow(image_arr, 'gray')
# -
# ### Extract the label
# The label is the parent directory name: .../training/<label>/<file>.png
# NOTE(review): splitting on '/' assumes POSIX-style separators — use
# os.path / pathlib if this must run on Windows.
# +
print(image_path)
names = image_path.split('/')
names
# -
names[-2]
# ### Check the image sizes
# - Verify whether any image has a different size
from tqdm import tqdm_notebook  # NOTE(review): deprecated alias; tqdm.notebook.tqdm is the modern import
# +
path = 'data/mnist_png/training'
print(path)
image_size = []
# Collect every image's array shape; a single distinct value in the final
# set means all images share one size.
for label in labels:
    image_path = glob(path + '/{}/*.png'.format(label))
    for p in tqdm_notebook(image_path):
        image = Image.open(p)
        image_arr = np.array(image)
        image_size.append(image_arr.shape)
set(image_size)
| 1. Beginner/Pytorch3_1_DataPreprocessing.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
# DATAFRAMES INITIALISATION
import os
# NOTE(review): hardcoded absolute Windows path — breaks on any other
# machine; prefer running from the repo root or reading the path from config.
os.chdir('C:\\Users\\asus\\OneDrive\\Documenti\\University Docs\\MSc Computing\\Final Project\\RainbowFood(JN)\\Rainbow-Food-Collaborative-Filtering-')
import pandas as pd
# vegetables file
col_list_veg = ["Vegetables", "Serving", "Calories"]
df_veg = pd.read_csv("Vegetables.csv", usecols = col_list_veg)
# allergies file
col_list_all = ["Class", "Type", "Group", "Food", "Allergy"]
df_all = pd.read_csv("FoodAllergies.csv", usecols = col_list_all)
# drop NaN rows: NaN is a float, so str.lower() would fail on those cells
df_all.dropna(inplace = True)
# recipe file
col_list_rec = ['Link', 'Title', 'Total Time', 'Servings', 'Ingredients', 'Instructions']
df_rec = pd.read_csv("Recipes.csv", usecols = col_list_rec)
# ratings
col_list_rat = ["userId", "recipeId", "rating"]
df_rat = pd.read_csv("Ratings_small.csv", usecols = col_list_rat)
# NLP FOR CLEANING UP INPUTS
# lemmatising (requires one-time wordnet corpus download)
import nltk
nltk.download('wordnet')
from nltk.stem import WordNetLemmatizer
lemmatizer = WordNetLemmatizer()
# FUNCTIONS
# function to extract lists from pandas columns
def list_maker(column):
    """Return the values of a pandas Series/column as a plain Python list."""
    return column.tolist()
# function to make lower case in lists
def lower_case(column_list):
    """Lower-case every string in *column_list* IN PLACE and return it.

    NOTE: callers rely on the in-place mutation (the allergy-handling code
    lower-cases a list and then keeps using the original reference), so do
    not replace this with a new-list version.
    """
    for i, value in enumerate(column_list):
        column_list[i] = value.lower()
    return column_list
# function to cut duplicates from a list
def no_duplicates(column_list):
    """Return a new list with duplicates removed, preserving first-seen order.

    Works for unhashable elements too (linear membership scan), at the cost
    of O(n^2) time — fine for the small lists used here.
    """
    unique_items = []
    for item in column_list:
        if item not in unique_items:
            unique_items.append(item)
    return unique_items
# function to make dictionaries
def dictionary_maker(list1, list2):
    """Pair elements of *list1* (keys) with *list2* (values) into a dict.

    Like zip(), truncates to the shorter list; a duplicated key keeps only
    its last value.
    """
    return dict(zip(list1, list2))
# function to lemmatise words in lists
def lemmatise(list_of_words):
    """Return a new list with each word lemmatised by the module-level
    WordNet ``lemmatizer`` (noun lemmatisation by default, e.g.
    'carrots' -> 'carrot')."""
    return [lemmatizer.lemmatize(word) for word in list_of_words]
# function user inputs veggies
mylist = []
mybasket = []


def user_inputs_veggies():
    """Prompt the user for 3 vegetables and keep the ones we stock.

    Side effects: appends the raw (lower-cased) inputs to module-level
    ``mylist`` and the lemmatised matches against ``veg_list`` to
    module-level ``mybasket``.
    """
    print("Enter 3 veggies: ")
    for x in range(1, 4):
        basket = input("%d " % x)
        mylist.append(basket.lower())
    for veg in lemmatise(mylist):
        if veg in veg_list:
            print(veg, "= got it")
            mybasket.append(veg)
        else:
            print(veg, "= we don't have it")
# function user inputs quantities (NOT USED FOR GETTING RECIPES YET)
veg_quantity = {}


def user_input_quantity():
    """For each vegetable in ``mybasket``, keep prompting until the user
    enters a numeric quantity, then record it in module-level ``veg_quantity``.

    NOTE(review): the quantity is stored as the raw input *string*, not a
    number — confirm downstream expectations before converting.
    """
    for x in mybasket:
        # Ask for the quantity, until it's valid
        while True:
            quantity = input("%s grams " % x)
            # float() accepts every int literal too, so one check covers both.
            try:
                float(quantity)
                break
            except ValueError:
                print("Please, use numbers in grams only")
        # If it's valid, add it
        veg_quantity[x] = quantity
    return veg_quantity
# CODE
# Extracting lists from pandas columns
veg_list = list_maker(df_veg['Vegetables'])
food_list = list_maker(df_all["Food"])
allergy_list = list_maker(df_all["Allergy"])
recipe_titles_list = list_maker(df_rec['Title'])
ingredients_list = list_maker(df_rec['Ingredients'])
users_id_list = list_maker(df_rat["userId"])
recipes_id_list = list_maker(df_rat["recipeId"])
ratings_list = list_maker(df_rat["rating"])
# Lower case in lists (in place — lower_case mutates its argument)
veg_list = lower_case(veg_list)
food_list = lower_case(food_list)
allergy_list = lower_case(allergy_list)
#recipe_titles_list = lower_case(recipe_titles_list)
ingredients_list = lower_case(ingredients_list)
# Dictionaries
# NOTE(review): dict() keeps only the *last* value of a duplicated key, so
# duplicate foods / titles / recipe ids silently collapse — confirm acceptable.
food_allergy_dictionary = dictionary_maker(food_list, allergy_list)
recipe_titles_ingredients_dictionary = dictionary_maker(recipe_titles_list, ingredients_list)
recipes_id_ratings_dictionary = dictionary_maker(recipes_id_list, ratings_list)
# User inputs veggies
user_inputs_veggies()
if not mybasket:
    print("Your basket is empty")
else:
    print("Here's what we have", mybasket)
# User inputs quantities
user_input_quantity()
# REST OF THE CODE (Still to change...)
# USER INPUTS ALLERGIES (NOT USED FOR GETTING RECIPES YET)
print("Any allergies or intolerances? Please enter them here or leave it blank. \n")
print("Please, specify if you have allergy or intolerance for generic terms \n")
print("(e.g. 'nut allergy', 'gluten allergy', but not for 'strawberry' or 'strawberries'): ")
# add allergies in the list
myallergies = []
# non-empty sentinel so the loop body runs at least once
basket = " "
# keep reading lines until the user submits an empty one
while basket != "":
    basket = input()
    if basket.isnumeric():
        # purely numeric input is rejected
        print("No numbers, please")
    elif basket != "":
        myallergies.append(basket)
# normalise: lower-case (in place), lemmatise, then dedupe
my_allergies = lower_case(myallergies)
my_allergies = lemmatise(myallergies)
my_allergies = no_duplicates(my_allergies)
for al in my_allergies:
    if al in food_allergy_dictionary.keys() or al in food_allergy_dictionary.values():
        print("You said: ", al)
    else:
        print(al, ", got it, I will update my database")
# OUTPUT = RECIPES BASED ON USER'S VEGGIES
# RegEx to find matches
import re
recipe_titles_list = []
recipe_title_to_matched_ingredient_list_dict_with_duplicates = {}
recipes_ingredients = {}
recipes = []
input_vegetable_list = mybasket
recipe_title_to_ingredient_list_dict = recipe_titles_ingredients_dictionary
# Collect every recipe whose ingredient text mentions one of the chosen vegetables.
for input_vegetable in input_vegetable_list:
    for recipe_title in recipe_title_to_ingredient_list_dict:
        ingredient_list_string = recipe_title_to_ingredient_list_dict[ recipe_title ]
        # df not perfect: the ingredients column holds the *string* repr of a list.
        # SECURITY(review): eval() on CSV content can execute arbitrary code;
        # ast.literal_eval is the safe replacement — confirm before changing.
        ingredient_list = eval(ingredient_list_string)
        for ingredient in ingredient_list:
            find = re.search(input_vegetable, ingredient)
            if find:
                recipe_titles_list.append( recipe_title )
                if recipe_title in recipe_title_to_matched_ingredient_list_dict_with_duplicates:
                    recipe_title_to_matched_ingredient_list_dict_with_duplicates[recipe_title].append(input_vegetable)
                else:
                    recipe_title_to_matched_ingredient_list_dict_with_duplicates[recipe_title] = [input_vegetable]
# duplicates removed
for key, value in recipe_title_to_matched_ingredient_list_dict_with_duplicates.items():
    recipes_ingredients[key] = list(set(value))
print("\n")
for recipe_title in recipe_titles_list:
    if recipe_title not in recipes:
        recipes.append(recipe_title)
print("These are all the recipes that contain : ", mybasket)
print("\n")
index = 1
for recipe in recipes:
    print(index, recipe)
    index += 1
# Shuffle so the recommendation below varies between runs.
import random
recipes_ingredients_items = list(recipes_ingredients.items())
random.shuffle(recipes_ingredients_items)
recipes_ingredients = dict(recipes_ingredients_items)
# NOTE(review): this loop only increments a counter and has no observable
# effect — looks like leftover debugging code.
index = 1
for recipe in recipes_ingredients.items():
    index += 1
# Pick one recipe per input vegetable, never reusing a recipe.
recipes = {}
i_have_processed_these_already = []
for vegetable in input_vegetable_list:
    for key, values in recipes_ingredients.items():
        if vegetable in values:
            if key not in i_have_processed_these_already:
                if vegetable not in recipes:
                    i_have_processed_these_already.append(key)
                    recipes[vegetable] = key
import pprint
print("\n")
print("I would recommend you to try these recipes: \n")
pprint.pprint(recipes)
print("\n")
print("Here you can see the ingredients for the recipes selected: \n")
for recipe in recipes.values():
    for recipeT, ingredient in recipe_title_to_ingredient_list_dict.items():
        if recipe in recipeT:
            print(recipeT, "\n",ingredient, "\n")
# -
| .ipynb_checkpoints/MVP (Single block)-checkpoint.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] deletable=false editable=false nbgrader={"grade": false, "grade_id": "cell-4977266850932bfc", "locked": true, "schema_version": 3, "solution": false, "task": false}
# # BLU15 - Model CSI
#
# -
import pandas as pd
import numpy as np
import hashlib
import io
import json
import pickle
import requests
import joblib
from sklearn.model_selection import train_test_split
from sklearn.pipeline import make_pipeline, Pipeline
from sklearn.preprocessing import OneHotEncoder
from sklearn.metrics import precision_score, recall_score, precision_recall_curve
from sklearn.compose import ColumnTransformer
from sklearn.impute import SimpleImputer
from lightgbm import LGBMClassifier
from sklearn.pipeline import Pipeline
# + [markdown] deletable=false editable=false nbgrader={"grade": false, "grade_id": "cell-20ab4cc706ece288", "locked": true, "schema_version": 3, "solution": false, "task": false}
# Alright, let's go on with the BLU and have fun doing some exercises!
# + [markdown] deletable=false editable=false nbgrader={"grade": false, "grade_id": "cell-a39e9c915dc5fb6b", "locked": true, "schema_version": 3, "solution": false, "task": false}
# <img src="media/show.jpg" width=300/>
# + [markdown] deletable=false editable=false nbgrader={"grade": false, "grade_id": "cell-675e349794cfade8", "locked": true, "schema_version": 3, "solution": false, "task": false}
# As a reminder:
#
# In the learning unit we received a pretrained model and a new batch of data and analyzed whether the model performs well and what to do with it.
#
# In the end, we realized that there are some unexpected changes in the data distribution and we need to retrain the model.
#
# As the new dataset was pretty small, we have to concat the old data with the new one and train a new model on the combination of 2 datasets.
#
# + [markdown] deletable=false editable=false nbgrader={"grade": false, "grade_id": "cell-b9f04330950f72d6", "locked": true, "schema_version": 3, "solution": false, "task": false}
# ## Exercise 1:
#
# - Read the .csv file with the original dataframe as **df_old.**
#
# - If you take a look on the **VehicleSearchedIndicator** column, you understand that this subset represents the searched cars only, so we can drop the **VehicleSearchedIndicator** column.
#
# - As the new dataset doesn't contain **InterventionDateTime** column, we also need to drop it from the old dataset.
#
# - Read new observations as **df_new**.
#
# - Combine both the dataframes and add a new column called **is_new** that is going to have all **False** values for the old data and all **True** values for the new observations.
#
# - Call the combined dataframe **df_combined**
#
# - Drop all **NaN** values
#
# - Apply lowercase to department names and intervention location names in the combined dataset
# + deletable=false nbgrader={"grade": false, "grade_id": "cell-04ea06e1af333be0", "locked": false, "schema_version": 3, "solution": true, "task": false}
# df_old = ...
# df_new = ...
# df_combined = ...
# YOUR CODE HERE
raise NotImplementedError()
# + deletable=false editable=false nbgrader={"grade": true, "grade_id": "cell-8c0d93b76029e43b", "locked": true, "points": 2, "schema_version": 3, "solution": false, "task": false}
assert df_combined.shape == (78715, 14), 'combined dataframe shape is wrong'
assert 'VehicleSearchedIndicator' not in df_combined.columns, 'Did you drop the VehicleSearchedIndicator column?'
assert 'is_new' in df_combined.columns, 'Did you add is_new column?'
assert sum(df_combined['is_new']) == 2000, 'is_new column has a wrong number of True values'
assert all([name.islower() for name in df_combined['Department Name']]), 'Department name is not lowercased'
assert all([name.islower() or not name.isalpha() for name in df_combined['InterventionLocationName']]), 'InterventionLocationName is not lowercased'
# + [markdown] deletable=false editable=false nbgrader={"grade": false, "grade_id": "cell-63bf0204b0039261", "locked": true, "schema_version": 3, "solution": false, "task": false}
# ## Exercise 2:
#
# **Split the created dataset on train and test parts in the following way:**
#
# - Firstly create train and test set. Call them **df_train** and **df_test**.
# > We'll need them in the future exercises.
# - Then, split **train** and **test** into **X_train**, **X_test**, **y_train** and **y_test**.
# - Test sets shape should be 25% of df_combined shape
# - Make sure to have 25% of new values in the test size.
# - Use random state 42 while splitting the datasets
# + deletable=false nbgrader={"grade": false, "grade_id": "cell-7f3b30be400bcb4f", "locked": false, "schema_version": 3, "solution": true, "task": false}
# YOUR CODE HERE
raise NotImplementedError()
# + deletable=false editable=false nbgrader={"grade": true, "grade_id": "cell-38a33c6dedc5abe8", "locked": true, "points": 3, "schema_version": 3, "solution": false, "task": false}
assert df_train.shape == (59036, 14), 'df_train shape is wrong. Are you sure test size is 25%?'
assert df_test.shape == (19679, 14), 'df_test shape is wrong. Are you sure test size is 25%?'
assert X_train.shape == (59036, 13), 'X_train shape is wrong. Are you sure test size is 25%?'
assert X_test.shape == (19679, 13), 'X_test shape is wrong. Are you sure test size is 25%?'
assert y_train.shape == (59036,), 'X_train shape is wrong. Are you sure test size is 25%?'
assert y_test.shape == (19679,), 'X_train shape is wrong. Are you sure test size is 25%?'
assert sum(X_train['is_new']) == 1500, 'is_new column in Training set has a wrong number of True values. Make sure to have 25% of new values'
assert sum(X_test['is_new']) == 500, 'is_new column in Test set has a wrong number of True values. Make sure to have 25% of new values'
# + [markdown] deletable=false editable=false nbgrader={"grade": false, "grade_id": "cell-5ce46b652d62760d", "locked": true, "schema_version": 3, "solution": false, "task": false}
# Now we need to retrain the model.
#
# If we simply load the pipeline and retrain it, it's going to ignore our new feature.
#
# So let's create the same pipeline as in the original notebook:
# +
# Rebuild the original preprocessing + model pipeline; fitting on df_combined
# columns means the newly added is_new column is treated as a categorical too.
categorical_features = df_combined.columns.drop(['ContrabandIndicator', 'SubjectAge'])
# Impute missing categoricals with a constant, then one-hot encode
# (categories unseen at fit time are ignored at predict time, not errors).
categorical_transformer = Pipeline(steps=[
('imputer', SimpleImputer(strategy='constant', fill_value='missing')),
('onehot', OneHotEncoder(handle_unknown='ignore'))])
preprocessor = ColumnTransformer(
transformers=[('cat', categorical_transformer, categorical_features)])
pipeline = make_pipeline(
preprocessor,
LGBMClassifier(n_jobs=-1, random_state=42),
)
# -
pipeline.fit(X_train, y_train)
# ## Exercise 3:
#
# Now let's test how the model performs:
#
# - Make model binary predictions and save them to an array **preds**.
# - Make model probability predictions and save them to an array called **preds_proba**. Keep only True class probabilities (by default probability prediction returns you both False and True classes probabilities)
# - Create a variable called **precision** with the model precision score.
# - Create a variable called **recall** with the model recall score
# + deletable=false nbgrader={"grade": false, "grade_id": "cell-9be477a1561648b5", "locked": false, "schema_version": 3, "solution": true, "task": false}
# YOUR CODE HERE
raise NotImplementedError()
# -
expected_recall = 'a2cffa866c48b997372a62104161ba89e68fb439c418fc2559e2a32c44987ce8'
hash_recall = hashlib.sha256(bytes(str(round(recall, 2)), encoding='utf8')).hexdigest()
assert hash_recall == expected_recall
# + deletable=false editable=false nbgrader={"grade": true, "grade_id": "cell-134d5512471b46a5", "locked": true, "points": 2, "schema_version": 3, "solution": false, "task": false}
assert len(preds) == 19679, 'Are you sure you made predictions for test set only?'
assert len(preds_proba) == 19679, 'Are you sure you made predictions for test set only?'
assert not isinstance(preds_proba[0], np.ndarray), 'Are you sure you kept only the True class predictions?'
assert round(sum(preds_proba)) == 6563
np.testing.assert_almost_equal(precision, 0.64966, decimal=2)
np.testing.assert_almost_equal(recall, 0.546027, decimal=2)
# + [markdown] deletable=false editable=false nbgrader={"grade": false, "grade_id": "cell-00ba0926557db298", "locked": true, "schema_version": 3, "solution": false, "task": false}
#
# ## Exercise 4:
#
# It's already not bad, but let's now try to calculate the optimal threshold.
#
# By threshold I mean the minimal probability of a prediction that we're going to call "True".
#
# By default, any prediction with probability > 0.5 is called True, but we might find a better value.
#
# The metric is the same: our success rate (precision) needs to be at least 50%, and the recall should be as big as possible.
#
# Save the result to a variable called **threshold**. Round the result to 2 decimal points.
# + deletable=false nbgrader={"grade": false, "grade_id": "cell-9f436f48cd5a5f33", "locked": false, "schema_version": 3, "solution": true, "task": false}
# threshold = ...
# YOUR CODE HERE
raise NotImplementedError()
# + deletable=false editable=false nbgrader={"grade": true, "grade_id": "cell-326014262a2ffeb9", "locked": true, "points": 3, "schema_version": 3, "solution": false, "task": false}
assert round(threshold, 2) == threshold, 'Did you round the value?'
ans_threshold = hashlib.sha256(bytes(str(threshold), encoding='utf8')).hexdigest()
assert ans_threshold == "6382e07f9de0c85293aee2a45b88c61c28589419682ecc2f8c097f750e861a24"
# + [markdown] deletable=false editable=false nbgrader={"grade": false, "grade_id": "cell-189130f10b806acf", "locked": true, "schema_version": 3, "solution": false, "task": false}
# ## Exercise 5:
#
# - Now create a list of predictions.
#
# > All the values from the **preds_proba** list that have a value > threshold should be True. The rest should be False.
#
# > Save the result to a variable called **best_preds**
#
# - Calculate the precision and recall and save them to variables **precision** and **recall**
# + deletable=false nbgrader={"grade": false, "grade_id": "cell-11c3a5f9bbcc11ff", "locked": false, "schema_version": 3, "solution": true, "task": false}
# best_preds = ...
# precision = ...
# recall = ...
# YOUR CODE HERE
raise NotImplementedError()
# + deletable=false editable=false nbgrader={"grade": true, "grade_id": "cell-1b53ef9739686af6", "locked": true, "points": 1, "schema_version": 3, "solution": false, "task": false}
np.testing.assert_almost_equal(precision, 0.50290256, decimal=2)
np.testing.assert_almost_equal(recall, 0.84422789, decimal=2)
# + [markdown] deletable=false editable=false nbgrader={"grade": false, "grade_id": "cell-a38fa1224ab86e3c", "locked": true, "schema_version": 3, "solution": false, "task": false}
# ## Exercise 6:
#
# Now let's find out whether removing rare values is going to help.
#
# **Filter *df_train* (the one you created in the Exercise 2) the following way:**
#
# - Remove rows with **Department Name** that appear <= 50 times
# - Remove rows with **InterventionLocationName** that appear <= 50 times
# - Remove rows with **ReportingOfficerIdentificationID** that appear <= 30 times
# - Remove rows with **StatuteReason** that appear <= 10 times
# - Note: it's better to keep the original dataframe not touched. Create a copy of the original dataframe and save the results to a variable **train_filtered**
#
# > We have to filter the values after we split the dataset into training and test, because by filtering the test set we also affect the score. If we filtered everything besides the examples that are the easiest to predict, we'd have a super nice score, but in production we're going to expect both the filtered values and unfiltered ones.
#
# > We shouldn't worry about the fact, that some values in the test set will not be present in the training set, because the pipeline is simply going to ignore them.
#
# > (you might use the logic from the original model's notebook, but I suggest trying to implement it by yourself, it's a good exercise)
# + deletable=false nbgrader={"grade": false, "grade_id": "cell-b7b6cf4076907562", "locked": false, "schema_version": 3, "solution": true, "task": false}
# train_filtered = df_train.copy()
# train_filtered = ...
# YOUR CODE HERE
raise NotImplementedError()
# + deletable=false editable=false nbgrader={"grade": true, "grade_id": "cell-204c92ef82321cee", "locked": true, "points": 0, "schema_version": 3, "solution": false, "task": false}
assert train_filtered.shape == (30502, 14), 'Make sure to filter rare values. Make sure to filter only train set.'
assert 'middlebury' not in train_filtered['Department Name'], 'Did you filter department names?'
assert 'hampton' not in train_filtered['InterventionLocationName'], 'Did you filter InterventionLocationName ?'
assert 'DACYR048' not in train_filtered['ReportingOfficerIdentificationID'], 'Did you filter officer ids?'
assert 'Stop Sign ' not in train_filtered['StatuteReason'], 'Did you filter statute reasons?'
# + [markdown] deletable=false editable=false nbgrader={"grade": false, "grade_id": "cell-4bf528ed9d629b95", "locked": true, "schema_version": 3, "solution": false, "task": false}
# ## Exercise 7:
#
# **Let's split *train_filtered* into *X* and *Y* parts and do the same thing once again:**
#
# - Fit the model on the training set (this time filtered one)
#
# - Predict probabilities for the test set (untouched one).
#
# - Select the best threshold for the specified requirements (precision >= 0.5, max possible recall).
#
# - Round up the threshold up to 2 decimal points.
#
# - Transform probabilities to binary answers: probability above the threshold = True, False otherwise.
#
# - Calculate the precision and recall scores for these predictions.
#
# I believe you need no exact instructions, as you did exactly same things in Exercises 2, 3 and 4.
#
# Save the score results to variables called **filtered_precision** and **filtered_recall**
# + deletable=false nbgrader={"grade": false, "grade_id": "cell-48912f8accf051fd", "locked": false, "schema_version": 3, "solution": true, "task": false}
# YOUR CODE HERE
raise NotImplementedError()
# + deletable=false editable=false nbgrader={"grade": true, "grade_id": "cell-299a6d55c56a74f0", "locked": true, "points": 2, "schema_version": 3, "solution": false, "task": false}
np.testing.assert_almost_equal(filtered_precision, 0.501309, decimal=2)
np.testing.assert_almost_equal(filtered_recall, 0.83238, decimal=2)
# + [markdown] deletable=false editable=false nbgrader={"grade": false, "grade_id": "cell-db9b3e8c447f4bb9", "locked": true, "schema_version": 3, "solution": false, "task": false}
# Okay, so it seems like the original notebook had a mistake of evaluating the model on filtered test set. In fact, filtering features with these frequency limits decreased the recall (0.844 -> 0.832).
# + [markdown] deletable=false editable=false nbgrader={"grade": false, "grade_id": "cell-1ea1fea30e3c1e9b", "locked": true, "schema_version": 3, "solution": false, "task": false}
# ## Exercise 8:
#
# Now I'll let you use your fantasy and try to filter the categorical values differently.
#
# You're free to do whatever you want, but here are a few ideas you can use:
#
# - Instead of dropping rare categories, create a new value for them
#
# - Adjust the frequency values (e.g. keep a part of departments we just filtered or filter even more). You can try to search all the possible combinations of frequency values if you want.
#
# Your task is to create a list of *True/False* predictions for the **X_test** and call them **best_preds**. These predictions have to have precision >= 0.5 and recall > 0.84422789
# + deletable=false nbgrader={"grade": false, "grade_id": "cell-ed6aa7a2151ccf89", "locked": false, "schema_version": 3, "solution": true, "task": false}
# predictions = ...
# YOUR CODE HERE
raise NotImplementedError()
# + deletable=false editable=false nbgrader={"grade": true, "grade_id": "cell-0b0af8b253851b65", "locked": true, "points": 3, "schema_version": 3, "solution": false, "task": false}
precision = precision_score(y_test, best_preds)
recall = recall_score(y_test, best_preds)
assert precision >= 0.5
assert recall > 0.84422789
# + [markdown] deletable=false editable=false nbgrader={"grade": false, "grade_id": "cell-397df43952bc83d6", "locked": true, "schema_version": 3, "solution": false, "task": false}
# ## Exercise 9:
#
# So, we got the model. It's usually a good idea to retrain the model on the whole dataset, so now I want you to:
# - Apply the filters that you just created in the Exercise 7 to **df_combined**
# - Train the same model on the whole dataset
# - Export the model, train columns and data types to **/tmp/<file_name>**, where files are called **new_pipeline.pickle**, **new_dtypes.pickle** and **new_columns.json**.
# + deletable=false nbgrader={"grade": false, "grade_id": "cell-b200c367cf7c4646", "locked": false, "schema_version": 3, "solution": true, "task": false}
# YOUR CODE HERE
raise NotImplementedError()
# + deletable=false editable=false nbgrader={"grade": true, "grade_id": "cell-1277da522704d17d", "locked": true, "points": 2, "schema_version": 3, "solution": false, "task": false}
# Reload the exported artifacts and validate their contents.
with open('/tmp/new_columns.json') as fh:
    columns = json.load(fh)
with open('/tmp/new_pipeline.pickle', 'rb') as fh:
    pipeline = joblib.load(fh)
with open('/tmp/new_dtypes.pickle', 'rb') as fh:
    dtypes = pickle.load(fh)
assert isinstance(columns, list), 'columns need to be a list of training features'
assert 'ContrabandIndicator' not in columns, 'there should be only training features in columns. You got target there.'
assert 'is_new' in columns, "your columns don't contain is_new feature. Are you you updated the columns file?"
assert isinstance(pipeline, Pipeline), 'new_pipeline.pickle does not seem it be an instance of Pipeline class.'
assert isinstance(dtypes, pd.core.series.Series)
assert all([column in dtypes.index for column in columns]), 'some columns from new_columns file are not in the new_dtypes file'
assert all([dtype in columns for dtype in dtypes.index]), 'some dtypes from new_dtypes file are not in the new_columns file'
# + [markdown] deletable=false editable=false nbgrader={"grade": false, "grade_id": "cell-e661d30ecca08667", "locked": true, "schema_version": 3, "solution": false, "task": false}
# ## Exercise 10:
#
# And now it's time to change the server! I know you missed this part :)
#
# Before we do it, I want to remind you, that in this exercise we didn't cover the ethics topic.
#
# Our model is trained on sensible features like race, sex and ethnicity.
#
# In real situation you'd need to make sure that your model is not discriminating anyone.
#
# Now, go and create a copy of the **protected_server.py** file. Call it **new_server.py**
#
# In that file:
# - Change the **check_valid_column** function to have the new added columns
#
# > You can also automate it by reading the columns file, it's even better!
#
# - Change the **check_categorical_values** function:
#
# > We didn't really affect any of the checked columns there besides **StatuteReason** (of course, if you didn't change it in your best solution). Remove the values that should not be in this column anymore.
#
# > We also add one more categorical feature to the dataframe (**is_new**). Go and add possible values to the check.
#
# - As soon as it's done, go ahead and start the server.
#
# - Play with the predictions. Make sure that the server checks the **is_new** feature values. Try to send requests without **is_new** or with a different value (not True or False).
#
# - After you're done, change the value of **done** to **True** to pass the exercise
# -
done = False
# + deletable=false nbgrader={"grade": false, "grade_id": "cell-177c733bbad2ee14", "locked": false, "schema_version": 3, "solution": true, "task": false}
# YOUR CODE HERE
raise NotImplementedError()
# + deletable=false editable=false nbgrader={"grade": true, "grade_id": "cell-af1fc9c2780fcf2a", "locked": true, "points": 2, "schema_version": 3, "solution": false, "task": false}
assert done == True
# + [markdown] deletable=false editable=false nbgrader={"grade": false, "grade_id": "cell-61c0054fb792c42e", "locked": true, "schema_version": 3, "solution": false, "task": false}
# Aaaaaand...we're done!
# + [markdown] deletable=false editable=false nbgrader={"grade": false, "grade_id": "cell-c836f75fc6b45559", "locked": true, "schema_version": 3, "solution": false, "task": false}
# <img src="media/congrats.png" width=300/>
# -
| S06 - DS in the Real World/BLU15 - Model CSI/Exercise notebook.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ## Title : Ex: Polynomial Regression
# - Goal: Create cubic polynomial least-squares regression
#
# ## Description
#
# After fitting the model and getting the predictions, you should see the following plot:
#
#
# <img src="../fig/fig1.png" style="width: 500px;">
#
#
# ## Hints:
#
# <a href="https://www.statsmodels.org/v0.10.1/generated/statsmodels.regression.linear_model.OLS.html#statsmodels.regression.linear_model.OLS" target="_blank">Formulas in statsmodels</a>
#
# <a href="https://www.statsmodels.org/v0.10.1/generated/statsmodels.regression.linear_model.OLS.html#statsmodels.regression.linear_model.OLS" target="_blank">sm.ols</a>
#
# <a href="https://docs.scipy.org/doc/numpy/reference/generated/numpy.vander.html" target="_blank">numpy vander</a>
#
# Refer to lecture notebook.
#
# Do not change any other code except the blanks.
# +
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import statsmodels.formula.api as sm
# %matplotlib inline
# -
# Load the data and sort by x so line plots are drawn left-to-right.
df = pd.read_csv('data1.csv')
df = df.sort_values('x')
df.head()
# Scatter of the raw data before fitting.
plt.scatter(df.x, df.y);
plt.xlabel("x")
plt.ylabel("y")
plt.show()
# ## Cubic polynomial least-squares regression of y on x
# +
### edTest(test_ols_formula) ###
def fit_model(formula):
    """Fit an OLS model for the given patsy formula on the module-level df."""
    return sm.ols(formula=formula, data=df).fit()


# Cubic polynomial in x; I() protects ** from patsy formula-operator meaning.
formula = 'y ~ x + I(x**2) + I(x**3)'
fit2_lm = fit_model(formula)
# -
### edTest(test_predictions_summary) ###
# Get the predictions and the summary dataframe
# (summary_frame exposes the 'mean' / 'mean_ci_lower' / 'mean_ci_upper'
# columns that the plotting cell below uses).
poly_predictions = fit2_lm.get_prediction().summary_frame()
poly_predictions
# +
# Scatter of the data overlaid with the fitted cubic and its confidence band.
ax2 = df.plot.scatter(x='x',y='y',c='Red',title="Data with least-squares cubic fit")
ax2.set_xlabel("x")
ax2.set_ylabel("y")
# CI for the prediction at each x value, i.e. the curve itself
ax2.plot(df.x, poly_predictions['mean'],color="green")
ax2.plot(df.x, poly_predictions['mean_ci_lower'], color="blue",linestyle="dashed")
ax2.plot(df.x, poly_predictions['mean_ci_upper'], color="blue",linestyle="dashed");
# -
# #### Condition number
# Exercise blanks: build the Vandermonde design matrix for the cubic fit and
# report its condition number (large values indicate an ill-conditioned basis).
c = np.vander(_, _, increasing=True)
np.linalg.cond(c)
| docs/lectures/lecture02/notebook/L1_1.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: PySpark
# language: python
# name: pyspark
# ---
# Read the 'pessoas' table from Hive through the Presto JDBC driver.
# NOTE(review): `spark` is the SparkSession injected by the PySpark kernel.
jdbcDF = spark.read \
    .format("jdbc") \
    .option("driver", "com.facebook.presto.jdbc.PrestoDriver") \
    .option("url", "jdbc:presto://presto:8080/hive/aula") \
    .option("user", "hive") \
    .option("dbtable", "pessoas") \
    .load()
jdbcDF.show()
# Read the 'employees' table directly from MySQL over JDBC.
# NOTE(review): "<PASSWORD>" is a scrubbed placeholder -- supply the real
# credential via configuration/secret management, never hard-coded here.
dataframe_mysql = spark.read \
    .format("jdbc") \
    .option("url", "jdbc:mysql://database/employees") \
    .option("driver", "com.mysql.jdbc.Driver") \
    .option("dbtable", "employees") \
    .option("user", "root") \
    .option("password", "<PASSWORD>") \
    .load()
dataframe_mysql.show()
# Round-trip the MySQL data through a Parquet file on disk.
dataframe_mysql.write.parquet("mysql")
df = spark.read.parquet("mysql")
df.show()
| data/notebooks/Untitled.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
#Import required libraries
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
# %matplotlib inline
# # Acquire Housing dataset
#Read housing dataset (one row per census block group; last column is the
#ocean_proximity category -- see the encoding step further down)
df_housing_dataset = pd.read_csv('housing.csv')
df_housing_dataset.head()
df_housing_dataset.columns
#Check shape of entire dataset (rows, columns)
df_housing_dataset.shape
df_housing_dataset.describe()
# # Visualize data to understand the relationship among variables
# Pairwise correlations between the numeric features.
corr = df_housing_dataset.corr()
df_housing_dataset.corr()
#Seaborn heatmap to view correlations between features in dataset
#Median income has a positive correlation against median house value
plt.figure(figsize=(8,6))
pltheatmap =sns.heatmap(corr)
#Scatter plot of median income with median house value
plt.figure(figsize=(10,7))
plt.scatter(df_housing_dataset['median_income'],df_housing_dataset['median_house_value'])
# BUG FIX: the original wrote `plt.title = '...'`, which assigned a string to
# the plt.title attribute -- no title was drawn AND the plt.title function was
# shadowed for the rest of the session (the reason later cells had to comment
# out their plt.title calls). Call the function instead.
plt.title('Scatter plot to correlate median income vs median house value')
plt.xlabel('Median Income')
plt.ylabel('Median House Value')  # fixed 'Medain' typo in the axis label
plt.show()
# Distinct categories of the ocean_proximity feature
df_housing_dataset.ocean_proximity.unique()
# Count of missing values in ocean_proximity
df_housing_dataset.ocean_proximity.isnull().sum()
#Slice dataset and store independent and dependent variables
# (all columns but the last are features; column 9 -- presumably
# median_house_value, confirm against df_housing_dataset.columns -- is the target)
X = df_housing_dataset.iloc[:,:-1].values
y = df_housing_dataset.iloc[:,9].values
print (X,y)
#Label Encode ocean proximity column (feature column index 8) to integer codes
from sklearn.preprocessing import LabelEncoder
ocean_proximity_labelencoder = LabelEncoder()
X[:,8] = ocean_proximity_labelencoder.fit_transform(X[:,8])
X[:,8]
#Correlation between Ocean proximity (encoded) and Median house value
# NOTE(review): label codes impose an arbitrary ordering on the categories,
# so this Pearson correlation should be interpreted with caution.
corr1 = np.corrcoef(X[:,8].astype('float64'),y.astype('float64'))
print(corr1)
#Seaborn pairplot of median income vs median house value with hue as Ocean Proximity
# FIX: seaborn renamed the `size` parameter to `height` in 0.9 and later
# removed `size`; passing size= raises a TypeError on modern seaborn.
sns.pairplot(df_housing_dataset, height=4,
             vars=["median_income","median_house_value"], hue="ocean_proximity")
# # Handle missing values
df_housing_dataset.isnull().sum()
df_X = pd.DataFrame(X)
df_X.isnull().sum()
# +
#==============================================================================
# Handle the missing values: impute the column mean where values are missing
# (the NaNs live in column index 4, total_bedrooms).
#==============================================================================
# BUG FIX: sklearn.preprocessing.Imputer was deprecated in scikit-learn 0.20
# and removed in 0.22. SimpleImputer is the replacement: missing values are
# np.nan (not the string 'NaN') and it is column-wise by default (no `axis`).
from sklearn.impute import SimpleImputer
# First create an imputer
missingValueImputer = SimpleImputer(missing_values = np.nan, strategy = 'mean')
# Learn the column mean from the data
missingValueImputer = missingValueImputer.fit (X[:,4:5])
# update values of X with the imputed values
X[:,4:5] = missingValueImputer.transform(X[:,4:5])
# -
#Notice missing values in total bedrooms column have been imputed with mean of total bedrooms
df_X = pd.DataFrame(X)
df_X.isnull().sum()
# # Principal Component Analysis
X.shape
#Feature Scaling (PCA is scale-sensitive, so standardize first)
from sklearn.preprocessing import StandardScaler
stdsclr = StandardScaler()
X_std = stdsclr.fit_transform(X)
#PCA
# BUG FIX: `sklearn.decomposition.pca` is a private module that was removed in
# newer scikit-learn; import from the public package. Also bind the fitted
# estimator to `pca` instead of rebinding the name PCA, which shadowed the
# class and made any further PCA(...) construction impossible.
from sklearn.decomposition import PCA
pca = PCA(n_components=6)
principal_components = pca.fit_transform(X_std)
principal_components
#Cumulative proportion of variance explained by each retained component
pca.explained_variance_ratio_
df_X = pd.DataFrame(X)
print(df_X.columns)
df_housing_dataset.head()
# Dump components relations with features: This gives us the picture of how features are related to components
print(pd.DataFrame(pca.components_,columns=df_X.columns,index = ['PC-1','PC-2','PC-3','PC-4','PC-5','PC-6']))
principal_components.shape
# # Machine Learning Model Selection and Training
#Let's check our target label
y
#Split dataset for model training and testing (90/10 split; test_size=0.1 --
#the original comment said 80/20, which did not match the code)
# BUG FIX: sklearn.cross_validation was removed in scikit-learn 0.20;
# train_test_split now lives in sklearn.model_selection.
from sklearn.model_selection import train_test_split
X_train,X_test,y_train,y_test = train_test_split(principal_components,y, test_size=0.1,random_state=1)
X_train.shape
# # Linear Regression ML Model
#Linear Regression Model
from sklearn.linear_model import LinearRegression
linReg = LinearRegression()
linReg.fit(X_train,y_train)
linReg.predict(X_test)
#Quick check accuracy (R^2 score) of the model on the training set
score = linReg.score(X_train,y_train)
print(score)
#Quick check accuracy (R^2 score) of the model on the held-out test set
score = linReg.score(X_test,y_test)
print(score)
linreg_predictions = linReg.predict(X_test)
from sklearn.metrics import mean_squared_error
np.sqrt(mean_squared_error(y_test,linreg_predictions))
#RMSE below
# # Decision Tree ML model
#Train with DT model (depth and split-size limits curb overfitting)
from sklearn.tree import DecisionTreeRegressor
DTRegressor = DecisionTreeRegressor(max_depth=9, min_samples_split=5)
DTRegressor.fit(X_train,y_train)
#Quick check accuracy (R^2 score) of the model on the training set
score = DTRegressor.score(X_train,y_train)
print(score)
#Quick check accuracy (R^2 score) on the held-out test set
score = DTRegressor.score(X_test,y_test)
print(score)
DTR_predictions = DTRegressor.predict(X_test)
from sklearn.metrics import mean_squared_error
np.sqrt(mean_squared_error(y_test,DTR_predictions))
#RMSE below
# # Random Forest ML Model - Model prediction accuracy is good compared to LR and DT models
#Declare hyper parameters to tune RF model
# (keys are '<pipeline step name>__<parameter>' as required by GridSearchCV
#  when searching over a pipeline)
hyperparameters = { 'randomforestregressor__max_features' : ['auto', 'sqrt', 'log2'],
                  'randomforestregressor__max_depth': [None, 5, 3, 1],
                   'randomforestregressor__min_samples_split': [2, 5],
                   'randomforestregressor__min_samples_leaf': [10, 5]}
#Make a Random forest pipeline
from sklearn.ensemble import RandomForestRegressor
from sklearn.pipeline import make_pipeline
pipeline = make_pipeline(RandomForestRegressor(n_estimators=50))
#Cross Validation to find best parameters
# BUG FIX: sklearn.grid_search was removed in scikit-learn 0.20; GridSearchCV
# now lives in sklearn.model_selection.
from sklearn.model_selection import GridSearchCV
clf = GridSearchCV(pipeline, hyperparameters, cv=10)
# Fit and tune model (fits cv * n_candidates forests -- this is the slow cell)
clf.fit(X_train, y_train)
clf.best_params_
clf.best_score_
clf.best_estimator_
#Quick check accuracy (R^2 score) of the model after CV, on the training set
score = clf.score(X_train,y_train)
print(score)
#Quick check accuracy (R^2 score) on the held-out test set
score = clf.score(X_test,y_test)
print(score)
X_test
RF_predictions = clf.predict(X_test)
from sklearn.metrics import mean_squared_error
np.sqrt(mean_squared_error(y_test,RF_predictions))
#RMSE below for the 10% test set [unseen data]
# # ML Model Training with only Median income feature to predict housing value
#Let's train the model only with median income and check how model behaves
X
df_X_final = pd.DataFrame(X)
df_X_final.head()
# Drop every feature column except median_income (index 7)
X = np.delete(X,[0,1,2,3,4,5,6,8],axis=1)
X
#Split dataset for model training and testing (75/25 split; test_size=1/4 --
#the original comment said 80/20, which did not match the code)
# BUG FIX: sklearn.cross_validation was removed in scikit-learn 0.20;
# train_test_split now lives in sklearn.model_selection.
from sklearn.model_selection import train_test_split
X_train,X_test,y_train,y_test = train_test_split(X,y, test_size=1/4,random_state=0)
X_train.shape
#Feature Scaling: fit the scaler on the training data only
from sklearn.preprocessing import StandardScaler
stdsclr = StandardScaler()
X_train_std = stdsclr.fit_transform(X_train)
# BUG FIX: the test set must be scaled with the statistics learned on the
# training set. Calling fit_transform here re-fitted the scaler on X_test,
# leaking test-set statistics and making train/test features inconsistent.
X_test_std = stdsclr.transform(X_test)
#Linear Regression Model on the single median_income feature
from sklearn.linear_model import LinearRegression
linReg1 = LinearRegression()
linReg1.fit(X_train_std,y_train)
#Quick check accuracy (R^2 score) of the model on the training set
score = linReg1.score(X_train_std,y_train)
print(score)
#Quick check accuracy (R^2 score) on the held-out test set
score = linReg1.score(X_test_std,y_test)
print(score)
#==============================================================================
# Visualize the linear regressor algo outcome
#==============================================================================
# Visualising the Regression results
# NOTE(review): the model was fitted on the *standardized* features, but the
# prediction below is made on the raw X_test -- the plotted line will be
# offset/mis-scaled; consider predicting on X_test_std instead.
plt.scatter(X_test, y_test, color = 'red')
plt.plot(X_test, linReg1.predict(X_test), color = 'blue')
#plt.title('Median House Price Prediction')
plt.xlabel('Median Income')
plt.ylabel('Median House Price')
plt.show()
# +
#==============================================================================
# Fitting the Polynomial Regression algorithm to the Training set
# (expand the single feature into polynomial terms up to degree 5, then fit
#  an ordinary linear regression on the expanded features)
#==============================================================================
from sklearn.preprocessing import PolynomialFeatures
polyagent = PolynomialFeatures(degree=5)
X_Poly = polyagent.fit_transform(X_train)
# -
linReg1.fit (X_Poly, y_train )
# FIX: use transform (not fit_transform) so the test features are produced by
# the transformer fitted on the training data -- consistent with standard
# train/test discipline (output is identical here, but the intent is clearer).
X_Poly_test = polyagent.transform(X_test)
# R^2 on the training set
score = linReg1.score(X_Poly,y_train)
print(score)
# R^2 on the held-out test set
score = linReg1.score(X_Poly_test,y_test)
print(score)
#==============================================================================
# Visualize the poly regressor algo outcome
#==============================================================================
# Visualising the Regression results
# NOTE(review): X_Poly_test has one column per polynomial term, so plt.plot
# draws one line per term; plotting predictions against the original X_test
# (sorted) would give a single readable curve.
#plt.scatter(X_Poly_test, y_test, color = 'red')
plt.plot(X_Poly_test, linReg1.predict(X_Poly_test), color = 'blue')
#plt.title('Median House Price Prediction')
plt.xlabel('Median Income')
plt.ylabel('Median House Price')
plt.show()
# +
#Let's fit DT Reg on the single median_income feature
from sklearn.tree import DecisionTreeRegressor
DTRegressor = DecisionTreeRegressor(max_depth=3)
DTRegressor.fit(X_train,y_train)
# -
#Quick check accuracy (R^2 score) of the model on train
score = DTRegressor.score(X_train,y_train)
print(score)
#==============================================================================
# Visualize the DT regressor algo outcome
#==============================================================================
# Visualising the Regression results
# NOTE(review): X_test is unsorted, so the line plot will zig-zag; sorting
# X_test before plotting would show the tree's step function cleanly.
plt.scatter(X_test, y_test, color = 'red')
plt.plot(X_test, DTRegressor.predict(X_test), color = 'blue')
#plt.title('Median House Price Prediction')
plt.xlabel('Median Income')
plt.ylabel('Median House Price')
plt.show()
| California_HousingPrice_Prediction.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# __Hydrograph Development Notebooks__
#
#
# __Breach Hydrographs, Lisle, NY__
#
#
# PYTHON
#
#
# Overview: This notebook was created to document the development of breach hydrographs using historical flow data for two locations along the levee at [Lisle, NY](https://www.google.com/maps/@42.3449088,-75.9925314,3206m/data=!3m1!1e3).
#
# Updated 1.10.2017
# # Develop a discharge hydrograph of the 1% storm for the main flooding source
#
# ## Exploratory Analysis
# [Notebook](FirstLook_GageData.ipynb) developed to evaluate available gage data in the vicinity, plot available time series & qualitatively assess differences in hydrograph shapes.
#
# ## Discharge Hydrograph
# Select the timeseries for the [highest recorded peak (2005)](https://nwis.waterdata.usgs.gov/ny/nwis/peak/?site_no=01509000&agency_cd=USGS) where [available instantaneous gage data](https://nwis.waterdata.usgs.gov/ny/nwis/uv?cb_00060=on&format=gif_default&site_no=01509000&period=&begin_date=2005-03-25&end_date=2005-04-15) exists.
#
# ## Calculate Peak Discharge
# Using Bulletin 17B procedures and the USGS PeakFQ software, the 1% Storm (peak flow) value was determined at the nearest applicable gage.
# [Input](https://raw.githubusercontent.com/Dewberry-RSG/HydrologyTools/master/nbs/peakfq/USGS01509520.inp)
#
# [Output](https://raw.githubusercontent.com/Dewberry-RSG/HydrologyTools/master/nbs/peakfq/USGS01509520.PRT)
#
# ## Stretch the Hydrograph
# Stretch the hydrograph to the calculated peak flow.
#
# *Details on the methodology for this are described in the [Proof of Concepts Document](https://github.com/Dewberry-RSG/HydrologyTools/blob/master/documentation/ProofofConceptHydrologyStudies.pdf). Implementation using Jupyter Notebooks for the proof of concept cases are available in the [Methodology Overview](MethodologyOverview.ipynb).*
#
# ## Develop of a breach hydrograph using the flow hydrograph created in step 1.
#
# In order to convert the flow hydrograph to a stage hydrograph at any given location, a hydraulic analysis is necessary to properly account for differences in the cross-sectional area at different locations along the reach. For this study a 1D, Steady State model was used to simulate a Natural Valley scenario in the levee impact area.
#
# The geometry from this model was used to compute flows ranging from 1,000 cfs to 25,000 cfs in increments of 1,000 cfs. The results of these simulations were used to develop a rating curve at each area of interest to translate flow to stage. The image below is an example of the results at a cross section, illustrating how geometric differences at different flow levels may impact the resultant stage for a given reach.
#
# Note that the change in water surface elevation when the flow is constrained by the channel and the levee during overbank flow rises at a greater rate when compared with the unconstrained flow when conveyance occurs on both sides of the levee (natural valley).
#
# <img src="https://raw.githubusercontent.com/Dewberry-RSG/HydrologyTools/master/images/XS_Example.png" , width=1000,height=600/>
#
#
# ### Procedure to create Breach Hydrograph
#
# A. Read in HEC-RAS data for the XS of interest & create a stage/discharge rating curve using computed flows.
#
# B. Using the data from the rating curve in Part A, create a function (nth degree polynomial interpolation equation) to convert flow to stage.
#
# C. Convert the 1% flow hydrograph created in Step 1 to a stage hydrograph using the rating curve function created in Part B.
#
# D. Normalize the stage to 'feet above the breach point' using the stage hydrograph created in Part C and the breach elevation (head = 0 at breach point).
#
# E. Using the head above breach hydrograph created in Part D, calculate weir flow for (use the Standard Weir Equation, below) each timestep & write to file.
#
# F. Input weir flow hydrograph created in Part E into HEC-RAS unsteady flow file. END.
#
# #### The Standard Weir Equation:
# ## $\qquad$ $Q = CLH^{2/3}$
#
# Where:
#
# $\qquad$ __Q__ = Discharge (cfs)
# $\qquad$ __C__ = Weir coefficient (unitless)
# $\qquad$ __L__ = Weir crest length (ft)
# $\qquad$ __H__ = Energy head over the weir crest (ft)
#
#
# **From HEC-RAS Lateral Weir Coefficients, use the default Weir Coefficient of 2.0 (range is 1.5-2.6, given on page 3-50 of the [2D Users Manual](http://www.hec.usace.army.mil/software/hec-ras/documentation/HEC-RAS%205.0%202D%20Modeling%20Users%20Manual.pdf))*
# +
import os
from glob import glob
from importlib import reload
import utils; reload(utils)
from utils import *
import pandas as pd
import numpy as np
from scipy import interpolate
import matplotlib.pyplot as plt
import warnings
warnings.filterwarnings("ignore")
# %matplotlib inline
# -
# ## 1. Flow hydrographs for the 1% chance storm:
# #### Read in gage data & develop base hydrograph
#
# - Read in Base Hydrograph from [USGS Gage](https://waterdata.usgs.gov/usa/nwis/uv?site_no=01509000) & Scale to 1-pct using scale factor
# - Manually Smooth the curve where needed
#
# *See comment lines in [Helper Script](ny_clean_nb.py) for smoothing procedure.*
#
# *Data for the falling limb of the April 2005 event was missing from the USGS database. To fill the missing data a third order polynomial interpolation was used to approximately mirror the rising limb.
# Load the gage data and build the smoothed 1%-chance base hydrograph.
# NOTE(review): printbold/initialize/init_base_hydro/smooth_base_hydro are all
# star-imported from the project-local utils module; see utils.py (and its
# comment lines) for the scaling and smoothing details.
printbold('Reading data from')
gage_data, data_dir = initialize()
base_storm_1pct = init_base_hydro(gage_data)
smooth_storm = smooth_base_hydro(base_storm_1pct)
# ## Breach Location # 1:
#
# __Upstream Location:__The upstream location selected for Lisle lies in the center of the levee. This is because the 1% flow calculated at the upstream section of the levee along Dudley Creek does not exceed the banks, and therefore a breach at this location would not occur. The backwater from Tioughnioga river does not reach the upper sections of the levee, therefore no breach was created in this location.
#
# As described above, breach locations should be chosen at or very near a XS (or a XS added if not in the area of breaching) to get the stage discharge curve as accurate as possible.
#
# <img src="https://raw.githubusercontent.com/Dewberry-RSG/HydrologyTools/master/images/56045.65_location_1.JPG", width=900,height=800/>
#
# #### Plots Summary (from top to bottom):
#
# 1. Stage/Discharge Rating curve at HEC-RAS Cross section shown above.
# - 1% chance discharge hydrograph on the left, converted to stage on the right. In red is the elevation of the levee toe (invert of the hypothetical breach).
# - 1% chance stage hydrograph on the left (limited to values above breaching threshold), converted to head over breach elevation in the center, final breach hydrograph (computed as described above) in cfs.
#
# NOTE: For this analysis, __*hypothetical breach locations*__ have been selected at 2 locations along the levee. There is no evidence that a breach is likely to occur at this location.
# Inputs for hypothetical breach location #1 (center of the levee).
# NOTE(review): rasdata/data_dir are machine-specific paths (network drive /
# local checkout); they must be updated to rerun this notebook elsewhere.
rasdata = r'p:\02\NY\Broome_Co_36007C\LAMP2\TECH\Analysis\Modeling\WorkingModels\Lisle_WhitPt\LAMPRAS\Lisle_WhitPt.p05.hdf'
data_dir = r'C:\Users\slawler\Repos\HydrologyTools\sample_data'
community = 'Lisle'
station = 56045.65       # HEC-RAS cross-section station nearest the breach
breach_point = 1         # breach location identifier
breach_height = 969.45   # breach invert elevation; head is measured above this
# GetBreachFlow (from utils) builds the rating curve, converts flow to stage,
# and computes the weir-flow breach hydrograph described in the markdown above.
# date_int presumably controls the date-axis interval -- confirm in utils.py.
GetBreachFlow(smooth_storm, community, rasdata, station, breach_point, breach_height, data_dir, date_int = 12)
# ## Breach Location # 2:
#
# __Downstream Location__
#
# <img src=https://raw.githubusercontent.com/Dewberry-RSG/HydrologyTools/master/images/53914.48_location_2.JPG , width=900,height=800/>
#
# #### Plots Summary (from top to bottom):
#
# 1. Stage/Discharge Rating curve at HEC-RAS cross section shown above.
# - 1% chance discharge hydrograph on the left, converted to stage on the right. In red is the elevation of the levee toe (invert of the hypothetical breach).
# - 1% chance stage hydrograph on the left (limited to values above breaching threshold), converted to head over breach elevation in the center, final breach hydrograph (computed as described above) in cfs.
#
# NOTE: For this analysis, __*hypothetical breach locations*__ have been selected at 2 locations along the levee. There is no evidence that a breach is likely to occur at this location.
# [Click For Image](https://raw.githubusercontent.com/Dewberry-RSG/HydrologyTools/master/images/53914.48_location_2.JPG)
#
# +
# Inputs for hypothetical breach location #2 (downstream); same workflow as
# breach location #1. data_dir is reused from the cell above.
rasdata = r'p:\02\NY\Broome_Co_36007C\LAMP2\TECH\Analysis\Modeling\WorkingModels\Lisle_WhitPt\LAMPRAS\Lisle_WhitPt.p05.hdf'
community="Lisle"
station = 53914.48       # HEC-RAS cross-section station nearest the breach
breach_point = 2         # breach location identifier
breach_height = 964.71   # breach invert elevation; head is measured above this
GetBreachFlow(smooth_storm,community , rasdata, station, breach_point, breach_height, data_dir, date_int = 12)
| nbs/Lisle_BreachHydro.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # TEST A PERCEPTUAL PHENOMENON
import pandas as pd
import os
import numpy as np
import matplotlib.pyplot as plt
from math import *
# %matplotlib inline
# Load the Stroop experiment data from the notebook's working directory.
path = os.getcwd()
# BUG FIX: the original used path+"\stroopdata.csv". "\s" is an invalid escape
# sequence (a DeprecationWarning today, slated to become a SyntaxError), and a
# hard-coded backslash separator only works on Windows. os.path.join builds
# the path portably.
df = pd.read_csv(os.path.join(path, "stroopdata.csv"), sep=",")
df.head()
# ### 1. What is our independent variable? What is our dependent variable?
# > The independent variable is the word condition (congruent vs. incongruent) and the dependent variable is the response time.
# ### 2. What is an appropriate set of hypotheses for this task? Specify the null and alternative hypotheses based on what you think the researchers might be interested in. Justify your choices.
# >$H_0 $ ( μi - μc = 0 ) Null hypothesis is that there is no difference in the response time to name congruent and incongruent words <br>
# $H_1$ ( μi - μc ≠ 0 ) Alternate hypothesis is that there is difference in the response time to name congruent and incongruent words<br>
# >
# μi population mean from where incongruent word sample derived<br>
# μc population mean from where congruent word sample was derived<br>
#
# >The Dependent Samples t-Test is the appropriate statistical test as the same subjects are assigned two different conditions. The different conditions are dependent because, in theory, by doing the first test you have some practice doing it and you might have an unfair advantage due to this learning effect in doing the similar type of test second. In addition, we don't have any population parameters provided (so a z-test would not be appropriate here)
# ### 3. Report some descriptive statistics regarding this dataset. Include at least one measure of central tendency and at least one measure of variability.
#
c = df['Congruent']; i = df['Incongruent']
print(" The mean of congruent and incongruent time are {} and {}".format(c.mean(), i.mean()))
print(" The SD of congruent and incongruent time are {} and {}".format(c.std(), i.std()))
# ### 4. Provide one or two visualizations that show the distribution of the sample data. Write one or two sentences noting what you observe about the plot or plots.
# +
# Scatter plot of each subject's response time under both conditions.
df["sample"] = df.index+1  # 1-based subject id for the x-axis
x = df["sample"]
y_c = df["Congruent"]
y_i = df["Incongruent"]
fig = plt.figure()
fig.suptitle("Scatter plot of Test Subject Vs Time", fontsize = 14, fontweight = 'bold')
ax = fig.add_subplot(111)
ax.set_xlabel("Subject")
ax.set_ylabel("Time in seconds")
plt.scatter(x, y_c, s=10, c='b', marker='s', label = "Congruent")
plt.scatter(x, y_i, s=10, c='r', marker='s', label = "Incongruent")
plt.legend(loc="upper left")
plt.show()
# +
# Box plots comparing the two response-time distributions side by side.
box_plot = plt.figure()
ax = box_plot.add_subplot(111)
bp = ax.boxplot([y_c, y_i])
labels = ['Congruent', 'Incongruent']
ax.set_xticklabels(labels)
plt.show()
# -
# From the plot it is clear that the average incongruent response time is higher than the average congruent response time for the sample
# ### 5. Now, perform the statistical test and report your results. What is your confidence level or Type I error associated with your test? What is your conclusion regarding the hypotheses you set up? Did the results match up with your expectations? Hint: Think about what is being measured on each individual, and what statistic best captures how an individual reacts in each environment.
# >Confidence Level = 95% <br>
# t-critical value (2-sided test) -> from [t-table](https://s3.amazonaws.com/udacity-hosted-downloads/t-table.jpg)<br>
# df = n - 1 = 23<br>
# tcrit = +- 2.069<br>
# +
# Paired (dependent-samples) t-test computed by hand, step by step.
#number of subjects
n = len(df)
#point estimate: mean difference between the two conditions
PE = i.mean() - c.mean()
round(PE,2)
#s - sample standard deviation of the per-subject differences
#1. D = per-subject difference (Incongruent - Congruent)
df['D'] = df['Incongruent'] - df['Congruent']
#2. DFM = difference from the mean
#SQD = squared differences from the mean
DFM = df['D'] - df['D'].mean()
df['SQD'] = DFM*DFM
#3. SSD = sum of squared differences
SSD = df['SQD'].sum()
#4. v = variance = SSD/(n-1)  (Bessel-corrected sample variance)
v = SSD/(n-1)
#5. s = sqrt(v)
s = sqrt(v)
round(s,2)
#t-statistic for the paired test
#t = PE/(s/√n)
#PE = (μi - μc)
t = PE/(s/(sqrt(n)))
print("T-statistic value is {}".format(round(t,4)))
# -
# #### Conclusion
# >From the above it is clear that the t-statistic (8.0207) > t-critical (2.069). We reject the null hypothesis, as there is a significant difference in the time consumed by the test subjects to answer the congruent and incongruent sample questions.
#
# >From personal experience of taking the test, there was a significant difference in the time consumed to answer the two sets of questions.
#
# >The results match up to my expectations
| Stroop effect_final.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Module 1: Reading in and processing Word documents (Focus Group data)
# ## Sourcing packages
# - The textract package is used to read in the .docx files.
# - The gensim package is used to fit prelimnary LDA models on the data and filter out words which are common to the majority of the identified topics.
# - The nltk package is used to get an initial list of stopwords and for word lemmatization.
import textract
import numpy as np
import scipy
import gensim
import os
import pandas as pd
import re
import math
import nltk
from nltk.corpus import stopwords
from nltk.stem.wordnet import WordNetLemmatizer
#nltk.download('averaged_perceptron_tagger')
from collections import Counter
from matplotlib import pyplot as plt
from gensim import corpora, models
# %matplotlib inline
# ## Definition of FocusGroup class
# ### Instantiation
# - By giving the name of the word file. The word file should have the same format as the focus group documents, that is, each paragraph should be preceeded by a line specifying the name (e.g. Parent 1) of the currently speaking person.
# ### Attributes
# - raw_text: The raw text from the Word document.
# - parent_moderator_discussion: The part of raw_text which refers to the discussion between parents and moderators. The rationale for separating the parent_moderator_discussion and within_moderator_discussion attributes is that there was a case when there was a discussion only between the moderators after the discusion between parents and moderators.
# - text_including_parents: An np.array storing the discussion between the parents and moderators. Each element of the np.array contains a paragraph from the discussion.
# - talkers_including_parents: An np. array with an identical size as text_including_parents containing the respective talker's name (e.g. Parent 1).
# - within_moderator_discussion: The part of raw_text which refers to the discussion only moderators, if available. This part of the text was separated from the parent / moderator discussion part of the text by two blank lines.
# - text_only_moderators: An np.array storing the discussion only between the moderators, if available. Each element of the np.array contains a paragraph from the discussion.
# - talkers_only_moderators: An np. array with an identical size as text_only_moderators containing the respective talker's name (e.g. Moderator 1).
# - parent_list: List of unique parent participants.
# - moderator_list: List of unique moderator participants.
# ### Methods
# - get_participant_text(participant): gets the list of paragraphs which belong to the given participant.
class FocusGroup:
    """A parsed focus-group transcript loaded from a Word (.docx) document.

    The document alternates between speaker-tag paragraphs (e.g. "Parent 1:",
    "Moderator 2:") and the corresponding spoken text. An optional second
    section (separated by two blank lines) holds a moderators-only discussion.

    Attributes mirror the notebook documentation above: text_including_parents
    / talkers_including_parents for the main discussion, text_only_moderators /
    talkers_only_moderators for the optional moderator-only section, and
    parent_list / moderator_list for the unique participants.
    """

    # Role names that may appear in a speaker-tag paragraph such as "Parent 1:".
    _SPEAKER_ROLES = ('Parent', 'Moderator', 'Administrator', 'Speaker')

    @staticmethod
    def _is_speaker_tag(paragraph):
        """Return True if *paragraph* is a speaker tag rather than spoken text.

        Removing the " <digit>:" suffix must leave exactly one of the known
        role names. This replaces the four identical re.sub comparisons that
        were duplicated across every list comprehension in the original code.
        """
        return re.sub(r" [0-9]:", "", paragraph) in FocusGroup._SPEAKER_ROLES

    def __init__(self, filename):
        """Read Data/FocusGroups/<filename>.docx and split speakers from text.

        Note: str(textract.process(...)) yields the repr of a bytes object, so
        the paragraph separators below are the *literal* two-character
        sequences backslash-n, not real newline characters.
        """
        self.raw_text = str(textract.process('Data/FocusGroups/' + filename + ".docx")).replace('b\'', '').replace('\'', '')
        # First section: discussion between parents and moderators.
        self.parent_moderator_discussion = self.raw_text.split('\\n\\n\\n')[0].split('\\n\\n')
        self.text_including_parents = np.array(
            [paragraph for paragraph in self.parent_moderator_discussion
             if not self._is_speaker_tag(paragraph)])
        self.talkers_including_parents = np.array(
            [paragraph.replace(':', '') for paragraph in self.parent_moderator_discussion
             if self._is_speaker_tag(paragraph)])
        # Optional second section (separated by two blank lines in the source
        # document): discussion among the moderators only.
        if len(self.raw_text.split('\\n\\n\\n')) > 1:
            self.within_moderator_discussion = self.raw_text.split('\\n\\n\\n')[1].split('\\n\\n')
            self.text_only_moderators = np.array(
                [paragraph for paragraph in self.within_moderator_discussion
                 if not self._is_speaker_tag(paragraph)])
            self.talkers_only_moderators = np.array(
                [paragraph.replace(':', '') for paragraph in self.within_moderator_discussion
                 if self._is_speaker_tag(paragraph)])
        # Unique participant lists, grouped by role.
        self.parent_list = [participant for participant in set(self.talkers_including_parents) if 'Parent' in participant]
        self.moderator_list = [participant for participant in set(self.talkers_including_parents) if 'Moderator' in participant]

    def get_participant_text(self, participant):
        """Return the list of paragraphs spoken by *participant*.

        For moderators, paragraphs from the moderator-only section (when one
        exists) are appended after those from the main discussion. Returns
        None for participants that are neither parents nor moderators,
        preserving the original behaviour.
        """
        if 'Parent' in participant:
            mask = [member == participant for member in self.talkers_including_parents]
            return list(self.text_including_parents[mask])
        elif 'Moderator' in participant:
            mask = [member == participant for member in self.talkers_including_parents]
            text_from_parent_discussion = self.text_including_parents[mask]
            if len(self.raw_text.split('\\n\\n\\n')) == 1:
                return list(text_from_parent_discussion)
            else:
                mask = [member == participant for member in self.talkers_only_moderators]
                text_from_moderator_discussion = self.text_only_moderators[mask]
                return list(text_from_parent_discussion) + list(text_from_moderator_discussion)
# ## Functions to process text
# - The original list of stopwords was augmented by stopwords which are filler words (for example, okay) or are the consequences of the automated transcription (for example, inaudible), this extra list of stopwords was saved under the custom_stopwords list.
# - The WordNetLemmatizer() class of the nltk library was used for lemmatization.
# - The following data processing steps are performed by the text_processing_pipeline function:
# - Making the string lowercase
# - Removal of punctuation
# - Tokenization
# - Removal of text with less than min_token_count tokens
# - Removing stopwords
# - Lemmatization
# - Removing stopwords (also after the lemmatization)
# - The output of the text processing pipeline is a list with the elements, the first element is the processed, tokenized text and the second element is the original text with the purpose to help with the intepretation of the results.
# +
stopwords_list=stopwords.words('english')
custom_stopwords=['go','parent','say','0','yeah','would','okay','start','also','well','u','thank','inaudible','crosstalk','able','hear','actually','hi','oh','definitely','part','anything','sure','anyone','yes','thanks','everything','end','everybody','tand','administrator','whatever','sound','ti','moderator','though','mute','speak','silence','finish','bye','audio']
stopwords_list=stopwords_list+custom_stopwords
remove_stopwords_function=lambda tokenized_text, stopwords: [word for word in tokenized_text if word not in stopwords]
lemmatizer_instance=WordNetLemmatizer()
pos_tags_lemmatize_mapping_dict={'N': 'n', 'V': 'v', 'J': 'a', 'R': 'r'}
def pos_mapping_function(pos_tag, dictionary=pos_tags_lemmatize_mapping_dict):
if pos_tag[0] in ['N', 'V', 'J', 'R']:
return dictionary[pos_tag[0]]
else:
return 'n'
def lemmatizer_function(text, dictionary=pos_tags_lemmatize_mapping_dict, pos_mapping_function=pos_mapping_function,
                        lemmatizer=lemmatizer_instance):
    """POS-aware lemmatization of a tokenized text.

    Each token is POS-tagged with nltk.pos_tag, the tag is mapped to a
    WordNet POS code, and the token is lemmatized with that code.
    Returns the list of lemmatized tokens.

    Note: the `dictionary` parameter is kept for interface compatibility;
    the POS mapping itself is done by `pos_mapping_function` (whose own
    default uses the same dictionary).
    """
    pos_tags_for_lemmatize = [(word, pos_mapping_function(pos_tag)) for word, pos_tag in nltk.pos_tag(text)]
    # BUG FIX: use the injected `lemmatizer` parameter; the original always
    # used the global `lemmatizer_instance`, silently ignoring the argument.
    pos_tags_lemmatized = [lemmatizer.lemmatize(word, pos=pos_tag) for word, pos_tag in pos_tags_for_lemmatize]
    return pos_tags_lemmatized
def text_processing_pipeline(text_list,additional_stopwords, min_token_count=1, stopwords_list=stopwords_list,
                             lemmatizer_function=lemmatizer_function, dictionary=pos_tags_lemmatize_mapping_dict,
                             pos_mapping_function=pos_mapping_function, lemmatizer=lemmatizer_instance):
    """Lowercase, de-punctuate, tokenize, filter, de-stopword and lemmatize texts.

    Returns a pair: (processed tokenized texts, matching original raw texts).
    Texts are kept only when their token count is STRICTLY greater than
    ``min_token_count``.  Stopwords are removed twice, because lemmatization
    can map words back onto stopword forms.
    """
    stopwords_list = stopwords_list + additional_stopwords
    # Lowercase, replace punctuation with spaces, and split into tokens.
    tokenized = [re.sub(r"[^a-zA-Z0-9]", " ", text.lower()).split() for text in text_list]
    # Keep only sufficiently long texts, remembering the raw originals so the
    # results can be interpreted later.
    kept_pairs = [(tokens, original) for tokens, original in zip(tokenized, text_list)
                  if len(tokens) > min_token_count]
    filtering_original_text = [original for _, original in kept_pairs]
    processed = [remove_stopwords_function(tokens, stopwords_list) for tokens, _ in kept_pairs]
    processed = [lemmatizer_function(tokens) for tokens in processed]
    processed = [remove_stopwords_function(tokens, stopwords_list) for tokens in processed]
    return processed, filtering_original_text
# -
# ## Process the word data
# - Loop over the fifteen Word documents with the text processing function and save the results in a list with 15 elements.
# - The below code cell contains four lists of additional stopwords for Gaming group / Low PIU group / Media group and Social group, respectively, this set of additional stopwords was generated by Module 2 by iteratively running the gensim LDA algorithm and excluding the words which were included in at least 3 of the 5 topics. The purpose of this data processing step is to avoid having the same set of words in all topics.
# - The min_token_count of the text_processing_pipeline function was set to 60, so only paragraphs with at least 60 tokens were kept in the dataset.
# The fifteen focus-group transcripts: four Gaming, three Low-PIU, four Media
# and four Social groups.
file_list = ['Gaming_Group1', 'Gaming_Group2', 'Gaming_Group3', 'Gaming_Group4',
             'LowPIU_Group1', 'LowPIU_Group2', 'LowPIU_Group3',
             'Media_Group1', 'Media_Group2', 'Media_Group3', 'Media_Group4',
             'Social_Group1', 'Social_Group2', 'Social_Group3', 'Social_Group4']
# Number of groups per category, obtained by stripping the trailing group
# digit and counting duplicates (Counter preserves first-seen order, so the
# counts come out in Gaming / LowPIU / Media / Social order).
additional_stopword_counts = list(Counter(re.sub('[0-9]', '', file) for file in file_list).values())
# Per-category extra stopwords, derived iteratively in Module 2 by excluding
# words that appeared in at least 3 of the 5 LDA topics.
Gaming_group_stopwords=['like', 'get', 'school', 'hour', 'day', 'even', 'think', 'thing', 'way', 'know', 'year', 'week', 'really', 'one',
                        'kid', 'game', 'use', 'time', 'want', 'play', 'much', 'back']
Low_PIU_group_stopwords=['school', 'like', 'time', 'get', 'think', 'kid', 'really',
                         'thing', '00', 'technology', 'year', 'child', 'back', 'lot',
                         'even', 'know', 'want', 'old', 'one']
Media_group_stopwords=['like', 'thing', 'get', 'really', 'kid', 'time', 'want',
                       'school', 'think', 'know', 'one', 'use',
                       'year', 'much', 'back', 'work', 'person', 'pandemic',
                       'see', 'lot', 'good', 'little', 'day', 'old']
Social_group_stopwords=['like', 'get', 'think', 'know', 'thing', 'time', 'school',
                        'really', 'child', 'see', 'want',
                        'kid', 'one', 'lot', 'even']
additional_stopwords_list = [Gaming_group_stopwords, Low_PIU_group_stopwords, Media_group_stopwords, Social_group_stopwords]
# Repeat each category's stopword list once per group in that category so it
# lines up one-to-one with file_list.
expanded_stopwords = []
for count, stopword_list in zip(additional_stopword_counts, additional_stopwords_list):
    expanded_stopwords.extend([stopword_list] * count)
additional_stopwords_list = expanded_stopwords
# Load every transcript and run the processing pipeline, keeping only
# paragraphs with more than 60 tokens.
all_focusgroup_text = [FocusGroup(focus_group_file) for focus_group_file in file_list]
all_focusgroup_processed_text = [
    text_processing_pipeline(focus_group.text_including_parents, additional_stopword_list, min_token_count=60)
    for focus_group, additional_stopword_list in zip(all_focusgroup_text, additional_stopwords_list)
]
len(all_focusgroup_processed_text)
| Team_5_Final_notebook.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# <script async src="https://www.googletagmanager.com/gtag/js?id=UA-59152712-8"></script>
# <script>
# window.dataLayer = window.dataLayer || [];
# function gtag(){dataLayer.push(arguments);}
# gtag('js', new Date());
#
# gtag('config', 'UA-59152712-8');
# </script>
#
# # MaxwellEvol: Solving Maxwell's Equations in the Einstein Toolkit with the Method of Lines
#
# ## Author: <NAME>
# ### Formatting improvements courtesy <NAME>
#
# [comment]: <> (Abstract: TODO)
#
# [comment]: <> (Notebook Status and Validation Notes: TODO)
#
# ### NRPy+ Source Code for this module: [Maxwell/MaxwellCartesian_Evol.py](../edit/Maxwell/MaxwellCartesian_Evol.py) [\[tutorial\]](Tutorial-MaxwellCartesian.ipynb) Constructs Maxwell's equations and initial data as SymPy expressions
#
# ## Introduction:
# This module focuses on using the equations developed in the [Tutorial-MaxwellCartesian](Tutorial-MaxwellCartesian.ipynb) tutorial notebook to build an Einstein Toolkit (ETK) thorn to solve Maxwell's equations in Cartesian coordinates. This tutorial will focus on implementing the time evolution aspects; the next will construct the thorn that will set up the initial data to be evolved.
#
# When interfaced properly with the ETK, this module will propagate the initial data for $E_i$, $A_i$, and $\psi$ ( and $\Gamma$, if we so choose), defined in the next tutorial, forward in time by integrating the equations for $\partial_t E_i$, $\partial_t A_i$ and $\partial_t \psi$ (and possibly $\partial_t \Gamma$) subject to spatial boundary conditions. The time evolution itself is handled by the $\text{MoL}$ (Method of Lines) thorn in the $\text{CactusNumerical}$ arrangement, and the boundary conditions by the $\text{Boundary}$ thorn in the $\text{CactusBase}$ arrangement.
#
# Similar to the Maxwell initial data module, we will construct the MaxwellEvol module in two steps.
#
# 1. Call on NRPy+ to convert the SymPy expressions for the evolution equations into one C-code kernel.
# 1. Write the C code and linkages to the Einstein Toolkit infrastructure (i.e., the .ccl files) to complete this Einstein Toolkit module.
# <a id='toc'></a>
#
# # Table of Contents
# $$\label{toc}$$
#
# 1. [Step 1](#initializenrpy): Initialize core Python/NRPy+ modules
# 1. [Step 2](#etk): Interfacing with the Einstein Toolkit
# 1. [Step 2.a](#etkc): Constructing the Einstein Toolkit C-code calling functions that include the C code kernels
# 1. [Step 2.b](#cclfiles): CCL files - Define how this module interacts and interfaces with the larger Einstein Toolkit infrastructure
# 1. [Step 2.c](#etk_list): Add the C file to Einstein Toolkit compilation list
# 1. [Step 3](#code_validation): Code Validation (**To be performed**)
# 1. [Step 4](#latex_pdf_output): Output this notebook to $\LaTeX$-formatted PDF file
#
#
# <a id='initializenrpy'></a>
#
# # Step 1: Initialize core Python/NRPy+ modules \[Back to [top](#toc)\]
# $$\label{initializenrpy}$$
#
# Let's start by importing all the needed modules from Python/NRPy+:
# +
# Step 1a: Import needed NRPy+ core modules:
import NRPy_param_funcs as par
import indexedexp as ixp
import grid as gri
import finite_difference as fin
from outputC import *
import loop
# Step 1b: This is an Einstein Toolkit (ETK) thorn. Here we
# tell NRPy+ that gridfunction memory access will
# therefore be in the "ETK" style.
# Tell NRPy+ to emit gridfunction memory accesses in the Einstein Toolkit
# (Cactus) style rather than NRPy+'s standalone style.
par.set_parval_from_str("grid::GridFuncMemAccess","ETK")
#Set the spatial dimension parameter to 3.
par.set_parval_from_str("grid::DIM", 3)
DIM = par.parval_from_str("grid::DIM")
# Step 1c: Call the MaxwellCartesian_Evol() function from within the
# Maxwell/MaxwellCartesian_Evol.py module.
import Maxwell.MaxwellCartesian_Evol as mwrhs
# Build the symbolic right-hand sides for System I of Maxwell's equations.
par.set_parval_from_str("System_to_use","System_I")
mwrhs.MaxwellCartesian_Evol()
# Second-order centered finite differencing for all spatial derivatives.
par.set_parval_from_str("finite_difference::FD_CENTDERIVS_ORDER",2)
# Step 2: Register gridfunctions so they can be written to by NRPy.
# System I:
AIrhsD = ixp.register_gridfunctions_for_single_rank1("EVOL","AIrhsD")
EIrhsD = ixp.register_gridfunctions_for_single_rank1("EVOL","EIrhsD")
psiIrhs = gri.register_gridfunctions("EVOL","psiIrhs")
# Step 3: Set the rhs gridfunctions to their variables
# defined by MaxwellEvol().
# (This rebinds the Python names to the symbolic RHS expressions; the
# registrations above fixed the gridfunction names used in the C output.)
for i in range(DIM):
    AIrhsD[i] = mwrhs.ArhsD[i]
    EIrhsD[i] = mwrhs.ErhsD[i]
psiIrhs = mwrhs.psi_rhs
# One lhrh (left-hand-side / right-hand-side) pair per output gridfunction.
Maxwell_Evol_to_printI = [\
    lhrh(lhs=gri.gfaccess("out_gfs","AIrhsD0"),rhs=AIrhsD[0]),\
    lhrh(lhs=gri.gfaccess("out_gfs","AIrhsD1"),rhs=AIrhsD[1]),\
    lhrh(lhs=gri.gfaccess("out_gfs","AIrhsD2"),rhs=AIrhsD[2]),\
    lhrh(lhs=gri.gfaccess("out_gfs","EIrhsD0"),rhs=EIrhsD[0]),\
    lhrh(lhs=gri.gfaccess("out_gfs","EIrhsD1"),rhs=EIrhsD[1]),\
    lhrh(lhs=gri.gfaccess("out_gfs","EIrhsD2"),rhs=EIrhsD[2]),\
    lhrh(lhs=gri.gfaccess("out_gfs","psiIrhs"),rhs=psiIrhs)]
# Set outCverbose=False to avoid enormous file output.
# NOTE(review): the chained .replace() calls rename EVERY occurrence of the
# substrings "AD"/"ED"/"psi" in the generated C code (e.g. "psiGF" becomes
# "psiIGF") — confirm no unintended identifiers contain these substrings.
Maxwell_Evol_CcodeKernelI = fin.FD_outputC("returnstring",Maxwell_Evol_to_printI,
                                           params="outCverbose=False").replace("AD","AID")\
                                                                      .replace("ED","EID")\
                                                                      .replace("psi","psiI")
# For debugging only:
#fin.FD_outputC("stdout",Maxwell_Evol_to_printI)
# Empty NRPy+'s global list of registered gridfunctions so the System II
# gridfunctions can be registered from a clean slate.
gri.glb_gridfcs_list = []
# Step 1c: Call the MaxwellCartesian_Evol() function from within the
# Maxwell/MaxwellCartesian_Evol.py module.
# Rebuild the symbolic right-hand sides, this time for System II (which
# additionally evolves Gamma).
par.set_parval_from_str("System_to_use","System_II")
mwrhs.MaxwellCartesian_Evol()
par.set_parval_from_str("finite_difference::FD_CENTDERIVS_ORDER",2)
# Step 2: Register gridfunctions so they can be written to by NRPy.
# System II:
AIIrhsD = ixp.register_gridfunctions_for_single_rank1("EVOL","AIIrhsD")
EIIrhsD = ixp.register_gridfunctions_for_single_rank1("EVOL","EIIrhsD")
psiIIrhs = gri.register_gridfunctions("EVOL","psiIIrhs")
Gammarhs = gri.register_gridfunctions("EVOL","Gammarhs")
# Step 3: Set the rhs gridfunctions to their variables
# defined by MaxwellEvol().
for i in range(DIM):
    AIIrhsD[i] = mwrhs.ArhsD[i]
    EIIrhsD[i] = mwrhs.ErhsD[i]
psiIIrhs = mwrhs.psi_rhs
Gammarhs = mwrhs.Gamma_rhs
# Step 4: Create the C code output kernel.
Maxwell_Evol_to_printII = [\
    lhrh(lhs=gri.gfaccess("out_gfs","AIIrhsD0"),rhs=AIIrhsD[0]),\
    lhrh(lhs=gri.gfaccess("out_gfs","AIIrhsD1"),rhs=AIIrhsD[1]),\
    lhrh(lhs=gri.gfaccess("out_gfs","AIIrhsD2"),rhs=AIIrhsD[2]),\
    lhrh(lhs=gri.gfaccess("out_gfs","EIIrhsD0"),rhs=EIIrhsD[0]),\
    lhrh(lhs=gri.gfaccess("out_gfs","EIIrhsD1"),rhs=EIIrhsD[1]),\
    lhrh(lhs=gri.gfaccess("out_gfs","EIIrhsD2"),rhs=EIIrhsD[2]),\
    lhrh(lhs=gri.gfaccess("out_gfs","psiIIrhs"),rhs=psiIIrhs),\
    lhrh(lhs=gri.gfaccess("out_gfs","Gammarhs"),rhs=Gammarhs)]
# NOTE(review): this kernel replaces "psiGF" -> "psiIIGF" (narrower than the
# System I "psi" -> "psiI" replace) and does NOT pass outCverbose=False,
# unlike System I — confirm both differences are intentional.
Maxwell_Evol_CcodeKernelII = fin.FD_outputC("returnstring",Maxwell_Evol_to_printII).replace("AD","AIID")\
                                                                                   .replace("ED","EIID")\
                                                                                   .replace("psiGF","psiIIGF")
# Wrap both kernels in a single loop over the grid interior (ghost zones
# excluded on every face), OpenMP-parallelized over the outermost i2 loop.
Maxwell_Evol_looped = loop.loop(["i2","i1","i0"],["cctk_nghostzones[2]","cctk_nghostzones[1]","cctk_nghostzones[0]"],\
                                ["cctk_lsh[2]-cctk_nghostzones[2]","cctk_lsh[1]-cctk_nghostzones[1]",\
                                 "cctk_lsh[0]-cctk_nghostzones[0]"],\
                                ["1","1","1"],["#pragma omp parallel for","",""],"",\
                                (Maxwell_Evol_CcodeKernelI+Maxwell_Evol_CcodeKernelII).replace("time","cctk_time"))
# Step 5: Create directories for the thorn if they don't exist.
# !mkdir MaxwellEvol 2>/dev/null # 2>/dev/null: Don't throw an error if the directory already exists.
# !mkdir MaxwellEvol/src 2>/dev/null # 2>/dev/null: Don't throw an error if the directory already exists.
# Step 6: Write the C code kernel to file.
with open("MaxwellEvol/src/Maxwell_Evol.h", "w") as file:
    file.write(str(Maxwell_Evol_looped))
# Record the chosen finite-difference order where the C code can pick it up.
with open("MaxwellEvol/src/NRPy_params.h", "w") as file:
    file.write("#define FD_CENTDERIVS_ORDER "+str(par.parval_from_str("finite_difference::FD_CENTDERIVS_ORDER"))+"\n")
# Step 7: Create a C code kernel to evaluate the constraint violation
Cviolation_CcodeKernel = fin.FD_outputC("returnstring",[lhrh(lhs=gri.gfaccess("out_gfs","Cviolation"),rhs=mwrhs.Cviolation)])
# Emit the same kernel twice ("geminated"), once per system, renaming the
# E-field inputs and the Cviolation output for each system.
Cviolation_CcodeKernel_geminated = (Cviolation_CcodeKernel.replace("ED","EID").replace("Cviolation","CviolationI") + \
                                    Cviolation_CcodeKernel.replace("ED","EIID").replace("Cviolation","CviolationII"))
# Loop over all points except a single-point layer on each face.
Cviolation_CcodeKernel_looped = loop.loop(["i2","i1","i0"],["1","1","1"],["cctk_lsh[2]-1","cctk_lsh[1]-1","cctk_lsh[0]-1"],\
                                          ["1","1","1"],["#pragma omp parallel for","",""],"",\
                                          Cviolation_CcodeKernel_geminated.replace("time","cctk_time"))
with open("MaxwellEvol/src/Constraint_violation.h", "w") as file:
    file.write(str(Cviolation_CcodeKernel_looped))
# -
# <a id='etk'></a>
#
# # Step 2: Interfacing with the Einstein Toolkit \[Back to [top](#toc)\]
# $$\label{etk}$$
#
#
# <a id='etkc'></a>
#
# ## Step 2.a: Constructing the Einstein Toolkit C-code calling functions that include the C code kernels \[Back to [top](#toc)\]
# $$\label{etkc}$$
#
# Now that we have generated the C code kernel `Maxwell_Evol.h` and the parameters file `NRPy_params.h`, we will need to write C code to make use of these files. To do this, we can simply follow the example within the [IDScalarWaveNRPy tutorial notebook](Tutorial-ETK_thorn-IDScalarWaveNRPy.ipynb). Functions defined by these files will be called by the Einstein Toolkit scheduler (specified in schedule.ccl below).
# +
# %%writefile MaxwellEvol/src/MaxwellEvol.c
#include <math.h>
#include <stdio.h>
#include "cctk.h"
#include "cctk_Arguments.h"
#include "cctk_Parameters.h"
#include "Symmetry.h"
#include "NRPy_params.h"
/* Evaluate the constraint-violation diagnostic for Systems I and II.
 * Scheduled in CCTK_ANALYSIS; reads the E-fields and the ADMBase metric
 * and writes CviolationIGF / CviolationIIGF (interior points only). */
void MaxwellEvol_calc_constraint_violation(CCTK_ARGUMENTS) {
DECLARE_CCTK_ARGUMENTS;
DECLARE_CCTK_PARAMETERS;
/* Inverse grid spacings used by the generated finite-difference kernel. */
const CCTK_REAL invdx0 = 1.0 / (CCTK_DELTA_SPACE(0));
const CCTK_REAL invdx1 = 1.0 / (CCTK_DELTA_SPACE(1));
const CCTK_REAL invdx2 = 1.0 / (CCTK_DELTA_SPACE(2));
/* Alias ADMBase's metric gridfunctions to the gammaDD## names the
 * NRPy+-generated kernel expects. */
const CCTK_REAL *gammaDD00GF = gxx;
const CCTK_REAL *gammaDD01GF = gxy;
const CCTK_REAL *gammaDD02GF = gxz;
const CCTK_REAL *gammaDD11GF = gyy;
const CCTK_REAL *gammaDD12GF = gyz;
const CCTK_REAL *gammaDD22GF = gzz;
/* The NRPy+-generated loop body (both systems' kernels). */
#include "Constraint_violation.h"
}
/* Evaluate the Method-of-Lines right-hand sides for both systems.
 * Scheduled in MoL_CalcRHS; reads A_i, E_i, psi (and Gamma for System II)
 * plus the ADMBase metric, and writes all *rhs gridfunctions in the
 * interior.  Boundary values are filled by the Boundary thorn afterwards. */
void MaxwellEvol_set_rhs(CCTK_ARGUMENTS) {
DECLARE_CCTK_ARGUMENTS;
DECLARE_CCTK_PARAMETERS;
/* Inverse grid spacings used by the generated finite-difference kernel. */
const CCTK_REAL invdx0 = 1.0 / (CCTK_DELTA_SPACE(0));
const CCTK_REAL invdx1 = 1.0 / (CCTK_DELTA_SPACE(1));
const CCTK_REAL invdx2 = 1.0 / (CCTK_DELTA_SPACE(2));
/* Alias ADMBase's metric gridfunctions to the gammaDD## names the
 * NRPy+-generated kernel expects. */
const CCTK_REAL *gammaDD00GF = gxx;
const CCTK_REAL *gammaDD01GF = gxy;
const CCTK_REAL *gammaDD02GF = gxz;
const CCTK_REAL *gammaDD11GF = gyy;
const CCTK_REAL *gammaDD12GF = gyz;
const CCTK_REAL *gammaDD22GF = gzz;
/* The NRPy+-generated loop body (System I and System II RHS kernels). */
#include "Maxwell_Evol.h"
}
/* Boundary Condition code adapted from WaveToyC thorn in ETK, implementing built-in
* ETK BC functionality
*/
/* Select the runtime-chosen boundary condition for every evolved
 * gridfunction.  The "bound" parameter is translated into a Boundary-thorn
 * BC name; "zero" maps to the "scalar" BC with its default value 0.
 * Improvement over the original: the fifteen copy-pasted per-variable
 * if-blocks are replaced by one data-driven loop (identical variables,
 * identical order, identical arguments). */
void MaxwellEvol_SelectBCs(CCTK_ARGUMENTS)
{
  DECLARE_CCTK_ARGUMENTS;
  DECLARE_CCTK_PARAMETERS;

  const char *bctype = NULL;
  if (CCTK_EQUALS(bound,"flat") || CCTK_EQUALS(bound,"static") ||
      CCTK_EQUALS(bound,"radiation") || CCTK_EQUALS(bound,"robin") ||
      CCTK_EQUALS(bound,"none"))
  {
    bctype = bound;
  }
  else if (CCTK_EQUALS(bound,"zero"))
  {
    bctype = "scalar";
  }

  /* All 15 evolved gridfunctions (System I: 7, System II: 8). */
  static const char *const evolved_gfs[] = {
    "MaxwellEvol::AID0GF",  "MaxwellEvol::AID1GF",  "MaxwellEvol::AID2GF",
    "MaxwellEvol::EID0GF",  "MaxwellEvol::EID1GF",  "MaxwellEvol::EID2GF",
    "MaxwellEvol::psiIGF",
    "MaxwellEvol::AIID0GF", "MaxwellEvol::AIID1GF", "MaxwellEvol::AIID2GF",
    "MaxwellEvol::EIID0GF", "MaxwellEvol::EIID1GF", "MaxwellEvol::EIID2GF",
    "MaxwellEvol::psiIIGF",
    "MaxwellEvol::GammaGF"
  };
  const int num_evolved_gfs = (int)(sizeof(evolved_gfs)/sizeof(evolved_gfs[0]));
  for (int i = 0; i < num_evolved_gfs; i++)
  {
    /* Uses all default arguments, so invalid table handle -1 can be passed */
    if (bctype && Boundary_SelectVarForBC (cctkGH, CCTK_ALL_FACES, 1, -1,
                                           evolved_gfs[i], bctype) < 0)
    {
      CCTK_WARN (0, "MaxwellEvol_Boundaries: Error selecting boundary condition");
    }
  }
}
void MaxwellEvol_InitSymBound(CCTK_ARGUMENTS)
{
DECLARE_CCTK_ARGUMENTS;
int sym[3];
sym[0] = 1;
sym[1] = 1;
sym[2] = 1;
SetCartSymVN(cctkGH, sym,"MaxwellEvol::AID0GF");
SetCartSymVN(cctkGH, sym,"MaxwellEvol::AID1GF");
SetCartSymVN(cctkGH, sym,"MaxwellEvol::AID2GF");
SetCartSymVN(cctkGH, sym,"MaxwellEvol::EID0GF");
SetCartSymVN(cctkGH, sym,"MaxwellEvol::EID1GF");
SetCartSymVN(cctkGH, sym,"MaxwellEvol::EID2GF");
SetCartSymVN(cctkGH, sym,"MaxwellEvol::psiIGF");
SetCartSymVN(cctkGH, sym,"MaxwellEvol::CviolationIGF");
SetCartSymVN(cctkGH, sym,"MaxwellEvol::AIID0GF");
SetCartSymVN(cctkGH, sym,"MaxwellEvol::AIID1GF");
SetCartSymVN(cctkGH, sym,"MaxwellEvol::AIID2GF");
SetCartSymVN(cctkGH, sym,"MaxwellEvol::EIID0GF");
SetCartSymVN(cctkGH, sym,"MaxwellEvol::EIID1GF");
SetCartSymVN(cctkGH, sym,"MaxwellEvol::EIID2GF");
SetCartSymVN(cctkGH, sym,"MaxwellEvol::psiIIGF");
SetCartSymVN(cctkGH, sym,"MaxwellEvol::GammaGF");
SetCartSymVN(cctkGH, sym,"MaxwellEvol::CviolationIIGF");
return;
}
void MaxwellEvol_RegisterVars(CCTK_ARGUMENTS)
{
DECLARE_CCTK_ARGUMENTS;
DECLARE_CCTK_PARAMETERS;
CCTK_INT ierr CCTK_ATTRIBUTE_UNUSED = 0;
/* Register all the evolved grid functions with MoL */
ierr += MoLRegisterEvolved(CCTK_VarIndex("MaxwellEvol::AID0GF"), CCTK_VarIndex("MaxwellEvol::AIrhsD0GF"));
ierr += MoLRegisterEvolved(CCTK_VarIndex("MaxwellEvol::AID1GF"), CCTK_VarIndex("MaxwellEvol::AIrhsD1GF"));
ierr += MoLRegisterEvolved(CCTK_VarIndex("MaxwellEvol::AID2GF"), CCTK_VarIndex("MaxwellEvol::AIrhsD2GF"));
ierr += MoLRegisterEvolved(CCTK_VarIndex("MaxwellEvol::EID0GF"), CCTK_VarIndex("MaxwellEvol::EIrhsD0GF"));
ierr += MoLRegisterEvolved(CCTK_VarIndex("MaxwellEvol::EID1GF"), CCTK_VarIndex("MaxwellEvol::EIrhsD1GF"));
ierr += MoLRegisterEvolved(CCTK_VarIndex("MaxwellEvol::EID2GF"), CCTK_VarIndex("MaxwellEvol::EIrhsD2GF"));
ierr += MoLRegisterEvolved(CCTK_VarIndex("MaxwellEvol::psiIGF"), CCTK_VarIndex("MaxwellEvol::psiIrhsGF"));
ierr += MoLRegisterEvolved(CCTK_VarIndex("MaxwellEvol::AIID0GF"), CCTK_VarIndex("MaxwellEvol::AIIrhsD0GF"));
ierr += MoLRegisterEvolved(CCTK_VarIndex("MaxwellEvol::AIID1GF"), CCTK_VarIndex("MaxwellEvol::AIIrhsD1GF"));
ierr += MoLRegisterEvolved(CCTK_VarIndex("MaxwellEvol::AIID2GF"), CCTK_VarIndex("MaxwellEvol::AIIrhsD2GF"));
ierr += MoLRegisterEvolved(CCTK_VarIndex("MaxwellEvol::EIID0GF"), CCTK_VarIndex("MaxwellEvol::EIIrhsD0GF"));
ierr += MoLRegisterEvolved(CCTK_VarIndex("MaxwellEvol::EIID1GF"), CCTK_VarIndex("MaxwellEvol::EIIrhsD1GF"));
ierr += MoLRegisterEvolved(CCTK_VarIndex("MaxwellEvol::EIID2GF"), CCTK_VarIndex("MaxwellEvol::EIIrhsD2GF"));
ierr += MoLRegisterEvolved(CCTK_VarIndex("MaxwellEvol::psiIIGF"), CCTK_VarIndex("MaxwellEvol::psiIIrhsGF"));
/* Register all the evolved Array functions with MoL */
return;
}
# -
# <a id='cclfiles'></a>
#
# ## Step 2.b: CCL files - Define how this module interacts and interfaces with the larger Einstein Toolkit infrastructure \[Back to [top](#toc)\]
# $$\label{cclfiles}$$
#
# Writing a module ("thorn") within the Einstein Toolkit requires that three "ccl" files be constructed, all in the root directory of the thorn:
#
# 1. `interface.ccl`: defines the gridfunction groups needed, and provides keywords denoting what this thorn provides and what it should inherit from other thorns. This file governs the interaction between this thorn and others; more information can be found in the [official Einstein Toolkit documentation](http://einsteintoolkit.org/usersguide/UsersGuidech12.html#x17-178000D2.2).
# With "implements", we give our thorn its unique name. By "inheriting" other thorns, we tell the Toolkit that we will rely on variables that exist within those functions. Then, we tell the toolkit that we want the gridfunctions $A_i$, $E_i$, $\psi$, and $\Gamma$ to be visible to other thorns by using the keyword "public".
# +
# %%writefile MaxwellEvol/interface.ccl
# Thorn identity and dependencies: we read the ADMBase metric, use the
# Boundary thorn's BC machinery, and the grid coordinates.
implements: MaxwellEvol
inherits: admbase Boundary grid
USES INCLUDE: loopcontrol.h
USES INCLUDE: Symmetry.h
USES INCLUDE: Boundary.h
# Aliased functions provided by MoL and the Boundary thorn.
CCTK_INT FUNCTION MoLRegisterEvolved(CCTK_INT IN EvolvedIndex, CCTK_INT IN RHSIndex)
USES FUNCTION MoLRegisterEvolved
CCTK_INT FUNCTION GetBoundarySpecification(CCTK_INT IN size, CCTK_INT OUT ARRAY nboundaryzones, CCTK_INT OUT ARRAY is_internal, CCTK_INT OUT ARRAY is_staggered, CCTK_INT OUT ARRAY shiftout)
USES FUNCTION GetBoundarySpecification
CCTK_INT FUNCTION SymmetryTableHandleForGrid(CCTK_POINTER_TO_CONST IN cctkGH)
USES FUNCTION SymmetryTableHandleForGrid
CCTK_INT FUNCTION Boundary_SelectGroupForBC(CCTK_POINTER_TO_CONST IN GH, CCTK_INT IN faces, CCTK_INT IN boundary_width, CCTK_INT IN table_handle, CCTK_STRING IN group_name, CCTK_STRING IN bc_name)
USES FUNCTION Boundary_SelectGroupForBC
CCTK_INT FUNCTION Boundary_SelectVarForBC(CCTK_POINTER_TO_CONST IN GH, CCTK_INT IN faces, CCTK_INT IN boundary_width, CCTK_INT IN table_handle, CCTK_STRING IN var_name, CCTK_STRING IN bc_name)
USES FUNCTION Boundary_SelectVarForBC
# Gridfunction groups: the "_rhs" groups hold the MoL right-hand sides, the
# plain groups hold the evolved fields, and the "_constraint_violation"
# groups hold the analysis diagnostics.
# NOTE(review): every description string below reads "The evolved scalar
# fields", including for the RHS groups — consider distinguishing them.
public:
cctk_real system_I_rhs type = GF Timelevels=3 tags='tensortypealias="Scalar"'
{
AIrhsD0GF,AIrhsD1GF,AIrhsD2GF,EIrhsD0GF,EIrhsD1GF,EIrhsD2GF,psiIrhsGF
} "The evolved scalar fields"
public:
cctk_real system_II_rhs type = GF Timelevels=3 tags='tensortypealias="Scalar"'
{
AIIrhsD0GF,AIIrhsD1GF,AIIrhsD2GF,EIIrhsD0GF,EIIrhsD1GF,EIIrhsD2GF,psiIIrhsGF,GammarhsGF
} "The evolved scalar fields"
public:
cctk_real system_I type = GF Timelevels=3 tags='tensortypealias="Scalar"'
{
AID0GF,AID1GF,AID2GF,EID0GF,EID1GF,EID2GF,psiIGF
} "The evolved scalar fields"
public:
cctk_real system_II type = GF Timelevels=3 tags='tensortypealias="Scalar"'
{
AIID0GF,AIID1GF,AIID2GF,EIID0GF,EIID1GF,EIID2GF,psiIIGF,GammaGF
} "The evolved scalar fields"
public:
cctk_real system_I_constraint_violation type = GF Timelevels=3 tags='tensortypealias="Scalar"'
{
CviolationIGF
} "The constraint violation for system I"
public:
cctk_real system_II_constraint_violation type = GF Timelevels=3 tags='tensortypealias="Scalar"'
{
CviolationIIGF
} "The constraint violation for system II"
# -
# 2. `param.ccl`: specifies free parameters within the thorn, enabling them to be set at runtime. It is required to provide allowed ranges and default values for each parameter. More information on this file's syntax can be found in the [official Einstein Toolkit documentation](http://einsteintoolkit.org/usersguide/UsersGuidech12.html#x17-183000D2.3). A number of parameters are defined, and more parameters can be easily added in later versions. We also set the number of timelevels we will store in memory.
# +
# %%writefile MaxwellEvol/param.ccl
# Contribute to MoL's accumulator parameters: this thorn evolves 15
# gridfunctions (7 in System I + 8 in System II) and no grid arrays.
shares: MethodOfLines
USES CCTK_INT MoL_Num_Evolved_Vars
USES CCTK_INT MoL_Num_ArrayEvolved_Vars
restricted:
# NOTE(review): "MethodofLines" (lower-case "o") differs from the
# "MethodOfLines" spelling in the shares: line above — confirm the
# accumulator base name is resolved case-insensitively.
CCTK_INT MaxwellEvol_MaxNumEvolvedVars "Number of evolved variables used by this thorn" ACCUMULATOR-BASE=MethodofLines::MoL_Num_Evolved_Vars STEERABLE=RECOVER
{
15:15 :: "Number of evolved variables used by this thorn"
} 15
restricted:
# NOTE(review): the name "SimpleWave_MaxNumArrayEvolvedVars" looks like a
# leftover from a SimpleWave/WaveToy example thorn — consider renaming it to
# MaxwellEvol_MaxNumArrayEvolvedVars (and updating any parameter files).
CCTK_INT SimpleWave_MaxNumArrayEvolvedVars "Number of Array evolved variables used by this thorn" ACCUMULATOR-BASE=MethodofLines::MoL_Num_ArrayEvolved_Vars STEERABLE=RECOVER
{
0:0 :: "Number of Array evolved variables used by this thorn"
} 0
restricted:
# Boundary condition applied to all evolved gridfunctions in MoL_PostStep.
KEYWORD bound "Type of boundary condition to use"
{
"flat" :: "Flat (von Neumann, n grad phi = 0) boundary condition"
"static" :: "Static (Dirichlet, dphi/dt=0) boundary condition"
"radiation" :: "Radiation boundary condition"
"robin" :: "Robin (phi(r) = C/r) boundary condition"
"zero" :: "Zero (Dirichlet, phi=0) boundary condition"
"none" :: "Apply no boundary condition"
} "static"
restricted:
# Number of timelevels to allocate for each gridfunction group (schedule.ccl).
CCTK_INT timelevels "Number of active timelevels" STEERABLE=RECOVER
{
0:3 :: ""
} 3
restricted:
# Wavepacket parameters.  NOTE(review): amp and lam are not referenced by the
# evolution code in this thorn — confirm they are read by the initial-data
# thorn (or remove them).
CCTK_REAL amp "The amplitude of the wavepacket"
{
*:* :: "The amplitude of the wavepacket"
} 1.0
restricted:
CCTK_REAL lam "The size lambda of the wavepacket"
{
*:* :: "The size lambda of the wavepacket"
} 1.0
# -
# 3. `schedule.ccl`: allocates storage for gridfunctions, defines how the thorn's functions should be scheduled in a broader simulation, and specifies the regions of memory written to or read from gridfunctions. `schedule.ccl`'s official documentation may be found [here](http://einsteintoolkit.org/usersguide/UsersGuidech12.html#x17-186000D2.4).
#
# We first assign storage for both scalar gridfunctions, and then specify the standardized ETK "scheduling bins" in which we want each of our thorn's functions to run.
# +
# %%writefile MaxwellEvol/schedule.ccl
# Allocate "timelevels" timelevels (param.ccl, default 3) for every group.
STORAGE: system_I_rhs[timelevels]
STORAGE: system_I[timelevels]
STORAGE: system_II_rhs[timelevels]
STORAGE: system_II[timelevels]
STORAGE: system_I_constraint_violation[timelevels]
STORAGE: system_II_constraint_violation[timelevels]

# Register reflection symmetries once, at grid setup.
schedule MaxwellEvol_InitSymBound at BASEGRID
{
  LANG: C
  OPTIONS: global
} "Schedule symmetries"

# Analysis-time diagnostic: needs the ADM metric and both systems' E-fields.
schedule MaxwellEvol_calc_constraint_violation IN CCTK_ANALYSIS
{
  LANG: C
  READS: admbase::gxx(Everywhere)
  READS: admbase::gxy(Everywhere)
  READS: admbase::gxz(Everywhere)
  READS: admbase::gyy(Everywhere)
  READS: admbase::gyz(Everywhere)
  READS: admbase::gzz(Everywhere)
  READS: EID0GF(Everywhere)
  READS: EID1GF(Everywhere)
  READS: EID2GF(Everywhere)
  READS: EIID0GF(Everywhere)
  READS: EIID1GF(Everywhere)
  READS: EIID2GF(Everywhere)
  WRITES: CviolationIGF(Interior)
  WRITES: CviolationIIGF(Interior)
# FIX: description string typo "contraint" -> "constraint".
} "Calculate the constraint violation of the simulation"

# RHS evaluation for MoL; writes interior points only — boundaries are
# filled by the Boundary thorn in MoL_PostStep.
schedule MaxwellEvol_set_rhs as WaveToy_Evolution IN MoL_CalcRHS
{
  LANG: C
  READS: admbase::gxx(Everywhere)
  READS: admbase::gxy(Everywhere)
  READS: admbase::gxz(Everywhere)
  READS: admbase::gyy(Everywhere)
  READS: admbase::gyz(Everywhere)
  READS: admbase::gzz(Everywhere)
  READS: AID0GF(Everywhere)
  READS: AID1GF(Everywhere)
  READS: AID2GF(Everywhere)
  READS: EID0GF(Everywhere)
  READS: EID1GF(Everywhere)
  READS: EID2GF(Everywhere)
  READS: psiIGF(Everywhere)
  READS: AIID0GF(Everywhere)
  READS: AIID1GF(Everywhere)
  READS: AIID2GF(Everywhere)
  READS: EIID0GF(Everywhere)
  READS: EIID1GF(Everywhere)
  READS: EIID2GF(Everywhere)
  READS: psiIIGF(Everywhere)
  READS: GammaGF(Everywhere)
  WRITES: AIrhsD0GF(Interior)
  WRITES: AIrhsD1GF(Interior)
  WRITES: AIrhsD2GF(Interior)
  WRITES: EIrhsD0GF(Interior)
  WRITES: EIrhsD1GF(Interior)
  WRITES: EIrhsD2GF(Interior)
  WRITES: psiIrhsGF(Interior)
  WRITES: AIIrhsD0GF(Interior)
  WRITES: AIIrhsD1GF(Interior)
  WRITES: AIIrhsD2GF(Interior)
  WRITES: EIIrhsD0GF(Interior)
  WRITES: EIIrhsD1GF(Interior)
  WRITES: EIIrhsD2GF(Interior)
  WRITES: psiIIrhsGF(Interior)
  WRITES: GammarhsGF(Interior)
} "Evolution of Maxwell's equations"

# Select boundary conditions after each MoL substep, then let the generic
# ApplyBCs group actually apply them (also after mesh-refinement restriction).
schedule MaxwellEvol_SelectBCs in MoL_PostStep
{
  LANG: C
  OPTIONS: level
  SYNC: system_I
  SYNC: system_II
} "Boundaries of Maxwell's equations"

schedule GROUP ApplyBCs as MaxwellEvol_ApplyBCs in MoL_PostStep after MaxwellEvol_SelectBCs
{
} "Apply boundary conditions"

schedule GROUP ApplyBCs as MaxwellEvol_ApplyBCs at POSTRESTRICT
{
} "Apply boundary conditions"

# Tell MoL which gridfunctions to time-integrate.
schedule MaxwellEvol_RegisterVars in MoL_Register
{
  LANG: C
  OPTIONS: meta
} "Register Variables for MoL"
# -
# <a id='etk_list'></a>
#
# ## Step 2.c: Add the C file to Einstein Toolkit compilation list \[Back to [top](#toc)\]
# $$\label{etk_list}$$
#
# We will also need `make.code.defn`, which indicates the list of files that need to be compiled. This thorn only has the one C file to compile.
# %%writefile MaxwellEvol/src/make.code.defn
SRCS = MaxwellEvol.c
# <a id='code_validation'></a>
#
# # Step 3: Code Validation (To be performed) \[Back to [top](#toc)\]
# $$\label{code_validation}$$
# <a id='latex_pdf_output'></a>
#
# # Step 4: Output this notebook to $\LaTeX$-formatted PDF file \[Back to [top](#toc)\]
# $$\label{latex_pdf_output}$$
#
# The following code cell converts this Jupyter notebook into a proper, clickable $\LaTeX$-formatted PDF file. After the cell is successfully run, the generated PDF may be found in the root NRPy+ tutorial directory, with filename
# [Tutorial-ETK_thorn-MaxwellEvol.pdf](Tutorial-ETK_thorn-MaxwellEvol.pdf) (Note that clicking on this link may not work; you may need to open the PDF file through another means.)
# !jupyter nbconvert --to latex --template latex_nrpy_style.tplx --log-level='WARN' Tutorial-ETK_thorn-MaxwellEvol.ipynb
# !pdflatex -interaction=batchmode Tutorial-ETK_thorn-MaxwellEvol.tex
# !pdflatex -interaction=batchmode Tutorial-ETK_thorn-MaxwellEvol.tex
# !pdflatex -interaction=batchmode Tutorial-ETK_thorn-MaxwellEvol.tex
# !rm -f Tut*.out Tut*.aux Tut*.log
| Tutorial-ETK_thorn-MaxwellEvol.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
# This tells matplotlib not to try opening a new window for each plot.
# %matplotlib inline
# General libraries.
import re
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import collections
from tqdm import tqdm
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import train_test_split
from sklearn.feature_extraction.text import *
from sklearn.pipeline import Pipeline
from sklearn.decomposition import PCA
# Used by the pipeline cells below but missing from the original imports:
from sklearn.multiclass import OneVsRestClassifier
from sklearn.naive_bayes import MultinomialNB
from sklearn.svm import LinearSVC
# ADD METRICS
from sklearn import metrics
from sklearn.metrics import roc_auc_score
from sklearn.metrics import accuracy_score
from sklearn.metrics import classification_report
#NLTK - NLP Tokenizing and Cleaning
import nltk
from nltk import pos_tag, pos_tag_sents
from nltk.tokenize import word_tokenize, sent_tokenize
from nltk.corpus import stopwords
from nltk.corpus import wordnet
from nltk.stem import WordNetLemmatizer
from nltk.sentiment.vader import SentimentIntensityAnalyzer
import spacy
import sys, os, re, csv, codecs, numpy as np, pandas as pd
import matplotlib.pyplot as plt
from IPython.core.display import display, HTML
from keras.preprocessing.text import Tokenizer
from keras.preprocessing.sequence import pad_sequences
from keras.layers import Dense, Input, LSTM, Embedding, Dropout, Activation
from keras.layers import Bidirectional, GlobalMaxPool1D
from keras.models import Model
from keras import initializers, regularizers, constraints, optimizers, layers
# Tokenize and Pad
from keras.preprocessing.text import Tokenizer
from keras.preprocessing.sequence import pad_sequences
# -
# Load the scraped microaggressions posts; expected columns include 'tags'
# (list of strings) and 'text'.
data = pd.read_json("microaggressions-modified.json")
#clean_tags
# Join each post's tag list into a single "|"-separated string, then normalize:
# pass 1 deletes noise tags, pass 2-8 collapse synonyms into the canonical
# class names used below (race, disability, mental health, ...).
# NOTE(review): `\body` in the first pattern is a word boundary followed by
# "ody", not the tag "body" -- confirm whether `\bbody\b` was intended.
data.tags = data.tags.str.join("|").apply(lambda x: re.sub(r'3yearsago|acces|beauty|colonialism|discussion|education|eurocentricism|eurocentrism|event|everything|family|faq|hair|institutional|internalization|macro|\bmarriage\b|marriagestatus|meta|orientalism|patriarchy|queer|reblog|reflection|region|relationship|resistance|school|west|\body|\*', r'', x))\
.apply(lambda y: re.sub(r'Skin Tone|citizenship|ethnicity|language|nationality|racism|skin tone', r'race', y))\
.apply(lambda z: re.sub(r"ability|ableism|invisibility|invisible", r"disability",z))\
.apply(lambda a: re.sub(r"Mind|mind|neurodiversity",r"mental health",a))\
.apply(lambda b: re.sub(r"immigrant status|immigration status", r"immigration",b))\
.apply(lambda c: re.sub(r"Sexuality|\bsex\b|sexism", r"sexuality",c))\
.apply(lambda d: re.sub(r"genderqueer|trans", r"gender",d))\
.apply(lambda e: re.sub(r"Survivor|submission|surivor|survivor", r"sexual assault",e))
# One-hot encode the cleaned tag string into one 0/1 column per class.
data = pd.concat([data, data['tags'].str.get_dummies()], axis = 1)
print("Dataset has {} rows and {} columns".format(data.shape[0], data.shape[1]))
data.head()
#Rows with empty values either in the tags or the text and reindex
bad_indices = list(set(np.where(data[["tags", "text"]] == "")[0]))
data.drop(data.index[bad_indices], inplace = True)
data = data.reset_index(drop=True)
print("After culling the empty values, the dataset now has {} rows and {} columns".format(data.shape[0], data.shape[1]))
# +
#Class labels
# Fixed: the original was missing a comma between "immigration" and
# "mental health", so implicit string concatenation produced one bogus
# label "immigrationmental health" and only 10 entries instead of 11.
list_classes = ["age", "body", "class", "disability", "gender", "immigration",
                "mental health", "race", "religion", "sexual assault", "sexuality"]
#Read the data
# 80/20 split of text (column 1) vs. the dummy label columns (2+); stratified
# on the 'race' label column so its class balance is preserved in both splits.
train, test, train_labels, test_labels = train_test_split(data.iloc[:, 1],data.iloc[:,2:],test_size=0.20,train_size=0.80, stratify = data["race"])
print(train.shape, train_labels.shape)
print(test.shape, test_labels.shape)
# +
# Create a counter object for each dataset
# Raw whitespace-token frequency census of the (pre-tokenizer) training text.
word_counter = collections.Counter([word for sentence in tqdm(train, total=len(train)) \
                                    for word in sentence.split()])
print('{} words.'.format(len([word for sentence in train for word in sentence.split()])))
print('{} unique words.'.format(len(word_counter)))
print('10 Most common words in the dataset:')
print('"' + '" "'.join(list(zip(*word_counter.most_common(10)))[0]) + '"')
# -
data_classes = data
counts = []
# Count positive examples per label column (columns 2+ are the dummies).
categories = list(data.columns[2:].values)
for i in categories:
    counts.append((i, data[i].sum()))
data_stats = pd.DataFrame(counts, columns=['Classes', '#Microagressions'])
data_stats
# +
# Bar chart of positive-example counts per class, with the count printed
# above each bar.
fig, ax = plt.subplots(figsize=(15, 10))
bar_width = 0.5
opacity = 0.6
labels = data_stats["#Microagressions"]
rects1 = plt.bar(data_stats["Classes"], data_stats["#Microagressions"], bar_width,
                 alpha=opacity,
                 color='indigo',
                 label='Classes')
plt.xlabel('Classes')
plt.ylabel('#Microaggressions')
plt.title('Microagression Classes')
plt.xticks(np.arange(len(data_stats["Classes"])), data_stats["Classes"], rotation='vertical')
plt.legend()
# Annotate each bar with its count.
for rect, label in zip(rects1, labels):
    height = rect.get_height()
    ax.text(rect.get_x() + rect.get_width() / 2, height + 5, label,
            ha='center', va='bottom', rotation = 45)
# -
# A few things to note so far - class imbalance and stopwords need to be filtered
# +
# Create tokenizer
tokenizer = Tokenizer(num_words=None,
                      filters='!"#$%&()*+,-./:;<=>?@[\\]^_`{|}~\t\n',
                      lower=True,
                      split=" ",
                      char_level=False)
# Fit and run tokenizer
# Vocabulary is learned from the training split only; test is just encoded.
tokenizer.fit_on_texts(list(train))
tokenized_train = tokenizer.texts_to_sequences(train)
tokenized_test = tokenizer.texts_to_sequences(test)
word_index = tokenizer.word_index
# Extract variables
vocab_size = len(word_index)
print('Vocab size: {}'.format(vocab_size))
longest = max(len(seq) for seq in tokenized_train)
print("Longest comment size: {}".format(longest))
average = np.mean([len(seq) for seq in tokenized_train])
print("Average comment size: {}".format(average))
stdev = np.std([len(seq) for seq in tokenized_train])
print("Stdev of comment size: {}".format(stdev))
# Cap padded length at mean + 3 sigma rather than the longest document.
max_len = int(average + stdev * 3)
print('Max comment size: {}'.format(max_len))
print()
# Pad sequences
processed_X_train = pad_sequences(tokenized_train, maxlen=max_len, padding='post', truncating='post')
processed_X_test = pad_sequences(tokenized_test, maxlen=max_len, padding='post', truncating='post')
# Sample tokenization
for sample_i, (sent, token_sent) in enumerate(zip(train[:2], tokenized_train[:2])):
    print('Sequence {}'.format(sample_i + 1))
    print(' Input: {}'.format(sent))
    print(' Output: {}'.format(token_sent))
# -
# A vocabulary of 17110 words has been shrunk to 8883
word_count_distribution = tokenizer.word_counts.items()
sorted(word_count_distribution, key=lambda kv: kv[1], reverse = True)
# +
embedding_dim = 300
# Get embeddings
# Parse the fastText .vec text file: each line is "<word> <300 floats>".
embeddings_index = {}
f = open('wiki.en.vec', encoding="utf8")
for line in f:
    # rsplit from the right keeps any spaces inside the token in values[0].
    values = line.rstrip().rsplit(' ', embedding_dim)
    word = values[0]
    coefs = np.asarray(values[1:], dtype='float32')
    embeddings_index[word] = coefs
f.close()
print('Found {} word vectors.'.format(len(embeddings_index)))
# -
# Build embedding matrix
# Row i holds the fastText vector for the word with tokenizer index i; row 0
# (padding) and any out-of-vocabulary row stay all-zero.
embedding_matrix = np.zeros((len(word_index) + 1, embedding_dim))
for word, i in word_index.items():
    embedding_vector = embeddings_index.get(word)
    if embedding_vector is not None:
        # Words not found in embedding index will be all-zeros.
        embedding_matrix[i] = embedding_vector
import h5py
# Cache the matrix to disk so later runs can skip re-reading the .vec file.
with h5py.File('embeddings.h5', 'w') as hf:
    hf.create_dataset("fasttext", data=embedding_matrix)
# Load the embedding matrix back from the HDF5 cache.
with h5py.File('embeddings.h5', 'r') as hf:
    embedding_matrix = hf['fasttext'][:]
# +
import keras.backend
from keras.models import Sequential
from keras.layers import CuDNNGRU, Dense, Conv1D, MaxPooling1D
from keras.layers import Dropout, GlobalMaxPooling1D, BatchNormalization
from keras.layers import Bidirectional
from keras.layers.embeddings import Embedding
from keras.optimizers import Nadam
# Initiate model
model = Sequential()
# Add Embedding layer
# vocab_size + 1 rows to leave index 0 for padding; seeded with fastText
# vectors but trainable so they can be fine-tuned.
model.add(Embedding(vocab_size + 1, embedding_dim, weights=[embedding_matrix], input_length=max_len, trainable=True))
# Add Recurrent layers
#model.add(Bidirectional(CuDNNGRU(300, return_sequences=True)))
# Add Convolutional layer
model.add(Conv1D(filters=128, kernel_size=5, padding='same', activation='relu'))
model.add(MaxPooling1D(3))
model.add(GlobalMaxPooling1D())
model.add(BatchNormalization())
# Add fully connected layers
model.add(Dense(50, activation='relu'))
model.add(Dropout(0.1))
# 11 sigmoid outputs: one independent probability per label class.
model.add(Dense(11, activation='sigmoid'))
# Summarize the model
model.summary()
# +
def loss(y_true, y_pred):
    # Thin wrapper around Keras' built-in binary cross-entropy; kept as a
    # named function so it can be swapped for a custom loss later.
    return keras.backend.binary_crossentropy(y_true, y_pred)
lr = .0001
# clipnorm=1.0 guards against exploding gradients.
model.compile(loss=loss, optimizer=Nadam(lr=lr, clipnorm=1.0),
              metrics=['binary_accuracy'])
# +
from sklearn.model_selection import train_test_split
from sklearn.metrics import roc_auc_score
from keras.callbacks import Callback
class RocAucEvaluation(Callback):
    """Keras callback: score validation ROC AUC every `interval` epochs and
    checkpoint the model whenever the score improves.

    Also writes the score into `logs['roc_auc_val']` so EarlyStopping can
    monitor it.

    Args:
        filepath: where to save the best model (HDF5).
        validation_data: (X_val, y_val) tuple used for scoring.
        interval: evaluate every `interval` epochs.
        max_epoch: initial value for the `stopped_epoch` bookkeeping field.
    """

    def __init__(self, filepath, validation_data=(), interval=1, max_epoch=100):
        # Fixed: the original called super(Callback, self).__init__(), which
        # skips Callback's own __init__ entirely. (Stray debug prints removed.)
        super(RocAucEvaluation, self).__init__()
        self.interval = interval
        self.filepath = filepath
        self.stopped_epoch = max_epoch
        self.best = 0  # best AUC seen so far
        self.X_val, self.y_val = validation_data
        self.y_pred = np.zeros(self.y_val.shape)

    def on_epoch_end(self, epoch, logs=None):
        # None-default avoids the original's mutable default argument logs={}.
        logs = logs if logs is not None else {}
        if epoch % self.interval == 0:
            y_pred = self.model.predict_proba(self.X_val, verbose=0)
            current = roc_auc_score(self.y_val, y_pred)
            logs['roc_auc_val'] = current  # exposed for EarlyStopping
            if current > self.best:  # save model
                print(" - AUC - improved from {:.5f} to {:.5f}".format(self.best, current))
                self.best = current
                self.y_pred = y_pred
                self.stopped_epoch = epoch + 1
                self.model.save(self.filepath, overwrite=True)
            else:
                print(" - AUC - did not improve")
# Hold out 3% of the padded training data as a validation set for the AUC
# callback; shuffle=False keeps the split reproducible.
[X, X_val, y, y_val] = train_test_split(processed_X_train, train_labels, test_size=0.03, shuffle=False)
# Fixed: the original passed validation_data=(X, y), i.e. the TRAINING split,
# so the "validation" AUC was computed on data the model was fitted on.
RocAuc = RocAucEvaluation(filepath='model.best.hdf5', validation_data=(X_val, y_val), interval=1)
# +
from keras.callbacks import EarlyStopping, ModelCheckpoint
# NOTE(review): this re-compile replaces the Nadam optimizer, custom loss and
# binary-accuracy metric configured earlier -- confirm it is intentional.
model.compile(loss='binary_crossentropy', optimizer='Adam')
# Set variables
batch_size = 64
epochs = 5
# Set early stopping
# 'roc_auc_val' is written into `logs` by the RocAucEvaluation callback above.
early_stop = EarlyStopping(monitor="roc_auc_val", mode="max", patience=2)
# Train
graph = model.fit(X, y, batch_size=batch_size, epochs=epochs,
                  validation_data=(X_val, y_val), callbacks=[RocAuc, early_stop],
                  verbose=1, shuffle=False)
# +
import matplotlib.pyplot as plt
# %matplotlib inline
# Visualize history of loss
plt.plot(graph.history['loss'])
plt.plot(graph.history['val_loss'])
plt.title('model loss')
plt.ylabel('loss')
plt.xlabel('epoch')
plt.legend(['train', 'test'], loc='upper left')
plt.show()
# -
predictions = model.predict(processed_X_test, verbose=0)
# +
#preprocessing
def nltk_preprocess(data):
    '''Clean a Series of documents down to lemmatized, stopword-free text.

    Steps: strip digits/punctuation/underscores, lowercase, tokenize, drop
    English stopwords, POS-tag, lemmatize each token with its WordNet POS,
    then re-join the surviving tokens into one string per document.

    Args:
        data: pandas Series of raw text documents.
    Returns:
        pandas Series of cleaned text, same index as the input.
    '''
    # set() membership is O(1); the original list lookup was O(n) per token.
    stop = set(stopwords.words('english'))
    lemmatizer = WordNetLemmatizer()
    # Map Penn Treebank tag prefixes to WordNet POS constants.
    _wn_pos = {'J': wordnet.ADJ, 'V': wordnet.VERB,
               'N': wordnet.NOUN, 'R': wordnet.ADV}

    def lemmatized(word, tag):
        # Unknown tag prefix -> lemmatize with the default (noun) POS.
        wntag = _wn_pos.get(tag[:1])
        if wntag is None:
            return str(lemmatizer.lemmatize(word))
        return str(lemmatizer.lemmatize(word, pos=wntag))

    data = data.apply(lambda x: re.sub(r'\d+', r' ', x)) \
               .apply(lambda y: re.sub(r'\W+', r' ', y)) \
               .apply(lambda z: re.sub(r"_+", r" ", z))
    data = data.str.lower()
    data = data.apply(word_tokenize)
    data = data.apply(lambda x: [item for item in x if item not in stop])
    data = data.apply(pos_tag)
    data = data.apply(lambda x: [lemmatized(word, tag) for (word, tag) in x])
    data = data.apply(lambda x: ' '.join(x))
    return data
data["text"] = nltk_preprocess(data["text"])
# -
# Re-number all splits 0..n-1 so positional and label-based indexing agree.
# NOTE(review): `dev` and `dev_labels` are used here and below but are never
# created in this file -- the train_test_split above only produced train/test.
# Presumably a second split built a dev set in the original notebook; restore
# it before running these cells.
train = train.reset_index().drop(['index'], axis=1)
train_labels = train_labels.reset_index().drop(['index'], axis=1)
dev = dev.reset_index().drop(['index'], axis=1)
dev_labels = dev_labels.reset_index().drop(['index'], axis=1)
test = test.reset_index().drop(['index'], axis=1)
test_labels = test_labels.reset_index().drop(['index'], axis=1)
# +
# Binary-relevance baseline: one Multinomial Naive Bayes classifier per label.
pipe1 = Pipeline([('cv', CountVectorizer(min_df=.02, max_df=.3, ngram_range=(1,3))),
                ('tfidf', TfidfTransformer()),
                ('clf', OneVsRestClassifier(MultinomialNB()))])
#categories = list(data.columns[2:].values) <- refresher for categories
for category in categories:
    print('... Processing {}'.format(category))
    # train the model using X_dtm & y
    pipe1.fit(train, train_labels[category])
    # compute the testing accuracy
    prediction = pipe1.predict(dev)
    print('Test accuracy is {}'.format(accuracy_score(dev_labels[category], prediction)))
# +
# Same binary-relevance setup with a linear SVM per label.
pipe2 = Pipeline([('cv', CountVectorizer(min_df=.02, max_df=.3, ngram_range=(1,3))),
                ('tfidf', TfidfTransformer()),
                ('clf', OneVsRestClassifier(LinearSVC()))])
#categories = list(data.columns[2:].values) <- refresher for categories
for category in categories:
    print('... Processing {}'.format(category))
    # train the model using X_dtm & y
    pipe2.fit(train, train_labels[category])
    # compute the testing accuracy
    prediction = pipe2.predict(dev)
    print('Test accuracy is {}'.format(accuracy_score(dev_labels[category], prediction)))
# +
# Same binary-relevance setup with logistic regression per label.
pipe3 = Pipeline([('cv', CountVectorizer(min_df=.02, max_df=.3, ngram_range=(1,3))),
                ('tfidf', TfidfTransformer()),
                ('clf', OneVsRestClassifier(LogisticRegression()))])
#categories = list(data.columns[2:].values) <- refresher for categories
for category in categories:
    print('... Processing {}'.format(category))
    # train the model using X_dtm & y
    pipe3.fit(train, train_labels[category])
    # compute the testing accuracy
    # Fixed copy-paste bug: the original evaluated pipe2 here, so the printed
    # accuracies were the SVM's, not logistic regression's.
    prediction = pipe3.predict(dev)
    print('Test accuracy is {}'.format(accuracy_score(dev_labels[category], prediction)))
# +
# Refit all three pipelines on the full multi-label target, then report on test.
pipe1.fit(train, train_labels)
pipe2.fit(train, train_labels)
pipe3.fit(train, train_labels)
# Fixed: classification_report's signature is (y_true, y_pred, ...); the
# original passed the predictions first, transposing precision and recall.
pipe_pred_1 = pipe1.predict(test)
print(classification_report(test_labels, pipe_pred_1, target_names = categories))
# -
pipe_pred_2 = pipe2.predict(test)
print(classification_report(test_labels, pipe_pred_2, target_names = categories))
pipe_pred_3 = pipe3.predict(test)
print(classification_report(test_labels, pipe_pred_3, target_names = categories))
import pandas as pd
import numpy as np
import re
import csv
import os
import tensorflow as tf
import nltk
import gc
from keras.preprocessing import text, sequence
from sklearn.model_selection import train_test_split
from collections import Counter
# +
def get_coefs(word, *arr):
    """Split one fastText .vec line into (token, float32 vector)."""
    vector = np.asarray(arr, dtype='float32')
    return word, vector
# NOTE(review): hard-coded absolute user path -- parameterize before sharing.
embeddings_index = dict(get_coefs(*o.rstrip().rsplit(' ')) for o in open('/Users/robertdeng/Google Drive/Data Science/W266/allie/crawl-300d-2M.vec'))
# The first line of a fastText .vec file is the "<vocab> <dim>" header; it
# parses as a bogus entry keyed '2000000', so drop it.
del embeddings_index['2000000']
# -
len(embeddings_index)
max_features = 100000
maxlen = 180
plt.hist([len(i) for i in data.text], bins=100)
plt.ylabel('Max Length Histogram');
print(np.mean([len(i) for i in data.text]), max([len(i) for i in data.text]))
# +
#Create the dictionary whose keys contains all words in train dataset that also shown
#in FastText word embeddings.
lst = []
for line in train["text"]:
    lst += line.split()
count = Counter(lst)
# Drop words with no fastText vector.
for k in list(count.keys()):
    if k not in embeddings_index:
        del count[k]
len(count)
# -
# Keep only words seen at least twice, ordered by descending frequency.
count = dict(sorted(count.items(), key=lambda x: -x[1]))
count = {k:v for (k,v) in count.items() if v >= 2}
len(count)
# Assign 1-based vocabulary indices (0 is reserved for padding). Using
# len(count) instead of the original hard-coded 64349 keeps the mapping
# correct if the corpus -- and thus the vocabulary size -- changes.
count = dict(zip(list(count.keys()), range(1, len(count) + 1)))
embedding_matrix = {}
for key in count:
    embedding_matrix[key] = embeddings_index[key]
# W stacks an all-zero padding row on top of the vectors, in index order.
W = np.zeros((1,300))
W = np.append(W, np.array(list(embedding_matrix.values())),axis=0)
W = W.astype(np.float32, copy=False)
W.shape
# +
#Same step for the test dataset.
lst = []
for line in test["text"]:
    lst += line.split()
count_test = Counter(lst)
# Keep only test words that exist in the train-side vocabulary, and re-key
# them to the train-side integer index so both splits share one vocabulary.
for k in list(count_test.keys()):
    if k not in embedding_matrix:
        del count_test[k]
    else:
        count_test[k] = count[k]
# -
len(count_test)
#Release Memory
del lst
gc.collect()
# Convert each document from a string of words to a list of vocabulary
# indices, silently dropping out-of-vocabulary words. The comprehensions
# replace the original's repeated list.remove() calls, which were
# O(n^2) per document; the resulting lists are identical.
for i in range(len(train)):
    train.iloc[i, 0] = [count[w] for w in train.iloc[i, 0].split() if w in count]
for i in range(len(test)):
    test.iloc[i, 0] = [count_test[w] for w in test.iloc[i, 0].split() if w in count_test]
#Pad sequence to max length parameter
# NOTE(review): maxlen (180) must match the sequence_length used by the TF
# placeholders below, or the feed_dict shapes will disagree with the graph.
train_x = sequence.pad_sequences(list(train["text"]), maxlen = maxlen)
test_x = sequence.pad_sequences(list(test["text"]), maxlen = maxlen)
#Release the raw embedding dict; only W and the index maps are needed now.
del embeddings_index
gc.collect()
# **CNN Placeholder & Model Construction**
filter_sizes = [1,2,3,4,5]
num_filters = 32
batch_size = 256
#This large batch_size is specially for this case. Usually it is between 64-128.
num_filters_total = num_filters * len(filter_sizes)
embedding_size = 300
# Must equal the padded input length produced above. The original hard-coded
# 170 while the data was padded to maxlen=180, so the feed would not have
# matched the placeholder shape.
sequence_length = maxlen
num_classes = 11  # one output per microaggression label column
num_epochs = 3 #Depends on your choice.
dropout_keep_prob = 0.9
input_x = tf.placeholder(tf.int32, [None, sequence_length], name = "input_x")
# The original declared [None, 6] (left over from a 6-class dataset) even
# though the training loop feeds 11 label columns.
input_y = tf.placeholder(tf.float32, [None, num_classes], name = "input_y")
# Look up each token's embedding row in W; add a trailing channel dim for conv2d.
embedded_chars = tf.nn.embedding_lookup(W, input_x)
embedded_chars_expanded = tf.expand_dims(embedded_chars, -1)
def CNN(inputs):
    """Multi-width text CNN (Kim-style): one conv + max-pool branch per
    filter width, concatenated and flattened to [batch, num_filters_total].
    (Parameter renamed from `data`, which shadowed the global DataFrame.)
    """
    pooled_outputs = []
    for i, filter_size in enumerate(filter_sizes):
        filter_shape = [filter_size, embedding_size, 1, num_filters]
        w = tf.Variable(tf.truncated_normal(filter_shape, stddev = 0.05), name = "w")
        b = tf.Variable(tf.truncated_normal([num_filters], stddev = 0.05), name = "b")
        conv = tf.nn.conv2d(
            inputs,
            w,
            strides = [1,1,1,1],
            padding = "VALID",
            name = "conv"
        )
        h = tf.nn.relu(tf.nn.bias_add(conv, b), name = "relu")
        # Max-pool over the entire (sequence_length - filter_size + 1) output.
        pooled = tf.nn.max_pool(
            h,
            ksize = [1, sequence_length - filter_size + 1, 1, 1],
            strides = [1,1,1,1],
            padding = "VALID",
            name = "pool"
        )
        pooled_outputs.append(pooled)
    h_pool = tf.concat(pooled_outputs, 3)
    return tf.reshape(h_pool, [-1, num_filters_total])
h_pool_flat = CNN(embedded_chars_expanded)
h_drop = tf.nn.dropout(h_pool_flat, dropout_keep_prob)
#In the first dense layer, reduce the node to half.
wd1 = tf.Variable(tf.truncated_normal([num_filters_total, int(num_filters_total/2)], stddev=0.05), name = "wd1")
bd1 = tf.Variable(tf.truncated_normal([int(num_filters_total/2)], stddev = 0.05), name = "bd1")
layer1 = tf.nn.xw_plus_b(h_drop, wd1, bd1, name = 'layer1') # Do wd1*h_drop + bd1
layer1 = tf.nn.relu(layer1)
#Second dense layer: one logit per class (was hard-coded to 6).
wd2 = tf.Variable(tf.truncated_normal([int(num_filters_total/2), num_classes], stddev = 0.05), name = 'wd2')
bd2 = tf.Variable(tf.truncated_normal([num_classes], stddev = 0.05), name = "bd2")
layer2 = tf.nn.xw_plus_b(layer1, wd2, bd2, name = 'layer2')
prediction = tf.nn.sigmoid(layer2)# Make it to be 0-1.
# Sigmoid cross-entropy on the raw logits: independent binary loss per label.
loss = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits = layer2, labels = input_y))
optimizer = tf.train.AdamOptimizer(learning_rate = 0.0007).minimize(loss)
#Learning rates usually is small for CNN compared with pure neural network.
# Element-wise accuracy after rounding each per-label probability to 0/1.
accuracy = tf.reduce_mean(tf.cast(tf.equal(tf.round(prediction), input_y), tf.float32))
#Define batch generation function.
def generate_batch(data, batch_size, num_epochs, shuffle=True):
data = np.array(data)
data_size = len(data)
num_batches_per_epoch = int((len(data)-1)/batch_size) + 1
l = 0
for epoch in range(num_epochs):
l += 1
if shuffle:
shuffle_indices = np.random.permutation(np.arange(data_size))
shuffled_data = data[shuffle_indices]
else:
shuffled_data = data
for batch_num in range(num_batches_per_epoch):
start_index = batch_num * batch_size
end_index = min((batch_num + 1) * batch_size, data_size)
yield shuffled_data[start_index:end_index]
#For Test data. Can use generate_batch function.
def blocks(data, block_size):
data = np.array(data)
data_size = len(data)
nums = int((data_size-1)/block_size) + 1
for block_num in range(nums):
if block_num == 0:
print("prediction start!")
start_index = block_num * block_size
end_index = min((block_num + 1) * block_size, data_size)
yield data[start_index:end_index]
# **Blocks and Batches**
train_labels.columns
# All 11 label columns, in the exact order expected by the training loop's
# pd.DataFrame(batch, columns=[...]) call below. The original zips listed
# 'immigration' twice and omitted 'religion', which silently mislabelled the
# 'religion' and 'sexual assault' columns during training.
label_order = ['age', 'body', 'class', 'disability', 'gender', 'immigration',
               'mental health', 'race', 'religion', 'sexual assault', 'sexuality']
def _epoch_batches():
    # One epoch's generator over (text, label_1, ..., label_11) rows.
    rows = list(zip(np.array(train.text), *(train_labels[c] for c in label_order)))
    return generate_batch(rows, batch_size, 1)
# One independent generator per training epoch (7 epochs), replacing the
# seven copy-pasted batch1..batch7 definitions.
batch_bag = [_epoch_batches() for _ in range(7)]
test_blocks = blocks(list(np.array(test_x)), 1000)
# NOTE(review): the original had `[i for i in batch1]` here, which drained the
# first generator before training, so epoch 1 saw no data. Removed.
# +
init_op = tf.global_variables_initializer()
with tf.Session() as sess:
    sess.run(init_op)
    i = 0
    # Each entry of batch_bag is one epoch's batch generator.
    for batches in batch_bag:
        i += 1
        print('Epoch: ' + str(i) + ' start!')
        avg_acc = 0
        avg_loss = 0
        for batch in batches:
            # Re-attach column names so labels can be split from the text.
            batch = pd.DataFrame(batch, columns = ['text', 'age', 'body', 'class', 'disability', 'gender', 'immigration', 'mental health', 'race', 'religion', 'sexual assault', 'sexuality'])
            # NOTE(review): batch['text'] rows are variable-length index
            # lists; input_x expects fixed-length padded rows (cf. train_x)
            # -- verify padding happens before this feed.
            x_batch = pd.DataFrame(list(batch['text']))
            y_batch = batch.loc[:, batch.columns != 'text']
            _,c, acc = sess.run([optimizer, loss, accuracy],feed_dict = {input_x: x_batch, input_y: y_batch})
            avg_loss += c
            avg_acc += acc
        # NOTE(review): 624 is a hard-coded batches-per-epoch count; derive it
        # from the data size so the averages stay correct if the data changes.
        avg_loss = avg_loss/624
        avg_acc = avg_acc/624
        print('Epoch:' + str(i) + ' loss is ' + str(avg_loss) + ', train accuracy is ' + str(avg_acc))
        #print('Evaluation Accuracy: ')
        #print(accuracy.eval({input_x: val_x, input_y: yval}))
    print('Training Finish!')
    # Predict on the padded test set in 1000-row blocks; df accumulates the
    # per-class sigmoid outputs row by row.
    df = pd.DataFrame()
    for block in test_blocks:
        block = pd.DataFrame(block)
        pred = sess.run(prediction, feed_dict = {input_x: block})
        df = df.append(pd.DataFrame(pred))
    print('Prediction Finish!')
# -
df.round().mean()
| Allie Topic Classification 2.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Control Flow: If, For, and While Loops
#
# When building programs, you may want to find ways to repeating certain sets of actions or choosing between sets of actions based on some condition. **Control flow** is the order that different statements or pieces of your code run in. In Python the three basic ways we can control how our code runs are the `for`, `while`, and `if` control statements.
# # 1.0 - If Loops
# If statements capture the same behavior that we do constantly throughout the day: based on some criteria, do something from a list of options. Let's see how this behavior can be implemented. Below, I've created an if statement to print out whether a number is greater than 0.
# ### 1.1 - Example 1: If/Else Statement
# +
your_number = -1
if your_number > 0:
    print('Your number is greater than 0.')
else:
    # The else branch also catches exactly 0, so the message must not claim
    # "less than 0" (the original did, which is wrong for input 0).
    print('Your number is less than or equal to 0.')
# -
# This example only gives a single criterion to evaluate on. If the condition in the first statement (your_number > 0) is true, then it will run the lines nested below it and continue. If it doesn't meet the condition, it will skip the nested lines and, instead, run the lines underneath the `else` statement — note that this branch also catches the case where the number is exactly 0.
#
# But if statements can be strung together to create multiple levels of evaluations! To do so, you'll need to use an `elif` statement. To see how this works, let's extend the example above to tell us if our number is postive or negative:
# ### 1.2 - Example 2: If/Elif/Else Statement
# +
your_number = -1
if your_number > 0:
print('Your number is positive.')
elif your_number < 0:
print('Your number is negative')
else:
print('Your number is exactly 0')
# -
# When this set of if statements are run, it will go through each criteria from the top to the bottom. Since it failed the first criteria, it is evaluated based on the second criteria. Since it matched the second criteria, it ran the statements underneath and continued on. If you would like to have even more complex criteria, you can add additional `elif` statements.
# # 2.0 - For Loops
#
# For loops are one of the most common control flow statements you'll see and use in programming. For loops are used when you want to **run a set of statements over a known number of items**. This "known number of items" could be a number of steps (E.g. run for 10 times) or it could be a specific list of items (E.g. run for each item in List A). Let's see how a for loop is structured and how to implement it in Python
# ## 2.1 - Example 1: Print "Hello World" Multiple Times
#
# To introduce the basic structure of a for-loop, let's try printing "Hello World" multiple times.
for i in range(3):
print('Hello World {}'.format(i))
# The structure of a for loops follows a simple pattern:
#
# <img src="img/for-loop-structure.jpeg" width="400">
#
# **Loop Object:**
# The loop object is the thing that we are telling the computer to loop over. In some cases (such as this example), we use this to indicate how many times we want to run the loop. In more complex examples, this can be a list of items that the program will automatically pull from (See Example 2).
#
# **Loop Variable:**
# This is a variable (as you have seen in previous modules) that stores the value for each loop. It can be as simple as a number indicating how many times we have run the loop (as in this Example 1), but can also be the specific item from a list (See Example 2). Like any other Python variable, you are free to name this variable whatever you like.
#
# **Loop Statements:**
# Any of the lines nested/tabbed underneath the for-loop statement will be run each time you go through a for loop. You can access variables declared outside of the for loop, or even create new variables within it to help with more complex computations.
# ## 2.2 - Example 2: Looping Over Items in a List
# What if you had a list of items that you wanted to loop over? In Python, there are two simple approaches for tackling this problem. Let's see how we can do it.
# +
# Here's a list of words we'd like to print
word_list = ['Cat', 'Dog', 'Bunny', 'Bird']
# Approach 1: Specifying the number of times to loop
for i in range(len(word_list)):
print(word_list[i])
# -
# In this first approach, we use the same approach as in Example 1 by specifying how many times we want to run the for loop over. Since each loop value corresponds to one of the words in our list, we can use the loop value to pick out the word and then print it out.
#
# However, you can actually loop over the list **directly**, allowing the computer to automatically pick out the word **and** calculate how many times to run the loop:
# Approach 2: Loop directly over the list
for word in word_list:
print(word)
# As you can see, the overall structure of the for loop is the exact same. However, instead of specifying the number of times we want to run the for-loop, we provide the list itself, `word_list`. The program will now go through each item in the list and store it in the loop variable, `word`, and run the statements below as normal
# ## 2.3 - Example 3: Storing Values from a For Loop
# In many cases, you may want to calculate and store values every time you run a for loop.
# Let's see how this can be implemented:
# +
values = [] # List to store values from the for loop
for i in range(5): # Let's loop for 5 times
val_to_store = i + 5 # Compute the value you'd like to store
values.append(val_to_store) # Use ".append" method to store value in the outside list
print(values) # Our val_to_store are now in a list!
# -
# In this example, we decided to store the values we are calculating in a for loop in a list. However, we could store them in any of the collection formats we discussed before.
# ## 2.4 - Example 4: Breaking out of For Loops
#
# While going through a for loop, there may be cases where you'll want to stop iterating early based on a threshold or other metric. To implement this, we combine a for loop with an `if` and `break` statement.
# +
a = 1 # This is a value we'll keep track of
threshold = 100 # Maximum value for a before we want to stop
for i in range(100):
a *= 2 # Double a
# Check if a is above threshold
if a > threshold:
break # Break will cause the program to leave the enclosing loop
print(a)
# -
# ## While Loops
# Unlike for loops, while loops we don't provide an explicit number of times to run the loop. Instead we provide the **condition for stopping**.
# +
b = 1 # This is a value we'll keep track of
threshold_b = 100
while b < threshold_b: # This is our evaluation criteria
b*= 2
print(b)
# -
# You may notice that this looks very similar to the for loop with a threshold introduced as Example 4. Like that for loop, this while loop will check that the condition `b < threshold_b` every time it reaches the last nested statement. If that evaluation returns false, it will break out of the loop, just like our explicit evaluation in the for loop case.
#
# However, this loop will continue running *as long as the stated condition remains true*. This leads to a common error where a while loop never finishes running. Try running the code below: instead of an upper threshold, we're looking to see when c is exactly equal to the threshold.
# +
c = 1 # Indicator to keep track of
target = 100 # Imagine you accidently left a negative sign
while c != target:
c *= 2
print(c)
# -
# Before you let this code snippet keep running (you'll see a `*` next to the cell number), go up to the menu bar and interrupt the kernel. As you can see, the condition `c != target` never becomes false (c doubles from 64 to 128, skipping 100), resulting in a while loop that runs forever.
# ---
#
# [Return to homepage](https://anthony-agbay.github.io/bioe-python-guide/)
| notebooks/control-flow.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Pytorch for Deeplearning Research and Development
#
# **Authors:** Faustine
# ## Introduction
#
# ### What is Pytorch
# [PyTorch](http://pytorch.org/) is an open source deep learning framework that provides two high-level features: Tensor computation (like numpy) with strong GPU acceleration and Deep Neural Networks built on a tape-based autograd system.
#
#
# This notebook requires PyTorch 1.8.0 or later. You can check the version number of the currently installed pytorch package with: ```python print(torch.__version__)```.
import torch
print(torch.__version__)
# ### Pytorch Tensors
#
# The main building block of the PyTorch is the tensors. **Tensor** is a multi-dimensional matrix containing elements of a single data type. They are very similar to the NumPy array. However, unlike numpy array, pytorch tensor can utilize GPU. For instance, a vector is a 1-D tensor, and a matrix a 2-D tensor. Most common functions you know from numpy can be used on tensors as well. Actually, since numpy arrays are so similar to tensors, we can convert most tensors to numpy arrays (and back) but we don’t need it too often
#
# A tensor can be constructed from a Python list or sequence with the [**torch.Tensor()**](https://pytorch.org/docs/stable/tensors.html) function. The simplest approach for creating a tensor is to call torch.Tensor, passing the data (e.g. a list of values) as the input argument:
#
#
# Create an *uninitialized* tensor of shape (1, 2, 3): when torch.Tensor
# receives integers it treats them as dimensions, not as data.
x = torch.Tensor(1, 2, 3)
print(x)
# Creates a 2x3 matrix from a nested Python list (here the list IS the data)
data = [[1., 2., 3.], [4., 5., 6]]
M = torch.Tensor(data)
print (M)
# #### You can create a tensor with *random data* and the supplied dimensionality with **torch.randn()**
# Creates a tensor with random values sampled from a normal distribution with mean 0 and variance 1
x = torch.randn(1, 2, 4)
print(x)
# Creates a tensor with random values uniformly sampled between 0 and 1
torch.rand(2, 5)
# You can also use special tensors like ones and zeros
torch.ones(2, 3)
torch.zeros(2, 5)
torch.arange(1, 4)
# To get the size of a tensor you can use **.size()**; it is also possible to use **.shape**
x = torch.rand(1, 3, 2)
x.size()
x.shape
# To return the value of this tensor as a standard Python number use **.item()**. This only works for tensors with one element.
x = torch.rand(3)
print(x[0])         # a 0-dim tensor
print(x[0].item())  # the underlying Python number
# #### Numpy to Tensor Conversion
#
# You can easily convert a PyTorch tensor into a NumPy array and vice versa. To create a tensor from a Numpy array, use `torch.from_numpy()` or `torch.Tensor()`. To convert a tensor to a Numpy array, use the `.numpy()` method. In case you have a tensor on GPU, you need to call `.cpu()` on the tensor beforehand. Hence, you get a line like ``np_arr = tensor.cpu().numpy().``
#
#
import numpy as np
numpy_arr = np.random.randn(3, 4)
print(numpy_arr)
# convert numpy array to pytorch tensor (torch.Tensor copies the data)
pytorch_tensor = torch.Tensor(numpy_arr)
print(pytorch_tensor)
# use from_numpy (the resulting tensor shares memory with the numpy array)
pytorch_tensor = torch.from_numpy(numpy_arr)
print(pytorch_tensor)
# convert torch tensor back to its numpy representation
pytorch_tensor.numpy()
# ### Tensor Operations
#
# You can operate on tensors in the ways you would expect.
x = torch.Tensor([ 1., 2., 3. ])
y = torch.Tensor([ 4., 5., 6. ])
z = x + y  # elementwise addition, returns a new tensor
print (z)
# You can also use the equivalent functional form
z = torch.add(x, y)
print(z)
# +
# In-place variant: add_ mutates x itself (note the trailing underscore)
print("x (before)", x)
x.add_(y)
print("x (after)", x)
# -
# Other commonly used operations include matrix multiplications.
#
# - `torch.matmul`: Performs the matrix product over two tensors, where the specific behavior depends on the dimensions. If both inputs are matrices (2-dimensional tensors), it performs the standard matrix product. For higher dimensional inputs, the function supports broadcasting (for details see the documentation). Can also be written as a @ b, similar to numpy.
#
# - `torch.mm`: Performs the matrix product over two matrices, but doesn’t support broadcasting.
#
# - `torch.bmm`: Performs the matrix product with a support batch dimension. If the first tensor 𝑇 is of shape `(𝑏×𝑛×𝑚)`, and the second tensor `𝑅 (𝑏×𝑚×𝑝)`, the output 𝑂 is of shape `(𝑏×𝑛×𝑝)`, and has been calculated by performing 𝑏 matrix multiplications of the submatrices of 𝑇 and `𝑅: 𝑂𝑖=𝑇𝑖@𝑅𝑖`
#
# - `torch.einsum`: Performs matrix multiplications and more (i.e. sums of products) using the Einstein summation convention. Explanation of the Einstein sum can be found in assignment 1.
# For more and compresnive list on pytorch operations follow [pytorch documentation](https://pytorch.org/docs/stable/tensors.html)
# ### Reshaping Tensors
#
# The **.view()** method provide a function to reshape a tensor. This method receives heavy use, because many neural network components expect their inputs to have a certain shape. Often you will need to reshape before passing your data to the component.
#
x = torch.randn(1, 3, 4)
x
# Reshape to 1 row, 12 columns (total element count must stay 1*3*4 = 12)
x.view(1, 12)
# Reshape to 1x6x2
x.reshape(1, 6, 2)
# [Torch indexing operations](https://pytorch.org/docs/stable/torch.html#indexing-slicing-joining-mutating-ops)
# ### GPU support
# Pytorch has GPU support that greatly speed up training of deep learning models by running the matrix operations on a GPU with CUDA. GPU support is implemented in `torch.cuda`. This package adds support for CUDA tensor types, that implement the same function as CPU tensors, but they utilize GPUs for computation. The new API (v0.4.0) lets us define it in a nice way.
#
# You can use `is_available()` to determine if your system supports CUDA
# +
# Report whether this machine has a usable CUDA device.
print("CUDA supported" if torch.cuda.is_available() else "No cuda support")
# -
# ### Move tensors between CPU and GPU
#
# A torch.device contains a device type ('cpu' or 'cuda') and optional device ordinal (id) for the device type. It can be initilized with torch.device('{device_type}').
#
# - The device attribute of a Tensor gives the torch.device for all Tensors (get_device only works for CUDA tensors)
# - The to method of Tensors and Modules can be used to easily move objects to different devices (instead of having to call cpu() or cuda() based on the context)
#
# +
# at beginning of the script: pick GPU 0 when available, else fall back to CPU
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
print("Device type set:", "GPU" if device.type == "cuda" else "CPU")
# -
# Tensors can be moved onto any device using the **.to** method.
# move the tensor onto the selected device (.to returns the moved tensor)
x = x.to(device)
# ### NOTE:
# You can access use PyTorch with GPU in [Google Colab](https://colab.research.google.com/notebooks/welcome.ipynb#recent=true). Details on how to get started can be found [here](https://jovianlin.io/pytorch-with-gpu-in-google-colab/)
# ## Autograd and Variables
#
# **Autograd** provide a mechanism to compute error gradients and back-propagated through the computational graph. It is a define-by-run framework, which means that your backprop is defined by how your code is run, and that every single iteration can be different.
#
# **torch.Tensor** is the central class of the package. If you set its attribute **.requires_grad** as True, it starts to track all operations on it. When you finish your computation you can call **.backward()** and have all the gradients computed automatically.
#
# To stop a tensor from tracking history, you can call **.detach()** to detach it from the computation history, and to prevent future computation from being tracked.
#
# To prevent tracking history (and using memory), you can also wrap the code block in **with torch.no_grad()**:. This is helpful when evaluating a model because the model may have trainable parameters with requires_grad=True, but for which we don’t need the gradients.
#
#
# Every tensor instance has two attributes: **.data** that contain initial tensor itself and **.grad** that will contain gradients for the corresponding tensor.
# ### NOTE:
# **Computation graph** is simply a specification of how your data is combined to give you the output. Since the graph totally specifies what parameters were involved with which operations, it contains enough information to compute derivatives.
#
#
# For example: if we have $y = wx + b$ it clear that $\frac{\partial y}{\partial x} =w$, $\frac{\partial y}{\partial b} = 1$ and $\frac{\partial y}{\partial w} = x$
#
#
# To compute the derivatives, you can call **.backward()** on a Variable. If Variable is a scalar (i.e. it holds a one element tensor), you don’t need to specify any arguments to backward(), however if it has more elements, you need to specify a grad_output argument that is a tensor of matching shape.
# ### Example 1:
# (Removed a dead `x = torch.ones(2, 2, requires_grad=True)` that was
# immediately overwritten by the scalar tensor below.)
# Create scalar leaf tensors that autograd will track.
x = torch.tensor(1.0, requires_grad=True)
w = torch.tensor(2.0, requires_grad=True)
b = torch.tensor(3.0, requires_grad=True)
# Build a computational graph.
y = w * x + b    # y = 2 * x + 3
# +
# Compute gradients: dy/dx = w, dy/dw = x, dy/db = 1.
y.backward()
# Print out the gradients.
print(x.grad)    # x.grad = 2
print(w.grad)    # w.grad = 1
print(b.grad)    # b.grad = 1
# -
# ### Example 2:
# +
# Create tensors variables.
# Create a leaf tensor with gradient tracking enabled.
x = torch.ones((1, 1), requires_grad=True)
# perform operations
y = x + 2
z = y * y * 3
# find gradient: z holds a single element, so backward() needs no arguments
z.backward()
# print gradient: dz/dx = 6*(x+2) = 18 at x=1 (derivation in the next cell)
print(x.grad)
# -
# The gradient of x is equal to 18. This is equivalent to:
# $$
# z = 3y^2 \text{ where } y = x + 2 \Rightarrow z = 3(x + 2)^2
# $$
#
# Thus: $$ \frac{dz}{dx} = 6(x +2) = 6(1+2) = 18$$
# ## Deep Learning Building Blocks
#
# Deep learning consists of composing linear modules with non-linear modules. The introduction of non-linearities allows for powerful models. Given linear and non-linear modules, this section shows how to define an objective function and train a deep learning model in PyTorch.
#
# Neural networks can be constructed using the **torch.nn** package. It provides pretty much all neural network related functionalities such as :
#
# - Linear layers - nn.Linear, nn.Bilinear
# - Convolution Layers - nn.Conv1d, nn.Conv2d, nn.Conv3d, nn.ConvTranspose2d
# - Nonlinearities - nn.Sigmoid, nn.Tanh, nn.ReLU, nn.LeakyReLU
# - Pooling Layers - nn.MaxPool1d, nn.AveragePool2d
# - Recurrent Networks - nn.LSTM, nn.GRU
# - Normalization - nn.BatchNorm2d
# - Dropout - nn.Dropout, nn.Dropout2d
# - Embedding - nn.Embedding
# - Loss Functions - nn.MSELoss, nn.CrossEntropyLoss, nn.NLLLoss
#
# Using the above **torch.nn** classes requires defining an instance of the class and then running inputs through the instance.
#
# Pytorch provides a functional API that allows users to use these classes in a functional way. Such as
#
# `import torch.nn.functional as F`
#
# - Linear layers - F.linear(input=x, weight=W, bias=b)
# - Convolution Layers - F.conv2d(input=x, weight=W, bias=b, stride=1, padding=0, dilation=1, groups=1)
# - Nonlinearities - F.sigmoid(x), F.tanh(x), F.relu(x), F.softmax(x)
# - Dropout - F.dropout(x, p=0.5, training=True)
import torch.nn as nn
import torch.nn.functional as F
# #### Linear function (Affine Maps)
#
# This is the core building block of deep learning defined is a function:
# $$ f(x) = \mathbf{wx + b}$$ for a matrix $\mathbf{w} $ and vectors $\mathbf{x,b}$. Linear function is implemented in: torch.nn
#
# **torch.nn.Linear(in_features, out_features, bias=True)**
#
#
# Note: pytorch maps the rows of the input instead of the columns
lin = nn.Linear(1, 1, bias=True)  # single-input, single-output affine map
x = torch.Tensor(np.arange(-50, 50).reshape(-1,1))  # 100 inputs in [-50, 50)
y = lin(x)  # y = w*x + b with randomly initialized w and b
#print(y)
import matplotlib.pyplot as plt
# %matplotlib inline
fig, ax = plt.subplots(1,1, figsize=(3,2))
plt.plot(x.data.numpy(), y.data.numpy(), label="linear")
plt.title("Linear Activation")
plt.xlabel("$x$")
plt.ylabel("$y$")
plt.legend();
# #### Non-Linearities Function (Activation Function)
#
# Most used non-linear functions are: sigmoid, tanh and relu function.
## sigmoid: squashes inputs into (0, 1)
y = torch.sigmoid(x)
fig, ax = plt.subplots(1,1, figsize=(3,2))
plt.plot(x.data.numpy(), y.data.numpy(), label="sigmoid")
plt.title("Sigmoid Activation")
plt.xlabel("$x$")
plt.ylabel("$y$")
plt.legend();
## ReLU: max(0, x)
fig, ax = plt.subplots(1,1, figsize=(3,2))
y = F.relu(x)
plt.plot(x.data.numpy(), y.data.numpy(), label="ReLU")
plt.title("ReLU Activation")
plt.xlabel("$x$")
plt.ylabel("$y$")
plt.legend();
## Tanh: squashes inputs into (-1, 1)  (original comment said "Relu" — copy/paste slip)
fig, ax = plt.subplots(1,1, figsize=(3,2))
y = torch.tanh(x)
plt.plot(x.data.numpy(), y.data.numpy(), label="Tanh")
plt.title("Tanh Activation")
plt.xlabel("$x$")
plt.ylabel("$y$")
plt.legend();
# ### Other pytorch modules for defining neural networks
#
# `torch.optim`: provides implementations of standard stochastic optimization techniques.
#
# `torch.distributions`: contains parameterizable probability distributions and sampling functions.
#
# ## Creating a neural network
#
# To create a neural network in PyTorch, we use **nn.Module** base class with Python class inheritance which allows us to use all of the functionality of the **nn.Module base class**.
class Model(torch.nn.Module):
    """Two-layer fully connected network: Linear -> ReLU -> Linear."""

    def __init__(self, nb_feature, nb_output, hidden_size=10):
        """
        In the constructor we instantiate two nn.Linear modules.

        Args:
            nb_feature: number of input features.
            nb_output: number of output units.
            hidden_size: width of the hidden layer. Defaults to 10, the
                value the original code read from a module-level global
                (defined later in the file) — now an explicit parameter.
        """
        super(Model, self).__init__()
        self.fc1 = torch.nn.Linear(nb_feature, hidden_size)
        self.fc2 = torch.nn.Linear(hidden_size, nb_output)

    def forward(self, x):
        """
        In the forward function we accept a Tensor of input data and must
        return a Tensor of output data. We can use Modules defined in the
        constructor as well as arbitrary operators on Tensors.
        """
        x = self.fc1(x)
        x = F.relu(x)   # non-linearity between the two affine layers
        x = self.fc2(x)
        return x
# - In the class definition, you can see the inheritance of the base class **torch.nn.Module**.
# - Then, in the first line of the class initialization (def __init__(self):) we have the required Python **super() function**, which creates an instance of the base **torch.nn.Module** class.
# - The next line define a linear object defined by **torch.nn.Linear**, with the first argument in the definition being the number of input feature and the next argument being the number of output.
# - After that we need to define how data flows through our network. This can be done using the **forward()** method, in which we supply the input data x as the primary argument.
# **PyTorch** offers an alternative, easier and more convenient way of creating a neural network using the `torch.nn.Sequential` class. You can also define your own layers and add them to the Sequential chain.
nb_feature = 2    # input dimensionality
hidden_size = 10  # hidden-layer width
nb_output = 1     # output dimensionality
# Same Linear -> ReLU -> Linear architecture, declared declaratively.
model_type_2 = torch.nn.Sequential(torch.nn.Linear(nb_feature, hidden_size),
                                   torch.nn.ReLU(),
                                   torch.nn.Linear(hidden_size, nb_output)
                                   )
# The next step is to create an instance of this network architecture and assign this instance to cuda() method if available. Suppose we have the following data.
# Create tensors.
x = torch.randn(10, 8)  # 10 samples, 8 features each
y = torch.randn(10, 1)  # 10 targets
model = Model(8, 1)
# move model and data to the selected device (GPU when available)
model = model.to(device)
x = x.to(device)
y = y.to(device)
# We can check the instance of our model:
print(model)
# ### Training the network
# To train this model we need to setup an optimizer and a loss criterion:
# Binary cross-entropy on raw logits (applies the sigmoid internally).
# NOTE(review): `y` above is drawn from randn, not from {0, 1}; BCE expects
# targets in [0, 1] — confirm this is intentional for the demo.
criterion = torch.nn.BCEWithLogitsLoss()
# Plain SGD over all trainable parameters of the model.
optimizer = torch.optim.SGD(model.parameters(), lr=0.001)
# - In the first line, we create a stochastic gradient descent optimizer, and we specify the learning rate and supply the model parameters using **model.parameters()** method of the base **torch.nn.Module** class that we inherit.
# - Next, we set our loss criterion to be the **binary cross-entropy with logits** loss (`BCEWithLogitsLoss`). For details on different loss functions you may refer to [pytorch documentation](http://pytorch.org/docs/master/nn.html#loss-functions)
# ### In the training process:
# - First we run optimizer.zero_grad() – this zeroes / resets all the gradients in the model, so that it is ready to go for the next back propagation pass. In other libraries this is performed implicitly, but in PyTorch you have to remember to do it explicitly.
# - Then we we pass the input data into the model **pred = model(x)** – this will call the **forward()** method in our model class.
# - After that we get the MSE loss between the output of our network and the target data as **loss = criterion(y_pred, y_data)**.
optimizer.zero_grad()  # reset gradients accumulated from any prior step
pred = model(x)        # forward pass (invokes Model.forward)
loss = criterion(pred, y)
print('loss: ', loss.item())
# - Then we run a back-propagation operation from the loss Variable backwards through the network using **loss.backward()**
# - Finally we tell PyTorch to execute a gradient descent step based on the gradients calculated during the **.backward()** operation using **optimizer.step()**.
#
loss.backward()   # populate .grad on every parameter
optimizer.step()  # apply the SGD update using those gradients
# ### Data loaders
#
# PyTorch provides two classess the **Dataset class and the Dataloader class** that can be used to to feed training data into the network.
#
# **Dataset class** is used to provide an interface for accessing all the training or testing samples in your dataset. To achieve this, you have to implement two method, `__getitem__` and `__len__` so that each training sample can be accessed by its index.
# +
from torch.utils.data import Dataset, DataLoader
class customDataset(Dataset):
    """Dataset wrapping feature/target numpy arrays.

    Features are standardized column-wise (zero mean, unit variance)
    before being converted to float32 tensors.
    """
    def __init__(self, x, y):
        # Column-wise standardization of the raw feature matrix.
        standardized = (x - x.mean(axis=0)) / x.std(axis=0)
        self.len = standardized.shape[0]
        self.x = torch.from_numpy(standardized).float()
        self.y = torch.from_numpy(y).float()
    def __getitem__(self, index):
        # One (features, target) training pair.
        return self.x[index], self.y[index]
    def __len__(self):
        # Number of samples.
        return self.len
# +
## Let us prepare data and define the dataset class
import pandas as pd
df = pd.read_csv("../data/pima/diabetes.csv")
features = ['Pregnancies', 'Glucose', 'BloodPressure', 'SkinThickness', 'Insulin',
            'BMI', 'DiabetesPedigreeFunction', 'Age']
target = ['Outcome']
# DataFrame.as_matrix() was deprecated in pandas 0.23 and removed in 1.0;
# to_numpy() is the supported replacement and returns the same ndarray.
inputs = df[features].to_numpy()
targets = df[target].to_numpy()
# +
dataset = customDataset(x=inputs, y=targets)
# print length of the dataset (number of rows loaded from the CSV)
print(dataset.len)
# -
# The **Dataloader class** accepts a dataset and other parameters such as **batch size** to load the data. We can then iterate over the Dataloader to get batches of training data and train our models. This class provides several important facilities for building deep learning models, such as batching, shuffling, multiprocess data loading, etc.
data_loader = DataLoader(dataset=dataset,
                         batch_size=32,
                         shuffle=True)  # reshuffle the samples every epoch
# ##### To access data in data loader
# grab a single batch of up to 32 (features, targets) pairs
X_data, y_data=next(iter(data_loader))
X_data
# To iterate through our data we use for loop as follows
# peek at the third batch (i == 2) only, then stop iterating
for i, (x_data, y_data) in enumerate(data_loader, 0):
    if i ==2:
        print(x_data)
        print(y_data)
        break
break
def train(model, optimizer, loss_fn, device, data_loader, num_epochs, print_every=2):
    """Run a standard supervised training loop.

    Args:
        model: nn.Module to optimize.
        optimizer: torch optimizer wrapping model.parameters().
        loss_fn: loss module, e.g. BCEWithLogitsLoss.
        device: torch.device that model and batches are moved to.
        data_loader: DataLoader yielding (inputs, targets) batches.
        num_epochs: number of full passes over data_loader.
        print_every: print progress every `print_every` epochs.

    Returns:
        List with one mean training loss (float) per epoch.
    """
    total_loss = []
    model.to(device)
    loss_fn.to(device)
    model.train()  # enable training-mode behavior (dropout, batch-norm stats)
    print("Start training")
    for epoch in range(num_epochs):
        training_loss = []
        for i, (inputs, targets) in enumerate(data_loader):
            inputs, targets = inputs.to(device), targets.to(device)
            optimizer.zero_grad()  # reset gradients from the previous step
            pred = model(inputs)
            # Calculate Loss:
            loss = loss_fn(pred, targets)
            training_loss.append(loss.item())
            loss.backward()
            optimizer.step()
        total_loss.append(np.mean(training_loss))
        if epoch % print_every == 0:
            # NOTE(review): `i` and `inputs` hold the values from the LAST
            # batch of the epoch here, so the "[n/total (p%)]" figures always
            # show end-of-epoch numbers rather than live progress.
            print('Train Epoch: {} [{}/{} ({:.0f}%)] Loss: {:.6f}'.format(
                epoch+1, i * len(inputs), len(data_loader.dataset),
                100. * i / len(data_loader), np.mean(training_loss)))
    return total_loss
# Train for 1000 epochs, printing progress every 50 epochs.
train_loss = train(model, optimizer, criterion, device, data_loader, 1000, 50)
plt.plot(train_loss)
plt.title("Training loss")
# NOTE(review): each point is one epoch's mean loss, so the x-axis is
# epochs rather than iterations.
plt.xlabel("iterations")
plt.ylabel("Loss")
# ## References:
#
# - [Adventures in machine learning](http://adventuresinmachinelearning.com/pytorch-tutorial-deep-learning/)
# - [DeepLearningZeroToAll](https://github.com/hunkim/DeepLearningZeroToAll)
# - [MILA welocome tutorial](https://github.com/mila-udem/welcome_tutorials/tree/master/pytorch)
# - [PyTorch With Baby Steps: From y = x To Training A Convnet](http://lelon.io/blog/2018/02/08/pytorch-with-baby-steps)
# - [How to Use Your Own Custom Dataset for Classification in PyTorch](https://jdhao.github.io/2017/10/23/pytorch-load-data-and-make-batch/)
# <a name="myfootnote1">1</a>: http://pytorch.org/about/
| DeepLearning/DL-pytorch.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
from keras.models import Model
from keras.applications.resnet50 import ResNet50
from keras.applications.resnet50 import preprocess_input
from keras.preprocessing.image import load_img
from keras.preprocessing.image import img_to_array
from keras.applications import imagenet_utils
# -
# Full ResNet50 pretrained on ImageNet.
resnet_model = ResNet50(weights='imagenet')
# Truncate the network at the global-average-pool layer so it acts as a
# feature extractor (the classification head is dropped).
model = Model(inputs=resnet_model.input, outputs=resnet_model.get_layer('avg_pool').output)
input_dim = (224, 224)  # input resolution the image is resized to
image = load_img('Data/dog_example.jpg', target_size=input_dim)
image = img_to_array(image)
image = image.reshape((1, *image.shape))  # prepend a batch dimension
image = preprocess_input(image)           # model-specific input preprocessing
avg_pool_features = model.predict(image)  # bottleneck features for this image
| Section06/Extracting bottleneck features with ResNet.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # How to get a simple dataset from a big CSV file
#
# This notebook goes through some steps to convert a large unwieldy file from the Council's [Litter Bin Sensor Project](http://data.edinburghopendata.info/dataset/bf994150-8983-43b4-9c08-45d9a46a08be/resource/a7b80a47-781c-46ff-9123-e9ba6a00d8b6/download/cecbinsensorbinsdatafindings.csv) into a simpler dataset for visualisation. We use a number of functions provided by the Python [pandas library](http://pandas.pydata.org).
#
# We assume that we've already downloaded the file to our local file system. So our first step is to import the CSV file as a pandas `DataFrame`:
import pandas as pd
table = pd.read_csv("../data/binsensors.csv")
# Let's have a look at the column labels:
list(table.columns.values)
# Suppose we just want to select a couple of columns, we can use the column labels like this:
table[['ID', 'Address']]
# But a couple of interesting columns (for the collection date and the weight measured by the sensor) have very complicated labels, so let's simplify them.
#
# First, we'll just make a list of all the labels, then we'll bind the relevant string values to a couple of variables. This means that we don't have to worry about mis-typing things like `'Date & Time of bin collection (Europe/London)`!
l = list(table.columns.values)
date = l[8]   # long label of the collection-date column
fill = l[10]  # long label of the fill-level column
date, fill
# Now that we've got short variables `date` and `fill` in place of the long strings, let's go ahead and replace those labels with something simpler:
table = table.rename(columns={date: 'Date', fill: 'Fill_level'})
# Now we'll make a new table with just four columns:
table1 = table[['ID', 'Address', 'Date', 'Fill_level']]
# And we'll just take the first 30 rows:
tabletop = table1.head(30)
tabletop
# Finally, we'll write the result to a JSON formatted file.
tabletop.to_json('../data/binsensorsimple.json', orient="records")
| viz/python/Simplify_Bin_Sensor_Data.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
# # !wget http://baidudeeplearning.bj.bcebos.com/image_contest_level_1.tar.gz
# # !tar -zxf image_contest_level_1.tar.gz
# -
import numpy as np
import os
import tensorflow as tf
import matplotlib.pyplot as plt
from skimage.transform import resize as imresize
import cv2
import time
directory = 'image_contest_level_1/'
# The dataset is 100k images named 0.png ... 99999.png.
images = ['%d.png'%(d) for d in range(100000)]
# labels.txt: one line per image; the first whitespace-separated token is the label.
with open(directory+'labels.txt','r') as fopen:
    labels = [i.split()[0] for i in list(filter(None,fopen.read().split('\n')))]
len(images)
len(labels)
# Sanity check: show the first image (read as grayscale, scaled to [0, 1]) with its label.
plt.imshow(cv2.imread(directory+images[0], 0).astype(np.float32)/255.)
plt.title(labels[0])
plt.show()
# +
# Vocabulary of the arithmetic-expression captchas: digits and operators.
charset = '0123456789+-*()'
# Two ids beyond the charset (space token at 0 plus one extra class —
# presumably the CTC blank; confirm against the ctc_loss usage below).
num_classes = len(charset) + 2

SPACE_INDEX = 0
SPACE_TOKEN = ''

# Character <-> id lookup tables; ids start at 1 so 0 stays free for SPACE.
encode_maps = {symbol: idx for idx, symbol in enumerate(charset, 1)}
decode_maps = {idx: symbol for idx, symbol in enumerate(charset, 1)}
encode_maps[SPACE_TOKEN] = SPACE_INDEX
decode_maps[SPACE_INDEX] = SPACE_TOKEN
# -
image_height = 60   # all images are resized to 60x240 grayscale
image_width = 240
image_channel = 1
max_stepsize = 128  # RNN time steps (also the channel count of the last conv stage)
num_hidden = 256    # LSTM hidden units per layer
epoch = 20          # training epochs
batch_size = 128
initial_learning_rate = 1e-3
# +
def pad_second_dim(x, desired_size):
    """Right-pad the second dimension of 2-D tensor `x` with zeros up to `desired_size`."""
    padding = tf.tile([[0]], tf.stack([tf.shape(x)[0], desired_size - tf.shape(x)[1]], 0))
    return tf.concat([x, padding], 1)
class Model:
    """CNN + 2-layer LSTM + CTC graph (TensorFlow 1.x) for captcha OCR."""
    def __init__(self):
        # Placeholders: input images, sparse CTC labels, RNN sequence lengths,
        # dense labels (for accuracy) and per-sample label lengths.
        self.X = tf.placeholder(tf.float32, [None, image_height, image_width, image_channel])
        self.Y = tf.sparse_placeholder(tf.int32)
        self.SEQ_LEN = tf.placeholder(tf.int32, [None])
        self.label = tf.placeholder(tf.int32, [None, None])
        self.Y_seq_len = tf.placeholder(tf.int32, [None])
        batch_size = tf.shape(self.X)[0]
        filters = [64, 128, 128, max_stepsize]
        strides = [1, 2]
        # Four conv -> batch-norm -> leaky-relu -> 2x2 max-pool stages.
        x = self.conv2d(self.X, 'cnn-1', 3, 1, filters[0], strides[0])
        x = self.batch_norm('bn1', x)
        x = self.leaky_relu(x, 0.01)
        x = self.max_pool(x, 2, strides[1])
        x = self.conv2d(x, 'cnn-2', 3, filters[0], filters[1], strides[0])
        x = self.batch_norm('bn2', x)
        x = self.leaky_relu(x, 0.01)
        x = self.max_pool(x, 2, strides[1])
        x = self.conv2d(x, 'cnn-3', 3, filters[1], filters[2], strides[0])
        x = self.batch_norm('bn3', x)
        x = self.leaky_relu(x, 0.01)
        x = self.max_pool(x, 2, strides[1])
        x = self.conv2d(x, 'cnn-4', 3, filters[2], filters[3], strides[0])
        x = self.batch_norm('bn4', x)
        x = self.leaky_relu(x, 0.01)
        x = self.max_pool(x, 2, strides[1])
        # Use the channel axis as RNN time steps; the four /2 poolings shrink
        # the 60x240 input to 4x15 spatial positions (60 columns of features).
        x = tf.reshape(x, [batch_size, -1, filters[3]])
        x = tf.transpose(x, [0, 2, 1])
        x = tf.reshape(x, [batch_size, filters[3], 4 * 15])
        cell = tf.contrib.rnn.LSTMCell(num_hidden)
        cell1 = tf.contrib.rnn.LSTMCell(num_hidden)
        stack = tf.contrib.rnn.MultiRNNCell([cell, cell1])
        outputs, _ = tf.nn.dynamic_rnn(stack, x, self.SEQ_LEN, dtype=tf.float32)
        outputs = tf.reshape(outputs, [-1, num_hidden])
        # Per-timestep class logits, then reshape back to (batch, time, classes).
        self.logits = tf.layers.dense(outputs, num_classes)
        shape = tf.shape(x)
        self.logits = tf.reshape(self.logits, [shape[0], -1, num_classes])
        # ctc_loss / the beam-search decoder expect time-major (time, batch, classes).
        self.logits = tf.transpose(self.logits, (1, 0, 2))
        self.global_step = tf.Variable(0, trainable=False)
        self.loss = tf.nn.ctc_loss(labels=self.Y,
                                   inputs=self.logits,
                                   sequence_length=self.SEQ_LEN)
        self.cost = tf.reduce_mean(self.loss)
        self.optimizer = tf.train.AdamOptimizer(learning_rate=initial_learning_rate).minimize(self.cost)
        self.decoded, self.log_prob = tf.nn.ctc_beam_search_decoder(self.logits,
                                                                    self.SEQ_LEN,
                                                                    merge_repeated=False)
        decoded = tf.to_int32(self.decoded[0])
        self.dense_decoded = tf.sparse_tensor_to_dense(decoded)
        # Character-level accuracy over the valid (unpadded) label positions.
        preds = self.dense_decoded[:, :tf.reduce_max(self.Y_seq_len)]
        masks = tf.sequence_mask(self.Y_seq_len, tf.reduce_max(self.Y_seq_len), dtype=tf.float32)
        preds = pad_second_dim(preds, tf.reduce_max(self.Y_seq_len))
        y_t = tf.cast(preds, tf.int32)
        self.prediction = tf.boolean_mask(y_t, masks)
        mask_label = tf.boolean_mask(self.label, masks)
        self.mask_label = mask_label
        correct_pred = tf.equal(self.prediction, mask_label)
        correct_index = tf.cast(correct_pred, tf.float32)  # NOTE(review): unused
        self.accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))
    def conv2d(self, x, name, filter_size, channel_in, channel_out, strides):
        # channel_in is not used by tf.layers.conv2d (it is inferred from x);
        # kept in the signature for call-site symmetry.
        with tf.variable_scope(name):
            return tf.layers.conv2d(x, channel_out, filter_size, strides, padding='SAME')
    def batch_norm(self, name, x):
        # Batch normalization using per-batch moments (no moving averages).
        with tf.variable_scope(name):
            params_shape = [x.get_shape()[-1]]
            beta = tf.get_variable('beta', params_shape, tf.float32,
                                   initializer=tf.constant_initializer(0.0, tf.float32))
            gamma = tf.get_variable('gamma', params_shape, tf.float32,
                                    initializer=tf.constant_initializer(1.0, tf.float32))
            mean, variance = tf.nn.moments(x, [0, 1, 2], name='moments')
            x_bn = tf.nn.batch_normalization(x, mean, variance, beta, gamma, 0.001)
            x_bn.set_shape(x.get_shape())
            return x_bn
    def leaky_relu(self, x, leak=0):
        # leak * x for negative inputs, identity otherwise.
        return tf.where(tf.less(x, 0.0), leak * x, x, name='leaky_relu')
    def max_pool(self, x, size, strides):
        return tf.nn.max_pool(x,
                              ksize=[1, size, size, 1],
                              strides=[1, strides, strides, 1],
                              padding='SAME',
                              name='max_pool')
# +
def sparse_tuple_from_label(sequences, dtype=np.int32):
    """Convert a batch of label sequences to a (indices, values, shape) tuple.

    This is the triplet TensorFlow's sparse placeholders expect: `indices`
    holds (row, col) positions, `values` the label ids, and `shape` is
    (batch size, longest sequence length).
    """
    indices = []
    values = []
    for row, seq in enumerate(sequences):
        for col, item in enumerate(seq):
            indices.append((row, col))
            values.append(item)
    indices = np.asarray(indices, dtype=np.int64)
    values = np.asarray(values, dtype=dtype)
    longest = indices.max(0)[1] + 1
    shape = np.asarray([len(sequences), longest], dtype=np.int64)
    return indices, values, shape
def pad_sentence_batch(sentence_batch, pad_int):
    """Right-pad every sentence to the batch's longest length.

    Returns (padded sentences, original lengths).
    """
    longest = max(len(sentence) for sentence in sentence_batch)
    padded_seqs = [
        sentence + [pad_int] * (longest - len(sentence))
        for sentence in sentence_batch
    ]
    seq_lens = [len(sentence) for sentence in sentence_batch]
    return padded_seqs, seq_lens
# -
# Build a fresh graph and initialize all variables in an interactive session.
tf.reset_default_graph()
sess = tf.InteractiveSession()
model = Model()
sess.run(tf.global_variables_initializer())
# +
from tqdm import tqdm
# Load every image (grayscale, scaled to [0, 1], resized to 60x240) and
# encode its label string into a list of character ids.
X, Y = [], []
for i in tqdm(range(len(images))):
    img = images[i]
    X.append(imresize(cv2.imread(directory+img, 0).astype(np.float32)/255., (image_height,image_width)))
    # BUG FIX: the original tested `labels[0] == SPACE_TOKEN` — a constant
    # check against the first label of the whole dataset — instead of
    # testing the current character `c`.
    Y.append([SPACE_INDEX if c == SPACE_TOKEN else encode_maps[c] for c in labels[i]])
# +
from sklearn.model_selection import train_test_split
# Hold out 20% of the images/labels for evaluation.
train_X, test_X, train_Y, test_Y = train_test_split(X, Y, test_size = 0.2)
# -
# Per epoch: one optimization pass over the training set, one evaluation
# pass over the test set, then decode a sample for a qualitative check.
for e in range(epoch):
    pbar = tqdm(
        range(0, len(train_X), batch_size), desc = 'minibatch loop')
    train_loss, train_acc, test_loss, test_acc = [], [], [], []
    total_lost, total_acc = 0, 0  # NOTE(review): never used afterwards
    for i in pbar:
        index = min(i + batch_size, len(train_X))
        batch_x = train_X[i : index]
        batch_x = np.array(batch_x).reshape((len(batch_x), image_height, image_width,image_channel))
        y = train_Y[i : index]
        batch_y = sparse_tuple_from_label(y)  # sparse labels for ctc_loss
        batch_label, batch_length = pad_sentence_batch(y, 0)  # dense labels for accuracy
        batch_len = np.asarray([max_stepsize for _ in [1]*len(batch_x)], dtype=np.int64)
        feed = {model.X: batch_x,
                model.Y: batch_y,
                model.SEQ_LEN: batch_len,
                model.label: batch_label,
                model.Y_seq_len: batch_length}
        accuracy, loss, _ = sess.run([model.accuracy,model.cost,model.optimizer],
                                     feed_dict = feed)
        train_loss.append(loss)
        train_acc.append(accuracy)
        pbar.set_postfix(cost = loss, accuracy = accuracy)
    # Evaluation pass: identical feed construction, but no optimizer step.
    pbar = tqdm(
        range(0, len(test_X), batch_size), desc = 'minibatch loop')
    for i in pbar:
        index = min(i + batch_size, len(test_X))
        batch_x = test_X[i : index]
        batch_x = np.array(batch_x).reshape((len(batch_x), image_height, image_width,image_channel))
        y = test_Y[i : index]
        batch_y = sparse_tuple_from_label(y)
        batch_label, batch_length = pad_sentence_batch(y, 0)
        batch_len = np.asarray([max_stepsize for _ in [1]*len(batch_x)], dtype=np.int64)
        feed = {model.X: batch_x,
                model.Y: batch_y,
                model.SEQ_LEN: batch_len,
                model.label: batch_label,
                model.Y_seq_len: batch_length}
        accuracy, loss = sess.run([model.accuracy,model.cost],
                                  feed_dict = feed)
        test_loss.append(loss)
        test_acc.append(accuracy)
        pbar.set_postfix(cost = loss, accuracy = accuracy)
    print('epoch %d, training avg loss %f, training avg acc %f'%(e+1,
        np.mean(train_loss),np.mean(train_acc)))
    print('epoch %d, testing avg loss %f, testing avg acc %f'%(e+1,
        np.mean(test_loss),np.mean(test_acc)))
    # Decode the first image of the last test batch and compare with truth.
    decoded = sess.run(model.dense_decoded, feed_dict = {model.X: batch_x[:1],
                                                         model.SEQ_LEN: batch_len[:1]})
    plt.imshow(batch_x[0][:,:,0])
    decoded = ''.join([decode_maps[i] for i in decoded[0]])
    actual = ''.join([decode_maps[i] for i in y[0]])
    plt.title('predict: %s, actual: %s'%(decoded, actual))
    plt.show()
| ocr/1.cnn-rnn-ctc.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/coding-ss/DS-Unit-1-Sprint-3-Data-Storytelling/blob/master/Copy_of_LS_DS_224_Sequence_your_narrative_LIVE_LESSON.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + [markdown] id="JbDHnhet8CWy" colab_type="text"
# _Lambda School Data Science_
#
# # Sequence your narrative
#
# Today we will create a sequence of visualizations inspired by [<NAME>'s 200 Countries, 200 Years, 4 Minutes](https://www.youtube.com/watch?v=jbkSRLYSojo).
#
# Using this [data from Gapminder](https://github.com/open-numbers/ddf--gapminder--systema_globalis/):
# - https://raw.githubusercontent.com/open-numbers/ddf--gapminder--systema_globalis/master/ddf--datapoints--income_per_person_gdppercapita_ppp_inflation_adjusted--by--geo--time.csv
# - https://raw.githubusercontent.com/open-numbers/ddf--gapminder--systema_globalis/master/ddf--datapoints--life_expectancy_years--by--geo--time.csv
# - https://raw.githubusercontent.com/open-numbers/ddf--gapminder--systema_globalis/master/ddf--datapoints--population_total--by--geo--time.csv
# - https://raw.githubusercontent.com/open-numbers/ddf--gapminder--systema_globalis/master/ddf--entities--geo--country.csv
# - https://raw.githubusercontent.com/open-numbers/ddf--gapminder--systema_globalis/master/ddf--concepts.csv
# + [markdown] id="zyPYtsY6HtIK" colab_type="text"
# Objectives
# - sequence multiple visualizations
# - combine qualitative anecdotes with quantitative aggregates
#
# Links
# - [<NAME>’s TED talks](https://www.ted.com/speakers/hans_rosling)
# - [Spiralling global temperatures from 1850-2016](https://twitter.com/ed_hawkins/status/729753441459945474)
# - "[The Pudding](https://pudding.cool/) explains ideas debated in culture with visual essays."
# - [A Data Point Walks Into a Bar](https://lisacharlotterost.github.io/2016/12/27/datapoint-in-bar/): a thoughtful blog post about emotion and empathy in data storytelling
# + [markdown] id="SxTJBgRAW3jD" colab_type="text"
# ## Make a plan
#
# #### How to present the data?
#
# Variables --> Visual Encodings
# - Income --> x
# - Lifespan --> y
# - Region --> color
# - Population --> size
# - Year --> animation frame (alternative: small multiple)
# - Country --> annotation
#
# Qualitative --> Verbal
# - Editorial / contextual explanation --> audio narration (alternative: text)
#
#
# #### How to structure the data?
#
# | Year | Country | Region | Income | Lifespan | Population |
# |------|---------|----------|--------|----------|------------|
# | 1818 | USA | Americas | ### | ## | # |
# | 1918 | USA | Americas | #### | ### | ## |
# | 2018 | USA | Americas | ##### | ### | ### |
# | 1818 | China | Asia | # | # | # |
# | 1918 | China | Asia | ## | ## | ### |
# | 2018 | China | Asia | ### | ### | ##### |
#
# + [markdown] id="3ebEjShbWsIy" colab_type="text"
# ## Upgrade Seaborn
#
# Make sure you have at least version 0.9.0.
#
# In Colab, go to **Restart runtime** after you run the `pip` command.
# + id="4RSxbu7rWr1p" colab_type="code" outputId="b88a1d67-cc65-4c91-b413-f3e0e6504d71" colab={"base_uri": "https://localhost:8080/", "height": 411}
# !pip install --upgrade seaborn
# + id="5sQ0-7JUWyN4" colab_type="code" outputId="6f491442-5844-4886-d52f-1b1ea6f4d83e" colab={"base_uri": "https://localhost:8080/", "height": 34}
import seaborn as sns
sns.__version__
# + [markdown] id="S2dXWRTFTsgd" colab_type="text"
# ## More imports
# + id="y-TgL_mA8OkF" colab_type="code" colab={}
# %matplotlib inline
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
# + [markdown] id="CZGG5prcTxrQ" colab_type="text"
# ## Load & look at data
# + id="-uE25LHD8CW0" colab_type="code" colab={}
# GDP per capita (PPP, inflation-adjusted) per country-year.
income = pd.read_csv('https://raw.githubusercontent.com/open-numbers/ddf--gapminder--systema_globalis/master/ddf--datapoints--income_per_person_gdppercapita_ppp_inflation_adjusted--by--geo--time.csv')
# + id="gg_pJslMY2bq" colab_type="code" colab={}
# Life expectancy (years) per country-year.
lifespan = pd.read_csv('https://raw.githubusercontent.com/open-numbers/ddf--gapminder--systema_globalis/master/ddf--datapoints--life_expectancy_years--by--geo--time.csv')
# + id="F6knDUevY-xR" colab_type="code" colab={}
# Total population per country-year.
population = pd.read_csv('https://raw.githubusercontent.com/open-numbers/ddf--gapminder--systema_globalis/master/ddf--datapoints--population_total--by--geo--time.csv')
# + id="hX6abI-iZGLl" colab_type="code" colab={}
# Country metadata (name, region grouping, ...) keyed by geo code.
entities = pd.read_csv('https://raw.githubusercontent.com/open-numbers/ddf--gapminder--systema_globalis/master/ddf--entities--geo--country.csv')
# + id="AI-zcaDkZHXm" colab_type="code" colab={}
# Descriptions of the concepts (columns) used across the dataset files.
concepts = pd.read_csv('https://raw.githubusercontent.com/open-numbers/ddf--gapminder--systema_globalis/master/ddf--concepts.csv')
# + id="EgFw-g0nZLJy" colab_type="code" outputId="ccb336dc-daea-4c38-8616-d60c8c1dfe59" colab={"base_uri": "https://localhost:8080/", "height": 34}
income.shape, lifespan.shape, population.shape, entities.shape, concepts.shape
# + id="I-T62v7FZQu5" colab_type="code" outputId="2150dcb2-f75f-445b-d2cb-7cf4e426070b" colab={"base_uri": "https://localhost:8080/", "height": 204}
income.head()
# + id="2zIdtDESZYG5" colab_type="code" outputId="2be5e856-5d86-44f6-cded-8d04748f6e62" colab={"base_uri": "https://localhost:8080/", "height": 204}
lifespan.head()
# + id="58AXNVMKZj3T" colab_type="code" outputId="7e738a72-b7bb-4f5c-d81f-52dbc7d42e6e" colab={"base_uri": "https://localhost:8080/", "height": 204}
population.head()
# + id="0ywWDL2MZqlF" colab_type="code" outputId="6e824b69-eb33-4232-c7eb-7523a88c0f09" colab={"base_uri": "https://localhost:8080/", "height": 258}
pd.options.display.max_columns = 500
entities.head()
# + id="mk_R0eFZZ0G5" colab_type="code" outputId="765fef42-f482-47fa-c68a-126743027eb8" colab={"base_uri": "https://localhost:8080/", "height": 513}
concepts.head()
# + [markdown] id="6HYUytvLT8Kf" colab_type="text"
# ## Merge data
# + [markdown] id="dhALZDsh9n9L" colab_type="text"
# https://github.com/pandas-dev/pandas/blob/master/doc/cheatsheet/Pandas_Cheat_Sheet.pdf
# + id="OYNkyW4E6Koy" colab_type="code" colab={}
# Inner-join income and lifespan on their shared columns (geo, time).
df = pd.merge(income, lifespan)
# + id="YWd8CTyu6cBC" colab_type="code" colab={}
# Add population, again joining on the shared key columns.
df = pd.merge(df, population)
# + id="VQP5AXmN7rDO" colab_type="code" colab={}
# Entity columns we need: the geo code, display name, and 6-region grouping.
variables = ['country', 'name', 'world_6region']
# + id="N3nm5As385Fp" colab_type="code" outputId="9a04a5c5-f6f1-4094-faf0-31cfa3825ad7" colab={"base_uri": "https://localhost:8080/", "height": 34}
# Attach country metadata: datapoint rows key on 'geo', entity rows on 'country'.
df = pd.merge(df, entities[variables],
              how='inner', left_on='geo', right_on='country')
# Both join-key columns are redundant once merged.
df = df.drop(columns=['geo', 'country'])
# Shorten the long DDF column names to friendly ones.
df = df.rename(columns={
    'time': 'year',
    'income_per_person_gdppercapita_ppp_inflation_adjusted': 'income',
    'life_expectancy_years': 'lifespan',
    'population_total': 'population',
    'name': 'country',
    'world_6region': 'region'
})
df.shape
# + id="MJQWLNHy9qWZ" colab_type="code" outputId="8d0527ea-5770-46cf-9c26-13ac0c1de3f6" colab={"base_uri": "https://localhost:8080/", "height": 204}
df.tail()
# + [markdown] id="4OdEr5IFVdF5" colab_type="text"
# ## Explore data
# + id="4IzXea0T64x4" colab_type="code" outputId="0cbe63cc-a0c7-4cc6-ff02-6315fabe6817" colab={"base_uri": "https://localhost:8080/", "height": 297}
df.describe()
# + id="cndDbmuc9yxC" colab_type="code" outputId="86db359a-4574-4e54-e948-9479bf241ac0" colab={"base_uri": "https://localhost:8080/", "height": 173}
df.describe(exclude=np.number)
# + id="dfKpmUfl955K" colab_type="code" outputId="05469f10-d285-4396-f10c-47367940cc9f" colab={"base_uri": "https://localhost:8080/", "height": 680}
df.country.unique()
# + id="AYsdWTUN9_Sh" colab_type="code" outputId="9c752f4c-3034-40db-ad12-182c9481e016" colab={"base_uri": "https://localhost:8080/", "height": 142}
usa = df[df.country=='United States']
usa[usa.year.isin([1818, 1918, 2018])]
# + id="WviDL18d-S32" colab_type="code" outputId="f424e482-c6b5-4838-c60d-634f0449a9d9" colab={"base_uri": "https://localhost:8080/", "height": 142}
china = df[df.country=='China']
china[china.year.isin([1818, 1918, 2018])]
# + [markdown] id="hecscpimY6Oz" colab_type="text"
# ## Plot visualization
# + id="_o8RmX2M67ai" colab_type="code" outputId="500a24cc-edf5-4f26-e256-71b147fc50cb" colab={"base_uri": "https://localhost:8080/", "height": 34}
# Snapshot of the most recent year in the data.
now = df[df.year==2018]
now.shape
# + id="rMcZ8mfT-28b" colab_type="code" outputId="79a19cb6-2416-48c0-d681-dedd1ed03435" colab={"base_uri": "https://localhost:8080/", "height": 164}
# One bubble per country: income vs. lifespan, coloured by region and
# sized by population (the classic Gapminder view for a single year).
sns.relplot(x='income', y='lifespan', hue='region', size='population', data=now);
# + [markdown] id="8OFxenCdhocj" colab_type="text"
# ## Analyze outliers
# + id="D59bn-7k6-Io" colab_type="code" colab={}
# Inspect the extreme high-income countries in 2018.
now[now.income > 80000].sort_values(by='income')
# + id="IQD5QDgcDAEp" colab_type="code" colab={}
# Pull out Qatar's coordinates so we can annotate it on the plot.
qatar = now[now.country=='Qatar']
qatar_income = qatar.income.values[0]
qatar_lifespan = qatar.lifespan.values[0]
# + id="cnG1t9gwCmM6" colab_type="code" colab={}
sns.relplot(x='income', y='lifespan', hue='region', size='population', data=now)
# Nudge the label left and up so it sits beside Qatar's bubble.
plt.text(x=qatar_income-5000, y=qatar_lifespan+1, s='Qatar')
plt.title('Qatar has the highest incomes in 2018');
# + [markdown] id="DNTMMBkVhrGk" colab_type="text"
# ## Plot multiple years
# + id="JkTUmYGF7BQt" colab_type="code" colab={}
# Compare three snapshots a century apart as small multiples (one column
# per year). A log income axis spreads out the low-income countries.
years = [1818, 1918, 2018]
centuries = df[df.year.isin(years)]
sns.relplot(x='income', y='lifespan', hue='region', size='population',
            col='year', data=centuries)
plt.xscale('log');
# + [markdown] id="BB1Ki0v6hxCA" colab_type="text"
# ## Point out a story
# + id="eSgZhD3v7HIe" colab_type="code" colab={}
# One plot per 20-year step. Fixed axis limits keep the frames comparable,
# and the horizontal line at lifespan 50 anchors the story being told.
years = [1918, 1938, 1958, 1978, 1998, 2018]
for year in years:
    sns.relplot(x='income', y='lifespan', hue='region', size='population',
                data=df[df.year==year])
    plt.xscale('log')
    plt.xlim((150, 150000))
    plt.ylim((20, 90))
    plt.title(year)
    plt.axhline(y=50, color='grey')
# + [markdown] id="XOLritSpHtCs" colab_type="text"
# ### 100 years ago, few countries had lifespans _above_ 50 years
# + id="9EIBIyETH1F2" colab_type="code" colab={}
df[(df.year==1918) & (df.lifespan > 50)]
# + [markdown] id="vVAh0A75IRch" colab_type="text"
# ### Today, no countries have lifespans _below_ 50 years
# + id="o4WKWMImIEta" colab_type="code" colab={}
df[(df.year==2018) & (df.lifespan < 50)]
# + [markdown] id="p4zrCcFsxqPv" colab_type="text"
# ## Get interactive with Google Colab
#
# In Colab, go to **Insert** > **Add a form field**
# + id="tFvryyYcxsHc" colab_type="code" colab={}
# The #@param comment turns this into a Colab form field: dragging the
# slider re-runs the cell with the chosen year.
year = 1961 #@param {type:"slider", min:1800, max:2018, step:1}
sns.relplot(x='income', y='lifespan', hue='region', size='population',
            data=df[df.year==year])
plt.xscale('log')
plt.xlim((150, 150000))
plt.ylim((20, 90))
plt.title(year);
# + id="UnLgcmIIxyqC" colab_type="code" colab={}
# Based on the example at:
# https://colab.research.google.com/notebooks/widgets.ipynb#scrollTo=BZa2t-U5gIAB
from google.colab import widgets
# One tab per year; iterating the TabBar renders each plot into its own tab.
tb = widgets.TabBar([str(year) for year in years])
for tab, year in zip(tb, years):
    sns.relplot(x='income', y='lifespan', hue='region', size='population',
                data=df[df.year==year])
    plt.xscale('log')
    plt.xlim((150, 150000))
    plt.ylim((20, 90));
# + [markdown] id="KV8xHfvmh0kL" colab_type="text"
# # ASSIGNMENT
# Replicate the lesson code
#
# # STRETCH OPTIONS
#
# ## 1. Animate!
# - [Making animations work in Google Colaboratory](https://medium.com/lambda-school-machine-learning/making-animations-work-in-google-colaboratory-new-home-for-ml-prototyping-c6147186ae75)
# - [How to Create Animated Graphs in Python](https://towardsdatascience.com/how-to-create-animated-graphs-in-python-bb619cc2dec1)
#
# ## 2. Work on anything related to your portfolio site / project
# - [DS2 Data Storytelling portfolio project](https://gist.github.com/rrherr/6388f3ad9415b51053e0f1603446c6b0): Deliverables, Milestones, Examples
| Copy_of_LS_DS_224_Sequence_your_narrative_LIVE_LESSON.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="Chr4Nn81r8Oi"
# # Abstract
#
# **Objective:** To do a roundtrip analysis on a file: reading, transforming, writing.
#
# **Method:** Load a given file, apply a transformation, and write the result back out.
#
# + [markdown] id="lxW3e1ShtPx9"
# # Use Kaggle to Download the Data
#
# This installs a local copy of the `kaggle` command-line app to your PC, and uses it to search for and download a smart-meter dataset. You will need a few things set up in advance.
# + [markdown] id="144OSdU8Cz95"
# You will need a `kaggle.json` file. The steps are:
#
# 1. Create a Kaggle account via email
# 2. Open your profile, and go the **Account** settings: `https://www.kaggle.com/$USERNAME_HERE/account`
# 3. Click on the button **Create New API Token**. This will download a `kaggle.json` file to local PC.
#
# Next you need to put it on Google Drive
#
# 1. Make sure you have a Google Drive account
# 2. Create a folder `Colab Data`
# 3. Upload the `kaggle.json` file to that `Colab Data` folder on Google driver.
#
# You now have what you need to execute this notebook
# + id="Sq-QJUjctWV4" executionInfo={"status": "ok", "timestamp": 1604249623789, "user_tz": 0, "elapsed": 17856, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GiPntWv82gXOOwC1JBFNoK4SPM-4IxTQnERyCBuUQ=s64", "userId": "15000880758463632242"}} outputId="072558f4-d6c7-4791-da4f-766f7f01e5c0" colab={"base_uri": "https://localhost:8080/"}
from google.colab import drive
drive.mount('/content/drive')
# + id="whpHBo-4tpSa" executionInfo={"status": "ok", "timestamp": 1604249623791, "user_tz": 0, "elapsed": 17850, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GiPntWv82gXOOwC1JBFNoK4SPM-4IxTQnERyCBuUQ=s64", "userId": "15000880758463632242"}}
# !mkdir -p ~/.kaggle
# + id="IRlqMEEbtpU1" executionInfo={"status": "ok", "timestamp": 1604249624170, "user_tz": 0, "elapsed": 18222, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GiPntWv82gXOOwC1JBFNoK4SPM-4IxTQnERyCBuUQ=s64", "userId": "15000880758463632242"}}
# !cp "/content/drive/My Drive/Colab Data/kaggle.json" ~/.kaggle
# + id="zk3EtDsctpXC" executionInfo={"status": "ok", "timestamp": 1604249624171, "user_tz": 0, "elapsed": 18219, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GiPntWv82gXOOwC1JBFNoK4SPM-4IxTQnERyCBuUQ=s64", "userId": "15000880758463632242"}}
# !chmod 600 ~/.kaggle/kaggle.json
# + id="H1RDZtyvtpZT" executionInfo={"status": "ok", "timestamp": 1604249624424, "user_tz": 0, "elapsed": 18468, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GiPntWv82gXOOwC1JBFNoK4SPM-4IxTQnERyCBuUQ=s64", "userId": "15000880758463632242"}} outputId="cbd58918-9ebf-4e69-f43c-3c0a8433e96c" colab={"base_uri": "https://localhost:8080/"}
# !ls ~/.kaggle
# + id="OXuW8HOptpbd" executionInfo={"status": "ok", "timestamp": 1604249627848, "user_tz": 0, "elapsed": 21887, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GiPntWv82gXOOwC1JBFNoK4SPM-4IxTQnERyCBuUQ=s64", "userId": "15000880758463632242"}} outputId="8358be9f-a0eb-48f6-f9df-5ecdf9650666" colab={"base_uri": "https://localhost:8080/"}
# !pip install kaggle
# + id="kTSBSCgbtpdn" executionInfo={"status": "ok", "timestamp": 1604249628207, "user_tz": 0, "elapsed": 22241, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GiPntWv82gXOOwC1JBFNoK4SPM-4IxTQnERyCBuUQ=s64", "userId": "15000880758463632242"}} outputId="1f6bcfb9-1822-461c-a7d0-51d1b6e6a764" colab={"base_uri": "https://localhost:8080/"}
# !kaggle datasets list --search energy
# + id="DKTadn-XAsll" executionInfo={"status": "ok", "timestamp": 1604249628208, "user_tz": 0, "elapsed": 22235, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GiPntWv82gXOOwC1JBFNoK4SPM-4IxTQnERyCBuUQ=s64", "userId": "15000880758463632242"}}
# # !kaggle datasets download jeanmidev/smart-meters-in-london --path /content/drive/My\ Drive/Colab\ Data/
# + [markdown] id="RbApeQfCCqSw"
# # Unpack and Verify the Smart-Meter Dataset
# + [markdown] id="XkFX66_6IPH2"
# In this cell we unzip the data, list the files, and ensure they're what we expect to see given the description of the dataset at [the dataset's page on Kaggle](https://www.kaggle.com/jeanmidev/smart-meters-in-london)
# + id="wdd5-S4CJb7C" executionInfo={"status": "ok", "timestamp": 1604249628209, "user_tz": 0, "elapsed": 22233, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GiPntWv82gXOOwC1JBFNoK4SPM-4IxTQnERyCBuUQ=s64", "userId": "15000880758463632242"}}
# # !mkdir /content/drive/My\ Drive/Colab\ Data/smart-meters-in-london
# + id="9f9Os3nKJlHp" executionInfo={"status": "ok", "timestamp": 1604249628210, "user_tz": 0, "elapsed": 22231, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GiPntWv82gXOOwC1JBFNoK4SPM-4IxTQnERyCBuUQ=s64", "userId": "15000880758463632242"}}
# # !unzip -o -q /content/drive/My\ Drive/Colab\ Data/smart-meters-in-london.zip \
# # -d /content/drive/My\ Drive/Colab\ Data/smart-meters-in-london \
# + id="kVvGorSiLGmB" executionInfo={"status": "ok", "timestamp": 1604249628646, "user_tz": 0, "elapsed": 22663, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GiPntWv82gXOOwC1JBFNoK4SPM-4IxTQnERyCBuUQ=s64", "userId": "15000880758463632242"}} outputId="6fddd5e8-b12a-476f-9b0c-cc79d388ef93" colab={"base_uri": "https://localhost:8080/"}
# !ls /content/drive/My\ Drive/Colab\ Data/smart-meters-in-london/halfhourly_dataset/
# + id="jDmPHdExWG7R" executionInfo={"status": "ok", "timestamp": 1604249628647, "user_tz": 0, "elapsed": 22659, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GiPntWv82gXOOwC1JBFNoK4SPM-4IxTQnERyCBuUQ=s64", "userId": "15000880758463632242"}}
import os
import pathlib
import pandas as pd
from IPython.display import display, Markdown, Image
# + id="9ffGg4FqWV5P" executionInfo={"status": "ok", "timestamp": 1604249628648, "user_tz": 0, "elapsed": 22659, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GiPntWv82gXOOwC1JBFNoK4SPM-4IxTQnERyCBuUQ=s64", "userId": "15000880758463632242"}}
# Root of the unpacked Kaggle "smart meters in London" dataset on Drive.
DATASET_PATH=pathlib.Path('/content/drive/My Drive/Colab Data/smart-meters-in-london')
ELEC_READINGS_ONE_ROW_PER_DAY=DATASET_PATH / 'hhblock_dataset' / 'hhblock_dataset' # one row per day
ELEC_READINGS_ONE_ROW_PER_READING=DATASET_PATH / 'halfhourly_dataset' / 'halfhourly_dataset' # one row per timestamp
# + id="1Ev0xbPiWbBe" executionInfo={"status": "ok", "timestamp": 1604249630463, "user_tz": 0, "elapsed": 24469, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GiPntWv82gXOOwC1JBFNoK4SPM-4IxTQnERyCBuUQ=s64", "userId": "15000880758463632242"}}
path1_block = pd.read_csv(ELEC_READINGS_ONE_ROW_PER_DAY / 'block_0.csv')
# + id="Hn9068JXXEPH" executionInfo={"status": "ok", "timestamp": 1604249632316, "user_tz": 0, "elapsed": 26318, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GiPntWv82gXOOwC1JBFNoK4SPM-4IxTQnERyCBuUQ=s64", "userId": "15000880758463632242"}}
path2_block = pd.read_csv(ELEC_READINGS_ONE_ROW_PER_READING / 'block_0.csv')
# + id="d_eiE3FiXZ5w" executionInfo={"status": "ok", "timestamp": 1604249632320, "user_tz": 0, "elapsed": 26319, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GiPntWv82gXOOwC1JBFNoK4SPM-4IxTQnERyCBuUQ=s64", "userId": "15000880758463632242"}} outputId="f154180f-b299-4277-8013-2ed9227d9d32" colab={"base_uri": "https://localhost:8080/", "height": 226}
path1_block.head()
# + id="SdgFJfIRYYNs" executionInfo={"status": "ok", "timestamp": 1604249632321, "user_tz": 0, "elapsed": 26317, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GiPntWv82gXOOwC1JBFNoK4SPM-4IxTQnERyCBuUQ=s64", "userId": "15000880758463632242"}} outputId="8fe08898-df0d-40f7-b8af-7ae1306b4361" colab={"base_uri": "https://localhost:8080/", "height": 363}
path2_block.iloc[46:,:].head(10)
# + [markdown] id="zWYHU7hhSXBt"
# So really what we want here is to read in the hhblock version, and then process that. There are multiple households per file.
# + id="0I-eZZsxsZ9M" executionInfo={"status": "ok", "timestamp": 1604250029658, "user_tz": 0, "elapsed": 78200, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GiPntWv82gXOOwC1JBFNoK4SPM-4IxTQnERyCBuUQ=s64", "userId": "15000880758463632242"}}
# Read every hhblock CSV (one row per household-day) into a list of frames.
blocks = []
for csv_path in ELEC_READINGS_ONE_ROW_PER_DAY.iterdir():
    filename = csv_path.name
    if filename.startswith('block_') and filename.endswith('.csv'):
        print(f"Reading in {csv_path}")
        blocks.append(pd.read_csv(csv_path))
# + id="vdMsVDbywfal" executionInfo={"status": "ok", "timestamp": 1604250130187, "user_tz": 0, "elapsed": 2047, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GiPntWv82gXOOwC1JBFNoK4SPM-4IxTQnERyCBuUQ=s64", "userId": "15000880758463632242"}}
# Stack all block frames into one big readings table; sort=False keeps
# the existing column order.
readings = pd.concat(blocks, sort=False)
# + id="tqMtfqyIwmCf" executionInfo={"status": "ok", "timestamp": 1604250150976, "user_tz": 0, "elapsed": 532, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GiPntWv82gXOOwC1JBFNoK4SPM-4IxTQnERyCBuUQ=s64", "userId": "15000880758463632242"}}
# Free the per-block frames now that they have been concatenated.
del blocks
# + [markdown] id="te8Zrn9UsBBX"
# # Experiment 1: An Autoencoder for Daily Energy Consumption
#
# In this experiment we stack all days from all households together -- ignoring the household-specific clustering -- and just fit a variational auto-encoder to the daily data to try to find a low-rank representation of a household-day
# + [markdown] id="PG9vXcY6xmyZ"
# In order to make this easier to model in Gaussian terms, we should use a log transform of the data, but we'll skip this for the time being
# + id="I6uz78fjsRct" executionInfo={"status": "ok", "timestamp": 1604251678964, "user_tz": 0, "elapsed": 608, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GiPntWv82gXOOwC1JBFNoK4SPM-4IxTQnERyCBuUQ=s64", "userId": "15000880758463632242"}}
# Column names in the hhblock CSVs: household id and calendar day.
HouseId = 'LCLid'
Day = 'day'
# + id="JL04swPEvRLx" executionInfo={"status": "ok", "timestamp": 1604251708840, "user_tz": 0, "elapsed": 1147, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GiPntWv82gXOOwC1JBFNoK4SPM-4IxTQnERyCBuUQ=s64", "userId": "15000880758463632242"}}
# Index by (household, day); the remaining 48 half-hourly reading columns
# are renamed to plain integers 0..47.
readings.set_index([HouseId, Day], inplace=True)
readings.columns = list(range(48))
# + id="UduDYeCf2eKi" executionInfo={"status": "ok", "timestamp": 1604251737000, "user_tz": 0, "elapsed": 510, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GiPntWv82gXOOwC1JBFNoK4SPM-4IxTQnERyCBuUQ=s64", "userId": "15000880758463632242"}} outputId="33e129e1-16ea-4f8d-c52a-a8feffc80aa4" colab={"base_uri": "https://localhost:8080/", "height": 258}
readings.head()
# + id="1rWgYRSO2mHf"
# + [markdown] id="PaXm95-43Rr3"
# ## Following a Convolutional Variational Auto-Encoder
#
# We follow this tutorial at first for the variational auto-encoder, operating on digits. Subsequently we will adapt it to work on time-series: https://keras.io/examples/generative/vae/
# + id="62eKxAde3gUp" executionInfo={"status": "ok", "timestamp": 1604251981023, "user_tz": 0, "elapsed": 2340, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GiPntWv82gXOOwC1JBFNoK4SPM-4IxTQnERyCBuUQ=s64", "userId": "15000880758463632242"}}
import numpy as np
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers
# + [markdown] id="gMWOWNDr3tsd"
# Note that this tutorial follows the new class-based Keras API
#
# The first layer is the "Sampling Layer", i.e. the target low-rank representation
# + id="bqqN7Mr83ouB" executionInfo={"status": "ok", "timestamp": 1604252354597, "user_tz": 0, "elapsed": 733, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GiPntWv82gXOOwC1JBFNoK4SPM-4IxTQnERyCBuUQ=s64", "userId": "15000880758463632242"}}
class Sampling(layers.Layer):
    """
    Uses a Gaussian over the latent space z, parameterised by z_mean and
    z_log_var, and draws a sample via the reparameterisation trick.
    """
    def call(self, inputs):
        # `inputs` is a pair of tensors: the mean and log-variance of the
        # latent Gaussian, each of shape (batch, latent_dim).
        z_mean, z_log_var = inputs
        # Dynamic batch size and latent dimensionality, read from the
        # runtime shape of the mean tensor.
        batch = tf.shape(z_mean)[0]
        dim = tf.shape(z_mean)[1]
        # Sample standard-normal noise, then shift and scale it:
        # exp(0.5 * log_var) converts the log-variance into a std-dev.
        epsilon = tf.keras.backend.random_normal(shape=(batch, dim))
        return z_mean + tf.exp(0.5 * z_log_var) * epsilon
# + [markdown] id="Slxi9OHc5FBZ"
#
| Theano and Smart-Meter Experiments.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
# Intel DAAL related imports
from daal.data_management import (
DataSourceIface, FileDataSource, HomogenNumericTable, MergedNumericTable, NumericTable
)
#"utils" module can be found in IDP environment installation folder (intall_dir)\share\pydaal_examples\examples\python\source
#uncomment the below comment and replace <install_dir> with the correct path
#sys.path.append(<install_dir>\share\pydaal_examples\examples\python\source)
from utils import printNumericTable
import sys, os
sys.path.append(os.path.realpath('../3-custom-modules'))
from customUtils import getArrayFromNT
# Import numpy, matplotlib, seaborn
import numpy as np
# Boilerplate
# %matplotlib inline
import matplotlib
import matplotlib.pyplot as plt
# Plotting configurations
# %config InlineBackend.figure_format = 'retina'
plt.rcParams["figure.figsize"] = (12, 9)
# -
# # Online Ridge Regression
#
# ### Tutorial brief
# This tutorial is an example of using ridge regression algorithms from PyDAAL to build predictive models.
# We use the well-studied Boston House Prices dataset to train a ridge regression model in online processing mode. We test the accuracy of this model in median house price prediction. The code for ridge regression model training and prediction is provided.
#
# ### Learning objectives
# * To understand how to process data that does not fit into memory using the online computing mode.
# * To understand and practice the typical code sequence of using PyDAAL for supervised learning.
# * To practice interactions and conversions between DAAL NumericTables and NumPy ndarrays.
#
#
# ### Linear regression introduction
# Supervised learning involves training a model using the data that has known responses, and then apply the model to predict responses for unseen data. In the case of **linear regression** and **ridge regression**, the model is linear. That is,
#
# $$ f_{\beta}(X) = \beta_0 + \beta_1 x_1 + \beta_2 x_2 + \cdots + \beta_k x_k $$
#
# $\beta_0, \beta_1, \cdots, \beta_k$ are the regression model coefficients.
#
# PyDAAL provides two linear regression algorithms:
# * **Multiple Linear Regression**: The model is trained by minimizing an objective function in the form of **Residual Sum of Squares**. PyDAAL supports two ways to train the model: 1) Normal Equation method, and 2) QR method.
#
# $$ \sum \limits_{i=1}^n\left ( y_i - f_{\beta}(X^i)\right )^2 $$
# * **Ridge Regression**: It is similar to multiple linear regression, but adds a regularization term to the objective function. The regularization term penalizes features with large values, thus makes the model less prone to overfitting.
#
# $$ \sum \limits_{i=1}^n\left ( y_i - f_{\beta}(X^i)\right )^2 + \lambda \sum \limits_{j=1}^k \beta_j^2 $$
#
# ### Online processing mode
# Some Intel DAAL algorithms enable processing of data sets in blocks. In the online processing mode, the `compute()`, and `finalizeCompute()` methods of a particular algorithm class are used.
#
# This computation mode assumes that the data arrives in blocks i = 1, 2, 3, … nBlocks.
#
# Call the `compute()` method each time new input becomes available.
# 
#
# When the last block of data arrives, call the `finalizeCompute()` method to produce final results.
# 
#
# If the input data arrives in an asynchronous mode, you can use the `getStatus()` method for a given data source to check whether a new block of data is available for load.
# ### The Boston House Prices dataset
# The dataset has already been downloaded to the ./mldata folder. There are 506 rows and 14 columns. The first 13 columns are features (explanatory variables), and the last column is the dependent variable we try to make predictions for. Here's detailed information about this dataset, including descriptions of each feature:
#
# > Origin:
#
# > This dataset was taken from the StatLib library which is maintained at Carnegie Mellon University.
#
# > Creator:
#
# > <NAME>. and <NAME>.
# > 'Hedonic prices and the demand for clean air', J. Environ. Economics & Management, vol.5, 81-102, 1978.
#
# > Data Set Information:
#
# > Concerns housing values in suburbs of Boston.
#
#
# > Attribute Information:
#
# > 1. CRIM: per capita crime rate by town
# > 2. ZN: proportion of residential land zoned for lots over 25,000 sq.ft.
# > 3. INDUS: proportion of non-retail business acres per town
# > 4. CHAS: Charles River dummy variable (= 1 if tract bounds river; 0 otherwise)
# > 5. NOX: nitric oxides concentration (parts per 10 million)
# > 6. RM: average number of rooms per dwelling
# > 7. AGE: proportion of owner-occupied units built prior to 1940
# > 8. DIS: weighted distances to five Boston employment centres
# > 9. RAD: index of accessibility to radial highways
# > 10. TAX: full-value property-tax rate per \$10,000
# > 11. PTRATIO: pupil-teacher ratio by town
# > 12. B: 1000(Bk - 0.63)^2 where Bk is the proportion of blacks by town
# > 13. LSTAT: % lower status of the population
# > 14. MEDV: Median value of owner-occupied homes in \$1000's
#
#
# ### Quality metrics
# +
def mse(values, fitted_values):
    """Return Mean Squared Errors of fitted values w.r.t. true values

    Args:
        values: True values. NumericTable, nsamples-by-noutputs
        fitted_values: Fitted (predicted) values. NumericTable,
            nsamples-by-noutputs

    Returns:
        A tuple containing one MSE per output column
    """
    y_t = getArrayFromNT(values)
    y_p = getArrayFromNT(fitted_values)
    # Residual sum of squares per output column, averaged over samples.
    rss = ((y_t - y_p) ** 2).sum(axis = 0)
    errors = rss / y_t.shape[0]  # renamed: avoid shadowing the function name
    return tuple(errors)
def score(y_true, y_pred):
    """Compute the coefficient of determination (R-squared)

    Args:
        y_true: True values. NumericTable, shape = (nsamples, noutputs)
        y_pred: Predicted values. NumericTable, shape = (nsamples, noutputs)

    Returns:
        R2: A tuple with noutputs values
    """
    actual = getArrayFromNT(y_true)
    predicted = getArrayFromNT(y_pred)
    # R^2 = 1 - RSS/TSS, computed column-wise over the outputs.
    residual_ss = ((actual - predicted) ** 2).sum(axis=0)
    total_ss = ((actual - actual.mean(axis=0)) ** 2).sum(axis=0)
    return tuple(1 - residual_ss / total_ss)
# -
# ### Ridge regression model training for Boston houses prices
# The code below reads data from file `housing.data.train.csv` and creates 2 NumericTables: training data (`xTrain`) and ground truth (`yTrain`). We use the `FileDataSource` to stream the data from the file into in-memory representation - numeric tables.
#
# The ridge regression model gets an update after each new block of data.
# +
from daal.algorithms.ridge_regression import training as ridge_training
# Number of features in the dataset
nFeatures = 13
# Initialize FileDataSource to retrieve the input data from a .csv file.
# notAllocateNumericTable: we supply our own tables below;
# doDictionaryFromContext: infer the data dictionary from file contents.
trainDataSource = FileDataSource(
    './mldata/housing.data.train.csv', DataSourceIface.notAllocateNumericTable, DataSourceIface.doDictionaryFromContext
)
# Create Numeric Tables for training data (13 features) and the single
# dependent variable (MEDV)
xTrain = HomogenNumericTable(nFeatures, 0, NumericTable.notAllocate)
yTrain = HomogenNumericTable(1, 0, NumericTable.notAllocate)
mergedDataTrain = MergedNumericTable(xTrain, yTrain)
# Create an algorithm object to train ridge regression model in online processing mode
regr = ridge_training.Online()
# Stream the file 50 rows at a time; the loop stops at the first short block.
# NOTE(review): a final partial block (<50 rows) is loaded but never passed
# to compute(), so those rows do not contribute to the model — fine only if
# the file length is a multiple of 50; confirm against the data file.
while(trainDataSource.loadDataBlock(50, mergedDataTrain) == 50):
    # Pass new block of data from the training data set and dependent values to the algorithm
    regr.input.set(ridge_training.data, xTrain)
    regr.input.set(ridge_training.dependentVariables, yTrain)
    # Update ridge regression model with this block's partial result
    regr.compute()
# Finalize the online computation and extract the trained model
model = regr.finalizeCompute().get(ridge_training.model)
# Peek at the model (Betas)
printNumericTable(model.getBeta())
# -
# ### Prediction with Ridge Regression model
#
# The code below reads data from file housing.data.test.csv and creates 2 NumericTables: test data (xTest) and test ground truth (yTest). We use the ridge regression prediction algorithm and the model obtained at the training stage to compute predictions for new, previously unseen data.
# +
from daal.algorithms.ridge_regression import prediction as ridge_prediction
# Test data source: same settings as for training (tables allocated by us).
testDataSource = FileDataSource(
    './mldata/housing.data.test.csv', DataSourceIface.notAllocateNumericTable, DataSourceIface.doDictionaryFromContext
)
# Create Numeric Tables for testing data and dependent variables
xTest = HomogenNumericTable(nFeatures, 0, NumericTable.notAllocate)
yTest = HomogenNumericTable(1, 0, NumericTable.notAllocate)
mergedDataTest = MergedNumericTable(xTest, yTest)
# Load the whole test file at once (no block size given).
testDataSource.loadDataBlock(mergedDataTest)
# Create a prediction algorithm object (batch processing mode)
alg = ridge_prediction.Batch()
# Set input: the trained model and the unseen feature rows
alg.input.setModel(ridge_prediction.model, model)
alg.input.setTable(ridge_prediction.data, xTest)
# Compute predictions for the test set
predictions = alg.compute().get(ridge_prediction.prediction)
# -
# ### Plotting predicted values against the ground truth
# To see if the model has done a good job, we plot the predicted values against the ground truth. If the model does a perfect job then all points on the plot should fall on a straight line. As we see, it's not quite the case. But still the predictions are close to true values in many cases.
# +
# Report test-set error (MSE) and goodness of fit (R-squared).
print(mse(yTest, predictions))
print(score(yTest, predictions))
predicted = getArrayFromNT(predictions)
expected = getArrayFromNT(yTest)
fig, ax = plt.subplots()
ax.scatter(expected, predicted)
# Reference line y = x: a perfect model would put every point on it.
ax.plot([0, 30], [0, 30], 'k--', lw=4)
ax.set_xlabel('Measured')
ax.set_ylabel('Predicted')
plt.show()
# -
# ### Summary
# In this lab, we learned two widely used linear regression models: Multiple linear regression and Ridge regression. We saw how to apply them to the Boston House Prices dataset. We studied and practiced PyDAAL API for these two algorithms.
| 4-interactive-tutorials/Regression_online_example.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] colab_type="text" id="ngeJE2Tt2RQw" toc=true
# <h1>Table of Contents<span class="tocSkip"></span></h1>
# <div class="toc"><ul class="toc-item"><li><span><a href="#Autoencoders-Variacionales" data-toc-modified-id="Autoencoders-Variacionales-1"><span class="toc-item-num">1 </span>Autoencoders Variacionales</a></span><ul class="toc-item"><li><span><a href="#Clonar-repo-en-colab" data-toc-modified-id="Clonar-repo-en-colab-1.1"><span class="toc-item-num">1.1 </span>Clonar repo en colab</a></span></li><li><span><a href="#Modulos-basicos" data-toc-modified-id="Modulos-basicos-1.2"><span class="toc-item-num">1.2 </span>Modulos basicos</a></span></li><li><span><a href="#Modulos-locales-al-repo" data-toc-modified-id="Modulos-locales-al-repo-1.3"><span class="toc-item-num">1.3 </span>Modulos locales al repo</a></span></li><li><span><a href="#Variables-importantes-del-notebook" data-toc-modified-id="Variables-importantes-del-notebook-1.4"><span class="toc-item-num">1.4 </span>Variables importantes del notebook</a></span></li><li><span><a href="#Entre-DL-y-ML" data-toc-modified-id="Entre-DL-y-ML-1.5"><span class="toc-item-num">1.5 </span>Entre DL y ML</a></span></li><li><span><a href="#Modelos-generativos" data-toc-modified-id="Modelos-generativos-1.6"><span class="toc-item-num">1.6 </span>Modelos generativos</a></span></li></ul></li><li><span><a href="#VAE-para-fingerprints-moleculares" data-toc-modified-id="VAE-para-fingerprints-moleculares-2"><span class="toc-item-num">2 </span>VAE para fingerprints moleculares</a></span><ul class="toc-item"><li><span><a href="#Paso-1:-Conoce-tus-datos!" 
data-toc-modified-id="Paso-1:-Conoce-tus-datos!-2.1"><span class="toc-item-num">2.1 </span>Paso 1: Conoce tus datos!</a></span></li><li><span><a href="#Design-matrix-($x$)" data-toc-modified-id="Design-matrix-($x$)-2.2"><span class="toc-item-num">2.2 </span>Design matrix ($x$)</a></span></li><li><span><a href="#PCA-:-Descomposiciones-lineales-de-los-datos" data-toc-modified-id="PCA-:-Descomposiciones-lineales-de-los-datos-2.3"><span class="toc-item-num">2.3 </span>PCA : Descomposiciones lineales de los datos</a></span></li><li><span><a href="#A-construir-PCA-(un-linear-autoencoder!!)" data-toc-modified-id="A-construir-PCA-(un-linear-autoencoder!!)-2.4"><span class="toc-item-num">2.4 </span>A construir PCA (un linear autoencoder!!)</a></span></li><li><span><a href="#Encodificar,-decodificar" data-toc-modified-id="Encodificar,-decodificar-2.5"><span class="toc-item-num">2.5 </span>Encodificar, decodificar</a></span></li><li><span><a href="#A-visualizar-el-espacio-latente" data-toc-modified-id="A-visualizar-el-espacio-latente-2.6"><span class="toc-item-num">2.6 </span>A visualizar el espacio latente</a></span></li></ul></li><li><span><a href="#VAE-con-TF-Probability" data-toc-modified-id="VAE-con-TF-Probability-3"><span class="toc-item-num">3 </span>VAE con TF-Probability</a></span><ul class="toc-item"><li><ul class="toc-item"><li><ul class="toc-item"><li><span><a href="#Objective-(elbo):" data-toc-modified-id="Objective-(elbo):-3.0.0.1"><span class="toc-item-num">3.0.0.1 </span>Objective (elbo):</a></span></li></ul></li></ul></li><li><span><a href="#El-modelo-en-codigo" data-toc-modified-id="El-modelo-en-codigo-3.1"><span class="toc-item-num">3.1 </span>El modelo en codigo</a></span><ul class="toc-item"><li><ul class="toc-item"><li><span><a href="#Assemblar-el-modelo" data-toc-modified-id="Assemblar-el-modelo-3.1.0.1"><span class="toc-item-num">3.1.0.1 </span>Assemblar el modelo</a></span></li></ul></li></ul></li><li><span><a href="#Que-esta-pasando?" 
data-toc-modified-id="Que-esta-pasando?-3.2"><span class="toc-item-num">3.2 </span>Que esta pasando?</a></span><ul class="toc-item"><li><ul class="toc-item"><li><span><a href="#Funcion-de-perdida" data-toc-modified-id="Funcion-de-perdida-3.2.0.1"><span class="toc-item-num">3.2.0.1 </span>Funcion de perdida</a></span></li></ul></li></ul></li><li><span><a href="#que-tal-funciona?" data-toc-modified-id="que-tal-funciona?-3.3"><span class="toc-item-num">3.3 </span>que tal funciona?</a></span><ul class="toc-item"><li><span><a href="#OJO!-z-es-una-distribucion" data-toc-modified-id="OJO!-z-es-una-distribucion-3.3.1"><span class="toc-item-num">3.3.1 </span>OJO! z es una distribucion</a></span></li><li><span><a href="#Verificar-el-espacio-latente" data-toc-modified-id="Verificar-el-espacio-latente-3.3.2"><span class="toc-item-num">3.3.2 </span>Verificar el espacio latente</a></span></li></ul></li><li><span><a href="#Decodificar,-encodificar" data-toc-modified-id="Decodificar,-encodificar-3.4"><span class="toc-item-num">3.4 </span>Decodificar, encodificar</a></span></li></ul></li></ul></div>
# + [markdown] colab_type="text" id="aqjbxeS9ZTrD"
# # Autoencoders Variacionales
#
#
# Ejecutar este notebook via colab:
# <a href="https://colab.research.google.com/github/GenerativeModels_19/blob/master/notebooks/1_VAE_molecular.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
#
# ## Clonar repo en colab
# + colab={"base_uri": "https://localhost:8080/", "height": 202} colab_type="code" id="pwtWYTS92RQ3" outputId="565bed85-d559-4ee8-defd-0bb46ebeea29"
import os
import subprocess
import sys
# Repository to clone when this notebook runs inside Google Colab.
GIT_NAME='GenerativeModels_19'
GIT_URL='https://riiaa@github.com/riiaa/{}.git'.format(GIT_NAME)
# Colab injects the `google.colab` module; its presence identifies the runtime.
IN_COLAB = 'google.colab' in sys.modules
def run_cmd(cmd):
    # Run a shell command and print its captured stdout (best-effort; errors
    # from the command itself are not raised, only shown in the output).
    print('Output of "{}":'.format(cmd))
    print(subprocess.run(cmd,stdout=subprocess.PIPE, shell=True).stdout.decode('utf-8'))
if IN_COLAB:
    # On Colab: remove the default sample data, clone the repo fresh, and move
    # its contents into the working directory so relative paths resolve.
    SRC_DIR='.'
    run_cmd('rm -rf sample_data')
    run_cmd('rm -rf {}'.format(GIT_NAME))
    run_cmd('git clone --verbose --progress {}'.format(GIT_URL))
    run_cmd('mv {}/* . '.format(GIT_NAME))
    run_cmd('rm -rf {}'.format(GIT_NAME))
    #run_cmd('pip install --upgrade --force-reinstall tf-nightly-gpu-2.0-preview')
else:
    # Running locally: the notebook lives one level below the repo root.
    SRC_DIR='..'
print('Using colab? {}, using root directory "{}"'.format(IN_COLAB,SRC_DIR))
# + [markdown] colab_type="text" id="hsa9EoHiZqnO"
# ## Modulos basicos
#
# + colab={"base_uri": "https://localhost:8080/", "height": 121} colab_type="code" id="xFiTTx7MV-xx" outputId="e557afa5-b3c1-4be1-aa8c-c7762161a903"
import os
import sys
from tqdm.autonotebook import tqdm
# scientific python stack
import numpy as np
import scipy as sp
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
# ML/DL
import sklearn
import sklearn.model_selection
from sklearn.model_selection import train_test_split
import tensorflow as tf
#tf.enable_eager_execution()
import tensorflow.keras as tfk
import tensorflow.keras.layers as tkl
import tensorflow_probability as tfp
tfd = tfp.distributions
print('Tensorflow:{}'.format(tf.__version__))
print('Keras:{}'.format(tfk.__version__))
print('Tf-prob:{}'.format(tfp.__version__))
# + [markdown] colab_type="text" id="uvwFDFvL2RRG"
# ## Modulos locales al repo
# + colab={} colab_type="code" id="n_SPI30p2RRJ"
# Make the repo's `code/` package importable.
sys.path.append(os.path.join(SRC_DIR,'code'))
import utils
# + [markdown] colab_type="text" id="LSLMPpKd2RRQ"
# ## Variables importantes del notebook
# + colab={} colab_type="code" id="pcnZrVzy2RRR"
# Root folder holding the notebook's datasets (relative to the repo root).
data_path = os.path.join(SRC_DIR,'data')
# + [markdown] colab_type="text" id="UFeErdff9q71"
# ## Entre DL y ML
#
#
# 
#
#
# ## Modelos generativos
#
# 
#
# + [markdown] colab_type="text" id="ZiruH-sz79_m"
# # VAE para fingerprints moleculares
# + colab={"base_uri": "https://localhost:8080/", "height": 212} colab_type="code" id="KDXd2XHW79ax" outputId="c4e4e831-7c57-4070-e36b-5acd1b2da2ee"
def str2arr(astr):
    """Parse a string-serialized fingerprint such as "[0 1\n 0 1]" into a 1-D numpy array."""
    # Strip the brackets and embedded newlines, then convert each
    # whitespace-separated token to an integer.
    cleaned = astr.translate(str.maketrans('', '', '[]\n'))
    return np.array([int(tok) for tok in cleaned.split()])
# Load ~19k ZINC molecules; the 'fp' column holds a string-serialized
# fingerprint vector which str2arr converts back to a numpy array.
df = pd.read_csv(os.path.join(data_path,'zinc_19k_fp.csv'))
df['fp']=df['fp'].apply(str2arr)
print(df.shape)
df.head()
# + [markdown] colab_type="text" id="Rbwp9dx12RRp"
# ## Paso 1: Conoce tus datos!
#
# 
#
# + colab={"base_uri": "https://localhost:8080/", "height": 34} colab_type="code" id="SC6EmoqB2RRq" outputId="2cd41d8f-55c1-4ba0-fb4a-77daa99b299c"
df['fp'].iloc[0].shape
# + colab={"base_uri": "https://localhost:8080/", "height": 34} colab_type="code" id="h4XekBa7_OT3" outputId="ebc2b3ec-7b7b-4a6d-d837-b9b36b3b1d94"
df.shape
# + [markdown] colab_type="text" id="jRbYQKt1Jgay"
# ## Design matrix ($x$)
# + colab={"base_uri": "https://localhost:8080/", "height": 34} colab_type="code" id="5ZUTRnAEJgjz" outputId="f46da617-0d8a-4db4-9d29-4f9f7d63cd2c"
#from sklearn.preprocessing import StandardScaler
train_index,test_index = train_test_split(df.index,test_size=0.2)
x = np.vstack(df['fp'].tolist()).astype(np.float32)
y = df['logp'].values.reshape(-1,1).astype(np.float32)
x_train,x_test = x[train_index],x[test_index]
y_train,y_test = y[train_index],y[test_index]
print(x.shape,y.shape)
# + [markdown] colab_type="text" id="QZUqYNeN2RR5"
# ## PCA : Descomposiciones lineales de los datos
# + colab={"base_uri": "https://localhost:8080/", "height": 486} colab_type="code" id="xHbx4hkV2RR6" outputId="5e77d6d8-c7ea-4985-a9cd-41631574b24f"
from sklearn.decomposition import PCA
# Baseline: project test fingerprints onto the top-2 principal components
# and visualize the resulting (linear) latent space.
model = PCA(2)
x_pca = model.fit_transform(x_test)
plt.figure(figsize=(8,8))
plt.scatter(x_pca[:,0],x_pca[:,1],s=1)
plt.show()
# + [markdown] colab_type="text" id="n_3uFsu_cBgl"
# ## A construir PCA (un linear autoencoder!!)
#
#
#
#
# + colab={"base_uri": "https://localhost:8080/", "height": 1000} colab_type="code" id="7U6dyblgcBAk" outputId="a834b281-aaed-47dc-fc79-21a7228e9df3"
tfkl = tf.keras.layers
# A linear autoencoder: with no activations and an MSE loss this learns
# the same subspace as PCA (up to rotation/scaling of the components).
latent_dim=2
input_dim = x.shape[-1]
encoder = tf.keras.Sequential([
    tfkl.InputLayer(input_shape=[input_dim]),
    tfkl.Dense(latent_dim,activation=None)])
decoder = tf.keras.Sequential([
    tfkl.InputLayer(input_shape=[latent_dim]),
    tfkl.Dense(input_dim,activation=None)])
# Compose encoder and decoder into one trainable reconstruction model.
ae = tfk.Model(inputs=encoder.inputs,outputs=decoder(encoder.outputs))
ae.compile('adam',loss='mse')
ae.summary()
# Autoencoder target is the input itself.
ae.fit(x_train,x_train,batch_size=64,epochs=100)
# + [markdown] colab_type="text" id="M8P0LwQH2RSF"
# ## Encodificar, decodificar
# + colab={"base_uri": "https://localhost:8080/", "height": 756} colab_type="code" id="dhELRG8jlmf1" outputId="03e30ce1-1f14-48d3-a644-efe199fb8959"
# Encode the held-out set into the 2-D latent space, then reconstruct it.
z = encoder.predict(x_test)
recon_x = decoder.predict(z)
# BUG FIX: the reconstruction comes from x_test, so it must be compared
# against x_test[0]; the original compared against x[0], which is a
# different sample (x is indexed over the full, unsplit dataset).
print(np.abs(recon_x[0] - x_test[0]))
print(np.linalg.norm(recon_x[0] - x_test[0]))
# + [markdown] colab_type="text" id="__N_T8D42RSL"
# ## A visualizar el espacio latente
# + colab={"base_uri": "https://localhost:8080/", "height": 486} colab_type="code" id="2K-nO4R5l8dP" outputId="7902b337-104d-47ca-c269-2aaca14eb6ab"
# Scatter plot of the 2-D latent codes for the test set.
plt.figure(figsize=(8,8))
plt.scatter(z[:,0],z[:,1],s=1)
plt.show()
# + [markdown] colab_type="text" id="eR3EKI9Yb87q"
# # VAE con TF-Probability
#
#
# + [markdown] colab_type="text" id="p6qhztJuJbh8"
#
#
#
# 
#
#
# #### Objective (elbo):
# 
#
#
# ## El modelo en codigo
# + colab={} colab_type="code" id="NbqLKehiGN77"
tfpl = tfp.layers
# tfpl.MultivariateNormalTriL?
# + colab={"base_uri": "https://localhost:8080/", "height": 319} colab_type="code" id="bad2wnwHEqKk" outputId="0cdca2ec-6cfc-4fb6-92a2-1daac4edd6fc"
tfpl = tfp.layers
tfkl = tf.keras.layers
input_dim = x.shape[-1]
latent_dim = 16
# Standard-normal prior p(z) over the latent code.
prior = tfd.Independent(tfd.Normal(loc=tf.zeros(latent_dim), scale=1),
                        reinterpreted_batch_ndims=1)
# BUG FIX: Dense layer sizes must be integers; `input_dim/4` is a float in
# Python 3 and is rejected by recent Keras versions. Use floor division.
encoder = tfk.Sequential([
    tfkl.InputLayer(input_shape=[input_dim]),
    tfkl.Dense(input_dim // 4, activation=tf.nn.leaky_relu),
    tfkl.Dense(input_dim // 8, activation=tf.nn.leaky_relu),
    tfkl.Dense(input_dim // 16, activation=tf.nn.leaky_relu),
    tfkl.Dense(tfpl.MultivariateNormalTriL.params_size(latent_dim), activation=None),
    # Output is the distribution q(z|x); the activity regularizer adds the
    # KL(q(z|x) || p(z)) term of the ELBO to the model loss.
    tfpl.MultivariateNormalTriL(
        latent_dim,
        activity_regularizer=tfpl.KLDivergenceRegularizer(prior))
])
decoder = tf.keras.Sequential([
    tfkl.InputLayer(input_shape=[latent_dim]),
    tfkl.Dense(latent_dim * 2, activation=tf.nn.leaky_relu),
    tfkl.Dense(latent_dim * 4, activation=tf.nn.leaky_relu),
    tfkl.Dense(latent_dim * 8, activation=tf.nn.leaky_relu),
    tfkl.Dense(input_dim, activation=None),
    # Each output bit is an independent Bernoulli parameterized by logits.
    tfpl.IndependentBernoulli([input_dim], tfd.Bernoulli.logits)
])
encoder.summary()
# + [markdown] colab_type="text" id="aYHJuoRr2RSY"
# #### Assemblar el modelo
# + colab={"base_uri": "https://localhost:8080/", "height": 386} colab_type="code" id="NsjBmfEZGOk_" outputId="48f765ec-feeb-47b1-f92c-e8484f9a34c8"
# End-to-end VAE: x -> q(z|x) -> sampled z -> p(x|z).
# encoder.outputs[0] is the distribution-valued output of the latent layer.
vae = tfk.Model(inputs=encoder.inputs,outputs=decoder(encoder.outputs[0]))
vae.summary()
# + [markdown] colab_type="text" id="jnoP1e8I2RSd"
#
# ## Que esta pasando?
#
# * Iteratar sobre los datos en epochs
# * En cada epoch, encodificamos, calculamos la media y log-varianza del posterior aproximador $q(z|x)$
# * Usamos el truco de reparametrizacion para samplear de $q(z|x)$
# * Nuestros samples reparametrizados se pasan al decoder para obtener logits de la distribucion generativa $p(x|z)$
#
# #### Funcion de perdida
#
# Para un dato $x_i$:
#
# $$
# l_i(\theta,\phi) = - \mathbb{E}_{z\sim q_\theta(z|x_i)}[log p_\phi(x_i|z)] + \mathbb{KL}(q_\theta(z|x_i) || p(z))
# $$
# + colab={"base_uri": "https://localhost:8080/", "height": 1000} colab_type="code" id="ZBs63P2JGd-v" outputId="50e56aa4-cc8d-4dd2-856f-452aa27b7879"
# Loss: negative log-likelihood of the data under the decoder distribution;
# the KL term of the ELBO is added by the encoder's activity regularizer.
negative_log_likelihood = lambda x,rv_x: -rv_x.log_prob(x)
# Autoencoder-style datasets: input and target are the same tensor.
train_dataset = tf.data.Dataset.from_tensor_slices((x_train,x_train)).shuffle(len(x_train)).batch(128)
test_dataset = tf.data.Dataset.from_tensor_slices((x_test,x_test)).batch(128)
vae.compile(optimizer='adam',loss=negative_log_likelihood)
vae.fit(train_dataset, epochs=100)
# + [markdown] colab_type="text" id="weRWp8OYHwWb"
# ## que tal funciona?
#
# ### OJO! z es una distribucion
# + colab={"base_uri": "https://localhost:8080/", "height": 34} colab_type="code" id="WZctt0ZjLJKa" outputId="fd88ecf5-2803-49c9-e2b7-d063dad7a1ca"
# NOTE(review): at this point `z` still holds the latents from the earlier
# linear-AE section; the VAE latents are only computed in the next cell — confirm intended.
z.shape
# + colab={"base_uri": "https://localhost:8080/", "height": 924} colab_type="code" id="GPACr5cRHvjw" outputId="bb906e81-18ce-4013-d926-9f2aa325fa5c"
# Encode the training set and decode the mean reconstruction.
z = encoder(x_train)
x_recon = decoder(z).mean().numpy()
# BUG FIX: x_recon reconstructs x_train, so print the matching input;
# the original printed x_test[0] next to a reconstruction of x_train[0].
print(x_train[0])
print(x_recon[0])
# + [markdown] colab_type="text" id="6KvkUieo2RTB"
# ### Verificar el espacio latente
# + colab={} colab_type="code" id="bRdo6lBYJakL"
# Encode the test set; `z` is a distribution object (a sample when indexed).
z = encoder(x_test)
# + colab={} colab_type="code" id="T5oO5wPBLFCt"
# + colab={"base_uri": "https://localhost:8080/", "height": 269} colab_type="code" id="2M8Puow8J0iJ" outputId="8bbbfc50-a655-460a-801e-b87fa25fa0af"
x_recon = decoder(z).mean().numpy()
# Histogram each latent dimension; roughly standard-normal marginals
# indicate the KL regularizer is keeping q(z|x) close to the prior.
for i in range(latent_dim):
    plt.hist(z[:,i],bins=20,alpha=0.2)
plt.show()
# + [markdown] colab_type="text" id="wghBY3kL2RTL"
# ## Decodificar, encodificar
# + colab={"base_uri": "https://localhost:8080/", "height": 134} colab_type="code" id="ma8UNee8JBCZ" outputId="dbbf7f30-40bb-4c80-aa52-a355feaf41ef"
# Generate new molecules: sample 10 latent codes from the prior and decode
# them to the most likely (mode) binary fingerprints.
z = prior.sample(sample_shape=(10))
x_recon = decoder(z).mode().numpy()
x_recon
# + colab={} colab_type="code" id="gAwn0sqrJUxK"
| code/exploratory/1_VAE_molecular_version_del_taller.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Hyperparameter tuning
#
# **Learning Objectives**
# 1. Learn how to use `cloudml-hypertune` to report the results for Cloud hyperparameter tuning trial runs
# 2. Learn how to configure the `.yaml` file for submitting a Cloud hyperparameter tuning job
# 3. Submit a hyperparameter tuning job to Cloud AI Platform
#
# ## Introduction
#
# Let's see if we can improve upon that by tuning our hyperparameters.
#
# Hyperparameters are parameters that are set *prior* to training a model, as opposed to parameters which are learned *during* training.
#
# These include learning rate and batch size, but also model design parameters such as type of activation function and number of hidden units.
#
# Here are the four most common ways to finding the ideal hyperparameters:
# 1. Manual
# 2. Grid Search
# 3. Random Search
# 4. Bayesian Optimization
#
# **1. Manual**
#
# Traditionally, hyperparameter tuning is a manual trial and error process. A data scientist has some intuition about suitable hyperparameters which they use as a starting point, then they observe the result and use that information to try a new set of hyperparameters to try to beat the existing performance.
#
# Pros
# - Educational, builds up your intuition as a data scientist
# - Inexpensive because only one trial is conducted at a time
#
# Cons
# - Requires a lot of time and patience
#
# **2. Grid Search**
#
# On the other extreme we can use grid search. Define a discrete set of values to try for each hyperparameter then try every possible combination.
#
# Pros
# - Can run hundreds of trials in parallel using the cloud
# - Guaranteed to find the best solution within the search space
#
# Cons
# - Expensive
#
# **3. Random Search**
#
# Alternatively, define a range for each hyperparameter (e.g. 0-256) and sample uniformly at random from that range.
#
# Pros
# - Can run hundreds of trials in parallel using the cloud
# - Requires less trials than Grid Search to find a good solution
#
# Cons
# - Expensive (but less so than Grid Search)
#
# **4. Bayesian Optimization**
#
# Unlike Grid Search and Random Search, Bayesian Optimization takes into account information from past trials to select parameters for future trials. The details of how this is done is beyond the scope of this notebook, but if you're interested you can read how it works here [here](https://cloud.google.com/blog/products/gcp/hyperparameter-tuning-cloud-machine-learning-engine-using-bayesian-optimization).
#
# Pros
# - Picks values intelligently based on results from past trials
# - Less expensive because requires fewer trials to get a good result
#
# Cons
# - Requires sequential trials for best results, takes longer
#
# **AI Platform HyperTune**
#
# AI Platform HyperTune, powered by [Google Vizier](https://ai.google/research/pubs/pub46180), uses Bayesian Optimization by default, but [also supports](https://cloud.google.com/ml-engine/docs/tensorflow/hyperparameter-tuning-overview#search_algorithms) Grid Search and Random Search.
#
#
# When tuning just a few hyperparameters (say less than 4), Grid Search and Random Search work well, but when tuning several hyperparameters and the search space is large, Bayesian Optimization is best.
# !sudo chown -R jupyter:jupyter /home/jupyter/training-data-analyst
# TODO(user): fill in your own GCP project, bucket, and region below.
PROJECT = "<YOUR PROJECT>"
BUCKET = "<YOUR BUCKET>"
REGION = "<YOUR REGION>"
TFVERSION = "2.1" # TF version for AI Platform to use
import os
# Mirror the settings into the environment so the %%bash cells can read them.
os.environ["PROJECT"] = PROJECT
os.environ["BUCKET"] = BUCKET
os.environ["REGION"] = REGION
os.environ["TFVERSION"] = TFVERSION
# + language="bash"
# gcloud config set project $PROJECT
# gcloud config set compute/region $REGION
# -
# ## Make code compatible with AI Platform Training Service
# In order to make our code compatible with AI Platform Training Service we need to make the following changes:
#
# 1. Upload data to Google Cloud Storage
# 2. Move code into a trainer Python package
# 3. Submit training job with `gcloud` to train on AI Platform
# ### Upload data to Google Cloud Storage (GCS)
#
# Cloud services don't have access to our local files, so we need to upload them to a location the Cloud servers can read from. In this case we'll use GCS.
#
# To do this run the notebook [0_export_data_from_bq_to_gcs.ipynb](./0_export_data_from_bq_to_gcs.ipynb), which will export the taxifare data from BigQuery directly into a GCS bucket. If all ran smoothly, you should be able to list the data bucket by running the following command:
# !gsutil ls gs://$BUCKET/taxifare/data
# ## Move code into python package
#
# In the [previous lab](), we moved our code into a python package for training on Cloud AI Platform. Let's just check that the files are there. You should see the following files in the `taxifare/trainer` directory:
# - `__init__.py`
# - `model.py`
# - `task.py`
# !ls -la taxifare/trainer
# To use hyperparameter tuning in your training job you must perform the following steps:
#
# 1. Specify the hyperparameter tuning configuration for your training job by including a HyperparameterSpec in your TrainingInput object.
#
# 2. Include the following code in your training application:
#
# - Parse the command-line arguments representing the hyperparameters you want to tune, and use the values to set the hyperparameters for your training trial.
# Add your hyperparameter metric to the summary for your graph.
#
# - To submit a hyperparameter tuning job, we must modify `model.py` and `task.py` to expose any variables we want to tune as command line arguments.
# ### Modify model.py
# +
# %%writefile ./taxifare/trainer/model.py
import datetime
import hypertune
import logging
import os
import shutil
import numpy as np
import tensorflow as tf
from tensorflow.keras import activations
from tensorflow.keras import callbacks
from tensorflow.keras import layers
from tensorflow.keras import models
from tensorflow import feature_column as fc
logging.info(tf.version.VERSION)
CSV_COLUMNS = [
'fare_amount',
'pickup_datetime',
'pickup_longitude',
'pickup_latitude',
'dropoff_longitude',
'dropoff_latitude',
'passenger_count',
'key',
]
LABEL_COLUMN = 'fare_amount'
DEFAULTS = [[0.0], ['na'], [0.0], [0.0], [0.0], [0.0], [0.0], ['na']]
DAYS = ['Sun', 'Mon', 'Tue', 'Wed', 'Thu', 'Fri', 'Sat']
def features_and_labels(row_data):
    """Split a parsed CSV row dict into (features, label).

    Removes the 'key' column (no predictive signal) and pops the fare
    amount out as the label; the dict is mutated in place.
    """
    row_data.pop('key')
    return row_data, row_data.pop(LABEL_COLUMN)
def load_dataset(pattern, batch_size, num_repeat):
    """Build a tf.data dataset of (features, label) batches from CSV files.

    num_repeat=None repeats indefinitely (training); 1 makes a single pass.
    """
    dataset = tf.data.experimental.make_csv_dataset(
        file_pattern=pattern,
        batch_size=batch_size,
        column_names=CSV_COLUMNS,
        column_defaults=DEFAULTS,
        num_epochs=num_repeat,
    )
    return dataset.map(features_and_labels)
def create_train_dataset(pattern, batch_size):
    """Endlessly-repeating training dataset with one batch of prefetch."""
    dataset = load_dataset(pattern, batch_size, num_repeat=None)
    return dataset.prefetch(1)
def create_eval_dataset(pattern, batch_size):
    """Single-pass evaluation dataset with one batch of prefetch."""
    dataset = load_dataset(pattern, batch_size, num_repeat=1)
    return dataset.prefetch(1)
def parse_datetime(s):
    """Parse a 'YYYY-MM-DD HH:MM:SS TZ' timestamp into a datetime.

    Accepts either a plain Python string or a scalar string tensor
    (whose bytes are decoded as UTF-8 first).
    """
    # FIX: use isinstance instead of an exact-type check so str subclasses
    # are treated as strings rather than being sent down the tensor path.
    if not isinstance(s, str):
        s = s.numpy().decode('utf-8')
    return datetime.datetime.strptime(s, "%Y-%m-%d %H:%M:%S %Z")
def euclidean(params):
    """Euclidean distance between pickup and dropoff in raw lon/lat units."""
    lon1, lat1, lon2, lat2 = params
    dlon = lon2 - lon1
    dlat = lat2 - lat1
    return tf.sqrt(tf.square(dlon) + tf.square(dlat))
def get_dayofweek(s):
    """Map a timestamp string to a three-letter day name from DAYS."""
    # NOTE(review): DAYS starts at 'Sun' but datetime.weekday() is
    # Monday-based, so Monday maps to 'Sun' — confirm this is intended.
    ts = parse_datetime(s)
    return DAYS[ts.weekday()]
@tf.function
def dayofweek(ts_in):
    # Wrap the Python datetime logic in py_function so it can run
    # element-wise over a string tensor inside the graph.
    return tf.map_fn(
        lambda s: tf.py_function(get_dayofweek, inp=[s], Tout=tf.string),
        ts_in
    )
@tf.function
def fare_thresh(x):
    # Scaled ReLU. NOTE(review): appears unused in this file — confirm.
    return 60 * activations.relu(x)
def transform(inputs, NUMERIC_COLS, STRING_COLS, nbuckets):
    """Apply feature engineering to the raw Keras input layers.

    Returns (transformed, feature_columns): the dict of transformed input
    tensors and the tf.feature_column definitions fed to DenseFeatures.
    """
    # Pass-through columns
    transformed = inputs.copy()
    del transformed['pickup_datetime']
    feature_columns = {
        colname: fc.numeric_column(colname)
        for colname in NUMERIC_COLS
    }
    # Scaling longitude from range [-78, -70] to [0, 1]
    for lon_col in ['pickup_longitude', 'dropoff_longitude']:
        transformed[lon_col] = layers.Lambda(
            lambda x: (x + 78)/8.0,
            name='scale_{}'.format(lon_col)
        )(inputs[lon_col])
    # Scaling latitude from range [37, 45] to [0, 1]
    for lat_col in ['pickup_latitude', 'dropoff_latitude']:
        transformed[lat_col] = layers.Lambda(
            lambda x: (x - 37)/8.0,
            name='scale_{}'.format(lat_col)
        )(inputs[lat_col])
    # Adding Euclidean dist (no need to be accurate: NN will calibrate it)
    transformed['euclidean'] = layers.Lambda(euclidean, name='euclidean')([
        inputs['pickup_longitude'],
        inputs['pickup_latitude'],
        inputs['dropoff_longitude'],
        inputs['dropoff_latitude']
    ])
    feature_columns['euclidean'] = fc.numeric_column('euclidean')
    # hour of day from timestamp of form '2010-02-08 09:17:00+00:00'
    transformed['hourofday'] = layers.Lambda(
        lambda x: tf.strings.to_number(
            tf.strings.substr(x, 11, 2), out_type=tf.dtypes.int32),
        name='hourofday'
    )(inputs['pickup_datetime'])
    feature_columns['hourofday'] = fc.indicator_column(
        fc.categorical_column_with_identity(
            'hourofday', num_buckets=24))
    # Bucketize the scaled lat/lon, cross pickup with dropoff cells, and
    # embed the crossed pair so the model can learn location-pair effects.
    latbuckets = np.linspace(0, 1, nbuckets).tolist()
    lonbuckets = np.linspace(0, 1, nbuckets).tolist()
    b_plat = fc.bucketized_column(
        feature_columns['pickup_latitude'], latbuckets)
    b_dlat = fc.bucketized_column(
        feature_columns['dropoff_latitude'], latbuckets)
    b_plon = fc.bucketized_column(
        feature_columns['pickup_longitude'], lonbuckets)
    b_dlon = fc.bucketized_column(
        feature_columns['dropoff_longitude'], lonbuckets)
    ploc = fc.crossed_column(
        [b_plat, b_plon], nbuckets * nbuckets)
    dloc = fc.crossed_column(
        [b_dlat, b_dlon], nbuckets * nbuckets)
    pd_pair = fc.crossed_column([ploc, dloc], nbuckets ** 4)
    feature_columns['pickup_and_dropoff'] = fc.embedding_column(
        pd_pair, 100)
    return transformed, feature_columns
def rmse(y_true, y_pred):
    """Root-mean-squared-error metric (Keras-compatible signature)."""
    squared_error = tf.square(y_pred - y_true)
    return tf.sqrt(tf.reduce_mean(squared_error))
def build_dnn_model(nbuckets, nnsize, lr):
    """Build and compile the DNN fare regressor.

    Args:
        nbuckets: number of buckets for lat/lon discretization.
        nnsize: iterable of hidden-layer widths.
        lr: learning rate for the Adam optimizer.
    Returns:
        A compiled tf.keras Model.
    """
    # input layer is all float except for pickup_datetime which is a string
    STRING_COLS = ['pickup_datetime']
    NUMERIC_COLS = (
        set(CSV_COLUMNS) - set([LABEL_COLUMN, 'key']) - set(STRING_COLS)
    )
    inputs = {
        colname: layers.Input(name=colname, shape=(), dtype='float32')
        for colname in NUMERIC_COLS
    }
    inputs.update({
        colname: layers.Input(name=colname, shape=(), dtype='string')
        for colname in STRING_COLS
    })
    # transforms
    transformed, feature_columns = transform(
        inputs, NUMERIC_COLS, STRING_COLS, nbuckets=nbuckets)
    dnn_inputs = layers.DenseFeatures(feature_columns.values())(transformed)
    # Stack the hidden layers, then a single linear output for the fare.
    x = dnn_inputs
    for layer, nodes in enumerate(nnsize):
        x = layers.Dense(nodes, activation='relu', name='h{}'.format(layer))(x)
    output = layers.Dense(1, name='fare')(x)
    model = models.Model(inputs, output)
    lr_optimizer = tf.keras.optimizers.Adam(learning_rate=lr)
    model.compile(optimizer=lr_optimizer, loss='mse', metrics=[rmse, 'mse'])
    return model
def train_and_evaluate(hparams):
    """Train the fare model, export it, and report RMSE to HyperTune.

    Args:
        hparams: dict of hyperparameters and paths (see task.py flags).
    Returns:
        The Keras History object from model.fit.
    """
    batch_size = hparams['batch_size']
    eval_data_path = hparams['eval_data_path']
    nnsize = hparams['nnsize']
    nbuckets = hparams['nbuckets']
    lr = hparams['lr']
    num_evals = hparams['num_evals']
    num_examples_to_train_on = hparams['num_examples_to_train_on']
    output_dir = hparams['output_dir']
    train_data_path = hparams['train_data_path']
    # Start from a clean output directory for each run.
    if tf.io.gfile.exists(output_dir):
        tf.io.gfile.rmtree(output_dir)
    timestamp = datetime.datetime.now().strftime('%Y%m%d%H%M%S')
    savedmodel_dir = os.path.join(output_dir, 'savedmodel')
    model_export_path = os.path.join(savedmodel_dir, timestamp)
    checkpoint_path = os.path.join(output_dir, 'checkpoints')
    tensorboard_path = os.path.join(output_dir, 'tensorboard')
    dnn_model = build_dnn_model(nbuckets, nnsize, lr)
    logging.info(dnn_model.summary())
    trainds = create_train_dataset(train_data_path, batch_size)
    evalds = create_eval_dataset(eval_data_path, batch_size)
    # One "epoch" here is a fixed slice of the training budget, so the
    # model is evaluated num_evals times over the whole run.
    steps_per_epoch = num_examples_to_train_on // (batch_size * num_evals)
    checkpoint_cb = callbacks.ModelCheckpoint(checkpoint_path,
                                              save_weights_only=True,
                                              verbose=1)
    tensorboard_cb = callbacks.TensorBoard(tensorboard_path,
                                           histogram_freq=1)
    history = dnn_model.fit(
        trainds,
        validation_data=evalds,
        epochs=num_evals,
        steps_per_epoch=max(1, steps_per_epoch),
        verbose=2, # 0=silent, 1=progress bar, 2=one line per epoch
        callbacks=[checkpoint_cb, tensorboard_cb]
    )
    # Exporting the model with default serving function.
    tf.saved_model.save(dnn_model, model_export_path)
    # TODO 1: take the final validation RMSE as the tuning metric.
    hp_metric = history.history['val_rmse'][num_evals-1]
    # TODO 1: report it to the AI Platform hyperparameter tuning service.
    hpt = hypertune.HyperTune()
    hpt.report_hyperparameter_tuning_metric(
        hyperparameter_metric_tag='rmse',
        metric_value=hp_metric,
        global_step=num_evals
    )
    return history
# -
# ### Modify task.py
# +
# %%writefile taxifare/trainer/task.py
import argparse
import json
import os
from trainer import model
if __name__ == '__main__':
    # Expose every tunable hyperparameter as a command-line flag so the
    # AI Platform hyperparameter tuning service can override it per trial.
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--batch_size",
        help = "Batch size for training steps",
        type = int,
        default = 32
    )
    parser.add_argument(
        "--eval_data_path",
        help = "GCS location pattern of eval files",
        required = True
    )
    parser.add_argument(
        "--nnsize",
        help = "Hidden layer sizes (provide space-separated sizes)",
        nargs = "+",
        type = int,
        default=[32, 8]
    )
    parser.add_argument(
        "--nbuckets",
        help = "Number of buckets to divide lat and lon with",
        type = int,
        default = 10
    )
    parser.add_argument(
        "--lr",
        help = "learning rate for optimizer",
        type = float,
        default = 0.001
    )
    parser.add_argument(
        "--num_evals",
        help = "Number of times to evaluate model on eval data training.",
        type = int,
        default = 5
    )
    parser.add_argument(
        "--num_examples_to_train_on",
        help = "Number of examples to train on.",
        type = int,
        default = 100
    )
    parser.add_argument(
        "--output_dir",
        help = "GCS location to write checkpoints and export models",
        required = True
    )
    parser.add_argument(
        "--train_data_path",
        help = "GCS location pattern of train files containing eval URLs",
        required = True
    )
    parser.add_argument(
        "--job-dir",
        help = "this model ignores this field, but it is required by gcloud",
        default = "junk"
    )
    args, _ = parser.parse_known_args()
    hparams = args.__dict__
    # Append the trial id (from the TF_CONFIG env var) to output_dir so
    # parallel tuning trials write to distinct paths.
    hparams["output_dir"] = os.path.join(
        hparams["output_dir"],
        json.loads(
            os.environ.get("TF_CONFIG", "{}")
        ).get("task", {}).get("trial", "")
    )
    print("output_dir", hparams["output_dir"])
    model.train_and_evaluate(hparams)
# -
# ### Create config.yaml file
#
# Specify the hyperparameter tuning configuration for your training job
# Create a HyperparameterSpec object to hold the hyperparameter tuning configuration for your training job, and add the HyperparameterSpec as the hyperparameters object in your TrainingInput object.
#
# In your HyperparameterSpec, set the hyperparameterMetricTag to a value representing your chosen metric. If you don't specify a hyperparameterMetricTag, AI Platform Training looks for a metric with the name training/hptuning/metric. The following example shows how to create a configuration for a metric named metric1:
# %%writefile hptuning_config.yaml
trainingInput:
scaleTier: BASIC
hyperparameters:
goal: MINIMIZE
maxTrials: 10
maxParallelTrials: 2
hyperparameterMetricTag: rmse # TODO
enableTrialEarlyStopping: True
params:
- parameterName: lr
# TODO
type: DOUBLE
minValue: 0.0001
maxValue: 0.1
scaleType: UNIT_LOG_SCALE
- parameterName: nbuckets
# TODO
type: INTEGER
minValue: 10
maxValue: 25
scaleType: UNIT_LINEAR_SCALE
- parameterName: batch_size
# TODO
type: DISCRETE
discreteValues:
- 15
- 30
- 50
# #### Report your hyperparameter metric to AI Platform Training
#
# The way to report your hyperparameter metric to the AI Platform Training service depends on whether you are using TensorFlow for training or not. It also depends on whether you are using a runtime version or a custom container for training.
#
# We recommend that your training code reports your hyperparameter metric to AI Platform Training frequently in order to take advantage of early stopping.
#
# TensorFlow with a runtime version
# If you use an AI Platform Training runtime version and train with TensorFlow, then you can report your hyperparameter metric to AI Platform Training by writing the metric to a TensorFlow summary. Use one of the following functions.
#
# You may need to install `cloudml-hypertune` on your machine to run this code locally.
# !sudo pip3 install cloudml-hypertune
# + language="bash"
#
# EVAL_DATA_PATH=./taxifare/tests/data/taxi-valid*
# TRAIN_DATA_PATH=./taxifare/tests/data/taxi-train*
# OUTPUT_DIR=./taxifare-model
#
# rm -rf ${OUTPUT_DIR}
# export PYTHONPATH=${PYTHONPATH}:${PWD}/taxifare
#
# python3 -m trainer.task \
# --eval_data_path $EVAL_DATA_PATH \
# --output_dir $OUTPUT_DIR \
# --train_data_path $TRAIN_DATA_PATH \
# --batch_size 5 \
# --num_examples_to_train_on 100 \
# --num_evals 1 \
# --nbuckets 10 \
# --lr 0.001 \
# --nnsize 32 8
# -
# ls taxifare-model/tensorboard
# + language="bash"
#
# PROJECT_ID=$(gcloud config list project --format "value(core.project)")
# BUCKET=$PROJECT_ID
# REGION="us-central1"
# TFVERSION="2.1"
#
# # Output directory and jobID
# OUTDIR=gs://${BUCKET}/taxifare/trained_model_$(date -u +%y%m%d_%H%M%S)
# JOBID=taxifare_$(date -u +%y%m%d_%H%M%S)
# echo ${OUTDIR} ${REGION} ${JOBID}
# gsutil -m rm -rf ${OUTDIR}
#
# # Model and training hyperparameters
# BATCH_SIZE=15
# NUM_EXAMPLES_TO_TRAIN_ON=100
# NUM_EVALS=10
# NBUCKETS=10
# LR=0.001
# NNSIZE="32 8"
#
# # GCS paths
# GCS_PROJECT_PATH=gs://$BUCKET/taxifare
# DATA_PATH=$GCS_PROJECT_PATH/data
# TRAIN_DATA_PATH=$DATA_PATH/taxi-train*
# EVAL_DATA_PATH=$DATA_PATH/taxi-valid*
#
# # TODO
# gcloud ai-platform jobs submit training $JOBID \
# --module-name=trainer.task \
# --package-path=taxifare/trainer \
# --staging-bucket=gs://${BUCKET} \
# --config=hptuning_config.yaml \
# --python-version=3.7 \
# --runtime-version=${TFVERSION} \
# --region=${REGION} \
# -- \
# --eval_data_path $EVAL_DATA_PATH \
# --output_dir $OUTDIR \
# --train_data_path $TRAIN_DATA_PATH \
# --batch_size $BATCH_SIZE \
# --num_examples_to_train_on $NUM_EXAMPLES_TO_TRAIN_ON \
# --num_evals $NUM_EVALS \
# --nbuckets $NBUCKETS \
# --lr $LR \
# --nnsize $NNSIZE
# -
# Copyright 2020 Google Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License
| courses/machine_learning/deepdive2/building_production_ml_systems/solutions/2_hyperparameter_tuning.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3.6
# language: python
# name: python36
# ---
# 
# # Fuzzy Grouping
#
# Unprepared data often represents the same entity with multiple values; examples include different spellings, varying capitalizations, and abbreviations. This is common when working with data gathered from multiple sources or through human input. One way to canonicalize and reconcile these variants is to use Data Prep's fuzzy_group_column (also known as "text clustering") functionality.
#
# Data Prep inspects a column to determine clusters of similar values. A new column is added in which clustered values are replaced with the canonical value of its cluster, thus significantly reducing the number of distinct values. You can control the degree of similarity required for values to be clustered together, override canonical form, and set clusters if automatic clustering did not provide the desired results.
#
# Let's explore the capabilities of `fuzzy_group_column` by first reading in a dataset and inspecting it.
# Load the sample JSON dataset into an Azure ML Data Prep Dataflow and
# preview the first rows; the 'inspections.business.city' column contains
# several inconsistent spellings of "San Francisco".
import azureml.dataprep as dprep
dflow = dprep.read_json(path='../data/json.json')
dflow.head(5)
# As you can see above, the column `inspections.business.city` contains several forms of the city name "San Francisco".
# Let's add a column with values replaced by the automatically detected canonical form. To do so call fuzzy_group_column() on an existing Dataflow:
# Add a new column 'city_grouped' in which each value is replaced by the
# canonical value of its fuzzy cluster.  Values are clustered together when
# their similarity exceeds similarity_threshold; the optional
# similarity_score_column_name adds a column with the per-row score.
dflow_clean = dflow.fuzzy_group_column(source_column='inspections.business.city',
                                       new_column_name='city_grouped',
                                       similarity_threshold=0.8,
                                       similarity_score_column_name='similarity_score')
dflow_clean.head(5)
# The arguments `source_column` and `new_column_name` are required, whereas the others are optional.
# If `similarity_threshold` is provided, it will be used to control the required similarity level for the values to be grouped together.
# If `similarity_score_column_name` is provided, a second new column will be added to show similarity score between every pair of original and canonical values.
#
# In the resulting data set, you can see that all the different variations of representing "San Francisco" in the data were normalized to the same string, "San Francisco".
#
# But what if you want more control over what gets grouped, what doesn't, and what the canonical value should be?
#
# To get more control over grouping, canonical values, and exceptions, you need to use the `FuzzyGroupBuilder` class.
# Let's see what it has to offer below:
# For finer control (inspecting/overriding clusters and canonical values),
# use the FuzzyGroupBuilder instead of calling fuzzy_group_column directly.
builder = dflow.builders.fuzzy_group_column(source_column='inspections.business.city',
                                            new_column_name='city_grouped',
                                            similarity_threshold=0.8,
                                            similarity_score_column_name='similarity_score')
# learn() detects the fuzzy groups; .groups exposes them for inspection
builder.learn()
builder.groups
# Here you can see that `fuzzy_group_column` detected one group with four values that all map to "San Francisco" as the canonical value.
# You can see the effects of changing the similarity threshold next:
# Raise the similarity threshold and re-learn: stricter matching splits the
# values into more (smaller) clusters.
builder.similarity_threshold = 0.9
builder.learn()
builder.groups
# Now that you are using a similarity threshold of `0.9`, two distinct groups of values are generated.
#
# Let's tweak some of the detected groups before completing the builder and getting back the Dataflow with the resulting fuzzy grouped column.
# Go back to the looser threshold, then manually edit the detected groups
# before building the final Dataflow.
builder.similarity_threshold = 0.8
builder.learn()
groups = builder.groups
groups
# override the canonical value used for the first group
groups[0]['canonicalValue'] = 'SANFRAN'
duplicates = groups[0]['duplicates']
# remove the last duplicate value from the cluster (it will no longer be grouped)
duplicates = duplicates[:-1]
# assign the modified duplicates list back into the group dict
groups[0]['duplicates'] = duplicates
# push the edited groups back to the builder (keep the dict structure intact)
builder.groups = groups
builder.groups
# Here, the canonical value is modified to be used for the single fuzzy group and removed 'S.F.' from this group's duplicates list.
#
# You can mutate the copy of the `groups` list from the builder (be careful to keep the structure of objects inside this list). After getting the desired groups in the list, you can update the builder with it.
#
# Now you can get a dataflow with the FuzzyGroup step in it.
# +
# Materialize the result: build a Dataflow containing the fuzzy-group step
# and pull it into a pandas DataFrame for inspection.
dflow_clean = builder.to_dataflow()
df = dflow_clean.to_pandas_dataframe()
df
| how-to-use-azureml/work-with-data/dataprep/how-to-guides/fuzzy-group.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# <!-- dom:TITLE: Data Analysis and Machine Learning: Neural networks, from the simple perceptron to deep learning and convolutional networks -->
# # Data Analysis and Machine Learning: Neural networks, from the simple perceptron to deep learning and convolutional networks
# <!-- dom:AUTHOR: <NAME> at Department of Physics, University of Oslo & Department of Physics and Astronomy and National Superconducting Cyclotron Laboratory, Michigan State University -->
# <!-- Author: -->
# **<NAME>**, Department of Physics, University of Oslo and Department of Physics and Astronomy and National Superconducting Cyclotron Laboratory, Michigan State University
#
# Date: **Mar 11, 2019**
#
# Copyright 1999-2019, <NAME>. Released under CC Attribution-NonCommercial 4.0 license
#
#
#
#
# ## Neural networks
#
# Artificial neural networks are computational systems that can learn to
# perform tasks by considering examples, generally without being
# programmed with any task-specific rules. It is supposed to mimic a
# biological system, wherein neurons interact by sending signals in the
# form of mathematical functions between layers. All layers can contain
# an arbitrary number of neurons, and each connection is represented by
# a weight variable.
#
#
# ## Artificial neurons
#
# The field of artificial neural networks has a long history of
# development, and is closely connected with the advancement of computer
# science and computers in general. A model of artificial neurons was
# first developed by McCulloch and Pitts in 1943 to study signal
# processing in the brain and has later been refined by others. The
# general idea is to mimic neural networks in the human brain, which is
# composed of billions of neurons that communicate with each other by
# sending electrical signals. Each neuron accumulates its incoming
# signals, which must exceed an activation threshold to yield an
# output. If the threshold is not overcome, the neuron remains inactive,
# i.e. has zero output.
#
# This behaviour has inspired a simple mathematical model for an artificial neuron.
# <!-- Equation labels as ordinary links -->
# <div id="artificialNeuron"></div>
#
# $$
# \begin{equation}
# y = f\left(\sum_{i=1}^n w_ix_i\right) = f(u)
# \label{artificialNeuron} \tag{1}
# \end{equation}
# $$
# Here, the output $y$ of the neuron is the value of its activation function, which has as input
# a weighted sum of the signals $x_1, \dots ,x_n$ received from $n$ other neurons.
#
# Conceptually, it is helpful to divide neural networks into four
# categories:
# 1. general purpose neural networks for supervised learning,
#
# 2. neural networks designed specifically for image processing, the most prominent example of this class being Convolutional Neural Networks (CNNs),
#
# 3. neural networks for sequential data such as Recurrent Neural Networks (RNNs), and
#
# 4. neural networks for unsupervised learning such as Deep Boltzmann Machines.
#
# In natural science, DNNs and CNNs have already found numerous
# applications. In statistical physics, they have been applied to detect
# phase transitions in 2D Ising and Potts models, lattice gauge
# theories, and different phases of polymers, or solving the
# Navier-Stokes equation in weather forecasting. Deep learning has also
# found interesting applications in quantum physics. Various quantum
# phase transitions can be detected and studied using DNNs and CNNs,
# topological phases, and even non-equilibrium many-body
# localization. Representing quantum states as DNNs quantum state
# tomography are among some of the impressive achievements to reveal the
# potential of DNNs to facilitate the study of quantum systems.
#
# In quantum information theory, it has been shown that one can perform
# gate decompositions with the help of neural networks.
#
# The applications are not limited to the natural sciences. There is a
# plethora of applications in essentially all disciplines, from the
# humanities to life science and medicine.
#
# ## Neural network types
#
# An artificial neural network (ANN), is a computational model that
# consists of layers of connected neurons, or nodes or units. We will
# refer to these interchangeably as units or nodes, and sometimes as
# neurons.
#
# It is supposed to mimic a biological nervous system by letting each
# neuron interact with other neurons by sending signals in the form of
# mathematical functions between layers. A wide variety of different
# ANNs have been developed, but most of them consist of an input layer,
# an output layer and eventual layers in-between, called *hidden
# layers*. All layers can contain an arbitrary number of nodes, and each
# connection between two nodes is associated with a weight variable.
#
# Neural networks (also called neural nets) are neural-inspired
# nonlinear models for supervised learning. As we will see, neural nets
# can be viewed as natural, more powerful extensions of supervised
# learning methods such as linear and logistic regression and soft-max
# methods we discussed earlier.
#
#
# ## Feed-forward neural networks
#
# The feed-forward neural network (FFNN) was the first and simplest type
# of ANNs that were devised. In this network, the information moves in
# only one direction: forward through the layers.
#
# Nodes are represented by circles, while the arrows display the
# connections between the nodes, including the direction of information
# flow. Additionally, each arrow corresponds to a weight variable
# (figure to come). We observe that each node in a layer is connected
# to *all* nodes in the subsequent layer, making this a so-called
# *fully-connected* FFNN.
#
#
#
# ## Convolutional Neural Network
#
# A different variant of FFNNs are *convolutional neural networks*
# (CNNs), which have a connectivity pattern inspired by the animal
# visual cortex. Individual neurons in the visual cortex only respond to
# stimuli from small sub-regions of the visual field, called a receptive
# field. This makes the neurons well-suited to exploit the strong
# spatially local correlation present in natural images. The response of
# each neuron can be approximated mathematically as a convolution
# operation. (figure to come)
#
# Convolutional neural networks emulate the behaviour of neurons in the
# visual cortex by enforcing a *local* connectivity pattern between
# nodes of adjacent layers: Each node in a convolutional layer is
# connected only to a subset of the nodes in the previous layer, in
# contrast to the fully-connected FFNN. Often, CNNs consist of several
# convolutional layers that learn local features of the input, with a
# fully-connected layer at the end, which gathers all the local data and
# produces the outputs. They have wide applications in image and video
# recognition.
#
# ## Recurrent neural networks
#
# So far we have only mentioned ANNs where information flows in one
# direction: forward. *Recurrent neural networks* on the other hand,
# have connections between nodes that form directed *cycles*. This
# creates a form of internal memory which are able to capture
# information on what has been calculated before; the output is
# dependent on the previous computations. Recurrent NNs make use of
# sequential information by performing the same task for every element
# in a sequence, where each element depends on previous elements. An
# example of such information is sentences, making recurrent NNs
# especially well-suited for handwriting and speech recognition.
#
# ## Other types of networks
#
# There are many other kinds of ANNs that have been developed. One type
# that is specifically designed for interpolation in multidimensional
# space is the radial basis function (RBF) network. RBFs are typically
# made up of three layers: an input layer, a hidden layer with
# non-linear radial symmetric activation functions and a linear output
# layer (''linear'' here means that each node in the output layer has a
# linear activation function). The layers are normally fully-connected
# and there are no cycles, thus RBFs can be viewed as a type of
# fully-connected FFNN. They are however usually treated as a separate
# type of NN due to the unusual activation functions.
#
# ## Multilayer perceptrons
#
# One uses often so-called fully-connected feed-forward neural networks
# with three or more layers (an input layer, one or more hidden layers
# and an output layer) consisting of neurons that have non-linear
# activation functions.
#
# Such networks are often called *multilayer perceptrons* (MLPs).
#
# ## Why multilayer perceptrons?
#
# According to the *Universal approximation theorem*, a feed-forward
# neural network with just a single hidden layer containing a finite
# number of neurons can approximate a continuous multidimensional
# function to arbitrary accuracy, assuming the activation function for
# the hidden layer is a **non-constant, bounded and
# monotonically-increasing continuous function**.
#
# Note that the requirements on the activation function only applies to
# the hidden layer, the output nodes are always assumed to be linear, so
# as to not restrict the range of output values.
#
#
# ## Mathematical model
#
# The output $y$ is produced via the activation function $f$
# $$
# y = f\left(\sum_{i=1}^n w_ix_i + b_i\right) = f(z),
# $$
# This function receives $x_i$ as inputs.
# Here the activation $z=(\sum_{i=1}^n w_ix_i+b_i)$.
# In an FFNN of such neurons, the *inputs* $x_i$ are the *outputs* of
# the neurons in the preceding layer. Furthermore, an MLP is
# fully-connected, which means that each neuron receives a weighted sum
# of the outputs of *all* neurons in the previous layer.
#
# ## Mathematical model
#
# First, for each node $i$ in the first hidden layer, we calculate a weighted sum $z_i^1$ of the input coordinates $x_j$,
# <!-- Equation labels as ordinary links -->
# <div id="_auto1"></div>
#
# $$
# \begin{equation} z_i^1 = \sum_{j=1}^{M} w_{ij}^1 x_j + b_i^1
# \label{_auto1} \tag{2}
# \end{equation}
# $$
# Here $b_i$ is the so-called bias which is normally needed in
# case of zero activation weights or inputs. How to fix the biases and
# the weights will be discussed below. The value of $z_i^1$ is the
# argument to the activation function $f_i$ of each node $i$, The
# variable $M$ stands for all possible inputs to a given node $i$ in the
# first layer. We define the output $y_i^1$ of all neurons in layer 1 as
# <!-- Equation labels as ordinary links -->
# <div id="outputLayer1"></div>
#
# $$
# \begin{equation}
# y_i^1 = f(z_i^1) = f\left(\sum_{j=1}^M w_{ij}^1 x_j + b_i^1\right)
# \label{outputLayer1} \tag{3}
# \end{equation}
# $$
# where we assume that all nodes in the same layer have identical
# activation functions, hence the notation $f$. In general, we could assume in the more general case that different layers have different activation functions.
# In this case we would identify these functions with a superscript $l$ for the $l$-th layer,
# <!-- Equation labels as ordinary links -->
# <div id="generalLayer"></div>
#
# $$
# \begin{equation}
# y_i^l = f^l(u_i^l) = f^l\left(\sum_{j=1}^{N_{l-1}} w_{ij}^l y_j^{l-1} + b_i^l\right)
# \label{generalLayer} \tag{4}
# \end{equation}
# $$
# where $N_l$ is the number of nodes in layer $l$. When the output of
# all the nodes in the first hidden layer are computed, the values of
# the subsequent layer can be calculated and so forth until the output
# is obtained.
#
#
#
# ## Mathematical model
#
# The output of neuron $i$ in layer 2 is thus,
# <!-- Equation labels as ordinary links -->
# <div id="_auto2"></div>
#
# $$
# \begin{equation}
# y_i^2 = f^2\left(\sum_{j=1}^N w_{ij}^2 y_j^1 + b_i^2\right)
# \label{_auto2} \tag{5}
# \end{equation}
# $$
# <!-- Equation labels as ordinary links -->
# <div id="outputLayer2"></div>
#
# $$
# \begin{equation}
# = f^2\left[\sum_{j=1}^N w_{ij}^2f^1\left(\sum_{k=1}^M w_{jk}^1 x_k + b_j^1\right) + b_i^2\right]
# \label{outputLayer2} \tag{6}
# \end{equation}
# $$
# where we have substituted $y_k^1$ with the inputs $x_k$. Finally, the ANN output reads
# <!-- Equation labels as ordinary links -->
# <div id="_auto3"></div>
#
# $$
# \begin{equation}
# y_i^3 = f^3\left(\sum_{j=1}^N w_{ij}^3 y_j^2 + b_i^3\right)
# \label{_auto3} \tag{7}
# \end{equation}
# $$
# <!-- Equation labels as ordinary links -->
# <div id="_auto4"></div>
#
# $$
# \begin{equation}
# = f_3\left[\sum_{j} w_{ij}^3 f^2\left(\sum_{k} w_{jk}^2 f^1\left(\sum_{m} w_{km}^1 x_m + b_k^1\right) + b_j^2\right)
# + b_1^3\right]
# \label{_auto4} \tag{8}
# \end{equation}
# $$
# ## Mathematical model
#
# We can generalize this expression to an MLP with $l$ hidden
# layers. The complete functional form is,
# <!-- Equation labels as ordinary links -->
# <div id="completeNN"></div>
#
# $$
# \begin{equation}
# y^{l+1}_i = f^{l+1}\left[\!\sum_{j=1}^{N_l} w_{ij}^{l+1} f^{l}\left(\sum_{k=1}^{N_{l-1}}w_{jk}^{l}\left(\dots f^1\left(\sum_{n=1}^{N_0} w_{mn}^1 x_n+ b_m^1\right)\dots\right)+b_j^{l}\right)+b_i^{l+1}\right]
# \label{completeNN} \tag{9}
# \end{equation}
# $$
# which illustrates a basic property of MLPs: The only independent
# variables are the input values $x_n$.
#
# ## Mathematical model
#
# This confirms that an MLP, despite its quite convoluted mathematical
# form, is nothing more than an analytic function, specifically a
# mapping of real-valued vectors $\hat{x} \in \mathbb{R}^n \rightarrow
# \hat{y} \in \mathbb{R}^m$.
#
# Furthermore, the flexibility and universality of an MLP can be
# illustrated by realizing that the expression is essentially a nested
# sum of scaled activation functions of the form
# <!-- Equation labels as ordinary links -->
# <div id="_auto5"></div>
#
# $$
# \begin{equation}
# f(x) = c_1 f(c_2 x + c_3) + c_4
# \label{_auto5} \tag{10}
# \end{equation}
# $$
# where the parameters $c_i$ are weights and biases. By adjusting these
# parameters, the activation functions can be shifted up and down or
# left and right, change slope or be rescaled which is the key to the
# flexibility of a neural network.
#
# ### Matrix-vector notation
#
# We can introduce a more convenient notation for the activations in an ANN.
#
# Additionally, we can represent the biases and activations
# as layer-wise column vectors $\hat{b}_l$ and $\hat{y}_l$, so that the $i$-th element of each vector
# is the bias $b_i^l$ and activation $y_i^l$ of node $i$ in layer $l$ respectively.
#
# We have that $\mathrm{W}_l$ is an $N_{l-1} \times N_l$ matrix, while $\hat{b}_l$ and $\hat{y}_l$ are $N_l \times 1$ column vectors.
# With this notation, the sum becomes a matrix-vector multiplication, and we can write
# the equation for the activations of hidden layer 2 (assuming three nodes for simplicity) as
# <!-- Equation labels as ordinary links -->
# <div id="_auto6"></div>
#
# $$
# \begin{equation}
# \hat{y}_2 = f_2(\mathrm{W}_2 \hat{y}_{1} + \hat{b}_{2}) =
# f_2\left(\left[\begin{array}{ccc}
# w^2_{11} &w^2_{12} &w^2_{13} \\
# w^2_{21} &w^2_{22} &w^2_{23} \\
# w^2_{31} &w^2_{32} &w^2_{33} \\
# \end{array} \right] \cdot
# \left[\begin{array}{c}
# y^1_1 \\
# y^1_2 \\
# y^1_3 \\
# \end{array}\right] +
# \left[\begin{array}{c}
# b^2_1 \\
# b^2_2 \\
# b^2_3 \\
# \end{array}\right]\right).
# \label{_auto6} \tag{11}
# \end{equation}
# $$
# ### Matrix-vector notation and activation
#
# The activation of node $i$ in layer 2 is
# <!-- Equation labels as ordinary links -->
# <div id="_auto7"></div>
#
# $$
# \begin{equation}
# y^2_i = f_2\Bigr(w^2_{i1}y^1_1 + w^2_{i2}y^1_2 + w^2_{i3}y^1_3 + b^2_i\Bigr) =
# f_2\left(\sum_{j=1}^3 w^2_{ij} y_j^1 + b^2_i\right).
# \label{_auto7} \tag{12}
# \end{equation}
# $$
# This is not just a convenient and compact notation, but also a useful
# and intuitive way to think about MLPs: The output is calculated by a
# series of matrix-vector multiplications and vector additions that are
# used as input to the activation functions. For each operation
# $\mathrm{W}_l \hat{y}_{l-1}$ we move forward one layer.
#
#
# ### Activation functions
#
# A property that characterizes a neural network, other than its
# connectivity, is the choice of activation function(s). As described
# in, the following restrictions are imposed on an activation function
# for a FFNN to fulfill the universal approximation theorem
#
# * Non-constant
#
# * Bounded
#
# * Monotonically-increasing
#
# * Continuous
#
# ### Activation functions, Logistic and Hyperbolic ones
#
# The second requirement excludes all linear functions. Furthermore, in
# a MLP with only linear activation functions, each layer simply
# performs a linear transformation of its inputs.
#
# Regardless of the number of layers, the output of the NN will be
# nothing but a linear function of the inputs. Thus we need to introduce
# some kind of non-linearity to the NN to be able to fit non-linear
# functions. Typical examples are the logistic *Sigmoid*
# $$
# f(x) = \frac{1}{1 + e^{-x}},
# $$
# and the *hyperbolic tangent* function
# $$
# f(x) = \tanh(x)
# $$
# ### Relevance
#
# The *sigmoid* function is more biologically plausible because the
# output of inactive neurons is zero. Such activation functions are
# called *one-sided*. However, it has been shown that the hyperbolic
# tangent performs better than the sigmoid for training MLPs. More
# recently, the rectified linear unit (ReLU) has become the most popular
# activation function for *deep neural networks*.
# +
# %matplotlib inline
"""Plot four common activation functions used in neural networks: the
logistic sigmoid, the Heaviside step, the sine, and the rectified linear
unit (ReLU).

The sigmoid takes any real number z and outputs a number in (0, 1), which
is useful for assigning weights on a relative scale; z is the weighted sum
of the parameters involved in the learning algorithm."""
import numpy
import matplotlib.pyplot as plt
import math as mt


def _plot_activation(z, values, title, ylim, xlim):
    """Draw one activation curve over z with a grid and fixed axis limits."""
    fig = plt.figure()
    ax = fig.add_subplot(111)
    ax.plot(z, values)
    ax.set_ylim(ylim)
    ax.set_xlim(xlim)
    ax.grid(True)
    ax.set_xlabel('z')
    ax.set_title(title)
    plt.show()


# Sigmoid: numpy broadcasts the expression over the whole array, so no
# numpy.vectorize wrapper is needed.
z = numpy.arange(-5, 5, .1)
_plot_activation(z, 1/(1 + numpy.exp(-z)), 'sigmoid function',
                 [-0.1, 1.1], [-5, 5])

# Step function: 1 where z >= 0, else 0 (the classic perceptron threshold).
z = numpy.arange(-5, 5, .02)
_plot_activation(z, numpy.where(z >= 0.0, 1.0, 0.0), 'step function',
                 [-0.5, 1.5], [-5, 5])

# Sine: bounded and continuous, but not monotonically increasing.
z = numpy.arange(-2*mt.pi, 2*mt.pi, 0.1)
_plot_activation(z, numpy.sin(z), 'sine function',
                 [-1.0, 1.0], [-2*mt.pi, 2*mt.pi])

# ReLU: max(0, z); numpy.maximum broadcasts the scalar 0 over the array,
# replacing the original zeros-array + numpy.max construction.
z = numpy.arange(-2, 2, .1)
_plot_activation(z, numpy.maximum(z, 0.0), 'Rectified linear unit',
                 [-2.0, 2.0], [-2.0, 2.0])
# -
# ## The multilayer perceptron (MLP)
#
# The multilayer perceptron is a very popular, and easy to implement approach, to deep learning. It consists of
# 1. A neural network with one or more layers of nodes between the input and the output nodes.
#
# 2. The multilayer network structure, or architecture, or topology, consists of an input layer, one or more hidden layers, and one output layer.
#
# 3. The input nodes pass values to the first hidden layer, its nodes pass the information on to the second and so on till we reach the output layer.
#
# As a convention it is normal to call a network with one layer of input units, one layer of hidden
# units and one layer of output units as a two-layer network. A network with two layers of hidden units is called a three-layer network etc etc.
#
# For an MLP network there is no direct connection between the output nodes/neurons/units and the input nodes/neurons/units.
# Hereafter we will call the various entities of a layer for nodes.
# There are also no connections within a single layer.
#
# The number of input nodes does not need to equal the number of output
# nodes. This applies also to the hidden layers. Each layer may have its
# own number of nodes and activation functions.
#
# The hidden layers have their name from the fact that they are not
# linked to observables and as we will see below when we define the
# so-called activation $\hat{z}$, we can think of this as a basis
# expansion of the original inputs $\hat{x}$. The difference however
# between neural networks and say linear regression is that now these
# basis functions (which will correspond to the weights in the network)
# are learned from data. This results in an important difference between
# neural networks and deep learning approaches on one side and methods
# like logistic regression or linear regression and their modifications on the other side.
#
#
# ## From one to many layers, the universal approximation theorem
#
#
# A neural network with only one layer, what we called the simple
# perceptron, is best suited if we have a standard binary model with
# clear (linear) boundaries between the outcomes. As such it could
# equally well be replaced by standard linear regression or logistic
# regression. Networks with one or more hidden layers approximate
# systems with more complex boundaries.
#
# As stated earlier,
# an important theorem in studies of neural networks, restated without
# proof here, is the [universal approximation
# theorem](http://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.441.7873&rep=rep1&type=pdf).
#
# It states that a feed-forward network with a single hidden layer
# containing a finite number of neurons can approximate continuous
# functions on compact subsets of real functions. The theorem thus
# states that simple neural networks can represent a wide variety of
# interesting functions when given appropriate parameters. It is the
# multilayer feedforward architecture itself which gives neural networks
# the potential of being universal approximators.
#
#
# ## Deriving the back propagation code for a multilayer perceptron model
#
#
# **Note: figures will be inserted later!**
#
# As we have seen now in a feed forward network, we can express the final output of our network in terms of basic matrix-vector multiplications.
# The unknown quantities are our weights $w_{ij}$ and we need to find an algorithm for changing them so that our errors are as small as possible.
# This leads us to the famous [back propagation algorithm](https://www.nature.com/articles/323533a0).
#
# The questions we want to ask are how do changes in the biases and the
# weights in our network change the cost function and how can we use the
# final output to modify the weights?
#
# To derive these equations let us start with a plain regression problem
# and define our cost function as
# $$
# {\cal C}(\hat{W}) = \frac{1}{2}\sum_{i=1}^n\left(y_i - t_i\right)^2,
# $$
# where the $t_i$s are our $n$ targets (the values we want to
# reproduce), while the outputs of the network after having propagated
# all inputs $\hat{x}$ are given by $y_i$. Below we will demonstrate
# how the basic equations arising from the back propagation algorithm
# can be modified in order to study classification problems with $K$
# classes.
#
# ## Definitions
#
# With our definition of the targets $\hat{t}$, the outputs of the
# network $\hat{y}$ and the inputs $\hat{x}$ we
# define now the activation $z_j^l$ of node/neuron/unit $j$ of the
# $l$-th layer as a function of the bias, the weights which add up from
# the previous layer $l-1$ and the forward passes/outputs
# $\hat{a}^{l-1}$ from the previous layer as
# $$
# z_j^l = \sum_{i=1}^{M_{l-1}}w_{ij}^la_i^{l-1}+b_j^l,
# $$
# where $b_k^l$ are the biases from layer $l$. Here $M_{l-1}$
# represents the total number of nodes/neurons/units of layer $l-1$. The
# figure here illustrates this equation. We can rewrite this in a more
# compact form as the matrix-vector products we discussed earlier,
# $$
# \hat{z}^l = \left(\hat{W}^l\right)^T\hat{a}^{l-1}+\hat{b}^l.
# $$
# With the activation values $\hat{z}^l$ we can in turn define the
# output of layer $l$ as $\hat{a}^l = f(\hat{z}^l)$ where $f$ is our
# activation function. In the examples here we will use the sigmoid
# function discussed in our logistic regression lectures. We will also use the same activation function $f$ for all layers
# and their nodes. It means we have
# $$
# a_j^l = f(z_j^l) = \frac{1}{1+\exp{(-z_j^l)}}.
# $$
# ## Derivatives and the chain rule
#
# From the definition of the activation $z_j^l$ we have
# $$
# \frac{\partial z_j^l}{\partial w_{ij}^l} = a_i^{l-1},
# $$
# and
# $$
# \frac{\partial z_j^l}{\partial a_i^{l-1}} = w_{ji}^l.
# $$
# With our definition of the activation function we have that (note that this function depends only on $z_j^l$)
# $$
# \frac{\partial a_j^l}{\partial z_j^{l}} = a_j^l(1-a_j^l)=f(z_j^l)(1-f(z_j^l)).
# $$
# ## Derivative of the cost function
#
# With these definitions we can now compute the derivative of the cost function in terms of the weights.
#
# Let us specialize to the output layer $l=L$. Our cost function is
# $$
# {\cal C}(\hat{W^L}) = \frac{1}{2}\sum_{i=1}^n\left(y_i - t_i\right)^2=\frac{1}{2}\sum_{i=1}^n\left(a_i^L - t_i\right)^2,
# $$
# The derivative of this function with respect to the weights is
# $$
# \frac{\partial{\cal C}(\hat{W^L})}{\partial w_{jk}^L} = \left(a_j^L - t_j\right)\frac{\partial a_j^L}{\partial w_{jk}^{L}},
# $$
# The last partial derivative can easily be computed and reads (by applying the chain rule)
# $$
# \frac{\partial a_j^L}{\partial w_{jk}^{L}} = \frac{\partial a_j^L}{\partial z_{j}^{L}}\frac{\partial z_j^L}{\partial w_{jk}^{L}}=a_j^L(1-a_j^L)a_k^{L-1},
# $$
# ## Bringing it together, first back propagation equation
#
# We have thus
# $$
# \frac{\partial{\cal C}(\hat{W^L})}{\partial w_{jk}^L} = \left(a_j^L - t_j\right)a_j^L(1-a_j^L)a_k^{L-1},
# $$
# Defining
# $$
# \delta_j^L = a_j^L(1-a_j^L)\left(a_j^L - t_j\right) = f'(z_j^L)\frac{\partial {\cal C}}{\partial (a_j^L)},
# $$
# and using the Hadamard product of two vectors we can write this as
# $$
# \hat{\delta}^L = f'(\hat{z}^L)\circ\frac{\partial {\cal C}}{\partial (\hat{a}^L)}.
# $$
# This is an important expression. The second term on the right-hand side
# measures how fast the cost function is changing as a function of the $j$th
# output activation. If, for example, the cost function doesn't depend
# much on a particular output node $j$, then $\delta_j^L$ will be small,
# which is what we would expect. The first term on the right, measures
# how fast the activation function $f$ is changing at a given activation
# value $z_j^L$.
#
# Notice that everything in the above equations is easily computed. In
# particular, we compute $z_j^L$ while computing the behaviour of the
# network, and it is only a small additional overhead to compute
# $f'(z^L_j)$. The exact form of the derivative with respect to the
# output depends on the form of the cost function.
# However, provided the cost function is known there should be little
# trouble in calculating
# $$
# \frac{\partial {\cal C}}{\partial (a_j^L)}
# $$
# With the definition of $\delta_j^L$ we have a more compact definition of the derivative of the cost function in terms of the weights, namely
# $$
# \frac{\partial{\cal C}(\hat{W^L})}{\partial w_{jk}^L} = \delta_j^La_k^{L-1}.
# $$
# ## Derivatives in terms of $z_j^L$
#
# It is also easy to see that our previous equation can be written as
# $$
# \delta_j^L =\frac{\partial {\cal C}}{\partial z_j^L}= \frac{\partial {\cal C}}{\partial a_j^L}\frac{\partial a_j^L}{\partial z_j^L},
# $$
# which can also be interpreted as the partial derivative of the cost function with respect to the biases $b_j^L$, namely
# $$
# \delta_j^L = \frac{\partial {\cal C}}{\partial b_j^L}\frac{\partial b_j^L}{\partial z_j^L}=\frac{\partial {\cal C}}{\partial b_j^L},
# $$
# That is, the error $\delta_j^L$ is exactly equal to the rate of change of the cost function as a function of the bias.
# ## Bringing it together
#
# We have now three equations that are essential for the computations of the derivatives of the cost function at the output layer. These equations are needed to start the algorithm and they are
#
# **The starting equations.**
# <!-- Equation labels as ordinary links -->
# <div id="_auto8"></div>
#
# $$
# \begin{equation}
# \frac{\partial{\cal C}(\hat{W^L})}{\partial w_{jk}^L} = \delta_j^La_k^{L-1},
# \label{_auto8} \tag{13}
# \end{equation}
# $$
# and
# <!-- Equation labels as ordinary links -->
# <div id="_auto9"></div>
#
# $$
# \begin{equation}
# \delta_j^L = f'(z_j^L)\frac{\partial {\cal C}}{\partial (a_j^L)},
# \label{_auto9} \tag{14}
# \end{equation}
# $$
# and
# <!-- Equation labels as ordinary links -->
# <div id="_auto10"></div>
#
# $$
# \begin{equation}
# \delta_j^L = \frac{\partial {\cal C}}{\partial b_j^L},
# \label{_auto10} \tag{15}
# \end{equation}
# $$
# An interesting consequence of the above equations is that when the
# activation $a_k^{L-1}$ is small, the gradient term, that is the
# derivative of the cost function with respect to the weights, will also
# tend to be small. We say then that the weight learns slowly, meaning
# that it changes slowly when we minimize the weights via say gradient
# descent. In this case we say the system learns slowly.
#
# Another interesting feature is that is when the activation function,
# represented by the sigmoid function here, is rather flat when we move towards
# its end values $0$ and $1$ (see the above Python codes). In these
# cases, the derivatives of the activation function will also be close
# to zero, meaning again that the gradients will be small and the
# network learns slowly again.
#
#
#
# We need a fourth equation and we are set. We are going to propagate
# backwards in order to the determine the weights and biases. In order
# to do so we need to represent the error in the layer before the final
# one $L-1$ in terms of the errors in the final output layer.
#
# ## Final back propagating equation
#
# We have that (replacing $L$ with a general layer $l$)
# $$
# \delta_j^l =\frac{\partial {\cal C}}{\partial z_j^l}.
# $$
# We want to express this in terms of the equations for layer $l+1$. Using the chain rule and summing over all $k$ entries we have
# $$
# \delta_j^l =\sum_k \frac{\partial {\cal C}}{\partial z_k^{l+1}}\frac{\partial z_k^{l+1}}{\partial z_j^{l}}=\sum_k \delta_k^{l+1}\frac{\partial z_k^{l+1}}{\partial z_j^{l}},
# $$
# and recalling that
# $$
# z_j^{l+1} = \sum_{i=1}^{M_{l}}w_{ji}^{l+1}a_i^{l}+b_j^{l+1},
# $$
# with $M_l$ being the number of nodes in layer $l$, we obtain
# $$
# \delta_j^l =\sum_k \delta_k^{l+1}w_{kj}^{l+1}f'(z_j^l),
# $$
# This is our final equation.
#
# We are now ready to set up the algorithm for back propagation and learning the weights and biases.
#
# ## Setting up the Back propagation algorithm
#
#
#
# The four equations provide us with a way of computing the gradient of the cost function. Let us write this out in the form of an algorithm.
#
# First, we set up the input data $\hat{x}$ and the activations
# $\hat{z}_1$ of the input layer and compute the activation function and
# the pertinent outputs $\hat{a}^1$.
#
#
#
# Secondly, we perform then the feed forward till we reach the output
# layer and compute all $\hat{z}_l$ for the subsequent layers and compute the
# activation function and the pertinent outputs $\hat{a}^l$ for
# $l=2,3,\dots,L$.
#
#
#
# Thereafter we compute the ouput error $\hat{\delta}^L$ by computing all
# $$
# \delta_j^L = f'(z_j^L)\frac{\partial {\cal C}}{\partial (a_j^L)}.
# $$
# Then we compute the back propagate error for each $l=L-1,L-2,\dots,2$ as
# $$
# \delta_j^l = \sum_k \delta_k^{l+1}w_{kj}^{l+1}f'(z_j^l).
# $$
# Finally, we update the weights and the biases using gradient descent for each $l=L-1,L-2,\dots,2$ and update the weights and biases according to the rules
# $$
# w_{jk}^l\leftarrow w_{jk}^l- \eta \delta_j^la_k^{l-1},
# $$
# $$
# b_j^l \leftarrow b_j^l-\eta \frac{\partial {\cal C}}{\partial b_j^l}=b_j^l-\eta \delta_j^l,
# $$
# The parameter $\eta$ is the learning parameter discussed in connection with the gradient descent methods.
# Here it is convenient to use stochastic gradient descent (see the examples below) with mini-batches with an outer loop that steps through multiple epochs of training.
#
#
# <!-- !split -->
# ## Setting up a Multi-layer perceptron model for classification
#
# We are now going to develop an example based on the MNIST data
# base. This is a classification problem and we need to use our
# cross-entropy function we discussed in connection with logistic
# regression. The cross-entropy defines our cost function for the
# classificaton problems with neural networks.
#
# In binary classification with two classes $(0, 1)$ we define the
# logistic/sigmoid function as the probability that a particular input
# is in class $0$ or $1$. This is possible because the logistic
# function takes any input from the real numbers and inputs a number
# between 0 and 1, and can therefore be interpreted as a probability. It
# also has other nice properties, such as a derivative that is simple to
# calculate.
#
# For an input $\boldsymbol{a}$ from the hidden layer, the probability that the input $\boldsymbol{x}$
# is in class 0 or 1 is given by the logistic function below. We let $\theta$ represent the unknown weights and biases to be adjusted by our equations. The variable $x$
# represents our activation values $z$. We have
# $$
# P(y = 0 \mid \hat{x}, \hat{\theta}) = \frac{1}{1 + \exp{(-\hat{x})}} ,
# $$
# and
# $$
# P(y = 1 \mid \hat{x}, \hat{\theta}) = 1 - P(y = 0 \mid \hat{x}, \hat{\theta}) ,
# $$
# where $y \in \{0, 1\}$ and $\hat{\theta}$ represents the weights and biases
# of our network.
#
#
# ## Defining the cost function
#
# Our cost function is given as (see the Logistic regression lectures)
# $$
# \mathcal{C}(\hat{\theta}) = - \ln P(\mathcal{D} \mid \hat{\theta}) = - \sum_{i=1}^n
# y_i \ln[P(y_i = 0)] + (1 - y_i) \ln [1 - P(y_i = 0)] = \sum_{i=1}^n \mathcal{L}_i(\hat{\theta}) .
# $$
# This last equality means that we can interpret our *cost* function as a sum over the *loss* function
# for each point in the dataset $\mathcal{L}_i(\hat{\theta})$.
# The negative sign is just so that we can think about our algorithm as minimizing a positive number, rather
# than maximizing a negative number.
#
# In *multiclass* classification it is common to treat each integer label as a so called *one-hot* vector:
#
# $y = 5 \quad \rightarrow \quad \hat{y} = (0, 0, 0, 0, 0, 1, 0, 0, 0, 0) ,$ and
#
#
# $y = 1 \quad \rightarrow \quad \hat{y} = (0, 1, 0, 0, 0, 0, 0, 0, 0, 0) ,$
#
#
# i.e. a binary bit string of length $C$, where $C = 10$ is the number of classes in the MNIST dataset (numbers from $0$ to $9$)..
#
# If $\hat{x}_i$ is the $i$-th input (image), $y_{ic}$ refers to the $c$-th component of the $i$-th
# output vector $\hat{y}_i$.
# The probability of $\hat{x}_i$ being in class $c$ will be given by the softmax function:
# $$
# P(y_{ic} = 1 \mid \hat{x}_i, \hat{\theta}) = \frac{\exp{((\hat{a}_i^{hidden})^T \hat{w}_c)}}
# {\sum_{c'=0}^{C-1} \exp{((\hat{a}_i^{hidden})^T \hat{w}_{c'})}} ,
# $$
# which reduces to the logistic function in the binary case.
# The likelihood of this $C$-class classifier
# is now given as:
# $$
# P(\mathcal{D} \mid \hat{\theta}) = \prod_{i=1}^n \prod_{c=0}^{C-1} [P(y_{ic} = 1)]^{y_{ic}} .
# $$
# Again we take the negative log-likelihood to define our cost function:
# $$
# \mathcal{C}(\hat{\theta}) = - \log{P(\mathcal{D} \mid \hat{\theta})}.
# $$
# See the logistic regression lectures for a full definition of the cost function.
#
# The back propagation equations need now only a small change, namely the definition of a new cost function. We are thus ready to use the same equations as before!
#
# ## Example: binary classification problem
#
# As an example of the above, relevant for project 2 as well, let us consider a binary class. As discussed in our logistic regression lectures, we defined a cost function in terms of the parameters $\beta$ as
# $$
# \mathcal{C}(\hat{\beta}) = - \sum_{i=1}^n \left(y_i\log{p(y_i \vert x_i,\hat{\beta})}+(1-y_i)\log{(1-p(y_i \vert x_i,\hat{\beta}))}\right),
# $$
# where we had defined the logistic (sigmoid) function
# $$
# p(y_i =1\vert x_i,\hat{\beta})=\frac{\exp{(\beta_0+\beta_1 x_i)}}{1+\exp{(\beta_0+\beta_1 x_i)}},
# $$
# and
# $$
# p(y_i =0\vert x_i,\hat{\beta})=1-p(y_i =1\vert x_i,\hat{\beta}).
# $$
# The parameters $\hat{\beta}$ were defined using a minimization method like gradient descent or Newton-Raphson's method.
#
# Now we replace $x_i$ with the activation $z_i^l$ for a given layer $l$ and the outputs as $y_i=a_i^l=f(z_i^l)$, with $z_i^l$ now being a function of the weights $w_{ij}^l$ and biases $b_i^l$.
# We have then
# $$
# a_i^l = y_i = \frac{\exp{(z_i^l)}}{1+\exp{(z_i^l)}},
# $$
# with
# $$
# z_i^l = \sum_{j}w_{ij}^l a_j^{l-1}+b_i^l,
# $$
# where the superscript $l-1$ indicates that these are the outputs from layer $l-1$.
# Our cost function at the final layer $l=L$ is now
# $$
# \mathcal{C}(\hat{W}) = - \sum_{i=1}^n \left(t_i\log{a_i^L}+(1-t_i)\log{(1-a_i^L)}\right),
# $$
# where we have defined the targets $t_i$. The derivatives of the cost function with respect to the output $a_i^L$ are then easily calculated and we get
# $$
# \frac{\partial \mathcal{C}(\hat{W})}{\partial a_i^L} = \frac{a_i^L-t_i}{a_i^L(1-a_i^L)}.
# $$
# In case we use another activation function than the logistic one, we need to evaluate other derivatives.
#
#
# ## The Softmax function
# In case we employ the more general case given by the Softmax equation, we need to evaluate the derivative of the activation function with respect to the activation $z_i^l$, that is we need
# $$
# \frac{\partial f(z_i^l)}{\partial w_{jk}^l} =
# \frac{\partial f(z_i^l)}{\partial z_j^l} \frac{\partial z_j^l}{\partial w_{jk}^l}= \frac{\partial f(z_i^l)}{\partial z_j^l}a_k^{l-1}.
# $$
# For the Softmax function we have
# $$
# f(z_i^l) = \frac{\exp{(z_i^l)}}{\sum_{m=1}^K\exp{(z_m^l)}}.
# $$
# Its derivative with respect to $z_j^l$ gives
# $$
# \frac{\partial f(z_i^l)}{\partial z_j^l}= f(z_i^l)\left(\delta_{ij}-f(z_j^l)\right),
# $$
# which in case of the simple binary model reduces to having $i=j$.
#
# <!-- !split -->
# ## Developing a code for doing neural networks with back propagation
#
#
# One can identify a set of key steps when using neural networks to solve supervised learning problems:
#
# 1. Collect and pre-process data
#
# 2. Define model and architecture
#
# 3. Choose cost function and optimizer
#
# 4. Train the model
#
# 5. Evaluate model performance on test data
#
# 6. Adjust hyperparameters (if necessary, network architecture)
#
# ## Collect and pre-process data
#
# Here we will be using the MNIST dataset, which is readily available through the **scikit-learn**
# package. You may also find it for example [here](http://yann.lecun.com/exdb/mnist/).
# The *MNIST* (Modified National Institute of Standards and Technology) database is a large database
# of handwritten digits that is commonly used for training various image processing systems.
# The MNIST dataset consists of 70 000 images of size $28\times 28$ pixels, each labeled from 0 to 9.
# The scikit-learn dataset we will use consists of a selection of 1797 images of size $8\times 8$ collected and processed from this database.
#
# To feed data into a feed-forward neural network we need to represent
# the inputs as a feature matrix $X = (n_{inputs}, n_{features})$. Each
# row represents an *input*, in this case a handwritten digit, and
# each column represents a *feature*, in this case a pixel. The
# correct answers, also known as *labels* or *targets* are
# represented as a 1D array of integers
# $Y = (n_{inputs}) = (5, 3, 1, 8,...)$.
#
# As an example, say we want to build a neural network using supervised learning to predict Body-Mass Index (BMI) from
# measurements of height (in m)
# and weight (in kg). If we have measurements of 5 people the feature matrix could be for example:
#
# $$ X = \begin{bmatrix}
# 1.85 & 81\\
# 1.71 & 65\\
# 1.95 & 103\\
# 1.55 & 42\\
# 1.63 & 56
# \end{bmatrix} ,$$
#
# and the targets would be:
#
# $$ Y = (23.7, 22.2, 27.1, 17.5, 21.1) $$
#
# Since each input image is a 2D matrix, we need to flatten the image
# (i.e. "unravel" the 2D matrix into a 1D array) to turn the data into a
# feature matrix. This means we lose all spatial information in the
# image, such as locality and translational invariance. More complicated
# architectures such as Convolutional Neural Networks can take advantage
# of such information, and are most commonly applied when analyzing
# images.
# +
# Data loading and visualization cell.
# Loads the scikit-learn digits dataset (1797 grayscale images of size 8x8),
# flattens each image into a 64-element feature vector, and displays five
# randomly chosen samples with their labels.
# import necessary packages
import numpy as np
import matplotlib.pyplot as plt
from sklearn import datasets
# ensure the same random numbers appear every time
np.random.seed(0)
# display images in notebook
# %matplotlib inline
plt.rcParams['figure.figsize'] = (12,12)
# download MNIST dataset
# NOTE: this is the small 8x8 scikit-learn digits set, not the full 28x28 MNIST
digits = datasets.load_digits()
# define inputs and labels
inputs = digits.images  # shape (n_inputs, 8, 8)
labels = digits.target  # shape (n_inputs,), integer labels 0-9
print("inputs = (n_inputs, pixel_width, pixel_height) = " + str(inputs.shape))
print("labels = (n_inputs) = " + str(labels.shape))
# flatten the image
# the value -1 means dimension is inferred from the remaining dimensions: 8x8 = 64
n_inputs = len(inputs)
inputs = inputs.reshape(n_inputs, -1)
print("X = (n_inputs, n_features) = " + str(inputs.shape))
# choose some random images to display
indices = np.arange(n_inputs)
random_indices = np.random.choice(indices, size=5)
for i, image in enumerate(digits.images[random_indices]):
    plt.subplot(1, 5, i+1)
    plt.axis('off')
    plt.imshow(image, cmap=plt.cm.gray_r, interpolation='nearest')
    plt.title("Label: %d" % digits.target[random_indices[i]])
plt.show()
# -
# ## Train and test datasets
#
# Performing analysis before partitioning the dataset is a major error, that can lead to incorrect conclusions.
#
# We will reserve $80 \%$ of our dataset for training and $20 \%$ for testing.
#
# It is important that the train and test datasets are drawn randomly from our dataset, to ensure
# no bias in the sampling.
# Say you are taking measurements of weather data to predict the weather in the coming 5 days.
# You don't want to train your model on measurements taken from the hours 00.00 to 12.00, and then test it on data
# collected from 12.00 to 24.00.
# +
# Split the data into a training set (80%) and a held-out test set (20%).
from sklearn.model_selection import train_test_split
# one-liner from scikit-learn library
train_size = 0.8
test_size = 1 - train_size
# train_test_split shuffles before splitting, so both sets are drawn
# randomly from the full dataset (no ordering bias).
X_train, X_test, Y_train, Y_test = train_test_split(inputs, labels, train_size=train_size,
                                                    test_size=test_size)
def train_test_split_numpy(inputs, labels, train_size, test_size):
    """Split inputs/labels into random train and test subsets (pure numpy).

    Mirrors sklearn.model_selection.train_test_split for the simple
    case used in this notebook.

    Parameters
    ----------
    inputs : array-like, shape (n_inputs, ...)
        Feature data; the first axis indexes samples.
    labels : array-like, shape (n_inputs,)
        Targets, aligned with `inputs` along the first axis.
    train_size : float
        Fraction of samples placed in the training set (0 < train_size < 1).
    test_size : float
        Unused; kept for signature compatibility (the test set is the
        complement of the training set).

    Returns
    -------
    X_train, X_test, Y_train, Y_test : numpy arrays
    """
    inputs = np.asarray(inputs)
    labels = np.asarray(labels)
    n_inputs = len(inputs)
    # Bug fix: the original shuffled inputs and labels independently with
    # two separate np.random.shuffle calls, destroying the pairing between
    # each sample and its label. Draw ONE random permutation and apply it
    # to both arrays so they stay aligned.
    perm = np.random.permutation(n_inputs)
    inputs_shuffled = inputs[perm]
    labels_shuffled = labels[perm]
    train_end = int(n_inputs*train_size)
    X_train, X_test = inputs_shuffled[:train_end], inputs_shuffled[train_end:]
    Y_train, Y_test = labels_shuffled[:train_end], labels_shuffled[train_end:]
    return X_train, X_test, Y_train, Y_test
# The pure-numpy variant is left commented out; the sklearn split above is used.
#X_train, X_test, Y_train, Y_test = train_test_split_numpy(inputs, labels, train_size, test_size)
print("Number of training images: " + str(len(X_train)))
print("Number of test images: " + str(len(X_test)))
# -
# ## Define model and architecture
#
# Our simple feed-forward neural network will consist of an *input* layer, a single *hidden* layer and an *output* layer. The activation $y$ of each neuron is a weighted sum of inputs, passed through an activation function. In case of the simple perceptron model we have
#
# $$ z = \sum_{i=1}^n w_i a_i ,$$
#
# $$ y = f(z) ,$$
#
# where $f$ is the activation function, $a_i$ represents input from neuron $i$ in the preceding layer
# and $w_i$ is the weight to input $i$.
# The activation of the neurons in the input layer is just the features (e.g. a pixel value).
#
# The simplest activation function for a neuron is the *Heaviside* function:
#
# $$ f(z) =
# \begin{cases}
# 1, & z > 0\\
# 0, & \text{otherwise}
# \end{cases}
# $$
#
# A feed-forward neural network with this activation is known as a *perceptron*.
# For a binary classifier (i.e. two classes, 0 or 1, dog or not-dog) we can also use this in our output layer.
# This activation can be generalized to $k$ classes (using e.g. the *one-against-all* strategy),
# and we call these architectures *multiclass perceptrons*.
#
# However, it is now common to use the terms Single Layer Perceptron (SLP) (1 hidden layer) and
# Multilayer Perceptron (MLP) (2 or more hidden layers) to refer to feed-forward neural networks with any activation function.
#
# Typical choices for activation functions include the sigmoid function, hyperbolic tangent, and Rectified Linear Unit (ReLU).
# We will be using the sigmoid function $\sigma(x)$:
#
# $$ f(x) = \sigma(x) = \frac{1}{1 + e^{-x}} ,$$
#
# which is inspired by probability theory (see logistic regression) and was most commonly used until about 2011. See the discussion below concerning other activation functions.
#
# <!-- !split -->
# ## Layers
#
# * Input
#
# Since each input image has 8x8 = 64 pixels or features, we have an input layer of 64 neurons.
#
# * Hidden layer
#
# We will use 50 neurons in the hidden layer receiving input from the neurons in the input layer.
# Since each neuron in the hidden layer is connected to the 64 inputs we have 64x50 = 3200 weights to the hidden layer.
#
# * Output
#
# If we were building a binary classifier, it would be sufficient with a single neuron in the output layer,
# which could output 0 or 1 according to the Heaviside function. This would be an example of a *hard* classifier, meaning it outputs the class of the input directly. However, if we are dealing with noisy data it is often beneficial to use a *soft* classifier, which outputs the probability of being in class 0 or 1.
#
# For a soft binary classifier, we could use a single neuron and interpret the output as either being the probability of being in class 0 or the probability of being in class 1. Alternatively we could use 2 neurons, and interpret each neuron as the probability of being in each class.
#
# Since we are doing multiclass classification, with 10 categories, it is natural to use 10 neurons in the output layer. We number the neurons $j = 0,1,...,9$. The activation of each output neuron $j$ will be according to the *softmax* function:
#
# $$ P(\text{class $j$} \mid \text{input $\hat{a}$}) = \frac{\exp{(\hat{a}^T \hat{w}_j)}}
# {\sum_{c=0}^{9} \exp{(\hat{a}^T \hat{w}_c)}} ,$$
#
# i.e. each neuron $j$ outputs the probability of being in class $j$ given an input from the hidden layer $\hat{a}$, with $\hat{w}_j$ the weights of neuron $j$ to the inputs.
# The denominator is a normalization factor to ensure the outputs (probabilities) sum up to 1.
# The exponent is just the weighted sum of inputs as before:
#
# $$ z_j = \sum_{i=1}^n w_ {ij} a_i+b_j.$$
#
# Since each neuron in the output layer is connected to the 50 inputs from the hidden layer we have 50x10 = 500
# weights to the output layer.
#
# <!-- !split -->
# ## Weights and biases
#
# Typically weights are initialized with small values distributed around zero, drawn from a uniform
# or normal distribution. Setting all weights to zero means all neurons give the same output, making the network useless.
#
# Adding a bias value to the weighted sum of inputs allows the neural network to represent a greater range
# of values. Without it, any input with the value 0 will be mapped to zero (before being passed through the activation). The bias unit has an output of 1, and a weight to each neuron $j$, $b_j$:
#
# $$ z_j = \sum_{i=1}^n w_ {ij} a_i + b_j.$$
#
# The bias weights $\hat{b}$ are often initialized to zero, but a small value like $0.01$ ensures all neurons have some output which can be backpropagated in the first training cycle.
# +
# building our neural network
# Architecture: 64 input features -> 50 hidden neurons -> 10 output categories.
n_inputs, n_features = X_train.shape
n_hidden_neurons = 50
n_categories = 10
# we make the weights normally distributed using numpy.random.randn
# weights and bias in the hidden layer
hidden_weights = np.random.randn(n_features, n_hidden_neurons)
# small positive bias so every neuron has a non-zero output that can be
# backpropagated in the first training cycle
hidden_bias = np.zeros(n_hidden_neurons) + 0.01
# weights and bias in the output layer
output_weights = np.random.randn(n_hidden_neurons, n_categories)
output_bias = np.zeros(n_categories) + 0.01
# -
# ## Feed-forward pass
#
# Denote $F$ the number of features, $H$ the number of hidden neurons and $C$ the number of categories.
# For each input image we calculate a weighted sum of input features (pixel values) to each neuron $j$ in the hidden layer $l$:
#
# $$ z_{j}^{l} = \sum_{i=1}^{F} w_{ij}^{l} x_i + b_{j}^{l},$$
#
# this is then passed through our activation function
#
# $$ a_{j}^{l} = f(z_{j}^{l}) .$$
#
# We calculate a weighted sum of inputs (activations in the hidden layer) to each neuron $j$ in the output layer:
#
# $$ z_{j}^{L} = \sum_{i=1}^{H} w_{ij}^{L} a_{i}^{l} + b_{j}^{L}.$$
#
# Finally we calculate the output of neuron $j$ in the output layer using the softmax function:
#
# $$ a_{j}^{L} = \frac{\exp{(z_j^{L})}}
# {\sum_{c=0}^{C-1} \exp{(z_c^{L})}} .$$
#
# <!-- !split -->
# ## Matrix multiplications
#
# Since our data has the dimensions $X = (n_{inputs}, n_{features})$ and our weights to the hidden
# layer have the dimensions
# $W_{hidden} = (n_{features}, n_{hidden})$,
# we can easily feed the network all our training data in one go by taking the matrix product
#
# $$ X W^{h} = (n_{inputs}, n_{hidden}),$$
#
# and obtain a matrix that holds the weighted sum of inputs to the hidden layer
# for each input image and each hidden neuron.
# We also add the bias to obtain a matrix of weighted sums to the hidden layer $Z^{h}$:
#
# $$ \hat{z}^{l} = \hat{X} \hat{W}^{l} + \hat{b}^{l} ,$$
#
# meaning the same bias (1D array with size equal number of hidden neurons) is added to each input image.
# This is then passed through the activation:
#
# $$ \hat{a}^{l} = f(\hat{z}^l) .$$
#
# This is fed to the output layer:
#
# $$ \hat{z}^{L} = \hat{a}^{L} \hat{W}^{L} + \hat{b}^{L} .$$
#
# Finally we receive our output values for each image and each category by passing it through the softmax function:
#
# $$ output = softmax (\hat{z}^{L}) = (n_{inputs}, n_{categories}) .$$
# +
# setup the feed-forward pass, subscript h = hidden layer
def sigmoid(x):
    """Logistic activation: map any real input into the interval (0, 1)."""
    exp_neg = np.exp(-x)
    return 1.0 / (1.0 + exp_neg)
def feed_forward(X):
    """One forward pass through the network.

    Uses the module-level weight/bias globals. Returns the softmax
    class probabilities with shape (n_inputs, n_categories).
    """
    # hidden layer: affine transform followed by the sigmoid activation
    hidden_activations = sigmoid(np.matmul(X, hidden_weights) + hidden_bias)
    # output layer: affine transform followed by softmax normalization
    logits = np.matmul(hidden_activations, output_weights) + output_bias
    exponentials = np.exp(logits)
    # normalize each row (axis 1) so the category probabilities sum to one
    return exponentials / exponentials.sum(axis=1, keepdims=True)
# Sanity check: run the (still untrained) network on the training data and
# verify that each output row is a valid probability distribution.
probabilities = feed_forward(X_train)
print("probabilities = (n_inputs, n_categories) = " + str(probabilities.shape))
print("probability that image 0 is in category 0,1,2,...,9 = \n" + str(probabilities[0]))
print("probabilities sum up to: " + str(probabilities[0].sum()))
print()
# we obtain a prediction by taking the class with the highest likelihood
def predict(X):
    """Return the most probable class label (argmax over axis 1) for each row of X."""
    return feed_forward(X).argmax(axis=1)
# With untrained (random) weights these predictions are essentially guesses.
predictions = predict(X_train)
print("predictions = (n_inputs) = " + str(predictions.shape))
print("prediction for image 0: " + str(predictions[0]))
print("correct label for image 0: " + str(Y_train[0]))
# -
# ## Choose cost function and optimizer
#
# To measure how well our neural network is doing we need to introduce a cost function.
# We will call the function that gives the error of a single sample output the *loss* function, and the function
# that gives the total error of our network across all samples the *cost* function.
# A typical choice for multiclass classification is the *cross-entropy* loss, also known as the negative log likelihood.
#
# In *multiclass* classification it is common to treat each integer label as a so called *one-hot* vector:
#
# $$ y = 5 \quad \rightarrow \quad \hat{y} = (0, 0, 0, 0, 0, 1, 0, 0, 0, 0) ,$$
#
#
# $$ y = 1 \quad \rightarrow \quad \hat{y} = (0, 1, 0, 0, 0, 0, 0, 0, 0, 0) ,$$
#
#
# i.e. a binary bit string of length $C$, where $C = 10$ is the number of classes in the MNIST dataset.
#
# Let $y_{ic}$ denote the $c$-th component of the $i$-th one-hot vector.
# We define the cost function $\mathcal{C}$ as a sum over the cross-entropy loss for each point $\hat{x}_i$ in the dataset.
#
# In the one-hot representation only one of the terms in the loss function is non-zero, namely the
# probability of the correct category $c'$
# (i.e. the category $c'$ such that $y_{ic'} = 1$). This means that the cross entropy loss only punishes you for how wrong
# you got the correct label. The probability of category $c$ is given by the softmax function. The vector $\hat{\theta}$ represents the parameters of our network, i.e. all the weights and biases.
#
#
# ## Optimizing the cost function
#
# The network is trained by finding the weights and biases that minimize the cost function. One of the most widely used classes of methods is *gradient descent* and its generalizations. The idea behind gradient descent
# is simply to adjust the weights in the direction where the gradient of the cost function is large and negative. This ensures we flow toward a *local* minimum of the cost function.
# Each parameter $\theta$ is iteratively adjusted according to the rule
#
# $$ \theta_{i+1} = \theta_i - \eta \nabla \mathcal{C}(\theta_i) ,$$
#
# where $\eta$ is known as the *learning rate*, which controls how big a step we take towards the minimum.
# This update can be repeated for any number of iterations, or until we are satisfied with the result.
#
# A simple and effective improvement is a variant called *Batch Gradient Descent*.
# Instead of calculating the gradient on the whole dataset, we calculate an approximation of the gradient
# on a subset of the data called a *minibatch*.
# If there are $N$ data points and we have a minibatch size of $M$, the total number of batches
# is $N/M$.
# We denote each minibatch $B_k$, with $k = 1, 2,...,N/M$. The gradient then becomes:
#
# $$ \nabla \mathcal{C}(\theta) = \frac{1}{N} \sum_{i=1}^N \nabla \mathcal{L}_i(\theta) \quad \rightarrow \quad
# \frac{1}{M} \sum_{i \in B_k} \nabla \mathcal{L}_i(\theta) ,$$
#
# i.e. instead of averaging the loss over the entire dataset, we average over a minibatch.
#
# This has two important benefits:
# 1. Introducing stochasticity decreases the chance that the algorithm becomes stuck in a local minimum.
#
# 2. It significantly speeds up the calculation, since we do not have to use the entire dataset to calculate the gradient.
#
# The various optmization methods, with codes and algorithms, are discussed in our lectures on [Gradient descent approaches](https://compphysics.github.io/MachineLearning/doc/pub/Splines/html/Splines-bs.html).
#
# <!-- !split -->
# ## Regularization
#
# It is common to add an extra term to the cost function, proportional
# to the size of the weights. This is equivalent to constraining the
# size of the weights, so that they do not grow out of control.
# Constraining the size of the weights means that the weights cannot
# grow arbitrarily large to fit the training data, and in this way
# reduces *overfitting*.
#
# We will measure the size of the weights using the so called *L2-norm*, meaning our cost function becomes:
#
# $$ \nabla \mathcal{C}(\theta) = \frac{1}{N} \sum_{i=1}^N \nabla \mathcal{L}_i(\theta) \quad \rightarrow \quad
# \frac{1}{N} \sum_{i=1}^N \nabla \mathcal{L}_i(\theta) + \lambda \lvert \lvert \hat{w} \rvert \rvert_2^2
# = \frac{1}{N} \sum_{i=1}^N \nabla \mathcal{L}(\theta) + \lambda \sum_{ij} w_{ij}^2,$$
#
# i.e. we sum up all the weights squared. The factor $\lambda$ is known as a regularization parameter.
#
#
# In order to train the model, we need to calculate the derivative of
# the cost function with respect to every bias and weight in the
# network. In total our network has $(64 + 1)\times 50=3250$ weights in
# the hidden layer and $(50 + 1)\times 10=510$ weights to the output
# layer ($+1$ for the bias), and the gradient must be calculated for
# every parameter. We use the *backpropagation* algorithm discussed
# above. This is a clever use of the chain rule that allows us to
# calculate the gradient efficiently.
#
#
# ## Matrix multiplication
#
# To more efficiently train our network these equations are implemented using matrix operations.
# The error in the output layer is calculated simply as, with $\hat{t}$ being our targets,
#
# $$ \delta_L = \hat{t} - \hat{y} = (n_{inputs}, n_{categories}) .$$
#
# The gradient for the output weights is calculated as
#
# $$ \nabla W_{L} = \hat{a}^T \delta_L = (n_{hidden}, n_{categories}) ,$$
#
# where $\hat{a} = (n_{inputs}, n_{hidden})$. This simply means that we are summing up the gradients for each input.
# Since we are going backwards we have to transpose the activation matrix.
#
# The gradient with respect to the output bias is then
#
# $$ \nabla \hat{b}_{L} = \sum_{i=1}^{n_{inputs}} \delta_L = (n_{categories}) .$$
#
# The error in the hidden layer is
#
# $$ \Delta_h = \delta_L W_{L}^T \circ f'(z_{h}) = \delta_L W_{L}^T \circ a_{h} \circ (1 - a_{h}) = (n_{inputs}, n_{hidden}) ,$$
#
# where $f'(a_{h})$ is the derivative of the activation in the hidden layer. The matrix products mean
# that we are summing up the products for each neuron in the output layer. The symbol $\circ$ denotes
# the *Hadamard product*, meaning element-wise multiplication.
#
# This again gives us the gradients in the hidden layer:
#
# $$ \nabla W_{h} = X^T \delta_h = (n_{features}, n_{hidden}) ,$$
#
# $$ \nabla b_{h} = \sum_{i=1}^{n_{inputs}} \delta_h = (n_{hidden}) .$$
# +
# to categorical turns our integer vector into a onehot representation
from sklearn.metrics import accuracy_score
# one-hot in numpy
def to_categorical_numpy(integer_vector):
    """Convert a vector of integer labels to a one-hot matrix.

    The number of columns equals max(integer_vector) + 1; row i holds a
    single 1 in the column given by integer_vector[i].
    """
    n_categories = np.max(integer_vector) + 1
    # indexing the identity matrix by the labels picks out one-hot rows
    identity = np.eye(n_categories)
    return identity[np.asarray(integer_vector)]
# Keras' to_categorical would do the same; we use the numpy version above.
#Y_train_onehot, Y_test_onehot = to_categorical(Y_train), to_categorical(Y_test)
Y_train_onehot, Y_test_onehot = to_categorical_numpy(Y_train), to_categorical_numpy(Y_test)
def feed_forward_train(X):
    """Forward pass used during training.

    Identical to feed_forward, but also returns the hidden-layer
    activations, which the backpropagation step needs.
    """
    # hidden layer: affine transform + sigmoid
    a_h = sigmoid(np.matmul(X, hidden_weights) + hidden_bias)
    # output layer: affine transform + softmax over the categories (axis 1)
    z_o = np.matmul(a_h, output_weights) + output_bias
    exp_scores = np.exp(z_o)
    probabilities = exp_scores / exp_scores.sum(axis=1, keepdims=True)
    # for backpropagation need activations in hidden and output layers
    return a_h, probabilities
def backpropagation(X, Y):
    """Compute cost-function gradients for one forward/backward pass.

    X : feature matrix, shape (n_inputs, n_features)
    Y : one-hot targets, shape (n_inputs, n_categories)

    Returns gradients (output weights, output bias, hidden weights,
    hidden bias), summed over all inputs.
    """
    hidden_act, output_probs = feed_forward_train(X)
    # delta at the output layer: softmax + cross-entropy gives (p - t)
    delta_out = output_probs - Y
    # propagate back through the output weights; a*(1-a) is the
    # derivative of the sigmoid activation in the hidden layer
    delta_hidden = np.matmul(delta_out, output_weights.T) * hidden_act * (1 - hidden_act)
    # each gradient is the (transposed) activations times the deltas
    grad_w_out = np.matmul(hidden_act.T, delta_out)
    grad_b_out = delta_out.sum(axis=0)
    grad_w_hidden = np.matmul(X.T, delta_hidden)
    grad_b_hidden = delta_hidden.sum(axis=0)
    return grad_w_out, grad_b_out, grad_w_hidden, grad_b_hidden
# Full-batch gradient descent training loop with L2 regularization.
print("Old accuracy on training data: " + str(accuracy_score(predict(X_train), Y_train)))
eta = 0.01       # learning rate
lmbd = 0.01      # L2 regularization strength
for i in range(1000):
    # calculate gradients
    dWo, dBo, dWh, dBh = backpropagation(X_train, Y_train_onehot)
    # regularization term gradients (derivative of lmbd*||W||^2 / 2-style penalty)
    dWo += lmbd * output_weights
    dWh += lmbd * hidden_weights
    # update weights and biases
    output_weights -= eta * dWo
    output_bias -= eta * dBo
    hidden_weights -= eta * dWh
    hidden_bias -= eta * dBh
print("New accuracy on training data: " + str(accuracy_score(predict(X_train), Y_train)))
# -
# ## Improving performance
#
# As we can see the network does not seem to be learning at all. It seems to be just guessing the label for each image.
# In order to obtain a network that does something useful, we will have to do a bit more work.
#
# The choice of *hyperparameters* such as learning rate and regularization parameter is hugely influential for the performance of the network. Typically a *grid-search* is performed, wherein we test different hyperparameters separated by orders of magnitude. For example we could test the learning rates $\eta = 10^{-6}, 10^{-5},...,10^{-1}$ with different regularization parameters $\lambda = 10^{-6},...,10^{-0}$.
#
# Next, we haven't implemented minibatching yet, which introduces stochasticity and is thought to act as an important regularizer on the weights. We call a feed-forward + backward pass with a minibatch an *iteration*, and a full training period
# going through the entire dataset ($n/M$ batches) an *epoch*.
#
# If this does not improve network performance, you may want to consider altering the network architecture, adding more neurons or hidden layers.
# <NAME> goes through some of these considerations in this [video](https://youtu.be/F1ka6a13S9I). You can find a summary of the video [here](https://kevinzakka.github.io/2016/09/26/applying-deep-learning/).
#
# ## Full object-oriented implementation
#
# It is very natural to think of the network as an object, with specific instances of the network
# being realizations of this object with different hyperparameters. An implementation using Python classes provides a clean structure and interface, and the full implementation of our neural network is given below.
class NeuralNetwork:
    """Feed-forward neural network with one sigmoid hidden layer and a
    softmax output layer, trained by mini-batch stochastic gradient descent
    with optional L2 regularization.

    NOTE: relies on a module-level ``sigmoid`` function defined earlier in
    this notebook.
    """

    def __init__(
            self,
            X_data,
            Y_data,
            n_hidden_neurons=50,
            n_categories=10,
            epochs=10,
            batch_size=100,
            eta=0.1,
            lmbd=0.0):
        """Store data and hyperparameters, then initialize weights/biases.

        Parameters
        ----------
        X_data : ndarray, shape (n_inputs, n_features)
            Training design matrix.
        Y_data : ndarray, shape (n_inputs, n_categories)
            One-hot encoded training labels.
        n_hidden_neurons : int
            Width of the single hidden layer.
        n_categories : int
            Number of output classes.
        epochs : int
            Passes over the training set.
        batch_size : int
            Mini-batch size M; each epoch runs n_inputs // M iterations.
        eta : float
            Learning rate.
        lmbd : float
            L2 regularization strength (0.0 disables it).
        """
        self.X_data_full = X_data
        self.Y_data_full = Y_data

        self.n_inputs = X_data.shape[0]
        self.n_features = X_data.shape[1]
        self.n_hidden_neurons = n_hidden_neurons
        self.n_categories = n_categories

        self.epochs = epochs
        self.batch_size = batch_size
        self.iterations = self.n_inputs // self.batch_size
        self.eta = eta
        self.lmbd = lmbd

        self.create_biases_and_weights()

    def create_biases_and_weights(self):
        """Initialize weights from N(0, 1) and biases to a small constant 0.01."""
        self.hidden_weights = np.random.randn(self.n_features, self.n_hidden_neurons)
        self.hidden_bias = np.zeros(self.n_hidden_neurons) + 0.01

        self.output_weights = np.random.randn(self.n_hidden_neurons, self.n_categories)
        self.output_bias = np.zeros(self.n_categories) + 0.01

    def feed_forward(self):
        """Forward pass on the current mini-batch, caching the intermediates
        (z_h, a_h, z_o, probabilities) needed by backpropagation."""
        self.z_h = np.matmul(self.X_data, self.hidden_weights) + self.hidden_bias
        self.a_h = sigmoid(self.z_h)

        self.z_o = np.matmul(self.a_h, self.output_weights) + self.output_bias

        # Numerically stable softmax: subtracting the row-wise maximum leaves
        # the result unchanged but prevents overflow in np.exp for large
        # logits (which otherwise yields inf/NaN for large learning rates).
        z_o_shifted = self.z_o - np.max(self.z_o, axis=1, keepdims=True)
        exp_term = np.exp(z_o_shifted)
        self.probabilities = exp_term / np.sum(exp_term, axis=1, keepdims=True)

    def feed_forward_out(self, X):
        """Forward pass on arbitrary input X.

        Returns the softmax class probabilities, shape (n_samples,
        n_categories), without touching the cached training intermediates.
        """
        z_h = np.matmul(X, self.hidden_weights) + self.hidden_bias
        a_h = sigmoid(z_h)

        z_o = np.matmul(a_h, self.output_weights) + self.output_bias

        # numerically stable softmax (see feed_forward)
        z_o_shifted = z_o - np.max(z_o, axis=1, keepdims=True)
        exp_term = np.exp(z_o_shifted)
        probabilities = exp_term / np.sum(exp_term, axis=1, keepdims=True)
        return probabilities

    def backpropagation(self):
        """Compute gradients for the current mini-batch and take one
        gradient-descent step on all weights and biases."""
        # for softmax + cross-entropy the output error reduces to (p - y)
        error_output = self.probabilities - self.Y_data
        # propagate through the sigmoid hidden layer: a_h * (1 - a_h) is sigmoid'
        error_hidden = np.matmul(error_output, self.output_weights.T) * self.a_h * (1 - self.a_h)

        self.output_weights_gradient = np.matmul(self.a_h.T, error_output)
        self.output_bias_gradient = np.sum(error_output, axis=0)

        self.hidden_weights_gradient = np.matmul(self.X_data.T, error_hidden)
        self.hidden_bias_gradient = np.sum(error_hidden, axis=0)

        if self.lmbd > 0.0:
            # L2 regularization adds lmbd * W to the weight gradients
            self.output_weights_gradient += self.lmbd * self.output_weights
            self.hidden_weights_gradient += self.lmbd * self.hidden_weights

        self.output_weights -= self.eta * self.output_weights_gradient
        self.output_bias -= self.eta * self.output_bias_gradient
        self.hidden_weights -= self.eta * self.hidden_weights_gradient
        self.hidden_bias -= self.eta * self.hidden_bias_gradient

    def predict(self, X):
        """Return the most probable class label for each row of X."""
        probabilities = self.feed_forward_out(X)
        return np.argmax(probabilities, axis=1)

    def predict_probabilities(self, X):
        """Return the full softmax probability matrix for each row of X."""
        probabilities = self.feed_forward_out(X)
        return probabilities

    def train(self):
        """Run mini-batch SGD for the configured number of epochs."""
        data_indices = np.arange(self.n_inputs)

        for i in range(self.epochs):
            for j in range(self.iterations):
                # sample each mini-batch without replacement (the original
                # comment claimed "with replacement", contradicting the code)
                chosen_datapoints = np.random.choice(
                    data_indices, size=self.batch_size, replace=False
                )

                # mini-batch training data
                self.X_data = self.X_data_full[chosen_datapoints]
                self.Y_data = self.Y_data_full[chosen_datapoints]

                self.feed_forward()
                self.backpropagation()
# ## Evaluate model performance on test data
#
# To measure the performance of our network we evaluate how well it does on data it has never seen before, i.e. the test data.
# We measure the performance of the network using the *accuracy* score.
# The accuracy is as you would expect just the number of images correctly labeled divided by the total number of images. A perfect classifier will have an accuracy score of $1$.
#
# $$ \text{Accuracy} = \frac{\sum_{i=1}^n I(\hat{y}_i = y_i)}{n} ,$$
#
# where $I$ is the indicator function, $1$ if $\hat{y}_i = y_i$ and $0$ otherwise.
# +
# training schedule; eta, lmbd, n_hidden_neurons and n_categories are
# assumed to be defined in earlier cells
epochs = 100
batch_size = 100

# train a single network and measure its accuracy on the held-out test set
dnn = NeuralNetwork(X_train, Y_train_onehot, eta=eta, lmbd=lmbd, epochs=epochs, batch_size=batch_size,
                    n_hidden_neurons=n_hidden_neurons, n_categories=n_categories)
dnn.train()
test_predict = dnn.predict(X_test)

# accuracy score from scikit library
print("Accuracy score on test set: ", accuracy_score(Y_test, test_predict))
def accuracy_score_numpy(Y_test, Y_pred):
    """NumPy twin of sklearn's accuracy_score: fraction of exact matches."""
    n_correct = np.sum(Y_test == Y_pred)
    return n_correct / len(Y_test)
#print("Accuracy score on test set: ", accuracy_score_numpy(Y_test, test_predict))
# -
# ## Adjust hyperparameters
#
# We now perform a grid search to find the optimal hyperparameters for the network.
# Note that we are only using 1 layer with 50 neurons, and human performance is estimated to be around $98\%$ ($2\%$ error rate).
# +
# log-spaced grids of learning rates and regularization parameters
eta_vals = np.logspace(-5, 1, 7)
lmbd_vals = np.logspace(-5, 1, 7)

# store the models for later use
DNN_numpy = np.zeros((len(eta_vals), len(lmbd_vals)), dtype=object)

# grid search: train one fresh network per (eta, lmbd) pair
for i, eta in enumerate(eta_vals):
    for j, lmbd in enumerate(lmbd_vals):
        dnn = NeuralNetwork(X_train, Y_train_onehot, eta=eta, lmbd=lmbd, epochs=epochs, batch_size=batch_size,
                            n_hidden_neurons=n_hidden_neurons, n_categories=n_categories)
        dnn.train()

        DNN_numpy[i][j] = dnn

        test_predict = dnn.predict(X_test)

        print("Learning rate = ", eta)
        print("Lambda = ", lmbd)
        print("Accuracy score on test set: ", accuracy_score(Y_test, test_predict))
        print()
# -
# ## Visualization
# +
# visual representation of grid search
# uses seaborn heatmap, you can also do this with matplotlib imshow
import seaborn as sns

sns.set()

train_accuracy = np.zeros((len(eta_vals), len(lmbd_vals)))
test_accuracy = np.zeros((len(eta_vals), len(lmbd_vals)))

# recompute train/test accuracy for every stored model
for i in range(len(eta_vals)):
    for j in range(len(lmbd_vals)):
        dnn = DNN_numpy[i][j]

        train_pred = dnn.predict(X_train)
        test_pred = dnn.predict(X_test)

        train_accuracy[i][j] = accuracy_score(Y_train, train_pred)
        test_accuracy[i][j] = accuracy_score(Y_test, test_pred)

# raw strings below so the backslashes in the LaTeX labels are not treated
# as (invalid) escape sequences like "\e" and "\l"
fig, ax = plt.subplots(figsize=(10, 10))
sns.heatmap(train_accuracy, annot=True, ax=ax, cmap="viridis")
ax.set_title("Training Accuracy")
ax.set_ylabel(r"$\eta$")
ax.set_xlabel(r"$\lambda$")
plt.show()

fig, ax = plt.subplots(figsize=(10, 10))
sns.heatmap(test_accuracy, annot=True, ax=ax, cmap="viridis")
ax.set_title("Test Accuracy")
ax.set_ylabel(r"$\eta$")
ax.set_xlabel(r"$\lambda$")
plt.show()
# -
# ## scikit-learn implementation
#
# **scikit-learn** focuses more
# on traditional machine learning methods, such as regression,
# clustering, decision trees, etc. As such, it has only two types of
# neural networks: Multi Layer Perceptron outputting continuous values,
# *MLPRegressor*, and Multi Layer Perceptron outputting labels,
# *MLPClassifier*. We will see how simple it is to use these classes.
#
# **scikit-learn** implements a few improvements from our neural network,
# such as early stopping, a varying learning rate, different
# optimization methods, etc. We would therefore expect a better
# performance overall.
# +
from sklearn.neural_network import MLPClassifier
# store the trained models for later use
DNN_scikit = np.zeros((len(eta_vals), len(lmbd_vals)), dtype=object)

# grid search over learning rate (eta) and L2 penalty (alpha=lmbd)
for i, eta in enumerate(eta_vals):
    for j, lmbd in enumerate(lmbd_vals):
        dnn = MLPClassifier(hidden_layer_sizes=(n_hidden_neurons), activation='logistic',
                            alpha=lmbd, learning_rate_init=eta, max_iter=epochs)
        dnn.fit(X_train, Y_train)

        DNN_scikit[i][j] = dnn

        print("Learning rate = ", eta)
        print("Lambda = ", lmbd)
        print("Accuracy score on test set: ", dnn.score(X_test, Y_test))
        print()
# -
# ## Visualization
# +
# optional
# visual representation of grid search
# uses seaborn heatmap, could probably do this in matplotlib
import seaborn as sns

sns.set()

train_accuracy = np.zeros((len(eta_vals), len(lmbd_vals)))
test_accuracy = np.zeros((len(eta_vals), len(lmbd_vals)))

# recompute train/test accuracy for every stored scikit-learn model
for i in range(len(eta_vals)):
    for j in range(len(lmbd_vals)):
        dnn = DNN_scikit[i][j]

        train_pred = dnn.predict(X_train)
        test_pred = dnn.predict(X_test)

        train_accuracy[i][j] = accuracy_score(Y_train, train_pred)
        test_accuracy[i][j] = accuracy_score(Y_test, test_pred)

# raw strings so the LaTeX backslashes are not (invalid) escape sequences
fig, ax = plt.subplots(figsize=(10, 10))
sns.heatmap(train_accuracy, annot=True, ax=ax, cmap="viridis")
ax.set_title("Training Accuracy")
ax.set_ylabel(r"$\eta$")
ax.set_xlabel(r"$\lambda$")
plt.show()

fig, ax = plt.subplots(figsize=(10, 10))
sns.heatmap(test_accuracy, annot=True, ax=ax, cmap="viridis")
ax.set_title("Test Accuracy")
ax.set_ylabel(r"$\eta$")
ax.set_xlabel(r"$\lambda$")
plt.show()
# -
# ## Building neural networks in Tensorflow and Keras
#
# Now we want to build on the experience gained from our neural network implementation in NumPy and scikit-learn
# and use it to construct a neural network in Tensorflow. Once we have constructed a neural network in NumPy
# and Tensorflow, building one in Keras is really quite trivial, though the performance may suffer.
#
# In our previous example we used only one hidden layer, and in this we will use two. From this it should be quite
# clear how to build one using an arbitrary number of hidden layers, using data structures such as Python lists or
# NumPy arrays.
#
# ## Tensorflow
#
# Tensorflow is an open source machine learning library
# developed by the Google Brain team for internal use. It was released
# under the Apache 2.0 open source license in November 9, 2015.
#
# Tensorflow is a computational framework that allows you to construct
# machine learning models at different levels of abstraction, from
# high-level, object-oriented APIs like Keras, down to the C++ kernels
# that Tensorflow is built upon. The higher levels of abstraction are
# simpler to use, but less flexible, and our choice of implementation
# should reflect the problems we are trying to solve.
#
# [Tensorflow uses](https://www.tensorflow.org/guide/graphs) so-called graphs to represent your computation
# in terms of the dependencies between individual operations, such that you first build a Tensorflow *graph*
# to represent your model, and then create a Tensorflow *session* to run the graph.
#
# In this guide we will analyze the same data as we did in our NumPy and
# scikit-learn tutorial, gathered from the MNIST database of images. We
# will give an introduction to the lower level Python Application
# Program Interfaces (APIs), and see how we use them to build our graph.
# Then we will build (effectively) the same graph in Keras, to see just
# how simple solving a machine learning problem can be.
#
# To install tensorflow on Unix/Linux systems, use pip as
#     pip3 install tensorflow
# and/or if you use **anaconda**, just write (or install from the graphical user interface)
#     conda install tensorflow
# ## Collect and pre-process data
# +
# import necessary packages
import numpy as np
import matplotlib.pyplot as plt
from sklearn import datasets

# ensure the same random numbers appear every time
np.random.seed(0)

# display images in notebook
# %matplotlib inline
plt.rcParams['figure.figsize'] = (12,12)

# load the digits dataset (scikit-learn's 8x8 digit images)
digits = datasets.load_digits()

# define inputs and labels
inputs = digits.images   # shape (n_inputs, pixel_width, pixel_height)
labels = digits.target   # integer class labels, shape (n_inputs,)
print("inputs = (n_inputs, pixel_width, pixel_height) = " + str(inputs.shape))
print("labels = (n_inputs) = " + str(labels.shape))

# flatten the image
# the value -1 means dimension is inferred from the remaining dimensions: 8x8 = 64
n_inputs = len(inputs)
inputs = inputs.reshape(n_inputs, -1)
print("X = (n_inputs, n_features) = " + str(inputs.shape))

# choose some random images to display
indices = np.arange(n_inputs)
random_indices = np.random.choice(indices, size=5)

for i, image in enumerate(digits.images[random_indices]):
    plt.subplot(1, 5, i+1)
    plt.axis('off')
    plt.imshow(image, cmap=plt.cm.gray_r, interpolation='nearest')
    plt.title("Label: %d" % digits.target[random_indices[i]])
plt.show()
# +
from keras.utils import to_categorical
from sklearn.model_selection import train_test_split

# one-hot representation of labels
labels = to_categorical(labels)

# split into train and test data (80% / 20%)
train_size = 0.8
test_size = 1 - train_size
X_train, X_test, Y_train, Y_test = train_test_split(inputs, labels, train_size=train_size,
                                                    test_size=test_size)
# -
# ## Using TensorFlow backend
#
# 1. Define model and architecture
#
# 2. Choose cost function and optimizer
# +
import tensorflow as tf
class NeuralNetworkTensorflow:
    """Two-hidden-layer feed-forward network built with the TensorFlow 1.x
    graph API (placeholders + Session).

    Architecture: input -> sigmoid(fc1) -> sigmoid(fc2) -> linear output,
    trained with plain gradient descent on softmax cross-entropy plus an
    L2 penalty (``lmbd``) on all three weight matrices.

    BUG FIX: the original ``fit`` referenced the *global* name ``DNN``
    instead of ``self``, so every instance silently trained/evaluated
    whatever object the global ``DNN`` happened to point at. All those
    references now use ``self``.
    """

    def __init__(
            self,
            X_train,
            Y_train,
            X_test,
            Y_test,
            n_neurons_layer1=100,
            n_neurons_layer2=50,
            n_categories=2,
            epochs=10,
            batch_size=100,
            eta=0.1,
            lmbd=0.0):
        # keep track of number of optimizer steps taken
        self.global_step = tf.Variable(0, dtype=tf.int32, trainable=False, name='global_step')
        self.X_train = X_train
        self.Y_train = Y_train
        self.X_test = X_test
        self.Y_test = Y_test

        self.n_inputs = X_train.shape[0]
        self.n_features = X_train.shape[1]
        self.n_neurons_layer1 = n_neurons_layer1
        self.n_neurons_layer2 = n_neurons_layer2
        self.n_categories = n_categories

        self.epochs = epochs
        self.batch_size = batch_size
        self.iterations = self.n_inputs // self.batch_size
        self.eta = eta
        self.lmbd = lmbd

        # build network piece by piece
        # name scopes (with) are used to enforce creation of new variables
        # https://www.tensorflow.org/guide/variables
        self.create_placeholders()
        self.create_DNN()
        self.create_loss()
        self.create_optimiser()
        self.create_accuracy()

    def create_placeholders(self):
        """Define graph inputs for features (X) and one-hot labels (Y)."""
        # placeholders are fine here, but "Datasets" are the preferred method
        # of streaming data into a model
        with tf.name_scope('data'):
            self.X = tf.placeholder(tf.float32, shape=(None, self.n_features), name='X_data')
            self.Y = tf.placeholder(tf.float32, shape=(None, self.n_categories), name='Y_data')

    def create_DNN(self):
        """Build the two sigmoid hidden layers and the linear output layer."""
        with tf.name_scope('DNN'):
            # the weights are stored to calculate regularization loss later

            # Fully connected layer 1
            self.W_fc1 = self.weight_variable([self.n_features, self.n_neurons_layer1], name='fc1', dtype=tf.float32)
            b_fc1 = self.bias_variable([self.n_neurons_layer1], name='fc1', dtype=tf.float32)
            a_fc1 = tf.nn.sigmoid(tf.matmul(self.X, self.W_fc1) + b_fc1)

            # Fully connected layer 2
            self.W_fc2 = self.weight_variable([self.n_neurons_layer1, self.n_neurons_layer2], name='fc2', dtype=tf.float32)
            b_fc2 = self.bias_variable([self.n_neurons_layer2], name='fc2', dtype=tf.float32)
            a_fc2 = tf.nn.sigmoid(tf.matmul(a_fc1, self.W_fc2) + b_fc2)

            # Output layer (logits; softmax is applied inside the loss/accuracy)
            self.W_out = self.weight_variable([self.n_neurons_layer2, self.n_categories], name='out', dtype=tf.float32)
            b_out = self.bias_variable([self.n_categories], name='out', dtype=tf.float32)
            self.z_out = tf.matmul(a_fc2, self.W_out) + b_out

    def create_loss(self):
        """Softmax cross-entropy plus L2 penalty on all weight matrices."""
        with tf.name_scope('loss'):
            softmax_loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits_v2(labels=self.Y, logits=self.z_out))

            regularizer_loss_fc1 = tf.nn.l2_loss(self.W_fc1)
            regularizer_loss_fc2 = tf.nn.l2_loss(self.W_fc2)
            regularizer_loss_out = tf.nn.l2_loss(self.W_out)
            regularizer_loss = self.lmbd*(regularizer_loss_fc1 + regularizer_loss_fc2 + regularizer_loss_out)

            self.loss = softmax_loss + regularizer_loss

    def create_accuracy(self):
        """Fraction of samples whose argmax prediction matches the label."""
        with tf.name_scope('accuracy'):
            probabilities = tf.nn.softmax(self.z_out)
            predictions = tf.argmax(probabilities, axis=1)
            labels = tf.argmax(self.Y, axis=1)

            correct_predictions = tf.equal(predictions, labels)
            correct_predictions = tf.cast(correct_predictions, tf.float32)
            self.accuracy = tf.reduce_mean(correct_predictions)

    def create_optimiser(self):
        """Plain gradient descent minimizing the regularized loss."""
        with tf.name_scope('optimizer'):
            self.optimizer = tf.train.GradientDescentOptimizer(learning_rate=self.eta).minimize(self.loss, global_step=self.global_step)

    def weight_variable(self, shape, name='', dtype=tf.float32):
        """Truncated-normal weight initializer (stddev 0.1)."""
        initial = tf.truncated_normal(shape, stddev=0.1)
        return tf.Variable(initial, name=name, dtype=dtype)

    def bias_variable(self, shape, name='', dtype=tf.float32):
        """Constant bias initializer (0.1)."""
        initial = tf.constant(0.1, shape=shape)
        return tf.Variable(initial, name=name, dtype=dtype)

    def fit(self):
        """Train with mini-batch SGD; store final train/test loss and
        accuracy on self.{train,test}_{loss,accuracy}."""
        data_indices = np.arange(self.n_inputs)

        with tf.Session() as sess:
            sess.run(tf.global_variables_initializer())
            for i in range(self.epochs):
                for j in range(self.iterations):
                    # sample a mini-batch without replacement
                    chosen_datapoints = np.random.choice(data_indices, size=self.batch_size, replace=False)
                    batch_X, batch_Y = self.X_train[chosen_datapoints], self.Y_train[chosen_datapoints]

                    # was DNN.loss / DNN.optimizer / DNN.X / DNN.Y (global);
                    # must be self so each instance trains itself
                    sess.run([self.loss, self.optimizer],
                             feed_dict={self.X: batch_X,
                                        self.Y: batch_Y})
                    # per-batch monitoring values (kept from the original;
                    # currently unused beyond the last iteration)
                    accuracy = sess.run(self.accuracy,
                                        feed_dict={self.X: batch_X,
                                                   self.Y: batch_Y})
                    step = sess.run(self.global_step)

            # final metrics over the full train and test sets
            self.train_loss, self.train_accuracy = sess.run([self.loss, self.accuracy],
                                                            feed_dict={self.X: self.X_train,
                                                                       self.Y: self.Y_train})
            self.test_loss, self.test_accuracy = sess.run([self.loss, self.accuracy],
                                                          feed_dict={self.X: self.X_test,
                                                                     self.Y: self.Y_test})
# -
# ## Optimizing and using gradient descent
# hyperparameters for the TensorFlow network
epochs = 100
batch_size = 100
n_neurons_layer1 = 100
n_neurons_layer2 = 50
n_categories = 10                    # digits 0-9
eta_vals = np.logspace(-5, 1, 7)     # learning rates 10^-5 ... 10^1
lmbd_vals = np.logspace(-5, 1, 7)    # regularization strengths
# +
# store the trained TensorFlow models for later use
DNN_tf = np.zeros((len(eta_vals), len(lmbd_vals)), dtype=object)

# grid search over learning rate and regularization strength
for i, eta in enumerate(eta_vals):
    for j, lmbd in enumerate(lmbd_vals):
        DNN = NeuralNetworkTensorflow(X_train, Y_train, X_test, Y_test,
                                      n_neurons_layer1, n_neurons_layer2, n_categories,
                                      epochs=epochs, batch_size=batch_size, eta=eta, lmbd=lmbd)
        DNN.fit()

        DNN_tf[i][j] = DNN

        print("Learning rate = ", eta)
        print("Lambda = ", lmbd)
        print("Test accuracy: %.3f" % DNN.test_accuracy)
        print()
# +
# optional
# visual representation of grid search
# uses seaborn heatmap, could probably do this in matplotlib
import seaborn as sns

sns.set()

train_accuracy = np.zeros((len(eta_vals), len(lmbd_vals)))
test_accuracy = np.zeros((len(eta_vals), len(lmbd_vals)))

# accuracies were already stored on each model by fit(); just collect them
for i in range(len(eta_vals)):
    for j in range(len(lmbd_vals)):
        DNN = DNN_tf[i][j]

        train_accuracy[i][j] = DNN.train_accuracy
        test_accuracy[i][j] = DNN.test_accuracy

# raw strings so the LaTeX backslashes are not (invalid) escape sequences
fig, ax = plt.subplots(figsize=(10, 10))
sns.heatmap(train_accuracy, annot=True, ax=ax, cmap="viridis")
ax.set_title("Training Accuracy")
ax.set_ylabel(r"$\eta$")
ax.set_xlabel(r"$\lambda$")
plt.show()

fig, ax = plt.subplots(figsize=(10, 10))
sns.heatmap(test_accuracy, annot=True, ax=ax, cmap="viridis")
ax.set_title("Test Accuracy")
ax.set_ylabel(r"$\eta$")
ax.set_xlabel(r"$\lambda$")
plt.show()
# -
# optional
# we can use log files to visualize our graph in Tensorboard
# write the graph definition to 'logs/' so it can be inspected in TensorBoard
writer = tf.summary.FileWriter('logs/')
writer.add_graph(tf.get_default_graph())
# ## Using Keras
#
# Keras is a high-level [neural network API](https://en.wikipedia.org/wiki/Application_programming_interface)
# that supports Tensorflow, CNTK and Theano as backends.
# If you have Tensorflow installed Keras is available through the *tf.keras* module.
# If you have Anaconda installed you may run the following command
#     conda install keras
# Alternatively, if you have Tensorflow or one of the other supported backends install you may use the pip package manager:
#     pip3 install keras
# or look up the [instructions here](https://keras.io/).
# +
from keras.models import Sequential
from keras.layers import Dense
from keras.regularizers import l2
from keras.optimizers import SGD
def create_neural_network_keras(n_neurons_layer1, n_neurons_layer2, n_categories, eta, lmbd):
    """Build and compile a feed-forward Keras model.

    Two sigmoid hidden layers with an L2 weight penalty ``lmbd``, followed
    by a softmax output layer; compiled with plain SGD at learning rate
    ``eta`` and categorical cross-entropy loss.
    """
    network = Sequential()
    # hidden layers share the same activation and regularizer
    for width in (n_neurons_layer1, n_neurons_layer2):
        network.add(Dense(width, activation='sigmoid', kernel_regularizer=l2(lmbd)))
    network.add(Dense(n_categories, activation='softmax'))

    network.compile(loss='categorical_crossentropy',
                    optimizer=SGD(lr=eta),
                    metrics=['accuracy'])
    return network
# +
# store the trained Keras models for later use
DNN_keras = np.zeros((len(eta_vals), len(lmbd_vals)), dtype=object)

# grid search over learning rate and L2 penalty
for i, eta in enumerate(eta_vals):
    for j, lmbd in enumerate(lmbd_vals):
        DNN = create_neural_network_keras(n_neurons_layer1, n_neurons_layer2, n_categories,
                                          eta=eta, lmbd=lmbd)
        DNN.fit(X_train, Y_train, epochs=epochs, batch_size=batch_size, verbose=0)
        scores = DNN.evaluate(X_test, Y_test)   # [loss, accuracy]

        DNN_keras[i][j] = DNN

        print("Learning rate = ", eta)
        print("Lambda = ", lmbd)
        print("Test accuracy: %.3f" % scores[1])
        print()
# +
# optional
# visual representation of grid search
# uses seaborn heatmap, could probably do this in matplotlib
import seaborn as sns

sns.set()

train_accuracy = np.zeros((len(eta_vals), len(lmbd_vals)))
test_accuracy = np.zeros((len(eta_vals), len(lmbd_vals)))

# evaluate() returns [loss, accuracy]; index 1 is the accuracy
for i in range(len(eta_vals)):
    for j in range(len(lmbd_vals)):
        DNN = DNN_keras[i][j]

        train_accuracy[i][j] = DNN.evaluate(X_train, Y_train)[1]
        test_accuracy[i][j] = DNN.evaluate(X_test, Y_test)[1]

# raw strings so the LaTeX backslashes are not (invalid) escape sequences
fig, ax = plt.subplots(figsize=(10, 10))
sns.heatmap(train_accuracy, annot=True, ax=ax, cmap="viridis")
ax.set_title("Training Accuracy")
ax.set_ylabel(r"$\eta$")
ax.set_xlabel(r"$\lambda$")
plt.show()

fig, ax = plt.subplots(figsize=(10, 10))
sns.heatmap(test_accuracy, annot=True, ax=ax, cmap="viridis")
ax.set_title("Test Accuracy")
ax.set_ylabel(r"$\eta$")
ax.set_xlabel(r"$\lambda$")
plt.show()
# -
# <!-- !split -->
# ## Which activation function should I use?
#
# The Back propagation algorithm we derived above works by going from
# the output layer to the input layer, propagating the error gradient on
# the way. Once the algorithm has computed the gradient of the cost
# function with regards to each parameter in the network, it uses these
# gradients to update each parameter with a Gradient Descent (GD) step.
#
#
# Unfortunately for us, the gradients often get smaller and smaller as the
# algorithm progresses down to the first hidden layers. As a result, the
# GD update leaves the lower layer connection weights
# virtually unchanged, and training never converges to a good
# solution. This is known in the literature as
# **the vanishing gradients problem**.
#
# In other cases, the opposite can happen, namely that the gradients can grow bigger and
# bigger. The result is that many of the layers get large updates of the
# weights and the
# algorithm diverges. This is the **exploding gradients problem**, which is
# mostly encountered in recurrent neural networks. More generally, deep
# neural networks suffer from unstable gradients, different layers may
# learn at widely different speeds
#
# <!-- !split -->
# ## Is the Logistic activation function (Sigmoid) our choice?
#
# Although this unfortunate behavior has been empirically observed for
# quite a while (it was one of the reasons why deep neural networks were
# mostly abandoned for a long time), it is only around 2010 that
# significant progress was made in understanding it.
#
# A paper titled [Understanding the Difficulty of Training Deep
# Feedforward Neural Networks by <NAME> and <NAME>](http://proceedings.mlr.press/v9/glorot10a.html) traced
# these problems to the popular logistic
# sigmoid activation function and the weight initialization technique
# that was most popular at the time, namely random initialization using
# a normal distribution with a mean of 0 and a standard deviation of
# 1.
#
# They showed that with this activation function and this
# initialization scheme, the variance of the outputs of each layer is
# much greater than the variance of its inputs. Going forward in the
# network, the variance keeps increasing after each layer until the
# activation function saturates at the top layers. This is actually made
# worse by the fact that the logistic function has a mean of 0.5, not 0
# (the hyperbolic tangent function has a mean of 0 and behaves slightly
# better than the logistic function in deep networks).
#
#
# ## The derivative of the Logistic funtion
#
# Looking at the logistic activation function, when inputs become large
# (negative or positive), the function saturates at 0 or 1, with a
# derivative extremely close to 0. Thus when backpropagation kicks in,
# it has virtually no gradient to propagate back through the network,
# and what little gradient exists keeps getting diluted as
# backpropagation progresses down through the top layers, so there is
# really nothing left for the lower layers.
#
# In their paper, Glorot and Bengio propose a way to significantly
# alleviate this problem. We need the signal to flow properly in both
# directions: in the forward direction when making predictions, and in
# the reverse direction when backpropagating gradients. We don’t want
# the signal to die out, nor do we want it to explode and saturate. For
# the signal to flow properly, the authors argue that we need the
# variance of the outputs of each layer to be equal to the variance of
# its inputs, and we also need the gradients to have equal variance
# before and after flowing through a layer in the reverse direction.
#
#
#
# One of the insights in the 2010 paper by Glorot and Bengio was that
# the vanishing/exploding gradients problems were in part due to a poor
# choice of activation function. Until then most people had assumed that
# if Nature had chosen to use roughly sigmoid activation functions in
# biological neurons, they must be an excellent choice. But it turns out
# that other activation functions behave much better in deep neural
# networks, in particular the ReLU activation function, mostly because
# it does not saturate for positive values (and also because it is quite
# fast to compute).
#
#
# ## The RELU function family
#
# The ReLU activation function suffers from a problem known as the dying
# ReLUs: during training, some neurons effectively die, meaning they
# stop outputting anything other than 0.
#
# In some cases, you may find that half of your network’s neurons are
# dead, especially if you used a large learning rate. During training,
# if a neuron’s weights get updated such that the weighted sum of the
# neuron’s inputs is negative, it will start outputting 0. When this
# happen, the neuron is unlikely to come back to life since the gradient
# of the ReLU function is 0 when its input is negative.
#
# To solve this problem, nowadays practitioners use a variant of the ReLU
# function, such as the leaky ReLU discussed above or the so-called
# exponential linear unit (ELU) function
# $$
# ELU(z) = \left\{\begin{array}{cc} \alpha\left( \exp{(z)}-1\right) & z < 0,\\ z & z \ge 0.\end{array}\right.
# $$
# ## Which activation function should we use?
#
# In general it seems that the ELU activation function is better than
# the leaky ReLU function (and its variants), which is better than
# ReLU. ReLU performs better than $\tanh$ which in turn performs better
# than the logistic function.
#
# If runtime
# performance is an issue, then you may opt for the leaky ReLU function over the
# ELU function. If you don’t
# want to tweak yet another hyperparameter, you may just use the default
# $\alpha$ of $0.01$ for the leaky ReLU, and $1$ for ELU. If you have
# spare time and computing power, you can use cross-validation or
# bootstrap to evaluate other activation functions.
#
#
# <!-- !split -->
# ## A top-down perspective on Neural networks
#
#
# The first thing we would like to do is divide the data into two or three
# parts. A training set, a validation or dev (development) set, and a
# test set. The test set is the data on which we want to make
# predictions. The dev set is a subset of the training data we use to
# check how well we are doing out-of-sample, after training the model on
# the training dataset. We use the validation error as a proxy for the
# test error in order to make tweaks to our model. It is crucial that we
# do not use any of the test data to train the algorithm. This is a
# cardinal sin in ML. Then:
#
#
# * Estimate optimal error rate
#
# * Minimize underfitting (bias) on training data set.
#
# * Make sure you are not overfitting.
#
# If the validation and test sets are drawn from the same distributions,
# then a good performance on the validation set should lead to similarly
# good performance on the test set.
#
# However, sometimes
# the training data and test data differ in subtle ways because, for
# example, they are collected using slightly different methods, or
# because it is cheaper to collect data in one way versus another. In
# this case, there can be a mismatch between the training and test
# data. This can lead to the neural network overfitting these small
# differences between the test and training sets, and a poor performance
# on the test set despite having a good performance on the validation
# set. To rectify this, Andrew Ng suggests making two validation or dev
# sets, one constructed from the training data and one constructed from
# the test data. The difference between the performance of the algorithm
# on these two validation sets quantifies the train-test mismatch. This
# can serve as another important diagnostic when using DNNs for
# supervised learning.
#
# ## Limitations of supervised learning with deep networks
#
# Like all statistical methods, supervised learning using neural
# networks has important limitations. This is especially important when
# one seeks to apply these methods, especially to physics problems. Like
# all tools, DNNs are not a universal solution. Often, the same or
# better performance on a task can be achieved by using a few
# hand-engineered features (or even a collection of random
# features).
#
# Here we list some of the important limitations of supervised neural network based models.
#
#
#
# * **Need labeled data**. All supervised learning methods, DNNs for supervised learning require labeled data. Often, labeled data is harder to acquire than unlabeled data (e.g. one must pay for human experts to label images).
#
# * **Supervised neural networks are extremely data intensive.** DNNs are data hungry. They perform best when data is plentiful. This is doubly so for supervised methods where the data must also be labeled. The utility of DNNs is extremely limited if data is hard to acquire or the datasets are small (hundreds to a few thousand samples). In this case, the performance of other methods that utilize hand-engineered features can exceed that of DNNs.
#
# * **Homogeneous data.** Almost all DNNs deal with homogeneous data of one type. It is very hard to design architectures that mix and match data types (i.e. some continuous variables, some discrete variables, some time series). In applications beyond images, video, and language, this is often what is required. In contrast, ensemble models like random forests or gradient-boosted trees have no difficulty handling mixed data types.
#
# * **Many problems are not about prediction.** In natural science we are often interested in learning something about the underlying distribution that generates the data. In this case, it is often difficult to cast these ideas in a supervised learning setting. While the problems are related, it is possible to make good predictions with a *wrong* model. The model might or might not be useful for understanding the underlying science.
#
# Some of these remarks are particular to DNNs, others are shared by all supervised learning methods. This motivates the use of unsupervised methods which in part circumvent these problems.
# (source: doc/pub/NeuralNet/ipynb/NeuralNet.ipynb)
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] slideshow={"slide_type": "slide"}
# # Linear algebra games including SVD for PCA
#
# Some parts adapted from [Computational-statistics-with-Python.ipynb](https://github.com/cliburn/Computational-statistics-with-Python), which is itself from a course taught at Duke University; other parts from <NAME>' [blog](https://blog.statsbot.co/singular-value-decomposition-tutorial-52c695315254).
#
# The goal here is to practice some linear algebra manipulations by hand and with Python, and to gain some experience and intuition with the Singular Value Decomposition (SVD).
# $\newcommand{\Amat}{\mathbf{A}} \newcommand{\AmatT}{\mathbf{A^\top}}
# \newcommand{\thetavec}{\boldsymbol{\theta}}
# \newcommand{\Sigmamat}{\mathbf{\Sigma}}
# \newcommand{\Yvec}{\mathbf{Y}}
# $
# + [markdown] slideshow={"slide_type": "slide"}
# ## Preliminary exercise: manipulations using the index form of matrices
#
# If you haven't already done this earlier, prove that the Maximum Likelihood Estimate (MLE) for $\chi^2$ given by
#
# $$
# \chi^2 = (\Yvec - \Amat\thetavec)^{\mathbf{\top}} \Sigmamat^{-1} (\Yvec - \Amat\thetavec)
# $$
#
# is
#
# $$
# \thetavec_{\mathrm{MLE}} = (\AmatT \Sigmamat^{-1} \Amat)^{-1} (\AmatT \Sigmamat^{-1} \Yvec) \;.
# $$
#
# Here $\thetavec$ is a $m\times 1$ matrix of parameters (i.e., there are $m$ parameters), $\Sigmamat$ is the $N\times N$ covariance matrix of the observations, $\Yvec$ is a $N\times 1$ matrix of observations (data), and $\Amat$ is an $N\times m$ matrix
#
# $$
# \Amat =
# \left(
# \begin{array}{cccc}
# 1 & x_1 & x_1^2 & \cdots \\
# 1 & x_2 & x_2^2 & \cdots \\
# \vdots & \vdots & \vdots &\cdots \\
# 1 & x_N & x_N^2 & \cdots
# \end{array}
# \right)
# $$
#
# where $N$ is the number of observations. The idea is to do this with explicit indices for vectors and matrices, using the Einstein summation convention.
#
# A suggested approach:
# * Write $\chi^2$ in indices: $\chi^2 = (Y_i - A_{ij}\theta_j)\Sigma^{-1}_{ii'}(Y_{i'}- A_{i'j'}\theta_{j'})$, where summations over repeated indices are implied (be careful of transposes). *How do we see that $\chi^2$ is a scalar?*
# * Find $\partial\chi^2/\partial \theta_k = 0$ for all $k$, using $\partial\theta_j/\partial\theta_k = \delta_{jk}$. Isolate the terms with one component of $\thetavec$ from those with none.
# * You should get the matrix equation $ (\AmatT \Sigmamat^{-1} \Yvec) = (\AmatT \Sigmamat^{-1} \Amat)\thetavec$. At this point you can directly solve for $\thetavec$. *Why can you do this now?*
# * If you get stuck, see Dick's notes from the Parameter Estimation III lecture.
# + [markdown] slideshow={"slide_type": "slide"}
# ## SVD basics
#
# A singular value decomposition (SVD) decomposes a matrix $A$ into three other matrices (we'll skip the boldface font here):
#
# $$
# A = U S V^\top
# $$
#
# where (take $m > n$ for now)
# * $A$ is an $m\times n$ matrix;
# * $U$ is an $m\times n$ (semi)orthogonal matrix;
# * $S$ is an $n\times n$ diagonal matrix;
# * $V$ is an $n\times n$ orthogonal matrix.
#
# Comments and tasks:
# * *Verify that these dimensions are compatible with the decomposition of $A$.*
# * The `scipy.linalg` function `svd` has a Boolean argument `full_matrices`. If `False`, it returns the decomposition above with matrix dimensions as stated. If `True`, then $U$ is $m\times m$, $S$ is $m \times n$, and $V$ is $n\times n$. We will use the `full_matrices = False` form here. *Can you see why this is ok?*
# * Recall that orthogonal means that $U^\top U = I_{n\times n}$ and $V V^\top = I_{n\times n}$. *Are $U U^\top$ and $V^\top V$ equal to identity matrices?*
# * In index form, the decomposition of $A$ is $A_{ij} = U_{ik} S_k V_{jk}$, where the diagonal matrix elements of $S$ are
# $S_k$ (*make sure you agree*).
# * These diagonal elements of $S$, namely the $S_k$, are known as **singular values**. They are ordinarily arranged from largest to smallest.
# * $A A^\top = U S^2 U^\top$, which implies (a) $A A^\top U = U S^2$.
# * $A^\top A = V S^2 V^\top$, which implies (b) $A^\top A V = V S^2$.
# * If $m > n$, we can diagonalize $A^\top A$ to find $S^2$ and $V$ and then find $U = A V S^{-1}$. If $m < n$ we switch the roles of $U$ and $V$.
#
# Quick demonstations for you to do or questions to answer:
# * *Show from equations (a) and (b) that both $U$ and $V$ are orthogonal and that the eigenvalues, $\{S_i^2\}$, are all positive.*
# * *Show that if $m < n$ there will be at most $m$ non-zero singular values.*
# * *Show that the eigenvalues from equations (a) and (b) must be the same.*
#
# A key feature of the SVD for us here is that the sum of the squares of the singular values equals the total variance in $A$, i.e., the sum of squares of all matrix elements (squared Frobenius norm). Thus the size of each says how much of the total variance is accounted for by each singular vector. We can create a truncated SVD containing a percentage (e.g., 99%) of the variance:
#
# $$
# A_{ij} \approx \sum_{k=1}^{p} U_{ik} S_k V_{jk}
# $$
#
# where $p < n$ is the number of singular values included. Typically this is not a large number.
# -
# ### Geometric interpretation of SVD
#
# - Geometric interpretation of SVD
# - rotate orthogonal frame $V$ onto standard frame
# - scale by $S$
# - rotate standard frame into orthogonal frame $U$
#
# Consider the two-dimensional case: $\mathbf{x_1} = (x_1, y_1)$, $\mathbf{x_2} = (x_2, y_2)$. We can fit these to an ellipse with major axis $a$ and minor axis $b$, made by stretching and rotating a unit circle. Let $\mathbf{x'} = (x', y')$ be the transformed coordinates:
#
# $$
# \mathbf{x'} = \mathbf{x} R M^{-1} \quad\mbox{with}\quad
# R = \left(\begin{array}{cc}
# \cos\theta & \sin\theta \\
# -\sin\theta & \cos\theta
# \end{array}
# \right)
# \quad\mbox{and}\quad
# M = \left(\begin{array}{cc}
# a & 0 \\
# 0 & b
# \end{array}
# \right)
# $$
#
# In index form this is $x'_j = \frac{1}{m_j} x_i R_{ij}$ or (clockwise rotation):
#
# $$\begin{align}
# x' &= \frac{x \cos\theta - y\sin\theta}{a} \\
# y' &= \frac{x \sin\theta + y\cos\theta}{b} \\
# \end{align}$$
#
# The equation for a unit circle $\mathbf{x' \cdot x'} = 1$ becomes
#
# $$
# (M^{-1} R^\top \mathbf{x}) \cdot (\mathbf{x} R M^{-1}) = 1.
# $$
#
# With $X = \left(\begin{array}{cc}
# x_1 & y_1 \\
# x_2 & y_2
# \end{array}
# \right)$ we find the matrix equation:
#
# $$
# M^{-1} R^\top X^\top X R M^{-1}= 1.
# $$
#
# which is just a rearrangement of the equation from above, $A^\top A V = V S^2$.
#
# **Interpretation:** If $A$ is considered to be a collection of points, then the singular values are the axes of a least-squares fitted ellipsoid while $V$ is its orientation. The matrix $U$ is the projection of each of the points in $A$ onto the axes.
#
#
#
# ### Solving matrix equations with SVD
#
# We can solve for $\mathbf{x}$:
#
# $$\begin{align}
# A \mathbf{x} &= b \\
# \mathbf{x} &= V S^{-1} U^\top b
# \end{align}$$
#
# or $x_i = \sum_j \frac{V_{ij}}{S_j} \sum_k U_{kj} b_k$. The value of this solution method is when we have an ill-conditioned matrix, meaning that the smallest eigenvalues are zero or close to zero. We can throw away the corresponding components and all is well! See [also](https://personalpages.manchester.ac.uk/staff/timothy.f.cootes/MathsMethodsNotes/L3_linear_algebra3.pdf).
#
# Comments:
# - If we have a non-square matrix, it still works. If $m\times n$ with $m > n$, then only $n$ singular values.
# - If $m < n$, then only $m$ singular values.
# - This is like solving
#
# $$A^\top A \mathbf{x} = A^\top b$$
#
# which is called the *normal equation*. It produces the solution to $\mathbf{x}$ that is closest to the origin, or
#
# $$
# \min_{\mathbf{x}} |A\mathbf x - b| \;.
# $$
#
# **Task:** *prove these results (work backwards from the last equation as a least-squares minimization)*.
# ### Data reduction
#
# For machine learning (ML), there might be several hundred variables but the algorithms are made for a few dozen. We can use SVD in ML for variable reduction. This is also the connection to sloppy physics models. In general, our matrix $A$ can be closely approximated by only keeping the largest of the singular values. We'll see that visually below using images.
# ## Python imports
# +
# %matplotlib inline
import numpy as np
import matplotlib.pyplot as plt
import scipy.linalg as la
from sklearn.decomposition import PCA
# -
# *Generate random matrices and verify the properties for SVD given above. Check what happens when $m > n$.*
# +
# Two random test matrices with more rows than columns (m > n):
# A ~ Uniform[0, 1), shape (9, 4); Ap ~ standard normal, shape (5, 3).
A = np.random.rand(9, 4)
print('A = ', A)
Ap = np.random.randn(5, 3)
print('Ap = ', Ap)
# -
# Check the definition of `scipy.linalg.svd` with shift-tab-tab.
# SVD from scipy.linalg
# With full_matrices=False and A of shape (m, n) with m > n, the factors are
# U (m x n), S (length-n vector), V_trans (n x n); note la.svd returns V^T.
U, S, V_trans = la.svd(A, full_matrices=False)
Up, Sp, Vp_trans = la.svd(Ap, full_matrices=False)
print(U.shape, S.shape, V_trans.shape)
# Transpose with T, matrix multiplication with @
# U is semi-orthogonal, so U^T U is the n x n identity (up to round-off)...
print(U.T @ U)
# Here's one way to suppress small numbers from round-off error
np.around(U.T @ U, decimals=15)
# Predict this one before evaluating!
# ...but U @ U.T is m x m and is NOT the identity when m > n.
print(U @ U.T)
# Go on and check the other claimed properties.
#
# For example, is $A = U S V^\top$? (Note: you'll need to make $S$ a matrix with `np.diag(S)`.)
# +
# Check the other properties, changing the matrix size and shapes.
# -
# For a square matrix, compare the singular values in $S$ to the eigenvalues from `la.eig`. What do you conclude? Now try this for a symmetric matrix (note that a matrix plus its transpose is symmetric).
# ## SVD applied to images for compression
#
# Read in `figs/elephant.jpg` as a gray-scale image. The image has $1066 \times 1600$ values. Using SVD, recreate the image with a relative error of less than 0.5%. What is the relative size of the compressed image as a percentage?
# +
from skimage import io
# Read the image as a 2-D grayscale float array (as_gray collapses channels).
img = io.imread('figs/elephant.jpg', as_gray=True)
plt.imshow(img, cmap='gray');
print('shape of img: ', img.shape)
# -
# turn off axis
plt.imshow(img, cmap='gray')
plt.gca().set_axis_off()
# Do the SVD of the image matrix (full_matrices=False gives the compact form)
U, S, Vt = la.svd(img, full_matrices=False)
# Check the shapes
U.shape, S.shape, Vt.shape
# Check that we can recreate the image
img_orig = U @ np.diag(S) @ Vt
print(img_orig.shape)
plt.imshow(img_orig, cmap='gray')
plt.gca().set_axis_off()
# Here's how we can efficiently reduce the size of the matrices. Our SVD should be sorted, so we are keeping only the largest singular values up to a point.
# Pythonic way to figure out when we've accumulated 99.5% of the result
# (k = number of leading singular values whose squared cumulative fraction
# of the total variance stays at or below 0.995)
k = np.sum(np.cumsum((S**2)/(S**2).sum()) <= 0.995)
# #### Aside: dissection of the Python statement to find the index for accumulation
# A small toy vector shows how the one-liner above works step by step.
test = np.array([5, 4, 3, 2, 1])
threshold = 0.995
print('initial matrix, in descending magnitude: ', test)
print( 'fraction of total sum of squares: ', (test**2) / (test**2).sum() )
print( 'cumulative fraction: ', np.cumsum((test**2) / (test**2).sum()) )
print( 'mark entries as true if less than or equal to threshold: ',
      (np.cumsum((test**2) / (test**2).sum()) <= threshold) )
print( 'sum up the Trues: ',
      np.sum(np.cumsum((test**2) / (test**2).sum()) <= threshold) )
print( 'The last result is the index we are looking for.')
# Let's plot the eigenvalues and mark where k is
fig = plt.figure(figsize=(8,6))
ax = fig.add_subplot(1,1,1)
ax.semilogy(S, color='blue', label='eigenvalues')
ax.axvline(k, color='red', label='99.5% of the variance');
ax.set_xlabel('eigenvalue number')
ax.legend()
fig.tight_layout()
# Now keep only the most significant eigenvalues (those up to k).
# Truncated reconstruction: first k columns of U, first k singular values,
# first k rows of Vt.
img2 = U[:,:k] @ np.diag(S[:k])@ Vt[:k, :]
img2.shape
plt.imshow(img2, cmap='gray')
plt.gca().set_axis_off();
# Same reconstruction, keeping only 99% of the variance (fewer terms).
k99 = np.sum(np.cumsum((S**2)/(S**2).sum()) <= 0.99)
img99 = U[:,:k99] @ np.diag(S[:k99])@ Vt[:k99, :]
plt.imshow(img99, cmap='gray')
plt.gca().set_axis_off();
# Let's try another interesting picture . . .
# +
fraction_kept = 0.995
def svd_shapes(U, S, V, k=None):
    """Print the shapes of the (optionally truncated) SVD factors.

    Parameters
    ----------
    U : ndarray
        Left singular vectors; truncated to its first k columns.
    S : ndarray
        1-D array of singular values; truncated to its first k entries.
    V : ndarray
        Right singular vectors as returned by `la.svd`, i.e. V^T with the
        vectors in the *rows*; truncated to its first k rows.
    k : int, optional
        Number of singular values to keep; defaults to all of them.
    """
    if k is None:
        k = len(S)
    U_shape = U[:, :k].shape
    S_shape = S[:k].shape
    # Bug fix: la.svd returns V^T, so truncation keeps the first k *rows*
    # (V[:k, :]), matching the reconstruction U[:,:k] @ diag(S[:k]) @ V[:k,:]
    # used below.  The original V[:, :k] reported the wrong shape.
    V_shape = V[:k, :].shape
    print(f'U shape: {U_shape}, S shape: {S_shape}, V shape: {V_shape}')
# Read the color original (for display) and a grayscale copy (for the SVD).
img_orig = io.imread('figs/Dick_in_tailcoat.jpg')
img = io.imread('figs/Dick_in_tailcoat.jpg', as_gray=True)
# NOTE: default full_matrices=True here, so U is square (m x m); the
# truncated reconstruction below still works because of the slicing.
U, S, V = la.svd(img)
svd_shapes(U, S, V)
# Number of singular values needed to keep `fraction_kept` of the variance.
k995 = np.sum(np.cumsum((S**2)/(S**2).sum()) <= fraction_kept)
print(f'k995 = {k995}')
img995 = U[:,:k995] @ np.diag(S[:k995])@ V[:k995, :]
print(f'img995 shape = {img995.shape}')
svd_shapes(U, S, V, k995)
# Side-by-side: color original, grayscale, and truncated reconstruction.
fig = plt.figure(figsize=(12,6))
ax1 = fig.add_subplot(1,3,1)
ax1.imshow(img_orig)
ax1.set_axis_off()
ax2 = fig.add_subplot(1,3,2)
ax2.imshow(img, cmap='gray')
ax2.set_axis_off()
ax3 = fig.add_subplot(1,3,3)
ax3.imshow(img995, cmap='gray')
ax3.set_axis_off()
fig.tight_layout()
# -
# Let's plot the eigenvalues and mark where k is
# (semilog plot of the singular-value spectrum; the red line marks k995)
fig = plt.figure(figsize=(8,6))
ax = fig.add_subplot(1,1,1)
ax.semilogy(S, color='blue', label='eigenvalues')
ax.axvline(k995, color='red', label='99.5% of the variance');
ax.set_xlabel('eigenvalue number')
ax.legend()
fig.tight_layout()
# ### Things to do:
#
# * Get your own figure and duplicate these results. Then play!
# * As you reduce the percentage of the variance kept, what features of the image are retained and what are lost?
# * See how small you can make the percentage and still recognize the picture.
# * How is this related to doing a spatial Fourier transform, applying a low-pass filter, and transforming back. (Experts: try this!)
# ## Covariance, PCA and SVD
# +
# %matplotlib inline
import matplotlib.pyplot as plt
import numpy as np
import scipy.linalg as la
np.set_printoptions(precision=3)
# -
# Recall the formula for covariance
#
# $$
# \text{Cov}(X, Y) = \frac{\sum_{i=1}^n(X_i - \bar{X})(Y_i - \bar{Y})}{n-1}
# $$
#
# where $\text{Cov}(X, X)$ is the sample variance of $X$.
def cov(x, y):
    """Return the sample covariance of the vectors x and y.

    Uses the unbiased (n - 1) normalization, matching np.cov.
    """
    dx = x - x.mean()
    dy = y - y.mean()
    return (dx * dy).sum() / (len(x) - 1)
# Build the 2x2 covariance matrix by hand and compare with np.cov.
X = np.random.random(10)
Y = np.random.random(10)
np.array([[cov(X, X), cov(X, Y)], [cov(Y, X), cov(Y,Y)]])
np.cov(X, Y) # check against numpy
# Extension to more variables is done in a pair-wise way
Z = np.random.random(10)
np.cov([X, Y, Z])
# ### Eigendecomposition of the covariance matrix
# Zero mean but off-diagonal correlation matrix
# (sigma is the population covariance used to draw the sample)
mu = [0,0]
sigma = [[0.6,0.2],[0.2,0.2]]
n = 1000
# x has shape (2, n): each column is one sampled point
x = np.random.multivariate_normal(mu, sigma, n).T
plt.scatter(x[0,:], x[1,:], alpha=0.2);
# Find the covariance matrix of the matrix of points x
A = np.cov(x)
# +
# m = np.array([[1,2,3],[6,5,4]])
# ms = m - m.mean(1).reshape(2,1)
# np.dot(ms, ms.T)/2
# -
# Find the eigenvalues and eigenvectors
# la.eigh is used because the covariance matrix is symmetric
e, v = la.eigh(A)
fig = plt.figure(figsize=(6,6))
ax = fig.add_subplot(1,1,1)
ax.scatter(x[0,:], x[1,:], alpha=0.2)
# eigenvectors are the *columns* of v, hence the iteration over v.T
for e_, v_ in zip(e, v.T):
    ax.plot([0, 3*e_*v_[0]], [0, 3*e_*v_[1]], 'r-', lw=2)
ax.axis([-3,3,-3,3])
ax.set_aspect(1)
ax.set_title('Eigenvectors of covariance matrix scaled by eigenvalue.');
# ### PCA (from Duke course)
#
# "Principal Components Analysis" (PCA) basically means to find and rank all the eigenvalues and eigenvectors of a covariance matrix. This is useful because high-dimensional data (with $p$ features) may have nearly all their variation in a small number of dimensions $k<p$, i.e. in the subspace spanned by the eigenvectors of the covariance matrix that have the $k$ largest eigenvalues. If we project the original data into this subspace, we can have a dimension reduction (from $p$ to $k$) with hopefully little loss of information.
#
# Numerically, PCA is typically done using SVD on the data matrix rather than eigendecomposition on the covariance matrix. Numerically, the condition number for working with the covariance matrix directly is the square of the condition number using SVD, so SVD minimizes errors."
# For zero-centered vectors,
#
# \begin{align}
# \text{Cov}(X, Y) &= \frac{\sum_{i=1}^n(X_i - \bar{X})(Y_i - \bar{Y})}{n-1} \\
# &= \frac{\sum_{i=1}^nX_iY_i}{n-1} \\
# &= \frac{XY^T}{n-1}
# \end{align}
#
# and so the covariance matrix for a data set $X$ that has zero mean in each feature vector is just $XX^T/(n-1)$.
#
# In other words, we can also get the eigendecomposition of the covariance matrix from the positive semi-definite matrix $XX^T$.
# Note: Here $x$ is a matrix of **row** vectors.
X = np.random.random((5,4))
X
Y = X - X.mean(axis=1)[:, None] # eliminate the mean
# Row means of Y are now zero (up to round-off)
print(Y.mean(axis=1))
np.around(Y.mean(1), 5)
Y
# Check that the covariance matrix is unaffected by removing the mean:
np.cov(X)
np.cov(Y)
# Find the eigenvalue and eigenvectors
# Note: this uses x (the 2 x n sample from the cells above), not the X here.
e1, v1 = np.linalg.eig(np.dot(x, x.T)/(n-1))
# #### Principal components
#
# Principal components are simply the eigenvectors of the covariance matrix used as basis vectors. Each of the original data points is expressed as a linear combination of the principal components, giving rise to a new set of coordinates.
# Check that we reproduce the previous result
fig = plt.figure(figsize=(6,6))
ax = fig.add_subplot(1,1,1)
ax.scatter(x[0,:], x[1,:], alpha=0.2)
# v1 holds eigenvectors in its columns, so iterate over v1.T
for e_, v_ in zip(e1, v1.T):
    ax.plot([0, 3*e_*v_[0]], [0, 3*e_*v_[1]], 'r-', lw=2)
ax.axis([-3,3,-3,3]);
ax.set_aspect(1)
# ### Using SVD for PCA
#
# SVD is a decomposition of the data matrix $X = U S V^T$ where $U$ and $V$ are orthogonal matrices and $S$ is a diagonal matrix.
#
# Recall that the transpose of an orthogonal matrix is also its inverse, so if we multiply on the right by $X^T$, we get the following simplification
#
# \begin{align}
# X &= U S V^T \\
# X X^T &= U S V^T (U S V^T)^T \\
# &= U S V^T V S U^T \\
# &= U S^2 U^T
# \end{align}
#
# Comparing with the eigendecomposition of a matrix $A = W \Lambda W^{-1}$, we see that SVD gives us the eigendecomposition of the matrix $XX^T$, which as we have just seen, is basically a scaled version of the covariance for a data matrix with zero mean, with the eigenvectors given by $U$ and eigenvalues by $S^2$ (scaled by $n-1$).
# SVD of the data matrix: eigenvalues of the covariance are s**2/(n-1)
u, s, v = np.linalg.svd(x)
# reproduce previous results yet again!
e2 = s**2/(n-1)
v2 = u
plt.scatter(x[0,:], x[1,:], alpha=0.2)
# NOTE(review): the earlier cells iterate eigenvector *columns* (v.T, v1.T);
# here zip(e2, v2) iterates the rows of u.  It looks like this should be
# v2.T for consistency -- confirm against the intended plot.
for e_, v_ in zip(e2, v2):
    plt.plot([0, 3*e_*v_[0]], [0, 3*e_*v_[1]], 'r-', lw=2)
plt.axis([-3,3,-3,3]);
v1 # from eigenvectors of covariance matrix
v2 # from SVD
e1 # from eigenvalues of covariance matrix
e2 # from SVD
# + [markdown] slideshow={"slide_type": "slide"}
# ## Exercises: covariance matrix manipulations in Python (taken from the Duke course)
# + [markdown] slideshow={"slide_type": "fragment"}
# Given the following covariance matrix
# ```python
# A = np.array([[2,1],[1,4]])
# ```
# use Python to do these basic tasks (that is, do not do them by hand but use `scipy.linalg` functions).
#
# 1. Show that the eigenvectors of $A$ are orthogonal.
# 1. What is the vector representing the first principal component direction?
# 1. Find $A^{-1}$ without performing a matrix inversion.
# 1. What are the coordinates of the data points (0, 1) and (1, 1) in the standard basis expressed as coordinates of the principal components?
# 1. What is the proportion of variance explained if we keep only the projection onto the first principal component?
#
# We'll give you a headstart on the Python manipulations (you should take a look at the `scipy.linalg` documentation).
# + slideshow={"slide_type": "fragment"}
# Covariance matrix for the exercises; la.eig returns the eigenvalues
# (as complex numbers) and the eigenvectors as the columns of eigvec.
A = np.array([[2,1],[1,4]])
eigval, eigvec = la.eig(A)
# -
# - Find the matrix $A$ that results in rotating the standard vectors in $\mathbb{R}^2$ by 30 degrees counter-clockwise and stretches $e_1$ by a factor of 3 and contracts $e_2$ by a factor of $0.5$.
# - What is the inverse of this matrix? How you find the inverse should reflect your understanding.
#
# The effects of the matrix $A$ and $A^{-1}$ are shown in the figure below:
#
# 
# We observe some data points $(x_i, y_i)$, and believe that an appropriate model for the data is that
#
# $$
# f(x) = ax^2 + bx^3 + c\sin{x}
# $$
#
# with some added noise. Find optimal values of the parameters $\beta = (a, b, c)$ that minimize $\Vert y - f(x) \Vert^2$
#
# 1. using `scipy.linalg.lstsq`
# 2. solving the normal equations $X^TX \beta = X^Ty$
# 3. using `scipy.linalg.svd`
#
# In each case, plot the data and fitted curve using `matplotlib`.
#
# Data
# ```
# x = array([ 3.4027718 , 4.29209002, 5.88176277, 6.3465969 , 7.21397852,
# 8.26972154, 10.27244608, 10.44703778, 10.79203455, 14.71146298])
# y = array([ 25.54026428, 29.4558919 , 58.50315846, 70.24957254,
# 90.55155435, 100.56372833, 91.83189927, 90.41536733,
# 90.43103028, 23.0719842 ])
# ```
# Observed data for the least-squares exercise (same values as the
# markdown cell above).
x = np.array([ 3.4027718 , 4.29209002, 5.88176277, 6.3465969 , 7.21397852,
       8.26972154, 10.27244608, 10.44703778, 10.79203455, 14.71146298])
y = np.array([ 25.54026428, 29.4558919 , 58.50315846, 70.24957254,
       90.55155435, 100.56372833, 91.83189927, 90.41536733,
       90.43103028, 23.0719842 ])
| topics/bayesian-parameter-estimation/linear_algebra_games_I.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Fastpages Notebook Blog Post
# > A tutorial of fastpages for Jupyter notebooks.
#
# - toc: true
# - badges: true
# - comments: true
# - categories: [jupyter]
# - image: images/chart-preview.png
# # About
#
# This notebook is a demonstration of some of the capabilities of [fastpages](https://github.com/fastai/fastpages) with notebooks.
#
#
# With `fastpages` you can save your jupyter notebooks into the `_notebooks` folder at the root of your repository, and they will automatically be converted to Jekyll compliant blog posts!
#
# ## Front Matter
#
# The first cell in your Jupyter Notebook or markdown blog post contains front matter. Front matter is metadata that can turn on/off options in your Notebook. It is formatted like this:
#
# ```
# # "My Title"
# > "Awesome summary"
#
# - toc: true- branch: master- badges: true
# - comments: true
# - author: <NAME> & <NAME>
# - categories: [fastpages, jupyter]
# ```
#
# - Setting `toc: true` will automatically generate a table of contents
# - Setting `badges: true` will automatically include GitHub and Google Colab links to your notebook.
# - Setting `comments: true` will enable commenting on your blog post, powered by [utterances](https://github.com/utterance/utterances).
#
# The title and description need to be enclosed in double quotes only if they include special characters such as a colon. More details and options for front matter can be viewed on the [front matter section](https://github.com/fastai/fastpages#front-matter-related-options) of the README.
# ## Markdown Shortcuts
# A `#hide` comment at the top of any code cell will hide **both the input and output** of that cell in your blog post.
#
# A `#hide_input` comment at the top of any code cell will **only hide the input** of that cell.
#hide_input
# fastpages directive above: the rendered post shows only this cell's output.
print('The comment #hide_input was used to hide the code that produced this.')
# put a `#collapse-hide` flag at the top of any cell if you want to **hide** that cell by default, but give the reader the option to show it:
#collapse-hide
import pandas as pd
import altair as alt
# put a `#collapse-show` flag at the top of any cell if you want to **show** that cell by default, but give the reader the option to hide it:
#collapse-show
# URLs of the vega example datasets used by the charts below.
cars = 'https://vega.github.io/vega-datasets/data/cars.json'
movies = 'https://vega.github.io/vega-datasets/data/movies.json'
sp500 = 'https://vega.github.io/vega-datasets/data/sp500.csv'
stocks = 'https://vega.github.io/vega-datasets/data/stocks.csv'
flights = 'https://vega.github.io/vega-datasets/data/flights-5k.json'
# ## Interactive Charts With Altair
#
# Charts made with Altair remain interactive. Example charts taken from [this repo](https://github.com/uwdata/visualization-curriculum), specifically [this notebook](https://github.com/uwdata/visualization-curriculum/blob/master/altair_interaction.ipynb).
# hide
# NOTE(review): the fastpages directive is normally spelled '#hide' (no
# space); with the space this cell may not actually be hidden -- confirm.
df = pd.read_json(movies) # load movies data
genres = df['Major_Genre'].unique() # get unique field values
genres = list(filter(lambda d: d is not None, genres)) # filter out None values
genres.sort() # sort alphabetically
#hide
mpaa = ['G', 'PG', 'PG-13', 'R', 'NC-17', 'Not Rated']
# ### Example 1: DropDown
# +
# single-value selection over [Major_Genre, MPAA_Rating] pairs
# use specific hard-wired values as the initial selected values
selection = alt.selection_single(
    name='Select',
    fields=['Major_Genre', 'MPAA_Rating'],
    init={'Major_Genre': 'Drama', 'MPAA_Rating': 'R'},
    bind={'Major_Genre': alt.binding_select(options=genres), 'MPAA_Rating': alt.binding_radio(options=mpaa)}
)
# scatter plot, modify opacity based on selection
# (selected points are drawn nearly opaque, everything else is faded)
alt.Chart(movies).mark_circle().add_selection(
    selection
).encode(
    x='Rotten_Tomatoes_Rating:Q',
    y='IMDB_Rating:Q',
    tooltip='Title:N',
    opacity=alt.condition(selection, alt.value(0.75), alt.value(0.05))
)
# -
# ### Example 2: Tooltips
# Interval selection bound to the scales: dragging/zooming pans the x axis.
alt.Chart(movies).mark_circle().add_selection(
    alt.selection_interval(bind='scales', encodings=['x'])
).encode(
    x='Rotten_Tomatoes_Rating:Q',
    y=alt.Y('IMDB_Rating:Q', axis=alt.Axis(minExtent=30)), # use min extent to stabilize axis title placement
    tooltip=['Title:N', 'Release_Date:N', 'IMDB_Rating:Q', 'Rotten_Tomatoes_Rating:Q']
).properties(
    width=600,
    height=400
)
# ### Example 3: More Tooltips
# +
# Layered stock-price chart: a base line chart plus a hover-driven guide
# line, highlighted points, and white-stroked text labels (details-on-demand).
# select a point for which to provide details-on-demand
label = alt.selection_single(
    encodings=['x'], # limit selection to x-axis value
    on='mouseover', # select on mouseover events
    nearest=True, # select data point nearest the cursor
    empty='none' # empty selection includes no data points
)
# define our base line chart of stock prices
base = alt.Chart().mark_line().encode(
    alt.X('date:T'),
    alt.Y('price:Q', scale=alt.Scale(type='log')),
    alt.Color('symbol:N')
)
alt.layer(
    base, # base line chart
    # add a rule mark to serve as a guide line
    alt.Chart().mark_rule(color='#aaa').encode(
        x='date:T'
    ).transform_filter(label),
    # add circle marks for selected time points, hide unselected points
    base.mark_circle().encode(
        opacity=alt.condition(label, alt.value(1), alt.value(0))
    ).add_selection(label),
    # add white stroked text to provide a legible background for labels
    base.mark_text(align='left', dx=5, dy=-5, stroke='white', strokeWidth=2).encode(
        text='price:Q'
    ).transform_filter(label),
    # add text labels for stock prices
    base.mark_text(align='left', dx=5, dy=-5).encode(
        text='price:Q'
    ).transform_filter(label),
    data=stocks
).properties(
    width=700,
    height=400
)
# -
# ## Data Tables
#
# You can display tables per the usual way in your blog:
# Reload the movies dataset and show the first rows of selected columns.
movies = 'https://vega.github.io/vega-datasets/data/movies.json'
df = pd.read_json(movies)
# display table with pandas
df[['Title', 'Worldwide_Gross',
    'Production_Budget', 'Distributor', 'MPAA_Rating', 'IMDB_Rating', 'Rotten_Tomatoes_Rating']].head()
# ## Images
#
# ### Local Images
#
# You can reference local images and they will be copied and rendered on your blog automatically. You can include these with the following markdown syntax:
#
# ``
# 
# ### Remote Images
#
# Remote images can be included with the following markdown syntax:
#
# ``
# 
# ### Animated Gifs
#
# Animated Gifs work, too!
#
# ``
# 
# ### Captions
#
# You can include captions with markdown images like this:
#
# ```
# 
# ```
#
#
# 
#
#
#
#
# # Other Elements
# ## Tweetcards
#
# Typing `> twitter: https://twitter.com/jakevdp/status/1204765621767901185?s=20` will render this:
#
# > twitter: https://twitter.com/jakevdp/status/1204765621767901185?s=20
# ## Youtube Videos
#
# Typing `> youtube: https://youtu.be/XfoYk_Z5AkI` will render this:
#
#
# > youtube: https://youtu.be/XfoYk_Z5AkI
# ## Boxes / Callouts
#
# Typing `> Warning: There will be no second warning!` will render this:
#
#
# > Warning: There will be no second warning!
#
#
#
# Typing `> Important: Pay attention! It's important.` will render this:
#
# > Important: Pay attention! It's important.
#
#
#
# Typing `> Tip: This is my tip.` will render this:
#
# > Tip: This is my tip.
#
#
#
# Typing `> Note: Take note of this.` will render this:
#
# > Note: Take note of this.
#
#
#
# Typing `> Note: A doc link to [an example website: fast.ai](https://www.fast.ai/) should also work fine.` will render in the docs:
#
# > Note: A doc link to [an example website: fast.ai](https://www.fast.ai/) should also work fine.
# ## Footnotes
#
# You can have footnotes in notebooks, however the syntax is different compared to markdown documents. [This guide provides more detail about this syntax](https://github.com/fastai/fastpages/blob/master/_fastpages_docs/NOTEBOOK_FOOTNOTES.md), which looks like this:
#
# ```
# {% raw %}For example, here is a footnote {% fn 1 %}.
# And another {% fn 2 %}
# {{ 'This is the footnote.' | fndetail: 1 }}
# {{ 'This is the other footnote. You can even have a [link](www.github.com)!' | fndetail: 2 }}{% endraw %}
# ```
#
# For example, here is a footnote {% fn 1 %}.
#
# And another {% fn 2 %}
#
# {{ 'This is the footnote.' | fndetail: 1 }}
# {{ 'This is the other footnote. You can even have a [link](www.github.com)!' | fndetail: 2 }}
| _notebooks/2020-02-20-test.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import functools
import json
import math
import operator
import re
import sys
from datetime import datetime
import numpy as np
import pandas as pd
sys.path.append("../python-src")
from presidential_employment import *
# -
print(sys.argv[0])
# NOTE(review): hard-coded absolute path to this author's checkout; anyone
# else running the notebook must point this at their own data directory.
output_dir = "/home/pvh/Documents/code/pvh-forks/presidential-employment-stimulus/data"
# ### Data structure
#
# Each department has a total budget and total opportunities target.
#
# The overall programme has outcome targets
# 1. Jobs created
# 2. Jobs retained
# 3. Livelihoods supported
#
# Each department has a "blurb" describing their programme.
#
# Within each department there are multiple programmes that can contribute to each of these targets.
#
# Each programme has a demographic split of outcomes, with gender and youth percentages.
#
# Each programme has a per-province split of outcomes.
#
# Files:
#
# `Consolidated data (Dec) - Presidential Employment Stimulus.xlsx` - December sheet
#
# `Consolidated Presidential Employment Stimulus Reporting Template.xlsx` - January sheet
# dump metric titles (defined in python_src/presidential_employment.py) into metric_title.json
# Use a context manager so the file handle is flushed and closed
# deterministically (the original passed an anonymous open() handle to
# json.dump and never closed it).
with open(output_dir + "/metric_titles.json", "w") as metric_titles_file:
    json.dump(metric_titles, metric_titles_file, indent=2)
# +
# this is where we define the input excel
# Older monthly workbooks are kept here for reference; only the last one
# assigned to consolidated_dashboard_excel is actually read below.
july_excel = "Dashboard input_PES targets and opportunities per month 130721 Final.xlsx"
august_excel = 'Dashboard input_PES targets and opportunities per month 030821 Final.xlsx'
august_excel2 = 'Dashboard input_PES targets and opportunities per month 090821 Final.xlsx'
august_excel3 = 'Dashboard input_PES targets and opportunities per month 100821 Final.xlsx'
august_excel4 = 'Dashboard input_PES targets and opportunities per month 100821_2 Final.xlsx'
september_excel = 'Dashboard input_PES targets and opportunities September data final 07.10.2021.xlsx'
september_excel2 = 'Dashboard input_PES targets and opportunities September data final 11.10.2021.xlsx'
consolidated_dashboard_excel = september_excel2
# Raw (header-less) copies of the Targets and Trends sheets.
opportunity_targets_df = pd.read_excel(
    consolidated_dashboard_excel, sheet_name="Targets", header=None
).fillna(0)
opportunity_achievements_df = pd.read_excel(
    consolidated_dashboard_excel, sheet_name="Trends", header=None
).fillna(0)
implementation_status_df = pd.read_excel(
    consolidated_dashboard_excel,
    sheet_name="Implementation status",
    skiprows=2,
    usecols=range(4),
    names=["department", "programme", "status", "detail"],
)
# Forward-fill department names left blank by merged cells in the sheet.
implementation_status_df.department = implementation_status_df.department.fillna(method='pad')
# Map programme name (column 1) to its opportunity type (column 4).
opportunity_type_df = pd.concat(
    [opportunity_targets_df.iloc[2:56, 1], opportunity_targets_df.iloc[2:56, 4]], axis=1
).set_index(1)
description_df = pd.read_excel(
    consolidated_dashboard_excel,
    sheet_name="Department Descriptions",
    index_col=0,
    usecols=range(4),
)
# +
# Department list from the 'Department' column of the Targets sheet;
# the trailing totals row is dropped with iloc[:-1].
department_names = (
    pd.read_excel(consolidated_dashboard_excel, sheet_name="Targets", skiprows=1)
    .loc[:, "Department"]
    .dropna()
    .iloc[:-1]
)
# programmes_names = (
#     pd.read_excel(consolidated_dashboard_excel, sheet_name="Targets", skiprows=1)
#     .loc[:, "Programme"]
#     .dropna()
#     .iloc[:-1]
# )
# programmes_by_department = {}
# for row in (
#     pd.read_excel(consolidated_dashboard_excel, sheet_name="Targets", skiprows=1)
#     .loc[:, "Department":"Programme"]
#     .iloc[
#         :-1,
#     ]
#     .iterrows()
# ):
#     if not pd.isna(row[1][0]):
#         department_name = row[1][0]
#         programme_name = row[1][1]
#         programme_names = programmes_by_department.get(department_name, [])
#         programme_names.append(programme_name)
#         programmes_by_department[department_name] = programme_names
# list(department_names)
# -
# Per-department lead sentence and description paragraph, keyed by the
# Department Descriptions sheet's index column.
leads = description_df.lead.to_dict()
paragraphs = description_df.paragraph.to_dict()
# +
# Parsed versions of the four data sheets.  In each one,
# fillna(method="pad") forward-fills department (or section) labels that
# the spreadsheet leaves blank under merged cells.
targets_df = pd.read_excel(
    consolidated_dashboard_excel,
    sheet_name="Targets",
    skiprows=1,
    usecols=list(range(6)),
    names=["department", "programme", "target", "unk", "section", "display_name"],
).drop("unk", axis=1)
targets_df.department = targets_df.department.fillna(method="pad")
targets_df.section = targets_df.section.fillna(
    method="pad"
) # this is a hack to deal with the fact that DPWI section identifier is missing for most of their programmes
# the line below was removed because it is better to special-case those programmes
# targets_df = targets_df.fillna(-1) # this is to ensure that targets are -1 when no value is available
# total_achievement_column comes from presidential_employment (star import).
trends_df = pd.read_excel(
    consolidated_dashboard_excel,
    sheet_name="Trends",
    skiprows=5,
    usecols=list(range(total_achievement_column+1)),
)
trends_df.columns = [c.lower() for c in trends_df.columns]
trends_df.department = trends_df.department.fillna(method="pad")
trends_df = trends_df.fillna(0)
provincial_df = pd.read_excel(
    consolidated_dashboard_excel,
    sheet_name="Provincial (beneficiaries)",
    skiprows=4,
    usecols=list(range(12)),
)
# Normalize column names to snake_case identifiers.
provincial_df.columns = [
    c.lower().replace(" ", "_").replace("-", "_") for c in provincial_df.columns
]
provincial_df.department = provincial_df.department.fillna(method="pad")
provincial_df = provincial_df.fillna(0)
demographic_df = pd.read_excel(
    consolidated_dashboard_excel,
    sheet_name="Demographic data",
    skiprows=8,
    usecols=list(range(9)),
)
demographic_df.columns = [
    c.lower().replace(" ", "_").replace("%", "perc").replace('no.', 'no') for c in demographic_df.columns
]
demographic_df.department = demographic_df.department.fillna(method="pad")
# demographic_df = demographic_df.fillna(0)
# -
# ```
# interface DepartmentMonth {
# month: number // 202101
# name: string // Basic Education
# lead: string // Strengthening the learning environment in schools
# paragraph: string
# sections: Array<{
# name: string // Budget allocated to date
# matrics: Array<{
# name: string // Educational and general assistants
# type: 'currency' | 'count'
# value: number
# valueTarget?: number
# time?: {
# name: string // spend
# values: Array<{
# month: number // 202101
# value: number
# }>
# }
# gender?: {
# name: string // opportunities
# values: Array<{
# gender: 'female' | 'male'
# value: number
# }>
# }
# age?: {
# name: string // opportunities
# values: Array<{
# age: string // 18-35
# value: number
# }>
# }
# province?: {
# name: string // opportunities
# values: Array<{
# province: 'EC' | 'FS' | 'GP' | 'KZN' | 'LP' | 'MP' | 'NC' | 'NW' | 'WC'
# }>
# }
# }>
# }>
# }
# ```
# ##
# # Top level structure
# +
# Top-level dashboard payload: a programme-wide overview plus one entry per
# department (filled in later).  Overview text comes from the
# 'overview' row of the Department Descriptions sheet.
all_data = Everything(
    overview=Overview(
        month=202102,
        name="Programme overview",
        lead=leads["overview"],
        paragraph=paragraphs["overview"],
        footer_header="",
        footer_paragraph="",
        sections=[],
    ),
    departments=[],
)
def add_or_replace(departments, department):
    """Insert *department* into *departments* in place.

    If an entry with the same ``sheet_name`` already exists, it is replaced
    (only the first match); otherwise the department is appended.
    Returns the mutated list for convenience.
    """
    for index, existing in enumerate(departments):
        if existing.sheet_name == department.sheet_name:
            departments[index] = department
            return departments
    departments.append(department)
    return departments
# -
# +
all_data.departments = []
def make_dim(dim_type, lookup_type, df, col_start, col_end, key_lookup):
    """Build a Dimension for the current programme from one sheet's row.

    NOTE: reads ``department_name``, ``programme_name`` and ``section`` from
    the enclosing notebook loop (they are globals at call time), so this must
    only be called from inside that loop.

    dim_type: lookup name for the dimension (e.g. "province", "time")
    lookup_type: viz name (e.g. "bar", "line")
    df: sheet DataFrame with department/programme columns plus value columns
    col_start, col_end: slice of df's columns holding the per-key values
    key_lookup: maps a raw column header to the output key
    """
    row = df.loc[(df.department == department_name) & (df.programme == programme_name)]
    values = []
    # Previously the same .loc selection was evaluated a second time just for
    # the length check; reuse `row` instead.
    if len(row) == 0:
        data_missing = True
    else:
        nonzero = False
        for key in list(row)[col_start:col_end]:
            # assumes exactly one matching row — int() on a multi-row Series
            # would fail; TODO confirm the sheets guarantee uniqueness
            value = int(row.loc[:, key])
            if value > 0:
                nonzero = True
            values.append(MetricValue(key=key_lookup(key), value=value))
        if not nonzero:
            # All-zero rows are treated the same as absent data.
            data_missing = True
            values = []
        else:
            data_missing = False
    dim = Dimension(
        name=metric_titles[section_abbrev_to_name[section]][
            MetricTypeEnum.count.name + "_" + dim_type
        ],
        lookup=dim_type,
        viz=lookup_type,
        values=values,
        data_missing=data_missing,
    )
    return dim
# Build one Department per entry in department_names: a "targets" section
# (budget + beneficiary totals) plus one section per opportunity type
# (CRE = job creation, LIV = livelihoods, RET = jobs retained), each holding
# a Metric per programme with province/time/gender/age dimensions.
desc_abbrevs = {"DoH": "DOH"} # deal with special cases in description lookup
departments = {}
for department_name in department_names:
    department_implementation_details = []
    # Department-level targets: budget (currency) and beneficiaries (count).
    target_section = Section(
        name=section_titles[SectionEnum.targets.name],
        section_type=SectionEnum.targets.name,
        metrics=[
            Metric(
                name=metric_titles[SectionEnum.targets.name][
                    MetricTypeEnum.currency.name
                ],
                metric_type=MetricTypeEnum.currency.name,
                value_target=department_budget_targets[department_name],
                value=-1,
                dimensions=[],
            ),
            Metric(
                name=metric_titles[SectionEnum.targets.name][MetricTypeEnum.count.name],
                metric_type=MetricTypeEnum.count.name,
                value_target=targets_df.loc[
                    targets_df.department == department_name
                ].target.sum(), # overall target of beneficiaries
                value=trends_df.loc[trends_df.department == department_name]
                .iloc[:, -1]
                .sum(), # get the achievement by summing the last column in trends
                dimensions=[],
            ),
        ],
    )
    sections = [target_section]
    for section in ["CRE", "LIV", "RET"]:
        # Programmes for this department/section pair, per the Targets sheet.
        programme_names = list(
            targets_df.loc[
                (targets_df.section == section)
                & (targets_df.department == department_name)
            ].programme
        )
        if section == 'CRE' and department_name == 'Agriculture, Land Reform and Rural Development':
            # this does not have a target so needs to be added manually
            programme_names += ['Graduate verifiers']
        metrics = []
        for programme_name in programme_names:
            if department_name == 'Public Works and Infrastructure' and programme_name == 'Project Administrators':
                # this programme is mentioned in Targets and has a line in Implementation Status but has no other data
                continue
            # Implementation status/detail text for the programme (if any).
            imp_status_row = implementation_status_df.loc[
                (implementation_status_df.department == department_name)
                & (implementation_status_df.programme == programme_name)
            ]
            if len(imp_status_row) == 0 or pd.isna(imp_status_row.status.iloc[0]):
                imp_detail = None
            else:
                imp_detail = ImplementationDetail(
                    programme_name=programme_name,
                    status=implementation_status_to_enum[imp_status_row.status.iloc[0].strip()],
                    detail=imp_status_row.detail.iloc[0].strip(),
                )
            if (
                department_name == "Public Works and Infrastructure"
                and programme_name
                == "Graduate programmes (Property Management Trading Entity)"
            ) or (
                department_name == "Agriculture, Land Reform and Rural Development"
                and programme_name == "Subsistence producer relief fund"
            ):
                # Keep the implementation detail at department level only.
                department_implementation_details.append(imp_detail)
                continue # these programmes have no detailed metrics
            else:
                try:
                    # collect detailed metrics for programme
                    dimensions = []
                    time_dimension_row = trends_df.loc[
                        (trends_df.department == department_name)
                        & (trends_df.programme == programme_name)
                    ]
                    # Province and time dimensions come straight from sheets.
                    dimensions.append(make_dim(LookupTypeEnum.province.name, VizTypeEnum.bar.name, provincial_df, 2, -1, lambda key: province_header_to_abbrev[key]))
                    dimensions.append(make_dim(LookupTypeEnum.time.name, VizTypeEnum.line.name, trends_df, 2, None, lambda key: month_lookup[key]))
                    demographic_row = demographic_df.loc[
                        (demographic_df.department == department_name)
                        & (demographic_df.programme == programme_name)
                    ]
                    # Gender dimension: fractions, flagged missing if both 0.
                    values = []
                    if len(demographic_row) == 0:
                        data_missing = True
                    else:
                        male_perc = demographic_row.loc[:, "perc_male"].iloc[0]
                        female_perc = demographic_row.loc[:, "perc_female"].iloc[0]
                        if male_perc + female_perc == 0:
                            data_missing = True
                        else:
                            values=[
                                MetricValue(
                                    key=GenderEnum.Male.name,
                                    value=male_perc,
                                ),
                                MetricValue(
                                    key=GenderEnum.Female.name,
                                    value=female_perc,
                                ),
                            ]
                            if male_perc + female_perc != 1.0:
                                # Log rows whose gender split doesn't sum to 1.
                                print(programme_name, male_perc, female_perc, male_perc + female_perc)
                            data_missing=False
                    gender_dim = Dimension(
                        name=metric_titles[section_abbrev_to_name[section]][
                            MetricTypeEnum.count.name + "_gender"
                        ],
                        lookup=LookupTypeEnum.gender.name,
                        viz=VizTypeEnum.two_value.name,
                        values=values,
                        data_missing=data_missing
                    )
                    dimensions.append(gender_dim)
                    # Youth (18-35) dimension, same missing-data convention.
                    values = []
                    if len(demographic_row) == 0:
                        data_missing = True
                    else:
                        age_perc = demographic_row.loc[:, "perc_youth"].iloc[0]
                        if age_perc == 0:
                            data_missing = True
                            values = []
                        else:
                            values=[
                                MetricValue(
                                    key="18-35",
                                    value=age_perc,
                                )
                            ]
                            data_missing = False
                    youth_dim = Dimension(
                        name=metric_titles[section_abbrev_to_name[section]][
                            MetricTypeEnum.count.name + "_age"
                        ],
                        lookup=LookupTypeEnum.age.name,
                        viz=VizTypeEnum.percentile.name,
                        values=values,
                        data_missing=data_missing
                    )
                    dimensions.append(youth_dim)
                    # TODO: Rationalise this - disabled and military vets share a lot of code
                    disabled = demographic_row.no_disability.iloc[0]
                    if disabled > 0:
                        disabled_dim = Dimension(
                            name=metric_titles[section_abbrev_to_name[section]][MetricTypeEnum.count.name + '_disabled'],
                            lookup=LookupTypeEnum.disabled.name,
                            viz=VizTypeEnum.count.name,
                            values=[MetricValue(key='disabled', value=disabled)]
                        )
                        dimensions.append(disabled_dim)
                    military_vets = demographic_row.no_military_veterans.iloc[0]
                    if military_vets > 0:
                        mv_dim = Dimension(
                            name=metric_titles[section_abbrev_to_name[section]][MetricTypeEnum.count.name + '_vets'],
                            lookup=LookupTypeEnum.vets.name,
                            viz=VizTypeEnum.count.name,
                            values=[MetricValue(key='vets', value=military_vets)]
                        )
                        dimensions.append(mv_dim)
                    # Achievement = last (most recent) column of the trends row.
                    total_value = int(time_dimension_row.iloc[:,-1].iloc[0])
                    target_row = targets_df.fillna(0).loc[
                        (targets_df.department == department_name)
                        & (targets_df.programme == programme_name)
                    ].target
                    if len(target_row) == 0:
                        # e.g. Graduate verifiers programme doesn't have a target
                        target = -1
                    else:
                        target = target_row.iloc[0]
                    programme_metric = Metric(
                        name=programme_name,
                        metric_type=MetricTypeEnum.count.name,
                        value=total_value,
                        value_target=target,
                        dimensions=dimensions,
                        implementation_detail=imp_detail,
                    )
                    metrics.append(programme_metric)
                except IndexError as e:
                    # Missing sheet rows surface as IndexError; log and skip.
                    print("IndexError on", section, department_name, programme_name, str(e))
        sections.append(
            Section(
                name=section_titles[section_abbrev_to_name[section]],
                section_type=section_abbrev_to_name[section],
                metrics=metrics,
            )
        )
    abbrev = department_name_to_abbreviation[department_name]
    # "Data captured until" drives the per-department month stamp (YYYYMM).
    month = description_df.loc[
        desc_abbrevs.get(abbrev, abbrev), "Data captured until"
    ].strftime("%Y%m")
    departments[department_name] = Department(
        month=month,
        name=department_name,
        sheet_name=abbrev,
        lead=leads[desc_abbrevs.get(abbrev, abbrev)],
        paragraph=paragraphs[desc_abbrevs.get(abbrev, abbrev)],
        sections=sections,
        target_lines=[],
        achievement_lines=[],
        implementation_details=department_implementation_details
    )
# Emit departments in alphabetical order of name.
for name in sorted(departments.keys()):
    all_data.departments.append(departments[name])
# print(all_data.to_json(indent=2))
# +
# Accumulate programme-wide demographic totals across all departments.
# Gender/age dimensions hold fractions, so they are multiplied by the
# metric's absolute value; metrics without a usable dimension are counted
# into the corresponding "unknown" bucket.
total_male = total_female = total_unknown_gender = total_beneficiaries = 0
total_youth = total_unknown_youth = 0
total_provincial = {}
total_unknown_province = 0
for abbreviation in province_abbreviations:
    total_provincial[abbreviation] = 0
for department in all_data.departments:
    department_male = department_female = department_beneficiaries = 0
    for section in department.sections:
        for metric in section.metrics:
            if section.section_type == SectionEnum.targets.name and metric.name == "Beneficiaries":
                # Department-level beneficiary total lives in the targets section.
                total_beneficiaries += metric.value
                department_beneficiaries = metric.value
                continue
            if metric.value == -1:
                # -1 is the "no data" sentinel.
                continue
            total_value = metric.value
            gender_found = False
            age_found = False
            province_found = False
            for dimension in metric.dimensions:
                if dimension.data_missing:
                    continue
                if dimension.lookup == LookupTypeEnum.gender.name:
                    gender_found = True
                    for value in dimension.values:
                        if value.key == 'Male':
                            department_male += total_value * value.value
                            total_male += total_value * value.value
                        elif value.key == 'Female':
                            department_female += total_value * value.value
                            total_female += total_value * value.value
                elif dimension.lookup == LookupTypeEnum.age.name:
                    age_found = True
                    youth_value = dimension.values[0].value
                    total_youth += youth_value * total_value
                elif dimension.lookup == LookupTypeEnum.province.name:
                    province_found = True
                    for value in dimension.values:
                        # Province values are absolute counts, not fractions.
                        total_provincial[value.key] += value.value
            if metric.value <= 0:
                continue
            if not gender_found:
                total_unknown_gender += metric.value
            if not age_found:
                total_unknown_youth += metric.value
            if not province_found:
                total_unknown_province += metric.value
# Report coverage: how much of the beneficiary total lacks each breakdown.
print(total_beneficiaries, total_unknown_gender, round(total_unknown_gender / total_beneficiaries, 2),
      total_unknown_youth, round(total_unknown_youth / total_beneficiaries, 2),
      total_unknown_province, round(total_unknown_province / total_beneficiaries, 2))
# -
# ## Overview picture
# +
# Roll the per-department sections up into programme-wide figures per
# opportunity type: value/value_target per department (plus a running
# "Total"), and a month-by-month achievement series per type.
programmes_by_type = {
    SectionEnum.job_opportunities.name: {},
    SectionEnum.livelihoods.name: {},
    SectionEnum.jobs_retain.name: {},
}
achievements_by_type_by_month = {}
for section_type in [
    e.name for e in SectionEnum if e.name != "targets" and e.name != "budget_allocated"
]:
    achievements_by_type_by_month[section_type] = {}
    for month in months:
        achievements_by_type_by_month[section_type][month] = 0
achievements_df = opportunity_achievements_df.iloc[3:, 1:].set_index(1)
for department in all_data.departments:
    section_value = 0
    section_target_value = 0
    for section in department.sections:
        if section.section_type == SectionEnum.targets.name:
            continue
        total_value = 0
        total_target_value = 0
        for metric in section.metrics:
            # if (
            #     department.sheet_name == "DALRRD"
            #     and metric.name == "Graduate Employment"
            # ):
            #     continue
            if metric.name not in achievements_df.index:
                # Warn when a programme can't be matched to the achievements sheet.
                print(
                    "Metric not found in achievements_df", department.name, metric.name
                )
            total_value += metric.value
            if metric.value_target > 0:
                total_target_value += metric.value_target
            # Fold this programme's monthly series into the per-type series.
            for dimension in metric.dimensions:
                if dimension.lookup == LookupTypeEnum.time.name:
                    for metric_value in dimension.values:
                        month = metric_value.key
                        value = metric_value.value
                        achievements_by_type_by_month[section.section_type][
                            month
                        ] += value
        # Two sections have authoritative targets only in the targets sheet;
        # override the summed value with the sheet cell.
        if (
            department.name == "Agriculture, Land Reform and Rural Development"
            and section.section_type == SectionEnum.livelihoods.name
        ):
            total_target_value = int(opportunity_targets_df.iloc[8, 2])
        elif (
            department.name == "Public Works and Infrastructure"
            and section.section_type == SectionEnum.job_opportunities.name
        ):
            total_target_value = int(opportunity_targets_df.iloc[47, 2])
        # print(department.name, section.name, total_value, total_target_value)
        programmes_by_type[section.section_type][department.sheet_name] = {
            "value": total_value,
            "value_target": total_target_value,
        }
        if "Total" not in programmes_by_type[section.section_type]:
            programmes_by_type[section.section_type]["Total"] = dict(
                value=0, value_target=0
            )
        programmes_by_type[section.section_type]["Total"]["value"] += total_value
        programmes_by_type[section.section_type]["Total"][
            "value_target"
        ] += total_target_value
        section_value += total_value
        section_target_value += total_target_value
# Cross-check the computed per-type totals against the totals the workbook
# itself reports; a failure means the per-programme data and the dashboard's
# own summary disagree. Fixed: the messages for the livelihoods and
# jobs_retain "value" checks previously reported job_opportunities.
achievement_totals_df = pd.read_excel(consolidated_dashboard_excel, sheet_name='Demographic data', skiprows=2, usecols=range(2), nrows=3, names=['section', 'total'], index_col=0)
assert (
    programmes_by_type[SectionEnum.job_opportunities.name]["Total"]["value_target"]
    == opportunity_targets_df.iloc[6, 7]
), f'{SectionEnum.job_opportunities.name} total mismatch: {programmes_by_type[SectionEnum.job_opportunities.name]["Total"]["value_target"]} vs {opportunity_targets_df.iloc[6, 7]}'
assert (
    programmes_by_type[SectionEnum.job_opportunities.name]["Total"]["value"] == achievement_totals_df.loc["Jobs created","total"]
), f'{SectionEnum.job_opportunities.name} total mismatch {programmes_by_type[SectionEnum.job_opportunities.name]["Total"]["value"]} vs {achievement_totals_df.loc["Jobs created"]}'
assert (
    programmes_by_type[SectionEnum.livelihoods.name]["Total"]["value_target"]
    == opportunity_targets_df.iloc[7, 7]
), f'{SectionEnum.livelihoods.name} total mismatch: {programmes_by_type[SectionEnum.livelihoods.name]["Total"]["value_target"]} vs {opportunity_targets_df.iloc[7, 7]}'
assert (
    programmes_by_type[SectionEnum.livelihoods.name]["Total"]["value"] == achievement_totals_df.loc["Livelihoods supported","total"]
), f'{SectionEnum.livelihoods.name} total mismatch {programmes_by_type[SectionEnum.livelihoods.name]["Total"]["value"]} vs {achievement_totals_df.loc["Livelihoods supported"]}'
assert (
    programmes_by_type[SectionEnum.jobs_retain.name]["Total"]["value_target"]
    == opportunity_targets_df.iloc[8, 7]
), f'{SectionEnum.jobs_retain.name} total mismatch: {programmes_by_type[SectionEnum.jobs_retain.name]["Total"]["value_target"]} vs {opportunity_targets_df.iloc[8, 7]}'
assert (
    programmes_by_type[SectionEnum.jobs_retain.name]["Total"]["value"] == achievement_totals_df.loc["Jobs retained","total"]
), f'{SectionEnum.jobs_retain.name} total mismatch {programmes_by_type[SectionEnum.jobs_retain.name]["Total"]["value"]} vs {achievement_totals_df.loc["Jobs retained"]}'
# One overview Metric per opportunity type, each carrying a by-department
# bar dimension (sorted descending by value, dropping bookkeeping keys) and
# an over-time line dimension built from achievements_by_type_by_month.
overview_metrics = [
    Metric(
        name=section_titles[section_name],
        metric_type=section_name,
        value=programmes_by_type[section_name]["Total"]["value"],
        value_target=programmes_by_type[section_name]["Total"][
            "value_target"
        ],
        dimensions=[
            Dimension(
                name="by department",
                viz=VizTypeEnum.bar.name,
                lookup=LookupTypeEnum.department.name,
                values=[
                    MetricValue(
                        key=department_name,
                        value=outputs["value"],
                        value_target=outputs["value_target"],
                    )
                    for department_name, outputs in sorted(
                        department_info.items(),
                        key=lambda e: e[1]["value"],
                        reverse=True,
                    )
                    if not (
                        department_name.startswith("value")
                        or department_name == "Total"
                    )
                ],
            ),
            Dimension(
                name="over time",
                viz=VizTypeEnum.line.name,
                lookup=LookupTypeEnum.time.name,
                values=[
                    MetricValue(key=key, value=value)
                    for key, value in achievements_by_type_by_month[
                        section_name
                    ].items()
                ],
            ),
        ],
    )
    for section_name, department_info in programmes_by_type.items()
    if not section_name.startswith("value")
]
# NOTE: these two sums previously had swapped names (the sum of achieved
# values was called "current_target" and the sum of targets was called
# "current_achievement"); the Section construction below then used them
# swapped again, so the emitted JSON was correct. The names are now
# consistent with their contents — output is unchanged.
current_achievement = sum([metric.value if metric.value > 0 else 0 for metric in overview_metrics])
current_target = sum([metric.value_target if metric.value_target > 0 else 0 for metric in overview_metrics])
# Append the programme-wide demographic breakdowns (gender / youth /
# province) computed earlier; value/value_target of -1 mark these as
# dimension-only metrics.
overview_metrics.extend(
    [
        Metric(
            name="Beneficiaries by Gender",
            metric_type="targets_count",
            value=-1,
            value_target=-1,
            dimensions=[
                Dimension(
                    name="Beneficiaries by Gender",
                    viz=VizTypeEnum.two_value.name,
                    lookup=LookupTypeEnum.gender.name,
                    values=[
                        MetricValue(
                            key=GenderEnum.Male.name,
                            # Fractions are taken over beneficiaries with known gender.
                            value=total_male / (total_beneficiaries - total_unknown_gender)
                        ),
                        MetricValue(
                            key=GenderEnum.Female.name,
                            value=total_female / (total_beneficiaries - total_unknown_gender)
                        )
                    ],
                    data_missing=False
                ),
            ]
        ),
        Metric(
            name="Beneficiaries that are Youth",
            metric_type="targets_count",
            value=-1,
            value_target=-1,
            dimensions=[
                Dimension(
                    name="Beneficiaries that are Youth",
                    viz=VizTypeEnum.two_value.name,
                    lookup=LookupTypeEnum.age.name,
                    values=[
                        MetricValue(key="18-35",
                                    value=total_youth / (total_beneficiaries - total_unknown_youth)
                        ),
                        MetricValue(key="36+",
                                    value=1 - (total_youth / (total_beneficiaries - total_unknown_youth))
                        )
                    ],
                    data_missing=False
                ),
            ]
        ),
        Metric(
            name="Beneficiaries by province",
            metric_type="targets_count",
            value=-1,
            value_target=-1,
            dimensions=[
                Dimension(
                    name="Beneficiaries by province",
                    viz=VizTypeEnum.bar.name,
                    lookup=LookupTypeEnum.province.name,
                    values=[
                        MetricValue(key=abbrev, value=total_provincial[abbrev]) for abbrev in province_abbreviations
                    ],
                    data_missing=False
                )
            ]
        )
    ])
# Programme-wide overview page: leads/paragraphs plus the metrics above.
overview = Overview(
    month=(description_df.loc["overview", "Data captured until"]).strftime("%Y%m"),
    name="Programme overview",
    lead=leads["overview"],
    paragraph=paragraphs["overview"],
    footer_header=leads["Disclaimer"],
    footer_paragraph=paragraphs["Disclaimer"],
    sections=[
        Section(
            name="Current status",
            section_type=SectionEnum.overview.name,
            metrics=overview_metrics,
            value=current_achievement,
            value_target=current_target,
        )
    ],
)
# Sanity-check that the workbook's own row/column totals are internally
# consistent before publishing them.
assert (
    opportunity_achievements_df.iloc[6:59, total_achievement_column].sum()
    == opportunity_achievements_df.iloc[59, total_achievement_column]
), "Sum of achievements does not add up to reported total"
assert (
    opportunity_targets_df.iloc[2:56, 2].sum() == opportunity_targets_df.iloc[56, 2]
), "Sum of targets does not add up to reported total"
# Prepend the programme-wide targets section (budget, beneficiaries and
# opportunities in process) so it renders first on the overview page.
overview.sections.insert(
    0,
    Section(
        name=section_titles[SectionEnum.targets.name + "_overview"],
        section_type=SectionEnum.targets.name,
        metrics=[
            Metric(
                name=metric_titles[SectionEnum.targets.name][
                    MetricTypeEnum.currency.name
                ],
                metric_type=MetricTypeEnum.currency.name,
                dimensions=[],
                # value=int(opportunity_targets_df.iloc[2, 7] * 1000),
                value=0,
                # Sheet stores thousands of Rand; scale to Rand.
                value_target=(opportunity_targets_df.iloc[2, 6] * 1000),
            ),
            Metric(
                name=metric_titles[SectionEnum.targets.name][MetricTypeEnum.count.name],
                metric_type=MetricTypeEnum.count.name,
                dimensions=[],
                value=int(
                    opportunity_achievements_df.iloc[59, total_achievement_column]
                ),
                value_target=int(opportunity_targets_df.iloc[56, 2]),
            ),
            Metric(
                name="Opportunities in process",
                metric_type=MetricTypeEnum.count.name,
                dimensions=[],
                value_target=int(opportunity_achievements_df.iloc[2, 1]),
                value=0,
            ),
        ],
        value=None,
        value_target=None,
    ),
)
all_data.overview = overview
# -
# +
# Sanity check: compare programme names present in the final data structure
# against those listed in the targets and implementation-status sheets; the
# final expression displays names in the sheets but absent from the output.
ad_set = {
    metric.name
    for department in all_data.departments
    for section in department.sections
    if section.section_type != SectionEnum.targets.name
    for metric in section.metrics
}
ot_set = set(opportunity_targets_df.iloc[2:55, 1])
imp_set = set(implementation_status_df.iloc[:53, 1])
ot_set.difference(ad_set)
# -
# # Save final data
# Serialise the assembled structure to JSON for the frontend, departments
# sorted by sheet abbreviation.
output_filename = output_dir + "/all_data.json"
all_data.departments.sort(key=operator.attrgetter("sheet_name"))
# Use a context manager so the handle is flushed and closed deterministically
# (the previous open(...).write(...) left closing to garbage collection).
with open(output_filename, "w") as f:
    f.write(all_data.to_json(indent=2))
# print(all_data.to_json(indent=2))
print("DONE")
# +
# programme_status_df = pd.read_excel(
# mar_opportunities_excel, sheet_name="Implementation status", header=None
# )
# to_camel_case = lambda match: match.group(1) + match.group(2).upper() + match.group(3)
# [
# re.sub(r"(\S*) (\w)(.*)", to_camel_case, status)
# for status in implementation_status_df.iloc[3:, 2].dropna().unique()
# ]
# +
# for dept in all_data.departments:
# print(f"\t'{dept.name}': '{dept.sheet_name}',")
# +
# json.dump(metric_titles, open(output_dir + "/metric_titles.json", "w"), indent=2)
# -
| notebooks/p-e_to_json.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + id="k79o1BFFRYW1"
from __future__ import print_function
import numpy as np
import math
import scipy
import pandas as pd
import PIL
import gdal
import matplotlib.pyplot as plt
plt.style.use('ggplot')
import sys, os
from pathlib import Path
import time
import xml.etree.ElementTree as ET
import random
import collections, functools, operator
import csv
import ee
from osgeo import gdal,osr
from gdalconst import *
import subprocess
from osgeo.gdalconst import GA_Update
import torch
import torch.nn as nn
from torch.utils.data import Dataset
from torch.autograd import Variable
from torch.nn import Linear, ReLU, CrossEntropyLoss, MSELoss, Sequential, Conv2d, MaxPool2d, Module, Softmax, BatchNorm2d, Dropout, Sigmoid
from torch.optim import Adam, SGD
from torchvision import transforms, utils
import skimage
from skimage import io, transform
import sklearn
import sklearn.metrics
from sklearn.feature_extraction import image
from sklearn import svm
# + [markdown] id="cC7FYGdRRj_o"
# # P2P architecture
# + id="M3R_yWgXRZEk"
'''
P2P architecture code is based on deeplearning.ai's architecture as defined in the GANs specialization
'''
class ContractingBlock(nn.Module):
    '''
    ContractingBlock Class
    Two 3x3 convolutions (each optionally followed by batch norm and dropout,
    always by LeakyReLU) and a 2x2 max pool, doubling the channel count and
    halving the spatial size.
    Values:
        input_channels: the number of channels to expect from a given input
    '''
    def __init__(self, input_channels, use_dropout=False, use_bn=True):
        super(ContractingBlock, self).__init__()
        doubled = input_channels * 2
        # Submodule creation order is kept identical to the reference
        # implementation so RNG-driven initialisation matches.
        self.conv1 = nn.Conv2d(input_channels, doubled, kernel_size=3, padding=1)
        self.conv2 = nn.Conv2d(doubled, doubled, kernel_size=3, padding=1)
        self.activation = nn.LeakyReLU(0.2)
        self.maxpool = nn.MaxPool2d(kernel_size=2, stride=2)
        if use_bn:
            self.batchnorm = nn.BatchNorm2d(doubled)
        self.use_bn = use_bn
        if use_dropout:
            self.dropout = nn.Dropout()
        self.use_dropout = use_dropout

    def forward(self, x):
        '''
        Run the contracting block: conv -> [bn] -> [dropout] -> LeakyReLU,
        twice, then downsample by max pooling.
        Parameters:
            x: image tensor of shape (batch size, channels, height, width)
        '''
        for conv in (self.conv1, self.conv2):
            x = conv(x)
            if self.use_bn:
                x = self.batchnorm(x)
            if self.use_dropout:
                x = self.dropout(x)
            x = self.activation(x)
        return self.maxpool(x)
class ExpandingBlock(nn.Module):
    '''
    ExpandingBlock Class:
    Upsamples, halves the channel count with a 2x2 convolution, concatenates
    the (center-cropped) skip connection, then applies two more convolutions
    with optional batch norm and dropout.
    Values:
        input_channels: the number of channels to expect from a given input
    '''
    def __init__(self, input_channels, use_dropout=False, use_bn=True):
        super(ExpandingBlock, self).__init__()
        halved = input_channels // 2
        # Submodule creation order matches the reference implementation so
        # RNG-driven initialisation is identical.
        self.upsample = nn.Upsample(scale_factor=2, mode='bilinear', align_corners=True)
        self.conv1 = nn.Conv2d(input_channels, halved, kernel_size=2)
        self.conv2 = nn.Conv2d(input_channels, halved, kernel_size=3, padding=1)
        self.conv3 = nn.Conv2d(halved, halved, kernel_size=2, padding=1)
        if use_bn:
            self.batchnorm = nn.BatchNorm2d(halved)
        self.use_bn = use_bn
        self.activation = nn.ReLU()
        if use_dropout:
            self.dropout = nn.Dropout()
        self.use_dropout = use_dropout

    def forward(self, x, skip_con_x):
        '''
        Run the expanding block.
        Parameters:
            x: image tensor of shape (batch size, channels, height, width)
            skip_con_x: the image tensor from the contracting path (from the
                opposing block of x) for the skip connection
        '''
        x = self.conv1(self.upsample(x))
        # Center-crop the skip connection to x's spatial size, then fuse
        # along the channel axis (relies on the module-level crop helper).
        x = torch.cat([x, crop(skip_con_x, x.shape)], axis=1)
        for conv in (self.conv2, self.conv3):
            x = conv(x)
            if self.use_bn:
                x = self.batchnorm(x)
            if self.use_dropout:
                x = self.dropout(x)
            x = self.activation(x)
        return x
class FeatureMapBlock(nn.Module):
    '''
    FeatureMapBlock Class
    The final layer of a U-Net -
    maps each pixel to a pixel with the correct number of output dimensions
    using a 1x1 convolution.
    Values:
        input_channels: the number of channels to expect from a given input
        output_channels: the number of channels to expect for a given output
    '''
    def __init__(self, input_channels, output_channels):
        super(FeatureMapBlock, self).__init__()
        self.conv = nn.Conv2d(input_channels, output_channels, kernel_size=1)

    def forward(self, x):
        '''
        Map the input to the desired number of channels via the 1x1 conv.
        Parameters:
            x: image tensor of shape (batch size, channels, height, width)
        '''
        return self.conv(x)
class UNet(nn.Module):
    '''
    UNet Class
    A series of 6 contracting blocks followed by 6 expanding blocks to
    transform an input image into the corresponding paired image, with an upfeature
    layer at the start and a downfeature layer at the end.
    (Docstring previously said 4 blocks; the code registers 6 of each.)
    Values:
        input_channels: the number of channels to expect from a given input
        output_channels: the number of channels to expect for a given output
    '''
    def __init__(self, input_channels, output_channels, hidden_channels=32):
        super(UNet, self).__init__()
        self.upfeature = FeatureMapBlock(input_channels, hidden_channels)
        # Dropout only on the first three contracting blocks.
        self.contract1 = ContractingBlock(hidden_channels, use_dropout=True)
        self.contract2 = ContractingBlock(hidden_channels * 2, use_dropout=True)
        self.contract3 = ContractingBlock(hidden_channels * 4, use_dropout=True)
        self.contract4 = ContractingBlock(hidden_channels * 8)
        self.contract5 = ContractingBlock(hidden_channels * 16)
        self.contract6 = ContractingBlock(hidden_channels * 32)
        self.expand0 = ExpandingBlock(hidden_channels * 64)
        self.expand1 = ExpandingBlock(hidden_channels * 32)
        self.expand2 = ExpandingBlock(hidden_channels * 16)
        self.expand3 = ExpandingBlock(hidden_channels * 8)
        self.expand4 = ExpandingBlock(hidden_channels * 4)
        self.expand5 = ExpandingBlock(hidden_channels * 2)
        self.downfeature = FeatureMapBlock(hidden_channels, output_channels)
        # Sigmoid squashes the output into [0, 1].
        self.sigmoid = torch.nn.Sigmoid()
    def forward(self, x):
        '''
        Function for completing a forward pass of UNet:
        Given an image tensor, passes it through U-Net and returns the output.
        Parameters:
            x: image tensor of shape (batch size, channels, height, width)
        '''
        x0 = self.upfeature(x)
        x1 = self.contract1(x0)
        x2 = self.contract2(x1)
        x3 = self.contract3(x2)
        x4 = self.contract4(x3)
        x5 = self.contract5(x4)
        x6 = self.contract6(x5)
        # Expanding path: each stage consumes the mirror-image skip tensor.
        x7 = self.expand0(x6, x5)
        x8 = self.expand1(x7, x4)
        x9 = self.expand2(x8, x3)
        x10 = self.expand3(x9, x2)
        x11 = self.expand4(x10, x1)
        x12 = self.expand5(x11, x0)
        xn = self.downfeature(x12)
        return self.sigmoid(xn)
class Discriminator(nn.Module):
    '''
    Discriminator Class
    Structured like the contracting path of the U-Net, the discriminator will
    output a matrix of values classifying corresponding portions of the image as real or fake.
    Parameters:
        input_channels: the number of image input channels
        hidden_channels: the initial number of discriminator convolutional filters
    '''
    def __init__(self, input_channels, hidden_channels=8):
        super(Discriminator, self).__init__()
        self.upfeature = FeatureMapBlock(input_channels, hidden_channels)
        # No batch norm on the first stage, as in the reference design.
        self.contract1 = ContractingBlock(hidden_channels, use_bn=False)
        self.contract2 = ContractingBlock(hidden_channels * 2)
        self.contract3 = ContractingBlock(hidden_channels * 4)
        self.contract4 = ContractingBlock(hidden_channels * 8)
        self.final = nn.Conv2d(hidden_channels * 16, 1, kernel_size=1)

    def forward(self, x, y):
        # Candidate x and condition y are fused on the channel axis, then
        # passed through the four contracting stages to a 1-channel score map.
        out = self.upfeature(torch.cat([x, y], axis=1))
        for stage in (self.contract1, self.contract2, self.contract3, self.contract4):
            out = stage(out)
        return self.final(out)
def crop(image, new_shape):
    '''
    Function for cropping an image tensor: Given an image tensor and the new shape,
    crops to the center pixels.
    Parameters:
        image: image tensor of shape (batch size, channels, height, width)
        new_shape: a torch.Size object with the shape you want x to have
    '''
    # Offsets of the centered window along the two spatial axes.
    top = image.shape[2] // 2 - round(new_shape[2] / 2)
    left = image.shape[3] // 2 - round(new_shape[3] / 2)
    return image[:, :, top:top + new_shape[2], left:left + new_shape[3]]
def get_gen_loss(gen, disc, real, condition, adv_criterion, recon_criterion, lambda_recon):
    '''
    Return the generator loss: adversarial term plus weighted reconstruction term.
    Parameters:
        gen: the generator; takes the condition and returns potential images
        disc: the discriminator; takes images and the condition and
              returns real/fake prediction matrices
        real: the real images (e.g. maps) used to evaluate the reconstruction
        condition: the source images (e.g. satellite imagery) the generator consumes
        adv_criterion: adversarial loss on discriminator predictions vs labels
        recon_criterion: reconstruction loss between real and generated images
        lambda_recon: weight of the reconstruction term in the total loss
    '''
    generated = gen(condition)
    prediction_on_fake = disc(generated, condition)
    # The generator wants the discriminator to call its output real (all ones).
    adversarial_loss = adv_criterion(prediction_on_fake, torch.ones_like(prediction_on_fake))
    reconstruction_loss = recon_criterion(real, generated)
    return adversarial_loss + lambda_recon * reconstruction_loss
def P2Ptrain(save_model=False):
    # Train the pix2pix generator/discriminator pair for num_epochs.
    # NOTE(review): relies on notebook globals: num_epochs, train_loader,
    # readFromPatches, device, gen, disc, gen_opt, disc_opt, adv_criterion,
    # recon_criterion, lambda_recon, display_epoch, show_patches.
    # NOTE(review): the save_model parameter is currently unused — checkpoints
    # are saved unconditionally every 20 epochs below.
    mean_generator_loss_list = []
    mean_discriminator_loss_list = []
    for epoch in range(num_epochs):
        # Dataloader returns the batches
        mean_generator_loss = 0
        mean_discriminator_loss = 0
        if epoch == 70: # lr: 0.005 => 0.001
            gen_opt.param_groups[0]['lr'] = 0.001
            disc_opt.param_groups[0]['lr'] = 0.001
        for sample in train_loader:
            condition = sample['input'] # ALI
            real = sample['target'] # hyperion
            if readFromPatches:
                # Patch datasets wrap each tensor in an extra batch dimension.
                condition = condition[0]
                real = real[0]
            condition = condition.to(device)
            real = real.to(device)
            ### Update discriminator ###
            disc_opt.zero_grad() # Zero out the gradient before backpropagation
            with torch.no_grad():
                fake = gen(condition)
            disc_fake_hat = disc(fake.detach(), condition) # Detach generator
            disc_fake_loss = adv_criterion(disc_fake_hat, torch.zeros_like(disc_fake_hat))
            disc_real_hat = disc(real, condition)
            disc_real_loss = adv_criterion(disc_real_hat, torch.ones_like(disc_real_hat))
            disc_loss = (disc_fake_loss + disc_real_loss) / 2
            disc_loss.backward(retain_graph=True) # Update gradients
            disc_opt.step() # Update optimizer
            ### Update generator ###
            gen_opt.zero_grad()
            gen_loss = get_gen_loss(gen, disc, real, condition, adv_criterion, recon_criterion, lambda_recon)
            gen_loss.backward() # Update gradients
            gen_opt.step() # Update optimizer
            # Keep track of the average discriminator loss
            mean_discriminator_loss += disc_loss.item()
            # Keep track of the average generator loss
            mean_generator_loss += gen_loss.item()
        mean_generator_loss = mean_generator_loss / len(train_loader)
        mean_discriminator_loss = mean_discriminator_loss / len(train_loader)
        mean_generator_loss_list.append(mean_generator_loss)
        mean_discriminator_loss_list.append(mean_discriminator_loss)
        ### Visualization code ###
        if epoch % display_epoch == 0:
            fig, axs = plt.subplots(2,1)
            axs[0].plot(mean_generator_loss_list)
            axs[0].set_title('Generator loss')
            axs[1].plot(mean_discriminator_loss_list)
            axs[1].set_title('Discriminator loss')
            plt.show()
            print(f"Epoch {epoch}: Generator (U-Net) loss: {mean_generator_loss}, Discriminator loss: {mean_discriminator_loss}")
            show_patches(condition.cpu(), fake.cpu(), real.cpu())
            #calc_metrics(real.cpu().numpy(), fake.cpu().numpy())
        # Checkpoint both networks and their optimizers every 20 epochs.
        if epoch % 20 == 0:
            torch.save({'gen': gen.state_dict(),
                        'gen_opt': gen_opt.state_dict(),
                        'disc': disc.state_dict(),
                        'disc_opt': disc_opt.state_dict()
                        }, os.getcwd() + f"/drive/My Drive/TFG/Models/NewModel/epoch{epoch}.pth")
def P2Ptest(inferenceDataset, vizImages=False, svc=None, saveMetrics=None):
    # Evaluate the trained generator over inferenceDataset, accumulating
    # band-wise (PCC/RMSE/PSNR/SSIM, 170 bands) and pixel-wise (SAM/SID,
    # 64x64 pixels) metric arrays, then average and optionally save to CSV.
    # NOTE(review): relies on notebook globals: gen, device, calc_metrics,
    # show_patches, show_metrics.
    metrics = {'PCC': np.array([0.]*170),
               'RMSE': np.array([0.]*170),
               'PSNR': np.array([0.]*170),
               'SSIM': np.array([0.]*170),
               'SAM': np.array([0.]*64*64),
               'SID': np.array([0.]*64*64)}
    for i, sample in enumerate(inferenceDataset):
        input = sample['input'][0]  # NOTE(review): shadows the `input` builtin
        prediction = gen(input.to(device)).detach().cpu().numpy()
        target = sample['target'][0].numpy()
        #VISUALIZATION
        if vizImages:
            show_patches(input, prediction, target)
        # BATCH EVALUATION
        metrics_batch = calc_metrics(target, prediction, verbose=True)
        # BAND-WISE EVALUATION
        metrics['PCC'] += metrics_batch['PCC']
        metrics['RMSE'] += metrics_batch['RMSE']
        metrics['PSNR'] += metrics_batch['PSNR']
        metrics['SSIM'] += metrics_batch['SSIM']
        # PIXEL-WISE EVALUATION
        metrics['SAM'] += metrics_batch['SAM']
        metrics['SID'] += metrics_batch['SID']
        '''
        if saveMetrics != None:
            metrics = {k: np.mean(m) for k,m in metrics.items()}
            df = pd.DataFrame({key: pd.Series(value) for key, value in metrics.items()})
            df.to_csv(os.getcwd() + f"/drive/My Drive/TFG/Metrics/P2P_metrics/{saveMetrics}.csv", encoding='utf-8', index=False)
            break
        '''
        # CROP CLASSIFICATION
        # NOTE(review): this branch is broken as written — `crop` names the
        # module-level crop() function (not an array) and `predictions` is
        # undefined, so passing svc raises NameError. Needs fixing before use.
        if svc != None:
            crop = np.array(crop)
            crop_class, pred_class = svc.test(crop, predictions)
            print('Accuracy:', sklearn.metrics.accuracy_score(crop_class, pred_class))
    # DATASET EVALUATION
    # NOTE(review): hard-coded divisor 5 assumes exactly 5 evaluation batches;
    # presumably this should be len(inferenceDataset) — confirm before reuse.
    metrics = {k: m/5 for k,m in metrics.items()}
    show_metrics(metrics)
    if saveMetrics != None:
        df = pd.DataFrame({key: pd.Series(value) for key, value in metrics.items()})
        df.to_csv(os.getcwd() + f"/drive/My Drive/TFG/Metrics/P2P_metrics/{saveMetrics}.csv", encoding='utf-8', index=False)
| 3.Methods/pix2pix_architecture.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python [conda root]
# language: python
# name: conda-root-py
# ---
# # xlsxWriter
#
# ## [tutorial](https://xlsxwriter.readthedocs.io/)
#
#
# ## install
# > sudo pip install XlsxWriter
#
# - 아나콘다 설치시 기본적으로 내장되어 있음
#
import xlsxwriter
# +
# Minimal example: create a workbook, write one cell, and close it.
workbook = xlsxwriter.Workbook('hello.xlsx')
worksheet = workbook.add_worksheet()
worksheet.write('A1', 'Hello world')
workbook.close()  # nothing is written to disk until close()
# -
# # tutorial 1: Created a Simple XLSX file
# Tutorial data: (description, amount) pairs for a monthly expense report.
expenses = (
    ['Rent', 1000],
    ['Gas', 100],
    ['Food', 300],
    ['Gym', 50],
)
# +
workbook = xlsxwriter.Workbook('Expenses01.xlsx')
# Sheets default to Sheet1, Sheet2, ... but can also be given explicit names.
worksheet = workbook.add_worksheet()

# One expense per row, starting at the top-left cell (row/col are 0-indexed).
for row, (item, cost) in enumerate(expenses):
    worksheet.write(row, 0, item)
    worksheet.write(row, 1, cost)

# Totals row goes directly below the data; the formula is evaluated by Excel.
total_row = len(expenses)
worksheet.write(total_row, 0, 'Total')
worksheet.write(total_row, 1, '=SUM(B1:B4)')
workbook.close()  # the workbook must always be closed to flush the file
# -
# <img src="https://xlsxwriter.readthedocs.io/_images/tutorial01.png">
# - XlsxWriter can only create new files. It cannot read or modify existing files.
# - 수정하진 못함 주륵..!
# default는 sheet1, sheet2 .... 임
worksheet1 = workbook.add_worksheet() # sheet1
worksheet2 = workbook.add_worksheet('Data')
worksheet3 = workbook.add_worksheet() # sheet3
# # Tutorial 2 : Adding formatting to the XLSX file
# - 특정 포맷..! bold처리 같은 것들
#
# <img src="https://xlsxwriter.readthedocs.io/_images/tutorial02.png">
# +
workbook = xlsxwriter.Workbook("Expenses02.xlsx")
worksheet = workbook.add_worksheet("Sheet1")

# Cell formats: bold for headers/totals, a currency format for the amounts.
bold = workbook.add_format({"bold": True})
money = workbook.add_format({"num_format": "$#,##0"})

# Header row.
worksheet.write('A1', 'Item', bold)
worksheet.write('B1', 'Cost', bold)

expenses = (
    ['Rent', 1000],
    ['Gas', 100],
    ['Food', 300],
    ['Gym', 50],
)

# Data starts on row 1 (0-indexed) because row 0 holds the headers.
# write(row, col, token, [format])
for offset, (item, cost) in enumerate(expenses):
    worksheet.write(1 + offset, 0, item)
    worksheet.write(1 + offset, 1, cost, money)

# Totals row directly under the data; Excel evaluates the formula on open.
total_row = 1 + len(expenses)
worksheet.write(total_row, 0, "Total", bold)
worksheet.write(total_row, 1, "=sum(B2:B5)", money)
workbook.close()
# -
# # Tutorial 3: Writing different types of data to the XLSX file
#
# - 다른 타입의 데이터를 넣기!
#
# <img src="https://xlsxwriter.readthedocs.io/_images/tutorial03.png">
#
# +
from datetime import datetime

workbook = xlsxwriter.Workbook("Expenses03.xlsx")
worksheet = workbook.add_worksheet()

# Formats: bold headers, currency amounts, and a long date style.
bold = workbook.add_format({"bold": 1})
money_format = workbook.add_format({"num_format": "$#,##0"})
date_format = workbook.add_format({"num_format": "mmmm d yyyy"})

# Widen column B so the formatted dates are not truncated.
worksheet.set_column(1, 1, 15)

# Header row.
worksheet.write("A1", "Item", bold)
worksheet.write("B1", "Date", bold)
worksheet.write("C1", "Cost", bold)

expenses = (
    ['Rent', '2013-01-13', 1000],
    ['Gas', '2013-01-14', 100],
    ['Food', '2013-01-16', 300],
    ['Gym', '2013-01-20', 50],
)

# Data rows start below the headers. The type-specific write_* helpers make
# Excel store a real string / datetime / number in each cell.
for offset, (item, date_str, cost) in enumerate(expenses):
    when = datetime.strptime(date_str, "%Y-%m-%d")
    worksheet.write_string(1 + offset, 0, item)
    worksheet.write_datetime(1 + offset, 1, when, date_format)
    worksheet.write_number(1 + offset, 2, cost, money_format)

# Write a total using a formula.
worksheet.write(1 + len(expenses), 0, 'Total', bold)
worksheet.write(1 + len(expenses), 2, '=SUM(C2:C5)', money_format)
workbook.close()
# -
# ## write() method
#
#
# [여기에 더 많은 정보가 있어요](https://xlsxwriter.readthedocs.io/worksheet.html#write_blank)
#
# write_string()
#
# write_number()
#
# write_blank()
#
# write_formula()
#
# write_datetime()
#
# write_boolean()
#
# write_url()
# # workbook class
#
#
# ## constant_memory : 메모리에 있는 데이터를 효율적으로 관리
# workbook = xlsxwriter.Workbook(filename, {"constant_memory":True})
#
# ## tmpdir : 임시 파일을 저장할 장소..!
# workbook = xlsxwriter.Workbook(filename, {'tmpdir': '/home/user/tmp'})
# # working with python pandas and xlsxwriter
#
# - xlwt와 openpyxl or xlsxWriter 모듈사용
import pandas as pd
df = pd.DataFrame({"Data": [10, 20, 30, 20, 15, 30, 45]})
df
# +
## Driving xlsxwriter through pandas' ExcelWriter.
df = pd.DataFrame({'Data': [10, 20, 30, 20, 15, 30, 45]})
writer = pd.ExcelWriter("pandas_simple.xlsx", engine="xlsxwriter")
df.to_excel(writer, sheet_name="Sheet1")
# Grab the underlying xlsxwriter objects to use features pandas does not expose.
workbook = writer.book
worksheet = writer.sheets["Sheet1"]
# Create the chart object.
chart = workbook.add_chart({"type":'column'})
# Point the chart at the DataFrame's data range in the worksheet.
chart.add_series({"values":"=Sheet1!$B$2:$B$8"})
# Insert the chart into the worksheet.
worksheet.insert_chart("D2", chart)
# NOTE(review): ExcelWriter.save() is deprecated/removed in newer pandas — use close().
writer.save()
# -
# <img src="https://xlsxwriter.readthedocs.io/_images/pandas_chart.png">
# +
# 차트 속성을 수정하려면 어떻게 해야할까..!
# +
# dataframe formatting 색깔 설정
worksheet.conditional_format('B2:B8', {'type': '3_color_scale'})
# 이걸 하면 알록달록 아래처럼 나옴
# -
# <img src="https://xlsxwriter.readthedocs.io/_images/pandas_conditional.png">
# +
df1 = pd.DataFrame({'Data': [11, 12, 13, 14]})
df2 = pd.DataFrame({'Data': [21, 22, 23, 24]})
df3 = pd.DataFrame({'Data': [31, 32, 33, 34]})
df4 = pd.DataFrame({'Data': [41, 42, 43, 44]})
# Set up the writer.
writer = pd.ExcelWriter('pandas_positioning.xlsx', engine='xlsxwriter')
# Place each frame at a different offset within the same sheet.
df1.to_excel(writer, sheet_name='Sheet1')                # default position: A1
df2.to_excel(writer, sheet_name='Sheet1', startcol=3)    # shifted to the 4th column (index 3)
df3.to_excel(writer, sheet_name='Sheet1', startrow=6)    # shifted to the 7th row
# header/index can be suppressed, but the start offsets must account for that.
df4.to_excel(writer, sheet_name='Sheet1',
             startrow=7, startcol=4, header=False, index=False)
# Flush and close the writer (NOTE(review): save() is deprecated in newer pandas).
writer.save()
# -
# <img src="https://xlsxwriter.readthedocs.io/_images/pandas_positioning.png">
# # pandas + xlsx writer + vincent 로 차트 그리기
# Plain list wrapped in a single-column DataFrame.
list_data = [10, 20, 30, 20, 15, 30, 45]
df = pd.DataFrame(list_data)
# +
excel_file = 'column.xlsx'
sheet_name = 'Sheet1'
writer = pd.ExcelWriter(excel_file, engine='xlsxwriter')
df.to_excel(writer, sheet_name=sheet_name)
workbook = writer.book
worksheet = writer.sheets[sheet_name]
# Column chart fed from the data range that to_excel just wrote.
chart = workbook.add_chart({'type': 'column'})
chart.add_series({
    'values': '=Sheet1!$B$2:$B$8',
    'gap': 2  # gap between the columns
})
# chart: configure the y axis (hide the gridlines).
chart.set_y_axis({'major_gridlines': {'visible': False}})
# Disable the chart legend.
chart.set_legend({'position': 'none'})
# insert the chart into the worksheet
worksheet.insert_chart('D2', chart)  # cell refs are case-sensitive: 'D2', not 'd2'
writer.save()
# -
# <img src="https://pandas-xlsxwriter-charts.readthedocs.io//_images/chart_column.png">
| python/xlsxWriter.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# %matplotlib inline
# %reload_ext autoreload
# %autoreload 2
import multiprocessing
import os
from torch import autograd
from fastai.transforms import TfmType
from fasterai.transforms import *
from fastai.conv_learner import *
from fasterai.images import *
from fasterai.dataset import *
from fasterai.visualize import *
from fasterai.callbacks import *
from fasterai.loss import *
from fasterai.modules import *
from fasterai.training import *
from fasterai.generators import *
from fastai.torch_imports import *
from pathlib import Path
from itertools import repeat
import tensorboardX
torch.cuda.set_device(0)  # pin all work to GPU 0
plt.style.use('dark_background')
torch.backends.cudnn.benchmark=True
# Paths: full ImageNet train split, plus one small synset used for quick runs.
IMAGENET = Path('data/imagenet/ILSVRC/Data/CLS-LOC/train')
IMAGENET_SMALL = IMAGENET/'n01440764'
gpath = IMAGENET.parent/('defade_gen_192.h5')  # pretrained de-fade generator weights
default_sz=400  # default render size for the visualizer
torch.backends.cudnn.benchmark=True  # NOTE(review): duplicate of the line above
# Build the U-Net generator, load the trained weights, and switch to eval mode.
netG = Unet34(nf_factor=2).cuda()
load_model(netG, gpath)
netG = netG.eval()
# +
# Data loader over the small synset; no input noise, full dataset kept.
x_tfms = []
data_loader = ImageGenDataLoader(sz=256, bs=8, path=IMAGENET_SMALL, random_seed=42, x_noise=False,
                                 keep_pct=1.0, x_tfms=x_tfms)
md = data_loader.get_model_data()
# -
# Render the generator's output for a few faded test photographs.
vis = ModelImageVisualizer(default_sz=default_sz)
vis.plot_transformed_image("test_images/FadedOvermiller.PNG", netG, md.val_ds, tfms=x_tfms)
vis.plot_transformed_image("test_images/FadedSphynx.PNG", netG, md.val_ds, tfms=x_tfms, sz=500)
vis.plot_transformed_image("test_images/FadedRacket.PNG", netG, md.val_ds, tfms=x_tfms)
vis.plot_transformed_image("test_images/FadedDutchBabies.PNG", netG, md.val_ds, tfms=x_tfms, sz=500)
vis.plot_transformed_image("test_images/FadedDelores.PNG", netG, md.val_ds, tfms=x_tfms, sz=500)
| DeFadeVisualization.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import geopandas as gpd
import json
import pandas as pd
import numpy as np
import plotly.express as px
import plotly.graph_objects as go
with open('MGN_MPIO_POLITICO_2.json') as geo:
munijson = json.loads(geo.read())
# +
import pandas as pd
from sqlalchemy import create_engine, text
# maximum number of rows to display
pd.options.display.max_rows = 20
# NOTE(review): database credentials are hard-coded in the notebook — move them
# to environment variables or a secrets store before sharing this file.
DB_USERNAME = 'alagos'
DB_PASSWORD = '<PASSWORD>!'
DB_ENDPOINT = 'ds4a-demo-instance.cqjr4hyu9xaq.us-east-1.rds.amazonaws.com'
DB_NAME = 'desertion_pj_team67'
engine=create_engine(f'postgresql://{DB_USERNAME}:{DB_PASSWORD}@{DB_ENDPOINT}/{DB_NAME}', max_overflow=20)
def runQuery(sql):
    """Run *sql* against the module-level engine and return the rows as a DataFrame."""
    conn = engine.connect().execution_options(isolation_level="AUTOCOMMIT")
    rows = conn.execute((text(sql)))
    return pd.DataFrame(rows.fetchall(), columns=rows.keys())
# -
# Pull per-municipality dropout / coverage aggregates plus their cluster ranks.
df_clusters = runQuery("""
select code_municip, name_municip, desertion_no, me_cobertura_neta, desertion_perc, deser_perc_rank,
cobertura_rank, desercion_rank, dane_doc_31
from cluster_master_table_by_municipio; """)
# Cast numeric columns to float64 (presumably returned as Decimal/str by the
# driver — TODO confirm against the DB schema).
for col in ['desertion_no', 'me_cobertura_neta', 'desertion_perc','dane_doc_31']:
    df_clusters[col] = df_clusters[col].astype(np.float64)
# Human-readable column names for the dashboard.
df_clusters.rename(columns = {
    "name_municip": "Municipio", "desertion_no": "# Dropouts",
    "me_cobertura_neta": "Coverage", "desertion_perc": "% Dropouts",
    "deser_perc_rank": "Cluster Description","cobertura_rank": "Coverage Type",
    "desercion_rank": "Desertion Type"}, inplace = True)
# Cluster id is the leading character of the cluster description string.
df_clusters['Cluster'] = df_clusters['Cluster Description'].astype(str).str[0]
# 2.2 Query for features
# ------------------------------
df_vars = runQuery("""
select cvr.var_id, cvr.var_name, vd.label, vd.description, cvr.weight
from cluster_vars_ranking cvr
left join var_definition vd
on cvr.var_id = vd.var_id ; """)
df_vars['weight'] = df_vars['weight'].astype(np.float64)
df_vars.rename(columns = {"weight": "Weight",'label':'Feature'}, inplace = True)
# +
# Choropleth of Colombian municipalities colored by cluster id.
cl_map = px.choropleth_mapbox(df_clusters, # Data
    locations='code_municip', # Column containing the identifiers used in the GeoJSON file
    featureidkey="properties.MPIO_CCNCT", # Field in the GeoJSON holding the municipality identifier
    color='Cluster', # Column giving the color intensity of the region
    geojson=munijson, # The GeoJSON file
    zoom=4, # Zoom
    mapbox_style="carto-positron", # Mapbox style; custom styles need a Mapbox account and token
    center={"lat": 4.5709, "lon": -74.2973}, # Center (roughly Colombia's centroid)
    #color_continuous_scale="Blues", # Color Scheme
    #opacity=0.5, # Opacity of the map
    height=380,
    hover_name='Municipio',
    #hover_data=['# Dropouts','Coverage','% Dropouts']
    hover_data={'Coverage':':,', # thousands separator
        '# Dropouts':':',
        '% Dropouts':True # shown with default formatting
        # data not in dataframe, default formatting
        #'suppl_1': np.random.random(len(df)),
        # data not in dataframe, customized formatting
        #'suppl_2': (':.3f', np.random.random(len(df)))
        }
    )
cl_map.update_geos(fitbounds="locations", visible=False)
cl_map.update_layout(title_text ='Municipalities by cluster',margin={"r":20,"t":40,"l":20,"b":0},height=300,hovermode="x unified")
cl_map.show()
# -
| notebooks/Cluster Maps.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: 'Python 3.7.7 64-bit (''pytorch'': conda)'
# metadata:
# interpreter:
# hash: eb031bbce033fa812dcec88de62f5abea6a352f76cdf43a5d1f21e2ea96289b4
# name: python3
# ---
# # Project II. Local Feature Match
#
# 1. Loads and resizes images
# 2. Finds interest points in those images (you code this)
# 3. Describes each interest point with a local feature (you code this)
# 4. Finds matching features (you code this)
# 5. Visualizes the matches
# 6. Evaluates the matches based on ground truth correspondences
# +
import csv
import sys
import argparse
import numpy as np
import matplotlib
matplotlib.use("TkAgg")
import matplotlib.pyplot as plt
from skimage import io, filters, feature, img_as_float32
from skimage.transform import rescale
from skimage.color import rgb2gray
import student
import visualize
from helpers import cheat_interest_points, evaluate_correspondence
from utils import load_data
# -
# You should choose data_pair among [notre_dame, mt_rushmore, e_gaudi]
data_pair = "notre_dame"
# +
# (1) Load in the data
image1, image2, eval_file = load_data(data_pair)
# You don't have to work with grayscale images. Matching with color
# information might be helpful. If you choose to work with RGB images, just
# comment these two lines and make sure scale_factor be changed, too.
image1 = rgb2gray(image1)
image2 = rgb2gray(image2)
# make images smaller to speed up the algorithm. This parameter
# gets passed into the evaluation code, so don't resize the images
# except for changing this parameter - We will evaluate your code using
# scale_factor = 0.5, so be aware of this
scale_factor = 0.5
# scale_factor = [0.5, 0.5, 1]
# Bilinear rescaling
image1 = np.float32(rescale(image1, scale_factor))
image2 = np.float32(rescale(image2, scale_factor))
# width and height of each local feature, in pixels
feature_width = 16
# +
# (2) Find distinctive points in each image. See Szeliski 4.1.1
# !!! You will need to implement get_interest_points. !!!
print("Getting interest points...")
# For development and debugging get_features and match_features, you will likely
# want to use the ta ground truth points, you can comment out the precedeing two
# lines and uncomment the following line to do this.
#(x1, y1, x2, y2) = cheat_interest_points(eval_file, scale_factor)
(x1, y1) = student.get_interest_points(image1, feature_width)
(x2, y2) = student.get_interest_points(image2, feature_width)
# if you want to view your corners uncomment these next lines!
# plt.imshow(image1, cmap="gray")
# plt.scatter(x1, y1, alpha=0.9, s=3)
# plt.show()
# plt.imshow(image2, cmap="gray")
# plt.scatter(x2, y2, alpha=0.9, s=3)
# plt.show()
print("Done!")
# +
# 3) Create feature vectors at each interest point. Szeliski 4.1.2
# !!! You will need to implement get_features. !!!
print("Getting features...")
image1_features = student.get_features(image1, x1, y1, feature_width)
image2_features = student.get_features(image2, x2, y2, feature_width)
print("Done!")
# +
# 4) Match features. Szeliski 4.1.3
# !!! You will need to implement match_features !!!
print("Matching features...")

# Match the two images' feature descriptors (student implementation).
matches, confidences = student.match_features(image1_features, image2_features)

# A 1-D `matches` array means no pairs were found. The original code used
# `return` here, which is a SyntaxError at module level (this is a script /
# notebook cell, not a function body) — abort the run instead.
if len(matches.shape) == 1:
    print("No matches!")
    raise SystemExit
print("Done!")
# +
# 5) Visualization
# You might want to do some preprocessing of your interest points and matches
# before visualizing (e.g. only visualizing 100 interest points). Once you
# start detecting hundreds of interest points, the visualization can become
# crowded. You may also want to threshold based on confidence
# visualize.show_correspondences produces a figure that shows your matches
# overlayed on the image pairs. evaluate_correspondence computes some statistics
# about the quality of your matches, then shows the same figure. If you want to
# just see the figure, you can uncomment the function call to visualize.show_correspondences
num_pts_to_visualize = matches.shape[0]
print("Matches: " + str(num_pts_to_visualize))
# visualize.show_correspondences(image1, image2, x1, y1, x2, y2, matches, filename=args.pair + "_mtches.jpg")
# -
## 6) Evaluation
# This evaluation function will only work for the Notre Dame, Episcopal
# Gaudi, and Mount Rushmore image pairs. Comment out this function if you
# are not testing on those image pairs. Only those pairs have ground truth
# available.
#
# It also only evaluates your top 100 matches by the confidences
# that you provide.
#
# Within evaluate_correspondences(), we sort your matches in descending order
#
num_pts_to_evaluate = matches.shape[0]
evaluate_correspondence(image1, image2, eval_file, scale_factor,
x1, y1, x2, y2, matches, confidences, num_pts_to_visualize)
| cv_class/proj2/code/proj2.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 2
# language: python
# name: python2
# ---
from collections import OrderedDict
import pandas as pd
from bokeh.charts import TimeSeries
from bokeh.io import output_notebook, show
output_notebook()
# +
# read in some stock data from the Yahoo Finance API
AAPL = pd.read_csv(
"http://ichart.yahoo.com/table.csv?s=AAPL&a=0&b=1&c=2000&d=0&e=1&f=2010",
parse_dates=['Date'])
MSFT = pd.read_csv(
"http://ichart.yahoo.com/table.csv?s=MSFT&a=0&b=1&c=2000&d=0&e=1&f=2010",
parse_dates=['Date'])
IBM = pd.read_csv(
"http://ichart.yahoo.com/table.csv?s=IBM&a=0&b=1&c=2000&d=0&e=1&f=2010",
parse_dates=['Date'])
xyvalues = OrderedDict(
AAPL=AAPL['Adj Close'],
Date=AAPL['Date'],
MSFT=MSFT['Adj Close'],
IBM=IBM['Adj Close'],
)
# -
ts = TimeSeries(
xyvalues, index='Date', title="timeseries, dict input",
legend='top_left', ylabel='Stock Prices')
show(ts)
df = pd.DataFrame(xyvalues)
ts = TimeSeries(
df, index='Date', title="timeseries, pandas input",
legend='top_left', ylabel='Stock Prices')
show(ts)
lindex = xyvalues.pop('Date')
lxyvalues = list(xyvalues.values())
ts = TimeSeries(
lxyvalues, index=lindex, title="timeseries, list input",
ylabel='Stock Prices: 0-AAPL, 1-IBM, 2-MSFT', legend=True)
show(ts)
# +
from blaze import Data
b = Data(df)
ts = TimeSeries(b, index='Date', title="timeseries, blaze input", ylabel='Stock Prices', legend='bottom_right')
show(ts)
# -
| examples/charts/notebook/timeseries.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
#
# # ensembling classifiers internally using sampling
#
# ##### Authors: <NAME> <<EMAIL>>
#
# part of script below was cited from:
# ref. https://imbalanced-learn.readthedocs.io/en/stable/auto_examples/ensemble/plot_comparison_ensemble_classifier.html#sphx-glr-auto-examples-ensemble-plot-comparison-ensemble-classifier-py
#
# +
# Authors: <NAME> <<EMAIL>>
# License: MIT
import itertools
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
from sklearn.model_selection import train_test_split
from sklearn.ensemble import BaggingClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.ensemble import AdaBoostClassifier
from sklearn.tree import DecisionTreeClassifier
from sklearn.metrics import confusion_matrix, classification_report
from sklearn.metrics import balanced_accuracy_score
from imblearn.ensemble import BalancedBaggingClassifier
from imblearn.ensemble import BalancedRandomForestClassifier
from imblearn.ensemble import EasyEnsembleClassifier
from imblearn.ensemble import RUSBoostClassifier
from imblearn.metrics import geometric_mean_score
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import roc_auc_score, roc_curve
from xgboost import XGBClassifier
# -
from helper import *
def evaluate_trained_model(lr, X_train, y_train, X_test, y_test):
    """Print a classification report and ROC-AUC score for the test split,
    then for the train split, using the fitted classifier *lr*."""
    def _report(header, X, y):
        # One prediction pass per split, reused for both metrics.
        preds = lr.predict(X)
        print(header)
        print(classification_report(y, preds))
        print('the roc_auc score is:{:0.2f}'.format(roc_auc_score(y, preds)))

    _report('clf report for the testing set:', X_test, y_test)
    _report('clf report for the training set:', X_train, y_train)
# # Load the data set
#
# Cleaned training data: feature columns plus the binary target in the last column.
data = pd.read_pickle("mailout_train_cleaned.pkl")
# data = data.drop_duplicates(subset= data.columns[:-1:])
data.drop_duplicates(inplace=True)
X = data.iloc[:, 0:-1:]
y = data.iloc[:, -1]
# split into train/test sets (stratified to preserve the class imbalance)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=42, stratify=y)
# Sanity check: list any columns that still contain raw string values.
# (the genexp-local `y` does not shadow the target `y` outside the expression)
for col in X.columns:
    if any(isinstance(y,(str)) for y in X[col].unique()):
        print('{:}, {:}'.format( col , X[col].unique()))
#
#
#
def train_clf(model, model_name, ax, X_train = X_train, y_train = y_train, X_test = X_test, y_test = y_test):
    """Fit *model*, report its test-set ROC AUC, and optionally draw its
    confusion matrix on *ax*. Defaults bind the module-level train/test split
    at definition time. Returns (confusion_matrix, roc_auc_score)."""
    model.fit(X_train, y_train)
    preds = model.predict(X_test)
    ra_score = roc_auc_score(y_test, preds)
    print('{:} ROC AUC accuracy: {:.2f}:'.format(model_name, ra_score))
    cm = confusion_matrix(y_test, preds)
    # Skip plotting when no axis was supplied (e.g. during the subset sweep).
    if ax is not None:
        plot_confusion_matrix(cm, classes=[0, 1], ax=ax,
                              title='{:}'.format(model_name))
    return cm, ra_score
# +
n_e = 50  # number of estimators for the bagging/forest ensembles
seed = 0
base_estimator = AdaBoostClassifier(n_estimators=10)
# Candidate imbalance-aware ensembles; the plain (commented-out) variants were
# dropped after earlier comparisons.
models = dict({
    # "DecisionTree": DecisionTreeClassifier(),
    # "Bagging": BaggingClassifier(n_estimators=n_e, random_state=seed, n_jobs=-1),
    "BalancedBagging": BalancedBaggingClassifier(n_estimators=n_e, random_state=seed,n_jobs=-1),
    # "RandomForest":RandomForestClassifier(n_estimators=n_e, random_state=seed, n_jobs=-1),
    "BalancedRandomForest": BalancedRandomForestClassifier(n_estimators=n_e, random_state=seed, n_jobs=-1),
    # "AdaBoost": AdaBoostClassifier(n_estimators=10),
    "EasyEnsemble":EasyEnsembleClassifier(n_estimators=10,base_estimator=base_estimator,n_jobs=-1),
    "RUSBoost":RUSBoostClassifier(n_estimators=10,base_estimator=base_estimator),
    # "XGB":XGBClassifier()
})
# +
# Train each candidate and draw its confusion matrix on a 3-column grid.
fig, ax = plt.subplots(int(np.ceil(len(models)/3)), 3, figsize=(12,9))
for idx, item in enumerate(models.items()):
    train_clf(model=item[1], model_name=item[0], ax=ax[idx//3, idx%3])
# -
# # use the subset to retrain the model
# Rank features by the importances of the last model trained in the loop above
# (NOTE(review): `item` is the loop variable leaking out of the previous cell).
features_importance = pd.DataFrame(item[1].feature_importances_, columns=['importance'])
features_importance['feature_names']=X.columns
features_importance_sorted = features_importance.sort_values(by = ['importance'], ascending=False)
base_estimator = AdaBoostClassifier(n_estimators=10)
model = EasyEnsembleClassifier(n_estimators=10, base_estimator=base_estimator,n_jobs=-1)
# +
# Retrain with increasingly large subsets of the top-ranked features and
# record the ROC AUC for each subset size.
scores = []
feature_counts = np.arange(3, 50, 5)
feature_counts = np.append(feature_counts, len(features_importance))  # also try all features
cms = []
for feature_count in feature_counts:
    subset_features = features_importance_sorted.iloc[:feature_count]['feature_names']
    X_train_sub, X_test_sub = X_train[subset_features], X_test[subset_features]
    cm, score = train_clf(model, 'clf with {:} features'.format(feature_count), ax = None , X_train = X_train_sub, y_train = y_train, X_test = X_test_sub, y_test = y_test)
    scores.append(score)
    cms.append(cm)
# -
# Keep the subset size that scored best.
most_relavent_features = feature_counts[np.argmax(scores)]
subset_features = features_importance_sorted.iloc[:most_relavent_features]['feature_names']
features_importance_sorted.head(10)
# # fine tune the model using grid_search_cv
from sklearn.model_selection import StratifiedKFold
from sklearn.model_selection import GridSearchCV
# +
base_estimator = AdaBoostClassifier()
clf = EasyEnsembleClassifier(base_estimator=base_estimator)
# Stratified CV keeps the heavy class imbalance consistent across folds.
cv = StratifiedKFold(n_splits= 5, shuffle=True, random_state= 42)
# Search over both the wrapped base estimator type and the ensemble size.
param_grid = [{'base_estimator__n_estimators':[30],
               "base_estimator": [EasyEnsembleClassifier(),
                                  RUSBoostClassifier(),
                                  BalancedBaggingClassifier()
                                  ],
               'n_estimators':[10, 30, 50],
               'warm_start':[False]
               }]
score = 'roc_auc'  # competition metric
grid = GridSearchCV( clf, param_grid, scoring= score, n_jobs= 6, verbose=1, cv = cv)
# Fit on the full data restricted to the best feature subset found above.
grid.fit(X[subset_features], y)
grid.best_score_
# +
# grid.get_params()
# -
grid.best_params_
grid.cv_results_['mean_test_score']
grid.best_score_
# # Part 3: Kaggle Competition
#
# Now that you've created a model to predict which individuals are most likely to respond to a mailout campaign, it's time to test that model in competition through Kaggle. If you click on the link here, you'll be taken to the competition page where, if you have a Kaggle account, you can enter. If you're one of the top performers, you may have the chance to be contacted by a hiring manager from Arvato or Bertelsmann for an interview!
#
# Your entry to the competition should be a CSV file with two columns. The first column should be a copy of "LNR", which acts as an ID number for each individual in the "TEST" partition. The second column, "RESPONSE", should be some measure of how likely each individual became a customer – this might not be a straightforward probability. As you should have found in Part 2, there is a large output class imbalance, where most individuals did not respond to the mailout. Thus, predicting individual classes and using accuracy does not seem to be an appropriate performance evaluation method. Instead, the competition will be using AUC to evaluate performance. The exact values of the "RESPONSE" column do not matter as much: only that the higher values try to capture as many of the actual customers as possible, early in the ROC curve sweep.
# Cleaned features for the Kaggle TEST partition, plus the raw CSV (for LNR ids).
mail_out_test = pd.read_pickle("mailout_test_cleaned.pkl")
mailout_test = pd.read_csv('./data/Udacity_MAILOUT_052018_TEST.csv', sep=';')
subset_features
pre_dictions = grid.predict(mail_out_test[subset_features])
# Class-1 probability is the ranking score required by the AUC-based competition.
test_prob = grid.predict_proba(mail_out_test[subset_features])
mailout_test['RESPONSE'] = test_prob[:,1:2]
# Submission format: LNR id plus the RESPONSE score.
to_submission = mailout_test[['LNR', "RESPONSE"]]
to_submission.to_csv('thrid_trial.csv', index=None)
| Part_III_Ensembling Classifiers.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/biranchi2018/My-NLP-Examples/blob/master/6.Transformers_Fill_Mask_Pipeline.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + id="c3qYOSE6e4qF" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 557} outputId="636845af-86f2-4f93-cac4-922f43fe9b11"
# !pip install transformers
# + id="natV9ep3e6j9" colab_type="code" colab={}
from transformers import pipeline
# + id="H5pSKb47fIqM" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 308, "referenced_widgets": ["7b7a3ce8b8a24b39897da0da078c0f93", "ca33b2432ef945cd96e64ecaf4eb100d", "7f17add389774c01a47f9abed4733d4c", "c5d7c3f010e949f2a9245690ee821c4e", "649d1c00beaa4ab2948ef3a8a2ebb611", "bbce3bc198cf4053a5b69bbe53fe8534", "d35b9938460f47e7880df873f40795ef", "d3b7197733e445be9c2476b3521f7104"]} outputId="10f61754-9485-48a9-dfce-c3df0d241bbd"
nlp_fill = pipeline('fill-mask')
result = nlp_fill('Do ' + nlp_fill.tokenizer.mask_token + ' every day and you will ')
result
# + id="rVsQsOb7g28y" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 261} outputId="3edfcf2a-f53a-4798-d19e-1728646c757d"
result = nlp_fill('Do it every day and you will ' + nlp_fill.tokenizer.mask_token + '!')
result
# + id="WeBPWy_OfYb_" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 261} outputId="bbc3a08c-1017-4942-ae8b-16945022e0a1"
result = nlp_fill('Do yoga every day and you will ' + nlp_fill.tokenizer.mask_token + '!')
result
# + id="3euAUYSBfsX9" colab_type="code" colab={}
# + id="QWKh9Y2lgHib" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 395} outputId="26417927-4a27-4a0a-c5c9-5095fca7031a"
# !pip install pytorch_pretrained_bert
# + id="QReOao5DgHlE" colab_type="code" colab={}
import torch
from pytorch_pretrained_bert import BertTokenizer, BertForMaskedLM
# + id="L5w6IF-JgIFx" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 33} outputId="05d20028-b02a-4ca5-a1a6-9f5c9b42a0bc"
# Underscores mark the positions BERT should fill in.
text = (
    "Do _ every day and you will _ !"
)
# Load pre-trained model with masked language model head
bert_version = 'bert-large-uncased'
model = BertForMaskedLM.from_pretrained(bert_version)
# Preprocess text
tokenizer = BertTokenizer.from_pretrained(bert_version)
tokenized_text = tokenizer.tokenize(text)
# Replace each '_' placeholder with BERT's [MASK] token and remember its index.
mask_positions = []
for i in range(len(tokenized_text)):
    if tokenized_text[i] == '_':
        tokenized_text[i] = '[MASK]'
        mask_positions.append(i)
# Predict missing words from left to right
model.eval()
for mask_pos in mask_positions:
    # Convert tokens to vocab indices
    token_ids = tokenizer.convert_tokens_to_ids(tokenized_text)
    tokens_tensor = torch.tensor([token_ids])
    # Call BERT to predict token at this position
    predictions = model(tokens_tensor)[0, mask_pos]
    predicted_index = torch.argmax(predictions).item()
    predicted_token = tokenizer.convert_ids_to_tokens([predicted_index])[0]
    # Update text; later masks see the tokens predicted for earlier ones.
    tokenized_text[mask_pos] = predicted_token
# Highlight the filled positions, then re-join word pieces ('##') for display.
for mask_pos in mask_positions:
    tokenized_text[mask_pos] = "_" + tokenized_text[mask_pos] + "_"
print(' '.join(tokenized_text).replace(' ##', ''))
# + id="cWqj7IV2gW7A" colab_type="code" colab={}
| 6.Transformers_Fill_Mask_Pipeline.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
# %matplotlib inline
import dill
import numpy as np
import matplotlib.pyplot as plt

# Load the pickled preferential-attachment simulation results; the cells
# below unpack each entry as (lin, eigen, betweeness, info).
# FIX: use a context manager — the original open('pa', 'rb') handle was
# never closed.
with open('pa', 'rb') as f:
    data = dill.load(f)
# +
# "Lin's Index" seeding strategy: unpack the per-run (adopters, seeds, rounds)
# triples, then plot each metric per run with its mean as a horizontal line.
# The *_mean / lin_* arrays are reused by the comparison cells further down.
ind = np.arange(len(data))
lin = list(map(lambda o: o[0], data))
lin_adopters = np.asarray(list(map(lambda o: o[0], lin)))
lin_seeds = np.asarray(list(map(lambda o: o[1], lin)))
lin_rounds = np.asarray(list(map(lambda o: o[2], lin)))
lin_adopters_mean = np.mean(lin_adopters)
lin_seeds_mean = np.mean(lin_seeds)
lin_rounds_mean = np.mean(lin_rounds)
print(lin_adopters_mean)
# Adopters per run
fig = plt.figure(figsize = (12,9))
ax1 = fig.add_subplot(111)
width = 0.35
ax1.bar(ind, lin_adopters, width, color='g', alpha=0.4)
ax1.plot([0, 100], [lin_adopters_mean, lin_adopters_mean], 'k-', lw=2)
# axes and labels
ax1.set_xlim(-width,len(ind)+width)
ax1.set_ylim(0,8000)
ax1.set_ylabel('Adopters')
ax1.set_title("Lin's Index")
plt.show()
# Seeds per run
fig = plt.figure(figsize = (12,9))
ax1 = fig.add_subplot(111)
width = 0.35
ax1.bar(ind, lin_seeds, width, color='r', alpha=0.4)
ax1.plot([0, 100], [lin_seeds_mean, lin_seeds_mean], 'k-', lw=2)
# axes and labels
ax1.set_xlim(-width,len(ind)+width)
ax1.set_ylim(0,105)
ax1.set_ylabel('Seeds')
plt.show()
# Rounds per run
fig = plt.figure(figsize = (12,9))
ax1 = fig.add_subplot(111)
width = 0.35
ax1.plot([0, 100], [lin_rounds_mean, lin_rounds_mean], 'k-', lw=2)
ax1.bar(ind, lin_rounds, width, color='y', alpha=0.4)
# axes and labels
ax1.set_xlim(-width,len(ind)+width)
ax1.set_ylim(0,10)
ax1.set_ylabel('Rounds')
plt.show()
# +
# Eigenvector-centrality seeding: per-run adopters / seeds / rounds with the
# run-average drawn as a horizontal line (same layout as the Lin's Index cell).
eigen = list(map(lambda o: o[1], data))
eigen_adopters = np.asarray(list(map(lambda o: o[0], eigen)))
eigen_seeds = np.asarray(list(map(lambda o: o[1], eigen)))
eigen_rounds = np.asarray(list(map(lambda o: o[2], eigen)))
eigen_adopters_mean = np.mean(eigen_adopters)
# BUG FIX: these two means were previously computed from the Lin's Index
# arrays (lin_seeds / lin_rounds), so both the mean lines below and the
# comparison bar chart further down showed the wrong strategy's averages.
eigen_seeds_mean = np.mean(eigen_seeds)
eigen_rounds_mean = np.mean(eigen_rounds)
# Adopters per run
fig = plt.figure(figsize = (12,9))
ax1 = fig.add_subplot(111)
width = 0.35
ax1.bar(ind, eigen_adopters, width, color='g', alpha=0.4)
ax1.plot([0, 100], [eigen_adopters_mean, eigen_adopters_mean], 'k-', lw=2)
# axes and labels
ax1.set_xlim(-width,len(ind)+width)
ax1.set_ylim(0,8000)
ax1.set_ylabel('Adopters')
ax1.set_title("Eigenvectors")
plt.show()
# Seeds per run
fig = plt.figure(figsize = (12,9))
ax1 = fig.add_subplot(111)
width = 0.35
ax1.plot([0, 100], [eigen_seeds_mean, eigen_seeds_mean], 'k-', lw=2)
ax1.bar(ind, eigen_seeds, width, color='r', alpha=0.4)
# axes and labels
ax1.set_xlim(-width,len(ind)+width)
ax1.set_ylim(0,105)
ax1.set_ylabel('Seeds')
plt.show()
# Rounds per run
fig = plt.figure(figsize = (12,9))
ax1 = fig.add_subplot(111)
width = 0.35
ax1.plot([0, 100], [eigen_rounds_mean, eigen_rounds_mean], 'k-', lw=2)
ax1.bar(ind, eigen_rounds, width, color='y', alpha=0.4)
# axes and labels
ax1.set_xlim(-width,len(ind)+width)
ax1.set_ylim(0,10)
ax1.set_ylabel('Rounds')
plt.show()
# +
# Betweenness-centrality seeding: same per-run plots as the two cells above.
betweeness = list(map(lambda o: o[2], data))
betweeness_adopters = np.asarray(list(map(lambda o: o[0], betweeness)))
betweeness_seeds = np.asarray(list(map(lambda o: o[1], betweeness)))
betweeness_rounds = np.asarray(list(map(lambda o: o[2], betweeness)))
betweeness_adopters_mean = np.mean(betweeness_adopters)
betweeness_seeds_mean = np.mean(betweeness_seeds)
betweeness_rounds_mean = np.mean(betweeness_rounds)
# Adopters per run
fig = plt.figure(figsize = (12,9))
ax1 = fig.add_subplot(111)
width = 0.35
ax1.plot([0, 100], [betweeness_adopters_mean, betweeness_adopters_mean], 'k-', lw=2)
ax1.bar(ind, betweeness_adopters, width, color='g', alpha=0.4)
# axes and labels
ax1.set_xlim(-width,len(ind)+width)
ax1.set_ylim(0,8000)
ax1.set_ylabel('Adopters')
ax1.set_title("Betweeness")
plt.show()
# Seeds per run
fig = plt.figure(figsize = (12,9))
ax1 = fig.add_subplot(111)
width = 0.35
ax1.plot([0, 100], [betweeness_seeds_mean, betweeness_seeds_mean], 'k-', lw=2)
ax1.bar(ind, betweeness_seeds, width, color='r', alpha=0.4)
# axes and labels
ax1.set_xlim(-width,len(ind)+width)
ax1.set_ylim(0,105)
ax1.set_ylabel('Seeds')
plt.show()
# Rounds per run
fig = plt.figure(figsize = (12,9))
ax1 = fig.add_subplot(111)
width = 0.35
ax1.plot([0, 100], [betweeness_rounds_mean, betweeness_rounds_mean], 'k-', lw=2)
ax1.bar(ind, betweeness_rounds, width, color='y', alpha=0.4)
# axes and labels
ax1.set_xlim(-width,len(ind)+width)
ax1.set_ylim(0,10)
ax1.set_ylabel('Rounds')
plt.show()
# +
# Per-run graph statistics (4th element of each data tuple).  The two extra
# red/blue bars at x=101 plot hard-coded reference values for comparison.
# NOTE: this cell rebinds `info` (shadows nothing here, but the name is also
# used by the beamforming notebook concatenated below — they are unrelated).
info = list(map(lambda o: o[3], data))
edges = np.asarray(list(map(lambda o: o[0], info)))
edges_mean = np.mean(edges)
average_clustering = np.asarray(list(map(lambda o: o[1], info)))
average_clustering_mean = np.mean(average_clustering)
timings = np.asarray(list(map(lambda o: o[2], info)))
timings_mean = np.mean(timings)
# NOTE(review): index 4 is plotted as 'd times' and index 3 as 'prob' —
# confirm this matches the order the simulation stored them in.
d = np.asarray(list(map(lambda o: o[4], info)))
d_mean = np.mean(d)
p = np.asarray(list(map(lambda o: o[3], info)))
p_mean = np.mean(p)
# Edge counts per run (reference network edge count in red at x=101)
fig = plt.figure(figsize = (12,9))
ax1 = fig.add_subplot(111)
width = 0.35
ax1.plot([0, 100], [edges_mean, edges_mean], 'k-', lw=2)
ax1.bar(ind, edges, width, color='g', alpha=0.4)
ax1.bar(101, 103689, width, color='r')
# axes and labels
ax1.set_xlim(-width,len(ind)+width + 3)
ax1.set_ylim(0,125000)
ax1.set_ylabel('Edges')
ax1.set_title("Edges")
plt.show()
# Average clustering coefficient per run (reference value in blue at x=101)
fig = plt.figure(figsize = (12,9))
ax1 = fig.add_subplot(111)
width = 0.35
ax1.plot([0, 100], [average_clustering_mean, average_clustering_mean], 'k-', lw=2)
ax1.bar(ind, average_clustering, width, color='r', alpha=0.4)
ax1.bar(101, 0.140898, width, color='b')
# axes and labels
ax1.set_xlim(-width,len(ind)+width+3)
ax1.set_ylim(0,0.15)
ax1.set_ylabel('Avg Clustering')
plt.show()
# Run times
fig = plt.figure(figsize = (12,9))
ax1 = fig.add_subplot(111)
width = 0.35
ax1.plot([0, 100], [timings_mean, timings_mean], 'k-', lw=2)
ax1.bar(ind, timings, width, color='y', alpha=0.4)
# axes and labels
ax1.set_xlim(-width,len(ind)+width)
ax1.set_ylim(0,max(timings))
ax1.set_ylabel('Timings')
plt.show()
fig = plt.figure(figsize = (12,9))
ax1 = fig.add_subplot(111)
width = 0.35
ax1.plot([0, 100], [d_mean, d_mean], 'k-', lw=2)
ax1.bar(ind, d, width, color='y', alpha=0.4)
# axes and labels
ax1.set_xlim(-width,len(ind)+width)
ax1.set_ylim(0,max(d))
ax1.set_ylabel('d times')
plt.show()
fig = plt.figure(figsize = (12,9))
ax1 = fig.add_subplot(111)
width = 0.35
ax1.plot([0, 100], [p_mean, p_mean], 'k-', lw=2)
ax1.bar(ind, p, width, color='y', alpha=0.4)
# axes and labels
ax1.set_xlim(-width,len(ind)+width)
ax1.set_ylim(0,max(p))
ax1.set_ylabel('prob')
plt.show()
# +
# Wiki-Vote ('ww') results, structured like the PA data loaded earlier, plus
# standard deviations for every strategy — all consumed by the comparison
# bar charts in the next cell.
# FIX: removed three hard-coded wv_lin/wv_eigen/wv_bet tuples that were dead
# code (immediately overwritten by the values unpacked from the data file),
# and load the file with a context manager so the handle is closed.
with open('ww', 'rb') as f:
    wv_data = dill.load(f)
wv_lin = list(map(lambda o: o[0], wv_data))
wv_lin_adopters = np.asarray(list(map(lambda o: o[0], wv_lin)))
wv_lin_seeds = np.asarray(list(map(lambda o: o[1], wv_lin)))
wv_lin_rounds = np.asarray(list(map(lambda o: o[2], wv_lin)))
wv_lin_adopters_mean = np.mean(wv_lin_adopters)
wv_lin_seeds_mean = np.mean(wv_lin_seeds)
wv_lin_rounds_mean = np.mean(wv_lin_rounds)
wv_lin_adopters_std = np.std(wv_lin_adopters)
wv_lin_seeds_std = np.std(wv_lin_seeds)
wv_lin_rounds_std = np.std(wv_lin_rounds)
wv_eigen = list(map(lambda o: o[1], wv_data))
wv_eigen_adopters = np.asarray(list(map(lambda o: o[0], wv_eigen)))
wv_eigen_seeds = np.asarray(list(map(lambda o: o[1], wv_eigen)))
wv_eigen_rounds = np.asarray(list(map(lambda o: o[2], wv_eigen)))
wv_eigen_adopters_mean = np.mean(wv_eigen_adopters)
wv_eigen_seeds_mean = np.mean(wv_eigen_seeds)
wv_eigen_rounds_mean = np.mean(wv_eigen_rounds)
wv_eigen_adopters_std = np.std(wv_eigen_adopters)
wv_eigen_seeds_std = np.std(wv_eigen_seeds)
wv_eigen_rounds_std = np.std(wv_eigen_rounds)
wv_bet = list(map(lambda o: o[2], wv_data))
wv_bet_adopters = np.asarray(list(map(lambda o: o[0], wv_bet)))
wv_bet_seeds = np.asarray(list(map(lambda o: o[1], wv_bet)))
wv_bet_rounds = np.asarray(list(map(lambda o: o[2], wv_bet)))
wv_bet_adopters_mean = np.mean(wv_bet_adopters)
wv_bet_seeds_mean = np.mean(wv_bet_seeds)
wv_bet_rounds_mean = np.mean(wv_bet_rounds)
wv_bet_adopters_std = np.std(wv_bet_adopters)
wv_bet_seeds_std = np.std(wv_bet_seeds)
wv_bet_rounds_std = np.std(wv_bet_rounds)
# Spread of the preferential-attachment results loaded earlier ('pa')
lin_adopters_std = np.std(lin_adopters)
lin_seeds_std = np.std(lin_seeds)
lin_rounds_std = np.std(lin_rounds)
betweeness_adopters_std = np.std(betweeness_adopters)
betweeness_seeds_std = np.std(betweeness_seeds)
betweeness_rounds_std = np.std(betweeness_rounds)
eigen_adopters_std = np.std(eigen_adopters)
eigen_seeds_std = np.std(eigen_seeds)
eigen_rounds_std = np.std(eigen_rounds)
# +
# Wiki (green) vs preferential attachment (red) comparison, one grouped bar
# chart per metric, with standard deviations as error bars.
# NOTE(review): set_xticklabels is called *before* set_xticks in all three
# figures — on recent matplotlib the labels should be set after the ticks
# are fixed; verify the tick labels actually appear where intended.
fig = plt.figure(figsize = (9,6))
ax1 = fig.add_subplot(111)
width = 0.35
r1=ax1.bar(0, wv_lin_adopters_mean, width, color='g', alpha=0.4, yerr=wv_lin_adopters_std)
r2=ax1.bar(0.40, lin_adopters_mean, width, color='r', alpha=0.4, yerr=lin_adopters_std)
ax1.bar(1, wv_eigen_adopters_mean, width, color='g', alpha=0.4, yerr=wv_eigen_adopters_std)
ax1.bar(1.40, eigen_adopters_mean, width, color='r', alpha=0.4, yerr=eigen_adopters_std)
ax1.bar(2, wv_bet_adopters_mean, width, color='g', alpha=0.4, yerr=wv_bet_adopters_std)
ax1.bar(2.40, betweeness_adopters_mean, width, color='r', alpha=0.4, yerr=betweeness_adopters_std)
# axes and labels
ax1.set_xlim(-width,4)
ax1.set_ylim(0,8000)
ax1.set_xticklabels(('LIN', 'EIGEN', 'BET'))
ax1.set_ylabel('Adopters')
ax1.set_title("Comparisons")
ax1.set_xticks((0.4,1.4,2.4))
ax1.legend((r1[0], r2[0]), ('Wiki', 'PA'))
plt.show()
# Seeds comparison
fig = plt.figure(figsize = (9,6))
ax1 = fig.add_subplot(111)
width = 0.35
r1=ax1.bar(0, wv_lin_seeds_mean, width, color='g', alpha=0.4, yerr=wv_lin_seeds_std)
r2=ax1.bar(0.40, lin_seeds_mean, width, color='r', alpha=0.4, yerr=lin_seeds_std)
ax1.bar(1, wv_eigen_seeds_mean, width, color='g', alpha=0.4, yerr=wv_eigen_seeds_std)
ax1.bar(1.40, eigen_seeds_mean, width, color='r', alpha=0.4, yerr=eigen_seeds_std)
ax1.bar(2, wv_bet_seeds_mean, width, color='g', alpha=0.4, yerr=wv_bet_seeds_std)
ax1.bar(2.40, betweeness_seeds_mean, width, color='r', alpha=0.4, yerr=betweeness_seeds_std)
# axes and labels
ax1.set_xlim(-width,4)
ax1.set_ylim(0,110)
ax1.set_xticklabels(('LIN', 'EIGEN', 'BET'))
ax1.set_ylabel('seeds')
ax1.set_xticks((0.4,1.4,2.4))
ax1.legend((r1[0], r2[0]), ('Wiki', 'PA'))
plt.show()
# Rounds comparison
fig = plt.figure(figsize = (9,6))
ax1 = fig.add_subplot(111)
width = 0.35
r1=ax1.bar(0, wv_lin_rounds_mean, width, color='g', alpha=0.4, yerr=wv_lin_rounds_std)
r2=ax1.bar(0.40, lin_rounds_mean, width, color='r', alpha=0.4, yerr=lin_rounds_std)
ax1.bar(1, wv_eigen_rounds_mean, width, color='g', alpha=0.4, yerr=wv_eigen_rounds_std)
ax1.bar(1.40, eigen_rounds_mean, width, color='r', alpha=0.4, yerr=eigen_rounds_std)
ax1.bar(2, wv_bet_rounds_mean, width, color='g', alpha=0.4, yerr=wv_bet_rounds_std)
ax1.bar(2.40, betweeness_rounds_mean, width, color='r', alpha=0.4, yerr=betweeness_rounds_std)
# axes and labels
ax1.set_xlim(-width,4)
ax1.set_ylim(0,10)
ax1.set_xticklabels(('LIN', 'EIGEN', 'BET'))
ax1.set_ylabel('Rounds')
ax1.set_xticks((0.4,1.4,2.4))
ax1.legend((r1[0], r2[0]), ('Wiki', 'PA'))
plt.show()
# -
| PreferentialAttachment.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Simple example of time domain beamforming
# Let's imagine that we have one or more idealised point sound sources with spherical spreading impinging upon an n-element linear microphone array.
#
# The situation is depicted in the figure below.
#
# 
#
# Note that there are three different frames of reference here.
#
# There is the absolute frame of reference where North is $0^\circ$.
# There is the frame of reference relative to the forehead end-fire of the linear array.
# For both these frames of reference, the origin is the centre of the array, and bearings
# are positive in the clockwise direction, and negative in the anti-clockwise direction.
# We use θ to denote the angle of rotation of the array, relative to absolute North.
# There is also the standard cartesian coordinate system.
# The bearing, $\Omega$, of a point given in cartesian coordinates is
# $$ \Omega = atan2(x,y) $$
# (Note the order of the elements)
#
# A relative bearing, $\phi$, is given by $\Omega - \theta$.
#
# Note that we are using point sources, not the plane-wave approximation.
#
# We have used the following helpful links in developing the code for this example:
#
# * [Time delay estimation for passive sonar signal processing](https://ieeexplore.ieee.org/document/1163560)
# * [Jupyter User guide](https://docs.bokeh.org/en/latest/docs/user_guide/jupyter.html)
# * https://en.wikipedia.org/wiki/Wavelength
# * [Equations for Plane Waves, Spherical Waves, and Gaussian Beams](https://onlinelibrary.wiley.com/doi/pdf/10.1002/9781118939154.app3)
# * https://github.com/bokeh/bokeh/blob/1.3.2/examples/howto/layouts/dashboard.py
# * [Beamforming in the Time Domain](https://onlinelibrary.wiley.com/doi/10.1002/9781119293132.ch11)
#
# For a linear array, a delay of 0 implies a beam-steer direction of broadside (perpendicular
# to the array).
#
# A delay equivalent to the spacing between two adjacent elements divided by the velocity of
# the soundwave implies a beam steering direction of either endfire (parallel to the array),
# where the endfire (front or back) is determined by the sign of the delay
#
# More generally, a delay of $\delta = \cos\phi \frac{d}{c}$ corresponds to a relative beam-steer at
# an angle $\phi$ for $\phi \in [0^o, 180^o]$.
#
# Given that we are dealing with discrete time signals, the delay cannot be chosen arbitrarily.
#
# Instead, for sample rate $S$, we have
#
# $$ \cos\phi = \frac{c}{d} \delta = \frac{c}{d} \frac{m}{S} $$
#
# where
#
# $$ -\frac{Sd}{c} < m < \frac{Sd}{c} $$
#
# Note that a linear array cannot differentiate between a negative and a positive relative bearing (left-right ambiguity).
#
# ## Import modules
# +
from ipywidgets import interactive, fixed, GridspecLayout, Layout
from IPython.display import display
import numpy as np
import seaborn as sns
from bokeh.io import push_notebook, show, output_notebook
from bokeh.plotting import figure
from bokeh.models import Label, Span, Legend, LegendItem, Range1d, LinearColorMapper, ColorBar
from bokeh.layouts import grid, column, gridplot
from bokeh.models import CustomJS, Slider, ColumnDataSource
import soundfile
from os import path
# Route all subsequent Bokeh show() output inline into the notebook.
output_notebook()
# -
# ## Set up parameters
# In order to create interactive Bokeh widgets, it is necessary to separate the plotting and the
# calculating code, so that an update function can be written, which simply updates the data in
# the plots.
#
# See:
# * https://github.com/bokeh/bokeh/blob/2.3.0/examples/howto/notebook_comms/Jupyter%20Interactors.ipynb
#
# We create a class whose single instance will hold all the information regarding this example.
# +
class Info(object):
    '''Holder for global information.

    Attributes are attached dynamically by calculate_everything() and
    plot_everything(); this class only provides readable repr/str output.
    '''
    def __repr__(self):
        ''' Print a list of the attributes of an instance of the class'''
        # FIX: join() handles the empty case correctly (returns '[]'),
        # whereas the previous manual comma-stripping returned ']' for an
        # instance with no attributes.  Non-empty output is unchanged.
        return '[' + ','.join(f"'{attr}'" for attr in self.__dict__) + ']'
    def __str__(self):
        ''' Print a list of the attributes and their values for an
        instance of the class'''
        return ''.join(f' {attr} : {value}\n'
                       for attr, value in self.__dict__.items())
info = Info()
def bearing2cart(r,theta):
    '''Convert a range r and an absolute bearing theta (degrees, clockwise
    from North) into cartesian (x, y) coordinates.'''
    angle = np.deg2rad(theta)
    return (r*np.sin(angle), r*np.cos(angle))
def cart2bearing(x,y):
    '''Convert cartesian (x, y) into (range, absolute bearing in degrees).

    The (x, y) argument order passed to arctan2 makes the bearing clockwise
    from North, matching the convention described in the notebook text.
    '''
    # FIX: np.atan2 does not exist in NumPy < 2.0 (the canonical name is
    # np.arctan2), so the original call raised AttributeError.
    return ( np.hypot(x,y), np.rad2deg(np.arctan2(x,y)) )
# +
def calculate_everything(X,# Instance of the `Info` class
                         # Theoretical parameters we wish to vary:
                         num_elements=2,
                         array_angle=0,
                         num_sources=1,
                         src_radius=24,
                         frequency=625,
                         sample_length=0.02,
                         t_0=0,
                         # Experimental parameters we wish to vary:
                         trial_type='sweep',
                         run=2,
                         # Debug parameters:
                         verbose=False,
                         ):
    '''This function calculates the temporal and spacial solution
    for a simple (idealistic) sound-field when we have a number of
    sound sources and a linear array of receivers.
    It also, optionally incorporates experimental data.

    Results are written onto X (an Info instance) as attributes
    (Arr, Src, Z, Y, W, t, m, phi, beam_widths, sum_F, beams,
    beams_real, beam_diff, RMSE, angle_difference, ...) and X is
    returned.

    Parameters:
    * X             : Info instance that accumulates all results
    * num_elements  : number of array elements (experimental data is
                      only loaded for the 2-element case)
    * array_angle   : rotation of the array relative to North (degrees)
    * num_sources   : number of point sources (placed 90 degrees apart)
    * src_radius    : source distance from the array centre (metres)
    * frequency     : source frequency (Hz)
    * sample_length : length of the analysis window (seconds)
    * t_0           : start time of the analysis window (seconds)
    * trial_type    : 'sweep' or 'burst' selects which wav file to load
    * run           : experimental run number (used in the burst path)
    * verbose       : print progress/debug information

    There is much work to be done to incorporate the experimental data
    properly. Issues at the moment include:
    * File names and directories are hardcoded
    * Sample rates for theory and experiment must match
    '''
    #TODO: make src_radius, frequency, src_angle, src_amp into lists of
    # length num_sources
    # pass
    #if True:
    #-------------------------------------------------------------------
    # Array characteristics
    #-------------------------------------------------------------------
    X.max_elements = 12
    X.array_angle = array_angle
    X.spacing = 0.08575 # metres
    X.x_location = 0 # metres
    Arr = []
    X.array_length = X.spacing*(num_elements-1)
    # Element positions, symmetric about the origin, rotated by array_angle
    for k in range(num_elements):
        x_pos = (X.array_length/2 -
                 k*X.spacing)*np.sin(np.deg2rad(array_angle))
        y_pos = (X.array_length/2 -
                 k*X.spacing)*np.cos(np.deg2rad(array_angle))
        Arr.append([x_pos,y_pos])
    #Arr = np.array(Arr)
    X.Arr = Arr
    # Two palette entries per element: theory and experiment traces
    X.col = sns.color_palette(None, 2*X.max_elements).as_hex()
    #-------------------------------------------------------------------
    # Source characteristics
    #-------------------------------------------------------------------
    amp_src = 1
    delta_angle = 90
    Src = {}
    Src['position'] = []
    Src['absolute_direction'] = []
    Src['amplitude'] = []
    Src['frequency'] = []
    for n in range(num_sources):
        src_angle = (n+1)*delta_angle
        x, y = bearing2cart(src_radius, src_angle)
        Src['position'].append([x,y]) # Source position
        Src['absolute_direction'].append(src_angle)
        Src['amplitude'].append(amp_src) # Source amplitude
        Src['frequency'].append(frequency) # Source wavelength
    X.Src = Src
    π = np.pi
    c = 340 # m/s (Speed of sound)
    X.c = c
    #-------------------------------------------------------------------
    # Spacial characteristics
    #-------------------------------------------------------------------
    N = 500
    X.x_min = -1
    X.x_max = 1
    x = np.linspace(X.x_min, X.x_max, N)
    y = np.linspace(X.x_min, X.x_max, N)
    xx, yy = np.meshgrid(x, y)
    #-------------------------------------------------------------------
    # Temporal characteristics
    #-------------------------------------------------------------------
    X.sample_rate = 96000
    X.sample_length = sample_length
    # Discrete beam-steer deltas directions
    m = np.arange(int(-np.floor(X.spacing*X.sample_rate/c)),
                  int(np.ceil(X.spacing*X.sample_rate/c)),1)
    # These deltas are the same for (-180,0) and (0,180), so for
    # convenience, we duplicate this list of discrete directions
    X.m = np.concatenate([m,np.flip(m)])
    # cos(phi) = (c/d) * (m/S) — see the derivation in the notebook text
    cos_phi = c/X.spacing*X.m/X.sample_rate
    X.phi = np.rad2deg(np.arccos(cos_phi))
    # We need to change the sign of the first half of the phi array
    X.phi[:len(X.phi)//2] = - X.phi[:len(X.phi)//2]
    # Beam edges: midpoints between adjacent steer angles, closed at ±180
    X.beam_widths = np.stack([
        np.concatenate((-180,X.phi[:-1] + np.diff(X.phi)/2), axis=None),
        np.concatenate((X.phi[:-1] + np.diff(X.phi)/2,180), axis=None)
        ])
    # We need to get enough time samples, so that we can shift
    # by num_elements * (m[-1] - m[0]) without wrapping around
    t = np.arange(t_0 + m[0]*num_elements/X.sample_rate,
                  t_0 + m[-1]*num_elements/X.sample_rate + sample_length,
                  1/X.sample_rate)
    X.t_0 = t_0
    X.t = t
    #-------------------------------------------------------------------
    # Sound-pressure level over all space at time, t_0
    #-------------------------------------------------------------------
    Z = 0*xx
    for n in range(num_sources):
        [s_x, s_y] = Src['position'][n] #location of source
        A = Src['amplitude'][n]
        f = Src['frequency'][n]
        λ = c/f # m
        # Spherical spreading: amplitude decays as 1/r from each source
        rr = np.hypot(xx-s_x,yy-s_y)
        Z = Z + A/rr*np.sin(2*π/λ*(rr - c*t_0))
        if verbose:
            print(f'Source {n+1}: A = {A}, ',
                  f'f = {f} Hz, λ = {λ:0.2f} m , T = {1/f:0.2e} s')
    X.Z = Z
    #-------------------------------------------------------------------
    # Sound-pressure level at each array element over all time, t
    # for real wav data
    #-------------------------------------------------------------------
    # Experimental recordings only exist for the 2-element configuration
    if num_elements == 2:
        if array_angle < 0:
            aa = array_angle + 360
        else:
            aa = array_angle
        if trial_type == 'sweep':
            folder = 'Pi4_Sweeps_Data_12042021/'
            #folder = f'Anechoic Chamber Data 1/Sweep Test {run}/'
            fname = f'Pi4sweepTest_2_{aa:03d}_500_3000_5.2_trimmed.wav'
            wav_file = 'Data/' + folder + fname
            real_data = True
        elif trial_type == 'burst':
            folder = 'Pi4_BurstSweeps_Data_12042021/'
            # NOTE(review): the next line immediately overwrites the folder
            # set above — presumably a leftover from switching datasets.
            folder = f'Anechoic Chamber Data 1/Sweep Test {run}/'
            fname = f'Pi4sweepBurstTest_2_{aa:03d}_500_3000_10.2_trimmed.wav'
            wav_file = 'Data/' + folder + fname
            real_data = True
        else:
            real_data = False
    else:
        real_data = False
    if real_data and path.isfile(wav_file):
        if verbose:
            print(wav_file)
        raw_wav, sr = soundfile.read(wav_file)
        if sr != X.sample_rate:
            print(f'ERROR: theoretical sample rate ({X.sample_rate}) '
                  f'does not equal experimental sample rate ({sr})')
        # Locate the section of the recording at the requested frequency
        if trial_type == 'sweep':
            offset = int(0*sr)
            start = int(5/(3000-500)*(frequency-500)*sr) + offset
        elif trial_type == 'burst':
            offset = int(0.2*sr)
            start = int(0.45*(frequency-500)/125*sr) + offset
        # NOTE(review): this uses the *global* `info`, not the X argument —
        # it works only because the notebook always passes `info` in;
        # confirm and change to X.t if this function is ever reused.
        stop = start + len(info.t)
        fun1 = raw_wav[start:stop,0]
        fun2 = raw_wav[start:stop,1]
        # Remove the DC offset from each channel
        W = [fun1 - np.mean(fun1),fun2 - np.mean(fun2)]
    else:
        if verbose:
            print("No experimental data")
        # Placeholder constant signals so the beamforming code still runs
        W = []
        for k in range(num_elements):
            W.append(0*t + 1)
        wav_file = 'NA'
    X.real_data = real_data
    X.W = W
    X.wav_file = wav_file
    #-------------------------------------------------------------------
    # Sound-pressure level at each array element over all time, t
    #-------------------------------------------------------------------
    Y = []
    for k in range(num_elements):
        F = 0*t
        for n in range(num_sources):
            [s_x, s_y] = Src['position'][n] #location of source
            A = Src['amplitude'][n]
            λ = c/Src['frequency'][n] # metres
            r = np.hypot(Arr[k][0]-s_x,Arr[k][1]-s_y)
            F = F + A/r*np.sin(2*π/λ*(r - c*t))
        Y.append(F)
    X.Y = Y
    #-------------------------------------------------------------------
    # Time-domain beamforming
    #-------------------------------------------------------------------
    # Indices of the analysis window [t_0, t_0 + sample_length] within t
    a = np.argmin(np.abs(t-t_0))
    b = np.argmin(np.abs(t-t_0-sample_length))
    # We create a delay-sum time series for each look direction by
    # shifting the time-series for each element by an amount, m*k,
    # padding with zeros
    beams = []
    beams_real = []
    sum_F = []
    sum_F_real = []
    for j in X.m:
        current_beam = 0*Y[0]
        current_beam_real = 0*Y[0]
        for k in range(num_elements):
            current_beam = (current_beam + pad(Y[k],-j*k))
            current_beam_real = (current_beam_real + pad(W[k],-j*k))
        current_beam = current_beam/num_elements
        current_beam_real = current_beam_real/num_elements
        sum_F.append(current_beam)
        sum_F_real.append(current_beam_real)
        # We only sum the section of the beam which doesn't contain
        # any padding
        beams.append(np.sum(np.abs(current_beam[a:b]))/(b-a))
        beams_real.append(np.sum(np.abs(current_beam_real[a:b]))/(b-a))
    X.sum_F = sum_F
    X.beams = beams
    X.sum_F_real = sum_F_real
    X.beams_real = beams_real
    # Calculating area difference between normalised theoretical and real beams
    X.beam_diff = np.sum(np.abs(beams/np.max(beams) -
                                beams_real/np.max(beams_real) ))
    # Calculating Root Mean Square Error between normalised theoretical and real beams
    RMSE = np.sqrt(np.mean(np.square(np.abs(beams/np.max(beams) - beams_real/np.max(beams_real)))))
    X.RMSE = RMSE*100
    #print(X.RMSE, "%")
    #-------------------------------------------------------------------
    # Peak angle difference
    #-------------------------------------------------------------------
    # The source sits broadside to an unrotated array, so the expected
    # relative peak is 90° minus the array rotation
    theoretical_peak_angle = 90 - X.array_angle
    real_peak_index = np.argmax(X.beams_real)
    real_peak_angle = -1*X.phi[real_peak_index]
    if 0 < theoretical_peak_angle < 179:
        angle_diff = abs(theoretical_peak_angle - real_peak_angle)
    elif 179 <= theoretical_peak_angle <= 270:
        # Fold angles beyond the array's unambiguous range back into it
        theoretical_peak_angle = abs(theoretical_peak_angle)
        angle_diff = abs(360 - abs(-1*(theoretical_peak_angle) - real_peak_angle))
    else:
        theoretical_peak_angle = abs(theoretical_peak_angle)
        angle_diff = abs(theoretical_peak_angle - real_peak_angle)
    X.angle_difference = round(angle_diff,2)
    return X
def pad(F,n):
    '''Shift the elements of F by n positions (positive n shifts right,
    negative n shifts left), zero-filling the vacated slots so the result
    has the same length as F.  n == 0 returns F itself, unchanged.'''
    if n > 0:
        return np.concatenate([np.zeros(n), F[:-n]])
    elif n < 0:
        return np.concatenate([F[-n:], np.zeros(-n)])
    else:
        return F
# -
# Populate the global Info instance with the default theoretical solution
# (and experimental data if the default wav file exists); verbose=True
# prints the source parameters and the data-file status.
info = calculate_everything(info,verbose=True)
# +
def plot_everything(D, look_direction=90, verbose=False):
    '''Build the three Bokeh figures — sound field (D.p), time series (D.q)
    and beam pattern (D.b) — from the quantities calculate_everything()
    stored on D, attach the renderers to D so update() can refresh them
    later, and return D.

    Parameters:
    * D              : Info instance already populated by
                       calculate_everything()
    * look_direction : relative look direction in degrees; mirrored
                       port (-) / starboard (+) lines and beam shadings
                       are drawn
    * verbose        : print the chosen beam-steer angles and beam widths
    '''
    normalise = True
    # pass
    #if True:
    num_elements = len(D.Arr)
    num_sources = len(D.Src['absolute_direction'])
    # Closest discrete beam-steer angles to the requested look directions
    li_left = np.argmin(np.abs(D.phi+look_direction))
    bs_left = D.phi[li_left] # beam steer
    li_right = np.argmin(np.abs(D.phi-look_direction))
    bs_right = D.phi[li_right] # beam steer
    if verbose:
        print(f'Look directions : {-look_direction}, {look_direction}')
        print(f'Look index      : {li_left}, {li_right}')
        print(f'beam-steer      : {bs_left:0.2f}, {bs_right:0.2f}')
        print(f'beam-widths     : {D.beam_widths[:,li_left]}, {D.beam_widths[:,li_right]}')
    title_str = (f'Look directions : ({look_direction}°,' +
                 f'{-look_direction}°), ' +
                 f'Array angle : {D.array_angle}°, ' +
                 f'f : {D.Src["frequency"][0]:0.2f} Hz, ' +
                 f'λ : {D.c/D.Src["frequency"][0]:0.2f} m, ' +
                 f'Peak difference : {D.angle_difference}°'
                )
    #-------------------------------------------------------------------
    # Spacial Window
    #-------------------------------------------------------------------
    p = figure(tooltips=[("x", "$x"), ("y", "$y"), ("value", "@image")],
               title=title_str)
    p.title.text_font_size = '8pt'
    p.xaxis.axis_label = p.yaxis.axis_label = 'Distance (m)'
    p.x_range.range_padding = p.y_range.range_padding = 0
    # Plot the sound field
    D.spacial = p.image(image=[D.Z],
                x=D.x_min, y=D.x_min,
                dw=(D.x_max-D.x_min), dh=(D.x_max-D.x_min),
                palette="Spectral11", level="image")
    # Add the look-directions to the plot
    ang = np.deg2rad(-look_direction + D.array_angle)
    x = [0,2*np.sin(ang)]
    y = [0,2*np.cos(ang)]
    D.look_beam_port = p.line(x,y,color='red',
                legend_label='Port Look Direction',line_width=2)
    ang = np.deg2rad(look_direction + D.array_angle)
    x = [0,2*np.sin(ang)]
    y = [0,2*np.cos(ang)]
    D.look_beam_starboard = p.line(x,y,color='green',
                legend_label='Starboard Look Direction', line_width=2)
    # Add the beamwidths to the plot
    ang1 = np.deg2rad(D.beam_widths[0,li_left] + D.array_angle)
    ang2 = np.deg2rad(D.beam_widths[1,li_left] + D.array_angle)
    x = [0,2*np.sin(ang1),2*np.sin(ang2)]
    y = [0,2*np.cos(ang1),2*np.cos(ang2)]
    D.beam_shade_port = p.patch(x,y,color='red',alpha=0.4,
                                legend_label='Port Look Direction',
                                line_width=0)
    ang1 = np.deg2rad(D.beam_widths[0,li_right] + D.array_angle)
    ang2 = np.deg2rad(D.beam_widths[1,li_right] + D.array_angle)
    x = [0,2*np.sin(ang1),2*np.sin(ang2)]
    y = [0,2*np.cos(ang1),2*np.cos(ang2)]
    D.beam_shade_starboard = p.patch(x,y,color='green',alpha=0.4,
                legend_label='Starboard Look Direction',line_width=0)
    # Add the source directions to the plot
    # NOTE(review): D.source_line is reassigned in the beamforming window
    # section below, so these renderers are no longer reachable through D
    # after this function returns — confirm update() does not need them.
    D.source_line = []
    for n in range(num_sources):
        D.source_line.append(p.line([0, D.Src['position'][n][0]],
                [0,D.Src['position'][n][1]],color='black',
                legend_label='Source direction(s)', line_width=2))
    # Add the sensors to the plot
    # Create one (initially empty) renderer per possible element, then fill
    # in only the first num_elements so update() can show/hide them later.
    D.sensor = []
    for k in range(D.max_elements):
        D.sensor.append(p.circle([],[],size=10,fill_color=D.col[k],
                                 legend_label=f'Sensor {k}',
                                 ))
    for k in range(D.max_elements):
        if k < num_elements:
            D.sensor[k].visible = True
            x = [D.Arr[k][0]]
            y = [D.Arr[k][1]]
            D.sensor[k].data_source.data = {'x': x, 'y': y}
        else:
            D.sensor[k].visible = False
    D.spacial_legend = p.legend.items
    p.legend.items = D.spacial_legend[:3+num_elements]
    p.grid.grid_line_width = 0.5
    p.legend.click_policy="hide"
    p.legend.background_fill_alpha = 0.5
    p.x_range.start = D.x_min
    p.x_range.end = D.x_max
    p.y_range.start = D.x_min
    p.y_range.end = D.x_max
    D.p = p
    #-------------------------------------------------------------------
    # Time Series Window
    #-------------------------------------------------------------------
    q = figure(title=title_str)
    q.title.text_font_size = '8pt'
    q.xaxis.axis_label = 'Time (s)'
    q.yaxis.axis_label = 'Normalised Pressure'
    x = D.t
    # Plot the delay-sum for the look direction
    y = D.sum_F[li_right]
    if normalise:
        y = y/np.max(y)
    D.delay_sum = q.line(x,y,color='black',
                         legend_label='Theoretical Delay-sum')
    # Add the time series to the plot
    D.time_series = []
    for k in range(D.max_elements):
        D.time_series.append(q.line([],[],color=D.col[2*k],
                                    legend_label=f'Sensor {k}',
                                    ))
    # Plot the delay-sum for the look direction
    y = D.sum_F_real[li_right]
    if normalise:
        y = y/np.max(y)
    D.delay_sum_real = q.line(x,y,color='cyan',
                              legend_label='Real Delay-sum')
    # Add the time series to the plot
    D.time_series_real = []
    for k in range(D.max_elements):
        D.time_series_real.append(q.line([],[],color=D.col[2*k+1],
                                         legend_label=f'Microphone {k}',
                                         ))
    # plot the time series for each element
    # Each element's trace is shifted by its steering delay so that signals
    # from the look direction line up visually.
    plot_max = 0
    for k in range(D.max_elements):
        if k < num_elements:
            D.time_series[k].visible = True
            y = pad(D.Y[k],-k*D.m[li_right])
            if normalise:
                y = y/np.max(y)
            D.time_series[k].data_source.data = {'x': x, 'y': y}
            plot_max = np.max([plot_max,np.max(D.Y[k])])
            if D.real_data:
                D.time_series_real[k].visible = True
                y = pad(D.W[k],-k*D.m[li_right])
                if normalise:
                    y = y/np.max(y)
                D.time_series_real[k].data_source.data = {'x': x, 'y': y}
                plot_max = np.max([plot_max,np.max(D.Y[k])])
            else:
                D.time_series_real[k].visible = False
        else:
            D.time_series[k].visible = False
    # Add a shading for the region used to calculate the beams
    x = [D.t_0, D.t_0,D.t_0+D.sample_length,D.t_0+D.sample_length]
    if normalise:
        plot_max = 1
    y = plot_max*np.array([-1,1,1,-1])
    D.sample_patch = q.patch(x,y,color='yellow',alpha=0.2,line_width=0)
    D.temporal_legend = q.legend.items
    if D.real_data:
        q.legend.items = (D.temporal_legend[:1+num_elements] +
            D.temporal_legend[1+D.max_elements:2+D.max_elements+num_elements])
        D.delay_sum_real.visible = True
    else:
        q.legend.items = D.temporal_legend[:1+num_elements]
        D.delay_sum_real.visible = False
    q.legend.click_policy="hide"
    q.legend.background_fill_alpha = 0.5
    D.q = q
    #-------------------------------------------------------------------
    # Beamforming Window
    #-------------------------------------------------------------------
    title_str = (
                 f'Array angle: {D.array_angle}°, ' +
                 f'f: {D.Src["frequency"][0]:0.2f} Hz, ' +
                 f'Peak angle difference: {D.angle_difference}°'
                )
    b = figure(title=title_str)
    b.title.text_font_size = '12pt'
    b.xaxis.axis_label = 'Steering Angle (degrees)'
    b.xaxis.axis_label_text_font_size = "12pt"
    b.yaxis.axis_label = 'Normalised Pressure'
    b.yaxis.axis_label_text_font_size = "12pt"
    b.axis.axis_label_text_font_style = 'bold'
    leg1 = 'Theoretical Beam Pattern'
    leg2 = 'Real Beam Pattern'
    x = D.phi
    y = D.beams
    if normalise:
        y = y/np.max(y)
    D.beampattern_points = b.circle(x,y, legend_label=leg1)
    D.beampattern_line = b.line(x,y, legend_label=leg1)
    D.look_line_port = Span(
        location=-look_direction,dimension='height',
        line_color='red', line_width=1)
    D.look_line_starboard = Span(
        location=look_direction,dimension='height',
        line_color='green', line_width=1)
    D.source_line = []
    for n in range(num_sources):
        D.source_line.append(Span(
            location=D.Src['absolute_direction'][n]-D.array_angle,
            dimension='height', line_color='black', line_width=2))
#     b.line([[], []], legend_label='Port Look direction',
#            line_color="red", line_width=1)
#     b.line([[], []], legend_label='Starboard Look direction',
#            line_color="green",line_width=1)
    b.line([[], []], legend_label='Source direction(s)',
           line_color="black", line_width=2)
#     b.renderers.extend([D.look_line_port,D.look_line_starboard,
#                         *D.source_line])
    b.renderers.extend([*D.source_line])
    # Add the beamwidths to the plot
    ang1 = D.beam_widths[0,li_left]
    ang2 = D.beam_widths[1,li_left]
    x = np.array([ang1, ang1 ,ang2, ang2])
    y = 1*np.array([0, np.max(D.beams),
                    np.max(D.beams), 0])
#     D.beampattern_shade_port = b.patch(x,y,color='red',alpha=0.4,
#                 legend_label='Port Look direction',line_width=0)
    ang1 = D.beam_widths[0,li_right]
    ang2 = D.beam_widths[1,li_right]
    x = np.array([ang1, ang1 ,ang2, ang2])
    y = 1*np.array([0, np.max(D.beams),
                    np.max(D.beams), 0])
#     D.beampattern_shade_starboard = b.patch(x,y,color='green',alpha=0.4,
#                 legend_label='Starboard Look direction',line_width=1)
    x = D.phi
    y = D.beams_real
    if normalise:
        y = y/np.max(y)
    D.beampattern_points_real = b.circle(x,y, legend_label=leg2,
                                         color='cyan')
    D.beampattern_line_real = b.line(x,y, legend_label=leg2,
                                     color='cyan')
    D.beamform_legend = b.legend.items
    if D.real_data:
        b.legend.items = D.beamform_legend
        D.beampattern_points_real.visible = True
        D.beampattern_line_real.visible = True
    else:
        b.legend.items = D.beamform_legend[:-1]
        D.beampattern_points_real.visible = False
        D.beampattern_line_real.visible = False
    b.legend.background_fill_alpha = 0.5
    D.b = b
    return D
# -
# Build the three linked Bokeh figures and display them side by side;
# notebook_handle=True lets later calls push new data into the live plots.
info = plot_everything(info,verbose=True)
show(gridplot([info.p, info.q, info.b], ncols=3), notebook_handle=True)
def update(D,θ,look_direction,
           num_elements,
           Source_distance,frequency,sample_length,t_0,
           trial_type,run):
    """Widget callback: recompute the simulation and refresh all three plots in place.

    Recomputes the state container D via calculate_everything, then pushes new
    data into the already-created bokeh renderers (spatial window D.p, temporal
    window D.q, beamforming window D.b) and calls push_notebook() so the
    displayed figures update without re-rendering.
    NOTE(review): assumes plot_everything() has already populated D with the
    renderer handles referenced below — confirm before calling standalone.
    """
    num_sources = 1
    D = calculate_everything(D,
                             num_elements=num_elements,
                             array_angle=θ,
                             num_sources=num_sources,
                             src_radius=Source_distance,
                             frequency=frequency,
                             sample_length=sample_length,
                             t_0=t_0,
                             trial_type=trial_type,
                             run=run,
                             )
    # Indices into D.phi closest to the (mirrored) port/starboard look directions.
    li_left = np.argmin(np.abs(D.phi+look_direction))
    bs_left = D.phi[li_left] # beam steer
    li_right = np.argmin(np.abs(D.phi-look_direction))
    bs_right = D.phi[li_right] # beam steer
    title_str = (f'Look directions : ({look_direction}°,' +
                 f'{-look_direction}°), ' +
                 f'Array angle : {D.array_angle}°, ' +
                 f'f : {D.Src["frequency"][0]:0.2f} Hz, ' +
                 f'λ : {D.c/D.Src["frequency"][0]:0.2f} m, ' +
                 f'Peak difference : {D.angle_difference}°'
                 )
    normalise = True
    #-------------------------------------------------------------------
    # Update the spacial window
    #-------------------------------------------------------------------
    D.spacial.data_source.data['image'] = [D.Z]
    # Show only the first num_elements sensors; hide the rest of the pool.
    for k in range(D.max_elements):
        if k < num_elements:
            D.sensor[k].visible = True
            x = [D.Arr[k][0]]
            y = [D.Arr[k][1]]
            D.sensor[k].data_source.data = {'x': x, 'y': y}
        else:
            D.sensor[k].visible = False
    # Port/starboard look-direction rays of length 2 from the origin.
    ang = np.deg2rad(-look_direction + D.array_angle)
    x = [0,2*np.sin(ang)]
    y = [0,2*np.cos(ang)]
    D.look_beam_port.data_source.data = {'x': x, 'y': y}
    ang = np.deg2rad(look_direction + D.array_angle)
    x = [0,2*np.sin(ang)]
    y = [0,2*np.cos(ang)]
    D.look_beam_starboard.data_source.data = {'x': x, 'y': y}
    # Update the beamwidths to the plot
    ang1 = np.deg2rad(D.beam_widths[0,li_left] + D.array_angle)
    ang2 = np.deg2rad(D.beam_widths[1,li_left] + D.array_angle)
    x = [0,2*np.sin(ang1),2*np.sin(ang2)]
    y = [0,2*np.cos(ang1),2*np.cos(ang2)]
    D.beam_shade_port.data_source.data = {'x': x, 'y': y}
    ang1 = np.deg2rad(D.beam_widths[0,li_right] + D.array_angle)
    ang2 = np.deg2rad(D.beam_widths[1,li_right] + D.array_angle)
    x = [0,2*np.sin(ang1),2*np.sin(ang2)]
    y = [0,2*np.cos(ang1),2*np.cos(ang2)]
    D.beam_shade_starboard.data_source.data = {'x': x, 'y': y}
    D.p.title.text = title_str
    D.p.legend.items = D.spacial_legend[:3+num_elements]
    #-------------------------------------------------------------------
    # Update the temporal window
    #-------------------------------------------------------------------
    plot_max = 0
    x = D.t
    for k in range(D.max_elements):
        if k < num_elements:
            D.time_series[k].visible = True
            if D.real_data:
                D.time_series_real[k].visible = True
            else:
                D.time_series_real[k].visible = False
            # pad() applies the per-element steering delay -m[li_right]*k —
            # TODO confirm pad() semantics against its definition.
            y = pad(D.Y[k],-D.m[li_right]*k)
            if normalise:
                y = y/np.max(y)
            D.time_series[k].data_source.data = {'x': x, 'y': y}
            # Track the un-normalised peak for the sample-window patch height.
            plot_max = np.max([plot_max,np.max(D.Y[k])])
            y = pad(D.W[k],-D.m[li_right]*k)
            if normalise:
                y = y/np.max(y)
            D.time_series_real[k].data_source.data = {'x': x, 'y': y}
            plot_max = np.max([plot_max,np.max(D.W[k])])
        else:
            D.time_series[k].visible = False
            D.time_series_real[k].visible = False
    y = D.sum_F[li_right]
    if normalise:
        y = y/np.max(y)
    D.delay_sum.data_source.data = {'x': x,'y': y}
    y = D.sum_F_real[li_right]
    if normalise:
        y = y/np.max(y)
    D.delay_sum_real.data_source.data = {'x': x,'y': y}
    # Add a shading for the region used to calculate the beams
    x = [D.t_0, D.t_0,D.t_0+D.sample_length,D.t_0+D.sample_length]
    if normalise:
        plot_max = 1
    y = plot_max*np.array([-1,1,1,-1])
    D.sample_patch.data_source.data = {'x': x, 'y': y}
    D.q.title.text = title_str
    if D.real_data:
        D.q.legend.items = (D.temporal_legend[:1+num_elements] +
            D.temporal_legend[1+D.max_elements:2+D.max_elements+num_elements])
        D.delay_sum_real.visible = True
    else:
        D.q.legend.items = D.temporal_legend[:1+num_elements]
        D.delay_sum_real.visible = False
    #-------------------------------------------------------------------
    # Update the Beamforming window
    #-------------------------------------------------------------------
    title_str = (
        f'Array angle: {D.array_angle}°, ' +
        f'f: {D.Src["frequency"][0]:0.2f} Hz, ' +
        f'Peak angle difference: {D.angle_difference}°'
    )
    x = D.phi
    y = D.beams
    if normalise:
        y = y/np.max(y)
    D.beampattern_points.data_source.data = {'x': x, 'y': y}
    D.beampattern_line.data_source.data = {'x': x, 'y': y}
    y = D.beams_real
    if normalise:
        y = y/np.max(y)
    D.beampattern_points_real.data_source.data = {'x': x, 'y': y}
    D.beampattern_line_real.data_source.data = {'x': x, 'y': y}
    D.look_line_port.location = -look_direction
    D.look_line_starboard.location = look_direction
    # Add the beamwidths to the plot
    ang1 = D.beam_widths[0,li_left]
    ang2 = D.beam_widths[1,li_left]
    x = np.array([ang1, ang1 ,ang2, ang2])
    y = 1*np.array([0, np.max(D.beams),
                    np.max(D.beams), 0])
    #D.beampattern_shade_port.data_source.data = {'x': x, 'y':y}
    ang1 = D.beam_widths[0,li_right]
    ang2 = D.beam_widths[1,li_right]
    x = np.array([ang1, ang1 ,ang2, ang2])
    y = 1*np.array([0, np.max(D.beams),
                    np.max(D.beams), 0])
    #D.beampattern_shade_starboard.data_source.data = {'x': x, 'y':y}
    # Wrap each source direction into (-180, 180] relative to the array.
    for n in range(num_sources):
        source_direction = (D.Src['absolute_direction'][n] - D.array_angle)
        if source_direction > 180:
            source_direction = source_direction - 360
        if source_direction < -180:
            source_direction = source_direction + 360
        D.source_line[n].location = source_direction
    D.b.title.text = title_str
    if D.real_data:
        D.b.legend.items = D.beamform_legend
        D.beampattern_points_real.visible = True
        D.beampattern_line_real.visible = True
    else:
        # Drop the "real data" legend entry when no recorded data is loaded.
        D.b.legend.items = D.beamform_legend[:-1]
        D.beampattern_points_real.visible = False
        D.beampattern_line_real.visible = False
    # Push all data-source changes to the displayed notebook figures.
    push_notebook()
# + tags=[]
# Build the slider/dropdown widgets bound to update(); D is fixed (not a widget).
w = interactive(update, D=fixed(info),
                θ=(-180, 180, 20),
                look_direction = (1,179,1),
                num_elements=(2,12,1),
                Source_distance=(1,100),
                #frequency=(500,3000,125),
                frequency=(540,3000,0.5),
                sample_length=(0.001,0.01,0.001),
                t_0=(0,0.01,0.0001),
                trial_type=['sweep','burst'],
                run=['1','2','3'],
                )
# Widen widget labels so long descriptions are not truncated.
for q in w.children:
    q.style = {'description_width': 'initial'}
# Arrange the first 10 widgets (indices 0..9) in a 3x4 grid.
grid = GridspecLayout(3, 4)
for i in range(3):
    for j in range(4):
        if i*4+j <= 9:
            grid[i, j] = w.children[i*4+j]
# + tags=[]
show(gridplot([info.p, info.q, info.b], ncols=3), notebook_handle=True)
# Preset widget values — presumably frequency, num_elements, Source_distance,
# sample_length by grid position; verify against the widget order above.
grid[1,0].value = 540
grid[0,2].value = 2
grid[0,3].value = 31
grid[1,1].value = 0.01
grid
# +
# Create an array to hold all the results - Area difference between normalised theoretical and real data beampattern
theta = np.arange(-180,200,20)
freq = np.arange(500,3000,10)
# H[k, j] = beam-pattern area difference at frequency freq[k], array angle theta[j]
H = np.zeros([len(freq),len(theta)])
RMSE_total = []
# Populate the array
for j in range(len(theta)):
    print(theta[j])
    for k in range(len(freq)):
        info = calculate_everything(info,
                                    array_angle=theta[j],
                                    frequency=freq[k],
                                    verbose=False)
        H[k,j] = info.beam_diff
        RMSE_total.append(info.RMSE) # Creating array to hold RMSE for each combination of array angle and frequency
# -
# NOTE(review): RMSE_total is rebound here from a list to a scalar mean.
RMSE_total = np.mean(RMSE_total) # Overall dataset RMSE
print("{:.2f}".format(RMSE_total), "%") # Rounding to 2 decimal places
# +
# Display the array
title_str = "Normalised area difference between theoretical and real beampattern (nonanechoic)"
# BUG FIX: title_str was defined but never passed to figure(), so this plot
# rendered without a title — unlike the matching "angle difference" plot,
# which does pass title=title_str.
p = figure(title=title_str,
           tooltips=[("x", "$x"), ("y", "$y"), ("value", "@image")],
           )
p.xaxis.axis_label = 'Angle (degrees)'
p.xaxis.axis_label_text_font_size = "12pt"
p.yaxis.axis_label = 'Frequency (Hz)'
p.yaxis.axis_label_text_font_size = "12pt"
p.axis.axis_label_text_font_style = 'bold'
p.x_range.range_padding = p.y_range.range_padding = 0
# Fixed colour range [0, 60] so plots are comparable across runs.
mapper = LinearColorMapper(palette="Spectral11", low=0, high=60)
colour_bar = ColorBar(color_mapper=mapper, location=(0,0))
# Image spans angles [-180, 180] (x) and frequencies [500, 3000] Hz (y).
Heatmap = p.image(image=[H],
                  x=-180, y=500,
                  dw=360, dh=2500,
                  palette="Spectral11", level="image")
p.add_layout(colour_bar,'right')
show(p)
# +
# Create an array to hold all the results - Peak angle difference between theoretical and real data beampattern
theta = np.arange(-180,200,20)
freq = np.arange(500,3000,10)
# I[k, j] = peak-angle difference at frequency freq[k], array angle theta[j]
I = np.zeros([len(freq),len(theta)])
# Populate the array
for j in range(len(theta)):
    print(theta[j])
    for k in range(len(freq)):
        info = calculate_everything(info,
                                    array_angle=theta[j],
                                    frequency=freq[k],
                                    verbose=False)
        I[k,j] = info.angle_difference
# +
# Display the array
title_str = "Angle difference between theoretical and real beampattern"
q = figure(tooltips=[("x", "$x"), ("y", "$y"), ("value", "@image")],
           title=title_str)
q.xaxis.axis_label = 'Angle (degrees)'
q.yaxis.axis_label = 'Frequency (Hz)'
# BUG FIX: copy-paste from the previous cell set p.y_range (the other figure)
# instead of q.y_range, leaving this figure's y-axis padding untouched.
q.x_range.range_padding = q.y_range.range_padding = 0
# Colour range follows the data extremes of I.
mapper = LinearColorMapper(palette="Spectral11", low=np.amin(I), high=np.amax(I))
colour_bar = ColorBar(color_mapper=mapper, location=(0,0))
# Image spans angles [-180, 180] (x) and frequencies [500, 3000] Hz (y).
Heatmap = q.image(image=[I],
                  x=-180, y=500,
                  dw=360, dh=2500,
                  palette="Spectral11", level="image")
q.add_layout(colour_bar,'right')
show(q)
# -
| time_domain_beamforming.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Feeding data to a model
#
# - Queue runners
# - Dataset(TFRecord)
#
# ### File I/O using queue runners
#
# - Dataset Class는 Image read 기능을 직접적으로 제공하지 않기 때문에, 하위 레벨 API 인 `queue runner`를 이용한다.
# - queue runners 는 일종의 thread runner
# - Loading 과 Session이 병렬적으로 동작한다. $\rightarrow$ Loading Delay없이 연속적인 training이 가능하다.
# - Directory 내 모든 학습 데이터(eg. Images)들을 직접 load하여 train할 경우 유용!
#
#
# #### $\sharp$ 데이터를 학습 시키는 두 가지 방법
#
# 1 . Load $\rightarrow$ Preprocessing $\rightarrow$ train
# > raw data를 로드하여 전처리를 수행 한 후 학습
#
# 2 . Load $\rightarrow$ Preprocessing $\rightarrow$ write datasets $\rightarrow$ load dataset $\rightarrow$ train
# > raw data를 미리 전처리하여 파일로 쓰고, 전처리된 데이터를 로드하여 학습
#
# > - 전처리 라인 길면, 학습의 성능이 저하 될 수 있다.
# > - 일반적으로는 전처리 파이프라인을 별도로 운영하고, 전처리된 데이터를 활용하지만, Case by Case이다.
# > - 각 데이터 포맷 별 로 to tfrecord transformer를 구축하여 트레인 시스템 운영해보겠다.
#
#
# ### File I/O using Dataset(TFRecord)
#
# - Tensorflow에서 권장하는 record dataset
# - csv와 유사하게 Dataset 클래스로 쉽게 데이터를 로드 할 수 있다.
# - 실제로 file load는 한 번만 이루어지므로 효율적인 data read가 가능하다.
# - HDFS 활용 시 매우 효과적이다.
#
# ## Implementation
# +
import tensorflow as tf
import matplotlib.pyplot as plt
import numpy as np
import os
# TF1 graph-mode: start from a clean default graph.
tf.reset_default_graph()
image_dir = "./cnn_dataset/images"
age_dir = './cnn_dataset/age.csv'
# Store the list of file paths found in the dataset directory.
imgname_list = [os.path.join(image_dir, name) for name in os.listdir(image_dir)]
# Filename queues; num_epochs=1 and shuffle=False keep images and ages aligned
# by read order — assumes age.csv rows match os.listdir order, TODO confirm.
imgname_queue = tf.train.string_input_producer(imgname_list, num_epochs=1, shuffle=False,)
agename_queue = tf.train.string_input_producer([age_dir], num_epochs=1,shuffle=False)
img_reader = tf.WholeFileReader()
age_reader = tf.TextLineReader()
# key_img: file name, raw_img: raw file contents.
key_img, raw_img = img_reader.read(imgname_queue)
key_age, raw_age = age_reader.read(agename_queue)
decoded_img = tf.image.decode_png(raw_img)
decoded_age = tf.decode_csv(raw_age, [[0]])
# gray image reshaping: average the channels, then flatten to a 1-D vector.
mean_reduced_image = tf.reduce_mean(decoded_img, axis=-1)
reshaped_img = tf.reshape(mean_reduced_image, [-1])
# +
with tf.Session() as sess :
    # string_input_producer's num_epochs counter is a *local* variable,
    # so only the local initializer is needed here.
    sess.run(tf.local_variables_initializer())
    coord = tf.train.Coordinator()
    thread = tf.train.start_queue_runners(sess, coord)
#     sess.run(tf.global_variables_initializer())
#     tf.initialize_all_variables()
    face_train_dir = './cnn_dataset/face_train.tfrecord'
    face_test_dir = './cnn_dataset/face_test.tfrecord'
    train_writer = tf.python_io.TFRecordWriter(face_train_dir)
    test_writer = tf.python_io.TFRecordWriter(face_test_dir)
    # Loop "forever"; the queue raises OutOfRangeError after one epoch,
    # which is the actual termination condition.
    for i in range(99999999):
        try :
            _age, _img, _key = sess.run([decoded_age, reshaped_img, key_img])
            example = tf.train.Example()
            example.features.feature['age'].int64_list.value.append(_age[0])
            example.features.feature['img'].int64_list.value.extend(_img.tolist())
            # First 6000 records go to the training file, the rest to test.
            if i < 6000:
                train_writer.write(example.SerializeToString())
                if i % 500 == 0:
                    print('{} train data has been written'.format(i))
            else:
                test_writer.write(example.SerializeToString())
                if i % 500 == 0 :
                    print('{} test data has been written'.format(i))
        except tf.errors.OutOfRangeError:
            # Queue exhausted: one full pass over the dataset completed.
            print('size of total dataset {}'.format(i))
            break
    train_writer.close()
    test_writer.close()
    coord.request_stop()
    coord.join(thread)
# -
| 3.FileIO_TFRecord.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python [conda env:ml2labs]
# language: python
# name: conda-env-ml2labs-py
# ---
# ### Authors:
# <NAME> 11640758
#
# <NAME> 11636785
# + [markdown] deletable=false editable=false nbgrader={"checksum": "96a94c5280b3796c2e3bda8d652f9ee1", "grade": false, "grade_id": "cell-66ada6339bda77a6", "locked": true, "schema_version": 1, "solution": false}
# # Lab 2: Inference in Graphical Models
#
# ### Machine Learning 2 (2017/2018)
#
# * The lab exercises should be made in groups of two people.
# * The deadline is Thursday, 29.04, 23:59.
# * Assignment should be submitted through BlackBoard! Make sure to include your and your teammates' names with the submission.
# * Attach the .IPYNB (IPython Notebook) file containing your code and answers. Naming of the file should be "studentid1\_studentid2\_lab#", for example, the attached file should be "12345\_12346\_lab1.ipynb". Only use underscores ("\_") to connect ids, otherwise the files cannot be parsed.
#
# Notes on implementation:
#
# * You should write your code and answers in an IPython Notebook: http://ipython.org/notebook.html. If you have problems, please ask.
# * Use __one cell__ for code and markdown answers only!
# * Put all code in the cell with the ```# YOUR CODE HERE``` comment and overwrite the ```raise NotImplementedError()``` line.
# * For theoretical questions, put your solution using LaTeX style formatting in the YOUR ANSWER HERE cell.
# * Among the first lines of your notebook should be "%pylab inline". This imports all required modules, and your plots will appear inline.
# * Large parts of you notebook will be graded automatically. Therefore it is important that your notebook can be run completely without errors and within a reasonable time limit. To test your notebook before submission, select Kernel -> Restart \& Run All.
# + [markdown] deletable=false editable=false nbgrader={"checksum": "2a0482dac028fae34551f9e75cc82068", "grade": false, "grade_id": "cell-7c4914def40aeb03", "locked": true, "schema_version": 1, "solution": false}
# ### Introduction
# In this assignment, we will implement the sum-product and max-sum algorithms for factor graphs over discrete variables. The relevant theory is covered in chapter 8 of Bishop's PRML book, in particular section 8.4. Read this chapter carefuly before continuing!
#
# We will implement sum-product and max-sum and apply it to a poly-tree structured medical diagnosis example.
#
# For this assignment you should use numpy ndarrays (constructed with np.array, np.zeros, np.ones, etc.). We need n-dimensional arrays in order to store conditional distributions with more than one conditioning variable. If you want to perform matrix multiplication on arrays, use the np.dot function; all infix operators including *, +, -, work element-wise on arrays.
# + [markdown] deletable=false editable=false nbgrader={"checksum": "b85b4dd3e9c765f6ed0d81b98161f0cd", "grade": false, "grade_id": "cell-7f9d339abedb92d3", "locked": true, "schema_version": 1, "solution": false}
# ## Part 0: Doing the math (5 points)
# We start with a set of three Bernoulli distributed variables X, Y, Z. Calculate the marginals $p(Y=1)$ and $p(Y=0)$ using only the sum and product rule where,
#
# $$
# p(X=1) = 0.05 \\\\
# p(Z=1) = 0.2 \\\\
# $$
# $$
# p(Y = 1 | X = 1, Z = 1) = 0.99 \\\\
# p(Y = 1 | X = 1, Z = 0) = 0.9 \\\\
# p(Y = 1 | X = 0, Z = 1) = 0.7 \\\\
# p(Y = 1 | X = 0, Z = 0) = 0.0001 \\\\
# $$
#
# While implementing the message passing algorithms you should be able to use the results of this question as a guidance.
# + [markdown] deletable=false nbgrader={"checksum": "cafc6734ba3ef70e630191e714f3c0b3", "grade": true, "grade_id": "cell-653065eb00c64d46", "locked": false, "points": 5, "schema_version": 1, "solution": true}
# \begin{align*}
# p(Y=1) &= p(Y=1|X=1,Z=1)\cdot p(X=1)\cdot p(Z=1) + p(Y=1|X=1,Z=0)\cdot p(X=1)\cdot p(Z=0)\\
# &+ p(Y=1|X=0,Z=1)\cdotp(X=0)\cdotp(Z=1) + p(Y=1|X=0,Z=0)\cdot p(X=0)\cdot p(Z=0) \\
# &= p(Y=1|X=1,Z=1)\cdot p(X=1)\cdot p(Z=1) + p(Y=1|X=1,Z=0)\cdot p(X=1)\cdot (1 - p(Z=1))\\
# &+ p(Y=1|X=0,Z=1)\cdot (1 - p(X=1)) \cdot p(Z=1) + p(Y=1|X=0,Z=0)\cdot (1 - p(X=1))\cdot (1 - p(Z=1)) \\
# &= 0.99 \cdot 0.05 \cdot 0.2 + 0.9 \cdot 0.05 \cdot 0.8 + 0.7 \cdot 0.95 \cdot 0.2 + 0.0001 \cdot 0.95 \cdot 0.8 = 0.178976\\
# p(Y=0) &= (1 - p(Y=1)) = (1 - 0.178976) = 0.821024
# \end{align*}
# +
# Calculate marginals for all the variables
# this is not required, we added it for double checking
import numpy as np

# Priors p(X) and p(Z); position 0 holds the probability of state 0.
px = np.array([0.95, 0.05])
pz = np.array([0.80, 0.20])
# Conditional table p(Y | X, Z), indexed as m[y, x, z].
m = np.array([
    [
        [0.9999, 0.3],
        [0.1, 0.01]
    ],
    [
        [0.0001, 0.7],
        [0.9, 0.99]
    ]
])
joint = np.zeros(m.shape)
# Product rule over every (x, y, z) configuration; np.ndindex walks the
# same order as three nested range(2) loops.
for x, y, z in np.ndindex(2, 2, 2):
    joint[x, y, z] = np.around(m[y, x, z] * px[x] * pz[z], 6)
    print(f'joint -> y={y}, x={x}, z={z}, {joint[x, y, z]}')
print('marginal for y')
print(joint.sum(0).sum(1))
print('marginal for x (given)')
print(joint.sum(1).sum(1))
print('marginal for z (given)')
print(joint.sum(0).sum(0))
# + [markdown] deletable=false editable=false nbgrader={"checksum": "28024e8ece48c8b90f5fa0fdd0c13a6e", "grade": false, "grade_id": "cell-9afa97d7b5de1808", "locked": true, "schema_version": 1, "solution": false}
# ## Part 1: The sum-product algorithm
#
# We will implement a data structure to store a factor graph and to facilitate computations on this graph. Recall that a factor graph consists of two types of nodes, factors and variables. Below you will find some classes for these node types to get you started. Carefully inspect this code and make sure you understand what it does. Step by step will update its functionality.
# + deletable=false editable=false nbgrader={"checksum": "1affe45cf22d1acc9bdb7a7994d07c6a", "grade": false, "grade_id": "cell-70843f7b3041a417", "locked": true, "schema_version": 1, "solution": false}
# %pylab inline
class Node(object):
    """
    Base-class for Nodes in a factor graph. Only instantiate sub-classes of Node.
    """
    def __init__(self, name):
        # A name for this Node, for printing purposes
        self.name = name
        # Neighbours in the graph, identified with their index in this list.
        # i.e. self.neighbours contains neighbour 0 through len(self.neighbours) - 1.
        self.neighbours = []
        # Reset the node-state (not the graph topology)
        self.reset()
    def reset(self):
        """Clear message state; the graph structure (neighbours) is kept."""
        # Incomming messages; a dictionary mapping neighbours to messages.
        # That is, it maps Node -> np.ndarray.
        self.in_msgs = {}
        # A set of neighbours for which this node has pending messages.
        # We use a python set object so we don't have to worry about duplicates.
        self.pending = set([])
    def add_neighbour(self, nb):
        # One-directional link only; the caller is responsible for the reverse link.
        self.neighbours.append(nb)
    def send_sp_msg(self, other):
        # To be implemented in subclass.
        raise Exception('Method send_sp_msg not implemented in base-class Node')
    def send_ms_msg(self, other):
        # To be implemented in subclass.
        raise Exception('Method send_ms_msg not implemented in base-class Node')
    def receive_msg(self, other, msg):
        # Store the incomming message, replacing previous messages from the same node
        self.in_msgs[other] = msg
        # TODO: add pending messages
        # self.pending.update(...)
    def __str__(self):
        # This is printed when using 'print node_instance'
        return self.name
class Variable(Node):
    def __init__(self, name, num_states):
        """
        Variable node constructor.
        Args:
            name: a name string for this node. Used for printing.
            num_states: the number of states this variable can take.
            Allowable states run from 0 through (num_states - 1).
            For example, for a binary variable num_states=2,
            and the allowable states are 0, 1.
        """
        self.num_states = num_states
        # Call the base-class constructor
        super(Variable, self).__init__(name)
    def set_observed(self, observed_state):
        """
        Set this variable to an observed state.
        Args:
            observed_state: an integer value in [0, self.num_states - 1].
        """
        # Observed state is represented as a 1-of-N variable
        # Set all-but-one states to a very low probability;
        # (a bit of a hack to avoid -inf values when taking logs)
        self.observed_state[:] = 1e-10
        self.observed_state[observed_state] = 1.0
    def set_latent(self):
        """
        Erase an observed state for this variable and consider it latent again.
        """
        # No state is preferred, so set all entries of observed_state to 1.0
        # Using this representation we need not differentiate observed and latent
        # variables when sending messages.
        self.observed_state[:] = 1.0
    def reset(self):
        # Clear messages/pending (base class) and forget any observation.
        super(Variable, self).reset()
        self.observed_state = np.ones(self.num_states)
    def marginal(self):
        """
        Compute the marginal distribution of this Variable.
        It is assumed that message passing has completed when this function is called.
        """
        # TODO: compute marginal
        return None
    def unnormalized_log_marginal(self):
        """
        Compute the unnormalized log marginal distribution of this Variable.
        It is assumed that message passing has completed when this function is called.
        """
        # TODO: compute unnormalized log marginal
        return None
    def send_sp_msg(self, other):
        # TODO: implement Variable -> Factor message for sum-product
        pass
    def send_ms_msg(self, other):
        # TODO: implement Variable -> Factor message for max-sum
        pass
class Factor(Node):
    def __init__(self, name, f, neighbours):
        """
        Factor node constructor.
        Args:
            name: a name string for this node. Used for printing
            f: a numpy.ndarray with N axes, where N is the number of neighbours.
            That is, the axes of f correspond to variables, and the index along that axes corresponds to a value of that variable.
            Each axis of the array should have as many entries as the corresponding neighbour variable has states.
            neighbours: a list of neighbouring Variables. Bi-directional connections are created.
        """
        # Call the base-class constructor
        super(Factor, self).__init__(name)
        f = np.array(f) # For convenience, accept lists (of lists (of numbers or potentially lists of ...))
        assert len(neighbours) == f.ndim, 'Factor function f should accept as many arguments as this Factor node has neighbours'
        # Validate each axis against the variable it represents, then link both ways.
        for nb_ind in range(len(neighbours)):
            nb = neighbours[nb_ind]
            assert f.shape[nb_ind] == nb.num_states, 'The range of the factor function f is invalid for input %i %s' % (nb_ind, nb.name)
            self.add_neighbour(nb)
            nb.add_neighbour(self)
        self.f = f
    def send_sp_msg(self, other):
        # TODO: implement Factor -> Variable message for sum-product
        # NOTE: replaced later at module level via Factor.send_sp_msg = factor_send_sp_msg.
        pass
    def send_ms_msg(self, other):
        # TODO: implement Factor -> Variable message for max-sum
        pass
# + [markdown] deletable=false editable=false nbgrader={"checksum": "24aa9afccd84106fdc0727236c1cb186", "grade": false, "grade_id": "cell-e0ada624a5583fce", "locked": true, "schema_version": 1, "solution": false}
# ### 1.1. Initialize the graph (5 points)
# The equations in Part 0 can be represented by a factor graph. Instantiate this graph by creating Variable and Factor instances and linking them according to the graph structure.
# To instantiate the factor graph, first create the Variable nodes and then create Factor nodes, passing a list of neighbour Variables to each Factor. To get you started, we initialize the variable node for $X$ and the factor node corresponding to the prior $p(X)$.
# + deletable=false nbgrader={"checksum": "e8e1e5033150190f7ba288f5a2b004b4", "grade": false, "grade_id": "cell-85011c26139bed2e", "locked": false, "schema_version": 1, "solution": true}
X = Variable(name='X', num_states=2)
X_prior = Factor(name='p(X)',
                 f=np.array([0.95, 0.05]),
                 neighbours=[X])
# Please stick to the naming convention used below, otherwise the test functionality throughout the lab won't work
Z = Variable(name='Z', num_states=2)
Z_prior = Factor(name='p(Z)',
                 f=np.array([0.80, 0.20]),
                 neighbours=[Z])
Y = Variable(name='Y', num_states=2)
# Conditional table indexed f[y, x, z], matching the neighbours order [Y, X, Z].
Y_cond = Factor(name='p(Y | X, Z)',
                f=np.array([
                    [[0.9999, 0.3], [0.1, 0.01]],
                    [[0.0001, 0.7], [0.9, 0.99]],
                ]),
                neighbours=[Y, X, Z])
# + deletable=false editable=false nbgrader={"checksum": "1d247840a0cad095db98f369d0531275", "grade": true, "grade_id": "cell-1bf4d184d405f181", "locked": true, "points": 5, "schema_version": 1, "solution": false}
### Test test test
# Sanity check: factor tables have one axis per neighbour, sized by num_states.
assert Z_prior.f.shape == (2,)
assert Y_cond.f.shape == (2, 2, 2)
# + [markdown] deletable=false editable=false nbgrader={"checksum": "09f3f672778ffb77e22ba8702e9a2911", "grade": false, "grade_id": "cell-af817385e2927300", "locked": true, "schema_version": 1, "solution": false}
# We will be doing a lot of marginalizations, i.e. obtain the distribution of a *single* variable from a distribution over multiple variables. Let's first write a function `marginalize` for that.
# + deletable=false nbgrader={"checksum": "d81924d48029a907c55cea627ae5b56f", "grade": false, "grade_id": "cell-74ecd6f85f99a2d5", "locked": false, "schema_version": 1, "solution": true}
# To marginalize, we sum over all variables except for the one, that we are interested in
def marginalize(P, dim):
    """Marginal of P along axis `dim`: sum out every other axis of the array."""
    other_axes = tuple(ax for ax in range(P.ndim) if ax != dim)
    return P.sum(axis=other_axes)
# Lets try it
test_P = np.random.rand(2, 3, 4)
test_P = test_P / test_P.sum() # Normalize for proper distribution
# Do the marginal distributions look like you expect?
# (Each printed marginal should itself sum to 1.)
print (marginalize(test_P, 0))
print (marginalize(test_P, 1))
print (marginalize(test_P, 2))
# + [markdown] deletable=false editable=false nbgrader={"checksum": "83cf7c2b9bf87fc4a0268c36436195f6", "grade": false, "grade_id": "cell-56015dc94e6caaf4", "locked": true, "schema_version": 1, "solution": false}
# ### 1.2 Factor to variable messages (20 points)
# Write a method `send_sp_msg(self, other)` for the Factor class, that checks if all the information required to pass a message to `Variable` `other` is present, computes the message and sends it to `other`. "Sending" here simply means calling the `receive_msg` function of the receiving node (we will implement this later). The message itself should be represented as a numpy array (np.array) whose length is equal to the number of states of the variable.
#
# In the very end of 1.2 below we overwrite `send_sp_msg(self, other)` for the Factor class. In general, this is considered bad practise but in this lab it saves us from scrolling up and down all the time.
#
# You will implement a function `send_sp_msg` that sends a message from a factor to a variable for the sum-product algorithm. This function implements Equation 8.66 from Bishop. The message should be a numpy array (np.array) whose length is equal to the number of states of the variable.
#
# It is a good idea to write a number of helper functions to implement these equations.
#
# Since it is always a good idea to include checks, you should first write a method `can_send_message` that checks whether a node `node` has all the information required to pass a message to `other`. This should work for both variable and factor nodes.
# + deletable=false nbgrader={"checksum": "8b75b5269753ba1861f6c7dbd41fa1a5", "grade": false, "grade_id": "cell-4164822e95a6b68a", "locked": false, "schema_version": 1, "solution": true}
# We check if all the messages for a receiver have been received
# If there is at least one message missing - we return False
def can_send_message(sender, receiver):
    """True iff `sender` already holds an incoming message from every
    neighbour other than `receiver` (the sum-product readiness condition)."""
    required = (nb for nb in sender.neighbours if nb != receiver)
    return all(nb in sender.in_msgs for nb in required)
# Do the results make sense?
# X has not received anything yet, while the prior factor needs no inputs.
print (can_send_message(X, X_prior))
print (can_send_message(X_prior, X))
# + [markdown] deletable=false editable=false nbgrader={"checksum": "15c446225bb0473c69caa2219f4c80bd", "grade": false, "grade_id": "cell-816ded386be1b608", "locked": true, "schema_version": 1, "solution": false}
#
# In Eq. 8.66, Bishop writes $f(x, x_1, ..., x_M)$, where $x$ corresponds to the variable that will receive the message. For now, assume this variable is the `index`-th neighbour of the factor. In order to ease implementation, it may be a good idea to write a function that rearanges the dimensions of `f` to comply with this notation, i.e. moves the dimension `index` to the front. Make sure to return a copy and keep all other dimensions in order! Use `np.moveaxis`.
# + deletable=false nbgrader={"checksum": "296bbf4e2783a797fe753c30c7ad7877", "grade": false, "grade_id": "cell-702210e632c423bd", "locked": false, "schema_version": 1, "solution": true}
def move_dimension_first(f, index):
    """Return a copy of `f` with axis `index` moved to the front.

    All other axes keep their relative order (np.moveaxis semantics).
    BUG FIX: np.moveaxis returns a *view*; the instructions above require
    a copy, so copy explicitly to avoid aliasing the factor table.
    """
    return np.moveaxis(f, index, 0).copy()
# -
# You should calculate the product of the incoming messages of all neighbours of the sending node except the receiving node. Therefore it may be useful to write a function `get_neighbour_messages` that gathers these messages in a list. If you want to not make things complicated, make sure the order of the messages in the list corresponds to the order of the variables in `neighbours`.
# + deletable=false nbgrader={"checksum": "aec4aebe3b5cc9ee35ae118c1763ff26", "grade": false, "grade_id": "cell-9e7f7986263e1543", "locked": false, "schema_version": 1, "solution": true}
# We get messages from all the neighbours except for the receiver
# We do an additional check to ensure that the message from a specified neighbour is actually in the list of incoming messages
def get_neighbour_messages(sender, receiver):
    """Collect incoming messages from all of `sender`'s neighbours except
    `receiver`, preserving the order of `sender.neighbours`. Neighbours
    without a stored message are silently skipped."""
    msgs = []
    for nb in sender.neighbours:
        if nb != receiver and nb in sender.in_msgs:
            msgs.append(sender.in_msgs[nb])
    return msgs
# -
# Before marginalizing, we need to calculate $\prod_{m\in\text{ne}(f_s)\setminus x} \mu_{x_m\rightarrow f_s}(x_m)$ (Eq. 8.66) for all possible combinations of $x_1, ..., x_M$ (values of the neighbour nodes except the receiving node). An elegant and efficient way to calculate these is using the n-way outer product of vectors. This product takes n vectors $\mathbf{x}^{(1)}, \ldots, \mathbf{x}^{(n)}$ and computes a $n$-dimensional tensor (ndarray) whose element $i_0,i_1,...,i_n$ is given by $\prod_j \mathbf{x}^{(j)}_{i_j}$. In python, this is realized as `np.multiply.reduce(np.ix_(*vectors))` for a python list `vectors` of 1D numpy arrays. Try to figure out how this statement works -- it contains some useful functional programming techniques. What should `vectors` be? Try to see the link between the result and Eq. 8.66.
# + deletable=false nbgrader={"checksum": "e5e5d3c0d6252b785451adf1c41bd5fc", "grade": false, "grade_id": "cell-67269e06a554035c", "locked": false, "schema_version": 1, "solution": true}
def calc_other_neighbour_msg_prod(sender, receiver):
    """N-way outer product of all incoming messages except `receiver`'s:
    element (i_1, ..., i_M) is the product of the messages evaluated at
    those states (the product term of Bishop Eq. 8.66)."""
    incoming = get_neighbour_messages(sender, receiver)
    open_grids = np.ix_(*incoming)
    return np.multiply.reduce(open_grids)
# + [markdown] deletable=false editable=false nbgrader={"checksum": "00442c496ed4ebda7eff1672a64c9f59", "grade": false, "grade_id": "cell-d5366f0c524b30d0", "locked": true, "schema_version": 1, "solution": false}
# Following Eq. 8.66, before marginalizing, you should calculate the product of $f(x, x_1, ..., x_M)$ with $\prod_{m\in\text{ne}(f_s)\setminus x} \mu_{x_m\rightarrow f_s}(x_m)$ for all configurations of $x, x_1, ..., x_M$. Since the second part does not depend on $x$, its tensor representations are of different dimensions. You can overcome this problem by using a loop, but preferably use numpy broadcasting by first aligning the dimensions of the tensors. You can use `np.expand_dims` or `X[None, ...]` to insert one dimension at the front. Write a function `calculate_factor` that, given `f` (which is reordered such that $x$ corresponds to the first dimension) and the (outer) product of the other neighbour messages, computes $f(x, x_1, ..., x_M) \prod_{m\in\text{ne}(f_s)\setminus x} \mu_{x_m\rightarrow f_s}(x_m)$ for all configurations of $x, x_1, ..., x_M$.
# + deletable=false nbgrader={"checksum": "3ab59a95b589ff71451036c73b289138", "grade": false, "grade_id": "cell-d34359928f3a997d", "locked": false, "schema_version": 1, "solution": true}
def calculate_factor(f_neighb_first, neighbour_msg_prod):
    """Weight the receiver-first factor table by the outer product of the
    other incoming messages. numpy broadcasting aligns trailing axes, so the
    (x1, ..., xM) product applies across every value of the leading x axis."""
    return f_neighb_first * neighbour_msg_prod
# + [markdown] deletable=false editable=false nbgrader={"checksum": "ef9e90a515d362d281f313955fbbc75f", "grade": false, "grade_id": "cell-d9b12fad1b574924", "locked": true, "schema_version": 1, "solution": false}
# Put all the pieces together to define a function `calc_sum_product_factor_to_variable_msg` that calculates Eq. 8.66.
# + deletable=false nbgrader={"checksum": "f3a2dc41ed909c8d0eb88b213ce66a99", "grade": false, "grade_id": "cell-ec6b421860d40ae0", "locked": false, "schema_version": 1, "solution": true}
def calc_sum_product_factor_to_variable_msg(factor, variable):
    """Compute the sum-product message factor -> variable (Bishop Eq. 8.66)."""
    # Bring the receiving variable's axis to the front of the factor table.
    axis = factor.neighbours.index(variable)
    reordered = move_dimension_first(factor.f, axis)
    # Combine the messages from all the *other* neighbouring variables.
    other_msgs = calc_other_neighbour_msg_prod(factor, variable)
    # Multiply into the factor and sum out everything except the receiver.
    joint = calculate_factor(reordered, other_msgs)
    return marginalize(joint, 0)
# + deletable=false nbgrader={"checksum": "c65b2bb803c0f8b53c5eb427e8bb2906", "grade": false, "grade_id": "cell-7d334ab3a40918b0", "locked": true, "schema_version": 1, "solution": false}
# Finally, we will define the send message function for you
# Note, that we have edited the cell in order to return message
# It is being printed during backward and forward pass, which greatly simplified the debugging procedure for us
def factor_send_sp_msg(self, variable):
    """Send a sum-product message from this factor to `variable` and
    return it (returning the message makes debugging prints possible)."""
    assert isinstance(variable, Variable), "Factor can only send messages to variable!"
    assert can_send_message(self, variable), "Cannot send message!"
    msg = calc_sum_product_factor_to_variable_msg(self, variable)
    # Deliver the message to the receiving variable.
    variable.receive_msg(self, msg)
    # This link is no longer pending (discard is a no-op if absent).
    self.pending.discard(variable)
    return msg
Factor.send_sp_msg = factor_send_sp_msg
# + deletable=false editable=false nbgrader={"checksum": "4ddb724608cd7205b51cbadda1a3664f", "grade": true, "grade_id": "cell-429fba08a94a3bd2", "locked": true, "points": 20, "schema_version": 1, "solution": false}
### Test test test
# Checks Factor.send_sp_msg for prior factors (no other neighbours) and
# for a conditional factor that has two other neighbours.
# message from X_prior to X
X_prior.reset()
X.reset()
X_prior.send_sp_msg(X)
assert np.allclose(list(X.in_msgs.values()), [0.95, 0.05])
# message from Z_prior to Z
Z_prior.reset()
Z.reset()
Z_prior.send_sp_msg(Z)
assert np.allclose(list(Z.in_msgs.values()), [0.8, 0.2])
# message from Y_cond to Y
Y_cond.reset()
Y.reset()
Y_cond.receive_msg(X, X_prior.f) # simulating that Y_cond received all necessary messages from X
Y_cond.receive_msg(Z, Z_prior.f) # simulating that Y_cond received all necessary messages from Z
Y_cond.send_sp_msg(Y)
assert np.allclose(list(Y.in_msgs.values()), [0.821024, 0.178976])
# + [markdown] deletable=false editable=false nbgrader={"checksum": "2fd488d52a30d4081cdbc910c280a279", "grade": false, "grade_id": "cell-2ac03f39f1b1281c", "locked": true, "schema_version": 1, "solution": false}
# ### 1.3 Variable to factor messages (10 points)
#
# Write a method `calc_sum_product_variable_to_factor_msg(variable, factor)` that computes the message to be sent to a neighbour factor by a variable.
# + deletable=false nbgrader={"checksum": "5c3c49af77b02653acaac2455d864132", "grade": false, "grade_id": "cell-93d0a0d839eb99f3", "locked": false, "schema_version": 1, "solution": true}
def calc_sum_product_variable_to_factor_msg(variable, factor):
    """Compute the sum-product message variable -> factor (Bishop Eq. 8.69)."""
    if len(variable.neighbours) == 1:
        # A leaf variable sends the constant message 1 (Bishop Eq. 8.70).
        return np.ones(variable.num_states)
    # Messages arriving at a variable all have length num_states, so an
    # elementwise product along axis 0 implements the product in Eq. 8.69.
    incoming = get_neighbour_messages(variable, factor)
    return np.prod(incoming, axis=0)
# + deletable=false nbgrader={"checksum": "bb0a564a418df48edc0a627ff3df1bc4", "grade": false, "grade_id": "cell-8ce3afc9581048ad", "locked": true, "schema_version": 1, "solution": false}
# Finally, we will define the send message function for you
def variable_send_sp_msg(self, factor):
    """Send a sum-product message from this variable to `factor`."""
    assert isinstance(factor, Factor), "Variable can only send messages to factor!"
    assert can_send_message(self, factor), "Cannot send message!"
    msg = calc_sum_product_variable_to_factor_msg(self, factor)
    # Deliver the message to the receiving factor.
    factor.receive_msg(self, msg)
    # This link is no longer pending (discard is a no-op if absent).
    self.pending.discard(factor)
    return msg
Variable.send_sp_msg = variable_send_sp_msg
# + deletable=false editable=false nbgrader={"checksum": "80a738a13d17bc6014f56719bc935ca4", "grade": true, "grade_id": "cell-0cbe0bfa635450f3", "locked": true, "points": 10, "schema_version": 1, "solution": false}
### Test test test
# Checks Variable.send_sp_msg: a variable forwards the product of the
# messages received on its other links.
Y_cond.reset()
Y.reset()
# First message from X to Y_cond
X_prior.reset()
X.reset()
X_prior.send_sp_msg(X) # simulating that X received all necessary messages
X.send_sp_msg(Y_cond)
assert np.allclose(list(Y_cond.in_msgs.values()), [0.95, 0.05])
# Second message from Z to Y_cond
Z_prior.reset()
Z.reset()
Z_prior.send_sp_msg(Z) # simulating that Z received all necessary messages
Z.send_sp_msg(Y_cond)
assert np.allclose(list(Y_cond.in_msgs.values()), [[0.95, 0.05], [0.8, 0.2]])
# + [markdown] deletable=false editable=false nbgrader={"checksum": "72a4f47ec3cd02e4e32b7d68e9d0692b", "grade": false, "grade_id": "cell-6346fc68da031348", "locked": true, "schema_version": 1, "solution": false}
# ### Testing a single forward pass and a single backward pass
# Before we go on we will make sure that messages can be passed in both directions of the graph.
# + deletable=false editable=false nbgrader={"checksum": "d50a8fd57d73f85f1c8f81be1ba13f69", "grade": false, "grade_id": "cell-f44819044a283ec9", "locked": true, "schema_version": 1, "solution": false}
# Hand-scheduled forward sweep (priors -> Y) followed by a backward sweep
# (Y -> priors); after both, every link has carried a message.
X_prior.reset()
X.reset()
Z_prior.reset()
Z.reset()
Y_cond.reset()
Y.reset()
# Forward pass
X_prior.send_sp_msg(X)
Z_prior.send_sp_msg(Z)
X.send_sp_msg(Y_cond)
Z.send_sp_msg(Y_cond)
Y_cond.send_sp_msg(Y)
assert np.allclose(list(Y.in_msgs.values()), [0.821024, 0.178976])
# Backward pass
Y.send_sp_msg(Y_cond)
Y_cond.send_sp_msg(X)
Y_cond.send_sp_msg(Z)
X.send_sp_msg(X_prior)
Z.send_sp_msg(Z_prior)
assert np.allclose(list(X.in_msgs.values()), [[0.95, 0.05],[1., 1.]])
assert np.allclose(list(Z.in_msgs.values()), [[0.8, 0.2],[1., 1.]])
# + [markdown] deletable=false editable=false nbgrader={"checksum": "27ce20145a58d03708168b08fbbfeadd", "grade": false, "grade_id": "cell-f1b2c01d81e8d007", "locked": true, "schema_version": 1, "solution": false}
# ### 1.4 Compute marginal (10 points)
# Later in this assignment, we will implement message passing schemes to do inference. Once the message passing has completed, we will want to compute local marginals for each variable.
# Write the method `marginal` for the `Variable` class, that computes a marginal distribution over that node.
# + deletable=false nbgrader={"checksum": "8002994abb8ad6ada1ad19a0b433c273", "grade": false, "grade_id": "cell-0f6434f5c5d4dc9c", "locked": false, "schema_version": 1, "solution": true}
def marginal(self):
    """Return the (unnormalized) marginal of this variable: the
    elementwise product of all incoming messages (Bishop Eq. 8.63)."""
    incoming = list(self.in_msgs.values())
    return np.multiply.reduce(incoming, 0)
Variable.marginal = marginal
# + deletable=false editable=false nbgrader={"checksum": "8dc0f9a18f2801cf7f3182732f45b52e", "grade": true, "grade_id": "cell-7f666a68bbbe81e5", "locked": true, "points": 10, "schema_version": 1, "solution": false}
### Test test test
# After a forward sweep, each variable's marginal equals the product of
# its incoming messages.
# Simulate a single forward pass
X_prior.reset()
X.reset()
Z_prior.reset()
Z.reset()
Y_cond.reset()
Y.reset()
X_prior.send_sp_msg(X)
Z_prior.send_sp_msg(Z)
X.send_sp_msg(Y_cond)
Z.send_sp_msg(Y_cond)
Y_cond.send_sp_msg(Y)
assert np.allclose(X.marginal(), [0.95, 0.05])
assert np.allclose(Z.marginal(), [0.8, 0.2])
assert np.allclose(Y.marginal(), [0.821024, 0.178976])
# + [markdown] deletable=false editable=false nbgrader={"checksum": "3d2d9dd5139b1ed6891107868305e57b", "grade": false, "grade_id": "cell-e38ca86d50f6c9c1", "locked": true, "schema_version": 1, "solution": false}
# ### 1.5 Receiving messages (10 points)
# In order to implement a message passing algorithms, we need some way to determine which nodes are ready to send messages to which neighbours. We make use of the concept of "pending messages", which is explained in Bishop (8.4.7):
# "we will say that a (variable or factor)
# node a has a message pending on its link to a node b if node a has received any
# message on any of its other links since the last time it sent a message to b. Thus,
# when a node receives a message on one of its links, this creates pending messages
# on all of its other links."
#
# Before we say node a has a pending message for node b, we **must check that node a has received all messages needed to compute the message that is to be sent to b**.
#
# Modify the function `receive_msg`, so that it updates the self.pending variable as described above. The member self.pending is a set that is to be filled with Nodes to which self has pending messages.
# + deletable=false nbgrader={"checksum": "d4256320e0a6fff7550f0a0c744ddb61", "grade": false, "grade_id": "cell-39b7b3a84cdf1e56", "locked": false, "schema_version": 1, "solution": true}
# ANSWER 1.5
def receive_msg(self, other, msg):
    """Store an incoming message and refresh the pending set.

    After recording the message from `other`, every neighbour for which
    all prerequisite messages are now available is marked as pending.
    """
    self.in_msgs[other] = msg
    for neighbour in self.neighbours:
        if can_send_message(self, neighbour):
            self.pending.add(neighbour)
Node.receive_msg = receive_msg
# + deletable=false editable=false nbgrader={"checksum": "05683e9bfc7be13ffac9b88806845c3b", "grade": true, "grade_id": "cell-42c321c98bf9b6b1", "locked": true, "points": 10, "schema_version": 1, "solution": false}
### Test test test
# receive_msg should create pending links, and sending clears them again.
X_prior.reset()
X.reset()
assert X_prior.pending == set()
X_prior.pending.add(X)
assert str(list(X_prior.pending)[0]) == X.name
X_prior.send_sp_msg(X)
assert X_prior.pending == set()
# -
# ### 1.6 Inference Engine (10 points)
# Write a function `sum_product(node_list)` that runs the sum-product message passing algorithm on a tree-structured factor graph with given nodes. The input parameter `node_list` is a list of all Node instances in the graph, which is assumed to be ordered correctly. That is, the list starts with a leaf node, which can always send a message. Subsequent nodes in `node_list` should be capable of sending a message when the pending messages of preceding nodes in the list have been sent. The sum-product algorithm then proceeds by passing over the list from beginning to end, sending all pending messages at the nodes it encounters. Then, in reverse order, the algorithm traverses the list again and again sends all pending messages at each node as it is encountered. For this to work, we initialized pending messages for all the leaf nodes, e.g. `X_prior.pending.add(X)`, where `X_prior` is a Factor node corresponding to the prior, `X` is a `Variable` node and the only connection of `X_prior` goes to `X`.
# + deletable=false nbgrader={"checksum": "0295fa5b1b0d1c2dd532935bd543056f", "grade": false, "grade_id": "cell-6a6fb2568d840c68", "locked": false, "schema_version": 1, "solution": true}
def sum_product(node_list):
    """Run one forward and one backward sweep of sum-product message
    passing over `node_list`, which is assumed to be ordered leaf-first."""
    print('Forward pass')
    for idx, node in enumerate(node_list):
        # Restrict attention to neighbours that occur later in the
        # schedule, so we never send a message back to a node that has
        # already forwarded one during the current sweep.
        downstream = set(node.neighbours) & set(node_list[idx:])
        for neigh in downstream:
            if can_send_message(node, neigh):
                msg = node.send_sp_msg(neigh)
                print(f'{node} -> {neigh}: {msg}')
    reversed_schedule = node_list[::-1]
    print('\nBackward pass')
    for idx, node in enumerate(reversed_schedule):
        # Same later-in-schedule restriction, now along the reversed list.
        upstream = set(node.neighbours) & set(reversed_schedule[idx:])
        for neigh in upstream:
            if neigh in node.pending:
                msg = node.send_sp_msg(neigh)
                print(f'{node} -> {neigh}: {msg}')
# + deletable=false editable=false nbgrader={"checksum": "261e257a05c618b4e1c5d9568f7403d5", "grade": true, "grade_id": "cell-fb8315046f8fde42", "locked": true, "points": 10, "schema_version": 1, "solution": false}
### Test test test
# End-to-end sum-product on the small X/Z/Y graph.
nodes = [X_prior, X, Z_prior, Z, Y_cond, Y]
for n in nodes:
    n.reset()
# Leaf nodes can always send, so seed their pending links.
X_prior.pending.add(X)
Z_prior.pending.add(Z)
Y.pending.add(Y_cond)
sum_product(nodes)
assert np.allclose(Y.marginal(), [0.821024, 0.178976])
# -
# ### 1.7 Observed variables and probabilistic queries (15 points)
# We will now use the inference engine to answer probabilistic queries. That is, we will set certain variables to observed values, and obtain the marginals over latent variables. We have already provided functions `set_observed` and `set_latent` that manage a member of Variable called `observed_state`. Modify the `calc_sum_product_variable_to_factor_msg` and `Variable.marginal` routines that you wrote before, to use `observed_state` so as to get the required marginals when some nodes are observed.
# + deletable=false nbgrader={"checksum": "da29573eaddaf62947074fc00f714df1", "grade": false, "grade_id": "cell-55c50fb9c1836789", "locked": false, "schema_version": 1, "solution": true}
def calc_sum_product_variable_to_factor_msg(variable, factor):
    """Sum-product message variable -> factor, taking observations into
    account: the message is masked elementwise by `observed_state`, which
    is all ones for a latent variable."""
    if len(variable.neighbours) == 1:
        # Leaf variable: constant message 1 before masking.
        base = np.ones(variable.num_states)
    else:
        incoming = get_neighbour_messages(variable, factor)
        base = np.prod(incoming, axis=0)
    return base * variable.observed_state
# + deletable=false editable=false nbgrader={"checksum": "d27bd6da3b20252943b2828ceb5bb472", "grade": true, "grade_id": "cell-13801e018b6f9d27", "locked": true, "points": 10, "schema_version": 1, "solution": false}
### Test, test, test
# After observing X=0 the outgoing message is masked: state 1 collapses to
# ~0 (the 5e-12 suggests observed_state uses a tiny epsilon, not a hard 0).
X_prior.reset()
X.reset()
Y_cond.reset()
X_prior.send_sp_msg(X)
X.set_observed(0)
X.send_sp_msg(Y_cond)
assert np.allclose(list(Y_cond.in_msgs.values()), [9.5e-01, 5.0e-12])
# + deletable=false nbgrader={"checksum": "55a9272099aabff3579cac4f41f08973", "grade": false, "grade_id": "cell-d0b91da808874e73", "locked": false, "schema_version": 1, "solution": true}
def marginal(self):
    """Normalized marginal of this variable, masked by `observed_state`
    so an observed variable puts (almost) all mass on its observed value."""
    unnorm = np.multiply.reduce(list(self.in_msgs.values()), 0) * self.observed_state
    return unnorm / unnorm.sum()
Variable.marginal = marginal
# + deletable=false editable=false nbgrader={"checksum": "fa342104c5f9d55b65ad4c202475a2c5", "grade": true, "grade_id": "cell-2f8e5d9052ed73ef", "locked": true, "points": 5, "schema_version": 1, "solution": false}
### Test, test, test
# Forward pass with both X and Z clamped to 0; Y's normalized marginal
# then concentrates on state 0.
# Simulate a single forward pass
X_prior.reset()
X.reset()
Z_prior.reset()
Z.reset()
Y_cond.reset()
Y.reset()
X.set_observed(0)
Z.set_observed(0)
X_prior.send_sp_msg(X)
Z_prior.send_sp_msg(Z)
X.send_sp_msg(Y_cond)
Z.send_sp_msg(Y_cond)
Y_cond.send_sp_msg(Y)
assert np.allclose(Y.marginal(), [9.99900000e-01, 1.00000022e-04])
# + [markdown] deletable=false editable=false nbgrader={"checksum": "97cce5eb6885d02431cf74e2affba069", "grade": false, "grade_id": "cell-2277868e4ec25b1d", "locked": true, "schema_version": 1, "solution": false}
# ## Part 2: The max-sum algorithm
# Next, we implement the max-sum algorithm as described in section 8.4.5 of Bishop.
# + [markdown] deletable=false editable=false nbgrader={"checksum": "34cda2c1859ee99af59b1e6f6609b5b6", "grade": false, "grade_id": "cell-f1dbc98349fb824f", "locked": true, "schema_version": 1, "solution": false}
# ### 2.1 Factor to variable messages (10 points)
# Implement the function `Factor.send_ms_msg` that sends Factor -> Variable messages for the max-sum algorithm. It is analogous to the `Factor.send_sp_msg` function you implemented before. Make sure it works for observed and unobserved nodes. Consider using a number of helper functions as seen in Part 1.
# -
def ms_calc_other_neighbour_msg_sum(sender, receiver):
    """Max-sum analogue of the neighbour-message product: combine the
    log-domain messages from all neighbours of `sender` except `receiver`
    into a single broadcastable sum.

    np.ix_ reshapes each 1-D message so that message i varies only along
    axis i; np.add.reduce then folds them together with addition.
    NOTE(review): np.add.reduce is applied to the *tuple* returned by
    np.ix_, which numpy must first coerce to an array. With two or more
    messages the reshaped arrays have different shapes, which recent numpy
    versions refuse to stack -- verify this on the target numpy version.
    """
    messages = get_neighbour_messages(sender, receiver)
    sum_var = np.add.reduce(np.ix_(*messages))
    return sum_var
def maximize(P, dim):
    """Take the max over every axis of P except `dim`, leaving a 1-D
    array indexed by the kept dimension (the max-sum counterpart of
    marginalize)."""
    axes_to_reduce = tuple(axis for axis in range(P.ndim) if axis != dim)
    return P.max(axis=axes_to_reduce)
def ms_calculate_factor(f_neighb_first, neighbour_msg_prod):
    """Log-domain counterpart of calculate_factor: log f plus the summed
    incoming messages, broadcast over all joint configurations."""
    return np.log(f_neighb_first) + neighbour_msg_prod
def ms_calc_max_sum_factor_to_variable_msg(factor, variable):
    """Max-sum message factor -> variable (Bishop Eq. 8.93), with the
    receiving variable's observation mask applied in the log domain."""
    axis = factor.neighbours.index(variable)
    reordered = move_dimension_first(factor.f, axis)
    # Sum of the other neighbours' log messages, masked by the receiver's
    # log observation state (zeros for a latent variable).
    other_sum = ms_calc_other_neighbour_msg_sum(factor, variable) + np.log(variable.observed_state)
    joint = ms_calculate_factor(reordered, other_sum)
    return maximize(joint, 0)
# + deletable=false nbgrader={"checksum": "c16471807291ab66dbe760bddc2f1480", "grade": false, "grade_id": "cell-389621edefd825a3", "locked": false, "schema_version": 1, "solution": true}
def factor_send_ms_msg(self, variable):
    """Send a max-sum (log-domain) message from this factor to `variable`."""
    assert isinstance(variable, Variable), "Factor can only send messages to variable!"
    assert can_send_message(self, variable), "Cannot send message!"
    msg = ms_calc_max_sum_factor_to_variable_msg(self, variable)
    # Deliver the message to the receiving variable.
    variable.receive_msg(self, msg)
    # This link is no longer pending (discard is a no-op if absent).
    self.pending.discard(variable)
    return msg
Factor.send_ms_msg = factor_send_ms_msg
# + deletable=false editable=false nbgrader={"checksum": "36080ee2c75dc501b57f32f527c65656", "grade": true, "grade_id": "cell-c6156e1505633bc4", "locked": true, "points": 10, "schema_version": 1, "solution": false}
### Test test test
# Max-sum analogues of the sum-product factor tests; all expected values
# are in the log domain.
# message from X_prior to X
X_prior.reset()
X.reset()
X_prior.send_ms_msg(X)
assert np.allclose(list(X.in_msgs.values()), [-0.05129329, -2.99573227])
# message from Z_prior to Z
Z_prior.reset()
Z.reset()
Z_prior.send_ms_msg(Z)
assert np.allclose(list(Z.in_msgs.values()), [-0.22314355, -1.60943791])
# message from Y_cond to Y
Y_cond.reset()
Y.reset()
Y_cond.receive_msg(X, X_prior.f) # simulating that Y_cond received all necessary messages from X
Y_cond.receive_msg(Z, Z_prior.f) # simulating that Y_cond received all necessary messages from Z
Y_cond.send_ms_msg(Y)
assert np.allclose(list(Y.in_msgs.values()), [1.74989999, 0.79332506])
# + [markdown] deletable=false editable=false nbgrader={"checksum": "cfbc002bdf7c6ff9b40c06ef10403001", "grade": false, "grade_id": "cell-6eff0dd8439243ca", "locked": true, "schema_version": 1, "solution": false}
# ### 2.2 Variable to factor messages (10 points)
# Implement the `Variable.send_ms_msg` function that sends Variable -> Factor messages for the max-sum algorithm.
# -
def ms_calc_max_sum_variable_to_factor_msg(variable, factor):
    """Max-sum message variable -> factor (Bishop Eq. 8.94): the sum of
    the other incoming log messages plus the log observation mask."""
    if len(variable.neighbours) == 1:
        # Leaf variable: the zero message (log of the all-ones message).
        base = np.zeros(variable.num_states)
    else:
        incoming = get_neighbour_messages(variable, factor)
        base = np.sum(incoming, axis=0)
    return base + np.log(variable.observed_state)
# + deletable=false nbgrader={"checksum": "a8e8c05dffcf10717b1e43299b96b627", "grade": false, "grade_id": "cell-183948f06650a7e0", "locked": false, "schema_version": 1, "solution": true}
def variable_send_ms_msg(self, factor):
    """Send a max-sum (log-domain) message from this variable to `factor`.

    Mirrors `variable_send_sp_msg`; the two sanity asserts below were
    present in the sum-product counterpart but missing here, so they are
    added for consistency.
    """
    assert isinstance(factor, Factor), "Variable can only send messages to factor!"
    assert can_send_message(self, factor), "Cannot send message!"
    out_msg = ms_calc_max_sum_variable_to_factor_msg(self, factor)
    # Deliver the message to the receiving factor.
    factor.receive_msg(self, out_msg)
    # Remove the pending sign if present
    self.pending.discard(factor)
    return out_msg
Variable.send_ms_msg = variable_send_ms_msg
# + deletable=false editable=false nbgrader={"checksum": "b597b2dc5896b79f1fb39862d19f2ea4", "grade": true, "grade_id": "cell-bbf08d58e9b309c9", "locked": true, "points": 10, "schema_version": 1, "solution": false}
### Test test test
# Checks Variable.send_ms_msg: a variable forwards the sum of its other
# incoming log messages.
Y_cond.reset()
Y.reset()
# First message from X to Y_cond
X_prior.reset()
X.reset()
X_prior.send_ms_msg(X) # simulating that X received all necessary messages
X.send_ms_msg(Y_cond)
assert np.allclose(list(Y_cond.in_msgs.values()), [-0.05129329, -2.99573227])
# Second message from Z to Y_cond
Z_prior.reset()
Z.reset()
Z_prior.send_ms_msg(Z) # simulating that Z received all necessary messages
Z.send_ms_msg(Y_cond)
assert np.allclose(list(Y_cond.in_msgs.values()), [[-0.05129329, -2.99573227], [-0.22314355, -1.60943791]])
# + [markdown] deletable=false editable=false nbgrader={"checksum": "d94749234e759cb16863184e99919a57", "grade": false, "grade_id": "cell-6e1b3e4ee8013fcb", "locked": true, "schema_version": 1, "solution": false}
# ### 2.3 Implement unnormalized log marginal (5 points)
# Write the method `unnormalized_log_marginal` for the `Variable` class, that computes a unnormalized log marginal distribution over that node.
# + deletable=false nbgrader={"checksum": "2a82b636f2eb1326ea6c7b4cce15f428", "grade": false, "grade_id": "cell-96de1de3d9b1da94", "locked": false, "schema_version": 1, "solution": true}
def unnormalized_log_marginal(self):
    """Unnormalized log-marginal of this variable: the sum of all
    incoming log messages plus the log observation mask (all zeros for a
    latent variable)."""
    incoming = list(self.in_msgs.values())
    return np.add.reduce(incoming, 0) + np.log(self.observed_state)
Variable.unnormalized_log_marginal = unnormalized_log_marginal
# + deletable=false editable=false nbgrader={"checksum": "977c4965c9797bd48b917039e495038a", "grade": true, "grade_id": "cell-d0c08fd4b0857dde", "locked": true, "points": 5, "schema_version": 1, "solution": false}
### Test test test
# Forward max-sum sweep; each unnormalized log-marginal is the sum of the
# incoming log messages.
# Simulate a single forward pass
X_prior.reset()
X.reset()
Z_prior.reset()
Z.reset()
Y_cond.reset()
Y.reset()
X_prior.send_ms_msg(X)
Z_prior.send_ms_msg(Z)
X.send_ms_msg(Y_cond)
Z.send_ms_msg(Y_cond)
Y_cond.send_ms_msg(Y)
assert np.allclose(X.unnormalized_log_marginal(), [-0.05129329, -2.99573227])
assert np.allclose(Z.unnormalized_log_marginal(), [-0.22314355, -1.60943791])
assert np.allclose(Y.unnormalized_log_marginal(), [-0.27453685, -2.01740615])
# + [markdown] deletable=false editable=false nbgrader={"checksum": "811de1e611d918a437d3436251ae8f56", "grade": false, "grade_id": "cell-eb540987e2fb70d6", "locked": true, "schema_version": 1, "solution": false}
# ### 2.4 Find a MAP state (10 points)
#
# Using the same message passing schedule we used for sum-product, implement the max-sum algorithm. For simplicity, we will ignore issues relating to non-unique maxima. So there is no need to implement backtracking; the MAP state is obtained by a per-node maximization (eq. 8.98 in Bishop). Make sure your algorithm works with both latent and observed variables.
# + deletable=false nbgrader={"checksum": "17fb320cc9db70730f6e7a6fd0702903", "grade": false, "grade_id": "cell-f94559550902ac16", "locked": false, "schema_version": 1, "solution": true}
def max_sum(node_list):
    """Run one forward and one backward sweep of max-sum message passing
    over `node_list`, using the same schedule as `sum_product`."""
    print('Forward pass')
    for idx, node in enumerate(node_list):
        # Only consider neighbours later in the schedule so messages are
        # never sent back to nodes that already forwarded in this sweep.
        for neigh in set(node.neighbours) & set(node_list[idx:]):
            if can_send_message(node, neigh):
                msg = node.send_ms_msg(neigh)
                print(f'{node} -> {neigh}: {np.exp(msg)}')
    reversed_schedule = node_list[::-1]
    print('\nBackward pass')
    for idx, node in enumerate(reversed_schedule):
        # Same restriction, now along the reversed schedule.
        for neigh in set(node.neighbours) & set(reversed_schedule[idx:]):
            if neigh in node.pending:
                msg = node.send_ms_msg(neigh)
                print(f'{node} -> {neigh}: {np.exp(msg)}')
# + deletable=false editable=false nbgrader={"checksum": "49674d2f737491907099ead2f661ab25", "grade": true, "grade_id": "cell-ac2d26dcb27a8d75", "locked": true, "points": 5, "schema_version": 1, "solution": false}
### Test test test: unobserved
# Full max-sum run on the unobserved graph.
nodes = [X_prior, X, Z_prior, Z, Y_cond, Y]
for n in nodes:
    n.reset()
X_prior.pending.add(X)
Z_prior.pending.add(Z)
Y.pending.add(Y_cond)
max_sum(nodes)
assert np.allclose(Y.unnormalized_log_marginal(), [-0.27453685, -2.01740615] )
# + deletable=false editable=false nbgrader={"checksum": "d00995d3a4fb74ce8ed9481349e9044d", "grade": true, "grade_id": "cell-f06b8b4bec85b4dd", "locked": true, "points": 5, "schema_version": 1, "solution": false}
### Test test test: partially observed
# Same run with Z clamped to state 1.
nodes = [X_prior, X, Z_prior, Z, Y_cond, Y]
for n in nodes:
    n.reset()
X_prior.pending.add(X)
Z_prior.pending.add(Z)
Y.pending.add(Y_cond)
Z.set_observed(1)
max_sum(nodes)
assert np.allclose(Y.unnormalized_log_marginal(), [-2.86470401, -2.01740615])
# + [markdown] deletable=false editable=false nbgrader={"checksum": "6e5b7cb4dc60f898345d08a07889f32c", "grade": false, "grade_id": "cell-f79ce013522649ed", "locked": true, "schema_version": 1, "solution": false}
# Given the max-marginals what do you have to do in order to find the global optimum? Why can we neglect the normalization constant here?
# + [markdown] deletable=false nbgrader={"checksum": "1bd8a1937fd5e81b184e36bc4c4b1b68", "grade": true, "grade_id": "cell-605e729ee910dbc8", "locked": false, "points": 5, "schema_version": 1, "solution": true}
#
# In order to find the global optimum, as mentioned in Bishop, sec. 8.4.5., we would have to do following:
# - Keep track of which values of the variables gave rise to the maximum state of each variable
# - Once we know the most probable value of the final node $x_N$, simply follow the link back to find the most probable state of node $x_{N−1}$ and so on back to the initial node $x_1$. This procedure is known as back-tracking.
#
# We can neglect the normalization constant, because we care for the maximum probability and $max (\frac{\mathbf{x}}{const}) = \frac{max(\mathbf{x})}{const}$, which means, that normalization constant won't change the final result, it will just change it proportionally.
#
# + [markdown] deletable=false editable=false nbgrader={"checksum": "80bbe06f90fa8e5f278119c02cf5081d", "grade": false, "grade_id": "cell-d0592bafd25122c0", "locked": true, "schema_version": 1, "solution": false}
# ### Part 3: Medical graph
# Now that we implemented the sum-product and max-sum algorithm. We will apply them to a poly-tree structured medical diagnosis example.
#
# + [markdown] deletable=false editable=false nbgrader={"checksum": "9c99fffc963ea4db625082689e3b7c55", "grade": false, "grade_id": "cell-f0022d301754179a", "locked": true, "schema_version": 1, "solution": false}
# ### 3.1 Initialize the graph (5 points)
#
# Convert the directed graphical model ("Bayesian Network") shown below to a factor graph. Instantiate this graph by creating Variable and Factor instances and linking them according to the graph structure.
# To instantiate the factor graph, first create the Variable nodes and then create Factor nodes, passing a list of neighbour Variables to each Factor.
# Use the following prior and conditional probabilities.
#
# $$
# p(\verb+Influenza+) = 0.05 \\\\
# p(\verb+Smokes+) = 0.2 \\\\
# $$
# $$
# p(\verb+SoreThroat+ = 1 | \verb+Influenza+ = 1) = 0.3 \\\\
# p(\verb+SoreThroat+ = 1 | \verb+Influenza+ = 0) = 0.001 \\\\
# p(\verb+Fever+ = 1| \verb+Influenza+ = 1) = 0.9 \\\\
# p(\verb+Fever+ = 1| \verb+Influenza+ = 0) = 0.05 \\\\
# p(\verb+Bronchitis+ = 1 | \verb+Influenza+ = 1, \verb+Smokes+ = 1) = 0.99 \\\\
# p(\verb+Bronchitis+ = 1 | \verb+Influenza+ = 1, \verb+Smokes+ = 0) = 0.9 \\\\
# p(\verb+Bronchitis+ = 1 | \verb+Influenza+ = 0, \verb+Smokes+ = 1) = 0.7 \\\\
# p(\verb+Bronchitis+ = 1 | \verb+Influenza+ = 0, \verb+Smokes+ = 0) = 0.0001 \\\\
# p(\verb+Coughing+ = 1| \verb+Bronchitis+ = 1) = 0.8 \\\\
# p(\verb+Coughing+ = 1| \verb+Bronchitis+ = 0) = 0.07 \\\\
# p(\verb+Wheezing+ = 1| \verb+Bronchitis+ = 1) = 0.6 \\\\
# p(\verb+Wheezing+ = 1| \verb+Bronchitis+ = 0) = 0.001 \\\\
# $$
# -
# Below we show the corresponding factor graph
# 
# + deletable=false nbgrader={"checksum": "4335e3cbfd46c7579a4b7d368c5353cc", "grade": true, "grade_id": "cell-216d36a73771fdb7", "locked": false, "points": 5, "schema_version": 1, "solution": true}
# Build the factor graph for the medical-diagnosis network.
# Convention: a conditional factor's neighbour list is [child, parent_1, ...]
# and its table is indexed f[child_state, parent_1_state, ...] with state
# order [0, 1]; the numbers come from the probability tables above.
Influenza = Variable(name='Influenza', num_states=2)
Influenza_prior = Factor(
    name='p(Influenza)',
    f=np.array([0.95, 0.05]),
    neighbours=[Influenza],
)
Smoke = Variable(name='Smoke', num_states=2)
Smoke_prior = Factor(
    name='p(Smoke)',
    f=np.array([0.80, 0.20]),
    neighbours=[Smoke],
)
Bronchitis = Variable(name='Bronchitis', num_states=2)
# f[b, i, s] = p(Bronchitis=b | Influenza=i, Smoke=s)
Bronchitis_cond = Factor(
    name='p(Bronchitis | Influenza, Smoke)',
    f=np.array([
        [[0.9999, 0.3], [0.1, 0.01]],
        [[0.0001, 0.7], [0.9, 0.99]],
    ]),
    neighbours=[Bronchitis, Influenza, Smoke],
)
SoreThroat = Variable(name='SoreThroat', num_states=2)
# f[st, i] = p(SoreThroat=st | Influenza=i)
SoreThroat_cond = Factor(
    name='p(SoreThroat | Influenza)',
    f=np.array([
        [0.999, 0.70], [0.001, 0.30],
    ]),
    neighbours=[SoreThroat, Influenza],
)
Fever = Variable(name='Fever', num_states=2)
# f[fv, i] = p(Fever=fv | Influenza=i)
Fever_cond = Factor(
    name='p(Fever | Influenza)',
    f=np.array([
        [0.95, 0.10], [0.05, 0.90],
    ]),
    neighbours=[Fever, Influenza],
)
Coughing = Variable(name='Coughing', num_states=2)
# f[c, b] = p(Coughing=c | Bronchitis=b)
Coughing_cond = Factor(
    name='p(Coughing | Bronchitis)',
    f=np.array([
        [0.93, 0.20], [0.07, 0.80],
    ]),
    neighbours=[Coughing, Bronchitis],
)
Wheezing = Variable(name='Wheezing', num_states=2)
# f[w, b] = p(Wheezing=w | Bronchitis=b)
Wheezing_cond = Factor(
    name='p(Wheezing | Bronchitis)',
    f=np.array([
        [0.999, 0.40], [0.001, 0.60],
    ]),
    neighbours=[Wheezing, Bronchitis],
)
# + [markdown] deletable=false editable=false nbgrader={"checksum": "8e443821bd73340b3e8bc3eb58db013c", "grade": false, "grade_id": "cell-65ee0e0d85c57f32", "locked": true, "schema_version": 1, "solution": false}
# ### 3.2. Sum-product algorithm (10 points)
# + [markdown] deletable=false editable=false nbgrader={"checksum": "48d2bd6a0a7dba5d29de2c939ca5c67f", "grade": false, "grade_id": "cell-fed678d3f9f5ce50", "locked": true, "schema_version": 1, "solution": false}
# 3.2.1 Run the sum-product algorithm on an unobserved graph. Print the marginal for every variable node.
# + deletable=false nbgrader={"checksum": "a7fadef738bc786fd10febd2b25f15f5", "grade": true, "grade_id": "cell-07681153d48af573", "locked": false, "points": 5, "schema_version": 1, "solution": true}
# Schedule: leaves first, interior nodes last, so every node can send as
# soon as its predecessors in the list have sent their pending messages.
nodes = [Influenza_prior, SoreThroat, Smoke_prior, Coughing,
         Wheezing, Fever, SoreThroat_cond, Fever_cond, Coughing_cond, Wheezing_cond,
         Smoke, Bronchitis, Influenza, Bronchitis_cond]
leaves = [Influenza_prior, SoreThroat, Fever, Smoke_prior, Coughing, Wheezing]
for n in nodes:
    n.reset()
# Leaf nodes can always send, so seed their (only) links as pending.
for l in leaves:
    for n in l.neighbours:
        l.pending.add(n)
sum_product(nodes)
print('\n')
for n in nodes:
    # isinstance is the idiomatic, subclass-safe alternative to type(n) == Variable.
    if isinstance(n, Variable):
        print(f'Marginal of {n} is {n.marginal()}')
# + [markdown] deletable=false editable=false nbgrader={"checksum": "7ab62562db323a454298e365f0989326", "grade": false, "grade_id": "cell-9cf9ad87ef338231", "locked": true, "schema_version": 1, "solution": false}
# 3.2.2 Rerun the sum-product algorithm on an partially observed graph, where the variable 'Influenza' is set to 0. Print the marginal for every variable node.
# + deletable=false nbgrader={"checksum": "0bf552eaf0812c495b3cb3c79f2fc431", "grade": true, "grade_id": "cell-83214d3cc28cb7db", "locked": false, "points": 5, "schema_version": 1, "solution": true}
# Partially observed query: clamp Influenza to state 0, then rerun
# sum-product and report every variable's marginal.
for n in nodes:
    n.reset()
for l in leaves:
    for n in l.neighbours:
        l.pending.add(n)
Influenza.set_observed(0)
sum_product(nodes)
print('\n')
for n in nodes:
    # isinstance is the idiomatic, subclass-safe alternative to type(n) == Variable.
    if isinstance(n, Variable):
        print(f'Marginal of {n} is {n.marginal()}')
# + [markdown] deletable=false editable=false nbgrader={"checksum": "d5c513d1f3fcdd0e153834c8cd7013e7", "grade": false, "grade_id": "cell-c2dbbc1def846b5e", "locked": true, "schema_version": 1, "solution": false}
# ### 3.3. max-sum algorithm (10 points)
# + [markdown] deletable=false editable=false nbgrader={"checksum": "e3397f5416e8ef7a287fd0e1844d5ec6", "grade": false, "grade_id": "cell-2aeb9d6f8bb0d6d9", "locked": true, "schema_version": 1, "solution": false}
# 3.3.1 Run the max_sum algorithm on an unobserved graph. Print the marginal for every variable node.
# + deletable=false nbgrader={"checksum": "350a548fee4372022d9d7b51805ae71a", "grade": true, "grade_id": "cell-e5088e48afe006b9", "locked": false, "points": 5, "schema_version": 1, "solution": true}
# Max-sum on the unobserved graph; print unnormalized log-max-marginals.
for n in nodes:
    n.reset()
for l in leaves:
    for n in l.neighbours:
        l.pending.add(n)
max_sum(nodes)
print('\n')
for n in nodes:
    # isinstance is the idiomatic, subclass-safe alternative to type(n) == Variable.
    if isinstance(n, Variable):
        print(f'Unnormalized log-marginal of {n} is {n.unnormalized_log_marginal()}')
# + [markdown] deletable=false editable=false nbgrader={"checksum": "083775bc40177f2573543fd168e86e09", "grade": false, "grade_id": "cell-7e7771be2511ebd2", "locked": true, "schema_version": 1, "solution": false}
# 3.3.2 Rerun the max_sum algorithm on an partially observed graph, where the variable 'Influenza' is set to 0. Print the marginal for every variable node.
# + deletable=false nbgrader={"checksum": "b467a95a6dbe003435c78b970ed0e318", "grade": true, "grade_id": "cell-f74e50a37f4e31b9", "locked": false, "points": 5, "schema_version": 1, "solution": true}
# Max-sum with Influenza clamped to state 0.
for n in nodes:
    n.reset()
for l in leaves:
    for n in l.neighbours:
        l.pending.add(n)
Influenza.set_observed(0)
max_sum(nodes)
print('\n')
for n in nodes:
    # isinstance is the idiomatic, subclass-safe alternative to type(n) == Variable.
    if isinstance(n, Variable):
        print(f'Unnormalized log-marginal of {n} is {n.unnormalized_log_marginal()}')
| part2/labs/11636785_11640758_lab2.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import tensorflow as tf
from tensorflow import keras
import matplotlib.pyplot as plt
import pathlib
import numpy as np
import math
import glob
import os
# %matplotlib inline
print(tf.__version__)
# # config
# Training hyper-parameters and dataset locations.
EPOCHS = 10
BATCH_SIZE = 16
# Target input resolution fed to the network (224 is the common ImageNet
# backbone size; 256 was an earlier choice).
# NOTE(review): "Hight" is a typo for "Height"; left unchanged because the
# name is referenced by the preprocessing functions below.
Width=224 #256
Hight=224
dataset_dir = "./data/dc_3000/"
train_dir = dataset_dir + "train"
valid_dir = dataset_dir + "val"
# Directory where trained weights will be saved.
save_model_dir = "./weight/"
# # data
# Collect every .jpg under <split>/<label>/ for both splits.
train_image_path = glob.glob(train_dir+"/*/*.jpg")
valid_image_path = glob.glob(valid_dir+"/*/*.jpg")
# print(train_image_path)
# print(valid_image_path)
# Sanity check: number of images found per split.
print(len(train_image_path))
print(len(valid_image_path))
def load_preprocess_image(path, label):
    """Load one training image, apply flip augmentation, and scale to [0, 1].

    Returns the (Width, Hight, 3) float32 image tensor and the label
    reshaped to a length-1 tensor.
    """
    raw = tf.io.read_file(path)
    img = tf.image.decode_jpeg(raw, channels=3)
    img = tf.image.resize(img, [Width, Hight])
    # Data augmentation: random horizontal and vertical flips.
    # (Random crop / brightness / contrast were tried and left disabled.)
    img = tf.image.random_flip_left_right(img)
    img = tf.image.random_flip_up_down(img)
    img = tf.cast(img, tf.float32) / 255
    return img, tf.reshape(label, [1])
def load_preprocess_valid_image(path, label):
    """Load one validation image (no augmentation) and scale it to [0, 1]."""
    raw = tf.io.read_file(path)
    img = tf.image.decode_jpeg(raw, channels=3)
    img = tf.image.resize(img, [Width, Hight])
    img = tf.cast(img, tf.float32) / 255
    return img, tf.reshape(label, [1])
# ### Make traindata && label
# ### train data
# Derive the integer class label from the parent directory name.
# NOTE(review): splitting on "\\" assumes Windows-style path separators from
# glob; on POSIX systems this would need "/" instead -- TODO confirm platform.
train_image_label = [int((p.split("\\")[1]))for p in train_image_path] #only label
#train_image_label = [tf.one_hot(int((p.split("\\")[1])),3,on_value=1,off_value=None,axis=0)for p in train_image_path] #one-hot
train_image_label = tf.cast(train_image_label,tf.float32)
print(train_image_label)
# +
train_count = len(train_image_path)
train_image_ds = tf.data.Dataset.from_tensor_slices((train_image_path,train_image_label)) # zip paths with labels
train_image_ds = train_image_ds.map(load_preprocess_image) # decode + augment each sample
train_image_ds = train_image_ds.shuffle(train_count).batch(BATCH_SIZE)# shuffle && batch
# -
train_image_ds
# +
# for img,label in train_image_ds.take(1):
#     print(label)
# -
# ### valid data
# +
valid_image_label = [int((p.split("\\")[1]))for p in valid_image_path] #only label
# valid_image_label = [tf.one_hot(int((p.split("\\")[1])),3,on_value=1,off_value=None,axis=0)for p in valid_image_path] #one-hot
valid_image_label = tf.cast(valid_image_label,tf.float32)
valid_count = len(valid_image_path)
valid_image_ds = tf.data.Dataset.from_tensor_slices((valid_image_path,valid_image_label)) # zip paths with labels
valid_image_ds = valid_image_ds.map(load_preprocess_valid_image)# decode only (no augmentation)
valid_image_ds = valid_image_ds.shuffle(valid_count).batch(BATCH_SIZE) # shuffle && batch
valid_image_ds
# +
# for img,label in valid_image_ds.take(1):
# print(label)
# -
# ### Model
# model = keras.Sequential([
# tf.keras.layers.Conv2D(64, (3, 3), input_shape=(256, 256, 3), activation='relu'),
# tf.keras.layers.MaxPooling2D(),
# tf.keras.layers.Conv2D(128, (3, 3), activation='relu'),
# tf.keras.layers.MaxPooling2D(),
# tf.keras.layers.GlobalAveragePooling2D(),
# tf.keras.layers.Dense(128, activation='relu'),
# tf.keras.layers.Dense(3),
# tf.keras.layers.Activation('softmax', dtype='float32')
# ])
#[244,244]
# import resnet_model
# model = resnet_model.resnet50(
# num_classes = 3,
# use_l2_regularizer = True)
# Build the classifier from the project-local VGG16 implementation.
# The argument is the number of output classes (3).
from models.vgg16 import VGG16
model = VGG16(3)
model.summary()
# +
# #[244,244]
#from resnet_model3 import ResNet18
#model = ResNet18([2, 2, 2, 2])
# from models.resnet_model3 import resnet_18
# model=resnet_18()
# model.compile(optimizer='adam',
# loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=False),
# metrics=['sparse_categorical_accuracy'])
# +
# from models.resnet import resnet_18
# model = resnet_18()
# model.build(input_shape=(None, 224, 224, 3))
# model.summary()
# -
# ### model test
# Sanity-check the (still untrained) model on a single batch.
imgs, labels = next(iter(train_image_ds))
pred = model(imgs)
print(pred.shape)
# Bug fix: the original nested a print() call inside another print(),
# which evaluates to None and printed a stray 'None' after each line.
print(pred[0])
print(pred[0].numpy())
print(labels[0])
print(labels[0].numpy())
# print(np.array([p[0].numpy() for p in tf.cast(pred>0,tf.int32)]))
# print(np.array([l[0].numpy() for l in labels]))
# ### train
# +
# define loss and optimizer
# define loss and optimizer
# NOTE(review): SparseCategoricalCrossentropy defaults to from_logits=False,
# so VGG16(3) is presumably ending in a softmax layer -- confirm in models/vgg16.
loss_object = tf.keras.losses.SparseCategoricalCrossentropy()
optimizer = tf.keras.optimizers.Adadelta()
# Streaming metrics, accumulated across batches until reset_states() is called.
train_loss = tf.keras.metrics.Mean(name='train_loss')
train_accuracy = tf.keras.metrics.SparseCategoricalAccuracy(name='train_accuracy')
valid_loss = tf.keras.metrics.Mean(name='valid_loss')
valid_accuracy = tf.keras.metrics.SparseCategoricalAccuracy(name='valid_accuracy')
# +
@tf.function
def train_step(images, labels):
    """Run one optimization step on a batch and update the training metrics."""
    with tf.GradientTape() as tape:
        predictions = model(images, training=True)
        loss = loss_object(y_true=labels, y_pred=predictions)
    gradients = tape.gradient(loss, model.trainable_variables)
    optimizer.apply_gradients(grads_and_vars=zip(gradients, model.trainable_variables))
    train_loss(loss)
    train_accuracy(labels, predictions)
@tf.function
def valid_step(images, labels):
    """Evaluate one batch (no gradient update) and update validation metrics."""
    predictions = model(images, training=False)
    v_loss = loss_object(labels, predictions)
    valid_loss(v_loss)
    valid_accuracy(labels, predictions)
# +
# optimizer = tf.keras.optimizers.Adadelta() #Adam() SGD()
# loss_object =tf.keras.losses.SparseCategoricalCrossentropy() #BinaryCrossentropy()
# train_loss = tf.keras.metrics.Mean("train_loss")
# train_accuracy = tf.keras.metrics.SparseCategoricalCrossentropy() #Accuracy() CategoricalAccuracy() SparseCategoricalCrossentropy()
# valid_loss = tf.keras.metrics.Mean("valid_loss")
# valid_accuracy = tf.keras.metrics.SparseCategoricalCrossentropy() #Accuracy() CategoricalAccuracy() SparseCategoricalCrossentropy()
# +
# def train_step(images, labels):
# with tf.GradientTape() as t:
# pred = model(images,training=True)
# #loss_step = tf.keras.losses.BinaryCrossentropy(from_logits=True)(labels, pred)
# loss_step = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=False)(labels, pred) # 这里参数的意思是没有激活
# grads = t.gradient(loss_step, model.trainable_variables)
# optimizer.apply_gradients(zip(grads, model.trainable_variables))
# train_loss(loss_step)
# train_accuracy(labels,pred)
# #print(labels)
# #print(pred)
# def valid_step(images, labels):
# pred = model(images,training=False)
# #loss_step = tf.keras.losses.BinaryCrossentropy(from_logits=True)(labels, pred) # 这里参数的意思是没有激活
# loss_step = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=False)(labels, pred) # 这里参数的意思是没有激活
# valid_loss(loss_step)
# valid_accuracy(labels,pred)
# #print(labels)
# #print(pred)
# -
# 定义四个个空列表进行每一个epoch的记录
# Train for EPOCHS epochs, recording per-epoch metrics for later analysis.
EPOCHS=1000
train_loss_result = []
train_acc_result = []
valid_loss_result = []
valid_acc_result = []
for epoch in range(EPOCHS):
    for imgs_, labels_ in train_image_ds:
        train_step(imgs_, labels_)
    train_loss_result.append(train_loss.result())
    train_acc_result.append(train_accuracy.result())
    for imgs_, labels_ in valid_image_ds:
        valid_step(imgs_, labels_)
    valid_loss_result.append(valid_loss.result())
    valid_acc_result.append(valid_accuracy.result())
    print(f'Train Epoch:{epoch+1};loss{train_loss.result():.3f},accuracy:{train_accuracy.result():.3f},valid_loss:{valid_loss.result():.3f},valid_accuracy:{valid_accuracy.result():.3f};')
    # Bug fix: the original reset valid_accuracy twice and never reset
    # train_accuracy, so the reported training accuracy accumulated
    # across epochs instead of being a per-epoch value.
    train_loss.reset_states()
    train_accuracy.reset_states()
    valid_loss.reset_states()
    valid_accuracy.reset_states()
# +
# start training
# Alternative training loop (duplicates the loop above but resets all four
# metrics at the start of each epoch and prints per-step progress).
EPOCHS=1000
for epoch in range(EPOCHS):
    train_loss.reset_states()
    train_accuracy.reset_states()
    valid_loss.reset_states()
    valid_accuracy.reset_states()
    step = 0
    for images, labels in train_image_ds:
        step += 1
        train_step(images, labels)
        print("Epoch: {}/{}, step: {}/{}, loss: {:.5f}, accuracy: {:.5f}".format(epoch + 1,
                                                                                 EPOCHS,
                                                                                 step,
                                                                                 math.ceil(train_count / BATCH_SIZE),
                                                                                 train_loss.result(),
                                                                                 train_accuracy.result()))
    for valid_images, valid_labels in valid_image_ds:
        valid_step(valid_images, valid_labels)
    print("Epoch: {}/{}, train loss: {:.5f}, train accuracy: {:.5f}, "
          "valid loss: {:.5f}, valid accuracy: {:.5f}".format(epoch + 1,
                                                              EPOCHS,
                                                              train_loss.result(),
                                                              train_accuracy.result(),
                                                              valid_loss.result(),
                                                              valid_accuracy.result()))
# +
# model.save_weights(filepath=save_model_dir, save_format='tf')
# Save the full model as HDF5.
# NOTE(review): the filename says 'fashion_mnist' but this notebook trains on
# the dc_3000 image set -- presumably a leftover name; confirm before reuse.
model_path = save_model_dir+"/fashion_mnist.h5"
print(model_path)
model.save(model_path)
# -
| tensorflow2.1/helloworld/DCF-Onehot.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Introduction
#
# Author Attribution is a classic NLP problem and a type of text classification problem. Authorship attribution is a well-studied problem which led to the field of [Stylometry](https://en.wikipedia.org/wiki/Stylometry). Here, given a set of documents from certain authors, we train a model to understand each author's style and use this to identify the author of unknown documents. As with many other NLP problems, it has benefited greatly from the increase in available computer power, data and advanced machine learning techniques. All of these make authorship attribution a natural candidate for the use of deep learning (DL). In particular, we can benefit from DL's ability to automatically extract the relevant features for a specific problem.
#
# In this lab we will focus on the following:
# 1. Extract character-level features from the text of each author (to capture the author's style)
# 2. Using these features for building a classification model for authorship attribution
# 3. Applying the model for identifying the author of a set of unknown documents
# As mentioned above, this problem can be solved in three steps. First is feature extraction. Here, since there is a limited amount of data, we are going to use character as our features instead of words or sentences. If we use words or sentences as our features, we are going to end up with small dataset which might be problematic to train our model.
#
# ### Features
#
# 1. A sequence of characters (length of sequence is a hyperparameter)
# 2. An embedding layer for characters (dimensionality of the embedding is a hyperparameter)
#
# The embedding layer is a part of our model, but we can definitely consider it a feature extractor, since it encodes the feature space into a more meaningful semantic space.
#
# ### Classifier
#
# 1. Build a classifier using RNN layers and Dense layers.
# 2. Choose an optimizer and learning rate, and train the model with the extracted features
#
# ### Predict
#
# 1. Break the entire document to sequences of the same length, as determined by the hyperparameter
# 2. Retrieve an author prediction for each one of these sequences
# 3. Determine which author has received more 'votes'. We will then use this author as our prediction for the entire document. (Note: in order to have a clear majority, we need to ensure that the number of sequences is odd).
#
#
# ## Prepare the data
#
# We begin by setting up the data pre-processing pipeline. For each one of the authors, we aggregate all the known papers into a single long text. We assume that style does not change across the various papers, hence a single text is equivalent to multiple small ones yet it is much easier to deal with programmatically.
#
# For each paper of each author we perform the following steps:
# 1. Convert all text into lower-case (ignoring the fact that capitalization may be a stylistic property)
# 2. Converting all newlines and multiple whitespaces into single whitespaces
# 3. Remove any mention of the authors' names, otherwise we risk data leakage (authors names are hamilton and madison)
#
# Do the above steps in a function as it is needed for predicting the unknown papers.
# +
import numpy as np
import os
from sklearn.model_selection import train_test_split
# Classes for A/B/Unknown
A = 0
B = 1
UNKNOWN = -1
def preprocess_text(file_path):
    """Read one paper and return its normalized body text.

    The first line (header) is dropped; the remainder is lower-cased,
    stripped of the author names 'hamilton' and 'madison' (to avoid label
    leakage), and whitespace-normalized to single spaces.
    """
    with open(file_path, 'r') as f:
        raw_lines = f.readlines()
    body = ' '.join(raw_lines[1:])
    body = body.replace('\n', ' ').replace('  ', ' ')
    body = body.lower()
    # Remove author names so the model cannot simply read the label.
    for author_name in ('hamilton', 'madison'):
        body = body.replace(author_name, '')
    return ' '.join(body.split())
# Concatenate all the papers known to be written by A/B into a single long text.
# We assume style is constant across papers, so one long text per author is
# equivalent to many short ones (os.listdir order does not matter here).
all_authorA, all_authorB = '',''
for x in os.listdir('./papers/A/'):
    all_authorA += preprocess_text('./papers/A/' + x)
for x in os.listdir('./papers/B/'):
    all_authorB += preprocess_text('./papers/B/' + x)
# Print lengths of the large texts
print("AuthorA text length: {}".format(len(all_authorA)))
print("AuthorB text length: {}".format(len(all_authorB)))
# -
# The next step is to break the long text for each author into many small sequences. As described above, we empirically choose a length for the sequence and use it throughout the model's lifecycle. We get our full dataset by labeling each sequence with its author.
#
# To break the long texts into smaller sequences we use the *Tokenizer* class from the Keras framework. In particular, note that we set it up to tokenize according to *characters* and not words.
#
# 1. Choose SEQ_LEN hyper parameter, this might have to be changed if the model doesn't fit well to training data.
# 2. Write a function make_subsequences to turn each document into sequences of length SEQ_LEN and give it a correct label.
# 3. Use keras Tokenizer with char_level=True
# 4. fit the tokenizer on all the texts
# 5. Use this tokenizer to convert all texts into sequences using texts_to_sequences()
# 6. Use make_subsequences() to turn these sequences into appropriate shape and length
# +
from keras.preprocessing.text import Tokenizer
# Hyperparameter - sequence length to use for the model
SEQ_LEN = 30

def make_subsequences(long_sequence, label, sequence_length=SEQ_LEN):
    """Slide a length-`sequence_length` window over `long_sequence`.

    Returns (X, y): X has one row per window position (stride 1) and y is a
    column vector repeating `label` for every window. Both are float arrays.
    """
    n_windows = len(long_sequence) - sequence_length + 1
    X = np.zeros((n_windows, sequence_length))
    y = np.zeros((n_windows, 1))
    for start in range(n_windows):
        X[start] = long_sequence[start:start + sequence_length]
        y[start] = label
    return X, y
# We use the Tokenizer class from Keras to convert the long texts into a sequence of characters (not words)
tokenizer = Tokenizer(char_level=True)
# Make sure to fit all characters in texts from both authors.
# NOTE(review): fit_on_texts is given one concatenated string rather than a
# list of texts; with char_level=True it still sees every character, but
# passing [all_authorA, all_authorB] would be the canonical call -- confirm.
tokenizer.fit_on_texts(all_authorA + all_authorB)
# texts_to_sequences returns a list of sequences; [0] unwraps the single text.
authorA_long_sequence = tokenizer.texts_to_sequences([all_authorA])[0]
authorB_long_sequence = tokenizer.texts_to_sequences([all_authorB])[0]
# Convert the long sequences into sequence and label pairs
X_authorA, y_authorA = make_subsequences(authorA_long_sequence, A)
X_authorB, y_authorB = make_subsequences(authorB_long_sequence, B)
# Print sizes of available data
print("Number of characters: {}".format(len(tokenizer.word_index)))
print('author A sequences: {}'.format(X_authorA.shape))
print('author B sequences: {}'.format(X_authorB.shape))
# -
# Compare the number of raw characters to the number of labeled sequences for each author. Deep Learning requires many examples of each input. The following code calculates the number of total and unique words in the texts.
# +
# Calculate the number of unique words in the text (for comparison with the
# character-level data volume above; not used by the model itself).
word_tokenizer = Tokenizer()
word_tokenizer.fit_on_texts([all_authorA, all_authorB])
print("Total word count: ", len((all_authorA + ' ' + all_authorB).split(' ')))
print("Total number of unique words: ", len(word_tokenizer.word_index))
# -
# We now proceed to create our train, validation sets.
#
# 1. Stack x data together and y data together
# 2. use train_test_split to split the dataset into 80% training and 20% validation
# 3. Reshape the data to make sure that they are sequences of correct length
# +
# Stack all labeled sequences from both authors into a single dataset.
X = np.vstack((X_authorA, X_authorB))
y = np.vstack((y_authorA, y_authorB))
# Break data into train and test sets.
# NOTE(review): no random_state or stratify is passed, so the split is
# unseeded (non-reproducible) and class balance is not guaranteed -- confirm
# this is acceptable for the lab.
X_train, X_val, y_train, y_val = train_test_split(X,y, train_size=0.8)
# Data is to be fed into RNN - ensure that the actual data is of size [batch size, sequence length]
X_train = X_train.reshape(-1, SEQ_LEN)
X_val = X_val.reshape(-1, SEQ_LEN)
# Print the shapes of the train, validation and test sets
print("X_train shape: {}".format(X_train.shape))
print("y_train shape: {}".format(y_train.shape))
print("X_validate shape: {}".format(X_val.shape))
print("y_validate shape: {}".format(y_val.shape))
# -
# Finally, we construct the model graph and perform the training procedure.
#
# 1. Create a model using RNN and Dense layers
# 2. Since its a binary classification problem, the output layer should be Dense with sigmoid activation
# 3. Compile the model with optimizer, appropriate loss function and metrics
# 4. Print the summary of the model
# +
from keras.layers import SimpleRNN, Embedding, Dense
from keras.models import Sequential
from keras.optimizers import SGD, Adadelta, Adam
# Model hyperparameters: character-embedding width and RNN hidden size.
Embedding_size = 100
RNN_size = 256

# Char-level classifier: embedding -> SimpleRNN -> sigmoid (binary output).
model = Sequential()
# Consistency fix: use SEQ_LEN instead of the hard-coded 30 so the model
# always matches the sequence length used to build the training data.
model.add(Embedding(len(tokenizer.word_index)+1, Embedding_size, input_length=SEQ_LEN))
model.add(SimpleRNN(RNN_size, return_sequences=False))
model.add(Dense(1, activation='sigmoid'))
model.compile(optimizer='adam', loss='binary_crossentropy', metrics = ['accuracy'])
model.summary()
# -
# 1. Decide upon the batch size, epochs and train the model using training data and validate with vailadation data
# 2. Based on the results, go back to model above, change it if needed ( use more layers, use regularization, dropout, etc., use different optimizer, or a different learning rate, etc.)
# 3. Change Batch size, epochs if needed
# Training hyperparameters; revisit these if the model under/over-fits.
Batch_size = 4096
Epochs = 20
model.fit(X_train, y_train, batch_size=Batch_size, epochs=Epochs, validation_data=(X_val, y_val))
# ### Applying the Model to the Unknown Papers
#
# Do this all the papers in the Unknown folder
# 1. preprocess them same way as training set (lower case, removing white lines, etc.)
# 2. use tokenizer and make_subsequences function above to turn them into sequences of required size
# 3. Use the model to predict on these sequences.
# 4. Count the number of sequences assigned to author A and the ones assigned to author B
# 5. Based on the count, pick the author with highest votes/count
# Classify each unknown paper by majority vote over its subsequences.
for x in os.listdir('./papers/Unknown/'):
    unknown = preprocess_text('./papers/Unknown/' + x)
    unknown_long_sequences = tokenizer.texts_to_sequences([unknown])[0]
    # Label is irrelevant here (UNKNOWN); only the sequences are used.
    X_sequences, _ = make_subsequences(unknown_long_sequences, UNKNOWN)
    X_sequences = X_sequences.reshape((-1,SEQ_LEN))
    # (These two zeros are immediately overwritten by the np.sum calls below.)
    votes_for_authorA = 0
    votes_for_authorB = 0
    y = model.predict(X_sequences)
    # Threshold the sigmoid output: <=0.5 votes for A (class 0), >0.5 for B.
    y = y>0.5
    votes_for_authorA = np.sum(y==0)
    votes_for_authorB = np.sum(y==1)
    print("Paper {} is predicted to have been written by {}, {} to {}".format(
        x.replace('paper_','').replace('.txt',''),
        ("Author A" if votes_for_authorA > votes_for_authorB else "Author B"),
        max(votes_for_authorA, votes_for_authorB), min(votes_for_authorA, votes_for_authorB)))
# # Summary
#
# In this lab, we discussed the problem of authorship attribution. Finally, we looked at the model internals to get an intuition for how it encodes stylometric properties.
#
# The first two papers are written by author B, and next three papers are written by author A.
#
# The model was able to capture the style of each author based on the character sequences given to it. The sequence length is a hyperparameter which needs to be tuned, so play with this parameter as part of the feature extraction stage.
#
# Finally, you are able to train a model to solve author attribution.
#
# Good luck for your next lessons! Do try this assigment with the layers you learn next and see if there is any improvement in the model.
| Lesson 05/Activity 6_Author_Attribution.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
# %matplotlib inline
# Load the advertising data set and take a first look at it.
ad_data = pd.read_csv("advertisingTRY.csv")
ad_data.head()
ad_data.info()
ad_data.describe()
sns.set_style('whitegrid')
ad_data["Age"].hist(bins=30)
from sklearn.linear_model import LogisticRegression
logReg = LogisticRegression()
# Visualize missing values (colored cells would indicate NaNs).
sns.heatmap(ad_data.isnull(), cbar = False, cmap ='viridis')
# Bug fix: sklearn.cross_validation was deprecated in 0.18 and removed in
# 0.20; train_test_split now lives in sklearn.model_selection.
from sklearn.model_selection import train_test_split
#ad_data.drop("Timestamp", axis=1, inplace=True)
#ad_data.drop("Country", axis=1, inplace=True)
#ad_data.drop("City", axis=1, inplace=True)
# Drop the free-text column; keep "Clicked on Ad" as the target.
ad_data.drop("Ad Topic Line", axis=1, inplace=True)
X=ad_data.drop(("Clicked on Ad"), axis = 1)
y=ad_data["Clicked on Ad"]
#X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=101)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=101)
# Fit the classifier and evaluate on the held-out 30%.
logReg.fit(X_train, y_train)
predictions = logReg.predict(X_test)
from sklearn.metrics import classification_report
from sklearn.metrics import confusion_matrix
print(classification_report(y_test, predictions))
print(confusion_matrix(y_test, predictions))
| ProjectLogisticRegSid.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # MobileNetV2で欅坂46とけやき坂46のメンバーの顔認識
# +
import keras
from keras.applications.mobilenetv2 import MobileNetV2
from matplotlib import pyplot as plt
from sklearn.model_selection import train_test_split
from utils.utils import load_data
# %matplotlib inline
# -
# ### モデルと学習の設定
# EPOCHを200に設定した理由は、ある程度大きい数値、かつ、1エポックあたり40秒なので全体で2.2時間とトライ・アンド・エラーしやすかったため。
# テストとバリデーションのデータサイズはともに0.3(=全体の30%)を指定した。
# +
# モデルの設定
NUMBER_OF_MEMBERS = 41 # 漢字とひらがな合わせたメンバー数
CLASSES = NUMBER_OF_MEMBERS + 1 # one hot表現は0から始まるため
# 学習の設定
EPOCHS = 200
TEST_SIZE = 0.3
VALIDATION_SPLIT = 0.3
# -
# ### データの読み込み
X, Y = load_data('/home/ishiyama/notebooks/keyakizaka_member_detection/image/mobilenet/')
X_train, X_test, Y_train, Y_test = train_test_split(X, Y, test_size=TEST_SIZE, shuffle=True)
# ### モデル構築
model = MobileNetV2(include_top=True, weights=None, classes=CLASSES)
model.compile(optimizer='adam',
loss='categorical_crossentropy',
metrics=['accuracy'])
# ### 学習させる
fit_result = model.fit(
x=X_train,
y=Y_train,
epochs=EPOCHS,
validation_split=VALIDATION_SPLIT,
verbose=2
)
# ### LossとAccuracyのグラフを表示する
# (参考)[MNISTでハイパーパラメータをいじってloss/accuracyグラフを見てみる](https://qiita.com/hiroyuki827/items/213146d551a6e2227810)
# +
# Side-by-side axes: loss on the left (axL), accuracy on the right (axR).
fig, (axL, axR) = plt.subplots(ncols=2, figsize=(16,5))
# loss
def plot_history_loss(fit):
    """Plot training/validation loss per epoch on the left axes (axL)."""
    axL.plot(fit.history['loss'],label="loss for training")
    axL.plot(fit.history['val_loss'],label="loss for validation")
    axL.set_title('model loss')
    axL.set_xlabel('epoch')
    axL.set_ylabel('loss')
    axL.legend(loc='upper right')
# acc
def plot_history_acc(fit):
    """Plot training/validation accuracy per epoch on the right axes (axR)."""
    # (Original comment said "loss" here -- this plots accuracy.)
    axR.plot(fit.history['acc'],label="accuracy for training")
    axR.plot(fit.history['val_acc'],label="accuracy for validation")
    axR.set_title('model accuracy')
    axR.set_xlabel('epoch')
    axR.set_ylabel('accuracy')
    axR.legend(loc='upper right')
plot_history_loss(fit_result)
plot_history_acc(fit_result)
plt.show()
plt.close()
# -
# 周期的に精度が下がる原因を調査する。
# ### テストデータで精度を確認する
# Evaluate on the held-out test split; evaluate returns [loss, accuracy].
test_result = model.evaluate(
    x=X_test,
    y=Y_test
)
print('loss for test:', test_result[0])
print('accuracy for test:', test_result[1])
# ### Save the trained model
model.save('keyakizaka_member_detection_mobilenetv2.h5')
| Keyakizaka_Member_Face_Detection001.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] colab_type="text" id="QWlanPgwTAJO"
# # Extended results and code explanation supporting paper *Message-Passing Neural Networks Learn Little's Law* by <NAME> and <NAME>
#
# This *jupyter notebook* provides detailed results related to application and implementation of Neural Message-Passing for performance evaluation in Jackson networks of queues. Some of the presented ideas may be not clear until the paper is read.
#
#
# First, the obtained results are given. At the end of the notebook, the full code of the implementation is discussed as well.
#
# **If you decide to apply the concepts presented or base on the provided code, please do refer our paper: <NAME> and <NAME>, ''Message-Passing Neural Networks Learn Little's Law'', IEEE Communications Letters, 2018, *accepted for publication*.**
# + colab={} colab_type="code" id="_UcPcjFfTAJS"
import tensorflow as tf
import numpy as np
import datetime
import argparse
import os
import graph_nn
import matplotlib.pyplot as plt
# %matplotlib inline
import pandas as pd
import IPython
# + colab={} colab_type="code" id="acxZZQOITAJf"
args = graph_nn.args
# + colab={} colab_type="code" id="T-aIHxI4TAJn"
def make_set():
    """Build a batched one-shot iterator over the evaluation TFRecord file.

    Returns a tensor of serialized Example batches of size args.batch_size.
    NOTE(review): make_one_shot_iterator is the TF1-style dataset API; this
    will need updating for TF2 eager execution.
    """
    ds = tf.data.TFRecordDataset([args.eval])
    ds = ds.batch(args.batch_size)
    serialized_batch = ds.make_one_shot_iterator().get_next()
    return serialized_batch
# + [markdown] colab_type="text" id="JNhaN3XKTAJw"
# ## Confidence intervals
#
# The confidence intervals for the test statistics are computed using the method similar to the bootstrap technique. According to the bootstrap approach, the distribution in the population is approximated by a random sampling with replacement from some set. Since we have the full generative model for the networks, we can estimate the confidence interval using the Monte Carlo method just by sampling from the whole population (instead of using the bootstrap technique). Then, the confidence intervals can be estimated as quantiles of the samples.
#
# + colab={} colab_type="code" id="P5I8Wxs-TAJx"
def bootstrap(ckpt_='model.ckpt-197400', nboot=9, nval=3):
    """Yield Monte-Carlo "bootstrap" statistics for a restored model.

    Builds a fresh TF1 graph, restores the checkpoint named `ckpt_` from
    args.log_dir, and yields `nboot` tuples of
    ([mse, R**2, rho], labels, predictions), each aggregated over `nval`
    evaluation batches drawn from the args.eval dataset.
    """
    graph_nn.REUSE = None
    g=tf.Graph()
    with g.as_default():
        global_step = tf.train.get_or_create_global_step()
        with tf.variable_scope('model'):
            serialized_batch = make_set()
            batch, labels = graph_nn.make_batch(serialized_batch)
            n_batch = tf.layers.batch_normalization(batch)
            predictions = graph_nn.inference(n_batch)
        # NOTE(review): `loss` is built but never evaluated below.
        loss= tf.losses.mean_squared_error(labels,predictions)
        saver = tf.train.Saver(tf.trainable_variables() + [global_step])
    with tf.Session(graph=g) as ses:
        ses.run(tf.local_variables_initializer())
        ses.run(tf.global_variables_initializer())
        # NOTE(review): latest_checkpoint's result is immediately overwritten
        # by the explicit ckpt_ path on the next line.
        ckpt=tf.train.latest_checkpoint(args.log_dir)
        ckpt = os.path.join(args.log_dir,ckpt_ )
        print(ckpt)
        if ckpt:
            # (Typo "checkpint" is in the runtime log string; left unchanged.)
            tf.logging.info("Loading checkpint: %s" % (ckpt))
            saver.restore(ses, ckpt)
        for boot_index in range(nboot):
            label_py=[]
            predictions_py=[]
            # Collect nval batches of (label, prediction) pairs.
            for i in range(nval):
                val_label_py, val_predictions_py, step = ses.run( [labels,
                                                                   predictions,
                                                                   global_step]
                                                                )
                label_py.append(val_label_py)
                predictions_py.append(val_predictions_py)
            label_py = np.concatenate(label_py,axis=0)
            predictions_py = np.concatenate(predictions_py,axis=0)
            # Per-replicate statistics: MSE, fit quality (R**2), Pearson rho.
            yield [
                np.mean((label_py-predictions_py)**2),
                graph_nn.fitquality(label_py,predictions_py),
                np.corrcoef(label_py,predictions_py, rowvar=False)[0,1]
            ],label_py,predictions_py
# + [markdown] colab_type="text" id="B7DgHpeaTAJ2"
# We use 99 bootstrap samples of 32 batches (with a single batch size equal to 64 samples).
# + colab={} colab_type="code" id="G_EdZakQTAJ4" outputId="2c03593a-081e-4587-884c-6effca643dda"
32*64*99
# + [markdown] colab_type="text" id="Z9Y2fbcATAJ_"
# ## Evaluation samples
#
# All the evaluation sets are generated using commands given below. The details concerning generation of the code are provided in further parts of this notebook.
#
# ```bash
# python3 samples.py -o eval.tfrecords -N 202753 -n 40 --rmax 0.9 -g ba
# python3 samples.py -o eval_er.tfrecords -N 202753 -n 40 --rmax 0.9 -g er
# python3 samples.py -o eval_er60.tfrecords -N 202753 -n 60 --rmax 0.9 -g er
#
# python3 samples.py -o eval_snd_2038.tfrecords -N 202753 --rmax 0.9 -g snd \
# --sndlib sndlib/sndlib-networks-xml/cost266.graphml \
# --sndlib sndlib/sndlib-networks-xml/france.graphml \
# --sndlib sndlib/sndlib-networks-xml/geant.graphml \
# --sndlib sndlib/sndlib-networks-xml/india35.graphml \
# --sndlib sndlib/sndlib-networks-xml/janos-us.graphml \
# --sndlib sndlib/sndlib-networks-xml/nobel-eu.graphml \
# --sndlib sndlib/sndlib-networks-xml/norway.graphml \
# --sndlib sndlib/sndlib-networks-xml/sun.graphml \
# --sndlib sndlib/sndlib-networks-xml/ta1.graphml
#
#
#
# python3 samples.py -o eval_snd_janos-us.tfrecords -N 202753 -n 40 --rmax 0.9 -g snd \
# --sndlib sndlib/sndlib-networks-xml/janos-us.graphml
#
# python3 samples.py -o eval_snd_janos-us-ca.tfrecords -N 202753 -n 40 --rmax 0.9 -g snd \
# --sndlib sndlib/sndlib-networks-xml/janos-us-ca.graphml
#
# python3 samples.py -o eval_snd_cost266.tfrecords -N 202753 -n 40 --rmax 0.9 -g snd \
# --sndlib sndlib/sndlib-networks-xml/cost266.graphml
#
# python3 samples.py -o eval_snd_germany50.tfrecords -N 202753 -n 40 --rmax 0.9 -g snd \
# --sndlib sndlib/sndlib-networks-xml/germany50.graphml
# ```
#
# + [markdown] colab_type="text" id="zIRTpAoOTAKA"
#
# ## Evaluation results
#
# Below, the results are organised into sections labelled as **X**/**Y**, where __X__ describes the training set, and __Y__ describes the test set. Both **X** and **Y** are represented with abbreviations BA or ER (the Barabasi-Albert or Erdos-Renyi models, respectively), or a network name retrieved from the SNDLib library. For example, the section BA/ER shows the results of a model trained with Barabasi-Albert random networks, and tested on a test set drawn from the Erdos-Renyi network model.
#
# Our best checkpoints (snapshots of the weights) based on the evaluation results are as follows (respectively for the both types of random network models):
# - BA 197400
# - ER 199700
#
# All the snapshots are stored locally in subfolders of `log/`. They are not published within this notebook because of their large size.
#
#
# Each section with result analysis contains the confidence interval (95%) for mean squared error ($\mathit{MSE}$, denoted as `mse`), $\mathcal{R}^2$ (`R**2`), and Person correlation $\rho$ (`rho`) presented in a table form. The quality of the output prediction is visualized in the form of a regression plot comparing the 'true labels' (normalized delays) and the model predictions. The analysis is concluded with a histogram of residuals (i.e., model errors).
#
#
# + colab={} colab_type="code" id="950QBenbTAKC"
def print_report(bootstraps_samples):
    """Display the mean and 95% CI of each bootstrap statistic as a table.

    Expects an array of shape (n_replicates, 3) with columns mse, R**2, rho.
    """
    lo, hi = np.percentile(bootstraps_samples, [2.5, 97.5], axis=0)
    center = np.mean(bootstraps_samples, axis=0)
    summary = np.stack([lo, center, hi], axis=0)
    with pd.option_context('display.precision', 4):
        table = pd.DataFrame(summary,
                             columns=['mse', 'R**2', 'rho'],
                             index=['ci_lo', 'mean', 'ci_hi'])
        IPython.display.display_html(table)
# + colab={} colab_type="code" id="RMTnjMjLTAKG"
def print_regplots(label_py, predictions_py):
    """Plot predictions vs. true labels, plus a histogram of the residuals.

    label_py, predictions_py: arrays of true / predicted normalized delays.
    """
    # Scatter of predicted vs. true values with the identity line as reference.
    plt.figure()
    plt.scatter(label_py, predictions_py,
                color="gray",
                facecolors='none',
                s=1)
    l = graph_nn.line_1(label_py, label_py)
    l[0].set_color('k')
    l[0].set_linewidth(1)
    plt.grid(True, color='lightgray')
    plt.xlabel('True label')
    plt.ylabel('Predicted label')
    plt.title('Evaluation')
    # BUG FIX: tight_layout(True) passed True positionally as the `pad`
    # argument; call without arguments to use the default padding.
    plt.tight_layout()
    plt.show()
    plt.close()
    plt.figure()
    plt.hist(label_py - predictions_py, 50)
    plt.title('Histogram of residuals')
    # NOTE(review): the original bound fig_path='rez_hist.pdf' but never saved
    # the figure; add plt.savefig('rez_hist.pdf') here if a PDF export is wanted.
    plt.show()
    plt.close()
# + [markdown] colab_type="text" id="tMZPT8zzTAKM"
# ## BA/BA
# + colab={} colab_type="code" id="7jsJIcZQTAKP" outputId="ab192e54-92b8-4d79-e90a-a4dd617733f0"
# BA-trained model (log/ba16) evaluated on the BA test set.
for _k, _v in dict(eval='eval.tfrecords', log_dir='log/ba16',
                   ninf=16, rn=8,
                   W_shift=55.3, W_scale=22.0,
                   mu_shift=0.34, mu_scale=0.27).items():
    setattr(args, _k, _v)
bootstraps_samples = []
for b, label_py, predictions_py in bootstrap(ckpt_='model.ckpt-197400',
                                             nboot=99,
                                             nval=32):
    bootstraps_samples.append(b)
bootstraps_samples = np.asarray(bootstraps_samples)
print_report(bootstraps_samples)
print_regplots(label_py, predictions_py)
# + [markdown] colab_type="text" id="63cvcd7ITAKW"
# ## BA/ER
#
# With this exemplary results, you can note that despite there is a non-negligible bias, the correlation between true labels and model predictions is really high. Therefore, such a model can be tuned to a particular network using a small number of additional labels. This property is useful for the following procedure of transfer learning:
#
# 1. Given a trained network, compute the prediction.
# 1. Collect a few delays in a particular network under study.
# 1. Prepare a simple linear model that corrects predictions obtained from an MPNN.
# + colab={} colab_type="code" id="vWtpJv0vTAKX" outputId="ffdf03d3-6363-4bee-fdcb-4c1e5fa3635c"
# BA-trained model evaluated on the ER test set (BA normalization stats).
for _k, _v in dict(eval='eval_er.tfrecords', log_dir='log/ba16',
                   ninf=16, rn=8,
                   W_shift=55.3, W_scale=22.0,
                   mu_shift=0.34, mu_scale=0.27).items():
    setattr(args, _k, _v)
bootstraps_samples = []
for b, label_py, predictions_py in bootstrap(ckpt_='model.ckpt-197400',
                                             nboot=99,
                                             nval=32):
    bootstraps_samples.append(b)
bootstraps_samples = np.asarray(bootstraps_samples)
print_report(bootstraps_samples)
print_regplots(label_py, predictions_py)
# + [markdown] colab_type="text" id="iD58yoA0TAKa"
# The result is likely to be biased because the mean and standard deviation of the delay in this network are not the same as for training samples for BA graphs. We can check this hypothesis by using the true values of mean and standard deviation obtained from Erdos-Renyi networks. The predictions are better, but still biased.
# + colab={} colab_type="code" id="utZUUnubTAKc" outputId="f8dc824a-be65-4456-8a33-31ad9010cb48"
# BA-trained model on the ER test set, using the true ER normalization stats.
for _k, _v in dict(eval='eval_er.tfrecords', log_dir='log/ba16',
                   ninf=16, rn=8,
                   W_shift=69.3, W_scale=15.95,
                   mu_shift=0.199, mu_scale=0.12).items():
    setattr(args, _k, _v)
bootstraps_samples = []
for b, label_py, predictions_py in bootstrap(ckpt_='model.ckpt-197400',
                                             nboot=99,
                                             nval=32):
    bootstraps_samples.append(b)
bootstraps_samples = np.asarray(bootstraps_samples)
print_report(bootstraps_samples)
print_regplots(label_py, predictions_py)
# + [markdown] colab_type="text" id="w1CD8PG0TAKe"
# ## ER/ER
#
# In general, the model trained with use of Erdos-Renyi networks generalizes far better in comparison to the models trained with Barabasi-Albert networks.
#
# + colab={} colab_type="code" id="px1iuX6yTAKf" outputId="1ece42b3-58fc-49a6-c918-cac4bb10e03e"
# ER-trained model (log/er3) evaluated on the ER test set.
for _k, _v in dict(eval='eval_er.tfrecords', log_dir='log/er3',
                   ninf=16, rn=8,
                   W_shift=69.3, W_scale=15.95,
                   mu_shift=0.199, mu_scale=0.12).items():
    setattr(args, _k, _v)
bootstraps_samples = []
for b, label_py, predictions_py in bootstrap(ckpt_='model.ckpt-199700',
                                             nboot=99,
                                             nval=32):
    bootstraps_samples.append(b)
bootstraps_samples = np.asarray(bootstraps_samples)
print_report(bootstraps_samples)
print_regplots(label_py, predictions_py)
# + [markdown] colab_type="text" id="JyERIaEFTAKl"
# ## ER/BA
# + colab={} colab_type="code" id="C4IP-K7xTAKm" outputId="568540a3-c040-48be-b6fc-7bb8b5b5b58f"
# ER-trained model evaluated on the BA test set.
for _k, _v in dict(eval='eval.tfrecords', log_dir='log/er3',
                   ninf=16, rn=8,
                   W_shift=69.3, W_scale=15.95,
                   mu_shift=0.199, mu_scale=0.12).items():
    setattr(args, _k, _v)
bootstraps_samples = []
for b, label_py, predictions_py in bootstrap(ckpt_='model.ckpt-199700',
                                             nboot=99,
                                             nval=32):
    bootstraps_samples.append(b)
bootstraps_samples = np.asarray(bootstraps_samples)
print_report(bootstraps_samples)
print_regplots(label_py, predictions_py)
# + [markdown] colab_type="text" id="YhRG9sa1TAKq"
# ## ER/ER 60
#
# The model based on the Erdos-Renyi networks generalizes to a larger network size, never present in the training sets.
# + colab={} colab_type="code" id="aoJM9h10TAKs" outputId="3599b0ff-816b-46b8-9279-61924e45f8ee"
# ER-trained model evaluated on 60-node ER networks (size never seen in training).
for _k, _v in dict(eval='eval_er60.tfrecords', log_dir='log/er3',
                   ninf=16, rn=8,
                   W_shift=69.3, W_scale=15.95,
                   mu_shift=0.199, mu_scale=0.12).items():
    setattr(args, _k, _v)
bootstraps_samples = []
for b, label_py, predictions_py in bootstrap(ckpt_='model.ckpt-199700',
                                             nboot=99,
                                             nval=32):
    bootstraps_samples.append(b)
bootstraps_samples = np.asarray(bootstraps_samples)
print_report(bootstraps_samples)
print_regplots(label_py, predictions_py)
# + [markdown] colab_type="text" id="_IhWlwmaTAKw"
# ## ER/ SNDLib $n\in (20,38)$
#
# Here, we evaluate the model with the tests using some real network topologies retrieved from SNDLib. The node range $n\in (20,38)$ is most common in the whole training set (the details are provided below in the notebook).
# + colab={} colab_type="code" id="BH6fOQr_TAKx" outputId="b0f48931-fc67-4802-a49f-4aebac160d24"
# ER-trained model evaluated on SNDLib topologies with 20-38 nodes.
for _k, _v in dict(eval='eval_snd_2038.tfrecords', log_dir='log/er3',
                   ninf=16, rn=8,
                   W_shift=69.3, W_scale=15.95,
                   mu_shift=0.199, mu_scale=0.12).items():
    setattr(args, _k, _v)
bootstraps_samples = []
for b, label_py, predictions_py in bootstrap(ckpt_='model.ckpt-199700',
                                             nboot=99,
                                             nval=32):
    bootstraps_samples.append(b)
bootstraps_samples = np.asarray(bootstraps_samples)
print_report(bootstraps_samples)
print_regplots(label_py, predictions_py)
# + [markdown] colab_type="text" id="R-MFkQlzTAK2"
# ## BA/SNDLib $n\in (20,38)$
#
# The Barabasi-Albert networks are not useful for training if we would like to obtain high quality predictions for type of networks. However, we have noticed that BA networks provide a very interesting property. When you look at the clusters in the regression plots given below, you can note that each of five clusters is separately characterized with a large level of correlation (with a true label), although the overall correlation value is small. You can see that the 'within-cluster variance' corresponds to the variance of parameters for one particular topology; on the other hand, the 'between-cluster variance' represents the variance between topologies (__BA/germany50__ supports this claim).
#
# Given the general model and a particular network topology, one is interested in obtaining an accurate model for this topology. Such the model can be constructed by using a small number of training samples. Since the 'within-network correlation' is high, a simple linear model is sufficient to exercise transfer learning of the topology representation from the general training set. In other words, the model can be inaccurate for some topologies. However, change of other parameters (such as traffic) results in the error at a similar level, so it can be easily corrected.
#
#
# + colab={} colab_type="code" id="zLPdQoaSTAK3" outputId="b2c24e83-4c13-4dc7-e22a-2bde6aa305d0"
# BA-trained model evaluated on SNDLib topologies with 20-38 nodes.
for _k, _v in dict(eval='eval_snd_2038.tfrecords', log_dir='log/ba16',
                   ninf=16, rn=8,
                   W_shift=55.3, W_scale=22.0,
                   mu_shift=0.34, mu_scale=0.27).items():
    setattr(args, _k, _v)
bootstraps_samples = []
for b, label_py, predictions_py in bootstrap(ckpt_='model.ckpt-197400',
                                             nboot=99,
                                             nval=32):
    bootstraps_samples.append(b)
bootstraps_samples = np.asarray(bootstraps_samples)
print_report(bootstraps_samples)
print_regplots(label_py, predictions_py)
# + [markdown] colab_type="text" id="avyPt7BuTAK7"
# ## ER/janos_us
# + colab={} colab_type="code" id="IsYm2setTAK7" outputId="7f72902e-3b85-4ec1-922a-ac8ccdf27ef4"
# ER-trained model evaluated on the SNDLib janos-us topology.
for _k, _v in dict(eval='eval_snd_janos-us.tfrecords', log_dir='log/er3',
                   ninf=16, rn=8,
                   W_shift=69.3, W_scale=15.95,
                   mu_shift=0.199, mu_scale=0.12).items():
    setattr(args, _k, _v)
bootstraps_samples = []
for b, label_py, predictions_py in bootstrap(ckpt_='model.ckpt-199700',
                                             nboot=99,
                                             nval=32):
    bootstraps_samples.append(b)
bootstraps_samples = np.asarray(bootstraps_samples)
print_report(bootstraps_samples)
print_regplots(label_py, predictions_py)
# + [markdown] colab_type="text" id="-wdOqUPrTAK-"
# ## ER/janos_us_ca
# + colab={} colab_type="code" id="aM2TQFRgTAK_" outputId="2997b25a-c036-4d2a-d642-6172b192d952"
# ER-trained model evaluated on the SNDLib janos-us-ca topology.
for _k, _v in dict(eval='eval_snd_janos-us-ca.tfrecords', log_dir='log/er3',
                   ninf=16, rn=8,
                   W_shift=69.3, W_scale=15.95,
                   mu_shift=0.199, mu_scale=0.12).items():
    setattr(args, _k, _v)
bootstraps_samples = []
for b, label_py, predictions_py in bootstrap(ckpt_='model.ckpt-199700',
                                             nboot=99,
                                             nval=32):
    bootstraps_samples.append(b)
bootstraps_samples = np.asarray(bootstraps_samples)
print_report(bootstraps_samples)
print_regplots(label_py, predictions_py)
# + [markdown] colab_type="text" id="v7PmWheMTALC"
# ## ER/cost266
# + colab={} colab_type="code" id="M62USjDPTALD" outputId="c8e937a0-b82a-49ff-810b-cff3fb2ddb55"
# ER-trained model evaluated on the SNDLib cost266 topology.
for _k, _v in dict(eval='eval_snd_cost266.tfrecords', log_dir='log/er3',
                   ninf=16, rn=8,
                   W_shift=69.3, W_scale=15.95,
                   mu_shift=0.199, mu_scale=0.12).items():
    setattr(args, _k, _v)
bootstraps_samples = []
for b, label_py, predictions_py in bootstrap(ckpt_='model.ckpt-199700',
                                             nboot=99,
                                             nval=32):
    bootstraps_samples.append(b)
bootstraps_samples = np.asarray(bootstraps_samples)
print_report(bootstraps_samples)
print_regplots(label_py, predictions_py)
# + [markdown] colab_type="text" id="LD1pWwWFTALG"
# ## ER/germany50
# + colab={} colab_type="code" id="EuL0pgv1TALI" outputId="bf24a994-768c-44c1-befc-9f20658ce8c7"
# ER-trained model evaluated on the SNDLib germany50 topology.
for _k, _v in dict(eval='eval_snd_germany50.tfrecords', log_dir='log/er3',
                   ninf=16, rn=8,
                   W_shift=69.3, W_scale=15.95,
                   mu_shift=0.199, mu_scale=0.12).items():
    setattr(args, _k, _v)
bootstraps_samples = []
for b, label_py, predictions_py in bootstrap(ckpt_='model.ckpt-199700',
                                             nboot=99,
                                             nval=32):
    bootstraps_samples.append(b)
bootstraps_samples = np.asarray(bootstraps_samples)
print_report(bootstraps_samples)
print_regplots(label_py, predictions_py)
# + [markdown] colab_type="text" id="NCHzmGpkTALM"
# ## BA/germany50
#
# Again, a large systematic error can be observed in this case. Nevertheless, a large correlation that can be corrected by a simple linear model is also noticeable. Generally, it can be observed that only a small number of samples from `germany50`(where traffic is randomized) is required to train a highly accurate model for this particular topology.
# + colab={} colab_type="code" id="30nLe2WzTALN" outputId="c7721dfc-8049-4609-b5c8-a80dfc04fd3e"
# BA-trained model evaluated on the SNDLib germany50 topology.
for _k, _v in dict(eval='eval_snd_germany50.tfrecords', log_dir='log/ba16',
                   ninf=16, rn=8,
                   W_shift=55.3, W_scale=22.0,
                   mu_shift=0.34, mu_scale=0.27).items():
    setattr(args, _k, _v)
bootstraps_samples = []
for b, label_py, predictions_py in bootstrap(ckpt_='model.ckpt-197400',
                                             nboot=99,
                                             nval=32):
    bootstraps_samples.append(b)
bootstraps_samples = np.asarray(bootstraps_samples)
print_report(bootstraps_samples)
print_regplots(label_py, predictions_py)
# + [markdown] colab_type="text" id="Ag5-kML0TALQ"
# # Implementation Details
# This section describes low-level details related to our implementation of the MPNN. The full code presented below will be made publicly available via *Github* and in the form of this *Jupyter notebook*.
# + [markdown] colab_type="text" id="i5Df-DoHTALR"
# ## Random Graph Model
#
# Example usage:
#
# ```python3 samples.py -o eval.tfrecords -N 2000 -n 40 --rmax 0.9 -g ba```
# + colab={} colab_type="code" id="GuXZnk-ZTALR"
# # %load samples.py
import networkx as nx
import numpy as np
import scipy as sp
import tensorflow as tf
import argparse
import datetime
import glob
import os
import sys
sndlib_networks = None
class GraphProvider:
    """Base class for random-graph sources; subclasses implement _get()."""

    def get(self):
        # Relabel nodes to consecutive integers so matrix indexing downstream
        # lines up with node identity.
        return nx.convert_node_labels_to_integers(self._get())
class BarabasiAlbert(GraphProvider):
    """Barabasi-Albert graphs with a random node count in [nmin, n)."""

    def __init__(self, n):
        self.n = n        # exclusive upper bound on the node count
        self.nmin = 10    # minimum node count
        self.m = 2        # edges attached from each new node

    def _get(self):
        size = np.random.randint(self.nmin, self.n)
        return nx.barabasi_albert_graph(size, self.m)
class ErdosReni(GraphProvider):
    """Erdos-Renyi G(n, p) graphs with p = 2/n, reduced to the giant component."""

    def __init__(self, n):
        self.n = n
        self.p = 2.0 / n

    def _get(self):
        graph = nx.fast_gnp_random_graph(self.n, self.p, directed=False)
        # G(n, p) may be disconnected; keep only the largest component.
        biggest = max(nx.connected_components(graph), key=len)
        return graph.subgraph(biggest)
class SNDLib(GraphProvider):
    """Real topologies read from SNDLib .graphml files; _get() picks one at random."""

    def __init__(self, flist):
        # Key each graph by its file name with the '.graphml' suffix stripped.
        self.sndlib_networks = {os.path.split(f)[1][0:-8]: nx.read_graphml(f)
                                for f in flist}
        self.names = list(self.sndlib_networks.keys())

    def _get(self):
        chosen = np.random.choice(self.names)
        # Wrap in a plain undirected Graph before returning.
        return nx.Graph(self.sndlib_networks[chosen])
def make_sample(provider, rl=0.3, rh=0.7):
    """Draw one random network sample with per-node traffic and delay labels.

    provider: a GraphProvider instance supplying the topology.
    rl, rh: lower/upper bound for the random per-node utilisation rho.

    Returns (mu, L, R, W, Gm): service rates, external arrival intensities,
    routing matrix, normalized total delay W, and the annotated graph.
    NOTE(review): the solve below matches the open-Jackson-network traffic
    equations lambda = L + R^T lambda — confirm against the paper.
    """
    Gm=provider.get()
    A=nx.convert_matrix.to_numpy_matrix(Gm)
    # Make all external intensities add up to 1
    L=np.random.uniform(size=(len(Gm),1))
    L = L /np.sum(L)
    # Uniform routing: each node forwards with prob 1/(deg+1) per neighbour.
    p=1.0/(np.sum(A,axis=1)+1.0)
    R=np.multiply(A,p)
    # Total arrival rate at each node: solve (I - R^T) lam = L.
    lam=np.linalg.solve(np.identity(len(Gm))-np.transpose( R ) ,L)
    # random utilisation of each node
    rho=np.random.uniform(low=rl,high=rh, size=lam.shape)
    mu = lam/rho
    # ll = rho/(1-rho): mean number in system per node; W is their sum
    # normalized by the total external intensity.
    ll=rho/(1-rho)
    W=np.sum(ll)/np.sum(L)
    # Max value of W is of order n*0.99/(1 -0.99)
    nx.set_node_attributes(Gm,
                           name='mu',
                           values=dict(zip(Gm.nodes(),
                                           np.ndarray.tolist(mu[:,0]))))
    nx.set_node_attributes(Gm,
                           name='Lambda',
                           values=dict(zip(Gm.nodes(),
                                           np.ndarray.tolist(L[:,0]))))
    # Store only the non-zero routing entries as edge attributes.
    it=np.nditer(R, order='F', flags=['multi_index'])
    at = {it.multi_index:float(x) for x in it if x > 0}
    nx.set_edge_attributes(Gm,name='R', values=at)
    Gm.graph['W']=W
    return mu,L,R,W,Gm
def _int64_feature(value):
    """Wrap a list of ints as a tf.train.Feature for TFRecord serialization."""
    int_list = tf.train.Int64List(value=value)
    return tf.train.Feature(int64_list=int_list)
def _float_feature(value):
    """Wrap a list of floats as a tf.train.Feature for TFRecord serialization."""
    float_list = tf.train.FloatList(value=value)
    return tf.train.Feature(float_list=float_list)
def make_dataset(count, file, producer):
    """Generate `count` samples with `producer()` and write them to a TFRecord file.

    count: number of samples to generate.
    file: output TFRecord path.
    producer: zero-argument callable returning (mu, L, R, W, Gm).
    """
    writer = tf.python_io.TFRecordWriter(file)
    try:
        for i in range(count):
            if not i % 500:
                print('{} generated'
                      ' {} samples.'.format(str(datetime.datetime.now()),
                                            i))
            mu, L, R, W, Gm = producer()
            mu = mu[:, 0].tolist()
            L = L[:, 0].tolist()
            # Sparse encoding of the routing matrix: values plus (row, col) indices.
            first, last = np.nonzero(R)
            e = R[first, last].tolist()[0]
            example = tf.train.Example(features=tf.train.Features(feature={
                'mu': _float_feature(mu),
                'Lambda': _float_feature(L),
                'W': _float_feature([W]),
                'R': _float_feature(e),
                'first': _int64_feature(first.tolist()),
                'second': _int64_feature(last.tolist())}))
            writer.write(example.SerializeToString())
    finally:
        # BUG FIX: close the writer even if generation fails mid-way, so the
        # partial file is flushed and the handle released.
        writer.close()
if __name__ == '__main__':
    # CLI entry point: generate a TFRecord dataset of random network samples.
    random_org_help = '''Seed, if none, downloads from random.org'''
    # BUG FIX: description typo 'saple' -> 'sample'.
    parser = argparse.ArgumentParser(description='Generates sample networks')
    parser.add_argument('-N', help='number of samples',
                        required=True, type=int)
    parser.add_argument('-n', help='number of nodes', default=40, type=int)
    parser.add_argument('-o', help='Output file', required=True, type=str)
    parser.add_argument('--rmin', help='Min rho', type=float, default=0.3)
    parser.add_argument('--rmax', help='max rho', type=float, default=0.7)
    parser.add_argument('-s', help=random_org_help, required=False, type=int)
    parser.add_argument('-g', help='random graph type: [ba | er | snd]',
                        type=str, default="ba")
    parser.add_argument('--sndlib', help='Sndlib files', type=str, nargs='+')
    args = parser.parse_args()
    if args.s is None:
        # No seed given: fetch one true-random integer from random.org.
        import urllib.request
        with urllib.request.urlopen(
                'https://www.random.org/integers/?'
                'num=1&min=0&max=1000000&'
                'col=1&base=10&format=plain&rnd=new') as response:
            rnd_seed = int(response.read())
        print(str(datetime.datetime.now()),
              "Random response: {}".format(rnd_seed))
        np.random.seed(rnd_seed)
    else:
        np.random.seed(args.s)
    if args.g == 'er':
        provider = ErdosReni(args.n)
    elif args.g == 'ba':
        provider = BarabasiAlbert(args.n)
    elif args.g == 'snd':
        provider = SNDLib(args.sndlib)
    else:
        # BUG FIX: an unrecognized -g used to leave provider=None and crash
        # later inside make_sample; fail fast with a clear message instead.
        parser.error('unknown graph type: {!r} (expected ba, er or snd)'
                     .format(args.g))
    make_dataset(args.N, args.o, lambda: make_sample(provider,
                                                     args.rmin,
                                                     args.rmax))
# + [markdown] colab_type="text" id="3pxWLCpDTALU"
# ## Statistics
#
# Here we present how selected statistics related to the used random networks can be acquired. It is important to use the data properly normalized (so that they are characterized with zero mean and unit standard deviation) as it helps with the training process.
# + colab={} colab_type="code" id="uw4Ts2n1TALU"
def set_stat(filename):
    """Collect summary statistics from a TFRecord sample file.

    filename: path of a TFRecord file produced by samples.py.

    Returns (gL, gW, gM, nnodes): all traffic intensities, the per-sample
    delay W, all service rates, and each sample's node count.
    """
    gL = []
    gM = []
    gW = []
    nnodes = []
    r = tf.python_io.tf_record_iterator(filename)
    for be in r:
        e = tf.train.Example()
        e.ParseFromString(be)
        # Only Lambda, mu and W are needed; the original also parsed the
        # routing fields (R/first/second) into dead locals, removed here.
        Lambda = e.features.feature["Lambda"].float_list.value
        mu = e.features.feature["mu"].float_list.value
        W = e.features.feature["W"].float_list.value[0]
        gL += Lambda
        gM += mu
        gW.append(W)
        nnodes.append(len(Lambda))
    return gL, gW, gM, nnodes
def vis_set(gL, gW, gM, nnodes):
    """Display mean/std statistics and histograms for one evaluation set.

    gL, gW, gM, nnodes: the lists returned by set_stat().
    """
    stats = pd.DataFrame({
        'Delay': {f.__name__: f(gW) for f in [np.mean, np.std]},
        'Traffic': {f.__name__: f(gL) for f in [np.mean, np.std]},
        # BUG FIX: displayed column label was misspelled 'Serice'.
        'Service': {f.__name__: f(gM) for f in [np.mean, np.std]}
    })
    IPython.display.display_html(stats)
    plt.hist(gW, 50)
    plt.title('Histogram of the delay')
    plt.figure()
    plt.hist(nnodes, 30)
    plt.title('Network size Histogram')
# + [markdown] colab_type="text" id="3eMg3Jt6TALX"
# #### Erdos-Renyi
#
# Most of the training samples are contained in the range $(20,38)$.
# + colab={} colab_type="code" id="GqgSIlznTALX" outputId="ff3d34b2-55e7-41ee-880b-534bb009c60c"
# Summary statistics of the Erdos-Renyi evaluation set.
gL,gW,gM, nnodes = set_stat('eval_er.tfrecords')
vis_set(gL,gW,gM, nnodes)
# + [markdown] colab_type="text" id="lq9iKBSKTALb"
# #### Barabasi-Albert
# + colab={} colab_type="code" id="3YBPQ90tTALb" outputId="c8f02546-2c81-44a0-faa5-0093634644ad"
# Summary statistics of the Barabasi-Albert evaluation set.
gL,gW,gM, nnodes = set_stat('eval.tfrecords')
vis_set(gL,gW,gM, nnodes)
# + [markdown] colab_type="text" id="T2HqSMhLTALm"
# ## Tensorflow Implementation of MPNN
#
# NOTE: This code is to be run via CLI as a script not as a notebook cell (see below for instructions)!
#
# ### Training options
#
# Below, we provide the hyper-parameters we have found optimal for the task. Most of the hyper-parameters are encoded with default values for the training script options.
#
# #### ba16
#
# ```bash
# sbatch -J ba16 -t 72:0:0 ./train.sh --rn 8 --train train.tfrecords --test test.tfrecords --buf 10000 --buf 10000 --ninf 16 -I 200000
# ```
#
#
#
# #### er3
#
# ```bash
# sbatch -J er3 -t 72:0:0 ./train.sh --rn 8 --train train_er.tfrecords --test test_er.tfrecords --buf 10000 --W-shift 69.3 --W-scale 15.95 --mu-shift 0.199 --mu-scale 0.12 --buf 10000 --ninf 16 -I 200000
#
# ```
#
# + colab={} colab_type="code" id="oMziIWRjTALn"
# # %load graph_nn.py
import tensorflow as tf
import numpy as np
import matplotlib as mpl
mpl.use('Agg')
import matplotlib.pyplot as plt
import datetime
import argparse
import os
import io
# Command-line interface of graph_nn.py; the defaults encode the tuned
# hyper-parameters. Help-string typos fixed ('embeding', 'iteration',
# 'evaluatioin', 'neurions', 'usualy').
parser = argparse.ArgumentParser(description='Train the graph neural network')
parser.add_argument('--pad', help='extra padding for node embedding', type=int,
                    default=12)
parser.add_argument('--pas', help='number of passes', type=int, default=4)
parser.add_argument('--batch_size', help='batch_size', type=int, default=64)
parser.add_argument('--lr', help='learning rate', type=float, default=0.001)
parser.add_argument('--log_dir', help='log dir', type=str, default='log')
parser.add_argument('--rn', help='number of readout neurons', type=int,
                    default=8)
parser.add_argument('--buf', help='buffer', type=int, default=200)
parser.add_argument('-I', help='number of iterations', type=int, default=80000)
parser.add_argument('--eval', help='evaluation file', type=str,
                    default='eval.tfrecords')
parser.add_argument('--train', help='train file', type=str,
                    default='train.tfrecords')
parser.add_argument('--test', help='test file', type=str,
                    default='test.tfrecords')
parser.add_argument('--ninf',
                    help='Number of hidden neurons in inference layer',
                    type=int, default=256)
parser.add_argument('--Mhid',
                    help='Number of hidden neurons in message layer',
                    type=int, default=8)
def stat_args(name, shift=0, scale=1):
    """Register --<name>-shift / --<name>-scale normalization options."""
    parser.add_argument('--{}-shift'.format(name),
                        help='Shift for {} (usually np.mean)'.format(name),
                        type=float, default=shift)
    parser.add_argument('--{}-scale'.format(name),
                        help='Scale for {} (usually np.std)'.format(name),
                        type=float, default=scale)
stat_args('mu', shift=0.34, scale=0.27)
stat_args('W', shift=55.3, scale=22.0)
# As a script, parse the real CLI; when imported (e.g. from this notebook),
# fall back to the defaults.
if __name__ == '__main__':
    args = parser.parse_args()
else:
    args = parser.parse_args([])
def test():
    """Return the configured number of training iterations (args.I)."""
    return args.I
N_PAD=args.pad  # extra padding width for the node embedding (--pad)
N_PAS=args.pas  # number of message-passing iterations (--pas)
N_H=2+N_PAD  # node hidden-state width: 2 plus padding — presumably 2 input features, verify
REUSE=None  # NOTE(review): looks like a variable-scope reuse flag set later — confirm
batch_size=args.batch_size
# + [markdown] colab_type="text" id="zx01eT8TTALr"
# #### MPNN
#
# Below, we present the code implementing message-passing neural network. The algorithm is described in the paper, here we only give its basic mathematical sketch.
#
# Message: $$ \mathbf m_v^{t+1} = \sum_{w\in{N(v)}} M_t\left(\mathbf h_v^t,\mathbf h_w^t,\mathbf e_{vw}\right) \quad t=1,\dots,T,$$
#
# Update: $$\mathbf h_v^{t+1} = U_t\left(\mathbf h_v^t,\mathbf m_v^{t+1}\right) \quad t=0,1,\dots,T,$$
#
# $$\mathbf h_v^0=[\mathbf x_v,0,\ldots, 0]$$
#
# Readout: $$\hat{\mathbf y} = R(\mathbf h)$$
#
# The implementation is very low-level and uses only basic tensorflow options. A high-level implementation based on `tf.keras` and `tf.estimators` will be provided in the future to enable users to operate easily. However, we would like to stress that the presented implementation does provide the full assumed functionality.
# + [markdown] colab_type="text" id="GgqFQ8W9_6kK"
# <img src="data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAqgAAADfCAIAAAC1YrZbAAAAAXNSR0IArs4c6QAAAARnQU1BAACxjwv8YQUAAAAJcEhZcwAADsMAAA7DAcdvqGQAAHfPSURBVHhe7f3dT1PZ//eP7z9gn/SQAxIS0oQDE0NIDzDEQK5AnJAAGUMIakgx49WSGdOil6ATAaMt/rRmZtrfjEXHXg4NDEXplxmrb6tDHamT1pHOTA3061SlfspIZyiC9FP4gLRsvmutvVt2y2651RF5PQ4I3Tdrr7X2Xuu51mvdvKhFAAAAAAC2DSD8AAAAALCNAOEHAAAAgG2EsPBTAAAAAABscThRTwR6/AAAAACwjQDhBwAAAIBtBAg/ALwnMJFxzz3zt2r9L5PcEQAAgM1nFcL/5g+drHN4gfsVIzI52C7PoSmqUOMKc8c2DDPp0JQVlGkckwx3IDLy8IFvlpx63KncTVFUtsYVYU9uApGw/1Gvrr5SuzxMZtb3oFsrz0OPFCksY0nn50fNdSJ0SrxPZez3zXLRfQ+IjLtvXFQUoxcjKlWc05xpqCnKzi6Ra256w1HuknWx4O85KM47aH6R/CGsjWlf/3dNZTkUlSc3+2J5GoszXay4eMM9vnmvl2Mu6L7TcbFFVtnmTsgD9IrvG5o+zqZosdzsj8S+uXF370VlIZ1ZqPi61x18d6+WCbpvXlRIRFSNOcAdAgAA2HxWFH5mxtm6g9qjcU1xB5YYMddkb67wRz36AqSmBXoPqaCZsLutqiwWfiRglm2e8EdeDz+0mb+oESOJTB1mxKVBSaSyKoxPEwRvflBfnIFOvJ91NBvrWKKioYEvkajmNPZNbEDEmPBgt1rT7ZnasBAyc051Fso60YG2waXQGJ+xvNzoeysyOzfutRtkO4Vf85xThWMjrmpzh5di88xYXmv0zXE/BWAifrNStymfIh9SpkD4AQB4m6wk/MzfVmUuRdE5Tf0h7lCczRf+RSbke/DTA18I18CRv/qaP6KXwt9c4WcJuzSF6cJEEirOys6iqfwLrpklWZhxfbGn9CPcRtkCwo96608MJRlUxglbaGPd9U2Ca02hr6rovGMy1gcPmGveYmaSFy34mpdiU6p2jMfeMfq2FeZAyg+NNEnFm/opsoDwAwDw1llB+JnRvvYLjbtQtSiqM4/Oc0c5koR/LuBsb1K06I0XVepe75L1Oxr23tQ0ntZ3XdU0njE4R3FdOfvsRuMe3Neu6Rjo10kl2RJ5l3dm3H3pEzE6iCvUuRHbhZo8EfpRqlBpjK7XceE/f3eo96y0cGdeZQvpgEbG/zDIyKDD+T5Hd/OhUklBZdN1T2jCa/lSUSYRF32q63+ZooJehfDnNWg1pRSVq7T+zSWJGbM1SjXmSzU4/mwdHQ17rrc0NJ8/IysUFSt7/sSDE1geHhvrj2m6Oi8qisXkEcuPLDJTnu4WWcPXXe1nqvJlhqFYJ5iZGjKekB+rKxXl8MY+hB+URLLws2+KPmwZeyXwLPygz5Uao+mispCLUzQ81FmvvNBl0ikK93DvlwkN31RX4TGR6WHrpWZZSfbec136OgnK+OxKjTMmmcLRTgBn6pnvzcp83KBUWkZZG/uS8C9PYzTsu6upFJMkRcbdFq1MQlEyc2B63HOvS1NXlHeyvateQmcWa3+bCQ92H/+s4WJHu2p/vvS7IW6AI63w5zX3mI/loLeZc8wyyvby48IvlOHMK1f7sVIRO5jybdeVehSbjJq238dG3VeV5P+vfvZNLzLB33XSrKqrf+KyMBfo/1ourVedP1GTXyjV3g9E8Ii+rUsjLyo93f5tnSSDLta5Z/wx4Z8bH2yXZRbItL0PvJs/+AEAwHYmvfDPDV/vfvj6N+0uJMDLzN0Jws/MuHXFdEaJ4cnC4qzXUElR4sLKeqNnen7YVCvOKjc+YxaZaXtLZqxfxXW0cg+12e5ekeZSoqPWiSiu/Zd0OK
lhwQm/qOxcnz8Ucp7DulFt8mPVYK/cITW4grPjTnUJuipPqrV6X4Wfd0pR3He0Opf663xWI/yaR8Omajr+LNQYMh/caxx+SaLKatXMI83uRhz/xbBbW0rRB03+N6h1g/KBDZmZsNZ/agkKHFlcGDZW0Gwap5yqgthTohO2kznFbZ75aa9hH40zs+Yzkzcq/KBkkoV/xqHaQdPo/T0VeNaC11DCxelva/3nlmCUWAhKyGXRCWvzp5bRxchfNvXepTCxJTyTEh/CTYfIc5M0Z4Vok1jEIZnqioQeaooz0RcgNXqwmsaFXziNfOVmvwQi/KwNn8pXdP3c19Z0wjz4xHiAZi/DNvzsahM7epBe+NHxiQFNOXrJYmkHabPGhD9VhpMs5sIj4z4iuWUM3cf4TNXZ3P+Lb/wmRZ3lJfrycenYdc4ZwjnBTN5vlmQXa12hca/DIM9ArR9Fp6PvcuOJ3uH5mPBH/upr+ayl76/l8QUAANggaYX/jfvy1w9nOCFHtSvf3I3gC/O0W7sH6UKNeSRWL0uU1gCz+MreVEBRBSonniLAjJiqkFpXGIcXOHGiqkwjqF6cHXvxNwl6FcLPnUx5ZeJl3GOQSAhWoasSfleEHe8o1bpR+DNew6GDZj/DRgBrFVLHoyJWtNjGDZUjQ2K5OOPRl1OSo91e1LGeeuL2zwscQTLwa9vxb+xB9HwsxlxUiH5w/05YlRlsqyvVg5JhE01un0O9yvaGPTTb3hJ6FplXsVvZ7QkzC+Engy/mmcXooL4gU6K8hucDhp+5X8zgQMlYeCyjSIZzMeFpqnC0k+EydZEhjUKktuWagQmcn+kyU1D40Q9yPEvtnGO/zOjkgOH4F/14Sl5ChFcUfqTfT0216C1nFmsehhhW+OdSZjjJ4lh4c8OotcHNAA25NMXcbFCUGwdOkeGVf6yKnZlN9ml8MYIUKNLSJcFwpYNAMnav+gqoPgAAb400ws/MOC994XyN/sOdQiRyVEGT/RV7jsCXW9S5OUgnCD+pFqNubS42wnPizYolfRh1K1lxiolHjPdU+JkZ14V8dqID6gLuOWNHtfmS8ONAiNU3zoVL9lHUnokE7jYXZVJ0gUz/SwAbtJcfITAh3732M9JSSW4GFxWSbxkNNjKvAqVOnKt1IxFO8aBk2ETTeaUHampqZA1q/TU7O20CsfxZ3FyKDInskiPAGrrnAn1nipAiS+r0DjI0gyCBxjKKZDj37niaKhztZLhMxf9GJx3n0YMoSbPN3UWEP1UaeU9ZLvyxaLEw4eckibvQpxc7I3AZBy82zOQDNXo7VOFxm7MHC//rFJFJyo1Fxm+qpnfIUed+xqEqkuSJ8P/R4Y4DzWRmDMkW3sPZNgT+XEkw8S8cwX7J6OVV6YfiBwEAADaT1MKPDb/SpvYeM6LnKzKITolqzTyd4cstuxIvJ0NpnWDnA2ZLDZ4wW+VtfeFH3UGPoTyLEh36tv3ER9o/sLV3SfjHbQ15tAzb7ZeBlP4XvawAaWhsMHvZEay7FcWNJnfwFU+cZoZ76iUZ+3WP/p4J3G4s/sw0jLrdgg/iksDC3s0mWiBRws9Cx0cd7Gj90iD3XMBxSSbJoKh8pcWPryKBxu4gGb5c+IWjnUw8Uwlhr1Empmhx4e7cdJnJf0oa4SdNq+LyRtPvwdkBXoSTLuOREBtm1tshFdOUuKAwt84c+Dvlm03IDXSf31ybQ1cbf/2xYX/Xg/8oc0Xyzp8ufqYi7Wa2FIgU1glyLYLcXa73zJB/lgn/x8eaqsV08ZcDZGgAAABgc0kp/MzojTOdcTstGbtFwkLvM3jjVXmSMEfDQ92tmu9QS6HX+nD4NTsTkDX1c9cImPq3ivAvRsYsCrxwnz5gHCbSuCT8ZJiDP/kx8uJWp2MCicgPtzxvUM9/ashwSIwr+ollR0Ik2FrTCLo3QZyYsPuqWtuFM/ORn5uhlupB3K84bKKXJYpNQvKzot7bPZ5p8vq+k4ozCvSD0aj3hx4Pat
ww4ccGaS63upIEGgszlfALRjuZpUxliYxYG0nbJV1m8p+SWviZlxb5Dnb8KDHCiZfxSY7NXMB6UoJjg8KfSpnhCYHjEzhv6R15+cetE/Mh24kMOiu7WOdG7xqDWzO8iSYL+IKcU/bQAglmmfDX9Pifd1SL2EEH7gQAAMBmkUr4Z7wGdQd/HfO0vSkT1Yb0jub+WGXEl1tmduhSGV1jJJvt8GDHcVNP7ksn/KMWGWps7JSb/nhy64cHE3NvQfjJULeARsaYc6p2tNinSYLJFLnYvC2+8LMTG2lxzYUb9l9dzv9cUR7WuUNEbGRkWgC5GM8Lm1h25DUe9afKNK6J+eCjNukOFJWBqampCGowfVSlvT3gcrmWJnWnelAybKKXJYrMMFj2rFcOTQlrw8DZmIdnw6H7S1jRwpnJTdwjgcbCTCX8gtFOAn8GO1RO3reFmgsuXVlW2sxkx8UPtA2+ng8+bEPNESojr0xjn5xKUHQ8O0FEFX3pCs8Ef/9WikczHk69DkXSCD/6sHfEpwgQmNduXSWNv5n5lBlOHoTtW2/8rsev0M1kToMoX/MIt4tD/U05Gbu4XEWwpSO/3krsZXhVSHmt6SlqTZBMXS78KB9mnhtrUUqa7e9wByEAALYHgsI/7ev7skZcqDDc874mNeVswP2TnrX2U/Sexu7Hk0xk0n0FG0WprFLV3ZHZeVZuY9DZhdLm7kGyIwpZztd0zmASWs4nrtXe+fM1W7cxYwO6Wrycj65odSB5jU46dVV5mdlF9Qb3xMKk+6oc98To0rN9I2PPTYeJBaJC1fdsZEBP9uHJKm19MDYx2KnIj102PtJ3thQ/prDhmod7CgczGxiy37jAbuBD5dXprY98bGI5mFlfv1G1T0ztrFJ19PtQn3jao2eNt/hUe1PF0s59//Pa23uqLBsHxRsXn3brKsQf1V80dV9pOtJq+ysicISJ+H9UYot6Tmljl+PGyRwqA69s/J+gQ00izhGbbc5MCT2Iz9LOfVSeXNv9gLeroPCzPA7tbnGp4uL3PVda6lr78MyDqFu3O/cjhc7Uc7mp7oItMLcYCXpuaypFlKhSc9szMuruVEpElORIhzsw6butKs2iRNWavudhZlw42ktM+/o7VFU7KbFUY0qKm1ku78UtiRRpZIL3W/GWf7S46su+npa8IpnKcMvh6FVXiim6vLnnITEwzPpvNJAVhuWN3bYbJwsoukDe9Yff/QO+DEXy9tB4fGrF0ivOrdGY7PgVx4j4zPIG3FhMmeHcCEU2anzgyZKIV/ZmZcwe9tqpPsZZhjjI0El+Yc3JVlXDEVI0mEjQfUNdJULfbfM1FHuGCfnsBrxzn+SI0fli8mWvHL0r9IGZHi1NBwEAANgwaSb3rQ3m9SO9tLwMTydD7CvNQ5VWpcEruM4cWAlm3HnhVPvAk8eo6+z61W41tzdJ663/cGffW7ZotAEAALYTmyX8r+xNxWWGJ/EOKDNla8zcn3bTUyAVZEYFO1DNwUzbL3xuG+d+vads0WgDAABsLzZL+ENu7cc7FKahcWy9ZWbHPDc09bqHghu3ASsx6zVU0XG7dCTosbU3n+gZnn/Pc3OLRhsAAGB7sWmm/kVmJuB5aLP0ms29FttDz0ho2fAzsGoiL+1tJ2oKxXgDxKp6zVYZ5d2i0QYAANhObJ7wAwAAAADw3gPCDwAAAADbCGHhJ2uxAAAAAADYwnCingj0+AEAAABgGwHCDwAAAADbiHcp/GQLP3ltg/6KpuH0VRe39e2/AxN0m9WVIooSVTS193NbyM36+tsUEryP213epnJJMLMB1494w7Vl+w0DAAAAwHvPOxR+dh/1bI1rekCzM8FZ2b8D88xYnslz5Y5gZpytO8sN3pWWnnMeAED4AQAAgK3GuoQ/EvTcMZysLJJi7/urZsmtDhMJT7yejXn+e1vMBd23DJpGGbuJcE1NrdaZ6NCG9dDD89/D+M0HD2iF3N4kAcIPAAAAbFHWI/zM9NQUdn
aeXbN64Y/6rKdr8pBYikoV5/RW7MSPWP4bT+u7eM57FqeHb5wswi5RDnUN/KSV7hJJFF3euO+yVRP5y6aWVjW12zycBxUh5kdMtTy3v9EJW3NJ8/2Uuw1GXvbrlGWSPTKV4Wb70Sy+8DNTXstXjc0Xu9o1jc3tzgBsVAwAAAC8p6zX1I/7vDzhf/NYX5Kzx/BE2AE7S4Ij3ZTuemO9aYm0zXr/yiExtVPB9/Iy4zFrFKUisdSMXcUS/6eSOgtve3jMa5dm/z7jk5hT1FQwc051FpUjs4ziX7OP9fuarGOp2gkht7aCpnY12sYYJvynQcob458ZNtWJKdYxwYS9KZ8uOu+YTJcTAAAAAPBvsUnCz0x5ett7PVPpxsYThP+VvamAogpUzin0gxkxVVEUXWEcXogLf61pZH6Rmfn7xVjyNDvmpUW+I7PJTryoTnv0+7ELeXKGhQmYpeUG7yo2i2V8xnJKVKAfjC7O+k3K/UbvPHdmGRNWBZL6jBO2EB6hSDD148YHHZsrwFoRsiqMT9/2SAYAAAAArINNEv7VwBf+qFubSy+Z2dlT9GFLMBoTVd7QezIzHn05VW7Eas+M2VrOWAJ8vV4I2U4WYS1fBdP2pkwqo8E2NdF3fN/loZQz+Ul845FPFP7EU5GAWYaTIrME8YUAAAAA8H6xFYWfiCsOJxoaMJy96U+8Dp2VZxfu5Wb08Vg2uQ+1G8jE/vILxnNytXMijYmAixQIPwAAALDF+ZeEnzP1c8IvZOpPI/xkToDoyI3Bu19fdYWWyTUKYEeDbeWp+Rg8JI+enNPYl072EUGLDDVU0pj6uXSBqR8AAAB4r9nEMf7Om97Vj/GvOLkvjfCzl2RJlL1+wYH8heemffs1rpTT83nMeg2VlKjOPJpycJ+DGbM17qKo3c32Vwzz2q2rxCsPyi578KJEmNwHAAAAbBnWJfyRcW+ftkqUUdjU4xoJY31dcVY/MzF49dMcJJb0nsauB8OvkaiT5XxN5wwmweV8uTXaO158mQBR99el8o6hcOqnhR8b62qU+h8feMdTNh8wCyFby16DZyXZxzDhwe5maWFOmeJ8+13L1dNfGUy9VvtQYBafm/JatE3qb02wnA8AAAB4v1lvj//9hwn57Nf06obUG/gAAAAAwLbjwxV+AAAAAACWAcIPAAAAANsIEH4AAAAA2EaA8AMAAADANuJDEv5o2NN9XHm2vf2s8niPN/U2fAAAAACwbfmAhH/eYyjfq3WHFxfDbu3e8tUt0gMAAACAbcWHI/xRj76A21lvIWQ7kVGg92zeJjoRl0acV3pA+qV9C+/ME7Rr5AdK88TcvsMAAADAdmSrCT8zMdR5RLK0A2Cc+Ab+5Afe2i/d3n9rBQWYtzl6GQ0PddSVHzx5/rSiUqq2/fWWNJhsYVQlPdmqUuyrUvcFYlscbl5CAAAAgC3JFuzxJ2z9GycatBym+cLPuvzZJDZLL5nAjTrxRxoXcUY8aq7NqNC638KuQsyIpS4/X/NoBv/vN9fmFWt/w/+D8AMAAGx71iH80bDnWmMp2X5Xctg4lHZ//reBsPATU39mi30aRYds/r/Zpv7N0MuwW1tKLUXsH6tip0huGdvkHGTeuHW7qHK9h9X66IT1qEiksIzh6IPwAwAAbHPWLvwzjy58qn8UnIsEH7ZJc6ldOvebdyv9nPC39g39oJYWifOqm7oHscMA7C/g41VM7mPm3Lo9axS/mF7GXQlkFDbdHpkZ+10nzSxq7HAHV5UFC08MJRk8j73Tbu0eSnTUOrG58waI5yGewYM4Dt6psP6D/gfhBwAA2OasWfgXfHevc9ZpZs6pzmKH0ld00rOJsMIv+ljd92Im5FDniyj6oMn/hpgi4sv5uj0pXfi8sjdVN9lfcb9Wx5JeMkF7c1HMk+/M8+5LvX7spmdVTFgVIr6pgnXeH++abxbYkJBgEcE5JirQD6IcAeEHAADY5mxkjJ9Y1EsM3gX075Snt73X807M/gmm/h
FzTXbcr/8qiIYHr0gPdPnW6C2fr5dknB71+guPfXXhizuja0gyifky4V995FcJyZNk4ecOgPADAABsczYg/NhF/f5m++qs3JvIuoWfCQ1bL9RKtY70rnqFSNTLmefGWhGKw47T9tBabBwhW0PGcuHf9B7/uK0hb5nwQ48fAAAAwKxb+KOTjm+OX31MvPG/W9Yp/HM+89HCbJrOKz3A+elNyXIHvsl6SYz2FJVVpn+8akM/N8YvUlgnuN8zHn05xe09sImQMX7e1AE87ZHKa7CNo/9B+AEAALY56xP+aHjo+9ar7n9B9RHr7vGjPn/4saF2n9rxaq0RT9BLZmLg0pXbP31RTFOUSGZix/jn/fdbq0TlRh+Dtw/KrDAOC6g5zOoHAAAA/mXWIfxMJHD3wgV2TxgmEvzlYscfc3iMv/Om918c4//v+RFba2VuufEZg23duyuMTwX70Uyov7nwzNpM9Al6GQ0NGL64N8bgkY5deJZf/W1OuVHE2PWEc85WpLMMM++ztCjOWnxLlnwyP4Bbu8+MWeoy+ev4Ba5fnH9uaTnaYnm+tEJh+ZHFGZ/lrKLF4puPZT9ex7+rSPs7bpIwLy11+bCOHwAAAGBZs/AzwZ9bijKR8sYoxSvo3tmsfmZi8OqnZA+BClXfi8nn3fIc1O/OKlXdHZmdD5hlmU32abzc4EuF5WWKVsis1yCVmv1raqPE9HLa1/dljXhP47XByZn/6lNV4JV91O4j1r9wIwNdlC0zB+ZnXN99gac+LIQHviiiucH1GGl27iPXi8o0A5PcAUT4oaYop0jzcMmgsfzI4uSApoxOnCsAO/cBAAAAgmxgct/7B1beGnNgccrVdjnNpvoLAbvhjm9NbZRV6SVeL/+R1uHouPTLZLxZMffwmyt84X9LoLbOd1dWMUkQhB8AAGCb80EJP1be3C8d7uuXHmzyWoPV6eWIuSa3sPaKa2kcgZlxGS+RDXrfMlOuS0bXzMqJBuEHAADY5nxQwo9H2UVEeTdX9lcv/HKF0cuzwc+4O755B+sdmRl39zf2v1fzHBB+AACAbc4HJvy9cmmHd3bzlRbpJbjlBQAAAD4APizhBwAAAAAgLSD8AAAAALCNAOEHAAAAgG0ECD8AAAAAbCNA+AEAAABgGwHCDwAAAADbCBB+AAAAANhGgPADAAAAwDYChB8AACANc+Oee6bvfh4RdPcJrB8mMv6n3dx9b2SOO7BVeR8SMjfufWBuX+1X+i6FPxr23tTIaxv0VzQNp6+6NtsN/TpBseptLqs3BzZlO7uQS1NMZRfVNJzRNNXkUZSoVHFOc+ZYpYSOecQXgpkNuH5UV4koisJOhgAASMX6iti6mR1xdDSVZcf8gG8iTCRwX9d44kxDpTh7b+vqttz+kGD+dhiOlYmwU3XeNudbkFUlJDLmtnZebJZV6t1vYfPX+b+dV4+VruErfYfCHx3UF4hwzKYHNDtRIbVOcCf+PSKjDkOztEhMUbKUwh/xmZX61Zb56GBbjXYAO+lZCNlOZFCZ5cZnuDxP25sqjb60JTvi0mSD8ANAejZQxNZL2KUp3HzhXxg27ZPUmEcWmSnvvVt23zR3fDtBKr2NCD8T8ZuVurW+mVm/WaXbeGuDJw0rJyQy7nUYZBmb+x3xk7+2r3Qdwo8aqn3qMuITX3LYODS12rIWMNege3DMmEh44vXsBgxnKMdPGj2b03CKBMyylMLPTA22HRCt/l2F3TcfsC130i+J+8iPen/80Zs+viD8ALAyGyhi6+XtCH/QIqOzsfBvYzYo/EzY3VYlXuObiYYHv60SbdjMkCgNq0oIuWgTv6PE5L9t4Wf8N7/oGgpHF5kJp7pktR33qM96GtvliGFOb/XN4hfgvalpPK3vuqppPGNwjpIITw/fOFlEo+sOdQ38pJXuEkkUXXx/d6jZEfzVIC/IbLCFuCMbJI3wRydd7crSbBJnrcn9enFxLtD/tVxarzp/oia/UKq9H4ikaPYsPDGUZF
A7Na65tO2iyMt+nbJMskemMtxsP5rFF37UD7B81dh8satd09jc7gxs9WEwAEhPNGhRiEtPdLj+WVXdlbKIvbI3FVA5p+whwa6FQBGOjHtsXRp5Uenp9m/rJBl0sc49wzDhx8a6KulJ1UnZ/rLCrFiVGg17rrc0NJ8/IysUFSt7/pyNBD227zXykrzTV7vqCmi6QutOrJlQGdcqpMrT50/W5Od/ou1/GVlcCLuvk3EKUV7N5xoNW7cswYQHu49/1nCxo121P1/6Ha5sZ59b9S2yory9mu/0sgKaorPLvnQucxi27MaQ76fzlSKKKvy814t6aHOBXy/WZFaocRxWk5DJsOfacdmJi10GVVWJ1PA4HMtpnDn1nx07XCrKrtQ4x8nhZQGSK/mQu45pujovKopZV2FECvOP6S81lqKeJD9Rq3hN/+vIV///o6Uidqznujs8LxiB5IeSOl1EZZcqVBqTe0laUH17x6CSleQe+6ajsRz3wbL3qm1/4XfOTHm6W2QNX3e1n6nKlxlwXzdZGlInhEeC8C/Prulh66VmWUn23nNd+joJEsGlvMVxGDKekB+rKxXllGkck+go88rVfoyXfNQOLqSylPqOz0uz0c2xy1KwduGPTAResSI0P2L6JF/zCLe33zzWl+TsMTxJ1+Je6vGjH8z8sKlWnEXMdMy0vSWTLlU7SCJJ7lCURNpmvX/lkJjaqbD+Q+7HMJOujmZUYJa9tvWTtsef0IxiZty64l3nnMTdPjN5v1mSXaz9jXQ3kmECZilNZazQOgm5tRU0tavRNoaqmT8NUt4Y/8ywqU5M7Tf6UFZP2Jvy6aLzji3sGBAAVgSVxMN7v7re3Vxdqvj2nm8F39qpi1jY23WkvK5LyEunYBF2hbAVVp5B0TmKTkff5cYTvcMzw2Z5sdT0HJf7yHOTNIerBGYeaXY3WifQ7WG3tpSiD5qev2RNuFROfZfD2taoMg/zqwRUxvfuUjtIYqKT9jOSeMsA14eCPf65YeMBmn3cnFOVlV1twiMYjM9YTtFi0g6I+LulIu44D8EbSTMo44SNNINQIHuV1gl022oS8vQPY0UWCY+Zc6qz0DX+N/g5zJitsbBYPzg/7zGUoyZRYWVNo+n3+8kBshcvMes1VLKxYyas9Z9aglxln1nUZBlGiRruqELpM/uRIqz2Nf3PwJKULk8RjkCqhy7rnS/4bRfkhahJVdRiGQ7NBx+2SXMp0Scm/9zCsLGCZq+fcqoK6GoTiiJ6Cr+HnSIhifCFXzC2zDNjeSYlPoTbFuSriz0rOmE7mVPc5pmf9hr20ZQY5fhnJm+UHyAbH/qjJsvTMDM7bJTStDzNxLV1j/EzkcDdln1n+3HU0a8pT297ryet2T9B+MnnSBWonFPoBzNiqkLNpArjMPo4SWIoqtY0Mo8+gL9fjCWWX2bOpdnJKWIizF/WlkM1qZG2WAMC8Vu98P9jVezMbLLHxuLwJ0WJjpKXlwQ7+ij07vlMWBVI6mNlkks3K/yh/qYcmspSO3FvBjWwaikqq8L4dAOjIwDwnoNKooIIIbEF1lbWam56UWdXmNUVsWRSFmFS+rjqCFUlb9y6XUtFO14JRCesR0Vc05x0V6gcmWWULbpZKqeAUQ6X8fwme8wqSqwUnJU0pfBHJwcMx7/oD6KEYf2OVT/86xNq/DiCN5K0UHkNtnHUP/ObGpvtr7CQrCYhzKuBttNf4FmHRPhjYsn4TdWcEKJwjmXQB4zDM8IBJjDj0ZdTkqPd2PYw9cTtn+fSEdPgpUSt8jXxb0mRohUfymcpNAQz72krJjNIFiZ/bTv+jT2IDmPhj12xXPiXJySRlWM7Yq7JjvX9eOEzPlN1NvfvhFWZEdOChAeljo8Q6xP+6Gt3e0NVYTZqgdbdEFJTIfjCH3Vrc2kqHjP2FH3YEoyyUU6txOQjKDF4N00DVy38JM6xjEWwL0wwc9nRx0qDd7m5awkcXjxDuFfFCX/iKTaGFC3DzVUA+ECJCz
+BCfnufasorW7ufRK3MPNYVRFLJnURTqwoSSWzdF28EsD/EMtqnAuX7KNMQv2bACnI/CoCW++4S1MKP4YJP7/XfkZaugtVk1zIKws/RuDG+UF9cYZIbhmLPjXWaJwzKDdXn5Bo2Pdz+5lPSiU76FhCSKLYlgQbqz1a9z/CAeIr4uC+YnNRJkUXyPS/sIOkCdkef/pqXxM/H1KkaMWH8lkKjcBvdeGvEWerJDc+PY8nDakSgn/wWDm2KYSfZEjMuIWuEedqydKAhAeljo8Q6+7xI6Ih57n81Ze9TRF+xm+W5uzUDAg0rtfJ2oSfP6eBxDQ2t4gPO/pYoE8//ZBLaMKrAuEHti1rEf7VFbFkUhdh8k+8oiSlnrO3xX7iwjhua8gTKIbk5lg5ToAUZP5gJQmKjXZK4SdaVVzeaPo9OMszZa8s/CluxB39gyjZP9y/dIAdmV1cZULmAn1niotPmNxjs/z8mX/WU1eQUfXNo2AoYG0qrjUNzweFA0wGxfAXdo5CjtIyGmFbGsv0crWviR/hFCnCpH0on6XQ4j8zSgxPFiJ/9TVXFDea3MFXPHFNLbRJ4cRZOp4qtimEf3FmuKdekrFf9+jvmcDtxuLPTOxwUsKDUsdHiI0IPxu8kPIJwhd+ztTPxUzI1J9CibHpLNbYTOKtm/rx26J2tJImM4IYG4XmEJHROIpnqkoBntab1tTPPRdM/cB2IC780fDwna/SmvpXW8SSSVmEEyvKOZ9xP8+cEK8Ept3aPZSozjw6T46jKL+41emYIDfHqtxEQraGDHqHyhGrIlEEJDlN/bjrlkr4mZcW+Q6qyjSC4sgPeUXhT3UjOjNqrhWJsrM/1nLTolaVEGbMIhdlVZlexMKL5080PNipvtBhNv9gffhfpFmWIkA2mzlmvT/c8rxBnfCpIcMh8XIhX3r6Kl8T/5ZUEVjpoXyWQiNgrSlUOcfHLAoRO/ScIAephTYpnDgrxzaV8C8yYfdVtbbLbO61PvLHC0XCg1LHR4h1CP+bqdfcC0Hf08GPDd55nK2e3s6beBwlNQnCv+LkPkElJleKDvf8Yv765ovlp9dFeuEnQ0QZx6wT4Rcuz+jgpTI6v95KTFh4hkt5relp7NXFiZAPJWFOojA4hF0UtbvZ/ophXrt1lagZQJVd9uCFjjC5D9huoJJ4eO9XPWZ1TbHsoi3d5L70RSzd5L7ZIeEinFRRLvhN+0QiSeNtbBxm/rYqc6nMFvv0Ap50RtPimgs37L+6nP+5ojysc4cS698kwkP6Kjqn0Up2FmIm+hp3fcp111IJP7vfSdGXrvBM8Pdvpdi0/HDqdQjlzgrCn+pGdIpNAp4dxuYJmT23UkKiHn0BlVGk+TU8P/Z728EMlD8DY6+n3uBuyc4D2p8eulx/eMdZ22uKABNAyiTjWh4oLWQ6W0K2Lz19ta+JTXKG0jrx5kV/29H/JRABgYeSdOUqrYE3L9yP+ZUqCT2z0UZkbNZvku/AxgwiAVSZxjUxH3zUJt2BojgwNTUV4UvDkyC2sKxe+FNlVyrhR/3kj6q0twdcLpd3fCnYpeT7XY9fDLxd4Z/8uUmSLZGebTd1Gq5c45aZrTirn5kYvPopWfu/p7HrwfBrFDsyhafpnMEkuJwvt0Z7x4sv4xMNWg7TVC5/YcmGYMIjLqteJkHqqzT+7B5ZHipqMXZIxajzzW6tNRdwXJLlF9acbFU1HGnuHky+YdZn79YpCjNRb6So4ZLZjpctpgEvv2mWFuaUKc6337VcPf2VwdRrtQ8F8F14OZ+2Sf2tCZbzAduC1S3nW7mIrbScL7kIM5Gg+wbeNzOrtPnaQz9bpucC/V9WiWk676PKmsOHK3Pzyj5TdQyML0x5e0+V4eVSFC2p0ztGI5Ex942zlSKKLj3Vw3V/E4mMOvR1+fk1J8+faVCouvEMaLwyrU8vz6HoHJn+Jxf3yBiz/hsNZDVXeWO37cbJAooukLf3OTqOSCiRRNnpHhv3WVtLaUpUea
EvoXkkdGPXE5I/CyF76wG+yRBVLysmJPLihnI3XjpY+nm3w3wyh6bJ4mpm8oG6COV/nFyp0TO7PEDuSXGm3boK8Uf1F03dV5qOtNr+mg8/79NU42xX3fAE/vJY1LxErfI1hb1GmRgvn9PYx14JRSD5ofjgrMcozcUL3lrv44mQcYhainaX7JWeOHfyYHldB15IiV6W/0elBK92KG3sctw4mUNlSOSoWbkQlwa1pf+nlAmJEcteUaXmticYEciuuaC7UykRUZIjHe7ApO+2qjSLElVr+p6HmXEHDjIOLZZ2kHZtPPmtFsctTSX6t0JlGQwEBi2qCpoSV2ru+lLYzDZm6gcAAAC2E8yk48KR9oHng6j/6RqwW82GporPhRY3bTVS9dT/dZhx54VT7QNPHuMc/xXleHuTtH5Fi3JaQPgBAACA1YEN78XswH+MCfup8+xEpa3Neyr8ZBE/O3WDg5m2X/hccKLbqgHhBwAAAFZH9IlhTyZnryZjFrZ21Ynu5VOdthxM5GWvPIPKkPe+TLUf67/DrNdQRYuqNbeHxlHE8AaL7c0neoa56RrrBIQfAAAAWCVMJGBva6wpzKap7MIqpcY0IDwveosRdpu4JfWI6+7w+2TAiLy0t52oKRRTlLiwql5jesRuSLARQPgBAAAAYBsBwg8AAAAA2wgQfgAAAADYRoDwAwAAAMA2YisKP9n5R17boL+iaTh91TX2b07BZIJusxp7vBZVNLX3+9jNwmZ9/W0KCZVbo7nLHRGAmQ24fsT7UcRd8QIAAADAW2cLCj+7M2W2xjU9oNmZ4Mvh34F1orzk0gPBzDhbd5aTzYzTQhaOgvADAAAA744NCP/skKGqLM1uwG+LpT3/mUh44jXe2f5tEhlz37yqaZBzrn5qPtU6k1oaxEkzf7d/xm8+eECbvFW1ACD8AAAAwDtm3cJPnE+kdQPwVoj6rKdr8pBYYm/GeiveqJtY/htP67t4e/7PPrvRuAdvblzTMdCvk0qyye7Kax0TYCKBPnVVTZPR5uF8UQjCes+LZ0V0wtZc0nx/MtXTIi/7dcoyyR6ZynCz/WgWT/iZ8BOL5mSzvqMd/TU4Nr5YEwAAAACSWJ/wR8PuKycMV05nx9RuRSc9m8hSjx/9SOnlj+tM5x5qs929Is2lREcTdpOe8Zg1ilKRWGr2Y3XFbnAldRberoiI8K+aMrnRu6LnT2bOqc6icmSWUfxr9rF+XxPrjEuIkFtbQVO7Gm1jDBP+0yBdGuOff2qqzaXKjT4UiWl7U2ZmkfpBytYDAAAAAKyL9Qg/E3r4xdGu5zMDmrjwY7e87b3Y8dTbJ0H4Wb/+BSrnFPrB9+vPCT/Z4piZHXvxN+dKeAnivjrm0nvao99fbcKaGwM7CS03PEkl4HyId3BRgX4wip05KvcbvSk3sMQ+ngV98C+E7KdyKCpL5cS2BeaFqSqLog8Yh8EpHwAAALCZrF34mYkB3bnu4RmiWu/c1I/gC3/Urc2ll8zs7Cn6sCUYXcXwOXGozPawmTFbyxlLgK/X47aGWr2HeM5eEdxBpzIabFMTfcf3XR5KPaaA4xuPfILwT7u1e3hniGPmuBUBAAAAADaJtQp/dNJxsaXXh8Vpyws/6tPLSDjR0IDh7E0/Udw4SHolhZUHuCl9Syyf3IfaDWRif/kF4zm52jmRxuzBxQqEHwAAAPiXWKvwc/rEg87Vut+pK2YBUz8n/AKm/nTCT+YEiI7cGLz79VVXKFmuwy5NZcNqXR9O2Jvy0ZNzGvvSyT4iaJGhhkpqUz+XLDD1AwAAAG+H9U3uI2DVinW18Rh/503vux/jX2lyXzrhZ6/Jkih7/ULz5xd8XfvKvnKFV9OqmfUaKilRnXl0Je+U2Jv1Lora3Wx/xTCv3bpKvPSg7LJndgEm9wEAAADvgE0S/nc2q5+ZGLz6KeoZU/Sexq4Hw6+R+JPlfE3nDCah5XziWu2dP1+nkM+o++tSecdQSmmPhoc66qqO6i2/eN
Mt50MshGwtew2e1TilZsKD3c3Swpwyxfn2u5arp78ymHqt9qHALD71xKI5pTZ0wnI+AAAA4C2xAeHfHjDhYbtJr063gQ8AAAAAbBlA+AEAAABgGwHCDwAAAADbCBB+AAAAANhGgPADAAAAwDYChH+ziIZ9P7efkRZxO/AAAAAAwPsICP+KRMPPvH8ledYPmGXJAv9m6vXYgKYwtvXelkMomWtgg7cDAAAA7wgQ/vTMBRzf1Ozr4HvvwQgIPyLseh+Fn4kEfjp79u5YOlFOkczVssHb1wQzO9TRcvVxGNoYAAAALJGg5575ivrKg8lVbaYDwp8GZn7E1lq5s7Dh21vuYILQvEvhZ4Lu3q9keSKK2m/08fcRYuafd1Sjw6KKJtPveLdCIZiwS/fp1wMh9DVEJoe+V0rQDUkeFlInM4EN3r6JzPrNLcctSb4VgO3HxopGjGjY09Oqs7+bb3dTQW36+7rGE2caKsXZe1vtf292Cub85s/E4s/M/rVvHM6MO3XnO4aE93Jd8PccFOcdNL/A+5ZvGeaC7jsdF1tklW3vdo/61RAZd9/SK3ZTlMwcWFW9uFHhZ8KBv6beu2xYA8zUkOGQmMqQyL+1+4N+c6vOFeJOIRaeGEoqtG6+zhHecY//zR/aXcsUd95rrBZTqHJTWFPvKBQe0n923Bbv7bO+f5KUO3Uyk9ng7ZvHvMewv8k6BtK/7Vl/0WCJhgZ0B1vubsldMheGTfskNeYRVIl5792y+4iD8Y2CWtUqHZeZuEmkVvd4VrVt+TKYoL21ySCk/Xj3UrWm+924cd805sa9doNsZ9yR2vqJ+MxK/WbrBPE593aFn/nbqsTeZTG7dO43W7DMxHnz9M4t7ywqOb2nyrJ3lKn7+FUA4zdVZ7XYp9kj0z77DTOLoaGkRnOd+3HHPc7m9dsS/gWvoQTnNd9ZHyqf9ZLsTIoSS83+VC+AmbAq81qdM/HzwsqdmMw0bPD2TeSN3yQr0Q+uZo9k4ANm3UWDhRm7XV/QZJvYml0X7PErGwv/phEND35bJVrWsl8vqP6p33PesTrj81YA1/AbFX5marDtgGjzdeKtCz8z723/rG2L17lM8I9Lcrznf3Zrn/t686EyiaS6qXswFHpi0SjK8nYWyb/pD0yPWRRZx7odruFkt/zr7vHP/a7d88UaX/mcz3joQNOxMiqryvSCVGRMxG+uq1ZfPFlIUZUG7yy5bDnRCevR7CY7ryPAKff5voe96oOF4vzKpuue8FzKZCazwds5mPCQufW0pv2qtvks51th7TA+Y/lWb3QCqUCSJi5v7BgIrtARX3fRYJlyafZkcgWEmR2+q28+VJS97yvz1cZSVDdkSJQ/PPf/rJXuoimKljTc8KPQUCf4ektD8/kzskJRsbLnT+4BzNSQ8XOlxmi6qCwUkypg+RF877XjshMXuwyqqhKpIT5PJRoe6qyX1x8uFWeXfenkZDLFgzgWwu7rmqaaPEqUV/O5RqPtsj/q01SLSKOcGXdbtLIcirQJZp9b9S2yory9mu/0sgKaonmPmAs4Liuk9arzxyole5stz/5n0tWuLBVR2aUKlcaELXhM+OlN1WEt1w6YC/R/LcfXn6jJL5Rq7+M+UrrwEeO2ht3ly52YMKHhm+oqLVsRkuQrL3SZdIrCPcvaHKkyLUbKCAhkIBN+LpRL/si4x9alkReVnm7/tk6SQRfr3DOTnu4WWcPXXe1nqvJlMbuFoPDHv5xqTZdOJslAzdAyjYP4V1sehyjJ42xKVKo41/x//nelBH1nNW2/j426ryrJ/1/97JtG8vS7TppVdfXPWRQKHtDRyj9RqlpP1hTmS3X9gTk8om/7XiMvyTt9tauugKYrtO6JuPBHxn83yCQSmfbGgz/HU5SgdQj/a6eqKLuwpkHTcc8Xc2b7zpz0bCKsl7+MTwx//DMbcqjzRRSVL9VavZOTzzsOiSh6h+reQ+0eUdn5+yijkxAQ/rlx7z29dEeGzODwchaAZS
yE7KcLmvp5Ywmr4R+r4hNDv0lGx10Sjlg+lesfPzSUZFA7Na454VdLnAUXJ3YIWOXOLlPf9c+MO9Wos5RdbfL8kSqZyWzwdgJxQkhLzQGGeFMU11sC62pDzjlVWUkju8CHAiqbe7+40d1SXnrkyr3nqSdyrrtoEGYcqh1iXgFBzYj9FLVb2e0JM8yMs3UHlZXf0PM8HF2ceaTJz8D+x9E/uxut2EIQdmtLKfqgyf8G3YkND2wMmL+t9Z9bgtHlRxYXnhorssgxZs6pzordy0z0Nebs1Xum572GcprOLtxb85nJGxZ+UAK4Blvq8RNfo7HOOu8UbiJTtFj63VA4GvF3S0WozPoYdhpEwTknnv2DqvRCij6MIskLBOlNn7oMNYC4nzNuXfEu9vpFZvJ+syS7WPsbauunCJ9lfsRUm/wiIn/Z1HtxPcK+MjxQWEIegToqzZ8uWW4IKTKNj3AEUrwpoVxCwu91GOQZFJ2j6HT0XW480fv0SXsFzV425VQV0NUmYjtK1eNnv5xc0i6Z9ZtkIvZxwnHgBTI/qC/OEMkteCyW8Zmqs7n/sUVTUWd5if+d+U1bXKZ2En/vTNDeXITbJaEgirEMtzHquxzWtkaVeTgUE/7pQN/Z2pVGr9Yu/EzQfbPjIm7g0JT4ENcUwm5523u31pgNK/zcW0wwYpOPI61L3+DNY7o1z/Bgwu42qcK0Vq0K2RqK9J45HCcyZhkZs578WPMwNGqW0lRGgy1lMwI79S9ROae4nxh+MlnTkNBnnJIN3o5g3rh1u2J3kXzmG2nXgkDqgA8FVDZJ6SP+KmXltRqLoMvvdRcNAhGMmAZgEo2lCbJKvvya7kHrURFXLZBma+zrjXr0BVyLYSH8ZPDFPLP8yCLzaqDt9Bd4Ch7RMO7RqIo/SLPlgQyh0hXG4QVsqxN8UAIJMUwp/An/k4vI05DYl5QYnpDpdUjjH3R0OpFUJASSEE/UxtoZs44gWEfkR7GqCYfPwoawrIGOW+2xy6KD+oJMifKaFzWwws/cLxLthsKZlohABB6OpcjAVLlEjhfE6xNm8te249/YgyiCWPhjcU0l/AlfTuwRUyleIj+QuWHjAVqksODpSiGXphh9x/h/1Ag4cMoWQi+HfAmZSwOpZGxrp8L6D/uYLJUzlrNsHKrOXzm1ouoj1j+5jwk/Nkhz6T0G71oF8D0Bv/X1Cv+aiYaHrZpaucaRdlWdAAyq1vJReWOeGcszqSqTf+x2/Z7zzlAkZDuBOjX4C0gFTkZSOfnXhZ+7K4PMkOjRKyRrDiEOikweV9qBDwxUNpdKH94a64ri4/LmXqwNS2ygaBBiFXS8gCRU3wlywgm/wawpxDZaTZwLl+yjuERH/upr/gg1NiSySw7W9LX8CIbd5uuTUskOmnv0tFu7h8o4QWp5EgFsWMDaIPwgPgkxTEwO/xT/f3IRLnFYbnOWF5+kPFn6GXVrc7n2CYFVMnJKMHxyESIpQI6Ey+YCfWeKaIqW1OkdggN/yzMtEYEI2JwpMjAhPrwbBeLJhHz32s9ISyW5GbG4rkn4/07xEhMCwROk6B1y1LmfcaiKJHki/H90uONAM2sYJp8H/4nT9qZMciAhDxFc1UpRWWX6x+mHuBDrF34EaX3EysmWA7/1dyT8C76eTwvFFJ1XeoBz7puS2q+duAqIM+PR15AqjMQw62CDcn8zXnk0YW/KT1zFtGwLHZyMpHKyQeXe4O0I0lGgKF7XYd2gyIDwf6Cgsrmy8G+gaBCwliXU9QnVd4KcsI+o0Roa8miZJUgOJRMZdejrJDRF5RyzjLLan3QEK1xx8QmTe2x2SWaY+eGeOsnOKp0zOOO3Nu6tNT2dx0PjqR8UJyGGKSUt4f+4WuB/MmI9/iUSAuH/JMLPXyVBTpXrPTPC4ZNrEEkBciRfNhdwXCKj4/nK5GW6gpmWiEAELNYUGZgQH96NyfHE7baK4kaTO/iKp9NrEn5fipeYGAjjN9fm0NXGX39s2N/14D
/KXJG886eLn6mcr8lpIvysZYUFhy4q0A+yozK8uLBx+PjzpmoRXa4ZIEMDqdmQ8OOM41khthj4rb8j4cfgdYN1e9S/xKZFrA7cm6klVRj5XCg6p7EPv1LcYBdRBXoP9z0IbaGDjeEfJZaTDSr3Bm9HLJDeGEUVt3m4ipiJRKKL8/77rVWicqOPwRdklv//etrrJWw3iFv6go5/vrOhb5LcgwFT/wcMKpuk9DHhZ9av5MKm/o0UDRY8Kz6vwTbO/VxZ+P/vDVwL15lHY7NSIi9udTrQQ6Pe2z0e1JSNhoe+k4ozUL38ZtmR6JhFLuImISbIDDM1eFVzoavH3HvnoZ+dz8BW9wIPSiAhholh8k/x/4+rBdIbqZhXDKOhgWvXPNMJgSSEidsi1I74EiFSkHNO2VEJFQyfXMRdRmYPcAdY+JdFvT/0eN7gd41NyLwXh2FSZRofgQj87EqRgQmB8G5MDDwyZlGIqFrTCLqdr9NrEv5/UrzEpEDIs+gdefnHrRPzJLuysovj05bZCrMw1g4gw1sZBU32V7GUxuMSi4N/yFgtpou/JHu3pGTNws+ERx5zk9eiE7ZTH5P5HWSMv/Om4Djcewt+6ysJf5Ig4eE3cu/6YF7Zm1Gn5NXqcwkbgvK0ZDYB6SvnNLKL18nwZHyAJ9UWOqkm97HJ5Cm3YDLnn1tajrZYnsc+W4Tw7fOvH3codpMhVTynWqlDOYqqifIG2xiOm8/Sojhr8cWG7sIPNUWZeCJM24PhgN/T12G49xdOEnodbCNyztmqsIzNsnP3psesjTlsg3f6wRff/LE0sQe/pGVjh8CHAfoY9n5xw9xaXfzpRZvw5L6NFQ0Cnk61g9frXVH4e567dcU0La65cMP+q8v5nyvKwzo3tshGXBdKtOzHia7Mqzb55pcdieBR/4wiza/h+bHf2w6i2lwzMPZ6ajZkP7WzSvvTgMvlehqbg01m0gk9KIGEGLL2V3FVmzs8/89A2ydi1BTKqzpvDyZctqQW8wFLvZjKLFRctjofOW/r5EcsAdSCwZHMVVoDb164H08mzPWbHbpURufXW8mIAzNmaywnxonEaCSrEZ71RmYtcL85+Jeh/0tYncP5H5tGx0HiszzTlqoBjEAEHoVSZGCqXOKlFDHj0aOPqEzjmpgPPmqT7kAhDkxNTUXWJPz/neIlksAzjlknwi9cTyYZsug6Q5SveYSryFB/U07GLu7LIcw+1pdl59TfJoPESHObdtWahlFzjZ+HmKU4kN2rRJLm+2RlgTBrFf6F8MAXRTT6XHRdXZcuXOrnJhFsuVn9zNiArhbv8UFXtDpeTvzZpcihUXVRqro78vpFn6oC/aAkjde8kwz6qviClDorVwP67PbgCe3cz7Qws74+nWwXJapSmd3jDHqvDXWWEWZx2tffoaraiSIoqlR12X14OEd4Cx3cluQt54tMDrbL48mcfGKSS3AGlJ7tGyH2uqRksgq91ABPc/s0nneD2g3RUWt9PrEHMtP2S9/g+LAfDLFNscHgmUS/tB37OI+ms4s+1dle4BUrCPwdo692fsb13Rf2IIP7c3vO/6e39dTJw1no+Nzkg+s3eTuI4fo9sXMAfDissJxv40WDBVWjJ3dwShMN+x1G5W48q9/o8AeGncYjEorOkRsc/ldj7k68Z6XkiNHhHvh/TpVl4+qBPyYddet2i0sVF7/vudJS14r3All+BHX4bih34/VmpZ93O8wnc2haoujyTk06zhfh8GKIZUZvGHWlyM4iyQ+KwUTGPX16eQ6KoUz/k4sYCpi/7a14tjwl3q/pu3Ymr0SmumrzPnd3oISIJMpO99i4z9paSqPMudDnCzF4weFhPBKBty/rGGKHUWY9RtTtpnLKWn8eDQ7dxovfxJWaW55xVPSIQT6/sOZkq6rhSHP3IH5oZCxl+DiaqGlVhJoRCW8xEvTc1lSK0GWa255gBOdU7kcKnannclPdBVvS4iDhTOO9zVQRGB75UzADBXLp/9
52PLihrhKhmq35GjG6oA7Mj0qyMK+0sctx42QOziLDfUevulJMiao1t4d4y+SYSPC3DvbL6fhtbPKZFcsHyrS7vvCE0EtkZr0dUjFNLW22iDqESoOX7Rq9dqqPGYf5mYArTL2sKL/mxHlVo6L5Ot5PCaX6xlmUh3TpqZ6H/xVmomFfvwHv3Ee+3snnZjkqBTurVN0DKRZbbczUvx1IEiTu6HpZeGk33PVttlyl2kIHG8py+Rv4pEY4mVPOb75fjbhiGc5W/8eqPdV0KKsGtWyCD9rv+OPN/LmH31yJC38K8CDiR1qHo+PSL6ShivpJO/Pyj5pfeozllRf771zqGOTNWIENfIBVscLuUkzAdkKpHxJsFrwbopPOr4+0P3z+GPX4XQN2q7m9qaJ+xQ0HtwTRkPN8idzsF269Af8mIPwrkSxI7yG4Z59iC52QWyvjbdmbGsFkzrguXXItC1OIgLlGtDNf0fvyOeqI6/p/NqIeBHdqkZlxGS+5VhyMR0qfW1h7xcUNTY3bGvKJ+Qu3ACTS9iHOMkCY9xj2Na5zAwBgG5GmaHAw4cdXj3/Zt8pdKDYbsoj/IBlLjjFtP/X5CgsRtwKon/pT6/GYIQF4zwDhX5EkQXoPwVOBUm2hw3PSk57lyQy5O9rJStZVgKec7NEgdcctgF3Sdl7vfMbd8c1qTCUjZrkCGzk5ULxbibRP2NWfmxLqbXDSA6ySdEVjicjL/vaf/hWPMVGvYQ+9ZEsf99jam1Xdq90D8z1m4YXte8eKq8mBfwsQ/hVJEqQtB256r+SWF7GxZEZcuvobeChv+oFaRuaevC3ALS/wITEXsH/bWFOUTdHZhdVKzbVUg7IAsImA8AMAAADANgKEHwAAAAC2ESD8AAAAALCNAOEHAAAAgG0ECD8AAAAAbCNA+AEAAABgGwHCDwAAAADbCBB+AAAAANhGgPADAAAAwDZiKwp/NOy9qZHXNuivaBpOX3Vt0GHexmCCbrO6UkRRooqm9n4fu5/8rK+/TSGhcms0d7kjAjCzAdeP2CXUZjv+BwAAAIDUbEHhjw7qC0TYEfH0gGYnTZzA/qtgB7KZVJbaORfXeGbG2bqz3OBdaeda4lIZhB8AAAB4d6xX+JnQ8B19vezYV6YHvnfsfylgrkFiiYQ/wkTCE69n37JvjciY++ZVTYO8huNTrTOppTHlVBVQlMwciHmNYfzmgwe07pU9bIHwAwAAAO+YdQl/5K++5ori5rv/gvOlqM96uiYPiaWoVHFOb/XNcpb/xtP6rquaxjMG5yiW39lnNxr30FhTOwb6dVJJtkTe5U1pdU8FEwn0qatqmow24jsrFfMjplqKKtS4WA830Qlbc0nz/ZQ+fCMv+3XKMskemcpws/1oFk/4mfATi+Zks76jHf01gG8rAAAAYPNZh/DPPDfWivLPOfmeXt881pfk7DE8eRd9/6UeP/rBzA+basVZ5cZnzCIzbW/JpEvVjnEkmFxnOvdQm+3uFWkuJTpqneDFbsZj1ihKRWKp2Y/VNdTflCOps4wkKG34V02Z3Oid5n6mhJlzqrOoHJllFP+afazf12QdS+UzNuTWVtDUrkbbGMOE/zRIl8b455+aanOpcqMPRWLa3pSZWaR+kLL1AAAAAADrYu3CP+NQ7RDlHDr9ZaNcqmg1sj1sZsrT297rmXoXOpUg/K/sTQUUVaByTqEfzIipiqLoCuPwQkz4q0xIzJnZsRd/zyTHjXlpke/IbLITYZ/26PdXm7DmxogEzIfLDU9SCTgfxmcsp0QF+sHo4qzfpNxv9M5zZ5YxYVUgqc84YQvhEQqeqX8hZD+VQ1FZKie2LTAvTFVZFH3AOAw+OgEAAIDNZM3Cv+A1lFCFDb3eMBMa7j0uofcZvDPcuXcDX/ijbm0uvWRmZ0/Rhy3B6CqGz2c8+nKuh82M2VrOWAJ8vR63NdTqPatLGu6gUxkNtqmJvuP7Lg+lHlPA8Y1HPkH4p93aPbwzI+YadCZmRQAAAA
CATWLNwk+0KjaRbeGJoSSL9HTfIZsm/KhPLyPhREMDhrM3/SRJcZD0SgorD3BT+pZYPrkPtRvIxP7yC8ZzcrVzIo3Zg4sVCD8AAADwL7Fm4WcCZim93+hjTdBIn3L+TeHnTP2c8AuY+tMJP5kTIDpyY/Du11ddoWS5Drs0lQ22ce7XCkzYm/LRk3Ma+9LJPiJokaGGSmpTP5csMPUDAAAAb4e1j/EzfnNtQS07J25+UF9Sidet4TH+zpvedz/Gv9LkvnTCz16TJVH2+oXmzy/4uvaVfeVa1WLFWa+hkhLVmUdTDu5zMGO2xl0UtbvZ/ophXrt1lXjpQdllz+wCTO4DAAAA3gFrF368yO1nTe2hpiud7ZqTLd2DYSRO72xWPzMxePVT1DOm6D2NXQ+GXyPxJ8v5ms4ZTELL+cS12jt/vk4hn1H316XyjqGU0h4ND3XUVR3VW37xplvOh1gI2Vr2GjwryT6GCQ92N0sLc8oU59vvWq6e/spg6rXahwKz+NQTi+aU2oAzFpbzAQAAAG+DdQj/9oIJD9tNenW6DXwAAAAAYMsAwg8AAAAA2wgQfgAAAADYRoDwAwAAAMA2AoQfAAAAALYRIPybRTTs+7n9jLSI24EHeLsw4ef32s/UFH0B2Q0AwObAhHy4WtkX87j2wQLCvyLR8DPvX0me9QNmWbLAv5l6PTagKYxtvQesD6HcXhwxy7SJ2cpEpqamBjTZkN0AAKzM8oolEjA3JQt8JPR66qEmO+5q9YMFhD89cwHHNzX7OvjeezACwo8Iu0D4N0SK3BYQfgzefwmyGwCAFRCsWISEH4GrlQ9B+JnwU9fTlKkA4U8DMz9ia63cWdjw7S13MOGbeUfCP+M17MPbEFHiRK8Bewuz0eGYx4QtBDM1ZDgkpjIk8m/t/qDf3KpzhbhTaXIbhB8APgQi4+4ftbJdeAO2vNJYjXagslC80dpsPRXLvyf8kTG3tfNis6xS78abxy28MB/MEx/s8eNt3DcCM+9pK8Z6QRApLCm9w4Pwpwd7IarQupd9BO+qx89M9DXmII0n/vu5Y4TIc5O0fusJ/5und255Z5kpb++psuwdZeq+hN0JU+U2CD8AfCjgYrvkjYyAt4HfWG22norlXxT+ca/DIMuI7zs/5enWqNk9cDcCM2Y7fqipvceM6b3pSpehaxV+5o1bhxtscXa0Opd5uv9gYPym6qwW+zSbwGmf/QbJU7PZ0FBSo7nO/bjjHmdz+G2Y+rlOP11u8CYMUM0H7pgfTL5T70hvm8TcZmZ9D3q5HL7cUHIwlt298WY7CD8AbDkEhB8p4ePffam9mW+QhIpl1mfnqpXrhobKGk0n+8N8y41dvCDegfAjSC5sbu3FjJprcxu6n3BqlJ61Cv9rp+5C7zDrym4hZDshbrJPkxNbDCb4xyU53vM/u7XPfb35UJlEUt3UPRgKPbFoFGV5O4vk3/QHpscsiqxj3Q7XcLJb/nX3+Od+1+5Z20T0lJ3+LcQGcxt6/ADwvhMNWhTi0hMdrn/Sl8hlwj/r/fGOF3dhmEjgvlb+iVLVerKmMF+q6w/MLUaCHtv3GnlJ3umrXXUFNI067nEb/iIzPnBJJsH1yvk77u4zh0p3SSpbuj0TIe9NjaIiT1wi190PROZTVCzr6PFHw0Od9coLXSadonAPuSYa9lxvaWg+f0ZWKCpW9vw5y14mcDCRBOGPhodvqap0rggzO3xX33yoKLta06WTSTIoKqdM4yCu2lYMM+zWluKuOOolSg5qLE/S2w82YuoftzWUco5r35mTnk2E9fKX8Ynhj39mQw51voii8qVaq3dy8nnHIRFF71Dde6jdIyo7fx99f0kICP/cuPeeXrojQ2ZweFO1uRZC9tMFTf1LX+6qSNXp31JsJLcFZ/WP/3lffzAjQ06ye8tmCwB8ICAdPbz3q+vdzdWlim/v+Zb5OY+RKPxMJHhfXUb6QjO/aYvL1E7i2JwJ2puL6GKdOxRkreJUTn2Xw9
rWqDInyDd6qAzXK1LDH8HpkPNcPvqR94nW+mQy/KRDivoahSrnqFu4YhES/si4975emrFTZrALOGbDQwYlrN5PWJs/tYwuzjzS7G60TiDdI7pLHzT53wgfTGJJ+OcCtnNl2XQsR+Z8xv0UlSs1PA4zs36TTLTaMKPhkcEB+60ObX0pngGWJzf7UsgQZgPCH7I1ZHN+5Ylb3vZezztxy7tZJLj3HTHXoK+Ra+ixn2Y6l77Bm8d0ZF7GWmDC7japwuRL7+hPgM3p9Ef+sunanWseHYhOOtt1tr/SfEOrYiO5vThqOXZpzdkNAMC7A+moosY8ghUIdbhrK2s1N71Cjk/Z8h6f3HegNI/G1QKS0qOizPhI3+KC11BC7VRY/2FvyFI5hepNTvi5eiWhksH2V/SDREmQaNByVudei8E6OqgvyJQor+F0hZ+5X4RxnLmKiziFp3JklhGhg6P4F58l4UdMOVUFsR9siripjuQqVE9OrSpMDtSWcuqqxFTOKTurzkKsW/ixnT+7wbbGzuv7xIakaK1Ew8NWTa1c41ifcsc6/VJzYJ3KP+s3n9O5prhfa2PKpTtn9gvYqxKIBAcN8gxqBxk2u268eLwyr9U5F4vuO81tAADeMXHhJ+CdcL5VlFY39ybbnNnyHtM8dOVLi/JrV2Qa9ct5RxcXp+1NmeRAgkYmsRHhXwdzgb4zRTRFS+r0jtEIeYSoVHFOE+fCJfufjwQOjiZX2wmJIlHlfggK/99CD1oW5hLMjLN1B7VHm7pZs27h59n5tyjvUIoWfD2fFor5S1hSUvu1U6iZRtq/G+jxzzjUh7r967x5kfF3H1I7eCa2uaD7Tof+C/IJfmPxsp8XbgtmiI4SexQicMf4YJL8hwHhB4APmfUKP+oU/T0WZojwL9Ue7HWiAv1glNzAu57POxZ+xFzAcYmMvucrLY9uN+TRMkuQO8WClHH5wWUkJGpF4fetKkw+OCvK9Z5l06VirFf4Q7aGrHTLBLcA71iK8ErTuj3qX1KOfaWBjHiJpN3+5YPZzMRQR70kg4y5RHxmpd4VQQL8+c6GvsnJvgZxkcr5Gl005/qyyvCENChmR+5rKkX7jb45/BIzDxiHsQmNef24Q7E7A5twmIjfrNShXEFfcHmDbQzftPDEUPWli+2+o4RcbW68cteTPAYWcmmKiU2CmR+2//wi8SwIPwB8yMSFPxoevvPVSqb+mObFId0GPCSP6isCqp0yCprsrxI1Mol3K/xR7w89njeoCgw/NkhzqYJWwwXUWKkzj85zF0Re3Oq889NXyw86yMwFHgmJWlH4/yGtopXC5IE7im/B1I+HGbLllqXuJx7j77zp/eDG+Of991urROVGH4O/y8wK43DKnFwFzCt78/5m+6s15hIzO3SpTJQwnTWBOacqCwn59Ji1MYdtMk8/+OKbP94wU94HD3247M2PmI422Se463Fii8nPKWfr6VjrjZlzqrNQSqOj1vp8kcI6gd/ypW+4xa+oEVBvGkGfXXTC+vk+o5e9JwE88yWD2KNOKz4+Fx+r41iV8As3SgAAeO9BinV471c9ZnVNseyibbWT+3jMPtaXZefU3yayEp2wNe2qNQ3PM4kamcSqhJ9s4JNbbnzG4Hpsd4Xx6TprcRSTEp37DYoffi5d/f2T33XFNC2uuXDD/qvL+Z8rysM699SMe/nBZVV3QqJWFP7/XjFMJjzUq7vCSTAz4VTvexuT+147VRVyy8ulV7vlZvUzYwO6WjH6SuiKVsfLiT+7FHj2XFap6u7I6xd9qgr0g5I0XvNOMuhjYqeczDlbFby2zrpADbE9ax2nn/caq3NyGvuWte+i4aeeF7PMIvPMWL7n/H96W0+dPJyFvpi5yQfXb/r5kok+rBpWZQnoZyluCM+42r6wk7UiGMZnLM9W/8eqPdV0KAvJMBN80H4ntplUPIR/rPUtS+Y4Hvh2Ylxipmyff544+WP1uS3cKAEA4D1nNcv5eDv3FSovdj9IXL
vPRAK/6GVF+TUnzqsaFc3XPajTEhlz3zhbKaLo0lM9D/8radSAGX+oq8nFoZVqHGNjf3Yq8ZphukLV9+L1yF1VaRZFiSQNPd7Xs0hNM/HKc9S9+VLBV641EXXrdud+pNCZei431V2wBeZQj5dsGYQrsNjAP4qW0EE+sUSJKjW3PSMBzy1NpZgSVWtuDwZGXR3K3RS1W9nx29jkMyuuG8WVmru+8ET6MJnArXpJBkXvqjmpVjWr9NZnb2853/YAN7qQms7PuL77wp60lezaWXhpN9z1raF9NB+w1Iv5Rh4OJhJ8oNmvIbsnIbHcmZd/1PzSYyyvvNh/51LHYOJMvCThn3ZrP8rV3nd3GBO2AEJNHNHOfEXvy+fG8gJd/8/GjiF+W4ENYcRcW8mfZBIb44+MWRTcpFxm4sWL9QxoEIQbJQAAAOsG1+LYpjjlarts/7D2PVsfIPwrEXVrcz/SOhwdl3559zoUW8gnDBmSR4zbGvLzNY9mSAtAIm0fYtvRCab+et5MTGxNEhXWXXFNMIvRsM/1kLXL4UG1PRrXFGkB7JK281sPcVP/3LDxaIvAaAW6IC8WH/To1+6vtTfX019P0SgBAABYL7gWz/3S4b5+6cGGO28fBCD8K4LUNLew9oor9M51iJkY0JSnlH1KLDWz8/TDLl2rJYBUecKu/twU3+AiYXLfF+X6wVgCkPAfkxo9RNdfO1WFVInBu4Bbxbr6G3gYYvqBWkZG1+JEB/XlX3CT+yJ/9Z2qb+l56F+auTMX/OM7eQ4tqlR9b+4xGb48WbMrc51LPfmNEgAAgM0Ad2ZILQ7VCgGEf0VGzHKF0Rs3em9N3vyhPXDZI7zx34T962tY+FPCzHsuH9D+wdspai7ovmXQ8k39mwW/UQIAALAZBHrl0g7vW3MHsOUA4d8mRMas504JbAMQDQ/98N2Dv9MVCGbMduqcFebZAQAAfBCA8G8bmKmhTuO9Net3ZOyesXNoSy3UBAAAAFIDwg8AAAAA2wgQfgAAAADYRoDwrx8m/Pxe+5maorX513/7MJHA4OPAMkeQ/zbLsmsh/PSx4KaeAAAAwNsDhF+QaPiZ96+EOfCRZc6bmcjU1NSAJjvFZpL/Ekj1f1If7/Gt4KJ+eQI3wjqziwk/0h03DoH2A9uCuXHPPdN3P49sZNtvABAkEvTcM19RX1nl9ifbVfiZiaHOIxJKcM/ouYDjm5p9Hb4EWVyuZBi8IdTmC39k3H1Lr9iNYkcXNVy56R7HMUEHzapKMV2o1NuGU612Y0K/qAsV5peoux+ZHPpeKRHFt8TnIZjAdbOR7GLeuHUlUtNKzRQA2BhM0H3zogIXh8yihss33WQXF3TQrK4UZRYq2my+tEtSmSlP95c6e9rFLyswO+LoaCpLuen8ukCt/Pu6xhNnGirF2XtbNxQ9Qeb85s/E4s/MCft/rw5m3Kk735FiUvCCv+egOO+g+cWWagIRl6QXW2SVbe73rqsSlwxuk/8V2cY9/gSPDnEY4tFhZ2HDt7fY2oHjXQo/Ys5n3I80e8lXFWbKqSpLcJGQzIRDVVG95EEnwRdOjFQJXB8bz65X9qayg+b/gl4Q8HbBLi0yqR2tZJfrGHNOlXglL6PMxMAFZUvfXxsu5WGeLxZB4o4xV8fCsGmfBO9vzUx5792yp2+7rJZZv1ml40puNOzpUat78J7564AJ2lubDELaz4QHu9Wabs/WWis0N+61G2Q7N6HpxrlR5X5tEqjKXfLusyLrEP5o2NvbIj/2lbHjYsNnx81/btW9VoSFHxWnJ4aSCi3nlS7OuxV+5qVFvoNine3GQRHbIzP5Uw7eM2MWeYac9+IFhT9VAtfLRrOLmXNpdu5ifV4BwNsClw5RfJdrDuw0q9rE7n+ZgsiY9XiBgIusdbCC8DNhd1uVeA3VSdAiozfX33w0PPhtlWi5jXCdMBPW+j3nHR/O3tsrNt1WATM12HZAtPmq8baFf+GpsaJE5ZzC/0/bm7KJH9itCC
f8rX1DP6ilReK86qbuwTCzyPhN1VnE2Qxi1mfvNROuGxoqazSd7A/zLdb8vhrhZ+bcuj1rfct423xKxHd8jAIKmKV7yN66wqAXLxcluP7jhP9838Ne9cFCcX5lE3Z4lZDAlETG/zDIsJsAdLuju/lQqaQA3x6a8Fq+VJRJxEWf6vpfokRtQnZht8LFGte6dvgFtjujFllBaWOnK5jeHM26e9+RaDBDReazPYYn6axNM480+QWcS2sm5Ou7UCkiNT8TdFu0uIBgP5ZT3jsGlawk99g3HY3l2Md09l61jbMQMOHHxroq6UnVSdn+ssIsTjbw2EGLrOHrrvYzVfky3C1mXrnaj5WKKOLY+ro7PB/2XG9paD5/RlYoKlb2JPWvFsLu65qmmjxKlFfzuUaj7bI/6tNUi0grnxknUWNd0c8+t+pbZEV5ezXf6WUFNEVnl33p5JR4LuC4rJDWq84fq5TsbbY8+59JV7sSRSG7VKHSmHBbngk/vak6rOXaAXOB/q/l+PoTNfmFUu39QIRJGz4Cu8EtN3iSPIyhnBy+qa7SsjVBNDzUWa+80GXSKQpRTZnU5oiGPdeOy05c7DKoqkqkhsfJTudSRgDdmJyBTPi5UC75I+MeW5dGXlR6uv3bOkkGXaxzz0wmvyD8MEHhZ2aH7+qbDxVlV2u6dDJJBkXllGkcxLHL8jhESR5nk9fc/H/+d6WEojJq2n4fG3VfVZL/v/rZN73IBH/XSbOqrv6JtxrEAzpa+SdKVevJmsJ8qa4/MIdH9G3fa+QleaevdtUV0DTqek3EhT8y/rtBJpHItDce/DmeYhR17cIfHdQXiMt0KOdIIzqXmM62nFteBCv8oo/VfS9mQg51voiiD5r8M2MWRdaxbocrvuU9y7p7/K/sTdVN9lfcr1XBzDhbd1CZxIF0nOiEtaFwab/95fxjVeQVJFzACn92mfquf2bcqS5B/1ebnv0jnMDlsLfvkBpcwVn2dlGeVGv1vgo/75SK0Bn06uc3I7uWxxwAVgn6Smu/utHZXP6x4srPxCWVIMQnBbXf6OO3D9CHV6P3pCkHbEnk9aLwFxyv+UkBQcK/4LddkBcizSlqsQyH5oMP26S5lOgTkx9V0D6zvFhqeo6vjzw3SZHQ4JsXho0VNNuxnnKqCmjW6sAPHDU4djeSPlXYrS0lVdMyUx+uwZZ6/OTuWGedd4r4y6bF0u+GwtGIv1sqQpWAD1Xe8887qgvOObELEpI59GFLMMoLBOlNn7osJ2YyZLBL+F3s9YvM5P1mSXax9jeUdynCZ5kfMdVSOzWcmw+WyF829V5cMbEpxSbDEvIIVMU1f2oZZa/iwP3MLHIlM+dUZwnlg3AEUmSgUC4h4fc6DPIMis5RdDr6Ljee6H36pF3gBaXs8bMjs7mkXTLrN8lE7OOE48ALZH5QX5zBdfAYn6k6O9bZe+M3KerYRurMb9riMrWTmJyYoL25CLdLQkEUYxluY9R3OaxtjSrzcCgm/NOBvrO1LXdxsyw16zD1R4L96iJ6Z5Xqa43ycyPbFEIN2N723q01ZsP1+Nm3yHWONa4xt3aPqOz8fdSqSmC5kjGR8T/v6w9mZMgNDm+KhlU0PHhFeqDLt7YR7BmPvpyiKg1efit/wt60P123GH/RS7UAIZ4oFG0Uf/RZoOQ++E04gcsRvJ3kFik95COb2ozsIiUBO80EgLWCvlIFFmbU7bZoastlGssTAU/kuLsi4pxRxZm2N+UnalIypELnV/Tky48diAl/8nFm3tNWjBvuT+fcul2iuE10qcZnJn9tO/6NPYgux7rC3bkUCJLAoyKuRDDT9pZMKkeWpIiI1Ql/wv9Lj0BiX1LCWTuQxj/o6HQiqUgIhNVa7idqJO0kLu1ZZr2GSopNmnD4LGwISe0t1sgXuwy/mkyJ8hpe2Rt+5n6R2A5jXg20nf4Cz1vkRyYRgQg8HEuRgalyiRwv4CzZ6B7BF5RS+NnqkWsgxh4xleIl8gOZGzYeoE
XsLJOQS1NMsf+jRsCBU2Scl3wJrLtzwoLXUELtVFj/YR+TpXLGcpaNQ9X5K6dWVH3Euib3kXYHhRtZlwdWsLC9x+C3vlz4l31VHNGg5azOvZbpM0xo2HqhVqp1jCd/JivAzkJKaia/+UNbdDqdfX7CqkgenEut3KtiNcKfKrA1ZRcJvEDvYatHAFgDMeEn4L0irhwpLT/V603ohLCdwp2aAV5thVeUFC0pmSDLKnry5ccOpBL+uLDZPUgdl44mhsaEfPfaz0hLJbkZ3MGlQPCVxOYf58Il+2hy4ecLHnd3rAbgnxLQRVcEy21O/N44CYHwf2K/tvRSUjglI6cEwycXIZIC5Ei4bC7Qd6aIpmhJnd4xGr+RRzTs+7n9zCelkh308qAQAhGwOVNkYEJ8eDcKxHP5C1qb8P+d4iUmBIKHSmkyAjXjUBVJ8kT4/+hwx4HmftLJm0Y9q4QnotZqJjmQkIcIroqmqKwy/eMVJ96tXfiZqSGDss7knRy+pSrLEVV3PN+05eDvFvzWVy/8a2XOZz5amE3TeaUHalagVutM6MiTVytSWMm4IgvuQ3y0NC9JaBU+Tk5S/P9F4V8TJPC1xAwAYqxG+FmVIv2kJaY9+k8abOPcL8EytbzaTahtUws//<KEY>>
# + [markdown] colab_type="text" id="-MlxVvPZTALt"
# ##### Message function
#
# The message function is an affine function whose parameters are produced by a neural network.
# We use the SELU activation function because of its good scaling properties.
#
# For more details on `selu`, see for instance paper by Klambauer et al. "Self-Normalizing Neural Networks", 31st Conference on Neural Information Processing Systems NIPS 2017 ([arXiv:1706.02515v5](https://arxiv.org/abs/1706.02515)).
# + colab={} colab_type="code" id="DOwY4E_tTALu"
def M(h, e):
    """Message function: an edge-conditioned affine map applied to node states.

    A small network (dense layers with SELU activation) maps the edge
    feature `e` to the entries of an (N_H x N_H) matrix A and a bias b;
    the message is m = A @ h + b.

    Args:
        h: (num_edges, N_H) hidden states of the sending nodes.
        e: (num_edges, ?) edge features.

    Returns:
        (num_edges, N_H) tensor of messages.
    """
    with tf.variable_scope('message'):
        bs = tf.shape(h)[0]
        # Edge network producing the matrix part of the affine map.
        # (The order of the tf.layers.dense calls is kept, so the
        # auto-generated layer names match the original graph.)
        l = tf.layers.dense(e, args.Mhid, activation=tf.nn.selu)
        l = tf.layers.dense(l, N_H * N_H)
        l = tf.reshape(l, (bs, N_H, N_H))
        # Batched matrix-vector product: (bs, N_H, N_H) @ (bs, N_H, 1).
        # `axis` replaces the deprecated `dim` keyword of tf.expand_dims.
        m = tf.matmul(l, tf.expand_dims(h, axis=2))
        m = tf.reshape(m, (bs, N_H))
        # Edge network producing the bias part of the affine map.
        b = tf.layers.dense(e, args.Mhid, activation=tf.nn.selu)
        b = tf.layers.dense(b, N_H)
        m = m + b
    return m
# + [markdown] colab_type="text" id="5GO8gy7kTALz"
# ##### Update function
#
# The update function is based on a custom implementation of a GRU (Gated Recurrent Unit) recurrent neural network.
# The reason for this is that a GRU is simpler than an LSTM (i.e., there is no output gate) and has fewer parameters. It is a recurrent unit whose internal structure reuses weights by design (i.e., weight tying) for every element in the sequence. We also tried using a distinct unit for each message-passing step t in the experiments, but training took much longer without a noticeable improvement in accuracy.
#
# For more details on `GRU`, see for instance paper by Chung et al. "Empirical Evaluation of Gated Recurrent Neural Networks on Sequence Modeling", 28th Conference on Neural Information Processing Systems NIPS 2014 ([arXiv:1409.1259v2](https://arxiv.org/pdf/1412.3555.pdf))
# + colab={} colab_type="code" id="XfXWPHq4TALz"
def U(h, m, x):
    """Update function: a GRU cell computing the new node hidden state.

    Implements the standard GRU equations with explicitly created weight
    matrices; weights are shared across message-passing steps via the
    enclosing variable scope.

    Args:
        h: (batch, N_H) current node hidden states.
        m: (batch, N_H) aggregated incoming messages.
        x: node input features (unused here; kept for a uniform interface).

    Returns:
        (batch, N_H) updated hidden states.
    """
    # NOTE: the original defined an unused truncated-normal initializer
    # here (dead code — never passed anywhere); it has been removed.
    with tf.variable_scope('update'):
        wz = tf.get_variable(name='wz', shape=(N_H, N_H), dtype=tf.float32)
        uz = tf.get_variable(name='uz', shape=(N_H, N_H), dtype=tf.float32)
        wr = tf.get_variable(name='wr', shape=(N_H, N_H), dtype=tf.float32)
        ur = tf.get_variable(name='ur', shape=(N_H, N_H), dtype=tf.float32)
        W = tf.get_variable(name='W', shape=(N_H, N_H), dtype=tf.float32)
        # Local renamed from `U` so it no longer shadows this function;
        # the graph variable keeps its original name 'U'.
        U_mat = tf.get_variable(name='U', shape=(N_H, N_H), dtype=tf.float32)
        z = tf.nn.sigmoid(tf.matmul(m, wz) + tf.matmul(h, uz))  # update gate
        r = tf.nn.sigmoid(tf.matmul(m, wr) + tf.matmul(h, ur))  # reset gate
        # Candidate hidden state.
        h_tylda = tf.nn.tanh(tf.matmul(m, W) + tf.matmul(r * h, U_mat))
        u = (1.0 - z) * h + z * h_tylda
    return u
# + [markdown] colab_type="text" id="v_SZihSRTAL1"
# ##### Readout function
#
# The readout function is obtained just as a neural network with attention.
# By 'attention' we mean the mechanism in the neural network allowing it to learn what information is important in a given input. In our readout function, the output of the network $j$ is multiplied with an output of network $i$ processed by the `sigmoid` activation function at the output. The sigmoid function takes values in the range $(0,1)$. Multiplication by such a number acts as a gate selecting the important parts of the output from $j$ for the final summation.
# + colab={} colab_type="code" id="OM-Yb7B4TAL2"
def R(h, x):
    """Readout function: attention-gated sum over node representations.

    A sigmoid-gated branch, computed from the hidden state concatenated
    with the raw input, selects which parts of the value branch's output
    contribute to the final graph-level vector.
    """
    with tf.variable_scope('readout'):
        combined = tf.concat([h, x], axis=1)
        # Gating branch. The order of the tf.layers.dense calls is kept
        # identical to the original so auto-generated layer names match.
        gate = tf.layers.dense(combined, args.rn, activation=tf.nn.tanh)
        gate = tf.layers.dense(gate, args.rn)
        # Value branch.
        value = tf.layers.dense(h, args.rn, activation=tf.nn.selu)
        value = tf.layers.dense(value, args.rn)
        gated = tf.multiply(tf.nn.sigmoid(gate), value)
        return tf.reduce_sum(gated, axis=0)
# + [markdown] colab_type="text" id="0rf3eChjTAL6"
# The Python function ```graph_features```, given below, exercises message-passing phases on a graph and produces a vector representation of a network.
# + colab={} colab_type="code" id="r_a7WuboTAL6"
def graph_features(x,e,first,second):
    """Run N_PAS message-passing steps on one graph and read out a vector.

    Args:
        x: (num_nodes, 2) node input features (normalized mu, Lambda),
           as built in make_batch.
        e: (num_edges, 1) edge features.
        first: (num_edges,) indices of the message-sending nodes.
        second: (num_edges,) indices of the message-receiving nodes.

    Returns:
        Graph-level feature vector produced by the readout function R.
    """
    global REUSE
    # Pad the node input features up to the hidden size (N_H = 2 + N_PAD
    # presumably — TODO confirm against the definitions of N_H/N_PAD).
    h=tf.pad(x,[[0,0],[0,N_PAD]])
    initializer =tf.contrib.layers.xavier_initializer()
    for i in range(N_PAS):
        with tf.variable_scope('features',
                               reuse=REUSE,
                               initializer=initializer,
                               ) as scope:
            # Messages from each edge's sender node...
            m=M(tf.gather(h,first),e)
            num_segments=tf.cast(tf.reduce_max(second)+1,tf.int32)
            # ...summed per receiving node.
            m = tf.unsorted_segment_sum(m,second,num_segments)
            h = U(h,m,x)
            # After the first construction, reuse the shared M/U weights
            # for all subsequent passes (and subsequent examples).
            REUSE=True
    return R(h,x)
# + [markdown] colab_type="text" id="YAjLRLACTAL-"
# At this point, the network is represented by a vector. Therefore, we can pass it through a neural network for inference (forward pass).
# + colab={} colab_type="code" id="C9TMPqnlTAL-"
def inference(batch, reuse=None):
    """Regression head: map graph feature vectors to a single scalar each."""
    xavier = tf.contrib.layers.xavier_initializer()
    with tf.variable_scope("inference",
                           reuse=reuse,
                           initializer=xavier):
        hidden = tf.layers.dense(batch, args.ninf, activation=tf.nn.selu)
        output = tf.layers.dense(hidden, 1)
        return output
def make_batch(serialized_batch):
    """Decode serialized tf.Examples into graph feature vectors and labels.

    A tf.while_loop walks the batch; for each example it parses the graph
    description and runs message passing (graph_features) on it.

    Args:
        serialized_batch: 1-D string tensor of serialized tf.Example protos.

    Returns:
        Tuple (batch, labels): per-example graph feature vectors stacked
        along axis 0, and the normalized labels W with shape [bs, 1].
    """
    bs = tf.shape(serialized_batch)[0]
    # TensorArrays accumulating per-example features and labels.
    to=tf.TensorArray(tf.float32,size=bs)
    labelto=tf.TensorArray(tf.float32,size=bs)
    condition = lambda i,a1,a2: i < bs
    def body(i,to,lto):
        # Loop body: parse example i, compute its graph feature, append both.
        with tf.device("/cpu:0"):
            #unpack
            with tf.name_scope('load'):
                features = tf.parse_single_example(
                    serialized_batch[i],
                    features={
                        'mu': tf.VarLenFeature(tf.float32),
                        "Lambda": tf.VarLenFeature( tf.float32),
                        "W":tf.FixedLenFeature([],tf.float32),
                        "R":tf.VarLenFeature(tf.float32),
                        "first":tf.VarLenFeature(tf.int64),
                        "second":tf.VarLenFeature(tf.int64)})
                # Node features: normalized mu and raw Lambda as two columns.
                ar=[(tf.sparse_tensor_to_dense(features['mu'])
                    -args.mu_shift)/args.mu_scale,
                    (tf.sparse_tensor_to_dense(features['Lambda']))]
                x=tf.stack(ar,axis=1)
                # Edge features R as a column vector.
                e=tf.sparse_tensor_to_dense(features['R'])
                e = tf.expand_dims(e,axis=1)
                # Edge endpoints: sender (`first`) / receiver (`second`) ids.
                first=tf.sparse_tensor_to_dense(features['first'])
                second=tf.sparse_tensor_to_dense(features['second'])
                g_feature = graph_features(x,e,first,second)
                # Normalize the label into a convenient training range.
                W = (features['W']-args.W_shift)/args.W_scale # 0.7-0.9
                return i+1,to.write(i,g_feature ),lto.write(i,W)
    with tf.control_dependencies([serialized_batch]):
        _,batch,labelst = tf.while_loop(condition,body,[tf.constant(0),
                                                        to,
                                                        labelto])
    batch = batch.stack()
    labels = labelst.stack()
    labels = tf.reshape(labels,[bs,1])
    return batch, labels
def make_trainset():
    """Build a shuffled queue of serialized training examples from args.train."""
    queue = tf.train.string_input_producer([args.train])
    _, example = tf.TFRecordReader().read(queue)
    return tf.train.shuffle_batch(
        [example],
        batch_size=batch_size,
        capacity=args.buf,
        min_after_dequeue=batch_size,
        num_threads=2)
def make_testset():
    """Build a FIFO batch of 200 serialized test examples from args.test."""
    queue = tf.train.string_input_producer([args.test])
    _, example = tf.TFRecordReader().read(queue)
    return tf.train.batch([example], batch_size=200)
def line_1(x1, x2):
    """Draw the y = x diagonal spanning the joint range of x1 and x2."""
    combined = x1.tolist() + x2.tolist()
    lo = np.min(combined)
    hi = np.max(combined)
    endpoints = [1.1 * lo, 1.1 * hi]
    return plt.plot(endpoints, endpoints)
def fitquality(y, f):
    """Compute the coefficient of determination R^2.

    Args:
        y: array of true labels.  (The original docstring called this
           parameter ``x``; fixed to match the signature.)
        f: array of predictions.

    Returns:
        R^2 = 1 - SS_res / SS_tot.  Note: if all labels are equal,
        SS_tot is 0 and the result is not finite.
    """
    ssres = np.sum((y - f) ** 2)
    sstot = np.sum((y - np.mean(y)) ** 2)
    R2 = 1 - ssres / sstot
    return R2
# + [markdown] colab_type="text" id="9iiHd4D0TAMA"
# Below, the main training part is presented. We run message-passing on a graph, then perform the forward pass of the inference and compute $MSE$ loss for the delay. The loss backpropagates and the weights of $M$, $U$, and $R$ networks are updated.
# + colab={} colab_type="code" id="xJvPoXfCTAMB"
if __name__ == "__main__":
    if not os.path.exists(args.log_dir):
        os.makedirs(args.log_dir)
    # Variable-sharing flag consumed by graph_features(): None for the first
    # construction, flipped to True after the first message-passing step.
    REUSE = None
    g = tf.Graph()
    with g.as_default():
        global_step = tf.train.get_or_create_global_step()
        # ---- Training graph ----
        with tf.variable_scope('model'):
            serialized_batch = make_trainset()
            batch, labels = make_batch(serialized_batch)
            n_batch = tf.layers.batch_normalization(batch)
            predictions = inference(n_batch)
            loss = tf.losses.mean_squared_error(labels, predictions)
            # Mean relative error; built but not optimized (kept for inspection).
            rel = tf.reduce_mean(tf.abs((labels - predictions) / labels))
        trainables = tf.trainable_variables()
        grads = tf.gradients(loss, trainables)
        grad_var_pairs = zip(grads, trainables)
        summaries = [tf.summary.histogram(var.op.name, var)
                     for var in trainables]
        # Comprehension variable renamed from `g` so it no longer shadows
        # the tf.Graph bound above (summary tags are unaffected).
        summaries += [tf.summary.histogram(grad.op.name, grad)
                      for grad in grads if grad is not None]
        summaries.append(tf.summary.scalar('train_mse', loss))
        summary_op = tf.summary.merge(summaries)
        # Run batch-norm moving-average updates together with the train op.
        update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
        with tf.control_dependencies(update_ops):
            train = tf.train.RMSPropOptimizer(
                learning_rate=0.001).minimize(loss, global_step=global_step)
        # ---- Evaluation graph (shares all weights with training) ----
        with tf.variable_scope('model', reuse=True):
            test_batch, test_labels = make_batch(make_testset())
            test_batch = tf.layers.batch_normalization(test_batch, reuse=True)
            test_predictions = inference(test_batch, reuse=True)
        test_relative = tf.abs((test_labels - test_predictions) /
                               (test_labels + args.W_shift / args.W_scale))
        mare = tf.reduce_mean(test_relative)  # mean abs. relative error (unused op)
        test_summaries = [tf.summary.histogram('test_relative_absolute_error',
                                               test_relative)]
        test_summaries.append(tf.summary.scalar(
            'test_mse',
            tf.reduce_mean((test_labels - test_predictions)**2)))
        test_summary_op = tf.summary.merge(test_summaries)
        saver = tf.train.Saver(trainables + [global_step])
    with tf.Session(graph=g) as ses:
        ses.run(tf.local_variables_initializer())
        ses.run(tf.global_variables_initializer())
        ckpt = tf.train.latest_checkpoint(args.log_dir)
        if ckpt:
            # (Message typo fixed: "checkpint" -> "checkpoint".)
            print("Loading checkpoint: %s" % (ckpt))
            tf.logging.info("Loading checkpoint: %s" % (ckpt))
            saver.restore(ses, ckpt)
        coord = tf.train.Coordinator()
        threads = tf.train.start_queue_runners(sess=ses, coord=coord)
        writer = tf.summary.FileWriter(args.log_dir, ses.graph)
        try:
            while not coord.should_stop():
                _, mse_loss, summary_py, step = ses.run([train,
                                                         loss,
                                                         summary_op,
                                                         global_step])
                writer.add_summary(summary_py, global_step=step)
                if step % 100 == 0:
                    # BUG FIX: the three target names below were separate
                    # expression statements in the original (missing
                    # parentheses), so the first two raised NameError;
                    # parenthesizing makes them one tuple assignment.
                    (test_label_py,
                     test_predictions_py,
                     test_summary_py) = ses.run([test_labels,
                                                 test_predictions,
                                                 test_summary_op])
                    test_error = test_predictions_py - test_label_py
                    R2 = fitquality(test_label_py, test_predictions_py)
                    print('{} step: {} train_mse: {},'
                          ' test_mse: {} R**2: {}'.format(
                              str(datetime.datetime.now()),
                              step,
                              mse_loss,
                              np.mean(test_error**2),
                              R2), flush=True)
                    writer.add_summary(test_summary_py, global_step=step)
                    checkpoint_path = os.path.join(args.log_dir,
                                                   'model.ckpt')
                    saver.save(ses, checkpoint_path, global_step=step)
                    # Scatter plot of predictions vs. labels for TensorBoard.
                    fig = plt.figure()
                    plt.plot(test_label_py, test_predictions_py, '.')
                    # NOTE(review): both args are the labels; presumably
                    # test_predictions_py was intended as the second —
                    # left unchanged since line_1 only uses the value range.
                    line_1(test_label_py, test_label_py)
                    plt.xlabel('test label')
                    plt.ylabel('test predictions')
                    plt.title(str(step))
                    with io.BytesIO() as buf:
                        # Renamed from (w, h): `h` clashed with the
                        # hidden-state naming used elsewhere in this file.
                        img_w, img_h = fig.canvas.get_width_height()
                        plt.savefig(buf, format='png')
                        buf.seek(0)
                        plt.close()
                        summary = tf.Summary(value=[
                            tf.Summary.Value(
                                tag="regression",
                                image=tf.Summary.Image(
                                    height=img_h,
                                    width=img_w,
                                    colorspace=3,
                                    encoded_image_string=buf.read())),
                            tf.Summary.Value(tag="R2", simple_value=R2)
                        ])
                        writer.add_summary(summary, global_step=step)
                    if step > args.I:
                        coord.request_stop()
        except tf.errors.OutOfRangeError:
            print('OutOfRange')
        finally:
            coord.request_stop()
            coord.join(threads)
            writer.flush()
            writer.close()
| jupyter_notebooks/LittlesLaw.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="xc7PIZE8JLOn"
# # TensorRT EfficientDet-Lite: Converting AutoML Models to ONNX
# + [markdown] id="gyTSjP0tSbOy"
# This notebook contains a sample that converts EfficientDet-Lite's AutoML Model into an ONNX model for running on TensorRT.
#
# Reference
# - [EfficientDet Object Detection in TensorRT](https://github.com/NVIDIA/TensorRT/tree/main/samples/python/efficientdet)
# - [EfficientDet](https://github.com/google/automl/tree/master/efficientdet)
# + [markdown] id="IVN4iRp-MANe"
# # Export Saved Model
# + [markdown] id="Y7KCVFV6v3hB"
# ## Clone [google/automl](https://github.com/google/automl) repository and install dependency.
#
# + id="p92ZK9WgMUO-" language="bash"
#
# cd /content
# git clone https://github.com/google/automl
# cd automl
# git checkout 38ecb93913fc19e429ab2a572f1aa8f5286723cf
# cd efficientdet
# pip3 install -r requirements.txt
# + id="-hdNRMKmghcl"
import os
import yaml
# Make the cloned automl/efficientdet package importable by the subprocesses
# launched below (e.g. inspector.py). Bug fix: the original indexed
# os.environ['PYTHONPATH'] directly, which raises KeyError when PYTHONPATH
# is not already set in the environment.
_extra = '/content/automl/efficientdet'
_existing = os.environ.get('PYTHONPATH')
os.environ['PYTHONPATH'] = f"{_extra}:{_existing}" if _existing else _extra
print(os.environ['PYTHONPATH'])
# + [markdown] id="5mAZX3UcwNOv"
# ## Download EfficientDet-Lite checkpoint and export saved model.
# + [markdown] id="1I-MAXHA2ULX"
# ### Download checkpoint
#
# Select the checkpoint you want to export.
# + id="XBoqlDSUzj_X"
#@title Select EfficientDet-lite model.
checkpoints = 'efficientdet-lite1' #@param ["efficientdet-lite0", "efficientdet-lite1", "efficientdet-lite2", "efficientdet-lite3", "efficientdet-lite3x", "efficientdet-lite4"] {allow-input: false}
# + id="KL807XNIgbgH"
# Download and extract the pretrained COCO checkpoint archive for the
# selected model variant.
file_name = f"{checkpoints}.tgz"
path = f"https://storage.googleapis.com/cloud-tpu-checkpoints/efficientdet/coco/{file_name}"
# !wget $path
# !tar xf $file_name
# + id="bnLxJBP263Je"
# Square input resolution ("WxH") for each EfficientDet-Lite variant; used
# below as the image_size hparam when exporting the saved model.
size = {
    "efficientdet-lite0": "320x320",
    "efficientdet-lite1": "384x384",
    "efficientdet-lite2": "448x448",
    "efficientdet-lite3": "512x512",
    "efficientdet-lite3x": "640x640",
    "efficientdet-lite4": "640x640",
}
# + [markdown] id="hHbfx90N16-4"
# ### Set NMS configs
# + id="Oxq6eFnMltRu"
# Hyperparameters written to saved_model.yaml and passed to inspector.py via
# --hparams: the model's input resolution plus the NMS configuration baked
# into the exported graph.
obj = {'image_size': size[checkpoints],
       'nms_configs': {
           'method': 'hard',        # greedy ("hard") NMS rather than soft-NMS
           'iou_thresh': 0.35,
           'score_thresh': 0.,      # presumably 0 keeps all boxes — confirm downstream filtering
           'sigma': 0.0,            # soft-NMS sigma; presumably unused with method='hard'
           'pyfunc': False,
           'max_nms_inputs': 0,
           'max_output_size': 100   # at most 100 detections per image
       }
       }
with open('saved_model.yaml', 'w') as file:
    yaml.dump(obj, file)
# + id="7-k9lq3HtlOj"
# !cat saved_model.yaml
# + [markdown] id="4BNt4C-XWftC"
# ### Export Saved Model
# + id="NZxZ9sZX6tao"
# Checkpoint directory (the extracted archive) and the destination for the
# exported TensorFlow saved model.
model_dir = os.path.join("/content", checkpoints)
saved_model_dir = os.path.join("/content", "saved_model_" + checkpoints)
# + id="_uYpqiV9iSr5"
# Export Saved model
# !python /content/automl/efficientdet/tf2/inspector.py \
# --mode=export \
# --model_name=$checkpoints \
# --model_dir=$model_dir \
# --saved_model_dir=$saved_model_dir \
# --hparams=/content/saved_model.yaml
# + [markdown] id="HIrcd6Qy2l-P"
# # Export ONNX
# + [markdown] id="Kxml0yQr2nqm"
# ## Clone [NVIDIA/TensorRT](https://github.com/NVIDIA/TensorRT) repository and install dependency.
# + id="8JjgABL4gMKw" language="bash"
#
# cd /content
# git clone https://github.com/NVIDIA/TensorRT
# cd TensorRT
# git checkout 2d517d270e3697a5775e7861873dc21d5fba6bae
# cd /content/TensorRT/samples/python/efficientdet
#
# pip3 install -r requirements.txt
# pip3 install onnx-graphsurgeon --index-url https://pypi.ngc.nvidia.com
# + id="ha7r5YfSkoPh"
# %cd /content/TensorRT/samples/python/efficientdet
# + [markdown] id="B2VyLH1wMz0U"
# ## Export ONNX Model
# + id="2c8XWCfL287A"
# Fixed batch-1 NHWC input shape per model variant, passed to create_onnx.py
# when generating the ONNX graph.
input_shape = {
    "efficientdet-lite0": "1,320,320,3",
    "efficientdet-lite1": "1,384,384,3",
    "efficientdet-lite2": "1,448,448,3",
    "efficientdet-lite3": "1,512,512,3",
    "efficientdet-lite3x": "1,640,640,3",
    "efficientdet-lite4": "1,640,640,3",
}
# + id="HH4fyzBp3YBK"
# NOTE(review): `input` shadows the Python builtin, but both names are kept
# because the shell magic below interpolates them verbatim ($input / $output).
input = input_shape[checkpoints]
output = os.path.join("/content", checkpoints + ".onnx")
# + id="gDCqKQdMkvDd"
# !python3 create_onnx.py \
# --input_shape $input \
# --saved_model $saved_model_dir \
# --onnx $output
# + [markdown] id="zKOXwyfDM43s"
# Now Download ONNX Model.
| cpp/efficientdet/Export_EfficientDetLite_TensorRT.ipynb |
;; ---
;; jupyter:
;; jupytext:
;; text_representation:
;; extension: .clj
;; format_name: light
;; format_version: '1.5'
;; jupytext_version: 1.14.4
;; kernelspec:
;; display_name: Clojure (backtesting_clojure)
;; language: clojure
;; name: backtesting_clojure
;; ---
; import libraries from kernel
(ns clojure-backtesting.demo
(:require [clojure.test :refer :all]
[clojure-backtesting.data :refer :all]
[clojure-backtesting.data-management :refer :all]
[clojure-backtesting.portfolio :refer :all]
[clojure-backtesting.order :refer :all]
[clojure-backtesting.evaluate :refer :all]
[clojure-backtesting.plot :refer :all]
[clojure-backtesting.counter :refer :all]
;;[clojure-backtesting.parameters :refer :all]
[clojure.string :as str]
[clojure.pprint :as pprint]
[java-time :as t]
[clojupyter.kernel.version :as ver]
[clojupyter.misc.helper :as helper]
) ;; require all libriaries from core
(:use clojure.pprint)
)
;; ### Calculating indices during the process
;; #### Moving average
;; 10-day moving average of AAPL's :PRC (price) column; replace :PRC with any
;; numeric column key to average a different field
(average (map (fn [_] (Double/parseDouble (get _ :PRC))) (get-prev-n-days :PRC 10 "AAPL")))
;; ##### An example of defining moving average
(defn moving-average
  "Average of field `key` (parsed as a double) over the previous `period`
   trading days for ticker `tic`."
  [key period tic]
  (->> (get-prev-n-days key period tic)
       (map #(Double/parseDouble (get % key)))
       average))
; path to dataset = "../resources/CRSP-extract.csv"
; change it to the path of your own dataset
;
(reset! data-set (add-aprc (read-csv-row "../resources/CRSP-extract.csv")));
;; initialise the portfolio with a start date and initial capital
;; NOTE(review): the original comment said $10000, but 0 is passed — confirm
;; the intended initial capital
(init-portfolio "1982-2-12" 0);
;; define the "time span", i.e. to trade in the coming 10 days
;; +
;; run the strategy over the coming 10 trading days
(def num-of-days (atom 10))
(while (pos? @num-of-days)
  (do
    ;; strategy: buy 50 shares on the first day, then sell 10 shares on every
    ;; day where the remaining-days counter is odd
    (if (= 10 @num-of-days)
      (do
        (order "AAPL" 50) ; buy 50 stocks
        (println ((fn [date] (str "Buy 50 stocks of AAPL on " date)) (get-date)))
      )
    )
    (if (odd? @num-of-days)
      (do
        (order "AAPL" -10) ; sell 10 stocks
        (println ((fn [date] (str "Sell 10 stocks of AAPL on " date)) (get-date)))
      )
    )
    ;; record portfolio metrics for this date in the evaluation report
    (update-eval-report (get-date))
    ; move on to the next trading day
    (next-date)
    ; decrement counter
    (swap! num-of-days dec)
  )
)
;; sanity check: the counter should have reached 0
(println ((fn [counter] (str "Counter: " counter)) @num-of-days))
;; -
;; inspect the final portfolio state
(deref portfolio)
;; NOTE: Clojure's `=` does not treat integer 0 and double 0.0 as equal
;; (different numeric categories), so this evaluates to 2; use `==` for
;; type-insensitive numeric comparison
(if (= 0 0.0)
  1
  2)
| examples/Moving average.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: data
# language: python
# name: data
# ---
# +
import pandas as pd
from sqlalchemy import create_engine
import re
import nltk
from nltk.corpus import stopwords
from nltk.stem.wordnet import WordNetLemmatizer
from nltk.tokenize import word_tokenize
from nltk.stem.porter import PorterStemmer
from nltk import pos_tag, ne_chunk
from nltk.corpus import wordnet
from plotly import __version__
from plotly.offline import download_plotlyjs, init_notebook_mode, plot, iplot
init_notebook_mode(connected=True)
import plotly.graph_objs as go
from jupyterthemes import jtplot
import matplotlib.pyplot as plt
# %matplotlib inline
jtplot.style(theme='solarizedd')
plt.rcParams['figure.figsize'] = (20.0, 10.0)
import seaborn as sns
import data_utils_mt.utils as utils
# +
# Load the disaster-response messages table from the local SQLite database
# produced by the ETL pipeline.
DB_NAME = '../data/DisasterResponse.db'
MESSAGES_TABLE = 'messages'
engine = create_engine('sqlite:///{}'.format(DB_NAME))
df = pd.read_sql_table(MESSAGES_TABLE, engine)
print(df.shape)
df.head()
# +
# Per-label positive counts (label columns start at position 4); the negative
# count is the complement out of all rows. Both are sorted ascending for
# horizontal bar plotting.
label_totals = df.iloc[:, 4:].sum()
positive = label_totals.sort_values(ascending=True)
negative = (df.shape[0] - label_totals).sort_values(ascending=True)
# -
positive.index.tolist()
positive.shape
# +
# Stacked horizontal bar chart: positive vs negative count per label.
fig = go.Figure(
    data=[
        go.Bar(y=positive.index.tolist(), x=positive.values.tolist(),
               name='Positive label', orientation='h'),
        go.Bar(y=negative.index.tolist(), x=negative.values.tolist(),
               name='Negative label', orientation='h'),
    ],
    layout=go.Layout(barmode='stack'),
)
iplot(fig, filename='stacked-bar')
# +
# Same counts as above, rendered as a stacked vertical bar chart.
fig = go.Figure(
    data=[
        go.Bar(x=positive.index.tolist(), y=positive.values.tolist(),
               name='Positive label'),
        go.Bar(x=negative.index.tolist(), y=negative.values.tolist(),
               name='Negative label'),
    ],
    layout=go.Layout(barmode='stack'),
)
iplot(fig, filename='stacked-bar')
# -
# Cluster the label columns by correlation (helper from data_utils_mt;
# presumably reorders columns so correlated labels are adjacent — confirm in
# that package). cluster_idx holds the resulting column ordering.
clustered_labels, cluster_idx = utils.cluster_corr(df.iloc[:, 4:])
# Static seaborn heatmap; rows/columns that are entirely NaN (constant
# labels, for which correlation is undefined) are dropped.
sns.heatmap(clustered_labels.corr().dropna(
    how='all', axis=0).dropna(how='all', axis=1))
# The same correlation matrix rendered interactively with plotly.
correlations = clustered_labels.corr().dropna(
    how='all', axis=0).dropna(how='all', axis=1)
trace = go.Heatmap(
    x=correlations.index.values,
    y=correlations.index.values,
    z=correlations.values
)
data = [trace]
iplot(data)
from models.model import Model
model = Model()

from tqdm import tqdm
# Tokenize every message with the project model's tokenizer (tqdm progress bar).
tokenized = list()
for text in tqdm(df.message):
    tokenized.append(model.tokenize(text))
# NOTE(review): the %time line below recomputes `tokenized` from scratch, so
# the loop above is redundant apart from showing a progress bar — confirm both
# are intentional (timing comparison vs progress display).
# %time tokenized = [model.tokenize(text) for text in df.message]
import numpy as np
# Flatten the per-message token lists into one array, then build a token
# frequency table sorted most-common-first.
tokenized_arr = np.concatenate(tokenized)
tokenized_arr[:20]
tokenized_arr.shape
values, counts = np.unique(tokenized_arr, return_counts=True)
counts_df = pd.Series(counts, index=values).sort_values(ascending=False)
counts_df.head()
# Top 20 tokens, re-sorted ascending for horizontal bar plotting.
counts_to_plot = counts_df.iloc[:20].sort_values(ascending=True)
counts_to_plot
# +
# Horizontal bar chart of the 20 most frequent tokens.
fig = go.Figure(
    data=[
        go.Bar(y=counts_to_plot.index.tolist(),
               x=counts_to_plot.values.tolist(),
               name='Most frequent tokens',
               orientation='h'),
    ],
    layout=go.Layout(),
)
iplot(fig)
# -
| notebooks/EDA.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
from sklearn import datasets
import numpy as np
import matplotlib.pyplot as plt

# Load Iris and keep only petal length and petal width (feature columns 2, 3).
iris = datasets.load_iris()
X = iris.data[:, [2, 3]]
y = iris.target
print('Class labels: ', np.unique(y))

from sklearn.model_selection import train_test_split
# 70/30 split; stratify=y keeps class proportions identical in both splits.
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=1, stratify=y)

from sklearn.preprocessing import StandardScaler
# Standardize features using statistics estimated on the training set only,
# so no test-set information leaks into the scaler.
sc = StandardScaler()
sc.fit(X_train)
X_train_std = sc.transform(X_train)
X_test_std = sc.transform(X_test)

from sklearn.linear_model import Perceptron
# Fit a perceptron (learning rate 0.1, fixed seed) on standardized features.
ppn = Perceptron(eta0=0.1, random_state=1)
ppn.fit(X_train_std, y_train)
from sklearn.metrics import accuracy_score
# Evaluate the fitted perceptron on the standardized test split; accuracy is
# reported both via accuracy_score and the classifier's own score method.
y_pred = ppn.predict(X_test_std)
wrong_examples_sum = (y_test != y_pred).sum()
miss_error = wrong_examples_sum / len(y_test)
print(f'Misclassified examples: {wrong_examples_sum}')
print(f"Misclassification error: {miss_error:.4f}")
print(f"Accuracy: {accuracy_score(y_test, y_pred):.4f}")
print(f"(From classifier) Accuracy: {ppn.score(X_test_std, y_test):.4f}")
from dec_bound import plot_decision_regions
# Plot decision regions over the combined train+test data; test_idx marks the
# 45 test samples (indices 105-149, since training rows are stacked first).
X_combined_std = np.vstack((X_train_std, X_test_std))
y_combined = np.hstack((y_train, y_test))
plot_decision_regions(X=X_combined_std,
                      y=y_combined,
                      classifier=ppn,
                      test_idx=range(105, 150))
plt.xlabel('petal length [standardized]')
plt.ylabel('petal width [standardized]')
plt.legend(loc='upper left')
plt.tight_layout()
plt.show()
| Ch_03/Iris Dataset.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import numpy as np
# # Find the following mean and standard deviations for the following distributions:
# • A lognormal distribution, in which the associated normal
# distribution has mu=5 and sigma=1.25.
# • A beta distribution in which the shape parameters are alpha = 2 and beta = 5.
# • A uniform distribution defined over the range a = 1 and b = 8.
# +
# Lognormal distribution whose underlying normal has mu=5, sigma=1.25.
# Mean and standard deviation follow from the standard lognormal moment
# formulas: E[X] = exp(mu + var/2), Var[X] = (exp(var)-1) * exp(2*mu + var).
mu_n = 5
sigma_n = 1.25
var_n = sigma_n ** 2
mu_log = np.exp(mu_n + (1/2) * var_n)
sigma_log = np.sqrt((np.exp(var_n) - 1) * np.exp(2 * mu_n + var_n))
print("mean = ", mu_log)
print("standard deviation =", sigma_log)
# +
# Beta distribution with shape parameters alpha=2, beta=5:
# mean = a/(a+b), var = a*b / ((a+b)^2 * (a+b+1)).
alpha = 2
beta = 5
ab_sum = alpha + beta
mu_log2 = alpha / ab_sum
sigma_log2 = np.sqrt((alpha * beta) / ((ab_sum ** 2) * (ab_sum + 1)))
print("mean = ", mu_log2)
print("standard deviation =", sigma_log2)
# +
# Uniform distribution on [a, b] = [1, 8]:
# mean = (a+b)/2, var = (b-a)^2 / 12.
a = 1
b = 8
mu_log3 = (1/2) * (a + b)
spread = b - a
sigma_log3 = np.sqrt((1/12) * spread ** 2)
print("mean = ", mu_log3)
print("standard deviation =", sigma_log3)
# -
# # Using the data in the zip file: data.xlsx
# • Create a normal probability plot.
# • Create a lognormal probability plot.
# • Create an extreme value probability plot
# • Which distribution looks to be the best fit?
import pandas as pd
from scipy import stats
from scipy import special
import matplotlib.pyplot as plt
import math
# +
# Load the homework sample (a single unnamed column) and compute its sample
# mean and standard deviation. Idiom fix: the hand-rolled sum/loop versions
# are replaced with the vectorized pandas equivalents; ddof=1 reproduces the
# original (n-1) denominator.
# NOTE(review): the assignment text mentions data.xlsx, but this reads
# data.csv — confirm the intended file.
data = pd.read_csv('data.csv', header=None)
x = data[0]
mu = x.mean()
std_dev = x.std(ddof=1)
# +
# Weibull parameters estimated by the method of moments:
#   shape k from the commonly used empirical approximation k ~ CV^-1.086,
#   where CV = std_dev/mu is the coefficient of variation;
#   scale lambda from E[X] = lambda * Gamma(1 + 1/k).
k = (std_dev/mu)**-1.086
lam = (mu/special.gamma(1+1/k))
# +
# Probability plots: points close to a straight line indicate the sample is
# consistent with the candidate distribution.
fig, (ax1, ax2, ax3) = plt.subplots(1, 3, figsize=[15, 6])
# Normal probability plot
normal = stats.probplot(x, plot=ax1)
ax1.set_title('Normal Probability Plot')
# Lognormal probability plot.
# NOTE(review): sparams=(std_dev, 0., 1) uses the raw-data std as the
# lognorm shape parameter — confirm this is the intended parameterization.
lognormal = stats.probplot(x, sparams = (std_dev, 0., 1), dist='lognorm', plot=ax2)
ax2.set_title('Lognormal Probability Plot')
# Extreme-value (Weibull) plot using the moment-matched k and lam from above.
weibull = stats.probplot(x,sparams=(k, 0 ,lam),dist='weibull_min', plot=ax3)
ax3.set_title('Weibull Probability Plot')
fig.tight_layout(pad=1.0)
plt.savefig("HW1_plots")
# -
# # The maximum daily temperature in Phoenix AZ in June is known to vary between 80°F and 110°F. The distribution of maximum daily temperature is modeled using a beta distribution with parameters alpha = 2 and beta = 3.
# •What is the probability that the daily maximum temperature will exceed 100°F?
# (hint: you will need to scale your data, and use Matlab or similar to compute the CDF)
# • Redo the problem above (Pr{T > 100°F}), but now assume that the temperature is
# normally distributed with a mean of 95°F and std dev of 10°F.
# +
#problem metrics
min_temp = 80
max_temp = 110
x_temp = 100
alpha = 2
beta = 3
# Rescale the 100°F threshold onto [0, 1], the support of the beta distribution.
z = (x_temp-min_temp)/(max_temp-min_temp)
# +
# P(T > 100°F) = 1 - CDF of Beta(2, 3) at the rescaled threshold.
prob_temp_under_x = stats.beta.cdf(z, alpha, beta)
prob_temp_over_x = 1 - prob_temp_under_x
# Bug fix: the message previously reported max_temp (110°F) even though the
# probability computed is for exceeding x_temp (100°F).
print("The probability that the temperature will exceed {0} degrees is {1:.2f}%". format(x_temp, prob_temp_over_x*100))
# +
# Same question under a Normal(95, 10) temperature model: standardize the
# threshold and use the standard normal CDF.
temp_mean = 95
temp_std_dev = 10
temp_over = 100
z_part2 = (temp_over - temp_mean) / temp_std_dev
prob_temp_over = 1 - stats.norm.cdf(z_part2)
print("The probability that the temperature will exceed {0} degrees is {1:.2f}%". format(temp_over, prob_temp_over*100))
# -
# # The maximum temperature in Phoenix AZ in June is modeled as a normal distribution with mean of 95°F and std dev of 10°F, while the maximum humidity in June is modeled as a normal distribution with mean of 21% and std dev of 5%. Temperature and Humidity are positively correlated, with a covariance of 4.
# • What is the probability that the daily maximum temperature will be less than 99°F
# and the humidity will be less than 23%.
# (hint: you will need to use the Matlab function mvncdf)
# • Redo the problem above, but now assume the two entities are uncorrelated.
# +
#normal distribution for both temperature and humidity
#temperature reuses the mean and std dev from the previous problem
#humidity in %
humidity_mean = 21
humidity_std_dev = 5
cov = 4
#probability that max temp will be less than 99 & humidity less than 23%
temp_lessthan = 99
humidity_lessthan = 23
# Bug fix: correlation previously divided by std_dev (the sample std of the
# unrelated data.csv dataset) instead of temp_std_dev (10°F) from the
# temperature model: rho = cov / (sigma_humidity * sigma_temp) = 4/(5*10) = 0.08.
correlation = cov/(humidity_std_dev*temp_std_dev)
# +
#covariance matrix = [std_dev_x^2 cov][cov std_dev_y^2]
# To get the joint probability P(X < x, Y < y) we integrate the bivariate
# normal density from a very small lower corner (here -1000, effectively
# -infinity) up to the (x, y) corner.
from scipy.stats import mvn
# NOTE(review): scipy.stats.mvn is a legacy Fortran wrapper; newer scipy
# exposes the same computation via multivariate_normal.cdf — confirm the
# scipy version in use before relying on it.
low = np.array([-1000, -1000])
upp = np.array([temp_lessthan, humidity_lessthan])
# Means, in the same order as the bounds (temperature first, then humidity).
mu = np.array([temp_mean, humidity_mean])
# Covariance matrix with the given covariance of 4 (correlated case)...
S_1 = np.array([[(temp_std_dev**2), cov], [cov, (humidity_std_dev**2)]])
# ...and with zero covariance (uncorrelated case).
S_2 = np.array([[(temp_std_dev**2), 0], [0, (humidity_std_dev**2)]])
p,i = mvn.mvnun(low, upp, mu, S_1)
p_2,i_2 = mvn.mvnun(low, upp, mu, S_2)
# NOTE(review): the second message below contains a duplicated "there is";
# left unchanged here because it is runtime output text.
print("The probability that the temperature will be less than {0} degrees and the humidity will be less than {1}% is {2:.2f}% if there is a covariance of 4".
      format(temp_lessthan, humidity_lessthan, p*100))
print("The probability that the temperature will be less than {0} degrees and the humidity will be less than {1}% is {2:.2f}% if there is there is no correlation".
      format(temp_lessthan, humidity_lessthan, p_2*100))
| HW/HW_1/DesignUncertainty_HW1.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] pycharm={"name": "#%% md\n"}
# # Connecting to Amazon Redshift
# -
# # Overview
# `redshift_connector` provides multiple options when it comes to establishing a connection to an Amazon Redshift cluster. These options are discussed and shown below.
# ## Using Database credentials
# Raw database credentials can be used for establishing a connection to an Amazon Redshift cluster. While straightforward, this approach lacks the strong security and user access controls provided by Identity and access management (IAM) and identity provider (IdP) plugins.
# + pycharm={"name": "#%%\n"}
import redshift_connector

# Connect to an Amazon Redshift cluster with raw database credentials.
# The nested "with" blocks guarantee the connection and cursor are closed
# even if a statement raises.
with redshift_connector.connect(
    host='examplecluster.abc123xyz789.us-west-1.redshift.amazonaws.com',
    database='dev',
    user='awsuser',
    password='<PASSWORD>'
    # port value of 5439 is specified by default
) as conn:
    with conn.cursor() as cursor:
        # Please note: autocommit is disabled by default, per DB-API specification
        # If you'd like to commit your changes, manually commit or enable autocommit
        # on the cursor object
        # conn.commit() # manually commits
        # conn.autocommit = True # enables autocommit for subsequent SQL statements
        cursor.execute("create table book(bookname varchar,author varchar)")
        # executemany binds each (bookname, author) tuple to the %s placeholders
        cursor.executemany("insert into book (bookname, author) values (%s, %s)",
                           [
                               ('One Hundred Years of Solitude', '<NAME>'),
                               ('A Brief History of Time', '<NAME>')
                           ]
                           )
        cursor.execute("select * from book")
        result: tuple = cursor.fetchall()
        print(result)
# -
# ## Using IAM Credentials
# IAM Credentials can be supplied directly to ``connect(...)`` using an AWS profile. This approach allows users the option of using temporary credentials and limiting the permissions the connected user has.
# + pycharm={"name": "#%%\n"}
import redshift_connector

# Connects to Redshift cluster using temporary IAM credentials from the
# "default" profile defined in ~/.aws/credentials; db_user names the database
# user to connect as. user/password are passed empty here — presumably unused
# when iam=True; confirm against the redshift_connector documentation.
conn: redshift_connector.Connection = redshift_connector.connect(
    iam=True,
    database='dev',
    db_user='awsuser',
    password='',
    user='',
    cluster_identifier='examplecluster',
    profile='default'
)
# -
# Below, a sample `~/.aws/config` and `~/.aws/credentials` are shown. Please note that `redshift_connector` requires a `region` to be specified when using IAM authentication. The region can be specified either in `~/.aws/config` or passed directly to `redshift_connector.connect(...)`. In the case where a region is specified in both `~/.aws/config` and `redshift_connector.connect(...)`, the value provided to `redshift_connector.connect(...)` will be used.
# + [markdown] pycharm={"name": "#%% md\n"}
# `~/.aws/credentials`
# ```
# [default]
# aws_access_key_id="my_aws_access_key_id"
# aws_secret_access_key="my_aws_secret_access_key"
# aws_session_token="my_aws_session_token"
# ```
# `~/.aws/config`
# ```
# [default]
# region=us-west-2
# ```
# -
# Alternatively, IAM credentials can be supplied directly to ``connect(...)`` using AWS credentials as shown below:
# + pycharm={"name": "#%%\n"}
import redshift_connector

# Same IAM connection, but with the AWS access key / secret / session token
# supplied inline instead of being read from an AWS profile. A region must be
# given here because no ~/.aws/config profile provides one.
conn: redshift_connector.Connection = redshift_connector.connect(
    iam=True,
    database='dev',
    db_user='awsuser',
    password='',
    user='',
    cluster_identifier='examplecluster',
    access_key_id="my_aws_access_key_id",
    secret_access_key="my_aws_secret_access_key",
    session_token="my_aws_session_token",
    region="us-east-2"
)
# -
# # Connecting using an Amazon Redshift Authentication Profile
# An Amazon Redshift authentication profile can be used for authentication with Amazon Redshift via ``redshift_connector``. This approach allows connection properties to be stored server-side and retrieved by ``redshift_connector``. Any connection parameter that appears in the authentication profile and is also directly provided to ``redshift_connector.connect(...)`` will be overridden by the value in the authentication profile.
#
# Please see the Amazon Redshift documentation to learn how to create and delete authentication profiles.
#
# In the following example we will be creating, using, and deleting an authentication profile. For this use case we would like to connect to an Amazon Redshift cluster, but store no credential or cluster information within the Python script. This will improve the portability of our code as well as its security.
#
# Firstly, we will create the Amazon Redshift authentication profile by using ``boto3``.
# + pycharm={"name": "#%%\n"}
import boto3
# ClientError is kept imported: later cells in this notebook reference it.
from botocore.exceptions import ClientError
import json

# Connection properties stored server-side in the authentication profile;
# clients that reference the profile by name inherit these values.
authentication_profile_contents = {
    'host': 'examplecluster.abc123xyz789.us-west-1.redshift.amazonaws.com',
    'region': 'us-west-1',
    'cluster_identifier': 'examplecluster',
    'db_name': 'dev'
}

# Create the profile. The previous `try: ... except ClientError: raise` was
# removed: catching an exception only to re-raise it unchanged is a no-op,
# and a ClientError still propagates to the caller exactly as before.
client = boto3.client("redshift")
client.create_authentication_profile(
    AuthenticationProfileName="QAProfile",
    AuthenticationProfileContent=json.dumps(authentication_profile_contents)
)
# -
# The Redshift authentication profile, named ``QAProfile`` has been created. This profile is intended for use by a QA team who would like to avoid hard-coded references to a specific cluster in their projects. Its contents are in JSON format and contain fields such as ``host`` and ``cluster_identifier``.
#
# Next we will establish a connection to this cluster by using this authentication profile.
# + pycharm={"name": "#%%\n"}
import redshift_connector
import os

# Connect using the server-side "QAProfile" authentication profile: only AWS
# credentials (read from the environment) and the profile name are supplied
# here; host, cluster and database come from the profile contents.
with redshift_connector.connect(
    iam=True,
    region='us-west-2',
    access_key_id=os.environ["AWS_ACCESS_KEY_ID"],
    secret_access_key=os.environ["AWS_SECRET_ACCESS_KEY"],
    session_token=os.environ["AWS_SESSION_TOKEN"],
    auth_profile="QAProfile",
    db_user="bobby_tables"
) as conn:
    pass
# -
# Noting the ``region`` parameter above, we can see that while the Amazon Redshift authentication profile lives in ``us-west-2``, ``examplecluster`` lives in ``us-west-1``. When retrieving temporary IAM credentials to connect to this cluster, the ``region`` provided in the authentication profile will be used.
#
# Please see the ``redshift_connector.RedshiftProperty`` class for guidance on how to define the key and value contents of the JSON authentication profile contents.
#
# Finally, we will delete this authentication profile for demonstration purposes.
#
# + pycharm={"name": "#%%\n"}
# Clean up: delete the demonstration profile. The former
# `try: ... except ClientError: raise` wrapper was removed as a no-op —
# catching and immediately re-raising leaves error propagation unchanged.
client = boto3.client("redshift")
client.delete_authentication_profile(
    AuthenticationProfileName="QAProfile",
)
# -
# # Connecting using Identity Provider (IdP) Plugins
# Please refer to the following [Amazon Redshift documentation](https://docs.aws.amazon.com/redshift/latest/mgmt/options-for-providing-iam-credentials.html) for instructions on configuring the desired IdP.
# Check out our blog post on [AWS Big Data Blog](https://aws.amazon.com/blogs/big-data/federated-api-access-to-amazon-redshift-using-an-amazon-redshift-connector-for-python/) to learn more about integration with Okta IdP.
# ## Authenticating using Active Directory Federation Service (ADFS) identity provider plugin
# + pycharm={"name": "#%%\n"}
import redshift_connector

# Federated login through Active Directory Federation Services (ADFS):
# the plugin authenticates user/password against the ADFS server named by
# idp_host and obtains temporary Redshift credentials.
conn: redshift_connector.Connection = redshift_connector.connect(
    iam=True,
    database='dev',
    cluster_identifier='my-testing-cluster',
    credentials_provider='AdfsCredentialsProvider',
    user='<EMAIL>',
    password='<PASSWORD>',
    idp_host='myadfshostname.com'
)
# -
# ## Authenticating using Azure identity provider plugin
# Values for `client_id`, `client_secret` can be created and found within the Enterprise Application created with Azure.
# + pycharm={"name": "#%%\n"}
import redshift_connector

# Azure AD federation: idp_tenant, client_id and client_secret come from the
# Enterprise Application registered in Azure. preferred_role presumably
# selects which granted IAM role to assume — confirm in the
# redshift_connector documentation.
conn: redshift_connector.Connection = redshift_connector.connect(
    iam=True,
    database='dev',
    cluster_identifier='my-testing-cluster',
    credentials_provider='AzureCredentialsProvider',
    user='<EMAIL>',
    password='<PASSWORD>',
    idp_tenant='my_idp_tenant',
    client_id='my_client_id',
    client_secret='my_client_secret',
    preferred_role='arn:aws:iam:123:role/MyFirstDinnerRoll'
)
# -
# ## Authenticating using Azure Browser identity provider plugin
# + pycharm={"name": "#%%\n"}
import redshift_connector

# Browser-based Azure flow. password is passed empty — presumably because
# authentication happens interactively in the browser window the plugin
# opens; confirm in the redshift_connector documentation.
conn: redshift_connector.Connection = redshift_connector.connect(
    iam=True,
    database='dev',
    cluster_identifier='my-testing-cluster',
    credentials_provider='BrowserAzureCredentialsProvider',
    user='<EMAIL>',
    password='',
    idp_tenant='my_idp_tenant',
    client_id='my_client_id',
    client_secret='my_client_secret',
)
# -
# ## Authenticating using Okta identity provider plugin
# Values for `idp_host`, `app_id`, and `app_name` can be located within the Okta application created.
# + pycharm={"name": "#%%\n"}
import redshift_connector

# Okta federation: idp_host, app_id and app_name identify the Okta
# application created for Redshift (values are found in the Okta console).
conn: redshift_connector.Connection = redshift_connector.connect(
    iam=True,
    database='dev',
    cluster_identifier='my-testing-cluster',
    credentials_provider='OktaCredentialsProvider',
    user='<EMAIL>',
    password='<PASSWORD>',
    idp_host='my_idp_host',
    app_id='my_first_appetizer',
    app_name='dinner_party'
)
# -
# ## Authenticating using JumpCloud via generic Saml Browser identity provider plugin
# + pycharm={"name": "#%%\n"}
import redshift_connector

# Generic SAML browser plugin (here pointed at JumpCloud): the plugin opens
# login_url in a browser for interactive sign-in; password is left empty —
# presumably unused in this flow.
conn: redshift_connector.Connection = redshift_connector.connect(
    iam=True,
    database='dev',
    cluster_identifier='my-testing-cluster',
    credentials_provider='BrowserSamlCredentialsProvider',
    user='<EMAIL>',
    password='',
    login_url='https://sso.jumpcloud.com/saml2/plustwo_melody'
)
| tutorials/001 - Connecting to Amazon Redshift.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] id="YI2vj-VJyzM-" colab_type="text"
# 
#
# [](https://colab.research.google.com/github/JohnSnowLabs/spark-nlp-workshop/blob/master/jupyter/training/english/vivekn-sentiment/VivekNarayanSentimentApproach.ipynb)
#
# ## 0. Colab Setup
# + id="wfXHpaBVy8PY" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 136} outputId="b302e8f9-9de6-4279-ebe3-fafa3a6c23a1" executionInfo={"status": "ok", "timestamp": 1589303303116, "user_tz": -120, "elapsed": 66940, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "14469489166467359317"}}
import os
# Install java
# ! apt-get install -y openjdk-8-jdk-headless -qq > /dev/null
# Point JAVA_HOME at the freshly installed JDK and prepend its bin directory
# to PATH so pyspark can locate the java executable.
os.environ["JAVA_HOME"] = "/usr/lib/jvm/java-8-openjdk-amd64"
os.environ["PATH"] = os.environ["JAVA_HOME"] + "/bin:" + os.environ["PATH"]
# ! java -version
# Install pyspark
# ! pip install --ignore-installed -q pyspark==2.4.4
# Install Spark NLP
# ! pip install --ignore-installed -q spark-nlp==2.4.5
# + [markdown] id="N3lJrZweyzNA" colab_type="text"
# ## Vivekn Sentiment Analysis
#
# In the following example, we walk-through Sentiment Analysis training and prediction using Spark NLP Annotators.
#
# The ViveknSentimentApproach annotator will compute [Vivek Narayanan algorithm](https://arxiv.org/pdf/1305.6143.pdf) with either a column in training dataset with rows labelled 'positive' or 'negative' or a folder full of positive text and a folder with negative text. Using n-grams and negation of sequences, this statistical model can achieve high accuracy if trained properly.
#
# Spark can be leveraged in training by utilizing ReadAs.Dataset setting. Spark will be used during prediction by default.
#
# We also include in this pipeline a spell checker which shall correct our sentences for better Sentiment Analysis accuracy.
# + [markdown] id="zWmdcLPGyzNB" colab_type="text"
# #### 1. Call necessary imports and set the resource path to read local data files
# + id="1KcgP4dWyzNC" colab_type="code" colab={}
#Imports
import time
import sys
import os
from pyspark.ml import Pipeline, PipelineModel
from pyspark.sql import SparkSession
from pyspark.sql.functions import array_contains,when
from pyspark.sql.functions import col
import sparknlp
from sparknlp.annotator import *
from sparknlp.base import DocumentAssembler, Finisher
# + [markdown] id="JvGfY8_jyzNI" colab_type="text"
# #### 2. Load SparkSession if not already there
# + id="oycji8wiyzNJ" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 51} outputId="0d0ea1e1-840c-491b-ce0d-e10d13a7d0b2" executionInfo={"status": "ok", "timestamp": 1589303322225, "user_tz": -120, "elapsed": 86028, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "14469489166467359317"}}
# Start (or reuse) a SparkSession with Spark NLP registered.
spark = sparknlp.start()
print("Spark NLP version: ", sparknlp.version())
print("Apache Spark version: ", spark.version)
# + id="T4gVI6pwyzNP" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 408} outputId="1019078e-6e58-4293-efb8-78915c621df5" executionInfo={"status": "ok", "timestamp": 1589303328151, "user_tz": -120, "elapsed": 91944, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "14469489166467359317"}}
# ! wget -N https://s3.amazonaws.com/auxdata.johnsnowlabs.com/public/resources/en/spell/words.txt -P /tmp
# !rm -rf /tmp/sentiment.parquet
# ! wget -N https://s3.amazonaws.com/auxdata.johnsnowlabs.com/public/resources/en/sentiment.parquet.zip -P /tmp
# ! unzip /tmp/sentiment.parquet.zip -d /tmp/
# + [markdown] id="6-8QQ6YMyzNZ" colab_type="text"
# #### 3. Load a spark dataset and put it in memory
# + id="6iVXyeX5yzNa" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 459} outputId="49dc5721-3e45-4745-e6bf-a16a134de311" executionInfo={"status": "ok", "timestamp": 1589303336604, "user_tz": -120, "elapsed": 100384, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "14469489166467359317"}}
# Load the input data to be annotated: read the parquet sample, map the 0/1
# "sentiment" column onto negative/positive string labels, and cache the
# first 1000 rows.
data = (
    spark.read.parquet("/tmp/sentiment.parquet")
    .withColumn("sentiment_label",
                when(col("sentiment") == 0, "negative").otherwise("positive"))
    .limit(1000)
    .cache()
)
data.show()
# + [markdown] id="RTiRUnXHyzNi" colab_type="text"
# #### 4. Create the document assembler, which will put target text column into Annotation form
# + id="I7kDWrFZyzNj" colab_type="code" colab={}
### Define the dataframe
# DocumentAssembler wraps the raw "text" column into Spark NLP's internal
# Annotation structure — the mandatory first stage of every pipeline.
document_assembler = DocumentAssembler() \
    .setInputCol("text")\
    .setOutputCol("document")
# + id="6Vi5ImpwyzNq" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 204} outputId="90906cc2-2c70-4f9c-e6e6-60610f82fb2c" executionInfo={"status": "ok", "timestamp": 1589303337130, "user_tz": -120, "elapsed": 100885, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "14469489166467359317"}}
### Example: Checkout the output of document assembler
# Each row now carries a "document" annotation column alongside the text.
assembled = document_assembler.transform(data)
assembled.show(5)
# + [markdown] id="DqFWhtGZyzN0" colab_type="text"
# #### 5. Create Sentence detector to parse sub sentences in every document
# + id="HK4qRt2tyzN1" colab_type="code" colab={}
### Sentence detector
# Splits each "document" annotation into individual sentence annotations.
sentence_detector = SentenceDetector() \
    .setInputCols(["document"]) \
    .setOutputCol("sentence")
# + id="7pkcAyQnyzN8" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 204} outputId="7db76fad-0a65-46ba-b773-68fa4f6400fe" executionInfo={"status": "ok", "timestamp": 1589303337715, "user_tz": -120, "elapsed": 101455, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "14469489166467359317"}}
### Example: Checkout the output of sentence detector
# Adds a "sentence" annotation column on top of the assembled data.
sentence_data = sentence_detector.transform(assembled)
sentence_data.show(5)
# + [markdown] id="JaVLnDbxyzOA" colab_type="text"
# #### 6. The tokenizer will match standard tokens
# + id="vwBEG3y6yzOB" colab_type="code" colab={}
### Tokenizer
# Breaks each sentence annotation into standard token annotations.
tokenizer = Tokenizer() \
    .setInputCols(["sentence"]) \
    .setOutputCol("token")
# + id="40PP804uyzOE" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 204} outputId="3a8da29e-a0a4-4721-c38f-0df991656c32" executionInfo={"status": "ok", "timestamp": 1589303338585, "user_tz": -120, "elapsed": 102309, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "14469489166467359317"}}
### Example: Checkout the output of tokenizer
# Tokenizer is an estimator, hence the fit() before transform().
tokenized = tokenizer.fit(sentence_data).transform(sentence_data)
tokenized.show(5)
# + [markdown] id="3LhoPH8fyzOJ" colab_type="text"
# #### 7. Normalizer will clean out the tokens
# + id="cDOtkZF7yzOK" colab_type="code" colab={}
# Normalizer cleans the tokens (case, punctuation, non-word characters)
# into a "normal" annotation column.
normalizer = Normalizer() \
    .setInputCols(["token"]) \
    .setOutputCol("normal")
# + [markdown] id="CvMB0iMGyzOP" colab_type="text"
# #### 8. The spell checker will correct normalized tokens, this trains with a dictionary of english words
# + id="_EziC6v0yzOP" colab_type="code" colab={}
### Spell Checker
# Norvig-style spell correction over the normalized tokens; it trains
# against the English word list downloaded to /tmp/words.txt above.
spell_checker = NorvigSweetingApproach() \
    .setInputCols(["normal"]) \
    .setOutputCol("spell") \
    .setDictionary("/tmp/words.txt")
# + [markdown] id="f0zDsQloyzOT" colab_type="text"
# #### 9. Create the ViveknSentimentApproach and set resources to train it
# + id="jgGbnXcryzOU" colab_type="code" colab={}
# Vivekn sentiment annotator: trains against the "sentiment_label" column
# and emits a "sentiment" annotation per sentence from the spell-checked
# tokens.  Fix: the original statement ended with a stray trailing
# backslash after .setPruneCorpus(0), which silently joins the statement
# with whatever physical line follows; it is removed here.
sentiment_detector = ViveknSentimentApproach() \
    .setInputCols(["spell", "sentence"]) \
    .setOutputCol("sentiment") \
    .setSentimentCol("sentiment_label") \
    .setPruneCorpus(0)
# + [markdown] id="8A1uXXmxyzOd" colab_type="text"
# #### 10. The finisher will utilize sentiment analysis output
# + id="EcJeVOzVyzOe" colab_type="code" colab={}
# Finisher converts the "sentiment" annotations into plain string arrays
# (finished_sentiment) and drops annotation metadata.
finisher = Finisher() \
    .setInputCols(["sentiment"]) \
    .setIncludeMetadata(False)
# + [markdown] id="ccQhdcDXyzOk" colab_type="text"
# ##### 11. Fit and predict over data
# + id="btpI76ViyzOl" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="49841b10-d723-4510-8723-f0bbee8141bf" executionInfo={"status": "ok", "timestamp": 1589303352022, "user_tz": -120, "elapsed": 115707, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "14469489166467359317"}}
# Assemble the full pipeline (each stage consumes the previous stage's
# output column), train it on the labeled data, and apply it to the same
# data, timing the whole fit+transform pass.
pipeline = Pipeline(stages=[
    document_assembler,
    sentence_detector,
    tokenizer,
    normalizer,
    spell_checker,
    sentiment_detector,
    finisher
])
start = time.time()
sentiment_data = pipeline.fit(data).transform(data)
end = time.time()
print("Time elapsed pipeline process: " + str(end - start))
# + [markdown] id="NcYkKyN-yzOq" colab_type="text"
# ##### 12. Check the result
# + id="wdOIFzD7yzOr" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 204} outputId="dc0d9cf2-c5de-45de-8409-84fadbb38509" executionInfo={"status": "ok", "timestamp": 1589303352874, "user_tz": -120, "elapsed": 116551, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "14469489166467359317"}}
# Show the first rows without truncating the finished_sentiment column.
sentiment_data.show(5,False)
# + id="wPvfyTPdyzOw" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="6b99b5ee-4fa8-40fc-cb75-6ff4fc5ed21b" executionInfo={"status": "ok", "timestamp": 1589303352876, "user_tz": -120, "elapsed": 116546, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "14469489166467359317"}}
# Confirm the pipeline output is still a Spark DataFrame.
type(sentiment_data)
# + id="h0vCTEo5yzO2" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 102} outputId="23783ff7-387d-442f-f5bc-70c307ea28b6" executionInfo={"status": "ok", "timestamp": 1589303355230, "user_tz": -120, "elapsed": 118893, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "14469489166467359317"}}
# Negative Sentiments
# Print five rows whose finished_sentiment array contains "negative".
for r in sentiment_data.where(array_contains(sentiment_data.finished_sentiment, "negative")).take(5):
    print(r['text'].strip(),"->",r['finished_sentiment'])
# + id="MM47a2PHyzPC" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 102} outputId="b6587222-1f1a-4ff9-cf60-fd659fc81eaa" executionInfo={"status": "ok", "timestamp": 1589303356426, "user_tz": -120, "elapsed": 120082, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "14469489166467359317"}}
# Positive Sentiments
# Print five rows whose finished_sentiment array contains "positive".
for r in sentiment_data.where(array_contains(sentiment_data.finished_sentiment, "positive")).take(5):
    print(r['text'].strip(),"->",r['finished_sentiment'])
# + id="9QagrcsKyzPK" colab_type="code" colab={}
| jupyter/training/english/vivekn-sentiment/VivekNarayanSentimentApproach.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] pycharm={"name": "#%% md\n"}
# # Implementation part 2
# + pycharm={"name": "#%%\n"}
import os
import tifffile as tif
import cv2
import numpy as np
from src.controllers.utility import compute_spline
# Load the example TIFF stack; the *40 rescales the uint8 values to use
# more of the dynamic range for later processing.
# NOTE(review): the raw backslash path (r"\test_data\...") only resolves
# on Windows — confirm the intended platform or use os.path.join.
with tif.TiffFile(os.getcwd() + r"\test_data\Expansion dSTORM-Line Profile test.tif") as file:
    image = file.asarray().astype(np.uint8)*40
# + [markdown] pycharm={"name": "#%% md\n"}
# We run the steps from part 1 with the helper function compute spline and receive a `CustomSpline` object. Using the `sample` function with the number of points gives us the center points of our spline.
# + pycharm={"name": "#%%\n"}
# Run the part-1 helper: blur + detect the line structure and take the
# first returned CustomSpline, then sample its center points.
spline = compute_spline(image, blur=20, )[0]
sample = spline.sample(spline.n_points)
# -
# To fit the results into a bimodal gaussian we just have to sum two of those functions with independent parameters. To only apply a background once we set B=0 within the distributions and add an additional B to our bigaussian model.
# +
def gaussian(x, I, sig, c, B):
    """Single Gaussian peak: amplitude I, width sig, center c, baseline B."""
    exponent = -((x - c) ** 2) / (2.0 * sig ** 2)
    return B + I * np.exp(exponent)
def bigaussian(x, I1, sig1, c1, I2, sig2, c2, B):
    """Sum of two independent Gaussian peaks sharing a single baseline B."""
    peak1 = I1 * np.exp(-((x - c1) ** 2) / (2.0 * sig1 ** 2))
    peak2 = I2 * np.exp(-((x - c2) ** 2) / (2.0 * sig2 ** 2))
    return peak1 + peak2 + B
# -
# # Fit data to the defined function:
# Fitting a function to data in python is quite easy since (like for kinda everything else) there were already some people who did the work for us. We will use scipy.optimize to do the job.
# The estimization of initial parameters is crucial for a good fit. Therefore, we estimate a first guess for our parameters and bind the scipy optimization to stay within reasonable bounds.
# The first thing we require is that all our parameters are >0, which is why we set the first value of every tuple to 0. The intensity I is constrained with an upper bound of the data's maximum value. We basically don't care what our sigma is doing and set its maximum to np.inf. However, we want our center to stay within the bounds of our line profile.
# +
def bounds(data):
    """Box constraints for the bigaussian fit, shaped (2, 7): lowers, uppers."""
    peak_limit = data.max() + 0.1
    per_param = [
        [0, peak_limit], [0, np.inf], [0, data.shape[0]],   # I1, sig1, c1
        [0, peak_limit], [0, np.inf], [0, data.shape[0]],   # I2, sig2, c2
        [0, 0.1],                                           # baseline B
    ]
    return np.array(per_param).T
def guess(data):
    """Initial parameter vector [I1, sig1, c1, I2, sig2, c2, B] for the fit.

    Both peaks start at half the maximum intensity, centred on the position
    of the (first) maximum.  The original used np.where(...)[0], which is
    an *array* (and has several entries when the maximum is not unique),
    while scipy.optimize.least_squares needs a flat sequence of scalars;
    np.argmax gives the scalar index of the first maximum instead.
    """
    peak_height = data.max() / 2
    center = int(np.argmax(data))  # scalar index of the first maximum
    return [peak_height, 0.5, center, peak_height, 0.5, center, 0]
# -
def fit_data_to(func, x, data):
    """Least-squares fit of *func* to *data* sampled at positions *x*.

    Uses guess()/bounds() above for the initial parameters and box
    constraints; returns the optimised parameter vector.
    """
    # scipy is never imported at the top of this notebook, so bring it in
    # here to keep the cell runnable on its own.
    from scipy import optimize

    def residuals(p, xs, ys):
        # squared pointwise distance between model and measured data
        return (func(xs, *p) - ys) ** 2

    result = optimize.least_squares(residuals, guess(data),
                                    bounds=bounds(data), args=(x, data))
    return result.x
# +
# Plot the averaged line profile together with its bigaussian fit.
# NOTE(review): `interpolated_profile` and `plt` (matplotlib.pyplot) are
# not defined/imported anywhere in this notebook chunk — presumably they
# come from an earlier cell; verify before running this cell in isolation.
x = np.arange(0,interpolated_profile.shape[0],1)
fig = plt.figure()
ax1 = fig.add_axes((0.1, 0.2, 0.8, 0.7))
# normalise both curves to the profile maximum so they share one y-scale
ax1.plot(x, interpolated_profile / interpolated_profile.max(), c="r", label="averaged line profile")
optim = fit_data_to(bigaussian, x, interpolated_profile)
ax1.plot(x, bigaussian(x, *optim) / interpolated_profile.max(),
         lw=1, c="b", ls='--', label="bigaussian fit")
ax1.legend(loc='best')
ax1.set_ylabel("normed intensity [a.u.]")
ax1.set_xlabel("distance [nm]")
plt.show()
| notebooks/twdsp2.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Audio data preparation from zero to PyTorch's DataLoader
# +
import pandas as pd
import matplotlib.pyplot as plt
from lhotse import WavAugmenter, CutSet, Fbank, FbankConfig
from lhotse.recipes.mini_librispeech import download_and_untar, prepare_mini_librispeech
# -
# ## Getting mini LibriSpeech and creating manifests
# Download mini LibriSpeech (no-op when already present) and build lhotse
# manifests; each split maps to a dict of audio/supervision manifests.
download_and_untar('data')
manifests = prepare_mini_librispeech('data/LibriSpeech')
train = manifests['train-clean-5']
dev = manifests['dev-clean-2']
# ## Creating the Cuts
#
# ### First, we create the "starting point" cut sets - i.e. cuts that actually span full recordings.
# Initial cuts span whole recordings for both splits.
train_cuts = CutSet.from_manifests(recording_set=train['audio'], supervision_set=train['supervisions'])
dev_cuts = CutSet.from_manifests(recording_set=dev['audio'], supervision_set=dev['supervisions'])
# ### We can see the cut durations are far from equal - we'd like to use 5 second long cuts for this experiment.
pd.Series(c.duration for c in train_cuts).hist()
# ### We can cut the longer recordings into 5 second cuts by traversing them in windows; the left-over portion of the recording might still be shorter, so we will pad it with silence to 5 seconds.
# Window into 5 s chunks, pad the tail chunk with silence to exactly 5 s.
train_cuts_filt = train_cuts.cut_into_windows(5).pad(5)
assert all(cut.duration == 5 for cut in train_cuts_filt)
len(train_cuts), len(train_cuts_filt)
# ### Let's create a simple dataset for our very specific task - classification whether an audio clip has been reverberated.
# +
import torch
import random
class ReverbDetectionDataset(torch.utils.data.Dataset):
    """Binary-classification dataset: was this audio clip reverberated?

    Each item is an 80-bin fbank feature matrix together with a 0/1 target
    recording whether reverb augmentation was applied to that clip.
    """

    def __init__(self, cuts):
        self.cuts = cuts
        self.cut_ids = list(cuts.ids)
        self.extractor = Fbank(FbankConfig(num_mel_bins=80))
        self.augmenter = WavAugmenter.create_predefined('reverb', cuts[0].sampling_rate)

    def __len__(self):
        return len(self.cuts)

    def __getitem__(self, index):
        selected = self.cuts[self.cut_ids[index]]
        # coin-flip whether to reverberate; the flip itself is the label
        apply_reverb = random.choice([0, 1])
        chosen_augmenter = self.augmenter if apply_reverb else None
        features = selected.compute_features(self.extractor, chosen_augmenter)
        return torch.from_numpy(features), float(apply_reverb)
# -
# ### Creating the DataLoader is very simple - no collate_fn is needed at all, since we used CutSet's capabilities to bring the data to equal length.
# +
# Equal-length cuts mean default collation works — no collate_fn needed.
train_dset = ReverbDetectionDataset(train_cuts_filt)
val_dset = ReverbDetectionDataset(dev_cuts)
train_dloader = torch.utils.data.DataLoader(train_dset, batch_size=8, shuffle=True, num_workers=2)
val_dloader = torch.utils.data.DataLoader(val_dset, batch_size=1, num_workers=1)
# +
# Smoke-test one batch from each loader and show the tensor shapes.
for feats, targets in train_dloader:
    print('Training DataLoader shapes:')
    print(feats.shape, targets.shape)
    print(targets)
    break
for feats, targets in val_dloader:
    print('Dev DataLoader shapes:')
    print(feats.shape, targets.shape)
    break
# -
| examples/mini_librispeech/dataloader_example.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Large-scale QAOA via Divide-and-Conquer
#
# <em> Copyright (c) 2021 Institute for Quantum Computing, Baidu Inc. All Rights Reserved. </em>
# ## Overview
#
# [Quantum Approximation Optimization Algorithm](./QAOA_EN.ipynb) (QAOA) is a promising hybrid quantum-classical algorithm to solve combinatorial optimization problems approximately. However, its applicability is restricted by the qubit limitation for large-scale problems and its execution time scales exponentially with the problem size. Detailed and other limitations can be found in [1].
#
# Divide-and-Conquer (DC) is a largely used technique to address similar challenges as those listed above, such as quicksort and fast Fourier transform (FFT). It recursively breaks down a problem into several subproblems of the same type until these subproblems can be solved directly.
#
# DC-QAOA proposed by <NAME> et al. in 2021 [2] applied such technique on QAOA to solve the Max-Cut problem. The methodology presented below adopts this DC-QAOA scheme with some modifications.
# ## Methodology
# ### Large Graph Partitioning (LGP)
#
# Let $G(V,E)$ be an undirected graph with $n$ vertices and $k$ be the qubit (vertex) limit, i.e. available qubit size in NISQ computers. LGP partitions $G$ into exactly two subgraphs $G_0$ and $G_1$, and at least one node is shared between these two subgraphs so that no edges of $G$ are missed. Those shared nodes that enable this separation are called separation nodes. Note that the number of separation nodes should be smaller than the max qubit limit.
#
# The following code shown is to define such partition.
# +
import networkx as nx # version of networkx >= 2.5
import matplotlib.pyplot as plt
from itertools import combinations
# Partition a graph into two subgraphs
def NaiveLGP(g, k):
    """Large Graph Partitioning: split graph *g* into exactly two subgraphs.

    Tries separator sets of increasing size (1 .. k-1).  Removing a
    separator must leave exactly two connected components; the separator
    nodes with their incident edges are then added back to both touched
    components so no edge of *g* is lost.

    Returns the two subgraphs as a list, or an empty dict when no
    separator below the qubit limit *k* exists (the original sentinel,
    kept for backward compatibility).

    Cleanup over the original: removed dead code (the sorted edge list
    ``E`` and the unused ``E_S0``/``E_S1`` snapshots) and replaced the
    manual while-counter with a for-range loop.
    """
    V = sorted(g.nodes)
    # try ever larger separator sets, staying below the qubit limit
    for num_nodes in range(1, k):
        for p in combinations(V, num_nodes):
            # remove the candidate separator p and all its incident edges
            V1 = [x for x in V if x not in p]
            E1 = [e for e in g.edges if e not in g.edges(p)]
            G1 = nx.Graph()
            G1.add_nodes_from(V1)
            G1.add_edges_from(E1)
            S = [G1.subgraph(c).copy() for c in nx.connected_components(G1)]
            if len(S) == 2:
                # snapshot membership BEFORE re-adding edges, since adding
                # an edge also adds its endpoints to the subgraph
                V_S0 = set(S[0].nodes)
                V_S1 = set(S[1].nodes)
                # give the separator nodes and their incident edges back to
                # whichever component they touch; edges entirely inside the
                # separator go to both
                for (u, v) in g.edges(p):
                    if u in V_S0 or v in V_S0:
                        S[0].add_edge(u, v)
                    if u in V_S1 or v in V_S1:
                        S[1].add_edge(u, v)
                    if u in p and v in p:
                        S[0].add_edge(u, v)
                        S[1].add_edge(u, v)
                return S
    print("G has connectivity above k")
    return {}
# -
# One example for illustration is given below. The graph is a random graph with $10$ vertices and the qubit (vertex) limit is $9$ vertices. The red vertices are those selected to be removed so that the rest is disconnected and can be partitioned. We then add the separation nodes with their incident edges back to these two subgraphs to avoid missing information of the graph.
# +
# Generate a connected graph with 10 vertices (a 10-cycle)
n = 10
G = nx.Graph()
G.add_nodes_from([0,1,2,3,4,5,6,7,8,9])
G.add_edges_from([(0,1),(1,2),(2,3),(3,4),(4,5),(5,6),(6,7),(7,8),(8,9),(9,0)])
k = 9 # Set qubit (vertex) limit
S = NaiveLGP(G,k) # Partition G into two subgraphs once
sep_node = list(set(S[0].nodes).intersection(set(S[1].nodes))) # Obtain separation nodes of the partition
# Show graph illustration: separation nodes in red, all others in blue
options = {
    "with_labels": True,
    "font_color": "white"
}
node_color0 = ["red" if i in sep_node else "blue" for i in range(n)]
node_color1 = ["red" if list(S[0].nodes)[i] in sep_node else "blue" for i in range(len(S[0].nodes))]
node_color2 = ["red" if list(S[1].nodes)[i] in sep_node else "blue" for i in range(len(S[1].nodes))]
fig, ax = plt.subplots(1, 3, figsize=(15, 4))
for i, a in enumerate(ax):
    a.axis('off')
    a.margins(0.20)
nx.draw_networkx(G, pos=nx.circular_layout(G), ax=ax[0], **options, node_color=node_color0)
nx.draw_networkx(S[0], pos=nx.circular_layout(S[0]), ax=ax[1], **options, node_color=node_color1)
nx.draw_networkx(S[1], pos=nx.circular_layout(S[1]), ax=ax[2], **options, node_color=node_color2)
# -
# Note that in our case, both subgraphs are under the qubit limit and can be directly solved by QAOA. But if the subgraph still exceeds the qubit limit, we will recursively do LGP through recursively doing DC-QAOA.
# ### Graph Reconstruction (GR)
#
# Once QAOA is applied to partitioned subgraphs, we should combine the solutions for the parent graph. Note that since two partitioned subgraphs share some set of nodes, we need to make sure that the labeling sets which these nodes are in are consistent. For example, if in one subgraph, the shared nodes are all in $S_0$, while in the other subgraph, some of the shared nodes are in $S_0$ and some are in $S_1$. In this situation, corresponding solutions cannot be combined. So we provide several possible cuts for each partitioned subgraphs to avoid failure for the combination.
def GR(str_cnt1, str_cnt2):
    """Graph Reconstruction: merge cut strings from two partitioned subgraphs.

    Each input is a list of (bitstring, count) pairs where "x" marks a
    vertex absent from that subgraph.  Two strings combine only when they
    agree on every vertex present in both; the merged count is the smaller
    of the two.  Returns the merged pairs sorted by count, descending.
    """
    length = len(str_cnt1[0][0])
    # vertex positions carried concretely by BOTH partitions
    shared = [i for i in range(length)
              if str_cnt1[0][0][i] != "x" and str_cnt2[0][0][i] != "x"]
    merged = []
    for bits1, cnt1 in str_cnt1:
        for bits2, cnt2 in str_cnt2:
            # the two candidate cuts must agree on every shared vertex
            if all(bits1[i] == bits2[i] for i in shared):
                combined = "".join(
                    bits1[i] if bits1[i] != "x" else bits2[i]
                    for i in range(length)
                )
                merged.append((combined, min(cnt1, cnt2)))
    # ascending stable sort then reversal (matches the original tie order)
    return sorted(merged, key=lambda pair: pair[1])[::-1]
# We still take the above example for illustration. For two subgraphs partitioned above, we apply QAOA to find their approximate max cuts and then use GR policy to form the max cut for the original graph. The code presented below has achieved this. Note that there is a parameter $t$ controlling how many possible cuts are kept, and if this $t$ is larger than $2^k$, all cuts are provided and the combination can definitely happen.
# +
# We use direct QAOA to compute approximate max cuts for two subgraphs
# In the next section, we will compute these using DC-QAOA because subgraphs might also exceed the qubit limit
import paddle
from paddle_quantum.QAOA.maxcut import find_cut
# Set QAOA parameters
p = 3 # Number of layers of QAOA circuit
ITR = 100 # Iterations of the training network of QAOA
LR = 0.5 # Learning rate in the training network of QAOA
# Set graph reconstruction parameter
t = 10 # Number of partition strings kept after graph reconstruction
paddle.seed(999) # Fix the seed
# Start graph reconstruction procedure: solve each subgraph with plain
# QAOA, relabel its cut strings to full-graph positions ("x" = absent
# vertex), keep the top-t strings per subgraph, then merge with GR.
S_str_cnt = []
for si in S:
    siv = list(si.nodes)
    # Compute the subgraph's maxcut
    tmp, si_str_cnt_relabeled = find_cut(si, p, ITR, LR)
    # Make the subgraph's maxcut match the original graph by relabeling
    si_str_cnt = []
    for str_relabeled in si_str_cnt_relabeled:
        strr = ""
        for i in range(len(G.nodes)):
            if i in siv:
                strr += str_relabeled[siv.index(i)]
            else:
                strr += "x"
        si_str_cnt.append((strr, si_str_cnt_relabeled[str_relabeled]))
    si_str_cnt.sort(key=lambda tup:tup[1])
    S_str_cnt.append(si_str_cnt[::-1][:t])
# Once we have obtained max cut strings for the two partitions, we perform the graph reconstruction (GR)
print("Max cut for the first partitioned subgraph: \n" + str(dict(S_str_cnt[0])))
print("Max cut for the second partitioned subgraph: \n" + str(dict(S_str_cnt[1])))
out_cnt = GR(S_str_cnt[0], S_str_cnt[1])
print("Combined max cut for the original graph: \n" + str(dict(out_cnt[:t])))
# -
# The top several possible max cuts for the first subgraph include {'010xxxxxxx','101xxxxxxx'} and those for the second subgraph include {'1x10101010','0x01010101'}, where 'x' indicates those missing nodes in the subgraph. Shared nodes $0$ and $2$ are all '0's in the first possibility of the first subgraph, i.e. '010xxxxxxx', while they are all '1's in the first possibility of the second subgraph, i.e. '1x10101010', so they cannot combine. (Note that although we can flip 0s and 1s in this situation by symmetry, it is not necessary). We then try to combine '010xxxxxxx' (first possibility of the first subgraph) and '0x01010101' (second possibility of the second subgraph). It is clear that the shared nodes $0$ and $2$ are all '0's in both subgraphs as shown below in the left and middle figure, so we combine these two max cuts and get '0101010101' for the original cycle of six as shown below in the right.
#
# Graph illustration is shown below. The left and middle subgraphs are subgraphs with approximate max cuts, where red and blue nodes represent $S_0$ and $S_1$ and dashed lines represent cuts.
# +
# Computed max cut for two subgraphs (hand-picked compatible strings
# taken from the GR output above)
strr1 = '010xxxxxxx'
strr2 = '0x01010101'
strr = '0101010101'
# Show graph illustration: node colour marks the partition side, dashed
# edges are the cut edges.
# NOTE(review): relies on `options` defined in an earlier cell.
options0 = {
    "node_color": ["red" if strr1[i] == '0' else "blue" for i in S[0].nodes],
    "style": ["solid" if strr1[u] == strr1[v] else "dashed" for (u, v) in list(S[0].edges)]
}
options1 = {
    "node_color": ["red" if strr2[i] == '0' else "blue" for i in S[1].nodes],
    "style": ["solid" if strr2[u] == strr2[v] else "dashed" for (u, v) in list(S[1].edges)]
}
options2 = {
    "node_color": ["red" if strr[i] == '0' else "blue" for i in range(n)],
    "style": ["solid" if strr[u] == strr[v] else "dashed" for (u, v) in list(G.edges)]
}
fig, ax = plt.subplots(1, 3, figsize=(15,4))
for i, a in enumerate(ax):
    a.axis('off')
    a.margins(0.20)
nx.draw_networkx(S[0], pos=nx.circular_layout(S[0]), ax=ax[0], **options, **options0)
nx.draw_networkx(S[1], pos=nx.circular_layout(S[1]), ax=ax[1], **options, **options1)
nx.draw_networkx(G, pos=nx.circular_layout(G), ax=ax[2], **options, **options2)
# -
# ### DC-QAOA
#
# To find the max cut for a large graph with the number of vertices exceeding limits, DC-QAOA recursively divides it through LGP policy and conquers sub-solutions with GR policy described above. It adopts the divide-and-conquer paradigm to deal with the max-cut problem for large-scale graphs.
#
# The input graph is separated into exactly two subgraphs through LGP policy if the vertex size is larger than the qubit limit. Otherwise, its max cut would be directly calculated by QAOA. Each subgraph will recursively call DC-QAOA until its max-cut solution are returned, i.e. at some step, the vertex size of the subgraph is under the limit and can be directly processed by QAOA and returned values would be combined and then be given to the upper layer.
#
# The code below provides the way to run DC-QAOA and LGP and GR policies are both applied in the `DC_QAOA` function. Note that QAOA would return a series of possible cuts, sorted by frequency count.
# +
def DC_QAOA(g, p, t, s, k, ITR, LR):
    """Divide-and-Conquer QAOA for approximate Max-Cut on graph *g*.

    Parameters:
        g: networkx graph to cut.
        p: number of QAOA circuit layers.
        t: number of candidate cut strings kept per (sub)problem.
        s: approximate total the kept counts are rescaled to.
        k: qubit (vertex) limit; larger graphs are split via NaiveLGP.
        ITR, LR: QAOA training iterations and learning rate.

    Returns:
        (best_cut_string, {cut_string: count}) — the most frequent cut and
        the top-t candidate cuts with rescaled counts.

    Fixes over the original:
      * The single-node base case returned a bare list of tuples instead
        of the (string, dict) pair every other path returns, which broke
        the recursive caller's unpacking and dict indexing; it now returns
        a consistent pair.
      * Comprehension variables no longer shadow the parameter ``k`` or
        the builtin ``str``.
    """
    if len(g.nodes) > k:
        # Divide: split into exactly two subgraphs with the LGP policy
        S = NaiveLGP(g, k)
        S_str_cnt = []
        for si in S:
            siv = list(si.nodes)
            # Conquer: solve each subgraph recursively
            _, si_str_cnt_relabeled = DC_QAOA(si, p, t, s, k, ITR, LR)
            # Relabel subgraph strings to full-graph positions, marking
            # vertices absent from this subgraph with "x"
            si_str_cnt = []
            for str_relabeled in si_str_cnt_relabeled:
                strr = ""
                for v in g.nodes:
                    if v in siv:
                        strr += str_relabeled[siv.index(v)]
                    else:
                        strr += "x"
                si_str_cnt.append((strr, si_str_cnt_relabeled[str_relabeled]))
            si_str_cnt.sort(key=lambda tup: tup[1])
            S_str_cnt.append(si_str_cnt[::-1][:t])
        # Combine: reconstruct compatible cut strings of the two halves
        out_cnt = GR(S_str_cnt[0], S_str_cnt[1])
    else:
        if len(g.nodes) == 1:
            # Base case: a single vertex sits on either side of the cut.
            # Return the same (string, dict) shape as every other path so
            # the recursive caller can unpack and index the result.
            return "0", {"0": 99999, "1": 99999}
        _, out_cnt = find_cut(g, p, ITR, LR, shots=3000)
        # {string: count} dict -> [(string, count)] tuple list
        out_cnt = [(bits, cnt) for bits, cnt in out_cnt.items()]
    # Keep only the t most frequent strings (sort ascending, then reverse)
    out_cnt.sort(key=lambda tup: tup[1])
    out_cnt = out_cnt[::-1][:t]
    # Rescale so the kept counts sum to (about) s
    cnt_sum = sum(cnt for (_, cnt) in out_cnt)
    out_cnt = [(bits, int(s * cnt / cnt_sum)) for (bits, cnt) in out_cnt]
    return out_cnt[0][0], dict(out_cnt)
# Set QAOA parameters
p = 2 # Number of layers of QAOA circuit
ITR = 100 # Iterations of the training network of QAOA
LR = 0.5 # Learning rate in the training network of QAOA
#Set DC-QAOA parameters
s = 3000 # Multiplier to make frequency bigger
t = 10 # Number of partition strings kept after graph reconstruction
k = 5 # Maximum qubits/vertices limit
# Using DC-QAOA on the 10-cycle G built above (exceeds the k=5 limit, so
# it will be partitioned recursively)
max_cut, out_cnt = DC_QAOA(G, p, t, s, k, ITR, LR)
print("First t possible approximate maxcut for graph G: " + str(out_cnt))
print("Max cut found by DC-QAOA algorithms: " + str(max_cut))
# -
# ## Applicability
# **DC-QAOA described above can approximate max cut for a graph if and only if its and one family of its recursive children's pseudo-connectivities are all smaller than $k$, where $k$ is the qubit limit (vertex limit).**
#
# The pseudo-connectivity here is defined as the minimum number of vertices that need to be removed to separate the remaining vertices into exactly two isolated subgraphs. A graph's recursive children mean its two partitioned subgraphs and then their partitioned sub-subgraphs until some are smaller or equal to the qubit limit.
#
# Cycles are applicable examples if the qubit limit is larger than $1$. Its pseudo-connectivity is $2$ as removing two vertices can partition cycles into two paths. For the example of $C_6$ (cycle of six) with qubit limit 4, partitions described above are $P_5$ (path of five) and $P_2$. $P_2$ is under the qubit limit, and $P_5$ can be partitioned into $P'_4$ and $P'_2$ and thus have pseudo-connectivity $1$. Then $P'_4$ and $P'_2$ are under the qubit limit. So this family of the $C_6$ has all its recursive children's ($C_6$, $P_5$) pseudo-connectivities smaller than $4$.
#
# Non applicable examples include complete graphs with $n$ larger than $k$, because no matter how you remove vertices, the rest graph is still connected. Another example for which different qubit limits influence its applicability is shown below.
#
# The left graph is the original graph. If the qubit limit is $2$, then the number of separation nodes can only be $1$ and no vertice could partition the graph into $2$. If we remove vertex $4$ (as shown in the middle), the graph will be partitioned into three while others will leave a connected graph. Besides, the graph's pseudo-connectivity is $2$, not smaller than $k=2$, and so DC-QAOA fails in this case. However, if the qubit limit is $3$, then its pseudo-connectivity is under $k$ and we can remove vertex $0$ and $3$ as shown in the right. Then the rest are two components, with one under the qubit limit and one can be partitioned into two further (by removing vertex $4$ and with pseudo-connectivity being $1 < k$). So DC-QAOA succeeds here.
#
# 
# People can try the example shown above by adjusting the parameter $k$ in the code below.
# Star-like example from the text: DC-QAOA succeeds with k = 3 but fails
# with k = 2 (pseudo-connectivity 2 is not below the limit).
G = nx.Graph()
G.add_nodes_from([0, 1, 2, 3, 4])
G.add_edges_from([(0, 4), (1, 2), (1, 4), (2, 4), (3, 4)])
k = 3
_, out_cnt = DC_QAOA(G, p, t, s, k, ITR, LR)
print("First t possible approximate maxcut for graph G: " + str(dict(out_cnt)))
# ## Performance
#
# We have compared DC-QAOA with classical max-cut approximation algorithms to test its performance. We provide the performance below of finding max-cut by DC-QAOA and the upper bound for max cut provided by SDP. We take five random graphs of 10 vertices as an example to show the performance (qubit limit for DC-QAOA is set to be 5).
#
# In order to run the following test, users need to install cvxpy: `pip install cvxpy`. **Windows users may encounter an error at runtime if they install cvxpy using pip. Instead, we recommend Windows users to create a new conda environment and install cvxpy with conda.** For more details please see [https://www.cvxpy.org/install/](https://www.cvxpy.org/install/).
# +
import cvxpy as cvx
import networkx as nx
def sdp_solver(G):
    """SDP relaxation of Max-Cut: returns an upper bound on the cut value of G."""
    num_nodes = len(G)
    adjacency = nx.adjacency_matrix(G).toarray()
    # gram[i, j] relaxes the product of the +-1 cut labels of vertices i, j
    gram = cvx.Variable((num_nodes, num_nodes), PSD=True)
    objective = cvx.Maximize(0.25 * cvx.sum(cvx.multiply(adjacency, 1 - gram)))
    constraints = [cvx.diag(gram) == 1]
    return cvx.Problem(objective, constraints).solve(cvx.SCS)
# +
# Benchmark DC-QAOA against the SDP upper bound on 5 random 10-vertex graphs.
n = 10
iter = 5
print("Number of node = " + str(n))
print("Node = " + str([i for i in range(n)]))
value_dc_qaoa_ls = []
ubound_sdp_ls = []
for i in range(iter):
    print("\nRandom graph " + str(i+1))
    # Generate random graph (seeded per iteration for reproducibility)
    G = nx.erdos_renyi_graph(n, 0.1, 100 * i, directed=False)
    # NOTE(review): the retry below uses p=0.5 with no seed, unlike the
    # seeded p=0.1 draw above — confirm this asymmetry is intentional.
    while nx.is_connected(G) == False:
        G = nx.erdos_renyi_graph(n, 0.5, directed=False)
    print("Edges = " + str(list(G.edges)))
    # SDP upper bound calculation
    ubound_sdp = sdp_solver(G)
    ubound_sdp_ls.append(ubound_sdp)
    print("SDP upper bound: " + str(ubound_sdp))
    # QAOA parameters
    p = 2 # Number of layers of QAOA circuit
    ITR = 100 # Iterations of the training network of QAOA
    LR = 0.5 # Learning rate in the training network of QAOA
    # DC-QAOA parameters
    s = 3000 # Multiplier to make frequency bigger
    t = 20 # Number of partition strings kept after graph reconstruction
    k = 5 # Maximum qubits/vertices limit
    try:
        cut_dc_qaoa, out_cnt = DC_QAOA(G, p, t, s, k, ITR, LR)
        # "dashed" marks cut edges: endpoints on opposite sides of the cut
        cut_dc_qaoa1 = ["solid" if cut_dc_qaoa[u] == cut_dc_qaoa[v] else "dashed" for (u, v) in list(G.edges)]
        value_dc_qaoa = cut_dc_qaoa1.count("dashed")
        value_dc_qaoa_ls.append(value_dc_qaoa)
        print("DC-QAOA node partition: " + str(cut_dc_qaoa) + ", max cut = " + str(float(value_dc_qaoa)))
    except Exception as e:
        # Record 0 so the plot keeps one point per graph even on failure
        value_dc_qaoa = 0
        value_dc_qaoa_ls.append(value_dc_qaoa)
        print("DC-QAOA fails with error message '" + str(e) + "'")
# +
import matplotlib.pyplot as plt

# Compare the DC-QAOA cut value against the SDP upper bound for each graph.
plt.plot(value_dc_qaoa_ls, label="DC-QAOA")
plt.plot(ubound_sdp_ls, label="SDP upper bound", linestyle="--")
plt.title('Max-Cut Performance')  # BUG FIX: was misspelled 'Performancce'
plt.xlabel('Random Graph')
plt.ylabel('Calculated Optimal Max Cut')
plt.legend()
plt.show()
# -
# From the line graph above, we have verified that max-cut generated by DC-QAOA is under and close to this bound.
#
# ## Applications
#
# The Max-Cut problem belongs to quadratic unconstrained binary optimization (QUBO), which has a wide range of applications, from finding ground states for the spin glass problem to modeling NP-hard problems [5]. The Max-Cut problem itself inherits widespread usefulness.
#
# It is extensively related to various fields, including VLSI circuit design, statistical physics. Both the problem of minimizing the number of vias subject to pin preassignments and layer preferences and the problem of finding ground states of spin glasses with exterior magnetic field in the Ising model can be reduced to the Max-Cut problem [6].
#
# Also importantly, the Max-Cut problem provides a prototypical testbed for algorithmic techniques that can be applied to many interesting problems. For example, SDP relaxation of the Max-Cut problem is adopted in designing data clustering algorithms [7] and addressing the phase retrieval problem [8, 9].
#
# More detailed investigations on the Max-Cut problem can be found in [10-12].
# ---
#
# ## References
#
# [1] <NAME>., et al. "Reachability deficits in quantum approximate optimization." [Physical Review Letters 124.9 (2020): 090504.](https://journals.aps.org/prl/abstract/10.1103/PhysRevLett.124.090504)
#
# [2] <NAME>, <NAME>, and <NAME>. "Large-scale Quantum Approximate Optimization via Divide-and-Conquer." [arXiv preprint arXiv:2102.13288 (2021).](https://arxiv.org/abs/2101.03717)
#
# [3] Goemans, <NAME>., and <NAME>. "Improved approximation algorithms for maximum cut and satisfiability problems using semidefinite programming." [Journal of the ACM (JACM) 42.6 (1995): 1115-1145.](http://www-math.mit.edu/~goemans/PAPERS/maxcut-jacm.pdf)
#
# [4] Burer, Samuel, and <NAME>. "Local minima and convergence in low-rank semidefinite programming." [Mathematical Programming 103.3 (2005): 427-444.](https://link.springer.com/article/10.1007/s10107-004-0564-1)
#
# [5] Kochenberger, Gary, et al. "The unconstrained binary quadratic programming problem: a survey." [Journal of Combinatorial Optimization 28.1 (2014): 58-81.](https://link.springer.com/article/10.1007/s10878-014-9734-0)
#
# [6] Barahona, Francisco, et al. "An application of combinatorial optimization to statistical physics and circuit layout design." [Operations Research 36.3 (1988): 493-513.](https://www.jstor.org/stable/170992?seq=1)
#
# [7] Poland, Jan, and <NAME>. "Clustering pairwise distances with missing data: Maximum cuts versus normalized cuts." [International Conference on Discovery Science. Springer, Berlin, Heidelberg, 2006.](https://link.springer.com/chapter/10.1007/11893318_21)
#
# [8] Candes, <NAME>., et al. "Phase retrieval via matrix completion." [SIAM review 57.2 (2015): 225-251.](https://epubs.siam.org/doi/10.1137/110848074)
#
# [9] Waldspurger, Irene, <NAME>, and <NAME>. "Phase recovery, maxcut and complex semidefinite programming." [Mathematical Programming 149.1 (2015): 47-81.](https://link.springer.com/article/10.1007/s10107-013-0738-9)
#
# [10] Deza, Michel, and <NAME>. "Applications of cut polyhedra—I." [Journal of Computational and Applied Mathematics 55.2 (1994): 191-216.](https://www.sciencedirect.com/science/article/pii/0377042794900205)
#
# [11] Deza, Michel, and <NAME>. "Applications of cut polyhedra—II." [Journal of Computational and Applied Mathematics 55.2 (1994): 217-247.](https://www.sciencedirect.com/science/article/pii/0377042794900213)
#
# [12] Poljak, Svatopluk, and <NAME>. "Maximum cuts and largest bipartite subgraphs." [DIMACS Series 20 (1995): 181-244.](https://arxiv.org/pdf/1810.12144.pdf)
| tutorial/combinatorial_optimization/DC-QAOA_EN.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Stackoverflow Survey Data Analysis
#
# In this notebook, I analyze data for Stackoverflow developer surveys conducted over nine years 2011 - 2019, and answer some questions. Data is available at: https://insights.stackoverflow.com/survey
# ## Business Understanding
#
# Using the survey data, I would like to answer the following questions:
# 1. What is the trend of top Programming Languages over the years
# 2. What is the trend of job satisfaction over the years
# 3. What are the Programming Languages gaining/ losing popularity currently
# 4. What are the Database environments gaining/ losing popularity currently
# 5. What are the Platforms gaining/ losing popularity currently
# 6. What are the Web Frameworks gaining/ losing popularity currently
# ### Loading Libraries and Data
#load in libraries
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import collections
# %matplotlib inline
# Load one survey file per year (2011-2019). The 2011-2014 files are read
# with ISO-8859-1 encoding; low_memory=False on the wider 2013/2015/2018
# files avoids pandas' chunked mixed-dtype inference warnings.
#load 2011 data
df_survey_results_11 = pd.read_csv('./Datasets/Stack_Overflow/2011_Stack_Overflow_Survey_Results.csv', encoding = "ISO-8859-1")
#load 2012 data
df_survey_results_12 = pd.read_csv('./Datasets/Stack_Overflow/2012_Stack_Overflow_Survey_Results.csv', encoding = "ISO-8859-1")
#load 2013 data
df_survey_results_13 = pd.read_csv('./Datasets/Stack_Overflow/2013_Stack_Overflow_Survey_Responses.csv', \
                                   encoding = "ISO-8859-1", low_memory=False)
#load 2014 data
df_survey_results_14 = pd.read_csv('./Datasets/Stack_Overflow/2014_Stack_Overflow_Survey_Responses.csv', \
                                   encoding = "ISO-8859-1")
#load 2015 data
df_survey_results_15 = pd.read_csv('./Datasets/Stack_Overflow/2015_Stack_Overflow_Developer_Survey_Responses.csv', \
                                   low_memory=False)
#load 2016 data
df_survey_results_16 = pd.read_csv('./Datasets/Stack_Overflow/2016_Stack_Overflow_Survey_Responses.csv')
#load 2017 data
df_survey_results_17 = pd.read_csv('./Datasets/Stack_Overflow/2017_survey_results_public.csv')
#load 2018 data
df_survey_results_18 = pd.read_csv('./Datasets/Stack_Overflow/2018_survey_results_public.csv', low_memory=False)
#load 2019 data
df_survey_results_19 = pd.read_csv('./Datasets/Stack_Overflow/2019_survey_results_public.csv')
# ### Functions to help in Analysis
# +
# Some of the earlier datasets have the response to a question (e.g. 'Which languages are you proficient in?') spanning
# many columns - with multiple Unnamed columns following the column holding the question. This method concatenates the
# response across columns into one and drops the Unnamed columns
# one dataset (2015) contains an empty top row
def clean_unnamed(df):
    '''
    Combine multiple Unnamed columns corresponding to the same question.
    Also remove an empty/header top row if the real header sits in row 0.

    A multi-answer question is exported as one named column followed by a
    run of 'Unnamed: N' columns; each run is joined into the named column
    with ';' separators and the Unnamed columns are dropped.

    INPUT
    df - a dataframe to clean with Unnamed columns

    OUTPUT
    clean_df - a dataframe with the Unnamed columns merged away (a leading
               'Unnamed: 0' index column, if any, is left untouched)
    '''
    # If the first three column labels are all 'Unnamed:' placeholders, the
    # real header is stored in the first data row: promote it and drop it.
    if all(col.lower().startswith('unnamed:') for col in df.columns[:3]):
        df.columns = df.iloc[0]
        df = df.drop(df.index[0])

    # Collect runs of [question_col, Unnamed, Unnamed, ...] to merge.
    cols_to_combine = []
    potential_cols_to_combine = []
    for i, col in enumerate(df.columns):
        if col.lower().startswith('unnamed:'):
            # i == 0 is a leading unnamed index column - ignore it
            if i > 0:
                potential_cols_to_combine.append(col)
        else:
            if len(potential_cols_to_combine) > 1:
                cols_to_combine.append(potential_cols_to_combine)
            potential_cols_to_combine = [col]
    # BUG FIX: a question whose Unnamed run extends to the last column was
    # previously never combined, because a run was only flushed when another
    # named column followed it.
    if len(potential_cols_to_combine) > 1:
        cols_to_combine.append(potential_cols_to_combine)

    clean_df = df
    for col_set in cols_to_combine:
        first_col_name = col_set[0]
        other_cols = col_set[1:]
        for col in col_set:
            clean_df[col] = clean_df[col].fillna('')
        temp_col = clean_df[first_col_name]
        for col in other_cols:
            temp_col = temp_col + ";" + clean_df[col]
        # BUG FIX: pandas >= 2.0 treats str.replace patterns as literal text
        # by default, so regex=True is now required for these patterns.
        temp_col = temp_col.str.replace('[;]+', ';', regex=True)  # collapse ';' runs
        temp_col = temp_col.str.replace('^; ', '', regex=True)    # strip leading '; '
        temp_col = temp_col.str.replace('; $', '', regex=True)    # strip trailing '; '
        clean_df[first_col_name] = temp_col
        clean_df = clean_df.drop(other_cols, axis=1)
    return clean_df
#Combine columns - for some years data for one question has been collected in multiple columns
#(e.g. year 2015)
def concatenate_cols(df, col_name):
    '''
    Combine multiple columns corresponding to the same question into one.

    All columns whose name starts with col_name (case-insensitive) are
    joined row-wise with ';' separators into a new column named col_name,
    and the source columns are dropped.

    INPUT
    df - a dataframe with columns to combine
    col_name - shared name prefix of the columns to combine (also the name
               of the resulting combined column)

    OUTPUT
    clean_df - a dataframe with the combined column appended
    '''
    cols_to_combine = [col for col in df.columns
                       if col.lower().startswith(col_name.lower())]
    if not cols_to_combine:
        # Robustness fix: previously an unmatched prefix raised NameError.
        return df
    clean_df = df
    temp_col = None
    for col in cols_to_combine:
        clean_df[col] = clean_df[col].fillna('')
        if temp_col is None:
            temp_col = clean_df[col]
        else:
            temp_col = temp_col + ";" + clean_df[col]
    # BUG FIX: pandas >= 2.0 treats str.replace patterns as literal text by
    # default, so regex=True is now required for these patterns.
    temp_col = temp_col.str.replace('[;]+', ';', regex=True)  # collapse ';' runs
    temp_col = temp_col.str.replace('^; ', '', regex=True)    # strip leading '; '
    temp_col = temp_col.str.replace('; $', '', regex=True)    # strip trailing '; '
    clean_df[col_name] = temp_col
    clean_df = clean_df.drop(cols_to_combine, axis=1)
    return clean_df
# +
# for year 2014, there is no job satisfaction
def get_common_job_sat_values(col_looking_for_job, col_visit_board, col_career_profile):
    """
    Derive job-satisfaction labels for the 2014 survey (which had no
    satisfaction question) from three proxy columns, and return each label
    with the percentage of respondents it covers.

    Inputs:
    col_looking_for_job - whether the user is looking for a job
    col_visit_board - how often the user visits job boards
    col_career_profile - whether the user has a Careers 2.0 profile

    Output:
    list of (label, percentage) tuples, most frequent first
    """
    derived_labels = []
    for idx, raw_looking in enumerate(col_looking_for_job):
        looking = raw_looking.strip().lower()
        board = col_visit_board[idx].strip().lower()
        profile = col_career_profile[idx].strip().lower()
        if looking == '':
            continue  # unanswered: excluded from the distribution
        label = 'neither satisfied nor dissatisfied'
        if looking == 'no':
            # Not job hunting: the fewer job-search signals, the happier.
            if board == 'never' and profile == 'no thank you':
                label = 'extremely satisfied'
            elif board == 'never' or profile == 'no thank you':
                label = 'slightly satisfied'
        else:
            # Open to offers: the more job-search signals, the unhappier.
            if board == 'daily' and profile == 'yes':
                label = 'extremely dissatisfied'
            elif board == 'daily' or profile == 'yes':
                label = 'slightly dissatisfied'
        derived_labels.append(label)
    counts = collections.Counter(derived_labels)
    total = len(derived_labels)
    return [(label, round(count * 100 / total, 2))
            for label, count in counts.most_common()]
#create data frames for satisfaction
def create_sat_data_frame(percent, year):
    """
    Build a one-row DataFrame of satisfaction percentages for one survey year.

    inputs:
    percent - list of (label, percentage) tuples
    year - survey year stored in the leading 'year' column
    """
    sat_frame = pd.DataFrame(percent).transpose()
    # First transposed row holds the labels: promote it to the header.
    sat_frame.columns = sat_frame.iloc[0]
    sat_frame = sat_frame.drop(sat_frame.index[0])
    sat_frame.insert(0, 'year', [year])
    return sat_frame
# +
# get top values
def get_top_values(col, top_n=10):
    """
    Return the top options in a multi-select column ( ';'-separated) with the
    percentage of respondents who selected each.

    Values are normalized (replace_dict) and cross-year noise is filtered
    out (ignore_arr) before counting; a respondent is counted once per
    option, and percentages are relative to respondents with at least one
    surviving selection.

    inputs:
    col - column with multiple selection data.
    top_n - top n values to return along with percentage. Defaults to 10.

    output:
    list of (value, percentage) tuples, most frequent first.
    """
    # as langauges were merged with framework, technology questiong in some years - ignoring them
    ignore_arr = ['android', 'angularjs', 'cloud (aws, gae, azure, etc.)', 'none', 'others', \
                  'mongodb', 'node.js', 'reactjs', 'redis', 'wordpress', 'arduino / raspberry pi', \
                  'jquery', 'lamp', 'ios', 'sql server', 'response', 'other (please specify)', \
                  "i don't have a job", \
                  'i wish i had a job!']
    # Maps year-specific spellings/ratings to one canonical label per option.
    replace_dict = {'bash':'bash/shell/powershell',
                    'bash/shell':'bash/shell/powershell',
                    'c++11':'c++',
                    'css':'html/css',
                    'html':'html/css',
                    'html5':'html/css',
                    'objective c':'objective-c',
                    'visual basic':'vb',
                    'visual basic 6':'vb',
                    'so happy it hurts':'extremely satisfied',
                    'i enjoy going to work':'slightly satisfied',
                    'it pays the bills':'neither satisfied nor dissatisfied',
                    "i'm not happy in my job":'slightly dissatisfied',
                    'fml':'extremely dissatisfied',
                    'moderately satisfied':'slightly satisfied',
                    'moderately dissatisfied':'slightly dissatisfied',
                    '0.0':'extremely dissatisfied',
                    '1.0':'extremely dissatisfied',
                    '2.0':'slightly dissatisfied',
                    '3.0':'slightly dissatisfied',
                    '4.0':'neither satisfied nor dissatisfied',
                    '5.0':'neither satisfied nor dissatisfied',
                    '6.0':'neither satisfied nor dissatisfied',
                    '7.0':'slightly satisfied',
                    '8.0':'slightly satisfied',
                    '9.0':'extremely satisfied',
                    '10.0':'extremely satisfied',
                    'i hate my job':'extremely dissatisfied',
                    'hate my job':'extremely dissatisfied',
                    'very dissatisfied':'extremely dissatisfied',
                    "i'm somewhat dissatisfied with my job":'slightly dissatisfied',
                    "i'm neither satisfied nor dissatisfied":'neither satisfied nor dissatisfied',
                    "i'm neither satisfied nor dissatisfied with my job":\
                        'neither satisfied nor dissatisfied',
                    'its a paycheck':'neither satisfied nor dissatisfied',
                    "it's a paycheck":'neither satisfied nor dissatisfied',
                    "i'm somewhat satisfied with my job":'slightly satisfied',
                    'i love my job':'extremely satisfied',
                    'love my job':'extremely satisfied',
                    'very satisfied':'extremely satisfied'
                   }
    all_values = []
    non_blank_values = 0
    for val in col:
        arr_to_extend = []
        if isinstance(val, float):
            # BUG FIX: NaN (missing answer) used to be stringified to 'nan'
            # and counted as a real option; skip it instead. Legitimate
            # numeric ratings (e.g. 7.0) still go through replace_dict.
            if val != val:
                continue
            val = str(val)
        for val_split in val.split(';'):
            val_append = val_split.strip().lower()
            if val_append != '' and val_append not in ignore_arr:
                if val_append in replace_dict:
                    val_append = replace_dict[val_append]
                if val_append not in arr_to_extend:
                    arr_to_extend.append(val_append)
        if len(arr_to_extend) > 0:
            non_blank_values += 1
            all_values.extend(arr_to_extend)
    # Guard against an all-blank column (previously a ZeroDivisionError).
    if non_blank_values == 0:
        return []
    count_dict = collections.Counter(all_values)
    most_common = count_dict.most_common(top_n)
    return [(lang, round((count * 100)/non_blank_values, 2))
            for lang, count in most_common]
# get top values
def get_top_values_without_ignore_or_replace(col, top_n=10):
    """
    Return the top options in a multi-select (';'-separated) column with the
    percentage of respondents who selected each.

    Unlike get_top_values, values are neither normalized nor filtered.

    inputs:
    col - column with multiple selection data.
    top_n - top n values to return along with percentage. Defaults to 10.

    output:
    list of (value, percentage) tuples, most frequent first; percentages are
    relative to respondents with at least one selection.
    """
    all_values = []
    non_blank_values = 0
    for val in col:
        arr_to_extend = []
        if isinstance(val, float):
            # BUG FIX: NaN (missing answer) used to be stringified to 'nan'
            # and counted as a real option; skip it instead.
            if val != val:
                continue
            val = str(val)
        for val_split in val.split(';'):
            val_append = val_split.strip()
            if val_append != '' and val_append not in arr_to_extend:
                arr_to_extend.append(val_append)
        if len(arr_to_extend) > 0:
            non_blank_values += 1
            all_values.extend(arr_to_extend)
    # Guard against an all-blank column (previously a ZeroDivisionError).
    if non_blank_values == 0:
        return []
    count_dict = collections.Counter(all_values)
    most_common = count_dict.most_common(top_n)
    return [(val, round((count * 100)/non_blank_values, 2))
            for val, count in most_common]
# +
def plot_values(df, col_to_plot, col_to_count, title='', xlabel='', ylabel='', width=6.4, height=4.8):
    """
    Draw a horizontal bar chart of the percentage share of each unique value
    in a DataFrame column.

    inputs:
    df - Data frame
    col_to_plot - Name of data frame column to plot
    col_to_count - Name of column to use for counting
    title - Title of Plot. Defaults to ''
    xlabel - x axis label. Defaults to ''
    ylabel - y axis label. Defaults to ''
    width - width of plot. Defaults to 6.4
    height - height of plot. Defaults to 4.8
    """
    percentages = df.groupby([col_to_plot])[col_to_count].count() * 100 / df.shape[0]
    percentages = percentages.sort_values()
    axes = percentages.plot.barh(figsize=(width, height))
    plt.title(title)
    plt.xlabel(xlabel)
    plt.ylabel(ylabel)
    for bar in axes.patches:
        # Annotate each bar with its percentage just past its end.
        axes.text(bar.get_width() + .3, bar.get_y() + .15,
                  str(round(bar.get_width(), 2)) + '%', fontsize=10, color='dimgrey')
    plt.show()
def plot_top_values(top_values, title='', xlabel='', ylabel='', width=6.4, height=4.8):
    """
    Draw a horizontal bar chart from a list of (label, value) tuples.

    inputs:
    top_values - list of (label, value) tuples to plot
    title - Title of Plot. Defaults to ''
    xlabel - x axis label. Defaults to ''
    ylabel - y axis label. Defaults to ''
    width - width of plot. Defaults to 6.4
    height - height of plot. Defaults to 4.8
    """
    frame = pd.DataFrame(top_values).sort_values(by=[1])
    axes = frame.plot.barh(x=0, y=1, figsize=(width, height))
    axes.legend().set_visible(False)
    plt.title(title)
    plt.xlabel(xlabel)
    plt.ylabel(ylabel)
    for bar in axes.patches:
        # Annotate each bar with its value just past its end.
        axes.text(bar.get_width() + .3, bar.get_y() + .15,
                  str(round(bar.get_width(), 2)) + '%', fontsize=10, color='dimgrey')
    plt.show()
# -
# ## Data Understanding
#
# Below we try to get basic understanding of the data
# Peek at each year's raw data and numeric summary before any cleaning.
df_survey_results_11.head()
df_survey_results_11.describe()
df_survey_results_12.head()
df_survey_results_12.describe()
df_survey_results_13.head()
df_survey_results_13.describe()
df_survey_results_14.head()
df_survey_results_14.describe()
df_survey_results_15.head()
df_survey_results_15.describe()
df_survey_results_16.head()
df_survey_results_16.describe()
df_survey_results_17.head()
df_survey_results_17.describe()
df_survey_results_18.head()
df_survey_results_18.describe()
df_survey_results_19.head()
df_survey_results_19.describe()
# ### Data Cleaning
#
# Removing Unnamed columns and rows.
# +
# Merge each question's run of Unnamed columns (2011-2015 export format).
df_survey_results_11 = clean_unnamed(df_survey_results_11)
df_survey_results_12 = clean_unnamed(df_survey_results_12)
df_survey_results_13 = clean_unnamed(df_survey_results_13)
df_survey_results_14 = clean_unnamed(df_survey_results_14)
df_survey_results_15 = clean_unnamed(df_survey_results_15)

#Concatenate results from multiple columns in one
df_survey_results_15 = concatenate_cols(df_survey_results_15, 'Current Lang & Tech:')
# -
# ## Prepare Data
#
# Read in the relevant columns required for analysis. Filling NaNs with blanks where requried.
# +
# Language columns: the question text / column name differs by year.
lang_col_11 = df_survey_results_11['Which languages are you proficient in?']
lang_col_12 = df_survey_results_12['Which languages are you proficient in?']
lang_col_13 = df_survey_results_13['Which of the following languages or technologies have you used significantly in the past year?']
lang_col_14 = df_survey_results_14['Which of the following languages or technologies have you used significantly in the past year?']
lang_col_15 = df_survey_results_15['Current Lang & Tech:']
# Filling NaN with blanks
lang_col_16 = df_survey_results_16['tech_do'].fillna('')
lang_col_17 = df_survey_results_17['HaveWorkedLanguage'].fillna('')
lang_col_18 = df_survey_results_18['LanguageWorkedWith'].fillna('')
lang_col_19 = df_survey_results_19['LanguageWorkedWith'].fillna('')
# -

#Create clean satisfaction cols to work with
sat_col_11 = df_survey_results_11['Please rate your job/career satisfaction'].fillna('')
sat_col_12 = df_survey_results_12['What best describes your career / job satisfaction? '].fillna('')
sat_col_13 = df_survey_results_13['What best describes your career / job satisfaction?'].fillna('')
# 2014 had no satisfaction question; these three proxy columns are combined
# later by get_common_job_sat_values.
looking_job_col_14 = df_survey_results_14['Are you currently looking for a job or open to new opportunities?'].fillna('')
visit_job_board_14 = df_survey_results_14['How often do you visit job boards?'].fillna('')
stack_job_profile_14 = df_survey_results_14['Do you have a Stack Overflow Careers 2.0 Profile?'].fillna('')
sat_col_15 = df_survey_results_15['Job Satisfaction'].fillna('')
sat_col_16 = df_survey_results_16['job_satisfaction'].fillna('')
sat_col_17 = df_survey_results_17['JobSatisfaction'].fillna('')
sat_col_18 = df_survey_results_18['JobSatisfaction'].fillna('')
sat_col_19 = df_survey_results_19['JobSat'].fillna('')

# 2019 worked-with vs. desired technology columns for popularity trends.
lang_worked_with_19 = df_survey_results_19['LanguageWorkedWith']
lang_desired_to_work_19 = df_survey_results_19['LanguageDesireNextYear']
database_worked_with_19 = df_survey_results_19['DatabaseWorkedWith']
database_desired_to_work_19 = df_survey_results_19['DatabaseDesireNextYear']
platform_worked_with_19 = df_survey_results_19['PlatformWorkedWith']
platform_desired_to_work_19 = df_survey_results_19['PlatformDesireNextYear']
webframe_worked_with_19 = df_survey_results_19['WebFrameWorkedWith']
webframe_desired_to_work_19 = df_survey_results_19['WebFrameDesireNextYear']
# ## Data Modeling
#
# Below we get the top languages across the years along with percentage of users using the language. We get the percentage of users with various levels of job satisfaction. We also get top languages, database environments, platforms and web frameworks.
# Per-year top-10 languages as (language, % of respondents) tuples.
top_langs_11 = get_top_values(lang_col_11)
top_langs_12 = get_top_values(lang_col_12)
top_langs_13 = get_top_values(lang_col_13)
top_langs_14 = get_top_values(lang_col_14)
top_langs_15 = get_top_values(lang_col_15)
top_langs_16 = get_top_values(lang_col_16)
top_langs_17 = get_top_values(lang_col_17)
top_langs_18 = get_top_values(lang_col_18)
top_langs_19 = get_top_values(lang_col_19)
# Satisfaction distributions; 2014 is reconstructed from proxy columns.
sat_percent_11 = get_top_values(sat_col_11)
sat_percent_12 = get_top_values(sat_col_12)
sat_percent_13 = get_top_values(sat_col_13)
sat_percent_14 = get_common_job_sat_values(looking_job_col_14, visit_job_board_14, stack_job_profile_14)
sat_percent_15 = get_top_values(sat_col_15)
sat_percent_16 = get_top_values(sat_col_16)
sat_percent_17 = get_top_values(sat_col_17)
sat_percent_18 = get_top_values(sat_col_18)
sat_percent_19 = get_top_values(sat_col_19)
# One single-row frame per year, to be stacked into a year-by-level table.
pd_sat_11 = create_sat_data_frame(sat_percent_11, 2011)
pd_sat_12 = create_sat_data_frame(sat_percent_12, 2012)
pd_sat_13 = create_sat_data_frame(sat_percent_13, 2013)
pd_sat_14 = create_sat_data_frame(sat_percent_14, 2014)
pd_sat_15 = create_sat_data_frame(sat_percent_15, 2015)
pd_sat_16 = create_sat_data_frame(sat_percent_16, 2016)
pd_sat_17 = create_sat_data_frame(sat_percent_17, 2017)
pd_sat_18 = create_sat_data_frame(sat_percent_18, 2018)
pd_sat_19 = create_sat_data_frame(sat_percent_19, 2019)
# +
combined_sat_data = pd.concat([pd_sat_11, pd_sat_12, pd_sat_13, pd_sat_14, pd_sat_15, pd_sat_16, pd_sat_17, \
                          pd_sat_18, pd_sat_19], ignore_index=True, sort=False)
# Order columns from least to most satisfied for a readable stacked plot.
cols = ['year', 'extremely dissatisfied', 'slightly dissatisfied', 'neither satisfied nor dissatisfied', \
        'slightly satisfied', 'extremely satisfied']
combined_sat_data = combined_sat_data[cols]
# -
# top_n=1000 effectively returns every option rather than just the top 10.
top_lang_worked_with_19 = get_top_values_without_ignore_or_replace(lang_worked_with_19, top_n=1000)
top_lang_desired_to_work_19 = get_top_values_without_ignore_or_replace(lang_desired_to_work_19, top_n=1000)
top_database_worked_with_19 = get_top_values_without_ignore_or_replace(database_worked_with_19, top_n=1000)
top_database_desired_to_work_19 = get_top_values_without_ignore_or_replace(database_desired_to_work_19, top_n=1000)
top_platform_worked_with_19 = get_top_values_without_ignore_or_replace(platform_worked_with_19, top_n=1000)
top_platform_desired_to_work_19 = get_top_values_without_ignore_or_replace(platform_desired_to_work_19, top_n=1000)
top_webframe_worked_with_19 = get_top_values_without_ignore_or_replace(webframe_worked_with_19, top_n=1000)
top_webframe_desired_to_work_19 = get_top_values_without_ignore_or_replace(webframe_desired_to_work_19, top_n=1000)
# Plot the per-year top-10 language shares.
# BUG FIX: chart titles previously misspelled 'Languages' as 'Langauges'.
plot_top_values(top_langs_11, 'Top Languages of 2011', 'Percentage of Respondents')
plot_top_values(top_langs_12, 'Top Languages of 2012', 'Percentage of Respondents')
plot_top_values(top_langs_13, 'Top Languages of 2013', 'Percentage of Respondents')
plot_top_values(top_langs_14, 'Top Languages of 2014', 'Percentage of Respondents')
plot_top_values(top_langs_15, 'Top Languages of 2015', 'Percentage of Respondents')
plot_top_values(top_langs_16, 'Top Languages of 2016', 'Percentage of Respondents')
plot_top_values(top_langs_17, 'Top Languages of 2017', 'Percentage of Respondents')
plot_top_values(top_langs_18, 'Top Languages of 2018', 'Percentage of Respondents')
plot_top_values(top_langs_19, 'Top Languages of 2019', 'Percentage of Respondents')
# # Identifying Job Satisfaction trend over the years
# +
# Stacked bars: distribution of job-satisfaction levels per survey year.
# BUG FIX: `combined_data` was undefined (NameError); the frame built in the
# cell above is `combined_sat_data`.
ax = combined_sat_data.plot(kind='bar', stacked=True, x='year', figsize=(10,6), legend=False)
plt.legend(loc='center left', bbox_to_anchor=(1.0, 0.5))
plt.show()
| .ipynb_checkpoints/Stack_Overflow_Survey_Analysis-checkpoint.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 2
# language: python
# name: python2
# ---
# # Data exploration
#
# To start with, let us load the dataframe, summarize the columns, and plot a scatter matrix of the data to check for e.g. missing values, non-linear scaling, etc..
import pandas as pd
# +
# Sample code number: id number
# Clump Thickness: 1 - 10
# 3. Uniformity of Cell Size: 1 - 10
# 4. Uniformity of Cell Shape: 1 - 10
# 5. Marginal Adhesion: 1 - 10
# 6. Single Epithelial Cell Size: 1 - 10
# 7. Bare Nuclei: 1 - 10
# 8. Bland Chromatin: 1 - 10
# 9. Normal Nucleoli: 1 - 10
# 10. Mitoses: 1 - 10
# 11. Class: (2 for benign, 4 for malignant)
# Column names follow the attribute list quoted in the comment above.
names = ['sampleid', 'clumpthickness', 'sizeuniformity', 'shapeunformity',
        'adhesion', 'epithelialsize', 'barenuclei', 'blandchromatin', 'normalnucleoli',
        'mitoses', 'cellclass']
df = pd.read_csv('./breast-cancer-wisconsin.data', names=names)
# df.drop('sampleid')
# The sample id carries no predictive information - drop it in place.
df.drop('sampleid', axis=1, inplace=True)
df.head(10)
# Binarize the target: 1 = malignant (raw code 4), 0 = benign (raw code 2).
df.cellclass = (df.cellclass == 4).astype(int)
# It turns out one column is a string, but should be an int...
# NOTE(review): this dataset commonly contains '?' placeholders in barenuclei,
# on which astype(int) would raise -- confirm the local file is clean.
df.barenuclei = df.barenuclei.values.astype(int)
# -
df.describe()
# Check the class balance. Turns out to be pretty good so we should have a relatively unbiased view
print 'Num Benign', (df.cellclass==2).sum(), 'Num Malignant', (df.cellclass==4).sum()
# # Scatter matrix.
#
# None of the features appear to require rescaling transformations e.g. on a log-scales...
# NOTE(review): pandas.tools.plotting was removed in later pandas releases;
# the same function now lives at pandas.plotting.scatter_matrix.
from pandas.tools.plotting import scatter_matrix
_ = scatter_matrix(df, figsize=(14,14), alpha=.4)
# # Constructing a logistic regression classifier
# Intriguingly, the logistic
# +
from sklearn.linear_model import LogisticRegression
from sklearn import cross_validation
from sklearn import svm
LR = LogisticRegression(penalty='l1', dual=False, tol=0.0001, C=1,
fit_intercept=True, intercept_scaling=1,
class_weight=None, random_state=None,
solver='liblinear', max_iter=100,
multi_class='ovr', verbose=1,
warm_start=False, n_jobs=1)
X, Y = df.astype(np.float32).get_values()[:,:-1], df.get_values()[:,-1]
X2 = np.append(X,X**2, axis=1)
print X2.shape
LR.fit(X, Y)
print LR.score(X,Y)
C_list = np.logspace(-1, 2, 15)
CV_scores = []
CV_scores2 = []
for c in C_list:
LR = LogisticRegression(penalty='l1', dual=False, tol=0.0001, C=c,
fit_intercept=True, intercept_scaling=1,
class_weight=None, random_state=None,
solver='liblinear', max_iter=100,
multi_class='ovr', verbose=1,
warm_start=False, n_jobs=1)
CV_scores.append(np.average(cross_validation.cross_val_score(LR, X, Y, cv=6, n_jobs=12)))
svm_class = svm.SVC(C=c, kernel='linear', gamma='auto', coef0=0.0,
shrinking=True, probability=False, tol=0.001, cache_size=200,
class_weight=None, verbose=False,
max_iter=-1, decision_function_shape=None, random_state=None)
CV_scores2.append(np.average(cross_validation.cross_val_score(svm_class, X, Y, cv=6, n_jobs=12)))
# +
import matplotlib.pyplot as plt  # BUG FIX: plt was used below but never imported in this notebook

# Accuracy vs. regularization strength for both classifiers.
plt.plot(C_list, CV_scores, marker='o', label='Logistic Regression L1 loss')
plt.plot(C_list, CV_scores2, marker='o', label='SVM-Linear')
plt.xscale('log')
plt.xlabel(r'C = 1/$\lambda$')
plt.legend(loc=4)
# +
from sklearn.metrics import confusion_matrix

# Refit both models on the first 300 rows (train) and report confusion
# matrices on the remaining rows (test). Note: the split is not shuffled.
LR = LogisticRegression(penalty='l1', dual=False, tol=0.0001, C=1e10,
                        fit_intercept=True, intercept_scaling=1,
                        class_weight=None, random_state=None,
                        solver='liblinear', max_iter=100,
                        multi_class='ovr', verbose=1,
                        warm_start=False, n_jobs=1)
LR.fit(X[:300],Y[:300])
svm_class = svm.SVC(C=10., kernel='linear', gamma='auto', coef0=0.0,
                    shrinking=True, probability=True, tol=0.001, cache_size=200,
                    class_weight=None, verbose=False,
                    max_iter=-1, decision_function_shape=None, random_state=None)
svm_class.fit(X[:300],Y[:300])

# Confusion matrix
print
print 'Confusion Matrix - LASSO Regression'
print confusion_matrix(y_true=Y[300:], y_pred=LR.predict(X[300:]))
print 'Confusion Matrix - SVM-Linear'
print confusion_matrix(y_true=Y[300:], y_pred=svm_class.predict(X[300:]))
# -
# # Measuring precision/recall and ROC curves
# +
from sklearn.metrics import roc_curve
from sklearn.metrics import roc_auc_score
from sklearn.metrics import precision_recall_curve

plt.figure(figsize=(7,2))
# Left panel: precision-recall curve on the held-out rows (index >= 300).
plt.subplot(121)
prec, rec, thresh = precision_recall_curve(y_true=Y[300:], probas_pred=LR.predict_proba(X[300:])[:,1])
plt.plot(rec, prec,)
plt.xlabel('Recall')
plt.ylabel('Precision')
plt.xlim(0,1)
plt.ylim(0,1)
# Right panel: ROC curve with the area under it printed in the corner.
plt.subplot(122)
fp, tp, thresh = roc_curve(y_true=Y[300:], y_score=LR.predict_proba(X[300:])[:,1])
AUC = roc_auc_score(y_true=Y[300:], y_score=LR.predict_proba(X[300:])[:,1])
# NOTE(review): the next call recomputes the ROC curve and discards the result.
roc_curve(y_true=Y[300:], y_score=LR.predict_proba(X[300:])[:,1])
plt.text(.05, .05, 'AUC=%1.3f'%AUC)
plt.plot(fp, tp, linewidth=2)
plt.xlabel('False Positives')
plt.ylabel('True Positives')
# -
| notebooks/data_challenge/Data Summaries.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import numpy as np
import pandas as pd
from scipy.stats import matrix_normal, wishart
from statsmodels.stats.moment_helpers import cov2corr
from data_import import import_data_svenson
from simulation import generate_B, generate_E, generate_pheno
# %load_ext autoreload
# %autoreload 2
# -
# import data
geno_df, pheno_df = import_data_svenson()
# Kinship is estimated as the genotype covariance matrix.
kinship_df = geno_df.cov()
kinship = kinship_df.to_numpy()
kinship.shape
# Quick smoke tests of the building blocks used by the simulation:
# correlation conversion, matrix-normal and Wishart sampling, and one
# phenotype draw at heritability 0.8 for N=187 individuals.
cov2corr(kinship)
matrix_normal.rvs(rowcov=np.eye(10), colcov=np.eye(5))
wishart.rvs(df=5, scale=np.eye(5)).shape
generate_pheno(kinship, hsquared=0.8, N=187).shape
# +
#geno_df = pd.read_csv('svenson_normalized_genotype.csv')
# -
geno_df.shape
# NOTE(review): kinship was already computed above from the same geno_df;
# this recomputation only matters if the commented-out CSV reload is used.
kinship_df = geno_df.cov()
kinship = kinship_df.to_numpy()
kinship.shape
# Simulate phenotypes over a grid of heritability values and save each run
# to its own CSV file.
for hsquared in np.linspace(0.05, 0.95, 11):
    print(hsquared)
    pheno_mat = generate_pheno(kinship, hsquared, N=kinship.shape[0])
    print(pheno_mat)
    np.savetxt('simulated_hsquared_{:.2f}_.csv'.format(hsquared), pheno_mat, delimiter=',')
| simulation_test.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # 3.3 Lexical Texts and their Relation to Literary Vocabulary
#
# In section [3.2](./3_2_Lit_Lex.ipynb) we asked whether we can see differences between Old Babylonian literary compositions in their usage of vocabulary (lemmas and MWEs) attested in the lexical corpus. In this notebook we will change perspective and ask: are there particular lexical texts (or groups of lexical texts) that show a greater engagement with literary vocabulary than others?
#
# In [3.1](./3_1_Lit_Lex_Vocab.ipynb) and [3.2](./3_2_Lit_Lex.ipynb) we used Multiple Word Expressions, connecting words that are found in a lexical entry by underscores (using `MWEtokenizer()` from the `nltk` module). The lemmas and MWE were visualized in Venn diagrams to illustrate the intersection between lexical and literary vocabulary.
#
# In this notebook we will use the ngram option of the `CountVectorizer()` function in order to find sequences of lemmas that are shared between lexical and literary texts. A ngram is a continuous sequence of *n* words (or lemmas).
#
# In part, this notebook uses the same techniques and the same code as notebook [3.2](./3_2_Lit_Lex.ipynb), and the reader is referred there for further explanation.
# ## 3.3.0 Preparation
# We import the necessary modules and open files that were produced in earlier notebooks.
import warnings
warnings.simplefilter(action='ignore', category=FutureWarning) # this suppresses a warning about pandas from tqdm
import pandas as pd
from ipywidgets import interact
from sklearn.feature_extraction.text import CountVectorizer
from tqdm.auto import tqdm
import zipfile
import json
# Open the file `lexlines.p` which was produced in [3_1_Lit_Lex_Vocab.ipynb](./3_1_Lit_Lex_Vocab.ipynb). The file contains the pickled version of the DataFrame `lex_lines` in which the lexical ([dcclt](http://oracc.org/dcclt)) corpus is represented in line-by-line format.
# Line-by-line lexical corpus pickled by notebook 3.1.
lex_lines = pd.read_pickle('output/lexlines.p')
# ### 3.3.0.1 Special Case: OB Nippur Ura 6
# The sixth chapter of the Old Babylonian Nippur version of the thematic list Ura deals with foodstuffs and drinks. This chapter was not standardized (each exemplar has its own order of items and sections) and therefore no composite text has been created in [DCCLT](http://oracc.org/dcclt). Instead, the "composite" of [OB Nippur Ura 6](http://oracc.org/dcclt/Q000043) consists of the concatenation of all known Nippur exemplars of the list of foodstuffs. In our current dataframe, therefore, there are no lines where the field `id_text` equals "Q000043".
#
# We create a "composite" by changing the field `id_text` in all exemplars of [OB Nippur Ura 6](http://oracc.org/dcclt/Q000043) to "Q000043".
# Nippur exemplar tablets of OB Nippur Ura 6 (list of foodstuffs).  The chapter
# was never standardized, so these exemplars are relabeled below to form an
# artificial composite under the ID Q000043.
_URA6_P_NUMBERS = (
    "P227657 P227743 P227791 P227799 P227925 P227927 P227958 P227967 "
    "P227979 P228005 P228008 P228200 P228359 P228368 P228488 P228553 "
    "P228562 P228663 P228726 P228831 P228928 P229015 P229093 P229119 "
    "P229304 P229332 P229350 P229351 P229352 P229353 P229354 P229356 "
    "P229357 P229358 P229359 P229360 P229361 P229362 P229365 P229366 "
    "P229367 P229890 P229925 P230066 P230208 P230230 P230530 P230586 "
    "P231095 P231128 P231424 P231446 P231453 P231458 P231742 P266520"
)
Ura6 = ["dcclt/" + p_number for p_number in _URA6_P_NUMBERS.split()]
lex_lines.loc[lex_lines["id_text"].isin(Ura6), "id_text"] = "dcclt/Q000043"
# ### 3.3.0.2 Open Shared Vocabulary List
# The file `lit_lex_vocab` is a list that includes all lemmas and Multiple Word Expressions that are shared by the literary corpus and the lexical corpus. This list was produced in [3_2_Lit_Lex.ipynb](./3_2_Lit_Lex.ipynb). In sections [3.1](./3_1_Lit_Lex_Vocab.ipynb) and [3.2](./3_2_Lit_Lex.ipynb) lexical *entries* were turned into MWEs by connecting the individual lemmas by underscores (as in `amar\[young\]n_ga\[milk\]n_gu\[eat\]v/t`). In this notebook we will take a different approach by using ngrams (sequences of words or lemmas). For that reason we need to replace all underscores by spaces.
#
# This vocabulary is used in the next section for building a Document Term Matrix.
# Shared lexical/literary vocabulary produced in notebook 3_2, one entry per line.
with open('output/lit_lex_vocab.txt', 'r', encoding = 'utf8') as l:
    lit_lex_vocab = l.read().splitlines()
# MWEs were stored with underscores joining lemmas; replace them with spaces so
# the entries can be matched as ngrams (sequences of lemmas) below.
lit_lex_vocab = [v.replace('_', ' ') for v in lit_lex_vocab]
lit_lex_vocab[:25]
# ## 3.3.1 Document Term Matrix: *ngrams*
#
# The lexical corpus is transformed into a Document Term Matrix (or DTM), in the same way we did in [3.2](./3_2_Lit_Lex.ipynb) for the literary corpus - but with some important differences.
#
# First, the parameter `ngram_range` is set to (1, 5). With this parameter, `Countvectorizer()` will create a column for each word (ngram n=1), but also for each sequence of two words (bigram; n=2), or three words (trigram; n=3), etc.
#
# Potentially, this results in a very big (and very sparse) matrix. In order to limit its size somewhat we use the vocabulary `lit_lex_vocab` which contains all lemmas and lexical entries shared by the lexical and literary corpora. These are the relevant vocabulary items that we wish to explore.
#
# Second, instead of creating a DTM for lexical *documents* we will use `CountVectorizer()` on the lexical corpus in *line* format, rather than in document format. This is important, because we do not want the ngrams to jump over line boundaries. The resulting DTM, therefore, is more properly called a Line Term Matrix, providing frequencies of terms (and ngrams) for each line in the lexical corpus. In the next step we group the data by text ID and aggregate the line-based frequencies to create a proper DTM. The `aggregate()` function, in this case, is `sum`: for every word or ngram we need the summation of the frequencies of all the lines of each lexical composition.
#
# `Countvectorizer()` is used here on the raw data in `lex_lines`, including unlemmatized words. By including the unlemmatized words, we prevent creating artificial ngrams that consist of one term before and one term after an illegible word. Thus, the lemma sequence **dumu\[child\]n x\[na\]na lugal\[king\]n** will *not* match the bigram **dumu\[child\]n lugal\[king\]n**. Since `lit_lex_vocab` has no entries that contain **\[na\]na**, meaningless ngrams such as **dumu\[child\]n x\[na\]na** are filtered out automatically.
# Build a Line Term Matrix over the lexical corpus: one row per *line* (so
# ngrams never cross line boundaries), restricted to the shared vocabulary.
cv = CountVectorizer(preprocessor = lambda x: x, tokenizer = lambda x: x.split(), vocabulary = lit_lex_vocab, ngram_range=(1, 5))
dtm = cv.fit_transform(lex_lines['lemma'])
# get_feature_names() was removed in scikit-learn 1.2; prefer its replacement
# get_feature_names_out() when the installed version provides it.
feature_names = (cv.get_feature_names_out() if hasattr(cv, 'get_feature_names_out')
                 else cv.get_feature_names())
lex_lines_dtm = pd.DataFrame(dtm.toarray(), columns=feature_names, index=lex_lines["id_text"])
# Aggregate line frequencies per composition; the string 'sum' avoids the
# pandas deprecation of passing the builtin sum to agg().
lex_comp_dtm = lex_lines_dtm.groupby('id_text').agg('sum').reset_index()
# ## 3.3.2 Compute Number of Matches
# The field `n_matches` represents the number of unique words or ngrams that a lexical document shares with the literary corpus. For the code see [3.2](./3_2_Lit_Lex.ipynb) section 3.2.2.
# A vocabulary item counts as a match when it occurs at least once
# (astype(bool)), so n_matches is the number of *distinct* shared
# lemmas/ngrams per lexical document.
lex_comp_dtm["n_matches"] = lex_comp_dtm[lit_lex_vocab].astype(bool).sum(axis = 1)
lex_comp_dtm
# ## 3.3.3 Document Length
# The number of matches is meaningless without a measure of document length. Length is defined here as the number of lemmatized words in a document. We cannot use the DTM for measuring length, because it includes ngrams and excludes words not found in the literary corpus. We therefore must go back to the raw data set in `lex_lines`, group lines to documents and omit non-lemmatized words from the count.
# Rebuild whole documents by concatenating the lemmas of all lines per text ID.
lex_comp = lex_lines.groupby(
    [lex_lines["id_text"]]).aggregate(
    {"lemma": ' '.join}).reset_index()
def lex_length(lemmas):
    """Count the lemmatized words in a lemma string.

    Tokens containing '[na]na' are unlemmatized words and are excluded
    from the count.
    """
    return sum(1 for token in lemmas.split() if '[na]na' not in token)
lex_comp['length'] = lex_comp['lemma'].map(lex_length)
# ## 3.3.3 Remove Duplicates and Empty Documents
# Since the lexical data are drawn from multiple (sub)projects, it is possible that there are duplicate documents. Duplicates have the same P, Q, or X number. We select the version with the largest number of (lemmatized) words and drop others.
#
# First we add the field `length` from the DataFrame `lex_comp` to the DataFrame `lex_comp_dtm` by merging on the field `id_text`. The merge method is `inner` (only merging those rows that are available in both DataFrames) so that documents that were omitted from `lex_comp` (because of length zero) do not show up again. Second, the field `id_text`, which has the format `dcclt/Q000041` or `dcclt/signlists/P447992`, is reduced to only the last 7 positions (P, Q, or X, followed by six digits). The merged DataFrame is ordered by length (from large to small) and, if duplicate `text_id`s are found, only the first one is kept with the Pandas method `drop_duplicates()`.
#
# Our data set has data from all Old Babylonian lexical documents currently in [DCCLT](http://oracc.org/dcclt). Not all of these documents are lemmatized. In particular, exemplars that have been linked to a composite text are usually not lemmatized. Such documents have no lemmatized contents and therefore have length 0. These documents are removed.
# Attach document length; the inner merge drops rows absent from lex_comp.
lex_comp_dtm = pd.merge(lex_comp_dtm, lex_comp[['id_text', 'length']], on = 'id_text', how = 'inner')
# Keep only the bare P/Q/X number (last 7 characters) so duplicates coming
# from different (sub)projects share the same ID.
lex_comp_dtm['id_text'] = lex_comp_dtm['id_text'].str[-7:]
# For duplicated IDs keep the version with the most lemmatized words.
lex_comp_dtm = lex_comp_dtm.sort_values(by = 'length', ascending=False)
lex_comp_dtm = lex_comp_dtm.drop_duplicates(subset = 'id_text', keep = 'first')
lex_comp_dtm = lex_comp_dtm.loc[lex_comp_dtm['length'] > 0] # remove compositions that have no lemmatized content
# ## 3.3.4 Adding Metadata and Normalizing
# The metadata of the lexical texts (such as composition name, etc.) is found in the JSON files for each of the (sub)projects downloaded in section [3.1](./3_1_Lit_Lex_Vocab.ipynb). The code is essentially the same as in [3.2](./3_2_Lit_Lex.ipynb), but since there are multiple (sub)projects involved, it is done in a loop.
# Collect catalogue metadata from each (sub)project's downloaded ZIP archive.
cat = {}
for proj in ['dcclt', 'dcclt/signlists', 'dcclt/nineveh', 'dcclt/ebla']:
    f = proj.replace('/', '-')
    file = f"jsonzip/{f}.zip" # The ZIP file was downloaded in notebook 3_1
    z = zipfile.ZipFile(file)
    st = z.read(f"{proj}/catalogue.json").decode("utf-8")
    j = (json.loads(st))
    cat.update(j["members"])
# One row per text; composite texts carry id_composite instead of id_text.
cat_df = pd.DataFrame(cat).T
cat_df["id_text"] = cat_df["id_text"].fillna(cat_df["id_composite"])
cat_df = cat_df.fillna('')
cat_df = cat_df[["id_text", "designation", "subgenre"]]
# ### 3.3.4.1 Merge Metadata
# Now merge `cat_df` with the DataFrame `lex_comp_dtm` on the field `id_text`. Of the DTM we only keep the fields `n_matches` and `length`. The resulting DataFrame contains descriptive information about lexical documents, plus the field `n_matches`, which is relevant only for the current exploration. The DataFrame is saved, minus the field `n_matches` for use in section 3.4.
#
# The DataFrame is shown in descending order of the number of matches.
# Merge catalogue metadata with the match counts; save a copy without
# n_matches for reuse in section 3.4.
lex = pd.merge(cat_df, lex_comp_dtm[['id_text', 'n_matches', 'length']], on = 'id_text', how = 'inner')
lex.drop('n_matches', axis = 1).to_pickle('output/lexdtm.p')
lex.sort_values(by='n_matches', ascending = False)
# ### 3.3.4.2 Normalizing
# Long lexical documents have more matches than short ones. Normalize by dividing the number of matches (`n_matches`) by text length. For very short documents this measure has little value; only longer documents are displayed. Since the number of matches is based on *ngrams*, it is possible that `n_matches` is larger than `length` and that `norm` is higher than 1. This only happens in very short documents.
# Normalize match counts by document length; display longer documents only,
# since the ratio is not meaningful for very short texts.
lex['norm'] = lex['n_matches'] / lex['length']
lex = lex.sort_values(by = 'norm', ascending = False)
lex.loc[lex.length > 250]
# ## 3.3.5 Explore the Results
# Explore the results in an interactive table. The slides, the check box, and the pull-down menus allow larger or smaller number of results, higher or lower threshold for text length, including only composites, only exemplars, or all, etc. The text ID numbers in the first column link to their editions in [DCCLT](http://oracc.org/dcclt).
lex.to_pickle('output/lex.p')
# Turn each text ID into a link to its online edition in DCCLT.
anchor = '<a href="http://oracc.org/dcclt/{}", target="_blank">{}</a>'
lex2 = lex.copy()
lex2['id_text'] = [anchor.format(val,val) for val in lex['id_text']]
# Q numbers are composite texts; all other IDs are individual exemplars.
lex2['PQ'] = ['Composite' if i[0] == 'Q' else 'Exemplar' for i in lex['id_text']]
@interact(sort_by = lex2.columns, rows = (1, len(lex2), 1), min_length = (0,500,5), show = ["Exemplars", "Composites", "All"])
def sort_df(sort_by = "norm", ascending = False, rows = 25, min_length = 250, show = 'All'):
    """Interactive view of the lex2 DataFrame.

    Filters by exemplar/composite status and minimum length, sorts by the
    chosen column, and returns a styled table of the top `rows` rows.
    """
    if not show == 'All':
        # Stripping the trailing "s" turns "Exemplars"/"Composites"
        # into the values used in the PQ column.
        l = lex2.loc[lex2['PQ'] == show[:-1]]
    else:
        l = lex2
    l = l.drop('PQ', axis = 1)
    l = l.loc[l.length >= min_length].sort_values(by = sort_by, ascending = ascending).reset_index(drop=True)[:rows].style
    return l
# ## 3.3.6 Discussion
# When ordered by `norm` the top of the list is formed by lexical compositions such as the sign lists [OB Nippur Ea](http://oracc.org/dcclt/Q000055) and [OB Nippur Diri](http://oracc.org/dcclt/Q000057), the acrographic list/list of professions [OB Nippur Lu](http://oracc.org/dcclt/Q000047), and the acrographic lists [OB Nippur Izi](http://oracc.org/dcclt/Q000050), and [OB Nippur Kagal](http://oracc.org/dcclt/Q000048), or (large) exemplars of such compositions. If we restrict the DataFrame to composites (Q numbers) only, this comes out even clearer. All these lexical texts belong to what Jay Crisostomo has labeled "ALE": Advanced Lexical Exercises ([Translation as Scholarship](https://doi.org/10.1515/9781501509810); SANER 22, 2019). These exercises belong to the advanced first stage of education, just before students would start copying literary texts. The thematic lists collected in [Ura](http://oracc.org/dcclt/Q000039,Q000040,Q000001,Q000041,Q000042,Q000043) (lists of trees, wooden objects, reeds, reed objects, clay, pottery, hides, metals and metal objects, animals, meat cuts, fish, birds, plants, etc.) have much lower `norm` values and thus less overlap with literary vocabulary. The lists that belong to [Ura](http://oracc.org/dcclt/Q000039,Q000040,Q000001,Q000041,Q000042,Q000043) are studied in a more elementary phase of scribal education and are further removed from the literary corpus, both in vocabulary and in curricular terms.
| _build/html/_sources/3_Vocabularies/3_3_Lex-Lit.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .r
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: R [conda env:.conda-r_4.0.3]
# language: R
# name: conda-env-.conda-r_4.0.3-r
# ---
# Show the active R library search paths (sanity check for the conda env).
.libPaths()
library("dplyr")
# library("cqn")
# library("devtools")
# library("SummarizedExperiment")
# library("rtracklayer")
# library("seqUtils")
# Confirm and set the project working directory.
getwd()
setwd('/mnt/DATA/bQTL_mapping_rerun')
# ## Subsample population genotype PCA
# Import genotype PCA
genotype_PCA = readr::read_delim("processed/PU1/qtltools/input/cqn/PU1.geno_pca.pca", delim = " ", col_types="cddddddddddddddddddddddddddddddddddddddddddddd") %>% as.data.frame()
# ## CEU subsample population genotype PCA
# +
# Import genotype PCA
# CEU individuals to keep (1000 Genomes sample IDs).
sample_list = c("NA06985","NA06986","NA06994","NA07037","NA07048","NA07051","NA07056","NA07346","NA07357","NA10847","NA10851","NA11829","NA11830","NA11831","NA11832","NA11840","NA11881","NA11894","NA11918","NA11920","NA11931","NA11992","NA11993","NA11994","NA12005","NA12043","NA12154","NA12156","NA12234","NA12249","NA12275","NA12282","NA12286","NA12287","NA12383","NA12489","NA12750","NA12760","NA12761","NA12762","NA12763","NA12776","NA12812","NA12813","NA12814","NA12815","NA12873")
# Read the CEU genotype PCA (X1 is readr's default name for the unnamed first
# column, holding sample IDs) and keep only the listed individuals.
genotype_PCA = readr::read_delim("1KG_PCA/chr1_10_CEU_PCA.csv", delim = ",", col_types="cdddddddddd") %>% as.data.frame() %>% dplyr::filter(X1 %in% sample_list)
samples = genotype_PCA$X1
# Transpose to the QTLtools covariate layout: PCs as rows, samples as columns.
genotype_PCA = as.data.frame(t(genotype_PCA[,-1]))
colnames(genotype_PCA) = samples
genotype_PCA
# -
# ## Full population genotype PCA
# +
# Import genotype PCA
# Same individuals as above, now filtered out of the full-population PCA file.
sample_list = c("NA06985","NA06986","NA06994","NA07037","NA07048","NA07051","NA07056","NA07346","NA07357","NA10847","NA10851","NA11829","NA11830","NA11831","NA11832","NA11840","NA11881","NA11894","NA11918","NA11920","NA11931","NA11992","NA11993","NA11994","NA12005","NA12043","NA12154","NA12156","NA12234","NA12249","NA12275","NA12282","NA12286","NA12287","NA12383","NA12489","NA12750","NA12760","NA12761","NA12762","NA12763","NA12776","NA12812","NA12813","NA12814","NA12815","NA12873")
# NOTE(review): the file name says 50 PCs, but col_types reads only 10 numeric
# columns -- confirm which PCA export this is meant to be.
genotype_PCA = readr::read_delim("1KG_PCA/chr1_50PCA.csv", delim = ",", col_types="cdddddddddd") %>% as.data.frame() %>% dplyr::filter(X1 %in% sample_list)
samples = genotype_PCA$X1
# Transpose to PCs-as-rows, samples-as-columns.
genotype_PCA = as.data.frame(t(genotype_PCA[,-1]))
colnames(genotype_PCA) = samples
genotype_PCA
# -
# ## Phenotype PCA
#Import phenotype PCA
# Phenotype PCA produced by QTLtools for the PU1 ChIP data.
phenotype_PCA = readr::read_delim("processed/PU1/qtltools/input/cqn/PU1.pheno_pca.pca", delim = " ") %>% as.data.frame() %>% tibble::as_tibble()
phenotype_PCA
#Filter genotype PCA to the same set of individuals
# genotype_PCA = genotype_PCA[colnames(phenotype_PCA),]
# genotype_PCA
phenotype_PCA[1:5,]
genotype_PCA[1:5,]
# Stack the first five phenotype PCs on top of the first five genotype PCs.
# NOTE(review): bind_rows assumes both frames share column (sample) names --
# confirm the sample sets really coincide.
covariates = bind_rows(phenotype_PCA[1:5,], genotype_PCA[1:5,])
covariates
output_path = "processed/PU1/qtltools/input/cqn/PU1.covariates_prop.txt"
write.table(covariates, output_path, sep = " ", quote = FALSE, row.names = FALSE)
| munge/PU1/merge_QTLtools_PCA.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # G5 - PIL pro capite, reddito personale e spesa per consumi
# +
# Import librerie per analisi dati (Pandas) e dati Istat
import os
import pandas as pd
import numpy as np
from IPython.core.display import HTML
import istat
import jsonstat
# Local cache directory so repeated Istat API calls are served from disk.
cache_dir = os.path.abspath(os.path.join("..", "tmp/od_la_grande_fuga", "istat_cached"))
istat.cache_dir(cache_dir)
istat.lang(0) # 0 selects Italian as the metadata language
# -
# Staging directory where the resulting DataFrames are pickled at the end.
dir_df = os.path.join(os.path.abspath(''),'stg')
#istat.areas()
# Area 17 is the national-accounts area; DCCN_TNA holds per-capita aggregates.
istat_area_redditi = istat.area(17)
istat_area_redditi.datasets()
#istat_area_redditi.datasets()
ds_redditi_pro_capite = istat_area_redditi.dataset('DCCN_TNA')
# +
#ds_redditi_pro_capite.dimension('Tipo aggregato')
# -
#ds_redditi_pro_capite.dimensions()
ds_redditi_pro_capite
# +
# NORTH (Territorio = 3)
# Gross Domestic Product at market prices, per inhabitant
spec_n_pil = {
    "Territorio":3,
    "Tipo aggregato":27,
    "Valutazione":9,
    "Edizione":2223
}
# Disposable income per inhabitant
spec_n_red = {
    "Territorio":3,
    "Tipo aggregato":46,
    "Valutazione":9,
    "Edizione":2223
}
# Final domestic consumption per inhabitant
spec_n_cons = {
    "Territorio":3,
    "Tipo aggregato":34,
    "Valutazione":9,
    "Edizione":2223
}
# +
# SOUTH (Territorio = 89)
# Gross Domestic Product at market prices, per inhabitant
spec_s_pil = {
    "Territorio":89,
    "Tipo aggregato":27,
    "Valutazione":9,
    "Edizione":2223
}
# Disposable income per inhabitant
spec_s_red = {
    "Territorio":89,
    "Tipo aggregato":46,
    "Valutazione":9,
    "Edizione":2223
}
# Final domestic consumption per inhabitant
spec_s_cons = {
    "Territorio":89,
    "Tipo aggregato":34,
    "Valutazione":9,
    "Edizione":2223
}
# -
def crea_df(ds, s, nome):
    """Extract a yearly series from an Istat dataset.

    ds   -- istat dataset object
    s    -- dict of dimension filters (spec) passed to getvalues()
    nome -- name for the value column of the returned DataFrame

    Returns a DataFrame with columns ['Anno', nome].
    """
    collection = ds.getvalues(s)
    jsonstat_ds = collection.dataset(0)
    frame = jsonstat_ds.to_data_frame('Tempo e frequenza')
    frame.reset_index(level=0, inplace=True)
    frame = frame[['Tempo e frequenza', 'Value']]
    frame.columns = ['Anno', nome]
    return frame
# North: per-capita GDP, disposable income and consumption, merged by year.
df_pil_nord = crea_df(ds_redditi_pro_capite,spec_n_pil,'PIL pro capite')
df_red_nord = crea_df(ds_redditi_pro_capite,spec_n_red,'Reddito Disponibile pro capite')
df_con_nord = crea_df(ds_redditi_pro_capite,spec_n_cons,'Consumi pro capite')
df_n1 = pd.merge(df_pil_nord,df_red_nord, how='inner', on=['Anno', 'Anno'])
df_nord = pd.merge(df_n1,df_con_nord, how='inner', on=['Anno', 'Anno'])
df_nord
# South: same three series, merged by year.
df_pil_sud = crea_df(ds_redditi_pro_capite,spec_s_pil,'PIL pro capite')
df_red_sud = crea_df(ds_redditi_pro_capite,spec_s_red,'Reddito Disponibile pro capite')
df_con_sud = crea_df(ds_redditi_pro_capite,spec_s_cons,'Consumi pro capite')
df_s1 = pd.merge(df_pil_sud,df_red_sud, how='inner', on=['Anno', 'Anno'])
df_sud = pd.merge(df_s1,df_con_sud, how='inner', on=['Anno', 'Anno'])
df_sud
# +
# Persist both DataFrames to the staging directory for the plotting notebook.
df_filename = r'df_g5_nord.pkl'
df_fullpath = os.path.join(dir_df, df_filename)
df_nord.to_pickle(df_fullpath)
df_filename = r'df_g5_sud.pkl'
df_fullpath = os.path.join(dir_df, df_filename)
df_sud.to_pickle(df_fullpath)
| G5_data_analysis.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# You may need to run these cells in a specific order.
#
# If you have enough memory, you can run everything in one go. If you do not have enough memory, please follow the procedure below, restarting the kernel where instructed.
# First run the following three cells. For convenience, these three cells will appear more than once, since they need to be re-run after each kernel restart.
# All permutations of (0,1,2,3): every 4-tuple with pairwise-distinct entries.
perm = [[i, j, k, l] for i in range(4) for j in range(4) for k in range(4) for l in range(4) if (i-j)*(i-k)*(i-l)*(j-k)*(j-l)*(k-l) != 0]
# Position relabelings of a 4x4 bipartite adjacency string s[0..15], where
# s[row*4+col] encodes the edge (row, col).  The first loop relabels rows and
# columns independently; the second additionally transposes the two sides.
# (The original also assigned a dead `equiv = ""` in every iteration.)
all_possible_perms = []
all_possible_perms_one_side = []
for row_perm in perm:
    for col_perm in perm:
        mapping = {r * 4 + c: row_perm[r] * 4 + col_perm[c]
                   for r in range(4) for c in range(4)}
        all_possible_perms.append(mapping)
        all_possible_perms_one_side.append(mapping)
for row_perm in perm:
    for col_perm in perm:
        transposed = {r + 4 * c: row_perm[r] * 4 + col_perm[c]
                      for r in range(4) for c in range(4)}
        all_possible_perms.append(transposed)
# +
def equiv_class(s):
    """Map each relabeling of graph string s (row/column permutations,
    with or without a transpose) to the inverse permutation that maps the
    canonical positions back onto s's positions."""
    ans = {}
    for p in all_possible_perms:
        equiv = ""
        for ind in range(16):
            equiv += s[p[ind]]
        ans[equiv] = {v: k for k, v in p.items()}
    return ans

def equiv_class_one_side(s):
    """Like equiv_class, but only over relabelings that keep the two sides
    of the bipartite graph fixed (no transpose)."""
    ans = {}
    # BUG FIX: this previously iterated over all_possible_perms, making the
    # function identical to equiv_class; the one-side variant must use
    # all_possible_perms_one_side (which was otherwise never read).
    for p in all_possible_perms_one_side:
        equiv = ""
        for ind in range(16):
            equiv += s[p[ind]]
        ans[equiv] = {v: k for k, v in p.items()}
    return ans
# -
# Now run the following two cells; they will use approximately 5 GB of memory.
import json
# Enumerate every 4x4 "graph with unknowns": 16 positions, each edge present
# ("1"), absent ("0"), or not yet queried ("x") -- 3**16 strings in total,
# built as base-3 expansions of i.
char="01x"
all_graph=set()
for i in range (3**16):
    s=""
    n=i
    for j in range(16):
        s+=char[n%3]
        n=n//3
    all_graph.add(s)
# Persist the enumeration so the kernel can be restarted to free memory.
f=open("all graphs 4.json", "w+")
json.dump(list(all_graph), f)
f.close()
# Now, please restart the kernel, then run the following five cells. They will use approximately 5 GB of memory.
# All permutations of (0,1,2,3): every 4-tuple with pairwise-distinct entries.
perm = [[i, j, k, l] for i in range(4) for j in range(4) for k in range(4) for l in range(4) if (i-j)*(i-k)*(i-l)*(j-k)*(j-l)*(k-l) != 0]
# Position relabelings of a 4x4 bipartite adjacency string s[0..15], where
# s[row*4+col] encodes the edge (row, col).  The first loop relabels rows and
# columns independently; the second additionally transposes the two sides.
# (The original also assigned a dead `equiv = ""` in every iteration.)
all_possible_perms = []
all_possible_perms_one_side = []
for row_perm in perm:
    for col_perm in perm:
        mapping = {r * 4 + c: row_perm[r] * 4 + col_perm[c]
                   for r in range(4) for c in range(4)}
        all_possible_perms.append(mapping)
        all_possible_perms_one_side.append(mapping)
for row_perm in perm:
    for col_perm in perm:
        transposed = {r + 4 * c: row_perm[r] * 4 + col_perm[c]
                      for r in range(4) for c in range(4)}
        all_possible_perms.append(transposed)
# +
def equiv_class(s):
    """Map each relabeling of graph string s (row/column permutations,
    with or without a transpose) to the inverse permutation that maps the
    canonical positions back onto s's positions."""
    ans = {}
    for p in all_possible_perms:
        equiv = ""
        for ind in range(16):
            equiv += s[p[ind]]
        ans[equiv] = {v: k for k, v in p.items()}
    return ans

def equiv_class_one_side(s):
    """Like equiv_class, but only over relabelings that keep the two sides
    of the bipartite graph fixed (no transpose)."""
    ans = {}
    # BUG FIX: this previously iterated over all_possible_perms, making the
    # function identical to equiv_class; the one-side variant must use
    # all_possible_perms_one_side (which was otherwise never read).
    for p in all_possible_perms_one_side:
        equiv = ""
        for ind in range(16):
            equiv += s[p[ind]]
        ans[equiv] = {v: k for k, v in p.items()}
    return ans
# -
import json
with open("all graphs 4.json", "r") as f:
    all_graph = set(json.load(f))
# Pick one canonical representative ("type") per equivalence class.
types = set()
while len(all_graph)!=0:
    # `for ... break` grabs an arbitrary element of the set.
    for graph in all_graph:
        break
    class_of_graph = equiv_class(graph)
    types.add(graph)
    # Remove the whole equivalence class before picking the next representative.
    all_graph -= set(class_of_graph.keys())
f=open("types 4.json", "w+")
json.dump(list(types), f)
f.close()
# Now please restart the kernel once again, and run the following 5 cells.
# All permutations of (0,1,2,3): every 4-tuple with pairwise-distinct entries.
perm = [[i, j, k, l] for i in range(4) for j in range(4) for k in range(4) for l in range(4) if (i-j)*(i-k)*(i-l)*(j-k)*(j-l)*(k-l) != 0]
# Position relabelings of a 4x4 bipartite adjacency string s[0..15], where
# s[row*4+col] encodes the edge (row, col).  The first loop relabels rows and
# columns independently; the second additionally transposes the two sides.
# (The original also assigned a dead `equiv = ""` in every iteration.)
all_possible_perms = []
all_possible_perms_one_side = []
for row_perm in perm:
    for col_perm in perm:
        mapping = {r * 4 + c: row_perm[r] * 4 + col_perm[c]
                   for r in range(4) for c in range(4)}
        all_possible_perms.append(mapping)
        all_possible_perms_one_side.append(mapping)
for row_perm in perm:
    for col_perm in perm:
        transposed = {r + 4 * c: row_perm[r] * 4 + col_perm[c]
                      for r in range(4) for c in range(4)}
        all_possible_perms.append(transposed)
# +
def equiv_class(s):
    """Map each relabeling of graph string s (row/column permutations,
    with or without a transpose) to the inverse permutation that maps the
    canonical positions back onto s's positions."""
    ans = {}
    for p in all_possible_perms:
        equiv = ""
        for ind in range(16):
            equiv += s[p[ind]]
        ans[equiv] = {v: k for k, v in p.items()}
    return ans

def equiv_class_one_side(s):
    """Like equiv_class, but only over relabelings that keep the two sides
    of the bipartite graph fixed (no transpose)."""
    ans = {}
    # BUG FIX: this previously iterated over all_possible_perms, making the
    # function identical to equiv_class; the one-side variant must use
    # all_possible_perms_one_side (which was otherwise never read).
    for p in all_possible_perms_one_side:
        equiv = ""
        for ind in range(16):
            equiv += s[p[ind]]
        ans[equiv] = {v: k for k, v in p.items()}
    return ans
# -
import json
# Enumerate all fully-known graphs (no "x"): 2**16 binary strings, built as
# base-2 expansions of i.
all_graph_complete=set()
char_complete = "01"
for i in range (2**16):
    s=""
    n=i
    for j in range(16):
        s+=char_complete[n%2]
        n=n//2
    all_graph_complete.add(s)
f=open("all graphs complete 4.json", "w+")
json.dump(list(all_graph_complete), f)
f.close()
with open("all graphs complete 4.json", "r") as f:
    all_graph_complete = set(json.load(f))
# Partition the complete graphs into equivalence classes (lists of members).
classes = []
while len(all_graph_complete)!=0:
    # `for ... break` grabs an arbitrary element of the set.
    for graph in all_graph_complete:
        break
    class_of_graph = equiv_class(graph)
    classes.append(list(class_of_graph.keys()))
    all_graph_complete -= set(class_of_graph.keys())
f=open("classes 4.json", "w+")
json.dump(classes, f)
f.close()
# Now, restart the kernel once more, and run the following cells.
# All permutations of (0,1,2,3): every 4-tuple with pairwise-distinct entries.
perm = [[i, j, k, l] for i in range(4) for j in range(4) for k in range(4) for l in range(4) if (i-j)*(i-k)*(i-l)*(j-k)*(j-l)*(k-l) != 0]
# Position relabelings of a 4x4 bipartite adjacency string s[0..15], where
# s[row*4+col] encodes the edge (row, col).  The first loop relabels rows and
# columns independently; the second additionally transposes the two sides.
# (The original also assigned a dead `equiv = ""` in every iteration.)
all_possible_perms = []
all_possible_perms_one_side = []
for row_perm in perm:
    for col_perm in perm:
        mapping = {r * 4 + c: row_perm[r] * 4 + col_perm[c]
                   for r in range(4) for c in range(4)}
        all_possible_perms.append(mapping)
        all_possible_perms_one_side.append(mapping)
for row_perm in perm:
    for col_perm in perm:
        transposed = {r + 4 * c: row_perm[r] * 4 + col_perm[c]
                      for r in range(4) for c in range(4)}
        all_possible_perms.append(transposed)
# +
def equiv_class(s):
    """Map each relabeling of graph string s (row/column permutations,
    with or without a transpose) to the inverse permutation that maps the
    canonical positions back onto s's positions."""
    ans = {}
    for p in all_possible_perms:
        equiv = ""
        for ind in range(16):
            equiv += s[p[ind]]
        ans[equiv] = {v: k for k, v in p.items()}
    return ans

def equiv_class_one_side(s):
    """Like equiv_class, but only over relabelings that keep the two sides
    of the bipartite graph fixed (no transpose)."""
    ans = {}
    # BUG FIX: this previously iterated over all_possible_perms, making the
    # function identical to equiv_class; the one-side variant must use
    # all_possible_perms_one_side (which was otherwise never read).
    for p in all_possible_perms_one_side:
        equiv = ""
        for ind in range(16):
            equiv += s[p[ind]]
        ans[equiv] = {v: k for k, v in p.items()}
    return ans
# +
def bipartite(s):
    """Return True if the 4x4 adjacency string s contains a perfect matching.

    s[row*4 + col] == "1" means the edge (row, col) is present; a perfect
    matching is a permutation assigning a distinct column to every row with
    all four chosen edges present.
    """
    return any(
        s[p[0]] == s[4 + p[1]] == s[8 + p[2]] == s[12 + p[3]] == "1"
        for p in perm
    )
def bipartite_with_unknown(s):
    """Three-valued matching test for a graph with unknown ("x") edges.

    Returns True when even the all-"x"-absent graph has a perfect matching,
    False when even the all-"x"-present graph has none, and None when the
    answer still depends on the unknown edges.
    """
    if bipartite(s.replace("x", "0")):
        return True
    if bipartite(s.replace("x", "1")):
        return None
    return False
# +
import json
# Reload the precomputed equivalence classes of complete graphs and the
# canonical representatives ("types") of all graphs-with-unknowns.
with open("classes 4.json", "r") as f:
    classes_complete = json.load(f)
with open("types 4.json", "r") as f:
    types = set(json.load(f))
print(len(types))
def canonical(graph):
    """Return the canonical representative (member of `types`) of graph's
    equivalence class; exactly one representative exists per class."""
    candidates = types & set(equiv_class(graph))
    return next(iter(candidates))
def guess(graph, algorithm):
    """Translate the algorithm's query on graph's canonical form back into
    graph's own position indexing."""
    canon = canonical(graph)
    # equiv_class(canon)[graph] is the permutation mapping canon's positions
    # onto graph's positions; apply it to the position the algorithm queries.
    to_graph = equiv_class(canon)[graph]
    return to_graph[algorithm[canon]]
def evaluation(algorithm):
    """Score `algorithm` (map: canonical graph -> position to query) by
    simulating it on every complete graph.

    For each equivalence class the average numbers of "yes" (edge present)
    and "no" answers received before the matching question is decided are
    computed, and the worst averages are tracked separately for classes
    with and without a perfect matching.

    Returns (yes4yes, crit_yes4yes, no4yes, crit_no4yes,
             yes4no, crit_yes4no, no4no, crit_no4no),
    where e.g. yes4yes is the maximum average "yes" count over classes whose
    graphs DO contain a matching, and crit_yes4yes a class achieving it.
    """
    # (Idiom fixes vs the original: identity comparisons for None/bool,
    # dead commented-out progress trackers removed.)
    yes4yes = 0
    crit_yes4yes = None
    no4yes = 0
    crit_no4yes = None
    yes4no = 0
    crit_yes4no = None
    no4no = 0
    crit_no4no = None
    for graphs in classes_complete:
        avg_num = len(graphs)
        yes = 0
        no = 0
        for graph in graphs:
            # Reveal edges one at a time, always querying the position the
            # algorithm picks, until the answer is determined.
            begin = "x"*16
            for _ in range(16):
                if bipartite_with_unknown(begin) is not None:
                    break
                guessing = guess(begin, algorithm)
                if graph[guessing] == "0":
                    no += 1
                else:
                    yes += 1
                begin = begin[:guessing]+graph[guessing]+begin[guessing+1:]
        avg_yes = yes/avg_num
        avg_no = no/avg_num
        # graphs[0] is a complete graph, so the test returns a plain bool.
        if bipartite_with_unknown(graphs[0]) is True:
            if avg_yes > yes4yes:
                yes4yes = avg_yes
                crit_yes4yes = graphs[0]
            if avg_no > no4yes:
                no4yes = avg_no
                crit_no4yes = graphs[0]
        if bipartite_with_unknown(graphs[0]) is False:
            if avg_yes > yes4no:
                yes4no = avg_yes
                crit_yes4no = graphs[0]
            if avg_no > no4no:
                no4no = avg_no
                crit_no4no = graphs[0]
    return (yes4yes, crit_yes4yes, no4yes, crit_no4yes, yes4no, crit_yes4no, no4no, crit_no4no)
# -
# Order the canonical types by their number of unknown ("x") positions so the
# dynamic programming below processes fully-known graphs first.
number_of_known={i:set() for i in range(17)}
travelling_list = []
for graph in types:
    number_of_known[graph.count("x")].add(graph)
for i in range(17):
    travelling_list+=number_of_known[i]
# This cell is to calculate the number of true graph if the query edge is true
yes_vs_no={} # {canonical graph: (number of matching completions, number of non-matching completions)}
# Dynamic programming over the number of unknowns: a complete graph counts
# itself; a graph with an "x" sums the counts of its two one-edge refinements
# (looked up via their canonical forms, which were computed earlier in
# travelling_list order, i.e. fewer unknowns first).
for graph in travelling_list:
    if "x" not in graph:
        yes_vs_no[graph] = (1,0) if bipartite_with_unknown(graph) else (0,1)
    else:
        ind = graph.index("x")
        yes_graph = canonical(graph[:ind]+"1"+graph[ind+1:])
        no_graph = canonical(graph[:ind]+"0"+graph[ind+1:])
        total_yes = yes_vs_no[yes_graph][0]+yes_vs_no[no_graph][0]
        total_no = yes_vs_no[yes_graph][1]+yes_vs_no[no_graph][1]
        yes_vs_no[graph] = (total_yes, total_no)
# Greedy construction of the query strategy: starting from the all-unknown
# graph, while the matching question is still open, query the position whose
# two possible answers differ most in their number of matching completions.
# (Idiom fixes vs the original: `is None` instead of `== None`, no reuse of
# the outer loop variable inside the comprehension, unused index renamed.)
algorithm = {}
cur = {"x"*16}
for _ in range(17):
    aux = set()
    for graph in cur:
        fin = bipartite_with_unknown(graph)
        if fin is None:  # still undecided: pick the next edge to query
            max_diff = -1
            max_index = -1
            selection = [pos for pos in range(16) if graph[pos] == "x"]
            for index in selection:
                yes_graph_canonical = canonical(graph[:index]+"1"+graph[index+1:])
                no_graph_canonical = canonical(graph[:index]+"0"+graph[index+1:])
                yes_for_yes = yes_vs_no[yes_graph_canonical][0]
                yes_for_no = yes_vs_no[no_graph_canonical][0]
                if abs(yes_for_yes-yes_for_no) > max_diff:
                    max_index = index
                    max_diff = abs(yes_for_yes-yes_for_no)
            algorithm[graph] = int(max_index)
            # Both possible answers form the frontier of the next round.
            aux |= {canonical(graph[:max_index]+"0"+graph[max_index+1:]),
                    canonical(graph[:max_index]+"1"+graph[max_index+1:])}
    cur = aux
algorithm
evals = evaluation(algorithm)
# Worst-case average answer counts (see evaluation()'s return order).
yes4yes = evals[0]
no4yes = evals[2]
yes4no = evals[4]
no4no = evals[6]
import math
# NOTE(review): presumably a spectral-style bound combining the four
# worst-case averages -- confirm against the accompanying write-up.
(yes4yes + no4no)/2 + math.sqrt((yes4yes - no4no)**2/4 + no4yes*yes4no)
# Rescaled counts; 288 appears to be a normalization constant from the
# analysis -- TODO confirm its derivation.
(yes4yes*288, no4yes*288, yes4no*288, no4no*288)
evals
| Algorithm search for d=4.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import pandas
import matplotlib.mlab as mlab
import matplotlib.pyplot as plt
import numpy as np
import math
from statsmodels.tsa.stattools import acf, pacf
import statsmodels.tsa.stattools as ts
from statsmodels.tsa.arima_model import ARIMA
# Annual rainfall series for Jammu & Kashmir.
variables = pandas.read_csv('STATEWISE_ DATASET1/data_JAMMU_KASHMIR.csv')
rain1 = variables['ANNUAL']
#rain1 = (rain[0:90,:])
# Replace zero and missing readings with the series mean so log() is defined.
rain1=rain1.replace(0,rain1.mean())
rain1.fillna((rain1.mean()), inplace=True)
rain1=rain1.head(110)
lnrain1=np.log(rain1)
# BUG FIX: Series.as_matrix() was removed in pandas 1.0; to_numpy() is the
# documented replacement and returns the same ndarray.
rain_matrix=lnrain1.to_numpy()
# Fit ARIMA(5,1,0) on the log-transformed series.
model = ARIMA(rain_matrix, order=(5,1,0))
model_fit = model.fit(disp=0)
# NOTE(review): these attributes are cleared before saving -- presumably so
# the fitted model pickles cleanly; confirm against the statsmodels version used.
model.dates=None
model.freq=None
model.missing=None
model_fit.save('model_arima_year_JAMMU_KASHMIR.pkl')
# -
from statsmodels.tsa.arima_model import ARIMAResults
# Reload the fitted model and forecast steps 115-130 on the scale of the
# modelled (log-transformed) series.
loaded=ARIMAResults.load('model_arima_year_JAMMU_KASHMIR.pkl')
prediction_year_JAMMU_KASHMIR=loaded.predict(115,130 ,typ='levels')
prediction_year_JAMMU_KASHMIR
# Back-transform from log space to rainfall units.
predictionsadjusted_year_JAMMU_KASHMIR=np.exp(prediction_year_JAMMU_KASHMIR)
predictionsadjusted_year_JAMMU_KASHMIR
| YEAR_WISE_PYTHON_CODE/ntt_arima_year_jammu_.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernel_info:
# name: python38-azureml
# kernelspec:
# display_name: Python 3.8 - AzureML
# language: python
# name: python38-azureml
# ---
# + [markdown] nteract={"transient": {"deleting": false}}
# # Hands on - Data Discovery using Azure REST API
#
# __Notebook Version:__ 1.0<br>
# __Python Version:__ Python 3.8 - AzureML<br>
# __Required Packages:__ No<br>
# __Platforms Supported:__ Azure Machine Learning Notebooks
#
# __Data Source Required:__ Log Analytics tables
#
# ### Description
# This notebook will provide step-by-step instructions and sample code to guide you through Azure authentication, Microsoft Sentinel data discovery by using Azure REST API.<br>
# *** No need to download and install any other Python modules. ***<br>
# *** Please run the cells sequentially to avoid errors. *** <br>
#
# ## Table of Contents
# 1. Warm-up
# 2. Azure Authentication
# 3. List Microsoft Sentinel Watchlists Using API
# 4. List Microsoft Sentinel Incidents Using API
# + [markdown] nteract={"transient": {"deleting": false}}
# ## 1. Warm-up
# + gather={"logged": 1627596066714} jupyter={"outputs_hidden": false, "source_hidden": false} nteract={"transient": {"deleting": false}}
# If you need to know what Python modules are available, you may run this:
# help("modules")
# + gather={"logged": 1632434870178} jupyter={"outputs_hidden": false, "source_hidden": false} nteract={"transient": {"deleting": false}}
# Loading Python libraries
from azure.common.credentials import get_azure_cli_credentials
import requests
import json
import pandas
from IPython.display import display, HTML, Markdown
# + gather={"logged": 1632434872530} jupyter={"outputs_hidden": false, "source_hidden": false} nteract={"transient": {"deleting": false}}
# Functions will be used in this notebook
def read_config_values(file_path):
    """Load pre-generated Microsoft Sentinel workspace parameters from a JSON config file.

    Returns a 7-tuple:
    (tenant_id, subscription_id, resource_group, workspace_id,
     workspace_name, user_alias, user_object_id).
    Raises FileNotFoundError if the file is missing and KeyError if a
    required field is absent.
    """
    # The previous `if json_file:` guard was always true (open() either
    # returns a truthy file object or raises), so the trailing
    # `return None` was dead code - both removed.
    with open(file_path) as json_file:
        json_config = json.load(json_file)
    # Return the fields in the fixed order the notebook unpacks them.
    return (json_config["tenant_id"],
            json_config["subscription_id"],
            json_config["resource_group"],
            json_config["workspace_id"],
            json_config["workspace_name"],
            json_config["user_alias"],
            json_config["user_object_id"])
def has_valid_token():
    "Check to see if there is a valid AAD token"
    try:
        # Probe the Azure CLI token cache. _get_cred and _token_retriever
        # are private azure-common internals, so this is tied to that SDK
        # version - TODO confirm when upgrading the azure packages.
        credentials, sub_id = get_azure_cli_credentials()
        creds = credentials._get_cred(resource=None)
        token = creds._token_retriever()[2]
        print("Successfully signed in.")
        return True
    except Exception as ex:
        # Map the known failure modes (matched on exception text) to
        # user-facing guidance.
        if "Please run 'az login' to setup account" in str(ex):
            print(str(ex))
            return False
        elif "AADSTS70043: The refresh token has expired" in str(ex):
            message = "**The refresh token has expired. <br> Please continue your login process. Then: <br> 1. If you plan to run multiple notebooks on the same compute instance today, you may restart the compute instance by clicking 'Compute' on left menu, then select the instance, clicking 'Restart'; <br> 2. Otherwise, you may just restart the kernel from top menu. <br> Finally, close and re-load the notebook, then re-run cells one by one from the top.**"
            display(Markdown(message))
            return False
        elif "[Errno 2] No such file or directory: '/home/azureuser/.azure/azureProfile.json'" in str(ex):
            # No CLI profile on disk yet - the user has never signed in.
            print("Please sign in.")
            return False
        else:
            print(str(ex))
            return False
    except:
        # Only reachable for non-Exception BaseExceptions
        # (e.g. KeyboardInterrupt), since Exception is caught above.
        print("Please restart the kernel, and run 'az login'.")
        return False
# Generic GET helper for Microsoft Sentinel "list" REST APIs; the same URL
# template can be reused for other Azure REST APIs with different parameters.
# For national clouds (e.g. ---.azure.us, ---.azure.microsoft.scloud,
# ---.azure.eaglex.ic.gov) a different root_url is required - please contact
# your admins.
def call_azure_rest_api_for_list(token, resource_name, api_version):
    """Call a Microsoft Sentinel list REST API and return the raw response.

    Relies on the notebook-level globals subscription_id, resource_group
    and workspace_name populated from config.json.
    """
    request_headers = {"Authorization": token, "content-type": "application/json"}
    # Assemble the ARM resource URL segment by segment.
    arm_rest_url = "/".join([
        "https://management.azure.com",
        "subscriptions", subscription_id,
        "resourceGroups", resource_group,
        "providers", "Microsoft.OperationalInsights",
        "workspaces", workspace_name,
        "providers", "Microsoft.SecurityInsights",
        resource_name,
    ]) + "?api-version=" + api_version
    return requests.get(arm_rest_url, headers=request_headers, verify=True)
def display_result_name(response):
    """Display the "name" column of each entry in an API list response.

    Change column_name below to show a different column. Does nothing when
    response is None (e.g. a previous call was skipped or failed).
    """
    column_name = "name"
    # Identity check is the correct Pythonic None test; the previous
    # `response != None` invoked __eq__ unnecessarily.
    if response is not None:
        entries = [item[column_name] for item in response.json()["value"]]
        display(entries)
def display_result(response):
    """Render the API response's "value" array as a pandas.DataFrame.

    Does nothing when response is None (e.g. a previous call failed).
    """
    # Use an identity check instead of the previous `response != None`.
    if response is not None:
        df = pandas.DataFrame(response.json()["value"])
        display(df)
# + gather={"logged": 1632434875964} jupyter={"outputs_hidden": false, "source_hidden": false} nteract={"transient": {"deleting": false}}
# Calling the above function to populate Microsoft Sentinel workspace parameters
# The file, config.json, was generated by the system, however, you may modify the values, or manually set the variables
tenant_id, subscription_id, resource_group, workspace_id, workspace_name, user_alias, user_object_id = read_config_values('config.json');
# + [markdown] nteract={"transient": {"deleting": false}}
# ## 2. Azure Authentication
# + gather={"logged": 1632434877884}
# Azure CLI is used to get device code to login into Azure, you need to copy the code and open the DeviceLogin site.
# You may add [--tenant $tenant_id] to the command
# Only prompt for a device-code login when no valid cached token exists.
if has_valid_token() == False:
    # !echo -e '\e[42m'
    # !az login --tenant $tenant_id --use-device-code
# + gather={"logged": 1632434887274} jupyter={"outputs_hidden": false, "source_hidden": false} nteract={"transient": {"deleting": false}}
# Extract access token, which will be used to access Microsoft Sentinel Watchlist API for your Watchlist data.
# NOTE(review): _get_cred and _token_retriever are private azure-common
# APIs - confirm they still exist when upgrading the SDK.
credentials, sub_id = get_azure_cli_credentials()
creds = credentials._get_cred(resource=None)
token = creds._token_retriever()[2]
access_token = token['accessToken']
# REST calls expect the token in an "Authorization: Bearer <token>" header.
header_token_value = "Bearer {}".format(access_token)
# + [markdown] nteract={"transient": {"deleting": false}}
# ## 3. List Microsoft Sentinel Watchlists Using API
# + gather={"logged": 1632434893904} jupyter={"outputs_hidden": false, "source_hidden": false} nteract={"transient": {"deleting": false}}
# Calling Microsoft Sentinel Watchlist API
# If you don't have Watchlist, you may create one, or try to access different features, such as Bookmarks.
response_watchlist = call_azure_rest_api_for_list(header_token_value, "watchlists", "2019-01-01-preview")
# + gather={"logged": 1632434896691} jupyter={"outputs_hidden": false, "source_hidden": false} nteract={"transient": {"deleting": false}}
# Display the result (only the "name" column of each watchlist)
display_result_name(response_watchlist)
# + [markdown] nteract={"transient": {"deleting": false}}
# ## 4. List Microsoft Sentinel Incidents Using API
# + gather={"logged": 1632434901252} jupyter={"outputs_hidden": false, "source_hidden": false} nteract={"transient": {"deleting": false}}
# Calling Microsoft Sentinel Incident API
# If you don't have incidents, you may create one through Azure Portal.
response_incident = call_azure_rest_api_for_list(header_token_value, "incidents", "2020-01-01")
# + gather={"logged": 1632434904269} jupyter={"outputs_hidden": false, "source_hidden": false} nteract={"transient": {"deleting": false}}
# Display the result as a full DataFrame
display_result(response_incident)
# + [markdown] nteract={"transient": {"deleting": false}}
# Thanks for coming along all the way to the end. In the next Hands-on notebook, I will show you how to access data using Azure SDK for Python. And keep one of the watchlist name, it will be used in the next notebook. A la prochaine.
| Hands-on 1. Data Discovery using Azure REST API.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
# %matplotlib inline
import numpy as np
import matplotlib.pyplot as plt
import healpy as hp
import lsst.sims.maf.db as db
import lsst.sims.maf.utils as utils
import lsst.sims.maf.metrics as metrics
import lsst.sims.maf.slicers as slicers
import lsst.sims.maf.stackers as stackers
import lsst.sims.maf.metricBundles as metricBundles
# +
# Open the two opsim run databases to compare: a rolling extragalactic
# cadence and the v1.6 baseline.
cons = []
dbFile = 'sims_featureScheduler_runs1.6/potential_schedulers/rolling_exgal_mod2_dust_sdf_0.80_v1.6_10yrs.db'
cons.append(db.OpsimDatabase(dbFile))
outDir='rolling_plot'
resultsDb = db.ResultsDb(outDir=outDir)
dbFile = 'sims_featureScheduler_runs1.6/potential_schedulers/baseline_nexp1_v1.6_10yrs.db'
cons.append(db.OpsimDatabase(dbFile))
runnames = ['rolling_exgal_mod2_dust_sdf_0.80_v1.6',
            'baseline_nexp1_v1.6']
# -
# Full-survey visit counts per HEALpix pixel, excluding deep-drilling ("DD") visits.
titles = ['Rolling Exgal', 'Baseline']
for conn, runName, title in zip(cons, runnames, titles):
    bundleList = []
    sql = 'note not like "DD%%"'
    metric = metrics.CountMetric('filter')
    slicer = slicers.HealpixSlicer()
    summaryStats = []
    plotDict = {'xlabel': 'Number of Observations', 'title':title}
    bundleList.append(metricBundles.MetricBundle(metric,slicer,sql,
                                                 plotDict=plotDict,
                                                 summaryMetrics=summaryStats,
                                                 runName=runName))
    bd = metricBundles.makeBundlesDictFromList(bundleList)
    bg = metricBundles.MetricBundleGroup(bd, conn, outDir=outDir, resultsDb=resultsDb)
    bg.runAll()
    bg.plotAll(closefigs=False)
# Same count restricted to survey years 3.5-4.5 (nights in days), with a
# fixed color scale so the two runs are directly comparable.
titles = ['Rolling Exgal, y3.5-4.5', 'Baseline, y3.5-4.5']
for conn, runName,title in zip(cons, runnames, titles):
    bundleList = []
    sql = 'night > %f and night < %f and note not like "DD%%"' % ((365.25*3.5), (365.25*4.5) )
    metric = metrics.CountMetric('filter')
    slicer = slicers.HealpixSlicer()
    summaryStats = []
    plotDict = {'xlabel': 'Number of Observations', 'colorMin': 0, 'colorMax': 200,
                'title': title}
    bundleList.append(metricBundles.MetricBundle(metric,slicer,sql,
                                                 plotDict=plotDict,
                                                 summaryMetrics=summaryStats,
                                                 runName=runName))
    bd = metricBundles.makeBundlesDictFromList(bundleList)
    bg = metricBundles.MetricBundleGroup(bd, conn, outDir=outDir, resultsDb=resultsDb)
    bg.runAll()
    bg.plotAll(closefigs=False)
| plots/Rolling_plot.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# <center><b><big> CITS 5508 LAB SHEET 2: CLASSIFICATION ON FOREST TYPE MAPPING DATASET </big></b></center>
#
# **Name: <NAME>**<br>
# **Student Number: 22803018**<br>
# **Date created: 10th March 2020** <br>
# **Last modified: 19th March 2020**
#
# ## 1. Setup
#
# Before going to the loading-data part, we need to make sure that Matplotlib renders figures inline and prepare a function to save the figures. Moreover, we should use Python >= 3.5 as well as Scikit-Learn >= 0.20
# +
# Python >= 3.5 is required
import sys
assert sys.version_info >= (3,5)
# Scikit-Learn >= 0.20 is required. Compare numeric (major, minor) tuples:
# the previous string comparison (__version__ >= '0.20') was lexicographic
# and would, e.g., wrongly accept version '0.9'.
import sklearn
assert tuple(map(int, sklearn.__version__.split(".")[:2])) >= (0, 20)
# Get the pretty figures
# %matplotlib inline
import matplotlib as mpl
import matplotlib.pyplot as plt
mpl.rc('axes', labelsize=14)
mpl.rc('xtick', labelsize=12)
mpl.rc('ytick', labelsize=12)
# Directory where save_fig() stores the generated figures
import os
LAB2_ROOT_DIR = "."
CHAPTER_ID = "LAB SHEET 2"
IMAGES_PATH = os.path.join(LAB2_ROOT_DIR, "images", CHAPTER_ID)
os.makedirs(IMAGES_PATH, exist_ok=True)
def save_fig(fig_id, tight_layout=True, fig_extension="png", resolution=300):
    """Save the current matplotlib figure as IMAGES_PATH/<fig_id>.<fig_extension>."""
    print("Saving figure", fig_id)
    if tight_layout:
        plt.tight_layout()
    destination = os.path.join(IMAGES_PATH, "{}.{}".format(fig_id, fig_extension))
    plt.savefig(destination, format=fig_extension, dpi=resolution)
#Ignore useless warnings
import warnings
warnings.filterwarnings(action="ignore", message="^internal gelsd")
# -
# ## 2. Loading data
# First, we need to download the zip file from http://archive.ics.uci.edu/ml/datasets/Forest+type+mapping# and extract the two csv files, training and testing, into the same directory as this lab file.
#Load the Pandas libraries with alias "pd"
import os
import pandas as pd
#Read data from the 2 files "training.csv" and "testing.csv"
train_set = pd.read_csv("training.csv")
test_origin_set = pd.read_csv("testing.csv")
#Take a look at some lines of the training dataset
train_set.head()
#Get an overview of the training dataset
train_set.info()
train_set.describe()
# There are 28 attributes which can be easily seen in the table above and the list below: class, b1, b2, b3, b4, b5, b6, b7, b8, b9, ..., pred_minus_obs_S_b9. There are 325 instances in the training set and 198 instances in test set which mean that the data size is relatively small compared to the Machine Learning standards, but we can give it a try with small data size and then move to the bigger one. Both training and testing set have the same format. There are not any missing values in both dataset. All attributes are numerical, except "class".
##Visualize the training dataset
import matplotlib.pyplot as plt
train_set.hist(bins=50, figsize=(20,15))
save_fig("attribute_histogram_plots")
plt.show()
# As we can notice few things in these histograms:
# 1. Most of attributes have the same scales. We may not focus on those columns which are behind the column "b9"
# 2. Most of instances of "b4", "b6", "b7" ranged from 80 to 120.
# 3. Other instances of other column ranged from 20 to 60.
# => These histograms are easy for Machine Learning algorithms to detect patterns
# ## 3. Cleaning data
#Split training set into training set and test set. Original test set is kept for cross validation.
from sklearn.model_selection import train_test_split
train_set, test_set = train_test_split(train_set, test_size = 0.2, random_state = 1)
#Drop all columns whose names begin with "pred_minus_obs" (everything after the first 10).
import pandas as pd
# Drop by each frame's OWN column labels. The original code dropped
# `test_set.iloc[:, 10:]` from test_origin_set, but test_set had already
# been trimmed on the previous line, so that slice was empty and
# test_origin_set silently kept all of its pred_minus_obs_* columns.
train_set.drop(train_set.columns[10:], inplace = True, axis = 1)
test_set.drop(test_set.columns[10:], inplace = True, axis = 1)
test_origin_set.drop(test_origin_set.columns[10:], inplace = True, axis = 1)
#Show a few lines of the training dataset after dropping columns as required.
train_set.head()
#At this time, the data of both sets are clean.
#Count the number of instances for each class label and visualize the graph to easily see the pattern.
train_set['class'].value_counts()
#Visualize
# %matplotlib inline
import matplotlib.pyplot as plt
train_set['class'].hist(bins=50, figsize=(20,15))
save_fig("class_histogram_plot")
plt.show()
# As we can see, the number of instances in each of the 4 classes does not fluctuate much. It ranges from around 40 to 140. So, this is a balanced dataset
# ## 4. Normalization
#We need to take all numerical columns (drop the first column, "class")
train_set_num = train_set.drop(train_set.iloc[:,0:1], axis = 1)
test_set_num = test_set.drop(test_set.iloc[:,0:1], axis = 1)
test_origin_set_num = test_origin_set.drop(test_origin_set.iloc[:,0:1], axis = 1)
train_set_num.head()
#Find the correlation between attributes
corr_matrix = train_set.corr()
corr_matrix["b1"].sort_values(ascending = False)
#Plot a scatter matrix with pandas for a few representative bands
from pandas.plotting import scatter_matrix
attributes = ["b1","b4","b7","b9"]
scatter_matrix(train_set[attributes], figsize = (12,8))
save_fig("scatter_matrix_plot")
# As we can see, b4 and b7 are the promising attributes to predict b1. There are upward trends in this scatterplot. This data size is really small, so the points are somewhat dispersed. However, the correlation of these attributes with b1 is relatively high. To conclude, we should try to remove some unrelated attributes so the algorithms do not simply memorize the data.
#Zoom in on the most correlated attributes on the scatterplot
train_set.plot(kind ="scatter", x ="b1", y="b4")
train_set.plot(kind ="scatter", x="b1", y="b7")
save_fig("b1_b4_vs_b7_value_scatterplot")
#Extract the categorical column ("class") of the training and testing sets
train_set_cat = train_set["class"]
test_set_cat = test_set["class"]
test_origin_cat = test_origin_set["class"]
train_set_cat
#Transform all numerical columns by using StandardScaler
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import StandardScaler
num_pipeline = Pipeline([
    ('std_scaler', StandardScaler())
])
# Fit the scaler on the training data only, then reuse the fitted statistics
# for the held-out split: calling fit_transform on the test data (as the
# original code did) leaks test-set statistics and scales each set
# inconsistently.
train_set_num_tr = num_pipeline.fit_transform(train_set_num)
test_set_num_tr = num_pipeline.transform(test_set_num)
# NOTE(review): test_origin_set_num may still contain the pred_minus_obs_*
# columns (see the drop cell above), so its feature count can differ from
# the fitted scaler's; it therefore keeps an independent fit_transform.
# Switch it to num_pipeline.transform(...) once the columns are aligned.
test_origin_num_tr = num_pipeline.fit_transform(test_origin_set_num)
train_set_num_tr
test_set_num_tr
test_set_num_tr
# ## 5. Classification
# ### 5.1 Support Vector Machine
#Assign x_train, y_train, x_test, y_test and apply SVM to them
from sklearn.svm import SVC
x_train, y_train, x_test, y_test = train_set_num_tr, train_set_cat, test_set_num_tr, test_set_cat
# Default RBF kernel; gamma="auto" uses 1/n_features.
svm_clf = SVC(gamma="auto", random_state=1)
svm_clf.fit(x_train, y_train)
svm_y_pred = svm_clf.predict(x_test)
#Draw the confusion matrix of svm_y_pred
from sklearn.metrics import confusion_matrix
confusion_matrix(y_test, svm_y_pred)
#This code shows the labels of the confusion matrix above (its row/column order).
svm_clf.classes_
# Here are some comments about the confusion matrix:
# - 21 values were correctly classified as "d".
# - 4 values were correctly classified as "h".
# - 7 values were correctly classified as "o".
# - 22 values were correctly classified as "s".
# - Read down to the "h" row, 1 value that should have been "s" were classified as "d".
# - For the "h" row, 3 values that should have been "s" were classified as "h"
# - 2 values from "o" row were classified as "o", but they must have been "d".
# - 5 values were classified as "s", but they must have been respectively "d"(2) and "h"(3).
#Compute accuracy
from sklearn.metrics import accuracy_score
accuracy_score(y_test, svm_y_pred)
# The accuracy is 83.08%; this ratio is quite good since it predicts most classes correctly.
# ## 5.2 Experimenting with some hyperparameters
# ### Kernel = sigmoid
#Use kernel : sigmoid for classification
svm_clf_sig = SVC(kernel="sigmoid", random_state = 1)
svm_clf_sig.fit(x_train, y_train)
svm_y_pred_sig = svm_clf_sig.predict(x_test)
svm_y_pred_sig
#Draw the confusion matrix of svm_y_pred_sig
from sklearn.metrics import confusion_matrix
confusion_matrix(y_test, svm_y_pred_sig)
# Here are some comments about the confusion matrix:
# - 16 values were correctly classified as "d".
# - 7 values were correctly classified as "h".
# - 46 values were correctly classified as "o".
# - 23 values were correctly classified as "s".
# - Read down to the "h" row, 6 values that should have been "o"(3) and "s"(3) were classified as "d".
# - For the "h" row, there is no any wrong classification in this row.
# - 5 values from "o" row were classified as "o", but they must have been "d"(5)
# - 4 values were classified as "s", but they must have been respectively "h"(2)
#Compute accuracy
from sklearn.metrics import accuracy_score
accuracy_score(y_test, svm_y_pred_sig)
# The accuracy is 76.92%; this ratio is quite low, so it is not reliable for further steps.
# ### Kernel = linear
#Use kernel : linear for classification
svm_clf_lin = SVC(kernel="linear", random_state = 1)
svm_clf_lin.fit(x_train, y_train)
svm_y_pred_lin = svm_clf_lin.predict(x_test)
svm_y_pred_lin
#Draw the confusion matrix of svm_y_pred_lin
from sklearn.metrics import confusion_matrix
confusion_matrix(y_test, svm_y_pred_lin)
# Here are some comments about the confusion matrix:
# - 21 values were correctly classified as "d".
# - 5 values were correctly classified as "h".
# - 8 values were correctly classified as "o".
# - 23 values were correctly classified as "s".
# - Read down to the "d" row, 1 value that should have been "s" were classified as "d".
# - For the "h" row, 2 values that should have been "s" were classified as "h".
# - 1 value from "o" row were classified as "o", but they must have been "d".
# - 4 values were classified as "s", but they must have been respectively "d"(2) and "h"(2).
#Compute accuracy
from sklearn.metrics import accuracy_score
accuracy_score(y_test, svm_y_pred_lin)
# The accuracy is 87.69%, higher than SVC with gamma = "auto", kernel = "poly" and also kernel = "sigmoid". It approaches a very good rate (90%). We can do more with this.
# ### Kernel = "poly"
#Use kernel : polynomial (degree 3) for classification
svm_clf_poly = SVC(kernel="poly", degree = 3, random_state = 1)
svm_clf_poly.fit(x_train, y_train)
svm_y_pred_poly = svm_clf_poly.predict(x_test)
svm_y_pred_poly
#Draw the confusion matrix of svm_y_pred_poly
from sklearn.metrics import confusion_matrix
confusion_matrix(y_test, svm_y_pred_poly)
# Here are some comments about the confusion matrix:
# - 9 values were correctly classified as "d".
# - 4 values were correctly classified as "h".
# - 7 values were correctly classified as "o".
# - 26 values were correctly classified as "s".
# - Read down to the "d" row, 13 values that should have been "s" were classified as "d".
# - For the "h" row, 3 values that should have been "s" were classified as "h".
# - 2 values from "o" row were classified as "o", but they must have been "d"(1) and "s"(1).
# - 1 values were classified as "s", but they must have been respectively "d"(1).
#Compute accuracy
from sklearn.metrics import accuracy_score
accuracy_score(y_test, svm_y_pred_poly)
# The accuracy is 70.77%, the lowest of the kernels tried.
# ### Stochastic Gradient Descent
#Apply the Stochastic Gradient Descent Classifier to the dataset. loss = hinge (linear SVM)
from sklearn import linear_model
sgd_clf_hinge = linear_model.SGDClassifier(loss = "hinge", random_state = 1)
sgd_clf_hinge.fit(x_train, y_train)
sgd_y_pred_hinge = sgd_clf_hinge.predict(x_test)
sgd_y_pred_hinge
#Draw the confusion matrix of sgd_y_pred_hinge
from sklearn.metrics import confusion_matrix
confusion_matrix(y_test, sgd_y_pred_hinge)
#This code shows the labels of the confusion matrix above.
sgd_clf_hinge.classes_
# Here are some comments about the confusion matrix:
# - 18 values were correctly classified as "d".
# - 4 values were correctly classified as "h".
# - 8 values were correctly classified as "o".
# - 25 values were correctly classified as "s".
# - Read down to the "d" row, 4 values that should have been "o"(1) and "s"(3) were classified as "d".
# - For the "h" row, 3 values that should have been "s" were classified as "h"
# - 1 value from "o" row were classified as "o", but they must have been "d".
# - 2 values were classified as "s", but they must have been "d"
#Compute accuracy
from sklearn.metrics import accuracy_score
accuracy_score(y_test, sgd_y_pred_hinge)
# The accuracy is 84.62%; this ratio is quite good.
# ### Loss = "log"
#Use loss: log (logistic regression) for classification
from sklearn import linear_model
sgd_clf_log = linear_model.SGDClassifier(loss ='log', random_state = 1)
sgd_clf_log.fit(x_train, y_train)
sgd_y_pred_log = sgd_clf_log.predict(x_test)
sgd_y_pred_log
#Draw the confusion matrix of sgd_y_pred_log
from sklearn.metrics import confusion_matrix
confusion_matrix(y_test, sgd_y_pred_log)
# Here are some comments about the confusion matrix:
# - 19 values were correctly classified as "d".
# - 4 values were correctly classified as "h".
# - 8 values were correctly classified as "o".
# - 20 values were correctly classified as "s".
# - Read down to the "d" row, 3 values that should have been "o"(1) and "s"(2) were classified as "d".
# - For the "h" row, 3 values that should have been "d" were classified as "s".
# - 1 value from "o" row were classified as "o", but they must have been "d".
# - 7 values were classified as "s", but they must have been "d"(5) and "h"(2).
#Compute accuracy
from sklearn.metrics import accuracy_score
accuracy_score(y_test, sgd_y_pred_log)
# The accuracy is 78.46%; below 80%, so this model is not a good candidate.
# ### Loss = "modified_huber"
#Use modified_huber loss for classification
from sklearn import linear_model
sgd_clf_hub = linear_model.SGDClassifier(loss ='modified_huber', random_state = 1)
sgd_clf_hub.fit(x_train, y_train)
sgd_y_pred_hub = sgd_clf_hub.predict(x_test)
sgd_y_pred_hub
#Draw the confusion matrix of sgd_y_pred_hub
from sklearn.metrics import confusion_matrix
confusion_matrix(y_test, sgd_y_pred_hub)
# Here are some comments about the confusion matrix:
# - 18 values were correctly classified as "d".
# - 4 values were correctly classified as "h".
# - 8 values were correctly classified as "o".
# - 23 values were correctly classified as "s".
# - Read down to the "d" row, 4 values that should have been "o"(1) and "s"(3) were classified as "d".
# - For the "h" row, 3 values that should have been "s" were classified as "h".
# - 1 value from "o" row were classified as "o", but they must have been "d".
# - 4 values were classified as "s", but they must have been "d"
#Compute accuracy
from sklearn.metrics import accuracy_score
accuracy_score(y_test, sgd_y_pred_hub)
# The accuracy is 81.54%; this ratio is quite good.
# ## Cross Validation
# +
from sklearn.model_selection import cross_val_score
# Evaluate the best model (linear SVM) on the held-back original test set
# with 3-fold cross-validation.
cross_val_score(svm_clf_lin, test_origin_num_tr, test_origin_cat, cv=3, scoring="accuracy")
# -
# Wow, when I used cross validation on the original test set, the results are almost perfect such as 89.39% nearly 90%, others are 96.97%.
# The accuracy with model cross validation is: 94.44%.
# ## 6. Conclusion
# | Classification | Prediction Rate |
# |:----------------------------:|:---------------:|
# | SVM - gamma = 'auto' | 83.08% |
# | SVM - kernel = 'sigmoid' | 76.92% |
# | SVM - kernel = 'linear' | 87.69% |
# | SVM - kernel = 'poly' | 70.77% |
# | SGD - loss = 'hinge' | 84.62% |
# | SGD - loss = 'log' | 78.46% |
# | SGD - loss = 'modified_huber' | 81.54% |
# In this dataset, using the SVM classifier with the hyperparameter kernel = "linear" gave the highest prediction rate (87.69%), approaching a very good rate (90%). Most of the classifications with different hyperparameters resulted in good prediction rates above 80%, except for SVM with kernel = "poly" (70.77%) or "sigmoid" (76.92%) and SGD with loss = "log" (78.46%), which were not reliable enough to take to the next step.
| lab02.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: 'Python 3.9.7 64-bit (''manimce'': conda)'
# name: python3
# ---
# +
import pandas as pd
import numpy as np
print(f'pandas version:{pd.__version__}')
print(f'numpy version:{np.__version__}')
# -
# ## 01 Series
#
#
# https://pandas.pydata.org/docs/reference/api/pandas.Series.html
# ### Create a Series from a list
s1 = pd.Series([1,2,3])
s1
# An explicit index replaces the default RangeIndex.
s2 = pd.Series([1,2,3],index=['a','b','c'])
s2
s3 = pd.Series(
    [1,2,3],
    index=['a','b','c'],
    name='hello'
)
s3
# The name attribute labels the Series (it becomes the column name when the
# Series joins a DataFrame).
s3.name
# ### Create a Series from a dict
# The dict keys become the Series index.
d1 = {'a':1,'b':2,'c':3}
s4 = pd.Series(d1)
s4
# ### Create a Series from an ndarray
np.random.seed(0)
s = pd.Series(np.random.randint(5,size=3))
s
# ### Create a Series from a scalar value
# The scalar is broadcast to every entry of the given index.
s = pd.Series(
    1,
    index=['a','b','c']
)
s
# ### Create an empty Series
# Pass an explicit dtype: a bare pd.Series() emits a DeprecationWarning
# because the default dtype of an empty Series is changing from float64
# to object.
s = pd.Series(dtype=object)
s
# ## 02 DataFrame
#
# https://pandas.pydata.org/docs/reference/frame.html
# ### Create an empty DataFrame
df = pd.DataFrame()
print(df)
# ### Create a DataFrame from an ndarray
np.random.seed(0)
arr = np.random.randint(10,size=[5,3])
df = pd.DataFrame(arr,columns=list('abc'))
df
# from_records builds the frame from a sequence of row tuples.
arr = np.array(
    [
        ("Lemon", "长沙", 80, 90),
        ("Jack", "上海", 90, 75),
        ("Peter", "深圳", 60, 80),
    ]
)
df = pd.DataFrame.from_records(
    arr,
    columns=["name", "city", "math", "chem"]
)
df
# The plain constructor accepts the same array directly.
arr = np.array(
    [
        ("Lemon", "长沙", 80, 90),
        ("Jack", "上海", 90, 75),
        ("Peter", "深圳", 60, 80),
    ]
)
df = pd.DataFrame(
    arr,
    columns=["name", "city", "math", "chem"]
)
df
# ### Create a DataFrame from lists
# list: one-dimensional data (a single unnamed column)
lst = [1,2,3,5]
df = pd.DataFrame(lst)
df
type(df)
# list of lists: two-dimensional data, one inner list per row
lst = [
    ["Lemon", "长沙", 80, 90],
    ["Jack", "上海", 90, 75],
    ["Peter", "深圳", 60, 80],
]
df = pd.DataFrame(
    data=lst,
    columns=["name", "city", "math", "chem"]
)
df
# list of dicts, demo 1: the dict keys become the columns
lst = [
    {'name':'Lemon','city':"长沙",
     'math':80,'chem':90},
    {'name':'Jack','city':"上海",
     'math':90,'chem':75},
    {'name':'Peter','city':"深圳",
     'math':60,'chem':80},
]
df = pd.DataFrame(data=lst)
df
# list of dicts, demo 2: a missing key ('chem' in the last row) yields NaN
lst = [
    {'name':'Lemon','city':"长沙",'math':80,'chem':90},
    {'name':'Jack','city':"上海",'math':90,'chem':75},
    {'name':'Peter','city':"深圳",'math':60},
]
df = pd.DataFrame(data=lst)
df
# list of dicts, demo 3: columns= selects a subset of the keys
lst = [
    {'name':'Lemon','city':"长沙",'math':80,'chem':90},
    {'name':'Jack','city':"上海",'math':90,'chem':75},
    {'name':'Peter','city':"深圳",'math':60},
]
df = pd.DataFrame(data=lst,columns=['name','city','math'])
df
# list of dicts, demo 4: a requested column absent from the dicts
# ('化学', i.e. "chemistry") becomes an all-NaN column
lst = [
    {'name':'Lemon','city':"长沙",'math':80,'chem':90},
    {'name':'Jack','city':"上海",'math':90,'chem':75},
    {'name':'Peter','city':"深圳",'math':60},
]
df = pd.DataFrame(data=lst,columns=['name','city','math','化学'])
df
# list of tuples: two-dimensional data, one tuple per row
lst = [
    ("Lemon", "长沙", 80, 90),
    ("Jack", "上海", 90, 75),
    ("Peter", "深圳", 60, 80),
]
df = pd.DataFrame(
    data=lst,
    columns=["name", "city", "math", "chem"]
)
df
# list of tuples built by zipping parallel column lists
lst1 = ["Lemon","Jack","Peter"]
lst2 = ["长沙","上海","深圳"]
lst3 = [80,90,60]
lst4 = [90,75,80]
lst = list(zip(lst1,lst2,lst3,lst4))
print(lst)
df = pd.DataFrame(
    data=lst,
    columns=["name", "city", "math", "chem"]
)
df
# ### Create a DataFrame from a dict
# The dict keys become columns; the list values become the column data.
d = {
    "name": [ "Lemon", "Jack", "Peter"],
    "city": ["长沙", "上海", "深圳"],
    "math": [80, 90, 60],
    "chem": [90, 75, 80],
}
df = pd.DataFrame(d)
df
# from_dict is an equivalent explicit constructor.
df = pd.DataFrame.from_dict(d)
df
# dict of dicts: the inner keys become the row index.
d = {
    "name": {0: "Lemon", 1: "Jack", 2: "Peter"},
    "city": {0: "长沙", 1: "上海", 2: "深圳"},
    "math": {0: 80, 1: 90, 2: 60},
    "chem": {0: 90, 1: 75, 2: 80},
}
df = pd.DataFrame(d)
df
# ### Create a DataFrame from a scalar
# The scalar is broadcast over the given index and columns.
df = pd.DataFrame(
    1,
    index=[1,2,3,4],
    columns=list('abcde')
)
df
# ## 03 通过读取文件来创建dataframe
#
# 此外, Sereis 和 DataFrame 均可以通过读取 csv、excel等数据文件来创建,这部分内容后续介绍
# ## 04《图解Pandas》专题汇总
#
# 《图解Pandas》系列已发布的图文汇总如下:
#
# - [图文00-《图解Pandas》内容框架介绍](https://mp.weixin.qq.com/s/gh063BUAM90vFhy6ZLaznw)
# - [图文01-数据结构介绍](https://mp.weixin.qq.com/s/H9kJf9zJU7ys6esr0DBhHg)
#
# 考虑到《图解Pandas》系列内容在不断更新过程中,大家可以通过下面的专题来找到最新发布的内容。
#
# [](https://mp.weixin.qq.com/mp/appmsgalbum?__biz=MzI2NjY5NzI0NA==&action=getalbum&album_id=2293754972943122444#wechat_redirect)
#
# 同时考虑到,以后如果文章数量较多(比如超过50篇文章),可能在专题中也不好快速的找到所需要的内容,我会以文章汇总的形式,将《图解Pandas》系列的文章进行手动汇总,并形成 `图解Pandas汇总` 的专题,最新的汇总文章,可以点击下面专题,找到最新的文章即可。
#
# [](https://mp.weixin.qq.com/mp/appmsgalbum?__biz=MzI2NjY5NzI0NA==&action=getalbum&album_id=2293756873331933190#wechat_redirect)
| code/002-create-data.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# cd /home/liya
# +
# %matplotlib inline
from mujoco_py.mjviewer import MjViewer
from numpy.testing import assert_array_equal,assert_almost_equal
from mujoco_py import MjSim, load_model_from_xml, load_model_from_path, MjSimState, ignore_mujoco_warnings, load_model_from_mjb
import mujoco_py
import numpy as np
import cv2
from skimage.io import imsave, imshow
import matplotlib.pyplot as plt
def getCamParams():
    """Return (fx, fy) pinhole focal lengths in pixels for a 45-degree FOV.

    fx/fy are the render resolution (640x480) divided by tan(FOV);
    with FOV = 45 degrees, tan is ~1, so fx ~ 640 and fy ~ 480.
    """
    angle = np.deg2rad(45.0)
    # NOTE(review): the original also set a physical focal length
    # f = 0.008 m but never used it; removed as dead code.
    fx = 640 / np.tan(angle)
    fy = 480 / np.tan(angle)
    return fx, fy
def imgFloat_To_Uint8(img):
    """Linearly rescale a float image to the full uint8 range [0, 255].

    Returns (pix_min, scale, img_u8) so the caller can invert the mapping:
    original ~= img_u8 / scale + pix_min.
    """
    pix_min = img.min()
    pix_max = img.max()
    # Guard against a constant image: the original divided by a zero range
    # here, producing inf/NaN; a constant image now maps to all zeros.
    span = pix_max - pix_min
    scale = 255.0 / span if span != 0 else 1.0
    img = (img - pix_min) * scale
    img = np.array(img, dtype=np.uint8)
    return pix_min, scale, img
xml_path = "/mnt/liya2/mujoco200_linux/model/cloth.xml"
model = load_model_from_path(xml_path)
sim = MjSim(model)
viewer = MjViewer(sim)
sim.forward()
# while True:
#     sim.step()
#     image = viewer.render()
#     print(np.size(image))
#     cv2.imshow("depth image", image)
#     cv2.imsave('/mnt/liya2/depth_image.png', image)
# Advance the simulation a single step before rendering.
for i in range(1):
    sim.step()
frame_size = (640, 480)
camera_name = None
# Offscreen render with depth=True returns both the RGB frame and the raw
# depth buffer.
curr_frame, depth_img = sim.render(width=frame_size[0], height=frame_size[1], depth=True, mode='offscreen', camera_name=camera_name, device_id=0)
# depth = (depth_img - np.min(depth_img))/ (np.max(depth_img) - np.min(depth_img))
# depth = np.array(depth * 255, dtype=np.uint8)
# print('depth = ', depth)
# print(np.shape(depth))
plt.figure("cloth")
# flipud: the rendered buffer is presumably bottom-up - confirm against the
# mujoco_py render documentation.
plt.imshow(np.flipud(curr_frame))
plt.figure('depth')
plt.imshow(np.flipud(depth_img))
print(np.shape(curr_frame))
plt.show()
# curr_frame = sim.render(width=frame_size[0], height=frame_size[1],mode='window', camera_name=camera_name, device_id=0)
| tools/mujoco_depth_image.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import torch
import sys
sys.path.insert(0, './core')
sys.path.insert(0, './models')
import DAELightTransferDataLoader as lightDL
import torchvision.utils as tutils
import matplotlib.pyplot as plt
from PIL import Image
from torchvision import transforms
data_root = './multipie_select_batches'
# +
# Seven Multi-PIE capture-session subsets used for training.
training_data = []
training_data.append(data_root + '/session01_01_select')
training_data.append(data_root + '/session01_02_select')
training_data.append(data_root + '/session01_03_select')
training_data.append(data_root + '/session01_04_select')
training_data.append(data_root + '/session01_05_select')
training_data.append(data_root + '/session01_06_select')
training_data.append(data_root + '/session01_07_select')
transform = transforms.Compose([
    #transforms.Resize(),
    transforms.ToTensor(),
    transforms.Normalize(mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5))
])
# NOTE(review): `transform` above is defined but NOT passed in - the dataset
# is constructed with transform=None. Confirm whether that is intentional.
data_set = lightDL.FareMultipieLightingTripletsFrontal(None, root=training_data, transform = None, resize=64)
print(len(data_set))
data_loader = torch.utils.data.DataLoader(data_set, batch_size = 64, shuffle = True)
# -
# Pull one batch of lighting triplets and visualize both image tensors.
data = iter(data_loader)
s1, img1, s2, img2 = next(data)
print(img1.shape)
# Tile the first image batch into an 8-per-row grid for inspection.
grid = tutils.make_grid(img1, nrow=8, padding=2)
#ndarr = grid.mul(255).clamp(0, 255).byte().permute(1, 2, 0).cpu().numpy()
# NOTE(review): Image.fromarray on a float (C,H,W)->(H,W,C) array assumes the
# values are already display-range; confirm the dataset yields uint8-like data.
ndarr = grid.permute(1, 2, 0).cpu().numpy()
im = Image.fromarray(ndarr)
plt.imshow(im)
plt.show()
# Same grid for the second image of each triplet.
grid = tutils.make_grid(img2, nrow=8, padding=2)
#ndarr = grid.mul(255).clamp(0, 255).byte().permute(1, 2, 0).cpu().numpy()
ndarr = grid.permute(1, 2, 0).cpu().numpy()
im = Image.fromarray(ndarr)
plt.imshow(im)
plt.show()
| 2_lighting_model_in_latent_space_single_val/.ipynb_checkpoints/Test_data_loader-checkpoint.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] deletable=true editable=true
# # Sampler statistics
#
# When checking for convergence or when debugging a badly behaving
# sampler, it is often helpful to take a closer look at what the
# sampler is doing. For this purpose some samplers export
# statistics for each generated sample.
# + deletable=true editable=true
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sb
import pandas as pd
import pymc3 as pm
# %matplotlib inline
# + [markdown] deletable=true editable=true
# As a minimal example we sample from a standard normal distribution:
# + deletable=true editable=true
# Minimal model: ten iid standard-normal variables.
model = pm.Model()
with model:
    mu1 = pm.Normal("mu1", mu=0, sd=1, shape=10)
# + deletable=true editable=true
# Use an explicit NUTS step so its per-sample statistics are exported.
with model:
    step = pm.NUTS()
    trace = pm.sample(2000, tune=1000, init=None, step=step, cores=2)
# + [markdown] deletable=true editable=true
# NUTS provides the following statistics:
# + deletable=true editable=true
trace.stat_names
# + [markdown] deletable=true editable=true
# - `mean_tree_accept`: The mean acceptance probability for the tree that generated this sample. The mean of these values across all samples but the burn-in should be approximately `target_accept` (the default for this is 0.8).
# - `diverging`: Whether the trajectory for this sample diverged. If there are many diverging samples, this usually indicates that a region of the posterior has high curvature. Reparametrization can often help, but you can also try to increase `target_accept` to something like 0.9 or 0.95.
# - `energy`: The energy at the point in phase-space where the sample was accepted. This can be used to identify posteriors with problematically long tails. See below for an example.
# - `energy_error`: The difference in energy between the start and the end of the trajectory. For a perfect integrator this would always be zero.
# - `max_energy_error`: The maximum difference in energy along the whole trajectory.
# - `depth`: The depth of the tree that was used to generate this sample
# - `tree_size`: The number of leaves of the sampling tree when the sample was accepted. This is usually a bit less than $2 ^ \text{depth}$. If the tree size is large, the sampler is using a lot of leapfrog steps to find the next sample. This can for example happen if there are strong correlations in the posterior, if the posterior has long tails, if there are regions of high curvature ("funnels"), or if the variance estimates in the mass matrix are inaccurate. Reparametrisation of the model or estimating the posterior variances from past samples might help.
# - `tune`: This is `True`, if step size adaptation was turned on when this sample was generated.
# - `step_size`: The step size used for this sample.
# - `step_size_bar`: The current best known step-size. After the tuning samples, the step size is set to this value. This should converge during tuning.
# - `model_logp`: The model log-likelihood for this sample.
# + [markdown] deletable=true editable=true
# If the name of the statistic does not clash with the name of one of the variables, we can use indexing to get the values. The values for the chains will be concatenated.
#
# We can see that the step sizes converged after the 1000 tuning samples for both chains to about the same value. The first 2000 values are from chain 1, the second 2000 from chain 2.
# + deletable=true editable=true
# Step-size adaptation trace: both chains should settle to a similar value.
plt.plot(trace['step_size_bar'])
# + [markdown] deletable=true editable=true
# The `get_sampler_stats` method provides more control over which values should be returned, and it also works if the name of the statistic is the same as the name of one of the variables. We can use the `chains` option, to control values from which chain should be returned, or we can set `combine=False` to get the values for the individual chains:
# + deletable=true editable=true
sizes1, sizes2 = trace.get_sampler_stats('depth', combine=False)
fig, (ax1, ax2) = plt.subplots(2, 1, sharex=True, sharey=True)
ax1.plot(sizes1)
ax2.plot(sizes2)
# + deletable=true editable=true
# Acceptance probabilities after the 1000 burn-in samples.
accept = trace.get_sampler_stats('mean_tree_accept', burn=1000)
sb.distplot(accept, kde=False)
# + deletable=true editable=true
# Should be close to the NUTS target_accept (0.8 by default).
accept.mean()
# + [markdown] deletable=true editable=true
# Find the index of all diverging transitions:
# + deletable=true editable=true
trace['diverging'].nonzero()
# + [markdown] deletable=true editable=true
# It is often useful to compare the overall distribution of the
# energy levels with the change of energy between successive samples.
# Ideally, they should be very similar:
# + deletable=true editable=true
# Compare the marginal energy distribution with the energy-transition
# distribution (the "BFMI" diagnostic picture).
energy = trace['energy']
energy_diff = np.diff(energy)
sb.distplot(energy - energy.mean(), label='energy')
sb.distplot(energy_diff, label='energy diff')
plt.legend()
# + [markdown] deletable=true editable=true
# If the overall distribution of energy levels has longer tails, the efficiency of the sampler will deteriorate quickly.
# + [markdown] deletable=true editable=true
# ## Multiple samplers
#
# If multiple samplers are used for the same model (e.g. for continuous and discrete variables), the exported values are merged or stacked along a new axis.
#
# Note that for the `model_logp` sampler statistic, only the last column (i.e. `trace.get_sampler_stat('model_logp')[-1]`) will be the overall model logp.
# + deletable=true editable=true
# Mixed model: one Bernoulli and ten Normal variables, so two different
# step methods run in the same sampling loop.
model = pm.Model()
with model:
    mu1 = pm.Bernoulli("mu1", p=0.8)
    mu2 = pm.Normal("mu2", mu=0, sd=1, shape=10)
# + deletable=true editable=true
# Assign one step method per variable kind; their stats get stacked/merged.
with model:
    step1 = pm.BinaryMetropolis([mu1])
    step2 = pm.Metropolis([mu2])
    trace = pm.sample(10000, init=None, step=[step1, step2], cores=2, tune=1000)
# + deletable=true editable=true
trace.stat_names
# + [markdown] deletable=true editable=true
# Both samplers export `accept`, so we get one acceptance probability for each sampler:
# + deletable=true editable=true
trace.get_sampler_stats('accept')
| docs/source/notebooks/sampler-stats.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + id="yBKNmdwgv7f8" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34.0} outputId="1fd85232-cb70-45c7-ab2b-7f2a99510154"
import tensorflow as tf
# Get the GPU device name.
device_name = tf.test.gpu_device_name()
# The device name should look like the following:
if device_name == '/device:GPU:0':
    print('Found GPU at: {}'.format(device_name))
else:
    raise SystemError('GPU device not found')
# + id="axqYi9TDwDPc" colab_type="code" colab={"resources": {"http://localhost:8080/nbextensions/google.colab/files.js": {"data": "<KEY>", "ok": true, "headers": [["content-type", "application/javascript"]], "status": 200.0, "status_text": ""}}, "base_uri": "https://localhost:8080/", "height": 69.0} outputId="a9a47ae2-60c7-477d-b21e-0114041a1616"
# Interactive file upload (Colab only).
from google.colab import files
uploaded = files.upload()
# + id="55u9GIMiw2jO" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 68.0} outputId="ff8a69a3-05a1-4b02-c238-7623070429fe"
import torch
# If there's a GPU available...
if torch.cuda.is_available():
    # Tell PyTorch to use the GPU.
    device = torch.device("cuda")
    print('There are %d GPU(s) available.' % torch.cuda.device_count())
    print('We will use the GPU:', torch.cuda.get_device_name(0))
    print(torch.cuda.get_device_name())
else:
    print('No GPU available, using the CPU instead.')
    device = torch.device("cpu")
# + id="YrGX0y8gw4-5" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 360.0} outputId="35575b29-e79d-4bd1-b4c1-8dbcba4d66dd"
# !pip install transformers
# + id="m3zJCQtww7o7" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 427.0} outputId="a525bec0-e3f0-4e46-a3fa-f08521d599bb"
import pandas as pd
# Load the dataset into a pandas dataframe.
df = pd.read_csv("./reddit_comments_race_black_processed.csv", encoding='latin-1')
# Report the number of sentences.
print('Number of training sentences: {:,}\n'.format(df.shape[0]))
# Drop rows with missing values, then display 10 random rows from the data.
df = df.dropna()
print('Number of training sentences: {:,}\n'.format(df.shape[0]))
df.sample(10)
# + id="zzWNm6khw-bc" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 224.0} outputId="e77916f3-7743-47cb-c816-e6e4a38dda69"
# Inference-only: only the text column is used; label extraction stays commented out.
sentences = df.comments_processed.values
# df['Code'] = df['Code'].astype('category').cat.codes
# labels = df.code_label.values
# labels = df['class'].values
print(sentences.dtype)
# print(labels.dtype)
print(sentences[:10])
# print(labels[:10])
# + id="laZ4d8z3xQxn" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34.0} outputId="909a40bd-71fe-4051-8206-019cdcb0df62"
from transformers import BertTokenizer
# Load the BERT tokenizer.
print('Loading BERT tokenizer...')
tokenizer = BertTokenizer.from_pretrained('bert-base-uncased', do_lower_case=True)
# + id="-1Sx1OacxTxV" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 71.0} outputId="7379af07-41a1-4438-f5c7-eb681cbc0484"
# Encode every sentence into BERT token IDs ([CLS] ... [SEP]).
input_ids = []
# For every sentence...
for sent in sentences:
    # `encode` will:
    #   (1) Tokenize the sentence.
    #   (2) Prepend the `[CLS]` token to the start.
    #   (3) Append the `[SEP]` token to the end.
    #   (4) Map tokens to their IDs.
    # Pre-truncate very long comments (>200 words) to their first 100 words
    # so encoding stays well under the model's input limit.
    if len(sent.split()) > 200:
        # sent = sent[:512]
        print('in')
        sent = ' '.join(sent.split()[:100])
    encoded_sent = tokenizer.encode(
        sent,  # Sentence to encode.
        add_special_tokens = True)
    #print(tokenizer.tokenize(sent))
    # Add the encoded sentence to the list.
    #print(encoded_sent)
    input_ids.append(encoded_sent)
# Print sentence 0, now as a list of IDs.
print('Original: ', sentences[0])
print('Token IDs:', input_ids[0])
#print(tokenizer.decode(input_ids=[12731, 3372]))
# + id="xJfsOnohxWhD" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 68.0} outputId="40ea6908-f76b-4d7f-be10-9dd840af7c77"
# Length statistics: longest encoded sentence and how many exceed 64 tokens
# (64 is the MAX_LEN used for padding/truncation below).
print('Max sentence length: ', max([len(sen) for sen in input_ids]))
print(len(input_ids[0]))
long_sen = 0
for sen in input_ids:
    if len(sen) > 64:
        long_sen += 1
print(long_sen)
# + id="gbzvsZb6xZDM" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 102.0} outputId="33ba0cea-0b21-48d7-ff52-0c2743740aa0"
from keras.preprocessing.sequence import pad_sequences
# Set the maximum sequence length; every encoded sentence is padded or
# truncated (at the end, "post") to exactly this many token IDs, 0 = [PAD].
MAX_LEN = 64
print('\nPadding/truncating all sentences to %d values...' % MAX_LEN)
print('\nPadding token: "{:}", ID: {:}'.format(tokenizer.pad_token, tokenizer.pad_token_id))
# Pad our input tokens with value 0.
input_ids = pad_sequences(input_ids, maxlen=MAX_LEN, dtype="long",
                          value=0, truncating="post", padding="post")
# Fixed: original was print('\Done.') — '\D' is an invalid escape sequence that
# printed a literal backslash; '\nDone.' was clearly intended.
print('\nDone.')
# + id="xc9QzbNTxbfs" colab_type="code" colab={}
# Create attention masks
attention_masks = []
# For each sentence...
for sent in input_ids:
# Create the attention mask.
# - If a token ID is 0, then it's padding, set the mask to 0.
# - If a token ID is > 0, then it's a real token, set the mask to 1.
att_mask = [int(token_id > 0) for token_id in sent]
# Store the attention mask for this sentence.
attention_masks.append(att_mask)
# + id="W34-o2suxlwS" colab_type="code" colab={}
test_inputs = torch.tensor(input_ids)
test_masks = torch.tensor(attention_masks)
# + id="iO3KJzoKx0Mn" colab_type="code" colab={}
from torch.utils.data import TensorDataset, DataLoader, RandomSampler, SequentialSampler
batch_size = 32
test_data = TensorDataset(test_inputs, test_masks)
test_sampler = SequentialSampler(test_data)
test_dataloader = DataLoader(test_data, sampler=test_sampler, batch_size=batch_size)
# + id="QjpfhHp7x6yg" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 1000.0} outputId="cc257861-6842-47b2-d2b3-830950d37893"
model = torch.load('./bert_davidson.pt')
model.cuda()
# + id="6TTY3m2gyRge" colab_type="code" colab={}
import time
import datetime
def format_time(elapsed):
    """Format a duration given in seconds as an h:mm:ss string."""
    # Round to whole seconds; timedelta's str() does the h:mm:ss rendering.
    whole_seconds = int(round(elapsed))
    return str(datetime.timedelta(seconds=whole_seconds))
# + id="3K_p6wYMyUdr" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 51.0} outputId="cfff7017-5586-4389-b096-3fec60a23dfd"
import random
import numpy as np
# Fix all RNG seeds for reproducibility.
seed_val = 42
random.seed(seed_val)
np.random.seed(seed_val)
torch.manual_seed(seed_val)
torch.cuda.manual_seed_all(seed_val)
# ========================================
#               Test
# ========================================
# After the completion of each training epoch, measure our performance on
# our validation set.
print("")
print("Running Validation...")
t0 = time.time()
# Put the model in evaluation mode--the dropout layers behave differently
# during evaluation.
model.eval()
# Tracking variables
# Evaluate data for one epoch
pred_labels_test = []
for batch in test_dataloader:
    # Add batch to GPU
    batch = tuple(t.to(device) for t in batch)
    # Unpack the inputs from our dataloader
    b_input_ids, b_input_mask = batch
    # Telling the model not to compute or store gradients, saving memory and
    # speeding up validation
    with torch.no_grad():
        # Forward pass, calculate logit predictions.
        # This will return the logits rather than the loss because we have
        # not provided labels.
        # token_type_ids is the same as the "segment ids", which
        # differentiates sentence 1 and 2 in 2-sentence tasks.
        # The documentation for this `model` function is here:
        # https://huggingface.co/transformers/v2.2.0/model_doc/bert.html#transformers.BertForSequenceClassification
        outputs = model(b_input_ids,
                        token_type_ids=None,
                        attention_mask=b_input_mask)
    # Get the "logits" output by the model. The "logits" are the output
    # values prior to applying an activation function like the softmax.
    logits = outputs[0]
    # Move logits and labels to CPU
    logits = logits.detach().cpu().numpy()
    # Argmax over the class axis -> one predicted label per input row.
    pred_flat = np.argmax(logits, axis=1).flatten()
    # Calculate the accuracy for this batch of test sentences.
    pred_labels_test.append(pred_flat)
# + id="zncD-mxRUoBl" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 1000.0} outputId="e01c62e6-20eb-441d-b298-e928be3617ef"
# pred_labels_test is a list of per-batch arrays at this point.
print(len(pred_labels_test))
print(pred_labels_test)
# + id="UwGYkmPWUwXM" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 71.0} outputId="89fe52d9-b684-4274-9076-d11e2d955c89"
# Flatten the per-batch prediction arrays into one flat list of labels.
preds = []
for l in pred_labels_test:
    l = l.tolist()
    preds.append(l)
preds_flat = [item for sublist in preds for item in sublist]
print(preds_flat)
print(len(preds_flat))
| Evaluation/BertOnTestSet.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernel_info:
# name: python3
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# %matplotlib inline
from matplotlib import style
style.use('fivethirtyeight')
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import datetime as dt
# # Reflect Tables into SQLAlchemy ORM
# Python SQL toolkit and Object Relational Mapper
import sqlalchemy
from sqlalchemy.ext.automap import automap_base
from sqlalchemy.orm import Session
from sqlalchemy import create_engine, func, inspect
engine = create_engine("sqlite:///Resources/hawaii.sqlite")
conn = engine.connect()
# +
# reflect an existing database into a new model
Base = automap_base()
# reflect the tables
Base.prepare(engine, reflect =True)
# -
# We can view all of the classes that automap found
Base.classes.keys()
# Save references to each table
Measurement = Base.classes.measurement
Station = Base.classes.station
# Create our session (link) from Python to the DB
session = Session(engine)
session.close()
# create inspecter and connect to engine
inspector = inspect(engine)
# # Exploratory Climate Analysis
# identify table columns and their corresponding data type
columns = inspector.get_columns('measurement')
for column in columns:
print(column["name"], column["type"])
# +
# my trip dates
start_trip = '2018-08-01'
end_trip = '2018-08-10'
# latest date
last = session.query(Measurement.date).order_by(Measurement.date.desc()).first()
# Calculate the date 1 year ago from the last data point in the database
last_year = dt.datetime.strptime(last[0], '%Y-%m-%d')
previous = dt.date(last_year.year, last_year.month, last_year.day) - dt.timedelta(days=365)
print(f"Latest Date: {last[0]}")
print(f"Date 1 year from the last data point: {previous}")
# -
# Design a query to retrieve the last 12 months of precipitation data and plot the results
first_row = session.query(Measurement).first()
first_row.__dict__
# number of stations available in dataset
engine.execute('SELECT COUNT(*) FROM station').fetchall()
# get Min and Max dates from measurment table
engine.execute('SELECT MIN(date), MAX(date) FROM measurement').fetchall()
engine.execute("SELECT DISTINCT(s.name)FROM station s INNER JOIN measurement m ON s.station = m.station WHERE Date(m.date) >='2016-08-23' AND Date(m.date) <='2017-08-23'").fetchall()
# April 17 to May 1 2017 table
search_string = f"SELECT s.station, s.name, s.latitude, s.longitude, m.date, m.prcp, m.tobs FROM station s INNER JOIN measurement m ON s.station = m.station WHERE Date(date) >='2017-04-17' AND Date(date) <='2017-05-01' and s.name LIKE 'KANEOHE%'"
engine.execute(search_string).fetchall()
# +
last_year = f"SELECT m.date, m.prcp FROM station s INNER JOIN measurement m ON s.station = m.station WHERE Date(m.date) >='2016-08-23' AND Date(m.date) <='2017-08-23' and s.name LIKE 'KANEOHE%'"
last_year_df = pd.DataFrame(engine.execute(last_year).fetchall())
last_year_df = last_year_df.rename(columns={0:"Date", 1:"Prcp"})
last_year_df.set_index("Date")
last_year_df= pd.DataFrame(last_year_df)
last_year_df
# +
# Build the x (dates) and y (precipitation) value lists for the plot.
date = []
prcp = []
# loop through data table and append date/prcp
# Fixed: the first loop variable was named `dt`, which shadowed the
# `import datetime as dt` module alias for the rest of the script.
for day in last_year_df['Date']:
    date.append(day)
for prec in last_year_df['Prcp']:
    prcp.append(prec)
# +
# Use Pandas Plotting with Matplotlib to plot the data
x = date
y = prcp
# Set x axis and tick locations
x_axis = np.arange(len(date))
tick_locations = [value+0.4 for value in x_axis]
# Set Figuresize/ chart type/ ticks/ limits/labels/legend/ save a picture and show
plt.figure(figsize=(12,8))
plt.bar(x, y,width=5, color='blue',alpha=0.5, align="center", label='precipitation')
ticks = np.arange(0,450,45)
plt.xticks(ticks, rotation=90)
plt.xlim("2016-08-23","2017-08-23")
plt.ylim(0,7)
plt.xlabel("Date")
plt.ylabel("Inches")
plt.grid(which='major', axis='both', linestyle='-')
plt.legend()
plt.savefig("Images/precipitation.png")
plt.show()
# -
# -
# identify table columns and their corresponding data type
inspector = inspect(engine)
columns = inspector.get_columns('Station')
for c in columns:
    print(c['name'], c["type"])
# Design a query to show how many stations are available in this dataset
total = session.query(Station.station).count()
total
# +
# Find most active stations (by measurement count) in descending order
sel = [Measurement.station, func.count(Measurement.station)]
station_descending = session.query(*sel)\
    .group_by(Measurement.station)\
    .order_by(func.count(Measurement.station).desc()).all()
station_descending
# +
# Using the station id from the previous query, calculate the lowest temperature recorded,
# highest temperature recorded, and average temperature of the most active station?
data = session.query(func.min(Measurement.tobs),
                     func.max(Measurement.tobs),
                     func.avg(Measurement.tobs))\
    .filter(Measurement.station == station_descending[0][0]).all()
data
# +
# Choose the station with the highest number of temperature observations.
# Query the last 12 months of temperature observation data for this station and plot the results as a histogram
s = [Measurement.date, Measurement.tobs]
query_t = session.query(*s)\
    .filter(Measurement.date >= previous)\
    .filter(Measurement.station == station_descending[0][0]).all()
# sort data frame with date as index
sorted_df = pd.DataFrame(query_t, columns=['Date','Temperature'])
sorted_df = sorted_df.dropna(how='any')
sorted_df = sorted_df.sort_values(["Date"], ascending=True)
sorted_df = sorted_df.set_index("Date")
sorted_df
# +
# Plot the results as a histogram
x = sorted_df["Temperature"].tolist()
# Size/histogram/labels/legend/save as picture/ title
plt.figure(figsize=(12,8))
plt.hist(x, bins=12, label="TOBS")
plt.xlabel("Temperature in Fahrenheit")
plt.ylabel("Frequency")
plt.legend()
plt.title(f"Station {station_descending[0][0]}'s Temperature Observations from {previous} to {last[0]}")
plt.savefig("Images/histogram.png")
plt.show()
# -
| climate_starter.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Arvato Customer segmentation and Classification
# In this notebook we will work on the following task:
# - Dimensionality Reduction
# - Customer Segmentation
# + _uuid="8f2839f25d086af736a60e9eeb907d3b93b6e0e5" _cell_guid="b1076dfc-b9ad-4769-8c92-a6c4dae69d19"
# This Python 3 environment comes with many helpful analytics libraries installed
# It is defined by the kaggle/python Docker image: https://github.com/kaggle/docker-python
# For example, here's several helpful packages to load
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import plotly.graph_objects as go
from plotly.offline import init_notebook_mode, iplot
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.preprocessing import StandardScaler
from sklearn.decomposition import PCA
from sklearn.cluster import KMeans
from sklearn.linear_model import LogisticRegression
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.ensemble import GradientBoostingRegressor, AdaBoostRegressor
from sklearn.ensemble import AdaBoostClassifier, GradientBoostingClassifier
import xgboost as xgb
import time
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
# List every input file available in the Kaggle environment.
for dirname, _, filenames in os.walk('/kaggle/input'):
    for filename in filenames:
        print(os.path.join(dirname, filename))
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
# -
# Load the pre-cleaned population (azdias) and customer datasets.
df_azdias = pd.read_csv('../input/arvato-cleaned/Azdias_cleaned.csv')
df_customers = pd.read_csv('../input/arvato-cleaned/Customers_cleaned.csv')
# Feature Scaling
# - First, we will scale the data
# The scaler is fit on the general population only and then applied to both
# frames, so customers are expressed in the population's feature scale.
scaler = StandardScaler()
scaler.fit(df_azdias)
df_azdias = pd.DataFrame(scaler.transform(df_azdias), columns = df_azdias.columns)
df_customers = pd.DataFrame(scaler.transform(df_customers), columns = df_customers.columns)
# Customer Segmentation Report
# - We will use unsupervised machine learning to create clusters
# - We will also investigate feature importance.
# - We will also do dimensionality reduction
# ## Dimenionality Reduction
# NOTE(review): typo for "random_seed"; this constant is never used — the code
# below hard-codes random_state=22 instead.
randome_seed = 22
# We will implement PCA
def pca_model(data, n_components=None):
    """Fit a PCA on `data` and return (fitted_pca, transformed_data).

    `n_components=None` keeps every component; random_state is pinned
    for reproducibility.
    """
    model = PCA(n_components, random_state=22)
    model.fit(data)
    return model, model.transform(data)


pca_azdias, _ = pca_model(df_azdias, None)
pca_customers, _ = pca_model(df_customers, None)
def plot_pca_variance(pca_data, cumulative=True, figsize=(8,10)):
    """Plot the explained-variance ratio of a fitted PCA with plotly.

    Parameters
    ----------
    pca_data : fitted sklearn PCA instance.
    cumulative : if True, plot the running total of explained variance,
        otherwise the per-component ratio.
    figsize : unused; kept only for backward compatibility with callers.
    """
    if cumulative:
        variance = np.cumsum(pca_data.explained_variance_ratio_)
        y_label = "Percentage Explained Variance"
    else:
        variance = pca_data.explained_variance_ratio_
        y_label = "Explained Variance Ratio"
    x_label = "No. of components"
    # Fixed: removed an unused local `x = [x for x in range(0,350,50)]` — it was
    # never passed to the trace (plotly defaults x to the point index).
    trace1 = go.Scatter(y=variance, marker=dict(color='#ffdc51'), name='')
    layout = go.Layout(title="",
                       xaxis=dict(title=x_label),
                       yaxis=dict(title=y_label))
    fig = go.Figure(data=[trace1], layout=layout)
    iplot(fig)
plot_pca_variance(pca_azdias)
plot_pca_variance(pca_customers)
# As we can see, more than 90% of the variance is explained by 200 components.
# So we will reduce the number of dimensions to 200 and run k-means clustering to get the customer segmentation.
# Keep the 200 leading components (>90% of the variance) for clustering.
pca_azdias_200, azdias_pca_200 = pca_model(df_azdias, n_components=200)
pca_cust_200, cust_pca_200 = pca_model(df_customers, n_components=200)
# ## K-means Clustering
def perform_kmeans(data, K_start, K_end, step=1):
    """Fit KMeans for every cluster count in [K_start, K_end] (step `step`).

    Returns (list of |score| per k, the range of k values tried) — the
    absolute scores feed the elbow plot below.
    """
    cluster_counts = range(K_start, K_end+1, step)
    scores = []
    print("Performing K-Means clustering")
    print("Given range min:{}, max:{}, step:{}".format(K_start, K_end, step))
    for n_clusters in cluster_counts:
        print("\nTraining for n_clusters: ", n_clusters)
        t0 = time.time()
        fitted = KMeans(n_clusters, random_state=22).fit(data)
        # KMeans.score is the negative inertia; store its magnitude.
        scores.append(abs(fitted.score(data)))
        print("Done! Score: ", scores[-1])
        print("Time elapsed: {:.2f} sec.".format(time.time()-t0))
    return scores, cluster_counts
# %%time
azdias_scores, azdias_range_ = perform_kmeans(azdias_pca_200, 1, 20, 1)
def plot_elbow(scores, range_):
    """Plot sum-of-squared-distances vs. cluster count (elbow plot) with plotly."""
    y_label = "Sum of squared distances"
    x_label="No. of Clusters"
    x = [x for x in range_]
    # NOTE(review): name='hj' looks like a leftover placeholder legend label.
    trace1 = go.Scatter(x=x,y=scores,mode='lines+markers',marker=dict(color='#ffdc51'),name='hj')
    layout = go.Layout(title=""
                   ,xaxis=dict(title=x_label),
                   yaxis=dict(title=y_label)
                  ,legend=dict(x=0.1, y=1.1, orientation="h"))
    fig = go.Figure(data=[trace1],layout=layout)
    iplot(fig)
    # NOTE(review): plt.show() is a no-op here — the figure is rendered by iplot.
    plt.show()
plot_elbow(azdias_scores, azdias_range_)
# %%time
cust_scores, cust_range_ = perform_kmeans(cust_pca_200, 1, 20, 1)
plot_elbow(cust_scores, cust_range_)
# The basic idea behind clustering algorithms is to select the number of clusters so as to minimize the intra-cluster variance. Although there is no definite way of selecting the number of clusters, there are some direct methods and statistical methods for selecting the number of clusters. In this process the elbow method is chosen to select the optimal number of clusters.
# - We will use 8 clusters after looking at the elbow plot
# We will use kmeans with 8 clusters on customers and azdias
kmeans_azdias = KMeans(8, random_state=22)
kmeans_azdias.fit(azdias_pca_200)
azdias_clusters = kmeans_azdias.predict(azdias_pca_200)
kmeans_cust = KMeans(8, random_state=22)
kmeans_cust.fit(cust_pca_200)
customer_clusters= kmeans_cust.predict(cust_pca_200)
print(azdias_clusters[:10], "\n",customer_clusters[:10])
# Series form makes value_counts() available for per-cluster tallies.
customer_clusters = pd.Series(customer_clusters)
azdias_clusters = pd.Series(azdias_clusters)
customer_clusters.value_counts().sort_index()
# Let's see number of people in each cluster
cluster_info = pd.DataFrame([])
cluster_info["Population"] = azdias_clusters.value_counts().sort_index()
cluster_info["Customers"] = customer_clusters.value_counts().sort_index()
cluster_info.reset_index(inplace=True)
cluster_info.rename(columns={"index":"Cluster"}, inplace=True)
cluster_info
# +
trace1 = go.Bar(x=cluster_info["Cluster"],y=cluster_info["Population"],marker=dict(color='#ffdc51'),name='Azdias')
trace2 = go.Bar(x=cluster_info["Cluster"],y=cluster_info["Customers"],marker=dict(color='#9932CC'),name='Customer')
layout = go.Layout(title="Number of people in each cluster"
                   ,legend=dict(x=0.1, y=1.1)
                   ,xaxis=dict(title="Cluster"),
                   yaxis=dict(title="Population"))
fig = go.Figure(data=[trace1,trace2],layout=layout)
iplot(fig)
# -
# We will now see the proportion of population in each cluster
cluster_info["Pop_proportion"] = (cluster_info["Population"]/cluster_info["Population"].sum()*100).round(2)
cluster_info["Cust_proportion"] = (cluster_info["Customers"]/cluster_info["Customers"].sum()*100).round(2)
# Ratio > 1 means the cluster is over-represented among customers.
cluster_info["Cust_over_Pop"] = cluster_info["Cust_proportion"] / cluster_info["Pop_proportion"]
# NOTE(review): the legend label below looks copy-pasted from another project
# ("airbnb rental") — it does not describe this trace.
trace1 = go.Bar(x=cluster_info["Cluster"],y=cluster_info["Cust_over_Pop"],marker=dict(color='#ffdc51'),name='Average price of airbnb rental neighbourhood')
layout = go.Layout(title="Proportion of population in each cluster"
                   ,legend=dict(x=0.1, y=1.1)
                   ,xaxis=dict(title="Cluster"),
                   yaxis=dict(title="Proportion"))
fig = go.Figure(data=[trace1],layout=layout)
iplot(fig)
# The ratio > 1 shows that the customer base is more in that cluster, and there is a scope of the population in these cluster to be future customers.
| arvato-customer-segmentation.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# %matplotlib widget
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.cm as cm
from PIL import Image
from scipy import sparse
import cv2
from pymatreader import read_mat
# from extract_graph import dic_to_sparse
from util import get_path, shift_skeleton
from plotutil import show_im,overlap, show_im_rgb, plot_nodes, plot_nodes_from_list,plot_t_tp1
from extract_graph import generate_graph_tab_from_skeleton,generate_nx_graph_from_skeleton,generate_skeleton,clean
import networkx as nx
from node_id import second_identification, whole_movement_identification,first_identification,relabel_nodes, clean_nodes, orient
from extract_graph import dic_to_sparse, from_sparse_to_graph, generate_nx_graph, prune_graph, from_nx_to_tab, from_nx_to_tab_matlab,sparse_to_doc, connections_pixel_list_to_tab, transform_list
from sparse_util import dilate, zhangSuen
from realign import realign, reconnect
from util import get_path
import pandas as pd
from datetime import datetime,timedelta
import ast
from time import time
import os
from random import choice
from pycpd import RigidRegistration, DeformableRegistration
from cycpd import rigid_registration
# +
# Synthetic registration test: build the border of a dimx-by-dimy square,
# apply a random rigid motion plus noise, and try to recover it.
plt.close('all')
X = []
dimx = 1000
dimy = 1000
for i in range(dimx):
    X.append((i,0))
    X.append((i,dimy))
for i in range(dimy):
    X.append((dimx,i))
    X.append((0,i))
X=np.transpose(np.array(X))
# Random ground-truth rotation R (angle in [0, pi/2)) and translation t.
angle = np.random.uniform(0,np.pi/2)
R=np.array([[np.cos(angle),-np.sin(angle)],[np.sin(angle),np.cos(angle)]])
t = np.random.uniform(0,1000,(2,1))
print(R,t)
np.random.shuffle(np.transpose(X))
# Y = R @ X + t plus uniform noise, then shuffled and subsampled so the two
# point sets are unpaired and of different sizes.
Y=np.transpose(np.transpose(np.dot(R,X))+np.transpose(t))+np.random.uniform(0,dimx/10,(2,4*dimx))
np.random.shuffle(np.transpose(Y))
Y = Y[:,:dimx//4]
X = X[:,:dimx//8]
fig=plt.figure(figsize=(10,9))
ax = fig.add_subplot(111)
ax.scatter(X[0,:],X[1,:])
ax.scatter(Y[0,:],Y[1,:])
X.shape,Y.shape
# + jupyter={"outputs_hidden": true}
# Rigid CPD registration with cycpd (scale disabled).
reg = rigid_registration(**{'X': np.transpose(X.astype(float)), 'Y': np.transpose(Y.astype(float)),'scale': False})
out = reg.register()
Rfound = reg.R[0:2,0:2]
tfound= np.dot(Rfound,reg.t[0:2])
# -
Rfound,tfound,R,t
# Re-apply the recovered transform to X and overlay it on Y for visual check.
fig=plt.figure(figsize=(10,9))
ax = fig.add_subplot(111)
Yrep=np.transpose(np.transpose(np.dot(Rfound,np.transpose(np.transpose(X))))-tfound)
ax.scatter(np.transpose(Y)[:,0],np.transpose(Y)[:,1])
ax.scatter(np.transpose(Yrep)[:,0],np.transpose(Yrep)[:,1])
# Same experiment with pycpd: recover rotation first, then translation.
reg = RigidRegistration(**{'X': np.transpose(X), 'Y': np.transpose(Y)})
out = reg.register()
Rfound = reg.R
Yrep=np.dot(Rfound,np.transpose(np.transpose(X)))
reg = RigidRegistration(**{'X': np.transpose(Yrep), 'Y': np.transpose(Y)})
out = reg.register()
tfound= reg.t
def try_dim(dim):
    """Benchmark helper: run the two-pass pycpd rigid registration on a
    synthetic square outline of size dim x dim perturbed by a random
    rigid transform plus noise.

    Returns
    -------
    (Rfound, tfound)
        The recovered rotation matrix and translation.  The original
        version computed both and discarded them, so callers (and the
        cProfile run below) could not inspect the result.
    """
    X = []
    dimx = dim
    dimy = dim
    # Sample the four edges of the square (4*dimx points).
    for i in range(dimx):
        X.append((i, 0))
        X.append((i, dimy))
    for i in range(dimy):
        X.append((dimx, i))
        X.append((0, i))
    X = np.transpose(np.array(X))
    # Random ground-truth rotation and translation.
    angle = np.random.uniform(0, np.pi / 2)
    R = np.array([[np.cos(angle), -np.sin(angle)], [np.sin(angle), np.cos(angle)]])
    t = np.random.uniform(0, 100, (2, 1))
    print(R, t)
    # Transformed, noisy copy of the cloud.
    Y = np.transpose(np.transpose(np.dot(R, X)) + np.transpose(t)) + np.random.uniform(0, 5, (2, 4 * dimx))
    np.random.shuffle(np.transpose(Y))
    # First registration recovers the rotation, second one the translation.
    reg = RigidRegistration(**{'X': np.transpose(X), 'Y': np.transpose(Y)})
    out = reg.register()
    Rfound = reg.R
    Yrep = np.dot(Rfound, np.transpose(np.transpose(X)))
    reg = RigidRegistration(**{'X': np.transpose(Yrep), 'Y': np.transpose(Y)})
    out = reg.register()
    tfound = reg.t
    return (Rfound, tfound)
import cProfile
# Profile the synthetic registration benchmark at a larger problem size.
cProfile.run('try_dim(2500)')
plate = 27
directory='//sun.amolf.nl/shimizu-data/home-folder/oyartegalvez/Drive_AMFtopology/PRINCE'
listdir=os.listdir(directory)
# Keep only acquisition folders for this plate; folder names end with
# 'Plate<zero-padded plate number>'.
list_dir_interest=[name for name in listdir if name.split('_')[-1]==f'Plate{0 if plate<10 else ""}{plate}']
ss=[name.split('_')[0] for name in list_dir_interest]  # date part, YYYYMMDD
ff=[name.split('_')[1] for name in list_dir_interest]  # time part, HHMM
dates_datetime=[datetime(year=int(ss[i][:4]),month=int(ss[i][4:6]),day=int(ss[i][6:8]),hour=int(ff[i][0:2]),minute=int(ff[i][2:4])) for i in range(len(list_dir_interest))]
dates_datetime.sort()
dates_datetime_chosen=dates_datetime[1:6]
# Re-serialize the chosen timestamps back into the MMDD_HHMM folder format.
dates = [f'{0 if date.month<10 else ""}{date.month}{0 if date.day<10 else ""}{date.day}_{0 if date.hour<10 else ""}{date.hour}{0 if date.minute<10 else ""}{date.minute}' for date in dates_datetime_chosen]
# Load each skeleton ('skel' entry of the .mat file) and convert it to a
# sparse matrix and then to a dict-of-keys representation.
# NOTE(review): read_mat is presumably imported at file level outside this
# view — confirm.
mat_skels=[read_mat(get_path(date,plate,True))['skel'] for date in dates]
dic_skels=[dic_to_sparse(mat_skel) for mat_skel in mat_skels]
skeleton_docs=[sparse_to_doc(sparse_skel) for sparse_skel in dic_skels]
dates_datetime_chosen
non_zeros= skeleton_docs[1].keys()
# +
def get_neighbours(pixel, non_zero_pixel):
    """Return the occupied 8-connected neighbours of *pixel*.

    A diagonal (corner) neighbour is only counted when it is not already
    edge-connected to one of the occupied 4-connected neighbours, so thick
    diagonal runs are not double counted.

    Parameters
    ----------
    pixel : (x, y) tuple.
    non_zero_pixel : set-like container of occupied pixel coordinates.

    Returns
    -------
    (neighbours, count) : list of occupied neighbour pixels and its length.
    """
    x, y = pixel
    edge_adjacent = {(x + 1, y), (x - 1, y), (x, y + 1), (x, y - 1)}
    corner_adjacent = {(x + 1, y - 1), (x + 1, y + 1), (x - 1, y + 1), (x - 1, y - 1)}
    found = []
    count = 0
    for candidate in edge_adjacent:
        if candidate in non_zero_pixel:
            count += 1
            cx, cy = candidate
            # Drop corner pixels that are edge-connected to this neighbour.
            for shared in ((cx + 1, cy), (cx - 1, cy), (cx, cy + 1), (cx, cy - 1)):
                corner_adjacent.discard(shared)
            found.append(candidate)
    for candidate in corner_adjacent:
        if candidate in non_zero_pixel:
            count += 1
            found.append(candidate)
    return (found, count)
def get_degree3_nodes(skel):
    """Return every skeleton pixel that has exactly three neighbours,
    i.e. the branch points of the skeleton.

    Parameters
    ----------
    skel : dict-of-keys skeleton (pixel -> value); only the keys are used.
    """
    occupied = skel.keys()
    return [px for px in occupied if get_neighbours(px, occupied)[1] == 3]
# -
# Bug fix: deg_3 must be computed before sampling from it — the original
# cell order used deg_3 (choice) before defining it, raising NameError on a
# fresh top-to-bottom run.
deg_3 = get_degree3_nodes(skeleton_docs[1])
node_center = choice(deg_3)
node_center
plt.close('all')
fig = plt.figure(figsize=(10, 9))
ax = fig.add_subplot(111)
window = 1000
# Show a (2*window)^2 crop of the sparse skeleton centred on the sampled
# branch point.
ax.imshow(dic_skels[1][node_center[0] - window:node_center[0] + window,
                       node_center[1] - window:node_center[1] + window].todense())
# +
def find_common_group_nodes(Sa, Sb, degree3_nodesa, degree3_nodesb, posa, posb, R0, t0, window=500, maxdist=50):
    """Match groups of degree-3 nodes between two skeletons.

    Every degree-3 node of skeleton A is mapped into B's frame with the
    current transform estimate (R0, t0).  The degree-3 nodes of A and of B
    that fall in a (2*window)^2 box around that anchor are collected; when
    both groups have the same cardinality and their centroids lie within
    maxdist of each other, the centroid pair is kept as a correspondence.

    Parameters
    ----------
    Sa, Sb : scipy sparse matrices storing a node id at each node's pixel.
    degree3_nodesa, degree3_nodesb : iterables of node ids.
    posa, posb : dict mapping node id -> (row, col) position.
    R0 : 2x2 rotation estimate; t0 : translation estimate.
    window : half-size of the exact search box.
    maxdist : maximum allowed distance between matched centroids.

    Returns
    -------
    (common_centroida, common_centroidb) : lists of matched centroid arrays.
    """
    common_centroida = []
    common_centroidb = []
    t = time()
    # Bug fix: np.int was deprecated and removed from NumPy (1.24+);
    # the builtin int is the documented replacement.
    posarottrans = {key: np.round(np.transpose(np.dot(R0, np.transpose(np.array(posa[key]))) + t0)).astype(int) for key in degree3_nodesa}
    print("rotating translating", time() - t)
    for node in degree3_nodesa:
        posanchor = posarottrans[node]
        # Cheap sparse crop of a generous 2*window box first, exact filter after.
        potential_surroundinga = Sa[posanchor[0] - 2 * window:posanchor[0] + 2 * window, posanchor[1] - 2 * window:posanchor[1] + 2 * window]
        potential_surroundingb = Sb[posanchor[0] - 2 * window:posanchor[0] + 2 * window, posanchor[1] - 2 * window:posanchor[1] + 2 * window]
        surrounding_nodesa = [node for node in potential_surroundinga.data if
                              (posanchor[0] - window < posarottrans[int(node)][0] < posanchor[0] + window and posanchor[1] - window < posarottrans[int(node)][1] < posanchor[1] + window
                               )]
        surrounding_nodesb = [node for node in potential_surroundingb.data if
                              (posanchor[0] - window < posb[int(node)][0] < posanchor[0] + window and posanchor[1] - window < posb[int(node)][1] < posanchor[1] + window
                               )]
        # Heuristic: the two groups are considered the same physical cluster
        # when they have the same cardinality and nearby centroids.
        if len(surrounding_nodesa) == len(surrounding_nodesb):
            possurroundinga = [posarottrans[node] for node in surrounding_nodesa]
            possurroundingb = [posb[node] for node in surrounding_nodesb]
            centroida = np.mean(possurroundinga, axis=0)
            centroidb = np.mean(possurroundingb, axis=0)
            if np.linalg.norm(centroida - centroidb) <= maxdist:
                common_centroida.append(centroida)
                common_centroidb.append(centroidb)
    return (common_centroida, common_centroidb)
def realign2(skeleton1, skeleton2, convergence_threshold, window=500, maxdist=50, save=''):
    """Iteratively estimate the rigid transform aligning skeleton1 onto
    skeleton2 from matched degree-3 node groups, then return skeleton1
    transformed, dilated and re-thinned.

    Parameters
    ----------
    skeleton1, skeleton2 : dict-of-keys skeletons (pixel -> 1).
    convergence_threshold : float
        Iteration stops when the incremental translation norm drops below it.
    window, maxdist : passed through to find_common_group_nodes.
    save : str
        Path prefix; when non-empty, the aligned skeleton and the recovered
        rotation/translation are written to disk.

    Returns
    -------
    The transformed skeleton (dict-of-keys).
    """
    converged = False
    tim = time()
    nx_graphA, posA = generate_nx_graph_from_skeleton(skeleton1)
    nx_graphB, posB = generate_nx_graph_from_skeleton(skeleton2)
    # Bug fix: elapsed time is time() - tim; the original printed the negative.
    print("generate_nx_graph_from_skeleton, t=", time() - tim)
    tim = time()
    t0 = np.array([0, 0])
    R0 = np.identity(2)
    degree3_nodesa = [node for node in nx_graphA if nx_graphA.degree(node) == 3]
    degree3_nodesb = [node for node in nx_graphB if nx_graphB.degree(node) == 3]
    print("lennodes=", len(degree3_nodesa))
    # Sparse lookup grids: node id stored at the node's pixel position.
    Sa = sparse.csr_matrix((22000, 46000))
    Sb = sparse.csr_matrix((22000, 46000))
    for node in degree3_nodesa:
        Sa[posA[node][0], posA[node][1]] = node
    for node in degree3_nodesb:
        Sb[posB[node][0], posB[node][1]] = node
    while not converged:
        listeA, listeB = find_common_group_nodes(Sa, Sb, degree3_nodesa, degree3_nodesb, posA, posB, R0, t0, maxdist=maxdist, window=window)
        # Kabsch algorithm: SVD of the centred cross-covariance gives the
        # optimal rotation; the translation follows from the centroids.
        H = np.dot(np.transpose(np.array(listeA) - np.mean(listeA, axis=0)), np.array(listeB) - np.mean(listeB, axis=0))
        U, S, V = np.linalg.svd(H)
        R = np.dot(V, np.transpose(U))
        t = np.mean(listeB, axis=0) - np.dot(R, np.mean(listeA, axis=0))
        print("number_common_nodes_found :", len(listeA))
        if np.linalg.norm(t) <= convergence_threshold:
            converged = True
        R0 = np.dot(R, R0)
        t0 = t + t0
    print("Find R and T, t=", time() - tim)
    tim = time()
    skeleton_transformed = transform_skeleton(skeleton1, R0, t0)
    skeleton_transformed = dilate(skeleton_transformed)
    skeleton_transformed = zhangSuen(skeleton_transformed)
    print("transform, dilate and thin, t=", time() - tim)
    # Bug fix: the original tested len(save) >= 0, which is always true, so
    # result files were written even when no save prefix was requested.
    if len(save) > 0:
        from_nx_to_tab(*generate_nx_graph_from_skeleton(skeleton_transformed)).to_csv(save + '_raw_aligned_skeleton.csv')
        np.savetxt(save + 'rot.txt', R0)
        np.savetxt(save + 'trans.txt', t0)
    print("R0=", R0, 't0=', t0)
    return (skeleton_transformed)
# -
def transform_skeleton(skeleton_doc, Rot, trans):
    """Apply the rigid transform (Rot, trans) to every pixel of a
    dict-of-keys skeleton.

    Coordinates are rounded to the nearest integer pixel; pixels that
    collide after rounding collapse onto a single key.  Values are reset
    to 1 deliberately — only occupancy matters downstream.

    Parameters
    ----------
    skeleton_doc : dict mapping (row, col) -> value.
    Rot : 2x2 rotation matrix; trans : length-2 translation.

    Returns
    -------
    dict mapping transformed (row, col) -> 1.
    """
    # Bug fix: np.int was removed from NumPy (1.24+); builtin int replaces it.
    transformed_keys = np.round(np.transpose(np.dot(Rot, np.transpose(np.array(list(skeleton_doc.keys()))))) + trans).astype(int)
    return {(pixel[0], pixel[1]): 1 for pixel in transformed_keys}
# Micro-benchmark: cost of block assignment / slicing on a large CSR matrix.
S=sparse.csr_matrix((22000, 46000))
S[12000:14000,12000:14000]=1
S.data
import cProfile
cProfile.run('S[12000:14000,12000:14000]')
# Align timepoint 4 against timepoint 3 (realign2) and against the pivot
# graph (realign) with different matching radii for comparison.
# NOTE(review): nx_graph_pivot / pos_pivot are defined elsewhere in the
# notebook — confirm.
r=realign2(skeleton_docs[4],skeleton_docs[3],2,save='Data/',maxdist=70)
r0=realign(skeleton_docs[4],nx_graph_pivot,pos_pivot,2,save='Data/',maxdist=150)
r1=realign(skeleton_docs[4],nx_graph_pivot,pos_pivot,2,save='Data/',maxdist=50)
def make_sparse(dico, dim=(20800, 46000)):
    """Convert a dict-of-keys skeleton into a boolean scipy DOK matrix.

    Parameters
    ----------
    dico : dict
        Maps (row, col) -> truthy value.
    dim : tuple of int, optional
        Matrix shape.  Generalized from a hard-coded constant into a
        defaulted parameter so other image sizes can be handled; existing
        callers are unchanged.

    Returns
    -------
    scipy.sparse.dok_matrix of dtype bool.
    """
    skel = sparse.dok_matrix(dim, dtype=bool)
    for key, value in dico.items():
        skel[key] = value
    return (skel)
# Rasterize the reference skeleton and both aligned versions, then compare
# them side by side at reduced resolution.
skeletons=[make_sparse(skeleton_docs[0]),make_sparse(r0),make_sparse(r1)]
from plotutil import plot_t_tp1, compress_skeleton
factor = 5
final_pictures = [compress_skeleton(skeletons[i],factor)>=1 for i in range(len(skeletons))]
plot_t_tp1([],[],None,None,final_pictures[0],final_pictures[2],compress=5,)
plt.close('all')
# Sensitivity of the alignment to the matching radius maxdist.
r=realign(skeleton_docs[4],nx_graph_pivot,pos_pivot,2,save='Data/',maxdist=30)
r=realign(skeleton_docs[4],nx_graph_pivot,pos_pivot,2,save='Data/',maxdist=40)
| amftrack/notebooks/development/accelerate_realign.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import numpy
import sys
sys.path.append("..")
import physics
from astropy.io import fits
from scipy.interpolate import interp1d
import math
import scipy.constants as p
import pandas as pd
import matplotlib.pyplot as plt
import timeit
# %precision %.4g
# <h1>Thermalization</h1>
#
#
#
# <p>
# This is perhaps the easiest process to model, and is where all electrons eventually end up once beneath the first inductive step corresponding to the lowest excitation energy of hydrogen.
#
# The preliminary source for cross sections is this paper: [https://arxiv.org/pdf/0910.4410.pdf]
# </p>
#
# <p> $ \sigma_{ee}=\frac{40\pi e^2}{E^2}ln(\Lambda)(\frac{0.05}{f})$
# As long as $f<0.05$, the results are the same within the error; $ln(\Lambda) \sim 10$. </p>
#
#
# <p> There is no output spectra; all output is stored as heat.</p>
#
#
def thermalize_cs(T, f=0.05, lnV=10):
    """Heating (electron-electron thermalization) cross section.

    Parameters
    ----------
    T : float, ndarray
        Electron kinetic energy (eV).
    f : float
        Fraction of energy lost in each interaction; the cross section
        scales as 0.05/f.
    lnV : float
        Coulomb logarithm ln(Lambda).

    Returns
    -------
    float, ndarray (same shape as T)
        Heating cross section in cm^2.

    See Also
    --------
    heating_dE : preferred; gives the energy-loss rate dE/dt directly.
    """
    prefactor = (7.82 * 10 ** (-11)) * (0.05 / f) * lnV
    return prefactor * (T) ** (-2)
def heating_dE(T, x_e, rs, nH=physics.nH):
    """Energy-loss rate from heating (electron-electron Coulomb scattering).

    Parameters
    ----------
    T : float, ndarray
        Electron kinetic energy (eV).
    x_e : float
        Hydrogen ionization fraction.
    rs : float
        Redshift (1 + z); sets the free-electron density.
    nH : float
        Hydrogen density (from physics.py).

    Returns
    -------
    float, ndarray (same shape as T)
        Energy loss rate from heating (negative).
    """
    coulomb_log = 10
    # Free-electron number density in cm^-3, from x_e = n_e / (nH * rs^3).
    n_e = x_e * nH * rs ** 3
    e_charge = 4.80326 * 10 ** -10  # electron charge, esu units
    # Relativistic m*v combination for kinetic energy T.
    mv = (T ** 2 + 2 * T * physics.me) ** 0.5 * physics.me / (T + physics.me)
    # Unit-conversion factor built from constants in physics.py.
    unit_factor = (10 ** -14 * physics.ele ** -2 * physics.c)
    return unit_factor * (-4 * math.pi * (e_charge) ** 4 * n_e * coulomb_log) / mv
#Examples and testing for heating_dE and thermalize
print('Heating test 1 return:')
heating_dE(100, 0.9, 1000)
# +
#Log plot of ionization cross section from thermalize_cs
# (f=1 means 100% fractional energy loss per interaction, scaling the
# cross section by 0.05 relative to the default f=0.05)
x=numpy.logspace(1,3)
plt.plot(x,thermalize_cs(x,1), label="heating")
plt.xlabel('Energy (eV)')
plt.ylabel('Cross Section ($cm^2$)')
plt.legend(bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.)
plt.show()
# -
# <h1>Ionization</h1>
#
# <p>
# Ionization produces an electron spectrum, with one primary losing energy and generating a free secondary. The energy of the primary decreases by the ionization energy and the energy spectra of the secondary. The percent of hydrogen ionized is a stored parameter and main focus of the code.
#
# The preliminary source for cross sections is this paper:
#
# [https://journals.aps.org/pra/pdf/10.1103/PhysRevA.50.3954]
# </p>
#
# <p>
# $ \sigma_i(t)=\frac{S}{t+u+1}[D(t)ln(t)+(2-\frac{N_i}{N})(\frac{t-1}{t}-\frac{ln(t)}{t+1})] $
# </p>
# <p>
# with $D(t)$ given as
# $ D(t)\equiv \int_{0}^{(t-1)/2} \frac{1}{w+1}\frac{df(w)}{dw} dw$
#
# and $t=T/B$, $w=W/B$, $u=U/B$, $Q=\frac{2BM_i^2}{NR}$, and $S=4\pi a_0^2 N(R/B)^2$.
#
# The parameters needed for each ionization are then
# $ f(w), N_i, N, B, S, U,... $ </p>
#
# <p>Secondary Spectra:
# These spectra are found from the singly differential cross section as above (see [http://physics.nist.gov/PhysRefData/Ionization/intro.html])
# </p>
def ionize_cs(Energy, atoms):
    """Total ionization cross section for electron impact on H, He or He+.

    Binary-encounter-dipole (BED) style fit:
    sigma = S/(t+u+1) * [D(t) ln(t) + (2 - N_i/N)((t-1)/t - ln(t)/(t+1))].

    Parameters
    ----------
    Energy : ndarray
        Electron kinetic energies (eV).
    atoms : int or ndarray
        Target species per energy (1=H, 2=He, 3=He+).  Generalization: a
        scalar is broadcast against Energy — the plotting cells below call
        ionize_cs(x, 1), which previously raised TypeError on len(atoms).

    Returns
    -------
    ndarray
        Cross sections in cm^2, with unphysical negative fit values
        clipped to zero.

    See Also
    --------
    ionize_s_cs : singly-differential cross section.
    """
    Energy = numpy.atleast_1d(numpy.asarray(Energy, dtype=float))
    atoms = numpy.broadcast_to(numpy.atleast_1d(atoms), Energy.shape)
    sigma = numpy.zeros(len(Energy))
    for n in range(len(Energy)):
        T = Energy[n]
        atom = atoms[n]
        if atom == 1:  # H
            B = 13.6057  # eV: binding energy
            U = 13.6057  # eV: orbital kinetic energy
            t = T / B
            # Polynomial fit to D(t) = int_0^{(t-1)/2} (w+1)^-1 df/dw dw
            D = (2834163 / 10 + 2 * (-4536259 - 10736505 * t - 7512905 * t ** (2) + 112365 * t ** (3)) / (5 * (1 + t) ** (5))) / 1000000
            N = 1  # number of bound electrons in subshell
            N_i = 0.4343  # integral of df/dw from 0 to infinity
        elif atom == 2:  # He
            B = 24.59  # eV
            U = 39.51  # eV
            t = T / B
            D = 1 / 2 * (53047 / 60 - (4 * (-58971 + 227814 * t - 78435 * t ** 2 + 121780 * t ** 3)) / (15 * (1 + t) ** 6)) / 1000
            N = 2
            N_i = 1.605
        elif atom == 3:  # He+
            B = 13.6057 * 4  # eV: hydrogenic scaling by Z^2
            U = 13.6057 * 4  # eV: scaled by Z^2
            t = T / B
            D = (2834163 / 10 + 2 * (-4536259 - 10736505 * t - 7512905 * t ** (2) + 112365 * t ** (3)) / (5 * (1 + t) ** (5))) / 1000000  # same as H
            N = 1
            N_i = 0.4343  # same as H in this approximation
        else:
            print('error: some atom incorrectly specified')
            return
        u = U / B
        S = 4 * math.pi * p.value('Bohr radius') ** 2 * N * (13.6057 / B) ** 2  # m^2
        sigma_i = S / (t + u + 1) * (D * numpy.log(t) + (2 - N_i / N) * ((t - 1) / t - numpy.log(t) / (t + 1))) * (10 ** 4)  # cm^2
        # Replace unphysical negatives (below-threshold energies) with zero.
        if sigma_i < 0:
            sigma_i = 0
        sigma[n] = sigma_i
    return sigma
ionize_cs(numpy.array([100, 60, 3]), numpy.array([1,2,3]))
def ionize_s_cs(E_in, E_sec, atoms):
    """Singly-differential ionization cross section (BED model).

    Parameters
    ----------
    E_in : ndarray
        Incident electron kinetic energies (eV).
    E_sec : ndarray
        Secondary-electron energy for each incident electron (eV).
    atoms : ndarray
        Target species per pair (1=H, 2=He, 3=He+).

    Returns
    -------
    ndarray
        Differential cross sections in cm^2, clipped at zero.

    See Also
    --------
    ionize_cs : total ionization cross section.
    """
    sigma = numpy.zeros(len(atoms))
    for n in range(len(atoms)):
        T = E_in[n]
        W = E_sec[n]
        atom = atoms[n]
        if atom == 1:  # H
            B = 13.6057  # eV: binding energy
            U = 13.6057  # eV: orbital kinetic energy
            t = T / B
            w = W / B
            y = 1 / (w + 1)
            # Polynomial fit to the dipole oscillator strength df/dw.
            df_dw = -0.022473 * y ** 2 + 1.1775 * y ** 3 - 0.46264 * y ** 4 + 0.089064 * y ** 5
            N = 1  # number of bound electrons in subshell
            N_i = 0.4343  # integral of df/dw from 0 to infinity
        elif atom == 2:  # He
            B = 24.59  # eV
            U = 39.51  # eV
            t = T / B
            w = W / B
            y = 1 / (w + 1)
            df_dw = 12.178 * y ** 3 - 29.585 * y ** 4 + 31.251 * y ** 5 - 12.175 * y ** 6
            N = 2
            N_i = 1.605
        elif atom == 3:  # He+
            B = 13.6057 * 4  # eV: hydrogenic scaling by Z^2
            U = 13.6057 * 4  # eV: scaled by Z^2
            t = T / B
            w = W / B
            y = 1 / (w + 1)
            df_dw = -0.022473 * y ** 2 + 1.1775 * y ** 3 - 0.46264 * y ** 4 + 0.089064 * y ** 5
            N = 1
            N_i = 0.4343  # same as H in this approximation
        else:
            print('error: atom incorrectly specified')
            return
        u = U / B
        S = 4 * math.pi * p.value('Bohr radius') ** 2 * N * (13.6057 / B) ** 2  # m^2
        # Bug fix: the second-order term used the unscaled energy W where the
        # reduced energy w = W/B is used everywhere else (compare the matching
        # first-order term 1/(w+1) + 1/(t-w)).
        # Consistency fix: convert m^2 -> cm^2 (x 1e4) as ionize_cs does; the
        # return was documented as cm^2 but S is built in m^2.
        # NOTE(review): the prefactor S/(B*t+(u+1)) differs from the BED
        # paper's S/(B*(t+u+1)); left as written pending confirmation.
        sigma_i = S / (B * t + (u + 1)) * ((N_i / N - 2) / (t + 2) * (1 / (w + 1) + 1 / (t - w)) + (2 - N_i / N) * (1 / (w + 1) ** 2 + 1 / (t - w) ** 2) + numpy.log(t) / (N * (w + 1)) * df_dw) * (10 ** 4)  # cm^2
        # Replace unphysical negatives with zero.
        if sigma_i < 0:
            sigma_i = 0
        sigma[n] = sigma_i
    return sigma
ionize_s_cs(numpy.array([102,102,102]), numpy.array([20,20,20]), numpy.array([1,2,3]))
# +
#Log plot of ionization cross sections
# Model curves for H, He and He+ overlaid with measured data points.
x=numpy.logspace(1,4)
y=numpy.ones(len(x))
#x=numpy.linspace(1,300)
plt.plot(x,ionize_cs(x,y), label="H")
plt.plot(x,ionize_cs(x,2*y), label="He")
plt.plot(x,ionize_cs(x,3*y), label="He+")
#Data points
#H
# Measured cross sections loaded from CSV, rescaled to cm^2.
H_i=pd.read_csv('Shah_H_ionization.csv', sep=',',header=None)
a=H_i.values[:,0]
b=H_i.values[:,1]*10**(-17) #cm^2
plt.scatter(a,b, label="H_data")
#He
Hep_i=pd.read_csv('Shah_He_ionization.csv', sep=',',header=None)
e=Hep_i.values[:,0]
f=Hep_i.values[:,1]*10**(-17) #cm^2
plt.scatter(e,f, label="He_data")
#He+
Hep_i=pd.read_csv('Peart_Hep_ionization.csv', sep=',',header=None)
c=Hep_i.values[:,0]
d=Hep_i.values[:,1]*10**(-18) #cm^2
plt.scatter(c,d, label="He+_data")
###############
plt.xlabel('Energy (eV)')
axes = plt.gca()
axes.set_xlim([0,500])
plt.ylabel('Cross Section ($cm^2$)')
plt.legend(bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.)
plt.show()
# +
#def ionize_sec(T, atom):
#Returns the seconday spectra following ionization
#atom: 1=H, 2=He, 3=He+
# -
# <h1>Excitation</h1>
# <p>
# Keep in mind that this will produce a singe output electron (use conservation of energy, perhaps looking at probability based on the transition distribution) and a output photon which is given by some transition distribution. We will need this distribution for $2s \rightarrow 1s$ and $2p \rightarrow 1s$ in H, He, and He+. The percent excited is also stored between steps, questionably; the time scale of going back to the ground state confuses this effect for me.
# The preliminary source for cross sections:
#
# [https://www.ncbi.nlm.nih.gov/pmc/articles/PMC4859265/pdf/j74sto.pdf]
# </p>
#
# <h2>H, He</h2>
# <p>The total excitation cross section for H and He is of the form:
# $\sigma_{BEf}=(f_{accu}/f_{sc})*[T/(T+B+E)]*\sigma_{PWB}$
# where $\sigma_{PWB}$ is given as
# $\sigma_{PWB}=\frac{4\pi a_0^2R}{T}*F_{PWB}(T)$
# This is the valid form for sub 3keV.
#
# The values for f, B, and E are in the linked paper.
# </p>
# <h2>He+</h2>
# <p>The best total excitation cross section seems to be from:
#
# [http://iopscience.iop.org/article/10.1088/0022-3700/9/9/016/pdf]
#
# However, He+ presents unique challenges at low energies for ionization given its small Z.
# </p>
# <h2>Secondary Spectra</h2>
# <p>Good source for $2s\rightarrow 1s$ seems to be:
#
# [https://arxiv.org/pdf/0803.0808.pdf]
#
# For other transitions, Voigt profiles can be placed around the central spectral line with parameters as in:
#
# [http://www.sciencedirect.com/science/article/pii/S0022407304004030]
# </p>
def excite_cs(T, atom, transition=2):
    """Excitation cross section (cm^2) for electron impact.

    Parameters
    ----------
    T : ndarray
        Electron kinetic energies (eV).
    atom : int
        Target species (1=H, 2=He, 3=He+).
    transition : int
        1 = 1s->2s, 2 = 1s->2p (default).

    Returns
    -------
    ndarray
        One cross section per input energy; zero below threshold.
    """
    energies = T
    sigma = numpy.zeros(len(energies))
    for n in range(0, len(energies)):
        T = energies[n]
        if atom == 1:  # H, threshold 10.2 eV
            if T < 10.2:  # eV
                sigma_e = 0
            elif transition == 1:  # 2s: piecewise analytic fit
                if T <= 11.56:  # eV
                    sigma_e = 10 ** (-16) * (0.114 + 0.0575 * (T - 10.2))  # cm^2
                elif T <= 12.23:  # eV
                    sigma_e = 1.795 * 10 ** (-17)  # cm^2
                else:
                    X = T / 10.2
                    sigma_e = 5.984 * 10 ** (-16) / (T) * (0.88606 - 2.7990 / X + 5.9451 / X ** (2) - 7.6948 / X ** (3) + 4.4152 / X ** (4))  # cm^2
            else:  # 2p: interpolate tabulated values
                table = pd.read_csv('H_1s-2p_excite_table.csv', sep=',', header=None)
                interp = interp1d(table.values[:, 0], table.values[:, 1])
                sigma_e = interp(T) * 10 ** (-16)  # cm^2
        elif atom == 2:  # He, threshold 21.2 eV
            if T < 21.2:  # eV
                sigma_e = 0
            elif transition == 1:  # 2s
                sigma_e = 0  # Need to find cross section
            else:  # 2p
                table = pd.read_csv('He_1s-2p_excite_table.csv', sep=',', header=None)
                interp = interp1d(table.values[:, 0], table.values[:, 1])
                sigma_e = interp(T) * 10 ** (-16)  # cm^2
        else:  # He+, threshold 40.8 eV
            pia2 = math.pi * p.value('Bohr radius') ** 2
            if T < 40.8:  # eV
                sigma_e = 0
            elif transition == 1:  # 2s
                # Tabulated values in units of pi*a0^2, with zeros padded at
                # the threshold (40.8 eV) and at the 3 keV end of the grid.
                grid = [40.8, 100., 200., 400., 700., 1000., 3000.]
                vals = [0, pia2 * 0.0065, pia2 * 0.0049, pia2 * 0.0031, pia2 * 0.0019, pia2 * 0.0013, 0]
                interp = interp1d(grid, vals)
                sigma_e = interp(T) * 10 ** (4)  # cm^2
            else:  # 2p
                grid = [40.8, 100., 200., 400., 700., 1000., 3000.]
                vals = [0, pia2 * 0.082, pia2 * 0.067, pia2 * 0.048, pia2 * 0.033, pia2 * 0.026, 0]
                interp = interp1d(grid, vals)
                sigma_e = interp(T) * 10 ** (4)  # cm^2
        if sigma_e < 0:
            sigma_e = 0
        sigma[n] = sigma_e
    return sigma
# +
#log plot showing 1s-2s excitation cross sections
# NOTE: He 1s->2s is currently stubbed to 0 inside excite_cs, so its curve
# is flat.
x=numpy.logspace(1,3)
#x=numpy.linspace(10,20)
plt.plot(x,excite_cs(x,1,1), label="H")
plt.plot(x,excite_cs(x,2,1), label="He")
plt.plot(x,excite_cs(x,3,1), label="He+")
plt.xlabel('Energy (eV)')
plt.ylabel('Cross Section ($cm^2$)')
plt.legend(bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.)
plt.show()
# +
#log plot showing 1s-2p excitation cross sections
x=numpy.logspace(1,3)
plt.plot(x,excite_cs(x,1,2), label="H")
plt.plot(x,excite_cs(x,2,2), label="He")
plt.plot(x,excite_cs(x,3,2), label="He+")
plt.xlabel('Energy (eV)')
plt.ylabel('Cross Section ($cm^2$)')
plt.legend(bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.)
plt.show()
# -
def excite_sec(T, atom, transition=2):
    """Secondary spectrum following an excitation event.

    Parameters
    ----------
    T : float
        Incident electron kinetic energy (eV).
    atom : int
        Target species (1=H, 2=He, anything else treated as He+).
    transition : int
        1 = 1s->2s, 2 = 1s->2p (default).  Only the 2p channel is
        modelled; any other transition yields zeros.

    Returns
    -------
    ndarray of length 2: [outgoing electron energy, photon energy].
    """
    result = numpy.zeros(2)
    if transition == 2:
        # Photon carries the 1s->2p line energy; the electron keeps the rest.
        line_energy = {1: 10.2, 2: 21.2}.get(atom, 40.8)  # eV
        result[0] = T - line_energy
        result[1] = line_energy
    return result
# +
#Plot all cross sections
#Log plot of ionization cross sections
# NOTE(review): ionize_cs is called with a scalar atom here; len(atoms)
# fails unless ionize_cs accepts scalar atoms — confirm.
x=numpy.logspace(0,3)
#x=numpy.linspace(200,1000)
#plt.plot(x,thermalize_cs(x,1), label="heating")
plt.plot(x,ionize_cs(x,1), label="H_i")
plt.plot(x,ionize_cs(x,2), label="He_i")
plt.plot(x,ionize_cs(x,3), label="He+_i")
plt.plot(x,excite_cs(x,1,1), label="H_es")
plt.plot(x,excite_cs(x,2,1), label="He_es")
plt.plot(x,excite_cs(x,3,1), label="He+_es")
plt.plot(x,excite_cs(x,1,2), label="H_ep")
plt.plot(x,excite_cs(x,2,2), label="He_ep")
plt.plot(x,excite_cs(x,3,2), label="He+_ep")
plt.xlabel('Energy (eV)')
plt.ylabel('Cross Section ($cm^2$)')
plt.legend(bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.)
plt.show()
# +
# Singly-differential cross section for H with fixed 20 eV energy transfer.
x=numpy.logspace(1,2)
y=x-numpy.array([20])
z=numpy.ones(len(x))
plt.plot(x,ionize_s_cs(x, y, z), label="H_i_sec")
plt.xlabel('Energy (eV)')
plt.ylabel('Cross Section ($cm^2$)')
plt.legend(bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.)
plt.show()
# -
| darkhistory/electrons/Electrons_v1.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
# # "[LinearAlgebra] CH02. Linear Transformation"
# > Linear algebra summary note.
#
# - toc: false
# - badges: false
# - comments: false
# - categories: [linear-algebra]
# - hide_{github,colab,binder,deepnote}_badge: true
# + [markdown] tags=[]
# #### 2.0. Elementary Definitions and Theorems
# + [markdown] tags=[]
# ##### Definition.2.1. Linear Transformation
# $ \mathbb{K} $상의 벡터공간 $V$와 $W$에 대해서, 다음 두 조건을 만족하는 함수 $T \,\ : \,\ V \rightarrow W$를 linear transformation이라고 한다.
# <br><br>
#
# $ ^\forall \mathbf{x}, \mathbf{y} \in V, \,\ ^\forall \alpha \in \mathbb{K}, $<br>
#
# 1. $ T(\mathbf{x} + \mathbf{y}) = T(\mathbf{x}) + T(\mathbf{y}) $
# 2. $ T(\alpha \mathbf{x}) = \alpha T(\mathbf{x}) $<br><br>
#
# 특히, 일차변환 $T \,\ : \,\ V \rightarrow W$가 전단사일때, 이를 linear isomorphism이라 한다. 그리고 이 때의 벡터공간 $V$와 $W$는 linear isomorphic이라고 하고, $ V \cong W $로 나타낸다.
# -
# __EX)__ <br>
#
# - Zero transformation
# - Identity transformation
# - Inverse transformation of linear transformation $ T $(Not always).
# - Matrix transformation
#
# $ \text{Let} \,\ A = [a_{ij}]_{m \times n} \,\ \text{for} \,\ a_{ij} \in \mathbb{K} $.<br>
# $\text{If} \,\ ^\forall \mathbf{x} \in \mathbb{K}^n, \,\ T_A(\mathbf{x}) = A\mathbf{x} $,
#
# $$
# T_A \,\ : \,\ \mathbb{K}^n \rightarrow \mathbb{K}^m
# $$
#
# - Differential transformation
# - Definite integral transformation
#
# #### 2.1. Symmetric Matrix and Orthogonal Matrix
# ##### Definition.2.2. Symmetric Matrix and Orthogonal Matrix
# $$
# \text{If} \,\ A^T = A \,\ \text{, then} \,\ A \,\ \text{is a symmetric matrix.}
# $$
#
# $$
# \text{If} \,\ A^T = A^{-1} \,\ \text{, then} \,\ A \,\ \text{is an orthogonal matrix.}
# $$
# ##### Theorem.2.1.
# $\text{Let} \,\ Q_1 = [q_{ij}^{(1)}]_{n \times n} \,\ \text{and} \,\ Q_2 = [q_{ij}^{(2)}]_{n \times n}$.<br>
#
# $$
# \text{If} \,\ Q_1, \,\ Q_2 \,\ \text{are orthogonal matrices, then the followings are true.}
# $$
#
# - $ \text{Every pairs of columns are orthogonal.} $
# - $ \text{Every columns are unit vector.} $
# - $ Q_1Q_2 \,\ \text{is also orthogonal matrix.} $
# - $ |Q| = 1 \,\ \text{or} \,\ |Q| = -1 $
#
# __Proof.__ <br>
# Trivial.
# ##### Theorem.2.2.
# $\text{Let} \,\ A \,\ \text{be a matrix.}$.<br>
#
# $$
# \text{Then,} \,\ A^TA \,\ \text{is a symmetric matrix.}
# $$
#
# - $ (i, i)\text{-element of} \,\ A^TA = A_{C_i}^TA_{C_i} \ge 0 $
#
#
# __Proof.__ <br>
# Trivial.
# #### 2.2. Eigenvalue and Eigenvector
# Consider $T_A \,\ : \,\ V \rightarrow V$ and following vector equation.
#
# $$
# T_A(\mathbf{x}) = A\mathbf{x} = \lambda \mathbf{x} \quad \text{for} \,\ \lambda \in \mathbb{R}.
# $$
#
# $ \text{Since} \,\ (A - \lambda I_n)\mathbf{x} = \mathbf{0} \Leftrightarrow A\mathbf{x} = \lambda \mathbf{x}, $<br>
# $ \text{by Basic Theorem of Algebra, above equation have} \,\ n \,\ \text{complex solutions in} \,\ V. $
# ##### Definition.2.3. Eigenvalue and Eigenvector
# $ \text{Let} \,\ T_A \,\ : \,\ V \rightarrow V \,\ \text{and} \,\ \text{correspond with matrix} \,\ A$.
#
# 1. For $\lambda \in \mathbb{K}, \,\ T(\mathbf{x}) = \lambda \mathbf{x}$ is called __characteristic equation of linear transform $T$__ .
# 2. In $T(\mathbf{x}) = \lambda \mathbf{x}$, $\lambda$ is called __eigenvalue__, and corresponding $\mathbf{x}(\neq \mathbf{0}, \in V)$ is called __eigenvector__ .
# ##### Theorem.2.3.
# $n$차 정방행렬 A와 정칙행렬 $N$에 대해서 $A, A^T, N^{-1}AN$의 고유치는 일치한다.
#
# __Proof.__ <br>
# Trivial.
# ##### Theorem.2.4.
# $\text{Let} \,\ A = [a_{ij}]_{n \times n}.$<br>
#
# $$
# \begin{matrix} \prod_{k = 1}^{n} \lambda_k = |A| \\ \sum_{k = 1}^{n} \lambda_k = \sum_{k = 1}^{n} a_{kk} \end{matrix}
# $$
#
# __Proof.__ <br>
# Trivial.
# #### 2.3. Diagonalization
# 유한차원 벡터공간 $V$의 일차변환 $T:V \rightarrow V$에 대해서, $T$의 행렬을 대각행렬로 만드는 $V$의 기저가 존재할까?
# 위의 문제는 __theorem.2.3.__ 에 의해서 다음 문제와 동치이다.
# 주어진 실정방행렬 $A$에 대해서, $N^{-1}AN$이 대각행렬이 되는 정칙행렬 $N$이 존재하는가? (복소정방행렬 $A$에 대해서는 $\bar{N}^{-1}AN$에 대해서 따진다.)
# ##### Definition.2.3. Diagonalizable
# $\text{Let} \,\ A = [a_{ij}]_{n \times n} \,\ \text{for} \,\ a_{ij} \in \mathbb{R}$. <br>
# $\text{If} \,\ \exists \,\ \text{invertible matrix} \,\ N \in \mathbb{M}_{n \times n}(\mathbb{R}) \quad s.t. \quad N^{-1}AN = diag(d_1, d_2, \cdots, d_n),$<br>
# $\text{then} \,\ A \,\ \text{is diagonalizable by} \,\ N$.
# ##### Theorem.2.5.
# $\text{Let} \,\ A = [a_{ij}]_{n \times n} \,\ \text{for} \,\ a_{ij} \in \mathbb{R}, \,\ \lambda_1, \lambda_2, \cdots, \lambda_n \,\ \text{are eigenvalues of matrix} \,\ A, \,\ \text{and} \,\ \mathbf{x}_1, \mathbf{x}_2, \cdots, \mathbf{x}_n \,\ \text{are corresponding eigenvectors of eigenvalues}$. <br>
# $\text{Assume that} \,\ {\mathbf{x}_1, \mathbf{x}_2, \cdots, \mathbf{x}_n} \,\ \text{are ordered basis of} \,\ \mathbb{R}^n$.<br>
# $\text{Let} \,\ N = [\mathbf{x}_1 \,\ \mathbf{x}_2 \,\ \cdots \,\ \mathbf{x}_n]$.<br>
# $\text{Then} \,\ N \,\ \text{is invertible and} \,\ A \,\ \text{is diagonalizable by} \,\ N$.<br>
# $\text{That is}$
#
# $$
# N^{-1}AN = diag(\lambda_1, \lambda_2, \cdots, \lambda_n)
# $$
# .
#
# - $ \text{If} \,\ \lambda_1, \lambda_2, \cdots, \lambda_n \,\ \text{are different with each other, then the eigenvectors} \,\ \mathbf{x}_1, \mathbf{x}_2, \cdots, \mathbf{x}_n \,\ \text{are linearly independent and diagonalizable.} $
# - $ \text{Eigenvector can be multiplied any scalar except zero.} $
#
# __Proof.__ <br>
# Trivial.
# ##### Theorem.2.6.
# $\text{Let} \,\ A \,\ \text{be a diagonalizable real matrix.}$<br>
# $\text{Then}$
#
# $$
# A^k = ND^kN^{-1} \quad \text{for} \,\ k \in \mathbb{Z}.
# $$
#
# __Proof.__ <br>
# Trivial.
# 유한차원 내적공간 $V$의 일차변환 $T:V \rightarrow V$에 대해서, $T$의 행렬을 대각행렬로 만드는 $V$의 정규직교기저가 존재할까?
# 위의 문제는 __theorem.2.3.__ 에 의해서 다음 문제와 동치이다.
# 주어진 실정방행렬 $A$에 대해서, $P^{-1}AP$이 대각행렬이 되는 직교행렬 $P$가 존재하는가? (복소정방행렬 $A$에 대해서는 $\bar{P}^{-1}AP$에 대해서 따진다.)
# ##### Definition.2.4. Orthogonally Diagonalizable
# 실정방행렬 $A$가 직교행렬 $P$에 의해서 대각화되면 A는 orthogonally diagonalizable이라고 한다.
# ##### Theorem.2.7.
# 실대칭행렬 $A$에 대해서, 서로 다른 고유치에 대응되는 고유벡터는 직교한다.
#
# __Proof.__ <br>
# $ \text{Let} \,\ A \,\ \text{be a real symmetric matrix and} \,\ \lambda_1, \lambda_2, \cdots, \lambda_n, \mathbf{x}_1, \mathbf{x}_2, \cdots, \mathbf{x}_n \,\ \text{are eigenvalues and corresponding eigenvectors.} $<br>
# $ \text{For} \,\ i \neq j, $<br>
# $$
# \begin{aligned}
# \lambda_i \mathbf{x}_i^T \mathbf{x}_j &= (A \mathbf{x}_i)^T \mathbf{x}_j \\
# &= \mathbf{x}_i^T A^T \mathbf{x}_j \\
# &= \mathbf{x}_i^T(A\mathbf{x}_j) \\
# &= \mathbf{x}_i^T (\lambda_j \mathbf{x}_j)
# \end{aligned}
# $$
#
# $ \text{Therefore,} \,\ (\lambda_i - \lambda_j)\mathbf{x}_i^T\mathbf{x}_j = 0. $<br>
# $ \text{Since} \,\ \lambda_i - \lambda_j \neq 0, \mathbf{x}_i^T\mathbf{x}_j = 0.$
#
# $$ \therefore \quad \mathbf{x}_i \perp \mathbf{x}_j \,\ \text{for} \,\ i \neq j \quad \blacksquare$$
#
# ##### Theorem.2.8.
# $n$차 실정방행렬 $A$에 대해서, $A$가 직교대각화가능일 필요충분조건은 $A$가 대칭행렬이다.
#
# __Proof.__ <br>
# Trivial.
# #### 2.3. Singular Value Decomposition
# ##### Definition.2.5. Positive Definite
# $ \text{Let} \,\ A \,\ \text{be a symmetric matrix.} $
#
# $$
# \text{If} \,\ ^\forall \mathbf{x} \neq \mathbf{0}, \,\ \mathbf{x}^T A \mathbf{x} > 0, \,\ \text{then} \,\ A \,\ \text{is called positive definite.}
# $$
#
# $$
# \text{If} \,\ ^\forall \mathbf{x} \neq \mathbf{0}, \,\ \mathbf{x}^T A \mathbf{x} \ge 0, \,\ \text{then} \,\ A \,\ \text{is called positive semidefinite.}
# $$
#
# $$
# \text{If} \,\ ^\forall \mathbf{x} \neq \mathbf{0}, \,\ \mathbf{x}^T A \mathbf{x} < 0, \,\ \text{then} \,\ A \,\ \text{is called negative definite.}
# $$
#
# $$
# \text{If} \,\ ^\forall \mathbf{x} \neq \mathbf{0}, \,\ \mathbf{x}^T A \mathbf{x} \le 0, \,\ \text{then} \,\ A \,\ \text{is called negative semidefinite.}
# $$
# ##### Theorem.2.9.
# 대칭행렬 $A$에 대하여 다음이 성립한다.
#
# - $A$가 Positive definite면 모든 $A$의 모든 고윳값은 양수이다.
# - $A$가 Positive semidefinite면 모든 $A$의 모든 고윳값은 음이 아닌 실수이다.
# - $A$가 Negative definite면 모든 $A$의 모든 고윳값은 음수이다.
# - $A$가 Negative semidefinite면 모든 $A$의 모든 고윳값은 양이 아닌 실수이다.
#
# __Proof.__ <br>
# Chapter03 이후
# ----
# ##### Application.2.1. Eigenvalue Decomposition(EVD)
# By above theorems.
# ##### Application.2.2. Singular Value Decomposition(SVD)
# Any matrix $A_{m \times n}$ can be decomposed as $ A = U_{m \times m} \Sigma_{m \times n} {V_{n \times n}}^T $<br>
# where $ AA^T = U \Sigma \Sigma^T U^T, \,\ A^TA = V \Sigma^T \Sigma V^T $
# ---
| _notebooks/math/linear-algebra/ch02-linear-transformation.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # コースの概要
#
# - Numpy
# - 科学計算のための基本的なパッケージ
# - アレイの作り方などを学びます(ベクトル、行列)
# - Pandas
# - PandasはPythonでデータを処理するために作られた超高機能なライブラリ
# - SeriesやDataFrameを使ったデータの処理方法と可視化についても学びます。
# - データの入出力
# - さまざまなフォーマットでデータを扱う方法を紹介
# - テキスト,CSV,Excel,JSON,HTMLをPythonで扱えるようになります。
# - データの可視化
# - MatplotlibとSeaborn
# - MatplotlibやSeaborn(ライブラリ群)を使ったデータの可視化は、ただ見えるようにするだけでなく、データ解析そのもの
# - 実際のデータ解析(基本的なスキルを身に着けたら、実際のデータ解析に応用します。)
# - タイタニック号
# - 株式市場
# - 米大統領選挙
# - 機械学習(Scikit-Learn)
# - 機械学習アルゴリズムを学びます。
# - Scikit-Learnを使います。
# - 線形回帰、ロジスティック回帰、SVM(サポートベクターマシン)、決定木とランダムフォレストなどを扱います。
# - 付録(コースの理解を助ける内容)
# - 統計入門
# - 統計に関する基本的な内容
# - Pythonのコードを使って、統計の理解を助けます。
# - SQLとPython
# - SQLはデータベース操作のための言語ですが、Pythonを使って、SQLの基本を学びます。
# - Webスクレイピング
# - Pythonを使ってWebからデータを取得する便利な方法を学習する
| Learning/.ipynb_checkpoints/dataScience_Python-checkpoint.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python [conda env:rqalpha]
# language: python
# name: conda-env-rqalpha-py
# ---
# +
#coding=utf-8
"""
@author: evilXu
@file: testFactor.py
@time: 2017/11/16 17:48
@description:
"""
from rqalpha.api import *
import traceback
from datetime import *
import pymysql
import pandas as pd
from rqalpha.utils.logger import user_log
def dependency():
    """Factor dependencies: this PE factor relies on no other factors."""
    return list()
def compute(startdt, enddt, context):
    """Compute the PE (price/earnings) factor over a date range.

    Queries LC_DIndicesForValuation joined with secumain from the JYDB
    MySQL database and pivots the rows into a wide DataFrame: one row per
    trading day, one column per security code.

    Parameters
    ----------
    startdt, enddt : datetime
        Inclusive date range for the query.
    context :
        Factor context; context.config corresponds to the 'extra' section
        of the configuration and provides the JYDB host.

    Returns
    -------
    pd.DataFrame
        Indexed by 'date'; empty DataFrame when no rows matched.
        Returns None when the query itself fails.
    """
    user_log.info("testFactor compute")
    # context.config corresponds to the 'extra' part of the configuration.
    _cnnConf = {"host": context.config.jydb, "port": 3306, "db": "jydb", "user": "liangh", "passwd": "<PASSWORD>"}
    jyConn = pymysql.connect(host=_cnnConf.get("host"), port=_cnnConf.get("port"),
                             db=_cnnConf.get("db"), user=_cnnConf.get("user"),
                             passwd=_cnnConf.get("passwd"), charset='utf8')
    _category = [1, ]
    _sectors = [1, 2, 6]
    # NOTE(review): values are interpolated into the SQL text directly; they
    # are all internal constants/dates here, but parameterized queries would
    # be safer if any of these ever came from external input.
    _sql = "SELECT p.TradingDay,p.PE,a.SecuCode,a.SecuMarket" \
           " FROM LC_DIndicesForValuation as p inner join secumain as a " \
           "on a.innerCode=p.innerCode where a.SecuMarket in (83,90) " \
           "and a.SecuCategory in (%s) and a.ListedSector in (%s) " \
           "and a.ListedState!=9 and p.TradingDay between '%s' and '%s'" % (
               ",".join([str(i) for i in _category]), ",".join([str(i) for i in _sectors])
               , startdt.strftime('%Y-%m-%d'),
               enddt.strftime('%Y-%m-%d'))
    _res_tmp = []
    try:
        _cursor = jyConn.cursor()
        _cursor.execute(_sql)
        for _row in _cursor:
            _res_tmp.append({
                "code": _row[2] + "." + market(_row[3]),
                "date": _row[0],
                "value": float(_row[1])
            })
    except Exception:
        traceback.print_exc()
        return
    finally:
        # Bug fix: the connection was never closed, leaking it on every call.
        jyConn.close()
    _res_tmp = sorted(_res_tmp, key=lambda x: x['date'])
    # Pivot the date-sorted rows into one {date, code: value, ...} dict per
    # trading day.
    _res = []
    if len(_res_tmp) > 0:
        _lastDt = _res_tmp[0]['date']
        _adt = {"date": _lastDt}
        for item in _res_tmp:
            if item['date'] != _lastDt:
                _res.append(_adt)
                _lastDt = item['date']
                _adt = {"date": _lastDt}
            # Bug fix: the original only updated in the else branch, so the
            # first row of every new trading day was silently dropped.
            _adt.update({item['code']: item['value']})
        if len(_adt) > 1:
            _res.append(_adt)
    if len(_res) < 1:
        return pd.DataFrame()
    return pd.DataFrame(_res).set_index(['date'])
def market(market=90):
    """Map a JYDB SecuMarket code to an exchange suffix.

    83 -> 'XSHG' (Shanghai), 90 -> 'XSHE' (Shenzhen); any other code maps
    to an empty string.
    """
    return {83: 'XSHG', 90: 'XSHE'}.get(market, "")
if __name__ == "__main__":
    # Smoke test: compute the factor for June 2017 and append it to the
    # on-disk factor store.
    from rqalpha.utils import RqAttrDict
    config= {"factor_data_path":"E:\\evilAlpha\\test","factor_data_init_date":"2017-01-01","extra":{"jydb":"172.18.44.5"}}
    conf = RqAttrDict(config)
    from rqalpha.mod.rqalpha_mod_alphaStar_factors.factor_context import FactorContext
    context = FactorContext(conf)
    context.registerDepending(dependency())
    fValue = compute(datetime(2017,6,8),datetime(2017,7,1),context)
    # _fValue_aDay = fValue.iloc[0]
    print(fValue)
    print(fValue.index)
    from rqalpha.mod.rqalpha_mod_alphaStar_factors.factor_data import FactorData
    FactorData(fname="testFactor",path=conf.factor_data_path,defaultInitDate = conf.factor_data_init_date).append(fValue)
# +
# evaluateFileDemo: evaluate this factor file over June 2017
from rqalpha.mod.rqalpha_mod_alphaStar_factors import evaluate_file
config = {
    "base": {
        "start_date": "2017-06-01",
        "end_date": "2017-7-01",
    },
}
factor_file_path = "./testFactor.ipynb"
evaluate_file(factor_file_path=factor_file_path,config=config,config_file = "../config_factor.yml")
# -
# IPython-magic variant of the same evaluation
# %load_ext rqalpha.mod.rqalpha_mod_alphaStar_factors
# %evaluate -s 20170101 -e 20170131 -f ./testFactor.ipynb -rt r --config ../config_factor.yml
| ipynbs/factors/.ipynb_checkpoints/testFactor-checkpoint.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: ucsb_python3
# language: python
# name: env
# ---
# +
import pandas as pd
# %matplotlib inline
def plot_hist(thing):
    """Coerce *thing* to numeric values and draw a histogram of them."""
    numeric = pd.to_numeric(thing)
    numeric.hist()
# -
# Load LeBron's per-game log and drop rows for games he did not play.
df = pd.read_csv("lebron.csv")
df = df[~df.GS.str.contains("Did Not Play")]
df = df[~df.GS.str.contains("Inactive")]
df.tail()
# Histogram of field goals made per game (the column arrives as strings).
fg = pd.to_numeric(df.FG)
fg.hist()
df.columns
# Histogram of points per game via the helper above.
plot_hist(df.PTS)
| misc/lebron.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import qcportal as ptl
import os
import cmiles
from openeye import oechem
# # Get some random molecule to use as an example
# Connect to the public QCArchive server and fetch a torsion-drive dataset.
client = ptl.FractalClient()
ds = client.get_collection("TorsionDriveDataset", "OpenFF Gen 2 Torsion Set 1 Roche 2")
ds.status(["default"], collapse=False, status="COMPLETE")
# +
oemol = None
dihedrals = None
# take one example molecule (note: index[1] is actually the second entry)
tdr = ds.df.loc[ds.df.index[1], "default"]
# grid point with the lowest final energy of the torsion drive
min_idx = min(tdr.final_energy_dict, key=tdr.final_energy_dict.get)
record = tdr.get_history(min_idx, minimum=True)
# get optimized molecule of the record
qc_mol = record.get_final_molecule()
# convert the qcelemental molecule to an OpenEye molecule
qcjson_mol = qc_mol.dict(encoding='json')
oemol = cmiles.utils.load_molecule(qcjson_mol)
# the four atom indices of the driven torsion
dihedrals = tdr.keywords.dihedrals[0]
dihedrals, oemol
# -
# -
# # Get wbo from xtb
def get_xtb_wbo(oemol, idx1, idx2):
    '''
    Compute the Wiberg bond order (WBO) between two atoms with the xtb CLI.

    oemol: OEMol to calculate the wbo for
    idx1: 0-based index of the first atom
    idx2: 0-based index of the second atom
    returns a float for the wbo between the atoms specified by idx1, idx2
    raises Exception when xtb produced no entry for the atom pair
    '''
    # xtb creates extra scratch files; run it inside a temporary folder so
    # they are easy to clean up afterwards
    tmpdir = "_xtbtmpdir"
    if not os.path.exists(tmpdir):
        os.makedirs(tmpdir)
    os.chdir(tmpdir)
    try:
        # write the molecule to a temporary SDF file for xtb to read
        tmpfile = '_xtbtmp.sdf'
        ofs = oechem.oemolostream()
        if not ofs.open(tmpfile):
            # BUGFIX: the original referenced an undefined name `outfile`
            # here, which would raise NameError instead of the intended error
            oechem.OEThrow.Fatal("Unable to open %s for writing" % tmpfile)
        # work on a copy so the caller's molecule is untouched
        molcopy = oechem.OEMol(oemol)
        # add title
        molcopy.SetTitle("xD")
        #molcopy.SetCoords(qc_mol.geometry.flatten()/1.6299)
        # write molecule to file
        oechem.OEWriteConstMolecule(ofs, molcopy)
        os.system("cp _xtbtmp.sdf ..")
        ofs.close()
        # run xtb
        # NOTE(review): "--charge-0" looks like a typo for "--charge 0";
        # confirm the intended total charge is actually applied.
        os.system("xtb _xtbtmp.sdf --charge-0 > out")
    finally:
        # always restore the working directory, even if writing failed
        os.chdir("../")
    # xtb output is 1-based, so we add 1 to our 0-based indices
    idxs = (idx1 + 1, idx2 + 1)
    wbo = None
    # xtb writes a file called "wbo" with one "atom atom order" line per bond
    with open(f"{tmpdir}/wbo", "r") as wbofile:
        for line in wbofile:
            atom1, atom2, wbo_ = line.split()
            atom1 = int(atom1)
            atom2 = int(atom2)
            # the pair may appear in either order
            if (atom1, atom2) == idxs or (atom2, atom1) == idxs:
                wbo = float(wbo_)
                break
    if wbo is None:
        # should not happen for bonded atoms, but fail loudly just in case
        raise Exception(f"No wbo for indices {idx1}, {idx2}")
    # delete the scratch folder -- comment out to examine the xtb outputs
    os.system(f"rm -rf {tmpdir}")
    return wbo
get_xtb_wbo(oemol, dihedrals[1], dihedrals[2])
# # qcengine makes this easy
# +
import qcelemental as qcel
import qcengine as qcng
def get_xtb_wbo_qce(qcmol, idx1, idx2):
    """Run a GFN2-xTB single-point energy through qcengine and return the
    Mayer bond order between atoms idx1 and idx2 from the result extras."""
    task = qcel.models.AtomicInput(
        molecule=qcmol,
        driver="energy",
        model={"method": "GFN2-xTB"},
    )
    # single point energy calculation via the qcengine "xtb" harness
    outcome = qcng.compute(task, "xtb")
    return outcome.extras["xtb"]["mayer_indices"][idx1, idx2]
# -
get_xtb_wbo_qce(qc_mol, dihedrals[1], dihedrals[2])
# # but are they actually the same?
# +
# Re-run both WBO routes over every COMPLETE torsion drive in the dataset
# and collect the values for comparison.
xtb = []
qce = []
xs = []
for i, index in enumerate(ds.df.index):
    # get the record of each entry
    tdr = ds.get_record(name=index, specification='default')
    if tdr.status == "COMPLETE":
        xs.append(i)
        # lowest-energy grid point of the torsion drive
        min_idx = min(tdr.final_energy_dict, key=tdr.final_energy_dict.get)
        record = tdr.get_history(min_idx, minimum=True)
        # get optimized molecule of the record
        qc_mol = record.get_final_molecule()
        # convert the qcelemental molecule to an OpenEye molecule
        qcjson_mol = qc_mol.dict(encoding='json')
        oemol = cmiles.utils.load_molecule(qcjson_mol)
        # get torsion atoms
        dihedrals = tdr.keywords.dihedrals[0]
        # WBO via the file-based xtb wrapper vs. via qcengine
        xtb_ = get_xtb_wbo(oemol, dihedrals[1], dihedrals[2])
        qce_ = get_xtb_wbo_qce(qc_mol, dihedrals[1], dihedrals[2])
        xtb.append(xtb_)
        qce.append(qce_)
# -
import matplotlib.pyplot as plt
# BUGFIX: `np` is used below but numpy was never imported in this notebook,
# so this cell raised NameError; import it here.
import numpy as np

# Plot the per-molecule difference between the file-based and qcengine WBOs.
plt.scatter(xs, np.array(xtb) - np.array(qce))
plt.show()
| benchmarking_ez/wbo_xtb.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Post-Processing Exploration
# **Purpose:**
#
# This notebook explores different post-processing methods to evaluate the clustering outputs from the RecSys
#
# **Methodology:**
#
# The notebook assumes input from the RecSys. It will explore the timeseries cluster probabilities to evaluate the dynamics/change in users.
#
# **Author:**
#
# <NAME> (@prajnasoni)
# ## Testing Analysis Class
# +
import sys
import os
import pandas as pd
import scipy.sparse
### IMPORTANT ###
# Make sure you are correctly appending the path
# Otherwise the imports will not work!
sys.path.append("/Users/pvs262/Documents/rec-sys-dynamics/code")
from src.analysis.cluster import movielens, cluster, analysis, post_process
from src.analysis.simulate import simulate
# -
# ### Run Simulation using simulate.py
# To use simulate class,
# 1. Initiate simulate object. Each simulate object is specific to an algorithm ('ease','cosin' or 'mf) and a dataset (check Datasets). You can run simulations in series with the same dataset + algos.
#
# testx = simulate(algo, dataset)
#
# Note:
# * dataset has to be a string which exactly matches the dataset folders in the directory.
#
# 2. Start simulation with the following inputs.
#
# testx.run_dynamics(n_i, n_u, n_r, steps, prob_explore = 0.2, svd_threshold=0.3, n_clusters=3).
#
# Note:
# * n_i is new_items, n_u is new_users, n_r is num_recs, steps is the number of simulation iterations
# * The default probability to explore is 0.2, and the default svd_threshold is 0.3.
# * The default clustering algorithm is gmm, using 'full' covariance type and clustering into 3 groups. If you want to cluster into 2 groups, make sure to change the default value in run_dynamics().
# Cosine-similarity simulation on the small test dataset:
# 5 new items per step, 30 recommendations, 10 steps, 2 clusters.
test1 = simulate('cosin', 'Small_Test_Dataset')
#simulate.run_dynamics(n_i, n_u, n_r, steps)
test1_output = test1.run_dynamics(n_i=5, n_u=0, n_r=30, steps=10, n_clusters=2)
# +
# Persist the simulation output as gzipped pickles.
# NOTE(review): os.makedirs raises if the directory already exists
# (exist_ok not set), and the per-step filenames are missing a '.'
# before 'pkl' -- confirm both are intended.
directory = '../simulation_runs/test1_output/'
os.makedirs(directory)
# save updated ratings
test1_output[2].to_pickle(directory+'final_UI.pkl.gzip', compression = 'gzip')
for i in range(len(test1_output[0])):
    test1_output[0][i].to_pickle(directory+'L'+str(i)+'pkl.gzip', compression = 'gzip')
    test1_output[1][i].to_pickle(directory+'R'+str(i)+'pkl.gzip', compression = 'gzip')
#test1_output.to_pickle("../simulation_runs/test1_output.pkl.gzip",compression = 'gzip')
# pd.read_pickle()
# -
# store original output
test1_old_output = test1_output
# Inspect and plot the cluster assignments over the simulation steps.
run1 = analysis(test1_output[1])
for i in range(len(test1_output[1])):
    print(test1_output[1][i].cluster.unique())
run1.rename_cluster(1,200);
run1.cluster_populations()
run1.plot_counts()
run1.plot_percent()
# FOR All_Neutral
run = simulate('cosin', 'All_Neutral')
#simulate.run_dynamics(n_i, n_u, n_r, steps)
run_output = run.run_dynamics(n_i=10, n_u=0, n_r=30, steps=5, n_clusters = 2)
# save the plot_counts() and plot_percent pngs
analyse = analysis(run_output[1])
analyse.rename_cluster(1,1000)
analyse.plot_counts(show=False, loc=run.run_name+'/counts.png')
analyse.plot_percent(show=False, loc=run.run_name+'/percent.png')
# ### Run times for different thresholds (GMM)
# %%time
print("Threshold = 0.1, Clustering = Gaussian Mixture Modelling")
# NOTE(review): `UI` (presumably the user-item matrix) is not defined in this
# notebook -- it must come from an earlier workflow step; confirm before running.
cluster0 = cluster(UI,0.1)
proba0_g = cluster0.gmm(n=3,covariance_type="full",df="proba",svd=True)
# indices 1 and 943 appear to be the synthetic extreme-left / extreme-right
# users; a sane clustering must keep them apart
if proba0_g['cluster'][1] == proba0_g['cluster'][943]:
    print("Error: Left and Right placed in same cluster.")
else:
    print("Cluster Success.")
# %%time
print("Threshold = 0.5, Clustering = Gaussian Mixture Modelling")
cluster1 = cluster(UI,0.5)
proba1_g = cluster1.gmm(n=3,covariance_type="full",df="proba",svd=True)
if proba1_g['cluster'][1] == proba1_g['cluster'][943]:
    print("Error: Left and Right placed in same cluster.")
else:
    print("Cluster Success.")
# %%time
print("Threshold = 0.7, Clustering = Gaussian Mixture Modelling")
cluster2 = cluster(UI,0.7)
proba2_g = cluster2.gmm(n=3,covariance_type="full",df='proba',svd=True)
if proba2_g['cluster'][1] == proba2_g['cluster'][943]:
    print("Error: Left and Right placed in same cluster.")
else:
    print("Cluster Success.")
# %%time
print("Threshold = 0.9, Clustering = Gaussian Mixture Modelling")
cluster3 = cluster(UI,0.9)
proba3_g = cluster3.gmm(n=3,covariance_type="full",df="proba",svd=True)
if proba3_g['cluster'][1] == proba3_g['cluster'][943]:
    print("Error: Left and Right placed in same cluster.")
else:
    print("Cluster Success.")
# %%time
print("Threshold = 0.95, Clustering = Gaussian Mixture Modelling")
cluster4 = cluster(UI,0.95)
proba4_g = cluster4.gmm(n=3,covariance_type="full",df="proba",svd=True)
if proba4_g['cluster'][1] == proba4_g['cluster'][943]:
    print("Error: Left and Right placed in same cluster.")
else:
    print("Cluster Success.")
# %%time
print("Threshold = N/A - we're using the complete UI matrix, Clustering = Gaussian Mixture Modelling")
clusterUI = cluster(UI,1)
probaUI_g = clusterUI.gmm(n=3,covariance_type="full",df="proba",svd=False)
if probaUI_g['cluster'][1] == probaUI_g['cluster'][943]:
    print("Error: Left and Right placed in same cluster.")
else:
    print("Cluster Success.")
# ### Run times for different thresholds (K-Means)
# %%time
print("Threshold = 0.1, Clustering = KMeans")
# KMeans counterpart of the GMM runs above, reusing the same cluster objects.
proba0_k = cluster0.kmeans(n=3,df="pred",svd=True)
if proba0_k['cluster'][1] == proba0_k['cluster'][943]:
    print("Error: Left and Right placed in same cluster.")
else:
    print("Cluster Success.")
# %%time
print("Threshold = 0.5, Clustering = KMeans")
proba1_k = cluster1.kmeans(n=3,df="pred",svd=True)
if proba1_k['cluster'][1] == proba1_k['cluster'][943]:
    print("Error: Left and Right placed in same cluster.")
else:
    print("Cluster Success.")
# %%time
print("Threshold = 0.7, Clustering = KMeans")
proba2_k = cluster2.kmeans(n=3,df="pred",svd=True)
if proba2_k['cluster'][1] == proba2_k['cluster'][943]:
    print("Error: Left and Right placed in same cluster.")
else:
    print("Cluster Success.")
# %%time
print("Threshold = 0.9, Clustering = KMeans")
proba3_k = cluster3.kmeans(n=3,df="pred",svd=True)
if proba3_k['cluster'][1] == proba3_k['cluster'][943]:
    print("Error: Left and Right placed in same cluster.")
else:
    print("Cluster Success.")
# %%time
print("Threshold = 0.95, Clustering = KMeans")
proba4_k = cluster4.kmeans(n=3,df="pred",svd=True)
if proba4_k['cluster'][1] == proba4_k['cluster'][943]:
    print("Error: Left and Right placed in same cluster.")
else:
    print("Cluster Success.")
# %%time
print("Threshold = N/A - we're using the complete UI matrix, Clustering = KMeans")
probaUI_k = clusterUI.kmeans(n=3,df="pred",svd=False)
if probaUI_k['cluster'][1] == probaUI_k['cluster'][943]:
    print("Error: Left and Right placed in same cluster.")
else:
    print("Cluster Success.")
# visualise
print("KMeans with 0.95 explained variance for SVD")
# NOTE(review): the label above says KMeans but 'gmm' is passed to
# plot_scatter -- verify which clustering is actually being plotted.
cluster4.plot_scatter(True, 'gmm')
# ### TestSet - Investigate why the clustering is wrong
# Load the pre-generated test iterations (CSVs 80-99) into a list of frames.
test_list = []
for i in range(80,100):
    df = pd.read_csv ('/Users/pvs262/Documents/rec-sys-dynamics/datasets/Testset/'+str(i)+'.csv')
    df = df.drop(columns='Unnamed: 0')
    df.index += 1  # shift to 1-based ids to match the simulation output
    test_list.append(df)
#df = pd.read_csv (r'/datasets/80.csv')
test_list[0]['0']
# Inspect one iteration through the post-processing helper.
p1 = post_process(test_list, test_list, test_list[0])
test_list[0]
p1.examine(2, 'kmeans')
#p1.plot_percent()
# # Appendix
# +
# Import the libraries we will be using
import os
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import logging
from sklearn import metrics
# Test cluster dataset creation
# Append cluster porbabilities in a 3D Array [user, cluster_probas, sim_time]
A = np.array([[0.1,0.8,0.1],[0.9,0.0,0.1]])
B = np.array([[0.3,0.3,0.4],[0.5,0.1,0.4]])
stack = np.dstack((np.atleast_3d(A),np.atleast_3d(B)))
# +
from sklearn.decomposition import TruncatedSVD, PCA
# Dynamic PCA based on threshold
def svd(self, threshold):
    """Pick the number of SVD components whose singular-value ratio (relative
    to the largest singular value) exceeds ``threshold``, then re-fit with
    that many components.

    NOTE(review): appendix scratch code -- both fit_transform results are
    discarded and nothing is stored on ``self``, so the function currently
    has no effect beyond the index shift below; presumably the reduced
    matrix was meant to be kept. TODO confirm intended behavior.
    """
    SVD = TruncatedSVD(n_components = (len(self.UI)-1), algorithm = 'arpack')
    SVD.fit_transform(self.UI)
    # count leading components whose ratio to the top singular value
    # stays above the threshold
    n = 1
    for i in range(1,len(SVD.singular_values_)):
        if (SVD.singular_values_[i]/SVD.singular_values_[0]) > threshold:
            n += 1
        else:
            break
    SVD = TruncatedSVD(n_components = n, algorithm = 'arpack')
    SVD.fit_transform(self.UI)
    # NOTE(review): shifting the index here looks unrelated to the SVD -- verify
    self.data.index += 1
    return None
# -
from sklearn.decomposition import TruncatedSVD, PCA
# %%time
pca = PCA(n_components = 0.5, svd_solver='auto')
pca.fit_transform(cluster0.UI)
pca.explained_variance_ratio_
#pca.singular_values_
len(pca.explained_variance_ratio_)
def rename_cluster(proba, left_id, right_id):
    """Relabel clusters in each iteration frame to the canonical -1/0/1 scheme.

    left_id / right_id index the extreme-left and extreme-right users of the
    synthetic dataset; the midpoint user anchors the neutral group. Frames
    are modified in place and the list is returned.
    """
    mid_id = (left_id + right_id) / 2
    for step in range(len(proba)):
        frame = proba[step]
        # identify which raw cluster label each anchor user landed in
        groupA = frame.loc[left_id, 'cluster']
        print("groupA = " + str(groupA))
        groupB = frame.loc[right_id, 'cluster']
        print("groupB = " + str(groupB))
        groupC = frame.loc[mid_id, 'cluster']
        print("groupC = " + str(groupC))
        if len(frame.columns) > 2:
            # map the probability columns onto the canonical labels
            frame.rename(columns={'proba_C' + str(groupA): 1,
                                  'proba_C' + str(groupB): -1,
                                  'proba_C' + str(groupC): 0}, inplace=True)
            print("columns renamed.")
            # map the cluster assignments onto the canonical labels
            frame['cluster'] = frame['cluster'].replace([groupA, groupB, groupC], [1, -1, 0])
            print(frame.cluster.unique())
    #clusters = [1,-1,0]
    return proba
| notebooks/post-processing.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # The Best Learning Rate
# In this notebook, we are interested by the best learning rate (named **epsilon**). The following program will reach it by testing different epsilon values, and by getting their winning rate. First, let import the **AI_Tictactoe.py** program :
# %pylab inline
from AI_Tictactoe import *
# To find the best learning rate, the program will train different AI with different learning rates. The different learning rates are contained in **listEpsilon**, and the learning process is in the **training()** function.
def training(nbTraining, epsilon):
    """Train two fresh players against each other for *nbTraining* games and
    return a deep copy of their merged experience dictionary."""
    player_x = Player('X', isIntelligent=False, learningRate=epsilon)
    player_o = Player('O', isIntelligent=False, learningRate=epsilon)
    # start from a blank slate: wipe any experience set at construction
    player_x.experience = {}
    player_o.experience = {}
    for _ in range(nbTraining):
        PlayTicTacToe(player_x, player_o, aiTraining=True)
    # merge O's experience into X's, then detach it from the player objects
    player_x.experience.update(player_o.experience)
    #player_x.Display_Experience()
    return copy.deepcopy(player_x.experience)
# Candidate learning rates: 21 evenly spaced values in [0, 1].
listEpsilon = np.linspace(0,1,21)
print(listEpsilon)
# Then, we train different AI with the different learning rates. (**Warning** : this process can be long, in the current parametrization, the function PlayTicTacToe() is called **231 000** (= (10 000 + 1 000) * 21)
# +
listWinningRate = []
experience = {}
for epsilon in listEpsilon :
    print('For epsilon = {}. '.format(epsilon), end='')
    # WinningRate comes from the star import of AI_Tictactoe above
    experience = training(10000, epsilon)
    listWinningRate.append(WinningRate(experience))
    experience = {}
# -
# Finally, we are looking for the best learning rate, by using **argmax()** function.
# +
plot(listEpsilon,listWinningRate)
bestLearningRate = listEpsilon[argmax(listWinningRate)]
print('The argmax is :', bestLearningRate)
print('It is the best learning rate we had for 10 000 traning games')
# -
| Learning_Rate.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 2
# language: python
# name: python2
# ---
# # Fast PDE/IE course, Skoltech, Spring 2015
# ## Midterm study guide
#
# For the midterm you should be able to:
#
# 1. Finite differences (FD)
# - write down basic FD approximations to differential operators, equations, and boundary/initial conditions
# - determine the order of approximation (and prove)
# - judge whether an initial-value problem is stable (or for what parameters it is stable)
# - understand how to implement FD on a computer
# 2. Finite elements (FE)
# - fomulate a (boundary-value problem for a) PDE in a weak form
# - formulate a FE problem, finite element space (piecewise linear functions)
# - write down the stiffness matrix in simple cases
# - understand how to implement FE on a computer
# 3. Spectral methods (SM)
# - formulate a spectral method for a given PDE
# - write down the stiffness matrix in simple cases
# - understand how to implement SM on a computer
# 4. Multigrid
# - understand the idea of multigrid: smoothing, restriction, prolongation
# - understand how to choose the $A_h$ operators on a coarse grid from a fine grid
# - formulate the smoothing property in terms of high/low frequences
# + run_control={"breakpoint": false, "read_only": false}
from IPython.core.display import HTML
def css_styling():
    """Read the course stylesheet and return it as renderable HTML.

    BUGFIX: the original left the file handle open; use a context manager so
    it is closed deterministically.
    """
    with open("./styles/alex.css", "r") as style_file:
        return HTML(style_file.read())
css_styling()
| MT_study_guide.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Reading Dutch Datasets
#
# Pastas comes with several helpful functions for reading data from files from common data sources in the Netherlands. These sources include:
#
# - [Dinoloket](https://www.dinoloket.nl/ondergrondgegevens): for groundwater and surface water level timeseries.
# - [KNMI](http://projects.knmi.nl/klimatologie/daggegevens/selectie.cgi): for precipitation and evaporation timeseries
# - Waterbase: for surface water level timeseries from Rijkswaterstaat.
# - [Menyanthes](https://www.kwrwater.nl/tools-producten/menyanthes/): read files created by Menyanthes
#
# The functionality for reading files from Dinoloket is shown in this notebook.
#
# First, start with the necessary imports.
# + tags=[]
import pandas as pd
import matplotlib.pyplot as plt
import pastas as ps
ps.show_versions()
# -
# For reading groundwater level timeseries there is the `read_dino` function. For river level gauge data there is the `read_dino_level_gauge` function. These functions are available at the module level. The code below loads these timeseries from CSV files in the data directory.
# + tags=[]
# Groundwater levels (Dinoloket) and river level gauge data used below.
gw_levels = ps.read_dino(r'../data/B49F0555001_1.csv')
river_levels = ps.read_dino_level_gauge(r'../data/P43H0001.csv')
# -
# Note that these `pastas.Timeseries` objects contain metadata, i.e. for the `river_levels`:
river_levels.metadata
# The resulting `pastas.TimeSeries` come with their own plot methods, to quickly visualize the data:
ax = gw_levels.plot()
river_levels.plot()
# As can be observed in the plot above, something went wrong with the registration of the river levels in the field. The plot shows a huge downward shift at the end of 2014. The negative values from the end of 2014 onwards are assumed to be correct. The positive values were registered incorrectly (missing a minus sign).
#
# We fix the timeseries by updating the `TimeSeries` attribute called `series_original`. The `update_series` method is called in order to renew derived attributes.
river_levels.series_original = river_levels.series_original.abs() * -1 # set positive values to negative
river_levels.update_series()
# Plot the timeseries again, to see if the applied fix looks reasonable:
# + tags=["nbsphinx-thumbnail"]
gw_levels.plot()
river_levels.plot()
# -
# Another option for loading river levels is the `DinoPeilschaal` reader class. This creates a `DinoPeilschaal` object containing the data and the metadata. These are accessible through the `data` and `meta` attributes, respectively.
#
# The difference with the previous method is that the original data is available as a pandas.DataFrame when using this method.
#
river_levels_obj = ps.read.dinoloket.DinoPeilschaal(r'../data/P43H0001.csv')
# View the original data:
river_levels_obj.data.head()
# The metadata:
river_levels_obj.meta
| examples/notebooks/13_reading_dutch_datasets.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import cv2
import numpy as np
import imutils

# Path of the input file; pass 0 instead of a file name to read the camera.
video = 'videoplayback.mp4'

# Create a VideoCapture object and read from the input file.
cap = cv2.VideoCapture(video)
cnt = 0  # running index used to name the extracted frame files

# Check if the capture opened successfully.
if cap.isOpened() == False:
    print("Error opening video stream or file")

# NOTE(review): the first frame is read and discarded here (first_frame is
# never used afterwards) -- confirm skipping it is intentional.
ret, first_frame = cap.read()

# Read until the video is completed or the user presses 'q'.
while cap.isOpened():
    # Capture frame-by-frame.
    ret, frame = cap.read()
    if ret == True:
        # removing scorecard: keep only the top 800 rows
        roi = frame[:800, :]
        # cropping a horizontal band from the center of the image
        thresh = 600
        end = roi.shape[1] - thresh
        roi = roi[:, thresh:end]
        cv2.imshow("image", roi)
        # Press Q on keyboard to exit.
        if cv2.waitKey(25) & 0xFF == ord('q'):
            break
        cv2.imwrite('frames/' + str(cnt) + '.png', roi)
        cnt = cnt + 1
    # Break the loop once the stream is exhausted.
    else:
        break

# BUGFIX: release the capture handle; the original leaked it.
cap.release()
cv2.destroyAllWindows()
# -
# %tb
| Assignments/A1 Submissions/Avinandan_A1.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Univariate time series classification with sktime
#
# In this notebook, we will use sktime for univariate time series classification. Here, we have a single time series variable and an associated label for multiple instances. The goal is to find a classifier that can learn the relationship between time series and label and accurately predict the label of new series.
#
# When you have multiple time series variables and want to learn the relationship between them and a label, you can take a look at our [multivariate time series classification notebook](https://github.com/alan-turing-institute/sktime/blob/master/examples/03_classification_multivariate.ipynb).
# ## Preliminaries
# +
import matplotlib.pyplot as plt
import numpy as np
from sklearn.metrics import accuracy_score
from sklearn.model_selection import train_test_split
from sklearn.pipeline import Pipeline
from sklearn.tree import DecisionTreeClassifier
from sktime.classification.compose import TimeSeriesForestClassifier
from sktime.datasets import load_arrow_head
from sktime.utils.slope_and_trend import _slope
# -
# ## Load data
#
# In this notebook, we use the [arrow head problem](https://timeseriesclassification.com/description.php?Dataset=ArrowHead).
#
# The arrowhead dataset consists of outlines of the images of arrow heads. The classification of projectile points is an important topic in anthropology. The classes are based on shape distinctions such as the presence and location of a notch in the arrow.
#
# <img src="./img/arrow-heads.png" width="400" alt="arrow heads">
#
# The shapes of the projectile points are converted into a sequence using the angle-based method as described in this [blog post](https://izbicki.me/blog/converting-images-into-time-series-for-data-mining.html) about converting images into time series for data mining.
#
# <img src="./img/from-shapes-to-time-series.png" width="400" alt="from shapes to time series">
# ### Data representation
# Throughout sktime, the expected data format is a `pd.DataFrame`, but in a slightly unusual format. A single column can contain not only primitives (floats, integers or strings), but also entire time series in form of a `pd.Series` or `np.array`.
#
# For more details on our choice of data container, see this [wiki entry](https://github.com/alan-turing-institute/sktime/wiki/Time-series-data-container).
X, y = load_arrow_head(return_X_y=True)
X_train, X_test, y_train, y_test = train_test_split(X, y)
print(X_train.shape, y_train.shape, X_test.shape, y_test.shape)
# univariate time series input data
X_train.head()
# binary target variable
labels, counts = np.unique(y_train, return_counts=True)
print(labels, counts)
fig, ax = plt.subplots(1, figsize=plt.figaspect(0.25))
for label in labels:
X_train.loc[y_train == label, "dim_0"].iloc[0].plot(ax=ax, label=f"class {label}")
plt.legend()
ax.set(title="Example time series", xlabel="Time");
# ## Why not just use scikit-learn?
#
# We can still use scikit-learn, but using scikit-learn comes with some implicit modelling choices.
#
# ### Reduction: from time-series classification to tabular classification
#
# To use scikit-learn, we have to convert the data into the required tabular format. There are different ways we can do that:
#
# #### Treating time points as separate features (tabularisation)
# Alternatively, we could bin and aggregate observations in time bins of different length.
# +
from sklearn.ensemble import RandomForestClassifier
from sktime.utils.data_processing import from_nested_to_2d_array
X_train_tab = from_nested_to_2d_array(X_train)
X_test_tab = from_nested_to_2d_array(X_test)
X_train_tab.head()
# +
# let's get a baseline for comparison
from sklearn.dummy import DummyClassifier
classifier = DummyClassifier(strategy="prior")
classifier.fit(X_train_tab, y_train)
classifier.score(X_test_tab, y_test)
# -
# now we can apply any scikit-learn classifier
classifier = RandomForestClassifier(n_estimators=100)
classifier.fit(X_train_tab, y_train)
y_pred = classifier.predict(X_test_tab)
accuracy_score(y_test, y_pred)
# +
from sklearn.pipeline import make_pipeline
# with sktime, we can write this as a pipeline
from sktime.transformations.panel.reduce import Tabularizer
classifier = make_pipeline(Tabularizer(), RandomForestClassifier())
classifier.fit(X_train, y_train)
classifier.score(X_test, y_test)
# -
# What's the implicit modelling choice here?
#
# > We treat each observation as a separate feature and thus ignore they are ordered in time. A tabular algorithm cannot make use of the fact that features are ordered in time, i.e. if we changed the order of the features, the fitted model and predictions wouldn't change. Sometimes this works well, sometimes it doesn't.
#
# #### Feature extraction
#
# Another modelling choice: we could extract features from the time series and then use the features to fit our tabular classifier. Here we use [tsfresh](https://tsfresh.readthedocs.io) for automatic feature extraction.
# +
from sktime.transformations.panel.tsfresh import TSFreshFeatureExtractor
transformer = TSFreshFeatureExtractor(default_fc_parameters="minimal")
extracted_features = transformer.fit_transform(X_train)
extracted_features.head()
# -
classifier = make_pipeline(
TSFreshFeatureExtractor(show_warnings=False), RandomForestClassifier()
)
classifier.fit(X_train, y_train)
classifier.score(X_test, y_test)
# What's the implicit modelling choice here?
#
# > Instead of working in the domain of the time series, we extract features from time series and choose to work in the domain of the features. Again, sometimes this works well, sometimes it doesn't. The main difficulty is finding discriminative features for the classification problem.
#
#
# ## Time series classification with sktime
# sktime has a number of specialised time series algorithms.
#
# ### Time series forest
# Time series forest is a modification of the random forest algorithm to the time series setting:
#
# 1. Split the series into multiple random intervals,
# 2. Extract features (mean, standard deviation and slope) from each interval,
# 3. Train a decision tree on the extracted features,
# 4. Ensemble steps 1 - 3.
#
#
# For more details, take a look at the [paper](https://www.sciencedirect.com/science/article/pii/S0020025513001473).
#
# In sktime, we can write:
# +
from sktime.transformations.panel.summarize import RandomIntervalFeatureExtractor

# Single time-series tree: random intervals -> (mean, std, slope) features
# -> decision tree. `_slope` is assumed imported earlier in this notebook.
steps = [
    (
        "extract",
        RandomIntervalFeatureExtractor(
            n_intervals="sqrt", features=[np.mean, np.std, _slope]
        ),
    ),
    ("clf", DecisionTreeClassifier()),
]
time_series_tree = Pipeline(steps)
# -
# We can directly fit and evaluate the single time series tree (which is simply a pipeline).
time_series_tree.fit(X_train, y_train)
time_series_tree.score(X_test, y_test)
# For time series forest, we can simply use the single tree as the base estimator in the forest ensemble.
# Ensemble the single tree: 100 bootstrapped trees with out-of-bag scoring.
tsf = TimeSeriesForestClassifier(
    estimator=time_series_tree,
    n_estimators=100,
    criterion="entropy",
    bootstrap=True,
    oob_score=True,       # enables the OOB estimate printed below
    random_state=1,
    n_jobs=-1,            # use all cores
)
# Fit and obtain the out-of-bag score:
# +
tsf.fit(X_train, y_train)
if tsf.oob_score:
    print(tsf.oob_score_)
# -
# Default-configured forest for comparison on the held-out test set.
tsf = TimeSeriesForestClassifier()
tsf.fit(X_train, y_train)
tsf.score(X_test, y_test)
# We can also obtain feature importances for the different features and intervals that the algorithms looked at and plot them in a feature importance graph over time.
# Temporal feature importances: a per-timepoint curve (not per-column values
# as in plain sklearn forests), so it can be plotted over time.
fi = tsf.feature_importances_
fig, ax = plt.subplots(1, figsize=plt.figaspect(0.25))
fi.plot(ax=ax)
ax.set(xlabel="Time", ylabel="Feature importance");
# #### More about feature importances
#
# The feature importances method is based on the example showcased in [this paper](https://arxiv.org/abs/1302.2277).
#
# In addition to the feature importances method [available in scikit-learn](https://scikit-learn.org/stable/auto_examples/ensemble/plot_forest_importances.html), our method collects the feature importances values from each estimator for their respective intervals, calculates the sum of feature importances values on each timepoint, and normalises the values first by the number of estimators and then by the number of intervals.
#
# As a result, the temporal importance curves can be plotted, as shown in the previous example.
#
# Please note that this method currently supports only one particular structure of the tsf, where RandomIntervalFeatureExtractor() is used in the pipeline, or simply the default TimeSeriesForestClassifier() setting. For instance, two possible approaches could be:
# +
# Method 1: Default time-series forest classifier
tsf1 = TimeSeriesForestClassifier()
tsf1.fit(X_train, y_train)
fi1 = tsf1.feature_importances_
fig, ax = plt.subplots(1, figsize=plt.figaspect(0.25))
fi1.plot(ax=ax)

# Method 2: Pipeline
# Same structure expressed explicitly: interval extractor + decision tree
# as the base estimator of the forest.
features = [np.mean, np.std, _slope]
steps = [
    ("transform", RandomIntervalFeatureExtractor(features=features)),
    ("clf", DecisionTreeClassifier()),
]
base_estimator = Pipeline(steps)
tsf2 = TimeSeriesForestClassifier(estimator=base_estimator)
tsf2.fit(X_train, y_train)
fi2 = tsf2.feature_importances_
fig, ax = plt.subplots(1, figsize=plt.figaspect(0.25))
fi2.plot(ax=ax);
# -
# ### RISE
#
# Another popular variant of time series forest is the so-called Random Interval Spectral Ensemble (RISE), which makes use of several series-to-series feature extraction transformers, including:
#
# * Fitted auto-regressive coefficients,
# * Estimated autocorrelation coefficients,
# * Power spectrum coefficients.
# +
from sktime.classification.interval_based import RandomIntervalSpectralForest

# RISE: ensemble over random intervals with spectral/autocorrelation features.
# Only 10 estimators here to keep the notebook fast.
rise = RandomIntervalSpectralForest(n_estimators=10)
rise.fit(X_train, y_train)
rise.score(X_test, y_test)
# -
# ### K-nearest-neighbours classifier for time series
# For time series, the most popular k-nearest-neighbours algorithm is based on [dynamic time warping](https://en.wikipedia.org/wiki/Dynamic_time_warping) (dtw) distance measure.
#
# <img src="img/dtw.png" width=500 />
#
# Here we look at the [BasicMotions data set](http://www.timeseriesclassification.com/description.php?Dataset=BasicMotions). The data was generated as part of a student project where four students performed four activities whilst wearing a smart watch. The watch collects 3D accelerometer and 3D gyroscope data. It consists of four classes, which are walking, resting, running and badminton. Participants were required to record motion a total of five times, and the data is sampled once every tenth of a second, for a ten second period.
# +
from sktime.datasets import load_basic_motions

# Keep only the first dimension (column 0) of the multivariate data so the
# univariate classifiers below can be applied.
X, y = load_basic_motions(return_X_y=True)
X_train, X_test, y_train, y_test = train_test_split(X.iloc[:, [0]], y)
print(X_train.shape, y_train.shape, X_test.shape, y_test.shape)
# -
# Class distribution of the training split.
labels, counts = np.unique(y_train, return_counts=True)
print(labels, counts)
# One example series per class, overlaid.
fig, ax = plt.subplots(1, figsize=plt.figaspect(0.25))
for label in labels:
    X_train.loc[y_train == label, "dim_0"].iloc[0].plot(ax=ax, label=label)
plt.legend()
ax.set(title="Example time series", xlabel="Time");
# All training instances for the first two classes, one figure per class.
for label in labels[:2]:
    fig, ax = plt.subplots(1, figsize=plt.figaspect(0.25))
    for instance in X_train.loc[y_train == label, "dim_0"]:
        ax.plot(instance)
    ax.set(title=f"Instances of {label}")
# from sklearn.neighbors import KNeighborsClassifier
# knn = make_pipeline(
# Tabularizer(),
# KNeighborsClassifier(n_neighbors=1, metric="euclidean"))
# knn.fit(X_train, y_train)
# knn.score(X_test, y_test)
# +
from sktime.classification.distance_based import KNeighborsTimeSeriesClassifier

# 1-NN with dynamic time warping distance, the classical TSC baseline.
knn = KNeighborsTimeSeriesClassifier(n_neighbors=1, metric="dtw")
knn.fit(X_train, y_train)
knn.score(X_test, y_test)
# -
# ### Other classifiers
# To find out what other algorithms we have implemented in sktime, you can use our utility function:
# +
from sktime.utils import all_estimators

# List every classifier registered in this sktime version.
all_estimators(estimator_types="classifier")
| examples/02_classification_univariate.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] id="HykKFEeAoXan"
# # Burgers Optimization with a Physics-Informed NN
#
# To illustrate how the physics-informed losses work, let's consider a reconstruction task
# as an inverse problem example.
# We'll use Burgers equation $\frac{\partial u}{\partial{t}} + u \nabla u = \nu \nabla \cdot \nabla u$ as a simple yet non-linear equation in 1D, for which we have a series of _observations_ at time $t=0.5$.
# The solution should fulfill the residual formulation for Burgers equation and match the observations.
# In addition, let's impose Dirichlet boundary conditions $u=0$
# at the sides of our computational domain, and define the solution in
# the time interval $t \in [0,1]$.
#
# Note that similar to the previous forward simulation example,
# we will still be sampling the solution with 128 points ($n=128$), but now we have a discretization via the NN. So we could also sample points in between without having to explicitly choose a basis function for interpolation. The discretization via the NN now internally determines how to use its degrees of freedom to arrange the activation functions as basis functions. So we have no direct control over the reconstruction.
# [[run in colab]](https://colab.research.google.com/github/tum-pbs/pbdl-book/blob/main/physicalloss-code.ipynb)
#
#
# + [markdown] id="g9TYIr2loXar"
# ## Formulation
#
# In terms of the $x,y^*$ notation from {doc}`overview-equations` and the previous section, this reconstruction problem means we are solving
#
# $$
# \text{arg min}_{\theta} \sum_i ( f(x_i ; \theta)-y^*_i )^2 + R(x_i) ,
# $$
#
# where $x$ and $y^*$ are solutions of $u$ at different locations in space and time. As we're dealing with a 1D velocity, $x,y^* \in \mathbb{R}$.
# They both represent two-dimensional solutions
# $x(p_i,t_i)$ and $y^*(p_i,t_i)$ for a spatial coordinate $p_i$ and a time $t_i$, where the index $i$ sums over a set of chosen $p_i,t_i$ locations at which we evaluate the PDE and the approximated solutions. Thus $y^*$ denotes a reference $u$ for $\mathcal{P}$ being Burgers equation, which $x$ should approximate as closely as possible. Thus our neural network representation of $x$ will receive $p,t$ as input to produce a velocity solution at the specified position.
#
# The residual function $R$ above collects additional evaluations of $f(;\theta)$ and its derivatives to formulate the residual for $\mathcal{P}$. This approach -- using derivatives of a neural network to compute a PDE residual -- is typically called a _physics-informed_ approach, yielding a _physics-informed neural network_ (PINN) to represent a solution for the inverse reconstruction problem.
#
# Thus, in the formulation above, $R$ should simply converge to zero above. We've omitted scaling factors in the objective function for simplicity. Note that, effectively, we're only dealing with individual point samples of a single solution $u$ for $\mathcal{P}$ here.
#
#
# + [markdown] id="3mh1Lf_XoXas"
# ## Preliminaries
#
# Let's just load phiflow with the tensorflow backend for now, and initialize the random sampling. (_Note: this example uses an older version 1.5.1 of phiflow._)
#
#
# + colab={"base_uri": "https://localhost:8080/"} id="L30jgXowoXat" outputId="bcceefc0-fb33-44c7-959c-0b742e5a1606"
# !pip install --quiet phiflow==1.5.1

# Star-import brings in tf, math, Session, Domain, Burgers, gradients, etc.
from phi.tf.flow import *
import numpy as np

#rnd = TF_BACKEND # for phiflow: sample different points in the domain each iteration
# NumPy backend: random sample points are drawn once and stay fixed over training.
rnd = math.choose_backend(1) # use same random points for all iterations
# + [markdown] id="JvVRcVfUoXat"
# We're importing phiflow here, but we won't use it to compute a solution to the PDE as in {doc}`overview-burgers-forw`. Instead, we'll use the derivatives of an NN (as explained in the previous section) to set up a loss formulation for training.
#
# Next, we set up a simple NN with 8 fully connected layers and `tanh` activations with 20 units each.
#
# We'll also define the `boundary_tx` function which gives an array of constraints for the solution (all for $t=0.5$ in this example), and the `open_boundary` function which stores constraints for $x= \pm1$ being 0.
# + id="AwWNtbm_oXau"
def network(x, t):
    """Fully connected NN approximating u(x, t): 8 hidden tanh layers, 20 units each.

    With reuse=tf.AUTO_REUSE every call shares the same variables (3021
    parameters in total), so the network can be evaluated on boundary points,
    interior sample points and the output grid without re-allocating weights.
    """
    # Stack the two scalar inputs into the last axis -> shape (..., 2).
    y = math.stack([x, t], axis=-1)
    for i in range(8):
        y = tf.layers.dense(y, 20, activation=tf.math.tanh, name='layer%d' % i, reuse=tf.AUTO_REUSE)
    # Linear output layer: one velocity value per sample point.
    return tf.layers.dense(y, 1, activation=None, name='layer_out', reuse=tf.AUTO_REUSE)
def boundary_tx(N):
    """Sample N data constraints at t=0.5.

    Returns (x, t, u): positions, constant time 0.5, and reference velocities.
    `u` is a precomputed 128-point forward-simulation solution of Burgers
    equation at t=0.5; a random permutation selects N of the 128 grid points.
    """
    x = np.linspace(-1,1,128)
    # precomputed solution from forward simulation:
    u = np.asarray( [0.008612174447657694, 0.02584669669548606, 0.043136357266407785, 0.060491074685516746, 0.07793926183951633, 0.0954779141740818, 0.11311894389663882, 0.1308497114054023, 0.14867023658641343, 0.1665634396808965, 0.18452263429574314, 0.20253084411376132, 0.22057828799835133, 0.23865132431365316, 0.25673879161339097, 0.27483167307082423, 0.2929182325574904, 0.3109944766354339, 0.3290477753208284, 0.34707880794585116, 0.36507311960102307, 0.38303584302507954, 0.40094962955534186, 0.4188235294008765, 0.4366357052408043, 0.45439856841363885, 0.4720845505219581, 0.4897081943759776, 0.5072391070000235, 0.5247011051514834, 0.542067187709797, 0.5593576751669057, 0.5765465453632126, 0.5936507311857876, 0.6106452944663003, 0.6275435911624945, 0.6443221318186165, 0.6609900633731869, 0.67752574922899, 0.6939334022562877, 0.7101938106059631, 0.7263049537163667, 0.7422506131457406, 0.7580207366534812, 0.7736033721649875, 0.7889776974379873, 0.8041371279965555, 0.8190465276590387, 0.8337064887158392, 0.8480617965162781, 0.8621229412131242, 0.8758057344502199, 0.8891341984763013, 0.9019806505391214, 0.9143881632159129, 0.9261597966464793, 0.9373647624856912, 0.9476871303793314, 0.9572273019669029, 0.9654367940878237, 0.9724097482283165, 0.9767381835635638, 0.9669484658390122, 0.659083299684951, -0.659083180712816, -0.9669485121167052, -0.9767382069792288, -0.9724097635533602, -0.9654367970450167, -0.9572273263645859, -0.9476871280825523, -0.9373647681120841, -0.9261598056102645, -0.9143881718456056, -0.9019807055316369, -0.8891341634240081, -0.8758057205293912, -0.8621229450911845, -0.8480618138204272, -0.833706571569058, -0.8190466131476127, -0.8041372124868691, -0.7889777195422356, -0.7736033858767385, -0.758020740007683, -0.7422507481169578, -0.7263049162371344, -0.7101938950789042, -0.6939334061553678, -0.677525822052029, -0.6609901538934517, -0.6443222327338847, -0.6275436932970322, -0.6106454472814152, -0.5936507836778451, -0.5765466491708988,
    -0.5593578078967361, -0.5420672759411125, -0.5247011730988912, -0.5072391580614087, -0.4897082914472909, -0.47208460952428394, -0.4543985995006753, -0.4366355580500639, -0.41882350871539187, -0.40094955631843376, -0.38303594105786365, -0.36507302109186685, -0.3470786936847069, -0.3290476440540586, -0.31099441589505206, -0.2929180880304103, -0.27483158663081614, -0.2567388003912687, -0.2386513127155433, -0.22057831776499126, -0.20253089403524566, -0.18452269630486776, -0.1665634500729787, -0.14867027528284874, -0.13084990929476334, -0.1131191325854089, -0.09547794429803691, -0.07793928430794522, -0.06049114408297565, -0.0431364527809777, -0.025846763281087953, -0.00861212501518312] );
    # Constant time coordinate t=0.5 for every sampled point.
    t = np.asarray(rnd.ones_like(x)) * 0.5
    perm = np.random.permutation(128)
    return (x[perm])[0:N], (t[perm])[0:N], (u[perm])[0:N]
def _ALT_t0(N): # alternative, impose original initial state at t=0
    """Unused alternative to boundary_tx: N random points at t=0 with u = -sin(pi*x)."""
    x = rnd.random_uniform([N], -1, 1)
    t = rnd.zeros_like(x)
    u = - math.sin(np.pi * x)
    return x, t, u
def open_boundary(N):
    """Dirichlet boundary constraints: N/2 points at x=+1 and N/2 at x=-1,
    random times in [0,1], with target velocity u=0."""
    t = rnd.random_uniform([N], 0, 1)
    x = math.concat([math.zeros([N//2]) + 1, math.zeros([N//2]) - 1], axis=0)
    u = math.zeros([N])
    return x, t, u
# + [markdown] id="4qpLH7XEoXaw"
# Most importantly, we can now also construct the residual loss function `f` that we'd like to minimize in order to guide the NN to retrieve a solution for our model equation. As can be seen in the equation at the top, we need derivatives w.r.t. $t$, $x$ and a second derivative for $x$. The first three lines of `f` below do just that.
#
# Afterwards, we simply combine the derivatives to form Burgers equation. Here we make use of phiflow's `gradient` function:
# + id="E2LrmmT4oXaw"
def f(u, x, t):
    """Burgers-equation residual u_t + u*u_x - nu*u_xx with nu = 0.01/pi.

    All derivatives are taken through the NN graph via phiflow's `gradients`,
    so minimizing this expression is the physics-informed loss term.
    """
    du_dt = gradients(u, t)
    du_dx = gradients(u, x)
    d2u_dx2 = gradients(du_dx, x)
    viscosity = 0.01 / np.pi
    return du_dt + u * du_dx - viscosity * d2u_dx2
# + [markdown] id="TWR6OvKboXax"
# Next, let's set up the sampling points in the inner domain, such that we can compare the solution with the previous forward simulation in phiflow.
#
# The next cell allocates two tensors: `grid_x` will cover the size of our domain, i.e., the -1 to 1 range, with 128 cells, while `grid_t` will sample the time interval $[0,1]$ with 33 time stamps.
#
# The last `math.expand_dims()` call simply adds another `batch` dimension, so that the resulting tensor is compatible with the following examples.
#
#
# + colab={"base_uri": "https://localhost:8080/"} id="xxLRbUQzoXay" outputId="a2504aae-26dc-48a3-a5fa-12c6539f3f62"
# Space-time evaluation grid: 128 points in x over [-1,1], 33 stamps in t over [0,1].
N=128
grids_xt = np.meshgrid(np.linspace(-1, 1, N), np.linspace(0, 1, 33), indexing='ij')
grid_x, grid_t = [tf.convert_to_tensor(t, tf.float32) for t in grids_xt]

# create 4D tensor with batch and channel dimensions in addition to space and time
# in this case gives shape=(1, N, 33, 1)
grid_u = math.expand_dims(network(grid_x, grid_t))
# + [markdown] id="3iPGedVcoXaz"
# Now, `grid_u` contains a full graph to evaluate our NN at $128 \times 33$ positions, and returns the results in a $[1,128,33,1]$ array once we run it through `session.run`. Let's give this a try: we can initialize a TF session, evaluate `grid_u` and show it in an image, just like the phiflow solution we computed previously.
#
# (Note, we'll use the `show_state` as in {doc}`overview-burgers-forw`. Hence, the x axis does not show actual simulation time, but is showing 32 steps "blown" up by a factor of 16 to make the changes over time easier to see in the image.)
# + colab={"base_uri": "https://localhost:8080/", "height": 356} id="MHYmnuiEoXaz" outputId="7f48e8b7-97f3-442c-bf67-e1deb3aff6ea"
import pylab as plt

print("Size of grid_u: "+format(grid_u.shape))

# TF1-style session; variables must be initialized before evaluating grid_u.
session = Session(None)
session.initialize_variables()

def show_state(a, title):
    """Show a [1, x, t, 1] state array as an image (x vertical, time horizontal).

    Each time step is duplicated 16x along the last axis so the 33 stamps
    become visible as wide columns.
    """
    for i in range(4): a = np.concatenate( [a,a] , axis=3)   # 2^4 = 16 copies
    a = np.reshape( a, [a.shape[1],a.shape[2]*a.shape[3]] )  # drop batch, flatten t*copies
    fig, axes = plt.subplots(1, 1, figsize=(16, 5))
    im = axes.imshow(a, origin='upper', cmap='inferno')
    plt.colorbar(im) ; plt.xlabel('time'); plt.ylabel('x'); plt.title(title)

print("Randomly initialized network state:")
show_state(session.run(grid_u),"Uninitialized NN")
# + [markdown] id="amnB9mIBoXa0"
# This visualization already shows a smooth transition over space and time. So far, this is purely the random initialization of the NN that we're sampling here. So it has nothing to do with a solution of our PDE-based model up to now.
#
# The next steps will actually evaluate the constraints in terms of data (from the `boundary` functions), and the model constraints from `f` to retrieve an actual solution to the PDE.
# + [markdown] id="-PHuAh0UoXa1"
# ## Loss function and training
#
# As objective for the learning process we can now combine the _direct_ constraints, i.e., the solution at $t=0.5$ and the Dirichlet $u=0$ boundary conditions with the loss from the PDE residuals. For both boundary constraints we'll use 100 points below, and then sample the solution in the inner region with an additional 1000 points.
#
# The direct constraints are evaluated via `network(x, t)[:, 0] - u`, where `x` and `t` are the space-time location where we'd like to sample the solution, and `u` provides the corresponding ground truth value.
#
# For the physical loss points, we have no ground truth solutions, but we'll only evaluate the PDE residual via the NN derivatives, to see whether the solution satisfies the PDE model. If not, this directly gives us an error to be reduced via an update step in the optimization. The corresponding expression is of the form `f(network(x, t)[:, 0], x, t)` below. Note that for both data and physics terms the `network()[:, 0]` expressions don't remove any data from the $L^2$ evaluation, they simply discard the last size-1 dimension of the $(n,1)$ tensor returned by the network.
# + colab={"base_uri": "https://localhost:8080/"} id="bfZvwnFooXa1" outputId="f0188a14-5922-4faa-b1aa-fb6482bd81f3"
# Boundary loss
# Data constraints: 100 points at t=0.5 (boundary_tx) plus 100 points on the
# x=+-1 Dirichlet boundaries (open_boundary), concatenated per component.
N_SAMPLE_POINTS_BND = 100
x_bc, t_bc, u_bc = [math.concat([v_t0, v_x], axis=0) for v_t0, v_x in zip(boundary_tx(N_SAMPLE_POINTS_BND), open_boundary(N_SAMPLE_POINTS_BND))]
x_bc, t_bc, u_bc = np.asarray(x_bc,dtype=np.float32), np.asarray(t_bc,dtype=np.float32) ,np.asarray(u_bc,dtype=np.float32)

#with app.model_scope():
loss_u = math.l2_loss(network(x_bc, t_bc)[:, 0] - u_bc) # normalizes by first dimension, N_bc

# Physics loss inside of domain
# 1000 random interior points; no ground truth here, only the PDE residual f.
N_SAMPLE_POINTS_INNER = 1000
x_ph, t_ph = tf.convert_to_tensor(rnd.random_uniform([N_SAMPLE_POINTS_INNER], -1, 1)), tf.convert_to_tensor(rnd.random_uniform([N_SAMPLE_POINTS_INNER], 0, 1))
loss_ph = math.l2_loss(f(network(x_ph, t_ph)[:, 0], x_ph, t_ph)) # normalizes by first dimension, N_ph

# Combine
ph_factor = 1.
loss = loss_u + ph_factor * loss_ph # allows us to control the relative influence of loss_ph

optim = tf.train.GradientDescentOptimizer(learning_rate=0.02).minimize(loss)
#optim = tf.train.AdamOptimizer(learning_rate=0.001).minimize(loss) # alternative, but not much benefit here
# + [markdown] id="rJBkiuECoXa2"
# The code above just initializes the evaluation of the loss, we still didn't do any optimization steps, but we're finally in a good position to get started with this.
#
# Despite the simple equation, the convergence is typically very slow. The iterations themselves are fast to compute, but this setup needs a _lot_ of iterations. To keep the runtime in a reasonable range, we only do 10k iterations by default below (`ITERS`). You can increase this value to get better results.
# + colab={"base_uri": "https://localhost:8080/"} id="64g5NVHAoXa3" outputId="b0425572-351c-4877-9d49-a83ae9a91a91"
# Re-initialize so training starts from fresh random weights.
session.initialize_variables()

import time
start = time.time()

ITERS = 10000
for optim_step in range(ITERS+1):
    _, loss_value = session.run([optim, loss])
    # Log the first few steps and then every 1000th.
    if optim_step<3 or optim_step%1000==0:
        print('Step %d, loss: %f' % (optim_step,loss_value))
        #show_state(grid_u)

end = time.time()
print("Runtime {:.2f}s".format(end-start))
# + [markdown] id="5NLrym59oXa4"
# The training can take a significant amount of time, around 2 minutes on a typical notebook, but at least the error goes down significantly (roughly from around 0.2 to ca. 0.03), and the network seems to successfully converge to a solution.
#
# Let's show the reconstruction of the network, by evaluating the network at the centers of a regular grid, so that we can show the solution as an image. Note that this is actually fairly expensive, we have to run through the whole network with a few thousand weights for all of the $128 \times 32$ sampling points in the grid.
#
# It looks pretty good on first sight, though. There's been a very noticeable change compared to the random initialization shown above:
#
# + colab={"base_uri": "https://localhost:8080/", "height": 323} id="JXcISF40oXa4" outputId="afa97e6c-4d63-433a-aa45-fa29c2c7b17e"
# Visualize the trained network's solution on the full space-time grid.
show_state(session.run(grid_u),"After Training")
# + [markdown] id="JD-WnU-1oXa5"
# ---
#
# ## Evaluation
#
# Let's compare solution in a bit more detail. Here are the actual sample points used for constraining the solution (at time step 16, $t=1/2$) shown in gray, versus the reconstructed solution in blue:
#
# + colab={"base_uri": "https://localhost:8080/", "height": 332} id="A95iq_2_oXa6" outputId="59c9373e-b56b-47dd-ed7f-de7830a6c43e"
# Evaluate the trained network once on the full grid -> shape [1, 128, 33, 1].
u = session.run(grid_u)

# solution is imposed at t=1/2 , which is 16 in the array
BC_TX = 16
uT = u[0,:,BC_TX,0]

# NN reconstruction (blue line) vs. the sampled reference constraints (gray dots).
fig = plt.figure().gca()
fig.plot(np.linspace(-1,1,len(uT)), uT, lw=2, color='blue', label="NN")
fig.scatter(x_bc[0:100], u_bc[0:100], color='gray', label="Reference")
plt.title("Comparison at t=1/2")
plt.xlabel('x'); plt.ylabel('u'); plt.legend()
# + [markdown] id="dBbSjnJ8oXa6"
# Not too bad at the sides of the domain (the Dirichlet boundary conditions $u=0$ are fulfilled), but the shock in the center (at $x=0$) is not well represented.
#
# Let's check how well the initial state at $t=0$ was reconstructed. That's the most interesting, and toughest part of the problem (the rest basically follows from the model equation and boundary conditions, given the first state).
#
# It turns out that the accuracy of the initial state is actually not that good: the blue curve from the PINN is quite far away from the constraints via the reference data (shown in gray)... The solution will get better with larger number of iterations, but it requires a surprisingly large number of iterations for this fairly simple case.
#
# + colab={"base_uri": "https://localhost:8080/", "height": 332} id="4n5dAMhBoXa7" outputId="e6c0e4a2-b5d3-476f-f349-f2b1ca75917a"
# ground truth solution at t0
# Analytic initial condition u(x,0) = -sin(pi*x) vs. the NN at time index 0.
t0gt = np.asarray( [ [-math.sin(np.pi * x) * 1.] for x in np.linspace(-1,1,N)] )
velP0 = u[0,:,0,0]

fig = plt.figure().gca()
fig.plot(np.linspace(-1,1,len(velP0)), velP0, lw=2, color='blue', label="NN")
fig.plot(np.linspace(-1,1,len(t0gt)), t0gt, lw=2, color='gray', label="Reference")
plt.title("Comparison at t=0")
plt.xlabel('x'); plt.ylabel('u'); plt.legend()
# + [markdown] id="7m382fpVoXa8"
# Especially the maximum / minimum at $x=\pm 1/2$ are far off, and the boundaries at $x=\pm 1$ are not fulfilled: the solution is not at zero.
#
# We have the forward simulator for this simulation, so we can use the $t=0$ solution of the network to
# evaluate how well the temporal evolution was reconstructed. This measures how well the temporal evolution of the model equation was captured via the soft constraints of the PINN loss.
#
# The graph below shows the initial state in blue, and two evolved states at $t=8/32$ and $t=15/32$. Note that this is all from the simulated version, we'll show the learned version next.
#
# (Note: The code segments below also have some optional code to show the states at `[STEPS//4]`. It's commented out by default, you can uncomment or add additional ones to visualize more of the time evolution if you like.)
# + colab={"base_uri": "https://localhost:8080/", "height": 348} id="v9uHDCL0oXa8" outputId="a52cca99-fc5b-44de-aadb-e66c02ea3388"
# re-simulate with phiflow from solution at t=0
# NOTE(review): INITIAL is sliced at index BC_TX (t=0.5), so the re-simulation
# appears to start from the constrained state at t=0.5, running STEPS=16 steps
# of dt=1/32 to t=1; the plot labels below are relative to that start. Confirm
# against the surrounding text, which speaks of the t=0 solution.
DT = 1./32.
STEPS = 32-BC_TX # depends on where BCs were imposed
INITIAL = u[...,BC_TX:(BC_TX+1),0] # np.reshape(u0, [1,len(u0),1])
print(INITIAL.shape)

DOMAIN = Domain([N], boundaries=PERIODIC, box=box[-1:1])
state = [BurgersVelocity(DOMAIN, velocity=INITIAL, viscosity=0.01/np.pi)]
physics = Burgers()

# Step the classical solver forward, keeping every intermediate state.
for i in range(STEPS):
    state.append( physics.step(state[-1],dt=DT) )

# we only need "velocity.data" from each phiflow state
vel_resim = [x.velocity.data for x in state]

fig = plt.figure().gca()
pltx = np.linspace(-1,1,len(vel_resim[0].flatten()))
fig.plot(pltx, vel_resim[ 0].flatten(), lw=2, color='blue', label="t=0")
#fig.plot(pltx, vel_resim[STEPS//4].flatten(), lw=2, color='green', label="t=0.125")
fig.plot(pltx, vel_resim[STEPS//2].flatten(), lw=2, color='cyan', label="t=0.25")
fig.plot(pltx, vel_resim[STEPS-1].flatten(), lw=2, color='purple',label="t=0.5")
#fig.plot(pltx, t0gt, lw=2, color='gray', label="t=0 Reference") # optionally show GT, compare to blue
plt.title("Resimulated u from solution at t=0")
plt.xlabel('x'); plt.ylabel('u'); plt.legend()
# + [markdown] id="71n_TaJkoXa9"
# And here is the PINN output from `u` at the same time steps:
#
# + colab={"base_uri": "https://localhost:8080/", "height": 348} id="tetNobgfoXa9" outputId="c385a596-c24d-4164-8b4b-8d39014bf5ce"
# The PINN's own prediction at every one of the 33 time stamps.
velP = [u[0,:,x,0] for x in range(33)]
print(velP[0].shape)

# Same time offsets as the re-simulation plot above, for visual comparison.
fig = plt.figure().gca()
fig.plot(pltx, velP[BC_TX+ 0].flatten(), lw=2, color='blue', label="t=0")
#fig.plot(pltx, velP[BC_TX+STEPS//4].flatten(), lw=2, color='green', label="t=0.125")
fig.plot(pltx, velP[BC_TX+STEPS//2].flatten(), lw=2, color='cyan', label="t=0.25")
fig.plot(pltx, velP[BC_TX+STEPS-1].flatten(), lw=2, color='purple',label="t=0.5")
plt.title("NN Output")
plt.xlabel('x'); plt.ylabel('u'); plt.legend()
# + [markdown] id="qx6rOskWoXa-"
# Judging via eyeball norm, these two versions of $u$ look quite similar, but not surprisingly the errors grow over time and there are significant differences. Especially the steepening of the solution near the shock at $x=0$ is not "captured" well. It's a bit difficult to see in these two graphs, though, let's quantify the error and show the actual difference:
# + colab={"base_uri": "https://localhost:8080/", "height": 348} id="lWP_Rf2aoXa_" outputId="e6bb27b6-9020-44d1-f499-e1a8547b11cf"
# Mean absolute difference between the ground-truth re-simulation and the
# PINN's states over the same STEPS time stamps.
error = np.sum( np.abs( np.asarray(vel_resim[0:16]).flatten() - np.asarray(velP[BC_TX:BC_TX+STEPS]).flatten() )) / (STEPS*N)
print("Mean absolute error for re-simulation across {} steps: {:7.5f}".format(STEPS,error))

# Per-point error curves; labels are absolute simulation times
# (index BC_TX = 16 of 33 stamps on [0,1] corresponds to t=0.5).
fig = plt.figure().gca()
fig.plot(pltx, (vel_resim[0       ].flatten()-velP[BC_TX         ].flatten()), lw=2, color='blue',  label="t=0.5")  # fixed: was mislabeled "t=5"
fig.plot(pltx, (vel_resim[STEPS//4].flatten()-velP[BC_TX+STEPS//4].flatten()), lw=2, color='green', label="t=0.625")
fig.plot(pltx, (vel_resim[STEPS//2].flatten()-velP[BC_TX+STEPS//2].flatten()), lw=2, color='cyan',  label="t=0.75")
fig.plot(pltx, (vel_resim[STEPS-1 ].flatten()-velP[BC_TX+STEPS-1 ].flatten()), lw=2, color='purple',label="t=1")
plt.title("u Error")
plt.xlabel('x'); plt.ylabel('MAE'); plt.legend()
# + [markdown] id="XDI_7eK8oXbA"
# The code above will compute a mean absolute error of ca. $1.5 \cdot 10^{-2}$ between ground truth re-simulation and the PINN evolution, which is significant for the value range of the simulation.
#
# And for comparison with the forward simulation and following cases, here are also all steps over time with a color map.
# + colab={"base_uri": "https://localhost:8080/", "height": 370} id="2Trf5FSOoXbB" outputId="ec10bceb-fca2-4e96-d66d-609feed5e913"
# show re-simulated solution again as full image over time
# Stack the per-step states along time, append a channel axis for show_state.
sn = np.concatenate(vel_resim, axis=-1)
sn = np.reshape(sn, list(sn.shape)+[1] ) # print(sn.shape)
show_state(sn,"Re-simulated u")
# + [markdown] id="EKQ8qe4FoXbC"
# Next, we'll store the full solution over the course of the $t=0 \dots 1$ time interval, so that we can compare it later on to the full solution from a regular forward solve and compare it to the differential physics solution.
#
# Thus, stay tuned for the full evaluation and the comparison. This will follow in {doc}`diffphys-code-burgers`, after we've discussed the details of how to run the differential physics optimization.
# + colab={"base_uri": "https://localhost:8080/"} id="BY0ltx8uoXbC" outputId="f4a0333b-0250-40bf-fd9d-e65c35f3e1ac"
# Evaluate the NN on the full grid one more time and store it as a 2D [x, t]
# array for later comparison with the differentiable-physics approach.
vels = session.run(grid_u) # special for showing NN results, run through TF
vels = np.reshape( vels, [vels.shape[1],vels.shape[2]] )

# save for comparison with other methods
import os; os.makedirs("./temp",exist_ok=True)
np.savez_compressed("./temp/burgers-pinn-solution.npz",vels) ; print("Vels array shape: "+format(vels.shape))
# + [markdown] id="50WLIo4voXbD"
# ---
#
# ## Next steps
#
# This setup is just a starting point for PINNs and physical soft-constraints, of course. The parameters of the setup were chosen to run relatively quickly. As we'll show in the next sections, the behavior of such an inverse solve can be improved substantially by a tighter integration of solver and learning.
#
# The solution of the PINN setup above can also directly be improved, however. E.g., try to:
#
# * Adjust parameters of the training to further decrease the error without making the solution diverge.
# * Adapt the NN architecture for further improvements (keep track of the weight count, though).
# * Activate a different optimizer, and observe the change in behavior (this typically requires adjusting the learning rate). Note that the more complex optimizers don't necessarily do better in this relatively simple example.
# * Or modify the setup to make the test case more interesting: e.g., move the boundary conditions to a later point in simulation time, to give the reconstruction a larger time interval to reconstruct.
| physicalloss-code.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Zombie Humans ratio
#
# Consider that in a world of 300 people,
#
# 125 are Humans and 175 are Zombies
#
# Every day
# * 20 percent of Humans convert to Zombies
# * 10 percent of Zombies convert back to Humans
#
# Assumptions
# - Total population remains the same
# - There is no cure or immune system development
#
# ## What will be the Population of Zombies and Humans in the long Run
import numpy as np
import matplotlib.pyplot as plt
# NOTE(review): "seaborn-whitegrid" was renamed "seaborn-v0_8-whitegrid" in
# newer matplotlib releases — confirm the installed version if this raises.
plt.style.use("seaborn-whitegrid")
# Initial populations: 125 humans, 175 zombies (total 300).
H=125
Z=175
def day_simulation(h0, z0, total=300):
    """Advance the human/zombie populations by one day.

    Each day 20% of humans become zombies and 10% of zombies revert to
    humans: h' = 0.8*h0 + 0.1*z0. The zombie count is then derived from the
    (truncated) human count so the fixed total population is conserved
    exactly, per the model assumptions.

    Parameters
    ----------
    h0, z0 : number
        Human and zombie populations at the start of the day.
    total : int, optional
        Fixed world population (default 300, matching this notebook).

    Returns
    -------
    tuple of int
        (humans, zombies) after one day, summing to `total`.
    """
    h = 0.8 * h0 + 0.1 * z0
    h_int = int(h)  # truncate toward zero, as in the original model
    z_int = total - h_int  # conservation: zombies absorb the rounding remainder
    return (h_int, z_int)
# Simulate 9 days, recording both populations each day.
h=H
z=Z
humans,zombies=[],[]
for i in np.arange(1,10):
    h,z=day_simulation(h,z)
    humans.append(h)
    zombies.append(z)
    print("Day {} Humans {} Zombies {}".format(i,h,z))

plt.figure(dpi=120)
plt.plot(np.arange(1,10),humans,label="Humans")
plt.plot(np.arange(1,10),zombies,label="Zombies")
plt.xlabel("Days")
plt.ylabel("Population")
plt.legend()
plt.show()

# Long-run fractions after the simulated days.
print("Humans percentage is ",h/(h+z))
print("Zombie percentage is ",z/(h+z))
# # Using Matrix Method
# Daily transition matrix: columns are (human, zombie) "stay/convert" rates.
# NOTE(review): np.matrix is deprecated in NumPy — plain 2D arrays with `@`
# would be the modern equivalent, kept as-is here.
mat=np.matrix([[0.8,0.1],[0.2,0.9]])
mat
np.matrix([125,175])
np.matrix([[125],[175]])
# One day = one matrix-vector product on the initial column vector.
mat.dot(np.matrix([[125],[175]]))
# Day i = mat**i applied to the initial state.
for i in range(10):
    print("Day {}\n".format(i),(mat**i).dot(np.matrix([[125],[175]])))
# # Method 3
# Vector field over a grid of (human, zombie) starting populations,
# showing where one day of the transition map sends each state.
x=np.arange(10,200,20)
y=np.arange(10,200,20)
X,Y=np.meshgrid(x,y)
print(X[:5,:5],"\n\n",Y[:5,:5])
X1 = 0.8 * X + 0.1 * Y
Y1 = 0.2 * X + 0.9 * Y
print(X1[:5,:5],"\n\n",Y1[:5,:5])
plt.figure(dpi=120)
plt.quiver(X,Y,X1,Y1)
# Reference line along the eigenvector direction (zombies = 2 * humans).
plt.plot([10,100],[10,200])
plt.xlabel("Human Population")
plt.ylabel("Zombie Population")
plt.show()
# # Above Operation in a function for multiple calls
def matrix_simuation(X0, Y0, interation=1):
    """Apply the daily human/zombie transition ``interation`` times.

    Operates element-wise, so both scalars and NumPy arrays (e.g.
    meshgrids of candidate states) can be passed.  The function and
    parameter names keep their original (misspelled) form because
    existing callers use them.

    Parameters
    ----------
    X0, Y0 : number or ndarray
        Initial human and zombie populations.
    interation : int, optional
        Number of days to simulate (default 1).

    Returns
    -------
    tuple
        ``(X, Y)`` populations after ``interation`` days; for
        ``interation=0`` the inputs are returned unchanged.
    """
    # Bug fix: initialise the results so that interation=0 returns the
    # inputs instead of raising NameError on the unbound X1/Y1.
    X1, Y1 = X0, Y0
    for _ in np.arange(interation):
        X1 = 0.8 * X0 + 0.1 * Y0
        Y1 = 0.2 * X0 + 0.9 * Y0
        X0, Y0 = X1, Y1
    return (X1, Y1)
# +
# Same vector-field plot as above, but produced through the reusable
# matrix_simuation helper for a single day.
x=np.arange(10,200,20)
y=np.arange(10,200,20)
X,Y=np.meshgrid(x,y)
plt.figure(dpi=120)
X1,Y1=matrix_simuation(X,Y)
plt.quiver(X,Y,X1,Y1)
plt.plot([10,100],[10,200])
plt.xlabel("Human Population")
plt.ylabel("Zombie Population")
plt.show()
# +
# Panel of quiver plots after 1, 3, 5, 7, 10 and 15 simulated days:
# the field progressively aligns with the stationary 1:2 direction.
x=np.arange(10,200,20)
y=np.arange(10,200,20)
X,Y=np.meshgrid(x,y)
i=[1,3,5,7,10,15]
#i=[1]
l=len(i)
fig = plt.figure(dpi=120,figsize=(13,8))
# j is the number of days to simulate, index the 1-based subplot slot.
for j,index in zip(i,np.arange(1,l+1)):
    ax = fig.add_subplot(2,3,index)
    X1,Y1=matrix_simuation(X,Y,j)
    ax.quiver(X,Y,X1,Y1)
    ax.set(xlabel="Human Population",
          ylabel="Zombie Population",
          title="Day - {}".format(j))
plt.tight_layout()
plt.show()
| python/projects/zombie/BasicZombie.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Laboratoire facultatif
# ## Ce carnet web IPython ne fonctionne pas dans Google Colab mais il peut s'exécuter sur un poste de travail sur lequel est installé un fureteur piloté par Selenium.
#
# ### Rappel - Fonctionnement d'un carnet web iPython
#
# * Pour exécuter le code contenu dans une cellule d'un carnet iPython, cliquez dans la cellule et faites (⇧↵, shift-enter)
# * Le code d'un carnet iPython s'exécute séquentiellement de haut en bas de la page. Souvent, l'importation d'une bibliothèque Python ou l'initialisation d'une variable est préalable à l'exécution d'une cellule située plus bas. Il est donc recommandé d'exécuter les cellules en séquence. Enfin, méfiez-vous des retours en arrière qui peuvent réinitialiser certaines variables.
# # Moissonnage de données sur la Toile - formulaire web
#
# ## Les marées des sept prochains jours à Tadoussac
#
# <p>Vous planifiez une sortie en kayak de mer à Tadoussac qui est située sur le fleuve Saint-Laurent à l'embouchure du Saguenay. Vous allez moissonner les informations concernant les marées pour les sept prochains jours à compter du 21 septembre 2021. Pour cela, vous utiliserez un «vieux» formulaire disponible sur le site de Pêches et Océans Canada. </p>
#
# <p>Pour simuler les interactions que fait un humain avec le site web, vous utiliserez la bibliothèque python <a href="https://selenium-python.readthedocs.io/" target='_blank'>Selenium</a>. L'extraction des sections pertinentes de la page web des résultats sera faite avec l'outil python <a href="https://www.crummy.com/software/BeautifulSoup/bs4/doc/" target='_blank'>Beautiful Soup</a>. Enfin, des <a href="https://docs.python.org/fr/3/howto/regex.html" target='_blank'>expressions régulières<sup>1</sup></a> seront utilisées pour extraire les informations détaillées.</p>
# <hr/>
# <span style="font-size:80%"><sup>1</sup><b>Note - pratique: </b><a href="https://regex101.com/" target='_blank'>regex101.com</a> est un excellent site pour pratiquer et mettre au point des expressions régulières.</span>
# ## Importation des bibliothèques Python
# <ul>
# <ul>
# <li><b><i>re</i></b> (expressions régulières), <b><i>json</i></b> (traitement du format de données json: JavaScript Object Notation) et <b><i>datetime</i></b> (traitement des dates et de l'heure) sont des bibliothèques Python standard, vous n'avez pas besoin de les installer, il suffit de les importer.</li>
# <li>Installation d'un <a href="https://selenium-python.readthedocs.io/installation.html#drivers" target='_blank'>pilote pour le contrôle de votre fureteur par Selenium</a></li>
# <ul>
# <li><a href="https://sites.google.com/a/chromium.org/chromedriver/downloads" target='_blank'>Pilotes</a> pour le fureteur Chrome</li>
# <li>Par exemple sur Mac OS et le fureteur Chrome, brew install chromedriver</li>
# </ul>
# <li>Installation de <a href="https://selenium-python.readthedocs.io/" target='_blank'>Selenium</a></li>
# <ul>
# <li>Par exemple, sudo pip3 install selenium</li>
# </ul>
# <li>Installation de <a href="https://www.crummy.com/software/BeautifulSoup/bs4/doc/" target='_blank'>Beautiful Soup</a></li>
# <ul>
# <li>Par exemple, sudo pip3 install bs4</li>
# </ul>
# </ul>
# </ul>
# +
# -*- coding: utf-8 -*-
# sudo pip3 install bs4
from bs4 import BeautifulSoup
# Installer le pilote ChromeDriver ou autre
# en fonction du fureteur utilisé
# https://selenium-python.readthedocs.io/installation.html#drivers
# Installer selenium
# sudo pip3 install selenium
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
import re
import pandas as pd
print("Bibliothèques Python importées")
# -
# ## Analyse des points d'entrée du formulaire avec la fonction d'inspection de votre fureteur
# <ul>
# <ul>
# <li>Ouvrir la page du <a href="https://www.marees.gc.ca/fra/station?sid=2985" target='_blank'>formulaire</a> sur le site de Pêches et Océans Canada;</li>
# <li>Reconstituer la séquence des interactions avec le formulaire (clics, menus et saisie clavier);</li>
# <li>Obtenir les identifiants des boutons, des menus, des champs de saisie;</li>
# </ul>
# </ul>
# ## Programmation d'un script d'interaction Selenium avec le formulaire
#
#
# +
# Instantiate a driver for the Chrome browser.
# NOTE(review): the find_element_by_* helpers used below were removed in
# Selenium 4 in favour of find_element(By.ID, ...); this script targets
# the Selenium 3 API — confirm the installed Selenium version.
pilote_chrome = webdriver.Chrome("/usr/local/bin/chromedriver")
# Fetch the Fisheries and Oceans Canada tide form page.
pilote_chrome.get("https://www.marees.gc.ca/fra/station?sid=2985")
# Fill in the requested date and submit it with the RETURN key.
date_requise = "2021/09/21"
saisie_date = pilote_chrome.find_element_by_id("date")
saisie_date.clear()
saisie_date.send_keys(date_requise, Keys.RETURN)
municipalite_requise = "Tadoussac"
# Get the municipalities drop-down menu.
menu_deroulant_localites = pilote_chrome.find_element_by_id("sid")
# Get the list of municipality options.
liste_municipalites = menu_deroulant_localites.find_elements_by_tag_name('option')
# Click the requested municipality.
for municipalite in liste_municipalites:
    # print(municipalite.text)
    if (municipalite.text == municipalite_requise):
        municipalite.click()
format_requis = "texte"
# Get the output-format drop-down menu.
menu_deroulant_formats = pilote_chrome.find_element_by_id("pres")
# Get the list of output-format options.
liste_formats = menu_deroulant_formats.find_elements_by_tag_name('option')
# Click the requested format ("texte" = plain-text tide table).
for format_type in liste_formats:
    # print(format_type.text)
    if (format_type.text == format_requis):
        format_type.click()
# Trigger the form submission.
bouton_soumission = pilote_chrome.find_element_by_css_selector("input.button.button-accent")
bouton_soumission.click()
# Grab the HTML source of the results page for later parsing.
page_reponse = pilote_chrome.page_source
print("Le script Selenium devrait ouvrir un fureteur et afficher un message")
print("du genre: « Browser is being controlled by automated test software. »")
print("puis le script Selenium simule les interactions avec le formulaire.\n")
print("Script Selenium exécuté, formulaire soumis...")
# -
# ## Analyse de la page de réponse retournée avec BeautifulSoup
# +
# Parse the returned response page with BeautifulSoup.
dom_page_resultats = BeautifulSoup(page_reponse,"html.parser")
# Pretty-print the returned web page (for visual inspection).
print(dom_page_resultats.prettify())
# -
# ## Extraction des sections pertinentes de la page de résultats avec Beautiful Soup;
# +
# Extract the relevant sections from the page; this requires inspecting
# the page content and knowing what BeautifulSoup offers:
# https://www.crummy.com/software/BeautifulSoup/bs4/doc/
# Extract the parameters of the information-search request (station
# header block).
parametres_bruts = dom_page_resultats.find("div", {"class":"stationTextHeader"})
# print("Paramètres bruts:")
parametres_bruts = parametres_bruts.getText().strip()
# Extract the tide data block.
donnees_marees_brutes = dom_page_resultats.find("div", {"class":"stationTextData"})
# print("Données marées brutes:\n",donnees_marees_brutes)
donnees_marees_brutes = donnees_marees_brutes.getText().strip()
# One text line per tide entry.
donnees_marees_brutes_liste = donnees_marees_brutes.split('\n')
print("Sections pertinentes de la page de résultats extraites")
# -
# ## Extraction des informations détaillées avec des expressions régulières;
# +
# Extract the header attributes with a regular expression;
# the pattern was tuned with https://regex101.com/
import re
# Parses the station header: station name, numeric station id, and
# time-zone code.
FORME1 = re.compile(r'# Station : (\W*\w*)\s\((\d*)\)\s*#\sfuseau horaire : (\w*)[\W*\w*\s*]*')
formes_reconnues = re.match(FORME1,parametres_bruts)
if formes_reconnues:
    localite = formes_reconnues.group(1)
    code_station = formes_reconnues.group(2)
    fuseau_horaire = formes_reconnues.group(3)
    # print(localite+'\t'+code_station+'\t'+fuseau_horaire)
# Extract each tide record (date, time, height in metres); the height
# in feet is matched but discarded.
# NOTE(review): the "." in "\d*.\d*" is an unescaped wildcard — it
# matches the intended decimal point, but also any other character.
FORME2 = re.compile(r'\s*(\d*-\d*-\d*);(\d*:\d*:\d*);(\d*.\d*)\(m\);\d*.\d*\(ft\)$')
donnees_marees_liste = []
for data_element in donnees_marees_brutes_liste:
    formes_reconnues = re.match(FORME2,data_element)
    if formes_reconnues:
        date = formes_reconnues.group(1)
        heure = formes_reconnues.group(2)
        hauteur = formes_reconnues.group(3)
        # Tab-separated record: date, time, height (m).
        donnees_chaine = date+'\t'+heure+'\t'+hauteur
        # print(donnees_chaine)
        donnees_marees_liste.append(donnees_chaine)
print("Informations détaillées sur les marées extraites")
# -
# ## Sauvegarde des données dans le fichier `donnees_marees_formulaire_web.csv`
# +
# Save the data to a tab-separated .csv file.
chemin_fichier_sortie = './'
nom_fichier_sortie = "donnees_marees_formulaire_web.csv"
with open(chemin_fichier_sortie+nom_fichier_sortie,'w') as fichier_sortie:
    # Write the header line listing the different attributes.
    fichier_sortie.write('localite\tstation\tfuseau_horaire\tdate\theure\thauteur_m\n')
    for donnees_marees in donnees_marees_liste:
        # One record per line: station info + tide data (already
        # tab-separated).
        fichier_sortie.write(localite+'\t'+code_station+'\t'+fuseau_horaire+'\t'+donnees_marees+'\n')
print("Données de marées sauvegardées dans le fichier "+ nom_fichier_sortie)
# -
# ## Test de lecture du fichier de données
#
# Vérification que le fichier .csv est correct.
# +
import pandas as pd
# Sanity check: read the .csv back with pandas to verify it is
# well-formed (tab-delimited).
donnees_marees_df = pd.read_csv(chemin_fichier_sortie+nom_fichier_sortie,delimiter='\t')
donnees_marees_df
# -
| Labos/Lab-Traitement_Donnees/Moissonnage_de_donnees_sur_la_Toile.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Exchange + DMI
# +
import os
import k3d
import random
import discretisedfield as df
import micromagneticmodel as mm
import micromagneticdata as md
import oommfc as oc
# Simulation geometry: a 30 nm x 1 nm x 1 nm wire discretised into
# 10 cells along x.
p1 = (-15e-9, 0, 0)
p2 = (15e-9, 1e-9, 1e-9)
n = (10, 1, 1)
region = df.Region(p1=p1, p2=p2)
mesh = df.Mesh(region, n=n)
# Material parameters: saturation magnetisation Ms, DMI constant D and
# exchange stiffness A (presumably SI units — confirm against the
# micromagneticmodel documentation).
Ms = 1e6
D = 2e-3
A = 4.77e-12
# Fixed seed so the random initial magnetisation is reproducible.
random.seed(2)
def value_random(point):
    """Return a random 3-vector with components uniform in [-1, 1).

    The ``point`` argument is required by the field value-callback
    interface but is not used.
    """
    # Three sequential random.random() draws, mapped from [0, 1) to
    # [-1, 1) — identical RNG stream to the original implementation.
    return [random.random() * 2 - 1 for _ in range(3)]
# Assemble the micromagnetic system: exchange + DMI (crystal class T)
# energy, damping-only dynamics, and a random initial magnetisation
# normalised to Ms.
system = mm.System(name='exchange_dmi')
system.energy = mm.Exchange(A=A) + mm.DMI(D=D, crystalclass='T')
system.dynamics = mm.Damping(alpha=0.5) # No precession to make animations faster
system.m = df.Field(mesh, dim=3, value=value_random, norm=Ms)
# Remove stale drive data from a previous run of this system, if any.
try:
    oc.delete(system)
except FileNotFoundError:
    pass
# Relax the system over 70 ps, saving 250 snapshots.
td = oc.TimeDriver()
td.drive(system, t=70e-12, n=250) # The aim is to have 10s videos n=10*25=250
print(f'Average m: {system.m.orientation.average}')
system.m.k3d_vector()
# +
# Interactive viewer over the saved drives: pick a drive and a time
# step, and render the magnetisation orientation with k3d.
data = md.Data(name=system.name)
plot = k3d.plot()
@df.interact(drive_number=data.selector(),
             timestep=data.drive(0).slider(continuous_update=False))
def my_plot(drive_number, timestep):
    data.drive(drive_number).step(timestep).orientation.k3d_vector(plot=plot, interactive_field=system.m, head_size=2)
plot.display()
# -
# Convert drive 0 snapshots from OVF to VTK files.
data.drive(0).ovf2vtk()
| dev/animations/exchange-dmi.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import os
from scipy.io import wavfile
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
from keras.layers import Conv2D, MaxPool2D, Flatten, LSTM
from keras.layers import Dropout, Dense, TimeDistributed
from keras.models import Sequential
from keras.utils import to_categorical
from sklearn.utils.class_weight import compute_class_weight
from tqdm import tqdm
from python_speech_features import mfcc
import pickle
from keras.callbacks import ModelCheckpoint
from cfg import Config
# +
def check_data():
    """Return the cached (pickled) features/labels for the current config.

    Looks for a pickle at ``config.p_path`` (``config`` is a module-level
    Config instance).  Returns the unpickled object when the cache
    exists, otherwise ``None`` so the caller rebuilds the features.

    NOTE: ``pickle.load`` executes arbitrary code from the file; only
    use with trusted, locally generated caches.
    """
    # Guard clause instead of if/else nesting.
    if not os.path.isfile(config.p_path):
        return None
    # Typo fixed in the log message: "Loadning" -> "Loading".
    print('Loading existing data for {} model'.format(config.mode))
    with open(config.p_path, 'rb') as handle:
        return pickle.load(handle)
def build_rand_feat():
    """Build a random training set of MFCC features from the clean wavs.

    Returns the cached (X, y) from ``config.p_path`` when available;
    otherwise samples ``n_samples`` random fixed-length windows from the
    cleaned audio files (class choice follows ``prob_dist``), computes
    MFCCs, min-max normalises them, reshapes for the chosen model mode,
    one-hot encodes the labels, and pickles the config (with the data
    attached) for reuse.

    Relies on module-level globals: config, n_samples, class_dist,
    prob_dist, df, classes.
    """
    tmp = check_data()
    if tmp:
        return tmp.data[0], tmp.data[1]
    X = []
    y = []
    # Track the global min/max over all samples for normalisation.
    _min, _max = float('inf'), -float('inf')
    print('tqdm: ',n_samples)
    for _ in tqdm(range(int(n_samples))):
        # Pick a class according to the class distribution, then a
        # random file of that class, then a random window within it.
        rand_class = np.random.choice(class_dist.index, p = prob_dist)
        file = np.random.choice(df[df.label==rand_class].index)
        rate, wav = wavfile.read('clean/'+file)
        label = df.at[file, 'label']
        rand_index = np.random.randint(0, wav.shape[0]-config.step)
        sample = wav[rand_index:rand_index+config.step]
        X_sample = mfcc(sample, rate, numcep=config.nfeat, nfilt=config.nfilt, nfft=config.nfft)
        _min = min(np.amin(X_sample), _min)
        _max = max(np.amax(X_sample), _max)
        X.append(X_sample)
        y.append(classes.index(label))
    # Persist the normalisation bounds for inference-time reuse.
    config.min = _min
    config.max = _max
    X, y = np.array(X), np.array(y)
    # Min-max normalise to [0, 1].
    X = (X - _min) / (_max - _min)
    if config.mode == 'conv':
        # CNN input needs a trailing channel dimension.
        X = X.reshape(X.shape[0], X.shape[1], X.shape[2], 1)
    elif config.mode == 'time':
        # RNN input stays (samples, time, features).
        X = X.reshape(X.shape[0], X.shape[1], X.shape[2])
    y = to_categorical(y, num_classes=10)
    # Cache the data on the config object and pickle the whole config.
    config.data = (X, y)
    with open(config.p_path, 'wb') as handle:
        pickle.dump(config, handle, protocol=2)
    return X, y
# +
def get_conv_model():
    """Build and compile the CNN used for the 'conv' mode.

    Architecture: four same-padded 3x3 conv layers (16/32/64/128
    filters), 2x2 max-pooling, dropout, two dense layers and a 10-way
    softmax output.  Compiled with categorical cross-entropy and Adam.
    Uses the module-level ``input_shape``.
    """
    model = Sequential()
    # Convolutional feature extractor.  Every layer keeps the original
    # call signature — including the (redundant) input_shape on each
    # layer — so the built model is identical to the hand-unrolled form.
    for n_filters in (16, 32, 64, 128):
        model.add(Conv2D(n_filters, (3, 3), activation='relu',
                         strides=(1, 1), padding='same',
                         input_shape=input_shape))
    model.add(MaxPool2D((2, 2)))
    model.add(Dropout(0.5))
    model.add(Flatten())
    # Dense classifier head.
    for n_units in (128, 64):
        model.add(Dense(n_units, activation='relu'))
    model.add(Dense(10, activation='softmax'))
    model.summary()
    model.compile(loss='categorical_crossentropy',
                  optimizer='adam', metrics=['acc'])
    return model
def get_recurrent_model():
    """Build and compile the LSTM model used for the 'time' mode.

    Input data shape is (n, time, feat).  Two stacked LSTMs feed a
    chain of time-distributed dense layers, flattened into a 10-way
    softmax output.  Uses the module-level ``input_shape``.
    """
    model = Sequential()
    model.add(LSTM(128, return_sequences=True, input_shape=input_shape))
    model.add(LSTM(128, return_sequences=True))
    model.add(Dropout(0.5))
    # Per-timestep dense reduction: 64 -> 32 -> 16 -> 8 units.
    for n_units in (64, 32, 16, 8):
        model.add(TimeDistributed(Dense(n_units, activation='relu')))
    model.add(Flatten())
    model.add(Dense(10, activation='softmax'))
    model.summary()
    model.compile(loss='categorical_crossentropy',
                  optimizer='adam', metrics=['acc'])
    return model
| .ipynb_checkpoints/Train-checkpoint.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Typesetting Math in your book
#
# Jupyter Book uses [MathJax](http://docs.mathjax.org/) for typesetting math in your
# book. This allows you to have LaTeX-style mathematics in your online content.
# This page shows you a few ways to control this.
#
# For more information about equation numbering, see the
# [MathJax equation numbering documentation](http://docs.mathjax.org/en/v2.7-latest/tex.html#automatic-equation-numbering).
#
# # In-line math
#
# To insert in-line math use the `$` symbol within a Markdown cell.
# For example, the text `$this_{is}^{inline}$` will produce: $this_{is}^{inline}$.
# # Math blocks
#
# You can also include math blocks for separate equations. This allows you to focus attention
# on more complex or longer equations, as well as link to them in your pages. To use a block
# equation, wrap the equation in either `$$` or `\begin` statements.
#
# For example,
#
# ```latex
# \begin{equation}
# \int_0^\infty \frac{x^3}{e^x-1}\,dx = \frac{\pi^4}{15}
# \end{equation}
# ```
# results in
#
# \begin{equation}
# \int_0^\infty \frac{x^3}{e^x-1}\,dx = \frac{\pi^4}{15}
# \end{equation}
# and
#
# ```latex
# $$
# \int_0^\infty \frac{x^3}{e^x-1}\,dx = \frac{\pi^4}{15}
# $$
# ```
#
# results in
#
# $$
# \int_0^\infty \frac{x^3}{e^x-1}\,dx = \frac{\pi^4}{15}
# $$
# ## Numbering equations
#
# MathJax has built-in support for numbering equations. This makes it possible to
# easily reference equations throughout your page. To do so, add this tag
# to a block equation:
#
# `\tag{<number>}`
#
# The `\tag` provides a number for the equation that will be inserted when you refer
# to it in the text.
#
# For example, the following code:
#
# ```latex
# equation 1
# $$
# \int_0^\infty \frac{x^3}{e^x-1}\,dx = \frac{\pi^4}{15}
# \tag{1}
# $$
#
# equation 2
#
# $$
# \int_0^\infty \frac{x^3}{e^x-1}\,dx = \frac{\pi^4}{15}
# \tag{2}
# $$
#
# equation 999
#
# $$
# \int_0^\infty \frac{x^3}{e^x-1}\,dx = \frac{\pi^4}{15}
# \tag{999}
# $$
# ```
#
# Results in these math blocks:
#
# equation 1
# $$
# \int_0^\infty \frac{x^3}{e^x-1}\,dx = \frac{\pi^4}{15}
# \tag{1}
# $$
#
# equation 2
#
# $$
# \int_0^\infty \frac{x^3}{e^x-1}\,dx = \frac{\pi^4}{15}
# \tag{2}
# $$
#
# equation 999
#
# $$
# \int_0^\infty \frac{x^3}{e^x-1}\,dx = \frac{\pi^4}{15}
# \tag{999}
# $$
#
# ### Automatic numbering
#
# If you'd like **all** block equations to be numbered with MathJax, you can activate
# this with the following configuration in your `_config.yml` file:
#
# ```yaml
# number_equations: true
# ```
#
# In this case, all equations will have numbers. If you'd like to deactivate
# an equation's number, include a `\notag` with your equation, like so:
#
# ```latex
# $$
# \int_0^\infty \frac{x^3}{e^x-1}\,dx = \frac{\pi^4}{15}
# \notag
# $$
# ```
# ## Linking to equations
#
# Adding `\label{mylabel}` to an equation allows you to refer to the equation elsewhere in the page. You
# can define a human-friendly label and MathJax will insert an anchor with the following form:
#
# ```html
# #mjx-eqn-mylabel
# ```
#
# If you use `\label` in conjunction with `\tag`, then you can insert references directly to an equation
# by using the `\ref` syntax. For example, here's an equation with a tag and label:
#
# ```latex
# $$
# \int_0^\infty \frac{x^3}{e^x-1}\,dx = \frac{\pi^4}{15}
# \label{mylabel1}\tag{24}
# $$
#
# $$
# \int_0^\infty \frac{x^3}{e^x-1}\,dx = \frac{\pi^4}{15}
# \label{mylabel2}\tag{25}
# $$
# ```
#
# $$
# \int_0^\infty \frac{x^3}{e^x-1}\,dx = \frac{\pi^4}{15}
# \label{mylabel1}\tag{24}
# $$
#
# $$
# \int_0^\infty \frac{x^3}{e^x-1}\,dx = \frac{\pi^4}{15}
# \label{mylabel2}\tag{25}
# $$
#
# Now, we can refer to these math blocks with `\ref` elements. For example,
# we can mention Equation \ref{mylabel1} using `\ref{mylabel1}` and
# Equation \ref{mylabel2} with `\ref{mylabel2}`.
#
# Note that these equations also have anchors on them, which can be used to link
# to an equation from elsewhere, for example with this link text:
#
# ```html
# <a href="#mjx-eqn-mylabel2">My link</a>
# ```
| jupyter_book/book_template/content/features/math.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: ntds_2018
# language: python
# name: ntds_2018
# ---
# + [markdown] colab_type="text" id="xz9N0HUe9izB"
# # [NTDS'18] milestone 4: graph signal processing
# [ntds'18]: https://github.com/mdeff/ntds_2018
#
# [<NAME>](https://people.epfl.ch/254838), [EPFL LTS2](http://lts2.epfl.ch)
# + [markdown] colab_type="text" id="TMD1uxMy9izD"
# ## Students
#
# * Team: 50
# * Students: <NAME>, <NAME>, <NAME>, <NAME>
# * Dataset: Spammers on Social Network
# + [markdown] colab_type="text" id="tsEGn4jx9izE"
# ## Rules
#
# * Milestones have to be completed by teams. No collaboration between teams is allowed.
# * Textual answers shall be short. Typically one to two sentences.
# * Code has to be clean.
# * You cannot import any other library than we imported.
# * When submitting, the notebook is executed and the results are stored. I.e., if you open the notebook again it should show numerical results and plots. We won't be able to execute your notebooks.
# * The notebook is re-executed from a blank state before submission. That is to be sure it is reproducible. You can click "Kernel" then "Restart & Run All" in Jupyter.
# + [markdown] colab_type="text" id="YsMAvI_b9izF"
# ## Objective
#
# The goal of this milestone is to do some Graph Signal Processing (GSP) on the data of your project.
#
# ### A note about plotting
#
# There are several questions in this milestone that ask you to plot a signal on your network.
# There are several ways from which you could approach it.
# In all cases, compute the position of the nodes a single time at the beginning, as this is likely to be a costly operation.
# Using a single layout for all the graph plots will also make it easier to compare the plots.
# Indeed, the only thing changing between plots is the signal displayed.
# You can represent the features/labels lying on the graph via node **colors**.
# To do so, make sure to have a consistent color map throughout and remember to display a colorbar and scale in all plots, so that we can tell what numbers the colors represent.
#
# * An option is to use the **Laplacian eigenmaps** that you have seen in the previous milestone to embed your graph on the plane. For example:
# ```
# from matplotlib import pyplot as plt
# plt.scatter(eigenvectors[:, 1], eigenvectors[:, 2], c=signal, alpha=0.5)
# plt.colorbar()
# ```
# * Another option is to use the plotting capabilities of **[NetworkX](https://networkx.github.io)**.
# See the documentation of its [drawing methods](https://networkx.github.io/documentation/stable/reference/drawing.html).
# For example:
# ```
# import networkx as nx
# graph = nx.from_scipy_sparse_matrix(adjacency)
# coords = nx.spring_layout(graph) # Force-directed layout.
# coords = eigenvectors[:, 1:3] # Laplacian eigenmaps.
# nx.draw_networkx_nodes(graph, coords, node_size=60, node_color=signal)
# nx.draw_networkx_edges(graph, coords, alpha=0.3)
# ```
# * Another option is to use the plotting capabilities of the **[PyGSP](https://github.com/epfl-lts2/pygsp)**, a Python package for Graph Signal Processing.
# **Note that your are forbidden to use the PyGSP for anything else than plotting.**
# See the documentation of its [plotting utilities](https://pygsp.readthedocs.io/en/stable/reference/plotting.html).
# For example:
# ```
# import pygsp as pg
# graph = pg.graphs.Graph(adjacency)
# graph.set_coordinates('spring') # Force-directed layout.
# graph.set_coordinates(eigenvectors[:, 1:3]) # Laplacian eigenmaps.
# graph.plot_signal(signal)
# ```
# * Yet another option is to save your graph on disk, use **[Gephi](https://gephi.org)** externally, to visualize the graph, save the graph with the Gephi coordinates and finally load the nodes coordinates back into the notebook.
#
# We encourage you to try all the above methods before making your choice. Then be consistent and use only one throughout the milestone.
# NetworkX and PyGSP should already be installed in your environement. If that's not the case, install with `conda install networkx pygsp` (after activating the `ntds_2018` environment).
# + [markdown] colab_type="text" id="yuDZxsmq9izH"
# ## 0 - Load your network
# + colab={} colab_type="code" id="YCquIBvb9izI"
# %matplotlib inline
# + [markdown] colab_type="text" id="1UXnBC7I9izN"
# If you get a `No module named 'pyunlocbox'` error when running the below cell, install the [pyunlocbox](https://github.com/epfl-lts2/pyunlocbox) with `conda install pyunlocbox` (after activating the `ntds_2018` environment).
# + colab={} colab_type="code" id="86SZ4MqK9izO"
import numpy as np
from scipy import sparse
import scipy.sparse.linalg
from matplotlib import pyplot as plt
from pyunlocbox import functions, solvers
# + [markdown] colab_type="text" id="BlGO6XoK9izT"
# For this milestone, all we will need is a set of features/labels for each of the nodes on the network, as well as the Laplacian, $L,$ and Gradient, $\nabla_G,$ matrices that you have computed for your network while working on milestone 3.
#
# Import those objects in the cell below (or recompute the Laplacian and Gradient from your stored adjacency matrix, if you wish).
#
# _Note_: If your features/labels are not floating-point numbers, please convert them. For example, if your data has labels "cat" and "dog" for nodes that represent cats or dogs, respectively, you may assign the number `1.0` for the label "cat" and the number `-1.0` for the label "dog".
# + colab={} colab_type="code" id="IyswllmjBTe8"
import pandas as pd
# Node features and edge list exported from the previous milestone.
features = pd.read_csv('features.csv')
edges = pd.read_csv("edges.csv")
num_edges = len(edges)
# Drop the leftover CSV index column written by to_csv.
edges.drop(columns=["Unnamed: 0"], inplace=True)
# + colab={} colab_type="code" id="mGdmirMmC6QA"
# Load the sparse undirected adjacency matrix (CSR) saved in milestone 3.
adjacency = scipy.sparse.load_npz("adjacency_undirected_sparse_csr.npz")
# Bug fix: modern SciPy has no top-level `scipy.load` (the NumPy alias
# was deprecated and removed from the scipy namespace); a plain .npy
# file is loaded with numpy directly.
adjacency_org = np.load("undirected_adjacency.npy")
n_nodes = adjacency.shape[0]
# + colab={} colab_type="code" id="N1CcrlTW9izV"
# combinatorial Laplacian, L = D-A
laplacian = scipy.sparse.load_npz("laplacian_combinatorial.npz") # or scipy.sparse.load_npz("laplacian_normalized.npz")
# gradient equals to S^t ( S is the incidence matrix (column being edges, and rows being nodes)
#gradient = # Your code here.
# Label signal: spammer/non-spammer flag per node, from features.csv
# ("Spammer Label" column; shape (n_nodes, 1)).
labels = np.array(features[["Spammer Label"]])
n_nodes = adjacency.shape[0]
# + colab={"base_uri": "https://localhost:8080/", "height": 51} colab_type="code" executionInfo={"elapsed": 549, "status": "ok", "timestamp": 1543854143233, "user": {"displayName": "G\u00f6rkem \u00c7aml\u0131", "photoUrl": "https://lh4.googleusercontent.com/-Lq5WnNsQ-0w/AAAAAAAAAAI/AAAAAAAAABg/Cn2-Y9hDg-A/s64/photo.jpg", "userId": "16487060833582065143"}, "user_tz": -60} id="qdVhoCXLwSzN" outputId="88efbf92-1fd0-4aec-9935-0bae9740f969"
# Display a summary of the loaded sparse Laplacian.
laplacian
# + [markdown] colab_type="text" id="vFujGPKe9izZ"
# ## 1 - Graph Fourier Transform
#
# In this section we will observe how your feature/label vector looks like in the "Graph Fourier" domain.
# + [markdown] colab_type="text" id="mBfqtLJy9izb" inputHidden=false outputHidden=false
# ### Question 1
#
# Compute the Fourier basis vectors and the Laplacian eigenvalues. Make sure to order those from smaller to larger, $\lambda_0 \leq \lambda_1 \leq \dots \leq \lambda_{N-1},$ and use the same ordering for the Fourier basis vectors.
# + colab={} colab_type="code" id="-rJ0cCAqHWa9"
# Partial eigendecomposition: the 1000 smallest-magnitude eigenpairs of
# the Laplacian (a full decomposition would be too costly at this size;
# the loose tol trades accuracy for speed).
eigenvalues, eigenvectors = sparse.linalg.eigsh(laplacian, k=1000, which='SM',tol=0.001)
# + colab={} colab_type="code" id="atcUH9R7myAk"
# Cache the spectrum on disk so the expensive eigsh call can be skipped
# on later runs.
np.save("eigenvalues" ,eigenvalues)
np.save("eigenvectors" ,eigenvectors)
# + colab={} colab_type="code" id="nq8gvq0Nmfc6"
# Reload the cached spectrum (redundant right after computing, but lets
# this cell run standalone in later sessions).
eigenvalues = np.load("eigenvalues.npy")
eigenvectors = np.load("eigenvectors.npy")
# + colab={} colab_type="code" id="Lh96tWQU9izd" inputHidden=false outputHidden=false
# Ordered Laplacian eigenvalues. (square of graph frequencies)
e = eigenvalues
# Ordered graph Fourier basis. = Eigenvectors of Laplacian
U = eigenvectors#? https://epfl-lts2.github.io/gspbox-html/doc/utils/gsp_compute_fourier_basis_code.html
# + [markdown] colab_type="text" id="t6lfajAQ9izj"
# Plot the first 3 and the last Fourier basis vectors as signals on your graph. Clearly indicate which plot belongs to which basis vector.
# + colab={"base_uri": "https://localhost:8080/", "height": 297} colab_type="code" executionInfo={"elapsed": 11782, "status": "ok", "timestamp": 1543861233870, "user": {"displayName": "G\u00f6rkem \u00c7aml\u0131", "photoUrl": "https://lh4.googleusercontent.com/-Lq5WnNsQ-0w/AAAAAAAAAAI/AAAAAAAAABg/Cn2-Y9hDg-A/s64/photo.jpg", "userId": "16487060833582065143"}, "user_tz": -60} id="x6OxNict8Y0O" outputId="9aea6765-0dc6-4921-9a1c-4915129ed160"
import pygsp as pg
# Build a PyGSP graph (used for plotting only) and show the first three
# (columns 1-3, skipping the constant vector 0) and the last three
# Fourier basis vectors as 1-D signals over the node index.
graph = pg.graphs.Graph(adjacency)
fig, axes = plt.subplots(1, 2, figsize=(10, 4))
graph.set_coordinates('line1D')
graph.plot_signal(U[:, 1:4], ax=axes[0], plot_name='First 3 Fourier Basis Vector', colorbar=True)
legend = [r'$Basis Vec={}$'.format(t) for t in [1,2,3]]
axes[0].legend(legend)
graph.set_coordinates('line1D')
graph.plot_signal(U[:, -3:], ax=axes[1], plot_name='Last 3 Fourier Basis Vector' , colorbar=True)
legend = [r'$Basis Vec={}$'.format(t) for t in [-3,-2,-1]]
axes[1].legend(legend)
fig.tight_layout()
# + colab={} colab_type="code" id="3iKpiht1_n4S"
# Common colour limits for all six plots, taken over the first and last
# three basis vectors.
low = min (np.ndarray.min(U[:,:3]), np.ndarray.min(U[:,-3:]))
up = max (np.ndarray.max(U[:,:3]), np.ndarray.max(U[:,-3:]))
limits=[low ,up]
print(limits)
#or
# NOTE(review): this override discards the computed bounds above, and
# the pair is ordered [0.1, -0.1] (upper first) — confirm the plotting
# helper accepts this ordering or whether [-0.1, 0.1] was intended.
limits=[0.1 ,-0.1]
# + colab={"base_uri": "https://localhost:8080/", "height": 740} colab_type="code" executionInfo={"elapsed": 28422, "status": "ok", "timestamp": 1543861253047, "user": {"displayName": "G\u00f6rkem \u00c7aml\u0131", "photoUrl": "https://lh4.googleusercontent.com/-Lq5WnNsQ-0w/AAAAAAAAAAI/AAAAAAAAABg/Cn2-Y9hDg-A/s64/photo.jpg", "userId": "16487060833582065143"}, "user_tz": -60} id="NEygXl9qLZXT" outputId="008f32a1-5ccf-4a77-e4df-e67b1a7b46bb"
# Plot the first and last 3 Fourier basis vectors in Laplacian-eigenmaps
# coordinates, with a shared colour scale (limits) for comparison.
graph.set_coordinates(eigenvectors[:, 1:3]) # Laplacian eigenmaps.
fig, axes = plt.subplots(2, 3, figsize=(20, 12))
# first 3
graph.plot_signal(U[:, 1], ax=axes[0,0],plot_name='First 1 Fourier Basis Vector', colorbar=True , limits=limits)
graph.plot_signal(U[:, 2], ax=axes[0,1],plot_name='First 2 Fourier Basis Vector', colorbar=True , limits=limits)
graph.plot_signal(U[:, 3], ax=axes[0,2],plot_name='First 3 Fourier Basis Vector', colorbar=True , limits=limits)
# last 3
graph.plot_signal(U[:, -3], ax=axes[1,0],plot_name='Last 3 Fourier Basis Vector', colorbar=True , limits=limits)
graph.plot_signal(U[:, -2], ax=axes[1,1],plot_name='Last 2 Fourier Basis Vector', colorbar=True , limits=limits)
graph.plot_signal(U[:, -1], ax=axes[1,2],plot_name='Last 1 Fourier Basis Vector', colorbar=True , limits=limits)
# + [markdown] colab_type="text" id="ThCYNAma9izu"
# ### Question 2
#
# What can you observe in terms of local variations when comparing the basis vectors corresponding to the smallest eigenvalues to those corresponding to the largest eigenvalue? How would this justify the interpretation of the eigenvalues as "graph frequencies"?
# + [markdown] colab_type="text" id="G49Qj6SG9izy"
#
# **Our answer:**
#
# For the eigenvectors corresponding to the smaller eigenvalues (first three) which are close to zero, we see smoother and slow oscillating functions. Therefore, these smallest eigenvalues corresponds to low frequencies, on the other hand for the biggest 3 eigenvectors that have the largest 3 eigenvalues, we see that oscilation is more faster. Therefore, we can say that the largest eigenvalue (which are far from zero value) have graph high frequencies.
# + [markdown] colab_type="text" id="k52i3dbG9iz1"
# ### Question 3
#
# Implement a function that returns the Graph Fourier Transform (GFT) of a given vector $x \in \mathbb{R}^{N},$ with respect to your graph, and a function that computes the corresponding inverse GFT (iGFT).
# + colab={} colab_type="code" id="i4j6Wv-O9iz2" inputHidden=false outputHidden=false
def GFT(x):
    """Graph Fourier transform of the signal ``x``.

    Projects ``x`` onto the Laplacian eigenvector basis ``U``
    (module-level), i.e. computes U^T x.
    """
    # Equivalent to np.tensordot(U, x, (0, 0)): contract U's node axis
    # with x's node axis.
    return U.T @ x
def iGFT(x):
x_igft = np.tensordot(U, x, (1, 0))
return x_igft
# + [markdown] colab_type="text" id="aIqRzLeE9iz9"
# ### Question 4
#
# Plot your feature/label vector as a signal on your graph
# + colab={"base_uri": "https://localhost:8080/", "height": 280} colab_type="code" executionInfo={"elapsed": 18718, "status": "ok", "timestamp": 1543861253242, "user": {"displayName": "G\u00f6rkem \u00c7aml\u0131", "photoUrl": "https://lh4.googleusercontent.com/-Lq5WnNsQ-0w/AAAAAAAAAAI/AAAAAAAAABg/Cn2-Y9hDg-A/s64/photo.jpg", "userId": "16487060833582065143"}, "user_tz": -60} id="lgmhlNGy9iz_" inputHidden=false outputHidden=false outputId="bea648fc-7979-43bf-bdd3-a0dd3ef75e7b"
# Lay the graph out using the 2nd and 3rd Laplacian eigenvectors as 2-D
# coordinates (a Laplacian-eigenmap embedding), then draw the label signal.
graph.set_coordinates(eigenvectors[:, 1:3])
graph.plot_signal(labels, plot_name='Label Vector as a Signal on Graph', colorbar=True, limits=limits)
# + colab={"base_uri": "https://localhost:8080/", "height": 280} colab_type="code" executionInfo={"elapsed": 19775, "status": "ok", "timestamp": 1543861256574, "user": {"displayName": "G\u00f6rkem \u00c7aml\u0131", "photoUrl": "https://lh4.googleusercontent.com/-Lq5WnNsQ-0w/AAAAAAAAAAI/AAAAAAAAABg/Cn2-Y9hDg-A/s64/photo.jpg", "userId": "16487060833582065143"}, "user_tz": -60} id="UEhvPWOeM9EL" outputId="111d72ac-89e7-4a59-cb0e-73f742751a50"
# white values are way higher than black ones (Nonspammers higher than spammers)
# Count how many nodes carry each distinct label value.
np.unique(labels,return_counts=True)
# + [markdown] colab_type="text" id="iicW7LTM9i0E"
# Plot the absolute values of the GFT of your feature/label signal as a function of the graph eigenvalues. Make sure to add a marker indicating the position of each graph eigenvalue, and remember to properly name the axes.
# + colab={"base_uri": "https://localhost:8080/", "height": 780} colab_type="code" executionInfo={"elapsed": 671, "status": "error", "timestamp": 1543863072644, "user": {"displayName": "G\u00f6rkem \u00c7aml\u0131", "photoUrl": "https://lh4.googleusercontent.com/-Lq5WnNsQ-0w/AAAAAAAAAAI/AAAAAAAAABg/Cn2-Y9hDg-A/s64/photo.jpg", "userId": "16487060833582065143"}, "user_tz": -60} id="cS2rCYlJZhn7" outputId="12c3304b-1875-4878-f42e-a002fd4f31f1"
#graph.set_coordinates(eigenvectors[:, 1:3])
# Magnitude of the label signal's GFT against the graph eigenvalues; energy
# concentrated at the small eigenvalues indicates a smooth (low-pass) signal.
plt.plot(e, abs(GFT(labels)))
# + [markdown] colab_type="text" id="O3Dpr02y9i0K"
# ### Question 5
#
# Discuss the behavior of the GFT that you plotted in the last question via comparing the plot of your label signal and those of the Fourier basis of Question 1. Would you consider your labels a "low-pass" or "high-pass" signal, or yet something else entirely?
# -
# Stem Plot
# We only plotted for first 5000 labels but the remaining maintains the same behaviour
# Very wide figure so the individual stems for 5000 nodes remain readable.
plt.figure(figsize=(400,5))
plt.stem(labels[:5000])
# In the last graph we drew our network with the label signal; the values are either 1-black (spammers) or 0-white (non-spammers), and there are clearly more whites (non-spammers), as the stem plot above also shows. Looking at the absolute value of the labels' GFT (the last plot in Question 4), most of the energy is concentrated at the small eigenvalues, which is smoother behaviour than the high-frequency basis vectors from Question 1. Hence, we consider our label signal to be low-pass.
#
# + [markdown] colab_type="text" id="m4ZDDeXz9i0L"
# **Your answer here.**
# + [markdown] code_folding=[] colab_type="text" id="0f40T10G9i0M"
# ## 2 - Filtering on graphs
#
# In this section we will check how filtered Dirac impulses diffuse on your graph.
# + [markdown] colab_type="text" id="_CDSXt939i0N" inputHidden=false outputHidden=false
# ### Question 6
#
# Implement the following three filter kernels and the graph filtering operation.
#
# - The **heat kernel** is supposed to take in a vector of eigenvalues `e` and a parameter `t` and output a vector of evaluations of the heat kernel at those eigenvalues (see the course slides for help).
# - The **inverse filter** kernel is supposed to take in a vector of eigenvalues `e` and a parameter `t` and implement spectrally the filter defined in the node domain by $f_{out} = (I + t L)^{-1} f_{in},$ where $f_{in}, f_{out} \in \mathbb{R}^{N}$ are, repectively, the input and output signals to the filter.
# - The **rectangle kernel** takes in a vector of eigenvalues `e` and parameters `l_min` and `l_max` and returns `1.0` at coordinates satisfying $(e[l] \geq l_{min}) \wedge (e[l] \leq l_{max}),$ and `0.0` otherwise.
# - The **graph filtering** operation takes a graph signal $x \in \mathbb{R}^{N}$, a spectral graph `kernel` and a set of keyworded variables, and returns the corresponding filtered signal.
# - _Hint:_ Remember that you have implemented the `GFT` and `iGFT` operations in Question 3.
# - The `**kwargs` is a placeholder to collect supplementary pairs of keyword-values that are not known by the implementation before execution time.
# The `kwargs` variable is a dictionary whose keyes and values are the parameter names and values.
# This is useful to allow both `graph_filter(x, heat_kernel, tau=1.0)` and `graph_filter(x, rectangle_kernel, lambda_min=0.0, lambda_max=1.0)` to be valid calls from the same implementation.
# One can then defer the keyword-value assignment to the `kernel` call: `foo = kernel(bar, **kwargs)`.
# + colab={} colab_type="code" id="fRPk4GD69i0O" inputHidden=false outputHidden=false
# Reference: http://www.robots.ox.ac.uk/~phst/BMVC2005/papers/58/bmvc05.pdf
def heat_kernel(e, t):
    """Heat (diffusion) kernel exp(-t * lambda) evaluated at the eigenvalues e."""
    return np.exp(e * (-t))

# Reference: http://jelena.ece.cmu.edu/repository/conferences/14_GlobalSIP_ChenSMK.pdf
def inverse_kernel(e, t):
    """Spectral response of the node-domain filter f_out = (I + t L)^{-1} f_in."""
    return 1.0 / (1.0 + e * t)

def rectangle_kernel(e, l_min, l_max):
    """Ideal band-pass: 1 where l_min <= e <= l_max, 0 elsewhere."""
    inside = (e >= l_min) & (e <= l_max)
    return np.where(inside, 1, 0)

def graph_filter(x, kernel, **kwargs):
    """Filter graph signal x spectrally: GFT -> multiply by kernel(e) -> iGFT."""
    response = kernel(e, **kwargs)
    spectrum = GFT(x)
    # Diagonal spectral multiplication implemented as a sparse matrix product.
    return iGFT(sparse.spdiags(response, [0], e.size, e.size) @ spectrum)
# + [markdown] colab_type="text" id="hNU6daPS9i0R"
# ### Question 7
#
# Plot all three filter kernels in the spectral domain. Remember to properly name the axes and title the plots. Choose filter parameters that best approximate the behavior of the GFT of your feature/label signal (as seen in Question 4).
# + colab={} colab_type="code" id="86sEV3qK9i0S" inputHidden=false outputHidden=false
# Frequency responses of the three spectral kernels, one subplot each.
# Parameters (t, l_min, l_max) are chosen so the pass-band matches the
# low-frequency content of the label signal observed in Question 4.
plt.subplot(3,1,1)
plt.title("Heat Filter Kernel Frequency Response")
plt.xlabel("Laplacian EigenValues")
plt.ylabel("Filter Response")
plt.plot(e, heat_kernel(e,10**2))
plt.subplot(3,1,2)
plt.title("Inverse Filter Kernel Frequency Response")
plt.xlabel("Laplacian EigenValues")
plt.ylabel("Filter Response")
plt.plot(e, inverse_kernel(e,10**3))
plt.subplot(3,1,3)
# Fixed typo in the displayed title: "Reactangle" -> "Rectangle".
plt.title("Rectangle Filter Kernel Frequency Response")
plt.xlabel("Laplacian EigenValues")
plt.ylabel("Filter Response")
plt.plot(e, rectangle_kernel(e,-1,0.01))
plt.subplots_adjust(hspace=0.5,top=3)
# + [markdown] colab_type="text" id="YFCH5fb89i0V"
# ### Question 8
#
# Consider two Dirac impulses arbitrarily placed on your graph. Plot their filtered versions by the three filter kernels implemented in Question 6.
# + colab={} colab_type="code" id="bgcHX9ee9i0W" inputHidden=false outputHidden=false
# Place two Dirac impulses at uniformly random nodes and compare how each
# of the three filters diffuses them over the graph.
num_nodes = 62173  # total node count of the full graph — presumably U.shape[0]; TODO confirm
dirac_1 = np.zeros(num_nodes)
dirac_2 = np.zeros(num_nodes)
a = np.random.randint(0,num_nodes)
b = np.random.randint(0,num_nodes)
dirac_1[a] = 1
dirac_2[b] = 1
plt.subplot(4,1,1)
plt.title("Delta Dirac Signal")
plt.xlabel("Signal Frequency")
plt.ylabel("Filter Response")
plt.plot(range(num_nodes), dirac_1+dirac_2)
plt.subplot(4,1,2)
plt.title("Delta Dirac Signal Filtered by Heat Kernel in Graph Frequency Domain")
plt.xlabel("Signal Frequency")
plt.ylabel("Filter Response")
plt.plot(range(num_nodes),graph_filter(dirac_1+dirac_2,heat_kernel,t=10**2))
plt.subplot(4,1,3)
plt.title("Delta Dirac Signal Filtered by Inverse Kernel in Graph Frequency Domain")
plt.xlabel("Signal Frequency")
plt.ylabel("Filter Response")
plt.plot(range(num_nodes),graph_filter(dirac_1+dirac_2,inverse_kernel,t=10**3))
plt.subplot(4,1,4)
plt.title("Spectral Decomposition of Delta Dirac Signal Filtered by Rectangle Kernel in Graph Frequency Domain")
plt.xlabel("Signal Frequency")
plt.ylabel("Filter Response")
plt.plot(range(num_nodes),graph_filter(dirac_1+dirac_2,rectangle_kernel,l_min=-1,l_max=0.01))
plt.subplots_adjust(hspace=0.5,top=3)
# + [markdown] colab_type="text" id="Pf25R8mf9i0Z"
# Comment on the "diffusion" of the Diracs induced by the filters. What does it say about the "communication" of information across your network? Relate that to the network connectivity measures that you analyzed during the previous milestones.
# + [markdown] colab_type="text" id="HEQWkWuh9i0c" inputHidden=false outputHidden=false
# We notice that all three of our filters smooth out the spikes caused by our delta Dirac signals very nicely. This supports the notion that our network is well connected and thus robust: significant local perturbations can be eliminated via filtering.
#
# This agrees with last week's measure of Algebraic Connectivity (the magnitude of the eigenvalue associated with the Fiedler vector), which was not only high but also gave us an accurate partitioning -- the latter suggests that there are significant underlying structural properties in our graph, which would in turn imply that it is resistant to local perturbations.
#
# The robustness of our network is concordant with the way in which our original data-set was downsampled : starting around a highly connected node and then progressively adding neighbors.
# + [markdown] colab_type="text" id="UJ-zMW-09i0d"
# ## 3 - De-noising
#
# In this section we will add some centered Gaussian noise to your feature/label signal and attempt to recover it.
# + [markdown] colab_type="text" id="ffOCa14v9i0e"
# ### Question 9
#
# In the cell below, set the noise variance $\sigma^2$ by making sure that the signal-to-noise ratio $SNR = \frac{\operatorname{Var}(\text{labels})}{\sigma^2}$ is about $1.5$.
#
# _Note:_ Actually, you might want to play with the noise variance here and set it to different values and see how the denoising filters behave.
# + colab={} colab_type="code" id="F_G-4EO-9i0f" inputHidden=false outputHidden=false
labels[labels <=0 ] = -1
signal_to_noise_ratio = 1.5
noise_variance = np.var(labels)/signal_to_noise_ratio
noisy_measurements = labels - np.random.randn(n_nodes,1)*noise_variance
# + [markdown] colab_type="text" id="gLQryn_99i0i"
# ### Question 10
#
# In the denoising setting, a common graph signal processing assumption is that the signal $z$ that we want to recover is "smooth", in the sense that $\|\nabla_G z\|_2 = \sqrt{z^{\top} L z}$ is small, while remaining "close" to the measurements that we start with. This leads to denoising by solving the following optimization problem:
#
# $$
# z^\star = \text{arg} \, \underset{z \in \mathbb{R}^{N}}{\min} \, \|z - y\|_2^2 + \gamma z^{\top} L z,
# $$
#
# where $y \in \mathbb{R}^{N}$ is the vector of noisy measurements.
#
# Derive the close form solution to this problem giving $z^\star$ as a function of $y$, $\gamma$ and $L$. Does this solution correspond to any graph filtering operation that you know?
# + [markdown] colab_type="text" id="D4PE9t0p9i0j"
# Setting the gradient of the objective $J(z) = \|z - y\|_2^2 + \gamma z^{\top} L z$ to zero (the objective is convex, so this stationary point is the global minimum):
# $$
# \begin{align}
# \nabla_z J(z^\star) &= 2(z^\star - y) + 2\gamma L z^\star = 0 \\
# \Leftrightarrow \; (I + \gamma L)\, z^\star &= y \\
# \Leftrightarrow \; z^\star &= (I + \gamma L)^{-1} y
# \end{align}
# $$
#
# The closed form solution above corresponds to the inverse filtering operation
# + [markdown] colab_type="text" id="R8wDfoBh9i0k"
# ### Question 11
#
# Now, denoise the noisy measurements by passing them through the filters that you implemented in Question 6. Choose the filter parameters based on the behavior of the GFT of your original label signal (this is the prior knowledge that you input to the problem).
# + colab={} colab_type="code" id="JbKOogO59i0l" inputHidden=false outputHidden=false
# Filter parameters matching the low-pass prior observed in Question 4.
t_heat_denoised=10**2
t_inv_denoised=10**3
l_min=-1
l_max=0.01
# Denoise by low-pass filtering the noisy measurements with each kernel.
# Fix: the parameters defined above are now actually used, instead of
# duplicating the same literals inline (previous DRY violation).
z_heat_denoised = graph_filter(noisy_measurements,heat_kernel,t=t_heat_denoised)
z_inv_denoised = graph_filter(noisy_measurements,inverse_kernel,t=t_inv_denoised)
z_rect_denoised = graph_filter(noisy_measurements,rectangle_kernel, l_min=l_min, l_max=l_max)
# + [markdown] colab_type="text" id="y9fycbW29i0n"
# Plot, on your graph, the original label signal, the noisy measurements, and the three denoised version obtained above. Report on each plot the value of the corresponding relative error
# $$
# \text{rel-err} = \frac{\|\text{labels} - z \|_2}{\|\text{labels}\|_2},
# $$
# where $z$ is the plotted signal.
# + colab={} colab_type="code" id="nk8zdNdc9i0o" inputHidden=false outputHidden=false
# Scatter each signal in the eigenmap plane (U[:,1], U[:,2]), colored by the
# signal value, and report rel-err = ||labels - z||_2 / ||labels||_2.
plt.scatter(U[:, 1], U[:, 2], c=labels.reshape(num_nodes), alpha=0.5)
plt.title('Original Label Signal')
# -
# 1) Noised signal
rel_err_noised=np.linalg.norm(labels-noisy_measurements)/np.linalg.norm(labels)
plt.scatter(U[:, 1], U[:, 2], c=noisy_measurements.reshape(num_nodes), alpha=0.5)
plt.title('Relative Error of Noised Signal: %1.2f' %rel_err_noised)
print('The relative error of the noised signal is: ', rel_err_noised)
# 2) Heat kernel denoised signal
rel_err_heat_denoised=np.linalg.norm(labels-z_heat_denoised)/np.linalg.norm(labels)
plt.scatter(U[:, 1], U[:, 2], c=z_heat_denoised.reshape(num_nodes), alpha=0.5)
plt.title('Relative Error of Heat Kernel Denoised Signal: %1.2f' %rel_err_heat_denoised)
print('The relative error of the heat kernel denoised signal is: ', rel_err_heat_denoised)
# 3) Inverse filter kernel denoised signal
rel_err_inv_denoised=np.linalg.norm(labels-z_inv_denoised)/np.linalg.norm(labels)
plt.scatter(U[:, 1], U[:, 2], c=z_inv_denoised.reshape(num_nodes), alpha=0.5)
plt.title('Relative Error of Inverse Filter Kernel Denoised Signal: %1.2f' %rel_err_inv_denoised)
print('The relative error of the inverse filter kernel denoised signal is: ', rel_err_inv_denoised)
# 4) Rectangle kernel denoised signal
rel_err_rect_denoised=np.linalg.norm(labels-z_rect_denoised)/np.linalg.norm(labels)
plt.scatter(U[:, 1], U[:, 2], c=z_rect_denoised.reshape(num_nodes), alpha=0.5)
plt.title('Relative Error of Rectangle Kernel Denoised Signal: %1.2f' %rel_err_rect_denoised)
print('The relative error of the rectangle kernel denoised signal is: ', rel_err_rect_denoised)
# + [markdown] colab_type="text" id="NH07dMbk9i0q"
# Finally, overlay on the same plot the GFT of all five signals above.
# + colab={} colab_type="code" id="f0lzgNal9i0r" inputHidden=false outputHidden=false
# Overlay the GFT magnitudes of all five signals on one semilog plot.
overlay_labels=GFT(labels)
overlay_noisy_measurements=GFT(noisy_measurements)
overlay_heat_denoised=GFT(z_heat_denoised)
overlay_inv_denoised=GFT(z_inv_denoised)
overlay_rect_denoised=GFT(z_rect_denoised)
titles =["Ground Truth","Noisy Labels","Denoised by Heat Kernel",
        "Denoised by Inv Kernel", "Denoised by Rectangle Kernel"]
overlay_all=[np.absolute(overlay_labels), np.absolute(overlay_noisy_measurements), np.absolute(overlay_heat_denoised), np.absolute(overlay_inv_denoised), np.absolute(overlay_rect_denoised)]
# -
# Log scale on y so the wide dynamic range of spectral magnitudes is visible.
for i,plot in enumerate(overlay_all):
    line, = plt.semilogy(plot)
    line.set_label(titles[i])
plt.title("Graph Fourier Transform of Signals")
plt.ylabel("GFT(x[i])")
plt.xlabel("i")
plt.legend(loc='lower center')
# + [markdown] colab_type="text" id="mxHx1twf9i0v"
# ### Question 12
#
# Comment on which denoised version seems to best match the original label signal. What is the underlying assumption behind the three filtering approaches? Do you think it holds for your label signal? Why?
# + [markdown] colab_type="text" id="Qm_3OEOP9i0w"
# *The denoised version with rectangle kernel seems to best match the original label signal as it has the least relative error estimation with the original signal compared to other denoised versions.*
# + [markdown] colab_type="text" id="Q4kcVx3J9i00"
# ## 4 - Transductive learning
#
# It is often the case in large networks that we can only afford to query properties/labels on a small subset of nodes. Nonetheless, if the underlying labels signal is "regular" enough, we might still be able to recover a good approximation of it by solving an offline variational problem, with constraints on the values of the measured nodes.
#
# In this section, we will be interested in solving such transductive learning problems by minimizing a (semi-) p-norm of the graph gradient applied to the signal of interest:
#
# $$
# \text{arg} \, \underset{z|_S = y}{\min} \|\nabla_G z\|_p^p,
# $$
#
# where $S$ is the set of measured nodes.
#
# In English, we can say that we are looking for solutions with small "aggregated local variations", as measured by $\|\nabla_G z\|_p^p = \sum_{i=1}^{n} \sum_{j=1}^{n} \left( \sqrt{W_{ij}} |z[i] - z[j]| \right)^p,$ while satisfying the measurement constraints $z[i] = y[i]$ for $i \in S.$
#
# We will work with two cases, according to the choices $p=1$ or $p=2.$ For $p=1,$ the problem is known as "interpolation by graph total-variation minimization," whereas for $p=2$ it is sometimes called "interpolation by Tikhonov regularization".
#
# In order to solve these variational problems with the black-box solver provided to you, you will use the [pyunlocbox](https://pyunlocbox.readthedocs.io). This toolbox implements iterative solvers based on so-called ["proximal-splitting"](https://en.wikipedia.org/wiki/Proximal_gradient_method) methods.
# + [markdown] colab_type="text" id="nu_f4kBe9i02"
# ### Question 13
#
# Throughout this section, we will consider only a binarized version of your label signal. If your variable `labels` currently has values other than $\{-1, 1\},$ threshold them so that those are the only values taken in this vector. This can be done for example by choosing a number $t \in \mathbb{R}$ and then setting $\text{labels_bin}[i] = 1$ if $\text{labels}[i] \geq t$ and $\text{labels_bin}[i] = 0$ otherwise.
# + colab={} colab_type="code" id="6xnpSiDY9i04" inputHidden=false outputHidden=false
#Drop Unnamed Column
# Extract the spammer labels from the features table.
# NOTE(review): assumes `features` is a DataFrame with an "Unnamed: 0" index
# column and a "Spammer Label" column — confirm against where it is loaded.
labels_bin = features.drop(columns=["Unnamed: 0"]).copy()
labels_bin = labels_bin["Spammer Label"].copy()
# Map the 0 (non-spammer) class to -1 so labels take values in {-1, 1}.
labels_bin.replace(0, -1, inplace = True)
# + [markdown] colab_type="text" id="FXsgTOEu9i0_"
# Now, subsample this binarized label signal by $70\%$ by choosing, uniformly at random, $30\%$ of the nodes whose labels we will keep.
#
# You will do this by computing a "measurement mask" vector `w` with `1.0`'s at the measured coordinates, and $0.0$'s otherwise.
# -
#We are going to make our dataset smaller(10000 x 10000) as it takes too much time to compute Gradient to phase_transition Test.
# Keep only the first 10000 nodes (and the matching labels) for tractability.
new_adjacency = adjacency_org[:10000, :10000]
new_labels_bin = labels_bin.copy().head(10000)
# + colab={} colab_type="code" id="L9zpf34X9i1B" inputHidden=false outputHidden=false
# Build binary measurement masks: w[i] == 1.0 iff node i's label is observed.
n_nodes = labels_bin.shape[0]
mn_ratio = 0.3
m = int(mn_ratio * n_nodes)  # Number of measurements.

# Mask for the full graph: pick m distinct nodes uniformly at random.
w = np.zeros(n_nodes)
new_index = np.random.choice(n_nodes, m, replace=False)
w[new_index] = 1  # fancy indexing replaces the element-wise loop

# Mask for the reduced 10000x10000 graph.
new_m = int(mn_ratio * len(new_labels_bin))  # Number of measurements for new_adjacency.
new_w = np.zeros(len(new_labels_bin))
new_index = np.random.choice(len(new_labels_bin), new_m, replace=False)
new_w[new_index] = 1
# + [markdown] colab_type="text" id="2e7P4PHW9i1J"
# Plot the subsampled signal on the graph. _Hint:_ you might want to set to `numpy.nan` the values of the un-measured nodes for a cleaner plot.
# + colab={} colab_type="code" id="kZna3U809i1K" inputHidden=false outputHidden=false
# Un-measured nodes are mapped to NaN so matplotlib leaves them uncolored.
# NOTE(review): this colors the mask itself (1.0 at measured nodes), not the
# measured label values — confirm that is the intended visualization.
plot_w = np.where(w == 1.0, w, np.nan)
plt.scatter(U[:, 1], U[:, 2], c=plot_w, alpha=0.5)
plt.title('Plot of Subsampled Signal')
# + [markdown] colab_type="text" id="BGJuc6xr9i1Q"
# ### Interlude
#
# For the solution of the variational problems you can use the following function as a "black-box".
#
# You will just need to provide a `gradient` matrix (which you should already have from Section 0), and an orthogonal projection operator `P` onto the span of the measured coordinates (made precise in the next question).
# + colab={} colab_type="code" id="IN9htkZL9i1R"
def graph_pnorm_interpolation(gradient, P, x0=None, p=1., **kwargs):
    r"""
    Solve an interpolation problem via gradient p-norm minimization.
    A signal :math:`x` is estimated from its measurements :math:`y = A(x)` by solving
    :math:`\text{arg}\underset{z \in \mathbb{R}^n}{\min}
    \| \nabla_G z \|_p^p \text{ subject to } Az = y`
    via a primal-dual, forward-backward-forward algorithm.
    Parameters
    ----------
    gradient : array_like
        A matrix representing the graph gradient operator
    P : callable
        Orthogonal projection operator mapping points in :math:`z \in \mathbb{R}^n`
        onto the set satisfying :math:`A P(z) = A z`.
    x0 : array_like, optional
        Initial point of the iteration. Must be of dimension n.
        (Default is `numpy.random.randn(n)`)
    p : {1., 2.}
    kwargs :
        Additional solver parameters, such as maximum number of iterations
        (maxit), relative tolerance on the objective (rtol), and verbosity
        level (verbosity). See :func:`pyunlocbox.solvers.solve` for the full
        list of options.
    Returns
    -------
    x : array_like
        The solution to the optimization problem.
    """
    # Graph gradient and its adjoint (divergence) as linear operators.
    grad = lambda z: gradient.dot(z)
    div = lambda z: gradient.transpose().dot(z)
    # Indicator function of the set satisfying :math:`y = A(z)`
    # (its proximal operator is the orthogonal projection P onto that set).
    f = functions.func()
    f._eval = lambda z: 0
    f._prox = lambda z, gamma: P(z)
    # :math:`\ell_1` norm of the dual variable :math:`d = \nabla_G z`
    g = functions.func()
    g._eval = lambda z: np.sum(np.abs(grad(z)))
    g._prox = lambda d, gamma: functions._soft_threshold(d, gamma)
    # :math:`\ell_2` norm of the gradient (for the smooth case)
    h = functions.norm_l2(A=grad, At=div)
    # Step size bounded via the Frobenius norm of the gradient operator
    # (an upper bound on its spectral norm) to keep the iteration stable.
    stepsize = (0.9 / (1. + scipy.sparse.linalg.norm(gradient, ord='fro'))) ** p
    solver = solvers.mlfbf(L=grad, Lt=div, step=stepsize)
    if p == 1.:
        # Total-variation interpolation: minimize the l1 norm of the gradient.
        problem = solvers.solve([f, g, functions.dummy()], x0=x0, solver=solver, **kwargs)
        return problem['sol']
    if p == 2.:
        # Tikhonov interpolation: minimize the l2 norm of the gradient.
        problem = solvers.solve([f, functions.dummy()], x0=x0, solver=solver, **kwargs)['sol'] if False else solvers.solve([f, functions.dummy(), h], x0=x0, solver=solver, **kwargs)
        return problem['sol']
    else:
        # Any other p is unsupported: return the starting point unchanged.
        return x0
# + [markdown] colab_type="text" id="3bpB9sfP9i1T"
# ### Question 14
#
# During the iterations of the algorithm used for solving the variational problem, we have to make sure that the labels at the measured nodes stay the same. We will do this by means of an operator `P` which, given a vector $a \in \mathbb{R}^{N},$ returns another vector $b \in \mathbb{R}^{N}$ satisfying $b[i] = \text{labels_bin}[i]$ for every node $i$ in the set $S$ of known labels, and $b[i] = a[i]$ otherwise. Write in the cell below the function for this orthogonal projection operator `P`.
#
# _Hint:_ remember you have already computed the mask `w`.
# + colab={} colab_type="code" id="wSDYwb-U9i1T" inputHidden=false outputHidden=false
def P(a):
    """Orthogonal projection enforcing the measured labels.

    Returns b with b[i] = new_labels_bin[i] on measured nodes
    (new_w[i] != 0) and b[i] = a[i] on un-measured ones.

    Reads the module-level mask ``new_w`` and labels ``new_labels_bin``.
    NOTE(review): assumes new_labels_bin is positionally indexed 0..N-1
    (it is the head of a default-indexed frame) — confirm if that changes.
    """
    # Fix: removed a leftover no-op statement (a bare `new_w` expression) and
    # replaced the element-wise Python loop with a vectorized np.where.
    return np.where(np.asarray(new_w) == 0, np.asarray(a), np.asarray(new_labels_bin))
# + [markdown] colab_type="text" id="gsfN-_ne9i1X" inputHidden=false outputHidden=false
# ### Question 15
#
# Solve the variational problems for $p = 1$ and $p = 2$. Record the solution for the $1-$norm minimization under `sol_1norm_min` and the one for $2-$norm minimization under `sol_2norm_min`.
#
# Compute also binarized versions of these solutions by thresholding the values with respect to $0$, that is, non-negative values become `1.0`, while negative values become `-1.0`. Store those binarized versions under `sol_1norm_bin` and `sol_2norm_bin`, respectively.
# +
# Create the gradient (edge-incidence) matrix for the reduced 10000 x 10000
# adjacency matrix (the full graph is too slow for the phase_transition test).
degrees = new_adjacency.sum(0)
# NOTE: summing a symmetric adjacency counts every undirected edge twice, so
# the gradient is allocated with 2E rows; the unused rows stay all-zero and
# do not change L = G^T G. Kept as-is for backward compatibility of shapes.
n_edges = np.sum(new_adjacency)
n_nodes = new_adjacency.shape[0]
gradient = np.zeros((n_edges, n_nodes))
# Performance fix: replace the original O(n^2) Python double loop with a
# vectorized enumeration of the upper-triangle edges (i < j), which np.nonzero
# yields in the same row-major order as the loop did.
rows, cols = np.nonzero(np.triu(new_adjacency, k=1) == 1)
edge_ids = np.arange(len(rows))
gradient[edge_ids, rows] = 1
gradient[edge_ids, cols] = -1
c = len(rows)  # number of undirected edges actually written
gradientT = gradient.transpose()
# Sanity check: the combinatorial Laplacian D - A must equal G^T G.
lap = np.diag(degrees) - new_adjacency
new_lap = gradientT @ gradient
np.testing.assert_allclose((new_lap), lap)
# + colab={} colab_type="code" id="D-e9rytw9i1X" inputHidden=false outputHidden=false
# Solve both variational problems from random starting points, then binarize
# the solutions at 0 into {-1, 1}.
gradient = scipy.sparse.csr_matrix(gradient)
x0 = np.random.randn(n_nodes)
# NOTE(review): x0 above is never used; each call below draws a fresh start.
sol_1norm_min = graph_pnorm_interpolation(gradient, P, x0 = np.random.randn(n_nodes), p=1.)
sol_2norm_min = graph_pnorm_interpolation(gradient, P, x0 = np.random.randn(n_nodes), p=2.)
threshold = 0
sol_1norm_bin = np.copy(sol_1norm_min)
sol_2norm_bin = np.copy(sol_2norm_min)
# Non-positive values -> -1, positive values -> 1.
np.place(sol_1norm_bin, sol_1norm_bin <= threshold, -1)
np.place(sol_1norm_bin, sol_1norm_bin > threshold, 1)
np.place(sol_2norm_bin, sol_2norm_bin <= threshold, -1)
np.place(sol_2norm_bin, sol_2norm_bin > threshold, 1)
# + [markdown] colab_type="text" id="-zil10PK9i1b" inputHidden=false outputHidden=false
# Plot, on your graph, the original `labels_bin` signal, as well as the solutions to the variational problems (both binarized and otherwise). Indicate on each plot the value of the relative error $\text{rel-err} = \frac{\|\text{labels_bin} - z\|_2}{\|\text{labels_bin}\|_2}$, where $z$ is the signal in the corresponding plot.
# +
# Relative errors of the four recovered signals against the ground truth,
# then one eigenmap scatter plot per signal (error reported in each title).
rel_err_min_1norm = np.linalg.norm(new_labels_bin - sol_1norm_min) / np.linalg.norm(new_labels_bin)
rel_err_min_2norm = np.linalg.norm(new_labels_bin - sol_2norm_min) / np.linalg.norm(new_labels_bin)
rel_err_bin_1norm = np.linalg.norm(new_labels_bin - sol_1norm_bin) / np.linalg.norm(new_labels_bin)
rel_err_bin_2norm = np.linalg.norm(new_labels_bin - sol_2norm_bin) / np.linalg.norm(new_labels_bin)
titles = [ "Ground Truth",
          "Solution to non-binarized Problem (p=1): relative_error="+str(rel_err_min_1norm),
          "Solution to non-binarized Problem (p=2): relative_error="+str(rel_err_min_2norm),
          "Solution to binarized Problem (p=1): relative_error="+str(rel_err_bin_1norm),
          "Solution to binarized Problem (p=2): relative_error="+str(rel_err_bin_2norm),
         ]
fig, axes = plt.subplots(nrows=5, ncols=1, figsize=(15,10))
for i,plot in enumerate([new_labels_bin,sol_1norm_min,
                         sol_2norm_min,sol_1norm_bin,
                         sol_2norm_bin,]):
    plt.subplot(5,1,i+1)
    # Only the first 10000 eigenvector coordinates: the signals live on the
    # reduced 10000-node graph.
    plt.scatter(U[:10000, 1], U[:10000, 2], c=plot, alpha=0.5)
    plt.title(titles[i])
plt.subplots_adjust(hspace=1,top=1)
# -
# The above graphs represent each signal as a coloring of points, with each point being placed according to the 2nd and 3rd coordinates of the respective graph eigenvector.
# + [markdown] colab_type="text" id="aPWj161u9i1d"
# ### Question 16
#
# Now that you have got a feeling for the sort of solutions that the transductive learning problems studied can give, we will see what is the effect of the number of measurements on the accuracy of both $p-$norm minimization problems.
#
# Towards this goal, you will write a `phase_transition()` function. This function will basically go over all the procedures that you have implemented in this section, but for varying numbers of measurements and thresholding values. It will also compute the relative error, $\text{rel-err},$ of the solutions and average them over a number of trials.
#
# The output of the `phase_transition()` function has to be a matrix with `len(mn_ratios)` columns and `len(thresholds)` rows. Each pixel $(i,j)$ in the output matrix has to contain the average, over `n_trials` trials, of the relative error $\text{rel-err}$ in the binarized (with threshold `thresholds[i]`) solution given by `graph_pnorm_interpolation()` from observing an `mn_ratios[j]` fraction of nodes. The randomness comes from a different choice of mask `w` at each trial, hence the averaging.
#
# The interest of this phase transition matrix is to assess what level of recovery error one could expect for a certain fraction of measurements and a certain threshold level.
# + colab={} colab_type="code" id="SN5EpXLM9i1e" inputHidden=false outputHidden=false
def phase_transition(mn_ratios, thresholds, n_trials, labels_bin, p):
    """Average binarized-recovery relative error over random measurement masks.

    Parameters
    ----------
    mn_ratios : list of float
        Fractions of observed nodes (one per output column).
    thresholds : list of float
        Binarization thresholds (one per output row).
    n_trials : int
        Number of random masks averaged per (threshold, ratio) cell.
    labels_bin : array_like
        Ground-truth labels in {-1, 1}.
    p : {1., 2.}
        Which p-norm interpolation to solve.

    Returns
    -------
    numpy.ndarray of shape (len(thresholds), len(mn_ratios))
        Mean relative errors.

    Uses the module-level ``gradient``, ``P`` and ``n_nodes``.
    """
    # Bug fix: the projection P reads the *module-level* mask `new_w`.
    # Without this declaration, the assignment below created a local new_w,
    # so the per-trial masks never reached P and every trial silently reused
    # the stale global mask.
    global new_w
    pt_matrix = np.zeros((len(thresholds), len(mn_ratios)))
    for i in range(0, len(thresholds)):
        for j in range(0, len(mn_ratios)):
            errs = []
            for k in range(0, n_trials):
                # Draw a fresh random measurement mask for this trial.
                new_w = np.zeros(len(labels_bin))
                new_m = int(mn_ratios[j] * len(labels_bin))
                new_index = np.random.choice(range(len(labels_bin)), new_m, replace=False)
                new_w[new_index] = 1
                # Solve the p-norm interpolation from a random starting point.
                solution = graph_pnorm_interpolation(gradient, P, x0=np.random.randn(n_nodes), p=p,
                                                     maxit=15, verbosity="NONE")
                # Binarize the solution at the current threshold.
                np.place(solution, solution <= thresholds[i], -1)
                np.place(solution, solution > thresholds[i], 1)
                # Relative recovery error for this trial.
                err = np.linalg.norm(labels_bin - solution) / np.linalg.norm(labels_bin)
                errs.append(err)
            # Average the trial errors for this cell.
            pt_matrix[i][j] = np.mean(errs)
    return pt_matrix
# + [markdown] colab_type="text" id="vC4ibOrt9i1g"
# ### Question 17
#
# Pick 5 "m/n" ratios in $(0, 1)$ and 5 threshold levels in $(-1, 1)$ and run the `phase_transition()` function with `n_trials` = 20, for both $p = 1$ and $p = 2$.
# + colab={} colab_type="code" id="Myico0W49i1i" inputHidden=false outputHidden=false
# Evaluate the phase transition over 5 measurement ratios and 5 thresholds,
# averaging 20 random-mask trials per cell, for both p-norms.
mn_ratios = [0.3, 0.5, 0.7, 0.8, 0.9]
thresholds = [1, 0.5, 0.0, -0.2 , -1]
pt_matrix_2norm = phase_transition(mn_ratios, thresholds, 20, new_labels_bin, p=2)
pt_matrix_1norm = phase_transition(mn_ratios, thresholds, 20, new_labels_bin, p=1)
# +
# Heat map of the averaged relative error for p=1.
X, Y = np.meshgrid(mn_ratios, thresholds)
Z = pt_matrix_1norm
fig, (ax0) = plt.subplots(1, 1)
c = ax0.pcolormesh(X, Y, Z)
fig.colorbar(c, ax=ax0)
plt.ylabel("Threshold")
plt.xlabel("Ratio")
plt.title("Relative Error as a Function of Thresholding and Ratio with p=1.")
# +
# Heat map of the averaged relative error for p=2.
X, Y = np.meshgrid(mn_ratios, thresholds)
Z = pt_matrix_2norm
fig, (ax0) = plt.subplots(1, 1)
c = ax0.pcolormesh(X, Y, Z)
fig.colorbar(c, ax=ax0)
plt.ylabel("Threshold")
plt.xlabel("Ratio")
plt.title("Relative Error as a Function of Thresholding and Ratio with p=2.")
# + [markdown] colab_type="text" id="3uoSJTR89i1k"
# Plot both phase transition matrices as images with a colorbar. Make sure to properly name the axes and title the images.
# + [markdown] colab_type="text" id="Pzd70wTO9i1o"
# ### Question 18
#
# Do the phase transition plots above provide any justification for choosing one $p-$norm interpolation over the other? Why?
# + [markdown] colab_type="text" id="Cz9jEOx29i1p"
# The error in our case mostly does not make sense, and this is due to the dataset. Most of the features we have do not determine whether somebody is a spammer. In the real world, being a spammer would likely depend on how many outgoing messages a user sends in proportion to how many they receive: the larger this ratio, the stronger the indication that the user is a spammer.
#
# Since this is not an actual feature of our dataset and most of our users are not labeled as spammers, the best error is obtained when the threshold is set to 1, meaning that all users are labeled as non-spammers. This actually makes sense. And, obviously, as the threshold decreases we get less and less accuracy, since we are trying to learn something with no meaningful correlation in the data.
# -
# The error in our case mostly makes sense, and this is due to the dataset. The original dataset that we were given, and that we are going to work on for the project, is actually directed. In the directed dataset, the proportion of messages a user has sent to messages received would make a huge difference; since we are working with the undirected adjacency matrix, we are negating this factor. The reason a threshold level of 1 gives the best accuracy is that most users are labeled as non-spammers, so setting the threshold to 1 labels every node as a non-spammer and thus yields the best average accuracy. As the threshold decreases we get worse results, because discarding edge direction destroys much of the correlation with being identified as a spammer.
#
# Another factor is that we are not working with the whole dataset: we shortened the adjacency matrix to 10000 x 10000 due to runtime. For example, node 0 may be labeled as a spammer while having few or no connections to nodes 1-10000 but many connections to nodes 10001-62173. This subsampling destroys the actual correlation, and hence we obtain these results.
#
# To sum up, subsampling the dataset and working with the undirected adjacency matrix makes it almost impossible for the solver to produce something meaningful, as the original labels are based on the bigger dataset and the directed adjacency matrix.
#
# The subsampling was necessary and was done because of the runtime.
#
# Regarding the ratios, we can see that the average changes only slightly. This is again due to the weak correlation.
#
# In real-world machine learning, both thresholds and ratios have a large effect, and usually the best threshold would be balanced (50%-50%), in our case 0.
| milestones/4_graph_signal_processing.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
#########################################################################################
# Name: <NAME>
# Student ID: 64180008
# Department: Computer Engineering
# Assignment ID: A3
#########################################################################################
# -
import pandas as pd
import random
import numpy as np
#########################################################################################
# QUESTION I
# Description: each sub-question builds a pandas Series in a slightly different way.
#########################################################################################
print("\n")
print("SOLUTION OF QUESTION I:Perform the following tasks with pandas Series")
# 1.a: Series from an explicit list of values.
print("1.a")
a = pd.Series([7, 11, 13, 17])
a
# 1.b: five identical values of 100.
print("1.b")
a = pd.Series([100] * 5)
a
# 1.c: twenty pseudo-random integers in [0, 100].
print("1.c")
a = pd.Series([random.randint(0, 100) for _ in range(20)])
a
# 1.d: body temperatures indexed by patient name.
print("1.d")
temperatures = pd.Series(
    [98.6, 98.9, 100.2, 97.9],
    index=['Julie', 'Charlie', 'Sam', 'Andrea'],
)
temperatures
# 1.e: the same data, but built from a dict (keys become the index).
print("1.e")
dictionary = {
    'Julie': 98.6,
    'Charlie': 98.9,
    'Sam': 100.2,
    'Andrea': 97.9,
}
a = pd.Series(dictionary)
a
#########################################################################################
# QUESTION II
# Description: Parts of this question are solved with index information of pandas library.
#########################################################################################
print("\n")
print("SOLUTION OF QUESTION II:Perform the following tasks with pandas DataFrames")
# 2.a: a one-column DataFrame of temperatures, indexed by person name.
print("2.a")
temp = {'Maxine':37.5,'James':37.3,'Amanda':39.9}
temperatures = pd.DataFrame({'temp':temp})
temperatures
# 2.b: 3x3 temperature readings; columns=temp reuses the dict's keys
# ('Maxine', 'James', 'Amanda') as the column labels.
print('2.b')
temps = [[37.8,37.9,38.9],
         [36.9,38.7,39.7],
         [36.4,37.5,38.6]]
temperatures = pd.DataFrame(temps,index=['Morning','Afternoon','Evening'],columns=temp)
temperatures
# 2.c: select one person's readings (a single column).
print('2.c')
temperatures['Maxine']
# 2.d: select one row by its index label.
print('2.d')
temperatures.loc['Morning']
# 2.e: select multiple rows by label.
print('2.e')
temperatures.loc[['Morning','Evening']]
# 2.f: select multiple columns.
print('2.f')
temperatures[['Amanda','Maxine']]
# 2.g: row and column selection combined in a single .loc call.
print('2.g')
temperatures.loc[['Morning','Afternoon'],('Amanda','Maxine')]
# 2.h: per-column summary statistics.
print('2.h')
temperatures.describe()
# 2.i: swap rows and columns.
print('2.i')
temperatures.transpose()
# 2.j: reorder the columns alphabetically.
print('2.j')
temperatures.reindex(sorted(temperatures.columns), axis=1)
#########################################################################################
# QUESTION III
# Description: The parts of this question are solved with DataFrame and DataFrame functions within.
#########################################################################################
print("\n")
print("SOLUTION OF QUESTION III:These questions are based on Human Resources (HR) database given in site\n\
https://www.w3resource.com/python-exercises/pandas/index.php. This site includes Pandas exercises,\n\
practice facilities and solutions of some exercises. You can look at these exercises before solving the\n\
following questions. CSV files in HR database can be found in assignment’s attachments (HR-\n\
Database.rar). First, generate a data frame for each of tables in HR Database as follows:")
# Widen the display limits so full tables show up in the notebook output.
pd.set_option('display.max_rows', 500)
pd.set_option('display.max_columns', 500)
# Load every table of the HR database; the CSVs are expected to sit in the
# current working directory.
employees = pd.read_csv(r"EMPLOYEES.csv")
departments = pd.read_csv(r"DEPARTMENTS.csv")
job_history = pd.read_csv(r"JOB_HISTORY.csv")
jobs = pd.read_csv(r"JOBS.csv")
countries = pd.read_csv(r"COUNTRIES.csv")
regions = pd.read_csv(r"REGIONS.csv")
locations = pd.read_csv(r"LOCATIONS.csv")
# 3.a: show the departments table.
print('3.a')
departments
# 3.b: record counts for every table.
print('3.b')
print('Number of records all dataframes.')
print('Dep:',len(departments))
print('Employees:',len(employees))
print('Job history:',len(job_history))
print('Jobs:',len(jobs))
print('Countries:',len(countries))
print('Locations:',len(locations))
print('Regions:',len(regions))
# 3.c: employees earning more than 10000, highest salary first.
print('3.c')
a = employees.sort_values('salary',axis=0,ascending=False)
a[a['salary']>10000]
# 3.d: replace missing commission percentages with 0.
print('3.d')
# Assign the filled column back instead of calling fillna(inplace=True) on the
# column selection: the inplace call on employees['commission_pct'] is chained
# assignment (it may operate on a temporary) and is deprecated in pandas 2+.
employees['commission_pct'] = employees['commission_pct'].fillna(0)
employees
# 3.e: employees of departments 30, 50 and 80 (concatenated in that order).
print('3.e')
l = employees[employees['department_id']==80]
a = employees[employees['department_id']==30]
b = employees[employees['department_id']==50]
group = pd.concat([a,b,l])
group[['first_name','last_name','salary','department_id']]
# 3.f: join each employee with their department attributes.
print('3.f')
emp_dept = pd.merge(employees,departments,on='department_id')
emp_dept
# 3.g: min / mean / max salary per department.
print('3.g')
empt_dept = emp_dept.groupby('department_name').salary.aggregate(['min', 'mean', max])
empt_dept
# +
# 3.h: mean salary per (country, city), plus the flat list of city names.
print('3.h')
group = pd.merge(emp_dept,locations,on='location_id')
group2 = group.groupby(['country_id','city']).aggregate({'salary':'mean'})
group3 = pd.DataFrame(group2)
# The city is the second level of the (country_id, city) MultiIndex; a
# comprehension replaces the original index-based append loop.
city_list = [idx[1] for idx in group3.index]
def calculate(f, l):
    """Return, for every city in ``city_list``, the mean of the salaries in
    ``group`` that fall in the half-open interval (f, l].

    Cities with no salary in the interval yield 0 (as does a band whose
    matching salaries sum to 0, mirroring the original accumulator logic).
    Relies on the module-level ``city_list`` and ``group`` built above.
    """
    salary = []
    for city in city_list:
        in_band = [s for s in group[group['city'] == city]['salary'] if f < s <= l]
        total = sum(in_band)
        # Original behaviour: a zero sum (including the empty case) maps to 0.
        salary.append(total / len(in_band) if total != 0 else 0)
    return salary
# Salary-band columns: each calculate(f, l) call returns, per city, the mean
# of the salaries in the interval (f, l] (0 when the band is empty).
last_group = group3.drop(columns=['salary'])
last_group['(0,5000]'] = calculate(0,5000)
last_group['(5000,10000]'] = calculate(5000,10000)
last_group['(10000,15000]'] = calculate(10000,15000)
last_group['(15000,25000]'] = calculate(15000,25000)
last_group
# -
#########################################################################################
# QUESTION IV
# Description: The parts of this question are solved with combination of
# pandas and matplotlib libraries.
#########################################################################################
print("\n")
print("SOLUTION OF QUESTION IV:A data repository is maintained by Johns Hopkins University CSSE research\n\
center (https://github.com/CSSEGISandData/COVID-19/) about corona virus incidents. The site\n\
https://www.w3resource.com/python-exercises/project/covid-19/index.php includes some exercises\n\
on COVID-19 data set. You can look at these exercises before solving the following questions. First, get\n\
the latest covid data from github as follows:")
# +
# Daily report snapshot (20 April 2020) and the confirmed-cases time series,
# both fetched straight from the JHU CSSE GitHub repository.
covid_data = pd.read_csv('https://raw.githubusercontent.com/CSSEGISandData/COVID-19/master/csse_covid_19_data/csse_covid_19_daily_reports/04-20-2020.csv')
covid_series = pd.read_csv('https://raw.githubusercontent.com/CSSEGISandData/COVID-19/master/csse_covid_19_data/csse_covid_19_time_series/time_series_covid19_confirmed_global.csv')
# -
# 4.a: preview both tables.
print('4.a')
covid_data.head(5)
print('4.a')#continuation of a part of 4th question.
covid_series.head(5)
# 4.b: countries sorted by active cases.
print('4.b')
# Bug fix: the per-country groupby result was immediately overwritten by a
# sort of the raw per-province table, so countries reported by state (e.g. the
# US) appeared once per row.  Aggregate per country first, then sort.
active_data = (covid_data
               .groupby('Country_Region', as_index=False)
               [['Confirmed','Deaths','Recovered','Active']]
               .sum()
               .sort_values('Active', ascending=False))
active_data = active_data[['Country_Region','Confirmed','Deaths','Recovered','Active']]
active_data
# +
# 4.c: deaths as a percentage of confirmed cases, for countries with more
# than 1000 confirmed cases.
print('4.c')
# Vectorised with numpy instead of the original per-row iloc loop; positional
# columns 7 and 8 of the daily report are 'Confirmed' and 'Deaths', so the
# names are used directly (less fragile than magic indices).  Rows with
# Confirmed == 0 get a ratio of 0, exactly as before.
covid_data['Death_Confirmed_Ratio'] = np.where(
    covid_data['Confirmed'] != 0,
    covid_data['Deaths'] / covid_data['Confirmed'] * 100,
    0,
)
a = covid_data[covid_data['Confirmed']>1000].sort_values('Death_Confirmed_Ratio',ascending=False)
a[['Country_Region','Last_Update','Confirmed','Deaths','Recovered','Active','Death_Confirmed_Ratio']]
# -
# +
# 4.d: time series (from 11 March 2020 onwards) for the ten countries with
# the most active cases.
print('4.d')
active_data2 = active_data.head(10)
covid_series = covid_series.rename(columns={"Country/Region": "Country_Region"})
li = [i for i in covid_series.columns]
# Keep only the date columns starting at 3/11/20.
li2 = [li[i] for i in range(li.index('3/11/20'),len(li))]
a = covid_series.groupby('Country_Region')[[i for i in li2]].sum()
last_data = pd.merge(active_data2,a,on='Country_Region',)
last_data2 = last_data.drop(columns=['Confirmed','Deaths','Recovered','Active'])
last_data3 = last_data2.sort_values('Country_Region',ascending = True)
last_data3
# NOTE(review): US appears twice because active_data in 4.b is built without
# aggregating per country, so head(10) can contain duplicate country rows;
# the other countries work fine.
# -
# %matplotlib inline
import matplotlib.pyplot as plt
plt.figure(figsize=(15,15))
# One thick line per country; tick labels are hidden to keep the plot legible.
for j in range(len(last_data3)):
    plt.plot(li2,[last_data3.iloc[j,i] for i in range(1,len(li2)+1)],label=last_data3.iloc[j,0],lw=8)
plt.xticks([])
plt.yticks([])
plt.legend()
# NOTE(review): duplicate US rows originate from 4.b (no per-country
# aggregation before head(10)); the other countries work fine.
| Data Analysis/data_analysis.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Download data for a functional layer of Spatial Signatures
#
# This notebook downloads and prepares data for a functional layer of Spatial Signatures.
from download import download
import geopandas as gpd
import pandas as pd
import osmnx as ox
from tqdm import tqdm
from glob import glob
import rioxarray as ra
import pyproj
import zipfile
import tarfile
from shapely.geometry import box, mapping
import requests
import datetime
# ## Population estimates
#
# Population estimates for England, Scotland and Wales. England is split into regions.
#
# ### ONS data
# ONS mid-2019 small-area population estimates: one archive per English
# region plus Wales, each unpacked under population_estimates/.  The calls
# are issued in the same order as the original one-per-line version.
_ONS_BASE = '../../urbangrammar_samba/functional_data/population_estimates/'
_ONS_POP_ARCHIVES = [
    ('https://www.ons.gov.uk/file?uri=%2fpeoplepopulationandcommunity%2fpopulationandmigration%2fpopulationestimates%2fdatasets%2fcensusoutputareaestimatesinthesouthwestregionofengland%2fmid2019sape22dt10g/sape22dt10gmid2019southwest.zip',
     'south_west_england'),
    ('https://www.ons.gov.uk/file?uri=%2fpeoplepopulationandcommunity%2fpopulationandmigration%2fpopulationestimates%2fdatasets%2fcensusoutputareaestimatesintheyorkshireandthehumberregionofengland%2fmid2019sape22dt10c/sape22dt10cmid2019yorkshireandthehumber.zip',
     'yorkshire_humber_england'),
    ('https://www.ons.gov.uk/file?uri=%2fpeoplepopulationandcommunity%2fpopulationandmigration%2fpopulationestimates%2fdatasets%2fcensusoutputareaestimatesinthesoutheastregionofengland%2fmid2019sape22dt10i/sape22dt10imid2019southeast.zip',
     'south_east_england'),
    ('https://www.ons.gov.uk/file?uri=%2fpeoplepopulationandcommunity%2fpopulationandmigration%2fpopulationestimates%2fdatasets%2fcensusoutputareaestimatesintheeastmidlandsregionofengland%2fmid2019sape22dt10f/sape22dt10fmid2019eastmidlands.zip',
     'east_midlands_england'),
    ('https://www.ons.gov.uk/file?uri=%2fpeoplepopulationandcommunity%2fpopulationandmigration%2fpopulationestimates%2fdatasets%2fcensusoutputareaestimatesinthenorthwestregionofengland%2fmid2019sape22dt10b/sape22dt10bmid2019northwest.zip',
     'north_west_england'),
    ('https://www.ons.gov.uk/file?uri=%2fpeoplepopulationandcommunity%2fpopulationandmigration%2fpopulationestimates%2fdatasets%2fcensusoutputareaestimatesintheeastregionofengland%2fmid2019sape22dt10h/sape22dt10hmid2019east.zip',
     'east_england'),
    ('https://www.ons.gov.uk/file?uri=%2fpeoplepopulationandcommunity%2fpopulationandmigration%2fpopulationestimates%2fdatasets%2fcensusoutputareaestimatesinwales%2fmid2019sape22dt10j/sape22dt10jmid2019wales.zip',
     'wales'),
    ('https://www.ons.gov.uk/file?uri=%2fpeoplepopulationandcommunity%2fpopulationandmigration%2fpopulationestimates%2fdatasets%2fcensusoutputareaestimatesinthenortheastregionofengland%2fmid2019sape22dt10d/sape22dt10dmid2019northeast.zip',
     'north_east_england'),
    ('https://www.ons.gov.uk/file?uri=%2fpeoplepopulationandcommunity%2fpopulationandmigration%2fpopulationestimates%2fdatasets%2fcensusoutputareaestimatesinthewestmidlandsregionofengland%2fmid2019sape22dt10e/sape22dt10emid2019westmidlands.zip',
     'west_midlands_england'),
]
for _url, _dest in _ONS_POP_ARCHIVES:
    download(_url, _ONS_BASE + _dest, kind='zip')
# ### Geometries
# 2011 output-area boundaries matching the estimates above.
download('https://borders.ukdataservice.ac.uk/ukborders/easy_download/prebuilt/shape/England_oa_2011.zip', _ONS_BASE + 'oa_geometry_england', kind='zip')
download('https://borders.ukdataservice.ac.uk/ukborders/easy_download/prebuilt/shape/Wales_oac_2011.zip', _ONS_BASE + 'oa_geometry_wales', kind='zip')
# ### Data cleaning and processing
england = gpd.read_file('../../urbangrammar_samba/functional_data/population_estimates/oa_geometry_england/england_oa_2011.shp')
wales = gpd.read_file('../../urbangrammar_samba/functional_data/population_estimates/oa_geometry_wales/wales_oac_2011.shp')
# DataFrame.append was deprecated in pandas 1.4 and removed in 2.0; concat is
# the supported way to stack the English and Welsh output areas.
oa = pd.concat([england, wales[['code', 'label', 'name', 'geometry']]])
files = glob('../../urbangrammar_samba/functional_data/population_estimates/*/*.xlsx', recursive=True)
# `merged` is created by the %time cell magic below when run as a notebook.
# %time merged = pd.concat([pd.read_excel(f, sheet_name='Mid-2019 Persons', header=0, skiprows=4) for f in files])
population_est = oa.merge(merged, left_on='code', right_on='OA11CD', how='left')
# ### Add Scotland
#
# Scottish data are shipped differently.
#
# #### Data
download('http://statistics.gov.scot/downloads/file?id=438c9dc6-dca0-48d5-995c-e3bb1d34e29e%2FSAPE_2011DZ_2001-2019_Five_and_broad_age_groups.zip', '../../urbangrammar_samba/functional_data/population_estimates/scotland', kind='zip')
pop_scot = pd.read_csv('../../urbangrammar_samba/functional_data/population_estimates/scotland/data - statistics.gov.scot - SAPE_2011DZ_2019_Five.csv')
# Keep the all-sexes rows and sum the age-band values per data zone.
pop_scot = pop_scot[pop_scot.Sex == 'All']
counts = pop_scot[['GeographyCode', 'Value']].groupby('GeographyCode').sum()
# #### Geometry
download('http://sedsh127.sedsh.gov.uk/Atom_data/ScotGov/ZippedShapefiles/SG_DataZoneBdry_2011.zip', '../../urbangrammar_samba/functional_data/population_estimates/dz_geometry_scotland', kind='zip')
data_zones = gpd.read_file('../../urbangrammar_samba/functional_data/population_estimates/dz_geometry_scotland')
scotland = data_zones.merge(counts, left_on='DataZone', right_index=True)
scotland = scotland[['DataZone', 'Value', 'geometry']].rename(columns={'DataZone': 'code', 'Value': 'population'})
# DataFrame.append was removed in pandas 2.0; concat England+Wales (renamed
# to the common 'population' column) with Scotland instead.
population_est = pd.concat([
    population_est[['code', 'All Ages', 'geometry']].rename(columns={'All Ages': 'population'}),
    scotland,
])
population_est.to_parquet('../../urbangrammar_samba/functional_data/population_estimates/gb_population_estimates.pq')
# ## WorldPop
#
# Data is downloaded clipped to GB, so we only have to reproject to OSGB.
download('ftp://ftp.worldpop.org.uk/GIS/Population/Global_2000_2020_Constrained/2020/BSGM/GBR/gbr_ppp_2020_constrained.tif', '../../urbangrammar_samba/functional_data/population_estimates/world_pop/gbr_ppp_2020_constrained.tif')
# ### Reproject to OSGB
wp = ra.open_rasterio("../../urbangrammar_samba/functional_data/population_estimates/world_pop/gbr_ppp_2020_constrained.tif")
wp.rio.crs
# `wp_osgb` is created by the %time cell magic below when run as a notebook
# (EPSG:27700 is the British National Grid).
# %time wp_osgb = wp.rio.reproject(pyproj.CRS(27700).to_wkt())
wp_osgb.rio.crs
wp_osgb.rio.to_raster("../../urbangrammar_samba/functional_data/population_estimates/world_pop/gbr_ppp_2020_constrained_osgb.tif")
# ## POIs
# ### Geolytix retail
# Geolytix retail POIs: https://drive.google.com/u/0/uc?id=1B8M7m86rQg2sx2TsHhFa2d-x-dZ1DbSy (no idea how to get them programmatically, so they were downloaded manually)
geolytix = pd.read_csv('../../urbangrammar_samba/functional_data/pois/GEOLYTIX - RetailPoints/geolytix_retailpoints_v17_202008.csv')
geolytix.head(2)
# We already have coordinates in OSGB, no need to preprocess.
# ### Listed buildings
#
# We have to merge English, Scottish and Welsh data.
#
# England downloaded manually from https://services.historicengland.org.uk/NMRDataDownload/OpenPages/Download.aspx
download('https://inspire.hes.scot/AtomService/DATA/lb_scotland.zip', '../../urbangrammar_samba/functional_data/pois/listed_buildings/scotland', kind='zip')
download('http://lle.gov.wales/catalogue/item/ListedBuildings.zip', '../../urbangrammar_samba/functional_data/pois/listed_buildings/wales', kind='zip')
# #### Processing
# The manually downloaded English archive still needs unpacking.
with zipfile.ZipFile("../../urbangrammar_samba/functional_data/pois/listed_buildings/Listed Buildings.zip", 'r') as zip_ref:
    zip_ref.extractall("../../urbangrammar_samba/functional_data/pois/listed_buildings/england")
england = gpd.read_file('../../urbangrammar_samba/functional_data/pois/listed_buildings/england/ListedBuildings_23Oct2020.shp')
england.head(2)
scotland = gpd.read_file('../../urbangrammar_samba/functional_data/pois/listed_buildings/scotland/Listed_Buildings.shp')
scotland.head(2)
wales = gpd.read_file('../../urbangrammar_samba/functional_data/pois/listed_buildings/wales/Cadw_ListedBuildingsMPoint.shp')
wales.head(2)
# Only the point geometry is kept; attribute schemas differ between nations.
listed = pd.concat([england[['geometry']], scotland[['geometry']], wales[['geometry']]])
listed.reset_index(drop=True).to_parquet("../../urbangrammar_samba/functional_data/pois/listed_buildings/listed_buildings_gb.pq")
# ## Night lights
#
# We need to clip it to the extent of GB (dataset has a global coverage) and reproject to OSGB.
# Download the March 2019 VIIRS DNB monthly composite (75N/060W tile).
with open('../../urbangrammar_samba/functional_data/employment/SVDNB_npp_20190301-20190331_75N060W_vcmcfg_v10_c201904071900.tgz', "wb") as down:
    down.write(requests.get('https://data.ngdc.noaa.gov/instruments/remote-sensing/passive/spectrometers-radiometers/imaging/viirs/dnb_composites/v10//201903/vcmcfg/SVDNB_npp_20190301-20190331_75N060W_vcmcfg_v10_c201904071900.tgz').content)
# The `with` block closes the file on exit, so the explicit down.close() that
# used to follow it was redundant and has been removed.
with tarfile.open('../../urbangrammar_samba/functional_data/employment/SVDNB_npp_20190301-20190331_75N060W_vcmcfg_v10_c201904071900.tgz', 'r') as zip_ref:
    zip_ref.extractall("../../urbangrammar_samba/functional_data/employment")
# ### Clip and reproject
nl = ra.open_rasterio('../../urbangrammar_samba/functional_data/employment/SVDNB_npp_20190301-20190331_75N060W_vcmcfg_v10_c201904071900.avg_rade9h.tif')
nl.rio.crs
# GB extent reprojected to WGS84 so it matches the raster's CRS for clipping.
extent = gpd.read_parquet("../../urbangrammar_samba/spatial_signatures/local_auth_chunks.pq")
extent = extent.to_crs(4326)
# `nl_clipped` and `nl_osgb` are created by the %time cell magics below when
# run as a notebook (plain comments in this .py representation).
# %time nl_clipped = nl.rio.clip([mapping(box(*extent.total_bounds))], all_touched=True)
# %time nl_osgb = nl_clipped.rio.reproject(pyproj.CRS(27700).to_wkt())
nl_osgb.rio.to_raster("../../urbangrammar_samba/functional_data/employment/night_lights_osgb.tif")
nl_osgb.plot(figsize=(12, 12), vmin=0, vmax=7)
# ## Postcodes
#
# Keeping only active postcodes, relevant columns and determining their age.
download('https://www.arcgis.com/sharing/rest/content/items/b6e6715fa1984648b5e690b6a8519e53/data', '../../urbangrammar_samba/functional_data/postcode/nhspd', kind='zip')
postcodes = pd.read_csv("../../urbangrammar_samba/functional_data/postcode/nhspd/Data/nhg20aug.csv", header=None)
# Keep only the first six (unnamed) columns of the NHSPD extract.
postcodes = postcodes.iloc[:, :6]
# Column 3 is the termination date: NaN means the postcode is still active.
existing = postcodes[postcodes[3].isna()]
# Column 4 holds the x coordinate; drop postcodes with no location.  copy()
# so the column assignments below operate on an owned frame rather than a
# slice (avoids chained-assignment warnings / silent no-ops in pandas 2+).
located = existing[existing[4].notna()].copy()
located = located.rename(columns={0: 'postcode', 1: 'postcode2', 2:'introduced', 3:'terminated', 4:'x', 5:'y'})
# Bracket assignment instead of attribute assignment for column updates.
located['introduced'] = pd.to_datetime(located.introduced, format="%Y%m")
# Postcode age in days relative to the run date.
located['age'] = (pd.to_datetime('today') - located.introduced).dt.days
located.drop(columns=['postcode2', 'terminated']).to_parquet('../../urbangrammar_samba/functional_data/postcode/postcodes_gb.pq')
# ## Food hygiene rating scheme
# FHRS https://data.cdrc.ac.uk/dataset/food-hygiene-rating-scheme-fhrs-ratings (requires login)
# CDRC extract of FHRS establishment point locations (downloaded manually).
fhrs = pd.read_csv('../../urbangrammar_samba/functional_data/fhrs/Data/fhrs_location_20200528.csv')
fhrs
# No need to preprocess at the moment. Contains OSGB coordinates for each point.
# ## Business census
#
# https://data.cdrc.ac.uk/dataset/business-census (requires login)
# `encoding = "ISO-8859-1"`
#
# - get gemetries
# - either geocode addresses (could be expensive
# - or link to postcode points
# ## Workplace density
#
# Dowload workplace population data from scottish census and english census, combine together and link to geometry.
# + jupyter={"outputs_hidden": true}
# Scottish census: workplace population by industry at workplace-zone level.
download('http://www.scotlandscensus.gov.uk/documents/additional_tables/WP605SCwz.csv', '../../urbangrammar_samba/functional_data/employment/workplace/scotland_industry.csv')
# -
# Nomis bulk downloads for England & Wales: the industry table plus one file
# per English region (the numeric prefixes in `geography` are Nomis region
# codes; the occasional `timeout` bumps cope with slow responses).
download('https://www.nomisweb.co.uk/api/v01/dataset/nm_1314_1.bulk.csv?time=latest&measures=20100&geography=TYPE262', '../../urbangrammar_samba/functional_data/employment/workplace/england_wales_industry.csv', timeout=60)
download('https://www.nomisweb.co.uk/api/v01/dataset/nm_1300_1.bulk.csv?time=latest&measures=20100&geography=2013265922TYPE299', '../../urbangrammar_samba/functional_data/employment/workplace/north_west.csv')
download('https://www.nomisweb.co.uk/api/v01/dataset/nm_1300_1.bulk.csv?time=latest&measures=20100&geography=2013265926TYPE299', '../../urbangrammar_samba/functional_data/employment/workplace/east.csv')
download('https://www.nomisweb.co.uk/api/v01/dataset/nm_1300_1.bulk.csv?time=latest&measures=20100&geography=2013265924TYPE299', '../../urbangrammar_samba/functional_data/employment/workplace/east_midlands.csv')
download('https://www.nomisweb.co.uk/api/v01/dataset/nm_1300_1.bulk.csv?time=latest&measures=20100&geography=2013265927TYPE299', '../../urbangrammar_samba/functional_data/employment/workplace/london.csv')
download('https://www.nomisweb.co.uk/api/v01/dataset/nm_1300_1.bulk.csv?time=latest&measures=20100&geography=2013265921TYPE299', '../../urbangrammar_samba/functional_data/employment/workplace/north_east.csv')
download('https://www.nomisweb.co.uk/api/v01/dataset/nm_1300_1.bulk.csv?time=latest&measures=20100&geography=2013265928TYPE299', '../../urbangrammar_samba/functional_data/employment/workplace/south_east.csv', timeout=30)
download('https://www.nomisweb.co.uk/api/v01/dataset/nm_1300_1.bulk.csv?time=latest&measures=20100&geography=2013265929TYPE299', '../../urbangrammar_samba/functional_data/employment/workplace/south_west.csv')
download('https://www.nomisweb.co.uk/api/v01/dataset/nm_1300_1.bulk.csv?time=latest&measures=20100&geography=2013265925TYPE299', '../../urbangrammar_samba/functional_data/employment/workplace/west_midlands.csv')
download('https://www.nomisweb.co.uk/api/v01/dataset/nm_1300_1.bulk.csv?time=latest&measures=20100&geography=2013265923TYPE299', '../../urbangrammar_samba/functional_data/employment/workplace/yorkshire.csv')
# Boundary files and workplace-population density tables for both censuses.
download('https://www.nrscotland.gov.uk/files/geography/output-area-2011-mhw.zip', '../../urbangrammar_samba/functional_data/employment/workplace/scotland_oa', kind='zip')
download('https://www.nomisweb.co.uk/api/v01/dataset/nm_155_1.bulk.csv?time=latest&measures=20100&geography=TYPE262', '../../urbangrammar_samba/functional_data/employment/workplace/wp_density_ew.csv', timeout=30)
download('https://www.nrscotland.gov.uk/files//geography/products/workplacezones2011scotland.zip', '../../urbangrammar_samba/functional_data/employment/workplace/wpz_scotland', kind='zip')
download('http://www.scotlandscensus.gov.uk/documents/additional_tables/WP102SCca.csv', '../../urbangrammar_samba/functional_data/employment/workplace/wp_density_scotland.csv')
download('http://www.scotlandscensus.gov.uk/documents/additional_tables/WP103SCwz.csv', '../../urbangrammar_samba/functional_data/employment/workplace/wp_pop_scotland.csv')
# Unpack the UK workplace-zone boundaries (archive fetched out of band).
with zipfile.ZipFile("../../urbangrammar_samba/functional_data/employment/workplace/wz2011ukbgcv2.zip", 'r') as zip_ref:
    zip_ref.extractall("../../urbangrammar_samba/functional_data/employment/workplace/")
wpz_geom = gpd.read_file('../../urbangrammar_samba/functional_data/employment/workplace/WZ_2011_UK_BGC_V2.shp')
wpz_geom
wpz_ew = pd.read_csv("../../urbangrammar_samba/functional_data/employment/workplace/wp_density_ew.csv")
wpz_ew
# Attach the England & Wales workplace population to each zone.
wpz = wpz_geom[['WZ11CD', 'LAD_DCACD', 'geometry']].merge(wpz_ew[['geography code', 'Area/Population Density: All usual residents; measures: Value']], left_on='WZ11CD', right_on='geography code', how='left')
scot = pd.read_csv("../../urbangrammar_samba/functional_data/employment/workplace/wp_pop_scotland.csv", header=5)
wpz = wpz.merge(scot[['Unnamed: 0', 'Total']], left_on='WZ11CD', right_on='Unnamed: 0', how='left')
# Scottish totals arrive as strings with thousands separators; the vectorised
# str accessor replaces the original per-row apply (str.replace is a no-op
# when no comma is present, so behaviour is unchanged).  Bracket assignment
# avoids the attribute-assignment pitfall for column updates.
wpz['Total'] = wpz.Total.astype(str).str.replace(',', '', regex=False).astype(float)
# England & Wales counts plus Scottish counts, missing sides as 0.
wpz['count'] = wpz['Area/Population Density: All usual residents; measures: Value'].astype(float).fillna(0) + wpz.Total.fillna(0)
# Drop Northern Ireland zones (codes starting with 'N').
wpz = wpz[~wpz.WZ11CD.str.startswith('N')]
wpz[['geography code', 'count', 'geometry']].to_parquet('../../urbangrammar_samba/functional_data/employment/workplace/workplace_population_gb.pq')
# Scottish industry table: skip the preamble, keep the data rows and drop
# the unnamed padding columns.
wpz_ind_s = pd.read_csv('../../urbangrammar_samba/functional_data/employment/workplace/scotland_industry.csv', skiprows=4)
wpz_ind_s = wpz_ind_s.loc[4:5378].drop(columns=[c for c in wpz_ind_s.columns if 'Unnamed' in c])
wpz_ind_s
wpz_ind_s.columns
wpz_ind_ew = pd.read_csv('../../urbangrammar_samba/functional_data/employment/workplace/england_wales_industry.csv')
wpz_ind_ew.columns
# Collapse the detailed Nomis industry columns into the 8 broad groups used
# by the Scottish table.  c[10] picks the SIC section letter out of each
# Nomis column name — assumes the fixed column-name prefix; verify against
# the CSV header if the download format ever changes.
wpz_ind_ew['A, B, D, E. Agriculture, energy and water'] = wpz_ind_ew[[c for c in wpz_ind_ew.columns[4:] if c[10] in ['A', 'B', 'D', 'E']]].sum(axis=1)
wpz_ind_ew['C. Manufacturing'] = wpz_ind_ew[[c for c in wpz_ind_ew.columns[4:] if c[10] in ['C']]].sum(axis=1)
wpz_ind_ew['F. Construction'] = wpz_ind_ew[[c for c in wpz_ind_ew.columns[4:] if c[10] in ['F']]].sum(axis=1)
wpz_ind_ew['G, I. Distribution, hotels and restaurants'] = wpz_ind_ew[[c for c in wpz_ind_ew.columns[4:] if c[10] in ['G', 'I']]].sum(axis=1)
wpz_ind_ew['H, J. Transport and communication'] = wpz_ind_ew[[c for c in wpz_ind_ew.columns[4:] if c[10] in ['H', 'J']]].sum(axis=1)
wpz_ind_ew['K, L, M, N. Financial, real estate, professional and administrative activities'] = wpz_ind_ew[[c for c in wpz_ind_ew.columns[4:] if c[10] in ['K', 'L', 'M', 'N']]].sum(axis=1)
wpz_ind_ew['O,P,Q. Public administration, education and health'] = wpz_ind_ew[[c for c in wpz_ind_ew.columns[4:] if c[10] in ['O', 'P', 'Q']]].sum(axis=1)
wpz_ind_ew['R, S, T, U. Other'] = wpz_ind_ew[[c for c in wpz_ind_ew.columns[4:] if c[10] in ['R', 'S', 'T', 'U']]].sum(axis=1)
# DataFrame.append was removed in pandas 2.0; stack the England & Wales
# broad-group table with the (renamed) Scottish table via concat instead.
wpz = pd.concat([
    wpz_ind_ew[['geography code'] + list(wpz_ind_ew.columns[-8:])],
    wpz_ind_s.rename(columns={'2011 Workplace Zone': 'geography code'}).drop(columns='All workplace population aged 16 to 74'),
])
wpz_merged = wpz_geom.merge(wpz, left_on='WZ11CD', right_on='geography code', how='left')
# Drop Northern Ireland workplace zones (codes starting with 'N').
wpz_merged = wpz_merged[~wpz_merged.WZ11CD.str.startswith('N')]
wpz_merged = wpz_merged.reset_index(drop=True)[list(wpz.columns) + ['geometry']]
wpz_merged.columns
# Counts come in as strings with thousands separators; strip commas with the
# vectorised str accessor (no-op when absent, matching the old lambda) and
# cast to float.
for c in wpz_merged.columns[1:-1]:
    wpz_merged[c] = wpz_merged[c].astype(str).str.replace(',', '', regex=False).astype(float)
wpz_merged
wpz_merged.to_parquet('../../urbangrammar_samba/functional_data/employment/workplace/workplace_by_industry_gb.pq')
# %%time
# Cinemas and theatres from OpenStreetMap: query the convex hull of each of
# the 103 morphometric node chunks, then merge the results.
pois = []
for i in tqdm(range(103), total=103):
    nodes = gpd.read_parquet(f'../../urbangrammar_samba/spatial_signatures/morphometrics/nodes/nodes_{i}.pq')
    # OSM queries are made in WGS84, hence to_crs(4326) before taking the hull.
    poly = nodes.to_crs(4326).unary_union.convex_hull
    tags = {'amenity': ['cinema', 'theatre']}
    pois.append(ox.geometries.geometries_from_polygon(poly, tags))
pois_merged = pd.concat(pois)
pois_merged
# Chunk hulls overlap, so the same POI can be fetched more than once;
# deduplicate before saving (assumes the result carries a 'unique_id'
# column — verify against the installed osmnx version).
pois_merged.drop_duplicates(subset='unique_id')[['amenity', 'name', 'geometry']].to_crs(27700).to_parquet('../../urbangrammar_samba/functional_data/pois/culture_gb.pq')
# ## Corine land cover
#
# Corine - get link from https://land.copernicus.eu/pan-european/corine-land-cover
#
# We need to extract data, clip to GB and reproject to OSGB.
# Fetch and unpack the CORINE 2018 GeoPackage (link generated on the
# Copernicus site, see the note above).
download('https://land.copernicus.eu/land-files/afd643e4508e9dd7af7659c1fb1d75017ba6d9f4.zip', '../../urbangrammar_samba/functional_data/land_use/corine', kind='zip')
with zipfile.ZipFile("../../urbangrammar_samba/functional_data/land_use/corine/u2018_clc2018_v2020_20u1_geoPackage.zip", 'r') as zip_ref:
    zip_ref.extractall("../../urbangrammar_samba/functional_data/land_use/corine")
extent = gpd.read_parquet("../../urbangrammar_samba/spatial_signatures/local_auth_chunks.pq")
# mask=extent reads only the features intersecting GB instead of all of Europe.
corine_gdf = gpd.read_file("../../urbangrammar_samba/functional_data/land_use/corine/u2018_clc2018_v2020_20u1_geoPackage/DATA/U2018_CLC2018_V2020_20u1.gpkg", mask=extent)
corine_gdf.to_crs(27700).to_parquet("../../urbangrammar_samba/functional_data/land_use/corine/corine_gb.pq")
# ## Land cover classification
# Land cover classification - get link from https://cds.climate.copernicus.eu/cdsapp#!/dataset/satellite-land-cover?tab=form
#
# We need to clip it to the extent of GB (dataset has a global coverage) and reproject to OSGB.
# NOTE(review): this points at a private CDS cache address (192.168.*) — the
# link is session-specific; regenerate it from the CDS request form when
# re-running.
download('http://192.168.3.11/cache-compute-0011/cache/data0/dataset-satellite-land-cover-c20f5b30-2bdb-4f69-a21e-c8f2e696e715.zip', '../../urbangrammar_samba/functional_data/land_use/lcc', kind='zip' )
lcc = ra.open_rasterio("../../urbangrammar_samba/functional_data/land_use/lcc/C3S-LC-L4-LCCS-Map-300m-P1Y-2018-v2.1.1.nc")
# Land-cover classification variable of the first (and only) time slice.
lccs = lcc[0].lccs_class
extent.total_bounds
# Rough GB bounding box in lon/lat; the y slice runs north to south because
# the raster's y axis is descending.
lccs_gb = lccs.sel(x=slice(-9, 2), y=slice(61, 49))
lccs_gb = lccs_gb.rio.set_crs(4326)
lccs_osgb = lccs_gb.rio.reproject(pyproj.CRS(27700).to_wkt())
lccs_osgb.rio.to_raster("../../urbangrammar_samba/functional_data/land_use/lcc/lccs_osgb.tif")
| functional_data/fetch_data.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] nbpresent={"id": "0b02df97-cbb3-4759-9371-cbbecd0ccd86"} slideshow={"slide_type": "slide"}
# # Construyendo una red neuronal con Keras
# + [markdown] nbpresent={"id": "03d05899-7ff9-4413-aa67-c7a96bbdfcde"} slideshow={"slide_type": "slide"}
# ## ¿Qué librerías necesitamos?
# + nbpresent={"id": "fa44eec5-93b3-4e4b-adcb-1065bb5cc474"} slideshow={"slide_type": "-"}
import numpy
import tensorflow as tf
from tensorflow.keras.datasets import mnist
from tensorflow.keras.layers import Activation, BatchNormalization, Dense, Dropout
from tensorflow.keras.models import Sequential
from tensorflow.keras import optimizers, regularizers
# + [markdown] slideshow={"slide_type": "subslide"}
# ### Otras librerías
# + nbpresent={"id": "79849185-892a-41ce-b4a2-26db6ad19597"} slideshow={"slide_type": "-"}
# %matplotlib inline
import matplotlib
import matplotlib.pyplot as plt
from IPython.display import SVG
from tensorflow.keras.utils import model_to_dot
# + [markdown] slideshow={"slide_type": "slide"}
# ## Cargando los datos del MNIST
#
# - El conjunto de datos a utilizar es el **[MNIST](http://yann.lecun.com/exdb/mnist/)**.
# - Es un conjunto estándar para hacer *reconocimiento de imágenes*.
# - Buscamos entrenar un clasificador que reconozca que dígito es mostrado en la imagen.
# - El MNIST está compuesto por imágenes de 28x28 píxeles representadas como matrices.
# - La salida son 10 clases (dígitos del 0 al 9).
# - Se preprocesará para *convertir las matrices en vectores* y transformar las etiquetas en representaciones *one-of-k*.
# + slideshow={"slide_type": "subslide"}
# Training hyper-parameters and dataset dimensions
batch_size = 128 # For mini-batch gradient descent
num_classes = 10
epochs = 10
input_size = 28*28  # each image is flattened into a 784-long vector
train_examples = 60000
test_examples = 10000
# the data, shuffled and split between train and test sets
# (downloaded and cached by Keras on first use)
(x_train, y_train), (x_test, y_test) = mnist.load_data()
# reshape the dataset to convert the examples from 2D matrixes to 1D arrays.
x_train = x_train.reshape(train_examples, input_size)
x_test = x_test.reshape(test_examples, input_size)
# normalize the input: uint8 pixel values in [0, 255] -> floats in [0, 1]
x_train = x_train / 255
x_test = x_test / 255
# convert class vectors to binary class matrices (one-hot encoding)
y_train = tf.keras.utils.to_categorical(y_train, num_classes)
y_test = tf.keras.utils.to_categorical(y_test, num_classes)
# + [markdown] slideshow={"slide_type": "slide"}
# ## Construyendo la red neuronal
#
# - Comenzaremos por construir un *perceptrón multicapa* que es la red neuronal más común.
# - El modelo más simple en Keras es una concatenación de capas (layers).
# - Se llama modelo secuencial.
# - La capa más básica es la *densa* (dense o fully connected).
# - Internamente tiene dos variables: una matriz de pesos y un vector de biases. Keras nos abstrae de todo eso.
# + slideshow={"slide_type": "-"}
# Simple MLP: 784 -> 512 (ReLU) -> 10 (softmax), built by passing the
# full layer stack to the Sequential constructor.
model = Sequential([
    # Input to hidden layer
    Dense(512, input_shape=(input_size,)),
    Activation('relu'),
    # Hidden to output layer
    Dense(10),
    Activation('softmax'),
])
# + [markdown] slideshow={"slide_type": "subslide"}
# Para imprimir una descripción del modelo existe un comando:
# + slideshow={"slide_type": "-"}
print(model.summary())
# + [markdown] slideshow={"slide_type": "slide"}
# # Hiperparámetros
# + [markdown] slideshow={"slide_type": "slide"}
# ## Funciones de Activación
#
# - Una red neuronal con activación lineal no tiene mucho más poder de representación que un algoritmo lineal.
# - Para expresar no linealidad en la red neuronal se necesitan funciones no lineales de activación.
# - Una función de activación común es la *sigmoide* (o logística).
# - Keras soporta varias funciones de activación: rectified linear unit (ReLU), tangente hiperbólica, sigmoide "dura", etc.
# - Hoy en día, por sus propiedades, ReLU suele ser la más utilizada [1].
# - La función de activación *softmax* es utilizada al final de la red y sirve para clasificación.
#
# 
# <div style="text-align: right;">Fuente: https://ujjwalkarn.me/2016/08/09/quick-intro-neural-networks/</div>
# + slideshow={"slide_type": "subslide"}
# Same architecture, built layer-by-layer with `add` and the
# `activation=` shorthand instead of separate Activation layers.
model = Sequential()
model.add(Dense(64, input_shape=(784,), activation='relu'))
model.add(Dense(32, activation='relu'))
model.add(Dense(10, activation='softmax'))
print(model.summary())
# + [markdown] slideshow={"slide_type": "slide"}
# ## Regularización de la red
#
# ### Regularización de los pesos
#
# - La red puede regularizarse penalizando los pesos.
# - Los pesos se regularizan mediante alguna norma:
# - L1 es la suma del valor absoluto: ${\displaystyle \lambda \sum_{i=1}^{k} |w_i|}$
# - L2 es la suma del valor cuadrado, es la más común: ${\displaystyle \lambda \sum_{i=1}^{k} w_i^2}$
# - Elastic net es una combinación de ambas: ${\displaystyle \lambda_1 \sum_{i=1}^{k} |w_i| + \lambda_2 \sum_{i=1}^{k} w_i^2}$
# - Para un análisis detallado de la diferencia entre L1 y L2 revisar [\[2\]](http://www.chioka.in/differences-between-l1-and-l2-as-loss-function-and-regularization/)
# + slideshow={"slide_type": "-"}
# Same MLP as above, with L2 weight decay (lambda = 0.01) applied to the
# kernel (weight matrix) of every layer via `kernel_regularizer`.
model = Sequential([
    Dense(64, input_shape=(784,), activation='relu', kernel_regularizer=regularizers.l2(0.01)),
    Dense(32, activation='relu', kernel_regularizer=regularizers.l2(0.01)),
    Dense(10, activation='softmax', kernel_regularizer=regularizers.l2(0.01))
])
# + [markdown] slideshow={"slide_type": "slide"}
# ### Dropout
#
# - Otra forma muy usada a la hora de regularizar es el **dropout** [3].
# - Es extremadamente efectivo y simple.
# - Es complementario a L1/L2/ElasticNet.
# - Durante el entrenamiento se implementa apagando un neurón con alguna probabilidad **_p_** (un hiperparámetro).
#
# 
# <div style="text-align: right;">Fuente: Trabajo de Srivastava et al. [3]</div>
# + [markdown] slideshow={"slide_type": "subslide"}
# ### Dropout en Keras
#
# - Se aplica agregando capas al modelo.
# - Se llaman capas `Dropout` y se agrega a cada capa que se quiere regularizar.
# + slideshow={"slide_type": "-"}
# Dropout regularization: the Dropout layer zeroes activations of the
# preceding hidden layer with probability 0.5 (during training only).
model = Sequential([
    Dense(64, input_shape=(784,), activation='relu', kernel_regularizer=regularizers.l2(0.01)),
    Dropout(0.5),
    Dense(10, activation='softmax')
])
# + [markdown] slideshow={"slide_type": "slide"}
# ### Batch Normalization
#
# - En general, para acelerar la convergencia de la red, se normalizan los features de entrada, de manera que todos estén en un rango similar.
# - Esta idea también puede llevarse a las capas ocultas de la red.
# - La idea de la "Normalización por Lotes" (*Batch Normalization*) [4] es reducir el rango en el que se mueven los valores de las neuronas ocultas.
# - La manera en que se hace esto es restarle, a cada salida de cada capa oculta, la media del lote (batch) de datos de entrenamiento y dividirlo por la desviación estándar (a grandes rasgos).
# - Como resultado, la red converge más rápido e incluso se genera un efecto de regularización.
# + [markdown] slideshow={"slide_type": "subslide"}
# ### BatchNormalization en Keras.
#
# - Se aplica agregando capas al modelo.
# - Se llaman capas `BatchNormalization` y se agrega a cada capa que se quiere normalizar.
# - El `momentum` es un parámetro que decide cuánta información de los lotes anteriores se tiene en cuenta a la hora de normalizar el lote actual (en el trabajo original, este es de `0`).
# + slideshow={"slide_type": "-"}
# Batch normalization after the hidden layer; momentum=0 means only the
# current batch statistics are used (as in the original paper, per the
# markdown cell above).
model = Sequential([
    Dense(64, input_shape=(784,), activation='relu', kernel_regularizer=regularizers.l2(0.01)),
    BatchNormalization(momentum=0),
    Dropout(0.5),
    Dense(10, activation='softmax')
])
# + [markdown] slideshow={"slide_type": "slide"}
# ## Preparando el modelo para entrenarlo
#
# - Para minimizar una red neuronal necesitamos *calcular sus gradientes*.
# - Esto se hace con el algoritmo de *retropropagación*.
# - Keras tiene la capacidad de hacerlo automáticamente.
# - Esto se conoce como _diferenciación automática_ y es algo común en los frameworks de deep learning.
# - El modelo de Keras necesita *compilarse*.
# - Recordar que lo que armamos en Keras (o TensorFlow) es un grafo de computación.
# - Al compilar el modelo los parámetros más importantes son la función de costo y el algoritmo de optimización.
# + [markdown] slideshow={"slide_type": "subslide"}
# ### Funciones de costo y algoritmos de optimización
#
# - La función de costo puede cambiar de acuerdo al tipo de problema (clasificación binaria/multiclase o regresión).
# - La funciones más comunes son la media del error cuadrático (_mean squared error_) para regresión y la entropía cruzada (_crossentropy_) para clasificación.
# - El algoritmo de optimización es el que entrena la red. Existen varios, que en si son variaciones del algoritmo de _descenso por la gradiente_.
#
# <div style="text-align: center; margin: 5px 0;">
# <div style="display: inline-block;">
# <img src="images/contours_evaluation_optimizers.gif" alt="Optimización" style="width: 350px;"/>
# </div>
# <div style="display: inline-block;">
# <img src="images/saddle_point_evaluation_optimizers.gif" alt="Optimización" style="width: 350px;"/>
# </div>
# </div>
# <div style="text-align: right;">Fuente: http://ruder.io/optimizing-gradient-descent/</div>
# + [markdown] slideshow={"slide_type": "subslide"}
# ### Compilando el modelo Keras y visualizando la arquitectura
#
# - Con el método `.compile()` podemos compilar el modelo de Keras.
# - Además de la función de costo y el algoritmo de optimización se le pueden pasar métricas para llevar registro además del error de los datos (e.g. la exactitud o la precisión).
# - Una vez compilado el modelo la modificación requerirá rehacerlo desde cero (salvo que se guarden y carguen los pesos).
# + slideshow={"slide_type": "-"}
# Compile the graph: fix the loss, optimizer and tracked metrics.
# Modifying the model after this point requires re-compiling it.
model.compile(loss='categorical_crossentropy',
              optimizer=optimizers.Adam(learning_rate=1e-4),
              # Could also be an optimizer name string to use its default parameters
              metrics=['accuracy']) # Metrics are tracked in addition to the loss
# + [markdown] slideshow={"slide_type": "subslide"}
# ### Visualizar la arquitectura (opcional)
#
# Opcionalmente, si instalamos las librerías extras pedidas en el setup y utilizando `vis_util`, podemos visualizar el grafo de la red.
# + slideshow={"slide_type": "-"}
SVG(model_to_dot(model, dpi=72).create(prog='dot', format='svg'))
# + [markdown] slideshow={"slide_type": "slide"}
# ## Entrenamiento
#
# - Una vez compilado el modelo, está listo para ser entrenado.
# - Keras tiene una interfaz similar a Scikit-Learn, con los métodos `fit` y `predict`.
# - Para entrenar se necesitan 3 parámetros:
# - El conjunto de datos de entrenamiento (datos y etiquetas).
# - El tamaño del batch para hacer _mini-batch gradient descent_.
# - La cantidad de épocas que entrenar.
# - Eventualmente le podemos pasar datos para hacer validación del modelo.
# - El parámetro `verbose` nos imprime información útil respecto al desempeño del modelo.
# + slideshow={"slide_type": "subslide"}
# Train with mini-batch updates; the test set is passed here only as
# validation data, so metrics are reported on it after every epoch.
model.fit(x_train, y_train,
          batch_size=batch_size, epochs=epochs,
          validation_data=(x_test, y_test), verbose=1);
# + [markdown] slideshow={"slide_type": "skip"}
# ## Referencias
#
# - [1] LeCun, <NAME>, Yoshua, and <NAME>. "Deep learning." Nature 521, no. 7553 (2015): 436-444.
# - [2] "Differences between L1 and L2 as Loss Function and Regularization". http://www.chioka.in/differences-between-l1-and-l2-as-loss-function-and-regularization/
# - [3] <NAME>, <NAME>, <NAME>, <NAME>, and <NAME>. "Dropout: a simple way to prevent neural networks from overfitting." Journal of machine learning research 15, no. 1 (2014): 1929-1958. Harvard.
# - [4] <NAME>., & <NAME>. (2015). Batch normalization: Accelerating deep network training by reducing internal covariate shift. arXiv preprint arXiv:1502.03167.
| 1_introduction_nn.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python [default]
# language: python
# name: python2
# ---
# # openWithHaloFinder.ipynb
#
# This is an advanced tutorial using FIREreader, be warned!!
#
# This notebook is best used on Stampede2, where the halo file and snapshot directories live. You can run this notebook, and host a Firefly server, on Stampede by following the instructions [here](https://github.com/ageller/Firefly/wiki/Hosting-Firefly-on-a-Cluster-Environment).
#
# In this notebook, we open the AHF halo files saved on Stampede and offset the snapshot coordinates, as well as convert them to physical units, to put the center of the main halo at our origin. This is optional, since you can always fly within Firefly to a point and set that as your origin, but more convenient (and exact!).
#
# We also calculate the radius from the halo center for each particle and update the filter keys so we can interactively filter by radius from within Firefly.
#
# #### Importantly, we do **not** call the `reader.run()` method, which would not give us the flexibility required to change our units/calculate the radii & temperature before we output to JSON.
#
#
# +
# %load_ext autoreload
# %autoreload 2
from FIREreader import FIREreader
import numpy as np
import os
import h5py
# -
## initialize reader object and choose simulation to run
reader = FIREreader()
# NOTE(review): machine-specific path -- edit to point at your own snapshot directory
reader.directory = "/Users/agurvich/research/snaps/m12i_res7100/output"
reader.snapnum = 600
## could read this from snapshot times
## (assumes the chosen snapshot is at z=0 -- confirm against the snapshot time table)
current_redshift=0
# ## Open the AHF Halo file and extract the halo center and other parameters
# +
def load_AHF(directory,snapnum,current_redshift,hubble = 0.702):
    """Read the smoothed AHF halo catalogue next to a snapshot directory.

    Looks up the row matching `snapnum` in
    `<directory>/../halo/ahf/halo_00000_smooth.dat` and returns
    (halo_center, rvir, vesc, rstar_half), with lengths converted from
    comoving kpc/h to physical kpc. Raises IOError when the snapshot
    number is not present in the catalogue.
    """
    catalogue = os.path.join(directory, '../halo/ahf/halo_00000_smooth.dat')
    ## find column numbers without having to count
    wanted = ['snum', 'Xc', 'Yc', 'Zc', 'Rvir', 'v_esc', 'Rstar0.5']
    ## the first line of the datafile names the columns
    header = list(np.genfromtxt(catalogue, skip_header=0, max_rows=1, dtype=str))
    column_ids = [header.index(field) for field in wanted]
    ## load the rest of the file, one array per requested column
    table = np.genfromtxt(catalogue, delimiter='\t', usecols=column_ids,
                          unpack=1, skip_header=1)
    sns, xs, ys, zs, rvirs, vescs, rstar_halfs = table
    ## boolean mask selecting the row(s) for this snapshot
    matches = sns == snapnum
    if np.sum(matches) == 0:
        ## snapnum is not in this halo file
        raise IOError
    ## presumably in comoving kpc/h -> convert to physical kpc
    comoving_to_physical = 1 / (1 + current_redshift)
    halo_center = np.array([xs[matches], ys[matches], zs[matches]]) / hubble * comoving_to_physical
    halo_center = halo_center.reshape(3,)
    ## convert the other length-like quantities as well; vesc is already km/s
    rvir = rvirs[matches][0] / hubble / (1 + current_redshift)
    vesc = vescs[matches][0]
    rstar_half = rstar_halfs[matches][0] / hubble / (1 + current_redshift)
    return halo_center, rvir, vesc, rstar_half
def getTemperature(U_code,y_helium,ElectronAbundance):
    """Convert specific internal energy to gas temperature in Kelvin.

    Typical usage with a snapshot dictionary `res`:
        U_code = res['u']; y_helium = res['z'][:,1]; ElectronAbundance = res['ne']
    """
    BOLTZMANN_CGS = 1.38e-16    # erg / K
    PROTON_MASS_CGS = 1.67e-24  # g
    ADIABATIC_INDEX = 5/3.
    # code units -> cgs specific energy (erg / g)
    specific_energy_cgs = U_code*1e10
    # mean molecular weight in units of the proton mass
    mu = (1.0 + 4*y_helium) / (1+y_helium+ElectronAbundance)
    mean_molecular_weight = mu*PROTON_MASS_CGS
    # ideal monatomic gas: T = (gamma - 1) * mu * m_p * u / k_B
    return mean_molecular_weight * (ADIABATIC_INDEX-1) * specific_energy_cgs / BOLTZMANN_CGS  # kelvin
# -
## open the halo file and find the center
## (lengths in physical kpc per the comoving->physical conversion in load_AHF)
halo_center,rvir,vesc,rstar_half = load_AHF(reader.directory,reader.snapnum,current_redshift)
print halo_center,rvir  # Python 2 print statement (this notebook's kernel is python2)
# ## Setup the reader configuration and load data
# +
## decide which part types to save to JSON
## (PartType0 = gas, PartType4 = stars; dark matter types are skipped here)
reader.returnParts = ['PartType0', 'PartType4']
## choose the names the particle types will get in the UI
reader.names = {'PartType0':'Gas',
                'PartType1':'HRDM',
                'PartType2':'LRDM',
                'PartType4':'Stars' }
# +
#define the defaults; this must be run first if you want to change the defaults below
reader.defineDefaults()
## by what factor should you sub-sample the data (e.g. array[::decimate])
## one entry per returned part type, in the same order as reader.returnParts
decimate = [100., 1000.]
# +
## load in the data from hdf5 files and put it into reader.partsDict
for i,p in enumerate(reader.returnParts):
    reader.decimate[p] = decimate[i]
    reader.returnKeys[p] = ['Coordinates', 'Density','Velocities']
    #Note: you should only try to filter on scalar values (like density).
    #The magnitude of the Velocities are calculated in Firefly, and you will automatically be allowed to filter on it
    reader.addFilter[p] = [False, True, False]
    ## tell it to do the log of density when filtering
    reader.dolog[p] = [False, True, False]
    #NOTE: all dictionaries in the "options" reference the swapped names (i.e., reader.names) you define above.
    #If you don't define reader.names, then you can use the default keys from the hdf5 files
    #(but then you will see those hdf5 names in the Firefly GUI)
    pp = reader.names[p]
    ## set the initial size of the particles when the interface loads
    reader.options['sizeMult'][pp] = 0.3
## set the default colors when the interface loads (RGBA values in [0, 1])
reader.options['color'] = {'Gas': [1., 0., 0., 1.],
                           'HRDM': [1., 1., 0., 0.1],
                           'LRDM': [1., 1., 0., 0.1],
                           'Stars':[0., 0., 1., 0.1]}
## set the camera center to be at the origin (defaults to np.mean(Coordinates) otherwise)
## later on we subtract out halo_center from coordinates but could instead make this halo_center
reader.options['center'] = np.array([0., 0., 0.])
## initialize filter flags and options
reader.defineFilterKeys()
## load in return keys from snapshot
filenames_opened = reader.populate_dict()
# -
# ### Let's calculate the galactocentric radius, offset the coordinates by it while we're at it, then add the array to Firefly using the `addtodict` method
# +
hubble=.702  # dimensionless Hubble parameter h, matching load_AHF's default
## while we're at it, let's just shift all the coordinates relative to the main halo center
reader.partsDict['PartType0']['Coordinates']=reader.partsDict['PartType0']['Coordinates']-halo_center ## both already in physical coordinates
reader.partsDict['PartType4']['Coordinates']=reader.partsDict['PartType4']['Coordinates']-halo_center ## both already in physical coordinates
## calculate the radius from the halo center (row-wise Euclidean norm)
gas_radii = np.sum(reader.partsDict['PartType0']['Coordinates']**2,axis=1)**0.5
star_radii = np.sum(reader.partsDict['PartType4']['Coordinates']**2,axis=1)**0.5
## add new radius array to the dictionary using addtodict method
## (filterFlag=True exposes 'Radius' as an interactive filter in the Firefly UI)
reader.addtodict(reader.partsDict,None,'PartType0','Radius',0,0,vals=gas_radii, filterFlag = True)
reader.addtodict(reader.partsDict,None,'PartType4','Radius',0,0,vals=star_radii, filterFlag = True)
# -
# ### Let's convert the density to physical units
# Code mass -> g , (code length)^-3 -> cm^-3 , g -> nHydrogen
# i.e. 1e10 Msun in grams (2e43), kpc^-3 in cm^-3, divided by the proton mass
DENSITYFACT=2e43*(3.086e21)**-3/(1.67e-24)
# density is stored as log10, so the unit conversion becomes an additive shift
reader.partsDict['PartType0']['log10Density'] = reader.partsDict['PartType0']['log10Density']+np.log10(DENSITYFACT)
# ### Now let's load necessary supplemental data to calculate the temperature
# +
## add temperature as a filtered quantity within the parts dict, but only for gas
all_gas_temperature = np.array([])
all_star_masses = np.array([])
## re-open each snapshot file to pull arrays the reader did not load;
## note np.append re-allocates on every pass (fine here, but O(n^2) overall)
for fname in reader.loadedHDF5Files:
    print fname  # Python 2 print statement (notebook kernel is python2)
    with h5py.File(fname,'r') as handle:
        ## load necessary arrays to calculate temperature
        gas_group = handle['PartType0']
        InternalEnergy = np.array(gas_group['InternalEnergy'])
        ElectronAbundance = np.array(gas_group['ElectronAbundance'])
        Metallicity = np.array(gas_group['Metallicity'])
        ## calculate the temperature and append it to total array
        ## (Metallicity[:,1] is the helium mass fraction passed as y_helium)
        all_gas_temperature=np.append(
            all_gas_temperature,
            getTemperature(
                InternalEnergy,
                Metallicity[:,1],
                ElectronAbundance)
            )
        ## save stellar masses for vcom below
        all_star_masses=np.append(
            all_star_masses,
            np.array(handle['PartType4/Masses'])
            )
## track the Temperature array, do the log, and add it to be filtered
reader.addtodict(reader.partsDict,None,'PartType0','Temperature',sendlog = 1, sendmag = 0,vals = all_gas_temperature, filterFlag = True)
# -
# ### Let's remove halo "CoM" velocity from velocities so that velocity vectors are accurate
# +
## find the nearby stars (those inside the stellar half-mass radius)
near_star_indices = star_radii < rstar_half
## calculate vcom: mass-weighted mean velocity of the central stars
near_star_vcom = (
    np.sum(all_star_masses[near_star_indices][:,None]
           *reader.partsDict['PartType4']['Velocities'][near_star_indices],axis=0)
    /np.sum(all_star_masses[near_star_indices])
    )
print(near_star_vcom,'kms')
## now let's remove it from the particle velocities so vectors are halo-relative
reader.partsDict['PartType4']['Velocities']-=near_star_vcom
reader.partsDict['PartType0']['Velocities']-=near_star_vcom
# -
## finish up, let's shuffle + decimate, add the GUI friendly names, and create our final JSON!
reader.shuffle_dict()
reader.swap_dict_names()
## write the JSON once -- the original cell called createJSON() twice in a row,
## which only repeated the (expensive) serialization with identical output
reader.createJSON()
| data/openWithHaloFinder.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
#
# # Upload & Manage Annotations
#
#
import dtlpy as dl
# Fetch one item and one of its annotations by ID
# (the empty strings are placeholders -- fill in real IDs before running)
item = dl.items.get(item_id="")
annotation = item.annotations.get(annotation_id="")
# Attach arbitrary user metadata to the annotation and persist it remotely
annotation.metadata["user"] = True
annotation.update()
# ## Upload User Metadata
# To upload annotations from JSON and include the user metadata, add the parameter local_annotations_path to the dataset.items.upload function, like so:
#
# Resolve the target project/dataset (replace the placeholder names)
project = dl.projects.get(project_name='project_name')
dataset = project.datasets.get(dataset_name='dataset_name')
# Upload items together with their JSON annotations; item_metadata=FROM_JSON
# pulls the per-item metadata from the annotation JSON files themselves
dataset.items.upload(local_path=r'<items path>',
                     local_annotations_path=r'<annotation json file path>',
                     item_metadata=dl.ExportMetadata.FROM_JSON,
                     overwrite=True)
#
# ## Convert Annotations To COCO Format
#
#
# Convert a local COCO-format dataset and upload it (items + annotations)
converter = dl.Converter()
converter.upload_local_dataset(
    from_format=dl.AnnotationFormat.COCO,
    dataset=dataset,
    local_items_path=r'C:/path/to/items',
    # Please make sure the names of the items are the same as written in the COCO JSON file
    local_annotations_path=r'C:/path/to/annotations/file/coco.json'
)
#
# ## Upload Entire Directory and their Corresponding Dataloop JSON Annotations
#
#
# Local path to the items folder
# If you wish to upload items with your directory tree use : r'C:/home/project/images_folder'
local_items_path = r'C:/home/project/images_folder/*'
# Local path to the corresponding annotations - make sure the file names fit
# (each annotation JSON is matched to its item by file name)
local_annotations_path = r'C:/home/project/annotations_folder'
dataset.items.upload(local_path=local_items_path,
                     local_annotations_path=local_annotations_path)
#
# ## Upload Annotations To Video Item
# Uploading annotations to video items needs to consider spanning between frames, and toggling visibility (occlusion). In this example, we will use the following CSV file.
# In this file there is a single 'person' box annotation that begins on frame number 20, disappears on frame number 41, reappears on frame number 51 and ends on frame number 90.
#
# [Video_annotations_example.CSV](https://cdn.document360.io/53f32fe9-1937-4652-8526-90c1bc78d3f8/Images/Documentation/video_annotation_example.csv)
#
#
import pandas as pd
# Read CSV file (one row per annotated frame; see the example CSV linked above)
df = pd.read_csv(r'C:/file.csv')
# Get item (the video to annotate)
item = dataset.items.get(item_id='my_item_id')
builder = item.annotations.builder()
# Read line by line from the csv file
for i_row, row in df.iterrows():
    # Create box annotation from csv rows and add it to a builder;
    # rows sharing an 'annotation id' are stitched into one annotation over time
    builder.add(annotation_definition=dl.Box(top=row['top'],
                                             left=row['left'],
                                             bottom=row['bottom'],
                                             right=row['right'],
                                             label=row['label']),
                object_visible=row['visible'],  # Support hidden annotations on the visible row
                object_id=row['annotation id'],  # Numbering system that separates different annotations
                frame_num=row['frame'])
# Upload all created annotations in one request
item.annotations.upload(annotations=builder)
#
# # Show Annotations Over Image
# After uploading items and annotations with their metadata, you might want to see some of them and perform visual validation.
#
# To see only the annotations, use the annotation type *show* option.
#
#
# Use the show function for all annotation types
box = dl.Box()
# Must provide all inputs
# (the empty strings are placeholders -- pass real values, e.g. a numpy image,
# integer thickness/height/width, and a dl.ViewAnnotationOptions format)
box.show(image='',
         thickness='',
         with_text='',
         height='',
         width='',
         annotation_format='',
         color='')
#
# To see the item itself with all annotations, use the Annotations option.
#
#
# Must input an image or height and width
# (placeholders below must be replaced with real values; annotation_format
# takes one of the dl.ViewAnnotationOptions members)
annotation.show(image='',
                height='', width='',
                annotation_format='dl.ViewAnnotationOptions.*',
                thickness='',
                with_text='')
#
# # Download Data, Annotations & Metadata
# The item ID for a specific file can be found in the platform UI - Click BROWSE for a dataset, click on the selected file, and the file information will be displayed in the right-side panel. The item ID is detailed, and can be copied in a single click.
#
# ## Download Items and Annotations
# Download dataset items and annotations to your computer folder in two separate folders.
# See all annotation options [here](https://dataloop.ai/docs/sdk-download#annotation-options).
#
#
#
# Download items plus their annotations as JSON files
# NOTE(review): dl.VIEW_ANNOTATION_OPTIONS_JSON appears to be a legacy alias of
# dl.ViewAnnotationOptions.JSON used elsewhere in this guide -- confirm against the SDK
dataset.download(local_path=r'C:/home/project/images', # The default value is ".dataloop" folder
                 annotation_options=dl.VIEW_ANNOTATION_OPTIONS_JSON)
#
# ## Multiple Annotation Options
# See all annotation options [here](https://dataloop.ai/docs/sdk-download#annotation-options).
#
#
#
# Download with several annotation outputs at once (mask, JSON and instance map).
# All three options use the ViewAnnotationOptions enum consistently -- the
# original mixed deprecated module-level constants with the enum in one list.
dataset.download(local_path=r'C:/home/project/images', # The default value is ".dataloop" folder
                 annotation_options=[dl.ViewAnnotationOptions.MASK,
                                     dl.ViewAnnotationOptions.JSON,
                                     dl.ViewAnnotationOptions.INSTANCE])
#
# ## Filter by Item and/or Annotation
# * **Items filter** - download filtered items based on multiple parameters, like their directory.
# You can also download items based on different filters. Learn all about item filters [here](https://dataloop.ai/docs/sdk-sort-filter).
# * **Annotation filter** - download filtered annotations based on multiple parameters like their label.
# You can also download items annotations based on different filters, learn all about annotation filters [here](https://dataloop.ai/docs/sdk-sort-filter-annotation).
# This example will download items and JSONS from a dog folder of the label 'dog'.
#
#
#
# Filter items from "folder_name" directory
item_filters = dl.Filters(resource='items', field='dir', values='/dog_name')
# Filter items with dog annotations
annotation_filters = dl.Filters(resource=dl.FiltersResource.ANNOTATION, field='label', values='dog')
# Download only the filtered items, saving only the matching annotations as JSON
dataset.download(local_path=r'C:/home/project/images', # The default value is ".dataloop" folder
                 filters=item_filters,
                 annotation_filters=annotation_filters,
                 annotation_options=dl.VIEW_ANNOTATION_OPTIONS_JSON)
#
# ## Filter by Annotations
# * **Annotation filter** - download filtered annotations based on multiple parameters like their label. You can also download items annotations based on different filters, learn all about annotation filters [here](https://dataloop.ai/docs/sdk-sort-filter-annotation).
#
#
#
item = dataset.items.get(item_id="item_id") # Get item from dataset to be able to view the dataset colors on Mask
# Filter items with dog annotations
annotation_filters = dl.Filters(resource='annotations', field='label', values='dog')
# Download this single item, keeping only its 'dog' annotations as JSON
item.download(local_path=r'C:/home/project/images', # the default value is ".dataloop" folder
              annotation_filters=annotation_filters,
              annotation_options=dl.VIEW_ANNOTATION_OPTIONS_JSON)
#
# ## Download Annotations in COCO Format
#
# * **Items filter** - download filtered items based on multiple parameters like their directory. You can also download items based on different filters, learn all about item filters [here](https://dataloop.ai/docs/sdk-sort-filter).
# * **Annotation filter** - download filtered annotations based on multiple parameters like their label. You can also download items annotations based on different filters, learn all about annotation filters [here](https://dataloop.ai/docs/sdk-sort-filter-annotation).
#
# This example will download COCO from a dog items folder of the label 'dog'.
#
#
#
# Filter items from "folder_name" directory
item_filters = dl.Filters(resource='items', field='dir', values='/dog_name')
# Filter items with dog annotations
annotation_filters = dl.Filters(resource='annotations', field='label', values='dog')
# Export the filtered subset of the dataset to COCO format on disk
converter = dl.Converter()
converter.convert_dataset(dataset=dataset,
                          to_format='coco',
                          local_path=r'C:/home/coco_annotations',
                          filters=item_filters,
                          annotation_filters=annotation_filters)
| tutorials/data_management/upload_and_manage_annotations/chapter.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python [conda env:mayavi_env]
# language: python
# name: conda-env-mayavi_env-py
# ---
# +
from sympy import symbols, Function, Rational,Matrix,cse
def _print_jacobian_cse(pxdot, pydot, pzdot, xdot, ydot, zdot, sdot, variables):
    """Assemble the Euler vector of Fermat's ray equations, differentiate it
    with respect to each phase-space variable, run common-subexpression
    elimination on the Jacobian and print the result.

    Factors out the machinery that was duplicated verbatim between
    zparam() and sparam(); only the derivative expressions differ.
    """
    euVec = Matrix([pxdot, pydot, pzdot, xdot, ydot, zdot, sdot]).T
    # One Jacobian row per phase-space variable, in the fixed order
    # (px, py, pz, x, y, z, s)
    jac = Matrix([euVec.diff(var)[:] for var in variables])
    cseFunc = cse(jac.T, optimizations='basic')
    print(cseFunc)

def zparam():
    """Ray equations parameterized by z (so zdot == 1); print the CSE'd Jacobian."""
    px, py, pz, x, y, z, s = symbols('px py pz x y z s')
    n = Function('n')(x, y, z)
    nx, ny, nz = n.diff(x), n.diff(y), n.diff(z)
    _print_jacobian_cse(
        nx*n/pz, ny*n/pz, nz*n/pz,   # momentum derivatives
        px/pz, py/pz, Rational(1),   # position derivatives
        n/pz,                        # path-length derivative
        (px, py, pz, x, y, z, s))

def sparam():
    """Ray equations parameterized by path length s (so sdot == 1); print the CSE'd Jacobian."""
    px, py, pz, x, y, z, s = symbols('px py pz x y z s')
    n = Function('n')(x, y, z)
    nx, ny, nz = n.diff(x), n.diff(y), n.diff(z)
    _print_jacobian_cse(
        nx, ny, nz,                  # momentum derivatives
        px/n, py/n, pz/n,            # position derivatives
        Rational(1),                 # path-length derivative
        (px, py, pz, x, y, z, s))

if __name__=="__main__":
    zparam()
    sparam()
# -
| src/ionotomo/notebooks/DeriveFermat.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Planted SK model
import numpy as np
import networkx as nx
import matplotlib.pyplot as plt
from IPython.display import Latex
# Fix the RNG seed so sampled instances are reproducible across runs
np.random.seed(14753)
# %matplotlib inline
# Write a function to sample an instance of $(\mathbf{s},\mathbf{J})$.
#
# ## Point c)
# From points a) and b) of exercise 1 we know that the posterior distribution of $s$ is a Boltzmann distribution
#
# $$ P(s | J ) \propto e^{\frac{\beta}{\sqrt{N}} \sum_{ij}J_{ij} s_i s_j}$$
#
# To sample a particular realization we follow the generative model at hand: first we sample a "planted" realization of $s$ from the prior
#
# $$s_i \sim \frac{1}{2} \delta(s_i - 1) + \frac{1}{2} \delta(s_i + 1)$$
#
# and then sample a realization of $J$ from the likelihood
#
# $$J_{ij} \sim N \left(\frac{s_i s_j}{\sqrt{N}}, \sigma^2 \right) $$
def sample_instance(size_x, var_noise):
    """Sample {x, J} from P(x, J).

    Template to be completed (exercise): draw the planted spins x0 uniformly
    from {-1, +1}^size_x and the couplings
    J_ij ~ N(x0_i x0_j / sqrt(size_x), var_noise),
    as described in the markdown cell above.
    """
    #TODO (must define x0 and J before the return below)
    return x0, J
# Sample one (large) planted instance and inspect it
var_noise = 0.1
x0, J = sample_instance(size_x=5000, var_noise=var_noise)
print(x0, end='\n\n')
print(J)
# ## Points d) e) f)
# * d) Write a function that implements the TAP equation to approximate the mean $\hat{\mathbf{s}}$ of $P(\mathbf{s}|\mathbf{J})$.
# This is an iteration that, if it converges, gives a very good approximation for $\hat{\mathbf{s}}$ as $N\rightarrow \infty$.
# For numerical reasons implement the fixed point iterations as follows:
# \begin{align*}
# m_i^{(t+1)} &= \tanh \left( \frac{1}{\sigma^2 \sqrt{N}} \sum_j J_{ij} \, m_j^{(t)} \right) && \mbox{Mean Field} \\
# m_i^{(t+1)} &= \tanh \left(
# \frac{1}{\sigma^2 \sqrt{N}} \sum_j J_{ij} \, m_j^{(t)} -
# m_i^{(t-1)} \frac{1}{N \sigma^4} \sum_j J^{2}_{ij}\, (1 - (m_j^{(t)})^2) \right)
# && \mbox{TAP} \\
# \end{align*}
#
# * e) Run some experiments ($N_{real} \in [10,100]$ re-samplings of $J, s$ at your choice) for $N=10,100,1000,5000$ and fixed $\sigma^{2}=0.1$ and check that the overlap with ground-truth improves with the iterations.
#
# The overlap is defined as: $overlap(\mathbf{m},\mathbf{s}_{0}) := | \frac{\mathbf{m}\cdot \mathbf{s}_{0}}{N}|$.
#
# * f)
# * i) Run some experiments ($N_{real} \in [10,100]$ re-samplings of $J, s$ at your choice) for $N=10,100,1000,5000$ and varying $\sigma^{2} \in [0.1,2]$.
# * ii) Repeat the same experiments but using the MF approximation instead.
# * iii) Plot the performance metrics values at convergence for TAP and MF as a function of the noise $\sigma^{2}$ for various $N$.
# Comment on what you observe.
#
#
def iterate_sc_equation(J, var_noise, s0=None, max_iter=None, tol=1e-7, verbose=True, approximation='MF'):
    """Iterate MF or TAP self-consistency equation.

    Template to be completed (exercise): iterate the Mean-Field or TAP
    fixed-point updates on the couplings ``J`` (noise variance ``var_noise``)
    until convergence within ``tol`` or ``max_iter`` sweeps, then return the
    magnetizations ``m``. ``s0`` is presumably the planted configuration used
    to monitor the overlap when ``verbose`` is set -- confirm when implementing.
    """
    # Some pre-processing
    size_x = J.shape[0]
    max_iter = max_iter or 100 * size_x  # default: scale iteration budget with system size
    #TODO (must define m before the return below)
    return m
# Example call of iterate_sc_equation, using the TAP approximation
# (the returned magnetization estimate is discarded here)
iterate_sc_equation(J, var_noise, max_iter=30, approximation='TAP')
# Run experiment for varying levels of noise
def run_experiment(noise_vars, size_x, verbose=False):
    """Compute overlap obtained by AMP using different noise variances.

    Template to be completed (exercise): for each variance in ``noise_vars``,
    sample a fresh planted instance of size ``size_x``, run both TAP and MF
    iterations, and store overlap(m, x0) in the corresponding output slot.
    """
    overlaps_tap = np.zeros(len(noise_vars))
    overlaps_MF = np.zeros(len(noise_vars))
    # For each variance in noise_vars, sample new instance and run AMP
    for i, var in enumerate(noise_vars):
        # NOTE(review): the loop body is intentionally left as an exercise;
        # with a comment-only body this function does not parse until filled in.
        #TODO
    return overlaps_tap, overlaps_MF
# +
noise_vars = np.arange(0.1, 2.0, 0.25)
size_x = 100
n_real = 10
o_tap = []
o_MF = []
for rep in range(n_real):
    # Fresh disorder realisation on every repetition
    overlaps_tap, overlaps_MF = run_experiment(noise_vars, size_x)
    o_tap.append(overlaps_tap)
    o_MF.append(overlaps_MF)
    print(rep, o_MF[-1].mean(), o_tap[-1].mean())
# turn lists of arrays into 2D arrays (rows = realisations, cols = variances)
o_tap = np.array(o_tap)
o_MF = np.array(o_MF)
# +
# Plot mean overlap vs noise variance, with error bars over realisations.
plt.figure(figsize=(10, 10))
plt.errorbar(noise_vars, o_tap.mean(axis=0), yerr=o_tap.std(axis=0), marker="o", c='b', label='TAP')
plt.errorbar(noise_vars, o_MF.mean(axis=0), yerr=o_MF.std(axis=0), marker="*", c='r', label='Mean Field')
plt.ylabel("overlap($m_{model},m_{planted}$)", fontsize=20)
plt.xlabel(r"$\sigma^2$", fontsize=20)
# Single legend call: the original called plt.legend() twice and the second
# call simply replaced the first, so the plain one was redundant.
plt.legend(fontsize=20)
# -
| L5/L5_tutorial_exercise2_pseudocode.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
# # 10. Gaussian Kernel Regression
#
# [](https://colab.research.google.com/github/rhennig/EMA6938/blob/main/Notebooks/10.GaussianKernelRegression.ipynb)
#
# (Based on https://towardsdatascience.com/kernel-regression-from-scratch-in-python-ea0615b23918)
#
# In this example, we apply Gaussian kernel regression to a one-dimensional dataset.
#
# ## Gaussian Kernel
#
# In Gaussian kernel regression the shape of the kernel is the Gaussian curve:
# $$
# \frac{1}{\sqrt{2\pi}} \exp \left ( - \frac{z^2}{2} \right ).
# $$
#
# Each constructed kernel describes a normal distribution with mean value ${\bf x}_i$ and standard deviation $b$, where $b$ is a hyperparameter that controls the width of the Gaussian:
# $$
# k(x, x_i) = \frac{1}{\sqrt{2\pi}} \exp \left ( - \frac{(x-x_i)^2}{2b^2} \right ).
# $$
#
# Note that the normalization of the Gaussian does not matter as the weights are being normalized themselves.
# ## Prediction
#
# The weights for a given new input $\tilde x$ are calculated from the normalized kernel values:
# $$
# w_i = \frac{k(\tilde x, x_i)}{\sum_{l=1}^N k(\tilde x, x_l)}.
# $$
#
# The prediction $\tilde y$ is obtained by multiplying the weight vector ${\bf w} = [w_1, w_2, \dots, w_N]$ with the label vector ${\bf y} = [y_1, y_2, \dots, y_N]$:
# $$
# \tilde y = \sum_{i=1}^N w_i y_i.
# $$
# ## 1-Dimensional Gaussian Kernel Regression
# +
from scipy.stats import norm
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
plt.rc('xtick', labelsize=16)
plt.rc('ytick', labelsize=16)
# +
# Create a 1D dataset (X and Y are reused by the regression cells below)
X = np.array([10, 20, 30, 40, 50, 60, 70, 80, 90, 100, 110, 120])
Y = np.array([2337, 2750, 2301, 2500, 1700, 2100, 1100, 1750, 1000, 1642, 2000, 1932])

# Scatter-plot the raw training data
figure, axis = plt.subplots(figsize=(12, 8))
axis.scatter(X, Y, color='blue', label='Training')
axis.set_xlabel('x', fontsize=20)
axis.set_ylabel('y', fontsize=20)
axis.set_title('Data', fontsize=25)
plt.legend(fontsize=20)
plt.show()
# +
# Define a class for Gaussian Kernel Regression
class GKR:
    """One-dimensional Gaussian kernel regression."""

    def __init__(self, x, y, b):
        """Store training inputs x, targets y and kernel bandwidth b."""
        self.x = x
        self.y = y
        self.b = b

    def gaussian_kernel(self, z):
        """Standard normal density evaluated at z."""
        return (1/np.sqrt(2*np.pi))*np.exp(-0.5*z**2)

    def predict(self, X):
        """Return the kernel-weighted average of the training targets at X."""
        kvals = [self.gaussian_kernel((centre - X) / self.b) for centre in self.x]
        total = np.sum(kvals)
        normalised = [k / total for k in kvals]
        return np.dot(normalised, self.y)

    def visualize_kernels(self, precision):
        """Plot the Gaussian kernel centred on every training point."""
        plt.figure(figsize=(12, 6))
        for centre in self.x:
            grid = np.linspace(centre - 3 * self.b, centre + 3 * self.b, precision)
            plt.plot(grid, norm.pdf(grid, centre, self.b), label='Kernel at xi=' + str(centre))
        plt.title('Visualize the Kernel', fontsize=22)
        plt.ylabel('Kernel Weights wi', fontsize=20)
        plt.xlabel('x', fontsize=20)
        plt.legend(fontsize=14)

    def visualize_predictions(self, precision, X):
        """Plot every kernel plus a dashed vertical marker at the query point X."""
        plt.figure(figsize=(12, 6))
        tallest = 0
        for centre in self.x:
            grid = np.linspace(centre - 3 * self.b, centre + 3 * self.b, precision)
            density = norm.pdf(grid, centre, self.b)
            tallest = max(max(density), tallest)
            plt.plot(grid, density, label='Kernel at xi=' + str(centre))
        plt.title('Visualize the Prediction', fontsize=22)
        plt.plot([X, X], [0, tallest], 'k-', lw=2, dashes=[2, 2])
        plt.ylabel('Kernel Weights wi', fontsize=20)
        plt.xlabel('x', fontsize=14)
        plt.legend(fontsize=14)
# Set the width of the Gaussian kernel
b = 20
# Build the regressor from the training data and draw the per-point kernels
gkr = GKR(X, Y, b)
gkr.visualize_kernels(100)
# Prediction for test x: show the kernels with a marker at the query point
x = 26.0
gkr.visualize_predictions(200, x)
# +
# Visualize the 1-dimensional prediction
fig, ax = plt.subplots(figsize=(12, 8))
xlist = np.linspace(0, 120, 240)
# Build the prediction curve in one pass; the original grew the array with
# np.append inside the loop, which copies the whole array each iteration
# (quadratic cost).
ylist = np.array([gkr.predict(xq) for xq in xlist])
ax.scatter(X, Y, color='blue', label='Training')
ax.plot(xlist, ylist, color='orange', label='Prediction')
ax.set_xlabel('x', fontsize=20)
ax.set_ylabel('y', fontsize=20)
plt.legend(fontsize=20)
ax.set_title('Gaussian Kernel', fontsize=25)
plt.show()
# -
# ## N-dimensional Kernel Regression
#
# For $N$-dimensional inputs, the only modification we need to make is to calculate the kernels with the Euclidean metric.
# $$
# k(x, x_i) = \frac{1}{\sqrt{2\pi}} \exp \left ( - \frac{\|{\bf x}-{\bf x}_i\|^2}{2b^2} \right ).
# $$
#
#
# +
# N-dimensional using numpy
from mpl_toolkits.mplot3d import Axes3D
from scipy.stats import multivariate_normal
from matplotlib import cm
'''Class for Gaussian Kernel Regression'''
class GKR:
    """Gaussian kernel regression for N-dimensional inputs."""

    def __init__(self, x, y, b):
        """Store training inputs x (rows = points), targets y and bandwidth b."""
        self.x = np.array(x)
        self.y = np.array(y)
        self.b = b

    def gaussian_kernel(self, z):
        """Standard normal density evaluated at z."""
        return (1/np.sqrt(2*np.pi))*np.exp(-0.5*z**2)

    def predict(self, X):
        """Return the kernel-weighted average of the targets at query point X.

        The kernel argument is the Euclidean distance ||x_i - X|| / b.
        """
        kernels = np.array([self.gaussian_kernel(np.linalg.norm(xi - X) / self.b) for xi in self.x])
        # Normalise directly: the original multiplied each weight by
        # len(self.x) and then divided the dot product by len(self.x),
        # which cancels exactly.
        weights = kernels / np.sum(kernels)
        return np.dot(weights, self.y)
# +
def func(x, y):
    """Test surface x * exp(-(x^2 + y^2)); operates elementwise on arrays."""
    radius_sq = x ** 2 + y ** 2
    return x * np.exp(-radius_sq)
# Plot function using a dense regular mesh
x = np.linspace(-2, 2, 51)
y = np.linspace(-2, 2, 51)
xx, yy = np.meshgrid(x, y)
z = func(xx, yy)

fig, (ax1, ax2, ax3) = plt.subplots(nrows=3, figsize=(10, 24))
ax1.contour(xx, yy, z, levels=14, linewidths=0.5, colors='k')
cntr1 = ax1.contourf(xx, yy, z, levels=14, cmap="RdBu_r")
fig.colorbar(cntr1, ax=ax1)
ax1.plot(xx, yy, 'ko', ms=1)
ax1.set(xlim=(-2, 2), ylim=(-2, 2))
ax1.set_title('Plot for dense mesh of points', fontsize=20)
plt.subplots_adjust(hspace=0.2)

# Sample random training points and evaluate the true function on them
np.random.seed(23971)
npts = 100
x = np.random.uniform(-2, 2, npts)
y = np.random.uniform(-2, 2, npts)
X_train = np.vstack((x, y)).T
Y_train = func(x, y)

ax2.tricontour(x, y, Y_train, levels=14, linewidths=0.5, colors='k')
cntr2 = ax2.tricontourf(x, y, Y_train, levels=14, cmap="RdBu_r")
fig.colorbar(cntr2, ax=ax2)
ax2.plot(x, y, 'ko', ms=5)
ax2.set(xlim=(-2, 2), ylim=(-2, 2))
ax2.set_title('Triangulation plot for %d random points' % npts, fontsize=20)

# Train Gaussian Kernel Regression on the random points
b = 0.25
gkr = GKR(X_train, Y_train, b)

# Evaluate the regression at every dense-mesh point.  Building z with one
# comprehension avoids the quadratic cost of np.append inside the loop
# (np.append copies the whole array on every call).
xx = xx.flatten()
yy = yy.flatten()
z = np.array([gkr.predict(np.array([xv, yv])) for xv, yv in zip(xx, yy)])

ax3.tricontour(xx, yy, z, levels=14, linewidths=0.5, colors='k')
cntr3 = ax3.tricontourf(xx, yy, z, levels=14, cmap="RdBu_r")
fig.colorbar(cntr3, ax=ax3)
ax3.plot(x, y, 'ko', ms=5)
ax3.set(xlim=(-2, 2), ylim=(-2, 2))
ax3.set_title('Gaussian Kernel Regression on %d random points' % npts, fontsize=20)
plt.show()
# -
| Notebooks/10.GaussianKernelRegression.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Gaussian Elimination
import numpy as np
import matplotlib.pyplot as plt
# Assume we have a relationship betwen three variables x, y, and z, that can be related using the following three equations:
#
# $$2x - 4y + z = -3$$
# $$-x + 2y -2z = -3$$
# $$x + y + z = 6$$
#
# We can then express these relationships in the matrix form:
#
# $$
# \begin{bmatrix}
# 2 && -4 && 1 \\
# -1 && 2 && -2 \\
# 1 && 1 && 1
# \end{bmatrix}
# \begin{bmatrix} x \\ y \\ z \end{bmatrix} =
# \begin{bmatrix} -3 \\ -3 \\ 6\end{bmatrix}$$
#
# Often, this relation is written in its more condensed short-hand form (known as an augmented matrix):
#
# $$
# \left[
# \begin{matrix}
# 2 && -4 && 1 \\
# -1 && 2 && -2 \\
# 1 && 1 && 1
# \end{matrix}
# \middle|
# \begin{matrix}
# -3 \\
# -3 \\
# 6
# \end{matrix}
# \right]$$
# The idea of these problems, is to solve for the x, y and z values that satisfy the above conditions.
#
# When writing this process in a computer program, we need a very structured way of iterating and processing data. We'll want to organize our matrix in a way such that our program can solve every matrix in the same way.
#
# If we were computing this solution by hand, we'd know the following two matrices are identical statements (notice, just two rows are swapped):
#
# $$
# \left[
# \begin{matrix}
# 1 && 0 && 0 \\
# 0 && 0 && 1 \\
# 0 && 1 && 0 \\
# \end{matrix}
# \middle|
# \begin{matrix}
# 1 \\ 3 \\ 2
# \end{matrix}
# \right]
# =
# \left[
# \begin{matrix}
# 1 && 0 && 0 \\
# 0 && 1 && 0 \\
# 0 && 0 && 1 \\
# \end{matrix}
# \middle|
# \begin{matrix}
# 1 \\ 2 \\ 3
# \end{matrix}
# \right]
# $$
# But, in order to create a strategic and methodically process for the computer, we'll have to organize the matrix such that our solution matrix will have 1's in the diagonal (also called the Identity Matrix).
# Here's how we can implement gaussian elimination with backsubstitution on the computer:
# #### Part 1: Iterate through your matrix. For each row, do the following steps:
# ##### Step 1: Partial Pivot
#
# Essentially, this just means we're going to include a process that will swap rows so that our final matrix will be the identity matrix.
#
# While you're iterating though the matrix, you'll need to compare the <i>ith</i> term of the <i>ith</i> row to the <i>ith</i> term in all the following rows. Swap the <i>ith</i> row with the row that has the largest <i>ith</i> element. ("Largest" in this case means farthest from 0, so you'll want to compare the absolute values).
# ##### Step 2: Divide by the diagonal term
# Divide the whole row by its diagonal term (such that the diagonal term is one).
# ##### Step 3: Add multiples
# The goal in this step is to make the matrix upper triangular (every element below the diagonal is 0).
#
# Our first row currently has a 1 in the leftmost position. We want to zero out the leftmost position for all of the following rows. So for every row after the 1st row, you'll subtract some multiple times the first row.
#
# When you repeat all of these steps on the <i>ith</i> row, you'll do the same thing for all of the following rows.
# After you repeat all of the above steps on the entirety of the matrix, your matrix should be upper triangular with ones along the diagonal. The last step is to use backsubstitution to get the identity matrix.
# #### Part 2: Backsubstitution
# Now that the matrix is upper triangular, we'll want to add multiples of rows to manipulate it into the identity matrix. Our last row contains only one value, add multiples of this row to each of the preceding rows to zero out the position in this matrix for all but the last row. Move up to the second to last row, and repeat this process.
# I recommend implementing the basics without the partial pivot, then incorporate the partial pivot after you have a solution that works for some matrices (I've included a collection of assert statements below that will pass without the partial pivot implemented).
def solve_system(matrix):
"""
Solves a system of linear equations
Parameters:
matrix(N x N+1 numpy array): the augmented matrix to be solved
Returns:
(N x 1 numpy array): array of the solutions to the linear equations
"""
N = len(matrix)
matrix = matrix.astype("float64")
for i in range(N):
for j in range(i + 1, N):
if abs(matrix[j, i]) > matrix[i, i]: # iterates through every row and column for partial pivot
temp = np.array(matrix[i].tolist())
matrix[i] = matrix[j] # switches nth row with row with largest nth element
matrix[j] = temp
matrix[i] = matrix[i] / (matrix[i, i]) # divides nth row by its nth value
for j in range(i + 1, N):
matrix[j] = matrix[j] - (matrix[i] * matrix[j, i]) # subtraction to make bottom left 0s
for i in range(N - 1, -1, -1):
for j in range(i): # backsubstituion, work backwards to solve each value and set upper right triangle to 0
matrix[j] -= matrix[j, i] * matrix[i]
return matrix[:, -1]
# I've included a detailed list of tests below. Once your code is running correctly, all of the following tests will pass.
test_a = np.array([[1, 0, 3], [0, 1, 2]])
test_b = np.array([[3, 1, 5], [2, 2, 6]])
test_c = np.array([[2, 1, 4, 1, -4], [3, 4, -1, -1, 3], [1, -4, 1, 5, 9], [2, -2, 1, 3, 7]])
test_d = np.array([[0, 1, 2], [1, 0, 3]])
test_e = np.array([[2, -4, 1, -3], [-1, 2, -2, -3], [1, 1, 1, 6]])
test_f = np.array([[1, 1, 1, 1, 12], [2, -1, -1, 1, 4], [1, -2, 1, -2, -15], [3, 3, 2, -1, 15]])
test_g = np.array([[0, 0, 1, 0, 10], [0, 1, 0, 0, 3], [1, 0, 0, 0, 4], [0, 0, 0, 1, 2]])

# NOTE: the original checks used `np.all(result - expected < 0.0001)`, which
# is one-sided — it also passes when result is far BELOW expected (or NaN
# comparisons all come out False only sometimes).  np.allclose compares the
# absolute difference in both directions within a tolerance.

# These will pass without the pivot functionality
assert np.allclose(solve_system(test_a), np.array([3, 2]))
assert np.allclose(solve_system(test_b), np.array([1, 2]))
assert np.allclose(solve_system(test_c), np.array([2, -1, 2, 1]))
assert np.allclose(solve_system(test_f), np.array([2, 4, 1, 5]))

# These will NOT pass unless you have the partial pivot implemented correctly
assert np.allclose(solve_system(test_d), np.array([3, 2]))
assert np.allclose(solve_system(test_e), np.array([1, 2, 3]))
assert np.allclose(solve_system(test_g), np.array([4, 3, 10, 2]))
| Assignment09.ipynb |