text stringlengths 2.5k 6.39M | kind stringclasses 3
values |
|---|---|
```
%matplotlib inline
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
import os
import re
from sklearn.naive_bayes import MultinomialNB
from sklearn.feature_extraction import DictVectorizer
# DictVectorizer instance (created here but never used in this visible section).
vec=DictVectorizer()
import spotipy
from spotipy.oauth2 import SpotifyClientCredentials
from sklearn.svm import SVC
from tmm_functions import *
# Spotify client wrapper from tmm_functions.
# NOTE(review): `data` is not defined anywhere above in this file --
# presumably the credentials object expected by User; confirm with tmm_functions.
client=User(data)
# Genre labels used for the classifiers built below.
lista_g = ["rock","funk","eletronica","rap","sertanejo","metal"]
#
# Playlists - (nome,user,id)
#
lista_playlists_rock = [["volume máximo","spotify","37i9dQZF1DX4908CEYEdlz"],
["rock this","spotify","37i9dQZF1DXcF6B6QPhFDv"],
["rock solid","spotify","37i9dQZF1DX49jUV2NfGku"],
["pure rock & roll","spotify","37i9dQZF1DWWRktbhJiuqL"],
["legendary guitar solos","spotify","37i9dQZF1DWSlJG7YPUBHF"],
["power ballads","spotify","37i9dQZF1DX82Zzp6AKx64"],
["90s pop rock essentials","spotify","37i9dQZF1DX3YMp9n8fkNx"],
["workday: rock classincs","spotify","37i9dQZF1DX1lwxXvReaux"],
["rock save the queen","spotify","37i9dQZF1DXcDnIr0iKAG8"],
["rock brazuca","digster_brasil","7tJSlaad9s6aiGloaqRJML"]]
lista_playlists_funk = [["funk 25","spotify","37i9dQZF1DWTkIwO2HDifB"],
["bonde do funk","spotify","37i9dQZF1DWYYeOy9vs7I5"],
["absurdamente funk","onerpm","4ut6RdaC7vEH5SpI5MXnnT"],
["baile funk","filtr.br","67z7xW9dTE9DIPStaywvxB"],
["funk divas","spotify","37i9dQZF1DWTSW6EnevZMi"],
["this is mc guime","spotify","37i9dQZF1DXa7ipSktWMpx"],
["funk na laje","digster_brasil","5eBQ5t5KiODgoezdnR0edQ"],
["this is mc bin laden","spotify","37i9dQZF1DXdPrxabRfeD5"],
["baile novo","spotify","37i9dQZF1DX3nEjIEd6k1k"],
["funk 2017","filtr.br","1rjCkO3uhxg6DvuKGQ5Uou"]]
lista_playlists_sertanejo = [["sertanejo 2017","filtr.br","4ApmKHtHuz8ToUrz6OOPf3"],
["sertanejo universitario 2017","vini_kam","4C7la5DHqabGdAaJ3615I5"],
["esquenta sertanejo","spotify","37i9dQZF1DXdSjVZQzv2tl"],
["potencia sertanejo","spotify","37i9dQZF1DX5lAV5ZGu9ik"],
["classicos sertanejos","spotify","37i9dQZF1DX7IK76Iw6baR"],
["sertanejo no trabalho","spotify","37i9dQZF1DWUkWvWISwtjS"],
["sextaneja","digster_brasil","429AhnPSHFmHsPrdLu1Zuj"],
["sertanejo romantico","somlivre","1YSZZdqxRaMUnzd7ThNe71"],
["festa sertaneja","filtr.br","13q6Cd96yY0ww3k7Fc8pWl"],
["this is luan santana","spotify","37i9dQZF1DXarYcE40q7Zs"]]
lista_playlists_rap = [["rapublica","spotify","37i9dQZF1DWSIvbYYt1Dvi"],
["proxima rima","spotify","37i9dQZF1DWYBXtbGSCurj"],
["rap e compromisso","onerpm","4eoAeASXEEa3s8CC5SVsCH"],
["hip hop 2017","mejoresplaylistsspotify","0OtdLZ5JVyqB4Zp8aylECe"],
["rap caviar","spotify","37i9dQZF1DX0XUsuxWHRQd"],
["hip hop crew","spotify","37i9dQZF1DXb8e91bM8R51"],
["most necessary","spotify","37i9dQZF1DX2RxBh64BHjQ"],
["urban poet","spotify","37i9dQZF1DWSTeI2WWFaia"],
["trabalho com hip hop","filtr.br","5jTImMGAxkRqwfB4p7PJC3"],
["rap the set","digster.fm","5HYnMEFnXw6j9Xj8DIth0f"]]
lista_playlists_eletronica = [["edm room","spotify","37i9dQZF1DXaPK7HyVedIT"],
["eletro br","spotify","37i9dQZF1DXd14ByHZraz1"],
["mint","spotify","37i9dQZF1DX4dyzvuaRJ0n"],
["dance party","spotify","37i9dQZF1DXaXB8fQg7xif"],
["housewek","spotify","37i9dQZF1DXa8NOEUWPn9W"],
["edm beats","playlistmebrasil","4fwESuhQHeVxXJjDUUVlvv"],
["chill tracks","spotify","37i9dQZF1DX6VdMW310YC7"],
["vacation haus","spotify","37i9dQZF1DWT8fmnlYTQW3"],
["ibiza vibes","spotify","37i9dQZF1DWV6K3lprq7Lc"],
["bass face","spotify","37i9dQZF1DX2vEc9fKrard"]]
lista_playlists_metal = [["old school metal","spotify","37i9dQZF1DX2LTcinqsO68"],
["deuses do metal","spotify","37i9dQZF1DWTDvefqi2DvL"],
["heavy metal","spotify","37i9dQZF1DX9qNs32fujYe"],
["thrash metal - big 4","spotify","37i9dQZF1DWZdFtcHGe8ED"],
["extreme metal workout","spotify","37i9dQZF1DWY3PJWG3ogmJ"],
["thrash metal - big 5","spotify","37i9dQZF1DXcWC95w8Zh09"],
["metalsucks playlist","metalsucks","40C5k2GWBlficlUyQKmR0S"],
["year in metal 2016","spotify","37i9dQZF1DX0FNUL0tXob1"],
["black sabbath: the dio years","spotify","37i9dQZF1DXcKmFL2DMoCZ"],
["metal domination","spotify","37i9dQZF1DX2lfnpRKY6V3"]]
# Report how many training rows each genre frame contributes and total them.
lista_df = [rock_yes, rap_yes, eletronica_yes, funk_yes, sertanejo_yes, metal_yes]
count_train = 0
for genre_frame in lista_df:
    # non-null count of the first column, printed per genre
    n_rows = genre_frame.count()[0]
    print(n_rows)
    count_train += n_rows
# data train #
#
# rock train
listx = [rock_yes, rap_yes, eletronica_yes, funk_yes, sertanejo_yes, metal_yes]
def make_genre_no(df, idx, pool=None):
    """Build the negative-class ("not this genre") frame for one genre.

    Collects roughly len(df) rows split evenly across every other genre
    frame, so positive and negative classes stay balanced.

    Parameters
    ----------
    df : pandas.DataFrame
        Positive-class frame for the genre at position `idx` in `pool`.
    idx : int
        Position of `df` inside `pool`; that frame is excluded.
    pool : list of pandas.DataFrame, optional
        Candidate frames. Defaults to the module-level genre frames
        (rock/rap/eletronica/funk/sertanejo/metal), preserving the old behavior.

    Returns
    -------
    pandas.DataFrame
        First len(df) // (len(pool) - 1) rows of each other frame, concatenated.
    """
    if pool is None:
        pool = [rock_yes, rap_yes, eletronica_yes, funk_yes, sertanejo_yes, metal_yes]
    # Every frame except the genre's own.
    others = pool[:idx] + pool[idx + 1:]
    # len(df) is the row count; the original df.count()[0] counted non-null
    # values of the first column, which under-counts when NaNs are present.
    per_genre = len(df) // len(others)
    # Fixes vs. original: the dead `df_no = listtemp[0]` store is gone and
    # pd.concat now runs once instead of once per loop iteration.
    return pd.concat([other.iloc[:per_genre, :] for other in others])
lista_df = [rock_yes,rap_yes,eletronica_yes,funk_yes,sertanejo_yes,metal_yes]
# Build one negative frame per genre; the index matches each genre's
# position in make_genre_no's internal pool [rock,rap,eletronica,funk,sertanejo,metal].
rock_no = make_genre_no(rock_yes,0)
rap_no = make_genre_no(rap_yes,1)
eletronica_no = make_genre_no(eletronica_yes,2)
funk_no = make_genre_no(funk_yes,3)
sertanejo_no = make_genre_no(sertanejo_yes,4)
metal_no = make_genre_no(metal_yes,5)
# Persist the negative frames.
rock_no.to_csv("not_rock_df.csv",encoding="utf-8")
funk_no.to_csv("not_funk_df.csv",encoding="utf-8")
rap_no.to_csv("not_rap_df.csv",encoding="utf-8")
eletronica_no.to_csv("not_eletronica_df.csv",encoding="utf-8")
sertanejo_no.to_csv("not_sertanejo_df.csv",encoding="utf-8")
metal_no.to_csv("not_metal_df.csv",encoding="utf-8")
# Everything that is not rock, concatenated and saved.
frames = [rap_yes,eletronica_yes,funk_yes,sertanejo_yes,metal_yes]
dall_not_rock = pd.concat(frames)
dall_not_rock.to_csv("all_not_rock.csv",encoding="utf-8")
# Round-trip sanity read of one of the files just written.
dtest = pd.read_csv("not_metal_df.csv")
##
##
## M00Ds!
## Relaxe Party Gym Focus
list_relax = [["desplugado","spotify","37i9dQZF1DX7nC9vdKqBZ5"],
["soul house relax","spotify","37i9dQZF1DWZeXxACCcUmL"],
["hora acustica","spotify","37i9dQZF1DX3EQWTTNVPtu"],
["acustico relax","filtr.br","3ofwArWXZqSqdOuVsVDzBS"],
["totally stress free","spotify","37i9dQZF1DWT7XSlwvR1ar"],
["chill.out.brain","spotify","37i9dQZF1DX3PIPIT6lEg5"],
["chilling on a dirt road","spotify","37i9dQZF1DWTkxQvqMy4WW"],
["peaceful","spotify","37i9dQZF1DWZqd5JICZI0u"],
["calm down","spotify","37i9dQZF1DX5bjCEbRU4SJ"],
["yoga e meditation","spotify","37i9dQZF1DX9uKNf5jGX6m"]
]
lista_url_gym = ["https://open.spotify.com/user/spotify/playlist/37i9dQZF1DXdxcBWuJkbcy",
"https://open.spotify.com/user/spotify/playlist/37i9dQZF1DX76Wlfdnj7AP","https://open.spotify.com/user/spotify/playlist/37i9dQZF1DXakWdSZ8mUMO","https://open.spotify.com/user/filtr.br/playlist/12kNrq0RnI0Y1s4UfxmYro","https://open.spotify.com/user/spotify/playlist/37i9dQZF1DWSJHnPb1f0X3","https://open.spotify.com/user/spotify/playlist/37i9dQZF1DX9BXb6GsGCLl","https://open.spotify.com/user/spotify/playlist/37i9dQZF1DWZYWNM3NfvzJ","https://open.spotify.com/user/spotify/playlist/37i9dQZF1DX32NsLKyzScr","https://open.spotify.com/user/spotify/playlist/37i9dQZF1DWUVpAXiEPK8P","https://open.spotify.com/user/spotify/playlist/37i9dQZF1DX70RN3TfWWJh"]
lista_url_focus = ["https://open.spotify.com/user/spotify/playlist/37i9dQZF1DWZeKCadgRdKQ","https://open.spotify.com/user/spotify/playlist/37i9dQZF1DX3PFzdbtx1Us","https://open.spotify.com/user/spotify/playlist/37i9dQZF1DX8NTLI2TtZa6","https://open.spotify.com/user/spotify/playlist/37i9dQZF1DX9sIqqvKsjG8","https://open.spotify.com/user/filtr.ca/playlist/0hZNf3tcMT4x03FyjKYJ3M","https://open.spotify.com/user/spotify/playlist/37i9dQZF1DWT5lkChsPmpy","https://open.spotify.com/user/spotify/playlist/37i9dQZF1DWZIOAPKUdaKS","https://open.spotify.com/user/spotify/playlist/37i9dQZF1DWSluGMsH1R9r","https://open.spotify.com/user/spotify/playlist/37i9dQZF1DXaipYDJRIDH9","https://open.spotify.com/user/spotify/playlist/37i9dQZF1DWWTdxbiocWOL"]
lista_url_party = ["https://open.spotify.com/user/spotify/playlist/37i9dQZF1DX8mBRYewE6or","https://open.spotify.com/user/spotify/playlist/37i9dQZF1DXaXB8fQg7xif","https://open.spotify.com/user/spotify/playlist/37i9dQZF1DXdWpx7KRCSbJ","https://open.spotify.com/user/spotify/playlist/37i9dQZF1DX0IlCGIUGBsA","https://open.spotify.com/user/spotify/playlist/37i9dQZF1DX3ph0alWhOXm","https://open.spotify.com/user/spotify/playlist/37i9dQZF1DX2pto11EMGQc","https://open.spotify.com/user/filtr.br/playlist/1EKzZkhMwqQ8aPwh9c4nHN","https://open.spotify.com/user/spotify/playlist/37i9dQZF1DWZVV00CYG8D6","https://open.spotify.com/user/spotify/playlist/37i9dQZF1DX7F6T2n2fegs","https://open.spotify.com/user/digster_brasil/playlist/5G0f2zH2I2ivvIcvaNS621"]
list_gym = lister(lista_url_gym )
list_focus = lister(lista_url_focus)
list_party = lister(lista_url_party)
# Mood labels used by list_download to tag each track.
lista_m = ["gym","focus","relax","party"]
# (Removed the stray no-op expression `lista_m,` that followed the list --
# it built and discarded a one-element tuple, doing nothing.)
# Download the tracks for every mood playlist list and persist each frame.
gym_yes = list_download(list_gym,lista_m,"gym",1)
gym_yes.to_csv("gym_df.csv",encoding="utf-8")
focus_yes = list_download(list_focus,lista_m,"focus",1)
focus_yes.to_csv("focus_df.csv",encoding="utf-8")
relax_yes = list_download(list_relax,lista_m,"relax",1)
relax_yes.to_csv("relax_df.csv",encoding="utf-8")
party_yes = list_download(list_party,lista_m,"party",1)
party_yes.to_csv("party_df.csv",encoding="utf-8")
def make_mood_no(df, idx, pool=None):
    """Build the negative-class ("not this mood") frame for one mood.

    Collects roughly len(df) rows split evenly across every other mood frame.

    Parameters
    ----------
    df : pandas.DataFrame
        Positive-class frame for the mood at position `idx` in `pool`.
    idx : int
        Position of `df` inside `pool`; that frame is excluded.
    pool : list of pandas.DataFrame, optional
        Candidate frames. Defaults to the module-level mood frames in the
        order [gym, relax, focus, party], preserving the old behavior.

    Returns
    -------
    pandas.DataFrame
        First len(df) // (len(pool) - 1) rows of each other frame, concatenated.
    """
    if pool is None:
        pool = [gym_yes, relax_yes, focus_yes, party_yes]
    others = pool[:idx] + pool[idx + 1:]
    # len(df) is the row count; the original df.count()[0] counted non-null
    # values of the first column, which under-counts when NaNs are present.
    per_mood = len(df) // len(others)
    # Fixes vs. original: dead `df_no = listtemp[0]` store removed; pd.concat
    # runs once after the slices are collected instead of every iteration.
    return pd.concat([other.iloc[:per_mood, :] for other in others])
# Negative-class frames per mood. The index must point at the mood's own
# frame inside make_mood_no's pool [gym, relax, focus, party] so that the
# mood itself is excluded from its "not" set.
gym_no = make_mood_no(gym_yes,0)
relax_no = make_mood_no(relax_yes,1)
focus_no = make_mood_no(focus_yes,2)
# Bug fix: this call previously passed index 0 (gym's slot), which excluded
# gym and let party tracks leak into the "not party" negatives. party is 3.
party_no = make_mood_no(party_yes,3)
relax_no.to_csv("not_relax_df.csv",encoding="utf-8")
gym_no.to_csv("not_gym_df.csv",encoding="utf-8")
focus_no.to_csv("not_focus_df.csv",encoding="utf-8")
party_no.to_csv("not_party_df.csv",encoding="utf-8")
# Held-out evaluation playlists for the rock classifier: (name, owner, playlist id).
test_rock = ["rock hard","spotify","37i9dQZF1DWWJOmJ7nRx0C"]
test_funk = ["funk pop","spotify","37i9dQZF1DWW6EwN6CPxGO"]
df_test_rock = client.playlist_downloader(test_rock[0],test_rock[1],test_rock[2])
df_test_no_rock = client.playlist_downloader(test_funk[0],test_funk[1],test_funk[2])
# Binary target column for the "is it rock" classifier.
df_test_rock["rock"]=1
df_test_no_rock["rock"]=0
df_test_no_rock.to_csv("test_not_rock_df.csv",encoding="utf-8")
df_test_rock.to_csv("test_rock_df.csv",encoding="utf-8")
all_list = [
"http://open.spotify.com/user/spotify/playlist/5FJXhjdILmRA2z5bvz4nzf",
"http://open.spotify.com/user/spotify/playlist/04MJzJlzOoy5bTytJwDsVL",
"http://open.spotify.com/user/spotify/playlist/5yolys8XG4q7YfjYGl5Lff",
"http://open.spotify.com/user/spotify/playlist/103Zi2NG06F9qimASDrszv",
"http://open.spotify.com/user/spotify/playlist/4ecQaDJHF55Ls9m2lKIXbI",
"http://open.spotify.com/user/spotify/playlist/6uTuhSs7qiEPfCI3QDHXsL",
"http://open.spotify.com/user/spotify/playlist/3MlpudZs4HT3i0yGPVfmHC",
"http://open.spotify.com/user/spotify/playlist/7cOO30bzxMm4tO34C9UalD",
"http://open.spotify.com/user/spotify/playlist/4BKT5olNFqLB1FAa8OtC8k",
"http://open.spotify.com/user/spotify/playlist/5Oo5QuAOjjXMMXphFqC6eo",
"http://open.spotify.com/user/spotify/playlist/1GQLlzxBxKTb6tJsD4RxHI",
"http://open.spotify.com/user/spotify/playlist/7uDoSz5VxK5lbXgj7tBMG9",
"http://open.spotify.com/user/spotify/playlist/4TNBeyX7awz89qwtTmh9D4",
"http://open.spotify.com/user/spotify/playlist/7EtGvEVYkL6knEdLxNjHf3",
"http://open.spotify.com/user/spotify/playlist/2ujjMpFriZ2nayLmrD1Jgl",
"http://open.spotify.com/user/spotify/playlist/5p9ILyu1wb4KKHORoXU8nb",
"http://open.spotify.com/user/spotify/playlist/5p9ILyu1wb4KKHORoXU8nb",
"http://open.spotify.com/user/1159383624/playlist/6czK3dQ181NzcFQmt0ngSi",
"http://open.spotify.com/user/elainelin/playlist/6XVhyMuFRS1212blJHTED4",
"http://open.spotify.com/user/lmljoe/playlist/0DXoY83tBvgWkd8QH49yAI",
"http://open.spotify.com/user/lizyeomans/playlist/4m2hgFlI5hQk85rwD3Pfvf",
"https://open.spotify.com/user/12185155528/playlist/636WdfgkXObvlSYPqcX9pX",
"https://open.spotify.com/user/spotify/playlist/37i9dQZF1DWXx6OpWBkqVv",
"https://open.spotify.com/user/spotify/playlist/37i9dQZF1DXdSjVZQzv2tl",
"https://open.spotify.com/user/spotifycharts/playlist/37i9dQZEVXbMXbN3EUUhlg",
"https://open.spotify.com/user/spotifycharts/playlist/37i9dQZEVXbMDoHDwVN2tF",
"https://open.spotify.com/user/spotifycharts/playlist/37i9dQZEVXbMOkSwG072hV",
"https://open.spotify.com/user/spotify/playlist/37i9dQZF1DWTsQAU8k2LN7",
"https://open.spotify.com/user/spotify/playlist/37i9dQZF1DX34KNiFQJNVm",
"https://open.spotify.com/user/spotify/playlist/37i9dQZF1DXb8e91bM8R51",
"https://open.spotify.com/user/payyva/playlist/16vM2yG5XRuewto0pay890",
"https://open.spotify.com/user/mtvbrasil/playlist/02yrCnZiuurRsp8BIiOaUT",
"https://open.spotify.com/user/wwagnermendonca/playlist/1wRmsPfYAENmPayvQ3mLBn",
"https://open.spotify.com/user/spotify/playlist/0AEzUnfymkrhPFa6ZNr4P4",
"https://open.spotify.com/user/spotify/playlist/5FJXhjdILmRA2z5bvz4nzf?play=true",
"https://open.spotify.com/user/spotifybrazilian/playlist/4yYoQnLpbZUiiQM9cFH5BD",
"https://open.spotify.com/user/spotify/playlist/37i9dQZF1DWXRqgorJj26U",
"https://open.spotify.com/user/spotify/playlist/37i9dQZF1DX30w0JtSIv4j",
"https://open.spotify.com/user/spotify/playlist/3ZgmfR6lsnCwdffZUan8EA",
"https://open.spotify.com/user/spotify/playlist/37i9dQZF1DX4908CEYEdlz",
"https://open.spotify.com/user/spotify/playlist/37i9dQZF1DWZn9s1LNKPiM"
]
list_all = lister(all_list)
print(list_all)
df_all = list_download_blank(list_all)
df_all = df_all.drop_duplicates()
df_all
df_all.to_csv("giant_df.csv",encoding="utf-8")
metallica_df = list_download_blank(lister(["https://open.spotify.com/user/metallicaofficial/playlist/1cJ6lPBYj2fscs0kqBHsVV"]))
metallica_df.to_csv("metallica_df.csv",encoding="utf-8")
vaporwave = ["https://open.spotify.com/user/iam3/playlist/17HYiAIcwlDEg5RgVkm4L7"]
samba = ["https://open.spotify.com/user/sambabook/playlist/383LqftNehenclhJfLKl6r",
"https://open.spotify.com/user/spotify/playlist/37i9dQZF1DWTUHOvJwQIMp",
"https://open.spotify.com/user/12168830413/playlist/4CN7h81XV2XP9vF2dIgGe7",
"https://open.spotify.com/user/martinhodavilaoficial/playlist/5ZjKjMUjlfon1HLeMdnvwA",
"https://open.spotify.com/user/mourajo/playlist/1zjl4oY7lcsdiC22JPjtTr",
"https://open.spotify.com/user/cbernardo8/playlist/5nj9Txku0i7DjJCWtc71DT",
"https://open.spotify.com/user/serapha14/playlist/36QpQeAoTQiahgKo9Q4DV5",
"https://open.spotify.com/user/12142247974/playlist/5mWG6vTWj6ALARfYBrXYOS"]
pop = ["https://open.spotify.com/user/spotify/playlist/37i9dQZF1DX6aTaZa0K6VA",
"https://open.spotify.com/user/spotify/playlist/37i9dQZF1DWTwnEm1IYyoj",
"https://open.spotify.com/user/sonymusicuk/playlist/1tPWTwuxOLsE2Do1JQSUxA",
"https://open.spotify.com/user/spotify/playlist/37i9dQZF1DX50QitC6Oqtn",
"https://open.spotify.com/user/spotify/playlist/37i9dQZF1DWUaThf8nMdW6",
"https://open.spotify.com/user/spotify/playlist/37i9dQZF1DXcBWIGoYBM5M",
"https://open.spotify.com/user/spotify/playlist/37i9dQZF1DWUa8ZRTfalHk",
"https://open.spotify.com/user/wolth5/playlist/2yDYrySCmg2RZmnOZ94S63",
"https://open.spotify.com/user/spotify/playlist/37i9dQZF1DX1N5uK98ms5p"]
blues = ["https://open.spotify.com/user/manualdohomemmoderno/playlist/1cNDobjQpdvddb1vkFCMB5",
"https://open.spotify.com/user/spotify/playlist/37i9dQZF1DXbkKnGZHv1kf",
"https://open.spotify.com/user/spotify/playlist/37i9dQZF1DWWyCCtyRAvGr",
"https://open.spotify.com/user/spotify/playlist/37i9dQZF1DWXbxDw7wBcIC",
"https://open.spotify.com/user/spotify/playlist/37i9dQZF1DX5AuRugisweW",
"https://open.spotify.com/user/spotify/playlist/37i9dQZF1DXcnkReojaCnV",
"https://open.spotify.com/user/spotify/playlist/37i9dQZF1DWXEV4YwX6nMt",
"https://open.spotify.com/user/spotify/playlist/37i9dQZF1DXaOWYud3Cg4V",
"https://open.spotify.com/user/spotify/playlist/37i9dQZF1DWSKpvyAAcaNZ",
"https://open.spotify.com/user/spotify/playlist/37i9dQZF1DX8QB9Ys2nV17"]
classica = ["https://open.spotify.com/user/spotify/playlist/37i9dQZF1DWV0gynK7G6pD",
"https://open.spotify.com/user/spotify/playlist/37i9dQZF1DX21bRPJuEN7r",
"https://open.spotify.com/user/spotify/playlist/37i9dQZF1DXaky0wMRgvaj",
"https://open.spotify.com/user/spotify/playlist/37i9dQZF1DWWEJlAGA9gs0",
"https://open.spotify.com/user/spotify/playlist/37i9dQZF1DXcN1fAVSf7CR",
"https://open.spotify.com/user/spotify/playlist/37i9dQZF1DXdazL2M92igj",
"https://open.spotify.com/user/spotify/playlist/37i9dQZF1DX9G9wwzwWL2k",
"https://open.spotify.com/user/spotify/playlist/37i9dQZF1DWYZW76Iw4Lij",
"https://open.spotify.com/user/spotify/playlist/37i9dQZF1DWVfS4Cdd3PNf",
"https://open.spotify.com/user/spotify/playlist/37i9dQZF1DX0ynPp7KaiSY",
"https://open.spotify.com/user/spotify/playlist/37i9dQZF1DWXjj6kdiviS0",
"https://open.spotify.com/user/spotify/playlist/37i9dQZF1DX7TvcXU5TLWg",
"https://open.spotify.com/user/spotify/playlist/37i9dQZF1DXbm0dp7JzNeL",
"https://open.spotify.com/user/spotify/playlist/37i9dQZF1DWSXyN2w7ILMl"]
punk = [
"https://open.spotify.com/user/spotify/playlist/37i9dQZF1DX0KpeLFwA3tO",
"https://open.spotify.com/user/theramonesofficial/playlist/5cs51izx3ZXvmupfUOlPpf",
"https://open.spotify.com/user/spotify/playlist/37i9dQZF1DXa9wYJr1oMFq",
"https://open.spotify.com/user/spotify/playlist/37i9dQZF1DXd6tJtr4qeot",
"https://open.spotify.com/user/spotify/playlist/37i9dQZF1DX3MU5XUozve7",
"https://open.spotify.com/user/1121932299/playlist/3HILyIo22ZfrOQuWfc8mW1",
"https://open.spotify.com/user/spotify/playlist/37i9dQZF1DWU0FBqUeZYeN",
"https://open.spotify.com/user/spotify/playlist/37i9dQZF1DXaDzsRodF91I",
"https://open.spotify.com/user/spotify/playlist/37i9dQZF1DXdJa941ExayM"]
reggae = ["https://open.spotify.com/user/spotify/playlist/37i9dQZF1DWTMlvEhEJDRB",
"https://open.spotify.com/user/spotify/playlist/37i9dQZF1DWSiyIBdVQrkk",
"https://open.spotify.com/user/spotify/playlist/37i9dQZF1DX3mvx0imueXo",
"https://open.spotify.com/user/spotify/playlist/37i9dQZF1DX5lK3vvanTs8",
"https://open.spotify.com/user/spotify/playlist/37i9dQZF1DX6u62UVrRdZV",
"https://open.spotify.com/user/spotify/playlist/37i9dQZF1DWSkkUxEhrBdF",
"https://open.spotify.com/user/boogalooboo/playlist/4gRio043A2No9SRU7ERq6q",
"https://open.spotify.com/user/spotify/playlist/37i9dQZF1DWVYs6zNzJ0ci",
"https://open.spotify.com/user/spotify_uk_/playlist/5rowrSebXI7SzazOhpaJNe",
"https://open.spotify.com/user/spotify/playlist/37i9dQZF1DXbSbnqxMTGx9"]
jazz = ["https://open.spotify.com/user/spotify/playlist/37i9dQZF1DX7YCknf2jT6s",
"https://open.spotify.com/user/spotify/playlist/37i9dQZF1DXbOVU4mpMJjh",
"https://open.spotify.com/user/spotify/playlist/37i9dQZF1DWVqfgj8NZEp1",
"https://open.spotify.com/user/spotify/playlist/37i9dQZF1DX4wta20PHgwo",
"https://open.spotify.com/user/spotify/playlist/37i9dQZF1DX5OepaGriAIm",
"https://open.spotify.com/user/spotify/playlist/37i9dQZF1DXbITWG1ZJKYt",
"https://open.spotify.com/user/spotify/playlist/37i9dQZF1DWTbzY5gOVvKd",
"https://open.spotify.com/user/spotify/playlist/37i9dQZF1DX0SM0LYsmbMT",
"https://open.spotify.com/user/spotify/playlist/37i9dQZF1DWYgA6FKKHSd6",
"https://open.spotify.com/user/spotify/playlist/37i9dQZF1DX5Lf3LZ6Czqu"]
mpb = ["https://open.spotify.com/user/12142195183/playlist/0npwe92QJnGIVveOLHofQw",
"https://open.spotify.com/user/spotify/playlist/37i9dQZF1DX9FSYmdU9mbT",
"https://open.spotify.com/user/tainanolveira/playlist/3LNWderZ6Lc09nMGBV5U5K",
"https://open.spotify.com/user/22z4mfgbxq4s6cpg3x5iv7ofa/playlist/4af3rkkQ8tA3cRKNm6gkFa",
"https://open.spotify.com/user/12174648846/playlist/6ljjNFUdKHDnpPVslmuArC",
"https://open.spotify.com/user/spotify/playlist/37i9dQZF1DX8eh9G7VlzAT",
"https://open.spotify.com/user/spotify/playlist/37i9dQZF1DX87C559Ss01a",
"https://open.spotify.com/user/12152277648/playlist/74pplBPrrw4LVVFMQ8IoiD",
"https://open.spotify.com/user/vozesdobrasil-mpb/playlist/2LWQqYlWYyLe5yOWM5L3J3",
"https://open.spotify.com/user/12175917631/playlist/3aVPrPl2oArP1wFv7iLGU2"]
# Download an unlabeled ("blank") frame for each additional genre from the
# URL lists defined above, then persist every frame to CSV.
vaporwave_df = list_download_blank(lister(vaporwave))
samba_df = list_download_blank(lister(samba))
pop_df = list_download_blank(lister(pop))
classica_df = list_download_blank(lister(classica))
punk_df = list_download_blank(lister(punk))
reggae_df = list_download_blank(lister(reggae))
jazz_df = list_download_blank(lister(jazz))
mpb_df = list_download_blank(lister(mpb))
blues_df = list_download_blank(lister(blues))
vaporwave_df.to_csv("vaporwave_df.csv",encoding="utf-8")
samba_df.to_csv("samba_df.csv",encoding="utf-8")
classica_df.to_csv("classica_df.csv",encoding="utf-8")
pop_df.to_csv("pop_df.csv",encoding="utf-8")
mpb_df.to_csv("mpb_df.csv",encoding="utf-8")
punk_df.to_csv("punk_df.csv",encoding="utf-8")
reggae_df.to_csv("reggae_df.csv",encoding="utf-8")
jazz_df.to_csv("jazz_df.csv",encoding="utf-8")
blues_df.to_csv("blues_df.csv",encoding="utf-8")
def make_genre_no(df, idx, name, pool=None):
    """Build and persist the negative-class frame for one extra genre.

    Collects roughly len(df) rows split evenly across every other genre
    frame and writes the result to ``not_<name>_df.csv``.

    Parameters
    ----------
    df : pandas.DataFrame
        Positive-class frame for the genre at position `idx` in `pool`.
    idx : int
        Position of `df` inside `pool`; that frame is excluded.
    name : str
        Genre name used to build the output CSV filename.
    pool : list of pandas.DataFrame, optional
        Candidate frames. Defaults to the module-level extra-genre frames
        (vaporwave/samba/classica/pop/mpb/punk/reggae/jazz/blues).

    Returns
    -------
    pandas.DataFrame
        The concatenated negative frame (also written to disk).
    """
    if pool is None:
        pool = [vaporwave_df, samba_df, classica_df, pop_df, mpb_df,
                punk_df, reggae_df, jazz_df, blues_df]
    others = pool[:idx] + pool[idx + 1:]
    per_genre = len(df) // len(others)
    # Fixes vs. original: dead `df_no = listtemp[0]` store removed; pd.concat
    # runs once after all slices are collected instead of every iteration.
    df_no = pd.concat([other.iloc[:per_genre, :] for other in others])
    df_no.to_csv("not_" + name + "_df.csv", encoding="utf-8")
    return df_no
# One negative frame per extra genre; the index matches each genre's
# position in make_genre_no's internal pool
# [vaporwave, samba, classica, pop, mpb, punk, reggae, jazz, blues].
vaporwave_no = make_genre_no(vaporwave_df,0,"vaporwave")
samba_no = make_genre_no(samba_df,1,"samba")
classica_no = make_genre_no(classica_df,2,"classica")
pop_no = make_genre_no(pop_df,3,"pop")
mpb_no = make_genre_no(mpb_df,4,"mpb")
punk_no = make_genre_no(punk_df,5,"punk")
reggae_no = make_genre_no(reggae_df,6,"reggae")
jazz_no = make_genre_no(jazz_df,7,"jazz")
blues_no = make_genre_no(blues_df,8,"blues")
test_rock_df = list_download_blank(lister(["https://open.spotify.com/user/1296791898/playlist/0DzTbbt5l4w0pQ7JR0Ie38"]))
test_rock_df.to_csv("test_rock_df.csv",encoding="utf-8")
test_rap_df = list_download_blank(lister(["https://open.spotify.com/user/merovingin/playlist/0ZRwrJ2EDGyKR6YgQPWXeO"]))
test_rap_df.to_csv("test_rap_df.csv",encoding="utf-8")
test_funk_df = list_download_blank(lister(["https://open.spotify.com/user/playlistmebrasil/playlist/1Up8P0Ppoxe3205tXKSQlk"]))
test_funk_df.to_csv("test_funk_df.csv",encoding="utf-8")
test_metal_df = list_download_blank(lister(["https://open.spotify.com/user/1289881487/playlist/5W3dQ7pQajtTGTxztziVfz"]))
test_metal_df.to_csv("test_metal_df.csv",encoding="utf-8")
test_sertanejo_df = list_download_blank(lister(["https://open.spotify.com/user/22y5n6obzvznlkwknbqgaqxzi/playlist/6OYGkz18afNDQEgcbXU8nw"]))
test_sertanejo_df.to_csv("test_sertanejo_df.csv",encoding="utf-8")
test_relax_df = list_download_blank(lister(["https://open.spotify.com/user/spotify/playlist/37i9dQZF1DX4sWSpwq3LiO"]))
test_relax_df.to_csv("test_relax_df.csv",encoding="utf-8")
test_focus_df = list_download_blank(lister(["https://open.spotify.com/user/dgdeccaclassics/playlist/4vGoXR0GZikU6TjGyMeIOa"]))
test_focus_df.to_csv("test_focus_df.csv",encoding="utf-8")
test_gym_df = list_download_blank(lister(["https://open.spotify.com/user/neffexmusic/playlist/3cIBg0kQGtPZkawFHbEKHO"]))
test_gym_df.to_csv("test_gym_df.csv",encoding="utf-8")
test_party_df = list_download_blank(lister(["https://open.spotify.com/user/spotify/playlist/37i9dQZF1DXaXB8fQg7xif"]))
test_party_df.to_csv("test_party_df.csv",encoding="utf-8")
def multidownloader(lol, lon):
    """Download each playlist-URL list and save it as '<name>_df.csv'.

    Parameters
    ----------
    lol : list of list of str
        "List of lists" of playlist URLs, one inner list per output file.
    lon : list of str
        "List of names"; lon[i] names the CSV written for lol[i].

    Returns
    -------
    str
        The literal string "multidownloaded" (kept from the original).

    Fixes vs. original: `for in range(...)` was a syntax error (no loop
    variable) and `lister[lol[i]]` indexed the lister function instead of
    calling it.
    """
    for urls, name in zip(lol, lon):
        temp_df = list_download_blank(lister(urls))
        temp_df.to_csv(name + "_df.csv", encoding="utf-8")
    return "multidownloaded"
# Held-out evaluation playlist for the eletronica classifier.
test_eletronica_df = list_download_blank(lister(["https://open.spotify.com/user/pefalcao/playlist/6kUb9BX3RkRd2tG5FNBpZH"]))
test_eletronica_df.to_csv("test_eletronica_df.csv",encoding="utf-8")
```
| github_jupyter |
# Least squares fitting of models to data
This is a quick introduction to `statsmodels` for physical scientists (e.g. physicists, astronomers) or engineers.
Why is this needed?
Because most of `statsmodels` was written by statisticians and they use a different terminology and sometimes methods, making it hard to know which classes and functions are relevant and what their inputs and outputs mean.
```
import numpy as np
import pandas as pd
import statsmodels.api as sm
```
## Linear models
Assume you have data points with measurements `y` at positions `x` as well as measurement errors `y_err`.
How can you use `statsmodels` to fit a straight line model to this data?
For an extensive discussion see [Hogg et al. (2010), "Data analysis recipes: Fitting a model to data"](http://arxiv.org/abs/1008.4686) ... we'll use the example data given by them in Table 1.
So the model is `f(x) = a * x + b` and on Figure 1 they print the result we want to reproduce ... the best-fit parameter and the parameter errors for a "standard weighted least-squares fit" for this data are:
* `a = 2.24 +- 0.11`
* `b = 34 +- 18`
```
# Example data from Hogg et al. (2010), Table 1: position x, measurement y,
# and measurement error y_err.
data = """
x y y_err
201 592 61
244 401 25
47 583 38
287 402 15
203 495 21
58 173 15
210 479 27
202 504 14
198 510 30
158 416 16
165 393 14
201 442 25
157 317 52
131 311 16
166 400 34
160 337 31
186 423 42
125 334 26
218 533 16
146 344 22
"""
try:
    from StringIO import StringIO  # Python 2
except ImportError:
    from io import StringIO  # Python 3
# sep=r"\s+" replaces delim_whitespace=True, which is deprecated in pandas 2.2
# and removed in pandas 3.0; behavior on this whitespace table is identical.
data = pd.read_csv(StringIO(data), sep=r"\s+").astype(float)
# Note: for the results we compare with the paper here, they drop the first four points
# NOTE(review): this cell does NOT drop those four rows -- confirm whether the
# fit below is expected to match the paper's quoted a/b values exactly.
data.head()
```
To fit a straight line use the weighted least squares class [WLS](http://www.statsmodels.org/devel/generated/statsmodels.regression.linear_model.WLS.html) ... the parameters are called:
* `exog` = `sm.add_constant(x)`
* `endog` = `y`
* `weights` = `1 / y_err**2`
Note that `exog` must be a 2-dimensional array with `x` as a column and an extra column of ones. Adding this column of ones means you want to fit the model `y = a * x + b`, leaving it off means you want to fit the model `y = a * x`.
And you have to use the option `cov_type='fixed scale'` to tell `statsmodels` that you really have measurement errors with an absolute scale. If you don't, `statsmodels` will treat the weights as relative weights between the data points and internally re-scale them so that the best-fit model will have `chi**2 / ndf = 1`.
```
# Weighted least squares: weights are inverse variances, 1 / sigma**2.
exog = sm.add_constant(data['x'])
endog = data['y']
weights = 1. / (data['y_err'] ** 2)
wls = sm.WLS(endog, exog, weights)
# 'fixed scale' tells statsmodels the weights come from absolute measurement
# errors and must not be rescaled to make chi**2 / ndf = 1.
results = wls.fit(cov_type='fixed scale')
print(results.summary())
```
### Check against scipy.optimize.curve_fit
```
# Cross-check with `scipy.optimize.curve_fit`: it reports the same best-fit
# parameters and parameter errors as the WLS fit above.
from scipy.optimize import curve_fit

def f(x, a, b):
    """Straight-line model: f(x) = a * x + b."""
    return a * x + b

xdata = data['x']
ydata = data['y']
p0 = [0, 0]  # initial parameter estimate
sigma = data['y_err']
# absolute_sigma=True == statsmodels' cov_type='fixed scale'
popt, pcov = curve_fit(f, xdata, ydata, p0, sigma, absolute_sigma=True)
perr = np.sqrt(np.diag(pcov))
print('a = {0:10.3f} +- {1:10.3f}'.format(popt[0], perr[0]))
print('b = {0:10.3f} +- {1:10.3f}'.format(popt[1], perr[1]))
```
### Check against self-written cost function
```
# Alternative check: minimize a hand-written chi-square cost with
# `scipy.optimize.minimize`. This does not report parameter errors;
# the Hessian matrix would have to be estimated separately ...
from scipy.optimize import minimize

def chi2(pars):
    """Cost function.
    """
    y_model = pars[0] * data['x'] + pars[1]
    chi = (data['y'] - y_model) / data['y_err']
    return np.sum(chi ** 2)

result = minimize(fun=chi2, x0=[0, 0])
popt = result.x
print('a = {0:10.3f}'.format(popt[0]))
print('b = {0:10.3f}'.format(popt[1]))
```
## Non-linear models
```
# TODO: we could use the examples from here:
# http://probfit.readthedocs.org/en/latest/api.html#probfit.costfunc.Chi2Regression
```
| github_jupyter |
```
import numpy as np
import pandas as pd
from data.dataset import StockDataset
from data.macro import Macro
from data.scaler import HybridScaler
from data.split import StratifiedTimeSeriesSplit
from model.arima import grid_search
from sklearn.model_selection import TimeSeriesSplit
from sklearn.pipeline import Pipeline
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import confusion_matrix
from sklearn.model_selection import cross_validate
from sklearn.preprocessing import MinMaxScaler, StandardScaler
import warnings
warnings.filterwarnings('ignore')
# S&P 500 daily history from 1950 through late 2021.
dataset = StockDataset('^GSPC')
df = dataset.get_hist(start_date='1950-01-01', end_date='2021-10-23', time_interval='daily')
# Feature matrix: raw history plus 30/60/120-day lookback aggregates.
x = pd.concat(
[df,
dataset.lookback_agg(lookback_len=30),
dataset.lookback_agg(lookback_len=60),
dataset.lookback_agg(lookback_len=120)],
axis=1)
# Target: 30-day-ahead downturn indicator (is_up=False).
y = dataset.get_change_forecast_label(forecast_len=30, is_up=False, method='past_all')
```
# Create Exogenous Features
```
# SECURITY NOTE(review): API token is hard-coded in source; move it to an
# environment variable or an untracked config file.
macro = Macro(token='wixdGr7AAc9_syvt6cFD')
macro_data = macro.get_macro()
# Left-join macro indicators onto the price/feature matrix by date.
x = x.merge(macro_data, how='left', on='date')
# Latest first-valid index over all columns = earliest date at which every
# feature is populated (motivates the TRAIN_START choice below).
x.apply(lambda x: x.first_valid_index()).max()
TRAIN_START = '1987-06-10'
# TRAIN_START = '1951-01-01'
TEST_START = '2018-01-01'
# NOTE(review): pandas label-based slicing is inclusive at both ends, so the
# TEST_START row lands in both train and test -- confirm the one-row overlap
# is intended.
train_x = x[TRAIN_START:TEST_START]
train_y = y[TRAIN_START:TEST_START]
test_x = x[TEST_START:]
test_y = y[TEST_START:]
# 10 chronological CV folds with 120-day validation windows.
split = TimeSeriesSplit(n_splits=10, test_size=120)
# split = StratifiedTimeSeriesSplit(n_splits=10, test_size=120, min_positive_ratio=0.25)
```
# Build Model
```
from sklearn.ensemble import RandomForestClassifier
# Scaler feeds a random forest; forests are scale-invariant, so the scaler is
# harmless here but only matters if the model step is swapped.
pipeline = Pipeline(steps=[
('scaler', StandardScaler()),
('model', RandomForestClassifier(n_estimators=100, random_state=1))
])
# Walk-forward cross-validated accuracy on the training window.
results = cross_validate(pipeline, train_x, train_y, cv=split, scoring='accuracy')
results['test_score'].mean()
# Refit on the full training window, then evaluate on the held-out period.
pipeline.fit(train_x, train_y)
pred_y = pipeline.predict(test_x)
confusion_matrix(test_y, pred_y)
from model.eval import moving_average
import matplotlib.pyplot as plt
# test_df = moving_average(test_y, pipeline.predict_proba(test_x)[:, 1])
# Plot predicted downturn probability against the true label...
test_df = pd.DataFrame({'true': test_y, 'pred': pipeline.predict_proba(test_x)[:, 1]})
ax1 = test_df.plot(figsize=(18, 10), color=['g', 'orange'])
ax1.set_ylabel('bubble')
ax1.axhline(y=0.5, color='r', linestyle='--')
# ...with the S&P 500 close overlaid on a secondary axis.
ax2 = ax1.twinx()
ax2.set_ylabel('SP500')
ax2 = test_x['close'].plot()
```
# Feature Importance
```
# Map each feature name to the forest's impurity-based importance.
feature_importances = dict(zip(train_x.columns, pipeline['model'].feature_importances_))
# Display importances sorted descending (notebook cell output).
dict(sorted(feature_importances.items(), key=lambda x: x[1], reverse=True))
# Keep the 20 highest-importance feature names.
top_features = list(dict(sorted(feature_importances.items(), key=lambda x: x[1], reverse=True)).keys())[:20]
```
| github_jupyter |
```
from __future__ import print_function
from keras.callbacks import LambdaCallback
from keras.models import Sequential, load_model
from keras.layers import Dense, Activation, Embedding
from keras.layers import LSTM
from keras.optimizers import RMSprop, Adam
from keras.utils import to_categorical
from mpl_toolkits.mplot3d import Axes3D
# from unidecode import unidecode
import numpy as np
import matplotlib.pyplot as plt
import random, sys, io, re, csv
# Stream rows from the lyrics CSV (per the parsing below: r[1]=title, r[2]=lyrics).
# NOTE(review): the file handle opened here is never closed.
csv_reader = csv.reader(open('data/Lana_del_rey/lyrics.csv', encoding='utf-8'))
# return a dictionary of song: lyrics
def get_tokenized_lines(csv):
    """Map song title (column 1) to a flat list of lower-cased word tokens
    taken from the lyrics column (column 2; lines separated by '|-|').

    NOTE(review): the parameter name `csv` shadows the imported csv module
    inside this function body.
    """
    lyrics = {}
    for r in csv:
        tokens = []
        text = str(r[2]).lower()
        for line in text.split('|-|'):
            # words made of letters and apostrophes only
            tokens.extend(re.findall(r"\b[a-z']+\b", line))
        lyrics[r[1]] = tokens
    return lyrics
all_lyric_words = get_tokenized_lines(csv_reader)
# total word number: 173631, total lines of lyrics: 22688, average word per line: 7.652988
# Each training window = 32 context words + 1 target word.
SEQ_LENGTH = 32 + 1 # this one will be deleted during prediction, "1" stands for using a sequence to generate one word
sequences = list()
def get_all_sequences(lyrics=None, seq_len=None):
    """Slide a fixed-length window over each song's word list.

    Songs shorter than one window are kept whole; otherwise every contiguous
    `seq_len`-word window is emitted.

    Parameters
    ----------
    lyrics : dict, optional
        song title -> list of word tokens. Defaults to the module-level
        `all_lyric_words` (original behavior).
    seq_len : int, optional
        Window length. Defaults to the module-level `SEQ_LENGTH`.

    Returns
    -------
    list of list of str

    Fixes vs. original: the loop ran `range(SEQ_LENGTH, len(words))`, which
    silently dropped the final window of every song (off-by-one); it also
    appended into the module-level `sequences` list, so a second call would
    duplicate every window. Results are now accumulated locally.
    """
    if lyrics is None:
        lyrics = all_lyric_words
    if seq_len is None:
        seq_len = SEQ_LENGTH
    result = []
    for words in lyrics.values():
        if len(words) < seq_len:
            # song shorter than one window: keep it whole
            result.append(words)
        else:
            for end in range(seq_len, len(words) + 1):
                result.append(words[end - seq_len:end])
    return result
sequences = get_all_sequences()
print('Total Sequences: %d' % len(sequences))
# store all the unique words and match them with indices
# NOTE(review): iteration order of a set is not stable across interpreter
# runs (string hash randomization), so word indices are not reproducible
# between runs.
all_words = [word for song in all_lyric_words for word in all_lyric_words[song]]
unique_word = set(all_words)
word_to_index = {w: i for i, w in enumerate(unique_word)}
index_to_word = {i: w for w, i in word_to_index.items()}
word_indices = [word_to_index[word] for word in unique_word]
word_size = len(unique_word)
print('vocabulary size: {}'.format(word_size))
# Encode tokenized sequences as a matrix of vocabulary indices: cell (r, c)
# holds the index of the c-th word of line r (right-padded with zeros).
def data_to_matrix(lines, seq_len, word_index=None):
    """Return a (len(lines), seq_len) float matrix of word indices.

    Parameters
    ----------
    lines : list of list of str
        Tokenized sequences; each must contain at most `seq_len` words.
    seq_len : int
        Number of columns in the output matrix.
    word_index : dict, optional
        word -> index mapping. Defaults to the module-level `word_to_index`,
        preserving the original behavior.
    """
    if word_index is None:
        word_index = word_to_index
    matrix = np.zeros((len(lines), seq_len))
    for r, line in enumerate(lines):
        for c, word in enumerate(line):
            matrix[r, c] = word_index[word]
    return matrix
matrix_data = data_to_matrix(sequences, SEQ_LENGTH)
matrix_data[:,-1].shape
# get the y = Wx data, X is the input data, and y is the target data
X, y = matrix_data[:, :-1], matrix_data[:, -1]
y = to_categorical(y, num_classes=word_size) # to_categorical: for categorical_crossentropy optimiser
seq_length = len(X[0]) # 32 in our case, stands for sequence length
print("X_shape", X.shape)
print("y_shape", y.shape)
# establish the network, using LSTM and compile it
model = Sequential()
model.add(Embedding(word_size, 32, input_length=seq_length)) # Embedding(input_dim, output_dim, input_length), which respecively stands for: the possible value of the word, the output vector size, sequence length
model.add(LSTM(100, return_sequences=True))
model.add(LSTM(100))
model.add(Dense(100, activation='relu'))
model.add(Dense(word_size, activation='softmax'))
print(model.summary())
adam = Adam(0.002)
model.compile(loss='categorical_crossentropy', optimizer=adam, metrics=['accuracy'])
history = model.fit(X, y, batch_size=128, epochs=50)
model.save('model.h5') # this now stores the model for kanye west's training
# transfer text to a numpy matrix
def text_to_matrix(texts, word_to_index):
    """Encode a token list as a (1, len(texts)) matrix of vocabulary indices."""
    encoded = np.zeros((1, len(texts)), dtype=int)
    for position, token in enumerate(texts):
        encoded[0, position] = word_to_index[token]
    return encoded
# ensure each sequence is no longer than max length
def my_pad_sequences(seq, max_length):
    """Truncate a 2-D sequence array to (at most) its last `max_length` columns.

    Despite the name, nothing is padded — the slice only trims the front.
    """
    n_cols = seq.shape[1]
    begin = n_cols - max_length
    end = begin + max_length
    return seq[:, begin:end]
# generate lyrics
def generate_text(model, word_to_index, seq_length, seed_text, num_of_words):
    """Generate `num_of_words` words by repeatedly taking the model's argmax
    prediction and feeding each new word back into the context.

    model         : trained Keras model emitting a softmax over the vocabulary
    word_to_index : dict mapping word -> integer vocabulary index
    seq_length    : context window length fed to the model
    seed_text     : whitespace-separated seed; assumed >= seq_length words
    num_of_words  : number of words to generate
    Returns the generated words joined by single spaces.
    """
    # invert the vocabulary once instead of scanning the dict per generated word
    index_to_word = {index: word for word, index in word_to_index.items()}
    result = []  # was `list()[1:]` -- just an empty list, written confusingly
    input_text = seed_text.lower()
    for _ in range(num_of_words):
        tokens = input_text.split()
        encoded = np.array([[word_to_index[t] for t in tokens]], dtype=int)
        # keep only the trailing seq_length context (same slice arithmetic
        # as my_pad_sequences, inlined so this function is self-contained)
        begin = encoded.shape[1] - seq_length
        encoded = encoded[:, begin:begin + seq_length]
        # predict_classes() was removed in TF 2.6; argmax over predict() is equivalent
        probabilities = model.predict(encoded, verbose=0)
        predicted = int(np.argmax(probabilities, axis=-1).ravel()[0])
        out_word = index_to_word.get(predicted, '')
        input_text += ' ' + out_word
        result.append(out_word)
    return ' '.join(result)
# this is for lana del rey
# make sure the seed text has length 32 and contains no punctuation
seed_text = "kiss me hard before you go summertime sadness I just wanted you to know that baby you the best I got my red dress on tonight dancing in the dark in the pale moonlight"
generated = generate_text(model, word_to_index, SEQ_LENGTH - 1, seed_text, 50)
print(generated)
# this is what we got for Kanye West !!!
seed_text = "Bougie girl grab her hand fuck that bitch she don't wanna dance excuse my French but I'm in France Prince williams ain't do it right if you ask me cause if I"
generated = generate_text(model, word_to_index, SEQ_LENGTH - 1, seed_text, 50)
print(generated)
model = load_model('model.h5')
weights = model.layers[4].get_weights()[0].reshape(-1,3)
def plot_3d(weights):
    """Scatter the rows of `weights` (an N x 3 array) in 3-D, labelling
    each point with its row index."""
    fig = plt.figure(figsize=(20, 10))
    axis = fig.add_subplot(111, projection='3d')
    for idx, (wx, wy, wz) in enumerate(weights):
        axis.scatter(wx, wy, wz, s=75)
        axis.text(wx, wy, wz, '%s' % (str(idx)), size=20, zorder=1, color='k')
    plt.show()
plot_3d(weights)
```
| github_jupyter |
# Part I: Sorting
In `def sort(values)`, a quicksort for numpy arrays is implemented, with $O(N\log N)$ time complexity in general, and $O(N^2)$ in the worst case.
#### Algorithm
This version of quicksort (abbreviated as qsort in the following passage) chooses the first element as the `pivot`. In each turn, if the incoming array has length greater than one, then qsort does the following:
1. __select__ elements smaller than the `pivot` as `left`, select elements greater than the `pivot` as `right`
2. __rearrange__ the array to the form of `left` + `pivot` + `right`
3. do __qsort__ on `left` and `right`
#### Complexity
First we analyse one turn:
1. select part: selecting `left` and `right` compares the whole array with `pivot` one time each, therefore the time complexity is $O(2 \cdot length) = O(length)$
2. rearrange part: we simply copy `left`, `pivot` and `right` to the original array, with time complexity $O(length)$
In general, if the incoming array is almost random, we will approximately have half `left` and half `right` on each side of the pivot. Thus $T(n) = 2T(n/2)+cn$ and therefore by the master theorem $T(N) = O(N\log N)$.
However, in the worst case, one side of each turn has $(length-1)$ elements while the other side has no elements. Hence we have $T(n) = T(n-1)+cn$, and unrolling this recurrence gives $T(N) = O(N^2)$.
## Code (20 points)
```
# the function
def sort(values, lo=0, hi=None):
    """In-place quicksort of values[lo:hi]; returns `values`.

    The first element of the range is the pivot.  Recursing with explicit
    (lo, hi) bounds instead of slices generalizes the sort to plain Python
    lists as well: list slices are copies, so the original slice-based
    recursion silently left lists unsorted (it only worked on numpy views).
    Average O(N log N), worst case O(N^2).
    """
    if hi is None:
        hi = len(values)
    if hi - lo > 1:
        # 1. select pivot, then the elements that go left / right of it
        pivot = values[lo]
        left = [x for x in values[lo + 1:hi] if x < pivot]
        right = [x for x in values[lo + 1:hi] if x >= pivot]
        # 2. rearrange the range to [left + pivot + right]
        values[lo + len(left)] = pivot
        for i, x in enumerate(left):
            values[lo + i] = x
        for i, x in enumerate(right):
            values[lo + 1 + len(left) + i] = x
        # 3. qsort both partitions in place
        sort(values, lo, lo + len(left))
        sort(values, lo + len(left) + 1, hi)
    return values
# main
import numpy as np
# different random seed
np.random.seed()
# generate numbers
N = 10
# the TA will vary the input array size and content during testing
values = np.random.random([N])
sort(values)
correct = True
for index in range(1, len(values)):
if(values[index-1] > values[index]):
correct = False
print('Correct? ' + str(correct))
```
# Part II: Line fitting
The loss function is $L\left(\mathbf{X}, \mathbf{T}, \mathbf{Y}\right) = \sum_i \left( T^{(i)} - Y^{(i)}\right)^2 = \sum_i \left( T^{(i)} - w_1 X^{(i)} - w_0 \right)^2$.
## Math (30 points)
Write down explicit formulas for $w_0$ and $w_1$ in terms of $\mathbf{X}$ and $\mathbf{T}$.
### Vectored Version (The Formal Solution)
We let $\mathbf{H} = [\mathbf{1} | \mathbf{X}]$, therefore $\mathbf{H} \Theta = \mathbf{Y}$.
Set $\nabla_\Theta L(\Theta) = 0 $ we get
$$
\begin{align}
\nabla_\Theta L(\Theta) & = \nabla_\Theta \frac{1}{2} (\mathbf{H} \Theta - \mathbf{T})^T (\mathbf{H} \Theta - \mathbf{T}) \\
0 & = \frac{1}{2} \nabla_\Theta({\Theta}^T {\mathbf{H}}^T \mathbf{H} \Theta - {\Theta}^T {\mathbf{H}}^T \mathbf{T} - \mathbf{T}^T \mathbf{H} \Theta + \mathbf{T}^T \mathbf{T}) \\
0 & = \frac{1}{2} ({\mathbf{H}}^T \mathbf{H} \Theta + {\mathbf{H}}^T \mathbf{H} \Theta - {\mathbf{H}}^T \mathbf{T} - {\mathbf{H}}^T \mathbf{T}) \\
0 & = {\mathbf{H}}^T \mathbf{H} \Theta - {\mathbf{H}}^T \mathbf{T} \\
{\mathbf{H}}^T \mathbf{H} \Theta & = {\mathbf{H}}^T \mathbf{T} \\
\Theta & = ({\mathbf{H}}^T \mathbf{\mathbf{H}})^{-1} {\mathbf{\mathbf{H}}}^T {\mathbf{T}}
\end{align}
$$
Hence $\Theta = \{w_0, w_1 \}^T = ({\mathbf{H}}^T \mathbf{\mathbf{H}})^{-1} {\mathbf{\mathbf{H}}}^T {\mathbf{T}}$ with $\mathbf{H} = [\mathbf{1} | \mathbf{X}]$.
### Unvectored Version (The Solution only for Reference)
As the loss is convex, we can simply apply first-order necessary condition
$$
\begin{equation}
\frac{\partial L}{\partial w_0} = - 2\sum_i \left( T^{(i)} - w_1 X^{(i)} - w_0 \right) = 0 \\
\frac{\partial L}{\partial w_1} = - 2\sum_i \left(X^{(i)}( T^{(i)} - w_1 X^{(i)} - w_0) \right) = 0
\end{equation}
$$
We extract $w_0$ from the first equation, to substitute $w_0$ in the second equation
$$
\begin{equation}
w_0 = \frac{\sum_i \left( T^{(i)} - w_1 X^{(i)}\right)}{n} \\
\sum_i \left(X^{(i)}( T^{(i)} - w_1 X^{(i)} - \frac{\sum_j \left( T^{(j)} - w_1 X^{(j)}\right)}{n}) \right) = 0
\end{equation}
$$
Now that the second equation is only composed of $w_1, \mathbf{X}$ and $\mathbf{T}$, we can solve out $w_1$
$$
\begin{equation}
\sum_i \left(X^{(i)}T^{(i)} - w_1 X^{(i)}X^{(i)} - X^{(i)}\frac{\sum_j T^{(j)}}{n} + w_1 X^{(i)}\frac{\sum_j X^{(j)}}{n} \right) = 0 \\
\sum_i X^{(i)} T^{(i)} - \frac{\sum_i X^{(i)} \sum_j T^{(j)}}{n} = w_1(\sum_i X^{(i)} X^{(i)} - \frac{\sum_i X^{(i)} \sum_j X^{(j)}}{n})\\\
w_1 = \frac{n \sum_i X^{(i)} T^{(i)} - \sum_i X^{(i)} \sum_j T^{(j)}}{n \sum_i X^{(i)} X^{(i)} - \sum_i X^{(i)} \sum_j X^{(j)}} = \frac{n \sum_i X^{(i)} T^{(i)} - \sum_i X^{(i)} \sum_j T^{(j)}}{n \sum_i (X^{(i)})^2 - (\sum_i X^{(i)})^2}
\end{equation}
$$
Now simply substitute $w_1$ in the expression of $w_0$
$$
\begin{equation}
w_0 = \frac{\sum_i \left( T^{(i)} - w_1 X^{(i)}\right)}{n} = \frac{\sum_i X^{(i)} X^{(i)} \sum_j T^{(j)} - \sum_i X^{(i)} T^{(i)} \sum_j X^{(j)}}{n \sum_i (X^{(i)})^2 - (\sum_i X^{(i)})^2}
\end{equation}
$$
To conclude
$$ \left\{
\begin{aligned}
w_1 & = \frac{n \sum_i X^{(i)} T^{(i)} - \sum_i X^{(i)} \sum_j T^{(j)}}{n \sum_i (X^{(i)})^2 - (\sum_i X^{(i)})^2}\\
w_0 & = \frac{\sum_i X^{(i)} X^{(i)} \sum_j T^{(j)} - \sum_i X^{(i)} T^{(i)} \sum_j X^{(j)}}{n \sum_i (X^{(i)})^2 - (\sum_i X^{(i)})^2} \\
\end{aligned}
\right.
$$
## Code (50 points)
```
# line model
import numpy as np
class Line(object):
    """A line t = w1 * x + w0 that can generate (optionally noisy) samples
    and be re-fitted to data by ordinary least squares."""

    def __init__(self, w0, w1):
        self.w0 = w0  # intercept
        self.w1 = w1  # slope

    def predict(self, x, noise=0):
        """Evaluate the line at x; `noise` scales an additive standard
        Gaussian sample (kept even when noise == 0 so the RNG stream
        matches the original implementation)."""
        return (x*self.w1 + self.w0 + noise*np.random.normal())

    # Input: data, a 2D array with each (x, t) pair on a row
    # Return: w0 and w1, the intercept and slope of the fitted line
    def learn(self, data):
        """Least-squares fit of the line to `data` (rows of (x, t) pairs)."""
        X = data[:, 0]
        T = data[:, 1]
        # design matrix H = [1 | X]; lstsq solves the normal equations and
        # is numerically safer than explicitly forming inv(H^T H)
        H = np.column_stack((np.ones_like(X), X))
        theta, _, _, _ = np.linalg.lstsq(H, T, rcond=None)
        return theta[0], theta[1]
# test
np.random.seed()
w0 = np.asscalar(np.random.random(1))*2-1
w1 = np.asscalar(np.random.random(1))*2-1
line = Line(w0, w1)
N = 20
noise = 0.05
X = np.random.random([N])
T = []
for x in X:
T.append(np.sum(line.predict(x, noise)))
T = np.array(T)
#data = np.vstack((X, T)).transpose()
data = np.array([X, T]).transpose()
w0_fit, w1_fit = line.learn(data)
line_fit = Line(w0_fit, w1_fit)
print('truth: ' + str(w0) + ' ' + str(w1))
print('predict: ' + str(w0_fit) + ' ' + str(w1_fit))
# plot
%matplotlib inline
import matplotlib.pyplot as plt
plt.scatter(data[:, 0], data[:, 1], color='black', marker='o')
X_endpoints = [0, 1]
Y_truth, Y_fit = [], []
for x in X_endpoints:
Y_truth.append(line.predict(x))
Y_fit.append(line_fit.predict(x))
plt.plot(X_endpoints, Y_truth, color='blue', label='truth')
plt.plot(X_endpoints, Y_fit, color='red', label='predict')
plt.legend(loc='best')
plt.tight_layout()
plt.show()
```
| github_jupyter |
# Scikit-Learn IRIS Model
* Wrap a scikit-learn python model for use as a prediction microservice in seldon-core
* Run locally on Docker to test
* Deploy on seldon-core running on a kubernetes cluster
## Dependencies
* [S2I](https://github.com/openshift/source-to-image)
```bash
pip install sklearn
pip install seldon-core
```
## Train locally
```
import numpy as np
import os
from sklearn.linear_model import LogisticRegression
from sklearn.pipeline import Pipeline
from sklearn.externals import joblib
from sklearn import datasets
def main():
    """Fit a logistic-regression pipeline on the module-level X, y and
    persist the trained pipeline to disk with joblib."""
    pipeline = Pipeline([('clf', LogisticRegression())])
    print('Training model...')
    pipeline.fit(X, y)
    print('Model trained!')
    filename_p = 'IrisClassifier.sav'
    print('Saving model in %s' % filename_p)
    joblib.dump(pipeline, filename_p)
    print('Model saved!')
if __name__ == "__main__":
print('Loading iris data set...')
iris = datasets.load_iris()
X, y = iris.data, iris.target
print('Dataset loaded!')
main()
```
Wrap model using s2i
## REST test
```
!s2i build -E environment_rest . seldonio/seldon-core-s2i-python3:0.18 seldonio/sklearn-iris:0.1
!docker run --name "iris_predictor" -d --rm -p 5000:5000 seldonio/sklearn-iris:0.1
```
Send some random features that conform to the contract
```
!curl -s http://localhost:5000/predict -H "Content-Type: application/json" -d '{"data":{"ndarray":[[5.964,4.006,2.081,1.031]]}}'
!docker rm iris_predictor --force
```
## Setup Seldon Core
Use the setup notebook to [Setup Cluster](seldon_core_setup.ipynb) to setup Seldon Core with an ingress - either Ambassador or Istio.
Then port-forward to that ingress on localhost:8003 in a separate terminal either with:
* Ambassador: `kubectl port-forward $(kubectl get pods -n seldon -l app.kubernetes.io/name=ambassador -o jsonpath='{.items[0].metadata.name}') -n seldon 8003:8080`
* Istio: `kubectl port-forward $(kubectl get pods -l istio=ingressgateway -n istio-system -o jsonpath='{.items[0].metadata.name}') -n istio-system 8003:80`
```
!kubectl create namespace seldon
!kubectl config set-context $(kubectl config current-context) --namespace=seldon
!kubectl create -f sklearn_iris_deployment.yaml
!kubectl rollout status deploy/seldon-deployment-example-sklearn-iris-predictor-0
res=!curl -s http://localhost:8003/seldon/seldon/seldon-deployment-example/api/v0.1/predictions -H "Content-Type: application/json" -d '{"data":{"ndarray":[[5.964,4.006,2.081,1.031]]}}'
res
print(res)
import json
j=json.loads(res[0])
assert(j["data"]["ndarray"][0][0]>0.0)
!kubectl delete -f sklearn_iris_deployment.yaml
```
| github_jupyter |
```
from DEVDANmainloop import DEVDANmain, DEVDANmainID
from DEVDANbasic import DEVDAN
from utilsDEVDAN import dataLoader, plotPerformance
import random
import numpy as np
import torch
# random seed control
np.random.seed(0)
torch.manual_seed(0)
random.seed(0)
# load data
dataStreams = dataLoader('../dataset/susy2.mat')
print('All Data')
allMetrics = []
# initialization
DevdanNet = DEVDAN(dataStreams.nInput,dataStreams.nOutput)
DevdanNet0, performanceHistory0, allPerformance = DEVDANmain(DevdanNet,dataStreams)
allMetrics.append(allPerformance)
plotPerformance(performanceHistory0[0],performanceHistory0[1],performanceHistory0[2],
performanceHistory0[3],performanceHistory0[4])
# initialization
DevdanNet = DEVDAN(dataStreams.nInput,dataStreams.nOutput)
DevdanNet1, performanceHistory1, allPerformance = DEVDANmain(DevdanNet,dataStreams)
allMetrics.append(allPerformance)
plotPerformance(performanceHistory1[0],performanceHistory1[1],performanceHistory1[2],
performanceHistory1[3],performanceHistory1[4])
# initialization
DevdanNet = DEVDAN(dataStreams.nInput,dataStreams.nOutput)
DevdanNet2, performanceHistory2, allPerformance = DEVDANmain(DevdanNet,dataStreams)
allMetrics.append(allPerformance)
plotPerformance(performanceHistory2[0],performanceHistory2[1],performanceHistory2[2],
performanceHistory2[3],performanceHistory2[4])
# initialization
DevdanNet = DEVDAN(dataStreams.nInput,dataStreams.nOutput)
DevdanNet3, performanceHistory3, allPerformance = DEVDANmain(DevdanNet,dataStreams)
allMetrics.append(allPerformance)
plotPerformance(performanceHistory3[0],performanceHistory3[1],performanceHistory3[2],
performanceHistory3[3],performanceHistory3[4])
# initialization
DevdanNet = DEVDAN(dataStreams.nInput,dataStreams.nOutput)
DevdanNet4, performanceHistory4, allPerformance = DEVDANmain(DevdanNet,dataStreams)
allMetrics.append(allPerformance)
plotPerformance(performanceHistory4[0],performanceHistory4[1],performanceHistory4[2],
performanceHistory4[3],performanceHistory4[4])
# all results
# 0: accuracy
# 1: f1_score
# 2: precision_score
# 3: recall_score
# 4: training_time
# 5: testingTime
# 6: nHiddenLayer
# 7: nHiddenNode
meanResults = np.round_(np.mean(allMetrics,0), decimals=2)
stdResults = np.round_(np.std(allMetrics,0), decimals=2)
print('\n')
print('========== Performance occupancy ==========')
print('Preq Accuracy: ', meanResults[0].item(), '(+/-)',stdResults[0].item())
print('F1 score: ', meanResults[1].item(), '(+/-)',stdResults[1].item())
print('Precision: ', meanResults[2].item(), '(+/-)',stdResults[2].item())
print('Recall: ', meanResults[3].item(), '(+/-)',stdResults[3].item())
print('Training time: ', meanResults[4].item(), '(+/-)',stdResults[4].item())
print('Testing time: ', meanResults[5].item(), '(+/-)',stdResults[5].item())
print('\n')
print('========== Network ==========')
print('Number of hidden layers: ', meanResults[6].item(), '(+/-)',stdResults[6].item())
print('Number of features: ', meanResults[7].item(), '(+/-)',stdResults[7].item())
```
### 50% labeled data
print('50% Data')
allMetrics = []
# initialization
DevdanNet = DEVDAN(dataStreams.nInput,dataStreams.nOutput)
DevdanNet0, performanceHistory0, allPerformance = DEVDANmain(DevdanNet,dataStreams,labeled = False, nLabeled = 0.5)
allMetrics.append(allPerformance)
plotPerformance(performanceHistory0[0],performanceHistory0[1],performanceHistory0[2],
performanceHistory0[3],performanceHistory0[4])
# initialization
DevdanNet = DEVDAN(dataStreams.nInput,dataStreams.nOutput)
DevdanNet1, performanceHistory1, allPerformance = DEVDANmain(DevdanNet,dataStreams,labeled = False, nLabeled = 0.5)
allMetrics.append(allPerformance)
plotPerformance(performanceHistory1[0],performanceHistory1[1],performanceHistory1[2],
performanceHistory1[3],performanceHistory1[4])
# initialization
DevdanNet = DEVDAN(dataStreams.nInput,dataStreams.nOutput)
DevdanNet2, performanceHistory2, allPerformance = DEVDANmain(DevdanNet,dataStreams,labeled = False, nLabeled = 0.5)
allMetrics.append(allPerformance)
plotPerformance(performanceHistory2[0],performanceHistory2[1],performanceHistory2[2],
performanceHistory2[3],performanceHistory2[4])
# initialization
DevdanNet = DEVDAN(dataStreams.nInput,dataStreams.nOutput)
DevdanNet3, performanceHistory3, allPerformance = DEVDANmain(DevdanNet,dataStreams,labeled = False, nLabeled = 0.5)
allMetrics.append(allPerformance)
plotPerformance(performanceHistory3[0],performanceHistory3[1],performanceHistory3[2],
performanceHistory3[3],performanceHistory3[4])
# initialization
DevdanNet = DEVDAN(dataStreams.nInput,dataStreams.nOutput)
DevdanNet4, performanceHistory4, allPerformance = DEVDANmain(DevdanNet,dataStreams,labeled = False, nLabeled = 0.5)
allMetrics.append(allPerformance)
plotPerformance(performanceHistory4[0],performanceHistory4[1],performanceHistory4[2],
performanceHistory4[3],performanceHistory4[4])
# all results
# 0: accuracy
# 1: f1_score
# 2: precision_score
# 3: recall_score
# 4: training_time
# 5: testingTime
# 6: nHiddenLayer
# 7: nHiddenNode
meanResults = np.round_(np.mean(allMetrics,0), decimals=2)
stdResults = np.round_(np.std(allMetrics,0), decimals=2)
print('\n')
print('========== Performance occupancy ==========')
print('Preq Accuracy: ', meanResults[0].item(), '(+/-)',stdResults[0].item())
print('F1 score: ', meanResults[1].item(), '(+/-)',stdResults[1].item())
print('Precision: ', meanResults[2].item(), '(+/-)',stdResults[2].item())
print('Recall: ', meanResults[3].item(), '(+/-)',stdResults[3].item())
print('Training time: ', meanResults[4].item(), '(+/-)',stdResults[4].item())
print('Testing time: ', meanResults[5].item(), '(+/-)',stdResults[5].item())
print('\n')
print('========== Network ==========')
print('Number of hidden layers: ', meanResults[6].item(), '(+/-)',stdResults[6].item())
print('Number of features: ', meanResults[7].item(), '(+/-)',stdResults[7].item())
### 25% Labeled Data
print('25% Data')
allMetrics = []
# initialization
DevdanNet = DEVDAN(dataStreams.nInput,dataStreams.nOutput)
DevdanNet0, performanceHistory0, allPerformance = DEVDANmain(DevdanNet,dataStreams,labeled = False, nLabeled = 0.25)
allMetrics.append(allPerformance)
plotPerformance(performanceHistory0[0],performanceHistory0[1],performanceHistory0[2],
performanceHistory0[3],performanceHistory0[4])
# initialization
DevdanNet = DEVDAN(dataStreams.nInput,dataStreams.nOutput)
DevdanNet1, performanceHistory1, allPerformance = DEVDANmain(DevdanNet,dataStreams,labeled = False, nLabeled = 0.25)
allMetrics.append(allPerformance)
plotPerformance(performanceHistory1[0],performanceHistory1[1],performanceHistory1[2],
performanceHistory1[3],performanceHistory1[4])
# initialization
DevdanNet = DEVDAN(dataStreams.nInput,dataStreams.nOutput)
DevdanNet2, performanceHistory2, allPerformance = DEVDANmain(DevdanNet,dataStreams,labeled = False, nLabeled = 0.25)
allMetrics.append(allPerformance)
plotPerformance(performanceHistory2[0],performanceHistory2[1],performanceHistory2[2],
performanceHistory2[3],performanceHistory2[4])
# initialization
DevdanNet = DEVDAN(dataStreams.nInput,dataStreams.nOutput)
DevdanNet3, performanceHistory3, allPerformance = DEVDANmain(DevdanNet,dataStreams,labeled = False, nLabeled = 0.25)
allMetrics.append(allPerformance)
plotPerformance(performanceHistory3[0],performanceHistory3[1],performanceHistory3[2],
performanceHistory3[3],performanceHistory3[4])
# initialization
DevdanNet = DEVDAN(dataStreams.nInput,dataStreams.nOutput)
DevdanNet4, performanceHistory4, allPerformance = DEVDANmain(DevdanNet,dataStreams,labeled = False, nLabeled = 0.25)
allMetrics.append(allPerformance)
plotPerformance(performanceHistory4[0],performanceHistory4[1],performanceHistory4[2],
performanceHistory4[3],performanceHistory4[4])
# all results
# 0: accuracy
# 1: f1_score
# 2: precision_score
# 3: recall_score
# 4: training_time
# 5: testingTime
# 6: nHiddenLayer
# 7: nHiddenNode
meanResults = np.round_(np.mean(allMetrics,0), decimals=2)
stdResults = np.round_(np.std(allMetrics,0), decimals=2)
print('\n')
print('========== Performance occupancy ==========')
print('Preq Accuracy: ', meanResults[0].item(), '(+/-)',stdResults[0].item())
print('F1 score: ', meanResults[1].item(), '(+/-)',stdResults[1].item())
print('Precision: ', meanResults[2].item(), '(+/-)',stdResults[2].item())
print('Recall: ', meanResults[3].item(), '(+/-)',stdResults[3].item())
print('Training time: ', meanResults[4].item(), '(+/-)',stdResults[4].item())
print('Testing time: ', meanResults[5].item(), '(+/-)',stdResults[5].item())
print('\n')
print('========== Network ==========')
print('Number of hidden layers: ', meanResults[6].item(), '(+/-)',stdResults[6].item())
print('Number of features: ', meanResults[7].item(), '(+/-)',stdResults[7].item())
### Infinite Delay
print('Infinite Delay')
allMetrics = []
# initialization
DevdanNet = DEVDAN(dataStreams.nInput,dataStreams.nOutput)
DevdanNet0, performanceHistory0, allPerformance = DEVDANmainID(DevdanNet,dataStreams)
allMetrics.append(allPerformance)
plotPerformance(performanceHistory0[0],performanceHistory0[1],performanceHistory0[2],
performanceHistory0[3],performanceHistory0[4])
# initialization
DevdanNet = DEVDAN(dataStreams.nInput,dataStreams.nOutput)
DevdanNet1, performanceHistory1, allPerformance = DEVDANmainID(DevdanNet,dataStreams)
allMetrics.append(allPerformance)
plotPerformance(performanceHistory1[0],performanceHistory1[1],performanceHistory1[2],
performanceHistory1[3],performanceHistory1[4])
# initialization
DevdanNet = DEVDAN(dataStreams.nInput,dataStreams.nOutput)
DevdanNet2, performanceHistory2, allPerformance = DEVDANmainID(DevdanNet,dataStreams)
allMetrics.append(allPerformance)
plotPerformance(performanceHistory2[0],performanceHistory2[1],performanceHistory2[2],
performanceHistory2[3],performanceHistory2[4])
# initialization
DevdanNet = DEVDAN(dataStreams.nInput,dataStreams.nOutput)
DevdanNet3, performanceHistory3, allPerformance = DEVDANmainID(DevdanNet,dataStreams)
allMetrics.append(allPerformance)
plotPerformance(performanceHistory3[0],performanceHistory3[1],performanceHistory3[2],
performanceHistory3[3],performanceHistory3[4])
# initialization
DevdanNet = DEVDAN(dataStreams.nInput,dataStreams.nOutput)
DevdanNet4, performanceHistory4, allPerformance = DEVDANmainID(DevdanNet,dataStreams)
allMetrics.append(allPerformance)
plotPerformance(performanceHistory4[0],performanceHistory4[1],performanceHistory4[2],
performanceHistory4[3],performanceHistory4[4])
# all results
# 0: accuracy
# 1: f1_score
# 2: precision_score
# 3: recall_score
# 4: training_time
# 5: testingTime
# 6: nHiddenLayer
# 7: nHiddenNode
meanResults = np.round_(np.mean(allMetrics,0), decimals=2)
stdResults = np.round_(np.std(allMetrics,0), decimals=2)
print('\n')
print('========== Performance occupancy ==========')
print('Preq Accuracy: ', meanResults[0].item(), '(+/-)',stdResults[0].item())
print('F1 score: ', meanResults[1].item(), '(+/-)',stdResults[1].item())
print('Precision: ', meanResults[2].item(), '(+/-)',stdResults[2].item())
print('Recall: ', meanResults[3].item(), '(+/-)',stdResults[3].item())
print('Training time: ', meanResults[4].item(), '(+/-)',stdResults[4].item())
print('Testing time: ', meanResults[5].item(), '(+/-)',stdResults[5].item())
print('\n')
print('========== Network ==========')
print('Number of hidden layers: ', meanResults[6].item(), '(+/-)',stdResults[6].item())
print('Number of features: ', meanResults[7].item(), '(+/-)',stdResults[7].item())
| github_jupyter |
# Scientific computing
- [Scientific computing](day2/scicomp.ipynb):
- [Numpy](#Numpy): advanced array operations
- Scipy introduction: from linear algebra to image analysis
- Simpy: symbolic math
- Networks with networkx: centrality computation
- Fitting a curve, cleaning a signal.
https://docs.scipy.org/doc/numpy/user/basics.html
Numpy: advanced array operations
Scipy introduction: from linear algebra to image analysis
Simpy: symbolic math
Further reading:
- [https://scipy-lectures.github.io/index.html](https://scipy-lectures.github.io/index.html) - served as my main inspiration source for this chapter.
- [http://docs.scipy.org/doc/scipy-0.15.1/reference/](http://docs.scipy.org/doc/scipy-0.15.1/reference/) - The Scipy reference guide, containing a very good tutorial for each of the libraries.
- [http://www.scipy.org/topical-software.html](http://www.scipy.org/topical-software.html) This page is containing links to some of the most common Python modules. However it is far from complete, for example PIL, a library commonly used in image processing in Python, is not listed.
## Numpy
Most of the time when doing scientific computing speed is critical. There are several reasons Numpy is much faster than standard Python. The most important is that Numpy enforces strong typing, while Python is a dynamic typed language. That translates in Numpy using less heap space for representing data. Because array operations are the core of scientific computing, we will look at this library in greater depth.
```
import numpy as np
L = range(1000)
%timeit [i**2 for i in L]
a = np.arange(1000)
%timeit a**2
print(type(L[1]))
print(a.dtype)
import numpy as np
##Get help!
#np.lookfor('create array')
#np.array?
#np.arr*?
a = np.array([0, 1, 2, 3])
b = np.array([[0, 1, 2], [3, 4, 5]])
c = np.array([[[1], [2]], [[3], [4]]])
print(a)
print(b)
print(b.shape)
```
#### Data types
There are 5 basic numerical types representing booleans (bool), integers (int), unsigned integers (uint) floating point (float) and complex. Those with numbers in their name indicate the bitsize of the type (i.e. how many bits are needed to represent a single value in memory).
```
import numpy as np
x = np.float32(1.0)
print(x)
y = np.int_([1,2,4])
print(y)
z = np.arange(3, dtype=np.uint8)
print(z)
print(np.array([1, 2, 3], dtype='f'))
print(z.astype(float))
print(z.dtype)
d = np.dtype(int)
# d = 1 # question: why does it fail?
d = np.uint8(1)
print(d)
print(np.issubdtype(d, np.integer))
```
#### Array creation
```
# extrinsic
x = np.array([2,3,1,0])
print(x)
x = np.array([[ 1., 2.], [ 0., 0.], [ 1., 3.]])
print(x)
#intrinsic
b = np.arange(1, 9, 2)
print(b)
c = np.linspace(0, 1, 6)
print(c)
a = np.ones((3, 3))
b = np.zeros((2, 2))
c = np.eye(3)
print("ones, zeros, eyes:")
print(a,"\n", b,"\n",c)
%pylab inline
import matplotlib.pyplot as plt
x = np.diag(np.array([1, 2, 3, 4]))
print(x)
x = np.random.rand(4) # uniform in [0, 1]
print(x)
x = np.random.rand(4)
print(x)
x = np.random.randn(4,3) # Gaussian
print(x)
#np.random.seed(1234)
image = np.random.rand(30, 30)
plt.imshow(image, cmap=plt.cm.hot)
plt.colorbar()
# more generators
print(np.linspace(1., 4., 6))
print(np.indices((3,2)))
print(np.arange(35).reshape(5,7))
print(np.triu(np.ones((3, 3)), 1))
print(np.triu(np.ones((3, 3)), 2))
print(np.tile(np.arange(0, 40, 10), (3, 1)))
```
#### Indexing, slicing and selection
```
a = np.arange(10)
print(a[0], a[2], a[-1], a[-3])
print(a[2:5], a[2:], a[:-2], a[::2], a[2::2])
a = np.diag(np.arange(3))
print(a)
print(a[1, 1])
a[2, 1] = 10 # !third line, !second column
print("\na = \n",a)
print("\na[1] = \n",a[1])
print("\na[:,1], a[1,:] = \n",a[:,1], a[1,:])
print("\na[1:,2:] = \n",a[1:,1:])
# array indexes
x = np.arange(10,1,-1)
print(x)
print(x[np.array([3, 3, 1, 8])])
print(x[np.array([3,3,-3,8])])
print(x[np.array([[1,1],[2,3]])])
a = np.random.randint(0, 20 + 1, 15)
print(a)
print(a%3==0)
print(a[a%3==0])
a[a % 3 == 0] = -1
print(a)
a = np.arange(0, 100, 10)
print(a)
print(a[[2, 3, 2, 4, 2]])
a[[9, 7]] = -100
print(a)
a = np.arange(0,100,10)
idx = np.array([[3,4],[9,7]])
print(a[idx])
y = np.arange(35).reshape(5,7)
print(y)
print(y[np.array([0,2,4]), np.array([0,1,2])])
# multidimensional arrays have a less intuitive form of indexing
# first value of the resultant array is y[0,0]. The next value is y[2,1], and the last is y[4,2].
print(y[np.array([0,2,4]), 1])
print(y[np.array([0,2,4])])
```
Task:
- What does this do:
```
# What does it do?
print(np.arange(0, 51, 10)[:, np.newaxis])
print(np.arange(6))
print(np.arange(6) + np.arange(0, 51, 10)[:, np.newaxis])
# How does it work?
# Print the primes!
def get_primes(n=100):
    """Sieve of Eratosthenes: boolean mask m of length n, m[i] True iff i is prime.

    Default n=100 reproduces the original hard-coded behaviour exactly.
    The original loop bound range(2, int(sqrt(n))) misses the largest
    needed factor once generalized (e.g. 25 survived for n=26); sieving
    factors up to sqrt(n - 1) inclusive closes that off-by-one.
    """
    primes = np.ones((n,), dtype=bool)
    primes[:2] = 0          # 0 and 1 are not prime
    for j in range(2, int(np.sqrt(max(n - 1, 1))) + 1):
        primes[2*j::j] = 0  # strike out every multiple of j beyond j itself
    return primes
print(get_primes())
```
#### Broadcasting, assignment, structured arrays
```
a = np.arange(10)
b = a[::2]
b[0] = 12
print(a, b)
print(np.may_share_memory(a, b))
a = np.arange(10)
c = a[::2].copy() # force a copy
c[0] = 12
print(a, c)
print(np.may_share_memory(a, c))
#Array operations
a = np.array([1, 2, 3, 4])
print("a: ", a)
print("a + 1, 2**a: ", a + 1, 2**a)
b = np.ones(4) + 1
print("a: ",a)
print("b: ",b)
print("a - b, a * b: ", a - b, a * b)
j = np.arange(5)
print ("2**(j + 1) - j: ", 2**(j + 1) - j)
c = np.ones((3, 3))
print("\n2*c + 1: \n", 2*c + 1)
print("\nc * c: \n", c * c)
print("\nc + c: \n", c + c)
print("\nc.dot(c): \n", c.dot(c)) #matrix multiplication!
a = np.arange(5)
print(np.sin(a), np.log(a), np.exp(a))
# shape manipulation
x = np.array([1, 2, 3])
print(x), print()
print(x[:, np.newaxis]), print()
print(x[np.newaxis, :]), print()
# flatten, reshape
a = np.array([[1, 2, 3], [4, 5, 6]])
print(a)
print(a.ravel()), print()
b = a.ravel()
b = b.reshape((2, 3)) #!reshape may return a copy!
print(b)
# open (i.e. not fleshed out) mesh-grid when indexed
x, y = np.ogrid[0:5, 0:5]
print(x)
print(y), print()
# mesh grid (closed)
x, y = np.mgrid[0:5, 0:5]
print(x)
print(y)
# Multidimensional transpose matrices
a = np.arange(4*3*2).reshape(4, 3, 2)
print(a), print()
print(a.shape, a[0, 2, 1])
b = a.transpose(1, 2, 0)
print(b.shape, b[2, 1, 0])
# resizing behaves differently if the array is row-major vs column major
x = np.array([[0, 1], [2, 3]], order='C')
#x = np.array([[0, 1], [2, 3]]
#x = np.array([[0, 1], [2, 3]], order='F')
print(x)
x.resize((2, 1))
print(x)
# sorting matrices, "finally"!
a = np.array([[4, 3, 5], [1, 2, 1]])
b = np.sort(a, axis=1) #sorting per row
print(a)
print(b)
a.sort(axis=1)
print(a), print()
a = np.array([4, 3, 1, 2])
j = np.argsort(a)
print(j, a[j]), print()
# sorting structured arrays
# oh, about structured arrays ...
# https://docs.scipy.org/doc/numpy/user/basics.rec.html
a = np.array([('a', 2), ('c', 1)], dtype=[('x', 'S1'), ('y', int)])
print(a)
a.sort(order='y')
print(a)
```
#### Reductions
```
x = np.array([1, 2, 3, 4])
print("unidimensional:")
print(x)
print(np.sum(x), x.sum(), x.sum(axis=0))
x = np.array([[1, 1], [2, 2]])
print("multidimensional:")
print(x)
print(x.sum(axis=0)) # columns (first dimension)
print(x[:, 0], x[:, 0].sum()) # column sum, selected column only
print(x.sum(axis=1)) # rows (second dimension)
print(x[0, :], x[0, :].sum()) # row sum, selected column only
```
Task: What does it do?
```
x = np.random.rand(2,2,2)
print(x)
print("\nx.sum(axis=2)[0,1] =", x.sum(axis=2)[0,1])
print("\nx[0,1,:].sum() =", x[0,1,:].sum())
x = np.array([1, 3, 2])
print(x)
print(x.min(), x.max(), x.argmin(), x.argmax())
print(np.all([True, True, False]), np.any([True, True, False]))
x = np.array([1, 2, 3, 1])
y = np.array([[1, 2, 3], [5, 6, 1]])
print(x.mean(), x.std(), np.median(x), np.median(y, axis=-1))
a = np.zeros((100, 100))
print(np.any(a != 0), np.all(a == a))
a = np.array([1, 2, 3, 2])
b = np.array([2, 2, 3, 2])
c = np.array([6, 4, 4, 5])
print(((a <= b) & (b <= c)).all())
```
Task:
- Replace all values greater than 25 with 9 and all values smaller than 10 with 29.
- Use numpy preferentially!
```
np.random.seed(100)
x = np.random.uniform(1,50, 20)
x
```
Task: Create a rank array having the same shape as x.
```
np.random.seed(100)
a = np.random.randint(50, size=[2,5])
a
```
### Scipy
Scipy is Python's favorite library for scientific computing. Some of its functionality overlaps with Numpy and Scikit-learn, but in general Scipy modules are viewed as the equivalent of Matlab's standard toolboxes. One is supposed to use it in conjunction with the so-called "Scipy stack", so for example it is best to handle array functionality with Numpy and machine learning with Scikit-learn. We will quickly go through several of the more common uses of Scipy, while it is good to keep in mind that we are barely scratching the surface of actual scientific computing.
http://www.c3se.chalmers.se/common/python_course_2012/Lecture5_SciPy_2012.pdf
#### [scipy.linalg](http://docs.scipy.org/doc/scipy/reference/linalg.html#scipy.linalg) - Singular Value Decomposition
There are three main uses of this module: solving linear equations, solving eigenvalues problems and matrix factorizations.
One of the most common techniques for data summarization is Singular Value Decomposition (SVD) and Principal Component Analysis (PCA). We will solve one SVD task here, and a PCA task later in the machine learning section. Keep in mind that most machine learning algorithms in scikit-learn are wrappers for the basic functions in this scipy module. From the linear algebra perspective PCA is a matrix decomposition method. Several other such methods are available in the linalg module.
If X is a matrix with each variable in a column and each observation in a row then the SVD is
$$X = U S V$$
where the columns of U are orthogonal (left singular vectors), the columns of V are orthogonal (right singular vectors) and S is a diagonal matrix of zeroes with main diagonal s (singular values).
In the example below, most of the variation in the dataset is explained by the first two singular values, corresponding to the first two features.
Obs:
scipy.linalg.orth(A) - uses SVD to find an orthonormal basis for A.
```
%matplotlib inline
import matplotlib.pyplot as plt
import numpy as np
from scipy import linalg
from sklearn.datasets import load_iris
iris = load_iris()
# print(iris.feature_names, iris.target_names)
#print(iris.DESCR)
#print(iris.data)
# perform SVD
A = iris.data
U, s, V = linalg.svd(A)
print("U.shape, V.shape, s.shape: ", U.shape, V.shape, s.shape)
print("Singular values:", s)
#a = U.dot(eye())
# transform back to A
S = linalg.diagsvd(s, 150, 4)
At = np.dot(U, np.dot(S, V))
print("Closeness test: ", np.allclose(A, At))
# np.allclose?
fig, axes = plt.subplots(1, 2, figsize=(15,5))
# fig = plt.figure()
ax1 = axes[0]
colors = np.array(['blue','red','black'])
labels = np.array(['setosa','versicolour','verginica'])
ax1.scatter(U[:,0], U[:,1], color = colors[iris.target])
ax1.set_xlabel("First singular vector")
ax1.set_ylabel("Second singular vector")
ax1.legend()
ax2 = axes[1]
colors = np.array(['blue','red','black'])
labels = np.array(['setosa','versicolour','verginica'])
ax2.scatter(A[:,0], A[:,1], color = colors[iris.target])
ax2.set_xlabel("First feature vector")
ax2.set_ylabel("Second feature vector")
ax2.legend()
```
#### scipy.signal and scipy.fftpack: Signal theory
Signal processing is useful in order to interpret the data of many measuring instruments, especially if there is a time delayed response. We are performing a simple example, but for those that want to learn more applications of Python for signal processing I recommend a number of online IPython courses.
- [http://nbviewer.ipython.org/github/calebmadrigal/FourierTalkOSCON/tree/master/](http://nbviewer.ipython.org/github/calebmadrigal/FourierTalkOSCON/tree/master/): Basic entry level course focused only on FFT
- [http://nbviewer.ipython.org/github/unpingco/Python-for-Signal-Processing/tree/master/](http://nbviewer.ipython.org/github/unpingco/Python-for-Signal-Processing/tree/master/): A more complex and complete course of signal processing with Python.
A small example would be a noisy signal whose frequency is unknown to the observer, who only knows the sampling time step. The signal is supposed to come from a real function so the Fourier transform will be symmetric. The scipy.fftpack.fftfreq() function will generate the sampling frequencies and scipy.fftpack.fft() will compute the fast Fourier transform:
```
%matplotlib inline
import numpy as np
from scipy import fftpack
import matplotlib.pyplot as plt
time_step = 0.22
period = 5.
time_vec = np.arange(0, 20, time_step)
sig = np.sin(2 * np.pi / period * time_vec) + 0.5 * np.random.randn(time_vec.size)
from scipy import fftpack
#print(sig.size)
sample_freq = fftpack.fftfreq(sig.size, d=time_step)
sig_fft = fftpack.fft(sig)
pidxs = np.where(sample_freq > 0)
freqs, power = sample_freq[pidxs], np.abs(sig_fft)[pidxs]
freq = freqs[power.argmax()]
print("Determined frequency:",freq)
sig_fft[np.abs(sample_freq) > freq] = 0
main_sig = fftpack.ifft(sig_fft)#Discrete inverse Fourier transform
fig = plt.figure()
ax1 = fig.add_subplot(311)
ax1.plot(time_vec,sig)
ax1.set_title('Signal')
ax2 = fig.add_subplot(312)
ax2.plot(freqs, power)
ax2.set_xlabel('Frequency [Hz]')
ax2.set_ylabel('power')
ax2.set_title('Peak frequency')
ax3 = fig.add_subplot(313)
ax3.plot(time_vec,main_sig)
ax1.set_title('Cleaned signal')
```
#### scipy.optimize: Local and global optimization, fitting and root finding
In the statistics chapter we use this package for line fitting. We also have a self standing optimization chapter where we will get back to this module.
We estimated the parameters of a function by performing an error minimization. An optimization problem complexity is dependent on several factors:
- Do you intend a local or a global optimization?
- Is the function linear or nonlinear?
- Is the function convex or not?
- Can a gradient be computed?
- Can the Hessian matrix be computed?
- Do we perform optimization under constraints?
Scipy does not cover all solvers efficiently but there are several Python packages specialized for certain classes of optimization problems. In general though heavy optimization is solved with dedicated programs, many of whom have language bindings for Python.
To exemplify, we use Newton's optimization to find the minima of a nonlinear function. (also covered in the optimization chapter)
```
import numpy as np
import scipy.optimize as optimize
def f(x): # The rosenbrock function
return .5*(1 - x[0])**2 + (x[1] - x[0]**2)**2
def fprime(x):
return np.array((-2*.5*(1 - x[0]) - 4*x[0]*(x[1] - x[0]**2), 2*(x[1] - x[0]**2)))
print optimize.fmin_ncg(f, [2, 2], fprime=fprime)
def hessian(x): # Computed with sympy
return np.array(((1 - 4*x[1] + 12*x[0]**2, -4*x[0]), (-4*x[0], 2)))
print optimize.fmin_ncg(f, [2, 2], fprime=fprime, fhess=hessian)
%matplotlib inline
from matplotlib import cm
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
fig = plt.figure()
ax = fig.gca(projection='3d')
X = np.arange(-5, 5, 0.25)
Y = np.arange(-5, 5, 0.25)
X, Y = np.meshgrid(X, Y)
Z = .5*(1 - X)**2 + (Y - X**2)**2
surf = ax.plot_surface(X, Y, Z, rstride=1, cstride=1, cmap=cm.coolwarm,
linewidth=0, antialiased=False)
ax.set_zlim(-1000.01, 1000.01)
fig.colorbar(surf, shrink=0.5, aspect=5)
plt.show()
```
#### scipy.interpolate: Cubic interpolation
Interpolation is useful when we have sampled a function but want to approximate its values on different points. A well known class of interpolation functions are the splines, most commonly three spline curves are combined in order to interpolate a smooth curved line between two datapoints.
```
%matplotlib inline
import numpy as np
from scipy.interpolate import interp1d
import pylab as pl
measured_time = np.linspace(0, 1, 10)
noise = 0.1 * np.random.randn(10)
measures = np.sin(2 * np.pi * measured_time) + noise
linear_interp = interp1d(measured_time, measures)
computed_time = np.linspace(0, 1, 50)
linear_results = linear_interp(computed_time)
cubic_interp = interp1d(measured_time, measures, kind='cubic')
cubic_results = cubic_interp(computed_time)
pl.plot(measured_time, measures, 'o', ms=6, label='measures')
pl.plot(computed_time, linear_results, label='linear interp')
pl.plot(computed_time, cubic_results, label='cubic interp')
pl.legend()
```
#### scipy.integrate: Integration and ODE solvers
This submodule is useful for summing up function values over intervals (integration) and solving ordinary differential equations. Partial differential equations are not covered and require other Python packages. As a quick example, we solve a case of Michaelis-Menten enzyme kinetics.
```
%matplotlib inline
#from scipy import *
import scipy.integrate as integrate
'''
Slightly modified from a sample code generated by this program,
that formulates a solver for different cases of enzime reactions:
http://code.google.com/p/kinpy/
## Reaction ##
#Michaelis-Menten enzyme kinetics.
E + S <-> ES
ES <-> E + P
## Mapping ##
E 0 -1*v_0(y[1], y[0], y[2]) +1*v_1(y[3], y[0], y[2])
S 1 -1*v_0(y[1], y[0], y[2])
ES 2 +1*v_0(y[1], y[0], y[2]) -1*v_1(y[3], y[0], y[2])
P 3 +1*v_1(y[3], y[0], y[2])
'''
dy = lambda y, t: array([\
-1*v_0(y[1], y[0], y[2]) +1*v_1(y[3], y[0], y[2]),\
-1*v_0(y[1], y[0], y[2]),\
+1*v_0(y[1], y[0], y[2]) -1*v_1(y[3], y[0], y[2]),\
+1*v_1(y[3], y[0], y[2])\
])
#Initial concentrations:
y0 = array([\
#E
0.6,\
#S
1.2,\
#ES
3.0,\
#P
0.2,\
])
#E + S <-> ES
v_0 = lambda S, E, ES : k0 * E**1 * S**1 - k0r * ES**1
k0 = 1.2
k0r = 1.5
#ES <-> E + P
v_1 = lambda P, E, ES : k1 * ES**1 - k1r * E**1 * P**1
k1 = 0.9
k1r = 1.9
t = arange(0, 10, 0.01)
Y = integrate.odeint(dy, y0, t)
import pylab as pl
pl.plot(t, Y, label='y')
```
#### [scipy.ndimage](https://docs.scipy.org/doc/scipy/reference/ndimage.html) - Image processing
This module is useful for containing functions for multidimensional image manipulation. It mainly contains filters, interpolation and morphology functions.
- https://scipy-lectures.org/advanced/image_processing/index.html
- https://scipy-lectures.org/packages/scikit-image/index.html
```
%matplotlib inline
import numpy as np
from scipy import ndimage
from scipy import misc
import matplotlib.pyplot as plt
import pylab as pl
koon = misc.face(gray=True)
#from scipy import misc
#face = misc.face(gray=True)
plt.imshow(koon)
plt.show()
blurred_koon = ndimage.gaussian_filter(koon, sigma=5)
plt.imshow(blurred_koon)
plt.show()
noisy_koon = np.copy(koon).astype(np.float)
noisy_koon += koon.std()*np.random.standard_normal(koon.shape)
plt.imshow(noisy_koon)
plt.show()
from scipy import signal
wiener_koon = signal.wiener(blurred_koon, (5,5))
plt.imshow(wiener_koon)
plt.show()
```
### Sympy
Symbolic math is sometimes important, especially if we are weak at calculus or if we need to perform automated calculus on long formulas. We are briefly going through a few test cases, to get the feel of it. Symbolic math is especially developed for [Mathematica](http://www.wolfram.com/mathematica/?source=nav), or [Sage](http://www.sagemath.org/) which is an open-source equivalent.
```
# BUG FIX: the original used Python-2 `print` statements, which are a
# SyntaxError on Python 3 (the rest of this notebook targets Python 3).
import sympy
# sympy keeps sqrt(8) exact (2*sqrt(2)) ...
print(sympy.sqrt(8))
import math
# ... while math.sqrt returns a float approximation.
print(math.sqrt(8))

from sympy import symbols
x, y, z, t = symbols('x y z t')
expr = x + 2*y
print(expr)
print(x * expr)

from sympy import expand, factor, simplify
# expand() multiplies the product out; factor() recovers the product form.
expanded_expr = expand(x*expr)
print(expanded_expr)
print(factor(expanded_expr))

# Substitute x -> z**t and simplify the resulting expression.
exp = expanded_expr.subs(x, z**t)
print(exp)
print(simplify(exp))
```
In the scipy.optimize paragraph we needed the Hessian matrix for a function f. Here is how you can obtain it in sympy:
```
import sympy
from IPython.display import Latex, HTML

# Symbolic version of the Rosenbrock-like objective used in the
# scipy.optimize example above.
x, y = sympy.symbols('x y')
f = .5*(1 - x)**2 + (y - x**2)**2

# Exact Hessian matrix, printed as plain text and rendered as LaTeX.
h = sympy.hessian(f, [x, y])
print(h)
Latex(sympy.latex(h))
HTML('<iframe src=http://en.wikipedia.org/wiki/Hessian_matrix width=700 height=350></iframe>')
```
| github_jupyter |
# X to Autosome Ratio
Here I perform a preliminary dosage compensation experiment in testis. This is a very rough preliminary analysis, but I have played with lots of different normalizations and ways of looking at things and the results are consistent. There appears to be attenuated dosage compensation in germ cells.
```
# %load ../start.py
# Load useful IPython extensions.
# Activate the autoreload extension for easy reloading of external packages.
%reload_ext autoreload
%autoreload 2
# Turn on the watermark extension (records date/git hash of the run).
%reload_ext watermark
%watermark -u -d -g
# Load the ipycache extension and point its cache at a shared directory.
%reload_ext ipycache
from ipycache import CacheMagics
CacheMagics.cachedir = '../cachedir'
# The usual suspects
import os
import numpy as np
import pandas as pd
# Plotting stack: matplotlib + seaborn, rendered inline.
import matplotlib as mpl
import matplotlib.pyplot as plt
import seaborn as sns
%matplotlib inline
sns.set_style('white')
sns.set_context('notebook')
sns.mpl.rc("figure", figsize=(10, 10))
# Turn off scientific notation in numpy reprs.
np.set_printoptions(precision=5, suppress=True)
# Get count matrix: rows are genes, columns are cells; transpose so that
# `te` is cells x genes (gene_symbol column dropped, kept in `testis`).
testis = pd.read_csv('../../output/single_cell_matrix_testis.tsv', sep='\t', index_col=0)
te = testis.drop('gene_symbol', axis=1).T
print(te.shape)
te.head()
# Get cluster assignments; cell ids in `te` carry a "-1" suffix, so add
# it here to make the two tables joinable.
clusters = pd.read_csv('../../output/testis_clusters.tsv', sep='\t', header=None)
clusters.columns = ['cell', 'cluster']
clusters['cell_id'] = clusters.cell.apply(lambda x: '{}-1'.format(x))
clusters.drop('cell', axis=1, inplace=True)
print(clusters.shape)
clusters.head()
# Make look-up tables between FlyBase gene ids (FBgn) and gene symbols.
fbgn2symbol = testis['gene_symbol'].to_dict()
symbol2fbgn = {}
for k, v in fbgn2symbol.items():
    symbol2fbgn[v] = k
# Genes need to be expressed in 1/3 of samples.
expressed = (te > 0).sum() > te.shape[0] / 3
# Build fbgn2chrom (gene -> chromosome) from the Drosophila GTF.
REF = os.environ['REFERENCES_DIR']
CHROMS = ['chrX', 'chr2L', 'chr2R', 'chr3L', 'chr3R', 'chr4']
genes = []
with open(os.path.join(REF, 'dmel/r6-11/gtf/dmel_r6-11.gtf')) as fh:
    for row in fh:
        rows = row.strip().split()
        if len(rows) == 0:
            continue
        # GTF: column 0 is the chromosome, column 2 the feature type;
        # field 9 is the first attribute value (assumed to be the FBgn
        # id here — TODO confirm against this GTF's attribute order).
        if rows[2] == 'gene':
            genes.append((rows[0], rows[9].replace('"', '').replace(';', '')))
fbgn2chrom = pd.DataFrame(genes, columns=['chrom', 'FBgn'])
fbgn2chrom.set_index('FBgn', inplace=True)
fbgn2chrom = fbgn2chrom[fbgn2chrom['chrom'].isin(CHROMS)]
# Germ cells: vasa (vas) positive cells.
germ = te[te[symbol2fbgn['vas']] > 0].T
# The germ cells belong to cluster 3.
cluster3 = te[te.index.isin(clusters[clusters.cluster == 3].index)].T
# Cluster 4 also looks germ-cell-like; erika thinks they are older spermatocytes.
cluster4 = te[te.index.isin(clusters[clusters.cluster == 4].index)].T
# Somatic cells: doublesex (dsx) positive cells.
soma = te[te[symbol2fbgn['dsx']] > 0].T
# functions
def xtoa(df):
    """Return per-gene median log2 normalized expression, chrX-median centered.

    df: genes x cells count DataFrame. Relies on the module-level
    `expressed` mask and `fbgn2chrom` table defined in the loading cell.
    Returns a DataFrame indexed by (FBgn, chrom) with one column
    'med_rpm', from which per-chromosome distributions can be compared
    against chrX (which is centered at 0 by construction).
    """
    # Normalize: keep only expressed genes and convert counts to
    # log2(reads per 10k + 1) per cell (library-size normalization).
    def libnorm(df):
        df2 = df[expressed].copy()
        return np.log2(df2 / df2.sum() * 10**4 + 1)
    df_rpm = libnorm(df)
    # Aggregate across samples: one median value per gene, then attach
    # the chromosome as a second index level (inner join drops genes
    # not on the kept chromosomes).
    def agg_samples(df, aggfunc=np.median):
        med = df.apply(aggfunc, axis=1).to_frame()
        merged = med.join(fbgn2chrom, how='inner').set_index('chrom', append=True)
        merged.sort_index(inplace=True)
        merged.columns = ['med_rpm']
        return merged
    df_rpm_smed = agg_samples(df_rpm, aggfunc=np.median)
    # Center everything on the chrX median so chrX sits at 0 and the
    # autosomes can be read as offsets from it.
    def median_center(df):
        return df - df.loc[(slice(None), 'chrX'), :].median()
    return median_center(df_rpm_smed)
def plot_xtoa(df, ax, name):
    """Draw a notched per-chromosome boxplot of med_rpm onto *ax*.

    df: output of xtoa() (index level -1 is the chromosome);
    name: panel title.
    """
    # Chromosome labels live on the innermost index level.
    chrom_labels = df.index.get_level_values(-1)
    sns.boxplot(chrom_labels, df.med_rpm, notch=True,
                ax=ax, order=CHROMS, fliersize=0)
    ax.set_title(name)
    # Dotted reference line at 0: the chrX median (data are X-centered).
    ax.axhline(0, ls=':')
    ax.set_ylim(None, 3)
    ax.set_ylabel('Log 2 Normalized Read Per Million\n(chrX Median Centered)')
# Recompute the vasa-positive (germ) and dsx-positive (somatic) subsets.
germ = te[te[symbol2fbgn['vas']] > 0].T
soma = te[te[symbol2fbgn['dsx']] > 0].T
# Build the list of (panel title, centered expression table) pairs:
# germ, soma, then one entry per cluster.
cls = [
    ('Germ Cells (n={})'.format(germ.shape[1]), xtoa(germ)),
    ('Somatic Cells (n={})'.format(soma.shape[1]), xtoa(soma))
]
for i, dd in clusters.groupby('cluster'):
    cells = dd.cell_id.unique().tolist()
    cls.append(('Cluster {} (n={})'.format(i, dd.shape[0]), xtoa(te[te.index.isin(cells)].T)))
# Boxplots: one panel per subset, shared y-axis; drop the y-label on the
# right-hand column to avoid repetition.
fig, axes = plt.subplots(5, 2, figsize=plt.figaspect(2)*2, sharey=True)
for (name, df), ax in zip(cls, axes.flatten()):
    plot_xtoa(df, ax, name)
for ax in axes[:, 1]:
    ax.set_ylabel('')
plt.tight_layout()
te.head()
# Attach cluster labels to the expression matrix for per-cluster plots.
merged = clusters.set_index('cell_id').join(te)
# Distribution of total counts (log10) per cell, one curve per cluster.
with sns.color_palette('hls', n_colors=10):
    for clus, df in merged.groupby('cluster'):
        sns.distplot(np.log10(df.drop('cluster', axis=1).T.sum()+1), label='cluster_{}'.format(clus), hist=False)
# Same, restricted to chrX genes only.
with sns.color_palette('hls', n_colors=10):
    for clus, df in merged.groupby('cluster'):
        ddf = df.drop('cluster', axis=1).T
        mddf = ddf.join(fbgn2chrom)
        mddf = mddf[mddf.chrom == 'chrX'].drop('chrom', axis=1)
        pddf = mddf.sum()
        sns.distplot(pddf, label='cluster_{}'.format(clus), hist=False)
```
| github_jupyter |
## Types
Now we'll build up the Hail interface from the ground up starting with types.
```
import hail as hl
# Start the Hail runtime (spins up the backing Spark context).
hl.init()
```
The Hail interface is **statically** typed. That means each expression has a type, and that type constrains the set of values that expression can produce.
What are the Hail types?
There are three kinds of types: primitive types, container types, and domain-specific types.
The primitive types are:
- [int32](https://hail.is/docs/devel/types.html#hail.expr.types.tint32)
- [int64](https://hail.is/docs/devel/types.html#hail.expr.types.tint64)
- [float32](https://hail.is/docs/devel/types.html#hail.expr.types.tfloat32)
- [float64](https://hail.is/docs/devel/types.html#hail.expr.types.tfloat64)
- [bool](https://hail.is/docs/devel/types.html#hail.expr.types.tbool)
- [str](https://hail.is/docs/devel/types.html#hail.expr.types.tstr)
The container types are:
- [array](https://hail.is/docs/devel/types.html#hail.expr.types.tarray)
- [set](https://hail.is/docs/devel/types.html#hail.expr.types.tset)
- [dict](https://hail.is/docs/devel/types.html#hail.expr.types.tdict)
- [tuple](https://hail.is/docs/devel/types.html#hail.expr.types.ttuple)
- [struct](https://hail.is/docs/devel/types.html#hail.expr.types.tstruct)
- [interval](https://hail.is/docs/devel/types.html#hail.expr.types.tinterval)
The domain-specific types are:
- [locus](https://hail.is/docs/devel/types.html#hail.expr.types.tlocus)
- [call](https://hail.is/docs/devel/types.html#hail.expr.types.tcall)
Hail types are usually printed as above, but when accessing them in the module, they all start with `t`:
```
# Type objects are plain Python values; display a primitive type ...
hl.tint32
# ... and a nested container type (dict of str -> array<int32>).
hl.tdict(hl.tstr, hl.tarray(hl.tint32))
```
If you prefer the strings, you can parse them with [dtype](https://hail.is/docs/devel/types.html#hail.expr.types.dtype).
```
# The same dict type as above, parsed from its string representation.
hl.dtype('dict<str, array<int32>>')
```
In general, you won't need to mention types explicitly, but there are a few cases:
- To specify column types in [import_table](https://hail.is/docs/devel/methods/impex.html#hail.methods.import_table), (although this function can also impute types automatically).
- When converting a Python value to a Hail expression with [literal](https://hail.is/docs/devel/functions/core.html#hail.expr.functions.literal) (although again the type can often be determined automatically).
- A few constructor functions, like [null](https://hail.is/docs/devel/functions/core.html#hail.expr.functions.null).
## Expression types
Each `Expression` has a type that can be accessed with the attribute `dtype`.
```
# Build a dict expression from a Python literal, show it, then show its
# inferred static type via the `dtype` attribute.
e = hl.dict({'a': 5, 'b': 7})
e
e.dtype
```
If the rules for computing the type of an expression are violated, Hail will throw a type error. For example, the types of the branches of a conditional must be the same. The Hail conditional function is called `cond`.
```
x = hl.int32(10)
y = hl.int32(20)
# The two branches of cond() have different types (int vs str), so Hail
# raises; catch and print the type error to demonstrate static typing.
try:
    hl.cond(x < y,
            5,
            'foo')
except Exception as e:
    print(f'ERROR: {e}')
```
| github_jupyter |
##### Copyright 2019 The TensorFlow Hub Authors.
Licensed under the Apache License, Version 2.0 (the "License");
```
# Copyright 2019 The TensorFlow Hub Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
```
# Wiki40B 言語モデル
<table class="tfo-notebook-buttons" align="left">
<td><a target="_blank" href="https://www.tensorflow.org/hub/tutorials/wiki40b_lm"><img src="https://www.tensorflow.org/images/tf_logo_32px.png"> TensorFlow.orgで表示</a></td>
<td><a target="_blank" href="https://colab.research.google.com/github/tensorflow/docs-l10n/blob/master/site/ja/hub/tutorials/wiki40b_lm.ipynb"><img src="https://www.tensorflow.org/images/colab_logo_32px.png"> Google Colab で実行</a></td>
<td><a target="_blank" href="https://github.com/tensorflow/docs-l10n/blob/master/site/ja/hub/tutorials/wiki40b_lm.ipynb"><img src="https://www.tensorflow.org/images/GitHub-Mark-32px.png">GitHub でソースを表示</a></td>
  <td><a href="https://storage.googleapis.com/tensorflow_docs/docs-l10n/site/ja/hub/tutorials/wiki40b_lm.ipynb"><img src="https://www.tensorflow.org/images/download_logo_32px.png">ノートブックをダウンロード</a></td>
<td><a href="https://tfhub.dev/google/collections/wiki40b-lm/1"><img src="https://www.tensorflow.org/images/hub_logo_32px.png">TF Hub モデルを見る</a></td>
</table>
<a>TensorFlow Hub</a> の <strong>Wiki40B 言語モデル</strong>を使用して、ウィキペディアのようなテキストを生成しましょう!
このノートブックでは、次の項目を行う方法を説明します。
- TF-Hub の [Wiki40b-LM コレクション](https://tfhub.dev/google/collections/wiki40b-lm/1)の一部である単一言語モデル 41 個と多言語モデル 2 個を読み込みます。
- モデルを使用して、特定のテキストピースの複雑さ、レイヤーごとのアクティベーション、および単語埋め込みを取得します。
- シードテキストのピースからトークンごとにテキストを生成します。
言語モデルは、TensorFlow Datasets で新たに公開された、クリーンアップ済みの [Wiki40B データセット](https://www.tensorflow.org/datasets/catalog/wiki40b)でトレーニングされています。トレーニングは、論文「[Wiki-40B: Multilingual Language Model Dataset](https://research.google/pubs/pub49029/)」に基づいてセットアップされています。
## セットアップ
```
#@title Installing Dependencies
!pip install --quiet tensorflow_text
#@title Imports
import numpy as np
import tensorflow.compat.v1 as tf
import tensorflow_hub as hub
import tensorflow_text as tf_text

# The Wiki40B-LM hub modules are TF1 graph-mode modules, so disable
# eager execution and quiet the logger.
tf.disable_eager_execution()
tf.logging.set_verbosity(tf.logging.WARN)
```
## 言語を選択する
TF-Hub から読み込む**言語モデル**と、生成される**テキストの長さ**を選択しましょう。
```
#@title { run: "auto" }
# Colab form: pick the model's language code and the maximum number of
# tokens to generate.
language = "en" #@param ["en", "ar", "zh-cn", "zh-tw", "nl", "fr", "de", "it", "ja", "ko", "pl", "pt", "ru", "es", "th", "tr", "bg", "ca", "cs", "da", "el", "et", "fa", "fi", "he", "hi", "hr", "hu", "id", "lt", "lv", "ms", "no", "ro", "sk", "sl", "sr", "sv", "tl", "uk", "vi", "multilingual-64k", "multilingual-128k"]
hub_module = "https://tfhub.dev/google/wiki40b-lm-{}/1".format(language)
max_gen_len = 20 #@param

print("Using the {} model to generate sequences of max length {}.".format(hub_module, max_gen_len))
```
## モデルを構築する
使用するトレーニング済みのモデルの構成が完了したので、最大 `max_gen_len` のテキストを生成するように構成することにしましょう。TF-Hub から言語モデルを読み込み、スターターテキストピースをフィードし、生成されるたびにインタラクティブにトークンをフィードする必要があります。
```
#@title Load the language model pieces
# Build everything inside a dedicated graph; n_layer/model_dim match the
# Wiki40B-LM transformer architecture (12 layers, 768-dim hidden state).
g = tf.Graph()
n_layer = 12
model_dim = 768
with g.as_default():
    # Single input string (batch of 1).
    text = tf.placeholder(dtype=tf.string, shape=(1,))
    # Load the pretrained model from TF-Hub
    module = hub.Module(hub_module)
    # Get the word embeddings, activations at each layer, negative log likelihood
    # of the text, and calculate the perplexity.
    embeddings = module(dict(text=text), signature="word_embeddings", as_dict=True)["word_embeddings"]
    activations = module(dict(text=text), signature="activations", as_dict=True)["activations"]
    neg_log_likelihood = module(dict(text=text), signature="neg_log_likelihood", as_dict=True)["neg_log_likelihood"]
    # Perplexity = exp(mean per-token negative log likelihood).
    ppl = tf.exp(tf.reduce_mean(neg_log_likelihood, axis=1))
#@title Construct the per-token generation graph
def feedforward_step(module, inputs, mems):
    """Run one generation step of the hub module.

    inputs: token ids for this step; mems: list of per-layer memory
    tensors carried over from the previous step. Returns the next-token
    probabilities and the updated memories.
    """
    # The module's "prediction" signature expects int64 token ids.
    inputs = tf.dtypes.cast(inputs, tf.int64)
    generation_input_dict = dict(input_tokens=inputs)
    # One memory tensor per transformer layer, keyed "mem_0" .. "mem_11".
    mems_dict = {"mem_{}".format(i): mems[i] for i in range(n_layer)}
    generation_input_dict.update(mems_dict)
    # Generate the next-token distribution from the language model.
    generation_outputs = module(generation_input_dict, signature="prediction", as_dict=True)
    # Get the probabilities and the updated memories for the next step.
    probs = generation_outputs["probs"]
    new_mems = [generation_outputs["new_mem_{}".format(i)] for i in range(n_layer)]
    return probs, new_mems
#@title Build the statically unrolled graph for `max_gen_len` tokens
with g.as_default():
    # Tokenization with the sentencepiece model.
    token_ids = module(dict(text=text), signature="tokenization", as_dict=True)["token_ids"]
    inputs_np = token_ids
    # Generate text by statically unrolling the computational graph:
    # start with empty per-layer memories (length-0 along the time axis).
    mems_np = [np.zeros([1, 0, model_dim], dtype=np.float32) for _ in range(n_layer)]
    # Generate up to `max_gen_len` tokens, sampling one token per step
    # from the model's output distribution and feeding it back in.
    sampled_ids = []
    for step in range(max_gen_len):
        probs, mems_np = feedforward_step(module, inputs_np, mems_np)
        sampled_id = tf.random.categorical(tf.math.log(probs[0]), num_samples=1, dtype=tf.int32)
        sampled_id = tf.squeeze(sampled_id)
        sampled_ids.append(sampled_id)
        inputs_np = tf.reshape(sampled_id, [1, 1])
    # Transform the sampled ids back into text.
    sampled_ids = tf.expand_dims(sampled_ids, axis=0)
    generated_text = module(dict(token_ids=sampled_ids), signature="detokenization", as_dict=True)["text"]
    init_op = tf.group([tf.global_variables_initializer(), tf.tables_initializer()])
```
## テキストを生成する
テキストを生成しましょう!言語モデルを促すテキストの `seed` を設定します。
**事前定義済みの**シードを使うか、*別のオプションとして***独自のシードを入力**することができます。このテキストは、言語モデルが次に何を生成するのかを促すための言語モデルのシードとして使用されます。
生成される記事の特別な部分の前に、次の特別なトークンを使用することができます。**`_START_ARTICLE_`** は、記事の始まりを示し、**`_START_SECTION_`** はセクションの始まりを示し、**`_START_PARAGRAPH_`** は記事のテキストを生成します。
```
#@title Predefined Seeds
lang_to_seed = {"en": "\n_START_ARTICLE_\n1882 Prince Edward Island general election\n_START_PARAGRAPH_\nThe 1882 Prince Edward Island election was held on May 8, 1882 to elect members of the House of Assembly of the province of Prince Edward Island, Canada.",
"ar": "\n_START_ARTICLE_\nأوليفيا كوك\n_START_SECTION_\nنشأتها والتعلي \n_START_PARAGRAPH_\nولدت أوليفيا كوك في أولدهام في مانشستر الكبرى لأسرة تتكون من أب يعمل كظابط شرطة، وأمها تعمل كممثلة مبيعات. عندما كانت صغيرة بدأت تأخذ دروساً في الباليه الجمباز. وفي المدرسة شاركت في المسرحيات المدرسية، إضافةً إلى عملها في مسرح سندريلا . وفي سن الرابعة عشر عاماً، حصلت على وكيلة لها في مانشستر وهي وقعت عقداً مع وكالة الفنانين المبدعين في مانشستر،",
"zh-cn": "\n_START_ARTICLE_\n上尾事件\n_START_SECTION_\n日本国铁劳资关系恶化\n_START_PARAGRAPH_\n由于日本国铁财政恶化,管理层开始重整人手安排,令工会及员工感到受威胁。但日本国铁作为公营企业,其雇员均受公营企业等劳资关系法规管——该法第17条规定公营企业员工不得发动任何罢工行为。为了规避该法例",
"zh-tw": "\n_START_ARTICLE_\n乌森\n_START_PARAGRAPH_\n烏森(法語:Houssen,發音:[usən];德語:Hausen;阿爾薩斯語:Hüse)是法國上萊茵省的一個市鎮,位於該省北部,屬於科爾馬-里博維萊區(Colmar-Ribeauvillé)第二科爾馬縣(Colmar-2)。該市鎮總面積6.7平方公里,2009年時的人口為",
"nl": "\n_START_ARTICLE_\n1001 vrouwen uit de Nederlandse geschiedenis\n_START_SECTION_\nSelectie van vrouwen\n_START_PARAGRAPH_\nDe 'oudste' biografie in het boek is gewijd aan de beschermheilige",
"fr": "\n_START_ARTICLE_\nꝹ\n_START_SECTION_\nUtilisation\n_START_PARAGRAPH_\nLe d insulaire est utilisé comme lettre additionnelle dans l’édition de 1941 du recueil de chroniques galloises Brut y Tywysogion",
"de": "\n_START_ARTICLE_\nÜnal Demirkıran\n_START_SECTION_\nLaufbahn\n_START_PARAGRAPH_\nDemirkıran debütierte als junges Talent am 25. September 1999 im Auswärtsspiel des SSV Ulm 1846 bei Werder Bremen (2:2) in der Bundesliga, als er kurz",
"it": "\n_START_ARTICLE_\n28th Street (linea IRT Lexington Avenue)\n_START_SECTION_\nStoria\n_START_PARAGRAPH_\nLa stazione, i cui lavori di costruzione ebbero inizio nel 1900, venne aperta il 27 ottobre 1904, come",
"ja": "\n_START_ARTICLE_\nしのぶ・まさみshow'05 恋してラララ\n_START_SECTION_\n概要\n_START_PARAGRAPH_\n『上海ルーキーSHOW』の打ち切り後に放送された年末特番で、同番組MCの大竹しのぶと久本雅美が恋愛にまつわるテーマでトークや音楽企画を展開していた。基本は女",
"ko": "\n_START_ARTICLE_\n녹턴, Op. 9 (쇼팽)\n_START_SECTION_\n녹턴 3번 나장조\n_START_PARAGRAPH_\n쇼팽의 녹턴 3번은 세도막 형식인 (A-B-A)형식을 취하고 있다. 첫 부분은 알레그레토(Allegretto)의 빠르기가 지시되어 있으며 물 흐르듯이 부드럽게 전개되나",
"pl": "\n_START_ARTICLE_\nAK-176\n_START_SECTION_\nHistoria\n_START_PARAGRAPH_\nPod koniec lat 60 XX w. w ZSRR dostrzeżono potrzebę posiadania lekkiej armaty uniwersalnej średniego kalibru o stosunkowo dużej mocy ogniowej, która",
"pt": "\n_START_ARTICLE_\nÁcido ribonucleico\n_START_SECTION_\nIntermediário da transferência de informação\n_START_PARAGRAPH_\nEm 1957 Elliot Volkin e Lawrence Astrachan fizeram uma observação significativa. Eles descobriram que uma das mais marcantes mudanças",
"ru": "\n_START_ARTICLE_\nАрнольд, Ремо\n_START_SECTION_\nКлубная карьера\n_START_PARAGRAPH_\nАрнольд перешёл в академию «Люцерна» в 12 лет. С 2014 года выступал за вторую команду, где провёл пятнадцать встреч. С сезона 2015/2016 находится в составе основной команды. 27 сентября 2015 года дебютировал",
"es": "\n_START_ARTICLE_\n(200012) 2007 LK20\n_START_SECTION_\nDesignación y nombre\n_START_PARAGRAPH_\nDesignado provisionalmente como 2007 LK20.\n_START_SECTION_\nCaracterísticas orbitales\n_START_PARAGRAPH_\n2007 LK20",
"th": "\n_START_ARTICLE_\nการนัดหยุดเรียนเพื่อภูมิอากาศ\n_START_SECTION_\nเกรียตา ทืนแบร์ย\n_START_PARAGRAPH_\nวันที่ 20 สิงหาคม 2561 เกรียตา ทืนแบร์ย นักกิจกรรมภูมิอากาศชาวสวีเดน ซึ่งขณะนั้นศึกษาอยู่ในชั้นเกรด 9 (เทียบเท่ามัธยมศึกษาปีที่ 3) ตัดสินใจไม่เข้าเรียนจนกระทั่งการเลือกตั้งทั่วไปในประเทศสวีเดนปี",
"tr": "\n_START_ARTICLE_\nİsrail'in Muhafazakar Dostları\n_START_SECTION_\nFaaliyetleri\n_START_PARAGRAPH_\nGrubun 2005 stratejisi ile aşağıdaki faaliyet alanları tespit edilmiştir:_NEWLINE_İsrail'i destekleme",
"bg": "\n_START_ARTICLE_\nАвтомобил с повишена проходимост\n_START_SECTION_\nОсобености на конструкцията\n_START_PARAGRAPH_\nВ исторически план леки автомобили с висока проходимост се произвеждат и имат военно",
"ca": "\n_START_ARTICLE_\nAuchy-la-Montagne\n_START_SECTION_\nPoblació\n_START_PARAGRAPH_\nEl 2007 la població de fet d'Auchy-la-Montagne era de 469 persones. Hi havia 160 famílies de les quals 28",
"cs": "\n_START_ARTICLE_\nŘemeslo\n_START_PARAGRAPH_\nŘemeslo je určitý druh manuální dovednosti, provozovaný za účelem obživy, resp. vytváření zisku. Pro řemeslné práce je charakteristický vysoký podíl ruční práce, spojený s používáním specializovaných nástrojů a pomůcek. Řemeslné práce",
"da": "\n_START_ARTICLE_\nÖrenäs slot\n_START_PARAGRAPH_\nÖrenäs slot (svensk: Örenäs slott) er et slot nær Glumslöv i Landskrona stad tæt på Øresunds-kysten i Skåne i Sverige._NEWLINE_Örenäs ligger",
"el": "\n_START_ARTICLE_\nΆλβαρο Ρεκόμπα\n_START_SECTION_\nΒιογραφικά στοιχεία\n_START_PARAGRAPH_\nΟ Άλβαρο Ρεκόμπα γεννήθηκε στις 17 Μαρτίου 1976 στο Μοντεβίδεο της Ουρουγουάης από",
"et": "\n_START_ARTICLE_\nAus deutscher Geistesarbeit\n_START_PARAGRAPH_\nAus deutscher Geistesarbeit (alapealkiri Wochenblatt für wissenschaftliche und kulturelle Fragen der Gegenwart) oli ajakiri, mis 1924–1934 ilmus Tallinnas. Ajakirja andis 1932–1934",
"fa": "\n_START_ARTICLE_\nتفسیر بغوی\n_START_PARAGRAPH_\nایرانی حسین بن مسعود بغوی است. این کتاب خلاصه ای از تفسیر الکشف و البیان عن تفسیر القرآن ابواسحاق احمد ثعلبی میباشد. این کتاب در ۴ جلد موجود میباش",
"fi": "\n_START_ARTICLE_\nBovesin verilöyly\n_START_SECTION_\nVerilöyly\n_START_PARAGRAPH_\n19. syyskuuta 1943 partisaaniryhmä saapui Bovesiin tarkoituksenaan ostaa leipää kylästä. Kylässä sattui olemaan kaksi SS-miestä, jotka",
"he": "\n_START_ARTICLE_\nאוגדה 85\n_START_SECTION_\nהיסטוריה\n_START_PARAGRAPH_\nהאוגדה הוקמה בהתחלה כמשלט העמקים בשנות השבעים. בשנות השמונים הפכה להיות אוגדה מרחבית עם שתי",
"hi": "\n_START_ARTICLE_\nऑडी\n_START_SECTION_\nऑडी इंडिया\n_START_PARAGRAPH_\nऑडी इंडिया की स्थापना मार्च 2007 में फोक्सवैगन ग्रुप सेल्स इंडिया के एक विभाजन के रूप में की गई थी। दुनिया भर में 110",
"hr": "\n_START_ARTICLE_\nČimariko (jezična porodica)\n_START_PARAGRAPH_\nChimarikan.-porodica sjevernoameričkih indijanskih jezika koja prema Powersu obuhvaća jezike Indijanaca Chimariko (Chemaŕeko) sa rijeke Trinity i Chimalakwe",
"hu": "\n_START_ARTICLE_\nÁllami Politikai Igazgatóság\n_START_PARAGRAPH_\nAz Állami Politikai Igazgatóság (rövidítve: GPU, oroszul: Государственное политическое управление), majd később Egyesített Állami Politikai Igazgatóság Szovjet-Oroszország",
"id": "\n_START_ARTICLE_\n(257195) 2008 QY41\n_START_SECTION_\nPembentukan\n_START_PARAGRAPH_\nSeperti asteroid secara keseluruhan, asteroid ini terbentuk dari nebula matahari primordial sebagai pecahan planetisimal, sesuatu di",
"lt": "\n_START_ARTICLE_\nŠavijos–Uardigo regionas\n_START_SECTION_\nGeografija\n_START_PARAGRAPH_\nŠavijos-Uardigo regionas yra Atlanto vandenynu pakrantės lygumoje",
"lv": "\n_START_ARTICLE_\nApatīts\n_START_SECTION_\nĪpašības\n_START_PARAGRAPH_\nApatīta kopējā ķīmiskā formula ir Ca₁₀(PO₄)₆(OH,F,Cl)₂, ir trīs atšķirīgi apatīta veidi: apatīts: Ca₁₀(PO₄)₆(OH)₂, fluorapatīts Ca₁₀(PO₄)₆(F)₂ un hlorapatīts: Ca₁₀(PO₄)₆(Cl)₂. Pēc sastāva",
"ms": "\n_START_ARTICLE_\nEdward C. Prescott\n_START_PARAGRAPH_\nEdward Christian Prescott (lahir 26 Disember 1940) ialah seorang ahli ekonomi Amerika. Beliau menerima Hadiah Peringatan Nobel dalam Sains Ekonomi pada tahun 2004, berkongsi",
"no": "\n_START_ARTICLE_\nAl-Minya\n_START_SECTION_\nEtymologi\n_START_PARAGRAPH_\nDet er sprikende forklaringer på bynavnet. Det kan komme fra gammelegyptisk Men'at Khufu, i betydning byen hvor Khufu ble ammet, noe som knytter byen til farao Khufu (Keops), som",
"ro": "\n_START_ARTICLE_\nDealurile Cernăuțiului\n_START_PARAGRAPH_\nDealurile Cernăuțiului sunt un lanț deluros striat, care se întinde în partea centrală a interfluviului dintre Prut și Siret, în cadrul regiunii Cernăuți din",
"sk": "\n_START_ARTICLE_\n10. peruť RAAF\n_START_PARAGRAPH_\n10. peruť RAAF je námorná hliadkovacia peruť kráľovských austrálskych vzdušných síl (Royal Australian Air Force – RAAF) založená na základni Edinburgh v Južnej Austrálii ako súčasť 92",
"sl": "\n_START_ARTICLE_\n105 Artemida\n_START_SECTION_\nOdkritje\n_START_PARAGRAPH_\nAsteroid je 16. septembra 1868 odkril James Craig Watson (1838 – 1880). Poimenovan je po Artemidi, boginji Lune iz grške",
"sr": "\n_START_ARTICLE_\nЉанос Морелос 1. Сексион (Истапангахоја)\n_START_SECTION_\nСтановништво\n_START_PARAGRAPH_\nПрема подацима из 2010. године у насељу је живело 212",
"sv": "\n_START_ARTICLE_\nÖstra Torps landskommun\n_START_SECTION_\nAdministrativ historik\n_START_PARAGRAPH_\nKommunen bildades i Östra Torps socken i Vemmenhögs härad i Skåne när 1862 års kommunalförordningar trädde i kraft. _NEWLINE_Vid kommunreformen",
"tl": "\n_START_ARTICLE_\nBésame Mucho\n_START_PARAGRAPH_\nAng Bésame Mucho ay isang awit na nasa Kastila. Isinulat ito ng Mehikanang si Consuelo Velázquez noong 1940, bago sumapit ang kanyang ika-16 na",
"uk": "\n_START_ARTICLE_\nІслам та інші релігії\n_START_PARAGRAPH_\nПротягом багатовікової ісламської історії мусульманські правителі, ісламські вчені і звичайні мусульмани вступали у різні відносини з представниками інших релігій. Стиль цих",
"vi": "\n_START_ARTICLE_\nĐường tỉnh 316\n_START_PARAGRAPH_\nĐường tỉnh 316 hay tỉnh lộ 316, viết tắt ĐT316 hay TL316, là đường tỉnh ở các huyện Thanh Sơn, Thanh Thủy, Tam Nông tỉnh Phú Thọ ._NEWLINE_ĐT316 bắt đầu từ xã Tinh Nhuệ",
"multilingual-64k": "\n_START_ARTICLE_\n1882 Prince Edward Island general election\n_START_PARAGRAPH_\nThe 1882 Prince Edward Island election was held on May 8, 1882 to elect members of the House of Assembly of the province of Prince Edward Island, Canada.",
"multilingual-128k": "\n_START_ARTICLE_\n1882 Prince Edward Island general election\n_START_PARAGRAPH_\nThe 1882 Prince Edward Island election was held on May 8, 1882 to elect members of the House of Assembly of the province of Prince Edward Island, Canada."}
# Default seed for the chosen language (dict defined in the cell above).
seed = lang_to_seed[language]
#@title Enter your own seed (Optional).
user_seed = "" #@param { type: "string" }
# A non-empty user seed overrides the predefined one.
if user_seed.strip():
    seed = user_seed.strip()
# The seed must start with "_START_ARTICLE_" or the generated text will be gibberish
START_ARTICLE = "_START_ARTICLE_"
if START_ARTICLE not in seed:
    seed = "\n{}\n{}".format(START_ARTICLE, seed)
print("Generating text from seed:\n{}".format(seed))
#@title Initialize session.
with tf.Session(graph=g).as_default() as session:
    session.run(init_op)
#@title Generate text
with session.as_default():
    # One session.run evaluates every output we care about from the seed.
    results = session.run([embeddings, neg_log_likelihood, ppl, activations, token_ids, generated_text], feed_dict={text: [seed]})
    embeddings_result, neg_log_likelihood_result, ppl_result, activations_result, token_ids_result, generated_text_result = results
    # generated_text is a batch of UTF-8 byte strings; decode entry 0.
    generated_text_output = generated_text_result[0].decode('utf-8')
    print(generated_text_output)
```
また、モデルのほかの出力を見ることも可能です。複雑さ、トークン ID、中間アクティベーション、および埋め込みがあります。
```
# Inspect the other outputs computed above:
ppl_result              # perplexity of the seed text
token_ids_result        # sentencepiece token ids of the seed
activations_result.shape  # per-layer activations
embeddings_result       # word embeddings of the seed tokens
```
| github_jupyter |
<a href="https://colab.research.google.com/github/gracecarrillo/Political-Data-Science/blob/master/Feature_Engineering_Sentiment_Analysis_Scotref2.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# Scottish independence: Twitter data Sentiment Analysis
## 4. Feature Engineering
- Sentiment Score with Vader
- Part of Speech Tags (POS)
```
# Must be upgraded
!pip install tqdm==4.36.1 --upgrade
!pip install --upgrade gensim
!pip install vaderSentiment
from google.colab import drive
drive.mount('/content/drive')
# general
import os
import pandas as pd
import numpy as np
import csv
import string
import matplotlib.pyplot as plt
import seaborn as sns
import random
import itertools
import collections
from collections import Counter
# tweets
import tweepy as tw
import re
from collections import Counter
from string import punctuation
from tweepy import OAuthHandler
import json
# text manipulation
import nltk
from nltk.corpus import stopwords
from nltk.tokenize import TweetTokenizer
from nltk.stem.wordnet import WordNetLemmatizer
from nltk.stem.porter import PorterStemmer
from nltk.stem.porter import *
# plots
from wordcloud import WordCloud
import plotly
import chart_studio.plotly as py
import plotly.graph_objs as go
from plotly.offline import download_plotlyjs, init_notebook_mode, plot, iplot
import cufflinks as cf
cf.go_offline()
# Feature Engineering
from vaderSentiment.vaderSentiment import SentimentIntensityAnalyzer
# Machine Learning
from sklearn.pipeline import Pipeline
from sklearn.pipeline import FeatureUnion
from sklearn.model_selection import train_test_split
from sklearn.metrics import classification_report, confusion_matrix, accuracy_score
from sklearn.model_selection import KFold, cross_val_score
from sklearn.ensemble import RandomForestClassifier
from sklearn.naive_bayes import MultinomialNB
from sklearn.svm import LinearSVC
from sklearn.model_selection import GridSearchCV
# NOTE: `sklearn.externals.joblib` was deprecated in scikit-learn 0.21 and
# removed in 0.23; import the standalone joblib package (a sklearn dependency)
# instead. The `joblib` name stays bound exactly as before for later cells.
import joblib
from sklearn.base import BaseEstimator, TransformerMixin
from sklearn.feature_extraction.text import TfidfVectorizer, HashingVectorizer, CountVectorizer
from sklearn.preprocessing import MinMaxScaler
from sklearn.preprocessing import LabelEncoder
# For geoplots
from IPython.display import IFrame
import folium
from folium import plugins
from folium.plugins import MarkerCluster, FastMarkerCluster, HeatMapWithTime
import networkx
# hide warnings
import warnings
warnings.filterwarnings("ignore")
# set plot preferences
plt.style.use(style='ggplot')
plt.rcParams['figure.figsize'] = (10, 6)
pd.set_option("display.max_colwidth", 200)
print('Libraries imported')
%matplotlib inline
```
## 4. Feature Engineering
To analyse a preprocessed data, it needs to be converted into features. Depending upon the usage, text features can be constructed using assorted techniques like Bag of Words, TF-IDF, and Word Embeddings.
A basic approach to Bag of Words will not be able to capture the difference between “I like you”, where “like” is a verb with a positive sentiment, and “I am like you”, where “like” is a preposition with a neutral sentiment.
To improve this technique we'll extract features using Vader's Polarity Scores and Part of Speech (POS) tags.
### 4.1 Sentiment Score with Vader
The Vader sentiment analysis tool belongs to a type of sentiment analysis that is based on lexicons of sentiment-related words. It uses a bag-of-words approach (a lookup table of positive and negative words), but in this approach each of the words in the lexicon is rated as to whether it is positive or negative, and in many cases, how positive or negative.
VADER produces four sentiment metrics. The first three — positive, neutral and negative — are self-explanatory. The final metric, the compound score, is the sum of all of the lexicon ratings, standardised to range between -1 and 1.
For the compound score:
- positive sentiment : (compound score >= 0.05)
- neutral sentiment : (compound score > -0.05) and (compound score < 0.05)
- negative sentiment : (compound score <= -0.05)
We'll use these scores to create features based on the sentiment metrics of our tweets, which will then be used as additional features for modeling.
```
# Load data set
train = pd.read_csv('/content/drive/My Drive/Twitter_Project/cleaned_train_data.csv')
train.info()
train.dropna(subset=['tidy_tweet'], inplace=True)
analyser = SentimentIntensityAnalyzer()
def polarity_scores_all(tweet):
    '''
    Takes an iterable of tweet strings and:
    1. Computes VADER sentiment metrics for each tweet.
    2. Returns the negative, neutral, positive and
       compound scores as four parallel lists.
    '''
    # Fresh analyzer per call, as in the rest of this notebook.
    scorer = SentimentIntensityAnalyzer()
    # Score every tweet once, then split the metric dicts into four lists.
    all_metrics = [scorer.polarity_scores(text) for text in tweet]
    neg = [m['neg'] for m in all_metrics]
    neu = [m['neu'] for m in all_metrics]
    pos = [m['pos'] for m in all_metrics]
    compound = [m['compound'] for m in all_metrics]
    return neg, neu, pos, compound
all_scores = polarity_scores_all(train.tidy_tweet.values)
train['neg_scores'] = all_scores[0]
train['neu_scores'] = all_scores[1]
train['pos_scores'] = all_scores[2]
train['compound_scores'] = all_scores[3]
train.head(4)
```
### 4.1 Part of Speech Tags (POS)
Part of Speech tagging (POS) is where a part of speech is assigned to each word in a list using context clues. This is useful because the same word with a different part of speech can have two completely different meanings. Is the process of marking up a word in a corpus to a corresponding part of a speech tag, based on its context and definition. This task is not straightforward, as a particular word may have a different part of speech based on the context in which the word is used.
We'll use a simple lexical based method that assigns the POS tag to the most frequently occurring word in the training corpus and add the tags as features in our model.
```
nltk.download('averaged_perceptron_tagger')
# To transform pos tags to readable tags
pos_family = {
'NOUN' : ['NN','NNS','NNP'], # Removed 'NNPS'
'PRON' : ['PRP','PRP$','WP','WP$'],
'VERB' : ['VB','VBD','VBG','VBN','VBP','VBZ'],
'ADJ' : ['JJ','JJR','JJS'],
'ADV' : ['RB','RBR','RBS','WRB']
}
def count_pos_tag(tweets):
    '''
    Takes an iterable of tweet strings and:
    1. Tokenizes each tweet (whitespace split) and attaches POS tags.
    2. Counts how often each POS tag occurs within the tweet.
    3. Returns a list with one {tag: count} dict per tweet.
    '''
    total_count = []
    for s in tweets:
        # Count the tags directly from the (word, tag) pairs.
        # The previous implementation built dict(nltk.pos_tag(s)), which
        # collapses duplicate words, and then overwrote every Counter value
        # with 1 — so each tag was reported at most once per tweet instead
        # of with its true occurrence count.
        tag_counts = Counter(tag for _, tag in nltk.pos_tag(s.split()))
        total_count.append(dict(tag_counts))
    return total_count
# Retrieve POS tags with occurrence
total_count = count_pos_tag(train.tidy_tweet.values)
# As dataframe
pos_df = pd.DataFrame(total_count)
# Remove unwanted characters
pos_df = pos_df.drop(['$', 'IN'], axis = 1) #drop '$' if needed
# Inspection
pos_df.columns
# Change tags to readable tags
pos_df['NOUN'] = pos_df[pos_family['NOUN']].sum(axis=1)
pos_df['PRON'] = pos_df[pos_family['PRON']].sum(axis=1)
pos_df['VERB'] = pos_df[pos_family['VERB']].sum(axis=1)
pos_df['ADJ'] = pos_df[pos_family['ADJ']].sum(axis=1)
pos_df['ADV'] = pos_df[pos_family['ADV']].sum(axis=1)
pos_df = pos_df[['NOUN', 'PRON', 'VERB', 'ADJ', 'ADV']]
# Add to end of original data set as new features
train = pd.concat([train, pos_df], axis = 1)
# Deal with NaN
train = train.fillna(value=0.0)
#train = train.fillna(value=0.0)
train.shape
# Remove duplicates
train.drop_duplicates(subset=['tidy_tweet'], inplace=True)
# Check new features
train.info()
train.head(5)
# Saving preprocessed dataset
train.to_csv('/content/drive/My Drive/Twitter_Project/feat_eng_train_data.csv', index=False)
```
| github_jupyter |
#Homework: Spark SQL
In this homework you will gain a mastery of using Spark SQL. The homework can be run locally or on an EMR cluster. The current version is for running locally.
The goal of the homework will be to create a training dataset for a Random Forest Machine learning model. The training data set will contain the monthly number of employees hired by companies in `linkedin.json` and their corresponding closing stock prices over a 10+ year period (1970-2018 `stock_prices.csv`). We will try and predict, based on this data, if the company will have a positive or negative growth in stock in the first quarter of the next year. Who's ready to make some money?
## Notes
Before we begin here are some important notes to keep in mind,
1. You are **required** to use Spark SQL queries to handle the data in the assignment. Mastering SQL is more beneficial than being able to use Spark commands (functions) as it will show up in more areas of programming and data science/analytics than just Spark. Use the following [function list](https://spark.apache.org/docs/latest/api/sql/index.html#) to see all the SQL functions available in Spark.
2. There are portions of this homework that are _very_ challenging.
```
%%capture
!apt update
!apt install gcc python-dev libkrb5-dev
!pip install sparkmagic
!pip install pyspark
import os
os.environ["JAVA_HOME"] = "/usr/lib/jvm/java-8-openjdk-amd64"
import pyspark
from pyspark.sql import SparkSession
from pyspark.sql.types import *
import pyspark.sql.functions as F
from pyspark.sql import SQLContext
import json
import urllib.request
from datetime import datetime
# Reuse the kernel's existing SparkSession if one was already created;
# referencing an undefined [spark] raises NameError, in which case we
# build a fresh session and SQLContext.
try:
  if(spark == None):
    spark = SparkSession.builder.appName('Graphs').getOrCreate()
    sqlContext=SQLContext(spark)
except NameError:
  # [spark] was never defined in this kernel yet.
  spark = SparkSession.builder.appName('Graphs').getOrCreate()
  sqlContext=SQLContext(spark)
from pyspark.sql.types import *
```
## Step 1: Data Cleaning and Shaping
When used for single machine like Colab, you should mount Google Drive to Colab and visit the data file locally.
If used remotely, please refer to the 'remote' version of the notebook. For remote version, the data you will use is stored in an S3 bucket, a cloud storage service. You now need to download it onto the nodes of your [EMR cluster](https://docs.aws.amazon.com/emr/latest/ManagementGuide/emr-what-is-emr.html).
### Step 1.1: The Stupendous Schema
When loading data, Spark will try to infer the structure. This process is faulty because it will sometimes infer the type incorrectly. JSON documents, like the one we will use, can have nested types, such as: arrays, arrays of dictionaries, dictionaries of dictionaries, etc. Spark's ability to determine these nested types is not reliable, thus you will define a schema for `linkedin.json`.
A schema is a description of the structure of data. You will be defining an explicit schema for `linkedin.json`. In Spark, schemas are defined using a `StructType` object. This is a collection of data types, termed `StructField`s, that specify the structure and variable type of each component of the dataset. For example, suppose we have the following simple JSON object,
```
{
"student_name": "Leonardo Murri",
"GPA": 1.4,
"courses": [
{"department": "Computer and Information Science",
"course_id": "CIS 545",
"semester": "Fall 2018"},
{"department": "Computer and Information Science",
"course_id": "CIS 520",
"semester": "Fall 2018"},
{"department": "Electrical and Systems Engineering",
"course_id": "ESE 650",
"semester": "Spring 2018"}
],
"grad_year": 2019
}
```
We would define its schema as follows,
```
schema = StructType([
StructField("student_name", StringType(), nullable=True),
StructField("GPA", FloatType(), nullable=True),
StructField("courses", ArrayType(
StructType([
StructField("department", StringType(), nullable=True),
StructField("course_id", StringType(), nullable=True),
StructField("semester", StringType(), nullable=True)
])
), nullable=True),
StructField("grad_year", IntegerType(), nullable=True)
])
```
Each `StructField` has the following structure: `(name, type, nullable)`. The `nullable` flag defines that the specified field may be empty. Your first task is to define the `schema` of `linkedin.json`.
_Note_: In `linkedin.json` the field `specilities` is spelled incorrectly. This is **not** a typo.
```
# TODO: Define [linkedin.json] schema
# YOUR CODE HERE
```
### Step 1.2: The Laudable Loading
Load the `linkedin.json` dataset into a Spark dataframe (sdf) called `raw_data_sdf`. If you have constructed `schema` correctly `spark.read.json()` will read in the dataset. ***You do not need to edit this cell***.
```
from google.colab import drive
drive.mount('/content/drive', force_remount=True)
raw_data_sdf = spark.read.json('/content/drive/My Drive/Colab Notebooks/test_data_10000.json', schema=schema)
raw_data_sdf.show(10)
raw_data_sdf.where(F.col('_id').isNull()).count()
```
The cell below shows how to run SQL commands on Spark tables. Use this as a template for all your SQL queries in this notebook. ***You do not need to edit this cell***.
```
# Create SQL-accesible table
raw_data_sdf.createOrReplaceTempView("raw_data")
# Declare SQL query to be excecuted
query = '''SELECT *
FROM raw_data'''
# Save the output sdf of spark.sql() as answer_sdf
answer_sdf = spark.sql(query)
# Display the first 10 rows
answer_sdf.show(10)
```
### Step 1.3: The Extravagent Extraction
In our training model, we are interested in when individuals began working at a company. From creating the schema, you should notice that the collection of companies individuals worked at is contained in the `experience` field as an array of dictionaries. You should use the `org` for the company name and `start` for the start date. Here is an example of an `experience` field,
```
{
"experience": [
{
"org": "The Walt Disney Company",
"title" : "Mickey Mouse",
"end" : "Present",
"start": "November 1928",
"desc": "Sailed a boat."
},
{
"org": "Walt Disney World Resort",
"title": "Mickey Mouse Mascot",
"start": "January 2005",
"desc": "Took pictures with kids."
}
]
}
```
Your task is to extract each pair of company and start date from these arrays. In Spark, this is known as "exploding" a row. An explode will separate the elements of an array into multiple rows.
Create an sdf called `raw_start_dates_sdf` that contains the company and start date for every experience of every individual in `raw_data_sdf`. Drop any row that contains a `null` in either column with `dropna()`. You can sort the elements however you wish (you don't need to if you don't want to). The sdf should look as follows:
```
+--------------------------+---------------+
|org |start_date |
+--------------------------+---------------+
|Walt Disney World Resort |January 2005 |
|The Walt Disney Company |November 1928 |
|... |... |
+--------------------------+---------------+
```
_Hint_: You may want to do two separate explodes for `org` and `start`. In an explode, the position of the element in the array can be extracted as well, and used to merge two separate explodes. Reference the [function list](https://spark.apache.org/docs/2.3.0/api/sql/index.html).
_Note_: Some of the entries in `org` are "weird", i.e. made up of non-english letters and characters. Keep them. **DO NOT** edit any name in the original dataframe unless we specify. **DO NOT** drop any row unless there is a `null` value as stated before. This goes for the rest of the homework as well, unless otherwise specified.
```
# TODO: Create [raw_start_dates_sdf]
##YOUR ANSWER HERE
raw_start_dates_sdf.show(4)
```
### Step 1.4: The Fortuitous Formatting
There are two issues with the values in our `date` column. First, the values are saved as strings, not datetime types. This keeps us from running functions such as `ORDER BY` or `GROUP BY` on common months or years. Second, some values do not have both month and year information or are in other languages. Your task is to filter out and clean the `date` column. We are interested in only those rows that have date in the following format "(month_name) (year)", e.g. "October 2010".
Create an sdf called `filtered_start_dates_sdf` from `raw_start_dates_sdf` with the `date` column filtered in the manner above. Keep only those rows with a start date from January 2000 to December 2011, inclusive. Ensure that any dates that are not in our desired format are omitted. Drop any row that contains a `null` in either column. The format of the sdf is shown below:
```
+--------------------------+---------------+
|org |start_date |
+--------------------------+---------------+
|Walt Disney World Resort |2005-01-01 |
|... |... |
+--------------------------+---------------+
```
_Hint_: Refer to the [function list](https://spark.apache.org/docs/2.3.0/api/sql/index.html) to format the `date` column. In Spark SQL the date format we are interested in is `"MMM y"`.
_Note_: Spark will return the date in the format above, with the day as `01`. This is ok, since we are interested in the month and year each individual began working and all dates will have `01` as their day.
```
# TODO: Create [filtered_start_dates_sdf]
## YOUR ANSWER HERE
filtered_start_dates_sdf.show()
```
### Step 1.5 The Gregarious Grouping
We now want to collect the number of individuals that started in the same month and year for each company. Create an sdf called `start_dates_sdf` that has the total number of employees who began working at the same company on the same start date. The format of the sdf is shown below:
```
+--------------------------+---------------+---------------+
|org |start_date |num_employees |
+--------------------------+---------------+---------------+
|Walt Disney World Resort |2005-01-01 |1 |
|... |... |... |
+--------------------------+---------------+---------------+
```
```
# TODO: Create [start_dates_sdf]
## YOUR ANSWER HERE
start_dates_sdf.show()
```
## Step 2: Hiring Trends Analysis
Now we will analyze `start_dates_sdf` to find monthly and annual hiring trends.
### Step 2.1: The Marvelous Months
Your task is to answer the question: "On average, what month do most employees start working?" Create an sdf called `monthly_hires_sdf` which contains the total number of employees that started working on a specific month, at any company and on any year. The `month` column should be of type `int`, i.e. 1-12. The format of the sdf is shown below:
```
+---------------+---------------+
|month |num_employees |
+---------------+---------------+
|1 |... |
|2 |... |
|3 |... |
|... |... |
+---------------+---------------+
```
Find the month in which the most employees start working and save its number as an integer to the variable `most_common_month`.
_Hint_: Be careful. The start dates we have right now have both month and year. We only want the common months. See if you can find something in the [function list](https://spark.apache.org/docs/2.3.0/api/sql/index.html) that will help you do this.
```
# TODO: Create [monthly_hire_sdf] and find the most common month people were
# hired. Save its number as an integer to [most_common_month]
## YOUR ANSWER HERE
monthly_hires_sdf.show()
```
### Step 2.2: The Preposterous Percentages
The next question we will answer is "What is the percentage change in hires between 2010 and 2011 for each company?" Create an sdf called `percentage_change_sdf` that has the percentage change between 2010 and 2011 for each company. The sdf should look as follows:
```
+---------------------------+--------------------+
|org |percentage_change |
+---------------------------+--------------------+
|Walt Disney World Resort |12.3 |
|... |... |
+---------------------------+--------------------+
```
_Note_: A percentage change can be positive or negative depending
on the difference between the two years.The formula for percent change is given below,
$$\text{% change} = \frac{P_f-P_i}{P_f} \times 100$$
Here, $P_f$ is the final element (in this case the number of hires in 2011) and $P_i$ is initial element (the number of hires in 2010).
_Hint_: This is a **difficult** question. We recommend using a combination of `GROUP BY` and `JOIN`. Keep in mind that operations between columns in SQL dataframes are often easier than those between rows.
```
# TODO: Create [percentage_change_sdf]
## YOUR ANSWER HERE
percentage_change_sdf.where(F.col('percentage_change')>0).show()
percentage_change_sdf.show()
```
## Step 3: Formatting the Training Data
Our overaching goal is to train a machine learning (ML) model that will use the monthly hiring trends of a company to predict a positive or negative gain in the company's stock in the first quarter of the following year. A ML model is trained on a set of observations. Each observation contains a set of features, `X`, and a label, `y`. The goal of the ML model is to create a function that takes any `X` as an input and outputs a predicted `y`.
The machine learning model we will use is a [Random Forest Classifier](https://builtin.com/data-science/random-forest-algorithm). Each observation we will pass in will have 24 features (columns). These are the number of people hired from Jan to Dec and the company stock price on the last day of each month. The label will be the direction of the company's stock percentage change (positive, `1`, or negative, `-1`) in the first quarter of the following year. Each observation will correspond to a specified company's trends on a specified year. The format of our final training sdf is shown below. The first 26 columns define our observations, `X`, and the last column the label, `y`.
```
+----+-----+----------+---------+----------+----------+---------+----------+-------------+
|org |year |jan_hired | ... |dec_hired |jan_stock | ... |dec_stock |stock_result |
+----+-----+----------+---------+----------+----------+---------+----------+-------------+
|IBM |2008 |... | ... |... |... | ... |... |1 |
|IBM |2009 |... | ... |... |... | ... |... |-1 |
|... |... |... | ... |... |... | ... |... |... |
+----+-----+----------+---------+----------+----------+---------+----------+-------------+
```
_Note_: We will use the first three letters of each month in naming, i.e. `jan, feb, mar, apr, may, jun, jul, aug, sep, oct, nov, dec`
### Step 3.1: The Harmonious Hires
Your first task is to create the first half of the training table, i.e. the `jan_hired` through `dec_hired` columns. This will involve reshaping `start_dates_sdf`. Currently, `start_dates_sdf` has columns `org`, `start_date`, and `num_employees`. We want to group the rows together based on common `org` and years and create new columns for the number of employees that started working in each month of that year.
Create an sdf called `raw_hirings_for_training_sdf` that has for a single company and a single year, the number of hires in Jan through Dec, and the total number of hires that year. Note that for each company you will have several rows corresponding to years between 2000 and 2011. It is ok if for a given company you don't have a given year. However, ensure that for a given company and given year, each month column has an entry, i.e. if no one was hired the value should be `0`. The format of the sdf is shown below:
```
+----+-----+----------+---------+----------+----------+
|org |year |jan_hired | ... |dec_hired |total_num |
+----+-----+----------+---------+----------+----------+
|IBM |2008 |... | ... |... |... |
|IBM |2009 |... | ... |... |... |
|... |... |... | ... |... |... |
+----+-----+----------+---------+----------+----------+
```
_Hint_: This is a **difficult** question. The tricky part is creating the additional columns of monthly hires, specifically when there are missing dates. In our dataset, if a company did not hire anybody in a given date, it will not appear in `start_dates_sdf`. We suggest you look into `CASE` and `WHEN` statements in the [function list](https://spark.apache.org/docs/2.3.0/api/sql/index.html).
```
# TODO: Create [raw_hire_train_sdf]
## YOUR SOLUTION HERE
raw_hire_train_sdf.show()
```
### Step 3.2: The Formidable Filters
Create an sdf called `hire_train_sdf` that contains all the observations in `raw_hire_train_sdf` with `total_num` greater than or equal to 10. The format of the sdf is shown below:
```
+----+-----+----------+---------+----------+----------+
|org |year |jan_hired | ... |dec_hired |total_num |
+----+-----+----------+---------+----------+----------+
|IBM |2008 |... | ... |... |... |
|IBM |2009 |... | ... |... |... |
|... |... |... | ... |... |... |
+----+-----+----------+---------+----------+----------+
```
```
# TODO: Create [hire_train_sdf]
##YOUR SOLUTION HERE
hire_train_sdf.show()
```
### Step 3.3: The Stupendous Stocks
Now we are ready for the stock data. The stock data we will use is saved in the same S3 bucket as `linkedin.json`. Load the data into the EMR cluster. Run the cell below. ***You do not need to edit this cell***.
```
# Load stock data
raw_stocks_sdf = spark.read.format("csv") \
.option("header", "true") \
.load("./drive/My Drive/Colab Notebooks/stock_prices.csv")
# Creates SQL-accesible table
raw_stocks_sdf.createOrReplaceTempView('raw_stocks')
# Display the first 10 rows
query = '''SELECT *
FROM raw_stocks'''
spark.sql(query).show(10)
```
Run the cell below to see the types of the columns in our data frame. These are not correct. We could have defined a schema when reading in data but we will handle this issue in another manner. You will do this in Step 3.4.2.
```
# Print types of SDF
raw_stocks_sdf.dtypes
```
### Step 3.4 The Clairvoyant Cleaning
We now want to format the stock data set into the second half of the training table. We will then merge it with `hire_train` based off the common `org` and `year` fields.
#### Step 3.4.1 The Ubiquitous UDF
The companies in our stock dataset are defined by their stock tickers. Thus, we would not be able to merge it with the `org` field in `hire_train_sdf`. We must convert them to that format. Often times when using Spark, there may not be a built-in SQL function that can do the operation we desired. Instead, we can create one on our own with a user-defined function (udf).
A udf is defined as a normal Python function and then registered to be used as a Spark SQL function. Your task is to create a udf, `TICKER_TO_NAME()` that will convert the ticker field in `raw_stocks` to the company's name. This will be done using the provided `ticker_to_name_dict` dictionary. We are only interested in the companies in that dictionary.
Fill out the function `ticker_to_name()` below. Then use `spark.udf.register()` to register it as a SQL function. The command is provided. ***You do not need to edit it***. Note, we have defined the udf as returning `StringType()`. Ensure that your function returns this. You must also deal with any potential `null` cases.
```
# TODO: Fill out [ticker_to_name()] and register it as a udf.
## YOUR SOLUTION HERE
def ticker_to_name(ticker):
# Register udf as a SQL function. DO NOT EDIT
spark.udf.register("TICKER_TO_NAME", ticker_to_name, StringType())
```
#### Step 3.4.2: The Fastidious Filters
With our new `TICKER_TO_NAME()` function we will begin to wrangle `raw_stocks_sdf`.
Create an sdf called `filter_1_stocks_sdf` as follows. Convert all the ticker names in `raw_stocks_sdf` to the company names and save it as `org`. Next, convert the `date` field to a datetime type. As explained before this will help order and group the rows in future steps. Then, convert the type of the values in `closing_price` to `float`. This will take care of the `dtypes` issue we saw in Step 3.3.
Drop any company names that do not appear in `ticker_to_name_dict`. Keep any date between January 1st 2001 and December 4th 2012 inclusive, in the format shown below (note this is a datetime object not a string):
```
+----+------------+--------------+
|org |date |closing_price |
+----+------------+--------------+
|IBM |2000-01-03 |... |
|... |... |... |
+----+------------+--------------+
```
_Hint_: You will use a similar function to filter the dates as in Step 1.4. In Spark SQL the format for the `date` field in `raw_stocks_sdf` is `"yyyy-MM-dd"`.
```
# TODO: Create [filter_1_stocks_sdf]
## YOUR SOLUTION HERE
filter_1_stocks_sdf.show()
```
#### Step 3.4.3: The Magnanimous Months
The data in `filter_1_stocks_sdf` gives closing prices on a daily basis. Since we are interested in monthly trends, we will only keep the closing price on the **last trading day of each month**.
Create an sdf `filter_2_stocks_sdf` that contains only the closing prices for the last trading day of each month. Note that a trading day is not simply the last day of each month, as this could be on a weekend when the market is closed . The format of the sdf is shown below:
```
+----+------------+--------------+
|org |date |closing_price |
+----+------------+--------------+
|IBM |2000-01-31 |... |
|... |... |... |
+----+------------+--------------+
```
_Hint_: It may be helpful to create an intermediate dataframe that will help you filter out the specific dates you desire.
```
# TODO: Create [filter_2_stocks_sdf]
## YOUR SOLUTION HERE
filter_2_stocks_sdf.show()
```
#### Step 3.4.4: The Rambunctious Reshape
Now, we will begin to shape our dataframe into the format of the final training sdf.
Create an sdf `filter_3_stocks_sdf` that has for a single company and a single year, the closing stock price for the last trading day of each month in that year. This is similar to the table you created in Step 3.1. In this case since we cannot make a proxy for the closing price if the data is not available, drop any rows containing any `null` values, in any column. The format of the sdf is shown below:
```
+----+-----+----------+---------+----------+
|org |year |jan_stock | ... |dec_stock |
+----+-----+----------+---------+----------+
|IBM |2008 |... | ... |... |
|IBM |2009 |... | ... |... |
|... |... |... | ... |... |
+----+-----+----------+---------+----------+
```
```
# TODO: Create [filter_3_stocks_sdf]
## YOUR SOLUTION HERE
filter_3_stocks_sdf.show()
```
#### Step 3.4.5: The Decisive Direction
The final element in our training set is the binary output for each case, i.e. the `y` label.
Create an sdf `stocks_train_sdf` from `filter_3_stocks_sdf` with an additional column `direction`. This should be the direction of percentage change in the closing stock price, i.e. `1` for positive or `-1` for negative, in the first quarter of a given year. The quarter of a year begins in January and ends in April, inclusive. We want to know the percent change between these two months. Reference Step 2.2 for the percent change formula. The format of the sdf is shown below:
```
+----+-----+----------+---------+----------+-------------+
|org |year |jan_stock | ... |dec_stock |direction |
+----+-----+----------+---------+----------+-------------+
|IBM |2008 |... | ... |... |1.0 |
|IBM |2009 |... | ... |... |-1.0 |
|... |... |... | ... |... |... |
+----+-----+----------+---------+----------+-------------+
```
```
# TODO: Create [stocks_train_sdf]
## YOUR SOLUTION HERE
stocks_train_sdf.show()
```
### Step 3.5: The Capricious Combination
Now that we have individually created the two halves of our training data, we will merge them together to create the final training sdf we showed in the beginning of Step 3.
Create an sdf called `training_sdf` in the format of the one shown at the beginning of Step 3. Note that in our definition for the `stock_result` column, the `stock_result` value for a particular year corresponds to the direction of the stock percentage change in the **following** year. For example, the stock_result in the `2008` row for `IBM` will contain the direction of IBM's stock in the first quarter of 2009. The format of the sdf is shown below:
```
+----+-----+----------+---------+----------+----------+---------+----------+-------------+
|org |year |jan_hired | ... |dec_hired |jan_stock | ... |dec_stock |stock_result |
+----+-----+----------+---------+----------+----------+---------+----------+-------------+
|IBM |2008 |... | ... |... |... | ... |... |-1.0 |
|IBM |2009 |... | ... |... |... | ... |... |1.0 |
|... |... |... | ... |... |... | ... |... |... |
+----+-----+----------+---------+----------+----------+---------+----------+-------------+
```
```
# TODO: Create [training_sdf]
## YOUR SOLUTION HERE
training_sdf.show()
```
| github_jupyter |
```
import numpy as np
import pandas as pd
from matplotlib import pyplot as plt
from tqdm import tqdm
%matplotlib inline
from torch.utils.data import Dataset, DataLoader
import torch
import torchvision
import torch.nn as nn
import torch.optim as optim
from torch.nn import functional as F
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
print(device)
# Number of points per mosaic; the experiment is swept over these sizes.
m = 500 # 5, 10, 20, 100, 500, 1000
# Total number of mosaics to generate.
desired_num = 1000
# Split boundaries: [tr_i, tr_j) is the training half, [tr_j, tr_k) the test half.
tr_i = 0
tr_j = int(desired_num/2)
tr_k = desired_num
tr_i, tr_j, tr_k
```
# Generate dataset
```
# Reproducible class labels: 5000 points spread over 10 classes.
np.random.seed(12)
y = np.random.randint(0,10,5000)
idx= []
for i in range(10):
    print(i,sum(y==i))
    idx.append(y==i)  # boolean mask selecting the points of class i
# Each class is a tight 2-d Gaussian blob (cov = 0.01*I) around its own mean.
x = np.zeros((5000,2))
np.random.seed(12)
x[idx[0],:] = np.random.multivariate_normal(mean = [4,6.5],cov=[[0.01,0],[0,0.01]],size=sum(idx[0]))
x[idx[1],:] = np.random.multivariate_normal(mean = [5.5,6],cov=[[0.01,0],[0,0.01]],size=sum(idx[1]))
x[idx[2],:] = np.random.multivariate_normal(mean = [4.5,4.5],cov=[[0.01,0],[0,0.01]],size=sum(idx[2]))
x[idx[3],:] = np.random.multivariate_normal(mean = [3,3.5],cov=[[0.01,0],[0,0.01]],size=sum(idx[3]))
x[idx[4],:] = np.random.multivariate_normal(mean = [2.5,5.5],cov=[[0.01,0],[0,0.01]],size=sum(idx[4]))
x[idx[5],:] = np.random.multivariate_normal(mean = [3.5,8],cov=[[0.01,0],[0,0.01]],size=sum(idx[5]))
x[idx[6],:] = np.random.multivariate_normal(mean = [5.5,8],cov=[[0.01,0],[0,0.01]],size=sum(idx[6]))
x[idx[7],:] = np.random.multivariate_normal(mean = [7,6.5],cov=[[0.01,0],[0,0.01]],size=sum(idx[7]))
x[idx[8],:] = np.random.multivariate_normal(mean = [6.5,4.5],cov=[[0.01,0],[0,0.01]],size=sum(idx[8]))
x[idx[9],:] = np.random.multivariate_normal(mean = [5,3],cov=[[0.01,0],[0,0.01]],size=sum(idx[9]))
x[idx[0]][0], x[idx[5]][5]
# Scatter the raw blobs, one colour per class.
for i in range(10):
    plt.scatter(x[idx[i],0],x[idx[i],1],label="class_"+str(i))
plt.legend(loc='center left', bbox_to_anchor=(1, 0.5))
# Collect the indices of every background-class point (classes 3..9).
bg_idx = [ np.where(idx[3] == True)[0],
           np.where(idx[4] == True)[0],
           np.where(idx[5] == True)[0],
           np.where(idx[6] == True)[0],
           np.where(idx[7] == True)[0],
           np.where(idx[8] == True)[0],
           np.where(idx[9] == True)[0]]
bg_idx = np.concatenate(bg_idx, axis = 0)
bg_idx.shape
np.unique(bg_idx).shape
# Standardise x using *background* statistics only: subtract the background
# mean and divide by the background std, then inspect overall stats.
x = x - np.mean(x[bg_idx], axis = 0, keepdims = True)
np.mean(x[bg_idx], axis = 0, keepdims = True), np.mean(x, axis = 0, keepdims = True)
x = x/np.std(x[bg_idx], axis = 0, keepdims = True)
np.std(x[bg_idx], axis = 0, keepdims = True), np.std(x, axis = 0, keepdims = True)
# Scatter again after normalisation.
for i in range(10):
    plt.scatter(x[idx[i],0],x[idx[i],1],label="class_"+str(i))
plt.legend(loc='center left', bbox_to_anchor=(1, 0.5))
# Classes 0-2 are "foreground", 3-9 are "background".
foreground_classes = {'class_0','class_1', 'class_2'}
background_classes = {'class_3','class_4', 'class_5', 'class_6','class_7', 'class_8', 'class_9'}
# Build one example mosaic: m points, exactly one drawn from a random
# foreground class at random position fg_idx, the rest from random
# background classes.
fg_class = np.random.randint(0,3)
fg_idx = np.random.randint(0,m)
a = []
for i in range(m):
    if i == fg_idx:
        # the single foreground point of this mosaic
        b = np.random.choice(np.where(idx[fg_class]==True)[0],size=1)
        a.append(x[b])
        print("foreground "+str(fg_class)+" present at " + str(fg_idx))
    else:
        bg_class = np.random.randint(3,10)
        b = np.random.choice(np.where(idx[bg_class]==True)[0],size=1)
        a.append(x[b])
        print("background "+str(bg_class)+" present at " + str(i))
a = np.concatenate(a,axis=0)
print(a.shape)
print(fg_class , fg_idx)
# Flatten the m 2-d points into a single (2*m, 1) column vector.
np.reshape(a,(2*m,1))
# Generate the full dataset: desired_num mosaics, each seeded by its own
# index j so the whole dataset is reproducible.
mosaic_list_of_images =[]
mosaic_label = []  # foreground class of each mosaic (the target label)
fore_idx=[]        # position of the foreground point within each mosaic
for j in range(desired_num):
    np.random.seed(j)
    fg_class = np.random.randint(0,3)
    fg_idx = np.random.randint(0,m)
    a = []
    for i in range(m):
        if i == fg_idx:
            b = np.random.choice(np.where(idx[fg_class]==True)[0],size=1)
            a.append(x[b])
            # print("foreground "+str(fg_class)+" present at " + str(fg_idx))
        else:
            bg_class = np.random.randint(3,10)
            b = np.random.choice(np.where(idx[bg_class]==True)[0],size=1)
            a.append(x[b])
            # print("background "+str(bg_class)+" present at " + str(i))
    a = np.concatenate(a,axis=0)
    mosaic_list_of_images.append(np.reshape(a,(2*m,1)))
    mosaic_label.append(fg_class)
    fore_idx.append(fg_idx)
# Stack into an array of shape (desired_num, 2*m): one flattened mosaic per row.
mosaic_list_of_images = np.concatenate(mosaic_list_of_images,axis=1).T
mosaic_list_of_images.shape
mosaic_list_of_images.shape, mosaic_list_of_images[0]
# Print each 2-d point of the first mosaic (point j occupies slots [2*j, 2*j+2)).
for j in range(m):
    print(mosaic_list_of_images[0][2*j:2*j+2])
def create_avg_image_from_mosaic_dataset(mosaic_dataset, labels, foreground_index, dataset_number, m):
    """Collapse each mosaic (m points of dimension 2) into one weighted-average point.

    mosaic_dataset : sequence where each element is a flattened mosaic of m 2-d
        points (length 2*m); point j occupies slots [2*j, 2*j+2)
    labels : labels of the mosaics (returned unchanged)
    foreground_index : position of the foreground point inside each mosaic
    dataset_number : mixing ratio control. The foreground point gets weight
        dataset_number/m and each of the (m-1) background points gets
        (m-dataset_number)/((m-1)*m), so the weights always sum to 1.
    m : number of points per mosaic

    Returns (list of averaged 2-d float64 tensors, labels, foreground_index).
    """
    avg_image_dataset = []
    cnt = 0
    counter = np.zeros(m)
    for i in range(len(mosaic_dataset)):
        img = torch.zeros([2], dtype=torch.float64)
        # Seed kept for reproducibility with earlier runs of this notebook
        # (no randomness is actually consumed below).
        np.random.seed(int(dataset_number*10000 + i))
        give_pref = foreground_index[i]
        for j in range(m):
            if j == give_pref:
                img = img + mosaic_dataset[i][2*j:2*j+2]*dataset_number/m  # 2 is the data dim
            else:
                img = img + mosaic_dataset[i][2*j:2*j+2]*(m-dataset_number)/((m-1)*m)
        # give_pref always equals foreground_index[i] here, so the original
        # if/else was a dead branch: both counters update unconditionally.
        cnt += 1
        counter[give_pref] += 1
        avg_image_dataset.append(img)
    print("number of correct averaging happened for dataset "+str(dataset_number)+" is "+str(cnt))
    print("the averaging are done as ", counter)
    return avg_image_dataset , labels , foreground_index
# Dataset 1: foreground weighted 1/m. Test set: dataset_number = m, i.e. the
# foreground weight is m/m = 1 and the background weight is 0, so each test
# point is the foreground point itself (rescaled by m below).
avg_image_dataset_1 , labels_1, fg_index_1 = create_avg_image_from_mosaic_dataset(mosaic_list_of_images[0:tr_j], mosaic_label[0:tr_j], fore_idx[0:tr_j] , 1, m)
test_dataset , labels , fg_index = create_avg_image_from_mosaic_dataset(mosaic_list_of_images[tr_j : tr_k], mosaic_label[tr_j : tr_k], fore_idx[tr_j : tr_k] , m, m)
avg_image_dataset_1 = torch.stack(avg_image_dataset_1, axis = 0)
# avg_image_dataset_1 = (avg - torch.mean(avg, keepdims= True, axis = 0)) / torch.std(avg, keepdims= True, axis = 0)
# print(torch.mean(avg_image_dataset_1, keepdims= True, axis = 0))
# print(torch.std(avg_image_dataset_1, keepdims= True, axis = 0))
print("=="*40)
test_dataset = torch.stack(test_dataset, axis = 0)
# test_dataset = (avg - torch.mean(avg, keepdims= True, axis = 0)) / torch.std(avg, keepdims= True, axis = 0)
# print(torch.mean(test_dataset, keepdims= True, axis = 0))
# print(torch.std(test_dataset, keepdims= True, axis = 0))
print("=="*40)
# Visualise the averaged training points, coloured by foreground class.
x1 = (avg_image_dataset_1).numpy()
y1 = np.array(labels_1)
plt.scatter(x1[y1==0,0], x1[y1==0,1], label='class 0')
plt.scatter(x1[y1==1,0], x1[y1==1,1], label='class 1')
plt.scatter(x1[y1==2,0], x1[y1==2,1], label='class 2')
plt.legend()
plt.title("dataset4 CIN with alpha = 1/"+str(m))
# Visualise the test points; dividing by m undoes the m-weighting above.
x1 = (test_dataset).numpy() / m
y1 = np.array(labels)
plt.scatter(x1[y1==0,0], x1[y1==0,1], label='class 0')
plt.scatter(x1[y1==1,0], x1[y1==1,1], label='class 1')
plt.scatter(x1[y1==2,0], x1[y1==2,1], label='class 2')
plt.legend()
plt.title("test dataset4")
test_dataset[0:10]/m
# Rescale the test set in place.
test_dataset = test_dataset/m
test_dataset[0:10]
class MosaicDataset(Dataset):
    """Thin Dataset wrapper pairing each mosaic with its label."""

    def __init__(self, mosaic_list_of_images, mosaic_label):
        """Store the mosaics and labels as given; no copying or transforms."""
        self.mosaic = mosaic_list_of_images
        self.label = mosaic_label
        # fore_idx intentionally not tracked here (see original commented code)

    def __len__(self):
        """Number of (mosaic, label) pairs."""
        return len(self.label)

    def __getitem__(self, idx):
        """Return the (mosaic, label) pair at position idx."""
        return self.mosaic[idx], self.label[idx]
# Quick sanity checks on the first averaged point.
avg_image_dataset_1[0].shape
avg_image_dataset_1[0]
batch = 200
# Train loader (shuffled) and eval loader (unshuffled) over dataset 1...
traindata_1 = MosaicDataset(avg_image_dataset_1, labels_1 )
trainloader_1 = DataLoader( traindata_1 , batch_size= batch ,shuffle=True)
testdata_1 = MosaicDataset(avg_image_dataset_1, labels_1 )
testloader_1 = DataLoader( testdata_1 , batch_size= batch ,shuffle=False)
# ...plus an eval loader over the held-out test split.
testdata_11 = MosaicDataset(test_dataset, labels )
testloader_11 = DataLoader( testdata_11 , batch_size= batch ,shuffle=False)
class Whatnet(nn.Module):
    """Single linear layer mapping 2-d inputs to 3 class logits."""

    def __init__(self):
        super(Whatnet, self).__init__()
        self.linear1 = nn.Linear(2, 3)
        # Deterministic-per-seed initialisation: Xavier-normal weights, zero bias.
        torch.nn.init.xavier_normal_(self.linear1.weight)
        torch.nn.init.zeros_(self.linear1.bias)

    def forward(self, x):
        """Return raw logits (no activation; loss applies softmax)."""
        return self.linear1(x)
def calculate_loss(dataloader, model, criter):
    """Return the average per-batch loss of `model` over `dataloader`.

    Runs in eval mode with gradients disabled. Batches are moved to the
    model's own device instead of a hard-coded "cuda", so this also works
    on CPU-only machines (fixes a crash when CUDA is unavailable).

    dataloader : iterable of (inputs, labels) batches
    model : torch.nn.Module producing logits
    criter : loss function, e.g. nn.CrossEntropyLoss()
    """
    model.eval()
    device = next(model.parameters()).device
    r_loss = 0.0
    n_batches = 0
    with torch.no_grad():
        for inputs, labels in dataloader:
            inputs, labels = inputs.to(device), labels.to(device)
            outputs = model(inputs)
            r_loss += criter(outputs, labels).item()
            n_batches += 1
    return r_loss / n_batches
def test_all(number, testloader,net):
correct = 0
total = 0
out = []
pred = []
with torch.no_grad():
for data in testloader:
images, labels = data
images, labels = images.to("cuda"),labels.to("cuda")
out.append(labels.cpu().numpy())
outputs= net(images)
_, predicted = torch.max(outputs.data, 1)
pred.append(predicted.cpu().numpy())
total += labels.size(0)
correct += (predicted == labels).sum().item()
pred = np.concatenate(pred, axis = 0)
out = np.concatenate(out, axis = 0)
print("unique out: ", np.unique(out), "unique pred: ", np.unique(pred) )
print("correct: ", correct, "total ", total)
print('Accuracy of the network on the %d test dataset %d: %.2f %%' % (total, number , 100 * correct / total))
def train_all(trainloader, ds_number, testloader_list):
    """Train a fresh Whatnet on `trainloader`; evaluate on each test loader.

    Returns the list of per-epoch training losses, including the loss
    measured before any training step. Training runs on CUDA when available
    and falls back to CPU otherwise (previously hard-coded to "cuda").
    Training stops early once the full-training-set loss drops to 0.05.

    trainloader : iterable of (inputs, labels) batches
    ds_number : dataset index used only in the printed report
    testloader_list : loaders passed one by one to test_all
    """
    print("--" * 40)
    print("training on data set ", ds_number)
    dev = "cuda" if torch.cuda.is_available() else "cpu"
    torch.manual_seed(12)  # reproducible weight initialisation
    net = Whatnet().double().to(dev)
    criterion_net = nn.CrossEntropyLoss()
    optimizer_net = optim.Adam(net.parameters(), lr=0.001)  # , momentum=0.9)
    loss_curi = []
    epochs = 1000
    # Loss before any training step, for reference on the curve.
    running_loss = calculate_loss(trainloader, net, criterion_net)
    loss_curi.append(running_loss)
    print('epoch: [%d ] loss: %.3f' % (0, running_loss))
    for epoch in range(epochs):  # loop over the dataset multiple times
        net.train()
        for inputs, labels in trainloader:
            inputs, labels = inputs.to(dev), labels.to(dev)
            optimizer_net.zero_grad()
            outputs = net(inputs)
            loss = criterion_net(outputs, labels)
            loss.backward()
            optimizer_net.step()
        # Re-evaluate on the full training set for the per-epoch curve
        # (the original discarded its in-loop accumulator the same way).
        running_loss = calculate_loss(trainloader, net, criterion_net)
        if epoch % 200 == 0:
            print('epoch: [%d] loss: %.3f' % (epoch + 1, running_loss))
        loss_curi.append(running_loss)  # loss per epoch
        if running_loss <= 0.05:  # early stop once training loss is low enough
            print('epoch: [%d] loss: %.3f' % (epoch + 1, running_loss))
            break
    print('Finished Training')
    # Final accuracy on the training data.
    correct = 0
    total = 0
    with torch.no_grad():
        for images, labels in trainloader:
            images, labels = images.to(dev), labels.to(dev)
            outputs = net(images)
            _, predicted = torch.max(outputs.data, 1)
            total += labels.size(0)
            correct += (predicted == labels).sum().item()
    print('Accuracy of the network on the %d train images: %.2f %%' % (total, 100 * correct / total))
    for i, loader in enumerate(testloader_list):
        test_all(i + 1, loader, net)
    print("--" * 40)
    return loss_curi
# Train on dataset 1 and evaluate on both loaders, collecting the loss curve.
train_loss_all=[]
testloader_list= [ testloader_1, testloader_11]
train_loss_all.append(train_all(trainloader_1, 1, testloader_list))
%matplotlib inline
# Plot one training-loss curve per dataset.
for i,j in enumerate(train_loss_all):
    plt.plot(j,label ="dataset "+str(i+1))
plt.xlabel("Epochs")
plt.ylabel("Training_loss")
plt.legend(loc='center left', bbox_to_anchor=(1, 0.5))
```
| github_jupyter |
```
# default_exp encoding
#hide
%load_ext autoreload
%autoreload 2
#hide
from nbdev.showdoc import *
#hide
#export
import numpy as np
from sklearn.metrics import r2_score
from sklearn.model_selection import KFold
from sklearn.linear_model import RidgeCV
import warnings
import copy
from joblib import Parallel, delayed
from sklearn.multioutput import MultiOutputRegressor, _fit_estimator
from sklearn.utils import check_X_y, check_array
from sklearn.utils.validation import check_is_fitted
from sklearn.base import RegressorMixin
def product_moment_corr(x, y):
    """Column-wise product-moment correlation for two ndarrays x, y.

    Each column of x and y is z-scored using the population standard
    deviation (ddof=0), exactly matching sklearn's StandardScaler which
    this function previously depended on; the sklearn import is removed.

    Note: because the z-scores use the population std but the sum is
    scaled by 1/(n-1), the result is n/(n-1) times the Pearson
    coefficient — preserved as-is for backward compatibility.
    """
    x = np.asarray(x, dtype=float)
    y = np.asarray(y, dtype=float)
    x = (x - x.mean(axis=0)) / x.std(axis=0)
    y = (y - y.mean(axis=0)) / y.std(axis=0)
    n = x.shape[0]
    return (1 / (n - 1)) * (x * y).sum(axis=0)
```
# Training and validating voxel-wise encoding models
> Functions for training independent Ridge regressions for a large number of voxels and validating their performance
```
#export
def get_model_plus_scores(X, y, estimator=None, cv=None, scorer=None,
                          voxel_selection=True, validate=True, **kwargs):
    '''Train estimators in a cross-validation and score them on held-out folds.

    Parameters
    ----------
    X : ndarray of shape (samples, features)
    y : ndarray of shape (samples, targets)
    estimator : None or estimator object that implements fit and predict
        if None, uses RidgeCV per default
    cv : int, None, or a cross-validation object with a split method, optional
        int specifies the number of KFold splits;
        None defaults to a scikit-learn KFold with default settings
    scorer : None or any scikit-learn compatible scoring function, optional
        default uses the product-moment correlation
    voxel_selection : bool, optional, default True
        Whether to only fit voxels with variance larger than zero.
        Scores for zero-variance voxels are reported as zero.
    validate : bool, optional, default True
        Whether to validate via cross-validation; if False, a single
        estimator is fit on all data and scores are computed on the
        training set (no generalization estimate).
    kwargs : additional parameters used to initialize RidgeCV if estimator is None

    Returns
    -------
    tuple of the n_splits estimators trained on the training folds (or a
    single estimator if validate is False) and the scores of all
    concatenated out-of-fold predictions.
    '''
    # NOTE: a dead import of sklearn's private test utility
    # check_regressor_multioutput was removed here; it was never used and
    # broke on sklearn versions that relocated it.
    if scorer is None:
        scorer = product_moment_corr
    if cv is None:
        cv = KFold()
    if isinstance(cv, int):
        cv = KFold(n_splits=cv)
    models = []
    score_list = []
    if estimator is None:
        estimator = RidgeCV(**kwargs)
    if voxel_selection:
        voxel_var = np.var(y, axis=0)
        y = y[:, voxel_var > 0.]
    if validate:
        for train, test in cv.split(X, y):
            models.append(copy.deepcopy(estimator).fit(X[train], y[train]))
            if voxel_selection:
                # Zero-variance voxels keep a score of 0.
                scores = np.zeros_like(voxel_var)
                scores[voxel_var > 0.] = scorer(y[test], models[-1].predict(X[test]))
            else:
                scores = scorer(y[test], models[-1].predict(X[test]))
            score_list.append(scores[:, None])
        score_list = np.concatenate(score_list, axis=-1)
    else:
        models = estimator.fit(X, y)
        score_list = scorer(y, estimator.predict(X))
    return models, score_list
```
`get_model_plus_scores` is a convenience function that trains multiple Ridge regressions in a cross-validation scheme and evaluates their performance on the respective test set.
# Examples
First, we create some simulated `stimulus` and `fmri` data.
```
stimulus = np.random.randn(1000, 5)
fmri = np.random.randn(1000, 10)
```
## Using the default Ridge regression
We can now use `get_model_plus_scores` to estimate multiple [RidgeCV](https://scikit-learn.org/stable/modules/generated/sklearn.linear_model.RidgeCV.html) regressions, one for each voxel (that maps the stimulus representation to this voxel) and one for each split (trained on a different training set and evaluated on the held-out set).
Since sklearn's `RidgeCV` estimator allows multi-output, we get one `RidgeCV` object per split.
```
ridges, scores = get_model_plus_scores(stimulus, fmri, cv=3)
assert len(ridges) == 3
ridges
```
Each `RidgeCV` estimator maps from the feature space to each voxel.
In our example, that means it has 10 (the number of voxels) independently trained regression models with 5 coefficients each (the number of features).
```
assert ridges[0].coef_.shape == (10, 5)
print(ridges[0].coef_)
```
We also get a set of scores (by default the [product moment correlation](https://en.wikipedia.org/wiki/Pearson_correlation_coefficient), but you can supply your own via the `scorer` argument) that specifies how well we predict left-out data (with the usual caveats of using a correlation coefficient for evaluating it). In our case it is of shape (10, 3) because we predict 10 voxels and use a 3-fold cross-validation, i.e. we split 3 times.
```
assert scores.shape == (10, 3)
scores
```
We can also change the parameters of the `RidgeCV` function.
For example, we can use pre-specified hyperparameters, like the values of the regularization parameter $\alpha$ we want to perform a gridsearch over or whether we want to normalize features. If we want to use other parameters for the default `RidgeCV`, we can just pass the parameters as additional keyword arguments:
```
alphas = [100]
ridges, scores = get_model_plus_scores(stimulus, fmri, alphas=alphas,
normalize=True, alpha_per_target=True)
assert ridges[0].normalize
assert ridges[0].alphas.shape == (1,)
```
## Using your own estimator
Additionally, we can use any other estimator that implements `fit` and `predict`.
For example, we can use [CCA](https://scikit-learn.org/stable/modules/generated/sklearn.cross_decomposition.CCA.html) as an encoding model.
```
from sklearn import cross_decomposition
our_estimator = cross_decomposition.CCA(n_components=2)
ccas, scores = get_model_plus_scores(stimulus, fmri, our_estimator,
cv=3)
assert type(ccas[0]) == cross_decomposition._pls.CCA
```
If your favorite estimator does not work in the multioutput regime, i.e. it cannot predict multiple targets/voxels, then `get_model_plus_scores` will wrap it into sklearn's [MultiOutputRegressor](https://scikit-learn.org/stable/modules/generated/sklearn.multioutput.MultiOutputRegressor.html) by default. However, for many voxels this can increase training time by a lot.
```
from sklearn.linear_model import Lasso
from sklearn.multioutput import MultiOutputRegressor
our_estimator = MultiOutputRegressor(Lasso())
lassos, scores = get_model_plus_scores(stimulus, fmri, our_estimator,
cv=3)
lassos
```
## Training without validation
We can also train an estimator without any validation, if, for example we want to test on a different dataset. In that case, the scores will be computed with the trained estimator on the training set, i.e. they will contain no information about the generalization performance of the estimator.
```
our_estimator = RidgeCV()
model, scores = get_model_plus_scores(stimulus, fmri, our_estimator,
validate=False)
assert type(model) == RidgeCV
assert scores.shape == (10,)
```
## Using your own cross-validation method
Instead of the default `KFold` cross-validation without shuffling, we can also use any sckit-learn compatible cross-validation iterators (e.g. [these](https://scikit-learn.org/stable/modules/cross_validation.html#cross-validation-iterators)).
For example, we could use a `TimeSeriesSplit` to test our predictions on only the most recent part of the data.
```
from sklearn.model_selection import TimeSeriesSplit
ts_cv = TimeSeriesSplit(n_splits=5)
model, scores = get_model_plus_scores(stimulus, fmri, cv=ts_cv)
assert scores.shape == (10, 5)
```
# Distributed training
Voxel-wise encoding models can take a long time and a lot of memory to train, especially if we use the full brain or high resolution fMRI data.
The `BlockMultiOutput` class can help distribute the load across multiple cores by splitting the fMRI data into multiple "blocks" (the `n_blocks` parameter) and training an estimator for each block.
Without parallelization, this class allows one to train voxel-wise encoding models, even if training a single, large estimator takes up too much memory, by training the estimator for blocks of your data independently.
This works even if the original fMRI data do not fit into memory, by using a [memmapped](https://numpy.org/doc/stable/reference/generated/numpy.memmap.html) Numpy array.
```
#export
class BlockMultiOutput(MultiOutputRegressor, RegressorMixin):
    """Multi target regression with block-wise fit

    This strategy consists of splitting the targets in blocks and fitting
    one regressor per block. The estimator used needs to natively support
    multioutput.

    Parameters
    ----------
    estimator : estimator object
        An estimator object implementing `fit` and `predict` and supporting multioutput.
    n_blocks : int, optional, default=10
        The number of blocks for the target variable.
        This is a split along *targets* (columns of the array), not observations (rows of the array).
    n_jobs : int, optional, default=1
        The number of jobs to run in parallel for `fit`. If -1,
        then the number of jobs is set to the number of cores.
        When individual estimators are fast to train or predict
        using `n_jobs>1` can result in slower performance due
        to the overhead of spawning processes.
    """

    def __init__(self, estimator, n_blocks=10, n_jobs=1):
        self.estimator = estimator
        self.n_blocks = n_blocks
        self.n_jobs = n_jobs

    def fit(self, X, y, sample_weight=None):
        """Fit the model to data: one estimator per block of target columns.

        Parameters
        ----------
        X : (sparse) array-like, shape (n_samples, n_features)
            Data.
        y : (sparse) array-like, shape (n_samples, n_outputs)
            Multi-output targets.
        sample_weight : array-like, shape = (n_samples) or None
            Sample weights. If None, then samples are equally weighted.
            Only supported if the underlying regressor supports sample weights.

        Returns
        -------
        self : object
        """
        if not hasattr(self.estimator, "fit"):
            raise ValueError("The base estimator should implement a fit method")
        if y.ndim == 1:
            raise ValueError("y must have at least two dimensions for "
                             "multi-output regression but has only one.")
        if sample_weight is not None:
            # Imported locally: the module referenced has_fit_parameter
            # without ever importing it, so this path raised NameError.
            from sklearn.utils.validation import has_fit_parameter
            if not has_fit_parameter(self.estimator, 'sample_weight'):
                raise ValueError("Underlying estimator does not support"
                                 " sample weights.")
        # Reuse KFold's index partitioning to split the *columns* of y into
        # n_blocks roughly equal blocks (the dummy arrays have one row per
        # target column).
        kfold = KFold(n_splits=self.n_blocks)
        smpl_X, smpl_y = np.zeros((y.shape[1], 1)), np.zeros((y.shape[1], 1))
        self.estimators_ = Parallel(n_jobs=self.n_jobs)(
            delayed(_fit_estimator)(
                self.estimator, X, y[:, block], sample_weight)
            for _, block in kfold.split(smpl_X, smpl_y))
        return self

    def partial_predict(self, X):
        """Yield predictions block by block as an iterator.

        Parameters
        ----------
        X : (sparse) array-like, shape (n_samples, n_features)
            Data.

        Yields
        ------
        y_block : array-like, the predictions of one block-estimator.
        """
        check_is_fitted(self, 'estimators_')
        if not hasattr(self.estimator, "predict"):
            raise ValueError("The base estimator should implement a predict method")
        X = check_array(X, accept_sparse=True)
        for estimator in self.estimators_:
            yield estimator.predict(X)

    def predict(self, X):
        """Predict the full multi-output variable by concatenating all blocks.

        Parameters
        ----------
        X : (sparse) array-like, shape (n_samples, n_features)
            Data.

        Returns
        -------
        y : array-like, shape (n_samples, n_outputs)
            Multi-output targets predicted across the block estimators.
        """
        check_is_fitted(self, 'estimators_')
        if not hasattr(self.estimator, "predict"):
            raise ValueError("The base estimator should implement a predict method")
        X = check_array(X, accept_sparse=True)
        y = Parallel(n_jobs=self.n_jobs)(
            delayed(e.predict)(X)
            for e in self.estimators_)
        return np.hstack(y)

    def score(self, X, y):
        """Return the correlation of the prediction with the target per output.

        Parameters
        ----------
        X : array-like, shape (n_samples, n_features)
            Test samples.
        y : array-like, shape (n_samples, n_outputs)
            True values for X.

        Returns
        -------
        score : ndarray
            Column-wise correlation of self.predict(X) wrt. y.
        """
        from sklearn.preprocessing import StandardScaler
        kfold = KFold(n_splits=self.n_blocks)
        smpl_X, smpl_y = np.zeros((y.shape[1], 1)), np.zeros((y.shape[1], 1))
        scores = []
        # Bug fix: the original did `from itertools import izip`, which does
        # not exist in Python 3 and made score() raise ImportError; the
        # built-in zip is the Python 3 equivalent.
        for prediction, (_, block) in zip(self.partial_predict(X), kfold.split(smpl_X, smpl_y)):
            mx = StandardScaler().fit_transform(prediction).astype('float32')
            my = StandardScaler().fit_transform(y[:, block]).astype('float32')
            n = mx.shape[0]
            r = (1/(n-1))*(mx*my).sum(axis=0)
            scores.append(r)
        return np.concatenate(scores)
```
## Example
Let's generate a larger data set.
```
our_estimator = BlockMultiOutput(RidgeCV(alphas=[10,100]))
estimators, scores = get_model_plus_scores(stimulus, fmri, our_estimator,
cv=3)
assert len(estimators) == 3
assert len(estimators[0].estimator.alphas) == 2
```
Each `BlockMultiOutput` estimator contains `n_blocks` estimators that are trained on different blocks of the target.
```
assert len(estimators[0].estimators_) == estimators[0].n_blocks
estimators[0].estimators_
```
If `fmri` is of shape $(n\_samples, n\_targets)$, each of the `n_blocks` estimators in `BlockMultiOutput.estimators_` will contain the coefficients for ${n\_targets}/{n\_blocks}$ targets.
```
assert estimators[0].estimators_[0].coef_.shape == (1, 5)
print('fmri shape: {} \nn_blocks: {} \n'
'coefficients of the estimator for one block: {}'.format(
fmri.shape, our_estimator.n_blocks, estimators[0].estimators_[0].coef_.shape))
```
We can use `MultiBlockOutput` instance normally to predict data, i.e. it produces predictions of the full fmri data by concatenating the predictions of every block-estimator.
```
assert estimators[0].predict(stimulus).shape == (1000, 10)
```
## Parallelizing voxel-wise encoding models
We can use this to parallelize encoding models as well, by specifying the `n_jobs` parameter.
Keep in mind that this requires copying the full `stimulus` data to every worker and can thus increase memory demand.
```
our_estimator = BlockMultiOutput(RidgeCV(alphas=[10,100]), n_jobs=10)
estimators, scores = get_model_plus_scores(stimulus, fmri, our_estimator,
cv=3)
assert len(estimators) == 3
assert estimators[0].n_jobs == 10
```
| github_jupyter |
```
import os
import tensorflow as tf
import cv2
import numpy as np
from matplotlib import pyplot as plt
%matplotlib inline
from object_detection.utils import label_map_util
from object_detection.utils import visualization_utils as viz_utils
from object_detection.builders import model_builder
from object_detection.utils import config_utiln
CUSTOM_MODEL_NAME = 'my_ssd_mobnet_tuned3'
PRETRAINED_MODEL_NAME = 'ssd_mobilenet_v2_fpnlite_320x320_coco17_tpu-8'
PRETRAINED_MODEL_URL = 'http://download.tensorflow.org/models/object_detection/tf2/20200711/ssd_mobilenet_v2_fpnlite_320x320_coco17_tpu-8.tar.gz'
TF_RECORD_SCRIPT_NAME = 'generate_tfrecord.py'
LABEL_MAP_NAME = 'label_map.pbtxt'
# Workspace directory layout for the TF Object Detection API project.
paths = {
    'WORKSPACE_PATH': os.path.join('Tensorflow', 'workspace'),
    'SCRIPTS_PATH': os.path.join('Tensorflow','scripts'),
    'APIMODEL_PATH': os.path.join('Tensorflow','models'),
    'ANNOTATION_PATH': os.path.join('Tensorflow', 'workspace','annotations'),
    'IMAGE_PATH': os.path.join('Tensorflow', 'workspace','images'),
    'MODEL_PATH': os.path.join('Tensorflow', 'workspace','models'),
    'PRETRAINED_MODEL_PATH': os.path.join('Tensorflow', 'workspace','pre-trained-models'),
    'CHECKPOINT_PATH': os.path.join('Tensorflow', 'workspace','models',CUSTOM_MODEL_NAME),
    'OUTPUT_PATH': os.path.join('Tensorflow', 'workspace','models',CUSTOM_MODEL_NAME, 'export'),
    'TFJS_PATH':os.path.join('Tensorflow', 'workspace','models',CUSTOM_MODEL_NAME, 'tfjsexport'),
    'TFLITE_PATH':os.path.join('Tensorflow', 'workspace','models',CUSTOM_MODEL_NAME, 'tfliteexport'),
    'PROTOC_PATH':os.path.join('Tensorflow','protoc')
}
# Frequently used files derived from the layout above.
files = {
    'PIPELINE_CONFIG':os.path.join('Tensorflow', 'workspace','models', CUSTOM_MODEL_NAME, 'pipeline.config'),
    'TF_RECORD_SCRIPT': os.path.join(paths['SCRIPTS_PATH'], TF_RECORD_SCRIPT_NAME),
    'LABELMAP': os.path.join(paths['ANNOTATION_PATH'], LABEL_MAP_NAME)
}
# Create any missing directories (shell command differs per OS).
for path in paths.values():
    if not os.path.exists(path):
        if os.name == 'posix':
            !mkdir -p {path}
        if os.name == 'nt':
            !mkdir {path}
paths
files
# Label map: four single-letter classes.
labels = [
    {'name':'a', 'id':1}, {'name':'c', 'id':2}, {'name':'o', 'id':3},
    {'name':'v', 'id':4}]
# Write the label map in pbtxt format, one item per class.
with open(files['LABELMAP'], 'w') as f:
    for label in labels:
        f.write('item { \n')
        f.write('\tname:\'{}\'\n'.format(label['name']))
        f.write('\tid:{}\n'.format(label['id']))
        f.write('}\n')
# Load pipeline config and build a detection model
configs = config_util.get_configs_from_pipeline_file(files['PIPELINE_CONFIG'])
detection_model = model_builder.build(model_config=configs['model'], is_training=False)
# SET_LATEST_CHECKPOINT
# NOTE(review): 'SET_LATEST_CHECKPOINT' below looks like a placeholder —
# confirm it should be replaced with the real checkpoint name (e.g. 'ckpt-3').
ckpt = tf.compat.v2.train.Checkpoint(model=detection_model)
ckpt.restore(os.path.join(paths['CHECKPOINT_PATH'], 'SET_LATEST_CHECKPOINT')).expect_partial()
@tf.function
def detect_fn(image):
    """Preprocess a frame, run the detector, and postprocess into detections."""
    preprocessed, shapes = detection_model.preprocess(image)
    predictions = detection_model.predict(preprocessed, shapes)
    return detection_model.postprocess(predictions, shapes)
category_index = label_map_util.create_category_index_from_labelmap(files['LABELMAP'])
# Real time detection from webcam
# Press 'q' to quit from object detection window
cap = cv2.VideoCapture(0)
width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
while cap.isOpened():
    ret, frame = cap.read()
    image_np = np.array(frame)
    # Batch of one frame, converted to float32 for the model.
    input_tensor = tf.convert_to_tensor(np.expand_dims(image_np, 0), dtype=tf.float32)
    detections = detect_fn(input_tensor)
    num_detections = int(detections.pop('num_detections'))
    # Drop the batch dimension and keep only the valid detections.
    detections = {key: value[0, :num_detections].numpy()
                  for key, value in detections.items()}
    detections['num_detections'] = num_detections
    # detection_classes should be ints.
    detections['detection_classes'] = detections['detection_classes'].astype(np.int64)
    label_id_offset = 1  # model classes are 0-based, label map ids are 1-based
    image_np_with_detections = image_np.copy()
    # Draw up to 5 boxes with score >= 0.8 on a copy of the frame.
    viz_utils.visualize_boxes_and_labels_on_image_array(
        image_np_with_detections,
        detections['detection_boxes'],
        detections['detection_classes']+label_id_offset,
        detections['detection_scores'],
        category_index,
        use_normalized_coordinates=True,
        max_boxes_to_draw=5,
        min_score_thresh=.8,
        agnostic_mode=False)
    cv2.imshow('object detection', cv2.resize(image_np_with_detections, (800, 600)))
    if cv2.waitKey(10) & 0xFF == ord('q'):
        cap.release()
        cv2.destroyAllWindows()
        break
```
| github_jupyter |
`fapy`
=====
# Introduction
A little Python 3 module for finite automata theory and regular expressions.
# Simple automata fun
```
from fapy.finite_automaton import FiniteAutomaton
```
In `fapy`, all automata are non deterministic. Being deterministic is a _property_. The same goes for being complete, or epsilon-transition free.
We start by defining a simple automaton accepting all words containing at least an `a`.
```
automaton1 = FiniteAutomaton(
alphabet={'a', 'b'},
states={'q0', 'q1'},
initial_states={'q0'},
accepting_states={'q1'},
transitions={
'q0': [('a', 'q0'), ('b', 'q0'), ('a', 'q1')],
'q1': [('a', 'q1'), ('b', 'q1')]
}
)
automaton1.draw()
```
Let's run some tests
```
problems = [
('', False),
('a', True),
('b', False),
('bb', False),
('bbabb', True)
]
for problem, solution in problems:
print(f'Word: "{"".join(problem)}", Output: {automaton1.read(problem)}, Expected: {solution}.')
```
# Regular expressions
They exist.
```
from fapy.regular_expression import (
parse_regular_expression,
RegularExpression
)
reg = parse_regular_expression("(a + b)* a")
print(reg)
```
## Residuals
```
from fapy.algorithm_residual import residual
print(residual(parse_regular_expression("(a + abb) (bba)* c"), "abb"))
```
# Algorithms
We have some algorithms!
## Determinization
```
from fapy.algorithms import powerset_determinize
automaton1_det = powerset_determinize(automaton1)
automaton1_det.draw()
```
## Thompson's algorithm
It transforms a regular expression into an automaton by induction on the syntax tree of the regular expression.
```
from fapy.algorithms import thompson
alphabet = {'a', 'b'}
automaton = thompson(
parse_regular_expression('(a + b)* a (a + b)*'),
alphabet
)
automaton.draw()
```
# Glushkov's algorithm
It transforms a regular expression into an equivalent automaton.
```
from fapy.algorithms import glushkov
automaton = glushkov(parse_regular_expression('(a (ab)*)*'))
automaton.draw()
```
# Brozozwski's algorithm
It transforms a finite automaton into an equivalent regular expression.
```
from fapy.algorithms import brozozwski
automaton = FiniteAutomaton(
alphabet={'a', 'b'},
states={'q0', 'q1', 'q2', 'q3'},
initial_states={'q0'},
accepting_states={'q3'},
transitions={
'q0': [('a', 'q1'), ('b', 'q3')],
'q1': [('a', 'q2')],
'q2': [('a', 'q0')]
}
)
print(brozozwski(automaton))
automaton.draw()
```
# Brzozowski's minimization algorithm (spelled `brozozwski_minimize` in the module)
```
from fapy.algorithms import brozozwski_minimize
alphabet = {'a', 'b', 'c'}
automaton = thompson(
parse_regular_expression('(a + b)* a b c (a + bb)*'),
alphabet
)
minimized_automaton = brozozwski_minimize(automaton)
minimized_automaton.draw()
```
| github_jupyter |
<small><i>This notebook was put together by [Jake Vanderplas](http://www.vanderplas.com) for PyCon 2015. Source and license info is on [GitHub](https://github.com/jakevdp/sklearn_pycon2015/).</i></small>
# 深入监督学习:支持向量机
之前我们已经介绍了监督学习。监督学习算法有很多种,这里我们介绍其中最强大且最有意思的一种方法:**支持向量机(SVMs)**。
```
%matplotlib inline
import numpy as np
import matplotlib.pyplot as plt
from scipy import stats
# use seaborn plotting defaults
import seaborn as sns; sns.set()
# eliminate warnings: use the supported warnings filter instead of
# monkey-patching warnings.warn, which silently breaks any library that
# relies on the real function (e.g. deprecation machinery).
import warnings


def warn(*args, **kwargs):
    """No-op kept for backward compatibility with the original cell."""
    pass


warnings.filterwarnings('ignore')
```
## 支持向量机的动机
支持向量机(SVMs)是一种强大的监督学习算法,它可以用来进行分类和回归。SVMs是一种**判别式**分类器:用它可以在数据集之间划分边界。
```
from sklearn.datasets.samples_generator import make_blobs
X, y = make_blobs(n_samples=50, centers=2,
random_state=0, cluster_std=0.60)
plt.scatter(X[:, 0], X[:, 1], c=y, s=50, cmap='spring');
```
一个判别式分类器试图在两个不同的数据集之间画一条直线。但我们马上发现,这类直线是有问题的,我们可以画出好几条直线来完美区分这两类数据:
```
xfit = np.linspace(-1, 3.5)
plt.scatter(X[:, 0], X[:, 1], c=y, s=50, cmap='spring')
for m, b in [(1, 0.65), (0.5, 1.6), (-0.2, 2.9)]:
plt.plot(xfit, m * xfit + b, '-k')
plt.xlim(-1, 3.5);
```
在这些样本中,我们有三种分割方式。到底采用哪种分割方式,才能适应新的数据,这个真的很纠结!
我们如何才能改进它呢?
### 支持向量机:最大化*Margin*
支持向量机可以解决这个问题。
支持向量机不仅仅画一条直线,还考虑了和这些直线相关的*region*,下面我们来看个例子。
```
xfit = np.linspace(-1, 3.5)
plt.scatter(X[:, 0], X[:, 1], c=y, s=50, cmap='spring')
for m, b, d in [(1, 0.65, 0.33), (0.5, 1.6, 0.55), (-0.2, 2.9, 0.2)]:
yfit = m * xfit + b
plt.plot(xfit, yfit, '-k')
plt.fill_between(xfit, yfit - d, yfit + d, edgecolor='none', color='#AAAAAA', alpha=0.4)
plt.xlim(-1, 3.5);
```
注意如果我们想最大化这个*region*,中间的直线貌似是最好的选择。
这是**支持向量机**直观表现,它通过数据集之间的**margin**来优化线性判别模型。
#### 拟合一个支持向量机
现在我们将对这些点拟合一个支持向量机分类器。尽管SVM模型的数学细节很有意思,但是这里不会涉及。我们把sklearn中SVM算法当做一个黑盒来完成上面的任务。
```
from sklearn.svm import SVC # "Support Vector Classifier"
clf = SVC(kernel='linear')
clf.fit(X, y)
```
为了更好的可视化,我们写了一个函数来来绘制SVM决策边界:
```
def plot_svc_decision_function(clf, ax=None):
    """Plot the decision function for a 2D SVC.

    Draws the decision boundary (level 0) and the two margins (levels -1
    and +1) of a fitted ``sklearn.svm.SVC`` over the current axis limits.

    Parameters
    ----------
    clf : fitted SVC trained on exactly 2 features
    ax : matplotlib Axes, optional; defaults to ``plt.gca()``
    """
    if ax is None:
        ax = plt.gca()
    x = np.linspace(plt.xlim()[0], plt.xlim()[1], 30)
    y = np.linspace(plt.ylim()[0], plt.ylim()[1], 30)
    Y, X = np.meshgrid(y, x)
    # BUG FIX: decision_function expects a 2D array of shape
    # (n_samples, n_features); the original passed a bare [xi, yj] pair per
    # call, which modern scikit-learn rejects. Evaluate the whole grid in
    # one vectorized call instead of a double Python loop.
    P = clf.decision_function(np.c_[X.ravel(), Y.ravel()]).reshape(X.shape)
    # plot the boundary and the two margins
    ax.contour(X, Y, P, colors='k',
               levels=[-1, 0, 1], alpha=0.5,
               linestyles=['--', '-', '--'])
plt.scatter(X[:, 0].reshape(-1, 1), X[:, 1].reshape(-1, 1), c=y, s=50, cmap='spring')
plot_svc_decision_function(clf);
```
注意虚线上面有一对点落在上面,这些点是这次拟合的关键,被称为**支持向量**。在sklearn中,它们被存储在分类器的``support_vectors_``属性中。
```
plt.scatter(X[:, 0], X[:, 1], c=y, s=50, cmap='spring')
plot_svc_decision_function(clf)
plt.scatter(clf.support_vectors_[:, 0], clf.support_vectors_[:, 1],
s=200, facecolors='none');
```
让我们用IPython的``interact``功能探索一下,点的分布是如何影响支持向量和判别拟合的。
```
from IPython.html.widgets import interact
def plot_svm(N=10, kernel='linear'):
    """Fit an SVC on the first *N* blob points and plot fit + support vectors.

    BUG FIX: ``kernel`` is now an explicit parameter (default 'linear') —
    the ``interact`` call below passes ``kernel='linear'``, which the
    original signature did not accept and would raise ``TypeError``.
    """
    X, y = make_blobs(n_samples=200, centers=2,
                      random_state=0, cluster_std=0.60)
    X = X[:N]
    y = y[:N]
    clf = SVC(kernel=kernel)
    clf.fit(X, y)
    plt.scatter(X[:, 0], X[:, 1], c=y, s=50, cmap='spring')
    plt.xlim(-1, 4)
    plt.ylim(-1, 6)
    plot_svc_decision_function(clf, plt.gca())
    # highlight the support vectors
    plt.scatter(clf.support_vectors_[:, 0], clf.support_vectors_[:, 1],
                s=200, facecolors='none')

interact(plot_svm, N=[10, 200], kernel='linear');
```
注意如果你移动其他的任何点,只要它们没有越过决策边界,就不会对分类结果有影响。
#### 进一步看下Kernel方法
SVM还有一项很有用的特性叫做*kernels*。为了利用这个特性,我们看一些不能线性分割的数据:
```
from sklearn.datasets.samples_generator import make_circles
X, y = make_circles(100, factor=.1, noise=.1)
clf = SVC(kernel='linear').fit(X, y)
plt.scatter(X[:, 0], X[:, 1], c=y, s=50, cmap='spring')
plot_svc_decision_function(clf);
```
很明显,只有非线性的才能划分这些数据。一种方式是我们采用**kernel**,它能对输入数据做些变换。例如,我们能用的一个简单模型叫做**径向基函数核**
```
r = np.exp(-(X[:, 0] ** 2 + X[:, 1] ** 2))
```
如果我们绘制出我们的数据,我们能看到它的效果。
```
from mpl_toolkits import mplot3d
def plot_3D(elev=30, azim=30):
    """Show the RBF-lifted data (x, y, r) in 3D from a configurable viewpoint."""
    ax = plt.subplot(projection='3d')
    ax.scatter3D(X[:, 0], X[:, 1], r, c=y, s=50, cmap='spring')
    ax.view_init(elev=elev, azim=azim)
    ax.set_xlabel('x')
    ax.set_ylabel('y')
    ax.set_zlabel('r')

# BUG FIX: the keyword was misspelled ``azip`` — interact would forward an
# unknown ``azip=...`` argument to plot_3D and raise TypeError.
interact(plot_3D, elev=[-90, 90], azim=(-180, 180));
```
我们能看到,加上这个额外的维度后,数据变得线性可分了。这里用的是一个相对简单的核;SVM 内置了更复杂的核,我们可以通过设置``kernel='rbf'``来使用它,其全称是*径向基函数核*:
```
clf = SVC(kernel='rbf')
clf.fit(X, y)
plt.scatter(X[:, 0], X[:, 1], c=y, s=50, cmap='spring')
plot_svc_decision_function(clf)
plt.scatter(clf.support_vectors_[:, 0], clf.support_vectors_[:, 1],
s=200, facecolors='none');
```
| github_jupyter |
# TO-DO LIST
- Label Smoothing
- https://www.kaggle.com/chocozzz/train-cassava-starter-using-label-smoothing
- https://www.kaggle.com/c/siim-isic-melanoma-classification/discussion/173733
- Class Imbalance
- SWA / SWAG
- Augmentation
- https://www.kaggle.com/sachinprabhu/pytorch-resnet50-snapmix-train-pipeline
```
import os
print(os.listdir("./input/"))
package_paths = [
'./input/pytorch-image-models/pytorch-image-models-master', #'../input/efficientnet-pytorch-07/efficientnet_pytorch-0.7.0'
'./input/pytorch-gradual-warmup-lr-master'
]
import sys;
for pth in package_paths:
sys.path.append(pth)
# from warmup_scheduler import GradualWarmupScheduler
from glob import glob
from sklearn.model_selection import GroupKFold, StratifiedKFold
import cv2
from skimage import io
import torch
from torch import nn
import os
from datetime import datetime
import time
import random
import cv2
import torchvision
from torchvision import transforms
import pandas as pd
import numpy as np
from tqdm import tqdm
import matplotlib.pyplot as plt
from torch.utils.data import Dataset,DataLoader
from torch.utils.data.sampler import SequentialSampler, RandomSampler
from torch.cuda.amp import autocast, GradScaler
from torch.nn.modules.loss import _WeightedLoss
import torch.nn.functional as F
import timm
import sklearn
import warnings
import joblib
from sklearn.metrics import roc_auc_score, log_loss
from sklearn import metrics
import warnings
import cv2
#from efficientnet_pytorch import EfficientNet
from scipy.ndimage.interpolation import zoom
# Global experiment configuration: model, optimisation and FixMatch settings.
CFG = {
    'fold_num': 5,                          # number of stratified CV folds
    'seed': 719,                            # global RNG seed
    'model_arch': 'tf_efficientnet_b4_ns',  # timm backbone name
    'img_size': 512,                        # square training resolution
    'epochs': 10,
    'train_bs': 4,                          # labeled batch size
    'valid_bs': 1,
    'T_0': 10,                              # CosineAnnealingWarmRestarts period
    'lr': 1e-4,
    'min_lr': 1e-6,
    'weight_decay':1e-6,
    'num_workers': 4,
    'accum_iter': 2, # support batch accumulation for backprop with effectively larger batch size
    'verbose_step': 1,
    'device': 'cuda:0',
    'target_size' : 5,                      # number of classes (passed to LabelSmoothingLoss)
    'smoothing' : 0.2,                      # label-smoothing factor
    ## Following four are related to FixMatch
    'mu' : 5,                               # unlabeled:labeled batch-size ratio
    'T' : 1,                                # pseudo-label softmax temperature
    'lambda_u' : 1,                         # weight of the unsupervised loss
    'threshold' : 0.95,                     # confidence threshold for pseudo-labels
    ##
    'debug' : False
}

# Load the training index and drop three hand-picked images
# (presumably corrupted or mislabeled — TODO confirm why these three).
train = pd.read_csv('./input/cassava-leaf-disease-classification/train.csv')
delete_id = ['2947932468.jpg', '2252529694.jpg', '2278017076.jpg']
train = train[~train['image_id'].isin(delete_id)].reset_index(drop=True)
train.head()
```
> We could do stratified validation split in each fold to make each fold's train and validation set looks like the whole train set in target distributions.
```
submission = pd.read_csv('./input/cassava-leaf-disease-classification/sample_submission.csv')
submission.head()
```
# Helper Functions
```
def seed_everything(seed):
    """Seed every RNG in use (hash, python, numpy, torch) for reproducibility.

    Also pins cuDNN to its deterministic kernels and disables the benchmark
    autotuner, trading a little speed for run-to-run stability.
    """
    os.environ['PYTHONHASHSEED'] = str(seed)
    for seeder in (random.seed, np.random.seed,
                   torch.manual_seed, torch.cuda.manual_seed):
        seeder(seed)
    torch.backends.cudnn.deterministic = True
    torch.backends.cudnn.benchmark = False
def get_img(path):
    """Read the image at *path* and return it as an RGB numpy array.

    OpenCV decodes to BGR, so the channel axis is reversed before returning.
    """
    bgr = cv2.imread(path)
    return bgr[:, :, ::-1]
```
# Dataset
```
def rand_bbox(size, lam):
    """Sample a random CutMix-style box covering a (1 - lam) area fraction.

    Parameters
    ----------
    size : sequence whose first two entries are used as width/height
        (NOTE(review): for an NCHW tensor this indexing would pick N and C —
        callers must pass spatial dims first; confirm intended layout).
    lam : float in [0, 1]; the cut side is scaled by sqrt(1 - lam).

    Returns
    -------
    (bbx1, bby1, bbx2, bby2) : box corners, clipped to the valid range.
    """
    W, H = size[0], size[1]
    cut_rat = np.sqrt(1. - lam)
    # BUG FIX: np.int was removed in NumPy 1.24 — use the builtin int.
    cut_w = int(W * cut_rat)
    cut_h = int(H * cut_rat)

    # uniformly sample the box centre
    cx = np.random.randint(W)
    cy = np.random.randint(H)

    bbx1 = np.clip(cx - cut_w // 2, 0, W)
    bby1 = np.clip(cy - cut_h // 2, 0, H)
    bbx2 = np.clip(cx + cut_w // 2, 0, W)
    bby2 = np.clip(cy + cut_h // 2, 0, H)

    return bbx1, bby1, bbx2, bby2
class CassavaDataset(Dataset):
    """Cassava-leaf dataset reading images with the cv2-based ``get_img``.

    Parameters
    ----------
    df : DataFrame with an ``image_id`` column; a ``label`` column is only
        required when ``output_label=True``.
    data_root : directory containing the image files.
    transforms : albumentations pipeline, applied as ``t(image=img)['image']``.
    output_label : when True ``__getitem__`` returns ``(img, target)``,
        otherwise just ``img``.
    """

    def __init__(self, df, data_root,
                 transforms=None,
                 output_label=True,
                 ):
        super().__init__()
        self.df = df.reset_index(drop=True).copy()
        self.transforms = transforms
        self.data_root = data_root
        self.output_label = output_label
        # BUG FIX: only read the 'label' column when labels are requested —
        # the original always accessed it and crashed on unlabeled (test)
        # frames that have no such column.
        self.labels = self.df['label'].values if output_label else None

    def __len__(self):
        return self.df.shape[0]

    def __getitem__(self, index: int):
        img = get_img("{}/{}".format(self.data_root, self.df.loc[index]['image_id']))

        if self.transforms:
            img = self.transforms(image=img)['image']

        if self.output_label:
            return img, self.labels[index]
        return img
```
# Define Train/Validation Image Augmentations
```
from albumentations.core.transforms_interface import DualTransform
# from albumentations.augmentations import functional as F
class GridMask(DualTransform):
    """GridMask augmentation for image classification and object detection.

    Author: Qishen Ha
    Email: haqishen@gmail.com
    2020/01/29

    Args:
        num_grid (int): number of grid in a row or column.
        fill_value (int, float, list of int, list of float): value for dropped pixels.
        rotate ((int, int) or int): range from which a random angle is picked. If rotate is a single int
            an angle is picked from (-rotate, rotate). Default: (-90, 90)
        mode (int):
            0 - cropout a quarter of the square of each grid (left top)
            1 - reserve a quarter of the square of each grid (left top)
            2 - cropout 2 quarter of the square of each grid (left top & right bottom)

    Targets:
        image, mask

    Image types:
        uint8, float32

    Reference:
    |  https://arxiv.org/abs/2001.04086
    |  https://github.com/akuxcw/GridMask
    """

    def __init__(self, num_grid=3, fill_value=0, rotate=0, mode=0, always_apply=False, p=0.5):
        super(GridMask, self).__init__(always_apply, p)
        # normalise scalar arguments to (lo, hi) ranges
        if isinstance(num_grid, int):
            num_grid = (num_grid, num_grid)
        if isinstance(rotate, int):
            rotate = (-rotate, rotate)
        self.num_grid = num_grid
        self.fill_value = fill_value
        self.rotate = rotate
        self.mode = mode
        self.masks = None        # lazily built by init_masks()
        self.rand_h_max = []     # per-mask max random vertical offset
        self.rand_w_max = []     # per-mask max random horizontal offset

    def init_masks(self, height, width):
        """Build (once) one oversized grid mask per grid count in the range."""
        if self.masks is None:
            self.masks = []
            n_masks = self.num_grid[1] - self.num_grid[0] + 1
            for n, n_g in enumerate(range(self.num_grid[0], self.num_grid[1] + 1, 1)):
                grid_h = height / n_g
                grid_w = width / n_g
                # mask is one grid cell larger than the image so a random
                # (rand_h, rand_w) shift can be applied before cropping
                this_mask = np.ones((int((n_g + 1) * grid_h), int((n_g + 1) * grid_w))).astype(np.uint8)
                for i in range(n_g + 1):
                    for j in range(n_g + 1):
                        # zero out (fill) the top-left quarter of each cell
                        this_mask[
                            int(i * grid_h) : int(i * grid_h + grid_h / 2),
                            int(j * grid_w) : int(j * grid_w + grid_w / 2)
                        ] = self.fill_value
                        if self.mode == 2:
                            # mode 2 additionally fills the bottom-right quarter
                            this_mask[
                                int(i * grid_h + grid_h / 2) : int(i * grid_h + grid_h),
                                int(j * grid_w + grid_w / 2) : int(j * grid_w + grid_w)
                            ] = self.fill_value
                if self.mode == 1:
                    # mode 1 keeps only the quarters that mode 0 would drop
                    this_mask = 1 - this_mask
                self.masks.append(this_mask)
                self.rand_h_max.append(grid_h)
                self.rand_w_max.append(grid_w)

    def apply(self, image, mask, rand_h, rand_w, angle, **params):
        """Multiply the image in place by a randomly shifted crop of the mask."""
        h, w = image.shape[:2]
        # NOTE(review): ``F`` here resolves to torch.nn.functional (the
        # albumentations ``functional as F`` import above is commented out),
        # which has no ``rotate`` — this line only works because the pipeline
        # below uses rotate=0 so the branch is never taken. Confirm before
        # enabling rotation.
        mask = F.rotate(mask, angle) if self.rotate[1] > 0 else mask
        mask = mask[:,:,np.newaxis] if image.ndim == 3 else mask
        image *= mask[rand_h:rand_h+h, rand_w:rand_w+w].astype(image.dtype)
        return image

    def get_params_dependent_on_targets(self, params):
        """Pick a mask and a random shift/angle for the incoming image."""
        img = params['image']
        height, width = img.shape[:2]
        self.init_masks(height, width)

        mid = np.random.randint(len(self.masks))
        mask = self.masks[mid]
        rand_h = np.random.randint(self.rand_h_max[mid])
        rand_w = np.random.randint(self.rand_w_max[mid])
        angle = np.random.randint(self.rotate[0], self.rotate[1]) if self.rotate[1] > 0 else 0

        return {'mask': mask, 'rand_h': rand_h, 'rand_w': rand_w, 'angle': angle}

    @property
    def targets_as_params(self):
        # apply() needs the image itself to size/shift the mask
        return ['image']

    def get_transform_init_args_names(self):
        return ('num_grid', 'fill_value', 'rotate', 'mode')
from albumentations import (
HorizontalFlip, VerticalFlip, IAAPerspective, ShiftScaleRotate, CLAHE, RandomRotate90,
Transpose, ShiftScaleRotate, Blur, OpticalDistortion, GridDistortion, HueSaturationValue,
IAAAdditiveGaussianNoise, GaussNoise, MotionBlur, MedianBlur, IAAPiecewiseAffine, RandomResizedCrop,
IAASharpen, IAAEmboss, RandomBrightnessContrast, Flip, OneOf, Compose, Normalize, Cutout, CoarseDropout, ShiftScaleRotate, CenterCrop, Resize
)
from albumentations.pytorch import ToTensorV2
def get_train_transforms():
    """Training augmentation pipeline (albumentations).

    One of Resize / CenterCrop / RandomResizedCrop is picked per sample,
    followed by flips, colour jitter, ImageNet normalisation and two
    occlusion augmentations (CoarseDropout + the custom GridMask).
    """
    return Compose([
        OneOf([
            Resize(CFG['img_size'], CFG['img_size'], p=1.),
            CenterCrop(CFG['img_size'], CFG['img_size'], p=1.),
            RandomResizedCrop(CFG['img_size'], CFG['img_size'], p=1.)
        ], p=1.),
        Transpose(p=0.5),
        HorizontalFlip(p=0.5),
        VerticalFlip(p=0.5),
        ShiftScaleRotate(p=0.5),
        HueSaturationValue(hue_shift_limit=0.2, sat_shift_limit=0.2, val_shift_limit=0.2, p=0.5),
        RandomBrightnessContrast(brightness_limit=(-0.1,0.1), contrast_limit=(-0.1, 0.1), p=0.5),
        Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225], max_pixel_value=255.0, p=1.0),
        CoarseDropout(p=0.5),
        GridMask(num_grid=3, p=0.5),
        ToTensorV2(p=1.0),
    ], p=1.)
def get_valid_transforms():
    """Deterministic validation pipeline: centre-crop, normalise, to tensor.

    NOTE(review): Resize follows a CenterCrop to the same square size, so it
    is a no-op here — presumably kept defensively; confirm before removing.
    """
    return Compose([
        CenterCrop(CFG['img_size'], CFG['img_size'], p=1.),
        Resize(CFG['img_size'], CFG['img_size']),
        Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225], max_pixel_value=255.0, p=1.0),
        ToTensorV2(p=1.0),
    ], p=1.)
def get_inference_transforms():
    """Test-time-augmentation pipeline: random crop/transpose/flip variants
    (no colour jitter) so repeated inference passes can be averaged."""
    return Compose([
        OneOf([
            Resize(CFG['img_size'], CFG['img_size'], p=1.),
            CenterCrop(CFG['img_size'], CFG['img_size'], p=1.),
            RandomResizedCrop(CFG['img_size'], CFG['img_size'], p=1.)
        ], p=1.),
        Transpose(p=0.5),
        HorizontalFlip(p=0.5),
        VerticalFlip(p=0.5),
        Resize(CFG['img_size'], CFG['img_size']),
        Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225], max_pixel_value=255.0, p=1.0),
        ToTensorV2(p=1.0),
    ], p=1.)
```
# Model
```
class CassvaImgClassifier(nn.Module):
    """Image classifier: a timm backbone whose final linear head is replaced
    so the output dimension equals *n_class*."""

    def __init__(self, model_arch, n_class, pretrained=False):
        super().__init__()
        # timm builds the backbone by name (e.g. 'tf_efficientnet_b4_ns')
        self.model = timm.create_model(model_arch, pretrained=pretrained)
        n_features = self.model.classifier.in_features
        # swap the ImageNet head for a fresh n_class-way linear layer
        self.model.classifier = nn.Linear(n_features, n_class)

    def forward(self, x):
        x = self.model(x)
        return x
```
# For FixMatch Unlabeled DataLoader
```
#######
# Build an "unlabeled" frame from the 2019-competition images: one row per
# file with a dummy label of 1 (labels are ignored on the unlabeled branch).
o = os.listdir('./input/cassava-disease/all/')
o = np.array([o]).T
label_col = np.ones_like(o)
o = np.concatenate((o,label_col),axis=1)
unlabeled = pd.DataFrame(o,columns=['image_id','label'])
unlabeled.head()
# NOTE(review): this discards the frame built above and reuses the labeled
# train set as the "unlabeled" pool — confirm this is intentional (the
# commented-out loader below suggests the 2019 set was the original plan).
unlabeled = train
import PIL
import PIL.ImageOps
import PIL.ImageEnhance
import PIL.ImageDraw
from PIL import Image
PARAMETER_MAX = 10
# --- PIL-based ops for RandAugment (FixMatch-style pool). Each op takes a
# PIL image plus a raw magnitude v in [0, PARAMETER_MAX], rescaled via
# _float_parameter / _int_parameter before use. ---

def AutoContrast(img, **kwarg):
    """Maximise image contrast (magnitude is ignored)."""
    return PIL.ImageOps.autocontrast(img)

def Brightness(img, v, max_v, bias=0):
    """Adjust brightness by a factor derived from v."""
    v = _float_parameter(v, max_v) + bias
    return PIL.ImageEnhance.Brightness(img).enhance(v)

def Color(img, v, max_v, bias=0):
    """Adjust colour saturation by a factor derived from v."""
    v = _float_parameter(v, max_v) + bias
    return PIL.ImageEnhance.Color(img).enhance(v)

def Contrast(img, v, max_v, bias=0):
    """Adjust contrast by a factor derived from v."""
    v = _float_parameter(v, max_v) + bias
    return PIL.ImageEnhance.Contrast(img).enhance(v)

def Cutout(img, v, max_v, bias=0):
    """Erase a random square whose side is v-scaled relative to the image.

    NOTE(review): this shadows the albumentations ``Cutout`` imported above;
    the albumentations pipelines use ``CoarseDropout`` so nothing breaks,
    but the shadowing is worth knowing about.
    """
    if v == 0:
        return img
    v = _float_parameter(v, max_v) + bias
    v = int(v * min(img.size))
    return CutoutAbs(img, v)

def CutoutAbs(img, v, **kwarg):
    """Erase a v×v square at a random position, filled with mid-gray."""
    w, h = img.size
    x0 = np.random.uniform(0, w)
    y0 = np.random.uniform(0, h)
    x0 = int(max(0, x0 - v / 2.))
    y0 = int(max(0, y0 - v / 2.))
    x1 = int(min(w, x0 + v))
    y1 = int(min(h, y0 + v))
    xy = (x0, y0, x1, y1)
    # gray
    color = (127, 127, 127)
    img = img.copy()
    PIL.ImageDraw.Draw(img).rectangle(xy, color)
    return img

def Equalize(img, **kwarg):
    """Histogram-equalize the image (magnitude is ignored)."""
    return PIL.ImageOps.equalize(img)

def Identity(img, **kwarg):
    """Return the image unchanged."""
    return img

def Invert(img, **kwarg):
    """Invert pixel values (magnitude is ignored)."""
    return PIL.ImageOps.invert(img)
def Posterize(img, v, max_v, bias=0):
    """Reduce the number of bits per colour channel."""
    v = _int_parameter(v, max_v) + bias
    return PIL.ImageOps.posterize(img, v)

def Rotate(img, v, max_v, bias=0):
    """Rotate by a v-scaled angle, randomly negated."""
    v = _int_parameter(v, max_v) + bias
    if random.random() < 0.5:
        v = -v
    return img.rotate(v)

def Sharpness(img, v, max_v, bias=0):
    """Adjust sharpness by a factor derived from v."""
    v = _float_parameter(v, max_v) + bias
    return PIL.ImageEnhance.Sharpness(img).enhance(v)

def ShearX(img, v, max_v, bias=0):
    """Shear horizontally by a v-scaled factor, randomly negated."""
    v = _float_parameter(v, max_v) + bias
    if random.random() < 0.5:
        v = -v
    return img.transform(img.size, PIL.Image.AFFINE, (1, v, 0, 0, 1, 0))

def ShearY(img, v, max_v, bias=0):
    """Shear vertically by a v-scaled factor, randomly negated."""
    v = _float_parameter(v, max_v) + bias
    if random.random() < 0.5:
        v = -v
    return img.transform(img.size, PIL.Image.AFFINE, (1, 0, 0, v, 1, 0))

def Solarize(img, v, max_v, bias=0):
    """Invert all pixels above a v-derived threshold."""
    v = _int_parameter(v, max_v) + bias
    return PIL.ImageOps.solarize(img, 256 - v)
def SolarizeAdd(img, v, max_v, bias=0, threshold=128):
    """Add a signed v-derived offset to all pixels, clip to [0, 255], then
    solarize at *threshold*."""
    v = _int_parameter(v, max_v) + bias
    if random.random() < 0.5:
        v = -v
    # BUG FIX: np.int was removed in NumPy 1.24 — use the builtin int
    # (the alias meant exactly the builtin, so behavior is unchanged).
    img_np = np.array(img).astype(int)
    img_np = img_np + v
    img_np = np.clip(img_np, 0, 255)
    img_np = img_np.astype(np.uint8)
    img = Image.fromarray(img_np)
    return PIL.ImageOps.solarize(img, threshold)
def TranslateX(img, v, max_v, bias=0):
    """Translate horizontally by a v-scaled fraction of the width, randomly negated."""
    v = _float_parameter(v, max_v) + bias
    if random.random() < 0.5:
        v = -v
    v = int(v * img.size[0])
    return img.transform(img.size, PIL.Image.AFFINE, (1, 0, v, 0, 1, 0))

def TranslateY(img, v, max_v, bias=0):
    """Translate vertically by a v-scaled fraction of the height, randomly negated."""
    v = _float_parameter(v, max_v) + bias
    if random.random() < 0.5:
        v = -v
    v = int(v * img.size[1])
    return img.transform(img.size, PIL.Image.AFFINE, (1, 0, 0, 0, 1, v))
def _float_parameter(v, max_v):
    """Map a raw magnitude v in [0, PARAMETER_MAX] to a float in [0, max_v]."""
    return max_v * float(v) / PARAMETER_MAX
def _int_parameter(v, max_v):
    """Map a raw magnitude v in [0, PARAMETER_MAX] to an int in [0, max_v]."""
    scaled = v * max_v / PARAMETER_MAX
    return int(scaled)
class RandAugmentMC(object):
    """RandAugment as used by FixMatch: apply *n* random ops at magnitude < *m*,
    then always finish with a fixed-size cutout."""

    def __init__(self, n, m):
        assert n >= 1
        assert 1 <= m <= 10
        self.n = n  # number of ops sampled per image
        self.m = m  # magnitude upper bound (exclusive in __call__)
        self.augment_pool = fixmatch_augment_pool()

    def __call__(self, img):
        ops = random.choices(self.augment_pool, k=self.n)
        for op, max_v, bias in ops:
            # random magnitude in [1, m); each sampled op fires with prob 0.5
            v = np.random.randint(1, self.m)
            if random.random() < 0.5:
                img = op(img, v=v, max_v=max_v, bias=bias)
        # unconditional cutout with side = half the training resolution
        img = CutoutAbs(img, int(CFG['img_size']*0.5))
        return img
def fixmatch_augment_pool():
    """Return the FixMatch op pool as (function, max_v, bias) triples.

    max_v/bias parameterise each op's magnitude range; ops paired with
    ``None`` ignore the magnitude entirely (AutoContrast, Equalize, Identity).
    """
    # FixMatch paper
    augs = [(AutoContrast, None, None),
            (Brightness, 0.9, 0.05),
            (Color, 0.9, 0.05),
            (Contrast, 0.9, 0.05),
            (Equalize, None, None),
            (Identity, None, None),
            (Posterize, 4, 4),
            (Rotate, 30, 0),
            (Sharpness, 0.9, 0.05),
            (ShearX, 0.3, 0),
            (ShearY, 0.3, 0),
            (Solarize, 256, 0),
            (TranslateX, 0.3, 0),
            (TranslateY, 0.3, 0)]
    return augs
class TransformFixMatch(object):
    """Produce the FixMatch (weakly, strongly) augmented pair for one image.

    ``__call__`` returns ``(weak, strong)`` — in that order; callers must
    unpack accordingly. Both branches share flip + reflect-padded random
    crop; the strong branch adds RandAugmentMC; both end with
    ToTensor + Normalize.
    """

    def __init__(self, mean, std):
        # weak branch: flip + random crop only
        self.weak = transforms.Compose([
            transforms.RandomHorizontalFlip(),
            transforms.RandomCrop(size=CFG['img_size'],
                                  padding=int(32*0.125),
                                  padding_mode='reflect')])
        # strong branch: same geometry plus RandAugment
        self.strong = transforms.Compose([
            transforms.RandomHorizontalFlip(),
            transforms.RandomCrop(size=CFG['img_size'],
                                  padding=int(32*0.125),
                                  padding_mode='reflect'),
            RandAugmentMC(n=2, m=10)])
        self.normalize = transforms.Compose([
            transforms.ToTensor(),
            transforms.Normalize(mean=mean, std=std)])

    def __call__(self, x):
        weak = self.weak(x)
        strong = self.strong(x)
        return self.normalize(weak), self.normalize(strong)
class CassavaDataset_ul(Dataset):
    """Unlabeled-branch dataset: yields PIL images for the FixMatch pipeline.

    Unlike ``CassavaDataset`` (cv2/numpy), this opens images with PIL so
    ``TransformFixMatch`` (torchvision transforms) can produce the
    (weak, strong) augmentation pair.

    Parameters
    ----------
    df : DataFrame with an ``image_id`` column; a ``label`` column is only
        required when ``output_label=True``.
    data_root : directory containing the image files.
    transforms : callable applied directly to the PIL image.
    output_label : when True ``__getitem__`` returns ``(img, target)``.
    """

    def __init__(self, df, data_root,
                 transforms=None,
                 output_label=True,
                 ):
        super().__init__()
        self.df = df.reset_index(drop=True).copy()
        self.transforms = transforms
        self.data_root = data_root
        self.output_label = output_label
        # BUG FIX: only read the 'label' column when labels are requested,
        # so a truly unlabeled frame doesn't need the column at all.
        self.labels = self.df['label'].values if output_label else None

    def __len__(self):
        return self.df.shape[0]

    def __getitem__(self, index: int):
        img = Image.open("{}/{}".format(self.data_root, self.df.loc[index]['image_id']))

        if self.transforms:
            img = self.transforms(img)

        if self.output_label:
            return img, self.labels[index]
        return img
from torch.utils.data import RandomSampler
######################## TODO: switch this to the 2019 dataset!!!
# unlabeled_dataset = CassavaDataset_ul(unlabeled, './input/cassava-disease/all', transforms=TransformFixMatch(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]))
# Unlabeled loader currently reads from the 2020 train images (see the
# commented-out 2019 variant above).
unlabeled_dataset = CassavaDataset_ul(unlabeled, './input/cassava-leaf-disease-classification/train_images/', transforms=TransformFixMatch(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]))
train_loader_ul = torch.utils.data.DataLoader(
    unlabeled_dataset,
    sampler = RandomSampler(unlabeled_dataset),
    # FixMatch uses mu-times more unlabeled samples per step
    batch_size=CFG['train_bs'] * CFG['mu'],
    pin_memory=False,
    drop_last=True,
    num_workers=CFG['num_workers'],
)
def interleave(x, size):
    """Interleave *size* equal groups of rows so they alternate along dim 0.

    Used by FixMatch to mix labeled and unlabeled samples in one batch so
    BatchNorm statistics are computed over a representative mixture.
    """
    tail = list(x.shape)[1:]
    grouped = x.reshape([-1, size] + tail)
    return grouped.transpose(0, 1).reshape([-1] + tail)
def de_interleave(x, size):
    """Inverse of ``interleave``: restore the original group-major row order."""
    tail = list(x.shape)[1:]
    regrouped = x.reshape([size, -1] + tail)
    return regrouped.transpose(0, 1).reshape([-1] + tail)
# train_loader_ul = iter(train_loader_ul)
# (inputs_u_w, inputs_u_s), _ = train_loader_ul.next()
# print(len(inputs_u_s), len(inputs_u_w))
```
# Training APIs
```
def prepare_dataloader(df, trn_idx, val_idx, data_root='./input/cassava-leaf-disease-classification/train_images/'):
    """Build the labeled train/validation DataLoaders for one CV fold.

    trn_idx / val_idx are row indices into *df* as produced by
    ``StratifiedKFold.split``; each split gets its own augmentation pipeline.
    """
    # from catalyst.data.sampler import BalanceClassSampler

    train_ = df.loc[trn_idx,:].reset_index(drop=True)
    valid_ = df.loc[val_idx,:].reset_index(drop=True)

    train_ds = CassavaDataset(train_, data_root, transforms=get_train_transforms(), output_label=True)
    valid_ds = CassavaDataset(valid_, data_root, transforms=get_valid_transforms(), output_label=True)

    train_loader = torch.utils.data.DataLoader(
        train_ds,
        batch_size=CFG['train_bs'],
        pin_memory=False,
        drop_last=True,###
        shuffle=True,
        num_workers=CFG['num_workers'],
        #sampler=BalanceClassSampler(labels=train_['label'].values, mode="downsampling")
    )
    val_loader = torch.utils.data.DataLoader(
        valid_ds,
        batch_size=CFG['valid_bs'],
        num_workers=CFG['num_workers'],
        shuffle=False,
        pin_memory=False,
    )
    return train_loader, val_loader
def train_one_epoch(epoch, model, loss_fn, optimizer, train_loader,
                    unlabeled_trainloader, device, scheduler=None,
                    schd_batch_update=False):
    """Run one FixMatch training epoch (mixed precision + grad accumulation).

    Supervised batches come from ``train_loader``; for each step a batch of
    (weak, strong) augmented unlabeled images is drawn (cyclically) from
    ``unlabeled_trainloader`` and the FixMatch consistency loss is added.
    Relies on the module-level ``scaler`` (GradScaler) and ``CFG``.
    """
    model.train()

    running_loss = None
    unlabeled_iter = iter(unlabeled_trainloader)

    for step, (imgs, image_labels) in enumerate(train_loader):
        imgs = imgs.float()
        image_labels = image_labels.to(device).long()

        # BUG FIX: TransformFixMatch.__call__ returns (weak, strong); the
        # original unpacked the pair as (inputs_u_s, inputs_u_w), so
        # pseudo-labels were computed from the *strongly* augmented view —
        # the inverse of FixMatch. Also replaced the removed Py2-style
        # ``.next()`` and the bare ``except`` with next()/StopIteration.
        try:
            (inputs_u_w, inputs_u_s), _ = next(unlabeled_iter)
        except StopIteration:
            unlabeled_iter = iter(unlabeled_trainloader)
            (inputs_u_w, inputs_u_s), _ = next(unlabeled_iter)

        # Interleave labeled and unlabeled samples so BatchNorm statistics
        # are computed over a representative mixture.
        inputs = interleave(
            torch.cat((imgs, inputs_u_w, inputs_u_s)), 2 * CFG['mu'] + 1
        ).contiguous().to(device)

        with autocast():
            image_preds = model(inputs)
            logits = de_interleave(image_preds, 2 * CFG['mu'] + 1)
            logits_x = logits[:CFG['train_bs']]
            logits_u_w, logits_u_s = logits[CFG['train_bs']:].chunk(2)
            del logits

            # supervised (label-smoothed) loss
            Lx = loss_fn(logits_x, image_labels)

            # pseudo-labels from the weak view; consistency loss on the
            # strong view, masked by the confidence threshold
            pseudo_label = torch.softmax(logits_u_w.detach() / CFG['T'], dim=-1)
            max_probs, targets_u = torch.max(pseudo_label, dim=-1)
            mask = max_probs.ge(CFG['threshold']).float()
            Lu = (F.cross_entropy(logits_u_s, targets_u,
                                  reduction='none') * mask).mean()

            loss = Lx + CFG['lambda_u'] * Lu

        scaler.scale(loss).backward()

        # exponential moving average of the loss for monitoring
        if running_loss is None:
            running_loss = loss.item()
        else:
            running_loss = running_loss * .99 + loss.item() * .01

        # optimizer step every accum_iter batches (gradient accumulation)
        if ((step + 1) % CFG['accum_iter'] == 0) or ((step + 1) == len(train_loader)):
            scaler.step(optimizer)
            scaler.update()
            optimizer.zero_grad()

            if scheduler is not None and schd_batch_update:
                scheduler.step()

    if scheduler is not None and not schd_batch_update:
        scheduler.step()
def valid_one_epoch(epoch, model, loss_fn, val_loader, device, scheduler=None, schd_loss_update=False):
    """Evaluate *model* on *val_loader* and print multi-class accuracy.

    Does not disable gradients itself — callers wrap this in
    ``torch.no_grad()``. Optionally steps *scheduler* (on the mean loss when
    ``schd_loss_update`` is True, e.g. ReduceLROnPlateau).
    """
    model.eval()

    t = time.time()
    loss_sum = 0
    sample_num = 0
    image_preds_all = []
    image_targets_all = []

    # pbar = tqdm(enumerate(val_loader), total=len(val_loader))
    for step, (imgs, image_labels) in enumerate(val_loader):
        imgs = imgs.to(device).float()
        image_labels = image_labels.to(device).long()

        image_preds = model(imgs)   #output = model(input)
        image_preds_all += [torch.argmax(image_preds, 1).detach().cpu().numpy()]
        image_targets_all += [image_labels.detach().cpu().numpy()]

        loss = loss_fn(image_preds, image_labels)

        # sample-weighted running loss (batches may differ in size)
        loss_sum += loss.item()*image_labels.shape[0]
        sample_num += image_labels.shape[0]

        # if ((step + 1) % CFG['verbose_step'] == 0) or ((step + 1) == len(val_loader)):
        #     description = f'epoch {epoch} loss: {loss_sum/sample_num:.4f}'
        #     pbar.set_description(description)

    image_preds_all = np.concatenate(image_preds_all)
    image_targets_all = np.concatenate(image_targets_all)
    print('epoch = {}'.format(epoch+1), 'validation multi-class accuracy = {:.4f}'.format((image_preds_all==image_targets_all).mean()))

    if scheduler is not None:
        if schd_loss_update:
            scheduler.step(loss_sum/sample_num)
        else:
            scheduler.step()
def inference_one_epoch(model, data_loader, device):
    """Return softmax class probabilities for every sample in *data_loader*.

    Runs the model in eval mode under ``torch.no_grad`` and concatenates
    per-batch probability arrays into one (n_samples, n_classes) numpy array.
    """
    model.eval()
    batch_probs = []
    # pbar = tqdm(enumerate(data_loader), total=len(data_loader))
    with torch.no_grad():
        for imgs, _ in data_loader:
            preds = model(imgs.to(device).float())
            batch_probs.append(torch.softmax(preds, 1).detach().cpu().numpy())
    return np.concatenate(batch_probs, axis=0)
# reference: https://www.kaggle.com/c/siim-isic-melanoma-classification/discussion/173733
class MyCrossEntropyLoss(_WeightedLoss):
    """Cross entropy against *soft* target distributions.

    Unlike ``nn.CrossEntropyLoss`` (which takes class indices), ``targets``
    here is a per-class probability/weight tensor with the same shape as
    ``inputs``. Supports optional per-class weighting and the usual
    'mean' / 'sum' / none reductions.
    """

    def __init__(self, weight=None, reduction='mean'):
        super().__init__(weight=weight, reduction=reduction)
        self.weight = weight
        self.reduction = reduction

    def forward(self, inputs, targets):
        log_probs = F.log_softmax(inputs, -1)
        if self.weight is not None:
            # per-class weighting, broadcast over the batch dimension
            log_probs = log_probs * self.weight.unsqueeze(0)
        per_sample = -(targets * log_probs).sum(-1)
        if self.reduction == 'sum':
            return per_sample.sum()
        if self.reduction == 'mean':
            return per_sample.mean()
        return per_sample
# ====================================================
# Label Smoothing
# ====================================================
class LabelSmoothingLoss(nn.Module):
    """Cross entropy with label smoothing.

    The hard one-hot target is replaced by a distribution putting
    ``1 - smoothing`` on the true class and spreading ``smoothing``
    uniformly over the remaining ``classes - 1`` classes.
    """

    def __init__(self, classes, smoothing=0.0, dim=-1):
        super(LabelSmoothingLoss, self).__init__()
        self.confidence = 1.0 - smoothing
        self.smoothing = smoothing
        self.cls = classes
        self.dim = dim

    def forward(self, pred, target):
        log_probs = pred.log_softmax(dim=self.dim)
        with torch.no_grad():
            # build the smoothed target distribution without tracking grads
            smooth_target = torch.full_like(log_probs, self.smoothing / (self.cls - 1))
            smooth_target.scatter_(1, target.data.unsqueeze(1), self.confidence)
        return torch.mean(torch.sum(-smooth_target * log_probs, dim=self.dim))
```
# Main Loop
```
from torchcontrib.optim import SWA
from sklearn.metrics import accuracy_score
os.environ['CUDA_VISIBLE_DEVICES'] = '0' # specify GPUs locally
# #debug
# train = pd.read_csv('./input/cassava-leaf-disease-classification/train_debug.csv')
# CFG['epochs']=4
# CFG['model_path'] = 'temporary'
# !mkdir -p temporary
# model_path='v2_fixmatch'
# !mkdir -p v2_fixmatch
if __name__ == '__main__':
    # Add one out-of-fold probability column per class (filled per fold below).
    for c in range(5):
        train[c] = 0
    # Stratified folds so each fold mirrors the overall label distribution.
    folds = StratifiedKFold(n_splits=CFG['fold_num'], shuffle=True, random_state=CFG['seed']).split(np.arange(train.shape[0]), train.label.values)
    for fold, (trn_idx, val_idx) in enumerate(folds):
        print('Training with {} started'.format(fold))

        print(len(trn_idx), len(val_idx))
        train_loader, val_loader = prepare_dataloader(train, trn_idx, val_idx, data_root='./input/cassava-leaf-disease-classification/train_images/')
        # the unlabeled loader is shared across folds
        unlabeled_trainloader = train_loader_ul

        device = torch.device(CFG['device'])

        model = CassvaImgClassifier(CFG['model_arch'], train.label.nunique(), pretrained=True).to(device)
        # module-level scaler — train_one_epoch reads this global
        scaler = GradScaler()
        # SWA wraps Adam: averaging starts after swa_start steps
        base_opt = torch.optim.Adam(model.parameters(), lr=CFG['lr'], weight_decay=CFG['weight_decay'])
        optimizer = SWA(base_opt, swa_start=5, swa_freq=1, swa_lr=1e-4)
        scheduler = torch.optim.lr_scheduler.CosineAnnealingWarmRestarts(optimizer, T_0=CFG['T_0'], T_mult=1, eta_min=CFG['min_lr'], last_epoch=-1)

        # smoothed loss for training, plain CE for validation reporting
        loss_tr = LabelSmoothingLoss(classes=CFG['target_size'], smoothing=CFG['smoothing']).to(device)
        loss_fn = nn.CrossEntropyLoss().to(device)

        for epoch in range(CFG['epochs']):
            train_one_epoch(epoch, model, loss_tr, optimizer, train_loader, unlabeled_trainloader, device, scheduler=scheduler, schd_batch_update=False)

            with torch.no_grad():
                valid_one_epoch(epoch, model, loss_fn, val_loader, device, scheduler=None, schd_loss_update=False)

            # torch.save(model.state_dict(),'./model9_2/{}_fold_{}_{}_{}'.format(CFG['model_arch'], fold, epoch, seed))

        # swap in the SWA-averaged weights and recompute BN statistics
        optimizer.swap_swa_sgd()
        optimizer.bn_update(train_loader, model, device)
        del train_loader, unlabeled_trainloader

        # final validation + checkpoint of the averaged model
        with torch.no_grad():
            valid_one_epoch(epoch, model, loss_fn, val_loader, device, scheduler=None, schd_loss_update=False)
        torch.save(model.state_dict(),'./v2_fixmatch/swa_{}_fold_{}_{}'.format(CFG['model_arch'], fold, epoch))

        # 5-round TTA: average softmax probabilities into the OOF columns
        tst_preds = []
        for tta in range(5):
            tst_preds += [inference_one_epoch(model, val_loader, device)]
        train.loc[val_idx, [0, 1, 2, 3, 4]] = np.mean(tst_preds, axis=0)

        del model, optimizer, val_loader, scaler, scheduler
        torch.cuda.empty_cache()

    # overall out-of-fold accuracy across all folds
    train['pred'] = np.array(train[[0, 1, 2, 3, 4]]).argmax(axis=1)
    print(accuracy_score(train['label'].values, train['pred'].values))
```
| github_jupyter |
```
%matplotlib inline
```
# Customize Basic Pruner
Users can easily customize a basic pruner in NNI. A large number of basic modules have been provided and can be reused.
Follow the NNI pruning interface, users only need to focus on their creative parts without worrying about other regular modules.
In this tutorial, we show how to customize a basic pruner.
## Concepts
NNI abstracts the basic pruning process into three steps, collecting data, calculating metrics, allocating sparsity.
Most pruning algorithms rely on a metric to decide where should be pruned. Using L1 norm pruner as an example,
the first step is collecting model weights, the second step is calculating L1 norm for weight per output channel,
the third step is ranking L1 norm metric and masking the output channels that have small L1 norm.
In NNI's basic pruner, these three steps are implemented as ``DataCollector``, ``MetricsCalculator`` and ``SparsityAllocator``.
- ``DataCollector``: This module takes the pruner as an initialization parameter.
It gets the relevant information about the model from the pruner,
and sometimes it also hooks the model to capture the input, output or gradient of a layer or tensor.
It can also patch the optimizer if special steps need to be executed before or after ``optimizer.step()``.
- ``MetricsCalculator``: This module will take the data collected from the ``DataCollector``,
then calculate the metrics. The metric shape is usually reduced from the data shape.
The ``dim`` taken by ``MetricsCalculator`` means which dimension will be kept after calculate metrics.
i.e., the collected data shape is (10, 20, 30), and the ``dim`` is 1, then the dimension-1 will be kept,
the output metrics shape should be (20,).
- ``SparsityAllocator``: This module take the metrics and generate the masks.
Different ``SparsityAllocator`` has different masks generation strategies.
A common and simple strategy is sorting the metrics' values and calculating a threshold according to the configured sparsity,
mask the positions which metric value smaller than the threshold.
The ``dim`` taken by ``SparsityAllocator`` means the metrics are for which dimension, the mask will be expanded to weight shape.
i.e., the metric shape is (20,), the corresponding layer weight shape is (20, 40), and the ``dim`` is 0.
``SparsityAllocator`` will first generate a mask with shape (20,), then expand this mask to shape (20, 40).
## Simple Example: Customize a Block-L1NormPruner
NNI already has an L1NormPruner, but to reproduce the paper and keep the number of user configuration items small,
it only supports pruning layer output channels. In this example, we will customize a pruner that supports block granularity for Linear layers.
Note that you don't need to implement all these three kinds of tools for each time,
NNI supports many predefined tools, and you can directly use these to customize your own pruner.
This is a tutorial so we show how to define all these three kinds of pruning tools.
Customize the pruning tools used by the pruner at first.
```
import torch
from nni.algorithms.compression.v2.pytorch.pruning.basic_pruner import BasicPruner
from nni.algorithms.compression.v2.pytorch.pruning.tools import (
DataCollector,
MetricsCalculator,
SparsityAllocator
)
# This data collector collects weight in wrapped module as data.
# The wrapped module is the module configured in pruner's config_list.
# This implementation is similar as nni.algorithms.compression.v2.pytorch.pruning.tools.WeightDataCollector
class WeightDataCollector(DataCollector):
    """Collect the current weight tensor of every wrapped (configured) module.

    The wrapped modules are the ones selected by the pruner's config_list.
    This mirrors nni.algorithms.compression.v2.pytorch.pruning.tools.WeightDataCollector.
    """

    def collect(self):
        # get_modules_wrapper() returns {wrapper_name: wrapper}; the original
        # module is reachable through wrapper.module.
        wrappers = self.compressor.get_modules_wrapper().values()
        # {wrapper_name: weight_tensor}
        return {wrapper.name: wrapper.module.weight.data for wrapper in wrappers}
class BlockNormMetricsCalculator(MetricsCalculator):
    """Compute one L1-norm metric per weight block of shape ``block_sparse_size``."""

    def __init__(self, block_sparse_size):
        # dim=None keeps every dimension; the granularity comes entirely
        # from the block size.
        super().__init__(dim=None, block_sparse_size=block_sparse_size)

    def calculate_metrics(self, data):
        n_dims = len(self.block_sparse_size)
        # After unfolding each of the first n_dims dimensions, the elements of
        # every block live in dimensions [n_dims, 2 * n_dims); the L1 norm is
        # reduced over exactly those dimensions.
        block_dims = list(range(n_dims, 2 * n_dims))
        metrics = {}
        for name, tensor in data.items():
            for axis, size in enumerate(self.block_sparse_size):
                tensor = tensor.unfold(axis, size, size)
            metrics[name] = tensor.norm(dim=block_dims, p=1)
        # {wrapper_name: block_metric}
        return metrics
# This implementation is similar as nni.algorithms.compression.v2.pytorch.pruning.tools.NormalSparsityAllocator
class BlockSparsityAllocator(SparsityAllocator):
    """Generate block-granularity masks from block metrics.

    Similar to nni.algorithms.compression.v2.pytorch.pruning.tools.NormalSparsityAllocator,
    but the mask is expanded block-wise to the weight shape.
    """

    def __init__(self, pruner, block_sparse_size):
        super().__init__(pruner, dim=None, block_sparse_size=block_sparse_size, continuous_mask=True)

    def generate_sparsity(self, metrics):
        """Return {wrapper_name: {'weight': mask}} for all wrapped modules."""
        masks = {}
        for name, wrapper in self.pruner.get_modules_wrapper().items():
            # wrapper.config['total_sparsity'] is the configured sparsity ratio
            # for this wrapped module.
            sparsity_rate = wrapper.config['total_sparsity']
            # Block metric for this wrapped module.
            metric = metrics[name]
            # Mask the metric with the old mask, so already-masked positions
            # never recover (fine to keep as-is if you are new to NNI pruning).
            if self.continuous_mask:
                metric *= self._compress_mask(wrapper.weight_mask)
            # Convert the sparsity ratio to a number of blocks to prune.
            prune_num = int(sparsity_rate * metric.numel())
            if prune_num == 0:
                # Nothing to prune for this module: keep every block.
                # (Bug fix: torch.topk with k=0 returns an empty tensor and
                # calling .max() on it raises a RuntimeError.)
                mask = torch.ones_like(metric)
            else:
                # Threshold = largest metric among the prune_num smallest ones.
                threshold = torch.topk(metric.view(-1), prune_num, largest=False)[0].max()
                # Keep positions whose metric is strictly greater than threshold.
                mask = torch.gt(metric, threshold).type_as(metric)
            # Expand the block mask to the weight shape: a masked block becomes
            # a zero-filled region, an unmasked one is filled with ones.
            masks[name] = self._expand_mask(name, mask)
            # Merge with the old mask so masked positions never recover.
            if self.continuous_mask:
                masks[name]['weight'] *= wrapper.weight_mask
        return masks
```
Customize the pruner.
```
class BlockL1NormPruner(BasicPruner):
    """L1-norm pruner with block granularity.

    Wires together the three customized tools defined above: weight
    collection, per-block L1 metrics, and block-wise mask generation.
    """

    def __init__(self, model, config_list, block_sparse_size):
        # Store the block shape before calling super().__init__, so it is
        # already available if the base constructor invokes reset_tools()
        # (the ordering here suggests it does -- see BasicPruner).
        self.block_sparse_size = block_sparse_size
        super().__init__(model, config_list)

    # Implement reset_tools is enough for this pruner.
    def reset_tools(self):
        # Lazily create each tool on first use; only the data collector
        # supports being reset in place.
        if self.data_collector is None:
            self.data_collector = WeightDataCollector(self)
        else:
            self.data_collector.reset()
        if self.metrics_calculator is None:
            self.metrics_calculator = BlockNormMetricsCalculator(self.block_sparse_size)
        if self.sparsity_allocator is None:
            self.sparsity_allocator = BlockSparsityAllocator(self, self.block_sparse_size)
```
Try this pruner.
```
# Define a simple model.
class TestModel(torch.nn.Module):
    """Tiny two-layer MLP (4 -> 8 -> 4) used to exercise the pruner."""

    def __init__(self) -> None:
        super().__init__()
        self.fc1 = torch.nn.Linear(4, 8)
        self.fc2 = torch.nn.Linear(8, 4)

    def forward(self, x):
        hidden = self.fc1(x)
        return self.fc2(hidden)
# Prune every Linear layer to 50% total sparsity using the custom pruner.
model = TestModel()
config_list = [{'op_types': ['Linear'], 'total_sparsity': 0.5}]
# use 2x2 block
_, masks = BlockL1NormPruner(model, config_list, [2, 2]).compress()
# show the generated masks (1 = kept weight, 0 = pruned weight)
print('fc1 masks:\n', masks['fc1']['weight'])
print('fc2 masks:\n', masks['fc2']['weight'])
```
This time we successfully define a new pruner with pruning block granularity!
Note that we don't put validation logic in this example, like ``_validate_config_before_canonical``,
but for a robust implementation, we suggest you involve the validation logic.
| github_jupyter |
### This script takes as input the location of a beacon in X and Y coordinates and the bin size of a histogram. It already knows the size of the arena and will produce a histogram/matrix of numbers for occupancy/area per bin.
```
import matplotlib.pyplot as plt
import numpy as np
import matplotlib.patches as patches
from PIL import Image
import matplotlib.image as mpimg
import pandas as pd
import math
figures = 'C:/Users/Fabian/Desktop/Analysis/Round3_FS03_FS06/Figures/'
processed= 'C:/Users/Fabian/Desktop/Analysis/Round3_FS03_FS06/processed/'
```
### TODO:
1. Figure out making normalization without saving picture each time.
2. For each change of beacon make a histogram and multiply by given normalization
3. Make large array to produce for histogram making
### 1. input arena size
```
cut = 0 # keeping the cut where rectangle of arena ends
X_cut_min = -.59
Y_cut_max = 1.61
X_cut_max = .12
Y_cut_min = .00
print("area %s M*2" %((X_cut_max-X_cut_min)*(Y_cut_max-Y_cut_min)))
fig, ax1 = plt.subplots(1, 1, sharex=True,dpi=300, sharey=True)
fig.suptitle("Normalization visual")
ax1.plot(-.45,.4,"go")
rectangle = patches.Rectangle((-.59,0), .71,1.61, color="green")
ax1.add_patch(rectangle)
color=iter(plt.cm.rainbow(np.linspace(0,1,10)))
for i in reversed(range(10)):
c=next(color)
patch = patches.Circle((-.45,.4), radius=.15*i,color=c)
ax1.add_patch(patch)
patch.set_clip_path(rectangle)
ax1.axis("equal")
plt.show()
```
## 2. Now define the area of circles mathematically
## a. Get the area of each circle without subtracting the rectangle
## I tried a mathematical approach on paper, but there were at least 4 other combinations for which it would have to be calculated. So it is possible, but it would take too long to write the code; hence I decided to use the generated pictures and estimate the pixels using different grayscale values.
```
def visualization(center=(-.45,.4),X_cut_min = -.59,Y_cut_max = 1.61,X_cut_max = .12,Y_cut_min = .00 ):
    """Plot the arena rectangle with concentric circular bands around ``center``.

    The circles are clipped to the rectangle so the visible coloured bands
    can be exported and their areas estimated later by pixel counting.

    Parameters
    ----------
    center : tuple of float
        (x, y) position of the beacon the bands are drawn around.
    X_cut_min, Y_cut_max, X_cut_max, Y_cut_min : float
        Arena rectangle bounds (metres).
    """
    fig, ax1 = plt.subplots(1, 1, sharex=True,dpi=400,)
    fig.suptitle("Normalization visual")
    # Mark the band centre.
    ax1.plot(center[0],center[1],"go")
    # NOTE(review): the width abs(X_cut_min)+abs(X_cut_max) equals
    # X_cut_max - X_cut_min only while X_cut_min <= 0 <= X_cut_max --
    # confirm before reusing with other bounds.
    rectangle = patches.Rectangle((X_cut_min,Y_cut_min), (abs(X_cut_min)+abs(X_cut_max)),Y_cut_max , color="green")
    ax1.add_patch(rectangle)
    color=iter(plt.cm.rainbow(np.linspace(0,1,10)))
    # Draw the largest circle first so smaller circles overlay it, producing
    # ring-shaped bands.
    for i in reversed(range(10)):
        c=next(color)
        patch = patches.Circle((center[0],center[1]), radius=.15*i,color=c)
        ax1.add_patch(patch)
        # Clip each circle to the arena rectangle.
        patch.set_clip_path(rectangle)
    ax1.axis("equal")
    return plt.show()
visualization()
def fig2img(fig):
    """Render a Matplotlib figure into an in-memory PNG and open it as a PIL Image."""
    import io
    # Keep the buffer open: PIL reads from it lazily after Image.open().
    buffer = io.BytesIO()
    fig.savefig(buffer)
    buffer.seek(0)
    return Image.open(buffer)
k=np.linspace(0,.99,20)
k
def visualization_grey(center=(-.45,.4),dpi=500,X_cut_min = -.59,Y_cut_max = 1.61,X_cut_max = .12,Y_cut_min = .00,bands=20 ):
    """Render grayscale distance bands clipped to the arena and histogram the pixels.

    Draws concentric circles in increasing gray levels around ``center``,
    clipped to the arena rectangle, saves the rendering, then histograms the
    rendered pixel values to estimate the visible area of each band.

    Returns
    -------
    (hist, bins)
        Arrays from np.histogram over the rendered image.
    """
    fig, ax1 = plt.subplots(1, 1, sharex=True,dpi=dpi,)
    fig.patch.set_visible(False)
    #fig.suptitle("Normalization visual")
    #ax1.plot(center[0],center[1],"go")
    # White arena rectangle: serves as background and as the clip path.
    rectangle = patches.Rectangle((X_cut_min,Y_cut_min), (abs(X_cut_min)+abs(X_cut_max)),Y_cut_max , color="white")
    ax1.add_patch(rectangle)
    #color=iter(plt.cm.binary(np.linspace(0,.99,bands+1)))
    # Evenly spaced gray levels in [0, .99]; matplotlib accepts them as strings.
    color = np.linspace(0,.99,bands+1)
    #BoundaryNorm((1,249), 20 [, clip, extend])
    # Largest (darkest-indexed) circle first so smaller circles overlay it.
    # NOTE(review): the loop count (20) and radius step (.075) are hard-coded,
    # so ``bands`` only changes the gray levels -- confirm before calling with
    # bands != 20.
    for i in reversed(range(20)):
        c=color[i]
        #print(color[i])
        #print(str(c))
        patch = patches.Circle((center[0],center[1]), radius=.075*i,color=str(c))
        ax1.add_patch(patch)
        patch.set_clip_path(rectangle)
    ax1.axis("equal")
    ax1.axis("off")
    mng = plt.get_current_fig_manager()
    mng.full_screen_toggle()
    fig.savefig(figures + 'norm_graph.png', dpi=dpi, transparent=True)
    #ax[0][0].imshow()
    #img = fig2img(fig)
    # NOTE(review): tostring_rgb() requires a drawn canvas; savefig above
    # normally triggers a draw, but an explicit fig.canvas.draw() would be safer.
    img= Image.frombytes('RGB',fig.canvas.get_width_height(),fig.canvas.tostring_rgb())
    image_array = np.asarray(img)
    #binning= np.bincount(image_array.ravel())
    #print(binning)
    # One histogram bin per band; the range stops at 249 to exclude the
    # pure-white (255) figure background.
    hist, bins = np.histogram(image_array,bins=bands,range=(0,249))
    plt.show()
    plt.hist(image_array.ravel(), bins=bands, range=(0,249))
    width = 0.7 * (bins[1] - bins[0])
    # NOTE(review): this rebinding shadows the ``center`` parameter.
    center = (bins[:-1] + bins[1:]) / 2
    plt.bar(center, hist, align='center', width=width)
    plt.show()
    return hist,bins #img
hist,bins = visualization_grey()
hist,bins/249
# Build per-band normalization weights (each band's fraction of total pixels).
norm= []
int(sum(hist))  # total pixel count across bands (displayed only, not stored)
# NOTE(review): ``counts`` is not defined at this point in the notebook -- it
# is produced by a plt.hist call in a later cell, so this cell only runs
# correctly after that cell has been executed (out-of-order execution).
for count in counts:
    k= count/int(sum(counts))
    norm.append(k)
len(norm),len(bins)  # sanity check the weight count against the bin edges
arr = np.asarray(img)
plt.imshow(arr, cmap='gray', vmin=0, vmax=255)
plt.show()
plt.imshow(image_array)
plt.colorbar()
plt.show()
np.histogram(image_array)
hist, bins = np.histogram(image_array,bins=19,range=(0,249))
width = 0.7 * (bins[1] - bins[0])
center = (bins[:-1] + bins[1:]) / 2
plt.bar(center, hist, align='center', width=width)
plt.show()
hist,bins/19
#plt.hist(image_array[:,:,0])
plt.hist(image_array.ravel(), bins=18, range=(0,249))
k=np.random.random(100)
plt.hist(np.random.random(100))
print(k)
print(img.histogram())
plt.hist(img.histogram(),bins=18, range=(0.01, .99), fc='k', ec='w')
plt.show()
img = mpimg.imread(figures + 'norm_graph.png')
counts, bins, bars = plt.hist(img.ravel(), bins=18, range=(0.01, .99),)
print(img.ravel())
plt.hist(img.ravel(), bins=18, range=(0.01, .99), fc='k', ec='w')
plt.show()
imgplot = plt.imshow(img)
counts,bins
img.histogram()
norm= []
int(sum(counts))
for count in counts:
k= count/int(sum(counts))
norm.append(k)
norm
```
### Now take the counts and multiply the distributions correctly - so make a histogram for each time beacon changes
```
FS04=pd.read_excel(processed +'FS04_rears_new.xlsx', index_col=0)
FS04
def get_rear_distance_from_beacon(df_rears_corrected):
    """Euclidean distance between the rear position (columns 1, 2) and the
    beacon position (columns 4, 5) for every row of the DataFrame.

    Returns a list of floats, one per row, in row order.
    """
    return [
        math.sqrt((row[1] - row[4]) ** 2 + (row[2] - row[5]) ** 2)
        for _, row in df_rears_corrected.iterrows()
    ]
plt.hist(get_rear_distance_from_beacon(FS04))
def make_simple_graphs (animal_ID,rearing):
    """Plot raw and area-normalized histograms of rear distance from the beacon.

    Four stacked panels: visible-attempt counts, invisible-attempt counts, and
    the same two weighted by the module-level ``norm`` area weights. The figure
    is saved into the ``figures`` directory.

    Parameters
    ----------
    animal_ID : str
        Identifier used in the figure title and output filename.
    rearing : pandas.DataFrame
        Rearing events; must contain a 'Visibility' column (1/0) plus the
        position columns read by get_rear_distance_from_beacon.
    """
    binwidth=.075
    # NOTE(review): missing parentheses -- this line has no effect.
    plt.tight_layout
    bins = np.arange(0, 1.425, binwidth)
    interval=bins
    print(bins)
    # Force the second edge to exactly 0.075 (avoids arange float rounding).
    bins[1]= 0.075
    fig, ax = plt.subplots(4,dpi=800,sharex=False)
    fig.suptitle(animal_ID +' rearing distance from beacons ephys',y=1)
    # Panel 0: raw histogram of distances for visible-beacon attempts.
    N, bins, patches=ax[0].hist(get_rear_distance_from_beacon(rearing.loc[rearing['Visibility']==1]),bins=bins,ec='w')
    # NOTE(review): relies on the module-level ``norm``; N (one count per bin)
    # and norm must have matching lengths for N*norm to work -- confirm.
    print(len(norm))
    print(len(bins))
    print(N*norm)
    ax[0].set_title('Visible attempt')
    # Colour the first bar green, the rest blue.
    for i in range(0,1):
        patches[i].set_facecolor('g')
    for i in range(1, len(patches)):
        patches[i].set_facecolor('blue')
    fig.tight_layout(pad=1.5)
    # Panel 1: raw histogram for invisible-beacon attempts.
    N1, bins, patches=ax[1].hist(get_rear_distance_from_beacon(rearing.loc[rearing['Visibility']==0]),bins=bins,ec='w')
    print(N1*norm)
    ax[1].set_title('Invisible attempt')
    for i in range(0,1):
        patches[i].set_facecolor('g')
    for i in range(1, len(patches)):
        patches[i].set_facecolor('blue')
    fig.tight_layout(pad=1.5)
    # Panel 2: normalized visible counts drawn as a bar chart.
    # NOTE(review): ``interval`` has one more element (bin edges) than N*norm
    # (bin counts) -- confirm the lengths line up before relying on this plot.
    patches = ax[2].bar(interval, N*norm)
    ax[2].set_title('Visible attempt Normalized')
    for i in range(0,1):
        patches[i].set_facecolor('g')
    for i in range(1, len(patches)):
        patches[i].set_facecolor('blue')
    fig.tight_layout(pad=1.5)
    # Panel 3: NOTE(review) -- this histograms the *values* of N1*norm rather
    # than plotting them per bin (unlike panel 2's bar chart); confirm intended.
    number, bins, patches = ax[3].hist((N1*norm),bins=bins,ec='w')
    ax[3].set_title('invisible attempt Normalized')
    for i in range(0,1):
        patches[i].set_facecolor('g')
    for i in range(1, len(patches)):
        patches[i].set_facecolor('blue')
    fig.tight_layout(pad=1.5)
    plt.savefig('%srat_rearing_distance_from_beacons_norm%s.png'%(figures,animal_ID), dpi = 100)
make_simple_graphs('FS04' ,FS04)
```
| github_jupyter |
# 🌞 Example Weather App
> Using `prodb`
* Starting with a `.csv` of locations, we wish to call the [MetaWeather](https://www.metaweather.com/api/) API to pull in the weather forecast.
* The app needs the ability to input new locations by the user
🌍 Core functions will be converted into the streamlit app `weather_app.py`
```
# default_exp examples
#hide
from nbdev.showdoc import *
%load_ext autoreload
%autoreload 2
```
## 1. Requests to `metaweather` open weather API
```
#export
#hide
import requests
import json
import arrow
import pandas as pd
import sys; sys.path.append('../')
from prodb.core import generate_db, insert_row
#export
def get_json_from_query(location):
    """Search MetaWeather for a city and return the first matching location record."""
    search_url = f"https://www.metaweather.com/api/location/search/?query={location}"
    matches = requests.get(search_url).json()
    # The API returns a list of candidate locations; keep the best (first) hit.
    return matches[0]
l = get_json_from_query('London')
l
```
Check `woeid`(Where On Earth ID) is correct for London
```
assert l['woeid'] == 44418
```
Get weather data for the London `woeid`
```
utc = arrow.utcnow().format('YYYY/MM/DD')
utc
#export
def get_current_weather(location):
    """Return the full MetaWeather forecast JSON for a named location.

    inputs: location str "London"
            used to look up the woeid (Where On Earth ID), e.g. 44418
    """
    # Resolve the city name to its woeid first, then fetch the forecast.
    meta = get_json_from_query(location)
    forecast_url = f"https://www.metaweather.com/api/location/{meta['woeid']}/"
    return requests.get(forecast_url).json()
```
Today's forecast 🌤️
```
%%time
res = get_current_weather('London')
res['consolidated_weather'][0]
```
There are a number of metrological properties available to us from the API.
The ones we are most interested in are:
* `the_temp` 🌡️ current temperature
* `weather_state_name` ⛅ current sky condition
* `max_temp` 🥵 daily max temp
* `min_temp` 🥶 daily min temp
```
res = get_current_weather('London')
df = pd.DataFrame.from_records(res['consolidated_weather'][:1])
df['readable_time'] = df.created.apply(lambda x: arrow.get(x).humanize())
df
#export
def df_from_loc(location,
                days_ahead=1,
                keep_cols='location the_temp readable_time created applicable_date local_time latt_long weather_state_name min_temp max_temp'.split(' ')):
    """Return a one-row-per-forecast-day DataFrame for ``location``.

    Parameters
    ----------
    location : str
        City name passed to the MetaWeather search API.
    days_ahead : int
        Number of forecast days to keep (the API returns several).
    keep_cols : list of str
        Columns of the returned frame, in order. The default is built once at
        definition time but never mutated, so sharing it across calls is safe.
    """
    res = get_current_weather(location)
    # One record per forecast day, truncated to days_ahead.
    df = pd.DataFrame.from_records(res['consolidated_weather'][:days_ahead])
    df['location'] = location
    # Local time at the location, formatted HH:MM.
    df['local_time'] = pd.to_datetime(res['time']).strftime('%H:%M')
    df['latt_long'] = res['latt_long']
    # Human-readable age of each forecast record, e.g. "an hour ago".
    df['readable_time'] = df.created.apply(lambda x: arrow.get(x).humanize())
    return df[keep_cols]
df = df_from_loc('London', days_ahead=2)
df
```
Example output showing location and current weather situation
```
df = df_from_loc('Auckland')
df
```
# 🦄 Pro db
```
import sys
sys.path.append('../')
from prodb.core import generate_db, insert_row
```
### Initialise empty db
```
dbpath = 'weather_db.csv'
cols = 'location temp high low weather_state'.split()
generate_db(cols=cols, dbpath=dbpath)
df = pd.read_csv(dbpath)
df.head()
```
## Add row
```
dx = df_from_loc('Auckland')
dx
#export
def visit_city(df, cities, dbpath):
    """Fetch the current weather for each city and append one row per city to the db.

    Parameters
    ----------
    df : pandas.DataFrame
        In-memory copy of the prodb table.
    cities : str or list of str
        A single city name or a list of them.
    dbpath : str
        Path of the CSV database that insert_row persists to.

    Returns
    -------
    pandas.DataFrame
        The updated frame, rounded to 1 decimal place.
    """
    # Accept a single city name as a convenience.
    if isinstance(cities, str): cities = [cities]
    for city in cities:
        dx = df_from_loc(city).round(1)
        # NOTE(review): this dict includes keys (time_utc, local_time, latlong)
        # beyond the columns the db was initialised with above -- confirm
        # insert_row handles extra keys as intended.
        data = {'location': dx.location.item(),
                'time_utc': arrow.utcnow().format('YYYY-MM-DD HH:mm:ss'),
                'temp': dx.the_temp.item(),
                'high': dx.max_temp.item(),
                'low': dx.min_temp.item(),
                'weather_state': dx.weather_state_name.item(),
                'local_time': dx.local_time.item(),
                'latlong': dx.latt_long.item()}
        df = insert_row(df, data, dbpath)
    return df.round(1)
df = visit_city(df, ['Christchurch', 'Wellington', 'Lagos', 'Zagreb'], dbpath)
display(df)
df = visit_city(df, 'Yangon', dbpath)
display(df)
df = visit_city(df, ['Singapore', 'Alexandria', 'Bangkok'], dbpath)
display(df)
```
| github_jupyter |

[](https://colab.research.google.com/github/JohnSnowLabs/nlu/blob/master/examples//colab/Training/multi_lingual/binary_text_classification/NLU_multi_lingual_training_sentiment_classifier_demo_twitter.ipynb)
# Training a Sentiment Analysis Classifier with NLU
## 2 Class Twitter Sentiment Classifier Training
With the [SentimentDL model](https://nlp.johnsnowlabs.com/docs/en/annotators#sentimentdl-multi-class-sentiment-analysis-annotator) from Spark NLP you can achieve State Of the Art results on any multi class text classification problem
This notebook showcases the following features :
- How to train the deep learning classifier
- How to store a pipeline to disk
- How to load the pipeline from disk (Enables NLU offline mode)
* List item
* List item
You can achieve these results or even better on this dataset with training data :
<br>

You can achieve these results or even better on this dataset with test data :
<br>

# 1. Install Java 8 and NLU
```
!wget https://setup.johnsnowlabs.com/nlu/colab.sh -O - | bash
import nlu
```
# 2. Download twitter Sentiment dataset
https://www.kaggle.com/cosmos98/twitter-and-reddit-sentimental-analysis-dataset
#Context
This dataset was created as part of a university project on sentiment analysis of multi-source social media platforms using PySpark.
```
! wget http://ckl-it.de/wp-content/uploads/2021/02/twitter_data_multi_lang.csv
import pandas as pd
train_path = '/content/twitter_data_multi_lang.csv'
train_df = pd.read_csv(train_path)
train_df.test_sentences = train_df.test_sentences.astype(str)
# the text data to use for classification should be in a column named 'text'
# the label column must have name 'y' name be of type str
train_df= train_df[["text","y"]]
from sklearn.model_selection import train_test_split
train_df, test_df = train_test_split(train_df, test_size=0.2)
train_df
```
# 3. Train Deep Learning Classifier using nlu.load('train.sentiment')
You dataset label column should be named 'y' and the feature column with text data should be named 'text'
```
trainable_pipe = nlu.load('xx.embed_sentence.labse train.sentiment')
# We need to train longer and user smaller LR for NON-USE based sentence embeddings usually
# We could tune the hyperparameters further with hyperparameter tuning methods like gridsearch
# Also longer training gives more accuracy
trainable_pipe['sentiment_dl'].setMaxEpochs(60)
trainable_pipe['sentiment_dl'].setLr(0.005)
fitted_pipe = trainable_pipe.fit(train_df)
# predict with the trainable pipeline on dataset and get predictions
preds = fitted_pipe.predict(train_df,output_level='document')
#sentence detector that is part of the pipe generates sone NaNs. lets drop them first
preds.dropna(inplace=True)
from sklearn.metrics import classification_report
print(classification_report(preds['y'], preds['trained_sentiment']))
preds
```
# 3.1 evaluate on Test Data
```
preds = fitted_pipe.predict(test_df,output_level='document')
#sentence detector that is part of the pipe generates sone NaNs. lets drop them first
preds.dropna(inplace=True)
print(classification_report(preds['y'], preds['trained_sentiment']))
```
# 4. Test Model on 20 languages!
```
train_df = pd.read_csv("/content/twitter_data_multi_lang.csv")
preds = fitted_pipe.predict(train_df[["test_sentences","y"]].iloc[:100],output_level='document')
#sentence detector that is part of the pipe generates sone NaNs. lets drop them first
preds.dropna(inplace=True)
print(classification_report(preds['y'], preds['trained_sentiment']))
preds
```
# The Model understands English

```
fitted_pipe.predict("Congress's new policies made many people sad ")
fitted_pipe.predict("Congress's new policies made many people happy ")
```
# The Model understands German

```
# German for: 'Congress's newest polices made many people poor, sad and depressed '
fitted_pipe.predict("Die neue Politik des Kongresses machte viele Menschen arm, traurig und depressiv ")
# German for: 'Congress's newest polices made many people happy '
fitted_pipe.predict("Die neue Politik des Kongresses machte viele Menschen glücklich ")
```
# The Model understands Chinese

```
# Chinese for: 'Congress's newest polices made many people happy '
fitted_pipe.predict("国会的新政策使许多人感到高兴 ")
# Chinese for: 'Congress's newest polices made many people poor, sad and depressed '
fitted_pipe.predict("国会的新政策使许多人变得贫穷,悲伤和沮丧 ")
```
# Model understands Afrikaans

```
# Afrikaans for: 'Congress's newest polices made many people poor, sad and depressed '
fitted_pipe.predict("Die Kongres se nuwe beleid het baie mense arm, hartseer en depressief gemaak ")
# Afrikaans for: 'Congress's newest polices made many people happy '
fitted_pipe.predict("Die Kongres se nuwe beleid het baie mense gelukkig gemaak ")
```
# The model understands Japanese

```
# Japanese for: 'Congress's newest polices made many people poor, sad and depressed '
fitted_pipe.predict("議会の新しい政策は多くの人々を貧しく、悲しくそして落ち込んだものにしました ")
# Japanese for: 'Congress's newest polices made many people happy '
fitted_pipe.predict("議会の新しい政策は多くの人々を幸せにしました ")
```
# The Model understands Turkish

```
# Turkish for: 'Congress's newest polices made many people poor, sad and depressed '
fitted_pipe.predict("Kongrenin yeni politikaları birçok insanı fakir, hüzünlü ve depresif hale getirdi ")
# Turkish for: 'Congress's newest polices made many people happy '
fitted_pipe.predict("Kongrenin yeni politikaları birçok insanı mutlu etti ")
```
# The Model understands Hebrew

```
# Hebrew for: 'Congress's newest polices made many people poor, sad and depressed '
fitted_pipe.predict("המדיניות החדשה של הקונגרס גרמה לאנשים רבים להיות עניים, עצובים ומדוכאים ")
# Hebrew for: 'Congress's newest polices made many people happy '
fitted_pipe.predict("המדיניות החדשה של הקונגרס שימחה אנשים רבים ")
```
# The Model understands Telugu

```
# Telugu for: 'Congress's newest polices made many people poor, sad and depressed '
fitted_pipe.predict("కాంగ్రెస్ కొత్త విధానాలు చాలా మందిని పేదలుగా, విచారంగా, నిరాశకు గురి చేశాయి ")
# Telugu for: 'Congress's newest polices made many people happy '
fitted_pipe.predict("కాంగ్రెస్ కొత్త విధానాలు చాలా మందికి సంతోషాన్నిచ్చాయి ")
```
# Model understands Russian

```
# Russian for: 'Congress's newest polices made many people poor, sad and depressed '
fitted_pipe.predict("Новая политика Конгресса сделала многих людей бедными, грустными и подавленными ")
# Russian for: 'Congress's newest polices made many people happy '
fitted_pipe.predict("Новая политика Конгресса порадовала многих людей ")
```
# Model understands Urdu

```
# Urdu for: 'Congress's newest polices made many people poor, sad and depressed '
fitted_pipe.predict("کانگریس کی نئی پالیسیوں نے بہت سارے لوگوں کو غریب ، افسردہ اور افسردہ کردیا ")
# Urdu for: 'Congress's newest polices made many people happy '
fitted_pipe.predict("کانگریس کی نئی پالیسیوں نے بہت سارے لوگوں کو خوش کیا ")
```
# Model understands Hindi

```
# hindi for: 'Congress's newest polices made many people poor, sad and depressed '
fitted_pipe.predict("कांग्रेस की नई नीतियों ने कई लोगों को गरीब, दुखी और उदास बना दिया ")
# hindi for: 'Congress's newest polices made many people happy '
fitted_pipe.predict("कांग्रेस की नई नीतियों ने कई लोगों को खुश किया ")
```
# The Model understands French

```
# French for: 'Congress's newest polices made many people poor, sad and depressed '
fitted_pipe.predict("Les nouvelles politiques du Congrès ont rendu de nombreuses personnes pauvres, tristes et déprimées ")
# French for: 'Congress's newest polices made many people happy '
fitted_pipe.predict("Les nouvelles politiques du Congrès ont rendu de nombreuses personnes heureuses ")
```
# The Model understands Thai

```
# Thai for: 'Congress's newest polices made many people poor, sad and depressed '
fitted_pipe.predict("นโยบายใหม่ของสภาคองเกรสทำให้หลายคนยากจนเศร้าและหดหู่ ")
# Thai for: 'Congress's newest polices made many people happy '
fitted_pipe.predict("นโยบายใหม่ของสภาคองเกรสทำให้หลายคนพอใจ ")
```
# The Model understands Khmer

```
# Khmer for: 'Congress's newest polices made many people poor, sad and depressed '
fitted_pipe.predict("គោលនយោបាយថ្មីរបស់សភាបានធ្វើឱ្យប្រជាជនជាច្រើនក្រីក្រក្រៀមក្រំនិងធ្លាក់ទឹកចិត្ត ")
# Khmer for: 'Congress's newest polices made many people happy '
fitted_pipe.predict("គោលនយោបាយថ្មីរបស់សភាបានធ្វើឱ្យមនុស្សជាច្រើនសប្បាយរីករាយ ")
```
# The Model understands Yiddish

```
# Yiddish for: 'Congress's newest polices made many people poor, sad and depressed '
fitted_pipe.predict("קאָנגרעס ס נייַ פּאַלאַסיז געמאכט פילע מענטשן נעבעך, טרויעריק און דערשלאָגן ")
# Yiddish for: 'Congress's newest polices made many people happy '
fitted_pipe.predict("קאָנגרעס ס נייַ פּאַלאַסיז געמאכט פילע מענטשן צופרידן ")
```
# The Model understands Kyrgyz

```
# Kygrgyz for: 'Congress's newest polices made many people poor, sad and depressed '
fitted_pipe.predict("Конгресстин жаңы саясаты көптөгөн адамдарды жакыр, кайгыга чөгүп, көңүл чөгөттү ")
# Kygrgyz for: 'Congress's newest polices made many people happy '
fitted_pipe.predict("Конгресстин жаңы саясаты көпчүлүктү кубандырды ")
```
# The Model understands Tamil

```
# Tamil for: 'Congress's newest polices made many people poor, sad and depressed '
fitted_pipe.predict("காங்கிரசின் புதிய கொள்கைகள் பலரை ஏழைகளாகவும், சோகமாகவும், மனச்சோர்வடையச் செய்தன ")
# Tamil for: 'Congress's newest polices made many people happy '
fitted_pipe.predict("காங்கிரசின் புதிய கொள்கைகள் பலரை மகிழ்ச்சியடையச் செய்தன ")
```
# 5. Lets save the model
```
stored_model_path = './models/classifier_dl_trained'
fitted_pipe.save(stored_model_path)
```
# 6. Lets load the model from HDD.
This makes offline NLU usage possible!
You need to call nlu.load(path=path_to_the_pipe) to load a model/pipeline from disk.
```
stored_model_path = './models/classifier_dl_trained'
hdd_pipe = nlu.load(path=stored_model_path)
preds = hdd_pipe.predict('I am extremly depressed and down cause of school and just feel like ending my life...')
preds
hdd_pipe.print_info()
```
| github_jupyter |
Using SQL to update weather data
```
# TO DO LIST
#
# This is for my own purposes
#
# Streamline the code to update US and Global data in one run
# This might entail putting table names and perhaps other values
# into a dictionary
#
# Maybe make a function to generate column names automatically
# Maybe it would read the name of the most recent column,
# and then generate new column name up through maybe a few
# days prior to the current date.
#
# Create way of programatically adding JHU's new location updates without having to re-call API
# for preexisting locations
# sqlite3 library to run operations on the database
import sqlite3
gen_path = '/home/imants/gits/COVID-19/'
db_path = gen_path + 'sql/weather.db'
# connecting to the weather database
con = sqlite3.connect(db_path)
# establishing database cursor
cursorObj = con.cursor()
# Getting table names
table_names = cursorObj.execute("SELECT name FROM sqlite_master WHERE type='table';")
table_names = table_names.fetchall()
# Taking the table names that were fetched, and making a usable lists out of the values.
# Partition the fetched table names by suffix: tables named '*US' hold US
# data, everything else is treated as global.
US_names = []
global_names = []
for x in range(len(table_names)):
    # fetchall() returns 1-tuples, so table_names[x][0] is the name string.
    if (table_names[x][0][-2:] == 'US'):
        US_names.append(table_names[x][0])
    else:
        global_names.append(table_names[x][0])
print('US names', US_names)
print('Global names', global_names)
# Reading API key
key_path = '/home/imants/gits/COVID-19/key.txt'
api_file = open(key_path, 'r')
API_KEY = api_file.read()
api_file.close()
API_KEY = API_KEY.strip()
# Creating API call function
import pandas as pd
import numpy as np
import requests
import json
from datetime import datetime as dt
def call_api(lat, long, time):
    """Fetch the Dark Sky daily forecast for (lat, long) at a Unix timestamp.

    All three arguments must already be strings. Returns the decoded JSON
    response as a dict.
    """
    # Build the time-machine request URL (SI units; only daily data wanted).
    url = ''.join(['https://api.darksky.net/forecast/', API_KEY, '/',
                   lat, ',', long, ',', time,
                   '?exclude=currently,flags&units=si'])
    print(url)
    # `http` is the module-level session with retry handling configured above.
    response = http.get(url)
    # Decode the JSON body into a dict.
    return json.loads(response.content)
# Sorting out JSON response from API call
def sortOut (info):
    """Extract the daily weather fields from a Dark Sky JSON response.

    Parameters
    ----------
    info : dict
        Decoded JSON response; the fields are read from info['daily']['data'][0].

    Returns
    -------
    dict of str
        Weather values as strings; humidity, cloud cover and precipitation
        probability are scaled from fractions to percentages. If the response
        cannot be parsed, every value is the dummy string '-1000'.
    """
    # Dummy value used for every field when parsing fails.
    dummy = '-1000'
    # Fields pulled from the JSON response.
    return_values = {'tMax': '',
                     'tMin': '',
                     'hum': '',
                     'uvee': '',
                     'clouds': '',
                     'precip': '',
                     'dew': '',
                     'pressure': '',
                     'wind': '',
                     'ozone': '',
                     'sunrise': '',
                     'sunset': ''}
    try:
        # Shortcut into the single daily record.
        easy_info = info['daily']['data'][0]
        return_values['tMax'] = str(easy_info['temperatureHigh'])
        return_values['tMin'] = str(easy_info['temperatureLow'])
        return_values['hum'] = str(easy_info['humidity'] * 100)
        return_values['uvee'] = str(easy_info['uvIndex'])
        return_values['clouds'] = str(easy_info['cloudCover'] * 100)
        return_values['precip'] = str(easy_info['precipProbability'] * 100)
        return_values['dew'] = str(easy_info['dewPoint'])
        return_values['pressure'] = str(easy_info['pressure'])
        return_values['wind'] = str(easy_info['windSpeed'])
        return_values['ozone'] = str(easy_info['ozone'])
        return_values['sunrise'] = str(easy_info['sunriseTime'])
        return_values['sunset'] = str(easy_info['sunsetTime'])
    except (KeyError, IndexError, TypeError):
        # Bug fix: the original bare ``except:`` swallowed everything,
        # including KeyboardInterrupt and programming errors such as
        # NameError. Catch only the exceptions a malformed/partial API
        # response can raise, and keep the best-effort dummy fallback.
        print('Error encountered')
        for x in return_values:
            return_values[x] = dummy
    return return_values
# Updating US data
# Going through tables and adding data to new dates
import pandas as pd
import numpy as np
import requests
import json
from datetime import datetime as dt
# Trying to capture sqlite error messages
# found https://stackoverflow.com/questions/25371636/how-to-get-sqlite-result-error-codes-in-python
import traceback
import sys
# Code to increase number of retries on connection errors,
# and also to give it some time.
# Found on https://stackoverflow.com/questions/15431044/can-i-set-max-retries-for-requests-request
# And https://findwork.dev/blog/advanced-usage-python-requests-timeouts-retries-hooks/
from urllib3.util.retry import Retry
from requests.adapters import HTTPAdapter
s = requests.Session()
retries = Retry(total=30,
backoff_factor=0.1,
status_forcelist=[ 429, 500, 502, 503, 504 ],
method_whitelist=["HEAD", "GET", "OPTIONS"])
adapter = HTTPAdapter(max_retries=retries)
http = requests.Session()
http.mount("https://", adapter)
http.mount("http://", adapter)
# Making list of columns that need to be updated from prior update, above
columns_to_update = []
for x in range(1, 16):
day_str = str(x)
date_str = '"9/' + day_str + '/20"'
columns_to_update.append(date_str)
# Counter
counter = 0
# value for determining when to break
break_counter = 11000
# Mapping of US weather table -> key in the dict returned by sortOut();
# used below to issue one UPDATE per table instead of twelve copied blocks.
# US names: [0: 'cloud_US', 1: 'dew_US', 2: 'humidity_US', 3: 'ozone_US', 4: 'precip_US', 5: 'pressure_US',
#            6: 'sunrise_US', 7: 'sunset_US', 8: 'tMax_US', 9: 'tMin_US', 10: 'uv_US', 11: 'wind_US']
us_table_fields = [('cloud_US', 'clouds'),
                   ('dew_US', 'dew'),
                   ('humidity_US', 'hum'),
                   ('ozone_US', 'ozone'),
                   ('precip_US', 'precip'),
                   ('pressure_US', 'pressure'),
                   ('sunrise_US', 'sunrise'),
                   ('sunset_US', 'sunset'),
                   ('tMax_US', 'tMax'),
                   ('tMin_US', 'tMin'),
                   ('uv_US', 'uvee'),
                   ('wind_US', 'wind')]
# Iterating through the columns that need to be updated
for day in columns_to_update:
    print(day)
    # Querying all the latitudes and longitudes for the given day
    query_str = 'SELECT Lat, Long_, {} FROM {}'.format(day, US_names[0])
    print(query_str)
    cursorObj.execute(query_str)
    # Create a Unix timestamp (seconds since the epoch) out of the date
    # column; strip the quotation marks from the column name first.
    col_date = day.strip('"')
    print(col_date)
    # integer division avoids the float round-trip of int(value / 10**9)
    t = str(pd.to_datetime(col_date).value // 10**9)
    print(t)
    for row in cursorObj:
        print(row[0])
        print(row[1])
        print(row[2])
        latitude = row[0]
        longitude = row[1]
        print('Latitude: ', latitude, 'Longitude: ', longitude)
        # Only fetch weather for cells not filled in yet, so the run can be
        # resumed without re-spending API calls.
        if row[2] is None:
            print('Cell is Null')
            # Calling API based on the row's latitude, longitude, and the day in question
            info = call_api(str(latitude), str(longitude), t)
            counter = counter + 1
            # putting the JSON response into a dictionary
            weatherData = sortOut(info)
            try:
                updateCursor = con.cursor()
                # For querying for the specific longitude and latitude of the location
                where_str = 'Lat = "{}" AND Long_ = "{}"'.format(latitude, longitude)
                # One UPDATE per weather table for this (lat, long, date) cell.
                for table, field in us_table_fields:
                    update_str = 'UPDATE {} SET {} = {} WHERE {}'.format(table, day, weatherData[field], where_str)
                    updateCursor.execute(update_str)
                con.commit()
                print(update_str)
            # displaying error
            except sqlite3.Error as er:
                print('SQLite error: %s' % (' '.join(er.args)))
                print("Exception class is: ", er.__class__)
                print('SQLite traceback: ')
                exc_type, exc_value, exc_tb = sys.exc_info()
                print(traceback.format_exception(exc_type, exc_value, exc_tb))
        else:
            print('Cell has a value')
        if counter == break_counter:
            break
    if counter == break_counter:
        break
# closing connection to database
# (the original closed the connection twice; once is enough)
con.close()
# Updating GLOBAL data
# Going through tables and adding data to new dates
import pandas as pd
import numpy as np
import requests
import json
from datetime import datetime as dt
# Trying to capture sqlite error messages
# found https://stackoverflow.com/questions/25371636/how-to-get-sqlite-result-error-codes-in-python
import traceback
import sys
# Code to increase number of retries on connection errors,
# and also to give it some time.
# Found on https://stackoverflow.com/questions/15431044/can-i-set-max-retries-for-requests-request
# And https://findwork.dev/blog/advanced-usage-python-requests-timeouts-retries-hooks/
from urllib3.util.retry import Retry
from requests.adapters import HTTPAdapter

# urllib3 >= 1.26 renamed Retry's `method_whitelist` argument to
# `allowed_methods` (the old name was removed entirely in urllib3 2.0),
# so try the new spelling first and fall back for older installs.
_retry_kwargs = dict(total=30,
                     backoff_factor=0.1,
                     status_forcelist=[429, 500, 502, 503, 504])
try:
    retries = Retry(allowed_methods=["HEAD", "GET", "OPTIONS"], **_retry_kwargs)
except TypeError:
    retries = Retry(method_whitelist=["HEAD", "GET", "OPTIONS"], **_retry_kwargs)
adapter = HTTPAdapter(max_retries=retries)
# NOTE(review): the retry adapter only applies to requests issued through
# this `http` session -- presumably call_api() uses it; confirm.
# (The unused extra session `s = requests.Session()` was removed.)
http = requests.Session()
http.mount("https://", adapter)
http.mount("http://", adapter)
# Column names for the dates being updated (9/1/20 - 9/15/20); the embedded
# double quotes are required because the column names contain slashes.
columns_to_update = ['"9/{}/20"'.format(d) for d in range(1, 16)]
# Counter of API calls made so far
counter = 0
# Cap on the number of API calls for this run
break_counter = 11000
# Mapping of global weather table -> key in the dict returned by sortOut();
# used below to issue one UPDATE per table instead of twelve copied blocks.
# Global names: ['cloud_Global', 'dew_Global', 'humidity_Global', 'ozone_Global', 'precip_Global', 'pressure_Global',
#                'sunrise_Global', 'sunset_Global', 'tMax_Global', 'tMin_Global', 'uv_Global', 'wind_Global']
global_table_fields = [('cloud_Global', 'clouds'),
                       ('dew_Global', 'dew'),
                       ('humidity_Global', 'hum'),
                       ('ozone_Global', 'ozone'),
                       ('precip_Global', 'precip'),
                       ('pressure_Global', 'pressure'),
                       ('sunrise_Global', 'sunrise'),
                       ('sunset_Global', 'sunset'),
                       ('tMax_Global', 'tMax'),
                       ('tMin_Global', 'tMin'),
                       ('uv_Global', 'uvee'),
                       ('wind_Global', 'wind')]
# Iterating through the columns that need to be updated
for day in columns_to_update:
    print(day)
    # Querying all the latitudes and longitudes for the given day.
    # Note: the global tables use column "Long", the US tables use "Long_".
    query_str = 'SELECT Lat, Long, {} FROM {}'.format(day, global_names[0])
    print(query_str)
    cursorObj.execute(query_str)
    # Create a Unix timestamp (seconds since the epoch) out of the date
    # column; strip the quotation marks from the column name first.
    col_date = day.strip('"')
    print(col_date)
    # integer division avoids the float round-trip of int(value / 10**9)
    t = str(pd.to_datetime(col_date).value // 10**9)
    print(t)
    for row in cursorObj:
        print(row[0])
        print(row[1])
        print(row[2])
        latitude = row[0]
        longitude = row[1]
        print('Latitude: ', latitude, 'Longitude: ', longitude)
        # Only fetch weather for cells not filled in yet, so the run can be
        # resumed without re-spending API calls.
        if row[2] is None:
            print('Cell is Null')
            # Calling API based on the row's latitude, longitude, and the day in question
            info = call_api(str(latitude), str(longitude), t)
            counter = counter + 1
            # putting the JSON response into a dictionary
            weatherData = sortOut(info)
            try:
                updateCursor = con.cursor()
                # For querying for the specific longitude and latitude of the location
                where_str = 'Lat = "{}" AND Long = "{}"'.format(latitude, longitude)
                # One UPDATE per weather table for this (lat, long, date) cell.
                for table, field in global_table_fields:
                    update_str = 'UPDATE {} SET {} = {} WHERE {}'.format(table, day, weatherData[field], where_str)
                    updateCursor.execute(update_str)
                con.commit()
                print(update_str)
            # displaying error
            except sqlite3.Error as er:
                print('SQLite error: %s' % (' '.join(er.args)))
                print("Exception class is: ", er.__class__)
                print('SQLite traceback: ')
                exc_type, exc_value, exc_tb = sys.exc_info()
                print(traceback.format_exception(exc_type, exc_value, exc_tb))
        else:
            print('Cell has a value')
        if counter == break_counter:
            break
    # NOTE(review): the outer loop breaks at 200 calls while the inner loop
    # uses break_counter (11000) -- this asymmetry looks like a leftover from
    # a smaller test run; confirm it is intentional.
    if counter == 200:
        break
# closing connection to database
con.close()
# Adding date columns to every table.
# The current last date is 8/31/20, so append 9/1/20 through 9/15/20.
for day_num in range(1, 16):
    date_col = '"9/{}/20"'.format(day_num)
    print(date_col)
    # The same column is added to the US tables first, then the global
    # tables, preserving the original execution order.
    for table in US_names + global_names:
        alter_stmt = 'ALTER TABLE {} ADD COLUMN {} text'.format(table, date_col)
        cursorObj.execute(alter_stmt)
        print(alter_stmt)
# Setting cell values to null
import sqlite3

gen_path = '/home/imants/gits/COVID-19/'
db_path = gen_path + 'sql/weather.db'
# connecting to the weather database
con = sqlite3.connect(db_path)
# establishing database cursor
cursorObj = con.cursor()

import pandas as pd
import numpy as np
import requests
import json
from datetime import datetime as dt
# Trying to capture sqlite error messages
# found https://stackoverflow.com/questions/25371636/how-to-get-sqlite-result-error-codes-in-python
import traceback
import sys

# Fetch every table name in the database; rows come back as 1-tuples.
table_names = cursorObj.execute("SELECT name FROM sqlite_master WHERE type='table';").fetchall()
# Partition the fetched names into US tables (names ending in 'US')
# and global tables (everything else).
US_names = [name for (name,) in table_names if name[-2:] == 'US']
global_names = [name for (name,) in table_names if name[-2:] != 'US']
# Column names for the dates to be cleared (8/13/20 - 8/31/20); the double
# quotes are part of the column identifier because of the slashes.
columns_to_update = ['"8/{}/20"'.format(d) for d in range(13, 32)]
# Counter
counter = 0
# Iterating through the columns that need to be updated.
# The twelve US weather tables whose new date columns get cleared to NULL
# (one UPDATE per table instead of twelve copied statement blocks).
us_tables = ['cloud_US', 'dew_US', 'humidity_US', 'ozone_US', 'precip_US',
             'pressure_US', 'sunrise_US', 'sunset_US', 'tMax_US', 'tMin_US',
             'uv_US', 'wind_US']
for day in columns_to_update:
    print(day)
    # Querying all the latitudes and longitudes for the given day
    query_str = 'SELECT Lat, Long_, {} FROM {}'.format(day, US_names[0])
    print(query_str)
    cursorObj.execute(query_str)
    # Create a Unix timestamp out of the date column (printed for logging
    # only -- no API call is made in this pass); strip the quotation marks
    # from the column name first.
    col_date = day.strip('"')
    print(col_date)
    t = str(pd.to_datetime(col_date).value // 10**9)
    print(t)
    for row in cursorObj:
        print(row[0])
        print(row[1])
        print(row[2])
        latitude = row[0]
        longitude = row[1]
        print('Latitude: ', latitude, 'Longitude: ', longitude)
        counter = counter + 1
        try:
            updateCursor = con.cursor()
            # For querying for the specific longitude and latitude of the location
            where_str = 'Lat = "{}" AND Long_ = "{}"'.format(latitude, longitude)
            # Clear this (lat, long, date) cell in every US weather table.
            for table in us_tables:
                update_str = 'UPDATE {} SET {} = {} WHERE {}'.format(table, day, 'NULL', where_str)
                updateCursor.execute(update_str)
            con.commit()
            print(update_str)
        # displaying error
        except sqlite3.Error as er:
            print('SQLite error: %s' % (' '.join(er.args)))
            print("Exception class is: ", er.__class__)
            print('SQLite traceback: ')
            exc_type, exc_value, exc_tb = sys.exc_info()
            print(traceback.format_exception(exc_type, exc_value, exc_tb))
        if counter == 1000:
            break
    if counter == 1000:
        break
con.close()
```
| github_jupyter |
# Frequentist Inference Case Study - Part B
## Learning objectives
Welcome to Part B of the Frequentist inference case study! The purpose of this case study is to help you apply the concepts associated with Frequentist inference in Python. In particular, you'll practice writing Python code to apply the following statistical concepts:
* the _z_-statistic
* the _t_-statistic
* the difference and relationship between the two
* the Central Limit Theorem, including its assumptions and consequences
* how to estimate the population mean and standard deviation from a sample
* the concept of a sampling distribution of a test statistic, particularly for the mean
* how to combine these concepts to calculate a confidence interval
In the previous notebook, we used only data from a known normal distribution. **You'll now tackle real data, rather than simulated data, and answer some relevant real-world business problems using the data.**
## Hospital medical charges
Imagine that a hospital has hired you as their data scientist. An administrator is working on the hospital's business operations plan and needs you to help them answer some business questions.
In this assignment notebook, you're going to use frequentist statistical inference on a data sample to answer the questions:
* has the hospital's revenue stream fallen below a key threshold?
* are patients with insurance really charged different amounts than those without?
Answering that last question with a frequentist approach makes some assumptions, and requires some knowledge, about the two groups.
We are going to use some data on medical charges obtained from [Kaggle](https://www.kaggle.com/easonlai/sample-insurance-claim-prediction-dataset).
For the purposes of this exercise, assume the observations are the result of random sampling from our single hospital. Recall that in the previous assignment, we introduced the Central Limit Theorem (CLT), and its consequence that the distributions of sample statistics approach a normal distribution as $n$ increases. The amazing thing about this is that it applies to the sampling distributions of statistics that have been calculated from even highly non-normal distributions of data! Recall, also, that hypothesis testing is very much based on making inferences about such sample statistics. You're going to rely heavily on the CLT to apply frequentist (parametric) tests to answer the questions in this notebook.
```
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from scipy.stats import t
from scipy import stats
from numpy.random import seed
# Load the Kaggle medical-charges sample into a DataFrame.
medical = pd.read_csv('insurance2.csv')
# Quick sanity checks: dimensions, first rows, and summary statistics.
medical.shape
medical.head()
medical.describe()
```
__Q1:__ Plot the histogram of charges and calculate the mean and standard deviation. Comment on the appropriateness of these statistics for the data.
__A:__ Calculate the population mean and standard deviation
```
# Histogram of all charges, labeled for readability.
_ = plt.hist(medical.charges)
_ = plt.xlabel('Charges ($)')
_ = plt.ylabel('Patients')
_ = plt.title('Avg Patient charges')
# np.std() defaults to ddof=0, i.e. the population standard deviation.
print("Mean for the data is:{0:f}".format(np.mean(medical.charges)))
print("Standard Deviation is:{0:f}".format(np.std(medical.charges)))
```
Calculate the sample mean and standard deviation to see what the distribution looks like. Take sample size = 50
```
def sampler(n):
    """Draw a random sample of n charges (with replacement) from the data."""
    return np.random.choice(medical.charges, n)

# Fix the RNG so the drawn sample is reproducible.
seed(47)
# Draw one sample of 50 charges and plot its distribution.
charge_sample = sampler(50)
_ = plt.hist(charge_sample, bins=10)
_ = plt.xlabel('Charges($)')
_ = plt.ylabel('Patients')
_ = plt.title('Avg Patient Charges')
```
__Q2:__ The administrator is concerned that the actual average charge has fallen below 12,000, threatening the hospital's operational model. On the assumption that these data represent a random sample of charges, how would you justify that these data allow you to answer that question? And what would be the most appropriate frequentist test, of the ones discussed so far, to apply?
__A:__
Since the sample distribution follows the same pattern as the population distribution, and since that distribution is not normal, apply **bootstrapping** rather than computing a confidence interval directly as a frequentist test. Estimate the uncertainty of a statistic (i.e. the mean) by generating a large family of samples (of size 100) from the one we have, then characterize the distribution of the mean over that family. Note: draw samples randomly, with replacement.
For our example, the population is not too large and the distribution is not bi-modal
Let the sample size be 100 (n = 100) and perform bootstrapping over be 1000 replications (range)
https://stats.stackexchange.com/questions/33300/determining-sample-size-necessary-for-bootstrap-method-proposed-method
```
medical.sample(100, replace = True).describe()

# Bootstrapped distribution of the mean charge.
def bootstrapping(n):
    """Return a DataFrame with 1000 bootstrap replicates of the mean charge,
    each computed from a resample of size n drawn with replacement."""
    return pd.DataFrame({'meancharge': [medical.sample(n, replace=True).charges.mean()
                                        for i in range(1000)]})

s = bootstrapping(n=100)
s.meancharge.hist(histtype='step')
plt.axvline(medical.charges.mean(), color='C1')
# Extract the 95% interval endpoints.
# BUG FIX: the lower quantile was 0.0225; a symmetric 95% interval needs
# 0.025 paired with 0.975.
s.meancharge.quantile(0.025), s.meancharge.quantile(0.975)
print("The bootstrapped sample's average patient cost is: ${0:f}".format(s.meancharge.mean()))
```
**Conclusion** - As you can see, when you draw random samples as part of bootstrapping, the average medical charge per patient is clearly > 12,000, which is not a huge concern for the hospital's operating model. As you change the sample size, the margin of error increases, but the sample mean is still > 12K
__Q3:__ Given the nature of the administrator's concern, what is the appropriate confidence interval in this case? A ***one-sided*** or ***two-sided*** interval? (Refresh your understanding of this concept on p. 399 of the *AoS*). Calculate the critical value and the relevant 95% confidence interval for the mean, and comment on whether the administrator should be concerned.
__A:__ Considering the distribution of sample data, it is a one-sided interval
```
# One-tailed t-test of a bootstrap sample mean against the full-data mean.
# Reference - https://stackoverflow.com/questions/17559897/python-p-value-from-t-statistic
pop_mean = np.mean(medical.charges)
n = 100
# BUG FIX: the mean and std were previously computed from two *different*
# random resamples; draw the sample once and compute both from it.
sample = medical.charges.sample(n, replace=True)
sample_mean = np.mean(sample)
# Use the sample standard deviation (ddof=1) for a t statistic.
sample_sigma = np.std(sample, ddof=1)
tt = (sample_mean - pop_mean) / (sample_sigma / np.sqrt(float(n)))  # n = 100 as in the bootstrapping function
pval_one_sided = stats.t.sf(np.abs(tt), n - 1)
print("t-statistic: {0:f}".format(tt))
print("p-value: {0:f}".format(pval_one_sided))
# Calculate the 95% confidence interval for a resampled mean.
import statsmodels.stats.api as sms
a = 1.0 * np.array(medical.charges.sample(100, replace = True))
sms.DescrStatsW(a).tconfint_mean()
```
**Conclusion** - With the calculation of the confidence interval based on bootstrapping, it seems that the average cost per patient could fall below 12,000 in certain samples drawn with replacement, which is a concern for the hospital administration
The administrator then wants to know whether people with insurance really are charged a different amount to those without.
__Q4:__ State the null and alternative hypothesis here. Use the _t_-test for the difference between means, where the pooled standard deviation of the two groups is given by:
\begin{equation}
s_p = \sqrt{\frac{(n_0 - 1)s^2_0 + (n_1 - 1)s^2_1}{n_0 + n_1 - 2}}
\end{equation}
and the *t*-test statistic is then given by:
\begin{equation}
t = \frac{\bar{x}_0 - \bar{x}_1}{s_p \sqrt{1/n_0 + 1/n_1}}.
\end{equation}
(If you need some reminding of the general definition of ***t-statistic***, check out the definition on p. 404 of *AoS*).
What assumption about the variances of the two groups are we making here?
__A:__
We will calculate a t-test as the inferential statistic to determine whether there is a significant difference between the two groups in terms of average cost per patient
- 1st group - patients with insurance
- 2nd group - patients without insurance
**Assumption** we will use here is that there is no difference between the variance of 2 sampling groups drawn from the population
__Q5:__ Perform this hypothesis test both manually, using the above formulae, and then using the appropriate function from [scipy.stats](https://docs.scipy.org/doc/scipy/reference/stats.html#statistical-tests) (hint, you're looking for a function to perform a _t_-test on two independent samples). For the manual approach, calculate the value of the test statistic and then its probability (the p-value). Verify you get the same results from both.
__A:__
**H0** : (Avg cost per patient with insurance) - (Avg cost per patient without insurance) = 0
**H1** : (Avg cost per patient with insurance) - (Avg cost per patient without insurance) <> 0
**This is a two tailed test**
```
medical.head()
# Split the patients into the insured and uninsured groups.
insurance_sample = medical.query("insuranceclaim == 1")
no_insurance_sample = medical.query("insuranceclaim == 0")
# BUG FIX: each group's mean and std were computed from two independent
# resamples; draw one resample per group and compute both statistics from it.
grp1 = insurance_sample.charges.sample(100, replace=True)
grp2 = no_insurance_sample.charges.sample(100, replace=True)
sample_mean_1 = np.mean(grp1)
sample_std_1 = np.std(grp1, ddof=1)
n1 = len(insurance_sample)
sample_mean_2 = np.mean(grp2)
sample_std_2 = np.std(grp2, ddof=1)
n2 = len(no_insurance_sample)
# Option 1: pooled two-sample t-statistic and p-value by hand.
import math
var1 = (n1 - 1) * (sample_std_1 ** 2)
var2 = (n2 - 1) * (sample_std_2 ** 2)
total_dof = n1 + n2 - 2
pooled_std = np.sqrt((var1 + var2) / total_dof)
tt_2samples = (sample_mean_1 - sample_mean_2) / (pooled_std * (np.sqrt(1 / float(n1) + 1 / float(n2))))
# BUG FIX: a two-sided p-value is 2 * P(T > |t|). The previous
# 2 * t.cdf(tt, ...) gives a value near 2 for large positive t instead of
# near 0, which is why the manual result did not match scipy's.
pval_two_sided = 2 * t.sf(np.abs(tt_2samples), df=total_dof)
print("t-statistic: {0:f}".format(tt_2samples))
print("p-value: {0:f}".format(pval_two_sided))
# Option 2: t-statistic and p-value using scipy.stats on normal draws
# parameterized by the group statistics.
# Reference : https://stackoverflow.com/questions/22611446/perform-2-sample-t-test
rvs1 = stats.norm.rvs(loc=sample_mean_1,scale=sample_std_1,size=len(insurance_sample))
rvs2 = stats.norm.rvs(loc=sample_mean_2,scale=sample_std_2,size=len(no_insurance_sample))
stats.ttest_ind(rvs1, rvs2)
```
**Question - Why is the p-value so low and not matching????**
Since the p-value is very low (at the far end of the spectrum), the null hypothesis can be rejected in favor of the alternative hypothesis, which indicates that the average charge differs between the 2 groups — the null hypothesis of the two-sample t-test being that there is no difference between the means of the 2 samples
**Conclusion** - There is a difference in average cost per patient that has insurance and that doesn't have insurance
Congratulations! Hopefully you got the exact same numerical results. This shows that you correctly calculated the numbers by hand. Secondly, you used the correct function and saw that it's much easier to use. All you need to do is pass your data to it.
__Q6:__ Conceptual question: look through the documentation for statistical test functions in scipy.stats. You'll see the above _t_-test for a sample, but can you see an equivalent one for performing a *z*-test from a sample? Comment on your answer.
__A:__
Yes, to compute the z score of each value in the sample, relative to the sample mean and standard deviation.
https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.zscore.html
## Learning outcomes
Having completed this project notebook, you now have good hands-on experience:
* using the central limit theorem to help you apply frequentist techniques to answer questions that pertain to very non-normally distributed data from the real world
* performing inference using such data to answer business questions
* forming a hypothesis and framing the null and alternative hypotheses
* testing this using a _t_-test
| github_jupyter |
```
%%html
<!-- Stylesheets and fonts for rendering MathBook content in the notebook. -->
<link href="http://mathbook.pugetsound.edu/beta/mathbook-content.css" rel="stylesheet" type="text/css" />
<link href="https://aimath.org/mathbook/mathbook-add-on.css" rel="stylesheet" type="text/css" />
<style>.subtitle {font-size:medium; display:block}</style>
<link href="https://fonts.googleapis.com/css?family=Open+Sans:400,400italic,600,600italic" rel="stylesheet" type="text/css" />
<link href="https://fonts.googleapis.com/css?family=Inconsolata:400,700&subset=latin,latin-ext" rel="stylesheet" type="text/css" /><!-- Hide this cell. -->
<script>
// Attach a "Toggle hidden code" button after the notebook's first input
// area (jQuery is provided by the classic notebook page), then hide that
// input area by default.
var cell = $(".container .cell").eq(0), ia = cell.find(".input_area")
if (cell.find(".toggle-button").length == 0) {
ia.after(
$('<button class="toggle-button">Toggle hidden code</button>').click(
function (){ ia.toggle() }
)
)
ia.hide()
}
</script>
```
**Important:** to view this notebook properly you will need to execute the cell above, which assumes you have an Internet connection. It should already be selected, or place your cursor anywhere above to select. Then press the "Run" button in the menu bar above (the right-pointing arrowhead), or press Shift-Enter on your keyboard.
$\newcommand{\identity}{\mathrm{id}}
\newcommand{\notdivide}{\nmid}
\newcommand{\notsubset}{\not\subset}
\newcommand{\lcm}{\operatorname{lcm}}
\newcommand{\gf}{\operatorname{GF}}
\newcommand{\inn}{\operatorname{Inn}}
\newcommand{\aut}{\operatorname{Aut}}
\newcommand{\Hom}{\operatorname{Hom}}
\newcommand{\cis}{\operatorname{cis}}
\newcommand{\chr}{\operatorname{char}}
\newcommand{\Null}{\operatorname{Null}}
\newcommand{\lt}{<}
\newcommand{\gt}{>}
\newcommand{\amp}{&}
$
<div class="mathbook-content"><h2 class="heading hide-type" alt="Exercises 9.3 Exercises"><span class="type">Section</span><span class="codenumber">9.3</span><span class="title">Exercises</span></h2><a href="exercises-isomorph.ipynb" class="permalink">¶</a></div>
<div class="mathbook-content"><article class="exercise-like" id="exercise-310"><h6 class="heading"><span class="codenumber">1</span></h6><p id="p-1485">Prove that $\mathbb Z \cong n \mathbb Z$ for $n \neq 0\text{.}$</p><div class="solutions"><span class="solution"><a knowl="" class="id-ref" refid="hk-hint-87" id="hint-87"><span class="type">Hint</span></a></span><div id="hk-hint-87" class="hidden-content tex2jax_ignore"><span class="solution"><p id="p-1486">Every infinite cyclic group is isomorphic to ${\mathbb Z}$ by Theorem <a href="section-isomorph-definitions.ipynb#theorem-isomorph-2" class="xref" alt="Theorem 9.7 " title="Theorem 9.7 ">9.7</a>.</p></span></div></div></article></div>
<div class="mathbook-content"><article class="exercise-like" id="exercise-311"><h6 class="heading"><span class="codenumber">2</span></h6><p id="p-1487">Prove that ${\mathbb C}^\ast$ is isomorphic to the subgroup of $GL_2( {\mathbb R} )$ consisting of matrices of the form</p><div class="displaymath">
\begin{equation*}
\begin{pmatrix}
a & b \\
-b & a
\end{pmatrix}.
\end{equation*}
</div><div class="solutions"><span class="solution"><a knowl="" class="id-ref" refid="hk-hint-88" id="hint-88"><span class="type">Hint</span></a></span><div id="hk-hint-88" class="hidden-content tex2jax_ignore"><span class="solution"><p id="p-1488">Define $\phi: {\mathbb C}^* \rightarrow GL_2( {\mathbb R})$ by</p><div class="displaymath">
\begin{equation*}
\phi(a + bi) =
\begin{pmatrix}
a & b \\
-b & a
\end{pmatrix}.
\end{equation*}
</div></span></div></div></article></div>
<div class="mathbook-content"><article class="exercise-like" id="exercise-312"><h6 class="heading"><span class="codenumber">3</span></h6><p id="p-1489">Prove or disprove: $U(8) \cong {\mathbb Z}_4\text{.}$</p><div class="solutions"><span class="solution"><a knowl="" class="id-ref" refid="hk-hint-89" id="hint-89"><span class="type">Hint</span></a></span><div id="hk-hint-89" class="hidden-content tex2jax_ignore"><span class="solution"><p id="p-1490">False.</p></span></div></div></article></div>
<div class="mathbook-content"><article class="exercise-like" id="exercise-313"><h6 class="heading"><span class="codenumber">4</span></h6><p id="p-1491">Prove that $U(8)$ is isomorphic to the group of matrices</p><div class="displaymath">
\begin{equation*}
\begin{pmatrix}
1 & 0 \\
0 & 1
\end{pmatrix},
\begin{pmatrix}
1 & 0 \\
0 & -1
\end{pmatrix},
\begin{pmatrix}
-1 & 0 \\
0 & 1
\end{pmatrix},
\begin{pmatrix}
-1 & 0 \\
0 & -1
\end{pmatrix}.
\end{equation*}
</div></article></div>
<div class="mathbook-content"><article class="exercise-like" id="exercise-314"><h6 class="heading"><span class="codenumber">5</span></h6><p id="p-1492">Show that $U(5)$ is isomorphic to $U(10)\text{,}$ but $U(12)$ is not.</p></article></div>
<div class="mathbook-content"><article class="exercise-like" id="exercise-315"><h6 class="heading"><span class="codenumber">6</span></h6><p id="p-1493">Show that the $n$th roots of unity are isomorphic to ${\mathbb Z}_n\text{.}$</p><div class="solutions"><span class="solution"><a knowl="" class="id-ref" refid="hk-hint-90" id="hint-90"><span class="type">Hint</span></a></span><div id="hk-hint-90" class="hidden-content tex2jax_ignore"><span class="solution"><p id="p-1494">Define a map from ${\mathbb Z}_n$ into the $n$th roots of unity by $k \mapsto \cis(2k\pi / n)\text{.}$</p></span></div></div></article></div>
<div class="mathbook-content"><article class="exercise-like" id="exercise-316"><h6 class="heading"><span class="codenumber">7</span></h6><p id="p-1495">Show that any cyclic group of order $n$ is isomorphic to ${\mathbb Z}_n\text{.}$</p></article></div>
<div class="mathbook-content"><article class="exercise-like" id="exercise-317"><h6 class="heading"><span class="codenumber">8</span></h6><p id="p-1496">Prove that ${\mathbb Q}$ is not isomorphic to ${\mathbb Z}\text{.}$</p><div class="solutions"><span class="solution"><a knowl="" class="id-ref" refid="hk-hint-91" id="hint-91"><span class="type">Hint</span></a></span><div id="hk-hint-91" class="hidden-content tex2jax_ignore"><span class="solution"><p id="p-1497">Assume that ${\mathbb Q}$ is cyclic and try to find a generator.</p></span></div></div></article></div>
<div class="mathbook-content"><article class="exercise-like" id="exercise-318"><h6 class="heading"><span class="codenumber">9</span></h6><p id="p-1498">Let $G = {\mathbb R} \setminus \{ -1 \}$ and define a binary operation on $G$ by</p><div class="displaymath">
\begin{equation*}
a \ast b = a + b + ab.
\end{equation*}
</div><p>Prove that $G$ is a group under this operation. Show that $(G, *)$ is isomorphic to the multiplicative group of nonzero real numbers.</p></article></div>
<div class="mathbook-content"><article class="exercise-like" id="exercise-319"><h6 class="heading"><span class="codenumber">10</span></h6><p id="p-1499">Show that the matrices</p><div class="displaymath">
\begin{align*}
\begin{pmatrix}
1 & 0 & 0 \\
0 & 1 & 0 \\
0 & 0 & 1
\end{pmatrix}
\quad
\begin{pmatrix}
1 & 0 & 0 \\
0 & 0 & 1 \\
0 & 1 & 0
\end{pmatrix}
\quad
\begin{pmatrix}
0 & 1 & 0 \\
1 & 0 & 0 \\
0 & 0 & 1
\end{pmatrix}\\
\begin{pmatrix}
0 & 0 & 1 \\
1 & 0 & 0 \\
0 & 1 & 0
\end{pmatrix}
\quad
\begin{pmatrix}
0 & 0 & 1 \\
0 & 1 & 0 \\
1 & 0 & 0
\end{pmatrix}
\quad
\begin{pmatrix}
0 & 1 & 0 \\
0 & 0 & 1 \\
1 & 0 & 0
\end{pmatrix}
\end{align*}
</div><p>form a group. Find an isomorphism of $G$ with a more familiar group of order 6.</p></article></div>
<div class="mathbook-content"><article class="exercise-like" id="exercise-320"><h6 class="heading"><span class="codenumber">11</span></h6><p id="p-1500">Find five non-isomorphic groups of order 8.</p><div class="solutions"><span class="solution"><a knowl="" class="id-ref" refid="hk-hint-92" id="hint-92"><span class="type">Hint</span></a></span><div id="hk-hint-92" class="hidden-content tex2jax_ignore"><span class="solution"><p id="p-1501">There are two nonabelian and three abelian groups that are not isomorphic.</p></span></div></div></article></div>
<div class="mathbook-content"><article class="exercise-like" id="exercise-321"><h6 class="heading"><span class="codenumber">12</span></h6><p id="p-1502">Prove $S_4$ is not isomorphic to $D_{12}\text{.}$</p></article></div>
<div class="mathbook-content"><article class="exercise-like" id="exercise-322"><h6 class="heading"><span class="codenumber">13</span></h6><p id="p-1503">Let $\omega = \cis(2 \pi /n)$ be a primitive $n$th root of unity. Prove that the matrices</p><div class="displaymath">
\begin{equation*}
A =
\begin{pmatrix}
\omega & 0 \\
0 & \omega^{-1}
\end{pmatrix}
\quad \text{and} \quad
B =
\begin{pmatrix}
0 & 1 \\
1 & 0
\end{pmatrix}
\end{equation*}
</div><p>generate a multiplicative group isomorphic to $D_n\text{.}$</p></article></div>
<div class="mathbook-content"><article class="exercise-like" id="exercise-323"><h6 class="heading"><span class="codenumber">14</span></h6><p id="p-1504">Show that the set of all matrices of the form</p><div class="displaymath">
\begin{equation*}
\begin{pmatrix}
\pm 1 & k \\
0 & 1
\end{pmatrix},
\end{equation*}
</div><p>is a group isomorphic to $D_n\text{,}$ where all entries in the matrix are in ${\mathbb Z}_n\text{.}$</p></article></div>
<div class="mathbook-content"><article class="exercise-like" id="exercise-324"><h6 class="heading"><span class="codenumber">15</span></h6><p id="p-1505">List all of the elements of ${\mathbb Z}_4 \times {\mathbb Z}_2\text{.}$</p></article></div>
<div class="mathbook-content"><article class="exercise-like" id="exercise-325"><h6 class="heading"><span class="codenumber">16</span></h6><p id="p-1506">Find the order of each of the following elements. </p><ol class="lower-alpha"><li id="li-366"><p id="p-1507">$(3, 4)$ in ${\mathbb Z}_4 \times {\mathbb Z}_6$</p></li><li id="li-367"><p id="p-1508">$(6, 15, 4)$ in ${\mathbb Z}_{30} \times {\mathbb Z}_{45} \times {\mathbb Z}_{24}$</p></li><li id="li-368"><p id="p-1509">$(5, 10, 15)$ in ${\mathbb Z}_{25} \times {\mathbb Z}_{25} \times {\mathbb Z}_{25}$</p></li><li id="li-369"><p id="p-1510">$(8, 8, 8)$ in ${\mathbb Z}_{10} \times {\mathbb Z}_{24} \times {\mathbb Z}_{80}$</p></li></ol><div class="solutions"><span class="solution"><a knowl="" class="id-ref" refid="hk-hint-93" id="hint-93"><span class="type">Hint</span></a></span><div id="hk-hint-93" class="hidden-content tex2jax_ignore"><span class="solution"><p id="p-1511">(a) 12; (c) 5.</p></span></div></div></article></div>
<div class="mathbook-content"><article class="exercise-like" id="exercise-326"><h6 class="heading"><span class="codenumber">17</span></h6><p id="p-1512">Prove that $D_4$ cannot be the internal direct product of two of its proper subgroups.</p></article></div>
<div class="mathbook-content"><article class="exercise-like" id="exercise-327"><h6 class="heading"><span class="codenumber">18</span></h6><p id="p-1513">Prove that the subgroup of ${\mathbb Q}^\ast$ consisting of elements of the form $2^m 3^n$ for $m,n \in {\mathbb Z}$ is an internal direct product isomorphic to ${\mathbb Z} \times {\mathbb Z}\text{.}$</p></article></div>
<div class="mathbook-content"><article class="exercise-like" id="exercise-328"><h6 class="heading"><span class="codenumber">19</span></h6><p id="p-1514">Prove that $S_3 \times {\mathbb Z}_2$ is isomorphic to $D_6\text{.}$ Can you make a conjecture about $D_{2n}\text{?}$ Prove your conjecture.</p><div class="solutions"><span class="solution"><a knowl="" class="id-ref" refid="hk-hint-94" id="hint-94"><span class="type">Hint</span></a></span><div id="hk-hint-94" class="hidden-content tex2jax_ignore"><span class="solution"><p id="p-1515">Draw the picture.</p></span></div></div></article></div>
<div class="mathbook-content"><article class="exercise-like" id="exercise-329"><h6 class="heading"><span class="codenumber">20</span></h6><p id="p-1516">Prove or disprove: Every abelian group of order divisible by 3 contains a subgroup of order 3.</p><div class="solutions"><span class="solution"><a knowl="" class="id-ref" refid="hk-hint-95" id="hint-95"><span class="type">Hint</span></a></span><div id="hk-hint-95" class="hidden-content tex2jax_ignore"><span class="solution"><p id="p-1517">True.</p></span></div></div></article></div>
<div class="mathbook-content"></div>
<div class="mathbook-content"><article class="exercise-like" id="exercise-330"><h6 class="heading"><span class="codenumber">21</span></h6><p id="p-1518">Prove or disprove: Every nonabelian group of order divisible by 6 contains a subgroup of order 6.</p></article></div>
<div class="mathbook-content"><article class="exercise-like" id="exercise-331"><h6 class="heading"><span class="codenumber">22</span></h6><p id="p-1519">Let $G$ be a group of order 20. If $G$ has subgroups $H$ and $K$ of orders 4 and 5 respectively such that $hk = kh$ for all $h \in H$ and $k \in K\text{,}$ prove that $G$ is the internal direct product of $H$ and $K\text{.}$</p></article></div>
<div class="mathbook-content"><article class="exercise-like" id="exercise-332"><h6 class="heading"><span class="codenumber">23</span></h6><p id="p-1520">Prove or disprove the following assertion. Let $G\text{,}$ $H\text{,}$ and $K$ be groups. If $G \times K \cong H \times K\text{,}$ then $G \cong H\text{.}$</p></article></div>
<div class="mathbook-content"><article class="exercise-like" id="exercise-333"><h6 class="heading"><span class="codenumber">24</span></h6><p id="p-1521">Prove or disprove: There is a noncyclic abelian group of order 51.</p></article></div>
<div class="mathbook-content"><article class="exercise-like" id="exercise-334"><h6 class="heading"><span class="codenumber">25</span></h6><p id="p-1522">Prove or disprove: There is a noncyclic abelian group of order 52.</p><div class="solutions"><span class="solution"><a knowl="" class="id-ref" refid="hk-hint-96" id="hint-96"><span class="type">Hint</span></a></span><div id="hk-hint-96" class="hidden-content tex2jax_ignore"><span class="solution"><p id="p-1523">True.</p></span></div></div></article></div>
<div class="mathbook-content"><article class="exercise-like" id="exercise-335"><h6 class="heading"><span class="codenumber">26</span></h6><p id="p-1524">Let $\phi : G \rightarrow H$ be a group isomorphism. Show that $\phi( x) = e_H$ if and only if $x=e_G\text{,}$ where $e_G$ and $e_H$ are the identities of $G$ and $H\text{,}$ respectively.</p></article></div>
<div class="mathbook-content"><article class="exercise-like" id="exercise-336"><h6 class="heading"><span class="codenumber">27</span></h6><p id="p-1525">Let $G \cong H\text{.}$ Show that if $G$ is cyclic, then so is $H\text{.}$</p><div class="solutions"><span class="solution"><a knowl="" class="id-ref" refid="hk-hint-97" id="hint-97"><span class="type">Hint</span></a></span><div id="hk-hint-97" class="hidden-content tex2jax_ignore"><span class="solution"><p id="p-1526">Let $a$ be a generator for $G\text{.}$ If $\phi :G \rightarrow H$ is an isomorphism, show that $\phi(a)$ is a generator for $H\text{.}$</p></span></div></div></article></div>
<div class="mathbook-content"><article class="exercise-like" id="exercise-337"><h6 class="heading"><span class="codenumber">28</span></h6><p id="p-1527">Prove that any group $G$ of order $p\text{,}$ $p$ prime, must be isomorphic to ${\mathbb Z}_p\text{.}$</p></article></div>
<div class="mathbook-content"><article class="exercise-like" id="exercise-338"><h6 class="heading"><span class="codenumber">29</span></h6><p id="p-1528">Show that $S_n$ is isomorphic to a subgroup of $A_{n+2}\text{.}$</p></article></div>
<div class="mathbook-content"><article class="exercise-like" id="exercise-339"><h6 class="heading"><span class="codenumber">30</span></h6><p id="p-1529">Prove that $D_n$ is isomorphic to a subgroup of $S_n\text{.}$</p></article></div>
<div class="mathbook-content"><article class="exercise-like" id="isomorph-exercise-comp-inv"><h6 class="heading"><span class="codenumber">31</span></h6><p id="p-1530">Let $\phi : G_1 \rightarrow G_2$ and $\psi : G_2 \rightarrow G_3$ be isomorphisms. Show that $\phi^{-1}$ and $\psi \circ \phi$ are both isomorphisms. Using these results, show that the isomorphism of groups determines an equivalence relation on the class of all groups.</p></article></div>
<div class="mathbook-content"><article class="exercise-like" id="exercise-341"><h6 class="heading"><span class="codenumber">32</span></h6><p id="p-1531">Prove $U(5) \cong {\mathbb Z}_4\text{.}$ Can you generalize this result for $U(p)\text{,}$ where $p$ is prime?</p></article></div>
<div class="mathbook-content"><article class="exercise-like" id="exercise-342"><h6 class="heading"><span class="codenumber">33</span></h6><p id="p-1532">Write out the permutations associated with each element of $S_3$ in the proof of Cayley's Theorem.</p></article></div>
<div class="mathbook-content"><article class="exercise-like" id="exercise-343"><h6 class="heading"><span class="codenumber">34</span></h6><p id="p-1533">An <dfn class="terminology">automorphism</dfn> of a group $G$ is an isomorphism with itself. Prove that complex conjugation is an automorphism of the additive group of complex numbers; that is, show that the map $\phi( a + bi ) = a - bi$ is an isomorphism from ${\mathbb C}$ to ${\mathbb C}\text{.}$</p></article></div>
<div class="mathbook-content"><article class="exercise-like" id="exercise-344"><h6 class="heading"><span class="codenumber">35</span></h6><p id="p-1534">Prove that $a + ib \mapsto a - ib$ is an automorphism of ${\mathbb C}^*\text{.}$</p></article></div>
<div class="mathbook-content"><article class="exercise-like" id="exercise-345"><h6 class="heading"><span class="codenumber">36</span></h6><p id="p-1535">Prove that $A \mapsto B^{-1}AB$ is an automorphism of $SL_2({\mathbb R})$ for all $B$ in $GL_2({\mathbb R})\text{.}$</p></article></div>
<div class="mathbook-content"><article class="exercise-like" id="exercise-346"><h6 class="heading"><span class="codenumber">37</span></h6><p id="p-1536">We will denote the set of all automorphisms of $G$ by $\aut(G)\text{.}$ Prove that $\aut(G)$ is a subgroup of $S_G\text{,}$ the group of permutations of $G\text{.}$</p></article></div>
<div class="mathbook-content"><article class="exercise-like" id="exercise-347"><h6 class="heading"><span class="codenumber">38</span></h6><p id="p-1537">Find $\aut( {\mathbb Z}_6)\text{.}$</p><div class="solutions"><span class="solution"><a knowl="" class="id-ref" refid="hk-hint-98" id="hint-98"><span class="type">Hint</span></a></span><div id="hk-hint-98" class="hidden-content tex2jax_ignore"><span class="solution"><p id="p-1538">Any automorphism of ${\mathbb Z}_6$ must send 1 to another generator of ${\mathbb Z}_6\text{.}$</p></span></div></div></article></div>
<div class="mathbook-content"><article class="exercise-like" id="exercise-348"><h6 class="heading"><span class="codenumber">39</span></h6><p id="p-1539">Find $\aut( {\mathbb Z})\text{.}$</p></article></div>
<div class="mathbook-content"><article class="exercise-like" id="exercise-349"><h6 class="heading"><span class="codenumber">40</span></h6><p id="p-1540">Find two nonisomorphic groups $G$ and $H$ such that $\aut(G) \cong \aut(H)\text{.}$</p></article></div>
<div class="mathbook-content"><article class="exercise-like" id="isomorph-exercise-inner-aut"><h6 class="heading"><span class="codenumber">41</span></h6><p id="p-1541">Let $G$ be a group and $g \in G\text{.}$ Define a map $i_g : G \rightarrow G$ by $i_g(x) = g x g^{-1}\text{.}$ Prove that $i_g$ defines an automorphism of $G\text{.}$ Such an automorphism is called an <dfn class="terminology">inner automorphism</dfn>. The set of all inner automorphisms is denoted by $\inn(G)\text{.}$ </p></article></div>
<div class="mathbook-content"><article class="exercise-like" id="exercise-351"><h6 class="heading"><span class="codenumber">42</span></h6><p id="p-1542">Prove that $\inn(G)$ is a subgroup of $\aut(G)\text{.}$</p></article></div>
<div class="mathbook-content"><article class="exercise-like" id="exercise-352"><h6 class="heading"><span class="codenumber">43</span></h6><p id="p-1543">What are the inner automorphisms of the quaternion group $Q_8\text{?}$ Is $\inn(G) = \aut(G)$ in this case?</p></article></div>
<div class="mathbook-content"><article class="exercise-like" id="exercise-353"><h6 class="heading"><span class="codenumber">44</span></h6><p id="p-1544">Let $G$ be a group and $g \in G\text{.}$ Define maps $\lambda_g :G \rightarrow G$ and $\rho_g :G \rightarrow G$ by $\lambda_g(x) = gx$ and $\rho_g(x) = xg^{-1}\text{.}$ Show that $i_g = \rho_g \circ \lambda_g$ is an automorphism of $G\text{.}$ The isomorphism $g \mapsto \rho_g$ is called the <dfn class="terminology">right regular representation</dfn> of $G\text{.}$ </p></article></div>
<div class="mathbook-content"><article class="exercise-like" id="exercise-354"><h6 class="heading"><span class="codenumber">45</span></h6><p id="p-1545">Let $G$ be the internal direct product of subgroups $H$ and $K\text{.}$ Show that the map $\phi : G \rightarrow H \times K$ defined by $\phi(g) = (h,k)$ for $g =hk\text{,}$ where $h \in H$ and $k \in K\text{,}$ is one-to-one and onto.</p><div class="solutions"><span class="solution"><a knowl="" class="id-ref" refid="hk-hint-99" id="hint-99"><span class="type">Hint</span></a></span><div id="hk-hint-99" class="hidden-content tex2jax_ignore"><span class="solution"><p id="p-1546">To show that $\phi$ is one-to-one, let $g_1 = h_1 k_1$ and $g_2 = h_2 k_2$ and consider $\phi(g_1) = \phi(g_2)\text{.}$</p></span></div></div></article></div>
<div class="mathbook-content"><article class="exercise-like" id="exercise-355"><h6 class="heading"><span class="codenumber">46</span></h6><p id="p-1547">Let $G$ and $H$ be isomorphic groups. If $G$ has a subgroup of order $n\text{,}$ prove that $H$ must also have a subgroup of order $n\text{.}$</p></article></div>
<div class="mathbook-content"><article class="exercise-like" id="exercise-356"><h6 class="heading"><span class="codenumber">47</span></h6><p id="p-1548">If $G \cong \overline{G}$ and $H \cong \overline{H}\text{,}$ show that $G \times H \cong \overline{G} \times \overline{H}\text{.}$</p></article></div>
<div class="mathbook-content"><article class="exercise-like" id="exercise-357"><h6 class="heading"><span class="codenumber">48</span></h6><p id="p-1549">Prove that $G \times H$ is isomorphic to $H \times G\text{.}$</p></article></div>
<div class="mathbook-content"><article class="exercise-like" id="exercise-358"><h6 class="heading"><span class="codenumber">49</span></h6><p id="p-1550">Let $n_1, \ldots, n_k$ be positive integers. Show that</p><div class="displaymath">
\begin{equation*}
\prod_{i=1}^k {\mathbb Z}_{n_i} \cong {\mathbb Z}_{n_1 \cdots n_k}
\end{equation*}
</div><p>if and only if $\gcd( n_i, n_j) =1$ for $i \neq j\text{.}$</p></article></div>
<div class="mathbook-content"><article class="exercise-like" id="exercise-359"><h6 class="heading"><span class="codenumber">50</span></h6><p id="p-1551">Prove that $A \times B$ is abelian if and only if $A$ and $B$ are abelian.</p></article></div>
<div class="mathbook-content"><article class="exercise-like" id="exercise-360"><h6 class="heading"><span class="codenumber">51</span></h6><p id="p-1552">If $G$ is the internal direct product of $H_1, H_2, \ldots, H_n\text{,}$ prove that $G$ is isomorphic to $\prod_i H_i\text{.}$</p></article></div>
<div class="mathbook-content"><article class="exercise-like" id="exercise-361"><h6 class="heading"><span class="codenumber">52</span></h6><p id="p-1553">Let $H_1$ and $H_2$ be subgroups of $G_1$ and $G_2\text{,}$ respectively. Prove that $H_1 \times H_2$ is a subgroup of $G_1 \times G_2\text{.}$</p></article></div>
<div class="mathbook-content"><article class="exercise-like" id="exercise-362"><h6 class="heading"><span class="codenumber">53</span></h6><p id="p-1554">Let $m, n \in {\mathbb Z}\text{.}$ Prove that $\langle m,n \rangle = \langle d \rangle$ if and only if $d = \gcd(m,n)\text{.}$</p></article></div>
<div class="mathbook-content"><article class="exercise-like" id="exercise-363"><h6 class="heading"><span class="codenumber">54</span></h6><p id="p-1555">Let $m, n \in {\mathbb Z}\text{.}$ Prove that $\langle m \rangle \cap \langle n \rangle = \langle l \rangle$ if and only if $l = \lcm(m,n)\text{.}$</p></article></div>
<div class="mathbook-content"><article class="exercise-like" id="exercise-classify-2p"><h6 class="heading"><span class="codenumber">55</span><span class="title">Groups of order $2p$</span></h6><p id="p-1556">In this series of exercises we will classify all groups of order $2p\text{,}$ where $p$ is an odd prime. </p><ol class="lower-alpha"><li id="li-370"><p id="p-1557">Assume $G$ is a group of order $2p\text{,}$ where $p$ is an odd prime. If $a \in G\text{,}$ show that $a$ must have order 1, 2, $p\text{,}$ or $2p\text{.}$</p></li><li id="li-371"><p id="p-1558">Suppose that $G$ has an element of order $2p\text{.}$ Prove that $G$ is isomorphic to ${\mathbb Z}_{2p}\text{.}$ Hence, $G$ is cyclic.</p></li><li id="li-372"><p id="p-1559">Suppose that $G$ does not contain an element of order $2p\text{.}$ Show that $G$ must contain an element of order $p\text{.}$ {\em Hint}: Assume that $G$ does not contain an element of order $p\text{.}$</p></li><li id="li-373"><p id="p-1560">Suppose that $G$ does not contain an element of order $2p\text{.}$ Show that $G$ must contain an element of order 2.</p></li><li id="li-374"><p id="p-1561">Let $P$ be a subgroup of $G$ with order $p$ and $y \in G$ have order 2. Show that $yP = Py\text{.}$</p></li><li id="li-375"><p id="p-1562">Suppose that $G$ does not contain an element of order $2p$ and $P = \langle z \rangle$ is a subgroup of order $p$ generated by $z\text{.}$ If $y$ is an element of order 2, then $yz = z^ky$ for some $2 \leq k \lt p\text{.}$</p></li><li id="li-376"><p id="p-1563">Suppose that $G$ does not contain an element of order $2p\text{.}$ Prove that $G$ is not abelian.</p></li><li id="li-377"><p id="p-1564">Suppose that $G$ does not contain an element of order $2p$ and $P = \langle z \rangle$ is a subgroup of order $p$ generated by $z$ and $y$ is an element of order 2. 
Show that we can list the elements of $G$ as $\{z^iy^j\mid 0\leq i \lt p, 0\leq j \lt 2\}\text{.}$</p></li><li id="li-378"><p id="p-1565">Suppose that $G$ does not contain an element of order $2p$ and $P = \langle z \rangle$ is a subgroup of order $p$ generated by $z$ and $y$ is an element of order 2. Prove that the product $(z^iy^j)(z^ry^s)$ can be expressed uniquely as $z^m y^n$ for some non-negative integers $m, n\text{.}$ Thus, conclude that there is only one possibility for a non-abelian group of order $2p\text{,}$ it must therefore be the one we have seen already, the dihedral group.</p></li></ol></article></div>
| github_jupyter |
# Exercise 4
Related Notes:
- Fundamentals_1_Data_Types
- Fundamentals_3_Data_Structures
## Exercise 4.1 Value Swapping
Write a program that:
- Asks the user to input 3 integer values, `x`, `y` and `z`, one by one.
- Prints out the `x`, `y` and `z` values before swapping.
- Uses a one-line statement to swap values so that `x` will hold the value of `y`, `y` will hold the value of `z`, and `z` will hold the value of `x` after running the statement.
- Prints out the `x`, `y` and `z` values after swapping.
Example interaction:
>Enter the value for x: 10<br>
>Enter the value for y: 20<br>
>Enter the value for z: 30<br>
>Before swapping, x = 10, y = 20, z = 30<br>
>After swapping, x = 20, y = 30, z = 10<br>
## Exercise 4.2 Dots in a Square
In Python, we can generate random values between -1 and 1 by:
- importing the `random` module, and then
- using the `random.uniform(-1,1)` method.
Write a program that:
0. set `n` to be 100
1. creates a list called `coordinates` which contain `n` tuples with 2 elements each where each element in the tuple is a randomly generated real number between -1 and 1 inclusive, i.e. each element in `coordinates` is of the following form `(element_1,element_2)`. This list represents 100 random points in the cartesian plane ($xy$-plane) where the $x$-coordinates and $y$-coordinates are between -1 and 1 inclusive.
2. print out the number of the generated random points (i.e., elements in `coordinates`) that has a distance of at most 1 unit away from the origin $O$. Recall that the distance $d$ from origin to a point $\left(x,y\right)$ is $d=\sqrt{x^2+y^2}$. You can use the method `math.sqrt(your_number)` to find the square root of a number.
3. print out also `frac`, the fraction of the number of such points out of $n$ points.
4. print out `frac*4`.
5. repeat steps 1-5 above with `n` set to be 1000, 10000, 100000.
What can you observe about the values of `frac*4` as the `n` increases?
Example interaction:
>#Assume coordinates=\[(1,0.7),(0.5,0.5),(-1,-1)\]<br>
>Number of points that have a distance of at most 1 unit away from the origin: 1<br>
>Fraction of such points : 0.3333333<br>
>4*frac : 1.333333332<br>
```
#DO NOT DELETE THE FOLLOWING IMPORT
# `random` supplies random.uniform for the coordinate generation described
# above; `math` supplies math.sqrt for the distance computation.
import random
import math
#TESTING THE METHODS UNIFORM AND SQRT
# Sanity checks: prints a random float in [-1, 1], then sqrt(2) ~= 1.4142.
print(random.uniform(-1,1))
print(math.sqrt(2))
```
## Exercise 4.3 3D Vector Operations
In mathematics, a real valued 3-dimensional vector is a collection of 3 values $\left(\begin{array}{c}
a_{1}\\
a_{2}\\
a_{3}
\end{array}\right)$, where $a_1,a_2,a_3 \in \mathbb{R}$.
Binary operation between vectors differs from operation between numbers and some of such operations are defined as such:
- addition $+$, which operates between 2 vectors and returns a vector, $\left(\begin{array}{c}
a_{1}\\
a_{2}\\
a_{3}
\end{array}\right)+\left(\begin{array}{c}
b_{1}\\
b_{2}\\
b_{3}
\end{array}\right)=\left(\begin{array}{c}
a_{1}+b_{1}\\
a_{2}+b_{2}\\
a_{3}+b_{3}
\end{array}\right)$,
- dot product $\cdot$, which operates between 2 vectors and returns a real number, $\left(\begin{array}{c}
a_{1}\\
a_{2}\\
a_{3}
\end{array}\right)\cdot\left(\begin{array}{c}
b_{1}\\
b_{2}\\
b_{3}
\end{array}\right)=a_{1}b_{1}+a_{2}b_{2}+a_{3}b_{3}$,
- cross product $\times$, which operates between 2 vectors and returns yet another vector, $\left(\begin{array}{c}
a_{1}\\
a_{2}\\
a_{3}
\end{array}\right)\times\left(\begin{array}{c}
b_{1}\\
b_{2}\\
b_{3}
\end{array}\right)=\left(\begin{array}{c}
a_{2}b_{3}-a_{3}b_{2}\\
-\left(a_{1}b_{3}-a_{3}b_{1}\right)\\
a_{1}b_{2}-a_{2}b_{1}
\end{array}\right)$.
Write a program that:
- Ask user to input 6 float values `x1`, `y1`, `z1`, `x2`, `y2`, `z2`
- Stores the 6 values above in a list called `vectors` that contained the tuples `(x1,y1,z1)` and `(x2,y2,z2)`. We will use each of these tuples to represent a 3-dimensional vector.
- Print out the list `vectors`.
- Implement the 3 vector operations defined above and print out the result. Refer to the example interaction below for the format.
Example interaction:
>Enter the value for x1: 1<br>
>Enter the value for y1: 2<br>
>Enter the value for z1: 3<br>
>Enter the value for x2: 4<br>
>Enter the value for y2: 5<br>
>Enter the value for z2: 6<br>
>vectors : \[(1,2,3),(4,5,6)\]<br>
>addition : (1,2,3)+(4,5,6) = (5,7,9)<br>
>dot product : (1,2,3).(4,5,6) = 32<br>
>cross product : (1,2,3)x(4,5,6) = (-3,6,-3)
```
#YOUR CODE HERE
```
| github_jupyter |
```
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import matplotlib.pyplot as plt
import keras
from keras.models import Sequential,Model
from keras.layers import Conv2D,Dense,Flatten,Dropout, MaxPooling2D,BatchNormalization,LeakyReLU
from keras import optimizers
from keras.utils.np_utils import to_categorical
from keras.callbacks import EarlyStopping
import random as rn
import os
# Load the Kaggle digit-recognizer (MNIST) data: each train row is a label
# plus 784 pixel-intensity columns (a flattened 28x28 image); test has no label.
train = pd.read_csv("./digit-recognizer/train.csv")
test = pd.read_csv("./digit-recognizer/test.csv")
Y_train = train["label"]
# Drop 'label' column so X_train holds only the 784 pixel features
X_train = train.drop(labels = ["label"],axis = 1)
# free some space
del train
#g = sns.countplot(Y_train)
# Class-distribution check (digit counts should be roughly balanced)
Y_train.value_counts()
# Normalize pixel intensities from [0, 255] down to [0, 1]
X_train = X_train / 255.0
test = test / 255.0
# Reshape image in 3 dimensions (height = 28px, width = 28px , canal = 1)
X_train = X_train.values.reshape(-1,28,28,1)
test = test.values.reshape(-1,28,28,1)
# Encode labels to one hot vectors (ex : 2 -> [0,0,1,0,0,0,0,0,0,0])
Y_train = to_categorical(Y_train, num_classes = 10)
# Display one sample image (channel 0) as a visual sanity check
g = plt.imshow(X_train[10][:,:,0])
def CNNModel(model_num=None):#input dim: 28x28x1
    """Build the digit-recognition CNN for 28x28x1 grayscale inputs.

    Returns an uncompiled Sequential model ending in a 10-way softmax.
    Note: ``model_num`` is accepted but currently unused.
    """
    # Assemble the layer stack up front and hand it to Sequential in one go;
    # the output dimensions after each stage are noted alongside.
    layer_stack = [
        Conv2D(32, (5, 5), activation="relu", padding='SAME',
               input_shape=(28, 28, 1)),       # 28x28x32
        Conv2D(32, (5, 5), activation="relu"),  # 24x24x32
        BatchNormalization(),
        MaxPooling2D((2, 2)),                   # 14x14x32
        Conv2D(64, (5, 5), activation="relu"),  # 10x10x64
        BatchNormalization(),
        MaxPooling2D((2, 2)),                   # 5x5x64
        Flatten(),
        Dropout(0.5),
        Dense(500, activation='relu'),
        Dense(100, activation='relu'),
        Dense(10, activation='softmax'),
    ]
    return Sequential(layer_stack)
digitRecon = CNNModel()
# Stop training early if accuracy fails to improve for 4 consecutive epochs.
# NOTE(review): 'acc' is the metric alias used by older Keras releases; newer
# Keras names it 'accuracy' -- confirm against the installed Keras version.
earlyStopping = EarlyStopping(monitor='acc',
patience=4)
digitRecon.compile(optimizer='adam',loss="categorical_crossentropy", metrics=["accuracy"])
digitRecon.fit(X_train,Y_train, epochs = 10, batch_size=256,callbacks=[earlyStopping])
# predict results: per-class probabilities for each test image
results = digitRecon.predict(test)
# select the index with the maximum probability as the predicted digit
results = np.argmax(results,axis = 1)
results = pd.Series(results,name="Label")
#results.head(2)
# Visual spot check: show one random test image with its predicted label
val=rn.randint(0,test.shape[0]-1)
plt.imshow(test[val][:,:,0])
plt.title("Predicted value:"+str(results[val]))
# Build the Kaggle submission: ImageId 1..28000 paired with predicted Label
submission = pd.concat([pd.Series(range(1,28001),name = "ImageId"),results],axis = 1)
submission.to_csv("cnn_mnist_datagen.csv",index=False)
```
| github_jupyter |
# RadarCOVID-Report
## Data Extraction
```
import datetime
import json
import logging
import os
import shutil
import tempfile
import textwrap
import uuid
import matplotlib.pyplot as plt
import matplotlib.ticker
import numpy as np
import pandas as pd
import pycountry
import retry
import seaborn as sns
%matplotlib inline
# Run from the shell's working directory when launched via a wrapper
# (the PWD environment variable may differ from the process cwd).
current_working_directory = os.environ.get("PWD")
if current_working_directory:
    os.chdir(current_working_directory)
sns.set()
matplotlib.rcParams["figure.figsize"] = (15, 6)
# Reference timestamps for this extraction run (all UTC).
extraction_datetime = datetime.datetime.utcnow()
extraction_date = extraction_datetime.strftime("%Y-%m-%d")
extraction_previous_datetime = extraction_datetime - datetime.timedelta(days=1)
extraction_previous_date = extraction_previous_datetime.strftime("%Y-%m-%d")
extraction_date_with_hour = datetime.datetime.utcnow().strftime("%Y-%m-%d@%H")
current_hour = datetime.datetime.utcnow().hour
# Today's figures are partial unless extracted during the last UTC hour.
are_today_results_partial = current_hour != 23
```
### Constants
```
from Modules.ExposureNotification import exposure_notification_io
# Backend/region identifiers (ISO 3166-1 alpha-2).
spain_region_country_code = "ES"
germany_region_country_code = "DE"
default_backend_identifier = spain_region_country_code
# Window sizes, in days.
backend_generation_days = 7 * 2
daily_summary_days = 7 * 4 * 3
daily_plot_days = 7 * 4
# Load one extra dump so day-over-day diffs cover the whole summary window.
tek_dumps_load_limit = daily_summary_days + 1
```
### Parameters
```
# Backend to report on; overridable via environment variable.
environment_backend_identifier = os.environ.get("RADARCOVID_REPORT__BACKEND_IDENTIFIER")
if environment_backend_identifier:
    report_backend_identifier = environment_backend_identifier
else:
    report_backend_identifier = default_backend_identifier
report_backend_identifier
# None means "download from every known backend".
environment_enable_multi_backend_download = \
    os.environ.get("RADARCOVID_REPORT__ENABLE_MULTI_BACKEND_DOWNLOAD")
if environment_enable_multi_backend_download:
    report_backend_identifiers = None
else:
    report_backend_identifiers = [report_backend_identifier]
report_backend_identifiers
# Comma-separated dates whose shared-diagnoses figures are known to be bad.
environment_invalid_shared_diagnoses_dates = \
    os.environ.get("RADARCOVID_REPORT__INVALID_SHARED_DIAGNOSES_DATES")
if environment_invalid_shared_diagnoses_dates:
    invalid_shared_diagnoses_dates = environment_invalid_shared_diagnoses_dates.split(",")
else:
    invalid_shared_diagnoses_dates = []
invalid_shared_diagnoses_dates
```
### COVID-19 Cases
```
# Client for the backend this report covers.
report_backend_client = \
    exposure_notification_io.get_backend_client_with_identifier(
        backend_identifier=report_backend_identifier)
@retry.retry(tries=10, delay=10, backoff=1.1, jitter=(0, 10))
def download_cases_dataframe():
    """Download the OWID COVID-19 dataset, retrying on transient errors."""
    return pd.read_csv("https://raw.githubusercontent.com/owid/covid-19-data/master/public/data/owid-covid-data.csv")
confirmed_df_ = download_cases_dataframe()
confirmed_df_.iloc[0]
# Keep only the columns the report needs and normalize their names.
confirmed_df = confirmed_df_.copy()
confirmed_df = confirmed_df[["date", "new_cases", "iso_code"]]
confirmed_df.rename(
    columns={
        "date": "sample_date",
        "iso_code": "country_code",
    },
    inplace=True)
def convert_iso_alpha_3_to_alpha_2(x):
    """Return the ISO 3166-1 alpha-2 code for alpha-3 code `x`, or None if unmappable."""
    try:
        country = pycountry.countries.get(alpha_3=x)
        return country.alpha_2
    except Exception as e:
        # Best-effort: log the failure and signal it with None.
        logging.info(f"Error converting country ISO Alpha 3 code '{x}': {repr(e)}")
        return None
confirmed_df["country_code"] = confirmed_df.country_code.apply(convert_iso_alpha_3_to_alpha_2)
confirmed_df.dropna(inplace=True)
# Normalize sample dates to "YYYY-MM-DD" strings, sorted ascending.
confirmed_df["sample_date"] = pd.to_datetime(confirmed_df.sample_date, dayfirst=True)
confirmed_df["sample_date"] = confirmed_df.sample_date.dt.strftime("%Y-%m-%d")
confirmed_df.sort_values("sample_date", inplace=True)
confirmed_df.tail()
# Calendar spine: every day from the first case sample up to now.
confirmed_days = pd.date_range(
    start=confirmed_df.iloc[0].sample_date,
    end=extraction_datetime)
confirmed_days_df = pd.DataFrame(data=confirmed_days, columns=["sample_date"])
confirmed_days_df["sample_date_string"] = \
    confirmed_days_df.sample_date.dt.strftime("%Y-%m-%d")
confirmed_days_df.tail()
def sort_source_regions_for_display(source_regions: list) -> list:
    """Sort region codes alphabetically, pinning the report backend first.

    When the report backend is present, duplicates are removed and it leads
    the list; otherwise the input is simply sorted (duplicates preserved).
    """
    if report_backend_identifier not in source_regions:
        return sorted(source_regions)
    other_regions = sorted(set(source_regions) - {report_backend_identifier})
    return [report_backend_identifier] + other_regions
# Source countries whose keys feed the report backend today.
report_source_regions = report_backend_client.source_regions_for_date(
    date=extraction_datetime.date())
report_source_regions = sort_source_regions_for_display(
    source_regions=report_source_regions)
report_source_regions
def get_cases_dataframe(source_regions_for_date_function, columns_suffix=None):
    """Build a daily COVID-19 cases series aggregated over time-varying source regions.

    source_regions_for_date_function: callable(date=...) -> list of country
        codes in effect on that date.
    columns_suffix: optional suffix appended to output column names
        (e.g. "es" -> "new_cases_es").

    Returns (result_df, source_regions_for_summary_df_): one row per calendar
    day with "new_cases" and a rounded 7-day-average "covid_cases" column,
    plus the per-day source-regions group used for the summary index.
    """
    # Which set of source regions applies on each calendar day.
    source_regions_at_date_df = confirmed_days_df.copy()
    source_regions_at_date_df["source_regions_at_date"] = \
        source_regions_at_date_df.sample_date.apply(
            lambda x: source_regions_for_date_function(date=x))
    source_regions_at_date_df.sort_values("sample_date", inplace=True)
    # Group key: comma-joined, display-sorted region list.
    source_regions_at_date_df["_source_regions_group"] = source_regions_at_date_df. \
        source_regions_at_date.apply(lambda x: ",".join(sort_source_regions_for_display(x)))
    source_regions_at_date_df.tail()
    #%%
    source_regions_for_summary_df_ = \
        source_regions_at_date_df[["sample_date", "_source_regions_group"]].copy()
    source_regions_for_summary_df_.rename(columns={"_source_regions_group": "source_regions"}, inplace=True)
    source_regions_for_summary_df_.tail()
    #%%
    confirmed_output_columns = ["sample_date", "new_cases", "covid_cases"]
    confirmed_output_df = pd.DataFrame(columns=confirmed_output_columns)
    # Aggregate cases per distinct region set, then keep only the days on
    # which that region set was actually in effect.
    for source_regions_group, source_regions_group_series in \
            source_regions_at_date_df.groupby("_source_regions_group"):
        source_regions_set = set(source_regions_group.split(","))
        confirmed_source_regions_set_df = \
            confirmed_df[confirmed_df.country_code.isin(source_regions_set)].copy()
        # Sum daily new cases across the region set.
        confirmed_source_regions_group_df = \
            confirmed_source_regions_set_df.groupby("sample_date").new_cases.sum() \
                .reset_index().sort_values("sample_date")
        # Right-merge onto the full calendar so missing days appear as NaN.
        confirmed_source_regions_group_df = \
            confirmed_source_regions_group_df.merge(
                confirmed_days_df[["sample_date_string"]].rename(
                    columns={"sample_date_string": "sample_date"}),
                how="right")
        # Negative corrections in the source data are clipped to zero.
        confirmed_source_regions_group_df["new_cases"] = \
            confirmed_source_regions_group_df["new_cases"].clip(lower=0)
        # Rounded 7-day rolling average.
        confirmed_source_regions_group_df["covid_cases"] = \
            confirmed_source_regions_group_df.new_cases.rolling(7, min_periods=0).mean().round()
        confirmed_source_regions_group_df = \
            confirmed_source_regions_group_df[confirmed_output_columns]
        # Treat zeros as missing and forward-fill gaps.
        confirmed_source_regions_group_df = confirmed_source_regions_group_df.replace(0, np.nan)
        confirmed_source_regions_group_df.fillna(method="ffill", inplace=True)
        confirmed_source_regions_group_df = \
            confirmed_source_regions_group_df[
                confirmed_source_regions_group_df.sample_date.isin(
                    source_regions_group_series.sample_date_string)]
        # NOTE(review): DataFrame.append was removed in pandas 2.0; this code
        # requires pandas < 2 (pd.concat is the modern equivalent).
        confirmed_output_df = confirmed_output_df.append(confirmed_source_regions_group_df)
    result_df = confirmed_output_df.copy()
    result_df.tail()
    #%%
    result_df.rename(columns={"sample_date": "sample_date_string"}, inplace=True)
    result_df = confirmed_days_df[["sample_date_string"]].merge(result_df, how="left")
    result_df.sort_values("sample_date_string", inplace=True)
    result_df.fillna(method="ffill", inplace=True)
    result_df.tail()
    #%%
    result_df[["new_cases", "covid_cases"]].plot()
    if columns_suffix:
        result_df.rename(
            columns={
                "new_cases": "new_cases_" + columns_suffix,
                "covid_cases": "covid_cases_" + columns_suffix},
            inplace=True)
    return result_df, source_regions_for_summary_df_
# EU (source-region) case series and Spain-only case series.
confirmed_eu_df, source_regions_for_summary_df = get_cases_dataframe(
    report_backend_client.source_regions_for_date)
confirmed_es_df, _ = get_cases_dataframe(
    lambda date: [spain_region_country_code],
    columns_suffix=spain_region_country_code.lower())
```
### Extract API TEKs
```
# Download TEKs from the configured backends, archiving the raw zips.
raw_zip_path_prefix = "Data/TEKs/Raw/"
base_backend_identifiers = [report_backend_identifier]
multi_backend_exposure_keys_df = \
    exposure_notification_io.download_exposure_keys_from_backends(
        backend_identifiers=report_backend_identifiers,
        generation_days=backend_generation_days,
        fail_on_error_backend_identifiers=base_backend_identifiers,
        save_raw_zip_path_prefix=raw_zip_path_prefix)
multi_backend_exposure_keys_df["region"] = multi_backend_exposure_keys_df["backend_identifier"]
multi_backend_exposure_keys_df.rename(
    columns={
        "generation_datetime": "sample_datetime",
        "generation_date_string": "sample_date_string",
    },
    inplace=True)
multi_backend_exposure_keys_df.head()
# TEKs with a rolling period below 144 intervals (< 24h at 10-minute
# intervals) were cut short; plot their effective validity in hours.
early_teks_df = multi_backend_exposure_keys_df[
    multi_backend_exposure_keys_df.rolling_period < 144].copy()
early_teks_df["rolling_period_in_hours"] = early_teks_df.rolling_period / 6
early_teks_df[early_teks_df.sample_date_string != extraction_date] \
    .rolling_period_in_hours.hist(bins=list(range(24)))
early_teks_df[early_teks_df.sample_date_string == extraction_date] \
    .rolling_period_in_hours.hist(bins=list(range(24)))
multi_backend_exposure_keys_df = multi_backend_exposure_keys_df[[
    "sample_date_string", "region", "key_data"]]
multi_backend_exposure_keys_df.head()
# Backends ordered by number of distinct TEKs.
active_regions = \
    multi_backend_exposure_keys_df.groupby("region").key_data.nunique().sort_values().index.unique().tolist()
active_regions
# Distinct TEKs per generation date per backend.
multi_backend_summary_df = multi_backend_exposure_keys_df.groupby(
    ["sample_date_string", "region"]).key_data.nunique().reset_index() \
    .pivot(index="sample_date_string", columns="region") \
    .sort_index(ascending=False)
multi_backend_summary_df.rename(
    columns={"key_data": "shared_teks_by_generation_date"},
    inplace=True)
multi_backend_summary_df.rename_axis("sample_date", inplace=True)
multi_backend_summary_df = multi_backend_summary_df.fillna(0).astype(int)
multi_backend_summary_df = multi_backend_summary_df.head(backend_generation_days)
multi_backend_summary_df.head()
def compute_keys_cross_sharing(x):
    """For one (region_x, region_y) group, report the TEKs both backends hold
    and the fraction of backend A's TEKs also present in backend B."""
    backend_a_teks = x.key_data_x.item()
    backend_b_teks = x.key_data_y.item()
    shared_teks = set(backend_a_teks) & set(backend_b_teks)
    return pd.Series({
        "common_teks": shared_teks,
        "common_teks_fraction": len(shared_teks) / len(backend_a_teks),
    })
# Cartesian product of backends (via a dummy "_merge" key) to compare
# which TEKs each pair of backends has in common.
multi_backend_exposure_keys_by_region_df = \
    multi_backend_exposure_keys_df.groupby("region").key_data.unique().reset_index()
multi_backend_exposure_keys_by_region_df["_merge"] = True
multi_backend_exposure_keys_by_region_combination_df = \
    multi_backend_exposure_keys_by_region_df.merge(
        multi_backend_exposure_keys_by_region_df, on="_merge")
multi_backend_exposure_keys_by_region_combination_df.drop(
    columns=["_merge"], inplace=True)
# Drop self-pairs unless there is only a single backend.
if multi_backend_exposure_keys_by_region_combination_df.region_x.nunique() > 1:
    multi_backend_exposure_keys_by_region_combination_df = \
        multi_backend_exposure_keys_by_region_combination_df[
            multi_backend_exposure_keys_by_region_combination_df.region_x !=
            multi_backend_exposure_keys_by_region_combination_df.region_y]
multi_backend_exposure_keys_cross_sharing_df = \
    multi_backend_exposure_keys_by_region_combination_df \
        .groupby(["region_x", "region_y"]) \
        .apply(compute_keys_cross_sharing) \
        .reset_index()
# Pivot to a backend-by-backend matrix of sharing fractions.
multi_backend_cross_sharing_summary_df = \
    multi_backend_exposure_keys_cross_sharing_df.pivot_table(
        values=["common_teks_fraction"],
        columns="region_x",
        index="region_y",
        aggfunc=lambda x: x.item())
multi_backend_cross_sharing_summary_df
multi_backend_without_active_region_exposure_keys_df = \
    multi_backend_exposure_keys_df[multi_backend_exposure_keys_df.region != report_backend_identifier]
multi_backend_without_active_region = \
    multi_backend_without_active_region_exposure_keys_df.groupby("region").key_data.nunique().sort_values().index.unique().tolist()
multi_backend_without_active_region
# Per-day distinct TEK counts for the report backend only.
exposure_keys_summary_df = multi_backend_exposure_keys_df[
    multi_backend_exposure_keys_df.region == report_backend_identifier]
exposure_keys_summary_df.drop(columns=["region"], inplace=True)
exposure_keys_summary_df = \
    exposure_keys_summary_df.groupby(["sample_date_string"]).key_data.nunique().to_frame()
exposure_keys_summary_df = \
    exposure_keys_summary_df.reset_index().set_index("sample_date_string")
exposure_keys_summary_df.sort_index(ascending=False, inplace=True)
exposure_keys_summary_df.rename(columns={"key_data": "shared_teks_by_generation_date"}, inplace=True)
exposure_keys_summary_df.head()
```
### Dump API TEKs
```
# Persist this run's TEK lists as JSON dumps (current / daily / hourly).
tek_list_df = multi_backend_exposure_keys_df[
    ["sample_date_string", "region", "key_data"]].copy()
tek_list_df["key_data"] = tek_list_df["key_data"].apply(str)
tek_list_df.rename(columns={
    "sample_date_string": "sample_date",
    "key_data": "tek_list"}, inplace=True)
tek_list_df = tek_list_df.groupby(
    ["sample_date", "region"]).tek_list.unique().reset_index()
tek_list_df["extraction_date"] = extraction_date
tek_list_df["extraction_date_with_hour"] = extraction_date_with_hour
tek_list_path_prefix = "Data/TEKs/"
# NOTE(review): the leading "/" yields "Data/TEKs//Current/..." — harmless on
# POSIX paths, but redundant.
tek_list_current_path = tek_list_path_prefix + f"/Current/RadarCOVID-TEKs.json"
tek_list_daily_path = tek_list_path_prefix + f"Daily/RadarCOVID-TEKs-{extraction_date}.json"
tek_list_hourly_path = tek_list_path_prefix + f"Hourly/RadarCOVID-TEKs-{extraction_date_with_hour}.json"
for path in [tek_list_current_path, tek_list_daily_path, tek_list_hourly_path]:
    os.makedirs(os.path.dirname(path), exist_ok=True)
tek_list_base_df = tek_list_df[tek_list_df.region == report_backend_identifier]
# Current dump carries no extraction metadata; daily keeps the date; hourly
# keeps both date and hour.
tek_list_base_df.drop(columns=["extraction_date", "extraction_date_with_hour"]).to_json(
    tek_list_current_path,
    lines=True, orient="records")
tek_list_base_df.drop(columns=["extraction_date_with_hour"]).to_json(
    tek_list_daily_path,
    lines=True, orient="records")
tek_list_base_df.to_json(
    tek_list_hourly_path,
    lines=True, orient="records")
tek_list_base_df.head()
```
### Load TEK Dumps
```
import glob
def load_extracted_teks(mode, region=None, limit=None) -> pd.DataFrame:
    """Load previously dumped TEK JSON files.

    mode: dump directory name, "Daily" or "Hourly".
    region: if given, keep only rows for that backend region.
    limit: if given, load at most this many of the newest files.

    Returns a dataframe with at least a "region" column (rows with no region
    default to the Spain backend).
    """
    extracted_teks_df = pd.DataFrame(columns=["region"])
    # Newest dumps first (file names embed the date, so sorting is chronological).
    file_paths = list(reversed(sorted(glob.glob(
        tek_list_path_prefix + mode + "/RadarCOVID-TEKs-*.json"))))
    if limit:
        file_paths = file_paths[:limit]
    for file_path in file_paths:
        logging.info(f"Loading TEKs from '{file_path}'...")
        iteration_extracted_teks_df = pd.read_json(file_path, lines=True)
        # pd.concat instead of DataFrame.append: append was deprecated in
        # pandas 1.4 and removed in 2.0; behavior here is identical.
        extracted_teks_df = pd.concat(
            [extracted_teks_df, iteration_extracted_teks_df], sort=False)
    extracted_teks_df["region"] = \
        extracted_teks_df.region.fillna(spain_region_country_code).copy()
    if region:
        extracted_teks_df = \
            extracted_teks_df[extracted_teks_df.region == region]
    return extracted_teks_df
daily_extracted_teks_df = load_extracted_teks(
    mode="Daily",
    region=report_backend_identifier,
    limit=tek_dumps_load_limit)
daily_extracted_teks_df.head()
# Rebuild per-generation-date TEK counts from the dumps, taking the most
# recent extraction available for each sample date.
exposure_keys_summary_df_ = daily_extracted_teks_df \
    .sort_values("extraction_date", ascending=False) \
    .groupby("sample_date").tek_list.first() \
    .to_frame()
exposure_keys_summary_df_.index.name = "sample_date_string"
exposure_keys_summary_df_["tek_list"] = \
    exposure_keys_summary_df_.tek_list.apply(len)
exposure_keys_summary_df_ = exposure_keys_summary_df_ \
    .rename(columns={"tek_list": "shared_teks_by_generation_date"}) \
    .sort_index(ascending=False)
# Replaces the API-derived summary computed above.
exposure_keys_summary_df = exposure_keys_summary_df_
exposure_keys_summary_df.head()
```
### Daily New TEKs
```
# Union of all TEKs seen per extraction date, indexed chronologically.
tek_list_df = daily_extracted_teks_df.groupby("extraction_date").tek_list.apply(
    lambda x: set(sum(x, []))).reset_index()
tek_list_df = tek_list_df.set_index("extraction_date").sort_index(ascending=True)
tek_list_df.head()
def compute_teks_by_generation_and_upload_date(date):
    """For an upload (extraction) date, count newly shared TEKs per generation date.

    Returns a dataframe with columns upload_date, generation_date, shared_teks
    and generation_to_upload_days, keeping only rows with shared_teks > 0.
    """
    # New TEKs on each extraction date = set difference vs. the previous day.
    day_new_teks_set_df = tek_list_df.copy().diff()
    try:
        day_new_teks_set = day_new_teks_set_df[
            day_new_teks_set_df.index == date].tek_list.item()
    except ValueError:
        # No (single) row for this date.
        day_new_teks_set = None
    if pd.isna(day_new_teks_set):
        day_new_teks_set = set()
    day_new_teks_df = daily_extracted_teks_df[
        daily_extracted_teks_df.extraction_date == date].copy()
    # Per generation date: how many of its TEKs are among the date's new ones.
    day_new_teks_df["shared_teks"] = \
        day_new_teks_df.tek_list.apply(lambda x: set(x).intersection(day_new_teks_set))
    day_new_teks_df["shared_teks"] = \
        day_new_teks_df.shared_teks.apply(len)
    day_new_teks_df["upload_date"] = date
    day_new_teks_df.rename(columns={"sample_date": "generation_date"}, inplace=True)
    day_new_teks_df = day_new_teks_df[
        ["upload_date", "generation_date", "shared_teks"]]
    day_new_teks_df["generation_to_upload_days"] = \
        (pd.to_datetime(day_new_teks_df.upload_date) -
         pd.to_datetime(day_new_teks_df.generation_date)).dt.days
    day_new_teks_df = day_new_teks_df[day_new_teks_df.shared_teks > 0]
    return day_new_teks_df
# Accumulate generation-to-upload distributions across all extraction dates.
shared_teks_generation_to_upload_df = pd.DataFrame()
for upload_date in daily_extracted_teks_df.extraction_date.unique():
    shared_teks_generation_to_upload_df = \
        shared_teks_generation_to_upload_df.append(
            compute_teks_by_generation_and_upload_date(date=upload_date))
shared_teks_generation_to_upload_df \
    .sort_values(["upload_date", "generation_date"], ascending=False, inplace=True)
shared_teks_generation_to_upload_df.tail()
today_new_teks_df = \
    shared_teks_generation_to_upload_df[
        shared_teks_generation_to_upload_df.upload_date == extraction_date].copy()
today_new_teks_df.tail()
if not today_new_teks_df.empty:
    today_new_teks_df.set_index("generation_to_upload_days") \
        .sort_index().shared_teks.plot.bar()
# Upload-date x generation-delay matrix of shared TEK counts.
generation_to_upload_period_pivot_df = \
    shared_teks_generation_to_upload_df[
        ["upload_date", "generation_to_upload_days", "shared_teks"]] \
        .pivot(index="upload_date", columns="generation_to_upload_days") \
        .sort_index(ascending=False).fillna(0).astype(int) \
        .droplevel(level=0, axis=1)
generation_to_upload_period_pivot_df.head()
# Newly shared TEKs per upload date.
new_tek_df = tek_list_df.diff().tek_list.apply(
    lambda x: len(x) if not pd.isna(x) else None).to_frame().reset_index()
new_tek_df.rename(columns={
    "tek_list": "shared_teks_by_upload_date",
    "extraction_date": "sample_date_string",}, inplace=True)
new_tek_df.tail()
# TEKs uploaded the same day they were generated (delay == 0).
shared_teks_uploaded_on_generation_date_df = shared_teks_generation_to_upload_df[
    shared_teks_generation_to_upload_df.generation_to_upload_days == 0] \
    [["upload_date", "shared_teks"]].rename(
        columns={
            "upload_date": "sample_date_string",
            "shared_teks": "shared_teks_uploaded_on_generation_date",
        })
shared_teks_uploaded_on_generation_date_df.head()
# Estimate diagnoses: the max TEK count over generation dates for an upload
# day approximates the number of devices that uploaded that day.
estimated_shared_diagnoses_df = shared_teks_generation_to_upload_df \
    .groupby(["upload_date"]).shared_teks.max().reset_index() \
    .sort_values(["upload_date"], ascending=False) \
    .rename(columns={
        "upload_date": "sample_date_string",
        "shared_teks": "shared_diagnoses",
    })
invalid_shared_diagnoses_dates_mask = \
    estimated_shared_diagnoses_df.sample_date_string.isin(invalid_shared_diagnoses_dates)
# NOTE(review): this zeroes every column of the masked rows, including
# sample_date_string itself — confirm whether only "shared_diagnoses" was meant.
estimated_shared_diagnoses_df[invalid_shared_diagnoses_dates_mask] = 0
estimated_shared_diagnoses_df.head()
```
### Hourly New TEKs
```
# Last ~24 hourly dumps for the report backend.
hourly_extracted_teks_df = load_extracted_teks(
    mode="Hourly", region=report_backend_identifier, limit=25)
hourly_extracted_teks_df.head()
# New TEKs per extraction hour (set difference vs. the previous hour).
hourly_new_tek_count_df = hourly_extracted_teks_df \
    .groupby("extraction_date_with_hour").tek_list. \
    apply(lambda x: set(sum(x, []))).reset_index().copy()
hourly_new_tek_count_df = hourly_new_tek_count_df.set_index("extraction_date_with_hour") \
    .sort_index(ascending=True)
hourly_new_tek_count_df["new_tek_list"] = hourly_new_tek_count_df.tek_list.diff()
hourly_new_tek_count_df["new_tek_count"] = hourly_new_tek_count_df.new_tek_list.apply(
    lambda x: len(x) if not pd.isna(x) else 0)
hourly_new_tek_count_df.rename(columns={
    "new_tek_count": "shared_teks_by_upload_date"}, inplace=True)
hourly_new_tek_count_df = hourly_new_tek_count_df.reset_index()[[
    "extraction_date_with_hour", "shared_teks_by_upload_date"]]
hourly_new_tek_count_df.head()
hourly_summary_df = hourly_new_tek_count_df.copy()
hourly_summary_df.set_index("extraction_date_with_hour", inplace=True)
hourly_summary_df = hourly_summary_df.fillna(0).astype(int).reset_index()
hourly_summary_df["datetime_utc"] = pd.to_datetime(
    hourly_summary_df.extraction_date_with_hour, format="%Y-%m-%d@%H")
hourly_summary_df.set_index("datetime_utc", inplace=True)
# Drop the first hour: its diff against the (missing) previous dump is invalid.
hourly_summary_df = hourly_summary_df.tail(-1)
hourly_summary_df.head()
```
### Official Statistics
```
import requests
import pandas.io.json
# Official KPI statistics published by the RadarCOVID service.
official_stats_response = requests.get("https://radarcovid.covid19.gob.es/kpi/statistics/basics")
official_stats_response.raise_for_status()
official_stats_df_ = pandas.io.json.json_normalize(official_stats_response.json())
official_stats_df = official_stats_df_.copy()
official_stats_df["date"] = pd.to_datetime(official_stats_df["date"], dayfirst=True)
official_stats_df.head()
official_stats_column_map = {
    "date": "sample_date",
    "applicationsDownloads.totalAcummulated": "app_downloads_es_accumulated",
    "communicatedContagions.totalAcummulated": "shared_diagnoses_es_accumulated",
}
accumulated_suffix = "_accumulated"
# Columns holding running totals, and their per-day counterparts derived
# later by interpolation + diff.
accumulated_values_columns = \
    list(filter(lambda x: x.endswith(accumulated_suffix), official_stats_column_map.values()))
interpolated_values_columns = \
    list(map(lambda x: x[:-len(accumulated_suffix)], accumulated_values_columns))
official_stats_df = \
    official_stats_df[official_stats_column_map.keys()] \
        .rename(columns=official_stats_column_map)
official_stats_df["extraction_date"] = extraction_date
official_stats_df.head()
# Merge with previously stored statistics, keep the newest extraction per
# sample date, and persist the result back to disk.
official_stats_path = "Data/Statistics/Current/RadarCOVID-Statistics.json"
previous_official_stats_df = pd.read_json(official_stats_path, orient="records", lines=True)
previous_official_stats_df["sample_date"] = pd.to_datetime(previous_official_stats_df["sample_date"], dayfirst=True)
official_stats_df = official_stats_df.append(previous_official_stats_df)
official_stats_df.head()
# Drop rows where the official totals have not been populated yet.
official_stats_df = official_stats_df[~(official_stats_df.shared_diagnoses_es_accumulated == 0)]
official_stats_df.sort_values("extraction_date", ascending=False, inplace=True)
official_stats_df.drop_duplicates(subset=["sample_date"], keep="first", inplace=True)
official_stats_df.head()
official_stats_stored_df = official_stats_df.copy()
official_stats_stored_df["sample_date"] = official_stats_stored_df.sample_date.dt.strftime("%Y-%m-%d")
official_stats_stored_df.to_json(official_stats_path, orient="records", lines=True)
official_stats_df.drop(columns=["extraction_date"], inplace=True)
# Align to the calendar spine and derive daily values from running totals.
official_stats_df = confirmed_days_df.merge(official_stats_df, how="left")
official_stats_df.sort_values("sample_date", ascending=False, inplace=True)
official_stats_df.head()
official_stats_df[accumulated_values_columns] = \
    official_stats_df[accumulated_values_columns] \
        .astype(float).interpolate(limit_area="inside")
official_stats_df[interpolated_values_columns] = \
    official_stats_df[accumulated_values_columns].diff(periods=-1)
official_stats_df.drop(columns="sample_date", inplace=True)
official_stats_df.head()
```
### Data Merge
```
# Outer-join all daily series on sample_date_string into one summary table.
result_summary_df = exposure_keys_summary_df.merge(
    new_tek_df, on=["sample_date_string"], how="outer")
result_summary_df.head()
result_summary_df = result_summary_df.merge(
    shared_teks_uploaded_on_generation_date_df, on=["sample_date_string"], how="outer")
result_summary_df.head()
result_summary_df = result_summary_df.merge(
    estimated_shared_diagnoses_df, on=["sample_date_string"], how="outer")
result_summary_df.head()
result_summary_df = result_summary_df.merge(
    official_stats_df, on=["sample_date_string"], how="outer")
result_summary_df.head()
# Restrict to the summary window defined by the case series.
result_summary_df = confirmed_eu_df.tail(daily_summary_days).merge(
    result_summary_df, on=["sample_date_string"], how="left")
result_summary_df.head()
result_summary_df = confirmed_es_df.tail(daily_summary_days).merge(
    result_summary_df, on=["sample_date_string"], how="left")
result_summary_df.head()
result_summary_df["sample_date"] = pd.to_datetime(result_summary_df.sample_date_string)
result_summary_df = result_summary_df.merge(source_regions_for_summary_df, how="left")
result_summary_df.set_index(["sample_date", "source_regions"], inplace=True)
result_summary_df.drop(columns=["sample_date_string"], inplace=True)
result_summary_df.sort_index(ascending=False, inplace=True)
result_summary_df.head()
with pd.option_context("mode.use_inf_as_na", True):
    result_summary_df = result_summary_df.fillna(0).astype(int)
    # Derived ratios; with use_inf_as_na, division by zero -> inf -> NaN -> 0.
    result_summary_df["teks_per_shared_diagnosis"] = \
        (result_summary_df.shared_teks_by_upload_date / result_summary_df.shared_diagnoses).fillna(0)
    result_summary_df["shared_diagnoses_per_covid_case"] = \
        (result_summary_df.shared_diagnoses / result_summary_df.covid_cases).fillna(0)
    result_summary_df["shared_diagnoses_per_covid_case_es"] = \
        (result_summary_df.shared_diagnoses_es / result_summary_df.covid_cases_es).fillna(0)
result_summary_df.head(daily_plot_days)
def compute_aggregated_results_summary(days) -> pd.DataFrame:
    """Aggregate the daily summary over a rolling window of `days` days.

    The covid_cases_for_ratio* columns mask out days without shared diagnoses
    so the usage ratios are computed only over days that have data.
    """
    aggregated_result_summary_df = result_summary_df.copy()
    aggregated_result_summary_df["covid_cases_for_ratio"] = \
        aggregated_result_summary_df.covid_cases.mask(
            aggregated_result_summary_df.shared_diagnoses == 0, 0)
    aggregated_result_summary_df["covid_cases_for_ratio_es"] = \
        aggregated_result_summary_df.covid_cases_es.mask(
            aggregated_result_summary_df.shared_diagnoses_es == 0, 0)
    # Rolling sums over the window, oldest-first, then restored newest-first.
    aggregated_result_summary_df = aggregated_result_summary_df \
        .sort_index(ascending=True).fillna(0).rolling(days).agg({
            "covid_cases": "sum",
            "covid_cases_es": "sum",
            "covid_cases_for_ratio": "sum",
            "covid_cases_for_ratio_es": "sum",
            "shared_teks_by_generation_date": "sum",
            "shared_teks_by_upload_date": "sum",
            "shared_diagnoses": "sum",
            "shared_diagnoses_es": "sum",
        }).sort_index(ascending=False)
    with pd.option_context("mode.use_inf_as_na", True):
        aggregated_result_summary_df = aggregated_result_summary_df.fillna(0).astype(int)
        # FIX: TEKs per shared diagnosis must divide by shared_diagnoses, as in
        # the daily summary; the original divided by covid_cases_for_ratio
        # (the denominator of the usage ratio, apparently copy-pasted from the
        # computation below).
        aggregated_result_summary_df["teks_per_shared_diagnosis"] = \
            (aggregated_result_summary_df.shared_teks_by_upload_date /
             aggregated_result_summary_df.shared_diagnoses).fillna(0)
        aggregated_result_summary_df["shared_diagnoses_per_covid_case"] = \
            (aggregated_result_summary_df.shared_diagnoses /
             aggregated_result_summary_df.covid_cases_for_ratio).fillna(0)
        aggregated_result_summary_df["shared_diagnoses_per_covid_case_es"] = \
            (aggregated_result_summary_df.shared_diagnoses_es /
             aggregated_result_summary_df.covid_cases_for_ratio_es).fillna(0)
    return aggregated_result_summary_df
aggregated_result_with_7_days_window_summary_df = compute_aggregated_results_summary(days=7)
aggregated_result_with_7_days_window_summary_df.head()
# Record index 1 skips today's (partial) row.
last_7_days_summary = aggregated_result_with_7_days_window_summary_df.to_dict(orient="records")[1]
last_7_days_summary
# NOTE(review): the window is 13 days despite the "14 days" name — confirm intent.
aggregated_result_with_14_days_window_summary_df = compute_aggregated_results_summary(days=13)
last_14_days_summary = aggregated_result_with_14_days_window_summary_df.to_dict(orient="records")[1]
last_14_days_summary
```
## Report Results
```
# Internal column/index names -> human-readable labels for published tables
# (non-breaking spaces keep multi-word labels on one line).
display_column_name_mapping = {
    "sample_date": "Sample\u00A0Date\u00A0(UTC)",
    "source_regions": "Source Countries",
    "datetime_utc": "Timestamp (UTC)",
    "upload_date": "Upload Date (UTC)",
    "generation_to_upload_days": "Generation to Upload Period in Days",
    "region": "Backend",
    "region_x": "Backend\u00A0(A)",
    "region_y": "Backend\u00A0(B)",
    "common_teks": "Common TEKs Shared Between Backends",
    "common_teks_fraction": "Fraction of TEKs in Backend (A) Available in Backend (B)",
    "covid_cases": "COVID-19 Cases (Source Countries)",
    "shared_teks_by_generation_date": "Shared TEKs by Generation Date (Source Countries)",
    "shared_teks_by_upload_date": "Shared TEKs by Upload Date (Source Countries)",
    "shared_teks_uploaded_on_generation_date": "Shared TEKs Uploaded on Generation Date (Source Countries)",
    "shared_diagnoses": "Shared Diagnoses (Source Countries – Estimation)",
    "teks_per_shared_diagnosis": "TEKs Uploaded per Shared Diagnosis (Source Countries)",
    "shared_diagnoses_per_covid_case": "Usage Ratio (Source Countries)",
    "covid_cases_es": "COVID-19 Cases (Spain)",
    "app_downloads_es": "App Downloads (Spain – Official)",
    "shared_diagnoses_es": "Shared Diagnoses (Spain – Official)",
    "shared_diagnoses_per_covid_case_es": "Usage Ratio (Spain)",
}
# Columns included in the daily summary table, in display order.
summary_columns = [
    "covid_cases",
    "shared_teks_by_generation_date",
    "shared_teks_by_upload_date",
    "shared_teks_uploaded_on_generation_date",
    "shared_diagnoses",
    "teks_per_shared_diagnosis",
    "shared_diagnoses_per_covid_case",
    "covid_cases_es",
    "app_downloads_es",
    "shared_diagnoses_es",
    "shared_diagnoses_per_covid_case_es",
]
# Columns rendered as percentages in plots and tables.
summary_percentage_columns= [
    "shared_diagnoses_per_covid_case_es",
    "shared_diagnoses_per_covid_case",
]
```
### Daily Summary Table
```
# Keep a full copy before trimming to the display columns.
result_summary_df_ = result_summary_df.copy()
result_summary_df = result_summary_df[summary_columns]
result_summary_with_display_names_df = result_summary_df \
    .rename_axis(index=display_column_name_mapping) \
    .rename(columns=display_column_name_mapping)
result_summary_with_display_names_df
```
### Daily Summary Plots
```
# One bar subplot per summary column over the plot window.
result_plot_summary_df = result_summary_df.head(daily_plot_days)[summary_columns] \
    .droplevel(level=["source_regions"]) \
    .rename_axis(index=display_column_name_mapping) \
    .rename(columns=display_column_name_mapping)
summary_ax_list = result_plot_summary_df.sort_index(ascending=True).plot.bar(
    title=f"Daily Summary",
    rot=45, subplots=True, figsize=(15, 30), legend=False)
ax_ = summary_ax_list[0]
ax_.get_figure().tight_layout()
ax_.get_figure().subplots_adjust(top=0.95)
_ = ax_.set_xticklabels(sorted(result_plot_summary_df.index.strftime("%Y-%m-%d").tolist()))
# Percentage columns get a percent-formatted y axis.
for percentage_column in summary_percentage_columns:
    percentage_column_index = summary_columns.index(percentage_column)
    summary_ax_list[percentage_column_index].yaxis \
        .set_major_formatter(matplotlib.ticker.PercentFormatter(1.0))
```
### Daily Generation to Upload Period Table
```
display_generation_to_upload_period_pivot_df = \
    generation_to_upload_period_pivot_df \
        .head(backend_generation_days)
display_generation_to_upload_period_pivot_df \
    .head(backend_generation_days) \
    .rename_axis(columns=display_column_name_mapping) \
    .rename_axis(index=display_column_name_mapping)
# Heatmap height scales with the number of upload dates displayed.
fig, generation_to_upload_period_pivot_table_ax = plt.subplots(
    figsize=(12, 1 + 0.6 * len(display_generation_to_upload_period_pivot_df)))
generation_to_upload_period_pivot_table_ax.set_title(
    "Shared TEKs Generation to Upload Period Table")
sns.heatmap(
    data=display_generation_to_upload_period_pivot_df
        .rename_axis(columns=display_column_name_mapping)
        .rename_axis(index=display_column_name_mapping),
    fmt=".0f",
    annot=True,
    ax=generation_to_upload_period_pivot_table_ax)
generation_to_upload_period_pivot_table_ax.get_figure().tight_layout()
```
### Hourly Summary Plots
```
# One bar subplot per hourly metric over the last 24 hours.
hourly_summary_ax_list = hourly_summary_df \
    .rename_axis(index=display_column_name_mapping) \
    .rename(columns=display_column_name_mapping) \
    .plot.bar(
        title=f"Last 24h Summary",
        rot=45, subplots=True, legend=False)
ax_ = hourly_summary_ax_list[-1]
ax_.get_figure().tight_layout()
ax_.get_figure().subplots_adjust(top=0.9)
_ = ax_.set_xticklabels(sorted(hourly_summary_df.index.strftime("%Y-%m-%d@%H").tolist()))
```
### Publish Results
```
github_repository = os.environ.get("GITHUB_REPOSITORY")
if github_repository is None:
    github_repository = "pvieito/Radar-STATS"
github_project_base_url = "https://github.com/" + github_repository
# Ratio columns get 2-decimal / percent formatting; every other column
# renders blank when its value is zero.
display_formatters = {
    display_column_name_mapping["teks_per_shared_diagnosis"]: lambda x: f"{x:.2f}" if x != 0 else "",
    display_column_name_mapping["shared_diagnoses_per_covid_case"]: lambda x: f"{x:.2%}" if x != 0 else "",
    display_column_name_mapping["shared_diagnoses_per_covid_case_es"]: lambda x: f"{x:.2%}" if x != 0 else "",
}
general_columns = \
    list(filter(lambda x: x not in display_formatters, display_column_name_mapping.values()))
general_formatter = lambda x: f"{x}" if x != 0 else ""
display_formatters.update(dict(map(lambda x: (x, general_formatter), general_columns)))
daily_summary_table_html = result_summary_with_display_names_df \
    .head(daily_plot_days) \
    .rename_axis(index=display_column_name_mapping) \
    .rename(columns=display_column_name_mapping) \
    .to_html(formatters=display_formatters)
multi_backend_summary_table_html = multi_backend_summary_df \
    .head(daily_plot_days) \
    .rename_axis(columns=display_column_name_mapping) \
    .rename(columns=display_column_name_mapping) \
    .rename_axis(index=display_column_name_mapping) \
    .to_html(formatters=display_formatters)
def format_multi_backend_cross_sharing_fraction(x):
    """Render a cross-sharing fraction for the HTML table.

    Missing values show as "-", fractions that round to 0.0% show as an
    empty cell, and anything else as a one-decimal percentage.
    """
    if pd.isna(x):
        return "-"
    rounded_percentage = round(x * 100, 1)
    return "" if rounded_percentage == 0 else f"{x:.1%}"
multi_backend_cross_sharing_summary_table_html = multi_backend_cross_sharing_summary_df \
.rename_axis(columns=display_column_name_mapping) \
.rename(columns=display_column_name_mapping) \
.rename_axis(index=display_column_name_mapping) \
.to_html(
classes="table-center",
formatters=display_formatters,
float_format=format_multi_backend_cross_sharing_fraction)
multi_backend_cross_sharing_summary_table_html = \
multi_backend_cross_sharing_summary_table_html \
.replace("<tr>","<tr style=\"text-align: center;\">")
extraction_date_result_summary_df = \
result_summary_df[result_summary_df.index.get_level_values("sample_date") == extraction_date]
extraction_date_result_hourly_summary_df = \
hourly_summary_df[hourly_summary_df.extraction_date_with_hour == extraction_date_with_hour]
covid_cases = \
extraction_date_result_summary_df.covid_cases.item()
shared_teks_by_generation_date = \
extraction_date_result_summary_df.shared_teks_by_generation_date.item()
shared_teks_by_upload_date = \
extraction_date_result_summary_df.shared_teks_by_upload_date.item()
shared_diagnoses = \
extraction_date_result_summary_df.shared_diagnoses.item()
teks_per_shared_diagnosis = \
extraction_date_result_summary_df.teks_per_shared_diagnosis.item()
shared_diagnoses_per_covid_case = \
extraction_date_result_summary_df.shared_diagnoses_per_covid_case.item()
shared_teks_by_upload_date_last_hour = \
extraction_date_result_hourly_summary_df.shared_teks_by_upload_date.sum().astype(int)
display_source_regions = ", ".join(report_source_regions)
if len(report_source_regions) == 1:
display_brief_source_regions = report_source_regions[0]
else:
display_brief_source_regions = f"{len(report_source_regions)} 🇪🇺"
def get_temporary_image_path() -> str:
    """Return a fresh, unique PNG file path inside the system temp directory."""
    file_name = f"{uuid.uuid4()}.png"
    return os.path.join(tempfile.gettempdir(), file_name)

def save_temporary_plot_image(ax):
    """Save the figure behind *ax* to a temporary PNG and return its path.

    An array of axes (as produced by ``subplots=True``) is represented by
    its first axis, since all axes of a subplot grid share one figure.
    """
    if isinstance(ax, np.ndarray):
        ax = ax[0]
    image_path = get_temporary_image_path()
    figure = ax.get_figure()
    figure.savefig(image_path)
    return image_path
def save_temporary_dataframe_image(df):
    """Render *df* as a styled table image in a temporary PNG; return the path.

    Uses the module-level ``display_formatters`` mapping for cell formatting.
    The ``dataframe_image`` import is deferred so the dependency is only
    required when image export is actually performed.
    """
    import dataframe_image as dfi
    styled = df.copy().style.format(display_formatters)
    image_path = get_temporary_image_path()
    dfi.export(styled, image_path)
    return image_path
summary_plots_image_path = save_temporary_plot_image(
ax=summary_ax_list)
summary_table_image_path = save_temporary_dataframe_image(
df=result_summary_with_display_names_df)
hourly_summary_plots_image_path = save_temporary_plot_image(
ax=hourly_summary_ax_list)
multi_backend_summary_table_image_path = save_temporary_dataframe_image(
df=multi_backend_summary_df)
generation_to_upload_period_pivot_table_image_path = save_temporary_plot_image(
ax=generation_to_upload_period_pivot_table_ax)
```
### Save Results
```
report_resources_path_prefix = "Data/Resources/Current/RadarCOVID-Report-"
result_summary_df.to_csv(
report_resources_path_prefix + "Summary-Table.csv")
result_summary_df.to_html(
report_resources_path_prefix + "Summary-Table.html")
hourly_summary_df.to_csv(
report_resources_path_prefix + "Hourly-Summary-Table.csv")
multi_backend_summary_df.to_csv(
report_resources_path_prefix + "Multi-Backend-Summary-Table.csv")
multi_backend_cross_sharing_summary_df.to_csv(
report_resources_path_prefix + "Multi-Backend-Cross-Sharing-Summary-Table.csv")
generation_to_upload_period_pivot_df.to_csv(
report_resources_path_prefix + "Generation-Upload-Period-Table.csv")
_ = shutil.copyfile(
summary_plots_image_path,
report_resources_path_prefix + "Summary-Plots.png")
_ = shutil.copyfile(
summary_table_image_path,
report_resources_path_prefix + "Summary-Table.png")
_ = shutil.copyfile(
hourly_summary_plots_image_path,
report_resources_path_prefix + "Hourly-Summary-Plots.png")
_ = shutil.copyfile(
multi_backend_summary_table_image_path,
report_resources_path_prefix + "Multi-Backend-Summary-Table.png")
_ = shutil.copyfile(
generation_to_upload_period_pivot_table_image_path,
report_resources_path_prefix + "Generation-Upload-Period-Table.png")
```
### Publish Results as JSON
```
def generate_summary_api_results(df: pd.DataFrame) -> list:
    """Convert a summary dataframe into a list of JSON-friendly records.

    The ``sample_date`` index level becomes an ISO-formatted string column,
    and the comma-separated ``source_regions`` column becomes a list.
    """
    records_df = df.reset_index().copy()
    records_df["sample_date_string"] = \
        records_df["sample_date"].dt.strftime("%Y-%m-%d")
    records_df["source_regions"] = records_df["source_regions"].str.split(",")
    return records_df.to_dict(orient="records")
summary_api_results = \
generate_summary_api_results(df=result_summary_df)
today_summary_api_results = \
generate_summary_api_results(df=extraction_date_result_summary_df)[0]
summary_results = dict(
backend_identifier=report_backend_identifier,
source_regions=report_source_regions,
extraction_datetime=extraction_datetime,
extraction_date=extraction_date,
extraction_date_with_hour=extraction_date_with_hour,
last_hour=dict(
shared_teks_by_upload_date=shared_teks_by_upload_date_last_hour,
shared_diagnoses=0,
),
today=today_summary_api_results,
last_7_days=last_7_days_summary,
last_14_days=last_14_days_summary,
daily_results=summary_api_results)
summary_results = \
json.loads(pd.Series([summary_results]).to_json(orient="records"))[0]
with open(report_resources_path_prefix + "Summary-Results.json", "w") as f:
json.dump(summary_results, f, indent=4)
```
### Publish on README
```
with open("Data/Templates/README.md", "r") as f:
readme_contents = f.read()
readme_contents = readme_contents.format(
extraction_date_with_hour=extraction_date_with_hour,
github_project_base_url=github_project_base_url,
daily_summary_table_html=daily_summary_table_html,
multi_backend_summary_table_html=multi_backend_summary_table_html,
multi_backend_cross_sharing_summary_table_html=multi_backend_cross_sharing_summary_table_html,
display_source_regions=display_source_regions)
with open("README.md", "w") as f:
f.write(readme_contents)
```
### Publish on Twitter
```
enable_share_to_twitter = os.environ.get("RADARCOVID_REPORT__ENABLE_PUBLISH_ON_TWITTER")
github_event_name = os.environ.get("GITHUB_EVENT_NAME")
if enable_share_to_twitter and github_event_name == "schedule" and \
(shared_teks_by_upload_date_last_hour or not are_today_results_partial):
import tweepy
twitter_api_auth_keys = os.environ["RADARCOVID_REPORT__TWITTER_API_AUTH_KEYS"]
twitter_api_auth_keys = twitter_api_auth_keys.split(":")
auth = tweepy.OAuthHandler(twitter_api_auth_keys[0], twitter_api_auth_keys[1])
auth.set_access_token(twitter_api_auth_keys[2], twitter_api_auth_keys[3])
api = tweepy.API(auth)
summary_plots_media = api.media_upload(summary_plots_image_path)
summary_table_media = api.media_upload(summary_table_image_path)
generation_to_upload_period_pivot_table_image_media = api.media_upload(generation_to_upload_period_pivot_table_image_path)
media_ids = [
summary_plots_media.media_id,
summary_table_media.media_id,
generation_to_upload_period_pivot_table_image_media.media_id,
]
if are_today_results_partial:
today_addendum = " (Partial)"
else:
today_addendum = ""
def format_shared_diagnoses_per_covid_case(value) -> str:
    """Format a usage ratio for the tweet: en-dash when zero, else an upper bound."""
    return "–" if value == 0 else f"≤{value:.2%}"
display_shared_diagnoses_per_covid_case = \
format_shared_diagnoses_per_covid_case(value=shared_diagnoses_per_covid_case)
display_last_14_days_shared_diagnoses_per_covid_case = \
format_shared_diagnoses_per_covid_case(value=last_14_days_summary["shared_diagnoses_per_covid_case"])
display_last_14_days_shared_diagnoses_per_covid_case_es = \
format_shared_diagnoses_per_covid_case(value=last_14_days_summary["shared_diagnoses_per_covid_case_es"])
status = textwrap.dedent(f"""
#RadarCOVID – {extraction_date_with_hour}
Today{today_addendum}:
- Uploaded TEKs: {shared_teks_by_upload_date:.0f} ({shared_teks_by_upload_date_last_hour:+d} last hour)
- Shared Diagnoses: ≤{shared_diagnoses:.0f}
- Usage Ratio: {display_shared_diagnoses_per_covid_case}
Last 14 Days:
- Usage Ratio (Estimation): {display_last_14_days_shared_diagnoses_per_covid_case}
- Usage Ratio (Official): {display_last_14_days_shared_diagnoses_per_covid_case_es}
Info: {github_project_base_url}#documentation
""")
status = status.encode(encoding="utf-8")
api.update_status(status=status, media_ids=media_ids)
```
| github_jupyter |
<a href="https://colab.research.google.com/github/gtbook/robotics/blob/main/S65_driving_planning.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
```
%pip install -q -U gtbook
# no imports (yet)
```
# Planning for Autonomous Driving.
> Motion primitives provide a computationally efficient tool for fast, local motion planning.
```
from gtbook.display import randomImages
from IPython.display import display
display(randomImages(6, 5, "steampunk", 1))
```
In previous chapters, we have mainly considered two kinds of planning problems.
For the trash sorting robot, vacuum cleaning robot, and warehouse robot, we focused
on the problem of making the best decisions in the presence of uncertainty.
In these problems, we used probability theory to quantify uncertainty,
and developed policies to maximize the expected benefit (or to minimize the expected cost)
of executing actions in a given state.
In contrast, for the differential drive robot (DDR), we considered the purely geometric
problem of planning collision-free paths.
A common characteristic of these is that each addresses a global problem.
For MDPs, we used value or policy iteration to establish a policy over the entire state space.
For DDRs, we searched the entire configuration space for a collision-free path.
Furthermore, the methods we developed for both problems were completely general.
Our probabilistic approaches work for arbitrary probability distributions, reward functions,
and system dynamics.
Our geometric approaches to path planning work for arbitrary environments,
and can easily be extended to robots with complex dynamics (e.g., we will extend RRTs to
the case of drones in the next chapter).
Methods that address global problems in broad generality often require significant computational
resources and significant computation time.
This can render such methods ineffective for situations in which real-time adaptivity
is required over short time horizons, or in local regions of the state space.
These conditions are exactly those confronted by self-driving cars,
and for this reason, in this chapter we introduce a new approach,
one that exploits precomputed motion primitives,
for motion planning.
## Motion Primitives
To this point, we have considered two approaches for quantifying motions.
For all of our probabilistic methods, we used a discrete time formulation and considered
the effects of executing an action (e.g., move forward, move left) for a small duration of time, $\Delta t$.
To plan collision-free paths, we considered artificial potential fields and RRTs, both of which
use short straight-line paths in the configuration space to connect configurations (small gradient descent
steps for potential fields, and steering toward $q_\mathrm{rand}$ for RRTs).
In each case, the language of path segments is very simple, and in each case,
a full plan will consist of many sequential steps.
This approach can be very inefficient for planning long trajectories that have well-defined
properties.
For example, consider a car traveling in reverse that wishes to suddenly change its orientation
by completing a rapid 180-degree turn (a favorite maneuver for drivers like James Bond and Steve McQueen).
This maneuver can be achieved by a predefined
sequence of steps: after achieving a reasonable speed, remove your foot from the gas pedal;
turn left sharply and hit the brakes; at the perfect moment, release the brakes
and straighten the wheel.
When stunt drivers execute this maneuver, they do not plan step-by-step what to do.
Rather, they have pre-compiled this sequence of steps into a basic action that can be executed
with little reasoning.
This is the basic idea of **motion primitives**.
Motion primitives can be defined in numerous ways.
We could specify a geometric curve without consideration of time or dynamics
(e.g., for a parallel parking robot, we might define an
initial curve to move the car from the street into an empty parking spot).
In cases where dynamics are significant (e.g., in drone flight), we might specify
a feedback control law to be executed from an initial state until some final state is achieved.
We might parameterize these primitives by duration, by geometric properties (e.g., angle, distance),
or by state feedback conditions.
This idea is illustrated in the figure below, which shows four motion primitives
for a car.
The primitive $P_1$ corresponds to driving forward, while motion primitives $P_2$, $P_3$, and $P_4$ correspond to veering
to the left at increasingly sharp angles.
<figure>
<img src="https://github.com/gtbook/robotics/blob/main/Figures6/motion-primitives.png?raw=1" id="fig:MotionPrimitives" style="width:18cm" alt="">
<figcaption>Four motion primitives for a car veering to its left. </figcaption>
</figure>
## Planning using Motion Primitives
The use of motion primitives can greatly reduce the cost of planning, since the set
of actions available at any moment in time is small and easily computed.
For the car example above, if we assume a symmetric set of motion primitives for veering to the right,
motion planning can be reduced to choosing from this set of seven possible actions at each moment in time.
If, for example, there is a slow moving car just ahead, it might be advantageous to change lanes using one of
$P_2$, $P_3$, or $P_4$.
If there is a rapidly approaching oncoming car, it might be best to use $P_2$, to delay changing lanes
until that car has passed by.
More generally, a motion primitive typically includes a set of conditions that define when
the primitive is applicable, and a set of possible transitions to other motion primitives.
For example, it would be reasonable to veer left slightly and then drive straight, but it would
not be reasonable to transition from forward motion to reverse motion without some intermediate
maneuvering.
Under these conditions, planning can be effected by a generate-and-test approach.
At each moment in time, the planner considers the current situation, enumerates the valid
motion primitives (using preconditions for execution and set of valid transitions), and evaluates
the benefit of each admissible candidate motion primitive. This approach can be effective for
problems such as highway driving, where local context is all that is necessary for making decisions.
For example, the traffic outside the Atlanta perimeter is irrelevant when leaving the downtown on
a trip to Chicago.
In this case, immediate driving decisions depend on the car just ahead, and the nearby
cars in adjacent lanes.
| github_jupyter |
# Cross Section for Many Lines using MODIT
Update: August 13/2021, Hajime Kawahara
We demonstrate the Modified Discrete Integral Transform (MODIT), which is the modified version of DIT for exojax. MODIT uses the evenly-spaced logarithm grid (ESLOG) as a wavenumber dimension. MODIT is especially advantageous when the number of molecular lines is large (typically > 10,000). We here compare the results by MODIT with the direct computation (LPF).
```
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
import jax.numpy as jnp
plt.style.use('bmh')
from exojax.spec.hitran import SijT, doppler_sigma, gamma_hitran, gamma_natural
from exojax.spec import moldb
# Setting wavenumber bins and loading HITRAN database
nus=np.logspace(np.log10(1900.0),np.log10(2300.0),350000,dtype=np.float64)
R=(len(nus)-1)/np.log(nus[-1]/nus[0]) #resolution
mdbCO=moldb.MdbHit('05_hit12.par',nus)
# set T, P and partition function
Mmol=28.01 # molecular weight
Tfix=1000.0 # we assume T=1000K
Pfix=1.e-3 # we compute P=1.e-3 bar
Ppart=Pfix #partial pressure of CO. here we assume a 100% CO atmosphere.
qt=mdbCO.Qr_layer_HAPI([Tfix])[0]
# compute Sij, gamma_L, sigmaD
Sij=SijT(Tfix,mdbCO.logsij0,mdbCO.nu_lines,mdbCO.elower,qt)
gammaL = gamma_hitran(Pfix,Tfix, Ppart, mdbCO.n_air, \
mdbCO.gamma_air, mdbCO.gamma_self) \
+ gamma_natural(mdbCO.A)
```
MODIT uses the normalized quantities by wavenumber/R, where R is the spectral resolution. In this case, the normalized Doppler width (nsigmaD) is common for the same isotope. Then, we use a 2D DIT grid with the normalized gammaL and q = R log(nu).
```
from exojax.spec.hitran import normalized_doppler_sigma
dv_lines=mdbCO.nu_lines/R
nsigmaD=normalized_doppler_sigma(Tfix,Mmol,R)
ngammaL=gammaL/dv_lines
```
MODIT uses a grid of ngammaL, and wavenumber. ditgrid makes a 1D grid for ngamma.
```
from exojax.spec.modit import ditgrid
ngammaL_grid=ditgrid(ngammaL)
# we can change the resolution using res option
#ngammaL_grid=set_ditgrid(ngammaL,res=0.1)
#show the grids
plt.plot(mdbCO.nu_lines,ngammaL,".")
for i in ngammaL_grid:
plt.axhline(i,lw=1,alpha=0.5,color="C1")
plt.xlabel("wavenumber")
plt.ylabel("normalized gammaL")
```
We need to precompute the contribution for wavenumber and pmarray. These can be computed using init_dit.
```
from exojax.spec import initspec
cnu,indexnu,R,pmarray=initspec.init_modit(mdbCO.nu_lines,nus)
```
Let's compute the cross section!
```
from exojax.spec.modit import xsvector
xs=xsvector(cnu,indexnu,R,pmarray,nsigmaD,ngammaL,Sij,nus,ngammaL_grid)
```
Also, we here try the direct computation using LPF for the comparison purpose
```
from exojax.spec import xsection
sigmaD=doppler_sigma(mdbCO.nu_lines,Tfix,Mmol)
xsv=xsection(nus,mdbCO.nu_lines,sigmaD,gammaL,Sij,memory_size=30)
fig=plt.figure(figsize=(10,5))
ax=fig.add_subplot(211)
plt.plot(nus,xs,lw=1,alpha=0.5,label="MODIT")
plt.plot(nus,xsv,lw=1,alpha=0.5,label="Direct LPF")
plt.legend(loc="upper right")
plt.ylabel("Cross Section (cm2)")
ax=fig.add_subplot(212)
plt.plot(nus,xsv-xs,lw=2,alpha=0.5,label="MODIT")
plt.ylabel("LPF - DIT (cm2)")
plt.legend(loc="upper left")
plt.show()
```
There is about 1 % deviation between LPF and MODIT.
```
fig=plt.figure(figsize=(10,5))
ax=fig.add_subplot(211)
plt.plot(nus,xs,lw=2,alpha=0.5,label="DIT")
plt.plot(nus,xsv,lw=1,alpha=0.5,label="Direct")
plt.legend(loc="upper right")
plt.xlim(2050.8,2050.9)
plt.ylabel("Cross Section (cm2)")
ax=fig.add_subplot(212)
plt.plot(nus,xsv-xs,lw=2,alpha=0.6,label="MODIT")
plt.legend(loc="upper left")
plt.ylabel("Difference (cm2)")
plt.xlim(2050.8,2050.9)
#plt.yscale("log")
plt.savefig("fine_grid.png")
```
| github_jupyter |
[Oregon Curriculum Network](http://www.4dsolutions.net/ocn) <br />
[Discovering Math with Python](Introduction.ipynb)
Crystal Ball Sequence
============
The face-centered cubic (FCC) lattice is not always presented in this simplest form, ditto the cubic close packing (CCP), which amounts to the same thing. A nuclear ball is surrounded by a layer of twelve, all touching it, and adjacent neighbors. The shape so formed is not a cube, but a cuboctahedron, with eight triangular faces and six square.
As the cuboctahedral packing continues to expand outward, layer by layer, the cumulative number of balls or points forms the Crystal Ball Sequence.
cubocta(), a generator, yields the number of balls in each successive layer of the cuboctahedron, according to a simple formula derived by R. Buckminster Fuller, a prolific inventor and philosopher [1]. cummulative( ) delegates to cubocta( ) while accumulating the number in each layer to provide a running total.

```
from itertools import accumulate, islice
def cubocta():
    """
    Classic Generator: Cuboctahedral / Icosahedral #s
    https://oeis.org/A005901
    """
    yield 1  # the nuclear ball at frequency zero
    frequency = 1
    while True:
        # Fuller's formula: balls in the shell at this frequency.
        yield 10 * frequency ** 2 + 2
        frequency += 1

def cummulative(n):
    """
    https://oeis.org/A005902 (crystal ball sequence)
    """
    # Running totals of shell sizes give the cumulative ball counts;
    # islice limits the infinite stream to the first n layers.
    running_totals = accumulate(cubocta())
    yield from islice(running_totals, 0, n)
print("{:=^30}".format(" Crystal Ball Sequence "))
print("{:^10} {:^10}".format("Layers", "Points"))
for f, out in enumerate(cummulative(30),start=1):
print("{:>10} {:>10}".format(f, out))
```
Octet Truss
======
When adjacent CCP ball centers interconnect, what do you get? Why the octet truss of course, a well known space frame, used a lot in architecture. Alexander Graham Bell was fascinated by this construction.[2]
<a data-flickr-embed="true" href="https://www.flickr.com/photos/kirbyurner/23636692173/in/album-72157664250599655/" title="Business Accelerator Building"><img src="https://farm2.staticflickr.com/1584/23636692173_103b411737.jpg" width="500" height="375" alt="Business Accelerator Building"></a><script async src="//embedr.flickr.com/assets/client-code.js" charset="utf-8"></script>
[1] Siobahn Roberts. *King of Infinite Space*. New York: Walker & Company (2006). pp 179-180.
"Coxeter sent back a letter saying that one equation would be 'a remarkable discovery, justifying Bucky's evident pride,' if only it weren't too good to be true. The next day, Coxeter called: 'On further reflection, I see that it *is* true'. Coxeter told Fuller how impressed he was with his formula -- on the cubic close-packing of balls."
[2] http://worldgame.blogspot.com/2006/02/octet-truss.html (additional info on the octet truss)
Pascal's Triangle
=========
Pascal's Triangle connects to the Binomial Theorem (originally proved by Sir Isaac Newton) and to numerous topics in probability theory. The triangular and tetrahedral number sequences may be discovered lurking in its columns.
pascal(), a generator, yields successive rows of Pascal's Triangle. By prepending and appending a zero element and adding vertically, a next row is obtained. For example, from [1] we get [0, 1] + [1, 0] = [1, 1]. From [1, 1] we get [0, 1, 1] + [1, 1, 0] = [1, 2, 1] and so on.
Notice the triangular numbers (1, 3, 6, 10...) and tetrahedral number sequences (1, 4, 10, 20...) appear in the slanted columns. [3]

```
from itertools import islice
def pascal():
    """Yield successive rows of Pascal's Triangle, starting with [1].

    Each next row is obtained by adding the current row to itself
    shifted by one position (pad with a zero on either end).
    """
    current = [1]
    while True:
        yield current
        padded_left = [0] + current
        padded_right = current + [0]
        current = [a + b for a, b in zip(padded_left, padded_right)]
print("{0:=^60}".format(" Pascal's Triangle "))
print()
for r in islice(pascal(),0,11):
print("{:^60}".format("".join(map(lambda n: "{:>5}".format(n), r))))
```
Each number in Pascal's Triangle may be understood as the number of unique pathways to that position, were falling balls introduced through the top and allowed to fall left or right to the next row down. This apparatus is sometimes called a Galton Board.
For example, a ball could reach the 6 in the middle of the 5th row going 1,1,2,3,6 in four ways (counting left and right mirrors), or 1,1,1,3,6 in two ways. The likely pattern when many balls fall through this maze will be a bell curve, as shown in the simulation below.
```
from IPython.display import YouTubeVideo
YouTubeVideo("9xUBhhM4vbM")
```
[3] for more on both Pascal's Triangle and Cuboctahedral Numbers see:
http://www.4dsolutions.net/ocn/numeracy0.html
| github_jupyter |
<i>Copyright (c) Microsoft Corporation. All rights reserved.<br>
Licensed under the MIT License.</i>
<br><br>
# Recommender Hyperparameter Tuning w/ AzureML
This notebook shows how to auto-tune hyperparameters of a recommender model by utilizing **Azure Machine Learning service**<sup>[a](#azureml-search), [b](#azure-subscription)</sup> ([AzureML](https://azure.microsoft.com/en-us/services/machine-learning-service/)).
We present an overall process of utilizing AzureML, specifically [**Hyperdrive**](https://docs.microsoft.com/en-us/python/api/azureml-train-core/azureml.train.hyperdrive?view=azure-ml-py) component, for the hyperparameter tuning by demonstrating key steps:
1. Configure AzureML Workspace
2. Create Remote Compute Target (GPU cluster)
3. Prepare Data
4. Prepare Training Scripts
5. Setup and Run Hyperdrive Experiment
6. Model Import, Re-train and Test
In this notebook, we use [**Wide-and-Deep model**](https://ai.googleblog.com/2016/06/wide-deep-learning-better-together-with.html) from **TensorFlow high-level Estimator API (v1.12)** on the movie recommendation scenario. Wide-and-Deep learning jointly trains wide linear model and deep neural networks (DNN) to combine the benefits of memorization and generalization for recommender systems.
For more details about the **Wide-and-Deep** model:
* [Wide-Deep Quickstart notebook](../00_quick_start/wide_deep_movielens.ipynb)
* [Original paper](https://arxiv.org/abs/1606.07792)
* [TensorFlow API doc](https://www.tensorflow.org/api_docs/python/tf/estimator/DNNLinearCombinedRegressor)
Regarding **AuzreML**, please refer:
* [Quickstart notebook](https://docs.microsoft.com/en-us/azure/machine-learning/service/quickstart-create-workspace-with-python)
* [Hyperdrive](https://docs.microsoft.com/en-us/azure/machine-learning/service/how-to-tune-hyperparameters)
* [Tensorflow model tuning with Hyperdrive](https://docs.microsoft.com/en-us/azure/machine-learning/service/how-to-train-tensorflow)
> <span id="azureml-search">a. </span>To use AzureML, you will need an Azure subscription.
<span id="azure-subscription">b. </span>When you web-search "Azure Machine Learning", you will most likely to see mixed results of Azure Machine Learning (AzureML) and Azure Machine Learning **Studio**. Please note they are different services where AzureML's focuses are on ML model management, tracking and hyperparameter tuning, while the [ML Studio](https://studio.azureml.net/)'s is to provide a high-level tool for 'easy-to-use' experience of ML designing and experimentation based on GUI.
```
import sys
sys.path.append("../../")
import itertools
import os
import shutil
import time
from IPython.display import clear_output
import numpy as np
import papermill as pm
import pandas as pd
import sklearn.preprocessing
import tensorflow as tf
import azureml as aml
import azureml.widgets as widgets
import azureml.train.hyperdrive as hd
from reco_utils.dataset.pandas_df_utils import user_item_pairs
from reco_utils.dataset import movielens
from reco_utils.dataset.python_splitters import python_random_split
import reco_utils.evaluation.python_evaluation
print("Azure ML SDK Version:", aml.core.VERSION)
print("Tensorflow Version:", tf.__version__)
```
### 1. Configure AzureML Workspace
**AzureML workspace** is a foundational block in the cloud that you use to experiment, train, and deploy machine learning models via AzureML service. In this notebook, we 1) create a workspace from [**Azure portal**](https://portal.azure.com) and 2) configure from this notebook.
You can find more details about the setup and configure processes from the following links:
* [Quickstart with Azure portal](https://docs.microsoft.com/en-us/azure/machine-learning/service/quickstart-get-started)
* [Quickstart with Python SDK](https://docs.microsoft.com/en-us/azure/machine-learning/service/quickstart-create-workspace-with-python)
<br>
#### 1.1 Create a workspace
1. Sign in to the [Azure portal](https://portal.azure.com) by using the credentials for the Azure subscription you use.
2. Select **Create a resource** menu, search for **Machine Learning service workspace** select **Create** button.
3. In the **ML service workspace** pane, configure your workspace with entering the *workspace name* and *resource group* (or **create new** resource group if you don't have one already), and select **Create**. It can take a few moments to create the workspace.
<br>
#### 1.2 Configure
To configure this notebook to communicate with the workspace, type in your Azure subscription id, the resource group name and workspace name to `<subscription-id>`, `<resource-group>`, `<workspace-name>` in the above notebook cell. Alternatively, you can create a *.\aml_config\config.json* file with the following contents:
```
{
"subscription_id": "<subscription-id>",
"resource_group": "<resource-group>",
"workspace_name": "<workspace-name>"
}
```
```
# AzureML workspace info. Note, will look up "aml_config\config.json" first, then fall back to use this
SUBSCRIPTION_ID = '<subscription-id>'
RESOURCE_GROUP = '<resource-group>'
WORKSPACE_NAME = '<workspace-name>'
# Remote compute (cluster) configuration. If you want to save the cost more, set these to small.
VM_SIZE = 'STANDARD_NC6'
VM_PRIORITY = 'lowpriority'
# Cluster nodes
MIN_NODES = 4
MAX_NODES = 8
# Hyperdrive experimentation configuration
MAX_TOTAL_RUNS = 100 # Number of runs (training-and-evaluation) to search the best hyperparameters.
MAX_CONCURRENT_RUNS = 4
# Recommend top k items
TOP_K = 10
# Select Movielens data size: 100k, 1m, 10m, or 20m
MOVIELENS_DATA_SIZE = '100k'
EPOCHS = 50
# Metrics to track
RANKING_METRICS = ['ndcg_at_k', 'precision_at_k']
RATING_METRICS = ['rmse', 'mae']
PRIMARY_METRIC = 'rmse'
# Data column names
USER_COL = 'UserId'
ITEM_COL = 'MovieId'
RATING_COL = 'Rating'
ITEM_FEAT_COL = 'Genres'
```
Now let's see if everything is ready!
```
# Connect to a workspace
try:
ws = aml.core.Workspace.from_config()
except aml.exceptions.UserErrorException:
try:
ws = aml.core.Workspace(
subscription_id=SUBSCRIPTION_ID,
resource_group=RESOURCE_GROUP,
workspace_name=WORKSPACE_NAME
)
ws.write_config()
except aml.exceptions.AuthenticationException:
ws = None
if ws is None:
raise ValueError(
"""Cannot access the AzureML workspace w/ the config info provided.
Please check if you entered the correct id, group name and workspace name"""
)
else:
print("AzureML workspace name: ", ws.name)
clear_output() # Comment out this if you want to see your workspace info.
```
### 2. Create Remote Compute Target
We create a gpu cluster as our **remote compute target**. If a cluster with the same name is already exist in your workspace, the script will load it instead. You can see [this document](https://docs.microsoft.com/en-us/azure/machine-learning/service/how-to-set-up-training-targets) to learn more about setting up a compute target on different locations.
This notebook selects **STANDARD_NC6** virtual machine (VM) and sets it's priority as *lowpriority* to save the cost.
Size | vCPU | Memory (GiB) | Temp storage (SSD, GiB) | GPU | GPU memory (GiB) | Max data disks | Max NICs
---|---|---|---|---|---|---|---
Standard_NC6 | <div align="center">6</div> | <div align="center">56</div> | <div align="center">340</div> | <div align="center">1</div> | <div align="center">8</div> | <div align="center">24</div> | <div align="center">1</div>
For more information about Azure virtual machine sizes, see [here](https://docs.microsoft.com/en-us/azure/virtual-machines/windows/sizes-gpu).
```
CLUSTER_NAME = 'gpu-cluster-nc6'
try:
compute_target = aml.core.compute.ComputeTarget(workspace=ws, name=CLUSTER_NAME)
print("Found existing compute target")
except aml.core.compute_target.ComputeTargetException:
print("Creating a new compute target...")
compute_config = aml.core.compute.AmlCompute.provisioning_configuration(
vm_size=VM_SIZE,
vm_priority=VM_PRIORITY,
min_nodes=MIN_NODES,
max_nodes=MAX_NODES
)
# create the cluster
compute_target = aml.core.compute.ComputeTarget.create(ws, CLUSTER_NAME, compute_config)
compute_target.wait_for_completion(show_output=True, min_node_count=None, timeout_in_minutes=20)
# Use the 'status' property to get a detailed status for the current cluster.
print(compute_target.status.serialize())
```
### 3. Prepare Data
For demonstration purpose, we use 100k MovieLens dataset. First, download the data and convert the format (multi-hot encode *genres*) to make it work for our model. More details about this step is described in our [Wide-Deep Quickstart notebook](../00_quick_start/wide_deep_movielens.ipynb).
```
data = movielens.load_pandas_df(
size=MOVIELENS_DATA_SIZE,
header=[USER_COL, ITEM_COL, RATING_COL],
genres_col='Genres_string'
)
# Encode 'genres' into int array (multi-hot representation) to use as item features
genres_encoder = sklearn.preprocessing.MultiLabelBinarizer()
data[ITEM_FEAT_COL] = genres_encoder.fit_transform(
data['Genres_string'].apply(lambda s: s.split("|"))
).tolist()
data.drop('Genres_string', axis=1, inplace=True)
data.head()
```
The dataset is split into train, validation, and test sets. The train and validation sets will be used for hyperparameter tuning, and the test set will be used for the final evaluation of the model after we import the best model from AzureML workspace.
Here, we don't use multiple-split directly by passing `ratio=[0.56, 0.19, 0.25]`. Instead, we first split the data into train and test sets with the same `seed` we've been using in other notebooks to make the train set identical across them. Then, we further split the train set into train and validation sets.
```
# Use the same seed to make the train and test sets identical across other notebooks in the repo.
train, test = python_random_split(data, ratio=0.75, seed=42)
# Further split the train set into train and validation set.
train, valid = python_random_split(train)
print(len(train), len(valid), len(test))
```
Now, upload the train and validation sets to the AzureML workspace. Our Hyperdrive experiment will use them.
```
DATA_DIR = 'aml_data'
os.makedirs(DATA_DIR, exist_ok=True)
TRAIN_FILE_NAME = "movielens_" + MOVIELENS_DATA_SIZE + "_train.pkl"
train.to_pickle(os.path.join(DATA_DIR, TRAIN_FILE_NAME))
VALID_FILE_NAME = "movielens_" + MOVIELENS_DATA_SIZE + "_valid.pkl"
valid.to_pickle(os.path.join(DATA_DIR, VALID_FILE_NAME))
# Note, all the files under DATA_DIR will be uploaded to the data store
ds = ws.get_default_datastore()
ds.upload(
src_dir=DATA_DIR,
target_path='data',
overwrite=True,
show_progress=True
)
```
### 4. Prepare Training Scripts
Next step is to prepare scripts that AzureML Hyperdrive will use to train and evaluate models with selected hyperparameters. We re-use our [Wide-Deep Quickstart notebook](../00_quick_start/wide_deep_movielens.ipynb) for that. To run the model notebook from the Hyperdrive Run, all we need is to prepare an [entry script](../../reco_utils/aml/wide_deep.py) which parses the hyperparameter arguments, passes them to the notebook, and records the results of the notebook to AzureML Run logs by using `papermill`. Hyperdrive uses the logs to track the performance of each hyperparameter-set and finds the best performed one.
Here is a code snippet from the [entry script](../../reco_utils/aml/wide_deep.py):
```
import argparse
import papermill as pm
from azureml.core import Run
run = Run.get_context()
...
parser = argparse.ArgumentParser()
...
parser.add_argument('--dnn-optimizer', type=str, dest='dnn_optimizer', ...
parser.add_argument('--dnn-optimizer-lr', type=float, dest='dnn_optimizer_lr', ...
...
pm.execute_notebook(
"../../notebooks/00_quick_start/wide_deep_movielens.ipynb",
OUTPUT_NOTEBOOK,
parameters=params,
kernel_name='python3',
)
...
```
```
# Prepare all the necessary scripts which will be loaded to our Hyperdrive Experiment Run
SCRIPT_DIR = 'aml_script'
# Clean-up scripts if already exists
shutil.rmtree(SCRIPT_DIR, ignore_errors=True)
# Copy scripts to SCRIPT_DIR temporarily; the whole reco_utils package is shipped
# because the entry script imports from it on the remote compute.
shutil.copytree(os.path.join('..', '..', 'reco_utils'), os.path.join(SCRIPT_DIR, 'reco_utils'))
# We re-use our model notebook for training and testing models.
model_notebook_dir = os.path.join('notebooks', '00_quick_start')
# Keep the same relative layout inside SCRIPT_DIR so the entry script's
# relative path to the notebook still resolves on the remote target.
dest_model_notebook_dir = os.path.join(SCRIPT_DIR, model_notebook_dir)
os.makedirs(dest_model_notebook_dir , exist_ok=True)
shutil.copy(
    os.path.join('..', '..', model_notebook_dir, 'wide_deep_movielens.ipynb'),
    dest_model_notebook_dir
)
# This is our entry script for Hyperdrive Run (path relative to SCRIPT_DIR)
ENTRY_SCRIPT_NAME = 'reco_utils/aml/wide_deep.py'
```
### 5. Setup and Run Hyperdrive Experiment
[Hyperdrive](https://docs.microsoft.com/en-us/azure/machine-learning/service/how-to-tune-hyperparameters) create a machine learning Experiment [Run](https://docs.microsoft.com/en-us/python/api/azureml-core/azureml.core.run?view=azure-ml-py) on the workspace and utilizes child-runs to search the best set of hyperparameters.
<br>
#### 5.1 Create Experiment
[Experiment](https://docs.microsoft.com/en-us/python/api/azureml-core/azureml.core.experiment(class)?view=azure-ml-py) is the main entry point into experimenting with AzureML. To create new Experiment or get the existing one, we pass our experimentation name.
```
# Create an experiment to track the runs in the workspace
EXP_NAME = "movielens_" + MOVIELENS_DATA_SIZE + "_wide_deep_model"
# Gets the existing experiment with this name or registers a new one.
exp = aml.core.Experiment(workspace=ws, name=EXP_NAME)
```
#### 5.2 Define Search Space
Now we define the search space of hyperparameters. For example, if you want to test different batch sizes of {64, 128, 256}, you can use `azureml.train.hyperdrive.choice(64, 128, 256)`. To search from a continuous space, use `uniform(start, end)`. For more options, see [Hyperdrive parameter expressions](https://docs.microsoft.com/en-us/python/api/azureml-train-core/azureml.train.hyperdrive.parameter_expressions?view=azure-ml-py).
In this notebook, we fix model type as `wide_deep` and the number of epochs to 50.
In the search space, we set different linear and DNN optimizers, structures, learning rates and regularization rates. Details about the hyperparameters can be found from our [Wide-Deep Quickstart notebook](../00_quick_start/wide_deep_movielens.ipynb).
> Hyperdrive provides three different parameter sampling methods: `RandomParameterSampling`, `GridParameterSampling`, and `BayesianParameterSampling`. Details about each method can be found from [Azure doc](https://docs.microsoft.com/en-us/azure/machine-learning/service/how-to-tune-hyperparameters). Here, we use the Bayesian sampling.
```
# Fixed parameters
script_params = {
    '--datastore': ds.as_mount(),  # mount the AzureML datastore on the compute target
    '--train-datapath': "data/" + TRAIN_FILE_NAME,
    '--test-datapath': "data/" + VALID_FILE_NAME,  # the validation split plays the "test" role while tuning
    '--top-k': TOP_K,
    '--user-col': USER_COL,
    '--item-col': ITEM_COL,
    '--item-feat-col': ITEM_FEAT_COL,
    '--rating-col': RATING_COL,
    '--ranking-metrics': RANKING_METRICS,
    '--rating-metrics': RATING_METRICS,
    '--epochs': EPOCHS,
    '--model-type': 'wide_deep'
}
# Hyperparameter search space
params = {
    '--batch-size': hd.choice(64, 128, 256),
    # Linear model hyperparameters
    '--linear-optimizer': hd.choice('Ftrl'), # 'SGD' and 'Momentum' easily got exploded loss in regression problems.
    '--linear-optimizer-lr': hd.uniform(0.0001, 0.1),
    '--linear-l1-reg': hd.uniform(0.0, 0.1),
    # Deep model hyperparameters
    '--dnn-optimizer': hd.choice('Adagrad', 'Adam'),
    '--dnn-optimizer-lr': hd.uniform(0.0001, 0.1),
    '--dnn-user-embedding-dim': hd.choice(4, 8, 16, 32, 64),
    '--dnn-item-embedding-dim': hd.choice(4, 8, 16, 32, 64),
    # Layers 1-3 may be disabled (0); layer 4 is always present.
    '--dnn-hidden-layer-1': hd.choice(0, 32, 64, 128, 256, 512, 1024), # 0: not using this layer
    '--dnn-hidden-layer-2': hd.choice(0, 32, 64, 128, 256, 512, 1024),
    '--dnn-hidden-layer-3': hd.choice(0, 32, 64, 128, 256, 512, 1024),
    '--dnn-hidden-layer-4': hd.choice(32, 64, 128, 256, 512, 1024),
    '--dnn-batch-norm': hd.choice(0, 1),  # 1 enables batch normalization
    '--dnn-dropout': hd.choice(0.0, 0.1, 0.2, 0.3, 0.4)
}
```
**AzureML Estimator** is the building block for training. An Estimator encapsulates the training code and parameters, the compute resources and runtime environment for a particular training scenario (Note, this is not TensorFlow's Estimator)
We create one for our experimentation with the dependencies our model requires as follows:
```
conda_packages=['pandas', 'scikit-learn', 'tensorflow-gpu=1.12'],
pip_packages=['ipykernel', 'papermill']
```
To the Hyperdrive Run Config, we set our primary metric name and the goal (our hyperparameter search criteria), hyperparameter sampling method, and number of total child-runs. The bigger the search space, the more number of runs we will need for better results.
```
# Estimator bundles the training code, its dependencies, and the compute target.
est = aml.train.estimator.Estimator(
    source_directory=SCRIPT_DIR,
    entry_script=ENTRY_SCRIPT_NAME,
    script_params=script_params,  # fixed (non-searched) arguments
    compute_target=compute_target,
    use_gpu=True,
    conda_packages=['pandas', 'scikit-learn', 'tensorflow-gpu=1.12'],
    pip_packages=['ipykernel', 'papermill']
)
# Hyperdrive config: Bayesian sampling over `params`, minimizing PRIMARY_METRIC.
hd_run_config = hd.HyperDriveRunConfig(
    estimator=est,
    hyperparameter_sampling=hd.BayesianParameterSampling(params),
    primary_metric_name=PRIMARY_METRIC,
    primary_metric_goal=hd.PrimaryMetricGoal.MINIMIZE,
    max_total_runs=MAX_TOTAL_RUNS,
    max_concurrent_runs=MAX_CONCURRENT_RUNS
)
```
#### 5.3 Run Experiment
Now we submit the Run to our experiment. You can see the experiment progress from this notebook by using `azureml.widgets.RunDetails(hd_run).show()` or check from the Azure portal with the url link you can get by running `hd_run.get_portal_url()`.
<img src="https://recodatasets.blob.core.windows.net/images/aml_0.png?sanitize=true"/>
<img src="https://recodatasets.blob.core.windows.net/images/aml_1.png?sanitize=true"/>
<center><i>AzureML Hyperdrive Widget</i></center>
To load an existing Hyperdrive Run instead of starting a new one, use `hd_run = hd.HyperDriveRun(exp, <user-run-id>, hyperdrive_run_config=hd_run_config)`. You can also cancel the Run with `hd_run.cancel()`.
```
# Submit the Hyperdrive configuration; the widget tracks child-run progress inline.
hd_run = exp.submit(config=hd_run_config)
widgets.RunDetails(hd_run).show()
```
Once all the child-runs are finished, we can get the best run and the metrics.
```
# Get best run and printout metrics
best_run = hd_run.get_best_run_by_primary_metric()
best_run_metrics = best_run.get_metrics()
print("* Best Run Id:", best_run.id)
print("\n* Best hyperparameters:")
print("Model type =", best_run_metrics['MODEL_TYPE'])
print("Batch size =", best_run_metrics['BATCH_SIZE'])
print("Linear optimizer =", best_run_metrics['LINEAR_OPTIMIZER'])
print("\tLearning rate = {0:.4f}".format(best_run_metrics['LINEAR_OPTIMIZER_LR']))
print("\tL1 regularization = {0:.4f}".format(best_run_metrics['LINEAR_L1_REG']))
print("DNN optimizer =", best_run_metrics['DNN_OPTIMIZER'])
print("\tUser embedding dimension =", best_run_metrics['DNN_USER_DIM'])
print("\tItem embedding dimension =", best_run_metrics['DNN_ITEM_DIM'])
# Collect the active hidden layers; 0 means the layer was disabled in the search space.
hidden_units = []
for i in range(1, 5):
    hidden_nodes = best_run_metrics['DNN_HIDDEN_LAYER_{}'.format(i)]
    if hidden_nodes > 0:
        hidden_units.append(hidden_nodes)
print("\tHidden units =", hidden_units)
print("\tLearning rate = {0:.4f}".format(best_run_metrics['DNN_OPTIMIZER_LR']))
print("\tDropout rate = {0:.4f}".format(best_run_metrics['DNN_DROPOUT']))
print("\tBatch normalization =", best_run_metrics['DNN_BATCH_NORM'])
# Metrics evaluated on validation set
print("\n* Performance metrics:")
print("Top", TOP_K)
for m in RANKING_METRICS:
    print("\t{0} = {1:.4f}".format(m, best_run_metrics[m]))
for m in RATING_METRICS:
    print("\t{0} = {1:.4f}".format(m, best_run_metrics[m]))
```
### 6. Model Import and Test
[Wide-Deep Quickstart notebook](../00_quick_start/wide_deep_movielens.ipynb), which we've used in our Hyperdrive Experiment, exports the trained model to the output folder (the output path is recorded at `best_run_metrics['saved_model_dir']`). We can download a model from the best run and test it.
```
# Local folder to hold the model files downloaded from the best run.
MODEL_DIR = 'aml_model'
os.makedirs(MODEL_DIR, exist_ok=True)
# NOTE(review): the [2:-1] slice assumes the logged 'saved_model_dir' value is
# wrapped by two leading and one trailing character (e.g. a quoted bytes-like
# string) — confirm against how the entry script logs this metric.
model_file_dir = os.path.normpath(best_run_metrics['saved_model_dir'][2:-1]) + '/'
print(model_file_dir)
# Download every file the best run stored under the saved-model folder.
for f in best_run.get_file_names():
    if f.startswith(model_file_dir):
        output_file_path = os.path.join(MODEL_DIR, f[len(model_file_dir):])
        print("Downloading {}..".format(f))
        best_run.download_file(name=f, output_file_path=output_file_path)
# Re-load the exported TensorFlow SavedModel as an Estimator.
saved_model = tf.contrib.estimator.SavedModelEstimator(MODEL_DIR)
# Column-name mapping passed to the evaluation functions.
cols = {
    'col_user': USER_COL,
    'col_item': ITEM_COL,
    'col_rating': RATING_COL,
    'col_prediction': 'prediction'
}
tf.logging.set_verbosity(tf.logging.ERROR)
# Prediction input function for TensorFlow SavedModel
def predict_input_fn(df):
    """Build an input_fn that serializes each row of df into a tf.train.Example."""
    def input_fn():
        examples = [None] * len(df)
        for index, test_sample in df.iterrows():
            example = tf.train.Example()
            example.features.feature[USER_COL].int64_list.value.extend([test_sample[USER_COL]])
            example.features.feature[ITEM_COL].int64_list.value.extend([test_sample[ITEM_COL]])
            example.features.feature[ITEM_FEAT_COL].float_list.value.extend(test_sample[ITEM_FEAT_COL])
            examples[index] = example.SerializeToString()
        # The SavedModel expects serialized Examples under the 'inputs' key.
        return {'inputs': tf.constant(examples)}
    return input_fn
# Rating prediction set: the test split without the rating column.
X_test = test.drop(RATING_COL, axis=1)
X_test.reset_index(drop=True, inplace=True)
# Rating prediction
predictions = list(itertools.islice(
    saved_model.predict(predict_input_fn(X_test)),
    len(X_test)
))
prediction_df = X_test.copy()
prediction_df['prediction'] = [p['outputs'][0] for p in predictions]
print(prediction_df['prediction'].describe(), "\n")
# Evaluate rating metrics on the held-out test set.
for m in RATING_METRICS:
    fn = getattr(reco_utils.evaluation.python_evaluation, m)
    result = fn(test, prediction_df, **cols)
    print(m, "=", result)
# Unique items
if ITEM_FEAT_COL is None:
    items = data.drop_duplicates(ITEM_COL)[[ITEM_COL]].reset_index(drop=True)
else:
    items = data.drop_duplicates(ITEM_COL)[[ITEM_COL, ITEM_FEAT_COL]].reset_index(drop=True)
# Unique users
users = data.drop_duplicates(USER_COL)[[USER_COL]].reset_index(drop=True)
# Ranking prediction set
ranking_pool = user_item_pairs(
    user_df=users,
    item_df=items,
    user_col=USER_COL,
    item_col=ITEM_COL,
    user_item_filter_df=pd.concat([train, valid]), # remove seen items
    shuffle=True
)
predictions = []
# To prevent creating a tensor proto whose content is larger than 2GB (which will raise an error),
# divide ranking_pool into 10 chunks, predict each, and concat back.
for pool in np.array_split(ranking_pool, 10):
    pool.reset_index(drop=True, inplace=True)
    # Rating prediction
    pred = list(itertools.islice(
        saved_model.predict(predict_input_fn(pool)),
        len(pool)
    ))
    predictions.extend([p['outputs'][0] for p in pred])
ranking_pool['prediction'] = predictions
# Evaluate ranking metrics at TOP_K.
for m in RANKING_METRICS:
    fn = getattr(reco_utils.evaluation.python_evaluation, m)
    result = fn(test, ranking_pool, **{**cols, 'k': TOP_K})
    print(m, "=", result)
```
#### <span id="google-wide-deep-baseline">Wide-and-Deep Baseline Comparison</span>
To see if Hyperdrive found good hyperparameters, we simply compare with the model with known hyperparameters from [TensorFlow's wide-deep learning example](https://github.com/tensorflow/models/blob/master/official/wide_deep/movielens_main.py) which uses only the DNN part from the wide-and-deep model for MovieLens data.
> Note, this is not an 'apples to apples' comparison. For example, TensorFlow's movielens example uses *rating-timestamp* as a numeric feature, but we did not use that here because we think the timestamps are not relevant to the movies' ratings. This comparison is intended more to show how Hyperdrive can help find comparable hyperparameters without requiring exhaustive effort in searching through a huge space.
```
# Baseline: run the quickstart notebook once with the known-good hyperparameters
# from TensorFlow's wide-deep MovieLens example (deep part only, MODEL_TYPE='deep').
OUTPUT_NOTEBOOK = "output.ipynb"
OUTPUT_MODEL_DIR = "known_hyperparam_model_checkpoints"
params = {
    'MOVIELENS_DATA_SIZE': MOVIELENS_DATA_SIZE,
    'TOP_K': TOP_K,
    'MODEL_TYPE': 'deep',
    'EPOCHS': EPOCHS,
    'BATCH_SIZE': 256,
    'DNN_OPTIMIZER': 'Adam',
    'DNN_OPTIMIZER_LR': 0.001,
    'DNN_HIDDEN_LAYER_1': 256,
    'DNN_HIDDEN_LAYER_2': 256,
    'DNN_HIDDEN_LAYER_3': 256,
    'DNN_HIDDEN_LAYER_4': 128,
    'DNN_USER_DIM': 16,
    'DNN_ITEM_DIM': 64,
    'DNN_DROPOUT': 0.3,
    'DNN_BATCH_NORM': 0,
    'MODEL_DIR': OUTPUT_MODEL_DIR,
    'EVALUATE_WHILE_TRAINING': False,
    'EXPORT_DIR_BASE': OUTPUT_MODEL_DIR,
    'RANKING_METRICS': RANKING_METRICS,
    'RATING_METRICS': RATING_METRICS,
}
# Time a single local training + evaluation run via papermill.
start_time = time.time()
pm.execute_notebook(
    "../00_quick_start/wide_deep_movielens.ipynb",
    OUTPUT_NOTEBOOK,
    parameters=params,
    kernel_name='python3'
)
end_time = time.time()
print("Training and evaluation of Wide-and-Deep model took", end_time-start_time, "secs.")
# Pull the recorded metrics back out of the executed notebook.
nb = pm.read_notebook(OUTPUT_NOTEBOOK)
for m in RANKING_METRICS:
    print(m, "=", nb.data[m])
for m in RATING_METRICS:
    print(m, "=", nb.data[m])
# Clean up the executed notebook and the model checkpoints.
os.remove(OUTPUT_NOTEBOOK)
shutil.rmtree(OUTPUT_MODEL_DIR, ignore_errors=True)
```
### Concluding Remark
We showed how to tune hyperparameters by utilizing the Azure Machine Learning service. Complex and powerful models like the Wide-and-Deep model often have a large number of hyperparameters that affect the recommendation accuracy, and it is not practical to tune such a model without using a GPU cluster. For example, training and evaluating one model took around 3 minutes on the 100k MovieLens data on a single *Standard NC6* VM, as we measured in the [above cell](#google-wide-deep-baseline). When we used the 1M MovieLens data, it took about 47 minutes. If we wanted to investigate 100 different combinations of hyperparameters **manually**, it would take **78 hours** on the VM, and we might still wonder whether we had tested good candidate hyperparameters. With AzureML, as we have shown in this notebook, we can easily set up a GPU cluster sized to fit our problem, utilize Bayesian sampling to navigate the huge search space efficiently, and tweak the experiment with different criteria and algorithms for further research.
#### Cleanup
```
# Remove all local artifacts created by this notebook.
shutil.rmtree(SCRIPT_DIR, ignore_errors=True)
shutil.rmtree(DATA_DIR, ignore_errors=True)
shutil.rmtree(MODEL_DIR, ignore_errors=True)
```
| github_jupyter |
```
"""
Plummer model generator
This module contains a function used to create Plummer (1911) models, which
follow a spherically symmetric density profile of the form:
rho = c * (1 + r**2)**(-5/2)
"""
import numpy
import numpy.random
from math import pi, sqrt
from amuse.units import nbody_system
from amuse import datamodel
__all__ = ["new_plummer_sphere", "new_plummer_model"]
class MakePlummerModel(object):
    """Build an N-particle realization of a Plummer (1911) sphere.

    Positions are drawn by inverting the cumulative mass profile of the
    Plummer model; velocity magnitudes are drawn by von Neumann rejection
    sampling of the isotropic distribution function.  Lengths and speeds
    are rescaled by the constant 1.695 to put the model in standard
    N-body units.
    """

    def __init__(self, number_of_particles, convert_nbody = None, radius_cutoff = 22.8042468, mass_cutoff = 0.999,
                 do_scale = False, random_state = None, random = None):
        # mass_cutoff is capped by the mass fraction enclosed within
        # radius_cutoff so the two limits stay mutually consistent.
        self.number_of_particles = number_of_particles
        self.convert_nbody = convert_nbody
        self.mass_cutoff = min(mass_cutoff, self.calculate_mass_cuttof_from_radius_cutoff(radius_cutoff))
        self.do_scale = do_scale
        # `random_state` is deprecated; only `random` (a numpy RandomState-like
        # object) is honored for reproducible draws.
        if random_state is not None:
            print("DO NOT USE RANDOM STATE")
        self.random_state = None
        if random is None:
            self.random = numpy.random
        else:
            self.random = random

    def calculate_mass_cuttof_from_radius_cutoff(self, radius_cutoff):
        """Return the mass fraction enclosed within radius_cutoff (1.0 for very large radii)."""
        if radius_cutoff > 99999:
            return 1.0
        # NOTE(review): the 16/(3*pi) factor rescales the cutoff radius to the
        # internal Plummer length unit before evaluating m(r) = r^3/(1+r^2)^(3/2).
        scale_factor = 16.0 / (3.0 * pi)
        rfrac = radius_cutoff * scale_factor
        denominator = pow(1.0 + rfrac ** 2, 1.5)
        numerator = rfrac ** 3
        return numerator / denominator

    def calculate_radius(self, index):
        """Return a radius for particle `index` using stratified sampling of the mass profile."""
        mass_min = (index * self.mass_cutoff) / self.number_of_particles
        mass_max = ((index + 1) * self.mass_cutoff) / self.number_of_particles
        random_mass_fraction = self.random.uniform(mass_min, mass_max)
        # Inverse of the cumulative mass function m(r).
        radius = 1.0 / sqrt(pow(random_mass_fraction, -2.0 / 3.0) - 1.0)
        return radius

    def calculate_radius_uniform_distribution(self):
        """Return an (N, 1) array of radii from uniform mass fractions in [0, mass_cutoff)."""
        return 1.0 / numpy.sqrt(numpy.power(self.random.uniform(0, self.mass_cutoff, (self.number_of_particles, 1)), -2.0 / 3.0) - 1.0)

    def new_positions_spherical_coordinates(self):
        """Draw positions: radii from the mass profile, directions isotropic on the sphere."""
        pi2 = pi * 2
        radius = self.calculate_radius_uniform_distribution()
        # arccos of a uniform variate gives an isotropic polar angle.
        theta = numpy.arccos(self.random.uniform(-1.0, 1.0, (self.number_of_particles, 1)))
        phi = self.random.uniform(0.0, pi2, (self.number_of_particles, 1))
        return (radius, theta, phi)

    def new_velocities_spherical_coordinates(self, radius):
        """Draw velocity magnitudes (scaled by local escape speed) and isotropic directions."""
        pi2 = pi * 2
        x, y = self.new_xy_for_velocity()
        # v = x * v_esc with v_esc = sqrt(2) * (1 + r^2)^(-1/4) in Plummer units.
        velocity = x * sqrt(2.0) * numpy.power(1.0 + radius * radius, -0.25)
        theta = numpy.arccos(self.random.uniform(-1.0, 1.0, (self.number_of_particles, 1)))
        phi = self.random.uniform(0.0, pi2, (self.number_of_particles, 1))
        return (velocity, theta, phi)

    def coordinates_from_spherical(self, radius, theta, phi):
        """Convert spherical (radius, theta, phi) to Cartesian (x, y, z)."""
        x = radius * numpy.sin(theta) * numpy.cos(phi)
        y = radius * numpy.sin(theta) * numpy.sin(phi)
        z = radius * numpy.cos(theta)
        return (x, y, z)

    def new_xy_for_velocity(self):
        """Rejection-sample N pairs (x, y) with y <= x^2 * (1 - x^2)^3.5.

        x is the velocity expressed as a fraction of the escape velocity;
        y is the auxiliary rejection variate, kept so callers can inspect it.
        """
        number_of_selected_items = 0
        selected_values_for_x = numpy.zeros(0)
        selected_values_for_y = numpy.zeros(0)
        while (number_of_selected_items < self.number_of_particles):
            x = self.random.uniform(0, 1.0, (self.number_of_particles - number_of_selected_items))
            y = self.random.uniform(0, 0.1, (self.number_of_particles - number_of_selected_items))
            g = (x ** 2) * numpy.power(1.0 - x ** 2, 3.5)
            compare = y <= g
            selected_values_for_x = numpy.concatenate((selected_values_for_x, x.compress(compare)))
            # BUG FIX: the y-array was previously rebuilt from selected_values_for_x,
            # which stored x-values in y and let the two arrays' lengths diverge.
            selected_values_for_y = numpy.concatenate((selected_values_for_y, y.compress(compare)))
            number_of_selected_items = len(selected_values_for_x)
        return numpy.atleast_2d(selected_values_for_x).transpose(), numpy.atleast_2d(selected_values_for_y).transpose()

    def new_model(self):
        """Return (masses, positions, velocities) as plain numpy arrays in N-body units."""
        m = numpy.zeros((self.number_of_particles, 1)) + (1.0 / self.number_of_particles)
        radius, theta, phi = self.new_positions_spherical_coordinates()
        position = numpy.hstack(self.coordinates_from_spherical(radius, theta, phi))
        radius, theta, phi = self.new_velocities_spherical_coordinates(radius)
        velocity = numpy.hstack(self.coordinates_from_spherical(radius, theta, phi))
        # Rescale to standard N-body units (virial radius scaling constant 1.695).
        position = position / 1.695
        velocity = velocity / sqrt(1 / 1.695)
        return (m, position, velocity)

    @property
    def result(self):
        """Return the model as an AMUSE particle set (optionally converted to SI units)."""
        masses = numpy.ones(self.number_of_particles) / self.number_of_particles
        radius, theta, phi = self.new_positions_spherical_coordinates()
        x, y, z = self.coordinates_from_spherical(radius, theta, phi)
        radius, theta, phi = self.new_velocities_spherical_coordinates(radius)
        vx, vy, vz = self.coordinates_from_spherical(radius, theta, phi)

        result = datamodel.Particles(self.number_of_particles)
        result.mass = nbody_system.mass.new_quantity(masses)
        # Same 1.695 length / sqrt(1/1.695) speed rescaling as new_model().
        result.x = nbody_system.length.new_quantity(x.reshape(self.number_of_particles) / 1.695)
        result.y = nbody_system.length.new_quantity(y.reshape(self.number_of_particles) / 1.695)
        result.z = nbody_system.length.new_quantity(z.reshape(self.number_of_particles) / 1.695)
        result.vx = nbody_system.speed.new_quantity(vx.reshape(self.number_of_particles) / sqrt(1 / 1.695))
        result.vy = nbody_system.speed.new_quantity(vy.reshape(self.number_of_particles) / sqrt(1 / 1.695))
        result.vz = nbody_system.speed.new_quantity(vz.reshape(self.number_of_particles) / sqrt(1 / 1.695))
        result.radius = 0 | nbody_system.length

        result.move_to_center()
        if self.do_scale:
            result.scale_to_standard()

        if not self.convert_nbody is None:
            result = datamodel.ParticlesWithUnitsConverted(result, self.convert_nbody.as_converter_from_si_to_generic())
            result = result.copy()
        return result
def new_plummer_model(number_of_particles, *list_arguments, **keyword_arguments):
    """Create a plummer sphere with the given number of particles.

    Returns a set of stars with equal mass and with positions and velocities
    distributed to fit a plummer star distribution model, centered around the
    origin.  Positions and velocities are optionally scaled such that the
    kinetic and potential energies are 0.25 and -0.5 in nbody-units,
    respectively.

    :argument number_of_particles: Number of particles to include in the plummer sphere
    :argument convert_nbody: When given will convert the resulting set to SI units
    :argument radius_cutoff: Cutoff value for the radius (defaults to 22.8042468)
    :argument mass_cutoff: Mass percentage inside radius of 1
    :argument do_scale: scale the result to exact nbody units (M=1, K=0.25, U=-0.5)
    """
    return MakePlummerModel(number_of_particles, *list_arguments, **keyword_arguments).result
new_plummer_sphere = new_plummer_model
import numpy as np
from amuse.units import units

# Converter between N-body units and SI for a 100 MSun, 1 pc cluster.
Mcluster = 100. | units.MSun
Rcluster = 1. | units.parsec
converter = nbody_system.nbody_to_si(Mcluster, Rcluster)

N = 1000
stars = new_plummer_sphere(N, converter)

# Express attributes in convenient units (notebook display only; the
# returned quantities are not stored).
stars.x.as_quantity_in(units.parsec)
stars.vx.as_quantity_in(units.kms)
stars.mass.as_quantity_in(units.MSun)

print(stars)
# BUG FIX: `np.mean(x)` raised a NameError (`x` was never defined); the
# intent was the mean x-position of the cluster (should be ~0 after centering).
np.mean(stars.x.value_in(units.parsec))
```
| github_jupyter |
<br>
<br>
<font size='6'><u><b>Distance scale with variable stars</b></u></font>
<br>
_**Written by Chien-Hsiu Lee, 2020**_
In this activity we are going to look at stars that change their magnitudes (brightness) in time. We can use a certain group of these stars to help estimate distances to other astronomical objects such as M31, our neighboring galaxy.
___
# Table of Contents
* [How to Use This Notebook](#How-to-Use-This-Notebook)
* [Pre-Activity Setup](#Pre-Activity-Setup)
* [Activity 1: Finding the period of a star](#Activity-1:-Finding-the-period-of-a-star)
* [Activity 2: Classifying the variable star](#Activity-2:-Classifying-the-variable-star)
* [Activity 3: Using Leavitt Law to estimate distance](#Activity-3:-Using-Leavitt-Law-to-estimate-distance)
___
# How to Use This Notebook
The webpage you are in is actually an app - much like the ones on your cellphone. This app consists of cells.
An *input* cell looks like a light grey box with an `In [ ]:` on its left. Input cells each contain code - instructions to make the computer do something.
To activate or select a cell, click anywhere inside of it.
<div class='alert alert-info'>
<font size='3'><b>Select the cell below and read its contents.</b></font>
</div>
```
# Text that follows a "#" is known as a comment.
# Comments do not affect your code in any way.
# You should always read the comments at the top of each cell you interact with.
# Comments will be used to describe what the cell's code is actually doing.
```
To execute or run a selected cell, hit `[Shift + Enter]` on your keyboard.
<div class='alert alert-info'>
<font size='3'><b>Select the cell below and read its contents. Then, run the cell.</b></font>
<br> If a warning appears, just click <em>"Run Anyway"</em>, this code is safe ;)
<br> Also, if you want to save your progress, click the <em>"Copy to Drive"</em> button at the top.
</div>
```
# Text that DOESN'T follow a "#" is considered code.
# Lines of code are instructions given to your computer.
# The line of code below is a "print" statement.
# A print statement literally prints out the text between its quotes.
print("Congrats! You have successfully run your first cell!")
```
Running a cell creates an *output* directly below it. An output can be some text, a graph, an interactive slider, or even nothing at all! For that last case, you know you have run a cell when the `In [ ]:` becomes `In [#]:`, where "#" is any number.
You can learn more about how Jupyter notebooks work at https://try.jupyter.org/
___
# Pre-Activity Setup
In order for any of the activities to work properly, you must import the libraries needed for the code in this notebook.
<div class='alert alert-info'>
<font size='3'><b>Select and run the cell below.</b></font>
</div>
```
# If you are running this notebook in Colab the following package has to be installed first.
!pip install gatspy &> /dev/null
print("You have successfully installled: gatspy")
# Here, you are importing the libraries needed for this notebook.
# These libraries set up the plotting environment in your browser.
from ipywidgets import interactive, fixed
from IPython.display import display
import pdb
import os
import numpy as np
import matplotlib.pyplot as plt
from astropy.io import ascii
repoURL = 'https://raw.githubusercontent.com/astro-datalab/notebooks-latest/master/06_EPO/e-TeenAstronomyCafe/'
print('Done! You have successfully imported the libraries.')
```
### Programming Side Note
The previous cell is a way of importing the necessary functions and other stuff to allow us to make pictures and do math super easy. In the next cell, we will be using a dictionary to easily group things together. Like how in a normal dictionary there is a word and a definition, this dictionary acts in a similar manner but instead of a word, we call it a key and instead of a definition we call it a value.
```python
dictionary = {key1 : value1, key2 : value2 ...}
```
___
# Activity 1: Finding the period of a star
In this exercise, we will learn how to display light curves (brightness as a function of time) and from the light curve, find the period of a variable star.
<div class='alert alert-info'>
<font size='3'><b>Select and run the cell below to plot the light curve.</b></font>
</div>
<div class='alert alert-warning'>
<font size='3'><b>The plot shows brightness over time for a star. Notice how it rises and falls, this star is changing with time!</b></font><br>
</div>
```
# Read the observations (columns: MJD, magnitude, magnitude error) and plot the light curve.
lc=ascii.read(repoURL+'02_Variable_Stars/Data/cep1862r.tab')
mjd, mag, err = lc['col1'], lc['col2'], lc['col3']
plt.figure(figsize=(12, 8))
plt.plot(mjd, mag, 'g.')
plt.xlabel('Time [day]', size=20)
plt.ylabel('Brightness [magnitude]', size=20)
plt.title('Light curve of a variable star', size=20)
# Reversed y-limits (20.2 -> 18.8) so that brighter (smaller magnitude) is up.
plt.ylim(20.2,18.8)
#plt.rc('font', weight='bold')
plt.rc('xtick.major', size=10, pad=10)
plt.rc('ytick.major', size=10, pad=10)
plt.rc('ytick', labelsize=20)
plt.rc('xtick', labelsize=20)
plt.show()
```
This is a good start and there appears to be a periodic nature to the star, so let's find the period.
<div class='alert alert-info'>
<font size='3'><b>Select and run the cell below to find the period.</b></font>
</div>
<div class='alert alert-warning'>
<font size='3'><b>The plots shows probability of the star pulsing with a certain period. Notice how some peaks are higher than others, these are likely period candidates!</b></font><br>
</div>
```
# Use the Lomb-Scargle periodogram (gatspy) to find the best period.
from gatspy.periodic import LombScargleFast
ls=LombScargleFast(silence_warnings=True)
# Restrict the period search to 0.1 - 100 days.
ls.optimizer.period_range = (0.1,100)
ls.fit(mjd,mag)
period0=ls.best_period
print("Best period: " + str(period0) + " days")
# Periodogram: likelihood score over a grid of 1000 trial periods.
periods = np.linspace(1,100,1000)
scores = ls.score(periods)
plt.figure(figsize=(12, 8))
plt.plot(periods,scores)
plt.xlabel("Period [day]", size=20)
plt.ylabel("Likelihood", size=20)
plt.title("Lomb-Scargle-Periodogram", size=20)
plt.show()
```
<font size='4' color='#0076b6'><b>Question 1: Can you estimate the period from this diagram?</b></font>
From the above diagram of likelihood versus period, you can probably determine the period of the variable star. But for argument's sake, let's play around with different periods, say from 10 to 60 days, and see which diagram of period versus brightness makes sense.
<div class='alert alert-info'>
<font size='3'><b>Run the cell below. Experiment with different values of the period.</b></font>
</div>
```
#print("Best period: " + str(period) + " days")
# put in a number between 10 and 60
#period=62
# Period (in days) used by the cells below; this value comes from the periodogram above.
period=30.864572200228128
```
That was a great way to make sure we know the period. In fact, if we assume all the data sits on the same periodic function, we should be able to condense all the data to a single period, or "phased light curve". This will make it easier to read and see the period.
<div class='alert alert-info'>
<font size='3'><b>Select and run the cell below to plot the phased light curve.</b></font>
</div>
<div class='alert alert-warning'>
<font size='3'><b>The plots shows the light curve phased (folded) with the given period. You can clearly see the distinct shape of the Cepheid variable, confirming that period we have in hand is correct!</b></font><br>
</div>
```
# Phase-fold the light curve: phase in [0, 1) is time modulo the period.
# (Removed a dead `phase = np.zeros(...)` assignment that was immediately overwritten.)
phase = np.fmod(mjd/period,1)
plt.figure(figsize=(12, 8))
# Plot two consecutive cycles (green, then red) so features at the phase wrap are visible.
plt.plot(phase*period,mag,"g.")
plt.plot((phase+1)*period,mag,"r.")
# Reversed y-limits so that brighter (smaller magnitude) is up.
plt.ylim(20.2,18.8)
plt.xlabel('Time [Days]', size=20)
plt.ylabel('Brightness [Magnitude]', size=20)
plt.title('Phased light curve', size=20)
plt.show()
```
<div class='alert alert-warning'>
<font size='3'><b>The interactive plot allows you to play around with different periods. See how the plot changes if you phase it with wrong period values!</b></font><br>
</div>
```
#plt.style.use('ggplot')
plt.rcParams['figure.figsize'] = (12, 8)
plt.rcParams['font.size'] = 18

def plot_lc(period=60):
    """Phase-fold the global light curve (mjd, mag) at `period` days and plot two cycles."""
    # NOTE(review): the zeros() line is dead code — `phase` is immediately overwritten.
    phase = np.zeros(shape=mjd.shape)
    phase = np.fmod(mjd/period,1)
    plt.plot(phase*period,mag,"g.")
    plt.plot((phase+1)*period,mag,"r.")
    # Reversed y-limits so that brighter (smaller magnitude) is up.
    plt.ylim(20.2,18.8)
    plt.xlabel('Time [Days]', size=20)
    plt.ylabel('Brightness [Magnitude]', size=20)
    plt.title('Phased light curve', size=20)
    plt.show()

# Interactive slider over trial periods from 25 to 40 days in steps of 1.
interactive_plot = interactive(plot_lc, period=(25.,40.,1.))
display(interactive_plot)
```
____
# Activity 2: Classifying the variable star
In this exercise, we will learn how to use a series of cosine and sine functions to model a light curve (so-called Fourier decomposition):
$f(\phi) = \sum_{i=1}^n a_i cos(2\pi i \phi) + b_i sin(2\pi i \phi)$,
where $\phi$ denotes the phase of the light curve and $a_i$ and $b_i$ are the coefficients of the i-th cos() and sin() terms. We will also learn how we can use the resulting coefficients to classify the type of variable star.
<div class='alert alert-info'>
<font size='3'><b>Select and run the cell below to model the light curve.</b></font>
</div>
```
# Definition of the Fourier decomposition function.
from scipy import optimize

# BUG FIX: pi was hard-coded as 3.1415; the truncation skews the
# higher-order harmonic terms. Use full double precision instead.
pi = np.pi

def FourierDecomp(x, a0, a1, a2, a3, a4, b1, b2, b3, b4):
    """4th-order Fourier series in phase x: a0 + sum_i [a_i*cos(2*pi*i*x) + b_i*sin(2*pi*i*x)]."""
    return (a0
            + a1*np.cos(2*pi*x) + a2*np.cos(2*pi*2*x) + a3*np.cos(2*pi*3*x) + a4*np.cos(2*pi*4*x)
            + b1*np.sin(2*pi*x) + b2*np.sin(2*pi*2*x) + b3*np.sin(2*pi*3*x) + b4*np.sin(2*pi*4*x))
# Fit the phased light curve with the Fourier model and show the coefficients.
# p0 seeds the optimizer near the mean magnitude (~19.5) with all harmonics zero.
params, params_covariance = optimize.curve_fit(FourierDecomp, phase, mag, p0=[19.5, 0, 0, 0, 0, 0, 0, 0, 0])
print(params)
```
And plot the resulting curve on the data
<div class='alert alert-info'>
<font size='3'><b>Select and run the cell below to plot the modeled light curve.</b></font>
</div>
<div class='alert alert-warning'>
<font size='3'><b>This plot compares the modelled light curves (black line) with real data (red points). Looks like the model is doing a good job!</b></font><br>
</div>
```
plt.figure(figsize=(12, 8))
# Data: two consecutive phased cycles.
plt.plot(phase*period, mag, "r.", label='Data')
plt.plot((phase+1)*period, mag, "r.", label='Data')
# Evaluate the fitted Fourier model on a dense phase grid and overlay it (both cycles).
p2=np.linspace(0,1,100)
plt.plot(p2*period, FourierDecomp(p2, params[0], params[1], params[2], params[3], params[4], params[5], params[6], params[7], params[8]), color='black', label='Fitted function')
plt.plot((p2+1)*period,FourierDecomp(p2, params[0], params[1], params[2], params[3], params[4], params[5], params[6], params[7], params[8]), color='black', label='Fitted function')
plt.xlabel('Time [Days]', size=20)
plt.ylabel('Brightness [Magnitude]', size=20)
# Reversed y-limits so that brighter (smaller magnitude) is up.
plt.ylim(20.2,18.8)
plt.show()
```
From the light curve, use coefficients a4 and a2 to determine a4/a2 vs period.
<div class='alert alert-info'>
<font size='3'><b>Select and run the cell below to find out the parameters of the modeled light curve.</b></font>
</div>
```
print ("a4/a2: ",abs(params[4]/params[2]), "log period: ", np.log10(period))
```
___
<font size='4' color='#0076b6'><b>Question 2: Using a4/a2 and the period, can you determine the type of this variable?</b></font>
To determine the type of variable star from the period, check the classification diagram:
<div class='alert alert-warning'>
<font size='3'><b>Can you use the coefficients to type the variables?</b></font><br>
</div>
<img src="https://github.com/DavidVargasMora/TACTests/raw/master/02_Variable_Stars/Figures/TeenCafeClass.jpeg" width="600px" margin="0 auto">
Now use this period with the Leavitt Law to determine luminosity (i.e., absolute magnitude or intrinsic brightness) and with that, get the distance.
____
# Activity 3: Using Leavitt Law to estimate distance
In this exercise, we will fit the data to determine the coefficients of Leavitt law, and from that, we can estimate the distance to our nearest spiral galaxy, M31.
<img src="https://github.com/DavidVargasMora/TACTests/raw/master/02_Variable_Stars/Figures/TeenCafeDistance.jpeg" width="600px">
<div class='alert alert-info'>
<font size='3'><b>Run the cell below to see the period-luminosity relation of Cepheids in Andromeda.</b></font>
</div>
<div class='alert alert-warning'>
<font size='3'><b>This plot shows the period vs. brightness of Cepheids from Andromeda. Notice there are two different populations of Cepheids, and how they distribute following the Leavitt law!</b></font><br>
</div>
```
#read in data of Andromeda Cepheids
cep=ascii.read(repoURL+'02_Variable_Stars/Data/cep.tab')
# Columns: period [day], J magnitude, H magnitude, pulsation type
per, jmag, hmag, typ = cep['col1'], cep['col2'], cep['col3'], cep['col4']
plt.figure(figsize=(12, 8))
# Plot the two populations separately ('FM'/'FO' — presumably
# fundamental-mode vs. first-overtone pulsators; confirm against the table docs)
plt.plot(np.log10(per[cep['col4'] == 'FM']), jmag[cep['col4'] == 'FM'], 'm.')
plt.plot(np.log10(per[cep['col4'] == 'FO']), jmag[cep['col4'] == 'FO'], 'y.')
#plt.xscale("log")
plt.xlabel('log(Period) [day]', size=20)
plt.ylabel('Magnitude', size=20)
plt.title('Leavitt Law', size=20)
# Inverted y-axis: smaller magnitude means brighter star
plt.ylim(22,16)
plt.show()
```
By fitting a line to the data in the diagram above (showing the Leavitt Law), the absolute magnitude (or luminosity) can be found. With the equation for a line:
$y = ax + b$,
coefficient "a" is the slope of the line and coefficient "b" is the y value when x is zero. That y value is the absolute magnitude or luminosity of the variable star with a period of one day or log(0)=1. This value will be M1 in Question 3 below.
<div class='alert alert-info'>
<font size='3'><b>Run the cell below to find a linear relation of the Cepheids in M31.</b></font>
</div>
```
def Linear(x, a, b):
    """Evaluate the straight line y = a*x + b (slope ``a``, intercept ``b``)."""
    return b + a * x
# Fit a straight line to log10(period) vs. J magnitude for the
# fundamental-mode ('FM') Cepheids only.
# p0=[-1, 22]: initial guess — negative slope (longer period = brighter,
# i.e. smaller magnitude) and an intercept near the faint end of the data.
params, params_covariance = optimize.curve_fit(Linear, np.log10(per[cep['col4'] == 'FM']), jmag[cep['col4'] == 'FM'], p0=[-1,22])
print ("y = a*x + b, a=", params[0], " b=", params[1])
```
<div class='alert alert-warning'>
<font size='3'><b>Fun fact: we may be used to defining the linear relation as "y = m*x + y0", where m is the slope and y0 is the intercept on the y-axis. But m can be misleading here because it can also mean magnitude, so we chose to use "y = a*x + b" instead. </b></font><br>
</div>
<img src="https://github.com/DavidVargasMora/TACTests/raw/master/02_Variable_Stars/Figures/TeenCafeClass.jpeg" width="600px" margin="0 auto">
<div class='alert alert-info'>
<font size='3'><b>Run the cell below to plot the relation along with the data.</b></font>
</div>
<div class='alert alert-warning'>
<font size='3'><b>This plot shows the period vs. brightness of Cepheids from Andromeda, and the linear relation we derive. See how good they match with each other!</b></font><br>
</div>
```
ppp=np.linspace(1,100,100)
#print p2
plt.figure(figsize=(12, 8))
plt.plot(np.log10(ppp), Linear(np.log10(ppp), params[0], params[1]), color='black', label='Fitted function')
plt.plot(np.log10(per[cep['col4'] == 'FM']), jmag[cep['col4'] == 'FM'], 'm.')
plt.plot(np.log10(per[cep['col4'] == 'FO']), jmag[cep['col4'] == 'FO'], 'y.')
#plt.xscale("log")
plt.xlabel('log(Period) [day]', size=20)
plt.ylabel('Brightness [Magnitude]', size=20)
plt.title('Leavitt Law', size=20)
plt.ylim(23,16)
plt.xlim(0,2)
plt.show()
```
___
<font size='4' color='#0076b6'><b>Question 3: Can you estimate the distance to Andromeda galaxy?</b></font>
We can use the distance modulus to convert magnitude to distance:
$M_1 - M_2 = 5 \times log_{10}\frac{d_1}{d_2}$
or in terms of distance (d1):
$d_1 = d_2 \times 10^{(M1-M2)/5}$
Here $M_1$ is the apparent magnitude of the Cepheids in Andromeda, $d_1$ is the distance to Andromeda. $M_2$ is the absolute magnitude (or reference magnitude) and $d_2$ is the reference distance we use to define the absolute magnitude (which is set to 10 pc).
To get $d_1$, what value for $M_1$ do you need to use from the Leavitt Law above?
See previous paragraph before Question 3.
Info you will need for $M_2$ and $d_2$: a 1-day period Cepheid has absolute brightness of -2.1 magnitude at 10 pc.
You can use Python as a calculator to determine the distance. Try it yourself using pow(base,exponent) for base^(exponent). Check your answer with the distance here: https://en.wikipedia.org/wiki/Andromeda_Galaxy
<div class='alert alert-info'>
<font size='3'><b>Run the cell below to calculate distance to the Andromeda galaxy.</b></font>
</div>
```
# Now fill in M1, M2, and d2 below to get d1
# Distance modulus: d1 = d2 * 10^((M1 - M2) / 5)
M1 = 22.36516566   # apparent magnitude of a 1-day Cepheid in M31 (fit intercept)
M2 = -2.1          # absolute magnitude of a 1-day Cepheid
d2 = 10            # reference distance for absolute magnitude [pc]
d1 = d2 * 10 ** ((M1 - M2) / 5.)
print ("Distance to Andromeda =", d1,'pc')
```
___
<font size='4' color='#0076b6'><b>Bonus question: Can you estimate the distance to Large Magellanic Cloud?</b></font>
<div class='alert alert-info'>
<font size='3'><b>Run the cell below to see Cepheids in the Large Magellanic Cloud.</b></font>
</div>
<div class='alert alert-warning'>
<font size='3'><b>This plot shows the period vs. brightness of Cepheids from Large Magellanic Cloud. Notice there are two different populations of Cepheids, and how they distribute following the Leavitt law!</b></font><br>
</div>
```
#read in data of LMC Cepheids
cep=ascii.read(repoURL+'02_Variable_Stars/Data/lmc.tab')
per, jmag, hmag, typ = cep['col1'], cep['col2'], cep['col3'], cep['col4']
plt.figure(figsize=(12, 8))
plt.plot(np.log10(per[cep['col4'] == 'FM']), jmag[cep['col4'] == 'FM'], 'm.')
#plt.xscale("log")
plt.xlabel('log(Period) [day]', size=20)
plt.ylabel('Magnitude', size=20)
plt.title('Leavitt Law', size=20)
plt.ylim(16.5,9)
plt.xlim(0.0,2)
plt.show()
```
<div class='alert alert-info'>
<font size='3'><b>Run the cell below to find a linear relation of the Cepheids in LMC.</b></font>
</div>
<div class='alert alert-warning'>
<font size='3'><b>This plot shows the period vs. brightness of Cepheids from Large Magellanic Cloud, and the linear relation we derive. See how good they match with each other!</b></font><br>
</div>
```
params, params_covariance = optimize.curve_fit(Linear, np.log10(per[cep['col4'] == 'FM']), jmag[cep['col4'] == 'FM'], p0=[-1,22])
print ("y = a*x + b, a=", params[0], " b=", params[1])
ppp=np.linspace(1,500,100)
#print p2
plt.figure(figsize=(12, 8))
plt.plot(np.log10(ppp), Linear(np.log10(ppp), params[0], params[1]), color='black', label='Fitted function')
plt.plot(np.log10(per[cep['col4'] == 'FM']), jmag[cep['col4'] == 'FM'], 'm.')
#plt.xscale("log")
plt.xlabel('log(Period) [day]', size=20)
plt.ylabel('Brightness [Magnitude]', size=20)
plt.title('Leavitt Law', size=20)
plt.ylim(16.5,9)
plt.xlim(0,2)
plt.show()
```
<div class='alert alert-info'>
<font size='3'><b>Run the cell below to calculate distance to the Large Magellanic Cloud.</b></font>
</div>
```
# Now fill in M1, M2, and d2 below to get d1
# Distance modulus: d1 = d2 * 10^((M1 - M2) / 5)
M1 = 16.41141536062039   # apparent magnitude of a 1-day Cepheid in the LMC (fit intercept)
M2 = -2.1                # absolute magnitude of a 1-day Cepheid
d2 = 10                  # reference distance for absolute magnitude [pc]
d1 = d2 * 10 ** ((M1 - M2) / 5.)
print ("Distance to LMC =", d1, 'pc')
```
| github_jupyter |
## EV Charging Stations Dashboard
**Introduction**:
This notebook demonstrates how to convert a notebook to a dashboard using Voila. The dashboard includes an analysis of EV charging station locations together with traffic data. Draw a bounding box on the map, **by using the black square on the map**, to fetch the EV charging station locations in the selected area. Select clear all to remove the content.
This sample notebook will install Voila. From within the notebook, click the Voila button at the top of the notebook to easily convert the notebook to a dashboard.
**Catalog**: hrn:here:data::olp-here:rib-2 **Layers**: electric-vehicle-charging-stations, topology-geometry
**Catalog**: hrn:here:data::olp-here:olp-traffic-1 **Layer**: traffic-flow
```
##check if bqplot is installed, if not install it
import sys
import importlib.util
spec = importlib.util.find_spec('bqplot')
if spec is None:
!{sys.executable} -m pip install bqplot
##check if voila is installed, if not install it
import importlib.util
spec = importlib.util.find_spec('voila')
if spec is None:
!{sys.executable} -m pip install voila
import os
from here.inspector.ipyleaflet import IpyleafletInspector
from shapely.geometry import Point
from shapely.ops import nearest_points
from ipyleaflet import DrawControl
from ipyleaflet import Marker, Icon, Map,GeoJSON,Heatmap
from ipywidgets import Text, HTML
from ipyleaflet import WidgetControl
import ipywidgets as widgets
import pandas as pd
import geopandas
import numpy as np
import json
import geojson
import bqplot.pyplot as bq
from dashboard.evdata import EVStation, TrafficMessage, get_partition_list, nearest_messages,empty_geojson
class UI:
"""Dashboard state: two bqplot figures plus the map layers for EV stations,
traffic flow, and a nearest-message heatmap. Notebook dump has stripped
indentation; code is kept byte-identical, comments only are added."""
def __init__(self, insp_backend, ev_control):
self.insp_backend = insp_backend
# Bar chart: per-company count of nearest traffic-flow messages
self.fig1 = bq.figure(animation_duration=750,
title="Top 5 - Sum of Traffic Flow Messages/EV St. Co.",
layout=widgets.Layout(flex='1 1 auto', width='auto'))
bq.xlabel("EV St. Company Name")
bq.ylabel("No of Nearest Messages")
x_label = [' ',' ', ' ', ' ', ' ', ' ', ' ', ' ']
self.line = bq.bar(x =x_label , y= [0,0,0,0,0,0,0,0])
# Pie chart: top-5 companies by number of stations in the selection
self.fig2 = bq.figure(title="Top 5 EV St Co.",
animation_duration=750,
layout=widgets.Layout(flex='1 1 auto', width='auto'))
self.pie = bq.pie(sizes = list([0,0,0]),
labels =list(['','', '']),
display_values = True,
values_format=".0f",
display_labels='outside')
self.pie.radius = 130
self.pie.inner_radius = 30
self.figures = []
self.figures.append(self.fig1)
self.figures.append(self.fig2)
self.ev_control = ev_control
# Build a GeoJSON layer of station markers from a DataFrame with
# longitude/latitude/name/language/days_of_week_words columns.
def _display_location(self,df):
features = []
insert_features = lambda X: features.append(
geojson.Feature(geometry=geojson.Point((X["longitude"],
X["latitude"]
)),
properties=dict(name=X["name"],
description=X["language"],
days=X['days_of_week_words']
)))
df.apply(insert_features, axis=1)
## display charging station details on the pane;
def display_info(event, feature, **kwargs):
html_EV_Details.value = '''
<h4>EV Charging Station Details</h4>
<b>{}</b>
<div>Language Code: {}</div>
Operating Days: {}
'''.format(feature['properties']['name'],
feature['properties']['description'],
feature['properties']['days'] )
self.geo_json_layer = GeoJSON(
name='EV Charging Stations',
data=geojson.FeatureCollection(features),
style={'color': 'black', 'radius':5, 'fillColor': 'red', 'opacity':.7, 'weight':1.9, 'dashArray':'2', 'fillOpacity':1.0},
point_style={'radius': 5, 'color': 'red', 'fillOpacity': 1.0, 'fillColor': 'blue', 'weight': 3}
)
self.geo_json_layer.on_click(display_info)
# NOTE(review): uses the module-level insp_bkend rather than
# self.insp_backend — works only because they are the same object.
insp_bkend.add_layer(self.geo_json_layer)
# Refresh the pie chart (and station layer) from the selected stations.
def update_pie(self,df):
self._display_location(df)
grp = df.groupby(['name']).size().sort_values(ascending=False).head(5)
self.pie.sizes = list(grp.values)
self.pie.labels = [x.split(" ")[0] for x in list(grp.keys().values)]
# Refresh the bar chart from the nearest-message counts per company.
def update_bar(self,df):
grp = df.groupby(['name']).size().sort_values(ascending=False).head(5)
self.line.x = [x.split(" ")[0] for x in list(grp.keys().values)]
self.line.y = list(grp.values)
self.insp_backend.add_control(self.ev_control)
# Weighted heatmap of nearest traffic messages (weight = count * 50).
def update_heatmap(self, df):
ls = []
for index, r in df.iterrows():
ls.append([r['geometry'].y, r['geometry'].x, r['count']*50] )
self.heatmap = Heatmap(
name='Nearest Messages',
locations=ls,
radius=20
)
self.insp_backend.add_layer(self.heatmap);
# Add the traffic-flow GeoJSON layer for the selected bounding box.
def update_traffic(self, df):
self.traffic_geo = GeoJSON(name='Traffic Flow',
data=df,
style={'color': 'blue', 'radius':1, 'fillColor': 'blue', 'opacity':1.0, 'weight':1.9, 'dashArray':'2', 'fillOpacity':1.0},
point_style={'radius': 1, 'color': 'blue', 'fillOpacity': 1.0, 'fillColor': 'blue', 'weight': 3}
)
self.insp_backend.add_layer(self.traffic_geo)
# Remove all layers/controls and reset both charts to their empty state.
def clear_all_data(self):
self.insp_backend.remove_layer(self.geo_json_layer)
self.insp_backend.remove_layer(self.traffic_geo)
self.insp_backend.remove_layer(self.heatmap)
self.insp_backend.remove_control(self.ev_control)
self.line.y = [0,0,0,0,0,0,0,0]
# NOTE(review): trailing comma makes this assign the 1-tuple
# ([0, 0, 0],), not a list — compare the constructor; likely unintended.
self.pie.sizes = list([0,0,0]),
self.pie.labels =list(['','', ''])
html_EV_Details.value = '''
<h4>EV Charging Station Details</h4>
Click over an icon
'''
## Event listener for rectangle on the map
# NOTE(review): registered below via draw_control.on_draw(handle_draw), so
# `self` here is presumably the DrawControl instance, not a UI — confirm.
def handle_draw(self, action, geo_json):
"""extract bounded rect coordinates"""
if action == 'created':
global nearest_df
global ev_df
# Round-trip through json to get a plain-dict deep copy of the event.
array = json.dumps(geo_json)
data = json.loads(array)
## extract upper-left and bottom-right coordinates of bounding box
x1 = data['geometry']['coordinates'][0][0][0]
y1 = data['geometry']['coordinates'][0][0][1]
x2 = data['geometry']['coordinates'][0][2][0]
y2 = data['geometry']['coordinates'][0][2][1]
#print(x1,y1,x2,y2)
# Fetch stations and traffic for the partitions covering the box,
# then refresh every widget from the module-level `ui`.
p_list = get_partition_list(x1,y1,x2,y2)
ev_df = ev.get_geojson(p_list,x1,y1,x2,y2 )
ui.update_pie(ev.data)
geo_tr = traffic.get_geojson(p_list,x1,y1,x2,y2)
ui.update_traffic(geo_tr)
nearest_df = nearest_messages(ev.data, traffic.data)
ui.update_bar(nearest_df)
ui.update_heatmap(nearest_df)
elif action == 'deleted':
ui.clear_all_data()
##Inspector code
insp = IpyleafletInspector().set_zoom(14)
# Point takes (x, y) = (longitude, latitude); 13.3, 52.5 is Berlin.
insp.set_center(Point(13.3,52.5))
insp_bkend = insp.backend()
# Only rectangle drawing is enabled; all other draw tools are disabled.
draw_control = DrawControl(marker={},
rectangle={'shapeOptions': {'color': '#0000FF'}},
circle={},
polygon={},
polyline={},
circlemarker={},
)
# Overrides the rectangle style passed to the constructor above.
draw_control.rectangle = {
"shapeOptions": {
"fillColor": "black",
"color": "black",
"fillOpacity": 0.1
}
}
## Right side panel html
html_EV_Details = HTML('''
<h4>EV Charging Station Details</h4>
Click over an icon
''')
html_EV_Details.layout.margin = '0px 20px 20px 20px'
control_EV = WidgetControl(widget=html_EV_Details, position='bottomright')
##end of panel
## instantiate all objects
ui = UI(insp_bkend, control_EV)
ev = EVStation()
traffic = TrafficMessage()
##draw controls
draw_control.on_draw(handle_draw)
insp_bkend.add_control(draw_control)
insp.show()
display(widgets.HBox(ui.figures))
```
<span style="float:left; margin-top:3px;"><img src="https://www.here.com/themes/custom/here_base_theme_v2/logo.svg" alt="HERE Logo" height="60" width="60"></span><span style="float:right; width:90%;"><sub><b>Copyright (c) 2020-2021 HERE Global B.V. and its affiliate(s). All rights reserved.</b>
This software, including documentation, is protected by copyright controlled by HERE. All rights are reserved. Copying, including reproducing, storing, adapting or translating, any or all of this material requires the prior written consent of HERE. This material also contains confidential information which may not be disclosed to others without the prior written consent of HERE.</sub></span>
| github_jupyter |
```
from google.colab import drive
drive.mount('/content/drive')
!cp -r '/content/drive/My Drive/Colab Notebooks/[Kaggle] Understanding Clouds from Satellite Images/Scripts/.' .
!unzip -q '/content/drive/My Drive/Colab Notebooks/[Kaggle] Understanding Clouds from Satellite Images/Data/train_images320x480.zip'
```
### Dependencies
```
from utillity_script_cloud_segmentation import *
from utillity_script_lr_schedulers import *
seed = 0
seed_everything(seed)
warnings.filterwarnings("ignore")
base_path = '/content/drive/My Drive/Colab Notebooks/[Kaggle] Understanding Clouds from Satellite Images/'
data_path = base_path + 'Data/'
model_base_path = base_path + 'Models/files/classification/'
train_path = data_path + 'train.csv'
hold_out_set_path = data_path + 'hold-out.csv'
train_images_dest_path = 'train_images/'
```
### Load data
```
# Load the full training set and the train/validation split definition.
train = pd.read_csv(train_path)
hold_out_set = pd.read_csv(hold_out_set_path)
# Fix: take explicit copies of the slices so the in-place column edits below
# do not operate on a view (SettingWithCopyWarning; edits could silently
# fail to propagate).
X_train = hold_out_set[hold_out_set['set'] == 'train'].copy()
X_val = hold_out_set[hold_out_set['set'] == 'validation'].copy()
print('Compete set samples:', len(train))
print('Train samples: ', len(X_train))
print('Validation samples: ', len(X_val))
# Preprocess data: 'Image_Label' is "<image>_<label>"; keep the image name.
train['image'] = train['Image_Label'].apply(lambda x: x.split('_')[0])
label_columns = ['Fish', 'Flower', 'Gravel', 'Sugar']
# Invert the binary labels (0 <-> 1) in each class column; explicit column
# assignment replaces the chained inplace replace() on a slice.
for label in label_columns:
    X_train[label] = X_train[label].replace({0: 1, 1: 0})
    X_val[label] = X_val[label].replace({0: 1, 1: 0})
display(X_train.head())
```
# Model parameters
```
BATCH_SIZE = 64
WARMUP_EPOCHS = 3
WARMUP_LEARNING_RATE = 1e-3
EPOCHS = 20
MAX_LEARNING_RATE = 10**(-1.7)
HEIGHT = 224
WIDTH = 224
CHANNELS = 3
N_CLASSES = 4
ES_PATIENCE = 8
STEP_SIZE_TRAIN = len(X_train)//BATCH_SIZE
STEP_SIZE_VALID = len(X_val)//BATCH_SIZE
model_name = '17-EfficientNetB0_%sx%s_Cosine' % (HEIGHT, WIDTH)
model_path = model_base_path + '%s.h5' % (model_name)
```
### Data generator
```
datagen=ImageDataGenerator(rescale=1./255.,
vertical_flip=True,
horizontal_flip=True,
zoom_range=[1, 1.2],
fill_mode='constant',
cval=0.)
test_datagen=ImageDataGenerator(rescale=1./255.)
train_generator=datagen.flow_from_dataframe(
dataframe=X_train,
directory=train_images_dest_path,
x_col="image",
y_col=label_columns,
target_size=(HEIGHT, WIDTH),
batch_size=BATCH_SIZE,
class_mode="other",
shuffle=True,
seed=seed)
valid_generator=test_datagen.flow_from_dataframe(
dataframe=X_val,
directory=train_images_dest_path,
x_col="image",
y_col=label_columns,
target_size=(HEIGHT, WIDTH),
batch_size=BATCH_SIZE,
class_mode="other",
shuffle=True,
seed=seed)
```
# Model
```
def create_model(input_shape, N_CLASSES):
    """Build an ImageNet-pretrained EfficientNetB0 backbone (global average
    pooling, no top) with a sigmoid Dense head for multi-label output."""
    input_tensor = Input(shape=input_shape)
    backbone = efn.EfficientNetB0(
        weights='imagenet',
        include_top=False,
        input_tensor=input_tensor,
        pooling='avg',
    )
    predictions = Dense(N_CLASSES, activation='sigmoid')(backbone.output)
    return Model(input_tensor, predictions)
```
# Learning rate finder
## Warmup top layers
```
model = create_model((None, None, CHANNELS), N_CLASSES)
metric_list = ['accuracy']
for layer in model.layers[:-1]:
layer.trainable = False
optimizer = optimizers.SGD(lr=WARMUP_LEARNING_RATE, momentum=0.9, nesterov=True)
model.compile(optimizer=optimizer, loss='binary_crossentropy', metrics=metric_list)
# model.summary()
warmup_history = model.fit_generator(generator=train_generator,
steps_per_epoch=STEP_SIZE_TRAIN,
validation_data=valid_generator,
validation_steps=STEP_SIZE_VALID,
epochs=WARMUP_EPOCHS,
verbose=1).history
```
## Fine-tune all layers
```
for layer in model.layers:
layer.trainable = True
checkpoint = ModelCheckpoint(model_path, monitor='val_loss', mode='min', save_best_only=True)
es = EarlyStopping(monitor='val_loss', mode='min', patience=ES_PATIENCE, restore_best_weights=True, verbose=1)
cosine_lr = WarmUpCosineDecayScheduler(learning_rate_base=MAX_LEARNING_RATE,
total_steps=(STEP_SIZE_TRAIN * EPOCHS),
warmup_steps=(1 * STEP_SIZE_TRAIN),
hold_base_rate_steps=0)
callback_list = [checkpoint, es, cosine_lr]
optimizer = optimizers.SGD(lr=MAX_LEARNING_RATE, momentum=0.9, nesterov=True)
model.compile(optimizer=optimizer, loss='binary_crossentropy', metrics=metric_list)
# model.summary()
history = model.fit_generator(generator=train_generator,
steps_per_epoch=STEP_SIZE_TRAIN,
validation_data=valid_generator,
validation_steps=STEP_SIZE_VALID,
callbacks=callback_list,
epochs=EPOCHS,
verbose=1).history
```
## Model loss graph
```
#@title
# Stitch the warm-up history onto the fine-tune history so the plots cover
# the whole training run.
# NOTE(review): keys 'acc'/'val_acc' match older Keras; newer versions report
# 'accuracy'/'val_accuracy' — confirm against the runtime Keras version.
metrics_history = ['loss', 'acc']
for metric_hist in metrics_history:
history[metric_hist] = warmup_history[metric_hist] + history[metric_hist]
history['val_' + metric_hist] = warmup_history['val_' + metric_hist] + history['val_' + metric_hist]
plot_metrics(history, metric_list=metrics_history)
```
## Scheduler learning rates
```
#@title
fig, ax1 = plt.subplots(1, 1, figsize=(20, 6))
ax1.plot(cosine_lr.learning_rates)
ax1.set_title('Fine-tune learning rates')
plt.xlabel('Steps')
plt.ylabel('Learning rate')
sns.despine()
plt.show()
```
| github_jupyter |
# Adding a discharge point source to a LEM
*(Greg Tucker, CSDMS / CU Boulder, fall 2020)*
This notebook shows how to add one or more discharge point sources to a Landlab-built landscape evolution model (LEM), using the flow routing components. The basic idea is to modify the `water__unit_flux_in` field to include a large flux (which could be represented as either drainage area or discharge) at one or more locations along the edge of a grid.
```
from landlab import RasterModelGrid, imshow_grid
from landlab.components import FlowAccumulator
import numpy as np
```
## Docstring example from `FlowAccumulator`
The following is a tiny example from the `FlowAccumulator` documentation:
```
mg = RasterModelGrid((5, 4), xy_spacing=(10., 10))
topographic__elevation = np.array([0., 0., 0., 0.,
0., 21., 10., 0.,
0., 31., 20., 0.,
0., 32., 30., 0.,
0., 0., 0., 0.])
_ = mg.add_field("topographic__elevation", topographic__elevation, at="node")
mg.set_closed_boundaries_at_grid_edges(True, True, True, False)
fa = FlowAccumulator(
mg,
'topographic__elevation',
flow_director='FlowDirectorSteepest'
)
runoff_rate = np.arange(mg.number_of_nodes, dtype=float)
rnff = mg.add_field("water__unit_flux_in", runoff_rate, at="node", clobber=True)
fa.run_one_step()
print(mg.at_node['surface_water__discharge'].reshape(5, 4))
# array([ 0., 500., 5200., 0.,
# 0., 500., 5200., 0.,
# 0., 900., 4600., 0.,
# 0., 1300., 2700., 0.,
# 0., 0., 0., 0.])
```
We can extend this tiny example to show that you can subsequently modify the `rnff` array and it will take effect when you re-run the `FlowAccumulator`:
```
rnff[:] = 1.0
fa.run_one_step()
print(mg.at_node['surface_water__discharge'].reshape(5, 4))
```
## Larger example
In this example, we create a slightly larger grid, with a surface that slopes down toward the south / bottom boundary. We will introduce a runoff point source at a node in the middle of the top-most non-boundary row.
Start by defining some parameters:
```
# Parameters
nrows = 41
ncols = 41
dx = 100.0 # grid spacing in m
slope_gradient = 0.01 # gradient of topographic surface
noise_amplitude = 0.2 # amplitude of random noise
input_runoff = 10000.0 # equivalent to a drainage area of 10,000 dx^2 or 10^8 m2
```
Create grid and topography, and set boundaries:
```
# Create a grid, and a field for water input
grid = RasterModelGrid((nrows, ncols), xy_spacing=dx)
# Have just one edge (south / bottom) be open
grid.set_closed_boundaries_at_grid_edges(True, True, True, False)
# Create an elevation field as a ramp with random noise
topo = grid.add_zeros('topographic__elevation', at='node')
topo[:] = slope_gradient * grid.y_of_node
np.random.seed(0)
topo[grid.core_nodes] += noise_amplitude * np.random.randn(grid.number_of_core_nodes)
```
The `FlowAccumulator` component takes care of identifying drainage directions (here using the D8 method) and calculating the cumulative drainage area and surface water discharge.
Note that in this case we are assuming a default runoff value of unity, meaning that the calculated `surface_water__discharge` is actually just drainage area. To introduce the drainage area of a river entering at the top, we will use a large value for runoff. Because we are considering drainage area as the primary variable, with unit "runoff", our input runoff is a dimensionless variable: the number of contributing grid cell equivalents. We will set this to unity at all the nodes in the model except the point-source location.
```
# Create a FlowAccumulator component
fa = FlowAccumulator(grid, flow_director='FlowDirectorD8')
# Create a runoff input field, and set one of its nodes to have a large input
runoff = grid.add_ones('water__unit_flux_in', at='node', clobber=True)
top_middle_node = grid.number_of_nodes - int(1.5 * ncols)
runoff[top_middle_node] = input_runoff
fa.run_one_step()
imshow_grid(grid, 'surface_water__discharge')
```
## Changing the amount and/or location of input
We can change the input drainage area / discharge amount or location simply by modifying the `water__unit_flux_in` field. Here we will shift it to the left and double its magnitude.
```
runoff[top_middle_node] = 1.0 # go back to being a "regular" node
runoff[top_middle_node - 15] = 2 * input_runoff # shift 15 cells left and double amount
fa.run_one_step()
imshow_grid(grid, 'surface_water__discharge')
```
Note that the `drainage_area` field does not recognize any runoff input. It continues to track *only* the local drainage area:
```
imshow_grid(grid, 'drainage_area')
```
This means that you should use the `surface_water__discharge` field rather than the `drainage_area` field, regardless of whether the former is meant to represent discharge (volume per time) or effective drainage area (area).
## Combining with a Landscape Evolution Model
Here we'll set up a simple LEM that uses the river input.
```
from landlab.components import StreamPowerEroder, LinearDiffuser
# Parameters
K = 4.0e-5
D = 0.01
uplift_rate = 0.0001
nrows = 51
ncols = 51
dx = 10.0 # grid spacing in m
slope_gradient = 0.01 # gradient of topographic surface
noise_amplitude = 0.04 # amplitude of random noise
input_runoff = 10000.0 # equivalent to a drainage area of 10,000 dx^2 or 10^6 m2
run_duration = 25.0 / uplift_rate
dt = dx / (K * (dx * dx * input_runoff)**0.5)
num_steps = int(run_duration / dt)
print(str(num_steps) + ' steps.')
# Create a grid, and a field for water input
grid = RasterModelGrid((nrows, ncols), xy_spacing=dx)
# Have just one edge (south / bottom) be open
grid.set_closed_boundaries_at_grid_edges(True, True, True, False)
# Create an elevation field as a ramp with random noise
topo = grid.add_zeros('topographic__elevation', at='node')
topo[:] = slope_gradient * grid.y_of_node
np.random.seed(0)
topo[grid.core_nodes] += noise_amplitude * np.random.randn(grid.number_of_core_nodes)
# Create components
fa = FlowAccumulator(grid, flow_director='FlowDirectorD8')
sp = StreamPowerEroder(grid, K_sp=K, discharge_field='surface_water__discharge')
ld = LinearDiffuser(grid, linear_diffusivity=D)
runoff = grid.add_ones('water__unit_flux_in', at='node', clobber=True)
top_middle_node = grid.number_of_nodes - int(1.5 * ncols)
runoff[top_middle_node] = input_runoff
for _ in range(num_steps):
topo[grid.core_nodes] += uplift_rate * dt
fa.run_one_step()
ld.run_one_step(dt)
sp.run_one_step(dt)
imshow_grid(grid, topo)
```
| github_jupyter |
```
# default_exp prep
```
# Prep
> This module contains all the necessary functions for loading and preprocessing videos into a standard format to be used in the rest of the pipeline.
```
#export
import concurrent.futures
import csv
import cv2
import ffmpeg
import json
import ntpath
import numpy
import os
import pprint
import pytesseract
import random
import matplotlib.pyplot as plt
import more_itertools as mit
import pandas as pd
from collections import defaultdict, OrderedDict
from pathlib import Path
from PIL import Image
from two_to_tango.utils import *
from shutil import copyfile
from tqdm.auto import tqdm
#hide
from nbdev.showdoc import *
#export
def get_rand_imgs(vid_path, max_msecs, n = 10):
    """Sample ``n`` decodable frames from random timestamps in a video.

    Draws whole-second timestamps in [1000, max_msecs) ms and keeps only
    frames that decode successfully; assumes ``max_msecs > 1000``. Loops
    until ``n`` frames are collected, so a video that never decodes would
    not terminate. Returns frames as OpenCV arrays (BGR channel order).
    """
    vid = cv2.VideoCapture(str(vid_path))
    imgs = []
    try:
        while len(imgs) < n:
            msec = random.randrange(1_000, max_msecs, 1_000)
            vid.set(cv2.CAP_PROP_POS_MSEC, msec)
            success, img = vid.read()
            if success:
                imgs.append(img)
    finally:
        # Fix: release the capture handle (resource leak in the original).
        vid.release()
    return imgs
#export
def vid_from_frames(frames, output = None, fr = 30):
    """Generate video from list of frame paths.

    Parameters:
        frames: Path to a directory of frames named ``0001.jpg``, ``0002.jpg``, ...
        output: directory for the resulting ``gen_vid.mp4``; defaults to the
            parent of ``frames``.
        fr: output frame rate.
    """
    if not output: output = frames.parent
    try:
        stream = ffmpeg.input(frames/'%04d.jpg')
        stream = ffmpeg.output(stream, str(output/"gen_vid.mp4"), r = fr)
        out, err = ffmpeg.run(stream)
    except Exception as e:
        # NOTE(review): best-effort — any ffmpeg failure is printed and
        # swallowed, so callers cannot detect a failed encode.
        print("Error occured:", e)
test_path = Path("<path>")
vid_from_frames(test_path, Path('.'))
# export
class Video:
"""cv2.VideoCapture wrapper with frame-rate fixing and indexed frame access.
(Notebook dump has stripped indentation; code kept byte-identical.)"""
def __init__(self, vid_path, fr = None, overwrite = False):
self.video = cv2.VideoCapture(str(vid_path))
self.vid_path = vid_path
# ffprobe reports avg_frame_rate as a fraction string like "30000/1001";
# eval turns it into a float.
self.fr = eval(ffmpeg.probe(vid_path)["streams"][0]["avg_frame_rate"])
if fr is not None:
# Re-encode to the requested frame rate and track the new file.
self.fr = fr
self.vid_path = self._fix_framerate(vid_path, fr, overwrite)
def show_frame(self, i):
plt.imshow(self[i])
plt.show()
def _fix_framerate(self, vid_path, fr, overwrite):
"""
Fixes each video in the list of video paths to a certain frame rate.
"""
output_path = str(vid_path) if overwrite else str(vid_path.parent/f'{vid_path.stem}_fixed_{fr}.mp4')
stream = ffmpeg.input(vid_path)
stream = ffmpeg.output(stream, output_path, r = fr)
stream = ffmpeg.overwrite_output(stream)
out, err = ffmpeg.run(stream)
return Path(output_path)
def __len__(self):
# Total number of frames as reported by OpenCV.
return int(self.video.get(cv2.CAP_PROP_FRAME_COUNT))
def __getitem__(self, i):
if i >= len(self) or i < 0:
raise Exception(f'Frame index is not in the proper range (0, {len(self) - 1}).')
self.video.set(cv2.CAP_PROP_POS_FRAMES, i)
suc, frame = self.video.read()
if not suc: return None
# NOTE(review): cv2 frames are BGR while Image.fromarray assumes RGB, so
# the returned image has swapped channels — confirm whether downstream
# code expects this.
return Image.fromarray(frame)
vid_path = Path("<path>"); vid_path
vid = Video(vid_path)
```
View a single frame of a video
```
vid.show_frame(80)
```
Get the number of frames in a video
len(vid)
```
# export
class VideoDataset:
"""Collection of Video objects indexed by their directory layout.
(Notebook dump has stripped indentation; code kept byte-identical.)"""
def __init__(self, videos):
self.videos = videos
self.labels = None
self.data = None
def label_from_paths(self):
# Build a nested mapping from path components — based on the example
# usage below, presumably app -> bug -> user directories; confirm
# against the dataset layout.
self.labels = defaultdict(
lambda: defaultdict(dict)
)
for vid in self.videos:
self.labels[vid.vid_path.parent.parent.name][vid.vid_path.parent.name][vid.vid_path.parent.parent.parent.name] = vid
return self
def get_labels(self):
return list(self.labels.keys())
@staticmethod
def from_path(path, extract_frames = False, fr = None, overwrite = False):
# NOTE(review): the extract_frames parameter is never used here.
videos = []
# Prefer already re-encoded "*fixed_<fr>.mp4" files to avoid re-encoding.
fixed_vid_paths = sorted(path.rglob(f"*fixed_{fr}.mp4"))
if len(fixed_vid_paths) > 0:
for vid_path in fixed_vid_paths:
videos.append(Video(vid_path, overwrite = overwrite))
else:
vid_paths = sorted(path.rglob('*.mp4'))
for vid_path in vid_paths:
videos.append(Video(vid_path, fr = fr, overwrite = overwrite))
return VideoDataset(videos)
def __getitem__(self, label):
return self.labels[label]
vid_ds = VideoDataset.from_path(vid_path).label_from_paths()
```
Get all apps contained in the dataset
```
vid_ds.get_labels()
```
Access all videos that belong to a particular bug
```
vid_ds['app']['bug']
# export
def get_rico_imgs(path, n = None):
    """Load ``n`` randomly sampled RICO screenshots from ``path`` (all if n is None)."""
    rico_path = path/'rico-images/data'
    img_paths = sorted(rico_path.glob('*.jpg'))
    # Fix: identity comparison for None (PEP 8), not equality.
    if n is None: n = len(img_paths)
    return [Image.open(img) for img in random.sample(img_paths, n)]
# export
def read_video_data(file_path):
    """Parse the user-assignment CSV (';'-delimited) into lookup structures.

    Returns a dict with:
        idx_data: app -> bug -> list of (user, "<app_bug>-<user>") tuples,
        apps / bugs / app_bugs / users: the distinct values seen,
        data: the raw rows from csv.DictReader.
    """
    with open(file_path) as fh:
        rows = list(csv.DictReader(fh, delimiter=';'))
    apps, bugs, app_bugs, users = set(), set(), set(), set()
    idx_data = {}
    for row in rows:
        app, bug = row['app'], row['bug']
        app_bug, user = row['app_bug'], row['final_assignment']
        apps.add(app)
        bugs.add(bug)
        app_bugs.add(app_bug)
        users.add(user)
        # setdefault collapses the original's two-step nested-dict creation.
        idx_data.setdefault(app, {}).setdefault(bug, []).append(
            (user, f"{app_bug}-{user}")
        )
    return {
        'idx_data': idx_data,
        'apps': apps,
        'bugs': bugs,
        'app_bugs': app_bugs,
        'users': users,
        'data': rows,
    }
def get_non_duplicate_corpus(bugs, bug_idx, app_bugs, br_idx, bugs_to_exclude=None):
    """Return the non-duplicate corpus for one query bug report.

    Takes every bug except ``bugs[bug_idx]`` — rotated so the bugs after the
    query come first, preserving the original per-branch ordering — skips any
    bug in ``bugs_to_exclude``, and returns the bug-report id
    (``"<app_bug>-<user>"``) of reporter ``br_idx`` for each remaining bug.

    Args:
        bugs: list of the 10 bug ids for one app.
        bug_idx: index of the query bug within ``bugs``.
        app_bugs: mapping bug id -> list of (user, bug_report_id) tuples.
        br_idx: which reporter's entry to take for every other bug.
        bugs_to_exclude: bug ids to leave out of the corpus (default: none).
    """
    # Fix: avoid a mutable default argument ([]) in the signature.
    if bugs_to_exclude is None:
        bugs_to_exclude = []
    # One rotation covers the original's three first/last/middle branches.
    other_bugs = bugs[bug_idx + 1:] + bugs[:bug_idx]
    assert len(other_bugs) == 9, "The list of non-duplicate bugs is different than 9"
    return [app_bugs[bug][br_idx][1] for bug in other_bugs if bug not in bugs_to_exclude]
def generate_setting2(data, out_path):
"""Write evaluation "setting 2" retrieval runs to out_path/setting2.json.
For every (app, query bug, other bug, reporter) combination, builds a query
plus duplicate / non-duplicate corpora. (Notebook dump has stripped
indentation; code kept byte-identical.)"""
Path(out_path).mkdir(parents=True, exist_ok=True)
apps = data['apps']
idx_data = data['idx_data']
retrieval_runs = []
run_id = 1
# for each app
for app in apps:
app_data = idx_data[app]
pprint.pprint(app_data)
# for each bug
bugs = list(idx_data[app].keys())
for bug_idx in range(len(bugs)):
bug = bugs[bug_idx]
bug_reports = idx_data[app][bug]
# for each other bug
other_bugs_idxes = [i for i in range(len(bugs)) if i != bug_idx]
for bug_idx2 in other_bugs_idxes:
next_bug = bugs[bug_idx2]
next_bug_reports = idx_data[app][next_bug]
# for each bug report
for br_idx in range(3):
# Query is one report; the other two reports of the same bug are the
# ground-truth duplicates, padded with the next bug's reports.
query = bug_reports[br_idx][1]
duplicate_corpus = [bug_reports[(br_idx + 1) % 3][1], bug_reports[(br_idx + 2) % 3][1]]
ground_truth = duplicate_corpus.copy()
duplicate_corpus.extend([l[1] for l in next_bug_reports])
# for each user
for br_idx2 in range(3):
# get the non-duplicate corpus for each user and all other bugs except current one
non_duplicate_corpus = get_non_duplicate_corpus(bugs, bug_idx, app_data, br_idx2, [next_bug])
# NOTE(review): the three jobs of this br_idx share the same
# duplicate_corpus/ground_truth list objects (aliasing).
retrieval_job = {
'run_id': run_id,
'query': query,
'corpus_size': len(duplicate_corpus) + len(non_duplicate_corpus),
'dup_corpus': duplicate_corpus,
'non_dup_corpus': non_duplicate_corpus,
'gnd_trh': ground_truth
}
run_id += 1
retrieval_runs.append(retrieval_job)
write_json_line_by_line(retrieval_runs, out_path/'setting2.json')
path = Path('/tf/data/')
csv_file_path = path/'artifacts/user_assignment.csv'
video_data = read_video_data(csv_file_path)
generate_setting2(video_data, path/'outputs/evaluation_settings')
# export
# Extract on-screen text from every video in `vid_ds` at `fps` frames/second
# and write one JSON-lines file of per-frame OCR results per video.
# NOTE(review): indentation was stripped by the notebook export; nesting is
# inferred from the control-flow statements.
def get_all_texts(vid_ds, out_path, fps):
Path(out_path).mkdir(parents=True, exist_ok=True)
video_output_path = os.path.join(out_path, "text_" + str(fps))
Path(video_output_path).mkdir(parents=True, exist_ok=True)
# assumes vid_ds exposes a `.videos` iterable of objects with `.vid_path`
videos = [vid.vid_path for vid in vid_ds.videos]
for video_path in videos:
video_path_obj = Path(video_path)
file_name = ntpath.basename(video_path).split(".")[0]
# Disambiguate the file name with a grand-grandparent directory stem.
video_name = file_name + "-" + str(video_path_obj.parent.parent.parent.stem)
frame_path = os.path.join(out_path, "frames_" + str(fps), video_name)
Path(frame_path).mkdir(parents=True, exist_ok=True)
# Reuse previously extracted frames when present.
frames = find_file("*.jpeg", frame_path)
if not frames:
extract_frames(video_path_obj, frame_path, fps)
frames = find_file("*.jpeg", frame_path)
frames_text = []
# OCR each frame concurrently; process_frame presumably returns a dict
# keyed by "f" (frame id) -- TODO confirm.
with concurrent.futures.ThreadPoolExecutor(max_workers=12) as executor:
futures = []
for frame in frames:
futures.append(executor.submit(process_frame, frame))
for future in concurrent.futures.as_completed(futures):
frames_text.append(future.result())
# Restore frame order (as_completed yields results out of order).
frames_text = sorted(frames_text, key=lambda t: t["f"])
video_name = video_name.replace("_fixed_30", "")
out_file = os.path.join(video_output_path, video_name + '.json')
write_json_line_by_line(frames_text, out_file)
print("done: " + video_name)
# Driver: extract on-screen text for the reproduction-package videos at 1 FPS.
path = Path('/tf/main/tango_reproduction_package')
vid_path = path/'artifacts/videos'
out_path = path/'artifacts/extracted_text'
# NOTE(review): get_all_texts accesses `vid_ds.videos`, but a plain Path is
# passed here -- confirm whether a video-dataset object should be built first.
get_all_texts(vid_path, out_path, 1)
from nbdev.export import notebook2script
notebook2script()
```
| github_jupyter |
```
%matplotlib inline
%load_ext autoreload
%autoreload 2
import os
import io
import sys
import json
import numpy as np
import astropy.units as u
from astropy import wcs
from astropy.io import fits
from astropy.table import Table
from astropy.coordinates import SkyCoord
from astropy.visualization import make_lupton_rgb
from astropy.utils.data import download_file, clear_download_cache
import matplotlib.pyplot as plt
```
## Response curves of HSC filters
* For details of HSC filters, please see [this page at NAOJ](https://www.subarutelescope.org/Observing/Instruments/HSC/sensitivity.html)
* We replace the r-band filter to new one (HSC-r2) from Jul 28th 2016 (HST). We mainly used the previous r-band filter (HSC-r) till Jul 11th 2016 (HST). Please see "FILTER01" at the FITS header for checking which filter was used.
* We replace the i-band filter to new one (HSC-i2) from Feb 2016. We mainly used the previous i-band filter (HSC-i) till Jan 2016. Please see "FILTER01" at the FITS header for checking which filter was used.
* When you estimate a throughput of the Y band filter, you should consider the quantum efficiency of FDCCD, which is very low at >10000 Å.
```
from unagi import filters
from unagi import camera
# Where we save the filters
filters.FILTER_DIR
```
### Basic properties of a HSC filter
```
# Load the HSC i2 filter and inspect its basic properties.
iband = filters.Filter('i2')
iband.print()
# Plot the transmission curves
_ = iband.plot()
```
### Get a summary table for all filters
```
# Build summary tables of the filter curves (use_saved=False forces a recompute
# instead of loading cached tables).
# Total transmission curves
hsc_filter_total = filters.hsc_filters(use_saved=False)
# Just the filter; at the center of the camera
hsc_filter_origin_cen = filters.hsc_filters(origin=True, center=True, use_saved=False)
# Just the filter; area weighted
hsc_filter_origin_wgt = filters.hsc_filters(origin=True, center=False, use_saved=False)
```
### Convert the transmission curve into the Kcorrect format
* This is used by several softwares: e.g. `kcorrect`, `iSEDFit`, `sedpy`
```
iband.to_kfilter()
```
### Solar spectrum and the absolute magnitude of the Sun
* By default, we are using the composite Solar spectrum from [Haberreiter et al. 2017](https://agupubs.onlinelibrary.wiley.com/doi/full/10.1002/2016JA023492)
- It is used in [Willmer 2018](https://iopscience.iop.org/article/10.3847/1538-4365/aabfdf)
- More details can be found [here](http://mips.as.arizona.edu/~cnaw/sun.html)
* We also have the "classic" Kurucz1993 model spectrum
```
# Compare the two Solar reference spectra in log-flux over the HSC wavelength range.
k93 = filters.SolarSpectrum(kind='Kurucz1993')  # "classic" Kurucz 1993 model
w18 = filters.SolarSpectrum()  # default: Haberreiter et al. 2017 composite
fig = plt.figure(figsize=(12, 6))
ax1 = fig.add_subplot(111)
ax1.grid(linestyle='--', linewidth=2, alpha=0.8)
ax1.plot(k93.wave, np.log10(k93.flux), label=r'$\rm K93$')
ax1.plot(w18.wave, np.log10(w18.flux), label=r'$\rm W18$')
ax1.legend(loc='best', fontsize=20)
_ = ax1.set_xlim(3000, 12500)
_ = ax1.set_ylim(-10.99, -10.21)
_ = ax1.set_xlabel(r'$\mathrm{Wavelength}\ [\AA]$')
_ = ax1.set_ylabel(r'$\log\ (\mathrm{Flux}/\mathrm{erg\ s^{-1}\ cm^{-2}\ }\AA)$')
```
### Basic properties of the HSC camera
```
# Plot the HSC camera throughput components (CCD QE, mirror reflectivity,
# dewar window, prime-focus unit) together with all the filter curves from
# `hsc_filter_total` (defined in an earlier cell).
hsc_cam = camera.Camera()
fig = plt.figure(figsize=(14, 6))
ax1 = fig.add_subplot(111)
ax1.grid(linestyle='--', linewidth=2, alpha=0.8)
# Show the CCD QE
ax1.plot(hsc_cam.qe[:, 0], hsc_cam.qe[:, 1], linewidth=3.0, alpha=0.6, color='b',
         label=r'$\rm CCD$')
# Show the primary mirror reflectivity
ax1.plot(hsc_cam.primary_reflect[:, 0], hsc_cam.primary_reflect[:, 1],
         linewidth=3.0, alpha=0.6, color='k', linestyle='--',
         label=r'$\rm Mirror$')
# Show the transparency of the dewar window
ax1.plot(hsc_cam.dewar[:, 0], hsc_cam.dewar[:, 1],
         linewidth=2.5, alpha=0.6, color='gray', linestyle='-.',
         label=r'$\rm Dewar$')
# Show the transparency of the PFU: prime focus unit
ax1.plot(hsc_cam.popt2[:, 0], hsc_cam.popt2[:, 1],
         linewidth=2.5, alpha=0.6, color='orangered', linestyle=':',
         label=r'$\rm PFU$')
# Zero baseline -- drawn once; the original redrew the identical line on
# every iteration of the filter loop below.
ax1.axhline(0.0, linewidth=2, color='k', alpha=0.8)
# Overplot all the filters; the retired r/i filters are de-emphasized
# relative to their r2/i2 replacements (see the notes above).
for filt in hsc_filter_total:
    if filt['short'] == 'r' or filt['short'] == 'i':
        linestyle, alpha = '--', 0.25
    else:
        linestyle, alpha = '-', 0.4
    # Fill the transmission curve
    ax1.fill_between(filt['wave'], 0.0, filt['trans'], edgecolor='k',
                     alpha=alpha, linewidth=2.5, linestyle=linestyle,
                     facecolor=filt['color'],
                     label=r'$\rm {0}$'.format(filt['short']))
ax1.legend(loc='best', fontsize=14.5)
_ = ax1.set_xlim(3750, 11950)
_ = ax1.set_xlabel(r'$\mathrm{Wavelength}\ [\AA]$')
_ = ax1.set_ylabel(r'$\mathrm{Transmission}$')
```
| github_jupyter |
# Logistic Regression Example
A logistic regression learning algorithm example using TensorFlow library.
- Author: Aymeric Damien
- Project: https://github.com/aymericdamien/TensorFlow-Examples/
## MNIST Dataset Overview
This example is using MNIST handwritten digits. The dataset contains 60,000 examples for training and 10,000 examples for testing. The digits have been size-normalized and centered in a fixed-size image (28x28 pixels) with values from 0 to 1. For simplicity, each image has been flattened and converted to a 1-D numpy array of 784 features (28*28).

More info: http://yann.lecun.com/exdb/mnist/
```
# TensorFlow 1.x softmax (logistic) regression on MNIST.
# NOTE(review): uses Python 2 print statements and the removed
# tensorflow.examples tutorial module -- requires a TF1/py2 environment.
import tensorflow as tf
# Import MINST data
from tensorflow.examples.tutorials.mnist import input_data
mnist = input_data.read_data_sets("/tmp/data/", one_hot=True)
# Parameters
learning_rate = 0.01
training_epochs = 25
batch_size = 100
display_step = 1  # log every epoch
# tf Graph Input
x = tf.placeholder(tf.float32, [None, 784]) # mnist data image of shape 28*28=784
y = tf.placeholder(tf.float32, [None, 10]) # 0-9 digits recognition => 10 classes
# Set model weights
W = tf.Variable(tf.zeros([784, 10]))
b = tf.Variable(tf.zeros([10]))
# Construct model
pred = tf.nn.softmax(tf.matmul(x, W) + b) # Softmax
# Minimize error using cross entropy
# NOTE(review): log(pred) is numerically unsafe if pred hits 0; a fused
# softmax-cross-entropy op would be more robust -- confirm before changing.
cost = tf.reduce_mean(-tf.reduce_sum(y*tf.log(pred), reduction_indices=1))
# Gradient Descent
optimizer = tf.train.GradientDescentOptimizer(learning_rate).minimize(cost)
# Initialize the variables (i.e. assign their default value)
init = tf.global_variables_initializer()
# Start training
with tf.Session() as sess:
sess.run(init)
# Training cycle
for epoch in range(training_epochs):
avg_cost = 0.
total_batch = int(mnist.train.num_examples/batch_size)
# Loop over all batches
for i in range(total_batch):
batch_xs, batch_ys = mnist.train.next_batch(batch_size)
# Fit training using batch data
_, c = sess.run([optimizer, cost], feed_dict={x: batch_xs,
y: batch_ys})
# Compute average loss
avg_cost += c / total_batch
# Display logs per epoch step
if (epoch+1) % display_step == 0:
print "Epoch:", '%04d' % (epoch+1), "cost=", "{:.9f}".format(avg_cost)
print "Optimization Finished!"
# Test model
correct_prediction = tf.equal(tf.argmax(pred, 1), tf.argmax(y, 1))
# Calculate accuracy for 3000 examples
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
print "Accuracy:", accuracy.eval({x: mnist.test.images[:3000], y: mnist.test.labels[:3000]})
```
| github_jupyter |
# Equivalent layer technique for estimating total magnetization direction : Iteration process and L-curve application
Notebook to perform the inversion process. The L-curve is used to select the regularization parameter.
## Importing libraries
```
% matplotlib inline
import sys
import numpy as np
import matplotlib.pyplot as plt
import cPickle as pickle
import datetime
import timeit
import string as st
from fatiando.gridder import regular
notebook_name = 'airborne_EQL_magdirection_RM_calculation.ipynb'
```
## Plot style
```
plt.style.use('ggplot')
```
## Importing my package
```
dir_modules = '../../../mypackage'
sys.path.append(dir_modules)
import auxiliary_functions as fc
```
## Loading the model
```
with open('data/model_multi.pickle') as f:
model_multi = pickle.load(f)
```
## Loading observation points
```
with open('data/airborne_survey.pickle') as f:
airborne = pickle.load(f)
```
## Loading data set
```
with open('data/data_set.pickle') as f:
data = pickle.load(f)
```
## Open a dictionary
```
result_RM_airb = dict()
```
## List of saved files
```
saved_files = []
```
## Observation area
```
print 'Area limits: \n x_max = %.1f m \n x_min = %.1f m \n y_max = %.1f m \n y_min = %.1f m' % (airborne['area'][1],
airborne['area'][0],
airborne['area'][3],
airborne['area'][2])
```
## Airborne survey information
```
print 'Shape : (%.0f,%.0f)'% airborne['shape']
print 'Number of data: %.1f' % airborne['N']
print 'dx: %.1f m' % airborne['dx']
print 'dy: %.1f m ' % airborne['dy']
```
## Properties of the model
### Main field
```
inc_gf,dec_gf = model_multi['main_field']
print'Main field inclination: %.1f degree' % inc_gf
print'Main field declination: %.1f degree' % dec_gf
```
### Magnetization direction
```
print 'Inclination: %.1f degree' % model_multi['inc_R']
print 'Declination: %.1f degree' % model_multi['dec_R']
inc_R,dec_R = model_multi['inc_R'],model_multi['dec_R']
```
## Generating the layer
### Layer depth
```
h = 1150.
```
### Generating the equivalent sources coordinates
```
shape_layer = (airborne['shape'][0],airborne['shape'][1])
xs,ys,zs = regular(airborne['area'],shape_layer,h)
```
## Iteration process : LM-NNLS for positive magnetic-moment distribution
```
# Sweep the regularization parameter mu and, for each value, run the
# LM-NNLS inversion and record the residual and model norms for the L-curve.
i_pos = 1250   # number of positive magnetic moments (assumption -- confirm with fc.LM_NNLS)
it_max = 30    # max outer iterations
it_marq = 15   # max Marquardt iterations
lamb = 10.     # initial Marquardt parameter
dlamb = 100.   # Marquardt parameter step
eps_e = 1e-4   # convergence tolerances
eps_i = 1e-4
mu_list = [1e2,1e3,1e4,1e5,3.5*1e5,5*1e5,1e6,2*1e6]
mu_norm = []   # NOTE(review): never filled or read below -- dead variable?
norm_r = []    # residual norms (L-curve x-axis)
norm_m = []    # model norms (L-curve y-axis)
m_est = []
incl_est = []
decl_est = []
phi_list = []
for i in mu_list:
m_LM,inc_est,dec_est,phi,imax,pest,incs,decs = fc.LM_NNLS(
data['tfa_obs_RM_airb'],airborne['x'],airborne['y'],
airborne['z'],xs,ys,zs,inc_gf,dec_gf,-10.,-10.,lamb,dlamb,i_pos,it_max,
it_marq,eps_e,eps_i,i)
# Rebuild the sensitivity matrix at the estimated direction and compute
# the predicted anomaly to evaluate the data residual.
G = fc.sensitivity_mag(airborne['x'],airborne['y'],airborne['z'],
xs,ys,zs,inc_gf,dec_gf,inc_est,dec_est)
tfpred = np.dot(G,m_LM)
r = data['tfa_obs_RM_airb'] - tfpred
norm_r.append(np.sqrt(np.sum(r*r)))
norm_m.append(np.sqrt(np.sum(m_LM*m_LM)))
m_est.append(m_LM)
incl_est.append(inc_est)
decl_est.append(dec_est)
phi_list.append(phi)
```
## L-curve visualization
```
# Plot the L-curve (residual norm vs. model norm, log-log) and save it.
title_font = 20
bottom_font = 18
saturation_factor = 1.
plt.close('all')
# Fix: the original called plt.figure twice back-to-back, creating an empty
# stray figure; create a single figure with the intended size.
plt.figure(figsize=(10, 10), tight_layout=True)
plt.loglog(norm_r, norm_m, 'b-')
plt.title('L-curve', fontsize=title_font)
plt.xlabel('r_norm', fontsize=title_font)
plt.ylabel('m_norm', fontsize=title_font)
plt.tick_params(axis='both', which='major', labelsize=15)
file_name = 'figs/airborne/Lcurve_RM'
plt.savefig(file_name+'.png', dpi=300)
saved_files.append(file_name+'.png')
plt.show()
```
### Results
```
result_RM_airb['magnetic_moment'] = m_est
result_RM_airb['inc_est'] = incl_est
result_RM_airb['dec_est'] = decl_est
result_RM_airb['layer_depth'] = h
result_RM_airb['reg_parameter'] = mu_list
result_RM_airb['phi'] = phi_list
```
### Generating .pickle file
```
now = datetime.datetime.utcnow().strftime('%d %B %Y %H:%M:%S UTC')
result_RM_airb['metadata'] = 'Generated by {name} on {date}'.format(date=now, name=notebook_name)
file_name = 'data/result_RM_airb.pickle'
with open(file_name, 'w') as f:
pickle.dump(result_RM_airb, f)
saved_files.append(file_name)
```
### Saved files
```
with open('reports/report_%s.md' % notebook_name[:st.index(notebook_name, '.')], 'w') as q:
q.write('# Saved files \n')
now = datetime.datetime.utcnow().strftime('%d %B %Y %H:%M:%S UTC')
header = 'Generated by {name} on {date}'.format(date=now, name=notebook_name)
q.write('\n\n'+header+'\n\n')
for i, sf in enumerate(saved_files):
print '%d %s' % (i+1,sf)
q.write('* `%s` \n' % (sf))
```
| github_jupyter |
# 1. Import necessary packages and set path of files
```
import seaborn as sns
import numpy as np
import pandas as pd
from sklearn.neighbors import KNeighborsRegressor
from sklearn.metrics import mean_squared_error, accuracy_score, mean_absolute_error
import matplotlib.pyplot as plt
from sklearn.ensemble import RandomForestRegressor, ExtraTreesRegressor
from sklearn.model_selection import GridSearchCV, train_test_split
from sklearn.neural_network import MLPRegressor
from sklearn.svm import SVR
%matplotlib inline
%pwd
path = 'D:\\Python\\DataScience\\DataScience6105\\Project'
```
# 2. Preprocess data
### Read data from trains.csv, features.csv, stores.csv and merge them into dataTrain variable.
```
# Load training sales, store metadata, and weekly features; left-join them
# so every training row carries its store and feature columns.
dataTrain = pd.read_csv(f'{path}/train.csv', header=0, names=['Store','Dept','Date','weeklySales','isHoliday'], sep=',')
# Drop the duplicate IsHoliday column (already present in train.csv).
features = pd.read_csv(f'{path}/features.csv', header=0, names=['Store','Date','Temperature','Fuel_Price','MarkDown1','MarkDown2','MarkDown3','MarkDown4','MarkDown5','CPI','Unemployment','IsHoliday'], sep=',').drop(columns=['IsHoliday'])
stores = pd.read_csv(f'{path}/stores.csv', header=0, names=['Store','Type','Size'], sep=',')
dataTrain = dataTrain.merge(stores, how='left').merge(features, how='left')
print(dataTrain.shape)
dataTrain.head(5)
```
### Fill NAN in dataTrain
```
# One-hot encode the store type and zero-fill missing markdowns.
dataTrain = pd.get_dummies(dataTrain, columns=['Type'])
dataTrain[['MarkDown1','MarkDown2','MarkDown3','MarkDown4', 'MarkDown5']] = dataTrain[['MarkDown1','MarkDown2','MarkDown3','MarkDown4','MarkDown5']].fillna(0)
# NOTE(review): this blanket fillna(0) makes the MarkDown-specific fill above
# redundant -- kept for parity with the test-set preprocessing.
dataTrain = dataTrain.fillna(0)
# Derive the month as a numeric feature, then drop unused columns.
dataTrain['Month'] = pd.to_datetime(dataTrain['Date']).dt.month
dataTrain = dataTrain.drop(columns=["Date", "CPI", "Fuel_Price", 'Unemployment', 'MarkDown3'])
print(dataTrain.shape)
dataTrain.head(5)
```
### Read data from test.csv and merge data into dataTest variable. Fill NAN in dataTest.
```
# Load and preprocess the test set with the same pipeline as the training set.
dataTest = pd.read_csv(f'{path}/test.csv', header=0, names=['Store','Dept','Date','isHoliday'], sep=',')
dataTest = dataTest.merge(stores, how='left').merge(features, how='left')
dataTest = pd.get_dummies(dataTest, columns=['Type'])
dataTest[['MarkDown1','MarkDown2','MarkDown3','MarkDown4', 'MarkDown5']] = dataTest[['MarkDown1','MarkDown2','MarkDown3','MarkDown4','MarkDown5']].fillna(0)
dataTest = dataTest.fillna(0)
# Keep the raw dates around for building the submission Id later.
columnDate = dataTest['Date']
dataTest['Month'] = pd.to_datetime(dataTest['Date']).dt.month
dataTest = dataTest.drop(columns=["Date", "CPI", "Fuel_Price", 'Unemployment', 'MarkDown3'])
print(dataTest.shape)
dataTest.head(5)
```
# 3. Train and predict by different models
### Split dataTrain into train_x, train_y and try to use knn at first.
```
# Split dataTrain into features/target and fit a first KNN baseline.
train_x = dataTrain.drop(columns=['weeklySales'])
train_y = dataTrain['weeklySales']
knn = KNeighborsRegressor(weights='uniform', n_neighbors=10)
knn.fit(train_x, train_y)
# Fix: the original predicted on an undefined `test_x` (NameError).
# Predict on the prepared test set, consistent with the later model cells.
predict_y = knn.predict(dataTest)
predict_y = pd.Series(predict_y)
print(predict_y)
```
### Split train_x, train_y into small size. Prepare to use train_x_split, train_y_split to optimize hyperparameters.
```
x_split, train_x_split, y_split, train_y_split = train_test_split(train_x, train_y, test_size=0.024, random_state=0)
print(train_x_split.shape, train_y_split.shape, x_split.shape, y_split.shape)
```
### Split train_x, train_y into train part and valid part. Prepare to use valid part to compare predict_y with valid_y and achieve errors.
```
train_x, valid_x, train_y, valid_y = train_test_split(train_x, train_y, test_size=0.24, random_state=0)
print(valid_x.shape, valid_y.shape)
```
## (1) KNN model
### Tune n_neighbors, weights, algorithm, leaf_size hyperparameters. Use GridSearchCV with 5 cross validation. During this process, use mean_absolute_error as a measure parameter.
### Find best estimator and best score in KNN model.
```
# Grid-search KNN hyperparameters (5-fold CV, scored by negative MAE) on the
# small split to keep the search tractable.
knn = KNeighborsRegressor()
k_range = list(range(1,10))
leaf_range = list(range(1,2))  # NOTE(review): a single value -- leaf_size is effectively fixed at 1
weight_options = ['uniform','distance']
algorithm_options = ['auto','ball_tree','kd_tree','brute']
paramGridKnn = dict(n_neighbors = k_range,weights = weight_options,algorithm=algorithm_options,leaf_size=leaf_range)
# NOTE(review): %time on its own line times nothing; %%time at the cell top
# would time the whole search.
%time
gridKnn = GridSearchCV(knn,paramGridKnn,cv=5,verbose=1,n_jobs=-1,scoring='neg_mean_absolute_error')
gridKnn.fit(train_x_split, train_y_split)
print(gridKnn.best_estimator_)
print(gridKnn.best_score_)
```
### Use best estimator of KNN and train data(train_x, train_y) to fit the KNN model.
```
knn = KNeighborsRegressor(leaf_size=1, n_jobs=-1, n_neighbors=9, weights='uniform')
knn.fit(train_x, train_y)
print(train_x.shape, train_y.shape)
```
### Predict pred_y by valid data(valid_x) and compare it with real data(valid_y).
### Calculate mean absolute error and mean squared error.
```
pred_y = pd.Series(knn.predict(valid_x))
print(pred_y)
print(mean_absolute_error(valid_y, pred_y))
print(mean_squared_error(valid_y, pred_y))
```
### Predict y value using test data and get output.
```
# Predict on the test set and assemble the submission-style output.
predict_y = pd.Series(knn.predict(dataTest))
# Fix: `dataOutput = dataTest` aliased the test frame, so adding the
# WeeklySales/Date/Id columns mutated dataTest and broke the feature set
# used by the later rf/et predict calls; work on a copy instead.
dataOutput = dataTest.copy()
dataOutput['WeeklySales'] = predict_y
dataOutput['Date'] = columnDate
dataOutput['Id'] = dataOutput['Store'].astype(str) + '_' + dataOutput['Dept'].astype(str) + '_' + dataOutput['Date'].astype(str)
dataOutput = dataOutput[['Id', 'WeeklySales']]
print(dataOutput)
```
# (2) RandomForest model
### Tune n_estimators, criterion, min_samples_leaf hyperparameters. Use GridSearchCV with 5 cross validation. During this process, use mean_absolute_error as a measure parameter.
### Find best estimator and best score in RandomForest model.
```
rf = RandomForestRegressor()
n_range = list(range(10, 31, 5))
criterion_options = ['mse','mae']
min_samples_leaf_options = list(range(2,7,2))
paramGridRf = dict(n_estimators=n_range, criterion=criterion_options, min_samples_leaf=min_samples_leaf_options)
%time
gridRf = GridSearchCV(rf,paramGridRf,cv=5,verbose=1,n_jobs=-1,scoring='neg_mean_absolute_error')
gridRf.fit(train_x_split, train_y_split)
print(gridRf.best_estimator_)
print(gridRf.best_score_)
```
### Use best estimator of RandomForest and train data(train_x, train_y) to fit the RandomForest model.
```
rf = RandomForestRegressor(n_estimators=30, criterion='mae',min_samples_leaf=2)
rf.fit(train_x, train_y)
```
### Predict pred_y by valid data(valid_x) and compare it with real data(valid_y).
### Calculate mean absolute error and mean squared error.
```
pred_y = pd.Series(rf.predict(valid_x))
print(pred_y)
print(mean_absolute_error(valid_y, pred_y))
print(mean_squared_error(valid_y,pred_y))
```
### Predict y value using test data and get output.
```
# Predict on the test set with the random forest and build the output table.
predict_y = pd.Series(rf.predict(dataTest))
# Fix: copy dataTest so the added columns do not mutate the shared test
# frame used by the other models' predict calls.
dataOutput = dataTest.copy()
dataOutput['WeeklySales'] = predict_y
dataOutput['Date'] = columnDate
dataOutput['Id'] = dataOutput['Store'].astype(str) + '_' + dataOutput['Dept'].astype(str) + '_' + dataOutput['Date'].astype(str)
dataOutput = dataOutput[['Id', 'WeeklySales']]
print(dataOutput)
```
# (3) ExtraTrees Model
### Use hyperparameters setting manually. Use train data(train_x, train_y) to fit the ExtraTrees model.
```
%time
et = ExtraTreesRegressor(n_estimators=100,verbose=1,n_jobs=-1)
et.fit(train_x, train_y)
```
### Predict pred_y by valid data(valid_x) and compare it with real data(valid_y).
### Calculate mean absolute error and mean squared error.
```
pred_y = pd.Series(et.predict(valid_x))
print(pred_y)
print(mean_absolute_error(valid_y, pred_y))
print(mean_squared_error(valid_y,pred_y))
```
### Predict y value using test data and get output.
```
# Predict on the test set with ExtraTrees and build the output table.
predict_y = pd.Series(et.predict(dataTest))
# Fix: copy dataTest so the added columns do not mutate the shared test frame.
dataOutput = dataTest.copy()
dataOutput['WeeklySales'] = predict_y
dataOutput['Date'] = columnDate
dataOutput['Id'] = dataOutput['Store'].astype(str) + '_' + dataOutput['Dept'].astype(str) + '_' + dataOutput['Date'].astype(str)
dataOutput = dataOutput[['Id', 'WeeklySales']]
print(dataOutput)
```
# 4. Compare errors and present them in pictures
### Mean absolute error comparison
```
# Bar chart comparing mean absolute error across the three models.
# (Values are the MAEs printed by the evaluation cells above.)
x_data = ['KNN', 'RandomForest', 'ExtraTrees']
y_data = [10144.274310773308, 5262.778810473657, 2096.0216748119633]
# Removed the unused MSE list that was duplicated into this cell.
plt.bar(x=x_data, height=y_data, width=0.5, color='steelblue', alpha=0.8)
for x, y in enumerate(y_data):
    # Annotate each bar with its exact value.
    plt.text(x, y + 100, '%s' % y, ha='center', va='bottom')
plt.title('mean_absolute_error comparison')
plt.xlabel('model name')
plt.ylabel('error value')
plt.show()
```
### Mean squared error comparison
```
# Bar chart comparing mean squared error across the three models.
x_data = ['KNN', 'RandomForest', 'ExtraTrees']
y_data_2 = [296059497.6182222, 110148527.76437865, 29008643.0901295]
# Removed the unused MAE list that was duplicated into this cell.
plt.bar(x=x_data, height=y_data_2, width=0.5, color='steelblue', alpha=0.8)
for x, y in enumerate(y_data_2):
    # Annotate each bar with its exact value.
    plt.text(x, y + 100, '%s' % y, ha='center', va='bottom')
plt.title('mean_squared_error comparison')
plt.xlabel('model name')
plt.ylabel('error value')
plt.show()
```
| github_jupyter |
```
import numpy as np
import pandas as pd
from matplotlib import pyplot as plt
# Daily-mean bitcoin price series, scaled to [0, 1], used to train a one-step
# LSTM forecaster; the last 30 days are held out for testing.
dframe = pd.read_csv('bitcoin.csv')
dframe.head(10)
# Timestamp is epoch seconds; collapse to a calendar date.
dframe['date'] = pd.to_datetime(dframe['Timestamp'],unit='s').dt.date
group = dframe.groupby('date')
Real_Price = group['Weighted_Price'].mean()
# Split the dataset so that we can take last 30 days data as test dataset
prediction_days = 30
dframe_train= Real_Price[:len(Real_Price)-prediction_days]
dframe_test= Real_Price[len(Real_Price)-prediction_days:]
# Data preprocessing
training_set = dframe_train.values
training_set = np.reshape(training_set, (len(training_set), 1))
#import sklearn package and use MinMaxScaler
from sklearn.preprocessing import MinMaxScaler
sc = MinMaxScaler()
training_set = sc.fit_transform(training_set)
# One-step-ahead pairs: X is today's price, y is tomorrow's.
X_train = training_set[0:len(training_set)-1]
y_train = training_set[1:len(training_set)]
# LSTM expects (samples, timesteps, features).
X_train = np.reshape(X_train, (len(X_train), 1, 1))
import sys; sys.path
# Importing the Keras libraries and relevant packages
from keras.models import Sequential
from keras.layers import Dense
from keras.layers import LSTM
import sys; sys.path
# Initialise the RNN model
regressor = Sequential()
# Add the input layer and the LSTM layer
regressor.add(LSTM(units = 4, activation = 'sigmoid', input_shape = (None, 1)))
# Add the output layer
regressor.add(Dense(units = 1))
# Compile the RNN
regressor.compile(optimizer = 'adam', loss = 'mean_squared_error')
# Fit the RNN to the Training set
regressor.fit(X_train, y_train, batch_size = 5, epochs = 100)
# Make the predictions
test_set = dframe_test.values
inputs = np.reshape(test_set, (len(test_set), 1))
# Reuse the scaler fitted on the training data, then invert after predicting.
inputs = sc.transform(inputs)
inputs = np.reshape(inputs, (len(inputs), 1, 1))
predicted_BTC_price = regressor.predict(inputs)
predicted_BTC_price = sc.inverse_transform(predicted_BTC_price)
# Visualize predicted vs. real prices over the 30-day test window.
plt.figure(figsize=(25,15), dpi=80, facecolor='w', edgecolor='k')
ax = plt.gca()
plt.plot(test_set, color = 'red', label = 'Real Bitcoin Price')
plt.plot(predicted_BTC_price, color = 'blue', label = 'Predicted Bitcoin Price')
plt.title('Bitcoin Price Prediction', fontsize=30)
# Fix: dframe_test is a date-indexed Series, so dframe_test['date'] would
# raise a KeyError; resetting the index (left commented out in the original)
# exposes the dates as a column for the tick labels.
dframe_test = dframe_test.reset_index()
x = dframe_test.index
labels = dframe_test['date']
plt.xticks(x, labels, rotation = 'vertical')
for tick in ax.xaxis.get_major_ticks():
    tick.label1.set_fontsize(18)
for tick in ax.yaxis.get_major_ticks():
    tick.label1.set_fontsize(18)
plt.xlabel('Time', fontsize=20)
plt.ylabel('BTC Price(USD)', fontsize=20)
plt.legend(loc=2, prop={'size': 25})
plt.show()
```
| github_jupyter |
# Open Chemistry JupyterLab - Multiple Calculations
Running multiple calculations can be useful when many results are needed. An example is given below.
```
import openchemistry as oc
import matplotlib.pyplot as plt
```
Start by creating a list of SMILES to perform the calculations on. Note that generating 3D structure is optional, and can be skipped for batch calculations such as these.
```
# SMILES strings for the batch; 3D coordinate generation is skipped
# (gen3d=False) since these structures are only used for batch calculations.
structure_smiles = [
    'CN1CNCN(C1)c1ccc(c2c1cccc2)C1NCNCN1',
    'SC1CSCC(S1)(S)c1cccnc1',
    'c1ccc(nc1)c1scc(c1)c1ccnc(c1)c1ccc2c(c1)cccc2',
]
# Import each structure into Open Chemistry.
mols = [oc.import_structure(s, gen3d=False) for s in structure_smiles]
```
When viewing a molecule that does not have 3D structure, a 2D SVG will be displayed.
```
mols[0].structure.show()
```
Multiple calculations can be ran in a single docker container if they are submitted using `oc.run_calculations()`
```
image_name = 'openchemistry/chemml:0.6.0'
input_parameters = {}
results = oc.run_calculations(mols, image_name, input_parameters)
```
All of the calculations will finish simultaneously, and their status may be tracked by requesting one of their outputs:
```
results[0].properties.show()
```
# Plotting the Results
Once the calculations are finished, their results can be obtained and plotted.
```
# Gather the predicted properties from every finished calculation and
# scatter-plot refractive index against polarizability.
x = [res.data()['properties']['polarizability'] for res in results]
y = [res.data()['properties']['refractiveIndex'] for res in results]
fig, ax = plt.subplots()
ax.plot(x, y, 'ro')
ax.set(xlabel='polarizability', ylabel='refractive index', title='Machine learning...')
ax.grid()
```
# Running a 3D Calculation on a 2D Molecule
If a 3D calculation is attempted on a 2D molecule, 3D coordinate generation will automatically start, and a message will alert the user to try again soon.
```
image_name = 'openchemistry/psi4:1.2.1'
input_parameters = {
'theory': 'dft',
'functional': 'b3lyp',
'basis': '6-31g'
}
result = mols[0].energy(image_name, input_parameters)
result.orbitals.show(mo='homo', iso=0.005)
```
Alternatively, 3D coordinates may also be generated manually. If this is done, the forcefield options may also be passed.
```
mols[1].structure.generate_3d(forcefield='uff', steps=150)
```
# Specifying the Type for Multiple Calculations
For typed calculations, the type can be specified by adding the appropriate "task" keyword to the input parameters.
```
input_parameters['task'] = 'energy'
results = oc.run_calculations(mols, image_name, input_parameters)
results[0].orbitals.show(mo='homo', iso=0.005)
```
| github_jupyter |
```
import numpy as np
import matplotlib.pyplot as plt
# Epsilon-greedy multi-armed bandit simulation: runs N_experiments independent
# experiments of N_episodes pulls each, then plots the average reward history
# and the per-bandit action-selection percentages.
# Fixes relative to the original cell:
#   * the `(action_history, reward_history) = experiment(...)` assignment was
#     split across two lines with no continuation (a SyntaxError) -- rejoined;
#   * `plt.show` at the end was referenced but never called -- now called;
#   * np.int / np.float (removed in NumPy 1.24) replaced by the builtins.
def main():
    # True payout probability of each arm (Bernoulli rewards).
    bandit_probs = [0.1, 0.5, 0.6, 0.8, 0.1,
                    0.25, 0.60, 0.45, 0.75,
                    0.65]
    N_experiments = 200
    N_episodes = 1000
    epsilon = 0.1          # exploration probability
    save_fig = False
    save_format = ".png"

    class Bandit:  # ENVIRONMENT
        def __init__(self, bandit_probs):
            self.N = len(bandit_probs)
            self.prob = bandit_probs

        def get_reward(self, action):
            # Bernoulli reward: 1 with the arm's probability, else 0.
            rand = np.random.random()
            reward = 1 if (rand < self.prob[action]) else 0
            return reward

    class Agent:
        def __init__(self, bandit, epsilon):
            self.epsilon = epsilon
            # Pull counts and incremental action-value estimates per arm.
            self.k = np.zeros(bandit.N, dtype=int)
            self.Q = np.zeros(bandit.N, dtype=float)

        def update_Q(self, action, reward):
            # Incremental sample-average update.
            self.k[action] += 1
            self.Q[action] += (1. / self.k[action]) * (reward - self.Q[action])

        def choose_action(self, bandit, force_explore=False):
            # Epsilon-greedy: explore with probability epsilon, else exploit.
            rand = np.random.random()
            if (rand < self.epsilon) or force_explore:
                action_explore = np.random.randint(bandit.N)
                return action_explore
            else:
                action_greedy = np.argmax(self.Q)
                return action_greedy

    def experiment(agent, bandit, N_episodes):
        # Run one experiment; return the per-episode actions and rewards.
        action_history = []
        reward_history = []
        for episode in range(N_episodes):
            action = agent.choose_action(bandit)
            reward = bandit.get_reward(action)
            agent.update_Q(action, reward)
            action_history.append(action)
            reward_history.append(reward)
        return (np.array(action_history), np.array(reward_history))

    N_bandits = len(bandit_probs)
    print("Running Multiarmed Bandits with N_bandits = {} and agent epsilon = {}".format(N_bandits, epsilon))
    reward_history_avg = np.zeros(N_episodes)
    action_history_sum = np.zeros((N_episodes, N_bandits))
    for i in range(N_experiments):
        bandit = Bandit(bandit_probs)
        agent = Agent(bandit, epsilon)
        (action_history, reward_history) = experiment(agent, bandit, N_episodes)
        # Progress report every 5% of the experiments.
        if (i + 1) % (N_experiments / 20) == 0:
            print("[Experiment {}/{}]".format(i + 1, N_experiments))
            print(" N_episodes = {}".format(N_episodes))
            print(" bandit choice history = {}".format(action_history + 1))
            print(" reward history = {}".format(reward_history))
            print(" Average Reward = {}".format(np.sum(reward_history) / len(reward_history)))
            print("")
        reward_history_avg += reward_history
        for j, a in enumerate(action_history):
            action_history_sum[j][a] += 1
    reward_history_avg /= float(N_experiments)
    print("reward history avg = {}".format(reward_history_avg))
    # Average reward per episode, log-scaled episode axis.
    plt.plot(reward_history_avg)
    plt.xlabel("Episode Number")
    plt.ylabel("Rewards collected".format(N_experiments))
    plt.title("Bandit Reward History averaged over {} experiments (epsilon = {})".format(N_experiments, epsilon))
    ax = plt.gca()
    ax.set_xscale("log", nonposx='clip')
    if save_fig:
        output_file = "MABresults/MAB_rewards" + save_format
        plt.savefig(output_file, bbox_inches="tight")
    else:
        plt.show()
    # Per-bandit selection percentage over time.
    plt.figure(figsize=(18, 12))
    for i in range(N_bandits):
        action_history_sum_plot = 100 * action_history_sum[:, i] / N_experiments
        plt.plot(list(np.array(range(len(action_history_sum_plot))) + 1),
                 action_history_sum_plot, linewidth=5.0,
                 label="Bandit #{}".format(i + 1))
    plt.title("Bandit action history averaged over {} experiments (epsilon = {})".format(N_experiments, epsilon), fontsize=26)
    plt.xlabel("Episode Number", fontsize=26)
    plt.ylabel("Bandit Action Choices (%)", fontsize=26)
    leg = plt.legend(loc="best", shadow=True, fontsize=26)
    ax = plt.gca()
    ax.set_xscale("log", nonposx='clip')
    plt.xlim([1, N_episodes])
    plt.ylim([0, 100])
    plt.xticks(fontsize=24)
    plt.yticks(fontsize=24)
    for legobj in leg.legendHandles:
        legobj.set_linewidth(16.0)
    if save_fig:
        output_file = "MABresults/MAB_actions" + save_format
        plt.savefig(output_file, bbox_inches="tight")
    else:
        plt.show()
main()
```
| github_jupyter |
## Downstream Task: ImageWang
At the heart of this experiment we are trying to answer:
> What is better: the best trained network starting with *random* weights, or the best trained network starting with weights generated from a *pretext* task?
This may seem obvious but it's important to keep in mind when designing our experiments. In order to train a network with random weights in the best possible way, we will use the approach that gives the highest accuracy when training from scratch on [Imagenette](https://github.com/fastai/imagenette/tree/58a63175a2c6457650289d32741940d6a7d58fbf).
To be honest, I'm not sure what the best possible way is to train a network that has been initialized with pretext weights. Therefore we will try both training only the head of the network and training the entire network with discriminative fine-tuning.
This gives us three scenarios we'd like to compare:
1. Training an entire model that is initialized with random weights.
2. Training the head of a model that is initialized with weights generated on a pretext task.
3. Training an entire model that is initialized with weights generated on a pretext task.
For our experiments here we'll continue using the [ImageWang](https://github.com/fastai/imagenette/blob/master/README.md) dataset. It contains a curated subset of images from the original Imagenet dataset.
It contains:
- A `/train` folder with 20 classes
- A `/val` folder with 10 classes (all of which are in /train)
- An `/unsup` folder with miscellaneous unlabelled images
```
import json
import torch
import numpy as np
from config import config
from RandomCutout import RandomCutout, PILImageInput
from fastai2.basics import *
from fastai2.vision.all import *
from torch.nn import MSELoss
from functools import partial
```
## Random Weights Baseline
Let's train a completely random network in the best way we know how. The [Imagenette Leaderboard](https://github.com/fastai/imagenette/) contains hyperparameters that give the highest accuracy when trained on the Imagenette dataset (a subset of Imagenet).
We will use all the tips and tricks from [training Imagenette](https://github.com/fastai/imagenette/tree/58a63175a2c6457650289d32741940d6a7d58fbf).
```
# Default parameters
lr=config['lr']
size=config['size']
sqrmom=config['sqrmom']
mom=config['mom']
eps=config['eps']
epochs=config['epochs']
bs=config['bs']
opt=config['opt']
sh=config['sh']
sa=config['sa']
sym=config['sym']
beta=config['beta']
act_fn=config['act_fn']
fp16=config['fp16']
pool=config['pool']
runs=config['runs']
model = config['model']
if opt=='adam' : opt_func = partial(Adam, mom=mom, sqr_mom=sqrmom, eps=eps)
elif opt=='rms' : opt_func = partial(RMSProp, sqr_mom=sqrmom)
elif opt=='sgd' : opt_func = partial(SGD, mom=mom)
elif opt=='ranger': opt_func = partial(ranger, mom=mom, sqr_mom=sqrmom, eps=eps, beta=beta)
runs = 3
# Default parameters
print("lr", lr)
print("size", size)
print("sqrmom", sqrmom)
print("mom", mom)
print("eps", eps)
print("epochs", epochs)
print("bs", bs)
print("opt", opt)
print("sh", sh)
print("sa", sa)
print("sym", sym)
print("beta", beta)
print("act_fn", act_fn)
print("fp16", fp16)
print("pool", pool)
print("runs", runs)
print("model", model)
def get_dbunch(size, bs):
    """Build ImageWang DataLoaders for the given image size and batch size.

    Uses the pre-resized 160px archive when `size` allows, which saves
    download/IO time; falls back to the full-resolution archive otherwise.
    """
    if size<=224:
        path = URLs.IMAGEWANG_160
    else:
        path = URLs.IMAGEWANG
    source = untar_data(path)
    # Per-item augmentation: random resized crop (keep >= 35% of the image) plus a random flip.
    item_tfms=[RandomResizedCrop(size, min_scale=0.35), FlipItem(0.5)]
    # Batch-level cutout-style erasing; `sh` comes from the module-level config.
    batch_tfms=RandomErasing(p=0.9, max_count=3, sh=sh)
    # Labels come from the parent directory name; /val subfolder is the validation split.
    dblock = DataBlock(blocks=(ImageBlock, CategoryBlock),
                       splitter=GrandparentSplitter(valid_name='val'),
                       get_items=get_image_files,
                       get_y=parent_label,
                       item_tfms=item_tfms,
                       batch_tfms=batch_tfms)
    # Cap worker processes at 8 to avoid oversubscription on many-core machines.
    workers = min(8, num_cpus())
    return dblock.dataloaders(source, path=source, bs=bs, num_workers=workers)
size = 128
bs = 64
dbunch = get_dbunch(size, bs)
dbunch.show_batch()
# Scenario 1 baseline: train the entire model from random weights, `runs` times.
for run in range(runs):
    print(f'Run: {run}')
    # `model` and hyperparameters come from the config cell above; 20 output
    # classes matches ImageWang's /train folder.
    learn = Learner(dbunch, model(c_out=20, act_cls=act_fn, sa=sa, sym=sym, pool=pool, pretrained=False), opt_func=opt_func, \
        metrics=[accuracy,top_k_accuracy], loss_func=LabelSmoothingCrossEntropy())
    if fp16:
        learn = learn.to_fp16()  # mixed-precision training
    cbs = []
    #n_gpu = torch.cuda.device_count()
    #if gpu is None and n_gpu: learn.to_parallel()
    # NOTE(review): `gpu` is never defined in this notebook, so this line would
    # raise NameError under distributed training -- confirm before launching.
    if num_distrib()>1: learn.to_distributed(gpu) # Requires `-m fastai.launch`
    # Flat-then-cosine LR schedule (the recommended pairing with ranger).
    learn.fit_flat_cos(epochs, lr, wd=1e-2, cbs=cbs)
```
When I run the above for 3 runs I get:
- Run 1: 0.535505
- Run 2: 0.543141
- Run 3: 0.540087
This gives us a random weights baseline accuracy of **54.0%**.
## Pretext Weights: Frozen Network
Now that we've got a random weights baseline, we'd like to see whether training a network starting with pretext weights gives us any improvement. For this experiment we will exclusively train the head of our network for `100` epochs.
```
# Scenario 2: initialise from pretext (inpainting) weights, then train with the
# body frozen -- only the head is updated.
for run in range(runs):
    print(f'Run: {run}')
    # NOTE(review): act_cls is hard-coded to ReLU here rather than config's
    # act_fn -- presumably to match the activation used when the pretext
    # weights were trained; confirm against the pretext notebook.
    learn = Learner(dbunch, model(c_out=20, act_cls=torch.nn.ReLU, sa=sa, sym=sym, pool=pool, pretrained=False), opt_func=opt_func, \
        metrics=[accuracy,top_k_accuracy], loss_func=LabelSmoothingCrossEntropy())
    if fp16: learn = learn.to_fp16()
    cbs = []
    # Load weights generated from training on our pretext task
    state_dict = torch.load('imagewang_inpainting_15_epochs.pth')
    # HACK: If we don't have all of the parameters for our learner, we get an error
    # (the pretext checkpoint has no classification head, so copy the randomly
    # initialised head weights into the state dict to satisfy load_state_dict).
    linear_layer = learn.model[-1]
    state_dict['11.weight'] = linear_layer.weight
    state_dict['11.bias'] = linear_layer.bias
    learn.model.load_state_dict(state_dict)
    learn.freeze()  # freeze the body; only the head will be trained
    learn.fit_flat_cos(epochs, lr, wd=1e-2, cbs=cbs)
```
When I run the above for 3 runs I get:
- Run 1: 0.572156
- Run 2: 0.562739
- Run 3: 0.553576
This gives us an average accuracy of **56.3%**
## Pretext Weights: Frozen Network + Discriminative Learning
We've already beaten the network initialized with random weights, but it seems unlikely that fine-tuning the head of a network initialized with pretext weights is the best way to train.
In this experiment we initialize our network using the weights generated by pretraining our network on a pretext task. We will train the head of our network for `5` epochs and then use discriminative learning rates to train the rest of the network for `95` epochs.
When we fine-tune we will use a slice from `lr/100` to `lr`. This is taken from fastai's [`.fine_tune()`](https://github.com/fastai/fastai2/blob/c942b186b032dcb4d42c95f56893957a24a4f96d/fastai2/callback/schedule.py#L130).
**Some concerns**:
- I am essentially trying to mix "the best way to train with random weights" with "the best way to train with ImageNet weights". Can these approaches be mixed? I feel like I lack a necessary understanding of all the moving parts to say how they will behave once I combine them.
- I am using `fit_flat_cos()` because I'm using the `ranger` optimizer which reduces the need for learning rate warmup. I have not seen any examples of fine-tuning with this approach.
- I am not setting `pct_start`. Most examples I've seen set this. (They also use `.fit_one_cycle()`)
```
# Scenario 3: initialise from pretext weights, train the head for 5 epochs,
# then unfreeze and fine-tune the whole network with discriminative LRs.
for run in range(runs):
    print(f'Run: {run}')
    learn = Learner(dbunch, model(c_out=20, act_cls=torch.nn.ReLU, sa=sa, sym=sym, pool=pool), opt_func=opt_func, \
        metrics=[accuracy,top_k_accuracy], loss_func=LabelSmoothingCrossEntropy())
    if fp16: learn = learn.to_fp16()
    cbs = []
    # Load weights generated from training on our pretext task.
    state_dict = torch.load('imagewang_inpainting_15_epochs.pth')
    # HACK: If we don't have all of the parameters for our learner, we get an
    # error -- the pretext checkpoint has no classification head, so copy the
    # randomly initialised head weights into the state dict before loading.
    linear_layer = learn.model[-1]
    state_dict['11.weight'] = linear_layer.weight
    state_dict['11.bias'] = linear_layer.bias
    learn.model.load_state_dict(state_dict)
    # Phase 1: train only the head.
    learn.freeze()
    learn.fit_flat_cos(5, lr, wd=1e-2, cbs=cbs)
    # Phase 2: train_imagenette.py approach, but with discriminative learning rates.
    learn.unfreeze()
    # BUG FIX: the original used `lr//100` (floor division), which truncates a
    # typical fractional lr (e.g. 8e-3) to 0.0 and froze the body in practice.
    # fastai's fine_tune uses true division, `lr/100`.
    learn.fit_flat_cos(epochs - 5, slice(lr / 100, lr), wd=1e-2, cbs=cbs)
```
When I run the above for 3 runs I get:
- Run 1: 0.564266
- Run 2: 0.564011
- Run 3: 0.556630
This gives us an average accuracy of **56.2%** (about the same as before).
## Conclusion
Our three experiments give us:
1. Random weights baseline: `54.0%` accuracy
2. Pretext weights with head fine-tuning: `56.3%` accuracy
3. Pretext weights with head fine-tuning followed by discriminitive learning: `56.2%` accuracy
In summary, we've demonstrated that we can get a reliable improvements in downstream accuracy by pre-training a network on a self-supervised pretext task. This is not super exciting in and of itself, but it gives us a good starting point from which to move towards more interesting questions.
Here are some questions we should consider investigating in future notebooks:
- Can we train on pretext for longer and see larger improvements in downstream performance?
- Are 100 epochs of downstream training enough?
- Can we train on multiple pretext tasks and see greater improvements?
- Can we ever hope to compete with pre-trained weights generated from labelled datasets (eg. Imagenet)?
- What about domains where those pre-trained weights are weaker (eg. medical images)?
| github_jupyter |
```
import torch
import torch.nn as nn
import hashedEmbeddingBag
bag_num = 18
num_categories = 100
num_feature = 200
hashed_weight_size = 200
hashed_weights = torch.rand(hashed_weight_size)
#hashed_weights = torch.arange(start=0, end=hashed_weight_size, dtype=torch.float)
bag_size = torch.randint(low=0, high=7, size=(bag_num,))
indices_num = bag_size.sum().item()
indices = torch.randint(low=0, high=num_categories - 1, size=(indices_num, ))
offsets = torch.cat([torch.zeros(1, dtype=torch.long), bag_size.cumsum(dim=0)[:-1]])
from ..hash_embedding_bag import HashEmbeddingBag
rand_hash_compression_rate = 1.0
EE = HashEmbeddingBag(num_categories, num_feature, rand_hash_compression_rate, mode="sum", sparse=True)
W = np.random.uniform(
low=-np.sqrt(1 / num_categories), high=np.sqrt(1 / num_categories), size=((int(num_categories * num_feature * rand_hash_compression_rate), ))
).astype(np.float32)
EE.hashed_weight.data = torch.tensor(W, requires_grad=True)
res = EE(indices, offsets)
# res.retain_grad()
tmp = res * torch.rand_like(res)
y = tmp.sum()
y.backward()
print(hashed_weights)
print(bag_size)
print(indices)
print(offsets)
print(indices_num)
print(indices.shape)
mode = 0
device = torch.cuda.current_device()
hashed_weights = hashed_weights.to(device)
indices = indices.to(device)
offsets = offsets.to(device)
output, offset2bag, bag_size, max_indices, hashed_idx = \
hashed_embedding_bag_cpp.forward(hashed_weights, indices, offsets, mode, num_feature)
print("output is:")
print(output)
print("offset2bag is:")
print(offset2bag)
print("bag_size is:")
print(bag_size)
print("max_indices is:")
print(max_indices)
print("hashed_idx is:")
print(hashed_idx)
print(hashed_idx.shape)
def toSignedInt(value, bits):
    """Reinterpret the low `bits` bits of `value` as a two's-complement signed int."""
    mask = (1 << bits) - 1
    unsigned = value & mask
    sign_bit = 1 << (bits - 1)
    # If the sign bit is set, wrap into the negative range.
    return unsigned - (1 << bits) if unsigned & sign_bit else unsigned
"""
def hash_function(a, b):
tmp1 = toSignedInt(a * 9824516537, 64)
tmp2 = toSignedInt(b * 57857966300227, 64)
tmp3 = toSignedInt(tmp1 + tmp2, 64)
tmp3 %= 117130198221199
return tmp3
"""
def hash_function(a, b):
    """Toy stand-in for the C++ hash: simply sums the two keys.

    The real multiplicative hash is in the commented-out version above.
    """
    combined = b + a
    return combined
device = torch.device("cpu")
hashed_weights = hashed_weights.to(device)
indices = indices.to(device)
offsets = offsets.to(device)
output = output.to(device)
offset2bag = offset2bag.to(device)
bag_size = bag_size.to(device)
max_indices = max_indices.to(device)
hashed_idx = hashed_idx.to(device)
def make_offset2bag(offsets, indices):
    """Reference implementation of the offset2bag mapping.

    Returns a tensor the same length as `indices` where entry i is the bag id
    (row of `offsets`) that indices[i] belongs to, matching the offset2bag
    output of the CUDA kernel it is checked against above.
    """
    # One counter slot per index position, plus one scratch slot at the end.
    offsets2bag = torch.zeros(indices.size(0) + 1, dtype=indices.dtype, device=offsets.device)
    # Drop a 1 at each bag's starting position.
    offsets2bag.index_add_(0, offsets, torch.ones_like(offsets, memory_format=torch.legacy_contiguous_format))
    # Shift so the running count is zero-based (first bag gets id 0, not 1).
    offsets2bag[0] -= 1
    # Cumulative count of bag starts seen so far == bag id at each position.
    offsets2bag = offsets2bag.cumsum(0)
    # Trim the scratch slot in place so the result aligns with `indices`.
    offsets2bag.resize_(indices.size(0))
    return offsets2bag
expected_offsets2bag = make_offset2bag(offsets, indices)
assert((expected_offsets2bag - offset2bag).abs().sum().item() == 0)
expected_hashed_index = torch.zeros((indices_num, num_feature), dtype=torch.long)
expected_output = torch.zeros(bag_num, num_feature)
for i in range(indices.size(0)):
for j in range(num_feature):
weight_idx = hashed_embedding_bag_cpp.hash(indices[i].item(), j) % hashed_weights.size(0)
expected_hashed_index[i, j] = weight_idx
expected_output[expected_offsets2bag[i].item(), j] += hashed_weights[weight_idx]
print(expected_output)
assert(expected_hashed_index.equal(hashed_idx))
assert(expected_output.equal(output))
output_grad = torch.rand_like(expected_output)
#output_grad = torch.arange(start=0, end=num_feature, dtype=torch.float).unsqueeze(0).repeat(bag_num, 1)
#output_grad = torch.arange(start=0, end=bag_num, dtype=torch.float).unsqueeze(-1).repeat(1, num_feature)
output_grad[:, 0] = 0.5
expected_weight_grad = torch.zeros_like(hashed_weights)
for i in range(indices.size(0)):
for j in range(num_feature):
weight_idx = hashed_embedding_bag_cpp.hash(indices[i].item(), j) % hashed_weights.size(0)
expected_weight_grad[weight_idx] += output_grad[offset2bag[i].item(), j]
device = torch.cuda.current_device()
hashed_weights = hashed_weights.to(device)
indices = indices.to(device)
offsets = offsets.to(device)
offset2bag = offset2bag.to(device)
bag_size = bag_size.to(device)
max_indices = max_indices.to(device)
hashed_idx = hashed_idx.to(device)
print(output_grad)
output_grad = output_grad.to(device)
weight_grad = hashed_embedding_bag.backward(
output_grad, indices, offsets, offset2bag, bag_size, max_indices, hashed_idx, hashed_weights.size(0), False, mode, num_feature)
weight_grad = weight_grad.cpu()
print(expected_weight_grad)
print(weight_grad)
assert((weight_grad - expected_weight_grad).sum().item() < 1)
import hashedEmbeddingBag
emb = hashedEmbeddingBag.HashedEmbeddingBag(
num_categories, num_feature, hashed_weight_size / (num_feature * num_categories), "sum", hashed_weights)
res = emb(indices, offsets)
res.retain_grad()
tmp = res * torch.rand_like(res)
y = tmp.sum()
y.backward()
res.grad
assert(res.cpu().equal(expected_output))
output_grad = res.grad
weight_grad = hashed_embedding_bag.backward(
output_grad, indices, offsets, offset2bag, bag_size, max_indices, hashed_idx, hashed_weights.size(0), False, mode, num_feature)
print(emb.weight.grad)
print(weight_grad)
assert((emb.weight.grad - weight_grad).sum().abs().item() < 1)
```
| github_jupyter |
```
import numpy as np
from matplotlib import pyplot as plt
def kinesin1_velocity(F):
    """Kinesin-1 velocity (nm/s) under an opposing load F (pN).

    Three-rate chemo-mechanical cycle model; accepts a scalar or numpy array.
    """
    step_size = 8.2            # nm advanced per mechanical step
    d1, d3 = 4.6, 0.35         # characteristic distances (nm)
    k1, k2, k3 = 4900, 95, 260  # rate constants
    Fi = 26                    # internal force scale (pN)
    kT = 4.114                 # thermal energy at room temp (pN nm)
    boltz1 = np.exp(-F * d1 / kT)
    boltz2 = np.exp((Fi - F) * d3 / kT)
    numerator = step_size * k1 * k2 * k3 * boltz1 * boltz2
    denominator = k1 * k2 * boltz1 + k3 * boltz2 * (k1 * boltz1 + k2)
    return numerator / denominator
def linear_vdep_model(F, Fstall=6.4):
    """Linear force-velocity model: unloaded velocity scaled by 1 - F/Fstall.

    The scale factor is clamped to [0, 1]; handles scalar or numpy-array F.
    """
    v0 = kinesin1_velocity(0)
    scale = 1. - (F / Fstall)
    if isinstance(scale, float):
        scale = min(max(scale, 0), 1)
    else:
        # Array input: clamp element-wise.
        scale[scale < 0] = 0
        scale[scale > 1] = 1
    return v0 * scale
def exp_vdep_model(F, Fstall=4):
    """Exponential force-velocity model: v0 * exp(-(F/Fstall)^2.65).

    Negative (assisting) loads are clamped to zero; handles scalar or array F.
    """
    if isinstance(F, (float, int)):
        load = max(F, 0)
    else:
        load = F.copy()  # don't mutate the caller's array
        load[load < 0] = 0
    v0 = kinesin1_velocity(0)
    return v0 * np.exp(-(load / Fstall) ** 2.65)
x = np.linspace(-5, 10, 100)
y = kinesin1_velocity(x)
ym = linear_vdep_model(x)
ymexp = exp_vdep_model(x)
plt.figure()
plt.plot(x, y)
plt.plot(x, ym)
plt.plot(x, ymexp)
plt.show()
print(kinesin1_velocity(0))
```
## Parameters of Kinesin-1
v = "735 nm/s" (1.05 sigma per tau)
from Examining kinesin processivity within a general gating framework, Andreasson 2015 eLife
k_a = "5 s^-1" (0.18 per tau) # per kinesin, so multiply by number concentration at interface of MT
From: Membrane-Bound Kinesin Transport and its Applications in Active Matter, 2019 (thesis by Lopes from UC Merced)
and ((C. Leduc, O. Campàs, K. B. Zeldovich, A. Roux, P. Jolimaitre, L. Bourel-Bonnet, B. Goud, J.-F. Joanny, P. Bassereau and J. Prost, "Cooperative extraction of membrane nanotubes by molecular motors," Proceedings of the National Academy of Sciences, vol. 101, no. 49, pp. 17096-17101, 2004.)) which found that the association rate per kinesin molecule was about 5 per second
k_u = "0.42 s^-1" (0.015 per tau)
from Direct observation of single kinesin molecules moving along microtubules, Vale, R. D., Funatsu, T., Pierce, D. W., Romberg, L., Harada, Y. & Yanagida, T. (1996) Nature 380 , 451–453. pmid:8602245
f_stall = 6 pN (37.5 in sim units) from references mentioned above (approx)
k_spring = 0.3 pN/nm (47 in sim units) (reference from Robert's thesis)
fdep_unbind_factor = 0.6 nm (.024 sigma in sim units)
rest_length = 60 nm (2.4 in sim units) I'm not worrying about extra distance from center of MT to surface, since the rest length is from structure data and there are probably some entropic considerations
## time constant in simulation
D = kT/3 pi eta d
d = 25 nm
kT = 4.114 pN nm
eta = 1 Pa. s = 1 * 10^12 * 10^-18 pN nm^-2 s (cytoplasm, reference from Robert's thesis)
kT/(eta * d) = nm^2/s
tau = 0.0358 s (from below calculation and from Robert's thesis)
```
# Diffusion constant of a d = 25 nm bead in cytoplasm (eta = 1 Pa.s),
# D = kT / (3 pi eta d), in nm^2/s.
D = (4.114 / (np.pi * 3 * 1 * 10**12 * 10**-18 * 25))
# Simulation time unit: time to diffuse one bead diameter squared (~0.0358 s).
tau = 25**2 / D
tau
5 * tau
0.18
0.42 / 5
# BUG FIX: the original line read `0.165 pN`, which is a SyntaxError
# (a stray unit after the numeric literal); the unit is now a comment.
0.165  # pN
735 / 25 * 0.0358   # velocity converted to sim units (sigma per tau)
6 / 0.16            # stall force in sim units
0.3 * 25 / 0.16     # spring constant in sim units
0.6 / 25            # force-dependent unbinding length in sim units
```
| github_jupyter |
In this exploration notebook, we shall try to uncover the basic information about the dataset which will help us build our models / features.
Let us start with importing the necessary modules.
```
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn import model_selection, preprocessing
import xgboost as xgb
color = sns.color_palette()
%matplotlib inline
pd.options.mode.chained_assignment = None # default='warn'
pd.set_option('display.max_columns', 500)
```
First let us import the train file and get some idea about the data.
```
train_df = pd.read_csv("../input/train.csv")
train_df.shape
train_df.head()
```
There are quite a few variables in this dataset.
Let us start with target variable exploration - 'price_doc'. First let us do a scatter plot to see if there are any outliers in the data.
```
plt.figure(figsize=(8,6))
plt.scatter(range(train_df.shape[0]), np.sort(train_df.price_doc.values))
plt.xlabel('index', fontsize=12)
plt.ylabel('price', fontsize=12)
plt.show()
```
Looks okay to me. Also since the metric is RMSLE, I think it is okay to have it as such. However if needed, one can truncate the high values.
We can now bin the 'price_doc' and plot it.
```
plt.figure(figsize=(12,8))
sns.distplot(train_df.price_doc.values, bins=50, kde=True)
plt.xlabel('price', fontsize=12)
plt.show()
```
Certainly a very long right tail. Since our metric is Root Mean Square **Logarithmic** error, let us plot the log of price_doc variable.
```
plt.figure(figsize=(12,8))
sns.distplot(np.log(train_df.price_doc.values), bins=50, kde=True)
plt.xlabel('price', fontsize=12)
plt.show()
```
This looks much better than the previous one.
Now let us see how the median housing price change with time.
```
train_df['yearmonth'] = train_df['timestamp'].apply(lambda x: x[:4]+x[5:7])
grouped_df = train_df.groupby('yearmonth')['price_doc'].aggregate(np.median).reset_index()
plt.figure(figsize=(12,8))
sns.barplot(grouped_df.yearmonth.values, grouped_df.price_doc.values, alpha=0.8, color=color[2])
plt.ylabel('Median Price', fontsize=12)
plt.xlabel('Year Month', fontsize=12)
plt.xticks(rotation='vertical')
plt.show()
```
There are some variations in the median price with respect to time. Towards the end, there seems to be some linear increase in the price values.
Now let us dive into other variables and see. Let us first start with getting the count of different data types.
```
train_df = pd.read_csv("../input/train.csv", parse_dates=['timestamp'])
dtype_df = train_df.dtypes.reset_index()
dtype_df.columns = ["Count", "Column Type"]
dtype_df.groupby("Column Type").aggregate('count').reset_index()
```
So majority of them are numerical variables with 15 factor variables and 1 date variable.
Let us explore the number of missing values in each column.
```
# Count missing values per column and plot the columns that have any.
missing_df = train_df.isnull().sum(axis=0).reset_index()
missing_df.columns = ['column_name', 'missing_count']
# FIX: `.ix` was deprecated and later removed from pandas; boolean-mask
# selection with `.loc` is the supported equivalent.
missing_df = missing_df.loc[missing_df['missing_count'] > 0]
ind = np.arange(missing_df.shape[0])
width = 0.9
fig, ax = plt.subplots(figsize=(12, 18))
rects = ax.barh(ind, missing_df.missing_count.values, color='y')
ax.set_yticks(ind)
ax.set_yticklabels(missing_df.column_name.values, rotation='horizontal')
ax.set_xlabel("Count of missing values")
ax.set_title("Number of missing values in each column")
plt.show()
```
It seems the variables tend to be missing in groups.
Since there are 292 variables, let us build a basic xgboost model and then explore only the important variables.
```
# Label-encode all string (object) columns so xgboost can consume them.
for f in train_df.columns:
    if train_df[f].dtype=='object':
        lbl = preprocessing.LabelEncoder()
        lbl.fit(list(train_df[f].values))
        train_df[f] = lbl.transform(list(train_df[f].values))
# Target and feature matrix (drop identifiers and the target itself).
train_y = train_df.price_doc.values
train_X = train_df.drop(["id", "timestamp", "price_doc"], axis=1)
xgb_params = {
    'eta': 0.05,                # learning rate
    'max_depth': 8,
    'subsample': 0.7,
    'colsample_bytree': 0.7,
    # NOTE(review): 'reg:linear' was renamed 'reg:squarederror' in xgboost >= 1.0
    'objective': 'reg:linear',
    'eval_metric': 'rmse',
    # NOTE(review): 'silent' is deprecated in newer xgboost (use 'verbosity')
    'silent': 1
}
dtrain = xgb.DMatrix(train_X, train_y, feature_names=train_X.columns.values)
# Quick model purely for feature-importance exploration (no validation split).
model = xgb.train(dict(xgb_params, silent=0), dtrain, num_boost_round=100)
# plot the important features #
fig, ax = plt.subplots(figsize=(12,18))
xgb.plot_importance(model, max_num_features=50, height=0.8, ax=ax)
plt.show()
```
So the top 5 variables and their description from the data dictionary are:
1. full_sq - total area in square meters, including loggias, balconies and other non-residential areas
2. life_sq - living area in square meters, excluding loggias, balconies and other non-residential areas
3. floor - for apartments, floor of the building
4. max_floor - number of floors in the building
5. build_year - year built
Now let us see how these important variables are distributed with respect to target variable.
**Total area in square meters:**
```
# Clip the target at its 0.5 / 99.5 percentiles to tame extreme outliers.
ulimit = np.percentile(train_df.price_doc.values, 99.5)
llimit = np.percentile(train_df.price_doc.values, 0.5)
# FIX: `.ix` was deprecated/removed in pandas; `.loc` with a boolean mask is
# the supported equivalent and also avoids chained assignment.
train_df.loc[train_df['price_doc'] > ulimit, 'price_doc'] = ulimit
train_df.loc[train_df['price_doc'] < llimit, 'price_doc'] = llimit
# Same clipping for the total-area feature.
col = "full_sq"
ulimit = np.percentile(train_df[col].values, 99.5)
llimit = np.percentile(train_df[col].values, 0.5)
train_df.loc[train_df[col] > ulimit, col] = ulimit
train_df.loc[train_df[col] < llimit, col] = llimit
plt.figure(figsize=(12, 12))
sns.jointplot(x=np.log1p(train_df.full_sq.values), y=np.log1p(train_df.price_doc.values), size=10)
plt.ylabel('Log of Price', fontsize=12)
plt.xlabel('Log of Total area in square metre', fontsize=12)
plt.show()
```
**Living area in square meters:**
```
col = "life_sq"
train_df[col].fillna(0, inplace=True)
# Clip at the 5th/95th percentiles; this feature is noisier than full_sq.
ulimit = np.percentile(train_df[col].values, 95)
llimit = np.percentile(train_df[col].values, 5)
# FIX: `.ix` was deprecated/removed in pandas; use `.loc` with a boolean mask.
train_df.loc[train_df[col] > ulimit, col] = ulimit
train_df.loc[train_df[col] < llimit, col] = llimit
plt.figure(figsize=(12, 12))
sns.jointplot(x=np.log1p(train_df.life_sq.values), y=np.log1p(train_df.price_doc.values),
              kind='kde', size=10)
plt.ylabel('Log of Price', fontsize=12)
plt.xlabel('Log of living area in square metre', fontsize=12)
plt.show()
```
**Floor:**
We will see the count plot of floor variable.
```
plt.figure(figsize=(12,8))
sns.countplot(x="floor", data=train_df)
plt.ylabel('Count', fontsize=12)
plt.xlabel('floor number', fontsize=12)
plt.xticks(rotation='vertical')
plt.show()
```
The distribution is right skewed. There are some good drops in between (5 to 6, 9 to 10, 12 to 13, 17 to 18). Now let us see how the price changes with respect to floors.
```
grouped_df = train_df.groupby('floor')['price_doc'].aggregate(np.median).reset_index()
plt.figure(figsize=(12,8))
sns.pointplot(grouped_df.floor.values, grouped_df.price_doc.values, alpha=0.8, color=color[2])
plt.ylabel('Median Price', fontsize=12)
plt.xlabel('Floor number', fontsize=12)
plt.xticks(rotation='vertical')
plt.show()
```
This shows an overall increasing trend (individual houses seems to be costlier as well - check price of 0 floor houses).
A sudden increase in the house price is also observed at floor 18.
**Max floor:**
Total number of floors in the building is one another important variable. So let us plot that one and see.
```
plt.figure(figsize=(12,8))
sns.countplot(x="max_floor", data=train_df)
plt.ylabel('Count', fontsize=12)
plt.xlabel('Max floor number', fontsize=12)
plt.xticks(rotation='vertical')
plt.show()
```
We could see that there are few tall bars in between (at 5,9,12,17 - similar to drop in floors in the previous graph). May be there are some norms / restrictions on the number of maximum floors present(?).
Now let us see how the median prices vary with the max floors.
```
plt.figure(figsize=(12,8))
sns.boxplot(x="max_floor", y="price_doc", data=train_df)
plt.ylabel('Median Price', fontsize=12)
plt.xlabel('Max Floor number', fontsize=12)
plt.xticks(rotation='vertical')
plt.show()
```
More to come. Stay tuned.!
| github_jupyter |
# KernelPCA with Scale
This code template of KPCA(Kernel PCA) in python is a non-linear technique for dimensionality reduction through the use of Kernel along with the Scale as a feature rescaling technique.
### Required Packages
```
import warnings
import itertools
import numpy as np
import pandas as pd
import seaborn as se
import matplotlib.pyplot as plt
from mpl_toolkits import mplot3d
from sklearn.decomposition import KernelPCA
from sklearn.preprocessing import LabelEncoder,Normalizer,scale
from numpy.linalg import eigh
warnings.filterwarnings('ignore')
```
### Initialization
Filepath of CSV file
```
#filepath
file_path= ""
```
List of features which are required for model training .
```
features=[]
#y_value
target= ''
```
### Data Fetching
Pandas is an open-source, BSD-licensed library providing high-performance, easy-to-use data manipulation and data analysis tools.
We will use panda's library to read the CSV file using its storage path.And we use the head function to display the initial row or entry.
```
df = pd.read_csv(file_path)
df.head()
```
### Feature Selections
It is the process of reducing the number of input variables when developing a predictive model. Used to reduce the number of input variables to both reduce the computational cost of modelling and, in some cases, to improve the performance of the model.
We will assign all the required input features to X.
```
X = df[features]
Y = df[target]
```
### Data Preprocessing
Since most machine learning models in the sklearn library don't handle string categorical data or null values, we have to explicitly remove or replace them. The snippet below defines functions that replace null values if any exist, and that convert string-class data in the dataset by encoding it into integer classes.
```
def NullClearner(df):
    """Impute missing values in a Series: column mean for numeric dtypes, mode otherwise.

    Non-Series inputs are returned unchanged. The Series is modified in place
    and also returned, matching the original behaviour.
    """
    if isinstance(df, pd.Series) and df.dtype in ["float64", "int64"]:
        df.fillna(df.mean(), inplace=True)
        return df
    elif isinstance(df, pd.Series):
        mode = df.mode()
        # FIX: df.mode() is empty for an all-null series; the original
        # `df.mode()[0]` raised KeyError in that case.
        if not mode.empty:
            df.fillna(mode[0], inplace=True)
        return df
    else:
        return df
def EncodeX(df):
    """One-hot encode categorical columns via pandas get_dummies; numeric columns pass through."""
    return pd.get_dummies(df)
def EncodeY(df):
    """Label-encode the target when it has more than two classes.

    Binary (or constant) targets are returned untouched; otherwise classes are
    mapped to 0..n-1 and the mapping is printed for reference.
    """
    if len(df.unique()) <= 2:
        return df
    original_classes = np.sort(pd.unique(df), axis=-1, kind='mergesort')
    df = LabelEncoder().fit_transform(df)
    encoded_codes = [xi for xi in range(len(original_classes))]
    print("Encoded Target: {} to {}".format(original_classes, encoded_codes))
    return df
```
Calling preprocessing functions on the feature and target set.
```
x=X.columns.to_list()
for i in x:
X[i]=NullClearner(X[i])
X=EncodeX(X)
Y=EncodeY(NullClearner(Y))
X.head()
```
### Data Rescaling
Standardize a dataset along any axis.
Center to the mean and component wise scale to unit variance
Refer [API](https://scikit-learn.org/stable/modules/generated/sklearn.preprocessing.scale.html) for the parameters
```
X_Scaled = scale(X)
X_Scaled=pd.DataFrame(data = X_Scaled,columns = X.columns)
X_Scaled.head()
```
### Choosing the number of components
We have to estimate how many components are needed to describe the data. This can be determined by looking at the cumulative explained variance ratio as a function of the number of components.
This curve quantifies how much of the total, dimensional variance is contained within the first N components.
### Explained Variance
Explained variance refers to the variance explained by each of the principal components (eigenvectors). It can be represented as a function of ratio of related eigenvalue and sum of eigenvalues of all eigenvectors.
The function below returns a list with the values of explained variance and also plots cumulative explained variance
```
def explained_variance_plot(X):
    """Plot the cumulative explained variance of X's principal components.

    Returns the list of per-component explained-variance ratios, sorted
    in descending order of eigenvalue.
    """
    cov_matrix = np.cov(X, rowvar=False) #this function returns the co-variance matrix for the features
    egnvalues, egnvectors = eigh(cov_matrix) #eigen decomposition is done here to fetch eigen-values and eigen-vectors
    total_egnvalues = sum(egnvalues)
    # Each eigenvalue's share of the total variance, largest component first.
    var_exp = [(i/total_egnvalues) for i in sorted(egnvalues, reverse=True)]
    # Cumulative curve: how much variance the first N components capture.
    plt.plot(np.cumsum(var_exp))
    plt.xlabel('number of components')
    plt.ylabel('cumulative explained variance');
    return var_exp
var_exp=explained_variance_plot(X)
```
#### Scree plot
The scree plot helps you to determine the optimal number of components. The eigenvalue of each component in the initial solution is plotted. Generally, you want to extract the components on the steep slope. The components on the shallow slope contribute little to the solution.
```
plt.plot(var_exp, 'ro-', linewidth=2)
plt.title('Scree Plot')
plt.xlabel('Principal Component')
plt.ylabel('Proportion of Variance Explained')
plt.show()
```
### Model
Kernel PCA in python is a non-linear technique for dimensionality reduction through the use of Kernel.
Kernel PCA uses a kernel function to project dataset into a higher dimensional feature space, where it is linearly separable
Refer [API](https://scikit-learn.org/stable/modules/generated/sklearn.decomposition.KernelPCA.html) for the parameters
```
pca = KernelPCA(n_components=5)
pcaX = pd.DataFrame(data = pca.fit_transform(X_Scaled))
```
#### Output Dataframe
```
finalDf = pd.concat([pcaX, Y], axis = 1)
finalDf.head()
```
#### Creator: Ageer Harikrishna , Github: [Profile](https://github.com/ageerHarikrishna)
| github_jupyter |
# 4. Dynamic Programming
- **Created by Andrés Segura Tinoco**
- **Created on Jan 26, 2020**
- **Updated on May 18, 2021**
**Dynamic programming** is an efficient technique for solving many combinatorial optimization problems in a polynomial time.
Dynamic programming is both a mathematical optimization method and a computer programming method. In both contexts it refers to simplifying a complicated problem by breaking it down into simpler sub-problems in a recursive manner <a href="#link_one">[1]</a>. There are two key attributes that a problem must have in order for dynamic programming to be applicable: optimal substructure and overlapping sub-problems.
### Principle of Optimality
An optimal policy has the property that whatever the initial state and initial decision are, the remaining decisions must constitute an optimal policy with regard to the state resulting from the first decision <a href="#link_two">[2]</a>. (See Bellman, 1957, Chap. III.3.)
## 4.1. Binomial Coefficient
In mathematics, the **binomial coefficients** are the positive integers that occur as coefficients in the binomial theorem <a href="#link_three">[3]</a>. Commonly, a binomial coefficient is indexed by a pair of integers $ n ≥ k ≥ 0 $ and is written $ \tbinom {n}{k} $. It is the coefficient of the $ x^k $ term in the polynomial expansion of the binomial power $ (1 + x)^n $, and it is given by the formula:
$$ \tbinom {n}{k} = \frac {n!}{k!(n-k)!} \tag{1}$$
```
# Load the Python libraries
import timeit
import math
import pandas as pd
# Example values
n = 25
k = 15
```
### 4.1.1. Formula approach
```
# Binomial coefficient from the mathematical formula
def bin_coef_1(n, k):
    """Binomial coefficient C(n, k) via the factorial formula (eq. 1).

    FIX: uses exact integer floor division instead of float true division;
    the original `int(a / b)` loses precision once the result exceeds 2**53
    (e.g. around n >= 57), returning a wrong integer.
    """
    return math.factorial(n) // (math.factorial(k) * math.factorial(n - k))
start_time = timeit.default_timer()
print(bin_coef_1(n, k))
print('>> elapsed time', (timeit.default_timer() - start_time) * 1000, 'ms')
```
### 4.1.2. Simple approach
```
# The recursive natural solution
def bin_coef_2(n, k):
    """Binomial coefficient via Pascal's rule; naive exponential-time recursion."""
    if k in (0, n):
        # Base case: exactly one way to choose none or all.
        return 1
    return bin_coef_2(n - 1, k) + bin_coef_2(n - 1, k - 1)
start_time = timeit.default_timer()
print(bin_coef_2(n, k))
print('>> elapsed time', (timeit.default_timer() - start_time) * 1000, 'ms')
```
### 4.1.3. Dynamic Programming
```
# Solution with dynamic programming (supported by a table)
def bin_coef_3(n, k):
    """Binomial coefficient C(n, k) via dynamic programming.

    Builds Pascal's triangle in a single (k+1)-sized buffer, sweeping each row
    right-to-left so v[j-1] still holds the previous row's value when used.
    Time Theta(n*k), space Theta(k).

    FIX: removed the unused local variable `c` from the original.
    """
    v = [1] * (k + 1)
    for i in range(n + 1):
        for j in range(k, 0, -1):
            if j < i:
                v[j] = v[j - 1] + v[j]
    return v[k]
start_time = timeit.default_timer()
print(bin_coef_3(n, k))
print('>> elapsed time', (timeit.default_timer() - start_time) * 1000, 'ms')
```
With time complexity of $ \Theta(nk) $ and a space complexity of $ \Theta(k) $.
## 4.2. World Championship problem
```
# Example values
n = 10
p = 0.55
q = 1 - p
```
### 4.2.1. Simple approach
```
# The recursive natural solution
def WCP(i, j):
    """World-championship probability that team A wins the series.

    `i`/`j` are the wins still needed by teams A and B; uses the module-level
    per-game probabilities p and q = 1 - p. Naive exponential recursion.
    """
    if i == 0:
        return 1  # A already has enough wins
    if j == 0:
        return 0  # B already clinched the series
    return q * WCP(i, j - 1) + p * WCP(i - 1, j)
start_time = timeit.default_timer()
print(WCP(n, n))
print('>> elapsed time', (timeit.default_timer() - start_time) * 1000, 'ms')
```
### 4.2.2. Dynamic Programming
```
# Solution with dynamic programming (supported by a table)
def WCP2(n, p):
    """Series-win probability computed bottom-up with a DP table.

    table[i][j] = probability team A wins when it needs i more wins and the
    table is filled anti-diagonal by anti-diagonal.
    """
    size = n + 1
    q = 1 - p
    table = [[0] * size for _ in range(size)]
    # Upper-left triangle: one anti-diagonal at a time.
    for diag in range(size):
        table[0][diag] = 1  # needing zero wins means certain victory
        for row in range(1, diag):
            table[row][diag - row] = p * table[row - 1][diag - row] + q * table[row][diag - row - 1]
    # Lower-right triangle, continuing along anti-diagonals.
    for diag in range(1, size):
        for off in range(0, size - diag):
            table[diag + off][size - off - 1] = p * table[diag + off - 1][size - off - 1] + q * table[diag + off][size - off - 2]
    return table[size - 1][size - 1]
start_time = timeit.default_timer()
print(WCP2(n, p))
print('>> elapsed time', (timeit.default_timer() - start_time) * 1000, 'ms')
```
With time complexity of $ \Theta(n^2) $ and a space complexity of $ \Theta(n^2) $.
## 4.3. Coin Change problem
The **coin-change problem** or change-making problem addresses the question of finding the minimum number of coins (of certain denominations) that add up to a given amount of money. It is a special case of the integer knapsack problem, and has applications wider than just currency <a href="#link_four">[4]</a>.
#### Returns all possible combinations of coins change with Dynamic Programming
Version with unlimited supply of coins.
```
def calc_coin_change(N, d):
    """Build the minimum-coin-change DP table.

    matrix[i][j] is the fewest coins needed to make amount j using only the
    first i + 1 denominations of d (unlimited supply).  Unreachable amounts
    in the first row are marked with math.inf.  Returns the full table.
    """
    n = len(d)
    matrix = [[0] * (N + 1) for _ in range(n)]
    for i in range(n):
        for j in range(1, N + 1):
            if i == 0:
                # Only the smallest denomination is available.
                matrix[i][j] = math.inf if j < d[0] else 1 + matrix[0][j - d[0]]
            elif j < d[i]:
                # Coin i does not fit: inherit the row above.
                matrix[i][j] = matrix[i - 1][j]
            else:
                # Either skip coin i or spend one of it.
                matrix[i][j] = min(matrix[i - 1][j], 1 + matrix[i][j - d[i]])
    return matrix
# Example values
N = 8
d = [1, 4, 6]
# Showing results
dp_table = calc_coin_change(N, d)
pd.DataFrame(dp_table, index=d)
```
With time complexity of $ \Theta(nN) $ and a space complexity of $ \Theta(n(N + 1)) $.
#### Calculate the list of coins needed to give change
Greedy approach
```
def get_coins_list(c, d, N, verbose=False):
    """Backtrack through the change-making DP table c to list the coins used.

    c is the table built by calc_coin_change, d the denominations, N the
    target amount.  Returns the denominations chosen (largest-index first).
    """
    coins = []
    i, j = len(d) - 1, N
    while i > -1 and j > -1:
        if verbose:
            print(i, j)
        if i - 1 >= 0 and c[i][j] == c[i - 1][j]:
            # Value came from the row above: coin i was not used here.
            i -= 1
        elif j - d[i] >= 0 and c[i][j] == 1 + c[i][j - d[i]]:
            # One coin of denomination d[i] was spent.
            coins.append(d[i])
            j -= d[i]
        else:
            break
    return coins
# List of coins for each scenario
for j in range(0, N + 1):
print(j, '->', get_coins_list(dp_table, d, j))
```
With time complexity of $ \Theta(n + c[n, N]) $ and a space complexity of $ \Theta(n(N + 1)) $.
## 4.4. The Knapsack problem
The **knapsack problem** or rucksack problem is a problem in combinatorial optimization: Given a set of items, each with a weight and a value, determine the number of each item to include in a collection so that the total weight is less than or equal to a given limit **W** and the total value is as large as possible. It derives its name from the problem faced by someone who is constrained by a fixed-size knapsack and must fill it with the most valuable items <a href="#link_five">[5]</a>.
#### Get best items combination with Dynamic Programming
```
def calc_best_knapsack(w, v, W):
    """Build the 0/1 knapsack DP table.

    matrix[i][j] is the best total value achievable with capacity j using
    only the first i + 1 items (weights w, values v).  Capacities the first
    item cannot fill are marked -math.inf.  Returns the full table.
    """
    n = len(v)
    matrix = [[0] * (W + 1) for _ in range(n)]
    for i in range(n):
        for j in range(1, W + 1):
            if i == 0:
                # Single-item row: either the item fits or nothing does.
                matrix[i][j] = v[0] if j >= w[0] else -math.inf
            elif j < w[i]:
                # Item i is too heavy for capacity j.
                matrix[i][j] = matrix[i - 1][j]
            else:
                # Either leave item i out or take it once.
                matrix[i][j] = max(matrix[i - 1][j], matrix[i - 1][j - w[i]] + v[i])
    return matrix
# Example values
w = [1, 2, 5, 6, 7]
v = [1, 6, 18, 22, 28]
max_weight = 11
# Run algorithm
dp_table = calc_best_knapsack(w, v, max_weight)
df_index = ["w:" + str(w[i]) + ", v:" + str(v[i]) for i in range(len(v))]
pd.DataFrame(dp_table, index=df_index)
```
With time complexity of $ \Theta(nW) $ and a space complexity of $ \Theta(n(W + 1)) $.
#### Calculate the list of items needed to fill the backpack
Greedy approach
```
def get_items_list(values, v, w, W, verbose=False):
    """Backtrack through the knapsack DP table to list the items taken.

    values is the table from calc_best_knapsack, v/w the item values and
    weights, W the capacity.  Returns a list of {"w": ..., "v": ...} dicts.
    """
    items = []
    i, j = len(w) - 1, W
    while i > -1 and j > -1:
        if verbose:
            print(i, j)
        if i - 1 >= 0 and values[i][j] == values[i - 1][j]:
            # Same value without item i: it was not taken.
            i -= 1
        elif i - 1 >= 0 and j - w[i] >= 0 and values[i][j] == values[i - 1][j - w[i]] + v[i]:
            # Item i was packed: record it and reduce the capacity.
            items.append({"w": w[i], "v": v[i]})
            j -= w[i]
            i -= 1
        elif i == 0 and values[i][j] == v[i]:
            # First row: the single remaining item was packed.
            items.append({"w": w[i], "v": v[i]})
            break
        else:
            break
    return items
# List of chosen items for every knapsack capacity from 0 to max_weight
for j in range(0, max_weight + 1):
    print(j, '->', get_items_list(dp_table, v, w, j))
```
With time complexity of $ \Theta(n + W) $ and a space complexity of $ \Theta(n(W + 1)) $.
## 4.5. Longest Common Subsequence (LCS) problem
The **longest common subsequence** (LCS) problem is the problem of finding the longest subsequence common to all sequences in a set of sequences (often just two sequences). It differs from the longest common substring problem: unlike substrings, subsequences are not required to occupy consecutive positions within the original sequences <a href="#link_six">[6]</a>.
The longest common subsequence problem is a classic computer science problem, the basis of data comparison programs such as the diff utility, and has applications in computational linguistics and bioinformatics. It is also widely used by revision control systems such as Git for reconciling multiple changes made to a revision-controlled collection of files.
#### Get the Longest Common Subsequence with Dynamic Programming
```
def calc_lcs(a, b):
    """Build the Longest Common Subsequence DP table for sequences a and b.

    matrix[i][j] is the LCS length of a[:i] and b[:j]; row/column 0 are the
    empty-prefix base cases.  Returns the full (len(a)+1) x (len(b)+1) table.
    """
    rows, cols = len(a), len(b)
    matrix = [[0] * (cols + 1) for _ in range(rows + 1)]
    for i, item_a in enumerate(a, start=1):
        for j, item_b in enumerate(b, start=1):
            if item_a == item_b:
                # Matching tail symbols extend the diagonal prefix by one.
                matrix[i][j] = matrix[i - 1][j - 1] + 1
            else:
                # Otherwise keep the better of dropping one tail symbol.
                matrix[i][j] = max(matrix[i - 1][j], matrix[i][j - 1])
    return matrix
# Example values
a = ['X', 'M', 'J', 'Y', 'A', 'U', 'Z']
b = ['M', 'Z', 'J', 'A', 'W', 'X', 'U']
# Run algorithm
dp_table = calc_lcs(a, b)
pd.DataFrame(dp_table, index=['-'] + a, columns=['-'] + b)
```
With time complexity of $ \Theta(nm) $ and a space complexity of $ \Theta((n + 1)(m + 1)) $.
#### Calculate the Longest Common Subsequence
Greedy approach
```
def get_lcs(matrix, a, b, verbose=False):
    """Recover one longest common subsequence from the DP table.

    matrix is the table built by calc_lcs for sequences a and b.  Walks from
    the bottom-right corner back to the origin and returns the subsequence
    in left-to-right order.
    """
    subseq = []
    i, j = len(a), len(b)
    while i > -1 and j > -1:
        if verbose:
            print(i, j)
        if i > 0 and j > 0 and a[i - 1] == b[j - 1]:
            # Matching symbols belong to the LCS: step diagonally.
            subseq.append(a[i - 1])
            i, j = i - 1, j - 1
        elif j > 0 and (i == 0 or matrix[i][j - 1] >= matrix[i - 1][j]):
            j -= 1
        elif i > 0 and (j == 0 or matrix[i][j - 1] < matrix[i - 1][j]):
            i -= 1
        else:
            break
    # Collected back-to-front, so reverse before returning.
    subseq.reverse()
    return subseq
# This function gets the longest common subsequence
get_lcs(dp_table, a, b)
```
With time complexity of $ \Theta(n + m) $ and a space complexity of $ \Theta((n + 1)(m + 1)) $.
## 4.6. Sequence Alignment problem
In bioinformatics, a sequence alignment is a way of arranging the sequences of DNA, RNA, or protein to identify regions of similarity that may be a consequence of functional, structural, or evolutionary relationships between the sequences <a href="#link_seven">[7]</a>. Aligned sequences of nucleotide or amino acid residues are typically represented as rows within a matrix. Gaps are inserted between the residues so that identical or similar characters are aligned in successive columns.
Sequence alignments are also used for non-biological sequences, such as calculating the distance cost between strings in a natural language or in financial data.
### 4.6.1. The Needleman-Wunsch algorithm will be used
It was one of the first applications of dynamic programming to compare biological sequences. The algorithm essentially divides a large problem (e.g. the full sequence) into a series of smaller problems, and it uses the solutions to the smaller problems to find an optimal solution to the larger problem <a href="#link_eight">[8]</a>.
```
# Scoring function for one aligned pair of symbols ('-' denotes a gap)
def s(x, y):
    """Return +1 for a match, -1 for a gap or a mismatch."""
    if x == '-' or y == '-':
        return -1  # gap penalty
    return 1 if x == y else -1  # match reward / mismatch penalty

# Needleman-Wunsch algorithm to calculate the sequence alignment
def calc_seq_align(a, b):
    """Build the Needleman-Wunsch global-alignment score table.

    Rows correspond to prefixes of b, columns to prefixes of a; row/column 0
    accumulate pure gap penalties.  Returns the (len(b)+1) x (len(a)+1) table.
    """
    m, n = len(a), len(b)
    matrix = [[0] * (m + 1) for _ in range(n + 1)]
    # Boundary rows: aligning a prefix against nothing costs one gap per symbol.
    for i in range(n + 1):
        matrix[i][0] = i * s('-', b[i - 1])
    for j in range(m + 1):
        matrix[0][j] = j * s(a[j - 1], '-')
    for i in range(1, n + 1):
        for j in range(1, m + 1):
            diag = matrix[i - 1][j - 1] + s(a[j - 1], b[i - 1])  # align a[j-1] with b[i-1]
            up = matrix[i - 1][j] + s('-', b[i - 1])             # gap in a
            left = matrix[i][j - 1] + s(a[j - 1], '-')           # gap in b
            matrix[i][j] = max(diag, up, left)
    return matrix
# Example values
a = list('GCATGCUA')
b = list('GATTACA')
# Run algorithm
dp_table = calc_seq_align(a, b)
pd.DataFrame(dp_table, index=['-'] + b, columns=['-'] + a)
```
With time complexity of $ \Theta(nm) $ and a space complexity of $ \Theta((n + 1)(m + 1)) $.
#### Calculate the Sequence Alignment result
Greedy approach
```
def get_seq_align(matrix, a, b, verbose=False):
    """Recover one optimal global alignment from the Needleman-Wunsch table.

    matrix is the table from calc_seq_align for sequences a and b (uses the
    notebook-level scoring function s).  Returns a pair of equal-length
    strings where '-' marks a gap.
    """
    aligned_a, aligned_b = "", ""
    j, i = len(a), len(b)
    while i > -1 and j > -1:
        if verbose:
            print(i, j)
        if i > 0 and j > 0 and matrix[i][j] == matrix[i - 1][j - 1] + s(a[j - 1], b[i - 1]):
            # Diagonal move: a[j-1] aligned with b[i-1].
            aligned_a = a[j - 1] + aligned_a
            aligned_b = b[i - 1] + aligned_b
            i -= 1
            j -= 1
        elif i > 0 and matrix[i][j] == matrix[i - 1][j] + s('-', b[i - 1]):
            # Vertical move: gap in sequence a.
            aligned_a = "-" + aligned_a
            aligned_b = b[i - 1] + aligned_b
            i -= 1
        elif j > 0 and matrix[i][j] == matrix[i][j - 1] + s(a[j - 1], '-'):
            # Horizontal move: gap in sequence b.
            aligned_a = a[j - 1] + aligned_a
            aligned_b = "-" + aligned_b
            j -= 1
        else:
            break
    return (aligned_a, aligned_b)
# This function gets the Sequence Alignment
get_seq_align(dp_table, a, b)
```
With time complexity of $ \Theta(n + m) $ and a space complexity of $ \Theta((n + 1)(m + 1)) $.
## 4.7. All-Pairs Shortest Path
The all-pairs shortest path problem is the determination of the shortest graph distances between every pair of vertices in a given graph. The problem can be solved using n applications of Dijkstra's algorithm or all at once using the Floyd-Warshall algorithm <a href="#link_nine">[9]</a>.
- The Dijkstra's algorithm has a time complexity of $ \Theta(n^2) $ and a space complexity of $ \Theta(n) $.
- The Floyd-Warshall algorithm has a time complexity of $ \Theta(n^3) $ and a space complexity of $ \Theta(n^2) $.
Please click <a href="https://ansegura7.github.io/Algorithms/graphs/Graphs.html#3.5.-Shortest-Path" target="_blank">here</a> to see an example of both algorithms in the Graphs section. The second one is solved with dynamic programming.
## Reference
<a name='link_one' href='https://en.wikipedia.org/wiki/Dynamic_programming' target='_blank' >[1]</a> Wikipedia - Dynamic Programming.
<a name='link_two' href='https://en.wikipedia.org/wiki/Bellman_equation' target='_blank' >[2]</a> Wikipedia - Principle of Optimality.
<a name='link_three' href='https://en.wikipedia.org/wiki/Binomial_coefficient' target='_blank' >[3]</a> Wikipedia - Binomial coefficient.
<a name='link_four' href='https://en.wikipedia.org/wiki/Change-making_problem' target='_blank' >[4]</a> Wikipedia - Change-making problem.
<a name='link_five' href='https://en.wikipedia.org/wiki/Knapsack_problem' target='_blank' >[5]</a> Wikipedia - Knapsack problem.
<a name='link_six' href='https://en.wikipedia.org/wiki/Longest_common_subsequence_problem' target='_blank' >[6]</a> Wikipedia - Longest common subsequence problem.
<a name='link_seven' href='https://en.wikipedia.org/wiki/Sequence_alignment' target='_blank' >[7]</a> Wikipedia - Sequence alignment problem.
<a name='link_eight' href='https://en.wikipedia.org/wiki/Needleman-Wunsch_algorithm' target='_blank' >[8]</a> Wikipedia - Needleman-Wunsch algorithm.
<a name="link_nine" href="https://en.wikipedia.org/wiki/Floyd%E2%80%93Warshall_algorithm" target="_blank">[9]</a> Wikipedia - All-Pairs Shortest Path.
---
<a href="https://ansegura7.github.io/Algorithms/">« Home</a>
| github_jupyter |
```
import torch
import pandas as pd
import numpy as np
import sklearn
from collections import Counter
from sklearn.utils import Bunch
from sklearn.model_selection import train_test_split
from sklearn.metrics import classification_report
from itertools import combinations
import re
import os
import torch.nn as nn
import matplotlib.pyplot as plt
```
# Data Loading
```
path = r'E:\github\movie_hatespeech_detection\data\movies_for_training\all_movies.csv'
df = pd.read_csv(path, index_col=0)
df = df.rename(columns={'majority_answer': 'label'})
df.head()
df.shape[0]
df[df.label==2].shape[0] / df.shape[0]
print(df.label.value_counts())
df.label.value_counts().plot(kind='pie', subplots=True, autopct='%1.0f%%', title='Hate Speech Distribution')
movie_names = df.movie_name.unique()
```
## Data Splitting
```
def split_dataset(df, test_movie, seed):
    """Leave-one-movie-out split of the annotated dataframe.

    Rows of test_movie form the test set; all other rows form the training
    set, shuffled with the given seed.  Returns (train_texts, train_labels,
    test_texts, test_labels) as numpy arrays.
    """
    is_test = df.movie_name == test_movie
    test = df[is_test]
    # Shuffle the remaining movies reproducibly.
    train = df[~is_test].sample(frac=1, random_state=seed)
    return train.text.values, train.label.values, test.text.values, test.label.values
categories = [0,1,2]
seed = 11
movie_index = 5
test_movie = movie_names[movie_index]
train, train_targets, test, test_targets = split_dataset(df, test_movie, seed)
train_size = len(train)
test_size = len(test)
print(train_size)
print(test_size)
def calculate_dataset_class_distribution(targets, categories):
    """Return ([category, proportion], ...) for the first three categories.

    targets is an iterable of class labels; categories fixes the output
    order (missing categories yield NaN proportions via reindex).

    Fix: the original paired the label s.index[i] with s[i], which is a
    *label* lookup on an integer index — correct only because categories
    happens to be [0, 1, 2].  Positional access via .iloc matches the
    intent for any category values and avoids the deprecated ambiguous
    integer indexing.
    """
    df = pd.DataFrame({'category': targets})
    # Normalized frequency of each label, reordered to match `categories`.
    s = df.category.value_counts(normalize=True).reindex(categories)
    return ([s.index[0], s.iloc[0]],
            [s.index[1], s.iloc[1]],
            [s.index[2], s.iloc[2]])
train_class_distribution = calculate_dataset_class_distribution(train_targets, categories)
test_class_distribution = calculate_dataset_class_distribution(test_targets, categories)
print(train_class_distribution)
print(test_class_distribution)
train_ds = Bunch(data=train, target=train_targets)
test_ds = Bunch(data=test, target=test_targets)
```
## Building the Model
```
# Getting all the vocabularies and indexing to a unique position
vocab = Counter()
#Indexing words from the training data
for text in train_ds.data:
for word in text.split(' '):
vocab[word.lower()]+=1
#Indexing words from the training data
for text in test_ds.data:
for word in text.split(' '):
vocab[word.lower()]+=1
total_words = len(vocab)
def get_word_2_index(vocab):
    """Map each vocabulary word (lower-cased) to a unique integer index.

    Iteration order of vocab determines the indices; if two entries share a
    lower-cased form, the later one wins — same as the original loop.
    """
    return {word.lower(): i for i, word in enumerate(vocab)}
word2index = get_word_2_index(vocab)
print(len(word2index))
print(word2index["the"]) # Showing the index of 'the'
print (total_words)
# define the network
class News_20_Net(nn.Module):
    """Two-hidden-layer feed-forward classifier over bag-of-words vectors.

    NOTE(review): every layer is moved to the GPU via .cuda() at
    construction time, so a CUDA device is required to instantiate this
    class.  The name stems from a 20-newsgroups example; here it is used
    for the 3-class hate-speech task.
    """
    def __init__(self, input_size, hidden_size, num_classes):
        super(News_20_Net, self).__init__()
        # input (vocabulary-sized bag-of-words) -> hidden
        self.layer_1 = nn.Linear(input_size,hidden_size, bias=True).cuda()
        self.relu = nn.ReLU().cuda()
        # hidden -> hidden
        self.layer_2 = nn.Linear(hidden_size, hidden_size, bias=True).cuda()
        # hidden -> class scores (raw logits; no softmax in forward)
        self.output_layer = nn.Linear(hidden_size, num_classes, bias=True).cuda()
    # accept input and return an output
    def forward(self, x):
        """Return raw class logits for a batch of bag-of-words vectors x."""
        out = self.layer_1(x)
        out = self.relu(out)
        out = self.layer_2(out)
        out = self.relu(out)
        out = self.output_layer(out)
        return out
def get_batch(df, i, batch_size):
    """Return the i-th (features, targets) mini-batch from a Bunch dataset.

    Each text is vectorized as a bag-of-words count vector over the
    notebook-level vocabulary (total_words, word2index).  Labels 0/1/2 are
    kept as-is; anything else maps to -1, matching the original if/elif
    chain.
    """
    start = i * batch_size
    texts = df.data[start:start + batch_size]
    categories = df.target[start:start + batch_size]
    batches = []
    for text in texts:
        # One count vector per document, sized to the full vocabulary.
        bow = np.zeros(total_words, dtype=float)
        for word in text.split(' '):
            bow[word2index[word.lower()]] += 1
        batches.append(bow)
    # Pass known class ids through; unknown ids become -1.
    results = [category if category in (0, 1, 2) else -1 for category in categories]
    return np.array(batches), np.array(results)
# Training hyper-parameters
learning_rate = 0.001
num_epochs = 8
batch_size = 32
display_step = 10 # record the loss every `display_step` batches; print only every display_step*10
# Network Parameters
hidden_size = 100 # 1st layer and 2nd layer number of features
input_size = total_words # Words in vocab
num_classes = len(categories) # 3 classes here (labels 0/1/2 -> normal/offensive/hate)
```
## Training
```
results = []  # per-logging-step loss records, later assembled into df_results
news_net = News_20_Net(input_size, hidden_size, num_classes)
# Loss and Optimizer
criterion = nn.CrossEntropyLoss() # This includes the Softmax loss function
optimizer = torch.optim.Adam(news_net.parameters(), lr=learning_rate)
# Train the Model
for epoch in range(num_epochs):
    # determine the number of mini-batches based on the batch size and size of training data
    total_batch = int(len(train_ds.data)/batch_size)
    # Loop over all batches (the last partial batch is dropped by the int() above)
    for i in range(total_batch):
        batch_x,batch_y = get_batch(train_ds,i,batch_size)
        # bag-of-words features and integer targets, created directly on the GPU
        articles = torch.cuda.FloatTensor(batch_x, device='cuda')
        labels = torch.cuda.LongTensor(batch_y, device='cuda')
        # Forward + Backward + Optimize
        optimizer.zero_grad() # zero the gradient buffer
        outputs = news_net(articles)
        loss = criterion(outputs, labels)
        loss.backward()
        optimizer.step()
        if (i+1) % display_step == 0:
            # NOTE(review): `result` is formatted but never used afterwards
            result = 'Epoch [%d/%d], Step [%d/%d], Loss: %.4f'%(epoch+1, num_epochs, i+1, len(train_ds.data)/batch_size, loss.data)
            results.append({'Epoch': epoch+1, 'Step': i+1, 'Loss': loss.data.item()})
            # print a progress line only every display_step*10 batches
            if (i+1) % (display_step*10) == 0:
                print({'Epoch': epoch+1, 'Step': i+1, 'Loss': loss.data.item()})
```
## Validation
```
# Test the Model on the held-out movie
correct = 0  # NOTE(review): never updated below
total = 0    # NOTE(review): never updated below
total_test_data = len(test_ds.target)
iterates = total_test_data/batch_size # truncated via int() below, so the last (<batch_size) batch is ignored
all_total = []    # NOTE(review): unused
all_correct = []  # NOTE(review): unused
labels_all = []     # ground-truth labels accumulated over all evaluated batches
predicted_all = []  # argmax predictions accumulated over all evaluated batches
for i in range(int(iterates)):
    batch_x_test,batch_y_test = get_batch(test_ds,i,batch_size)
    # move features and targets to the GPU used for training
    articles = torch.FloatTensor(batch_x_test).to('cuda')
    labels = torch.LongTensor(batch_y_test).to('cuda')
    outputs = news_net(articles)
    # predicted class = index of the largest logit per row
    _, predicted = torch.max(outputs.data, 1)
    labels_all.extend([x.item() for x in labels])
    predicted_all.extend([x.item() for x in predicted])
categories = ['normal', 'offensive', 'hate']
# per-class precision/recall/F1 as a dict, saved to one CSV per test movie
report = classification_report(labels_all, predicted_all, target_names=categories, output_dict=True)
df_report = pd.DataFrame(report).transpose()
df_report.to_csv(movie_names[movie_index] + '.csv')
df_results = pd.DataFrame(results)
df_results['learning_rate'] = learning_rate
df_results['num_epochs'] = num_epochs
df_results['batch_size'] = batch_size
df_results['num_classes'] = num_classes
df_results['test_size'] = test_size
df_results['train_size'] = train_size
df_results['seed'] = seed
df_results['test_size'] = test_size
df_results['train_class_proportion_' + categories[0]] = round(train_class_distribution[0][1], 2)
df_results['train_class_proportion_' + categories[1]] = round(train_class_distribution[1][1], 2)
df_results['train_class_proportion_' + categories[2]] = round(train_class_distribution[2][1], 2)
df_results['test_class_proportion_' + categories[0]] = round(test_class_distribution[0][1], 2)
df_results['test_class_proportion_' + categories[1]] = round(test_class_distribution[1][1], 2)
df_results['test_class_proportion_' + categories[2]] = round(test_class_distribution[2][1], 2)
df_results.head()
def plot_loss(df, ax, title):
    """Plot per-epoch training-loss curves onto the given axes.

    df must contain 'Epoch' and 'Loss' columns (one line per epoch is drawn).
    Returns the axes for chaining.
    """
    per_epoch = df.groupby('Epoch').Loss
    per_epoch.plot(kind='line', legend=True, title=title, ax=ax, figsize=(15, 8))
    ax.legend(loc='upper right')
    plt.xlabel('Step')
    plt.ylabel('Loss')
    return ax
fig, axs = plt.subplots(1,1)
plot_loss(df_results, axs, title='Loss Word Embeddings, Twitter Dataset')
```
----
# Average Results
```
def load_df(path):
    """Load one per-movie classification-report CSV.

    The movie name is taken from the path up to its first '.', the
    unnamed index column is renamed to 'label', and the resulting frame
    is returned with a 'movie_name' column added.
    """
    movie = path.split('.')[0]
    frame = pd.read_csv(path)
    frame['movie_name'] = movie
    return frame.rename(columns={'Unnamed: 0': 'label'})
pathes = ['TheWolfofWallStreet.csv', 'South_Park.csv', 'Pulp_Fiction.csv', 'Django_Unchained.csv', 'AmerricanHistoryX.csv', 'BlacKkKlansman.csv']
dataframes = []
for path in pathes:
df = load_df(path)
dataframes.append(df)
result_df = pd.concat(dataframes)
result_df.head()
```
## macro avg
```
result_df[result_df.label=='macro avg'].groupby('movie_name')['f1-score'].mean().values.mean().round(2)
```
### Accuracy
```
result_df[result_df.label=='accuracy'].precision.mean()
def get_precision_recall_f1(category):
    """Average precision, recall and F1 for `category` across all movies.

    Reads the notebook-level result_df (concatenated per-movie
    classification reports) and returns a dict with keys
    'label', 'precision', 'recall', 'f1'.

    Fixes: removed the unused `macro_avg` local (it duplicated the f1
    computation) and filters result_df once instead of four times.
    """
    rows = result_df[result_df.label == category]
    return {'label': category,
            'precision': rows.precision.mean(),
            'recall': rows.recall.mean(),
            'f1': rows['f1-score'].mean()}
normal_dict = get_precision_recall_f1('normal')
offensive_dict = get_precision_recall_f1('offensive')
hate_dict = get_precision_recall_f1('hate')
df_result = pd.DataFrame([normal_dict, offensive_dict, hate_dict])
df_result
```
| github_jupyter |
# ANEOS MODEL INTERFACE JUPYTER NOTEBOOK
Sarah T. Stewart<br>
sts@ucdavis.edu<br>
Created October 22, 2019<br>
Updated January 21, 2020<p>
***
***
## COMMENTS ON ANEOS IRON MODEL WITH MELT CURVE
These parameters were obtained by R. Canup from E. Pierazzo and H. J. Melosh circa 2001-2002.
This version was used in Canup 2004, 2008, 2012; Canup et al. 2013; Nakajima & Stevenson 2014, 2015; Barr 2016; Golabek et al. 2018; Emsenhuber et al. 2018; Deng et al. 2019. The parameters were tabulated in Canup 2012 and Barr 2016. Note that the value for Esep in Table 2 of Emsenhuber et al. 2018 is incorrect.
This is a solid, liquid, gas phase ANEOS model with ionization for pure iron.
Issues:<br>
- The temperatures on the melt curve are too high.<br>
- The model includes one solid phase. Iron has several solid phases.<br>
- The vapor curve does not match available data. The critical point temperature is too high.<br>
- In the original ANEOS, the temperatures are too high and the entropies are too low in the liquid region. The discrepancy arises because the ANEOS model has a limiting 3nR heat capacity; the true heat capacities in liquid iron are larger.<br>
***
***
<b>STS ANEOS INTERFACE NOTEBOOK GUIDE</b>
<b>USAGE</b><br>
This notebook interacts with multiple outputs from the Stewart-modified version of ANEOS, including phase boundary information and tabulated EOS.
This notebook is designed to be used in two modes:<br>
<ul>
<li> Development mode: ANEOS is called in the local directory to generate new model output. The user can vary the model input parameters and tabular gridding schemes. This notebook produces plots of the equation of state surface and comparisons to experimental data and ab initio calculations.
<li> Release mode: the notebook interacts with previously created ANEOS model output. This notebook provides detailed documentation of a version-controlled release of the model EOS and enables users to interact with the EOS surface and compare the model to data.
</ul>
This notebook is distributed in release mode. Users who wish to use the development mode may request the modified version of ANEOS from the author.<p>
Development mode:<br>
The notebook calls a local aneos executable which expects a formatted ANEOS.INPUT ascii file and an ascii gridtable.txt definition file that is generated by this notebook. ANEOS produces an ANEOS.OUTPUT ascii file and gridded ascii SESAME-format EOS tables.<br>
The user edits (1) the input parameters in ANEOS.INPUT (separately from this notebook) and (2) the output EOS table parameters in the notebook cell below.<br>
This notebook reads some parameters from ANEOS.INPUT and assumes that the variables are spaced by 10 columns.<br>
When the notebook is run, the code issues a system call to run the aneos program in the local directory and reads in results from the ANEOS.OUTPUT, NEW-SESAME-STD.TXT, and NEW-SESAME-EXT.TXT files to make the plots below and to generate the GADGET format EOS table and other miscellaneous files.<p>
Release mode:<br>
This notebook reads in the provided ANEOS.INPUT, EOS tables, and other distribution data to generate the plots below. <p>
<b>OUTPUT/PROVIDED DATA FILES</b><br>
ANEOS.OUTPUT: Plain text file with an overview of the calculated equation of state. Look at this file for any errors in the EOS calculation.<br>
NEW-SESAME-STD.TXT: Standard length Sandia-style SESAME file with 201 table and 301 table (density, temperature, pressure, sp. internal energy, Helmholtz free energy). 301 table units: g/cm$^3$, K, GPa, MJ/kg, MJ/kg. <br>
NEW-SESAME-EXT.TXT: SESAME-style table with extra variables from ANEOS. Contains the standard 201 table and non-standard 301-extra-variables EOS table. The 301 table has: density grid values, temperature grid values, sp. entropy(T,rho), sound speed(T,rho), sp. heat capacity(T,rho), KPA flag(T,rho). 2-D arrays list all densities, looping over each temperature. 301 table units: g/cm$^3$, K, MJ/K/kg, cm/s, MJ/K/kg, integer flag, integer flag. The KPA flag is an ANEOS output with phase information. <br>
NEW-SESAME-HUG.TXT: Ascii table with the reference state Hugoniot. <br>
NEW-SESAME-STD-NOTENSION.TXT: Standard length Sandia-style SESAME file with 201 table and 301 table (density, temperature, pressure, sp. internal energy, Helmholtz free energy) where the ANEOS tension region is removed and replaced with the solid-vapor coexistence region. 301 table units: g/cm$^3$, K, GPa, MJ/kg, MJ/kg. <br>
NEW-GADGET2-STD-NOTENSION.TXT: Standard GADGET2 EOS table generated by interpolating the ANEOS table with no tension. Format: number of density points, number of sp. entropy points, density grid values, sp. entropy grid values, 2-D arrays of pressure, temperature, specific internal energy, sound speed. 2-D array values list all densities, looping over each value for specific entropy. Units: g/cm$^3$, erg/K/g, dynes/cm$^2$, K, erg/g, cm/s. <br>
NEW-GADGET2-EXT-NOTENSION.TXT: Extended variables GADGET2 EOS table generated by interpolating the ANEOS table with no tension. Format: number of density points, number of sp. entropy points, density grid values, sp. entropy grid values, 2-D arrays of Helmholtz free energy, KPA flag, MDQ flag. 2-D array values list all densities, looping over each value for specific entropy. Units: g/cm$^3$, erg/K/g, erg/g, integer flag, integer flag. <br>
The SESAME tabular equation of state formats are described in Lyon and Johnson (1992).<p>
<b>ANEOS NOTES</b><br>
The ANEOS source code was provided by Jay Melosh and Gareth Collins. This version included Melosh's (2007) treatment for molecular gas and the capability to include a melt curve and solid-solid/liquid-liquid transition (Collins & Melosh LPSC 2014).<br>
In Stewart et al. 2019, the Melosh and Collins version of ANEOS was modified to adjust the Debye model for the thermal term in the Helmholtz free energy to approach a user-defined heat capacity at high temperatures. The multiplicative factor $f_{cv}$ is entered in input value V44, and the high-temperature heat capacity is $3f_{cv}Nk$.<p>
The ANEOSTEST.f routine was modified to output tabulated EOS. Note that the current version of this function sets positive pressures smaller than 1.E-30 GPa equal to 1.E-30 GPa.<br>
ANEOS2.f was modified to increase the number of points tabulated on the melt curve in the ANEOS.OUTPUT file and to gather the variables for the heat capacity modification.<br>
ANHUG.f was modified to output more Hugoniot points.<br>
ANEOS1.f and ANEOS2.f were modified to increase the high temperature limit for the heat capacity (Stewart et al., SCCM19).<p>
<b>CORRECTIONS AND IMPROVEMENTS</b><br>
Please send corrections to STS and any requests for data to include in the model-data comparison plots.<p>
```
# this cell has a java script that that enables hiding the raw python code
from IPython.display import HTML
HTML('''<script>
code_show=true;
function code_toggle() {
if (code_show){
$('div.input').hide();
} else {
$('div.input').show();
}
code_show = !code_show
}
$( document ).ready(code_toggle);
</script>
<form action="javascript:code_toggle()"><input type="submit" value="Click here to toggle on/off the raw code."></form>''')
# Record the verions information for these calculation for posterity
import platform
print('python version: ',platform.python_version())
del platform
import matplotlib
print('matplotlib version: ', matplotlib.__version__)
del matplotlib
import numpy
print('numpy version: ', numpy.__version__)
del numpy
import scipy
print('scipy version: ', scipy.__version__)
del scipy
import pandas
print('pandas version: ', pandas.__version__)
del pandas
# this prevents some error messages for inline plotting in a jupyter notebook
%matplotlib inline
# import python libraries
import matplotlib.pyplot as plt
import matplotlib.cm as cm
import matplotlib.colors as colors
from mpl_toolkits.axes_grid1 import make_axes_locatable
from copy import deepcopy
import numpy as np
from scipy import interpolate
import colormaps as local_cmaps
import subprocess
import pandas as pd
from matplotlib.collections import PatchCollection
from matplotlib.patches import Rectangle
#
plt.rcParams["figure.figsize"] = (20,10)
plt.rcParams["patch.force_edgecolor"] = True
```
## USER INPUTS FOR SESAME AND GADGET2 TABLE CONSTRUCTION
If the code cell below is hidden, use the button above to reveal the cell.<p>
In development mode, the user must input:<br>
1. Header information for the SESAME table.<br>
2. Temperature, density and entropy grid points.<p>
The following code cell also includes the development mode flag and
option to skip construction of a GADGET2 table if it is not needed.<p>
```
# -------------------------------------------------------------
# USER INPUTS
#-------------------------------------------------------------
# NOTEBOOK MODE
#-------------------------------------------------------------
#
DEVMODE = 1 # 1=development mode; 0=release mode
#
#-------------------------------------------------------------
# OPTIONS TO REMOVE TENSION REGION AND CONSTRUCT A GADGET2 TABLE
#-------------------------------------------------------------
#
REMOVETENSIONFLAG=0 # 0- keep tension region; 1- remove tension region
MAKEGADGETTABLE=0 # 1=make table; 0=do not make gadget2 table
#
#-------------------------------------------------------------
# USER MUST SET THESE VALUES TO DEFINE THE TABULATED EOS GRIDS
#-------------------------------------------------------------
#
# ====>>>>>> YOU NEED TO MAKE SURE THESE VALUES MATCH ANEOS.INPUT <<<<=====
MODELNAME = 'Iron-ANEOS-SLVTv0.1Gx'
# Header information must all be compatible with float format
MATID = 1.0 # MATID number
DATE = 191022. # Date as a single 6-digit number YYMMDD
VERSION = 0.1 # ANEOS Parameters Version number
FMN = 26. # Formula weight in atomic numbers for Fe
FMW = 55.847 # Formula molecular weight (g/cm3) for Fe
# The following define the default initial state for material in the 201 table
R0REF = 8.06 # g/cm3 *** R0REF is inserted into the density array; using gamma-iron for rho0
K0REF = 1.33E12 # dynes/cm2; using gamma-iron for rho0
T0REF = 298. # K -- *** T0REF is inserted into the temperature array
P0REF = 1.E6 # dynes/cm2 -- this defines the principal Hugoniot calculated below
#
# generate output files for other ANEOS models
modelflag = 1 # 0 - don't change anything above; 1 - Canup et al. 2013;
# 2 - GADGET2 Marcus PhD Thesis & Cuk & Stewart 2012; 3 - ISALE Dellen Collins and Melosh 2014
#
if modelflag == 1:
MODELNAME = 'IRON WITH MELT CURVE'
# Header information must all be compatible with float format
MATID = 1.0 # MATID number
DATE = 191022. # Date as a single 6-digit number YYMMDD
VERSION = 1.0 # ANEOS Parameters Version number
FMN = 26. # Formula weight in atomic numbers for Mg2SiO4
FMW = 55.847 # Formula molecular weight (g/cm3) for Mg2SiO4
# The following define the default initial state for material in the 201 table
R0REF = 7.85 # g/cm3 *** R0REF is inserted into the density array
K0REF = 1.45E12 # dynes/cm2 K0=rho0*cs0^2
T0REF = 298. # K -- *** T0REF is inserted into the temperature array
P0REF = 1.E6 # dynes/cm2 -- this defines the principal Hugoniot calculated below
#-------------------------------------------------------------
# EXAMPLE EOS GRID ARRAYS
#-------------------------------------------------------------
#
# Life is easier if the grid arrays are even in length (after adding T0REF and R0REF below)
#
# Flag for easily switching between different table grids during model development
# 1: high res for testing
gridflag = 1
#
#------------------------------------------------------
#
if gridflag == 0: # low resolution grid
# define the temperature array for the SESAME table
# 0 K is changed to 1 K
# T0REF is inserted into the table if it is not present in the original array
# need to have extra temperature resolution around the triple point
gridtvals = [0, 1750, 1950, 20000.,1.E5, 1.E7] # K -- this array defines the edges of the segments along the grid, 0 is changed to 1 below
gridtstyle = [1,1,1,1,0] # 1 for linear, 0 for log in each segment
gridtnpts = [20, 20, 150.,50.,10.] # number of grid points in each segment
# define the density array for the SESAME AND GADGET2 tables
# R0REF is inserted into the table if it is not present in the original array
# need to have extra density resolution around the triple point
# 0 g/cm3 is removed if present
gridrvals = [1.e-20, 0.5, 6.6, 7.4, 20., 30.] # g/cm3 -- this array defines the edges of the segments along the grid
gridrstyle = [0, 1, 1, 1, 1] # 1 for linear, 0 for log in each segment
gridrnpts = [50, 20, 20, 40, 20] # number of grid points in each segment
# define the specific entropy array for the GADGET2 table
# 0 MJ/K/kg is removed if present
gridsvals = [1.e-4,.1] # MJ/K/kg -- this array defines the edges of the segments along the grid
gridsstyle = [0] # 1 for linear, 0 for log in each segment
gridsnpts = [200] # number of grid points in each segment
#
if gridflag == 1: # new form for the high resolution table entries in 301 table. Try for 1e6 values
# define the temperature array for the SESAME table
# 0 K is changed to 1 K
# T0REF is inserted into the table if it is not present in the original array
# need to have extra temperature resolution around the triple point
gridtvals = [0, 100, 1750, 1950, 20000.,1.E5, 1.E7] # K -- this array defines the edges of the segments along the grid, 0 is changed to 1 below
gridtstyle = [1,1,1,1,1,0] # 1 for linear, 0 for log in each segment
gridtnpts = [2, 75., 100, 500.,85.,50.] # number of grid points in each segment
# define the density array for the SESAME AND GADGET2 tables
# R0REF is inserted into the table if it is not present in the original array
# need to have extra density resolution around the triple point
# 0 g/cm3 is removed if present
gridrvals = [1.e-20, 0.5, 6.6, 7.4, 20., 30.] # g/cm3 -- this array defines the edges of the segments along the grid
gridrstyle = [0, 1, 1, 1, 1] # 1 for linear, 0 for log in each segment
gridrnpts = [150, 84, 101, 400, 100] # number of grid points in each segment
# define the specific entropy array for the GADGET2 table
# 0 MJ/K/kg is removed if present
gridsvals = [1.e-4,.1] # MJ/K/kg -- this array defines the edges of the segments along the grid
gridsstyle = [0] # 1 for linear, 0 for log in each segment
gridsnpts = [800] # number of grid points in each segment
#----------------------------------------
# END USER INPUT
#----------------------------------------
# THIS CELL DEFINEs FUNCTIONS
# define index function (must be a more efficient way to do this...)
get_indexes = lambda x, xs: [i for (y, i) in zip(xs, range(len(xs))) if x == y]
#
# LOAD EXTERNAL FUNCTIONS AND OBJECT CLASSES
# defines rational function interpolation functions rf1d and rf2d
#%run rfinterpolation.py
# defines class objects for extEOStable and functions to read in ANEOS input and output
# READ THIS CODE IF YOU WANT TO UNDERSTAND THE FILE STRUCTURES
# NOTE: %run is an IPython magic -- this file only executes inside a notebook/IPython.
%run eostable.py
#import eostable
# Run the following help for the functions in the class
#help(extEOStable())
# THIS CELL HAS CODE TO GENERATE THE GRIDS AND WRITES TO A FILE TO BE READ BY ANEOS
print('GENERATING ARRAYS FOR THE INDICES OF THE TABULATED EOS.')
#-------------- the following code generates the temperature array
# Build the temperature axis segment by segment from gridtvals/gridtstyle/gridtnpts:
# each segment contributes gridtnpts[i-1] points on [gridtvals[i-1], gridtvals[i]),
# linearly (style 1) or log10-spaced (style 0); the final edge is excluded by design.
gridtarr = []
for iir in range(1,len(gridtvals)):
if gridtstyle[iir-1] == 1:
# linear region
temptarr = np.arange(gridtnpts[iir-1])/gridtnpts[iir-1]*(gridtvals[iir]-gridtvals[iir-1])+gridtvals[iir-1]
if gridtstyle[iir-1] == 0:
# log region
logstep = (np.log10(gridtvals[iir])-np.log10(gridtvals[iir-1]))/gridtnpts[iir-1]
temptarr = np.power(10.,np.log10(gridtvals[iir-1])+(np.arange(gridtnpts[iir-1]))*logstep)
gridtarr = np.concatenate((gridtarr,temptarr))
# check if T0REF already in the grid
# (tolerance comparison, not exact equality, to survive float round-off)
#indexT0REF = get_indexes(T0REF,gridtarr)
indexT0REF = np.where(abs(T0REF-gridtarr)<1.E-4)[0]
if len(indexT0REF) == 0:
# insert T0REF K into the array
# indexT0REF[0] is the first grid point above T0REF; splice T0REF in just before it
indexT0REF = np.where((gridtarr-T0REF)>0.)[0]
gridtarr = np.concatenate((gridtarr[0:indexT0REF[0]],[T0REF],gridtarr[indexT0REF[0]::]))
if indexT0REF[0]>1:
print('Inserted the REFERENCE TEMPERATURE INTO THE ARRAY:',gridtarr[indexT0REF[0]-2:indexT0REF[0]+3])
else:
print('Inserted the REFERENCE TEMPERATURE INTO THE ARRAY:',gridtarr[indexT0REF[0]-1:indexT0REF[0]+3])
else:
print('T0REF already in temperature array: ',T0REF,gridtarr[indexT0REF[0]-2:indexT0REF[0]+2])
# Remove 0 K or convert to 1 K
# If 1 K is already present, 0 K is deleted instead of converted (avoids a duplicate).
indexzero = get_indexes(0.,gridtarr)
indexone = get_indexes(1.,gridtarr)
if len(indexzero) == 1 and len(indexone) == 0:
gridtarr[indexzero] = 1.
print('Zero kelvin changed to 1 kelvin.')
else:
gridtarr = np.delete(gridtarr, indexzero)
print('Deleted zero K index.')
#
#gridtsize = len(gridtarr)
print('Number of temperature points: ',len(gridtarr))
#print(gridtarr)
#------------ the following code generates the density array
# Same segment-by-segment construction as the temperature axis, driven by
# gridrvals/gridrstyle/gridrnpts; R0REF (reference density) is spliced in if missing.
gridrarr = []
for iir in range(1,len(gridrvals)):
if gridrstyle[iir-1] == 1:
# linear region
temprarr = np.arange(gridrnpts[iir-1])/gridrnpts[iir-1]*(gridrvals[iir]-gridrvals[iir-1])+gridrvals[iir-1]
if gridrstyle[iir-1] == 0:
# log region
logstep = (np.log10(gridrvals[iir])-np.log10(gridrvals[iir-1]))/gridrnpts[iir-1]
temprarr = np.power(10.,np.log10(gridrvals[iir-1])+(np.arange(gridrnpts[iir-1]))*logstep)
gridrarr = np.concatenate((gridrarr,temprarr))
# check if R0REF already in the grid
#indexR0REF = get_indexes(R0REF,gridrarr)
indexR0REF = np.where(abs(R0REF-gridrarr)<1.E-4)[0]
if len(indexR0REF) == 0:
# insert R0REF into the array
indexR0REF = np.where((gridrarr-R0REF)>0.)[0]
gridrarr = np.concatenate((gridrarr[0:indexR0REF[0]],[R0REF],gridrarr[indexR0REF[0]::]))
print('Inserted the REFERENCE DENSITY INTO THE ARRAY:',gridrarr[indexR0REF[0]-2:indexR0REF[0]+3])
#print('NOT INSERTING REFERENCE DENSITY INTO THE ARRAY.')
else:
print('R0REF already in density array: ',R0REF,gridrarr[indexR0REF[0]-2:indexR0REF[0]+2])
# Remove 0 g/cm3 if present
indexzero = get_indexes(0.,gridrarr)
if len(indexzero) == 1:
gridrarr = np.delete(gridrarr, indexzero)
print('Deleted zero g/cm3 index.')
#
#gridrsize = len(gridrarr)
print('Number of density points: ',len(gridrarr))
#print(gridrarr)
#------------ the following code generates the specific entropy array for gadget tables
# Same segment-by-segment construction, driven by gridsvals/gridsstyle/gridsnpts.
gridsarr = []
for iir in range(1,len(gridsvals)):
if gridsstyle[iir-1] == 1:
# linear region
tempsarr = np.arange(gridsnpts[iir-1])/gridsnpts[iir-1]*(gridsvals[iir]-gridsvals[iir-1])+gridsvals[iir-1]
if gridsstyle[iir-1] == 0:
# log region
logstep = (np.log10(gridsvals[iir])-np.log10(gridsvals[iir-1]))/gridsnpts[iir-1]
tempsarr = np.power(10.,np.log10(gridsvals[iir-1])+(np.arange(gridsnpts[iir-1]))*logstep)
gridsarr = np.concatenate((gridsarr,tempsarr))
# Remove 0 MJ/K/kg if present
# Replaced (not deleted) with a tiny positive value so the entropy axis stays
# strictly positive and the array length is unchanged.
indexzero = get_indexes(0.,gridsarr)
if len(indexzero) == 1:
gridsarr[indexzero]=1.e-20
# gridsarr = np.delete(gridsarr, indexzero)
print('replaced zero MJ/K/kg index with 1.E-20')
#gridssize = len(gridsarr)
#
print('Number of specific entropy points: ',len(gridsarr))
#print(gridsarr)
# word count = 2 header words + the two axis arrays + 3 full (NTxND) variable planes
print('Number of words in the standard SESAME 301 table (NR, NT, R, T, P, IE, HFE)=',2.+len(gridrarr)+len(gridtarr)+len(gridrarr)*len(gridtarr)*3.)
#-----------------------------------------------------------
#
# DEVMODE only: dump the (rho, T) grid plus table header scalars to
# tablegrid.txt, one value per line in 6-decimal scientific notation,
# for the ANEOS driver to read back in.
# NOTE(review): plain open()/close() without a context manager -- a `with`
# block would be safer, left unchanged here.
if DEVMODE:
print('WRITING tablegrid.txt FOR ANEOS in the local directory.\n')
# write the table arrays to a file for the ANEOS driver to read in
tablegridfile = open("tablegrid.txt","w")
tablegridfile.write("{:.6e}".format(MATID)+'\n')
tablegridfile.write("{:.6e}".format(DATE)+'\n')
tablegridfile.write("{:.6e}".format(VERSION)+'\n')
tablegridfile.write("{:.6e}".format(FMN)+'\n')
tablegridfile.write("{:.6e}".format(FMW)+'\n')
tablegridfile.write("{:.6e}".format(R0REF)+'\n')
tablegridfile.write("{:.6e}".format(K0REF)+'\n')
tablegridfile.write("{:.6e}".format(T0REF)+'\n')
# counts are intentionally written in the same float format as the data
tablegridfile.write("{:.6e}".format(len(gridrarr))+'\n') # number of density points
tablegridfile.write("{:.6e}".format(len(gridtarr))+'\n') # number of temperature points
for i in range(0,len(gridrarr)):
tablegridfile.write("{:.6e}".format(gridrarr[i])+'\n')
for i in range(0,len(gridtarr)):
tablegridfile.write("{:.6e}".format(gridtarr[i])+'\n')
tablegridfile.close()
#
#-----------------------------------------------------
# Output GADGET2 Table information
if MAKEGADGETTABLE:
# 8 variable planes in the extended gadget table (vs 3 in standard SESAME)
print('Number of words in the extended GADGET2 EOS table (NR, NS, R, S, P, T, IE, CS, HFE, CV, KPA, MDQ)=',2.+len(gridrarr)+len(gridtarr)+len(gridrarr)*len(gridtarr)*8.)
#------------------------------------------------------
#----------------------------------------------------
# Sp. Internal Energy grid for Tillotson
# 600 log-spaced points spanning 1e-3..1e3 MJ/kg, with 0 prepended
tillearr = np.power(10.,np.arange(600)/600.*6.-3.) # MJ/kg
tillearr = np.append(0.,tillearr)
print('Number of specific internal energy points for Tillotson EOS: ',len(tillearr))
#----------------------------------------------------
# Paranoia check
# Every grid axis must contain strictly unique values; duplicates would give
# degenerate rows/columns in the tabulated EOS and break interpolation.
# The original code halted by evaluating an undefined name ("stopthecode",
# i.e. a deliberate NameError); raise an explicit exception instead so the
# failure message is self-describing.
for _axisname, _axisarr in (('TEMP', gridtarr),
                            ('DENSITY', gridrarr),
                            ('SP. ENTROPY', gridsarr),
                            ('SP. ENERGY', tillearr)):
    if len(np.unique(_axisarr)) != len(_axisarr):
        print('NON-UNIQUE VALUES IN ' + _axisname + ' ARRAY')
        raise RuntimeError('Non-unique values in ' + _axisname + ' grid array.')
#----------------------------------------------------
print('\nDone generating EOS grids.\n')
print('\nCHECK THAT THE GRID ARRAYS ARE EVEN IN LENGTH.\n')
# END TABLE GRIDDING INFORMATION
# DEVMODE: run the local ./aneos binary (list-arg subprocess, shell=False);
# otherwise just read the pre-generated distribution files.
if DEVMODE:
# call aneos in the local directory
# output file is in the local directory
print('RUNNING IN DEVELOPMENT MODE')
print('Calling aneos in the local directory.')
completed = subprocess.run(['./aneos'])
print('Aneos call complete. CHECK THAT A NEW ANEOS.OUTPUT FILE WAS CREATED.')
print('aneos call result: ', completed.returncode)
print('Check ANEOS.OUTPUT file for errors.')
else:
print('RUNNING IN RELEASE MODE: NO ANEOS SYSTEM CALL.')
print('READING IN EOS DISTRIBUTION FILES.')
# READ IN NEW ANEOS MODEL and fill the extEOStable class object
# source in eostable.py
#------------------------------------------------------------------
NewEOS = extEOStable() # FIRST make new empty EOS object
NewEOS.loadextsesame('NEW-SESAME-EXT.TXT') # LOAD THE EXTENDED 301 SESAME FILE GENERATED BY STSM VERSION OF ANEOS
NewEOS.loadstdsesame('NEW-SESAME-STD.TXT') # LOAD THE STANDARD 301 SESAME FILE GENERATED BY STSM VERSION OF ANEOS
NewEOS.MODELNAME = MODELNAME # string set above in user input
NewEOS.MDQ = np.zeros((NewEOS.NT,NewEOS.ND)) # makes the empty MDQ array
#print(NewEOS.units) # these are the default units for SESAME rho-T tables
#'Units: g/cm3, K, GPa, MJ/kg, MJ/kg, MJ/K/kg, cm/s, MJ/K/kg, KPA flag. 2D arrays are (NT,ND).'
# Add the header info to the table. This could be done during the loading.
# if made from this notebook, these values are set in the user-input above.
# ** MAKE SURE THEY MATCH ANEOS.INPUT **
NewEOS.MATID = MATID
NewEOS.DATE = DATE
NewEOS.VERSION = VERSION
NewEOS.FMN = FMN
NewEOS.FMW = FMW
NewEOS.R0REF = R0REF
NewEOS.K0REF = K0REF
NewEOS.T0REF = T0REF
NewEOS.P0REF = P0REF
#
# Load the information from ANEOS.INPUT and ANEOS.OUTPUT
NewEOS.loadaneos(aneosinfname='ANEOS.INPUT',aneosoutfname='ANEOS.OUTPUT')
#
# calculate the principal Hugoniot from the table
if DEVMODE:
# calculate the Hugoniot and write to a file
NewEOS.calchugoniot(r0=NewEOS.R0REF,t0=NewEOS.T0REF,writefilename='NEW-SESAME-HUG.TXT')
else:
# just calculate the Hugoniot and put it in the EOS object
NewEOS.calchugoniot(r0=NewEOS.R0REF,t0=NewEOS.T0REF)
#
# calculate the 1-bar profile; loop over temp
# For each isotherm, interpolate S and rho at P = 1.E-4 GPa (= 1 bar).
# NOTE(review): np.interp assumes the xp argument (P along the isotherm) is
# monotonically increasing -- presumably true over the searched density range;
# verify for new materials.
NewEOS.onebar.T = np.zeros(NewEOS.NT)
NewEOS.onebar.S = np.zeros(NewEOS.NT)
NewEOS.onebar.rho = np.zeros(NewEOS.NT)
it0 = np.where(NewEOS.T >= NewEOS.T0REF)[0]
id0 = np.arange(NewEOS.ND)#np.where(NewEOS.rho >= 0.8*NewEOS.R0REF)[0]
for iit in range(0,NewEOS.NT):
NewEOS.onebar.T[iit] = NewEOS.T[iit]
NewEOS.onebar.S[iit] = np.interp(1.E-4,NewEOS.P[iit,id0],NewEOS.S[iit,id0])
NewEOS.onebar.rho[iit] = np.interp(1.E-4,NewEOS.P[iit,id0],NewEOS.rho[id0])
#print(iit,NewEOS.onebar.rho[iit],NewEOS.onebar.T[iit],NewEOS.onebar.S[iit]*1.E3)
# Make a NOTENSION TABLE and GADGET TABLE
################### REMOVE TENSION REGION ###########################
## The GADGET2 tables do not have a tension region.
## If the gadget2 table is selected, then must generate a no-tension table
if MAKEGADGETTABLE:
REMOVETENSIONFLAG=1 # 0- keep tension region; 1- remove tension region
# minimum pressure 1.E-30
# minimum sp energy about 1.E-4 before negative entropy region
# minimum sp entropy about 1.E-5 before negative entropy region
# minimum HFE about 1.E-3 before negative entropy region
# minimum sound speed about 1.E-10 before negative entropy region
# minimum sp heat capacity about 1.E-4 before negative entropy region
## FUTURE WORK: THIS SECTION COULD USE THE LEVER RULE TO FILL IN THE MISSING POINTS
## SINCE MANY VALUES ARE NEAR THE LOWER LIMITS IN THE SUBLIMATION REGION ANYWAY,
## HERE, TENSION REGION VALUES ARE FILLED IN WITH THE ADJACENT S-V DOME VALUES
## This is obviously bad for interpolations. Fix for future work.
if REMOVETENSIONFLAG:
# First make a deep copy of the original table with the tension region
NewEOSorig = deepcopy(NewEOS)
NewEOS.MODELNAME = NewEOS.MODELNAME+'notension'
# THIS SECTION REWRITES NEGATIVE PRESSURE REGION
# The tension region is in the solid-vapor phase boundary.
# Use the liquid-vapor phase boundary. Below the temperature of the triple point, it is the sublimation curve.
# But the pressures for the solid calculation is spurious at very low pressures, so use the vapor pressure.
# HFE = GFE + TS
print('REMOVING TENSION REGION FROM THE ORIGINAL ANEOS TABLE.')
# Scan each isotherm (highest T first); everywhere P < 0, overwrite all
# variables with the value just below the first negative-P density index.
for itt in range(NewEOS.NT-1,-1,-1):
tmp = np.where(NewEOS.P[itt,:] < 0.)
if (len(tmp[0]) > 0):
badind = tmp[0]
if badind[0] > 0:
#print(itt, len(badind), NewEOS.T[itt], 'tension only', NewEOS.P[itt,badind[0]-4:badind[0]+1])
# at this temperature, all densities are the same pressure in the vapor dome
# so use the adjacent positive pressure value
# do the same for all the variables since there is very little change in this region of the dome.
# Errors introduced by interpolating the vapor curve are comparable to
# recalculating the lever rule. Would need to call ANEOS again to get good end point values
# to calculate the mass-weighted value via the lever rule.
NewEOS.P[itt,badind] = np.full(len(badind),NewEOS.P[itt,badind[0]-1])
NewEOS.U[itt,badind] = np.full(len(badind),NewEOS.U[itt,badind[0]-1])
NewEOS.S[itt,badind] = np.full(len(badind),NewEOS.S[itt,badind[0]-1])
NewEOS.A[itt,badind] = np.full(len(badind),NewEOS.A[itt,badind[0]-1])
NewEOS.cs[itt,badind] = np.full(len(badind),NewEOS.cs[itt,badind[0]-1])
NewEOS.cv[itt,badind] = np.full(len(badind),NewEOS.cv[itt,badind[0]-1])
NewEOS.KPA[itt,badind] = np.full(len(badind),2)
NewEOS.MDQ[itt,badind] = np.full(len(badind),-1) # replaced tension region flag
else:
print('SOMETHING VERY WRONG WITH TENSION IN THE TABLE')
# NOTE: the bare name below is a deliberate NameError to halt the notebook
makethecodestop # please tell me a more graceful way to stop the notebook
#
# THIS SECTION RESETS NEGATIVE SPECIFIC ENTROPIES WITH ADJACENT VALUES OR MINIMUM POSITIVE VALUES
# THESE VALUES TYPICALLY DEFINE THE COLD COMPRESSION LIMIT OF THE MATERIAL
# OR SPURIOUS NEGATIVE ENTROPIES AT VERY LOW PRESSURES (IMPERFECT DEBYE MODEL IN ANEOS)
for itt in range(NewEOS.NT-1,-1,-1):
tmp = np.where(NewEOS.S[itt,:] < 0.)
if (len(tmp[0]) > 0):
badind = tmp[0]
if badind[0] > 0:
#print(itt, antarr[itt], andarr[badind[0]], badind[0],len(badind),'negative entropy', ansarr[itt,badind[0]],ansarr[itt,badind[0]-1])
NewEOS.P[itt,badind] = np.full(len(badind),NewEOS.P[itt,badind[0]-1]) # use the adjacent pressure at this temperature in the dome
NewEOS.U[itt,badind] = np.full(len(badind),NewEOS.U[itt,badind[0]-1])
NewEOS.S[itt,badind] = np.full(len(badind),NewEOS.S[itt,badind[0]-1])
NewEOS.A[itt,badind] = np.full(len(badind),NewEOS.A[itt,badind[0]-1])
NewEOS.cs[itt,badind] = np.full(len(badind),NewEOS.cs[itt,badind[0]-1])
NewEOS.cv[itt,badind] = np.full(len(badind),NewEOS.cv[itt,badind[0]-1])
#NewEOS.KPA[itt,badind] # leave as original region flag
NewEOS.MDQ[itt,badind] = np.full(len(badind),-2) # negative entropies flag
#print('Some NEGATIVE S AT THIS INDEX, TEMP(K): ',itt, NewEOS.T[itt],NewEOS.P[itt,badind[0]-1],NewEOS.P[itt,badind[0]-2]) #, badind[0],len(badind),'negative entropy', NewEOS.S[itt,badind[0]])
if badind[0] == 0:
print('All S VALUES NEGATIVE AT THIS INDEX, TEMP(K): ',itt, NewEOS.T[itt]) #, badind[0],len(badind),'negative entropy', NewEOS.S[itt,badind[0]])
NewEOS.S[itt,badind] = np.full(len(badind),1.E-10)
NewEOS.U[itt,badind] = np.full(len(badind),1.E-10) # also replace the negative sp. internal energies
NewEOS.MDQ[itt,badind] = np.full(len(badind),-2) # negative entropies flag
#### ONLY WRITE A NEW TABLE IN DEVELOPMENT MODE
if DEVMODE:
#### WRITE NEW SESAME TABLE WITH NO TENSION REGION
# WRITE STANDARD (SHORT) SESAME FILE
# WRITE SESAME TABLE TO FILE LIMITED TO P, E, HFE
NewEOS.writestdsesame(writestdsesfname="NEW-SESAME-STD-NOTENSION.TXT")
#### ONLY WRITE A NEW TABLE IN DEVELOPMENT MODE
#if DEVMODE:
# #### WRITE MDQ Flag as a 301-style table
# NewEOS.writemdqsesame(writemdqsesfname="NEW-SESAME-MDQ.TXT")
#
#--------------------------------------------------------------------------
########## MAKE GADGET2 EOS TABLE ARRAYS ###################
# LATER WE CAN TURN THIS INTO A FUNCTION IN THE GADGET EOS OBJECT
# FLAG IS SET ABOVE IN THE USER INPUT SECTION
# MAKEGADGETTABLE = 1 # 1 - do it ; 0 - skip it
if MAKEGADGETTABLE:
print('Starting interpolation for GADGET table.')
GadEOS = extGADtable() # make new empty Gadget EOS object
#
# rho array is andarr to match the SESAME table
# S array is gridsarr
# Allocate the (NS, ND) variable planes for the rho-S gadget table.
GadEOS.ND = len(gridrarr)
GadEOS.NS = len(gridsarr)
GadEOS.rho = gridrarr
GadEOS.S = gridsarr
GadEOS.P = np.zeros((GadEOS.NS,GadEOS.ND))
GadEOS.T = np.zeros((GadEOS.NS,GadEOS.ND))
GadEOS.U = np.zeros((GadEOS.NS,GadEOS.ND))
GadEOS.A = np.zeros((GadEOS.NS,GadEOS.ND))
GadEOS.cs = np.zeros((GadEOS.NS,GadEOS.ND))
GadEOS.cv = np.zeros((GadEOS.NS,GadEOS.ND))
GadEOS.KPA = np.zeros((GadEOS.NS,GadEOS.ND))
GadEOS.MDQ = np.zeros((GadEOS.NS,GadEOS.ND))
GadEOS.MODELNAME = NewEOS.MODELNAME+'-gadget-linear-int'
#
# some combinations of density and sp. entropy are not in the EOS surface
# in these cases, apply minimum values
# (these minimum values are only consumed by the rational-function
# interpolation path, which is currently commented out below)
gadpmin = np.min(NewEOS.P) # GPa
gadsmin = 1.E-30 # MJ/K/kg
gademin = np.min(NewEOS.U) # MJ/kg
gadtmin = 1. # K
gadcmin = np.min(NewEOS.cs) # cm/s
gadcvmin = 0. # MJ/K/kg
gadamin = np.min(NewEOS.A) # can be negative # MJ/kg
print('Minimum values used in the rational function interpolation scheme:')
print('gadpmin,gadsmin,gademin,gadtmin,gadcmin,gadcvmin,gadamin=')
print(gadpmin,gadsmin,gademin,gadtmin,gadcmin,gadcvmin,gadamin)
#
# some combinations of density and sp. entropy are not in the EOS surface
# in these cases, fill the region with the adjacent real values from the original T-RHO table
# ----------------------------------------------------------
# SIMPLE LINEAR INTERPOLATION TO FLIP T-RHO to S-RHO table
print("LINEAR INTERPOLATION FOR GADGET TABLE.")

def _interp_column_at_s(sall1d, vall1d):
    """Interpolate one T-rho column vall1d (indexed by sp. entropy sall1d)
    onto the gadget entropy grid gridsarr.

    Entries of gridsarr at or above max(sall1d) fall off the EOS surface and
    are clamped to the column's highest-temperature value vall1d[-1]
    (same clamp as the original per-variable code).
    Returns a 1-D array of len(gridsarr).
    """
    out = np.empty(len(gridsarr))
    column_interp = interpolate.interp1d(sall1d, vall1d)
    inside = np.where(gridsarr < np.max(sall1d))[0]
    out[inside] = column_interp(gridsarr[inside])
    outside = np.where(gridsarr >= np.max(sall1d))[0]
    out[outside] = vall1d[-1]
    return out

for iid in range(0, GadEOS.ND):
    # same density arrays for the T-rho table and S-rho table;
    # the entropy of this density column is the interpolation abscissa
    sall1d = NewEOS.S[:, iid]
    GadEOS.P[:, iid] = _interp_column_at_s(sall1d, NewEOS.P[:, iid])
    GadEOS.U[:, iid] = _interp_column_at_s(sall1d, NewEOS.U[:, iid])
    GadEOS.T[:, iid] = _interp_column_at_s(sall1d, NewEOS.T[:])
    GadEOS.cs[:, iid] = _interp_column_at_s(sall1d, NewEOS.cs[:, iid])
    # BUG FIX: the original copy-pasted code filled the clamped (off-surface)
    # region of cv, A (HFE), KPA, and MDQ with the SOUND SPEED column
    # (call1d) instead of each variable's own column. Each variable now
    # interpolates and clamps with its own data.
    GadEOS.cv[:, iid] = _interp_column_at_s(sall1d, NewEOS.cv[:, iid])
    GadEOS.A[:, iid] = _interp_column_at_s(sall1d, NewEOS.A[:, iid])
    GadEOS.KPA[:, iid] = _interp_column_at_s(sall1d, NewEOS.KPA[:, iid])
    GadEOS.MDQ[:, iid] = _interp_column_at_s(sall1d, NewEOS.MDQ[:, iid])
#
# keep a copy of the linearly-interpolated table before any later scheme overwrites it
GadEOSlin = deepcopy(GadEOS)
#
# The triple-quoted block below is a disabled alternative interpolation
# scheme (Kerley rational functions) kept as-is for reference; it is an
# expression-statement string, not executed code.
'''
# ----------------------------------------------------------
# 1D RATIONAL FUNCTION INTERPOLATION METHOD FROM KERLEY
print("RATIONAL FUNCTION INTERPOLATION TO GENERATE GADGET2 EOS TABLE.")
for iid in range(0,GadEOS.ND):
GadEOS.MODELNAME = NewEOS.MODELNAME+'-gadget-ratfunc-int'
#
# same density arrays for the T-rho table and S-rho table
sall1d = NewEOS.S[:,iid]
pall1d = NewEOS.P[:,iid]
eall1d = NewEOS.U[:,iid]
tall1d = NewEOS.T[:]
call1d = NewEOS.cs[:,iid]
cvall1d = NewEOS.cv[:,iid]
aall1d = NewEOS.A[:,iid]
#kall1d = NewEOS.KPA[:,iid]
#mdqall1d = NewEOS.MDQ[:,iid]
#
# interpolate P at the new S
GadEOS.P[:,iid] = rf1d(sall1d,pall1d,gridsarr,gadpmin)
# interpolate E at the new S
GadEOS.U[:,iid] = rf1d(sall1d,eall1d,gridsarr,gademin)
# interpolate T at the new S
GadEOS.T[:,iid] = rf1d(sall1d,tall1d,gridsarr,gadtmin)
# interpolate sound speed at the new S
GadEOS.cs[:,iid] = rf1d(sall1d,call1d,gridsarr,gadcmin)
# interpolate sp. heat capacity at the new S
GadEOS.cv[:,iid] = rf1d(sall1d,cvall1d,gridsarr,gadcvmin)
# interpolate HFE at the new S
GadEOS.A[:,iid] = rf1d(sall1d,aall1d,gridsarr,gadamin)
# Do not interpolate KPA or MDQ with rational function interp. Use Linear.
# do not overwrite the linear interpolation above.
'''
#
print('Done interpolating the gadget2 table.')
#print('Using RATIONAL FUNCTION INTERPOLATION EXCEPT FOR KPA and MDQ FLAGS (bilinear).')
if DEVMODE:
# ONLY WRITE GADGET2 TABLE TO DISK IN DEVELOPMENT MODE
#### WRITE NEW GADGET2 TABLE WITH NO TENSION REGION
#### This ascii table format follows the sesame table format of 5 columns of numbers in 16.8e format
#### the STYLE variable is just a counter to 5 to know when to write a newline
GadEOS.writestdgadget(writestdgadgetfname="NEW-GADGET-STD-NOTENSION.TXT")
GadEOS.writeextgadget(writeextgadgetfname="NEW-GADGET-EXT-NOTENSION.TXT")
# READ IN FORSTERITE MATERIAL DATA SETS
# NOTE(review): despite the "FORSTERITE" header comment, every dataset loaded
# below is for IRON -- presumably the comment is stale from a copied notebook.
datadir='../data/'
#======================================================================
#======================================================================
# Gather some experimental and theoretical data on forsterite
ironSTP_S = 0.489211596E-3 # sp. entropy MJ/K/kg JANAF STP value
# iron 1 bar entropy Desai 1986
#T (K), Cp (J/mol/K), dH0 (J/mol), S (J/mol/K)
Desai_file=datadir+'tabula-Desai-Table2.csv'
Desai_datarr = pd.read_csv(Desai_file,nrows=67)
Desai_tarr = Desai_datarr.iloc[:,0].values # temp K
# 55.847 g/mol = molar mass of iron; converts J/mol/K -> kJ/kg/K
Desai_sarr = Desai_datarr.iloc[:,3].values/55.847+0.489211596 # sp. entropy J/mol/K->kJ/kg/K offset by JANAF STP value
# iron 1 bar liquid density Assael et al. PCRD 2006 Table 3
# T(K), density (kg/m3)
Assael_file=datadir+'tabula-Assael-Table3.csv'
Assael_datarr = pd.read_csv(Assael_file,nrows=15)
Assael_tarr = Assael_datarr.iloc[:,0].values # temp K
Assael_rarr = Assael_datarr.iloc[:,1].values # rho kg/m3
# Iron alloy solidus Fe-16Si from Fischer et al. 2012 in Fig 1.2A from Fischer AGU 2016
Fischer_file=datadir+'Fischer-Fig1.2-AFischer-lower.csv'
Fischer_datalow = pd.read_csv(Fischer_file,skiprows=1)
Fischer_file=datadir+'Fischer-Fig1.2-AFischer-upper.csv'
Fischer_datahigh = pd.read_csv(Fischer_file,skiprows=1)
# Kraus et al. Nature Geoscience 2015
# Entropy tie point on the Hugoniot
Ztie_p=507. # GPa
Ztie_perrlow=-85. # GPa
Ztie_perrhigh=+65. # GPa
Ztie_s=2240.E-6 # MJ/K/kg
Ztie_serr=60.E-6 # MJ/K/kg
# http://www.ihed.ras.ru/rusbank/
# iron data compilation REFERENCES ARE LISTED AT THE END OF THE FILE
# m [rho0/rho], U=part vel(km/s), D = shockvel(km/s), P (GPa)
ihed_rho0=7.873
ihed_file=datadir+'iron-Russian-shockwave-database.txt'
ihed_datarr = np.loadtxt(ihed_file,skiprows=4,usecols=(0,1,2,3,4),max_rows=571)
ihed_fullden = np.where(ihed_datarr[:,0] == 1.)[0] # m==1 selects full-density (unporous) samples
#ihed_datarr.shape
#print(ihed_datarr[ihed_fullden,3])
#print(max(ihed_datarr[ihed_fullden,3]))
# http://www.ihed.ras.ru/rusbank/
# iron data compilation REFERENCES ARE LISTED AT THE END OF THE FILE
# m U D Cs P R/R0 R E-E0 Rem Ref
ihed_file=datadir+'iron-Russian-database-cs.txt'
ihed_csarr = np.loadtxt(ihed_file,skiprows=4,usecols=(0,1,2,3,4,5,6),max_rows=9)
#ihed_datarr.shape
#print(ihed_csarr[:,3])
#print(max(ihed_datarr[ihed_fullden,3]))
# http://www.ihed.ras.ru/rusbank/
# iron data compilation REFERENCES ARE LISTED AT THE END OF THE FILE
# m U0 P0 U P Rem Ref
ihed_file=datadir+'iron-Russian-database-rel.txt'
ihed_relarr = np.loadtxt(ihed_file,skiprows=4,usecols=(0,1,2,3,4),max_rows=20)
#ihed_datarr.shape
#print(ihed_relarr[:,3])
#print(max(ihed_datarr[ihed_fullden,3]))
# Sjostrom & Crockett 2018 Hugoniot equation from their paper; temperature equation has a problem
scironuparr = np.arange(300)/300.*30. # km/s
scironusarr = 3.7885 + 1.25524*scironuparr + 0.518106*scironuparr*np.exp(-0.0866344*scironuparr) # km/s
scironparr = 7.874*(scironuparr)*(scironusarr) # GPa
scironrarr = 1./((1./7874.)*(1.-scironuparr/scironusarr))/1.e3 # g/cm3
#scirontarr = 1.24182E-3*scironparr+2.80723E-6*np.power(scironparr,2.)-1.31703E-9*np.power(scironparr,3.)+1.88449E-13*np.power(scironparr,4.)*11604. # K
# Kraus et al. 2015 fit Hugoniot SOM Equation 7
krausironuparr = (np.arange(200)+1)/200.*20. # km/s
krausironusarr = 3.41 + 1.84*krausironuparr - 0.036*krausironuparr*krausironuparr # km/s
krausironparr = 7854.*(krausironuparr)*(krausironusarr)/1.e3 # GPa
krausironrarr = 1./((1./7854.)*(1.-krausironuparr/krausironusarr))/1.e3 # g/cm3
krausironsarr = -1.984e4/np.sqrt(krausironparr) + 39.9*np.sqrt(krausironparr)-0.1047*krausironparr+2279.
krausironsuarr = -1.845e4/np.sqrt(krausironparr) + 45.7*np.sqrt(krausironparr)-0.1193*krausironparr+2270.
krausironslarr = -2.631e4/np.sqrt(krausironparr) + 22.33*np.sqrt(krausironparr)-0.007689*krausironparr+2804.
krausliqrho = 6.01 # g/cm3 1 bar boiling point, 3133 K Kraus et al. 2015 SOM Equation 6
krausironliqusarr = 0.79 + 0.50*krausliqrho + (1.19 + 0.0307*krausliqrho)*krausironuparr # km/s
krausironliqparr = krausliqrho*(krausironuparr)*(krausironliqusarr) # GPa
krausironliqrarr = 1./((1./krausliqrho)*(1.-krausironuparr/krausironliqusarr)) # g/cm3
ironliq_r0=krausliqrho
ironliq_t0=3133.
```
## Color mesh plots of the SESAME Rho-T table
```
# PLOT COLOR SHADED VALUES RHO-T TABLE
# 3x2 grid: rows are P, U (sp. int. energy), S (sp. entropy);
# left column = linear axes (zoomed), right column = log-log (full table).
# Dashed curves: vapor-curve liquid/vapor branches (vc.rl/vc.rv) and melt
# curve (mc.rl) when present; 'ko' marks the first vapor-curve point.
lw=1
sfont = 15
ds = (10,10)
%matplotlib inline
fig, ([ax0, ax1], [ax2,ax3], [ax4,ax5])= plt.subplots(nrows=3,ncols=2,figsize=(14,24))
plt.subplots_adjust(wspace=0.4)
#plt.tight_layout(pad=0.6)
##================ Pressure
# colors: log10(P) shifted by +25 and scaled by /29 to map onto [0,1]
im1=ax0.pcolormesh(NewEOS.rho,NewEOS.T,(np.log10(NewEOS.P)+25)/29.,cmap=local_cmaps.parula,vmin=0,vmax=1,edgecolors='face')
ax0.plot(NewEOS.vc.rl,NewEOS.vc.T,color="black",linewidth=lw,linestyle='--',dashes=ds)
ax0.plot(NewEOS.vc.rv,NewEOS.vc.T,color="black",linewidth=lw,linestyle='--',dashes=ds)
ax0.plot(NewEOS.vc.rl[0],NewEOS.vc.T[0],'ko')
if NewEOS.mc.NT > 0:
ax0.plot(NewEOS.mc.rl,NewEOS.mc.T,color="black",linewidth=lw,linestyle='--',dashes=ds)
# ax0.plot(NewEOS.mc.rs,NewEOS.mc.T,color="black",linewidth=lw,linestyle='--',dashes=ds)
ax0.set_ylim(0.,20000)
ax0.set_xlim(0,30.)
ax0.set_xlabel('Density (g/cm$^3$)',size=sfont)
ax0.set_ylabel('Temperature (K)',size=sfont)
ax0.set_title('Colors=log$_{10}$(P (GPa))',size=sfont)
ax0.tick_params(labelsize=sfont)
divider0 = make_axes_locatable(ax0)
cax0 = divider0.append_axes('right', size='5%', pad=0.05)
cbar0 = fig.colorbar(im1, cax=cax0, ticks = [],orientation='vertical')
#cbar0.ax.set_yticklabels([' ',' ',' ']) # vertical colorbar
cbar0.set_label('Log$_{10}$(P) [GPa]',size=sfont)
#cbar0.ax.set_yticklabels(['-25','-11','+4']) # vertical colorbar
im1=ax1.pcolormesh(NewEOS.rho,NewEOS.T,(np.log10(NewEOS.P)+25)/29.,cmap=local_cmaps.parula,vmin=0,vmax=1,edgecolors='face')
ax1.loglog(NewEOS.vc.rl,NewEOS.vc.T,color="black",linewidth=lw,linestyle='--',dashes=ds)
ax1.loglog(NewEOS.vc.rv,NewEOS.vc.T,color="black",linewidth=lw,linestyle='--',dashes=ds)
ax1.loglog(NewEOS.vc.rl[0],NewEOS.vc.T[0],'ko')
if NewEOS.mc.NT > 0:
ax1.plot(NewEOS.mc.rl,NewEOS.mc.T,color="black",linewidth=lw,linestyle='--',dashes=ds)
# ax1.plot(NewEOS.mc.rs,NewEOS.mc.T,color="black",linewidth=lw,linestyle='--',dashes=ds)
ax1.set_ylim(10.,1.E7)
ax1.set_xlim(1.E-21,100.)
ax1.set_xlabel('Density (g/cm$^3$)',size=sfont)
ax1.set_ylabel('Temperature (K)',size=sfont)
ax1.set_title('Colors=log$_{10}$(P (GPa))',size=sfont)
ax1.tick_params(labelsize=sfont)
divider = make_axes_locatable(ax1)
cax = divider.append_axes('right', size='5%', pad=0.05)
cbar = fig.colorbar(im1, cax=cax, ticks = [0, 0.5, 1], orientation='vertical')
# tick labels must be kept consistent with the (+25)/29 normalization above
cbar.ax.set_yticklabels(['-25','-11', '+4']) # vertical colorbar
cbar.set_label('Log$_{10}$(P) [GPa]',size=sfont)
#================ Sp. Int. Energy
# colors: log10(U) shifted by +4 and scaled by /10 to map onto [0,1]
im1=ax2.pcolormesh(NewEOS.rho,NewEOS.T,(np.log10(NewEOS.U)+4.)/10.,cmap=local_cmaps.parula,vmin=0,vmax=1,edgecolors='face')
ax2.plot(NewEOS.vc.rl,NewEOS.vc.T,color="black",linewidth=lw,linestyle='--',dashes=ds)
ax2.plot(NewEOS.vc.rv,NewEOS.vc.T,color="black",linewidth=lw,linestyle='--',dashes=ds)
ax2.plot(NewEOS.vc.rl[0],NewEOS.vc.T[0],'ko')
if NewEOS.mc.NT > 0:
ax2.plot(NewEOS.mc.rl,NewEOS.mc.T,color="black",linewidth=lw,linestyle='--',dashes=ds)
# ax2.plot(NewEOS.mc.rs,NewEOS.mc.T,color="black",linewidth=lw,linestyle='--',dashes=ds)
ax2.set_ylim(0.,20000)
ax2.set_xlim(0,30.)
ax2.set_xlabel('Density (g/cm$^3$)',size=sfont)
ax2.set_ylabel('Temperature (K)',size=sfont)
ax2.set_title('Colors=log$_{10}$(Sp. Int. Energy (MJ/kg))',size=sfont)
ax2.tick_params(labelsize=sfont)
divider = make_axes_locatable(ax2)
cax = divider.append_axes('right', size='5%', pad=0.05)
# does not want to label ticks in this location
cbar = fig.colorbar(im1, cax=cax, ticks = [], orientation='vertical')
#cbar.ax.set_yticklabels(['','','']) # vertical colorbar
cbar.set_label('Log$_{10}$(E) [MJ/kg]',size=sfont)
#cbar.ax.set_yticklabels(['-4','+1','+6']) # vertical colorbar
ax3.pcolormesh(NewEOS.rho,NewEOS.T,(np.log10(NewEOS.U)+4.)/10.,cmap=local_cmaps.parula,vmin=0,vmax=1)
ax3.loglog(NewEOS.vc.rl,NewEOS.vc.T,color="black",linewidth=lw,linestyle='--',dashes=ds)
ax3.loglog(NewEOS.vc.rv,NewEOS.vc.T,color="black",linewidth=lw,linestyle='--',dashes=ds)
ax3.loglog(NewEOS.vc.rl[0],NewEOS.vc.T[0],'ko')
if NewEOS.mc.NT > 0:
ax3.plot(NewEOS.mc.rl,NewEOS.mc.T,color="black",linewidth=lw,linestyle='--',dashes=ds)
# ax3.plot(NewEOS.mc.rs,NewEOS.mc.T,color="black",linewidth=lw,linestyle='--',dashes=ds)
ax3.set_ylim(10.,1.E7)
ax3.set_xlim(1.E-21,100.)
ax3.set_xlabel('Density (g/cm$^3$)',size=sfont)
ax3.set_ylabel('Temperature (K)',size=sfont)
ax3.set_title('Colors=log$_{10}$(E (MJ/kg))',size=sfont)
ax3.tick_params(labelsize=sfont)
divider = make_axes_locatable(ax3)
cax = divider.append_axes('right', size='5%', pad=0.05)
#cbar = fig.colorbar(im1, cax=cax, ticks = [0, 0.5, 1], orientation='vertical')
#cbar.ax.set_yticklabels(['-6', '-3.5', '-1']) # vertical colorbar
cbar = fig.colorbar(im1, cax=cax, ticks = [0, 0.5, 1], orientation='vertical')
cbar.ax.set_yticklabels(['-4', '+1','+6']) # vertical colorbar
cbar.set_label('Log$_{10}$(E) [MJ/kg]',size=sfont)
#================ Sp. Entropy
# colors: log10(S) shifted by +4 and scaled by /4 to map onto [0,1]
im1=ax4.pcolormesh(NewEOS.rho,NewEOS.T,(np.log10(NewEOS.S)+4.)/4.,cmap=local_cmaps.parula,vmin=0,vmax=1,edgecolors='face')
ax4.plot(NewEOS.vc.rl,NewEOS.vc.T,color="black",linewidth=lw,linestyle='--',dashes=ds)
ax4.plot(NewEOS.vc.rv,NewEOS.vc.T,color="black",linewidth=lw,linestyle='--',dashes=ds)
ax4.plot(NewEOS.vc.rl[0],NewEOS.vc.T[0],'ko')
if NewEOS.mc.NT > 0:
ax4.plot(NewEOS.mc.rl,NewEOS.mc.T,color="black",linewidth=lw,linestyle='--',dashes=ds)
# ax4.plot(NewEOS.mc.rs,NewEOS.mc.T,color="black",linewidth=lw,linestyle='--',dashes=ds)
ax4.set_ylim(0.,20000)
ax4.set_xlim(0,30.)
ax4.set_xlabel('Density (g/cm$^3$)',size=sfont)
ax4.set_ylabel('Temperature (K)',size=sfont)
ax4.set_title('Colors=log$_{10}$(Sp. Entropy (MJ/K/kg))',size=sfont)
ax4.tick_params(labelsize=sfont)
divider = make_axes_locatable(ax4)
cax = divider.append_axes('right', size='5%', pad=0.05)
cbar = fig.colorbar(im1, cax=cax, ticks = [], orientation='vertical')
#cbar.ax.set_yticklabels(['-4', '-2', '0']) # vertical colorbar
cbar.set_label('Log$_{10}$(S) [MJ/K/kg]',size=sfont)
ax5.pcolormesh(NewEOS.rho,NewEOS.T,(np.log10(NewEOS.S)+4.)/4.,cmap=local_cmaps.parula,vmin=0,vmax=1)
ax5.loglog(NewEOS.vc.rl,NewEOS.vc.T,color="black",linewidth=lw,linestyle='--',dashes=ds)
ax5.loglog(NewEOS.vc.rv,NewEOS.vc.T,color="black",linewidth=lw,linestyle='--',dashes=ds)
ax5.loglog(NewEOS.vc.rl[0],NewEOS.vc.T[0],'ko')
if NewEOS.mc.NT > 0:
ax5.plot(NewEOS.mc.rl,NewEOS.mc.T,color="black",linewidth=lw,linestyle='--',dashes=ds)
# ax5.plot(NewEOS.mc.rs,NewEOS.mc.T,color="black",linewidth=lw,linestyle='--',dashes=ds)
ax5.set_ylim(10.,1.E7)
ax5.set_xlim(1.E-21,100.)
ax5.set_xlabel('Density (g/cm$^3$)',size=sfont)
ax5.set_ylabel('Temperature (K)',size=sfont)
ax5.set_title('Colors=log$_{10}$(Sp. Entropy (MJ/K/kg))',size=sfont)
ax5.tick_params(labelsize=sfont)
divider = make_axes_locatable(ax5)
cax = divider.append_axes('right', size='5%', pad=0.05)
#cbar = fig.colorbar(im1, cax=cax, ticks = [0, 0.5, 1], orientation='vertical')
#cbar.ax.set_yticklabels(['-6', '-3.5', '-1']) # vertical colorbar
cbar = fig.colorbar(im1, cax=cax, ticks = [0, 0.5, 1], orientation='vertical')
cbar.ax.set_yticklabels(['-4', '-2','0']) # vertical colorbar
cbar.set_label('Log$_{10}$(S) [MJ/K/kg]',size=sfont)
##============= start new plot
# New 3x2 figure (fig2): sound speed, Helmholtz free energy, and KPA flag,
# each shown on linear axes (left column) and log-log axes (right column).
fig2, ([ax0, ax1], [ax2,ax3], [ax4,ax5])= plt.subplots(nrows=3,ncols=2,figsize=(14,24))
plt.subplots_adjust(wspace=0.4)
#================ Sound Speed
# Colors are log10(cs) rescaled from [-3, +7] into [0, 1] (cs in cm/s).
im1=ax0.pcolormesh(NewEOS.rho,NewEOS.T,(np.log10(NewEOS.cs)+3.)/10.,cmap=local_cmaps.parula,vmin=0,vmax=1,edgecolors='face')
ax0.plot(NewEOS.vc.rl,NewEOS.vc.T,color="black",linewidth=lw,linestyle='--',dashes=ds)
ax0.plot(NewEOS.vc.rv,NewEOS.vc.T,color="black",linewidth=lw,linestyle='--',dashes=ds)
ax0.plot(NewEOS.vc.rl[0],NewEOS.vc.T[0],'ko')
if NewEOS.mc.NT > 0:
    ax0.plot(NewEOS.mc.rl,NewEOS.mc.T,color="black",linewidth=lw,linestyle='--',dashes=ds)
#    ax0.plot(NewEOS.mc.rs,NewEOS.mc.T,color="black",linewidth=lw,linestyle='--',dashes=ds)
ax0.set_ylim(0.,20000)
ax0.set_xlim(0,30.)
ax0.set_xlabel('Density (g/cm$^3$)',size=sfont)
ax0.set_ylabel('Temperature (K)',size=sfont)
ax0.set_title('Colors=log$_{10}$(Sound Speed (cm/s))',size=sfont)
ax0.tick_params(labelsize=sfont)
divider = make_axes_locatable(ax0)
cax = divider.append_axes('right', size='5%', pad=0.05)
# NOTE(review): these axes belong to fig2, but the colorbar is created via
# fig.colorbar.  With an explicit cax the bar still lands in the right place;
# fig2.colorbar would be the consistent call — confirm intent.
cbar = fig.colorbar(im1, cax=cax, ticks = [], orientation='vertical')
#cbar.ax.set_yticklabels(['-3', '+2', '+7']) # vertical colorbar
cbar.set_label('Log$_{10}$(Cs) [cm/s]',size=sfont)
# Log-log version of the same sound-speed field.
ax1.pcolormesh(NewEOS.rho,NewEOS.T,(np.log10(NewEOS.cs)+3.)/10.,cmap=local_cmaps.parula,vmin=0,vmax=1)
ax1.loglog(NewEOS.vc.rl,NewEOS.vc.T,color="black",linewidth=lw,linestyle='--',dashes=ds)
ax1.loglog(NewEOS.vc.rv,NewEOS.vc.T,color="black",linewidth=lw,linestyle='--',dashes=ds)
ax1.loglog(NewEOS.vc.rl[0],NewEOS.vc.T[0],'ko')
if NewEOS.mc.NT > 0:
    ax1.plot(NewEOS.mc.rl,NewEOS.mc.T,color="black",linewidth=lw,linestyle='--',dashes=ds)
#    ax1.plot(NewEOS.mc.rs,NewEOS.mc.T,color="black",linewidth=lw,linestyle='--',dashes=ds)
ax1.set_ylim(10.,1.E7)
ax1.set_xlim(1.E-21,100.)
ax1.set_xlabel('Density (g/cm$^3$)',size=sfont)
ax1.set_ylabel('Temperature (K)',size=sfont)
ax1.set_title('Colors=log$_{10}$(sound speed (cm/s))',size=sfont)
ax1.tick_params(labelsize=sfont)
divider = make_axes_locatable(ax1)
cax = divider.append_axes('right', size='5%', pad=0.05)
#cbar = fig.colorbar(im1, cax=cax, ticks = [0, 0.5, 1], orientation='vertical')
#cbar.ax.set_yticklabels(['-6', '-3.5', '-1']) # vertical colorbar
# Rescaled ticks 0/0.5/1 correspond to log10(cs) = -3/+2/+7.
cbar = fig.colorbar(im1, cax=cax, ticks = [0, 0.5, 1], orientation='vertical')
cbar.ax.set_yticklabels(['-3', '+2', '+7']) # vertical colorbar
cbar.set_label('Log$_{10}$(Cs) [cm/s]',size=sfont)
#================ HFE
# Helmholtz free energy A on the (rho,T) grid, linear axes (ax2) and log-log
# axes (ax3).  Unlike the entropy/sound-speed panels, colors here encode the
# *linear* HFE, shifted/scaled from [amin, amin+alogmax] MJ/kg into [0, 1]
# for the fixed colormap limits.
# (Removed a dead `amin = np.min(NewEOS.A)` that was immediately overwritten.)
amin = -1000.    # MJ/kg; lower edge of the color scale
alogmax = 1500.  # MJ/kg; full span of the color scale
im1=ax2.pcolormesh(NewEOS.rho,NewEOS.T,(NewEOS.A-amin)/alogmax,cmap=local_cmaps.parula,vmin=0,vmax=1,edgecolors='face')
# Vapor-curve branches plus critical-point marker, and optional melt curve.
ax2.plot(NewEOS.vc.rl,NewEOS.vc.T,color="black",linewidth=lw,linestyle='--',dashes=ds)
ax2.plot(NewEOS.vc.rv,NewEOS.vc.T,color="black",linewidth=lw,linestyle='--',dashes=ds)
ax2.plot(NewEOS.vc.rl[0],NewEOS.vc.T[0],'ko')
if NewEOS.mc.NT > 0:
    ax2.plot(NewEOS.mc.rl,NewEOS.mc.T,color="black",linewidth=lw,linestyle='--',dashes=ds)
#    ax2.plot(NewEOS.mc.rs,NewEOS.mc.T,color="black",linewidth=lw,linestyle='--',dashes=ds)
ax2.set_ylim(0.,20000)
ax2.set_xlim(0,30.)
ax2.set_xlabel('Density (g/cm$^3$)',size=sfont)
ax2.set_ylabel('Temperature (K)',size=sfont)
ax2.set_title('Colors=HFE (MJ/kg)',size=sfont)
ax2.tick_params(labelsize=sfont)
divider = make_axes_locatable(ax2)
cax = divider.append_axes('right', size='5%', pad=0.05)
cbar = fig.colorbar(im1, cax=cax, ticks = [], orientation='vertical')
#cbar.ax.set_yticklabels(['-1000','-500','0','500']) # vertical colorbar
# BUGFIX: the plotted quantity is linear HFE (see tick labels -1000..500),
# not log10(HFE); label corrected accordingly.
cbar.set_label('HFE [MJ/kg]',size=sfont)
# Log-log version of the same HFE field.
ax3.pcolormesh(NewEOS.rho,NewEOS.T,(NewEOS.A-amin)/alogmax,cmap=local_cmaps.parula,vmin=0,vmax=1)
ax3.loglog(NewEOS.vc.rl,NewEOS.vc.T,color="black",linewidth=lw,linestyle='--',dashes=ds)
ax3.loglog(NewEOS.vc.rv,NewEOS.vc.T,color="black",linewidth=lw,linestyle='--',dashes=ds)
ax3.loglog(NewEOS.vc.rl[0],NewEOS.vc.T[0],'ko')
if NewEOS.mc.NT > 0:
    ax3.plot(NewEOS.mc.rl,NewEOS.mc.T,color="black",linewidth=lw,linestyle='--',dashes=ds)
#    ax3.plot(NewEOS.mc.rs,NewEOS.mc.T,color="black",linewidth=lw,linestyle='--',dashes=ds)
ax3.set_ylim(10.,1.E7)
ax3.set_xlim(1.E-21,100.)
ax3.set_xlabel('Density (g/cm$^3$)',size=sfont)
ax3.set_ylabel('Temperature (K)',size=sfont)
ax3.set_title('Colors=HFE (MJ/kg)',size=sfont)
ax3.tick_params(labelsize=sfont)
divider = make_axes_locatable(ax3)
cax = divider.append_axes('right', size='5%', pad=0.05)
# Tick positions map back to linear HFE: 0 -> -1000, 1/3 -> -500,
# 2/3 -> 0, 1 -> 500 MJ/kg (v_scaled = (A + 1000)/1500).
cbar = fig.colorbar(im1, cax=cax, ticks = [0,.333,.6667, 1], orientation='vertical')
cbar.ax.set_yticklabels(['-1000','-500','0','500']) # vertical colorbar
cbar.set_label('HFE [MJ/kg]',size=sfont)
#================ KPA flag
# KPA quality/phase flag (integer 1..6) shown with a 6-color discrete map.
# (KPA-0.5)/6 centers flag value k in color bin [(k-1)/6, k/6].
# NOTE(review): cm.get_cmap(name, lutsize) is deprecated in matplotlib >=3.7
# and removed in 3.9; matplotlib.colormaps['plasma'].resampled(6) is the
# modern equivalent — confirm the project's matplotlib version before changing.
cmap2 = cm.get_cmap('plasma', 6) # 6 discrete colors
im1=ax4.pcolormesh(NewEOS.rho,NewEOS.T,((NewEOS.KPA)-0.5)/6.,cmap=cmap2,vmin=0,vmax=1,edgecolors='face')
#im1=ax4.pcolormesh(NewEOS.rho,NewEOS.T,((NewEOS.KPA))/6.,cmap=local_cmaps.parula,vmin=0,vmax=1,edgecolors='face')
ax4.plot(NewEOS.vc.rl,NewEOS.vc.T,color="black",linewidth=lw,linestyle='--',dashes=ds)
ax4.plot(NewEOS.vc.rv,NewEOS.vc.T,color="black",linewidth=lw,linestyle='--',dashes=ds)
ax4.plot(NewEOS.vc.rl[0],NewEOS.vc.T[0],'ko')
if NewEOS.mc.NT > 0:
    ax4.plot(NewEOS.mc.rl,NewEOS.mc.T,color="black",linewidth=lw,linestyle='--',dashes=ds)
#    ax4.plot(NewEOS.mc.rs,NewEOS.mc.T,color="black",linewidth=lw,linestyle='--',dashes=ds)
ax4.set_ylim(0.,20000)
ax4.set_xlim(0,30.)
ax4.set_xlabel('Density (g/cm$^3$)',size=sfont)
ax4.set_ylabel('Temperature (K)',size=sfont)
ax4.set_title('Colors=KPA flag',size=sfont)
ax4.tick_params(labelsize=sfont)
divider = make_axes_locatable(ax4)
cax = divider.append_axes('right', size='5%', pad=0.05)
cbar = fig.colorbar(im1, cax=cax, ticks = [], orientation='vertical')
#cbar.ax.set_yticklabels(['0','1','2','3','4','5', '6']) # vertical colorbar
cbar.set_label('KPA Flag',size=sfont)
# Log-log version of the same flag field.
ax5.pcolormesh(NewEOS.rho,NewEOS.T,((NewEOS.KPA)-0.5)/6.,cmap=cmap2,vmin=0,vmax=1)
ax5.loglog(NewEOS.vc.rl,NewEOS.vc.T,color="black",linewidth=lw,linestyle='--',dashes=ds)
ax5.loglog(NewEOS.vc.rv,NewEOS.vc.T,color="black",linewidth=lw,linestyle='--',dashes=ds)
ax5.loglog(NewEOS.vc.rl[0],NewEOS.vc.T[0],'ko')
if NewEOS.mc.NT > 0:
    ax5.plot(NewEOS.mc.rl,NewEOS.mc.T,color="black",linewidth=lw,linestyle='--',dashes=ds)
#    ax5.plot(NewEOS.mc.rs,NewEOS.mc.T,color="black",linewidth=lw,linestyle='--',dashes=ds)
ax5.set_ylim(10.,1.E7)
ax5.set_xlim(1.E-21,100.)
ax5.set_xlabel('Density (g/cm$^3$)',size=sfont)
ax5.set_ylabel('Temperature (K)',size=sfont)
ax5.set_title('Colors=KPA flag',size=sfont)
ax5.tick_params(labelsize=sfont)
divider = make_axes_locatable(ax5)
cax = divider.append_axes('right', size='5%', pad=0.05)
#cbar = fig.colorbar(im1, cax=cax, ticks = [0, 0.5, 1], orientation='vertical')
#cbar.ax.set_yticklabels(['-6', '-3.5', '-1']) # vertical colorbar
# 13 ticks at every 1/12; odd twelfths are the bin centers for flags 1..6.
cbar = fig.colorbar(im1, cax=cax, ticks = np.arange(13)/12, orientation='vertical')
# BUGFIX: the label list had 12 entries for 13 ticks; a trailing empty label
# is appended so set_yticklabels matches the FixedLocator tick count.
cbar.ax.set_yticklabels(['','1','','2','','3','','4','','5','', '6','']) # vertical colorbar
cbar.set_label('KPA Flag',size=sfont)
fig2.show()
```
# ANEOS Hugoniots Compared to Experimental Data
The following plots compare the ANEOS model Hugoniots with laboratory data (for nonporous and porous iron) downloaded from the shock wave database at http://ihed.ras.ru/rusbank/
Entropy on the Principal Hugoniot was derived by Kraus et al. 2015.
ANEOS Hugoniot: red -- calculated in the ANEOS code using model reference density and temperature.
Calculated Hugoniot: blue -- calculated by linear interpolation of the EOS table starting at model reference density and temperature.
The liquid Hugoniot is calculated in Kraus et al. 2015 starting at the boiling point of liquid iron at 1 bar: 6.01 g/cm$^3$, 3133 K. The ANEOS model Hugoniot is shown for comparison.
```
# Display the initial conditions for the Hugoniot in ANEOS.OUTPUT
# The same five quantities are reported for both Hugoniot variants, so the
# report is driven by a (header, record) table instead of two copied blocks.
for header, hug0 in (
        ('Requested ANEOS Hugoniot initial state from (THUG,RHUG) (red curves in plots below):', NewEOS.anhug),
        ('Calculated Hugoniot using (T0REF,R0REF) initial state (blue curves in plots below):', NewEOS.hug)):
    print('\n' + header)
    print('Initial Pressure (GPa):', hug0.P[0])
    print('Initial Temperature (K):', hug0.T[0])
    print('Initial Sp. Entropy (MJ/K/kg):', hug0.S[0])
    print('Initial Sp. Internal Energy (MJ/kg):', hug0.U[0])
    print('Initial Density (g/cm3):', hug0.rho[0])
#========== PLOTTING STYLES FOR LOTS OF PLOTS =================
# Shared style records used by the comparison plots below.  Convention:
#   [marker-or-linestyle, color, markersize-or-linewidth, legend label]
# (stylephasept carries no label entry.)
# -- line styles --
styleanhug   = ['-', 'red', 1, 'ANEOS Hugoniot']
stylestphug  = ['-', 'blue', 1, 'Calculated Hugoniot']
styleanphase = ['-', 'black', 1, 'ANEOS Phase Boundaries']
styleliqhug  = ['-', 'orange', 1, 'ANEOS Liquid Fe Hugoniot']
stylezfit    = ['-', 'green', 1, ' ']
styleliq     = ['--', 'orange', 7, 'Liquid Fe Hugoniot\n(Kraus et al. 2015)']
# -- marker styles --
styleqmd     = ['^', 'red', 5, ' ']
stylezdat    = ['+', 'black', 5, 'Porous Fe Hugoniot\n(IHED)']
stylegg      = ['+', 'blue', 5, 'STP Hugoniot data\n(IHED)']
stylephasept = ['o', 'black', 6]
#======================LOTS OF PLOTS============================
# plot the Hugoniots for comparison
# 3x2 grid of Hugoniot comparisons against experimental/ab-initio data.
sfont = 15
fig, axes = plt.subplots(nrows=3, ncols=2, figsize=(14,18))
plt.subplots_adjust(wspace=0.4)
#-------------
# Pressure-Density Data vs. ANEOS
ai=0
aj=0
#axes[ai,aj].plot(QMDH_r[1::],QMDH_p[1::],styleqmd[0],markersize=styleqmd[2],color=styleqmd[1],label=styleqmd[3])
#axes[ai,aj].plot(Zdat_r,Zdat_p,stylezdat[0],markersize=stylezdat[2],color=stylezdat[1],label=stylezdat[3])
#axes[ai,aj].plot(LowHugDat_r,LowHugDat_p,stylegg[0],markersize=stylegg[2],color=stylegg[1],label=stylegg[3])
#axes[ai,aj].plot(Zfit_r,Zfit_p,stylezfit[0],linewidth=stylezfit[2],color=stylezfit[1],label=stylezfit[3])
# Model Hugoniots: ANEOS-internal (red) and table-interpolated (blue).
axes[ai,aj].plot(NewEOS.anhug.rho,NewEOS.anhug.P,styleanhug[0],linewidth=styleanhug[2],color=styleanhug[1],label=styleanhug[3])
axes[ai,aj].plot(NewEOS.hug.rho,NewEOS.hug.P,stylestphug[0],linewidth=stylestphug[2],color=stylestphug[1],label=stylestphug[3])
# IHED full-density shots: column 4 holds rho/rho0, so multiply by rho0.
axes[ai,aj].plot(ihed_datarr[ihed_fullden,4]*ihed_rho0,ihed_datarr[ihed_fullden,3],stylegg[0],markersize=stylegg[2],color=stylegg[1],label=stylegg[3])
axes[ai,aj].plot(scironrarr,scironparr,'-',color='m',label='STP Hugoniot\n(Sjostrom & Crockett 2018)')
# Kraus Liquid Iron Hugoniot at 1 bar boiling point
axes[ai,aj].plot(krausironliqrarr[40:100], krausironliqparr[40:100],styleliq[0],markersize=styleliq[2],color=styleliq[1],label=styleliq[3])
#linestyle='-',color='cyan',label='Kraus et al. 2015\n1-bar Boiling Pt. Liquid Hugoniot')
# ANEOS LIQUID HUGONIOT
# Calculate the model Hugoniot with the same initial state as Thomas & Asimow 2013
# NOTE: calchugoniot overwrites NewEOS.hug in place, so the liquid Hugoniot
# must be plotted before the STP state is restored two lines below.
NewEOS.calchugoniot(r0=ironliq_r0,t0=ironliq_t0)
axes[ai,aj].plot(NewEOS.hug.rho,NewEOS.hug.P,styleliqhug[0],linewidth=styleliqhug[2],color=styleliqhug[1],label=styleliqhug[3])
# Put the STP hugoniot back into the structure
NewEOS.calchugoniot(r0=NewEOS.R0REF,t0=NewEOS.T0REF)
axes[ai,aj].set_ylim(0,1100)
axes[ai,aj].set_xlim(7.,16.)
axes[ai,aj].set_xlabel('Density (g/cm$^3$)',size=sfont)
axes[ai,aj].set_ylabel('Pressure (GPa)',size=sfont)
axes[ai,aj].tick_params(labelsize=sfont)
axes[ai,aj].legend(fontsize=sfont-5)
#-------------
# Pressure - Temperature Data vs. ANEOS
ai=0
aj=1
#axes[ai,aj].plot(QMDH_p[1::],QMDH_t[1::]/1.E3,styleqmd[0],markersize=styleqmd[2],color=styleqmd[1],label=styleqmd[3])
#axes[ai,aj].errorbar(ZTdat_p,ZTdat_t/1.E3,xerr=ZTdat_perr,yerr=ZTdat_terr/1.E3,fmt=stylezdat[0],markersize=stylezdat[2],color=stylezdat[1],label=stylezdat[3],elinewidth=1)
#axes[ai,aj].plot(Zfit_p,Zfit_t/1.E3,stylezfit[0],linewidth=stylezfit[2],color=stylezfit[1],label=stylezfit[3])
# Temperatures are plotted in units of 1000 K (hence /1.E3).
axes[ai,aj].plot(NewEOS.anhug.P,NewEOS.anhug.T/1.E3,styleanhug[0],linewidth=styleanhug[2],color=styleanhug[1],label=styleanhug[3])
axes[ai,aj].plot(NewEOS.hug.P,NewEOS.hug.T/1.E3,stylestphug[0],linewidth=stylestphug[2],color=stylestphug[1],label=stylestphug[3])
## add squares
# Filled background rectangles behind the two experimental points —
# presumably their (P,T) uncertainty ranges; TODO confirm against the cited
# measurements (Brown & McQueen 1986; Nguyen & Holmes 2004).
rect = Rectangle((240.,5.),6.,.700,linewidth=1,edgecolor='moccasin',facecolor='moccasin')
axes[ai,aj].add_patch(rect)
axes[ai,aj].plot(243.,5.350,'d',color='orange',label='Brown & McQueen 1986')
rect = Rectangle((222.,4.7),6.,1.000,linewidth=1,edgecolor='moccasin',facecolor='moccasin')
axes[ai,aj].add_patch(rect)
axes[ai,aj].plot(225.,5.100,'o',color='orange',label='Nguyen & Holmes 2004')
axes[ai,aj].set_xlim(0,500)
axes[ai,aj].set_ylim(0,20)
axes[ai,aj].set_ylabel('Temperature (1000 K)',size=sfont)
axes[ai,aj].set_xlabel('Pressure (GPa)',size=sfont)
axes[ai,aj].tick_params(labelsize=sfont)
axes[ai,aj].legend(fontsize=sfont-5)
#-------------
# Particle Velocity vs. Shock Velocity; Data vs. ANEOS
ai=1
aj=0
#axes[ai,aj].plot(QMDH_up[1::],QMDH_us[1::],styleqmd[0],markersize=styleqmd[2],color=styleqmd[1],label=styleqmd[3])
#axes[ai,aj].errorbar(Zdat_up,Zdat_us,xerr=Zdat_uperr,yerr=Zdat_userr,fmt=stylezdat[0],markersize=stylezdat[2],color=stylezdat[1],label=stylezdat[3],elinewidth=1)
#axes[ai,aj].plot(LowHugDat_up,LowHugDat_us,stylegg[0],markersize=stylegg[2],color=stylegg[1],label=stylegg[3])
#axes[ai,aj].plot(Zfit_up,Zfit_us,stylezfit[0],linewidth=stylezfit[2],color=stylezfit[1],label=stylezfit[3])
# Model up-us Hugoniots plus IHED full-density data (columns 1=up, 2=us).
axes[ai,aj].plot(NewEOS.anhug.up,NewEOS.anhug.us,styleanhug[0],linewidth=styleanhug[2],color=styleanhug[1],label=styleanhug[3])
axes[ai,aj].plot(NewEOS.hug.up,NewEOS.hug.us,stylestphug[0],linewidth=stylestphug[2],color=stylestphug[1],label=stylestphug[3])
axes[ai,aj].plot(ihed_datarr[ihed_fullden,1],ihed_datarr[ihed_fullden,2],stylegg[0],markersize=stylegg[2],color=stylegg[1],label=stylegg[3])
#axes[ai,aj].plot(foliq_up,foliq_us,styleliq[0],markersize=styleliq[2],color=styleliq[1],label=styleliq[3])
# ANEOS LIQUID HUGONIOT
# Calculate the model Hugoniot with the same initial state as Thomas & Asimow 2013
#NewEOS.calchugoniot(r0=foliq_r0,t0=foliq_t0)
#axes[ai,aj].plot(NewEOS.hug.up,NewEOS.hug.us,styleliqhug[0],linewidth=styleliqhug[2],color=styleliqhug[1],label=styleliqhug[3])
# Put the STP hugoniot back into the structure
#NewEOS.calchugoniot(r0=NewEOS.R0REF,t0=NewEOS.T0REF)
axes[ai,aj].set_xlim(0,11)
axes[ai,aj].set_ylim(0,20)
axes[ai,aj].set_xlabel('Particle Velocity (km/s)',size=sfont)
axes[ai,aj].set_ylabel('Shock Velocity (km/s)',size=sfont)
axes[ai,aj].tick_params(labelsize=sfont)
axes[ai,aj].legend(fontsize=sfont-5)
#-------------
# Sp. Entropy vs Pressure; Calculated from experiments vs. ANEOS
ai=1
aj=1
#axes[ai,aj].plot(Zfit_p,Zfit_s*1.E3,stylezfit[0],linewidth=stylezfit[2],color=stylezfit[1],label=stylezfit[3])
#axes[ai,aj].fill_between(Zfit_p,(Zfit_s-Zfit_serr)*1000.,(Zfit_s+Zfit_serr)*1000.,color=stylezfit[1],alpha=0.2)
# pressure error envelope is tiny and can't be seen on this scale
#axes[ai,aj].fill_betweenx(Zfit_s*1000.,(Zfit_p-Zfit_perr),(Zfit_p+Zfit_perr),color='red',alpha=0.2)
# Model entropies are in MJ/K/kg; *1.E3 converts to the kJ/K/kg axis.
axes[ai,aj].plot(NewEOS.anhug.P,NewEOS.anhug.S*1.E3,styleanhug[0],linewidth=styleanhug[2],color=styleanhug[1],label=styleanhug[3])
axes[ai,aj].plot(NewEOS.hug.P,NewEOS.hug.S*1.E3,stylestphug[0],linewidth=stylestphug[2],color=stylestphug[1],label=stylestphug[3])
# Tie point from Kraus et al. 2015
# Asymmetric pressure error bar and symmetric entropy error bar drawn as
# two explicit line segments around the tie point.
axes[ai,aj].plot(Ztie_p,Ztie_s*1.E3,'o',markersize=5,color='red',label='Entropy Tie Point (Kraus et al. 2015)')
axes[ai,aj].plot([Ztie_p+Ztie_perrlow,Ztie_p+Ztie_perrhigh],[Ztie_s*1.E3,Ztie_s*1.E3],'-',color='red')
axes[ai,aj].plot([Ztie_p,Ztie_p],[(Ztie_s-Ztie_serr)*1.E3,(Ztie_s+Ztie_serr)*1.E3],'-',color='red')
# Iron at STP from JANAF
axes[ai,aj].plot(1.E-4,ironSTP_S*1.E3,'s',markersize=5,color='orange',label='STP (JANAF)')
colstr='green'
# NOTE(review): the Kraus arrays are divided by 1.e3 here while the model
# curves are multiplied by 1.E3 — consistent only if the Kraus arrays are in
# J/K/kg; confirm the source units.
axes[ai,aj].plot(krausironparr[40:80], krausironsarr[40:80]/1.e3, linestyle='-',color=colstr)
#axes[ai,aj].plot(krausironparr[40:80], krausironsarr[40:80]/1.e3, linestyle='-',color=colstr,label='Kraus et al. 2015\nLiquid Region STP Hugoniot')
#axes[ai,aj].plot(krausironparr[40:80], krausironsuarr[40:80]/1.e3, linestyle=':',color=colstr)
#axes[ai,aj].plot(krausironparr[40:80], krausironslarr[40:80]/1.e3, linestyle=':',color=colstr)
axes[ai,aj].fill_between(krausironparr[40:80],krausironslarr[40:80]/1.e3,krausironsuarr[40:80]/1.e3,color='green',alpha=0.2,label='Kraus et al. 2015\nLiquid Region STP Hugoniot')
axes[ai,aj].set_ylim(0,4.)
axes[ai,aj].set_xlim(0,1200)
axes[ai,aj].set_ylabel('Specific Entropy (kJ/K/kg)',size=sfont)
axes[ai,aj].set_xlabel('Pressure (GPa)',size=sfont)
axes[ai,aj].tick_params(labelsize=sfont)
axes[ai,aj].legend(fontsize=sfont-5)
#-------------
# Pressure-Density Data vs. ANEOS LOW PRESSURE + LIQUID HUGONIOT
# Compares porous-iron Hugoniots at four distension ratios m = R0/R00
# (1.128, 1.315, 1.660, 2.333) against IHED porous data selected by
# matching column 0 of ihed_datarr to m.
ai=2
aj=0
#axes[ai,aj].plot(QMDH_r[1::],QMDH_p[1::],styleqmd[0],markersize=styleqmd[2],color=styleqmd[1],label=styleqmd[3])
#axes[ai,aj].plot(Zdat_r,Zdat_p,stylezdat[0],markersize=stylezdat[2],color=stylezdat[1],label=stylezdat[3])
#axes[ai,aj].plot(LowHugDat_r,LowHugDat_p,stylegg[0],markersize=stylegg[2],color=stylegg[1],label=stylegg[3])
# Liquid forsterite Hugoniot Data
#axes[ai,aj].plot(foliq_r,foliq_p,styleliq[0],markersize=styleliq[2],color=styleliq[1],label=styleliq[3])
#axes[ai,aj].plot(foliq_r0,0,styleliq[0],markersize=styleliq[2],color=styleliq[1])
#axes[ai,aj].plot(Zfit_r,Zfit_p,stylezfit[0],linewidth=stylezfit[2],color=stylezfit[1],label=stylezfit[3])
# ANEOS REFERENCE POINT (STP) Hugoniots
axes[ai,aj].plot(NewEOS.anhug.rho,NewEOS.anhug.P,styleanhug[0],linewidth=styleanhug[2],color=styleanhug[1],label=styleanhug[3])
axes[ai,aj].plot(NewEOS.hug.rho,NewEOS.hug.P,stylestphug[0],linewidth=stylestphug[2],color=stylestphug[1],label=stylestphug[3])
axes[ai,aj].plot(ihed_datarr[ihed_fullden,4]*ihed_rho0,ihed_datarr[ihed_fullden,3],stylegg[0],markersize=stylegg[2],color=stylegg[1],label=stylegg[3])
# POROUS HUGONIOT m=R0/R00=1.128
m=1.128
colstr='black'
labstr='R0/R00=1.128 (+IHED -ANEOS)'
ihed_porous = np.where(ihed_datarr[:,0] == m)[0]
axes[ai,aj].plot(ihed_datarr[ihed_porous,4]*ihed_rho0,ihed_datarr[ihed_porous,3],'+',markersize=5,color=colstr,label=labstr)
# calcporoushugoniot overwrites NewEOS.hug with the porous Hugoniot.
NewEOS.calcporoushugoniot(r0=NewEOS.R0REF,t0=NewEOS.T0REF,r00=NewEOS.R0REF/m)
axes[ai,aj].plot(NewEOS.hug.rho,NewEOS.hug.P,styleliqhug[0],linewidth=styleliqhug[2],color=colstr)
# Put the STP hugoniot back into the structure
# NOTE(review): this reset appears redundant — NewEOS.hug is recomputed by
# the next calcporoushugoniot call before it is read again; confirm.
NewEOS.calchugoniot(r0=NewEOS.R0REF,t0=NewEOS.T0REF)
# POROUS HUGONIOT m=R0/R00=1.315
m=1.315
colstr='green'
labstr='R0/R00=1.315 (+IHED -ANEOS)'
ihed_porous = np.where(ihed_datarr[:,0] == m)[0]
axes[ai,aj].plot(ihed_datarr[ihed_porous,4]*ihed_rho0,ihed_datarr[ihed_porous,3],'+',markersize=5,color=colstr,label=labstr)
NewEOS.calcporoushugoniot(r0=NewEOS.R0REF,t0=NewEOS.T0REF,r00=NewEOS.R0REF/m)
axes[ai,aj].plot(NewEOS.hug.rho,NewEOS.hug.P,styleliqhug[0],linewidth=styleliqhug[2],color=colstr)
# POROUS HUGONIOT m=R0/R00=1.660
m=1.660
colstr='m'
labstr='R0/R00=1.660 (+IHED -ANEOS)'
ihed_porous = np.where(ihed_datarr[:,0] == m)[0]
axes[ai,aj].plot(ihed_datarr[ihed_porous,4]*ihed_rho0,ihed_datarr[ihed_porous,3],'+',markersize=5,color=colstr,label=labstr)
NewEOS.calcporoushugoniot(r0=NewEOS.R0REF,t0=NewEOS.T0REF,r00=NewEOS.R0REF/m)
axes[ai,aj].plot(NewEOS.hug.rho,NewEOS.hug.P,styleliqhug[0],linewidth=styleliqhug[2],color=colstr)
# POROUS HUGONIOT m=R0/R00=2.333
m=2.333
colstr='c'
labstr='R0/R00=2.333 (+IHED -ANEOS)'
ihed_porous = np.where(ihed_datarr[:,0] == m)[0]
axes[ai,aj].plot(ihed_datarr[ihed_porous,4]*ihed_rho0,ihed_datarr[ihed_porous,3],'+',markersize=5,color=colstr,label=labstr)
NewEOS.calcporoushugoniot(r0=NewEOS.R0REF,t0=NewEOS.T0REF,r00=NewEOS.R0REF/m)
axes[ai,aj].plot(NewEOS.hug.rho,NewEOS.hug.P,styleliqhug[0],linewidth=styleliqhug[2],color=colstr)
# Put the STP hugoniot back into the structure
NewEOS.calchugoniot(r0=NewEOS.R0REF,t0=NewEOS.T0REF)
axes[ai,aj].set_ylim(-5,200)
axes[ai,aj].set_xlim(5,12.)
axes[ai,aj].set_xlabel('Density (g/cm$^3$)',size=sfont)
axes[ai,aj].set_ylabel('Pressure (GPa)',size=sfont)
axes[ai,aj].tick_params(labelsize=sfont)
axes[ai,aj].legend(fontsize=sfont-5)
#-------------
# Pressure-Density Data vs. ANEOS EXTRAPOLATED HIGH PRESSURE
# Same curves as the first panel, re-plotted on wider limits to show the
# extrapolated high-pressure behavior (up to 6200 GPa, 25 g/cm3).
ai=2
aj=1
#axes[ai,aj].plot(QMDH_r[1::],QMDH_p[1::],styleqmd[0],markersize=styleqmd[2],color=styleqmd[1],label=styleqmd[3])
#axes[ai,aj].plot(Zdat_r,Zdat_p,stylezdat[0],markersize=stylezdat[2],color=stylezdat[1],label=stylezdat[3])
#axes[ai,aj].plot(LowHugDat_r,LowHugDat_p,stylegg[0],markersize=stylegg[2],color=stylegg[1],label=stylegg[3])
#axes[ai,aj].plot(Zfit_r,Zfit_p,stylezfit[0],linewidth=stylezfit[2],color=stylezfit[1],label=stylezfit[3])
axes[ai,aj].plot(NewEOS.anhug.rho,NewEOS.anhug.P,styleanhug[0],linewidth=styleanhug[2],color=styleanhug[1],label=styleanhug[3])
axes[ai,aj].plot(NewEOS.hug.rho,NewEOS.hug.P,stylestphug[0],linewidth=stylestphug[2],color=stylestphug[1],label=stylestphug[3])
axes[ai,aj].plot(ihed_datarr[ihed_fullden,4]*ihed_rho0,ihed_datarr[ihed_fullden,3],stylegg[0],markersize=stylegg[2],color=stylegg[1],label=stylegg[3])
axes[ai,aj].plot(scironrarr,scironparr,'-',color='m',label='STP Hugoniot\n(Sjostrom & Crockett 2018)')
axes[ai,aj].set_ylim(0,6200)
axes[ai,aj].set_xlim(7.,25.)
axes[ai,aj].set_xlabel('Density (g/cm$^3$)',size=sfont)
axes[ai,aj].set_ylabel('Pressure (GPa)',size=sfont)
axes[ai,aj].tick_params(labelsize=sfont)
axes[ai,aj].legend(fontsize=sfont-5)
#======================LOTS OF PLOTS============================
# plot the Hugoniots for comparison
# 1x2 figure: sound speed along the Hugoniot (left) and partial-release
# paths in particle-velocity/pressure space (right).
sfont = 15
fig, axes = plt.subplots(nrows=1, ncols=2, figsize=(14,6))
plt.subplots_adjust(wspace=0.4)
#-------------
# Pressure-Sound Speed Hugoniot Data vs. ANEOS
# NOTE(review): axes is 1-D here (nrows=1), so only ai is used for indexing;
# the aj assignments below are dead.
ai=0
aj=0
axes[ai].plot(NewEOS.hug.P,NewEOS.hug.cs,stylestphug[0],linewidth=stylestphug[2],color=stylestphug[1],label=stylestphug[3])
axes[ai].plot(ihed_csarr[:,4],ihed_csarr[:,3],stylegg[0],markersize=stylegg[2],color=stylegg[1],label=stylegg[3])
axes[ai].set_xlim(0,400)
axes[ai].set_ylim(4.,14.)
axes[ai].set_ylabel('Sound speed (km/s)',size=sfont)
axes[ai].set_xlabel('Pressure (GPa)',size=sfont)
axes[ai].tick_params(labelsize=sfont)
axes[ai].legend(fontsize=sfont-5)
#-------------
# Particle velocity - Pressure Data vs. ANEOS
ai=1
aj=1
axes[ai].semilogy(NewEOS.hug.up,NewEOS.hug.P,stylestphug[0],linewidth=stylestphug[2],color=stylestphug[1],label=stylestphug[3])
axes[ai].plot(ihed_relarr[:,3],ihed_relarr[:,4],'s',markersize=stylegg[2],color='red',label='Partial release (IHED)')
axes[ai].plot(ihed_relarr[:,1],ihed_relarr[:,2],'s',markersize=stylegg[2],color='red')
# For each distinct shock pressure in the release data, construct the ANEOS
# release isentrope through that Hugoniot state and integrate the Riemann
# invariant du = dP/(rho*cs) downward in pressure to get particle velocity.
uniqrelarr = np.unique(ihed_relarr[:,2])
for j in range(0,len(uniqrelarr)):
    ptarget=uniqrelarr[j]
    # ANEOS ISENTROPE NEAR SAME PRESSURE
    ANEOSIsen = isentrope_class() # code in eostable.py
    # Entropy of the Hugoniot state at the target pressure.
    Sisen = np.interp(ptarget,NewEOS.hug.P,NewEOS.hug.S) # MJ/K/kg
    # loop across all densities and extract the values for the requested isentrope
    for i in range(0,NewEOS.ND):
        # Only use grid points with positive entropy at this density column.
        ind = np.where((NewEOS.S[:,i] > 0))[0]
        interpfunction = interpolate.interp1d(NewEOS.S[ind,i],NewEOS.P[ind,i]) # MJ/K/kg, GPa
        ANEOSIsen.pressure = np.append(ANEOSIsen.pressure,interpfunction(Sisen)) # GPa
        interpfunction = interpolate.interp1d(NewEOS.S[ind,i],NewEOS.T[ind]) # MJ/K/kg, K (T is the 1-D temperature axis)
        ANEOSIsen.temperature = np.append(ANEOSIsen.temperature,interpfunction(Sisen)) # K
        interpfunction = interpolate.interp1d(NewEOS.S[ind,i],NewEOS.cs[ind,i]) # MJ/K/kg, cm/s
        ANEOSIsen.soundspeed = np.append(ANEOSIsen.soundspeed,interpfunction(Sisen)) # cm/s
        interpfunction = interpolate.interp1d(NewEOS.S[ind,i],NewEOS.U[ind,i]) # MJ/K/kg, MJ/kg
        ANEOSIsen.energy = np.append(ANEOSIsen.energy,interpfunction(Sisen)) # MJ/kg
    ANEOSIsen.density = np.copy(NewEOS.rho)*1000. # kg/m3
    ANEOSIsen.partvel = np.zeros(NewEOS.ND) # m/s
    ir0 = int(np.round(np.where(ANEOSIsen.pressure >= ptarget)[0][0])) # go up just past the intersection with Hugoniot
    # Seed the integration with the Hugoniot particle velocity at ptarget.
    ANEOSIsen.partvel[ir0] = np.interp(ptarget,NewEOS.hug.P,NewEOS.hug.up)*1.E3 # m/s
    #print('ANEOS Starting Pressure:',ptarget)
    # March down the isentrope in density index until pressure drops below
    # 0.0001 GPa, accumulating du = dP/(rho*cs); cs/1.E2 converts cm/s to m/s.
    iir=ir0-1
    while (ANEOSIsen.pressure[iir]>.0001):
        if iir == ir0-1:
            dp = (ptarget - ANEOSIsen.pressure[iir])*1.E9 # Pa
        else:
            dp = (ANEOSIsen.pressure[iir+1]-ANEOSIsen.pressure[iir])*1.E9 # Pa
        ANEOSIsen.partvel[iir] = ANEOSIsen.partvel[iir+1] + dp/(ANEOSIsen.density[iir]*ANEOSIsen.soundspeed[iir+1]/1.E2) # m/s
        #print(iir,ANEOSIsen.pressure[iir],ANEOSIsen.partvel[iir]/1.e3,dp/1.e9,ANEOSIsen.density[iir])
        iir=iir-1
    # plot aneos isentrope
    # Restrict the plotted segment to pressures below ptarget and above the
    # 0.01 GPa floor; only the first curve carries the legend label.
    tmp = np.where((ANEOSIsen.pressure < ptarget)&(ANEOSIsen.pressure > .01))[0]
    if j == 0:
        axes[ai].plot(ANEOSIsen.partvel[tmp]/1.e3,ANEOSIsen.pressure[tmp],'--',color='blue',label='ANEOS release isentrope')
    else:
        axes[ai].plot(ANEOSIsen.partvel[tmp]/1.e3,ANEOSIsen.pressure[tmp],'--',color='blue')
axes[ai].set_xlim(0,7)
axes[ai].set_ylim(.01,200)
axes[ai].set_xlabel('Particle velocity (km/s)',size=sfont)
axes[ai].set_ylabel('Pressure (GPa)',size=sfont)
axes[ai].tick_params(labelsize=sfont)
axes[ai].legend(fontsize=sfont-5)
```
## Model Critical Point and Shock-Induced Phase Changes
```
# Report the model critical point, 1-bar phase-change states, and (if
# present) the triple point, then tabulate shock pressures / impact-match
# velocities required for shock-induced melting and vaporization.
print('ANEOS Calculated Critical point:')
print(' Temperature (K): ',NewEOS.cp.T)
print(' Pressure (GPa): ', NewEOS.cp.P)
print(' Density (g/cm^3): ',NewEOS.cp.rho)
print(' Sp. Energy (MJ/kg): ', NewEOS.cp.U)
print(' Sp. Entropy (kJ/K/kg): ', NewEOS.cp.S*1.e3)
print('\n')
print('ANEOS Calculated Values at 1 bar:')
print(' Temperature at melting (K): ', NewEOS.onebar.Tmelt)
print(' Liquid density at melting (g/cm^3): ', NewEOS.onebar.rhocm)
print(' Sp. Entropy at inc. melting Sim (kJ/K/kg): ', NewEOS.onebar.Sim*1.E3)
print(' Sp. Entropy at com. melting Scm (kJ/K/kg): ', NewEOS.onebar.Scm*1.E3)
print(' Temperature at boiling (K): ', NewEOS.onebar.Tvap)
print(' Liquid density at boiling (g/cm^3): ', NewEOS.onebar.rhoiv)
print(' Sp. Entropy at inc. vaporization Siv (kJ/K/kg): ', NewEOS.onebar.Siv*1.E3)
print(' Sp. Entropy at com. vaporization Scv (kJ/K/kg): ', NewEOS.onebar.Scv*1.E3)
print('\n')
# tp.T > 0 signals that the model produced a melt curve / triple point.
if NewEOS.tp.T > 0:
    print('ANEOS Calculated Triple Point:')
    print(' Temperature (K): ',NewEOS.tp.T)
    print(' Pressure (GPa): ', NewEOS.tp.P)
    print(' Solid density (g/cm^3): ',NewEOS.tp.rhos)
    print(' Liquid density (g/cm^3): ',NewEOS.tp.rhol)
    print(' Vapor density (g/cm^3): ',NewEOS.tp.rhov)
    print(' Sp. Entropy at inc. melt Sim (kJ/K/kg): ', NewEOS.tp.Sim*1.E3)
    print(' Sp. Entropy at com. melt Scm (kJ/K/kg): ', NewEOS.tp.Scm*1.E3)
    print(' Sp. Entropy at inc. vap. Siv (kJ/K/kg): ', NewEOS.tp.Siv*1.E3)
    print(' Sp. Entropy at com. vap. Scv (kJ/K/kg): ', NewEOS.tp.Scv*1.E3)
else:
    print('ANEOS: No melt curve or triple point in this calculation.')
print('\n')
# Columns: incipient melt, complete melt, incipient vap., 50% vap., complete
# vap.  Impact match velocity assumes symmetric impact (2x particle velocity).
print('Starting at reference conditions (rho,T): ',NewEOS.R0REF,NewEOS.T0REF, ' (g/cm^3, K)')
print('ANEOS model conditions for shock-induced vaporization on release to 1 bar:')
print('                     :        IM        CM        IV      50%V        CV')
print('Shock Pressure (GPa) : {:10.2f}{:10.2f}{:10.2f}{:10.2f}{:10.2f}'.format(
    np.interp(NewEOS.onebar.Sim,NewEOS.hug.S,NewEOS.hug.P),
    np.interp(NewEOS.onebar.Scm,NewEOS.hug.S,NewEOS.hug.P),
    np.interp(NewEOS.onebar.Siv,NewEOS.hug.S,NewEOS.hug.P),
    np.interp(NewEOS.onebar.Siv+0.5*(NewEOS.onebar.Scv-NewEOS.onebar.Siv),NewEOS.hug.S,NewEOS.hug.P),
    np.interp(NewEOS.onebar.Scv,NewEOS.hug.S,NewEOS.hug.P)))
print('Imp. Match Vel (km/s): {:10.2f}{:10.2f}{:10.2f}{:10.2f}{:10.2f}'.format(
    np.interp(NewEOS.onebar.Sim,NewEOS.hug.S,NewEOS.hug.up)*2,
    np.interp(NewEOS.onebar.Scm,NewEOS.hug.S,NewEOS.hug.up)*2,
    np.interp(NewEOS.onebar.Siv,NewEOS.hug.S,NewEOS.hug.up)*2,
    np.interp(NewEOS.onebar.Siv+0.5*(NewEOS.onebar.Scv-NewEOS.onebar.Siv),NewEOS.hug.S,NewEOS.hug.up)*2,
    np.interp(NewEOS.onebar.Scv,NewEOS.hug.S,NewEOS.hug.up)*2))
# NOTE(review): unlike the triple-point report above, this section is not
# guarded by `if NewEOS.tp.T > 0` — with no triple point the tp entropies
# would feed stale/zero values into np.interp; confirm intended behavior.
print('ANEOS model conditions for shock-induced vaporization on release to the triple point pressure:')
print('                     :        IM        CM        IV      50%V        CV')
print('Shock Pressure (GPa) : {:10.2f}{:10.2f}{:10.2f}{:10.2f}{:10.2f}'.format(
    np.interp(NewEOS.tp.Sim,NewEOS.hug.S,NewEOS.hug.P),
    np.interp(NewEOS.tp.Scm,NewEOS.hug.S,NewEOS.hug.P),
    np.interp(NewEOS.tp.Siv,NewEOS.hug.S,NewEOS.hug.P),
    np.interp(NewEOS.tp.Siv+0.5*(NewEOS.tp.Scv-NewEOS.tp.Siv),NewEOS.hug.S,NewEOS.hug.P),
    np.interp(NewEOS.tp.Scv,NewEOS.hug.S,NewEOS.hug.P)))
print('Imp. Match Vel (km/s): {:10.2f}{:10.2f}{:10.2f}{:10.2f}{:10.2f}'.format(
    np.interp(NewEOS.tp.Sim,NewEOS.hug.S,NewEOS.hug.up)*2,
    np.interp(NewEOS.tp.Scm,NewEOS.hug.S,NewEOS.hug.up)*2,
    np.interp(NewEOS.tp.Siv,NewEOS.hug.S,NewEOS.hug.up)*2,
    np.interp(NewEOS.tp.Siv+0.5*(NewEOS.tp.Scv-NewEOS.tp.Siv),NewEOS.hug.S,NewEOS.hug.up)*2,
    np.interp(NewEOS.tp.Scv,NewEOS.hug.S,NewEOS.hug.up)*2))
print('\n')
# Same table using tabulated JANAF entropies (J/K/mol converted via the
# formula weight FMW) instead of the ANEOS 1-bar values.
print('USING JANAF ENTROPIES AT 1 BAR:')
print('Starting at reference conditions (rho,T): ',NewEOS.R0REF,NewEOS.T0REF, ' (g/cm^3, K)')
print('ANEOS model conditions for shock-induced vaporization on release to 1 bar (JANAF):')
print('                     :        IM        CM        IV      50%V        CV')
print('Shock Pressure (GPa) : {:10.2f}{:10.2f}{:10.2f}{:10.2f}{:10.2f}'.format(
    np.interp(92.190/NewEOS.FMW/1.e3,NewEOS.hug.S,NewEOS.hug.P),
    np.interp(99.823/NewEOS.FMW/1.e3,NewEOS.hug.S,NewEOS.hug.P),
    np.interp(2.24e-3,NewEOS.hug.S,NewEOS.hug.P),
    np.interp(2.24e-3+0.5*((4.238-2.24)*1.e-3),NewEOS.hug.S,NewEOS.hug.P),
    np.interp(4.238e-3,NewEOS.hug.S,NewEOS.hug.P)))
# `stop` is an undefined name: it raises NameError and halts execution here
# — presumably a deliberate notebook-style breakpoint; confirm before removing.
stop
```
## Model Phase Boundaries
Black lines are the ANEOS phase boundaries. Blue curve is the model Principal Hugoniot.
Orange diamonds are the 1-bar boiling point; orange squares are the 1-bar melting point.
The critical point has not been measured experimentally. There are several ab initio calculations, summarized in Medvedev 2014. The range of calculated critical points is shown below by the light blue box.
```
# PHASE DIAGRAM PLOTS
#======================LOTS OF PLOTS============================
# Plot aneos phase boundaries with focus on the vapor curve
sfont = 15
fig, axes = plt.subplots(nrows=4, ncols=2, figsize=(14,24))
plt.subplots_adjust(wspace=0.4)
#------------------------------
# entropy-temperature
ai=0
aj=0
axes[ai,aj].plot(NewEOS.vc.Sl*1.E3,NewEOS.vc.T,styleanphase[0],color=styleanphase[1],linewidth=styleanphase[2],label=styleanphase[3])
axes[ai,aj].plot(NewEOS.vc.Sv*1.E3,NewEOS.vc.T,styleanphase[0],color=styleanphase[1],linewidth=styleanphase[2])
axes[ai,aj].plot(NewEOS.cp.S*1.E3,NewEOS.cp.T,'o',color=stylephasept[1],markersize=stylephasept[2])
if NewEOS.tp.T > 0:
axes[ai,aj].plot(NewEOS.mc.Sl*1.E3,NewEOS.mc.T,styleanphase[0],color=styleanphase[1],linewidth=styleanphase[2])
axes[ai,aj].plot(NewEOS.mc.Ss*1.E3,NewEOS.mc.T,styleanphase[0],color=styleanphase[1],linewidth=styleanphase[2])
axes[ai,aj].plot(NewEOS.hug.S*1.E3,NewEOS.hug.T,stylestphug[0],linewidth=stylestphug[2],color=stylestphug[1],label=stylestphug[3])
axes[ai,aj].plot(NewEOS.hug.S[0]*1.E3,NewEOS.hug.T[0],'o',color=stylestphug[1])
axes[ai,aj].set_ylim(0,1.5*NewEOS.cp.T)
axes[ai,aj].set_xlim(0,7)
axes[ai,aj].set_xlabel('Specific Entropy (kJ/K/kg)',size=sfont)
axes[ai,aj].set_ylabel('Temperature [K]',size=sfont)
axes[ai,aj].tick_params(labelsize=sfont)
#-------------------------
# density-pressure
ai=0
aj=1
axes[ai,aj].semilogy(NewEOS.vc.rl,NewEOS.vc.Pl,styleanphase[0],color=styleanphase[1],linewidth=styleanphase[2],label=styleanphase[3])
axes[ai,aj].plot(NewEOS.vc.rv,NewEOS.vc.Pv,styleanphase[0],color=styleanphase[1],linewidth=styleanphase[2])
axes[ai,aj].plot(NewEOS.cp.rho,NewEOS.cp.P,'o',color=stylephasept[1],markersize=stylephasept[2])
if NewEOS.tp.T > 0:
axes[ai,aj].plot(NewEOS.mc.rl,NewEOS.mc.Pl,styleanphase[0],color=styleanphase[1],linewidth=styleanphase[2])
axes[ai,aj].plot(NewEOS.mc.rs,NewEOS.mc.Ps,styleanphase[0],color=styleanphase[1],linewidth=styleanphase[2])
axes[ai,aj].plot(NewEOS.hug.rho,NewEOS.hug.P,stylestphug[0],linewidth=stylestphug[2],color=stylestphug[1],label=stylestphug[3])
axes[ai,aj].plot(NewEOS.hug.rho[0],NewEOS.hug.P[0],'o',color=stylestphug[1])
axes[ai,aj].set_ylim(1.e-5,300)
axes[ai,aj].set_xlim(0,10)
axes[ai,aj].set_xlabel('Density (g/cm$^3$)',size=sfont)
axes[ai,aj].set_ylabel('Pressure (GPa)',size=sfont)
axes[ai,aj].tick_params(labelsize=sfont)
#------
#entropy-pressure
# Panel [1,0]: phase boundaries and Hugoniot in entropy-pressure space (log-P).
ai=1
aj=0
axes[ai,aj].semilogy(NewEOS.vc.Sl*1.E3,NewEOS.vc.Pl,styleanphase[0],color=styleanphase[1],linewidth=styleanphase[2],label=styleanphase[3])
axes[ai,aj].plot(NewEOS.vc.Sv*1.E3,NewEOS.vc.Pv,styleanphase[0],color=styleanphase[1],linewidth=styleanphase[2])
axes[ai,aj].plot(NewEOS.cp.S*1.E3,NewEOS.cp.P,'o',color=stylephasept[1],markersize=stylephasept[2])
if NewEOS.tp.T > 0:
axes[ai,aj].plot(NewEOS.mc.Sl*1.E3,NewEOS.mc.Pl,styleanphase[0],color=styleanphase[1],linewidth=styleanphase[2])
axes[ai,aj].plot(NewEOS.mc.Ss*1.E3,NewEOS.mc.Ps,styleanphase[0],color=styleanphase[1],linewidth=styleanphase[2])
axes[ai,aj].plot(NewEOS.hug.S*1.E3,NewEOS.hug.P,stylestphug[0],linewidth=stylestphug[2],color=stylestphug[1],label=stylestphug[3])
axes[ai,aj].plot(NewEOS.hug.S[0]*1.E3,NewEOS.hug.P[0],'o',color=stylestphug[1])
axes[ai,aj].set_xlabel('Specific Entropy (kJ/K/kg)',size=sfont)
axes[ai,aj].set_ylabel('Pressure (GPa)',size=sfont)
axes[ai,aj].tick_params(labelsize=sfont)
axes[ai,aj].set_ylim(1.e-12,10000)
axes[ai,aj].set_xlim(0,7)
#-----------
# specific energy - pressure
# Panel [1,1]: phase boundaries and Hugoniot in internal energy-pressure space (log-P).
ai=1
aj=1
axes[ai,aj].semilogy(NewEOS.vc.Ul,NewEOS.vc.Pl,styleanphase[0],color=styleanphase[1],linewidth=styleanphase[2],label=styleanphase[3])
axes[ai,aj].plot(NewEOS.vc.Uv,NewEOS.vc.Pv,styleanphase[0],color=styleanphase[1],linewidth=styleanphase[2])
axes[ai,aj].plot(NewEOS.cp.U,NewEOS.cp.P,'o',color=stylephasept[1],markersize=stylephasept[2])
if NewEOS.tp.T > 0:
axes[ai,aj].plot(NewEOS.mc.Ul,NewEOS.mc.Pl,styleanphase[0],color=styleanphase[1],linewidth=styleanphase[2])
axes[ai,aj].plot(NewEOS.mc.Us,NewEOS.mc.Ps,styleanphase[0],color=styleanphase[1],linewidth=styleanphase[2])
axes[ai,aj].plot(NewEOS.hug.U,NewEOS.hug.P,stylestphug[0],linewidth=stylestphug[2],color=stylestphug[1],label=stylestphug[3])
axes[ai,aj].plot(NewEOS.hug.U[0],NewEOS.hug.P[0],'o',color=stylestphug[1])
axes[ai,aj].set_xlabel('Specific Internal Energy (MJ/kg)',size=sfont)
axes[ai,aj].set_ylabel('Pressure (GPa)',size=sfont)
axes[ai,aj].tick_params(labelsize=sfont)
axes[ai,aj].set_ylim(1.e-12,10000)
# x range scales with the critical-point energy so the CP stays in frame
axes[ai,aj].set_xlim(-3,NewEOS.cp.U*1.5)
#-------
# temperature - pressure
# Panel [2,0]: phase boundaries and Hugoniot in temperature-pressure space (log-P).
ai=2
aj=0
axes[ai,aj].semilogy(NewEOS.vc.T,NewEOS.vc.Pl,styleanphase[0],color=styleanphase[1],linewidth=styleanphase[2],label=styleanphase[3])
axes[ai,aj].plot(NewEOS.vc.T,NewEOS.vc.Pv,styleanphase[0],color=styleanphase[1],linewidth=styleanphase[2])
axes[ai,aj].plot(NewEOS.cp.T,NewEOS.cp.P,'o',color=stylephasept[1],markersize=stylephasept[2])
if NewEOS.tp.T > 0:
axes[ai,aj].plot(NewEOS.mc.T,NewEOS.mc.Pl,styleanphase[0],color=styleanphase[1],linewidth=styleanphase[2])
axes[ai,aj].plot(NewEOS.mc.T,NewEOS.mc.Ps,styleanphase[0],color=styleanphase[1],linewidth=styleanphase[2])
axes[ai,aj].plot(NewEOS.hug.T,NewEOS.hug.P,stylestphug[0],linewidth=stylestphug[2],color=stylestphug[1],label=stylestphug[3])
axes[ai,aj].plot(NewEOS.hug.T[0],NewEOS.hug.P[0],'o',color=stylestphug[1])
axes[ai,aj].set_xlabel('Temperature (K)',size=sfont)
axes[ai,aj].set_ylabel('Pressure (GPa)',size=sfont)
axes[ai,aj].tick_params(labelsize=sfont)
axes[ai,aj].set_xlim(0,12000)
axes[ai,aj].set_ylim(1.e-16,100)
#-------
# pressure - temperature melt curve
# Panel [2,1]: melt curve and Hugoniot with pressure on the x axis (linear axes).
# Vapor-curve plotting is intentionally disabled in this zoomed view.
ai=2
aj=1
#axes[ai,aj].plot(NewEOS.vc.Pl,NewEOS.vc.T,styleanphase[0],color=styleanphase[1],linewidth=styleanphase[2],label=styleanphase[3])
#axes[ai,aj].plot(NewEOS.vc.Pv,NewEOS.vc.T,styleanphase[0],color=styleanphase[1],linewidth=styleanphase[2])
#axes[ai,aj].plot(NewEOS.cp.P,NewEOS.cp.T,'o',color=stylephasept[1],markersize=stylephasept[2])
if NewEOS.tp.T > 0:
axes[ai,aj].plot(NewEOS.mc.Pl,NewEOS.mc.T,styleanphase[0],color=styleanphase[1],linewidth=styleanphase[2],label=styleanphase[3])
axes[ai,aj].plot(NewEOS.mc.Ps,NewEOS.mc.T,styleanphase[0],color=styleanphase[1],linewidth=styleanphase[2])
axes[ai,aj].plot(NewEOS.hug.P,NewEOS.hug.T,stylestphug[0],linewidth=stylestphug[2],color=stylestphug[1],label=stylestphug[3])
axes[ai,aj].plot(NewEOS.hug.P[0],NewEOS.hug.T[0],'o',color=stylestphug[1])
axes[ai,aj].set_ylabel('Temperature (K)',size=sfont)
axes[ai,aj].set_xlabel('Pressure (GPa)',size=sfont)
axes[ai,aj].tick_params(labelsize=sfont)
axes[ai,aj].set_ylim(0,9000)
axes[ai,aj].set_xlim(-5,450)
#-------
# density - pressure melt curve
# Panel [3,0]: zoom on the Hugoniot crossing the melt curve (100-400 GPa).
ai=3
aj=0
axes[ai,aj].plot(NewEOS.hug.rho,NewEOS.hug.P,stylestphug[0],linewidth=stylestphug[2],color=stylestphug[1],label=stylestphug[3])
axes[ai,aj].plot(NewEOS.hug.rho[0],NewEOS.hug.P[0],'o',color=stylestphug[1])
# literature STP Hugoniot for comparison (scironrarr/scironparr loaded earlier)
axes[ai,aj].plot(scironrarr,scironparr,'-',color='m',label='STP Hugoniot\n(Sjostrom & Crockett 2018)')
if NewEOS.tp.T > 0:
# NOTE(review): the dashed liquidus reuses the styleanphase legend label here -- confirm intended
axes[ai,aj].plot(NewEOS.mc.rl,NewEOS.mc.Pl,'--',color=styleanphase[1],linewidth=styleanphase[2],label=styleanphase[3])
axes[ai,aj].plot(NewEOS.mc.rs,NewEOS.mc.Ps,styleanphase[0],color=styleanphase[1],linewidth=styleanphase[2])
axes[ai,aj].set_xlabel('Density (g/cm$^3$)',size=sfont)
axes[ai,aj].set_ylabel('Pressure (GPa)',size=sfont)
axes[ai,aj].tick_params(labelsize=sfont)
axes[ai,aj].set_ylim(100,400)
axes[ai,aj].set_xlim(10,14)
#-------
# temperature - pressure zoom out
# Panel [3,1]: same content as panel [2,0] but out to 35000 K to show the full vapor curve.
ai=3
aj=1
axes[ai,aj].semilogy(NewEOS.vc.T,NewEOS.vc.Pl,styleanphase[0],color=styleanphase[1],linewidth=styleanphase[2],label=styleanphase[3])
axes[ai,aj].plot(NewEOS.vc.T,NewEOS.vc.Pv,styleanphase[0],color=styleanphase[1],linewidth=styleanphase[2])
axes[ai,aj].plot(NewEOS.cp.T,NewEOS.cp.P,'o',color=stylephasept[1],markersize=stylephasept[2])
if NewEOS.tp.T > 0:
axes[ai,aj].plot(NewEOS.mc.T,NewEOS.mc.Pl,styleanphase[0],color=styleanphase[1],linewidth=styleanphase[2])
axes[ai,aj].plot(NewEOS.mc.T,NewEOS.mc.Ps,styleanphase[0],color=styleanphase[1],linewidth=styleanphase[2])
axes[ai,aj].plot(NewEOS.hug.T,NewEOS.hug.P,stylestphug[0],linewidth=stylestphug[2],color=stylestphug[1],label=stylestphug[3])
axes[ai,aj].plot(NewEOS.hug.T[0],NewEOS.hug.P[0],'o',color=stylestphug[1])
axes[ai,aj].set_xlabel('Temperature (K)',size=sfont)
axes[ai,aj].set_ylabel('Pressure (GPa)',size=sfont)
axes[ai,aj].tick_params(labelsize=sfont)
axes[ai,aj].set_xlim(0,35000)
axes[ai,aj].set_ylim(1.e-16,100)
#===================================================================
# PLOT EXPERIMENTAL CONSTRAINTS ON THE ANEOS PHASE BOUNDARIES
# Overlay literature data points / regions on the phase-diagram panels above.
PLOTDATA=1
datms = 8
if PLOTDATA:
# entropy-temperature [0,0]
ai=0
aj=0
# range of ab initio predictions for the critical point from Medvedev 2014
rect = Rectangle((2.24,7300.),2.,3600,linewidth=1,edgecolor='skyblue',facecolor='skyblue')
axes[ai,aj].add_patch(rect)
# STP NIST-JANAF
axes[ai,aj].plot(ironSTP_S*1.E3,298,'+',color='orange',label='STP (JANAF)')
# 1 bar melting point JANAF delta-iron to liquid
#1809.000 42.558 92.190 59.772 58.645 DELTA <--> LIQUID
#1809.000 46.024 99.823 59.772 72.452 TRANSITION
# JANAF tabulates entropy in J/K/mol; dividing by the formula weight gives kJ/K/kg
axes[ai,aj].plot(99.823/NewEOS.FMW,1809,'d',color='orange',label='1-bar MP (JANAF)')
axes[ai,aj].plot(92.190/NewEOS.FMW,1809,'d',color='orange')
# Boiling point NIST-JANAF, Kraus et al. Nature Geoscience 2015 and refs within
axes[ai,aj].plot(2.24,3133,'s',color='orange',label='1-bar BP (JANAF)')
axes[ai,aj].plot(4.238,3133,'s',color='orange')
axes[0,0].plot(2.496,8787,'x',color='navy',label='Fortov & Lomonosov 2010 CP',markersize=10)
#EOS parameters of the critical point for iron Pc = 1.131 GPa, Tc = 8,787 K, Vc = 0.458cm3/g, Sc = 2.496 J/g/K
# density-pressure [0,1]
ai=0
aj=1
# range of ab initio predictions for the critical point from Medvedev 2014
rect = Rectangle((1.24,.356),1.,1.35,linewidth=1,edgecolor='skyblue',facecolor='skyblue')
axes[ai,aj].add_patch(rect)
axes[ai,aj].plot(2.1834,1.131,'x',color='navy',label='Fortov & Lomonosov 2010 CP',markersize=10)
# 99.99% Fe melting volume at 1 bar from Blumm & Henderson 2000 3.46%, approx 7.175 g/cm3 for solid at melt pt
axes[ai,aj].plot([6.98,6.98*(1.0346)],[1.E-4,1.E-4],'d',color='orange',label='MP (Blumm & Henderson 2000)')
# 1-bar Fe liquid at boiling point calculation Kraus et al. 2015
axes[ai,aj].plot([6.01],[1.E-4],'s',color='orange',label='BP (Kraus et al. 2015)')
# entropy -- pressure [1,0]
ai=1
aj=0
# range of ab initio predictions for the critical point from Medvedev 2014
rect = Rectangle((2.24,.356),2.,1.35,linewidth=1,edgecolor='skyblue',facecolor='skyblue')
axes[ai,aj].add_patch(rect)
axes[ai,aj].plot(2.496,1.131,'x',color='navy',label='Fortov & Lomonosov 2010 CP',markersize=10)
# STP NIST-JANAF
axes[ai,aj].plot(ironSTP_S*1.E3,1.E-4,'+',color='orange',label='STP (JANAF)')
# 1 bar melting point JANAF delta-iron to liquid
#1809.000 42.558 92.190 59.772 58.645 DELTA <--> LIQUID
#1809.000 46.024 99.823 59.772 72.452 TRANSITION
axes[ai,aj].plot(99.823/NewEOS.FMW,1.E-4,'d',color='orange',label='1-bar MP (JANAF)')
axes[ai,aj].plot(92.190/NewEOS.FMW,1.E-4,'d',color='orange')
# Boiling point NIST-JANAF, Kraus et al. Nature Geoscience 2015 and refs within
axes[ai,aj].plot(2.24,1.E-4,'s',color='orange',label='1-bar BP (JANAF)')
axes[ai,aj].plot(4.238,1.E-4,'s',color='orange')
# temperature -- pressure [2,0]
ai=2
aj=0
# range of ab initio predictions for the critical point from Medvedev 2014
rect = Rectangle((7300.,.356),3600,1.35,linewidth=1,edgecolor='skyblue',facecolor='skyblue')
axes[ai,aj].add_patch(rect)
# plot Fe experimentally fitted vapor curve from Alcock et al. 1984
# temperatures 1..2100 K; note index k corresponds to T = k+1 K
alcocktarr = np.arange(2100)+1
# Fe solid good from 298 to melting point
A=7.1
B=-21723.
C=0.4536
D=-0.5846
# Alcock fit gives log10(p/bar); 1.e-4 converts bar -> GPa
alcockfesolidparr = 1.e-4*np.power(10.,A+B/alcocktarr+C*np.log10(alcocktarr)+D*alcocktarr*1.e-3) #GPa
# Fe liquid good from melting point to 2100 K
A=6.347
B=-19574
C=0.
D=0.
alcockfeliquidparr = 1.e-4*np.power(10.,A+B/alcocktarr+C*np.log10(alcocktarr)+D*alcocktarr*1.e-3) #GPa
# NOTE(review): slice [298:1809] starts at T=299 K because alcocktarr[k]=k+1 -- 1-K offset, cosmetic only
axes[ai,aj].plot(alcocktarr[298:1809],alcockfesolidparr[298:1809],color='orange',label='Vapor curve\nAlcock et al. 1984')
axes[ai,aj].plot(alcocktarr[1809:2100],alcockfeliquidparr[1809:2100],color='orange')
axes[ai,aj].plot([3133.345],[1.e-4],'o',color='orange',label='1-bar BP (JANAF)')
axes[ai,aj].plot(9250.,8750.e-4,'+',color='purple',label='Beutl et al. 1994 CP',markersize=10)
axes[ai,aj].plot(8787,1.131,'x',color='navy',label='Fortov & Lomonosov 2010 CP',markersize=10)
# pressure- temperature [2,1]
# Melt-curve constraints: Anzellini et al. 2013 fitted Simon-Glatzel segments,
# Fischer et al. 2012 Fe-Si alloy solidus bounds, and shock melting points.
ai=2
aj=1
# Anzellini et al. 2013 Science iron melt curve high pressure region up to 200 GPa/5000 K
anzmct3arr = (np.arange(100)+1.)/100.*1300.+3700 # K
anzmcp3arr = (np.power(anzmct3arr/3712.,1.72)-1.)*161.2 + 98.5 # GPa
anzmct2arr = (np.arange(100)+1.)/100.*3700. # K
anzmcp2arr = (np.power(anzmct2arr/1991.,2.38)-1.)*27.39 + 5.2 # GPa
axes[ai,aj].plot(anzmcp3arr,anzmct3arr,color="orange",linestyle='--',label='Anzellini et al. 2013')
axes[ai,aj].plot(anzmcp2arr,anzmct2arr,color="orange",linestyle='--')
#Fischer et al. 2012 Fe-Si alloys
# example solidus of an iron alloy
fischerparrlow = Fischer_datalow.iloc[:,0].values # P GPa
fischertarrlow = Fischer_datalow.iloc[:,1].values # T K
fischerparrhigh = Fischer_datahigh.iloc[:,0].values # P GPa
fischertarrhigh = Fischer_datahigh.iloc[:,1].values # T K
axes[ai,aj].plot(fischerparrhigh,fischertarrhigh,'v',color="brown",label='Fischer et al. 2012 Fe-16Si\nupper limit solidus')
axes[ai,aj].plot(fischerparrlow,fischertarrlow,'^',color="brown",label='lower limit solidus')
# shaded boxes show the P-T uncertainty of the shock melting measurements
rect = Rectangle((240.,5000.),6.,700,linewidth=1,edgecolor='moccasin',facecolor='moccasin')
axes[ai,aj].add_patch(rect)
axes[ai,aj].plot(243.,5350,'x',color='orange',label='Brown & McQueen 1986')
rect = Rectangle((222.,4700),6.,1000,linewidth=1,edgecolor='moccasin',facecolor='moccasin')
axes[ai,aj].add_patch(rect)
axes[ai,aj].plot(225.,5100,'*',color='orange',label='Nguyen & Holmes 2004')
axes[ai,aj].errorbar(100.,3500.,xerr=10.,yerr=200,fmt='s',color='red',label='bcc-hcp-liquid TP\nMorard et al. 2018')
# density - pressure [3,0]
# Shock-melting constraints: densities where the principal Hugoniot crosses the
# ANEOS melt curve at the reported shock pressures.
ai=3
aj=0
axes[ai,aj].plot(12.5,270.,'^',color='orange',label='STP Hug. enters liq. 270 GPa\n(Sjostrom & Crockett 2018)',markersize=6)
if NewEOS.tp.T > 0:
    # Interpolate the melt-curve liquidus (Pl,rl) / solidus (Ps,rs) density at
    # the reported pressures. Citation year corrected from "2014" to "2004" to
    # match the Nguyen & Holmes 2004 citations used in the other panels.
    axes[ai,aj].plot(np.interp(260.,NewEOS.mc.Pl,NewEOS.mc.rl),260.,'d',color='orange',label='STP Hug. enters liq. 260 GPa\n(Nguyen & Holmes 2004)',markersize=6)
    axes[ai,aj].plot(np.interp(225.,NewEOS.mc.Ps,NewEOS.mc.rs),225.,'d',color='red',label='STP Hug. begins to melt 225 GPa\n(Nguyen & Holmes 2004)',markersize=6)
# temperature -- pressure [3,1]
ai=3
aj=1
# range of ab initio predictions for the critical point from Medvedev 2014
rect = Rectangle((7300.,.356),3600,1.35,linewidth=1,edgecolor='skyblue',facecolor='skyblue')
axes[ai,aj].add_patch(rect)
# plot Fe experimentally fitted vapor curve from Alcock et al. 1984
alcocktarr = np.arange(2100)+1
# Fe solid good from 298 to melting point
A=7.1
B=-21723.
C=0.4536
D=-0.5846
# NOTE(review): this recomputed solid vapor-pressure array is never plotted in
# this panel -- possibly an unfinished overlay; confirm before removing.
alcockfesolidparr = 1.e-4*np.power(10.,A+B/alcocktarr+C*np.log10(alcocktarr)+D*alcocktarr*1.e-3) #GPa
# legends for all eight panels
axes[0,0].legend(fontsize=sfont-5)
axes[0,1].legend(fontsize=sfont-5)
axes[1,0].legend(fontsize=sfont-5)
axes[1,1].legend(fontsize=sfont-5)
axes[2,0].legend(fontsize=sfont-5)
axes[2,1].legend(fontsize=sfont-5)
axes[3,0].legend(fontsize=sfont-5)
axes[3,1].legend(fontsize=sfont-5)
plt.tight_layout(pad=0.6)
```
## ANEOS model comparisons to experimental data
The original ANEOS with a 3nR limit to the heat capacity does provide a good fit to the liquid region.
Dorogokupets et al. 2017 calculated the isotherm for liquid iron at 2200 K. The ANEOS model is a good fit (and then enters the solid phase).
Anzellini et al. (2013) estimated that the temperature at the core-mantle boundary is about 4050 K. Here, I plot the ANEOS isentrope through (the nearest grid point to) 4050 K and 136 GPa and compare the isentrope to PREM (http://ds.iris.edu/spud/earthmodel/9785674). As expected, the ANEOS model isentrope is denser than Earth's core. The model isentrope and 10% and 4% density reductions are compared to PREM.
```
# INCLUDE A COMPARISON TO EARTH'S STRUCTURE PREM MODEL
G = 6.67E-11 # Gravitational constant m3/kg/s2
# Read in PREM: Preliminary Earth Reference Model
# datadir is defined earlier in the notebook
PREM_filename=datadir+'PREM500_IDV.csv' # we love long, readable variable names!
# make a class to hold the PREM data
class PREMclass:
    """Container for the PREM profile and other 1-D Earth variables."""
    def __init__(self):
        """Create an empty profile: NR=0 and zero-length arrays for each quantity."""
        self.NR = 0  # number of radius points
        # one empty float array per tabulated quantity; filled in after loading
        for profile_name in ('radius', 'density', 'pwavevel', 'swavevel',
                             'pressure', 'temperature'):
            setattr(self, profile_name, np.zeros(self.NR))
        # not going to use all the variables in the file
        self.units = ''  # text note recording the units of each array
# initialize an empty PREM object
PREM = PREMclass()
# read the data into the class parameters
# (re-reads the file once per column; columns are radius, density, Vp, Vs)
PREM.radius = np.loadtxt(PREM_filename,delimiter=',',skiprows=2,usecols=[0]) # radius in m
PREM.density = np.loadtxt(PREM_filename,delimiter=',',skiprows=2,usecols=[1]) # density in kg/m3
PREM.pwavevel = np.loadtxt(PREM_filename,delimiter=',',skiprows=2,usecols=[2]) # p-wave velocity m/s
PREM.swavevel = np.loadtxt(PREM_filename,delimiter=',',skiprows=2,usecols=[3]) # s-wave velocity m/s
PREM.NR = len(PREM.radius) # number of radius points
PREM.units = 'radius (m), density (kg/m3), pwavevel (m/s), swavevel (m/s)'
# start at the surface and integrate via a for loop to the center of the planet
# calculate the thickness of each layer in the PREM model using the roll function
# PREM_dr[i] = radius[i+1]-radius[i], i.e. the thickness of the shell above radius[i]
PREM_dr = np.roll(PREM.radius,-1)-PREM.radius
PREM_dr[PREM.NR-1] = 0. # we are not using the last entry in the list because there are NR-1 layers
#print(PREM_dr)
# calculate the mass of each layer
# density x area x thickness of each layer
PREM_mass_rad = PREM.density*(4.*np.pi*PREM.radius*PREM.radius*PREM_dr)
# Check that the total mass is the mass of the Earth
#print('PREM total mass (kg)=',np.sum(PREM_mass_rad))
#print('PREM total mass (Mearth)=',np.sum(PREM_mass_rad)/5.972E24)
PREM.pressure = np.zeros(PREM.NR) # pressure array, same length as the other PREM profiles
# Hydrostatic equilibrium: dP = G*M(<r)*rho*dr/r^2, integrated from the surface
# inward. The first entry is the middle of the planet (left at 0; plots use [1::]).
# BUGFIX: the mass enclosed below radius[i] is the sum of shells 0..i-1, i.e.
# PREM_mass_rad[0:i]; the original slice [0:i-1] omitted shell i-1.
# Precompute the running total once (O(N)) instead of np.sum in the loop (O(N^2)).
PREM_enclosed_mass = np.concatenate(([0.], np.cumsum(PREM_mass_rad)[:-1]))
for i in range(PREM.NR-2,0,-1):
    PREM.pressure[i] = PREM.pressure[i+1]+G*PREM_enclosed_mass[i]*PREM.density[i]*PREM_dr[i]/PREM.radius[i]/PREM.radius[i]
# use SESAME units
PREM.pressure = PREM.pressure/1.E9 # Pa -> GPa
IronEOS = NewEOS
# first extract the isentropes for the planet from the EOS tables
core = isentrope_class() # code in eostable.py
Score = 0.0022 # MJ/K/kg
# loop across all densities and extract the values for the requested isentrope
# (for each density column, interpolate P and T at constant entropy Score)
for i in range(0,IronEOS.ND):
ind = np.where((IronEOS.S[:,i] > 0))[0]
interpfunction = interpolate.interp1d(IronEOS.S[ind,i],IronEOS.P[ind,i]) # MJ/K/kg, GPa
core.pressure = np.append(core.pressure,interpfunction(Score)) # GPa
interpfunction = interpolate.interp1d(IronEOS.S[ind,i],IronEOS.T[ind]) # MJ/K/kg, K
core.temperature = np.append(core.temperature,interpfunction(Score)) # K
core.density = IronEOS.rho # g/cm3
#print('Calculated iron isentrope with ',np.round(np.interp(136.,core.pressure,core.temperature)),' K CMB temperature (P=136 GPa)')
# first extract the isentropes for the planet from the EOS tables
# Second isentrope: entropy chosen so it passes through ~4050 K at 136 GPa
# (Anzellini et al. 2013 CMB temperature estimate).
core2 = isentrope_class() # code in eostable.py
it0 = np.where(IronEOS.T >= 4050.)[0]
Score = np.interp(136.,IronEOS.P[it0[0],:],IronEOS.S[it0[0],:])
#Score = 0.00186 # MJ/K/kg
# loop across all densities and extract the values for the requested isentrope
for i in range(0,IronEOS.ND):
ind = np.where((IronEOS.S[:,i] > 0))[0]
interpfunction = interpolate.interp1d(IronEOS.S[ind,i],IronEOS.P[ind,i]) # MJ/K/kg, GPa
core2.pressure = np.append(core2.pressure,interpfunction(Score)) # GPa
interpfunction = interpolate.interp1d(IronEOS.S[ind,i],IronEOS.T[ind]) # MJ/K/kg, K
core2.temperature = np.append(core2.temperature,interpfunction(Score)) # K
core2.density = IronEOS.rho # g/cm3
print('Calculated iron isentrope with ',np.round(np.interp(136.,core2.pressure,core2.temperature)),' K CMB temperature (P=136 GPa)')
# 1-bar plots and 298 K plots
# Plot aneos vapor curves
sfont = 15
fig, axes = plt.subplots(2, 2, figsize=(14,14))
plt.subplots_adjust(wspace=0.4)
#--------
# temperature - entropy at 1 bar
# Panel [0,0]: ANEOS 1-bar isobar vs Desai 1986 iron entropy data and JANAF points.
ai=0
aj=0
axes[ai,aj].plot(Desai_tarr,Desai_sarr,'-',color='orange',label='Iron data (Desai 1986)',linewidth=3)
axes[ai,aj].plot(NewEOS.onebar.T,NewEOS.onebar.S*1.E3,'+',color='blue',label='ANEOS 1 bar')
# JANAF entropies (J/K/mol) divided by formula weight -> kJ/K/kg
axes[ai,aj].plot(1809.,99.823/NewEOS.FMW,'d',color='orange',label='1-bar MP (JANAF)')
axes[ai,aj].plot(1809.,92.190/NewEOS.FMW,'d',color='orange')
# Boiling point NIST-JANAF, Kraus et al. Nature Geoscience 2015 and refs within
axes[ai,aj].plot(3133.,2.24,'s',color='orange',label='1-bar BP (JANAF)')
axes[ai,aj].plot(3133.,4.238,'s',color='orange')
axes[ai,aj].set_xlabel('Temperature (K)',size=sfont)
axes[ai,aj].set_ylabel('Specific Entropy (kJ/K/kg)',size=sfont)
axes[ai,aj].set_title('1-bar isobar',size=sfont)
axes[ai,aj].tick_params(labelsize=sfont)
axes[ai,aj].set_ylim(0,5)
axes[ai,aj].set_xlim(0,4000)
#----
# density-temperature at 1 bar
# Panel [0,1]: ANEOS 1-bar density vs liquid-iron data and reference points.
ai=0
aj=1
axes[ai,aj].plot(Assael_tarr,Assael_rarr/1.e3,'-',color='orange',label='Liquid iron data (Assael et al. 2006)')
axes[ai,aj].plot(NewEOS.onebar.T,NewEOS.onebar.rho,'+',color='blue',label='ANEOS 1 bar')
axes[ai,aj].plot(298.,7.873,'+',color='orange',label='STP')
axes[ai,aj].plot([3133],[6.01],'s',color='orange',label='Boiling point (Kraus et al. 2015)')
# 3.4% volume change (Askeland 1996) with highest density from Assael et al. 2006
axes[ai,aj].plot([1809.,1809],[6.99,6.99*(1.034)],'d',color='orange',label='Melt transition')
axes[ai,aj].plot([1811.],[7.019],'X',color='red',label='Anderson & Ahrens 1994')
axes[ai,aj].set_ylabel('Density (g/cm$^3$)',size=sfont)
axes[ai,aj].set_xlabel('Temperature (K)',size=sfont)
axes[ai,aj].set_title('1-bar isobar',size=sfont)
axes[ai,aj].tick_params(labelsize=sfont)
axes[ai,aj].set_ylim(5,9)
axes[ai,aj].set_xlim(0,4000)
# plot data
#axes[ai,aj].plot(1890+273.,2.968,'s',color='orange',label='MELTS MP Berman')
#axes[ai,aj].plot(1890+273.,2.687,'o',color='orange',label='MELTS MP Lange')
#axes[ai,aj].plot(2273.,2.597,'d',color='orange',label='Thomas & Asimow 2013')
#----
# density-pressure at 298 K
# Panel [1,0]: compare ANEOS 298-K isotherm with third-order Birch-Murnaghan
# fits for the alpha, gamma, and epsilon iron phases.
ai=1
aj=0
# Guinan & Beshers JPCS 1969 Fe 298 K isotherm
#K0T = 166.4 # GPa room temp isentrope
#K0Tp = 5.29
#R00 = 7.874
#print(R00)
# STP bulk sound speed from the Guinan & Beshers modulus (used again in a later cell)
GB_cs_stp = np.sqrt(166.4E9/7874.) # m/s
#print(GB_cs_stp)
#BM_pressure = 1.5*K0T*(np.power(NewEOS.rho/R00,7./3.)-np.power(NewEOS.rho/R00,5./3.)
# )*(1.+0.75*(K0Tp-4.)*(np.power(NewEOS.rho/R00,2./3.)-1.))
#igood = np.where((BM_pressure < 14.6))[0]
#axes[ai,aj].plot(NewEOS.rho[igood],BM_pressure[igood],'-',color='orange',label='alpha-Fe 298-K isentrope\nGuinan & Beshers 1969')
# Dorogokupets et al. 2017
# alpha (bcc) iron 298 K 1 bar (low T is ferro; high T is para)
K0T = 164.001 # GPa room temp isotherm
K0Tp = 5.5
R00 = 7.8746
# third-order Birch-Murnaghan pressure from (K0T, K0Tp, R00)
BM_pressure = 1.5*K0T*(np.power(NewEOS.rho/R00,7./3.)-np.power(NewEOS.rho/R00,5./3.)
)*(1.+0.75*(K0Tp-4.)*(np.power(NewEOS.rho/R00,2./3.)-1.))
igood = np.where((BM_pressure < 14.6))[0]
axes[ai,aj].plot(NewEOS.rho[igood],BM_pressure[igood],'-',color='teal',label='alpha-Fe 298-K isotherm\nDorogokupets et al. 2017')
# Dorogokupets et al. 2017
# gamma (fcc) iron 298 K 1 bar
K0T = 146.2 # GPa room temp isotherm
K0Tp = 4.67
R00 = 8.06
BM_pressure = 1.5*K0T*(np.power(NewEOS.rho/R00,7./3.)-np.power(NewEOS.rho/R00,5./3.)
)*(1.+0.75*(K0Tp-4.)*(np.power(NewEOS.rho/R00,2./3.)-1.))
igood = np.where((BM_pressure < 110))[0]
axes[ai,aj].plot(NewEOS.rho[igood],BM_pressure[igood],'-',color='cyan',linewidth=4,label='gamma-Fe 298-K isotherm\nDorogokupets et al. 2017')
# ANEOS 298-K isotherm taken directly from the tabulated grid at T0REF
iT0REF = np.where(NewEOS.T == T0REF)[0]
axes[ai,aj].plot(NewEOS.rho,NewEOS.P[iT0REF[0],:],'-',color='blue',label='ANEOS 298-K isotherm')
# Dewaele et al. PRL 2006 Fe 298 K isotherm
# espilon-iron (hcp)
K0T = 165 # fixed GPa room temp isotherm
K0Tp = 4.97 # +-0.04 fitted
# reference density from the hcp unit-cell volume (kg/m3 -> g/cm3)
R00 = (NewEOS.FMW)*1.*1.6726/(11.234*1.E-3)/1.E3 # kg/m3->g/cm3
#print(R00)
BM_pressure = 1.5*K0T*(np.power(NewEOS.rho/R00,7./3.)-np.power(NewEOS.rho/R00,5./3.)
)*(1.+0.75*(K0Tp-4.)*(np.power(NewEOS.rho/R00,2./3.)-1.))
igood = np.where((BM_pressure > 17.)&(BM_pressure < 197.))
axes[ai,aj].plot(NewEOS.rho[igood],BM_pressure[igood],'-',color='brown',linewidth=2,label='epsilon-Fe 298-K isotherm\nDewaele et al. 2006')
'''
# error bars Dewaele et al. PRL 2006 Fe 298 K isotherm
K0Tp = 4.97-0.04 # +-0.04 fitted
BM_pressure = 1.5*K0T*(np.power(NewEOS.rho/R00,7./3.)-np.power(NewEOS.rho/R00,5./3.)
)*(1.+0.75*(K0Tp-4.)*(np.power(NewEOS.rho/R00,2./3.)-1.))
igood = np.where((BM_pressure > 17.)&(BM_pressure < 197.))
axes[ai,aj].plot(NewEOS.rho[igood],BM_pressure[igood],'--',color='brown')
# error bars Dewaele et al. PRL 2006 Fe 298 K isotherm
K0Tp = 4.97+0.04 # +-0.04 fitted
BM_pressure = 1.5*K0T*(np.power(NewEOS.rho/R00,7./3.)-np.power(NewEOS.rho/R00,5./3.)
)*(1.+0.75*(K0Tp-4.)*(np.power(NewEOS.rho/R00,2./3.)-1.))
igood = np.where((BM_pressure > 17.)&(BM_pressure < 197.))
axes[ai,aj].plot(NewEOS.rho[igood],BM_pressure[igood],'--',color='brown')
'''
#------ LIQUID
# Dorogokupets et al. 2017
# liquid iron 2200 K 1 bar
K0T = 65.2 # GPa isotherm
K0Tp = 6.48
# liquid reference density from the 1811-K value corrected by 4% expansion
R00 = 7.019/1.04
BM_pressure = 1.5*K0T*(np.power(NewEOS.rho/R00,7./3.)-np.power(NewEOS.rho/R00,5./3.)
)*(1.+0.75*(K0Tp-4.)*(np.power(NewEOS.rho/R00,2./3.)-1.))
igood = np.where((BM_pressure < 20))[0]
axes[ai,aj].plot(NewEOS.rho[igood],BM_pressure[igood],'--',color='orange',linewidth=2,label='Liquid Fe 2200-K isotherm\nDorogokupets et al. 2017')
# plot liquid isotherm
# nearest tabulated isotherm at/above 2200 K -- no interpolation between grid rows
it0 = np.where(NewEOS.T >= 2200)[0]
#print('Liquid isotherm at (K) ',NewEOS.T[it0[0]])
igood = np.where((NewEOS.P[it0[0],:] < 20))[0]
axes[ai,aj].plot(NewEOS.rho[igood],NewEOS.P[it0[0],igood],'--',color='orangered',label='ANEOS '+str(np.round(NewEOS.T[it0[0]]))+'-K isotherm')
axes[ai,aj].set_xlabel('Density (g/cm$^3$)',size=sfont)
axes[ai,aj].set_ylabel('Pressure (GPa)',size=sfont)
axes[ai,aj].tick_params(labelsize=sfont)
axes[ai,aj].set_title('298-K, 2200-K isotherms',size=sfont)
axes[ai,aj].set_xlim(6,13)
axes[ai,aj].set_ylim(0,200)
#----
# density-pressure at 298 K
# Panel [1,1]: PREM core density-pressure profile vs the ANEOS model isentrope
# (and 10%/4% density-reduced copies of the isentrope).
ai=1
aj=1
# skip the first PREM entry (planet center; pressure left at 0 by the integration)
axes[ai,aj].plot(PREM.density[1::]/1.E3,PREM.pressure[1::],'-',color='orange',label='PREM')
#axes[ai,aj].plot(core.density,core.pressure,'-',color='blue',label='ANEOS '+str(np.round(np.interp(136,core.pressure,core.temperature)))+' K isentrope at CMB',markersize=10)
axes[ai,aj].plot(core2.density,core2.pressure,'-',color='blue',label='ANEOS isentrope: '+str(np.round(np.interp(136,core2.pressure,core2.temperature)))+' K at CMB',markersize=10)
axes[ai,aj].plot(0.9*core2.density,core2.pressure,'--',color='blue',label='ANEOS isentrope shifted 10% in density')
axes[ai,aj].plot(0.96*core2.density,core2.pressure,':',color='blue',label='ANEOS isentrope shifted 4% in density')
axes[ai,aj].set_xlabel('Density (g/cm$^3$)',size=sfont)
axes[ai,aj].set_ylabel('Pressure (GPa)',size=sfont)
axes[ai,aj].tick_params(labelsize=sfont)
axes[ai,aj].set_title('PREM and ANEOS model isentropes',size=sfont)
axes[ai,aj].set_xlim(3,14)
axes[ai,aj].set_ylim(0,360)
# don't show a plot in lower right
#axes[1,1].axis("off")
axes[0,0].legend(fontsize=sfont-5)
axes[0,1].legend(fontsize=sfont-5)
axes[1,0].legend(fontsize=sfont-5)
axes[1,1].legend(fontsize=sfont-5)
#print(PREM.pressure)
```
## ANEOS Gruneisen parameter and Theta parameter for thermal model
The Gruneisen parameter for iron liquid has been determined experimentally from shock wave data (e.g., see Kraus et al. 2015). Shock wave data determine the Gruneisen parameter in reference to a known state:
\begin{equation}
\gamma = \frac{1}{\rho}\frac{(P-P_{\rm ref})}{(E-E_{\rm ref})} \bigg\rvert _V
\end{equation}
The ANEOS Gruneisen model is split into two parts. For $\rho \ge \rho_{0}$,
\begin{equation}
\gamma = \left( \frac{\gamma_0 \rho_0}{\rho} + C_{24} \left( 1-\frac{\rho_0}{\rho} \right)^2 \right) (1-C_{60}) + \left( \gamma_0 + (C_{24}-\gamma_0) \left( 1- \frac{\rho_0}{\rho} \right)^2 \right) C_{60}.
\end{equation}
For $\rho < \rho_{0}$,
\begin{equation}
\gamma = C_{16} \rho^2 + C_{17}\rho + 1 + C_{61}.
\end{equation}
$C_{16}$ and $C_{17}$ are calculated so that $\gamma$ and $d \gamma / d \rho$ are continuous at $\rho=\rho_0$. The model asymptotes to the Thomas-Fermi limit ($\gamma=2/3$) when $C_{24}=2/3$.
The gamma function developed by Kraus et al. 2015 (SOM equation 5) is also shown for comparison. This function was derived from the abundant data on shocked porous iron. This calculation assumes E=0.
The literature values for the Gruneisen parameter of alpha-iron span 1.65 to 1.75: e.g., 1.65 Adams et al. 2006; 1.736 Dorogokupets et al. 2017; 1.75 Sjostrom & Crockett 2018.
```
# plot the gamma and isotherms
# ANEOS Formulation for Gamma and theta
# Evaluate the two-branch ANEOS Gruneisen model (see equations in the markdown
# cell above) and the corresponding Debye theta on a density grid.
theta0=abs(NewEOS.theta0)
# density grid: 0 to 10x the reference density R0REF
rhoarr = (1+np.arange(1000))/1000.*NewEOS.R0REF*10.
# high-density branch (rho >= R0REF)
gammaarr = (NewEOS.gamma0*NewEOS.R0REF/rhoarr
)+NewEOS.C24*np.power(1.0-NewEOS.R0REF/rhoarr,2.)*(1.-NewEOS.C60)+(NewEOS.gamma0 + (NewEOS.C24-NewEOS.gamma0)*np.power(1.-NewEOS.R0REF/rhoarr,2.))*NewEOS.C60
s3 = NewEOS.C61+1.
# C14, C16, C17 chosen so gamma and dgamma/drho are continuous at R0REF
C14 = theta0 * np.exp(1.5*s3-(2.-0.5*NewEOS.C60)*NewEOS.gamma0)/(np.power(NewEOS.R0REF,s3))
C16 = (s3-(2.-NewEOS.C60)*NewEOS.gamma0)/(NewEOS.R0REF*NewEOS.R0REF)
C17 = ((3.-NewEOS.C60)*NewEOS.gamma0-2.*s3)/NewEOS.R0REF
# low-density branch (rho < R0REF): gamma = C16*rho^2 + C17*rho + 1 + C61
gammaarrlow = C16*rhoarr*rhoarr+C17*rhoarr+s3
thetaarr = theta0*np.exp((1.-NewEOS.C60)*(1.-NewEOS.R0REF/rhoarr)*NewEOS.gamma0-0.5*(NewEOS.R0REF/rhoarr)*(3.-(NewEOS.R0REF/rhoarr)*(4-(NewEOS.R0REF/rhoarr))))*np.power((rhoarr/NewEOS.R0REF),NewEOS.C24)
thetalowarr = C14*np.exp(rhoarr*(C17+0.5*C16*rhoarr))*np.power(rhoarr,s3)
#-------- plots
# Two-panel figure: Gruneisen gamma (left) and Debye theta (right) vs density.
sfont = 15
fig, axes = plt.subplots(1, 2, figsize=(14,7))
plt.subplots_adjust(wspace=0.4)
# -----
ai=0
# split the density grid at R0REF so each model branch is drawn on its own side
indlow = np.where(rhoarr <= NewEOS.R0REF)
indhigh = np.where(rhoarr >= NewEOS.R0REF)
# Gamma from Kraus et al. 2015 SOM Equation 5
# this assumes E = 0. I dropped the exp(-phi*E) term. Phi=0.045 kg/MJ.
kraus_garr = 0.7+(2.1-0.7)*np.power(7.85/NewEOS.rho,1.7)
kraus_validrange = np.where((NewEOS.rho>9)&(NewEOS.rho<12))[0]
axes[ai].plot(NewEOS.rho[kraus_validrange],kraus_garr[kraus_validrange],'-',linewidth=6,color='orange',label='Liquid iron fit\n(Kraus et al. 2015)')
axes[ai].set_ylim(0,2.5)
axes[ai].set_xlim(0,25)
# liquid iron at 1 bar 1811 K Anderson and Ahrens
#axes[ai,aj].plot([7.019],[1.735],'s',color='red',label='Liq. Iron 1811 K, 1 bar')
#axes[ai,aj].plot([5.5,6.0,6.5,7.019,12.643,13.015,13.417],[1.344,1.547,1.751,1.723,1.419,1.401,1.303],'o',color='red',label='Liquid Iron')
axes[ai].errorbar([5.5,6.0,6.5,7.019,12.643,13.015,13.417],[1.344,1.547,1.751,1.723,1.419,1.401,1.303], yerr=[0.269,0.309,0.350,0.116,0.272,0.265,0.269], fmt='o', color='orange', ecolor='orange', capthick=2,label='Liquid Fe data\n(Anderson & Ahrens 1994)')
# solid alpha-iron at STP
# vertical bar spanning the 1.65-1.75 literature range at the reference density
axes[ai].plot([NewEOS.R0REF,NewEOS.R0REF],[1.65,1.75],'-',linewidth=3,color='blue',label='alpha-Fe solid STP')
axes[ai].plot(rhoarr[indhigh[0]],gammaarr[indhigh[0]],label="ANEOS Gamma-high",color='black')
axes[ai].plot(rhoarr[indlow[0]], gammaarrlow[indlow[0]],label="ANEOS Gamma-low",color='black',ls='--')
axes[ai].plot(NewEOS.R0REF,NewEOS.gamma0,'+',label="ANEOS Gamma0",color='black')
axes[ai].set_xlabel('Density (g/cm$^3$)',size=sfont)
axes[ai].set_ylabel('Gamma (-)',size=sfont)
axes[ai].tick_params(labelsize=sfont)
# -----
ai=1
axes[ai].plot(rhoarr[indhigh[0]],thetaarr[indhigh[0]],label="ANEOS Theta-high",color='black')
axes[ai].plot(rhoarr[indlow[0]],thetalowarr[indlow[0]],label="ANEOS Theta-low",color='black',ls='--')
axes[ai].plot(NewEOS.R0REF,theta0,'+',label="Theta0",color='black')
axes[ai].set_ylim(0,2000)
axes[ai].set_xlim(0,25)
axes[ai].set_xlabel('Density (g/cm$^3$)',size=sfont)
axes[ai].set_ylabel('Theta (K)',size=sfont)
axes[ai].tick_params(labelsize=sfont)
axes[0].legend(fontsize=sfont-5)
axes[1].legend(fontsize=sfont-5)
```
# Specific Heat Capacity and Sound Speeds
```
sfont = 15
fig, axes = plt.subplots(2, 2, figsize=(14,14))
plt.subplots_adjust(wspace=0.4)
#-----------------------------
# Pick isotherms and set color scale
ixtemparr = [298.,1000.,2000.,5000.,20000.]
# set color scale same for all plots
tcolmin=0.
tcolmax=20000.
# find the index closest to the selected temperatures -- no interpolation
# ixindarr holds float indices (np.zeros default dtype); cast to int where used
ixindarr=np.zeros(len(ixtemparr))
ixlabelarr = []
for i in np.arange(len(ixtemparr)):
# interpolate the requested temperature onto the grid index axis, then round
tmpi=int(np.round(np.interp(ixtemparr[i],NewEOS.T,np.arange(NewEOS.NT))))
#print(tmpi,antarr[tmpi])
ixindarr[i] = tmpi
ixlabelarr.append(str(ixtemparr[i])+' K')
#-----------------------
# Panel [0,0]: specific heat capacity Cv vs density along the selected isotherms.
ai=0
aj=0
for i in np.arange(len(ixtemparr)):
icolscale = (ixtemparr[i]-tcolmin)/(tcolmax-tcolmin)
col=local_cmaps.parula(icolscale)
# cv is MJ/K/kg in the table; *1000 converts to kJ/K/kg
axes[ai,aj].plot(NewEOS.rho,NewEOS.cv[int(ixindarr[i]),:]*1000.,c=col,linestyle='-',label=ixlabelarr[i])
#axes[ai,aj].plot(QMDH_r1,QMDH_cv*1000.,styleqmd[0],markersize=styleqmd[2],color=styleqmd[1],label='QMD Hugoniot\n(Root et al. 2018')
#axes[ai,aj].plot(2.597,1.73736,'^',color='orange',label='1 bar 2273 K liquid Cv\n(Thomas & Asimow 2013)')
#axes[ai,aj].plot(3.,180./(NewEOS.FMW/1.E3)/1.E3,'s',color='orange',label='1 bar 2000 K solid Cv\n(Gillet et al. 1991)')
axes[ai,aj].set_ylim(0.,3.)
axes[ai,aj].set_xlim(0,30)
axes[ai,aj].set_ylabel('Specific heat capacity Cv (kJ/K/kg)',size=sfont)
axes[ai,aj].set_xlabel('Density (g/cm$^3$)',size=sfont)
axes[ai,aj].tick_params(labelsize=sfont)
#-----------------------
# Panel [0,1]: same Cv isotherms expressed in units of nR per formula unit.
ai=0
aj=1
for i in np.arange(len(ixtemparr)):
icolscale = (ixtemparr[i]-tcolmin)/(tcolmax-tcolmin)
col=local_cmaps.parula(icolscale)
# convert MJ/K/kg -> J/K/kg, then divide by n*R/FMW; the /1/ factor is n=1
# atom per formula unit -- TODO confirm against the other /7/ conversions below
axes[ai,aj].plot(NewEOS.rho,NewEOS.cv[int(ixindarr[i]),:]*1000.*1000./1/8.314*NewEOS.FMW/1000.,c=col,linestyle='-',label=ixlabelarr[i])
#axes[ai,aj].plot(QMDH_r1,QMDH_cv*1.E6/7/8.314*0.14,styleqmd[0],markersize=styleqmd[2],color=styleqmd[1],label='QMD Hugoniot\n(Root et al. 2018')
#axes[ai,aj].plot(2.597,1737./7/8.314*0.140,'^',color='orange',label='1 bar 2273 K liquid Cv\n(Thomas & Asimow 2013)')
#axes[ai,aj].plot(3.,180./(NewEOS.FMW/1.E3)/7/8.314*0.140,'s',color='orange',label='1 bar 2000 K solid Cv\n(Gillet et al. 1991)')
axes[ai,aj].set_ylim(1.,7.)
axes[ai,aj].set_xlim(0,30)
axes[ai,aj].set_ylabel('Specific heat capacity Cv (nR)',size=sfont)
axes[ai,aj].set_xlabel('Density (g/cm$^3$)',size=sfont)
axes[ai,aj].tick_params(labelsize=sfont)
#-----------------------
# plot sound speed along isotherms
ai=1
aj=0
for i in np.arange(len(ixtemparr)):
icolscale = (ixtemparr[i]-tcolmin)/(tcolmax-tcolmin)
col=local_cmaps.parula(icolscale)
axes[ai,aj].plot(NewEOS.P[int(ixindarr[i]),:],NewEOS.cs[int(ixindarr[i]),:]/1.e5,c=col,linestyle='-',label=ixlabelarr[i])
axes[ai,aj].plot(1.E-4,GB_cs_stp/1.e3,'+',markersize=10,color='orange',label='alph-iron STP\n(Guinan & Beshers 1968)')
axes[ai,aj].plot(1.E-4,3.82,'X',markersize=10,color='orange',label='Liquid iron 1809 K\n(Nasch et al. 1994)')
axes[ai,aj].set_ylim(0,10)
axes[ai,aj].set_xlim(-10,40)
axes[ai,aj].set_ylabel('Bulk Sound Speed (km/s)',size=sfont)
axes[ai,aj].set_xlabel('Pressure (GPa)',size=sfont)
axes[ai,aj].tick_params(labelsize=sfont)
#-----------------------
# plot sound speed along isotherms
ai=1
aj=1
for i in np.arange(len(ixtemparr)):
icolscale = (ixtemparr[i]-tcolmin)/(tcolmax-tcolmin)
col=local_cmaps.parula(icolscale)
axes[ai,aj].plot(NewEOS.P[int(ixindarr[i]),:],NewEOS.cs[int(ixindarr[i]),:]/1.e5,c=col,linestyle='-',label=ixlabelarr[i])
axes[ai,aj].plot([225,260],[10,9.5],'d',color='orange',label='Nguyen & Holmes 2004 on Hugoniot')
axes[ai,aj].set_ylim(0,20)
axes[ai,aj].set_xlim(0,700)
axes[ai,aj].set_ylabel('Bulk Sound Speed (km/s)',size=sfont)
axes[ai,aj].set_xlabel('Pressure (GPa)',size=sfont)
axes[ai,aj].tick_params(labelsize=sfont)
axes[0,0].legend(fontsize=sfont-5)
axes[0,1].legend(fontsize=sfont-5)
axes[1,0].legend(fontsize=sfont-5)
axes[1,1].legend(fontsize=sfont-5)
```
# Check the Melt Curve in Tabulated EOS
Colored lines correspond to isotherms. Black lines are ANEOS phase boundaries.
Tabulations of the equation of state can lead to erroneous material descriptions across the melt curve. In the following plots, the isotherms should be flat (constant pressure) across the phase boundary (black lines).
Every 5th isotherm in the table is shown in the plotted regions.
Iron requires a very densely populated density grid to capture the melt curve.
```
# CHECK MELT CURVE GRIDDING
# Plot every tskip-th isotherm (colored points) against the ANEOS melt curve
# (black lines) in several zoomed pressure windows. Isotherms should cross the
# melt curve at constant pressure; wiggles indicate insufficient density gridding.
# NOTE(review): NewEOS.cp / NewEOS.tp look like critical-point / triple-point
# records and NewEOS.mc the melt curve (l = liquid, s = solid branch) -- confirm.
sfont = 15  # base font size
fig, axes = plt.subplots(nrows=5, ncols=2, figsize=(14,26))
plt.subplots_adjust(wspace=0.5)
tcolmin=0.
tcolmax=1.5*NewEOS.cp.T  # color scale spans up to 1.5x the critical temperature
tpltmax=1.5*NewEOS.cp.T
tskip=5  # plot every 5th isotherm in the table
#-------------
# Panel (0,0): P vs rho, low-pressure melt region
ai=0
aj=0
axes[ai,aj].plot([],[],' ',label='Low pressure melt curve')  # legend title only
for it in range(1,NewEOS.NT,tskip):
    icolscale = (NewEOS.T[it]-tcolmin)/(tcolmax-tcolmin)
    col=local_cmaps.parula(icolscale)
    axes[ai,aj].plot(NewEOS.rho,NewEOS.P[it,:],'o',markersize=2,linestyle='-',color=col)
if NewEOS.tp.T > 0:  # melt curve only exists if a triple point is defined
    axes[ai,aj].plot(NewEOS.mc.rl,NewEOS.mc.Pl,color="black")
    axes[ai,aj].plot(NewEOS.mc.rs,NewEOS.mc.Ps,color="black")
axes[ai,aj].set_ylim(0,50)
axes[ai,aj].set_xlim(6.5,9.5)
axes[ai,aj].set_xlabel('Density (g/cm$^3$)',size=sfont)
axes[ai,aj].set_ylabel('Pressure (GPa)',size=sfont)
axes[ai,aj].tick_params(labelsize=sfont)
#-------------
# Panel (0,1): P vs rho, high-pressure melt region
ai=0
aj=1
axes[ai,aj].plot([],[],' ',label='High pressure melt curve')
for it in range(1,NewEOS.NT,tskip):
    icolscale = (NewEOS.T[it]-tcolmin)/(tcolmax-tcolmin)
    col=local_cmaps.parula(icolscale)
    axes[ai,aj].plot(NewEOS.rho,NewEOS.P[it,:],'o',markersize=2,linestyle='-',color=col)
if NewEOS.tp.T > 0:
    axes[ai,aj].plot(NewEOS.mc.rl,NewEOS.mc.Pl,color="black")
    axes[ai,aj].plot(NewEOS.mc.rs,NewEOS.mc.Ps,color="black")
axes[ai,aj].set_ylim(50.,150)
axes[ai,aj].set_xlim(9.0,11.5)
axes[ai,aj].set_xlabel('Density (g/cm$^3$)',size=sfont)
axes[ai,aj].set_ylabel('Pressure (GPa)',size=sfont)
axes[ai,aj].tick_params(labelsize=sfont)
#-------------
# Panel (1,0): P vs rho, even higher pressure window
ai=1
aj=0
axes[ai,aj].plot([],[],' ',label='Higher pressure melt curve')
for it in range(1,NewEOS.NT,tskip):
    icolscale = (NewEOS.T[it]-tcolmin)/(tcolmax-tcolmin)
    col=local_cmaps.parula(icolscale)
    axes[ai,aj].plot(NewEOS.rho,NewEOS.P[it,:],'o',markersize=2,linestyle='-',color=col)
if NewEOS.tp.T > 0:
    axes[ai,aj].plot(NewEOS.mc.rl,NewEOS.mc.Pl,color="black")
    axes[ai,aj].plot(NewEOS.mc.rs,NewEOS.mc.Ps,color="black")
axes[ai,aj].set_ylim(180.,310)
axes[ai,aj].set_xlim(11.,13.,)
axes[ai,aj].set_xlabel('Density (g/cm$^3$)',size=sfont)
axes[ai,aj].set_ylabel('Pressure (GPa)',size=sfont)
axes[ai,aj].tick_params(labelsize=sfont)
#-------------
# Panel (1,1): P vs specific internal energy U, same pressure window as (1,0)
ai=1
aj=1
axes[ai,aj].plot([],[],' ',label='Higher pressure melt curve')
axes[ai,aj].set_xlabel('Specific Energy (MJ/kg)',size=sfont)
axes[ai,aj].set_ylabel('Pressure (GPa)',size=sfont)
axes[ai,aj].tick_params(labelsize=sfont)
for it in range(1,NewEOS.NT,tskip):
    icolscale = (NewEOS.T[it]-tcolmin)/(tcolmax-tcolmin)
    col=local_cmaps.parula(icolscale)
    axes[ai,aj].plot(NewEOS.U[it,:],NewEOS.P[it,:],'o',markersize=2,linestyle='-',color=col)
if NewEOS.tp.T > 0:
    axes[ai,aj].plot(NewEOS.mc.Ul,NewEOS.mc.Pl,color="black")
    axes[ai,aj].plot(NewEOS.mc.Us,NewEOS.mc.Ps,color="black")
axes[ai,aj].set_ylim(180.,310)
axes[ai,aj].set_xlim(4,7.5)
#-------------
# Panel (2,0): P vs specific entropy S (kJ/K/kg), low pressure
ai=2
aj=0
axes[ai,aj].plot([],[],' ',label='Low pressure melt curve')
axes[ai,aj].set_xlabel('Specific Entropy (kJ/K/kg)',size=sfont)
axes[ai,aj].set_ylabel('Pressure (GPa)',size=sfont)
axes[ai,aj].tick_params(labelsize=sfont)
for it in range(1,NewEOS.NT,tskip):
    icolscale = (NewEOS.T[it]-tcolmin)/(tcolmax-tcolmin)
    col=local_cmaps.parula(icolscale)
    # *1.e3: table entropy presumably in MJ/K/kg -> kJ/K/kg; verify table units
    axes[ai,aj].plot(NewEOS.S[it,:]*1.e3,NewEOS.P[it,:],'o',markersize=2,linestyle='-',color=col)
if NewEOS.tp.T > 0:
    axes[ai,aj].plot(NewEOS.mc.Sl*1.e3,NewEOS.mc.Pl,color="black")
    axes[ai,aj].plot(NewEOS.mc.Ss*1.e3,NewEOS.mc.Ps,color="black")
axes[ai,aj].set_ylim(0.,50)
axes[ai,aj].set_xlim(1.5,2.)
#-------------
# Panel (2,1): P vs specific entropy S, high pressure
ai=2
aj=1
axes[ai,aj].plot([],[],' ',label='High pressure melt curve')
axes[ai,aj].set_xlabel('Specific Entropy (kJ/K/kg)',size=sfont)
axes[ai,aj].set_ylabel('Pressure (GPa)',size=sfont)
axes[ai,aj].tick_params(labelsize=sfont)
for it in range(1,NewEOS.NT,tskip):
    icolscale = (NewEOS.T[it]-tcolmin)/(tcolmax-tcolmin)
    col=local_cmaps.parula(icolscale)
    axes[ai,aj].plot(NewEOS.S[it,:]*1.e3,NewEOS.P[it,:],'o',markersize=2,linestyle='-',color=col)
if NewEOS.tp.T > 0:
    axes[ai,aj].plot(NewEOS.mc.Sl*1.e3,NewEOS.mc.Pl,color="black")
    axes[ai,aj].plot(NewEOS.mc.Ss*1.e3,NewEOS.mc.Ps,color="black")
axes[ai,aj].set_ylim(50.,150)
axes[ai,aj].set_xlim(1.6,2.)
#-------------
# Panel (3,0): P vs specific energy U, low pressure
ai=3
aj=0
axes[ai,aj].plot([],[],' ',label='Low pressure melt curve')
axes[ai,aj].set_xlabel('Specific Energy (MJ/kg)',size=sfont)
axes[ai,aj].set_ylabel('Pressure (GPa)',size=sfont)
axes[ai,aj].tick_params(labelsize=sfont)
for it in range(1,NewEOS.NT,tskip):
    icolscale = (NewEOS.T[it]-tcolmin)/(tcolmax-tcolmin)
    col=local_cmaps.parula(icolscale)
    axes[ai,aj].plot(NewEOS.U[it,:],NewEOS.P[it,:],'o',markersize=2,linestyle='-',color=col)
if NewEOS.tp.T > 0:
    axes[ai,aj].plot(NewEOS.mc.Ul,NewEOS.mc.Pl,color="black")
    axes[ai,aj].plot(NewEOS.mc.Us,NewEOS.mc.Ps,color="black")
axes[ai,aj].set_xlim(0.5,2.)
axes[ai,aj].set_ylim(0.,50)
#-------------
# Panel (3,1): P vs specific energy U, high pressure
ai=3
aj=1
axes[ai,aj].plot([],[],' ',label='High pressure melt curve')
axes[ai,aj].set_xlabel('Specific Energy (MJ/kg)',size=sfont)
axes[ai,aj].set_ylabel('Pressure (GPa)',size=sfont)
axes[ai,aj].tick_params(labelsize=sfont)
for it in range(1,NewEOS.NT,tskip):
    icolscale = (NewEOS.T[it]-tcolmin)/(tcolmax-tcolmin)
    col=local_cmaps.parula(icolscale)
    axes[ai,aj].plot(NewEOS.U[it,:],NewEOS.P[it,:],'o',markersize=2,linestyle='-',color=col)
if NewEOS.tp.T > 0:
    axes[ai,aj].plot(NewEOS.mc.Ul,NewEOS.mc.Pl,color="black")
    axes[ai,aj].plot(NewEOS.mc.Us,NewEOS.mc.Ps,color="black")
axes[ai,aj].set_ylim(50.,150)
axes[ai,aj].set_xlim(1,4.)
#-------------
# Panel (4,0): P vs rho, very high pressure window
ai=4
aj=0
for it in range(1,NewEOS.NT,tskip):
    icolscale = (NewEOS.T[it]-tcolmin)/(tcolmax-tcolmin)
    col=local_cmaps.parula(icolscale)
    axes[ai,aj].plot(NewEOS.rho,NewEOS.P[it,:],'o',markersize=2,color=col,linestyle='-')
if NewEOS.tp.T > 0:
    axes[ai,aj].plot(NewEOS.mc.rl,NewEOS.mc.Pl,color="black")
    axes[ai,aj].plot(NewEOS.mc.rs,NewEOS.mc.Ps,color="black")
axes[ai,aj].set_ylim(300,550)
axes[ai,aj].set_xlim(13.5,15.)
axes[ai,aj].plot([],[],' ',label='Very high pressure melt curve.')
axes[ai,aj].set_xlabel('Density (g/cm$^3$)',size=sfont)
axes[ai,aj].set_ylabel('Pressure (GPa)',size=sfont)
axes[ai,aj].tick_params(labelsize=sfont)
#-------------
# Panel (4,1): P vs rho, highest pressure window
ai=4
aj=1
for it in range(1,NewEOS.NT,tskip):
    icolscale = (NewEOS.T[it]-tcolmin)/(tcolmax-tcolmin)
    col=local_cmaps.parula(icolscale)
    axes[ai,aj].plot(NewEOS.rho,NewEOS.P[it,:],'o',markersize=2,linestyle='-',color=col)
if NewEOS.tp.T > 0:
    axes[ai,aj].plot(NewEOS.mc.rl,NewEOS.mc.Pl,color="black")
    axes[ai,aj].plot(NewEOS.mc.rs,NewEOS.mc.Ps,color="black")
axes[ai,aj].set_ylim(800,1000)
axes[ai,aj].set_xlim(16.5,17.5)
axes[ai,aj].plot([],[],' ',label='Very high pressures')
axes[ai,aj].set_xlabel('Density (g/cm$^3$)',size=sfont)
axes[ai,aj].set_ylabel('Pressure (GPa)',size=sfont)
axes[ai,aj].tick_params(labelsize=sfont)
#------
# show the legend (panel caption) on every subplot
axes[0,0].legend(fontsize=sfont-5)
axes[0,1].legend(fontsize=sfont-5)
axes[1,0].legend(fontsize=sfont-5)
axes[1,1].legend(fontsize=sfont-5)
axes[2,0].legend(fontsize=sfont-5)
axes[2,1].legend(fontsize=sfont-5)
axes[3,0].legend(fontsize=sfont-5)
axes[3,1].legend(fontsize=sfont-5)
axes[4,0].legend(fontsize=sfont-5)
axes[4,1].legend(fontsize=sfont-5)
```
# References
Adams, J. J., Agosta, D. S., Leisure, R. G., & Ledbetter, H. (2006). Elastic constants of monocrystal iron from 3 to 500 K. Journal of applied physics, 100(11), 113530.
Alcock, C. B., Itkin, V. P., & Horrigan, M. K. (1984). Vapour pressure equations for the metallic elements: 298–2500K. Canadian Metallurgical Quarterly, 23(3), 309-313.
Anderson, W. W., & Ahrens, T. J. (1994). An equation of state for liquid iron and implications for the Earth's core. Journal of Geophysical Research: Solid Earth, 99(B3), 4273-4284.
Anzellini, S., Dewaele, A., Mezouar, M., Loubeyre, P., & Morard, G. (2013). Melting of iron at Earth’s inner core boundary based on fast X-ray diffraction. Science, 340(6131), 464-466.
Askeland D R, 1996 Materialwissenschaften (Heidelberg: Spektrum Akademischer Verlag GmbH) p. 203
Assael, M. J., Kakosimos, K., Banish, R. M., Brillo, J., Egry, I., Brooks, R., ... & Wakeham, W. A. (2006). Reference data for the density and viscosity of liquid aluminum and liquid iron. Journal of Physical and Chemical Reference Data, 35(1), 285-300.
Beutl, M., Pottlacher, G., & Jäger, H. (1994). Thermophysical properties of liquid iron. International journal of thermophysics, 15(6), 1323-1331.
Blumm, J., & Henderson, J. B. (2000). Measurement of the volumetric expansion and bulk density of metals in the solid and molten regions. High Temperatures High Pressures, 32(1), 109-114.
Brown, J. M., & McQueen, R. G. (1986). Phase transitions, Grüneisen parameter, and elasticity for shocked iron between 77 GPa and 400 GPa. Journal of Geophysical Research: Solid Earth, 91(B7), 7485-7494.
Burakovsky, L., & Preston, D. L. (2004). Analytic model of the Grüneisen parameter all densities. Journal of Physics and Chemistry of Solids, 65(8-9), 1581-1587.
Bushman, A.V., I. V. Lomonosov and K. V. Khishchenko, et al. (accessed 2019) Shock Wave Database, http://www.ihed.ras.ru/rusbank/
Chase Jr, M. W., Curnutt, J. L., Downey Jr, J. R., McDonald, R. A., Syverud, A. N., & Valenzuela, E. A. (1982). JANAF thermochemical tables, 1982 supplement. Journal of Physical and Chemical Reference Data, 11(3), 695-940. https://janaf.nist.gov
Desai, P. D. (1986). Thermodynamic properties of iron and silicon. Journal of physical and chemical reference data, 15(3), 967-983.
Dewaele, A., Loubeyre, P., Occelli, F., Mezouar, M., Dorogokupets, P. I., & Torrent, M. (2006). Quasihydrostatic equation of state of iron above 2 Mbar. Physical Review Letters, 97(21), 215504.
Dorogokupets, P. I., Dymshits, A. M., Litasov, K. D., & Sokolova, T. S. (2017). Thermodynamics and Equations of State of Iron to 350 GPa and 6000 K. Scientific reports, 7, 41863.
Duffy, T., Madhusudhan, N., & Lee, K. K. M. (2015). 2.07 Mineralogy of Super-Earth Planets. Treatise on Geophysics, Second Edition, Elsevier, Oxford.
Durek, J. J., and G. Ekstrom (1996) Modified PREM (Preliminary Reference Earth Model), doi:10.17611/DP/9785674, http://ds.iris.edu/spud/earthmodel/9785674.
Fischer, R. A. (2016). Melting of Fe alloys and the thermal structure of the core. Deep Earth: Physics and chemistry of the lower mantle and core, 217, 3-12.
Fischer, R. A., Campbell, A. J., Caracas, R., Reaman, D. M., Dera, P., & Prakapenka, V. B. (2012). Equation of state and phase diagram of Fe–16Si alloy as a candidate component of Earth's core. Earth and Planetary Science Letters, 357, 268-276.
Fortov, V. E., & Lomonosov, I. V. (2010). Shock waves and equations of state of matter. Shock waves, 20(1), 53-71.
Guinan, M. W., & Beshers, D. N. (1968). Pressure derivatives of the elastic constants of α-iron to 10 kbs. Journal of Physics and Chemistry of Solids, 29(3), 541-549.
Kerley, G. I. (1977). Rational function method of interpolation (No. LA-6903-MS). Los Alamos National Laboratory, Los Alamos, NM (United States).
Kerley, G. I. (1993). Multiphase equation of state for iron (No. SAND-93-0027). Sandia National Labs., Albuquerque, NM (United States).
Kraus, R. G., Root, S., Lemke, R. W., Stewart, S. T., Jacobsen, S. B., & Mattsson, T. R. (2015). Impact vaporization of planetesimal cores in the late stages of planet formation. Nature Geoscience, 8(4), 269.
Lin, J. F., Campbell, A. J., Heinz, D. L., & Shen, G. (2003). Static compression of iron‐silicon alloys: Implications for silicon in the Earth's core. Journal of Geophysical Research: Solid Earth, 108(B1).
Lyon, S. P., & Johnson, J. D. (1992). SESAME: The LANL equation of state database. Los Alamos National Laboratories Report LAUR-92-3407, Los Alamos, NM.
Medvedev, A. B. (2014). Wide-range multiphase equation of state for iron. Combustion, Explosion, and Shock Waves, 50(5), 582-598.
Morard, G., Boccato, S., Rosa, A. D., Anzellini, S., Miozzi, F., Henry, L., ... & Boulard, E. (2018). Solving controversies on the iron phase diagram under high pressure. Geophysical Research Letters, 45(20), 11-074.
Nasch, P. M., Manghnani, M. H., & Secco, R. A. (1994). Sound velocity measurements in liquid iron by ultrasonic interferometry. Journal of Geophysical Research: Solid Earth, 99(B3), 4285-4291.
Nguyen, J. H., & Holmes, N. C. (2004). Melting of iron at the physical conditions of the Earth's core. Nature, 427(6972), 339.
Tillotson, J. H. (1962). Metallic equations of state for hypervelocity impact (No. GA-3216). General Atomics Division, General Dynamics, San Diego, CA.
Sjostrom, T., & Crockett, S. (2018). Quantum molecular dynamics of warm dense iron and a five-phase equation of state. Physical Review E, 97(5), 053209.
Zeman, M., Holec, M., & Váchal, P. (2019). HerEOS: A framework for consistent treatment of the Equation of State in ALE hydrodynamics. Computers & Mathematics with Applications, 78(2), 483-503.
## ANEOS references
Collins, Gareth S., and H. Jay Melosh (2014). Improvements to ANEOS for multiple phase transitions. 45th Lunar Planet. Sci. Conf. Abs. 2664.
Melosh, H. J. (2007). A hydrocode equation of state for SiO$_2$. Meteoritics & Planetary Science, 42(12), 2079-2098.
Thompson, S. L. (1990). ANEOS analytic equations of state for shock physics codes input manual. SANDIA REPORT SAND, 89-2951.
Thompson, S. L., & Lauson, H. S. (1972). Improvements in the Chart D radiation-hydrodynamic CODE III: Revised analytic equations of state (No. SC-RR--71-0714). Sandia Labs.
Stewart, S., et al. (accepted). The shock physics of giant impacts: Key requirements for the equations of state. In J. Lane, T. Germann, and M. Armstrong (Eds.), 21st Biennial APS Conference on Shock Compression of Condensed Matter (SCCM19). AIP Publishing. (https://arxiv.org/abs/1910.04687)
Stewart, S. T. (2019). ANEOS Code Modification: Thermal model adjustment parameter. https://github.com/ststewart/aneos-forsterite-2019/EOS-docs/
End of File
#####
| github_jupyter |
## Face and Facial Keypoint detection
After you've trained a neural network to detect facial keypoints, you can then apply this network to *any* image that includes faces. The neural network expects a Tensor of a certain size as input and, so, to detect any face, you'll first have to do some pre-processing.
1. Detect all the faces in an image using a face detector (we'll be using a Haar Cascade detector in this notebook).
2. Pre-process those face images so that they are grayscale, and transformed to a Tensor of the input size that your net expects. This step will be similar to the `data_transform` you created and applied in Notebook 2, whose job was to rescale, normalize, and turn any image into a Tensor to be accepted as input to your CNN.
3. Use your trained model to detect facial keypoints on the image.
---
In the next python cell we load in required libraries for this section of the project.
```
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
%matplotlib inline
```
#### Select an image
Select an image to perform facial keypoint detection on; you can select any image of faces in the `images/` directory.
```
import cv2

# Load the demo photo and convert it to RGB for display/keypoint detection.
# load in color image for face detection
image = cv2.imread('images/obamas.jpg')
# switch red and blue color channels
# --> by default OpenCV assumes BLUE comes first, not RED as in many images
image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
# plot the image
fig = plt.figure(figsize=(9,9))
plt.imshow(image)
```
## Detect all faces in an image
Next, you'll use one of OpenCV's pre-trained Haar Cascade classifiers, all of which can be found in the `detector_architectures/` directory, to find any faces in your selected image.
In the code below, we loop over each face in the original image and draw a red square on each face (in a copy of the original image, so as not to modify the original). You can even [add eye detections](https://docs.opencv.org/3.4.1/d7/d8b/tutorial_py_face_detection.html) as an *optional* exercise in using Haar detectors.
An example of face detection on a variety of images is shown below.
<img src='images/haar_cascade_ex.png' width=80% height=80%/>
```
# load in a haar cascade classifier for detecting frontal faces
face_cascade = cv2.CascadeClassifier('detector_architectures/haarcascade_frontalface_default.xml')
# run the detector
# the output here is an array of detections; the corners of each detection box
# if necessary, modify these parameters until you successfully identify every face in a given image
# (1.2 = scale factor between image pyramid levels, 2 = minNeighbors threshold)
faces = face_cascade.detectMultiScale(image, 1.2, 2)
# make a copy of the original image to plot detections on
image_with_detections = image.copy()
# loop over the detected faces, mark the image where each face is found
for (x,y,w,h) in faces:
    # draw a rectangle around each detected face
    # you may also need to change the width of the rectangle drawn depending on image resolution
    cv2.rectangle(image_with_detections,(x,y),(x+w,y+h),(255,0,0),3)
fig = plt.figure(figsize=(9,9))
plt.imshow(image_with_detections)
```
## Loading in a trained model
Once you have an image to work with (and, again, you can select any image of faces in the `images/` directory), the next step is to pre-process that image and feed it into your CNN facial keypoint detector.
First, load your best model by its filename.
```
import torch
from models import NaimishNet  # project-local CNN architecture

# Instantiate the keypoint network and restore trained weights from disk.
net = NaimishNet()
## TODO: load the best saved model parameters (by your path name)
## You'll need to un-comment the line below and add the correct name for *your* saved model
net.load_state_dict(torch.load('saved_models/model_naimishnet.pt'))
## print out your net and prepare it for testing (uncomment the line below)
net.eval()  # disable dropout/batchnorm training behavior for inference
```
## Keypoint detection
Now, we'll loop over each detected face in an image (again!) only this time, you'll transform those faces in Tensors that your CNN can accept as input images.
### TODO: Transform each detected face into an input Tensor
You'll need to perform the following steps for each detected face:
1. Convert the face from RGB to grayscale
2. Normalize the grayscale image so that its color range falls in [0,1] instead of [0,255]
3. Rescale the detected face to be the expected square size for your CNN (224x224, suggested)
4. Reshape the numpy image into a torch image.
You may find it useful to consult to transformation code in `data_load.py` to help you perform these processing steps.
### TODO: Detect and display the predicted keypoints
After each face has been appropriately converted into an input Tensor for your network to see as input, you'll wrap that Tensor in a Variable() and can apply your `net` to each face. The output should be the predicted facial keypoints. These keypoints will need to be "un-normalized" for display, and you may find it helpful to write a helper function like `show_keypoints`. You should end up with an image like the following with facial keypoints that closely match the facial features on each individual face:
<img src='images/michelle_detected.png' width=30% height=30%/>
```
# For each Haar-detected face: crop, preprocess, run the CNN, and plot the
# predicted 68 facial keypoints over the cropped face.
image_copy = np.copy(image)
# loop over the detected faces from your haar cascade
for (x,y,w,h) in faces:
    # Select the region of interest that is the face in the image
    # NOTE(review): +7 / +8 look like hand-tuned padding around the Haar box -- confirm
    roi = image_copy[y:y+h+7, x:x+w+8]
    ## TODO: Convert the face region from RGB to grayscale
    ## TODO: Normalize the grayscale image so that its color range falls in [0,1] instead of [0,255]
    ## TODO: Rescale the detected face to be the expected square size for your CNN (224x224, suggested)
    ## TODO: Reshape the numpy image shape (H x W x C) into a torch image shape (C x H x W)
    ## TODO: Make facial keypoint predictions using your loaded, trained network
    ## perform a forward pass to get the predicted facial keypoints
    ## TODO: Display each detected face and the corresponding keypoints
    roi = cv2.cvtColor(roi, cv2.COLOR_RGB2GRAY)   # grayscale
    roi = roi / 255.0                             # scale pixel values to [0,1]
    resized_roi = cv2.resize(roi, (224, 224))     # network input size
    # add batch and channel dims: (1, 1, 224, 224)
    roi = torch.from_numpy(resized_roi.reshape(1, 1, 224, 224))
    roi = roi.type(torch.FloatTensor)
    pred = net(roi)
    pred = pred.view(68, -1)                      # 68 (x, y) keypoint pairs
    pred_key = pred.data.numpy()
    # un-normalize keypoints back to pixel coordinates
    # NOTE(review): 69.0 and 81 are hand-tuned; standard un-normalization is
    # keypoints*50 + 100 -- verify against data_load.py
    pred_key = pred_key*69.0 + 81
    plt.figure(figsize=(6,6))
    plt.imshow(resized_roi, cmap='gray')
    plt.scatter(pred_key[:, 0], pred_key[:, 1])
    plt.show()
```
| github_jupyter |
<a id="introduction"></a>
## Introduction to Dask
#### By Paul Hendricks
-------
In this notebook, we will show how to get started with Dask using basic Python primitives like integers and strings.
**Table of Contents**
* [Introduction to Dask](#introduction)
* [Setup](#setup)
* [Introduction to Dask](#dask)
* [Conclusion](#conclusion)
<a id="setup"></a>
## Setup
This notebook was tested using the following Docker containers:
* `rapidsai/rapidsai-dev-nightly:0.10-cuda10.0-devel-ubuntu18.04-py3.7` container from [DockerHub](https://hub.docker.com/r/rapidsai/rapidsai-nightly)
This notebook was run on the NVIDIA GV100 GPU. Please be aware that your system may be different and you may need to modify the code or install packages to run the below examples.
If you think you have found a bug or an error, please file an issue here: https://github.com/rapidsai/notebooks-contrib/issues
Before we begin, let's check out our hardware setup by running the `nvidia-smi` command.
```
!nvidia-smi
```
Next, let's see what CUDA version we have:
```
!nvcc --version
```
## Install graphviz
The visualizations in this notebook require graphviz. Your environment may not have it installed, but don't worry! If you don't, we're going to install it now. This can take a little while, so sit tight.
```
import os

# Make sure graphviz is available for Dask task-graph visualization.
try:
    import graphviz
except ModuleNotFoundError:
    # Not installed: pull the library and its Python bindings from conda-forge
    # (same two installs, in the same order, as before).
    for package in ('graphviz', 'python-graphviz'):
        os.system('conda install -c conda-forge {} -y'.format(package))
```
<a id="dask"></a>
## Introduction to Dask
Dask is a library that allows for parallelized computing. Written in Python, it allows one to compose complex workflows using large data structures like those found in NumPy, Pandas, and cuDF. In the following examples and notebooks, we'll show how to use Dask with cuDF to accelerate common ETL tasks as well as build and train machine learning models like Linear Regression and XGBoost.
To learn more about Dask, check out the documentation here: http://docs.dask.org/en/latest/
#### Client/Workers
Dask operates by creating a cluster composed of a "client" and multiple "workers". The client is responsible for scheduling work; the workers are responsible for actually executing that work.
Typically, we set the number of workers to be equal to the number of computing resources we have available to us. For CPU based workflows, this might be the number of cores or threads on that particular machine. For example, we might set `n_workers = 8` if we have 8 CPU cores or threads on our machine that can each operate in parallel. This allows us to take advantage of all of our computing resources and enjoy the most benefits from parallelization.
On a system with one or more GPUs, we usually set the number of workers equal to the number of GPUs available to us. Dask is a first class citizen in the world of General Purpose GPU computing and the RAPIDS ecosystem makes it very easy to use Dask with cuDF and XGBoost.
Before we get started with Dask, we need to setup a Local Cluster of workers to execute our work and a Client to coordinate and schedule work for that cluster. As we see below, we can initiate a `cluster` and `client` using only a few lines of code.
```
import dask; print('Dask Version:', dask.__version__)
from dask.distributed import Client, LocalCluster

# create a local cluster with 4 workers
n_workers = 4  # typically one worker per available core/GPU
cluster = LocalCluster(n_workers=n_workers)
client = Client(cluster)  # the client schedules work onto the cluster's workers
```
Let's inspect the `client` object to view our current Dask status. We should see the IP Address for our Scheduler as well as the number of workers in our Cluster.
```
# show current Dask status
client
```
You can also see the status and more information at the Dashboard, found at `http://<ip_address>/status`. You can ignore this for now, we'll dive into this in subsequent tutorials.
With our client and workers setup, it's time to execute our first program in parallel. We'll define a function called `add_5_to_x` that takes some value `x` and adds 5 to it.
```
def add_5_to_x(x):
    """Return ``x`` increased by 5 -- a tiny workload for the Dask demo."""
    shifted = x + 5
    return shifted
```
Next, we'll iterate through our `n_workers` and create an execution graph, where each worker is responsible for taking its ID and passing it to the function `add_5_to_x`. For example, the worker with ID 2 will take its ID and pass it to the function `add_5_to_x`, resulting in the value 7.
```
from dask import delayed

# Build a lazy task graph: one add_5_to_x call per worker ID. Nothing executes
# yet -- delayed() wraps each call in a Delayed object for later scheduling.
addition_operations = [delayed(add_5_to_x)(i) for i in range(n_workers)]
addition_operations  # displays the list of Delayed objects
```
The above output shows a list of several `Delayed` objects. An important thing to note is that the workers aren't actually executing these results - we're just defining the execution graph for our client to execute later. The `delayed` function wraps our function `add_5_to_x` and returns a `Delayed` object. This ensures that this computation is in fact "delayed" - or lazily evaluated - and not executed on the spot i.e. when we define it.
Next, let's sum each one of these intermediate results. We can accomplish this by wrapping Python's built-in `sum` function using our `delayed` function and storing this in a variable called `total`.
```
# Lazily sum the intermediate Delayed results into a single Delayed total.
total = delayed(sum)(addition_operations)
total  # still a Delayed object -- not computed yet
```
Using the `graphviz` library, we can use the `visualize` method of a `Delayed` object to visualize our current graph.
```
total.visualize()  # render the task graph (requires graphviz)
```
As we mentioned before, none of these results - intermediate or final - have actually been computed. We can compute them using the `compute` method of our `client`.
```
from dask.distributed import wait
import time

# Submit the task graphs for asynchronous execution; compute() returns Future
# objects immediately rather than blocking on the results.
addition_futures = client.compute(addition_operations, optimize_graph=False, fifo_timeout="0ms")
total_future = client.compute(total, optimize_graph=False, fifo_timeout="0ms")
wait(total_future) # this will give Dask time to execute the work
```
Let's inspect the output of each call to `client.compute`:
```
addition_futures  # a list of Future objects (promises), not the results themselves
```
We can see from the above output that our `addition_futures` variable is a list of `Future` objects - not the "actual results" of adding 5 to each of `[0, 1, 2, 3]`. These `Future` objects are a promise that at one point a computation will take place and we will be left with a result. Dask is responsible for fulfilling that promise by delegating that task to the appropriate Dask worker and collecting the result.
Let's take a look at our `total_future` object:
```
# Inspect the pending total: a Future with status/type/key metadata.
print(total_future)
print(type(total_future))
```
Again, we see that this is an object of type `Future` as well as metadata about the status of the request (i.e. whether it has finished or not), the type of the result, and a key associated with that operation. To collect and print the result of each of these `Future` objects, we can call the `result()` method.
```
# Block on each Future individually and collect its computed value.
addition_results = [future.result() for future in addition_futures]
print('Addition Results:', addition_results)
```
Now we see the results that we want from our addition operations. We can also use the simpler syntax of the `client.gather` method to collect our results.
```
# Equivalent, simpler collection: gather() resolves Futures in one call.
addition_results = client.gather(addition_futures)
total_result = client.gather(total_future)
print('Addition Results:', addition_results)
print('Total Result:', total_result)
```
Awesome! We just wrote our first distributed workflow.
To confirm that Dask is truly executing in parallel, let's define a function that sleeps for 1 second and returns the string "Success!". In serial, this function should take our 4 workers around 4 seconds to execute.
```
def sleep_1():
    """Sleep for one second and return 'Success!' (serial-vs-parallel timing demo)."""
    time.sleep(1)
    return 'Success!'

%%time
# serial baseline: n_workers sequential calls -> ~n_workers seconds
for _ in range(n_workers):
    sleep_1()
```
As expected, our process takes about 4 seconds to run. Now let's execute this same workflow in parallel using Dask.
```
%%time
# define delayed execution graph
sleep_operations = [delayed(sleep_1)() for _ in range(n_workers)]
# use client to perform computations using execution graph
# (all n_workers sleeps run concurrently -> ~1 second total)
sleep_futures = client.compute(sleep_operations, optimize_graph=False, fifo_timeout="0ms")
# collect and print results
sleep_results = client.gather(sleep_futures)
print(sleep_results)
```
Using Dask, we see that this whole process takes a little over a second - each worker is executing in parallel!
<a id="conclusion"></a>
## Conclusion
In this tutorial, we learned how to use Dask with basic Python primitives like integers and strings.
To learn more about RAPIDS, be sure to check out:
* [Open Source Website](http://rapids.ai)
* [GitHub](https://github.com/rapidsai/)
* [Press Release](https://nvidianews.nvidia.com/news/nvidia-introduces-rapids-open-source-gpu-acceleration-platform-for-large-scale-data-analytics-and-machine-learning)
* [NVIDIA Blog](https://blogs.nvidia.com/blog/2018/10/10/rapids-data-science-open-source-community/)
* [Developer Blog](https://devblogs.nvidia.com/gpu-accelerated-analytics-rapids/)
* [NVIDIA Data Science Webpage](https://www.nvidia.com/en-us/deep-learning-ai/solutions/data-science/)
| github_jupyter |
```
# Load and visualize the NIR gasoline spectra dataset (spectra + octane numbers).
import pandas as pd
from matplotlib import pyplot as plt
import numpy as np
plt.style.use('seaborn-talk')
%matplotlib notebook
# Load the data set:
data = pd.read_csv('Data/gasoline.csv')
# Each row contains a measured spectrum and a corresponding octane number
# Extract the octane numbers:
yvars = ['octane']
# Extract the spectra:
xvars = [i for i in data.columns if i not in yvars]
# parse the wavelength (nm) out of each column name
# NOTE(review): assumes headers shaped like 'NIR.900 nm' -- verify against the CSV
wavelengths = [int(i.split()[0].split('.')[1]) for i in xvars]
print(f'Number of wavelengths measured: {len(xvars)}')
data.describe()  # NOTE(review): not the cell's last expression, so it is not displayed
# Let us visualize the spectra, just to see what we have to work with:
spec = data[xvars].to_numpy()
octane = data['octane'].to_numpy()
# Plot the first spectra:
figi = plt.figure(constrained_layout=True)
axi = figi.add_subplot(projection='3d')
axi.set_title('NIR spectra')
ribbon = 4  # ribbon width along the spectrum-number axis
xticks = []
xticks_labels = []
# draw each spectrum as a 3D ribbon: X = wavelength, Y = spectrum index, Z = signal
for i, speci in enumerate(spec):
    x = wavelengths
    X = np.vstack((x, x)).T
    Y = np.full_like(X, 5 * i)  # spectra spaced 5 units apart along Y
    xticks.append(5 * i)
    xticks_labels.append(i + 1)
    Y[:,1] = Y[:,0] + ribbon    # second edge of the ribbon
    Z = np.vstack((speci, speci)).T
    surf = axi.plot_surface(X,Y,Z, rstride=1, cstride=1, cmap='Spectral', vmin=spec.min(), vmax=spec.max(), lw=1)
    if i >= 9:  # only show the first 10 spectra
        break
axi.grid(False)
axi.w_xaxis.pane.set_visible(False)
axi.w_yaxis.pane.set_visible(False)
axi.set_yticks(xticks[::2])
axi.set_yticklabels(xticks_labels[::2])
axi.set_xlabel('Wavelength (nm)', labelpad=10)
axi.set_ylabel('Spectrum no.', labelpad=10)
# Plot the spectrum with the lowest and highest octane number:
idxmin = np.argmin(octane)
idxmax = np.argmax(octane)
figj, axj = plt.subplots(constrained_layout=True)
axj.plot(wavelengths, spec[idxmin], label=f'Octane: {octane[idxmin]}')
axj.plot(wavelengths, spec[idxmax], label=f'Octane: {octane[idxmax]}')
axj.set_title('Highest and lowest octane numbers')
axj.set_xlabel('Wavelength (nm)')
axj.legend()
# And make a histogram of the octane numbers so we know what values we are working with:
figk, axk = plt.subplots(constrained_layout=True)
axk.hist(octane)
axk.set_title('Octane numbers');
from sklearn.preprocessing import scale
X = scale(data[xvars])
Y = scale(data[yvars])
# Create a PLS model to relate the octane content to the NIR spectra:
from sklearn.cross_decomposition import PLSRegression
pls = PLSRegression(n_components=5, scale=False)
pls.fit(X, Y)
Y_hat = pls.predict(X)
B_PLS = pls.coef_
# Create a linear model to relate the octane content to the NIR spectra:
from sklearn.linear_model import LinearRegression
linear = LinearRegression(fit_intercept=False)
linear.fit(X, Y)
Y_hat_lin = linear.predict(X)
B_MLR = linear.coef_.T
from sklearn.metrics import r2_score
figi, (axi, axj) = plt.subplots(constrained_layout=True, ncols=2, nrows=1, sharex=True, sharey=True)
axi.set_title('PLS')
axi.scatter(Y, Y_hat, label=f'r² = {r2_score(Y, Y_hat):6.4f}')
axi.set(xlabel='y', ylabel='ŷ')
axj.set_title('Least squares')
axj.scatter(Y, Y_hat_lin, label=f'r² = {r2_score(Y, Y_hat_lin):6.4f}')
axj.set(xlabel='y', ylabel='ŷ')
axi.legend();
axj.legend();
figi, (axi, axj) = plt.subplots(constrained_layout=True, ncols=1, nrows=2)
axi.set_title('PLS coefficients')
axi.plot(wavelengths, B_PLS[:, 0])
axi.set(xlabel='Wavelength (nm)', ylabel='Coeff.')
axi.axhline(y=0.0, ls=':', color='k')
axj.set_title('Least squares coefficients')
axj.plot(wavelengths, B_MLR[:, 0])
axj.axhline(y=0.0, ls=':', color='k')
axj.set(xlabel='Wavelength (nm)', ylabel='Coeff.');
# Try it all again with a training and test set:
from sklearn.model_selection import train_test_split
X_train, X_test, Y_train, Y_test = train_test_split(
X, Y, test_size=0.3,
)
pls = PLSRegression(n_components=5, scale=False)
pls.fit(X_train, Y_train)
Y_hat_train = pls.predict(X_train)
Y_hat_test = pls.predict(X_test)
linear = LinearRegression(fit_intercept=False)
linear.fit(X_train, Y_train)
Y_hat_lin_train = linear.predict(X_train)
Y_hat_lin_test = linear.predict(X_test)
figi, (axi, axj) = plt.subplots(constrained_layout=True, ncols=2, nrows=1, sharex=True, sharey=True)
axi.scatter(Y_train, Y_hat_train, label=f'Train, r² = {r2_score(Y_train, Y_hat_train):6.4f}')
axi.scatter(Y_test, Y_hat_test, label=f'Test, r² = {r2_score(Y_test, Y_hat_test):6.4f}', marker='X')
axj.scatter(Y_train, Y_hat_lin_train, label=f'Train, r² = {r2_score(Y_train, Y_hat_lin_train):6.4f}')
axj.scatter(Y_test, Y_hat_lin_test, label=f'Test, r² = {r2_score(Y_test, Y_hat_lin_test):6.4f}', marker='X')
axi.set_title('PLS')
axj.set_title('Least squares')
axi.set(xlabel='y', ylabel='ŷ')
axj.set(xlabel='y', ylabel='ŷ')
axi.legend();
axj.legend();
# Repeat training and testing 20 times:
pls_test = []
ls_test = []
for i in range(20):
X_train, X_test, Y_train, Y_test = train_test_split(X, Y, test_size=0.3)
pls = PLSRegression(n_components=5, scale=False)
pls.fit(X_train, Y_train)
Y_hat_train = pls.predict(X_train)
Y_hat_test = pls.predict(X_test)
pls_test.append(r2_score(Y_test, Y_hat_test))
linear = LinearRegression(fit_intercept=False)
linear.fit(X_train, Y_train)
Y_hat_lin_train = linear.predict(X_train)
Y_hat_lin_test = linear.predict(X_test)
ls_test.append(r2_score(Y_test, Y_hat_lin_test))
figi, axi = plt.subplots(constrained_layout=True)
x = np.arange(len(pls_test))
axi.plot(x, pls_test, marker='o', label='PLS')
axi.axhline(y=np.mean(pls_test), label='Avg. PLS', ls=':', color='k')
axi.plot(x, ls_test, marker='X', label='LS')
axi.axhline(y=np.mean(ls_test), label='Avg. LS', ls='--', color='k')
axi.set(xlabel='Test no.', ylabel='r² (test)')
axi.legend();
# Use training and testing for checking the performance as a function of PLS components:
pls_test2 = []
for comp in range(1, 11):
score = []
for i in range(10):
X_train, X_test, Y_train, Y_test = train_test_split(X, Y, test_size=0.3)
pls = PLSRegression(n_components=comp, scale=False)
pls.fit(X_train, Y_train)
Y_hat_test = pls.predict(X_test)
score.append(r2_score(Y_test, Y_hat_test))
pls_test2.append([np.mean(score), np.std(score)])
pls_test2 = np.array(pls_test2)
figi, axi = plt.subplots(constrained_layout=True)
axi.errorbar(range(1, 11), pls_test2[:, 0], yerr=pls_test2[:, 1], marker='o')
axi.set_xlabel('No. of components')
axi.set_ylabel('r² (test)');
```
| github_jupyter |
# Machine Learning on a TabText Dataframe
## An Example Based on the Paycheck Protection Program
The Paycheck Protection Program (PPP) was created by the U.S. government to enable employers struggling with COVID-related business adversities to make payments to their employees. For more information, see the [Paycheck Protection Program](https://www.sba.gov/funding-programs/loans/coronavirus-relief-options/paycheck-protection-program). In this example notebook, you'll learn how to run a machine learning model on a sample of companies in the program over the first two quarters of 2020.
In this notebook, we take U.S. Securities and Exchange Commission (SEC) filing data from some of the companies that partook of the loans under this program. We demonstrate how to merge the SEC filing data (text data) with stock price data (tabular data) using the [SageMaker JumpStart Industry Python SDK](https://pypi.org/project/smjsindustry/). The `build_tabText` class of the library helps merge text dataframes with numeric dataframes to create a multimodal dataframe for machine learning.
A subset of the list of tickers of firms that took PPP loans obtained from authors of the following paper:
- Balyuk, T., Prabhala, N. and Puri, M. (November 2020, revised June 2021), *[Indirect Costs of Government Aid and Intermediary Supply Effects: Lessons from the Paycheck Protection Program](https://papers.ssrn.com/sol3/papers.cfm?abstract_id=3735682)*. NBER Working Paper No. w28114, Available at SSRN: https://ssrn.com/abstract=3735682.
Companies took funds under the PPP because they were financially constrained and needed to make payroll. However, some companies availed of the loans even though they were not financially constrained. The market penalized the borrowers when they revealed that they took the PPP money, as they read the information as bad news, even for the companies that had no credit quality issues. Subsequently, the better companies returned the money they had borrowed because they wanted to recoup their share value.
We are interested in seeing if an ML model is able to detect whether the text of filings of companies that returned PPP money is different from that of companies that retained PPP money.
>**<span style="color:RED">Legal Disclaimer</span>**:
>This example notebook is for demonstrative purposes only. It is not financial advice and should not be relied on as financial or investment advice.
### General Steps
This notebook takes the following steps:
1. Read in over 400 tickers of companies that took the PPP loans.
2. Read in the 10-K, 10-Q, and 8K filings for all paycheck protection tickers during Q1 and Q2 of 2020. Texts in the SEC filings are loaded using the `smjsindustry.DataLoader` class.
3. Load a synthetic time series data of daily stock prices for the given tickers during Q1 and Q2 of 2020. Convert prices to returns. The simulated data is generated to be correlated with appropriate labels so that it can be meaningful. An analogous exercise with true data yields similar results.
4. Merge text and tabular datasets using the `smjsindustry.build_tabText` API.
5. Conduct machine learning analysis to obtain a baseline accuracy.
6. Build an [AutoGluon](https://auto.gluon.ai/stable/index.html) model to analyze how stock prices and texts in the SEC filings are related to each company's decision to accept or return the money. This notebook shows how to flag all filings of companies that return the money with a 1 and the filings of companies that do not return the money with a 0. A good fit to the data implies the model can distinguish companies into two categories: the ones that return PPP funding versus those that do not based on the text.
7. Deploy the endpoint for model inference.
8. Test the endpoint.
## Objective
The goal in this notebook is to investigate the companies that partook of funding from the Paycheck Protection Program (PPP) and to study how stock prices and returns and text from the SEC forms are related to their decisions to return the money.
The PPP program is reported in each company's 8-K, an SEC filing which is required when a public company experiences a material change in business conditions. In addition to a 8-K filing, 10-K and 10-Q filings, which present a comprehensive summary of a company's financial performance, are also used as source of inputs for this study. The stock data is *synthetically generated* to be correlated with the labels. You can repeat this exercise with actual data as needed.
**Note**: You can also access this notebook through SageMaker JumpStart that is executable on SageMaker Studio. For more information, see [Amazon SageMaker JumpStart Industry](https://docs.aws.amazon.com/sagemaker/latest/dg/studio-jumpstart-industry.html) in the **<em>Amazon SageMaker Developer Guide</em>**.
## Kernel and SageMaker Setup
Recommended kernel is **conda_python3**.
Ensure AutoGluon images information is available in SageMaker Python SDK.
```
!pip install -q -U "sagemaker>=2.66"
import sagemaker
session = sagemaker.Session()
region = session._region_name
bucket = session.default_bucket()
role = sagemaker.get_execution_role()
ppp_folder = "jumpstart_industry_ppp"
train_instance_type = "ml.c5.2xlarge"
inference_instance_type = "ml.m5.xlarge"
```
## Load Data, SDK, and Dependencies
The following code cells download the `smjsindustry` SDK, dependencies, and dataset from an Amazon S3 bucket prepared by SageMaker JumpStart Industry. You will learn how to use the `smjsindustry` SDK which contains various APIs to curate SEC datasets. The dataset in this example was synthetically generated using the `smjsindustry` package's SEC Forms Retrieval tool.
```
notebook_artifact_bucket = f"jumpstart-cache-prod-{region}"
notebook_data_prefix = "smfinance-notebook-data/ppp"
notebook_sdk_prefix = "smfinance-notebook-dependency/smjsindustry"
# Download dataset
data_bucket = f"s3://{notebook_artifact_bucket}/{notebook_data_prefix}"
!aws s3 sync $data_bucket ./ --exclude "*" --include "*.csv" --include "*.xlsx"
```
Install `smjsindustry` package from `whl` artifact running the following code block. Alternatively, we can also use `pip install smjsindustry==1.0.0`.
```
# Install smjsindustry SDK
sdk_bucket = f"s3://{notebook_artifact_bucket}/{notebook_sdk_prefix}"
!aws s3 sync $sdk_bucket ./
!pip install --no-index smjsindustry-1.0.0-py3-none-any.whl
```
## Import Packages
```
import os
import boto3
import json
import pandas as pd
import tarfile
import sagemaker
from sklearn.model_selection import train_test_split
from sagemaker.estimator import Estimator
from smjsindustry import build_tabText
from sklearn.metrics import classification_report, confusion_matrix
from ag_model import (
AutoGluonTraining,
AutoGluonInferenceModel,
AutoGluonTabularPredictor,
)
```
## Step 1: Read in the Tickers
Over 400 tickers are used for this study.
```
%pylab inline
ppp_tickers = pd.read_excel("ppp_tickers.xlsx", index_col=None, sheet_name=0, engine="openpyxl")
print("Number of PPP tickers =", ppp_tickers.shape[0])
ticker_list = list(set(ppp_tickers.ticker))
ppp_tickers.head()
```
## Step 2: Read in the SEC Forms Filed by These Companies
1. This notebook retrieves all 10-K/Q, 8-K forms from the SEC servers for Q1 and Q2 of 2020. This was done using the SageMaker JumpStart Industry Python SDK's `DataLoader` class. For reference, the time taken by the data loader process was around 30 minutes for curating a dataframe of over 4000 filings.
2. There is one 10K/Q form per quarter. These are quarterly reports.
3. There can be multiple 8K forms per quarter, because these are filed for material changes in business conditions. Depending on how many such events there are, several 8Ks might need to be filed. As you will see, this notebook retrieves more than one form per quarter.
4. The dataset was stored in a CSV file named `ppp_10kq_8k_data.csv` (351 MB).
>**<span style="color:RED">Legal Disclaimer</span>**:
>This example notebook uses data obtained from the SEC EDGAR database. You are responsible for complying with EDGAR’s access terms and conditions located in the [Accessing EDGAR Data](https://www.sec.gov/os/accessing-edgar-data) page.
```
%%time
# Text data
df_sec = pd.read_csv("ppp_10kq_8k_data.csv")
print("Number of SEC filings: ", df_sec.shape[0])
```
## Step 3: Collect Stock Prices and Convert to Returns
* Given the list of tickers, we synthetically generated stock prices using simulation of geometric Brownian motion. The stock prices are generated to be consistent with the real market data. You can buy data for commercial use if needed.
* Convert the stock prices to returns.
Some tickers might have been delisted since the time of the PPP program.
#### Read in the PPP stock prices synthetic dataset
```
df_prices = pd.read_csv("ppp_stock_prices_synthetic.csv")
print("Total number of days for the stock time series: ", df_prices.shape[0])
print("Total number of stocks: ", df_prices.shape[1])
df_prices.head()
```
The following code cell converts the prices into percentage returns.
- It calls a helper function that converts prices into percentage returns using the `pct_change` function.
- It removes any stock column that contains only `NaN` values.
```
def convert_price_to_return(df_prices):
    """Convert a wide price table (Date + one column per ticker) into
    simple percentage returns, dropping the first row (no prior price).
    """
    tickers = list(df_prices.columns[1:])
    pct = df_prices[tickers].pct_change()
    combined = pd.concat([df_prices.Date, pct], axis=1)
    # Row 0 has no previous price and is therefore NaN: drop it and renumber.
    return combined.iloc[1:].reset_index(drop=True)
df_returns = convert_price_to_return(df_prices)
# how="all" drops only columns that are entirely NaN (e.g. delisted tickers
# with no price data); partially observed stocks are kept.
df_returns.dropna(axis=1, how="all", inplace=True)
df_returns.set_index("Date", inplace=True)
# After set_index, every remaining column is a ticker, so count all of them.
# (The previous `columns[1:]` was meant to skip a Date column that is no
# longer present, and under-counted the stocks by one.)
print("Total number of stocks: ", len(df_returns.columns))
df_returns.head()
```
#### Convert the dataframe to CSV and save
```
df_returns.to_csv("ppp_returns.csv", index=True)
```
## Step 4: Merge Text and Tabular Datasets
The stock returns and the SEC forms are saved in earlier code blocks into CSV files. In this step, you'll learn how to read in the files and merge the text data with the tabular data.
* Line up the returns from day -5 before the filing date to day +5 after the filing date. Including the return on the filing date itself, we get 11 days of returns around the filing date.
Three types of returns are considered here:
> **Ret** - stock return
> **MktRet** - S&P 500 return
> **NetRet** - difference between `Ret` and `MktRet`
* Merge the SEC text data and the tabular data with the `build_tabText` API. We need to see how returns evolve around the filing date.
#### Read in the return data and the text data
```
%%time
df_returns = pd.read_csv("ppp_returns.csv") # Tabular/numeric data
df_sec = pd.read_csv("ppp_10kq_8k_data.csv") # Text data
```
Define helper functions to create 3 types of returns for 5 days before and 5 days after the filing date.
The functions fill in returns for the ticker and corresponding S&P return.
```
%%time
def fillReturn(df_returns, ticker, dt, displacement):
    """Return a (2*displacement + 1)-long window of returns for *ticker*
    centered on date *dt*.

    The window covers `displacement` days before the filing date, the filing
    date itself, and `displacement` days after it. Windows that run off either
    end of the sample are padded with NaN. If the ticker is absent from
    `df_returns`, the whole window is NaN.
    """
    if ticker not in df_returns.columns:
        return [np.nan] * (2 * displacement + 1)
    series = df_returns[ticker]
    # Trailing slice up to and including dt: displacement days before + day 0.
    before = list(series.loc[:dt][-(displacement + 1):])
    # Slice strictly after dt: displacement days following the filing.
    after = list(series.loc[dt:][1:(displacement + 1)])
    short_before = (displacement + 1) - len(before)
    if short_before > 0:
        before = [np.nan] * short_before + before
    short_after = displacement - len(after)
    if short_after > 0:
        after = after + [np.nan] * short_after
    return before + after
def create_df_5_days_return(df_returns):
    """Reshape a wide daily-returns table into one row per (date, ticker).

    Each output row holds an 11-day window (-5..+5 trading days around the
    date) of three return series: the stock's own return (Ret*), the S&P 500
    return taken from the "^GSPC" column (MktRet*), and their difference
    (NetRet*). Edge windows are NaN-padded by fillReturn.
    """
    window = 5
    offsets = [str(i) for i in range(-window, window + 1)]
    cols = (
        ["Date", "ticker"]
        + ["Ret" + o for o in offsets]
        + ["MktRet" + o for o in offsets]
        + ["NetRet" + o for o in offsets]
    )
    rows = {}
    row_id = 0
    # First column is Date; every other column is a ticker.
    for tkr in df_returns.columns[1:]:
        for pos in range(len(df_returns)):
            day = df_returns.Date[pos]
            stock = fillReturn(df_returns, tkr, day, window)
            market = fillReturn(df_returns, "^GSPC", day, window)
            net = [s - m for s, m in zip(stock, market)]
            rows[row_id] = [day, tkr] + stock + market + net
            row_id += 1
    return pd.DataFrame.from_dict(rows, orient="index", columns=cols)
df_returns_trans = create_df_5_days_return(df_returns)
pd.set_option("display.max_columns", 50)
df_returns_trans.head(5)
```
### Create a TabText dataframe
The following code cell calls the `smjsindustry.build_tabText` class to create a multimodal TabText dataframe, merging the tabular data and the text data together; the dataframe should have the `Date` column and a common column ('ticker' in this case) to generate a time series TabText dataset.
```
%%time
# Use build_tabText API to merge text and tabular datasets
tab_text = build_tabText(
df_sec, "ticker", "filing_date", df_returns_trans, "ticker", "Date", freq="D"
)
tab_text.head()
```
#### Write the merged dataframe into a CSV file
```
tab_text.to_csv("ppp_10kq_8k_stock_data.csv", index=False)
```
## Step 5: Machine Learning Analysis
Some of these companies subsequently returned the money, for various reasons:
* They did not need the money,
* Their stock prices were penalized for taking the loans. When they returned the money, the market rewarded them,
* They wished to avoid additional regulatory oversight.
Returning the money results in signaling an improvement in business conditions with a subsequent uptick in stock prices. Thus, an exercise to predict which firms would return the money based on their SEC filings might be of interest.
The following code cells prepare the dataset for ML studies with the following steps:
* It flags all filings of the companies that returned the PPP money with a 1 and the others with a 0. Therefore, an ML model fit to these labels teases out whether the text for companies that retain PPP money is distinguishable from text of companies that return PPP money.
The resultant dataframe from the previous steps is stored as a CSV file titled `ppp_model_TabText.csv` (354 MB). This file contains both text and numerical columns of data.
#### Read in the TabText dataframe and get the returned ticker list
```
tab_text = pd.read_csv("ppp_10kq_8k_stock_data.csv")
ppp_tickers_returned = pd.read_excel(
"ppp_tickers_returned.xlsx", index_col=None, sheet_name=0, engine="openpyxl"
)
print("Number of PPP Returned tickers =", ppp_tickers_returned.shape[0])
ticker_list_returned = list(set(ppp_tickers_returned.ticker))
tab_text["returned"] = [1 if j in ticker_list_returned else 0 for j in tab_text["ticker"]]
tab_text
```
#### Add the `"returned"` label (1,0) to each row as required
```
tab_text["returned"] = [1 if j in ticker_list_returned else 0 for j in tab_text["ticker"]]
tab_text = tab_text.drop(["Date"], axis=1)
tab_text.to_csv("ppp_model_TabText.csv", index=False)
```
You can start examining the mean return in the 5 days before the filing (-5,0) and 5 days after the filing (0,+5) to see how the firms that returned the money fared, compared to those that did not return the money. You'll see how the mean excess return (over the S&P 500 return) is calculated for each of the two groups.
#### Read in the TabText dataframe
```
df = pd.read_csv("ppp_model_TabText.csv")
print(df.shape)
print(df.columns)
```
Next, the following cell curates the TabText dataframe by creating a cumulative (net of market) return for the 5 days before the filing (`df["First5"]`) and the 5 days after the filing (`df["Second5"]`). You can also see the various feature columns shown in the dataframe as shown in the following cell.
```
# Add up the returns for days (-5,0) denoted "First5" and days (0,5) denoted second 5
# Note that it is actually 6 days of returns.
df["First5"] = (
df["NetRet-5"]
+ df["NetRet-4"]
+ df["NetRet-3"]
+ df["NetRet-2"]
+ df["NetRet-1"]
+ df["NetRet0"]
)
df["Second5"] = (
df["NetRet5"] + df["NetRet4"] + df["NetRet3"] + df["NetRet2"] + df["NetRet1"] + df["NetRet0"]
)
df.head()
res = df.groupby(["returned"]).count()["ticker"]
print(res)
print("Baseline accuracy =", res[0] / sum(res))
df.groupby(["returned"]).mean()[["First5", "Second5"]]
```
From the output of the preceding cell, the mean return for the `"First5"` set is slightly worse for the `"returned=0"` case and the mean return for the `"Second5"` set is higher for the `"returned=1"` case. Maybe firms that returned the money were signalling to the market that they were in good shape and the market rewarded them with a stock price bounce.
## Step 6: Machine Learning on the TabText Dataframe
In this notebook, an AutoGluon model is used to analyze how leading stock returns for 5 days (numerical data) and 10-K/Q, 8-K filings (text) are related to each company's decision to accept or return the money.
### Train an AutoGluon Model for Classification
Here, you'll see how easy it is to undertake a seamless ML on multimodal data (TabText). In this section, you'll learn how to use one of the open source AWS libraries known as AutoGluon, which is a part of the Gluon NLP family of tools. To learn more, see [GluonNLP: NLP made easy](https://nlp.gluon.ai/).
In particular, we use the AutoGluon-Tabular model, which is designed for TabText and has superior performance. For more information about the model, see [AutoGluon-Tabular: Robust and Accurate AutoML for Structured Data](https://arxiv.org/abs/2003.06505).
For a quick start, see [Predicting Columns in a Table - Quick Start](https://auto.gluon.ai/tutorials/tabular_prediction/tabular-quickstart.html). To find the AutoGluon-Tabular model in AWS Marketplace, see [AutoGluon-Tabular](https://aws.amazon.com/marketplace/pp/Amazon-Web-Services-AutoGluon-Tabular/prodview-n4zf5pmjt7ism).
The AutoGluon-Tabular model processes the data and trains a diverse ensemble of ML models to create a “predictor” which is able to predict the `"returned"` label in this data. This example uses both return and text data to build a model.
#### Create a sample dataset
For demonstration purposes, take a sample from the original dataset to reduce the time for training.
```
sample_df = pd.concat(
[df[df["returned"] == 1].sample(n=500), df[df["returned"] == 0].sample(n=500)]
).sample(frac=1)
```
Save the dataframe into a CSV file.
```
sample_df.to_csv("ppp_model_sample_input.csv", index=False)
```
#### Split the sample dataset into a training dataset and a test dataset
```
from sklearn.model_selection import train_test_split
sample_df_ag = sample_df[["First5", "text", "returned"]]
train_data, test_data = train_test_split(sample_df_ag, test_size=0.2, random_state=123)
train_data.to_csv("train_data.csv", index=False)
test_data.to_csv("test_data.csv", index=False)
train_s3_path = session.upload_data(
"train_data.csv", bucket=bucket, key_prefix=ppp_folder + "/" + "data"
)
test_s3_path = session.upload_data(
"test_data.csv", bucket=bucket, key_prefix=ppp_folder + "/" + "data"
)
config_s3_path = session.upload_data(
os.path.join("code", "config.yaml"), bucket=bucket, key_prefix=ppp_folder + "/" + "config"
)
```
### Run a SageMaker training job
The training job takes around 10 minutes with the sample dataset. If you want to train a model with your own data, you might need to update the training script `train.py` or the configuration file `config.yaml` in the `code` folder. If you want to use a GPU instance to achieve better accuracy, replace `train_instance_type` with the desired GPU instance.
```
ag = AutoGluonTraining(
role=role,
entry_point="code/train.py",
region=region,
instance_count=1,
instance_type=train_instance_type,
framework_version="0.3.1",
base_job_name="jumpstart-example-classic-gecko-ppp",
enable_network_isolation=True, # Set enable_network_isolation=True to ensure a security running environment
)
ag.fit(
{"config": config_s3_path, "train": train_s3_path, "test": test_s3_path},
)
```
#### Download Model Outputs
Download the following files (training job artifacts) from the SageMaker session's default S3 bucket:
* `leaderboard.csv`
* `predictions.csv`
* `feature_importance.csv`
* `evaluation.json`
```
s3_client = boto3.client("s3")
job_name = ag._current_job_name
s3_client.download_file(bucket, f"{job_name}/output/output.tar.gz", "output.tar.gz")
with tarfile.open("output.tar.gz", "r:gz") as so:
so.extractall()
```
#### Score details of each model
```
leaderboard = pd.read_csv("leaderboard.csv")
leaderboard
```
#### The result of the training evaluation
```
with open("evaluation.json") as f:
data = json.load(f)
print(data)
```
The `evaluation.json` file reports all the usual metrics as well as the Matthews correlation coefficient (MCC). This is a more comprehensive metric for an unbalanced dataset. It ranges from $-1$ to $+1$, where $-1$ implies perfect misclassification and $+1$ is perfect classification.
>**Reference**: Davide Chicco & Giuseppe Jurman (2020), *[The advantages of the Matthews correlation coefficient (MCC) over F1 score and accuracy in binary classification evaluation](https://bmcgenomics.biomedcentral.com/articles/10.1186/s12864-019-6413-7)*, BMC Genomics volume 21, Article number: 6
>**Note**: Various metrics are discussed in [Receiver operating characteristic](https://en.wikipedia.org/wiki/Receiver_operating_characteristic) in Wikipedia, the free encyclopedia.
## Step 7: Deploy the endpoint
In this step, we deploy the model artifact from **Step 6** and use for inference. We use `AutoGluonInferenceModel` defined in `ag_model.py` to create an AutoGluon model and [SageMaker model deployment](https://sagemaker.readthedocs.io/en/stable/frameworks/mxnet/using_mxnet.html#deploy-mxnet-models) APIs to deploy an endpoint. If you bring your own data for inference, you may also need to update the inference script `inference.py` in the `code` folder.
```
training_job_name = ag.latest_training_job.name
print("Training job name: ", training_job_name)
ag_estimator = Estimator.attach(training_job_name)
ag_estimator.model_data
endpoint_name = "jumpstart-example-classic-gecko-ppp-endpoint"
ag_model = AutoGluonInferenceModel(
model_data=ag.model_data,
role=role,
region=region,
framework_version="0.3.1",
instance_type=inference_instance_type,
entry_point="code/inference.py",
predictor_cls=AutoGluonTabularPredictor,
name="jumpstart-example-classic-gecko-ppp-model",
)
ppp_predictor = ag_model.deploy(
initial_instance_count=1,
instance_type=inference_instance_type,
endpoint_name=endpoint_name,
)
```
## Step 8: Test the endpoint
We randomly select some data from the test dataset and test the endpoint.
```
test_endpoint_data = test_data.sample(n=5).drop(["returned"], axis=1)
test_endpoint_data
ppp_predictor.predict(test_endpoint_data.values)
```
## Clean Up
After you are done using this notebook, delete the model artifacts and other resources to avoid any incurring charges.
>**Caution:** You need to manually delete resources that you may have created while running the notebook, such as Amazon S3 buckets for model artifacts, training datasets, processing artifacts, and Amazon CloudWatch log groups.
For more information about cleaning up resources, see [Clean Up](https://docs.aws.amazon.com/sagemaker/latest/dg/ex1-cleanup.html) in the *Amazon SageMaker Developer Guide*.
```
ppp_predictor.delete_model()
ppp_predictor.delete_endpoint()
```
## License
The SageMaker JumpStart Industry product and its related materials are under the [Legal License Terms](https://jumpstart-cache-alpha-us-west-2.s3.us-west-2.amazonaws.com/smfinance-notebook-dependency/legal_file.txt).
| github_jupyter |
# 手写字母XO数据预处理
## 概要
先尝试做一下分类, 通过卷积神经网络分类,然后完成Tic-Tac-Toe的游戏.
数据集可以来自于手写字母,提取字母中的`X`跟`O` .
## EMNIST数据集
https://www.nist.gov/node/1298471/emnist-dataset
需要其中的**EMNIST Letters: 145,600 characters. 26 balanced classes.**数据集
下载里面的gzip压缩包,只保留其中的`emnist-letters` 部分,手写字母数据集.
解压缩里面的`gz`后缀的压缩包,里面是二进制格式的训练集跟测试集以及标签数据.

安装拓展包`mlxtend`
```
sudo pip3 install mlxtend
```
## 载入数据集
```
from mlxtend.data import loadlocal_mnist
import matplotlib.pyplot as plt
import numpy as np
# 标签数据的二进制文件路径
LABEL_FILE = "../../dataset/emnist-letters/emnist-letters-train-labels-idx1-ubyte";
# 图像数据的二进制文件路径
IMAGE_FILE = "../../dataset/emnist-letters/emnist-letters-train-images-idx3-ubyte";
# 载入训练集与标签
X, y = loadlocal_mnist(images_path=IMAGE_FILE,labels_path=LABEL_FILE)
```
X是一个长度为124800x784的矩阵,每一行就是一张图片对应的向量, 维度是(784,)
```
X.shape
```
## 显示样例数据集
如果你想可视化图像需要将向量转换为28x28的矩阵,接下来再把矩阵转置一下.
另外标签的范围是1-26, 代表`a`-`z` 26个字母.
```
def array2img(x):
    """Reshape a flat (784,) EMNIST vector into a 28x28 image.

    EMNIST stores pixels column-major, so the reshaped matrix is
    transposed to display upright.
    """
    return x.reshape((28, 28)).T
def num2letter(num):
    """Map a 1-based EMNIST letter label (1-26) to its lowercase letter."""
    return chr(num + ord('a') - 1)
# 显示图片
plt.imshow(array2img(X[14]), cmap='gray')
print(num2letter(y[14]))
```
## 筛选数据
去除无关字符的测试集, 只保留`X`跟`O`对应的数据集还有标签,并将标签替换为0跟1
| 字母 | 标签 |
| ---- | ---- |
| O | 0 |
| X | 1 |
```
def letter2label(letter):
    """Map a lowercase letter to its 1-based EMNIST label ('a' -> 1, ..., 'z' -> 26)."""
    return 1 + ord(letter) - ord('a')
# 获取字母X跟O对应的序号
xo_indexs = np.where(np.logical_or(y==letter2label('o'), y==letter2label('x')))
xo_indexs
# 索引数据
X_train = X[xo_indexs]
y_train = y[xo_indexs]
# 标签转换为0,1
y_train = np.uint8(y_train == letter2label('x'))
```
## 转换为2D数据集
将原来的1维的数据重新转换为二维
```
image_list = []
for img_idx in range(len(X_train)):
image_list.append(array2img(X_train[img_idx]).tolist())
# 训练集图片
train_images = np.array(image_list).astype('uint8')
# 显示其中一个样例图片
plt.imshow(train_images[0], cmap='gray')
# 训练集的标签
train_labels = y_train
```
## 测试集执行相同操作
```
test_image_file = '../../dataset/emnist-letters/emnist-letters-test-images-idx3-ubyte'
test_label_file = '../../dataset/emnist-letters/emnist-letters-test-labels-idx1-ubyte'
X_test, y_test = loadlocal_mnist(images_path=test_image_file,labels_path=test_label_file)
xo_indexs = np.where(np.logical_or(y_test==letter2label('o'), y_test==letter2label('x')))
X_test = X_test[xo_indexs]
y_test = y_test[xo_indexs]
y_test = np.uint8(y_test == letter2label('x'))
image_list = []
for img_idx in range(len(X_test)):
image_list.append(array2img(X_test[img_idx]).tolist())
test_images = np.array(image_list).astype('uint8')
test_labels = y_test
```
## 序列化保存数据
```
import pickle
# Bundle all four arrays into a single dictionary-shaped dataset
dataset = {
    'train_images': train_images,
    'train_labels': train_labels,
    'test_images': test_images,
    'test_labels': test_labels
}
# Serialize the dataset to disk for later training runs
with open('../../common/xo_dataset.bin', 'wb') as f:
    pickle.dump(dataset, f)
```
## 导出部分测试图片
```
import cv2
# Export the first 20 test images as PNGs for manual inspection
for i in range(20):
    cv2.imwrite('../../common/test_dataset/test_{}.png'.format(i), test_images[i])
```
| github_jupyter |
# HEX algorithm **Kopuru Vespa Velutina Competition**
**Linear Regression model**
Purpose: Predict the number of Nests in each of Biscay's 112 municipalities for the year 2020.
Output: *(WaspBusters_20210512_batch_OLSyears.csv)*
@authors:
* mario.bejar@student.ie.edu
* pedro.geirinhas@student.ie.edu
* a.berrizbeitia@student.ie.edu
* pcasaverde@student.ie.edu
## Libraries
```
# Base packages -----------------------------------
import numpy as np
import pandas as pd
# Visualization -----------------------------------
from matplotlib import pyplot
# Scaling data ------------------------------------
from sklearn import preprocessing
# Linear Regression -------------------------------
from statsmodels.formula.api import ols
#from sklearn.datasets import make_regression
from sklearn.linear_model import LinearRegression
```
## Functions
```
# Function that checks if final Output is ready for submission or needs revision
def check_data(HEX):
    """Validate the submission dataframe before export, printing one line per check.

    Checks performed on `HEX`:
      * shape must be exactly (112, 3) — one row per Biscay municipality;
      * "CODIGO MUNICIPIO" must contain 112 unique codes;
      * no "NIDOS 2020" value may be negative.
    Finally prints the total number of predicted nests for 2020.
    """
    if HEX.shape == (112, 3):
        print(HEX.shape, ": Shape is correct.")
    else:
        print(HEX.shape, ": Shape is **INCORRECT!**")
    if HEX["CODIGO MUNICIPIO"].nunique() == 112:
        print(HEX["CODIGO MUNICIPIO"].nunique(), ": Number of unique municipalities is correct.")
    else:
        print(HEX["CODIGO MUNICIPIO"].nunique(), ": Number of unique municipalities is **INCORRECT!**")
    if any(HEX["NIDOS 2020"] < 0):
        # Bug fix: the test is strictly `< 0`, but the original message claimed
        # "NESTS <= 0", contradicting both the condition and the success branch.
        print("**INCORRECT!** At least one municipality has NESTS < 0.")
    else:
        print("Great! All municipalities have NESTS >= 0.")
    print("The Total 2020 Nests' Prediction is", int(HEX["NIDOS 2020"].sum()))
```
## Get the data
```
# Load the model-ready train/predict datasets produced by the QUEEN pipeline
QUEEN_train = pd.read_csv('./WBds03_QUEENtrainYEARS.csv', sep=',')
QUEEN_predict = pd.read_csv('./WBds03_QUEENpredictYEARS.csv', sep=',')
# Municipality -> cluster assignments from external open data
clusters = pd.read_csv("../../../Other_open_data/cluster.csv")
#QUEEN_predict.isnull().sum()
QUEEN_train.shape
QUEEN_predict.shape
```
## Caracterize in clusters
```
# Attach the cluster label to every row, joining on the municipality code
QUEEN_train = pd.merge(QUEEN_train, clusters, how = 'left', left_on = 'municip_code', right_on = 'CODIGO MUNICIPIO')
QUEEN_predict = pd.merge(QUEEN_predict, clusters, how = 'left', left_on = 'municip_code', right_on = 'CODIGO MUNICIPIO')
# The duplicated join key coming from the clusters file is redundant
QUEEN_train.drop(columns = ['CODIGO MUNICIPIO'], inplace=True)
QUEEN_predict.drop(columns = ['CODIGO MUNICIPIO'], inplace=True)
# Rows with no cluster assignment fall into a catch-all cluster 4.
# NOTE(review): fillna(4) fills NaNs in EVERY column, not only Cluster —
# confirm no other column can be NaN at this point.
QUEEN_train.fillna(4, inplace=True)
QUEEN_predict.fillna(4, inplace=True)
QUEEN_train.shape
QUEEN_predict.shape
#QUEEN_train.isnull().sum()
#QUEEN_predict.isnull().sum()
QUEEN_train.Cluster.value_counts()
```
## Determine feature importance
```
# Features: everything except the identifiers and the NESTS target
X = QUEEN_train.drop(columns = ['municip_name', 'municip_code', 'NESTS', 'station_code'])
y = QUEEN_train['NESTS']
# Scale the datasets using MinMaxScaler
scalators = X.columns
X[scalators] = preprocessing.minmax_scale(X[scalators])
# define the model
model_fi = LinearRegression()
# fit the model
model_fi.fit(X, y)
# get importance (linear-regression coefficients on the scaled features)
importance = model_fi.coef_
# summarize feature importance
for i,v in enumerate(importance):
    print('Feature: %0s, Score: %.5f' % (X.columns[i],v))
# plot feature importance
pyplot.bar([x for x in range(len(importance))], importance)
pyplot.show()
# Keep only the strongest coefficients (|coef| > 15) as candidate features
for i,v in enumerate(importance):
    if abs(v) > 15:
        print('Feature: %0s, Score: %.2f' % (X.columns[i],v))
```
## Train the model
### With the variables suggested by the Feature Importance method
```
# OLS on the full set of features flagged by the importance screening above
model = ols('NESTS ~ year_offset + food_fruit + food_apple + food_txakoli + food_kiwi + lev_max + lev_mid + rain + rain_cum + rain_max_day + temp_avg + temp_max_abs + temp_max_avg + wind_max + population',\
            data=QUEEN_train).fit()
print(model.summary())
```
### Backward elimination
```
# Reduced model after backward elimination of non-significant regressors
model = ols('NESTS ~ year_offset + food_txakoli + lev_mid + temp_avg + wind_max',\
            data=QUEEN_train).fit()
print(model.summary())
```
### With the Cluster Categorical
```
# Same reduced model plus the cluster id as a categorical regressor
model = ols('NESTS ~ year_offset + food_txakoli + lev_mid + temp_avg + wind_max + C(Cluster)',\
            data=QUEEN_train).fit()
print(model.summary())
```
## Predict 2020's nests
```
# Predict 2020 nests with the last fitted OLS model
y_2020 = model.predict(QUEEN_predict)
y_2020
# Any municipality resulting in NESTS<0 is equivalent to = 0
y_2020[y_2020 < 0] = 0
y_2020
QUEEN_predict['NESTS'] = y_2020
# One row per municipality: sum the per-station predictions
HEX = QUEEN_predict.loc[:,['municip_code','municip_name','NESTS']].groupby(by=['municip_code','municip_name'], as_index=False).sum()
y_2020.sum()
```
## Manual adjustments
```
# Force six municipalities to zero nests.
# NOTE(review): the rationale for this hand-picked list is not visible here —
# confirm these codes are municipalities known to report no nests.
HEX.loc[HEX.municip_code.isin([48022, 48071, 48088, 48074, 48051, 48020]), 'NESTS'] = 0
HEX.loc[HEX.municip_code.isin([48022, 48071, 48088, 48074, 48051, 48020]), :]
HEX.columns = ["CODIGO MUNICIPIO", "NOMBRE MUNICIPIO", "NIDOS 2020"] # change column names to Spanish (Competition template)
```
## Verify dataset format
```
check_data(HEX)
```
## Export dataset for submission
```
HEX.to_csv('WaspBusters_20210519_OLSyearsClusters.csv', index=False)
```
| github_jupyter |
# Problem Set 1
See “Check Your Understanding” from [basics](../python_fundamentals/basics.ipynb) and [collections](../python_fundamentals/collections.ipynb)
## Question 1
Below this cell, add
1. A markdown cell with
- two levels of headings;
- a numbered list;
- an unnumbered list;
- text with a `%` and a `-` sign (hint: look at this cell and [escape characters](https://www.markdownguide.org/basic-syntax/#characters-you-can-escape))
- backticked code (see [https://github.com/adam-p/markdown-here/wiki/Markdown-Cheatsheet](https://github.com/adam-p/markdown-here/wiki/Markdown-Cheatsheet))
1. A markdown cell with
- the [quadratic formula](https://en.wikipedia.org/wiki/Quadratic_formula) embedded in the cell using [LaTeX](https://jupyter-notebook.readthedocs.io/en/stable/examples/Notebook/Typesetting%20Equations.html)
## Question 2
Use the following code, which sets up variables `a, b,` and `c`, to find the roots using the quadratic formula.
$$
a x^2 + b x + c = 0
$$
```
a = 1.0
b = 2.0
c = 1.0
# Your code goes here
```
## Question 3
In the cell below, use tab completion to find a function from the time
module that displays the **local** time.
Use `time.FUNC_NAME?` (where `FUNC_NAME` is replaced with the name
of the function you found) to see information about that function,
then call the function. (Hint: look for something involving the word
`local`).
```
import time
# Your code goes here
# time. # uncomment and hit <TAB> to see functions
```
## Question 4
Create the following variables:
- `D`: A floating point number with the value 10,000
- `r`: A floating point number with the value 0.025
- `T`: An integer with the value 30
Compute the present discounted value of a payment (`D`) made
in `T` years, assuming an interest rate of 2.5%. Save this value to
a new variable called `PDV` and print your output.
Hint: The formula is
$$
\text{PDV} = \frac{D}{(1 + r)^T}
$$
```
# Your code goes here
```
## Question 5
How could you use the variables `x` and `y` to create the sentence
`Hello World` ?
Hint: Think about how to represent a space as a string.
```
x = "Hello"
y = "World"
# Your code goes here
```
## Question 6
Suppose you are working with price data and come across the value
`"€6.50"`.
When Python tries to interpret this value, it sees the value as the string
`"€6.50"` instead of the number `6.50`. (Quiz: why is this a
problem? Think about the examples above.)
In this exercise, your task is to convert the variable `price` below
into a number.
*Hint*: Once the string is in a suitable format, you can call
`float(clean_price)` to make it a number.
```
price = "€6.50"
# Your code goes here
```
## Question 7
Use Python formatting (e.g. `print(f"text {somecode}")` where `somecode` is a valid expression or variable name) to produce the following
output.
```text
The 1st quarter revenue was $110M
The 2nd quarter revenue was $95M
The 3rd quarter revenue was $100M
The 4th quarter revenue was $130M
```
```
# Your code goes here
```
## Question 8
Define two lists y and z.
They can contain **anything you want**.
Check what happens when you do y + z.
When you have finished that, try 2 * x and x * 2 where x represents the object you created from y + z.
Briefly explain.
```
y = [] # fill me in!
z = [] # fill me in!
# Your code goes here
```
| github_jupyter |
# Seminar 06. Hunt the Wumpus, part 3
### Exercise 1
What will be print out?
**Answer: **
```
x = [1, 2, 3]
y = x
y.append(4)
print(x)
```
### Exercise 2
What will be printed out?
**Answer:**
```
def change_it(y):
y.append(4)
x = [1, 2, 3]
change_it(x)
print(x)
```
### Exercise 3
What will be printed out?
**Answer #1:**
```
x = 3
y = x
y += 5
print(x)
```
**Answer #2:**
```
x = [1, 2, 3]
y = x
y[1] += x[1]
print(x)
```
### Exercise 4
What will be printed?
**Answer: **
```
x = [1, 2, 3]
y = tuple(x)
x.append(4)
print(y)
```
### Exercise 5
What is going to happen?
**Answer #1:**
```
def change_it(y):
y = list(y)
y.append(4)
print(y)
x = [1, 2, 3]
change_it(x)
```
**Answer #2:**
```
def change_it(y):
y = list(y)
y.append(4)
x = [1, 2, 3]
change_it(x)
print(x)
```
**Answer #3:**
```
x = [1, 2, 3]
y = tuple(x)
z = list(y)
z.append(4)
print(x)
```
### Exercise 6.
Recall that both `list()` and `x[:]` create a new list. This means you can use either one to create a copy of a list directly, without an intermediate tuple step. What will happen below?
**Answer :**
```
x = [1, 2, 3]
y = x[:]
x.append(4)
print(y)
```
### Exercise 7, extending find_empty_cave function
Add a second argument to it (call it `caves_taken` or something along these lines) and modify the code so that it keeps randomly generating a cave index until it is _not_ in the `caves_taken` list.
### Exercise 8, using updated find_empty_cave function
Modify the main script to take advantage of the updated `find_empty_cave()`.
### Exercise 9, for loop
What will be printed in each case? Write the answer down before running the code.
**Answer #1:**
```
for letter in ["A", "B", "C", "D"]:
print(letter)
```
**Answer #2:**
```
for letter in ["A", "B", "C", "D"][2:]:
print(letter)
```
**Answer #3:**
```
for letter in ["A", "B", "C", "D"]:
print(letter)
break
```
**Answer #4:**
```
for letter in ["A", "B", "C", "D"]:
if letter == "C":
break
print(letter)
```
**Answer #5:**
```
for letter in ["A", "B", "C", "D"]:
if letter == "C":
print(letter)
```
**Answer #6:**
```
for letter in []:
print("Letter is %s"%(letter))
```
### Exercise 10, range
How many times the for loop will be repeated and what will be printed?
**Answer #1:**
```
for item in range(4):
print(item)
```
**Answer #2:**
```
for item in range(1, 4):
print(item)
```
**Answer #3:**
```
for item in range(1, 4, 2):
print(item)
```
**Answer #4:**
```
for item in range(4, 1, 1):
print(item)
```
**Answer #5:**
```
for item in range(4, 1, -1):
print(item)
```
**Answer #6:**
```
for item in range(4, 4):
print(item)
```
### Exercise 11, place bottomless pits
create a new constant `NUMBER_OF_BOTTOMLESS_PITS` and a new variable (`bottomless_pits`) with a list of indexes of caves with the bottomless pits in them. Add bottomless pits in a for loop by getting a cave index from `find_empty_cave` function (think about its parameters).
### Exercise 12, let the player fall into a bottomless pit
On each iteration, check whether player is currently in a cave that has a bottomless pit. If that is the case, print a sad game over message and `break` out of the loop. Modify the `while` loop condition to `while True:`, so that the only way to end the game is to fall into the pit.
### Exercise 13, warn the player about bottomless pits
Before asking for input, in a for loop, iterate over the connected caves and every time cave has a bottomless pit in it, print "You feel a breeze!".
### Exercise 14, place bats
Define a constant that determines the number of bat colonies (e.g., `NUMBER_OF_BATS`, set to `2` or some other number you prefer) and a variable that holds a list with indexes of caves with bats (e.g., `bats`); then pick random empty caves and store them in `bats` in exactly the same way you did it with bottomless pits. Print out the location of the bats for diagnostic purposes.
### Exercise 15, warn about bats
In the same loop over connected caves that you use to warn the player about bottomless pits, add another check that prints out `"You hear flapping!"` every time the connected cave have bats in it.
### Exercise 16, player is carried by the bats to a random cave
If the player is in the cave with bats, they transport them to a _random_ cave. If the cave has bat in it, they carry them again. If the cave has bottomless pit in it, the player falls into it.
| github_jupyter |
##### Copyright 2019 The TensorFlow Authors.
```
#@title Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
```
# Federated Learning for Text Generation
<table class="tfo-notebook-buttons" align="left">
<td>
<a target="_blank" href="https://www.tensorflow.org/federated/tutorials/federated_learning_for_text_generation"><img src="https://www.tensorflow.org/images/tf_logo_32px.png" />View on TensorFlow.org</a>
</td>
<td>
<a target="_blank" href="https://colab.research.google.com/github/tensorflow/federated/blob/v0.14.0/docs/tutorials/federated_learning_for_text_generation.ipynb"><img src="https://www.tensorflow.org/images/colab_logo_32px.png" />Run in Google Colab</a>
</td>
<td>
<a target="_blank" href="https://github.com/tensorflow/federated/blob/v0.14.0/docs/tutorials/federated_learning_for_text_generation.ipynb"><img src="https://www.tensorflow.org/images/GitHub-Mark-32px.png" />View source on GitHub</a>
</td>
</table>
**NOTE**: This colab has been verified to work with the [latest released version](https://github.com/tensorflow/federated#compatibility) of the `tensorflow_federated` pip package, but the Tensorflow Federated project is still in pre-release development and may not work on `master`.
This tutorial builds on the concepts in the [Federated Learning for Image Classification](federated_learning_for_image_classification.ipynb) tutorial, and demonstrates several other useful approaches for federated learning.
In particular, we load a previously trained Keras model, and refine it using federated training on a (simulated) decentralized dataset. This is practically important for several reasons . The ability to use serialized models makes it easy to mix federated learning with other ML approaches. Further, this allows use of an increasing range of pre-trained models --- for example, training language models from scratch is rarely necessary, as numerous pre-trained models are now widely available (see, e.g., [TF Hub](https://www.tensorflow.org/hub)). Instead, it makes more sense to start from a pre-trained model, and refine it using Federated Learning, adapting to the particular characteristics of the decentralized data for a particular application.
For this tutorial, we start with a RNN that generates ASCII characters, and refine it via federated learning. We also show how the final weights can be fed back to the original Keras model, allowing easy evaluation and text generation using standard tools.
```
#@test {"skip": true}
!pip install --quiet --upgrade tensorflow_federated
import collections
import functools
import os
import time
import numpy as np
import tensorflow as tf
import tensorflow_federated as tff
tf.compat.v1.enable_v2_behavior()
np.random.seed(0)
# Test the TFF is working:
tff.federated_computation(lambda: 'Hello, World!')()
```
## Load a pre-trained model
We load a model that was pre-trained following the TensorFlow tutorial
[Text generation using a RNN with eager execution](https://www.tensorflow.org/tutorials/sequences/text_generation). However,
rather than training on [The Complete Works of Shakespeare](http://www.gutenberg.org/files/100/100-0.txt), we pre-trained the model on the text from the Charles Dickens'
[A Tale of Two Cities](http://www.ibiblio.org/pub/docs/books/gutenberg/9/98/98.txt)
and
[A Christmas Carol](http://www.ibiblio.org/pub/docs/books/gutenberg/4/46/46.txt).
Other than expanding the vocabulary, we didn't modify the original tutorial, so this initial model isn't state-of-the-art, but it produces reasonable predictions and is sufficient for our tutorial purposes. The final model was saved with `tf.keras.models.save_model(include_optimizer=False)`.
We will use federated learning to fine-tune this model for Shakespeare in this tutorial, using a federated version of the data provided by TFF.
### Generate the vocab lookup tables
```
# A fixed vocabulary of ASCII chars that occur in the works of Shakespeare and Dickens:
vocab = list('dhlptx@DHLPTX $(,048cgkoswCGKOSW[_#\'/37;?bfjnrvzBFJNRVZ"&*.26:\naeimquyAEIMQUY]!%)-159\r')
# Lookup tables in both directions: character -> index and index -> character
char2idx = dict(zip(vocab, range(len(vocab))))
idx2char = np.array(vocab)
```
### Load the pre-trained model and generate some text
```
def load_model(batch_size):
    """Download and load the pre-trained Dickens char-RNN for `batch_size`.

    Only batch sizes 1 and 8 were exported; any other value raises
    AssertionError. The model is loaded uncompiled (no optimizer state).
    """
    urls = {
        1: 'https://storage.googleapis.com/tff-models-public/dickens_rnn.batch1.kerasmodel',
        8: 'https://storage.googleapis.com/tff-models-public/dickens_rnn.batch8.kerasmodel'}
    assert batch_size in urls, 'batch_size must be in ' + str(urls.keys())
    url = urls[batch_size]
    local_file = tf.keras.utils.get_file(os.path.basename(url), origin=url)
    return tf.keras.models.load_model(local_file, compile=False)

# From https://www.tensorflow.org/tutorials/sequences/text_generation
def generate_text(model, start_string):
    """Sample 200 characters from `model`, seeded with `start_string`.

    Each step draws the next char categorically from the model's logits
    (temperature 1.0) and feeds it back as the next input.
    """
    num_generate = 200
    input_eval = [char2idx[s] for s in start_string]
    input_eval = tf.expand_dims(input_eval, 0)
    text_generated = []
    temperature = 1.0  # lower values -> more conservative sampling
    model.reset_states()
    for i in range(num_generate):
        predictions = model(input_eval)
        predictions = tf.squeeze(predictions, 0)
        predictions = predictions / temperature
        predicted_id = tf.random.categorical(
            predictions, num_samples=1)[-1, 0].numpy()
        input_eval = tf.expand_dims([predicted_id], 0)
        text_generated.append(idx2char[predicted_id])
    return (start_string + ''.join(text_generated))

# Text generation requires a batch_size=1 model.
keras_model_batch1 = load_model(batch_size=1)
print(generate_text(keras_model_batch1, 'What of TensorFlow Federated, you ask? '))
```
## Load and Preprocess the Federated Shakespeare Data
The `tff.simulation.datasets` package provides a variety of datasets that are split into "clients", where each client corresponds to a dataset on a particular device that might participate in federated learning.
These datasets provide realistic non-IID data distributions that replicate in simulation the challenges of training on real decentralized data. Some of the pre-processing of this data was done using tools from the [Leaf project](https://arxiv.org/abs/1812.01097) ([github](https://github.com/TalwalkarLab/leaf)).
```
train_data, test_data = tff.simulation.datasets.shakespeare.load_data()
```
The datasets provided by `shakespeare.load_data()` consist of a sequence of
string `Tensors`, one for each line spoken by a particular character in a
Shakespeare play. The client keys consist of the name of the play joined with
the name of the character, so for example `MUCH_ADO_ABOUT_NOTHING_OTHELLO` corresponds to the lines for the character Othello in the play *Much Ado About Nothing*. Note that in a real federated learning scenario
clients are never identified or tracked by ids, but for simulation it is useful
to work with keyed datasets.
Here, for example, we can look at some data from King Lear:
```
# Here the play is "The Tragedy of King Lear" and the character is "King".
raw_example_dataset = train_data.create_tf_dataset_for_client(
'THE_TRAGEDY_OF_KING_LEAR_KING')
# To allow for future extensions, each entry x
# is an OrderedDict with a single key 'snippets' which contains the text.
for x in raw_example_dataset.take(2):
print(x['snippets'])
```
We now use `tf.data.Dataset` transformations to prepare this data for training the char RNN loaded above.
```
# Input pre-processing parameters
SEQ_LENGTH = 100
BATCH_SIZE = 8
BUFFER_SIZE = 10000  # For dataset shuffling

# Construct a lookup table to map string chars to indexes,
# using the vocab loaded above:
table = tf.lookup.StaticHashTable(
    tf.lookup.KeyValueTensorInitializer(
        keys=vocab, values=tf.constant(list(range(len(vocab))),
                                       dtype=tf.int64)),
    default_value=0)  # chars outside the vocab map to index 0

def to_ids(x):
    """Convert one example's 'snippets' string tensor to a vector of char ids."""
    s = tf.reshape(x['snippets'], shape=[1])
    chars = tf.strings.bytes_split(s).values
    ids = table.lookup(chars)
    return ids

def split_input_target(chunk):
    """Split a (SEQ_LENGTH + 1)-char sequence into (input, next-char target)."""
    input_text = tf.map_fn(lambda x: x[:-1], chunk)
    target_text = tf.map_fn(lambda x: x[1:], chunk)
    return (input_text, target_text)

def preprocess(dataset):
    """Turn a raw client dataset of text snippets into (input, target) batches."""
    return (
        # Map ASCII chars to int64 indexes using the vocab
        dataset.map(to_ids)
        # Split into individual chars
        .unbatch()
        # Form example sequences of SEQ_LENGTH +1
        .batch(SEQ_LENGTH + 1, drop_remainder=True)
        # Shuffle and form minibatches
        .shuffle(BUFFER_SIZE).batch(BATCH_SIZE, drop_remainder=True)
        # And finally split into (input, target) tuples,
        # each of length SEQ_LENGTH.
        .map(split_input_target))
```
Note that in the formation of the original sequences and in the formation of
batches above, we use `drop_remainder=True` for simplicity. This means that any
characters (clients) that don't have at least `(SEQ_LENGTH + 1) * BATCH_SIZE`
chars of text will have empty datasets. A typical approach to address this would
be to pad the batches with a special token, and then mask the loss to not take
the padding tokens into account.
This would complicate the example somewhat, so for this tutorial we only use full batches, as in the
[standard tutorial](https://www.tensorflow.org/tutorials/sequences/text_generation).
However, in the federated setting this issue is more significant, because many
users might have small datasets.
Now we can preprocess our `raw_example_dataset`, and check the types:
```
example_dataset = preprocess(raw_example_dataset)
print(example_dataset.element_spec)
```
## Compile the model and test on the preprocessed data
We loaded an uncompiled keras model, but in order to run `keras_model.evaluate`, we need to compile it with a loss and metrics. We will also compile in an optimizer, which will be used as the on-device optimizer in Federated Learning.
The original tutorial didn't have char-level accuracy (the fraction
of predictions where the highest probability was put on the correct
next char). This is a useful metric, so we add it.
However, we need to define a new metric class for this because
our predictions have rank 3 (a vector of logits for each of the
`BATCH_SIZE * SEQ_LENGTH` predictions), and `SparseCategoricalAccuracy`
expects only rank 2 predictions.
```
class FlattenedCategoricalAccuracy(tf.keras.metrics.SparseCategoricalAccuracy):
    """SparseCategoricalAccuracy that accepts rank-3 predictions.

    The char-RNN emits logits shaped (BATCH_SIZE, SEQ_LENGTH, len(vocab));
    this metric flattens y_true and y_pred so that every character position
    counts as one prediction, which rank-2 SparseCategoricalAccuracy expects.
    """

    def __init__(self, name='accuracy', dtype=tf.float32):
        super().__init__(name, dtype=dtype)

    def update_state(self, y_true, y_pred, sample_weight=None):
        # Collapse (batch, seq) into one row per character position
        y_true = tf.reshape(y_true, [-1, 1])
        y_pred = tf.reshape(y_pred, [-1, len(vocab), 1])
        return super().update_state(y_true, y_pred, sample_weight)
```
Now we can compile a model, and evaluate it on our `example_dataset`.
```
BATCH_SIZE = 8 # The training and eval batch size for the rest of this tutorial.
keras_model = load_model(batch_size=BATCH_SIZE)
keras_model.compile(
loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),
metrics=[FlattenedCategoricalAccuracy()])
# Confirm that loss is much lower on Shakespeare than on random data
loss, accuracy = keras_model.evaluate(example_dataset.take(5), verbose=0)
print(
'Evaluating on an example Shakespeare character: {a:3f}'.format(a=accuracy))
# As a sanity check, we can construct some completely random data, where we expect
# the accuracy to be essentially random:
random_guessed_accuracy = 1.0 / len(vocab)
print('Expected accuracy for random guessing: {a:.3f}'.format(
a=random_guessed_accuracy))
random_indexes = np.random.randint(
low=0, high=len(vocab), size=1 * BATCH_SIZE * (SEQ_LENGTH + 1))
data = collections.OrderedDict(
snippets=tf.constant(
''.join(np.array(vocab)[random_indexes]), shape=[1, 1]))
random_dataset = preprocess(tf.data.Dataset.from_tensor_slices(data))
loss, accuracy = keras_model.evaluate(random_dataset, steps=10, verbose=0)
print('Evaluating on completely random data: {a:.3f}'.format(a=accuracy))
```
## Fine-tune the model with Federated Learning
TFF serializes all TensorFlow computations so they can potentially be run in a
non-Python environment (even though at the moment, only a simulation runtime implemented in Python is available). Even though we are running in eager mode, (TF 2.0), currently TFF serializes TensorFlow computations by constructing the
necessary ops inside the context of a "`with tf.Graph.as_default()`" statement.
Thus, we need to provide a function that TFF can use to introduce our model into
a graph it controls. We do this as follows:
```
# Clone the keras_model inside `create_tff_model()`, which TFF will
# call to produce a new copy of the model inside the graph that it will
# serialize. Note: we want to construct all the necessary objects we'll need
# _inside_ this method.
def create_tff_model():
    """Build a fresh tff.learning model wrapper around a clone of keras_model."""
    # TFF uses an `input_spec` so it knows the types and shapes
    # that your model expects.
    input_spec = example_dataset.element_spec
    keras_model_clone = tf.keras.models.clone_model(keras_model)
    return tff.learning.from_keras_model(
        keras_model_clone,
        input_spec=input_spec,
        loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),
        metrics=[FlattenedCategoricalAccuracy()])
```
Now we are ready to construct a Federated Averaging iterative process, which we will use to improve the model (for details on the Federated Averaging algorithm, see the paper [Communication-Efficient Learning of Deep Networks from Decentralized Data](https://arxiv.org/abs/1602.05629)).
We use a compiled Keras model to perform standard (non-federated) evaluation after each round of federated training. This is useful for research purposes when doing simulated federated learning and there is a standard test dataset.
In a realistic production setting this same technique might be used to take models trained with federated learning and evaluate them on a centralized benchmark dataset for testing or quality assurance purposes.
```
# This command builds all the TensorFlow graphs and serializes them:
fed_avg = tff.learning.build_federated_averaging_process(
model_fn=create_tff_model,
client_optimizer_fn=lambda: tf.keras.optimizers.SGD(lr=0.5))
```
Here is the simplest possible loop, where we run federated averaging for one round on a single client on a single batch:
```
state = fed_avg.initialize()
state, metrics = fed_avg.next(state, [example_dataset.take(5)])
print('loss={l:.3f}, accuracy={a:.3f}'.format(
l=metrics.loss, a=metrics.accuracy))
```
Now let's write a slightly more interesting training and evaluation loop.
So that this simulation still runs relatively quickly, we train on the same three clients each round, only considering two minibatches for each.
```
def data(client, source=train_data):
    """Return the first 5 preprocessed batches for one client of `source`."""
    return preprocess(
        source.create_tf_dataset_for_client(client)).take(5)

# Train on the same three characters every round (fast, reproducible demo).
clients = ['ALL_S_WELL_THAT_ENDS_WELL_CELIA',
           'MUCH_ADO_ABOUT_NOTHING_OTHELLO',
           'THE_TRAGEDY_OF_KING_LEAR_KING']
train_datasets = [data(client) for client in clients]

# We concatenate the test datasets for evaluation with Keras.
test_dataset = functools.reduce(
    lambda d1, d2: d1.concatenate(d2),
    [data(client, test_data) for client in clients])
```
The initial state of the model produced by `fed_avg.initialize()` is based
on the random initializers for the Keras model, not the weights that were loaded,
since `clone_model()` does not clone the weights. To start training
from a pre-trained model, we set the model weights in the server state
directly from the loaded model.
```
NUM_ROUNDS = 5

# The state of the FL server, containing the model and optimization state.
state = fed_avg.initialize()
# Replace the randomly initialized server weights with the pre-trained Keras
# weights (clone_model() does not clone weights).
state = tff.learning.state_with_new_model_weights(
    state,
    trainable_weights=[v.numpy() for v in keras_model.trainable_weights],
    non_trainable_weights=[
        v.numpy() for v in keras_model.non_trainable_weights
    ])

def keras_evaluate(state, round_num):
    """Centralized evaluation of the current server model via Keras."""
    # Take our global model weights and push them back into a Keras model to
    # use its standard `.evaluate()` method.
    keras_model = load_model(batch_size=BATCH_SIZE)
    keras_model.compile(
        loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),
        metrics=[FlattenedCategoricalAccuracy()])
    tff.learning.assign_weights_to_keras_model(keras_model, state.model)
    # Only 2 eval steps, to keep the round loop fast.
    loss, accuracy = keras_model.evaluate(example_dataset, steps=2, verbose=0)
    print('\tEval: loss={l:.3f}, accuracy={a:.3f}'.format(l=loss, a=accuracy))

# Alternate centralized evaluation with one round of federated training.
for round_num in range(NUM_ROUNDS):
    print('Round {r}'.format(r=round_num))
    keras_evaluate(state, round_num)
    state, metrics = fed_avg.next(state, train_datasets)
    print('\tTrain: loss={l:.3f}, accuracy={a:.3f}'.format(
        l=metrics.loss, a=metrics.accuracy))

keras_evaluate(state, NUM_ROUNDS + 1)
```
With the default changes, we haven't done enough training to make a big difference, but if you train longer on more Shakespeare data, you should see a difference in the style of the text generated with the updated model:
```
# Set our newly trained weights back in the originally created model.
keras_model_batch1.set_weights([v.numpy() for v in keras_model.weights])
# Text generation requires batch_size=1
print(generate_text(keras_model_batch1, 'What of TensorFlow Federated, you ask? '))
```
## Suggested extensions
This tutorial is just the first step! Here are some ideas for how you might try extending this notebook:
* Write a more realistic training loop where you sample clients to train on randomly.
* Use "`.repeat(NUM_EPOCHS)`" on the client datasets to try multiple epochs of local training (e.g., as in [McMahan et. al.](https://arxiv.org/abs/1602.05629)). See also [Federated Learning for Image Classification](federated_learning_for_image_classification.ipynb) which does this.
* Change the `compile()` command to experiment with using different optimization algorithms on the client.
* Try the `server_optimizer` argument to `build_federated_averaging_process` to try different algorithms for applying the model updates on the server.
* Try the `client_weight_fn` argument to to `build_federated_averaging_process` to try different weightings of the clients. The default weights client updates by the number of examples on the client, but you can do e.g. `client_weight_fn=lambda _: tf.constant(1.0)`.
| github_jupyter |
# ECG Federated 1D-CNN Server Side
This code is the server part of the ECG federated 1D-CNN model, serving **multiple** clients from a single server.
## Setting variables
```
rounds = 400
local_epoch = 1
users = 2 # number of clients
import os
import h5py
import socket
import struct
import pickle
import sys
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from threading import Thread
from threading import Lock
import time
from tqdm import tqdm
import copy
```
## Cuda
```
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
print(device)
```
## Pytorch layer modules for *Conv1D* Network
### `Conv1d` layer
- `torch.nn.Conv1d(in_channels, out_channels, kernel_size)`
### `MaxPool1d` layer
- `torch.nn.MaxPool1d(kernel_size, stride=None)`
- Parameter `stride` follows `kernel_size`.
### `ReLU` layer
- `torch.nn.ReLU()`
### `Linear` layer
- `torch.nn.Linear(in_features, out_features, bias=True)`
### `Softmax` layer
- `torch.nn.Softmax(dim=None)`
- Parameter `dim` is usually set to `1`.
## Construct 1D-CNN ECG classification model
```
class EcgConv1d(nn.Module):
    """1D-CNN ECG beat classifier: four conv stages followed by two FC layers.

    Per the layer-size comments, the network expects input of shape
    (batch, 1, 130) and emits softmax probabilities over 5 beat classes.
    Module attribute names are part of the state_dict contract used by the
    federated averaging code, so they must not be renamed.
    """
    def __init__(self):
        super(EcgConv1d, self).__init__()
        self.conv1 = nn.Conv1d(1, 16, 7)    # -> 124 x 16
        self.relu1 = nn.LeakyReLU()
        self.pool1 = nn.MaxPool1d(2)        # -> 62 x 16
        self.conv2 = nn.Conv1d(16, 16, 5)   # -> 58 x 16
        self.relu2 = nn.LeakyReLU()
        self.conv3 = nn.Conv1d(16, 16, 5)   # -> 54 x 16
        self.relu3 = nn.LeakyReLU()
        self.conv4 = nn.Conv1d(16, 16, 5)   # -> 50 x 16
        self.relu4 = nn.LeakyReLU()
        self.pool4 = nn.MaxPool1d(2)        # -> 25 x 16
        self.linear5 = nn.Linear(25 * 16, 128)
        self.relu5 = nn.LeakyReLU()
        self.linear6 = nn.Linear(128, 5)
        self.softmax6 = nn.Softmax(dim=1)

    def forward(self, x):
        # Run the convolutional feature extractor as a fixed pipeline.
        for stage in (self.conv1, self.relu1, self.pool1,
                      self.conv2, self.relu2,
                      self.conv3, self.relu3,
                      self.conv4, self.relu4, self.pool4):
            x = stage(x)
        x = x.view(-1, 25 * 16)  # flatten to (batch, 400)
        # Classifier head: FC -> LeakyReLU -> FC -> Softmax.
        return self.softmax6(self.linear6(self.relu5(self.linear5(x))))
# Instantiate the global model; kept on CPU so its state dict can be
# pickled and broadcast to clients.
ecg_net = EcgConv1d()
ecg_net.to('cpu')
```
## variables
```
# Global state shared across the per-client threads.
clientsoclist = [0]*users  # client sockets, indexed by client id
start_time = 0  # set once all clients have connected
weight_count = 0  # number of clients whose weights arrived this round
global_weights = copy.deepcopy(ecg_net.state_dict())  # current global model parameters
datasetsize = [0]*users  # training-set size reported by each client
weights_list = [0]*users  # latest state dict received from each client
lock = Lock()  # guards the shared counters above
```
## Communication overhead
```
# Byte counters used to measure communication overhead.
total_sendsize_list = []  # every payload size the server sent
total_receivesize_list = []  # every payload size the server received
client_sendsize_list = [[] for i in range(users)]  # per-client sent sizes
client_receivesize_list = [[] for i in range(users)]  # per-client received sizes
train_sendsize_list = []  # sizes sent during the training rounds only
train_receivesize_list = []  # sizes received during the training rounds only
```
## Socket initialization
### Set host address and port number
### Required socket functions
```
def send_msg(sock, msg):
    """Pickle *msg* and send it on *sock*, prefixed with a 4-byte
    big-endian length header.

    Returns the pickled payload size in bytes (header excluded), which
    the callers accumulate as communication overhead.
    """
    payload = pickle.dumps(msg)
    payload_len = len(payload)
    sock.sendall(struct.pack('>I', payload_len) + payload)
    return payload_len
def recv_msg(sock):
    """Receive one length-prefixed pickled message from *sock*.

    Returns (object, payload_length), or None if the stream closed
    before the 4-byte length header arrived.
    """
    header = recvall(sock, 4)
    if not header:
        return None
    (payload_len,) = struct.unpack('>I', header)
    payload = recvall(sock, payload_len)
    return pickle.loads(payload), payload_len


def recvall(sock, n):
    """Read exactly *n* bytes from *sock*; return None if EOF hits first."""
    chunks = []
    remaining = n
    while remaining:
        chunk = sock.recv(remaining)
        if not chunk:
            return None
        chunks.append(chunk)
        remaining -= len(chunk)
    return b''.join(chunks)
import copy
def average_weights(w, datasize):
    """Return the dataset-size-weighted average (FedAvg) of client weights.

    Parameters
    ----------
    w : list of state dicts, one per client, all on the same device.
    datasize : list of ints — number of training samples per client,
        used as the averaging weights.

    Returns a new dict mapping each parameter name to the weighted mean
    tensor. Fix over the original: the input state dicts are no longer
    scaled in place (`w[i][key] *= ...` mutated the caller's
    weights_list), so callers keep pristine copies of client weights.

    NOTE: assumes every state dict lives on one common device; if clients
    mix cpu/gpu tensors, move them to a common device before calling.
    """
    total = float(sum(datasize))
    w_avg = {}
    for key in w[0].keys():
        # Weighted sum across clients, then normalize by total samples.
        acc = w[0][key] * float(datasize[0])
        for i in range(1, len(w)):
            acc = acc + w[i][key] * float(datasize[i])
        w_avg[key] = torch.div(acc, total)
    return w_avg
```
## Thread definitions
## Receive users before training
```
def run_thread(func, num_user):
    """Accept *num_user* client connections on the listening socket *s*
    and run *func(client_index, num_user, conn)* for each connection in
    its own thread, blocking until every thread finishes.

    Also records the wall-clock start time (module global) once all
    clients are connected and prints the total training time.
    """
    global clientsoclist
    global start_time
    workers = []
    for idx in range(num_user):
        conn, addr = s.accept()
        print('Conntected with', addr)
        # Remember each socket so train() can broadcast to all clients.
        clientsoclist[idx] = conn
        worker = Thread(target=func, args=(idx, num_user, conn))
        workers.append(worker)
        worker.start()
    print("timmer start!")
    start_time = time.time()  # store start time
    for worker in workers:
        worker.join()
    end_time = time.time()  # store end time
    print("TrainingTime: {} sec".format(end_time - start_time))
def receive(userid, num_users, conn):  # thread for receive clients
    """Handshake with one client, then hand off to the training loop.

    Sends the run configuration (rounds / client id / local epochs),
    receives the client's training-set size, and records both payload
    sizes in the communication-overhead counters.
    """
    global weight_count
    global datasetsize
    msg = {
        'rounds': rounds,
        'client_id': userid,
        'local_epoch': local_epoch
    }
    datasize = send_msg(conn, msg)  # send training config; returns bytes sent
    total_sendsize_list.append(datasize)
    client_sendsize_list[userid].append(datasize)
    train_dataset_size, datasize = recv_msg(conn)  # get total_batch of train dataset
    total_receivesize_list.append(datasize)
    client_receivesize_list[userid].append(datasize)
    with lock:
        # Serialize updates to the shared counters across client threads.
        datasetsize[userid] = train_dataset_size
        weight_count += 1
    train(userid, train_dataset_size, num_users, conn)
```
## Train
```
def train(userid, train_dataset_size, num_users, client_conn):
    """Per-client federated training loop (one instance runs per thread).

    Each round: the thread that observes weight_count == num_users
    broadcasts the current global weights to every client socket, every
    thread then receives its client's locally trained weights, and the
    thread that finishes the round last performs the FedAvg step into
    global_weights.
    """
    global weights_list
    global global_weights
    global weight_count
    global ecg_net
    global val_acc  # NOTE(review): never assigned in the visible code — confirm it is defined elsewhere
    for r in range(rounds):
        with lock:
            if weight_count == num_users:
                # All clients reported in: broadcast global weights for this round.
                for i, conn in enumerate(clientsoclist):
                    datasize = send_msg(conn, global_weights)
                    total_sendsize_list.append(datasize)
                    client_sendsize_list[i].append(datasize)
                    train_sendsize_list.append(datasize)
                weight_count = 0
        # Blocking receive of this client's locally updated weights.
        client_weights, datasize = recv_msg(client_conn)
        total_receivesize_list.append(datasize)
        client_receivesize_list[userid].append(datasize)
        train_receivesize_list.append(datasize)
        weights_list[userid] = client_weights
        print("User" + str(userid) + "'s Round " + str(r + 1) + " is done")
        with lock:
            weight_count += 1
            if weight_count == num_users:
                # Last thread of the round averages all client weights.
                global_weights = average_weights(weights_list, datasetsize)
# Create the listening socket on this machine's primary address.
host = socket.gethostbyname(socket.gethostname())
port = 10080  # fixed port the clients connect to
print(host)
s = socket.socket()
s.bind((host, port))
s.listen(5)  # backlog of up to 5 pending connections
```
### Open the server socket
```
# Accept all clients and run the full federated training session.
run_thread(receive, users)
end_time = time.time()  # store end time
print("TrainingTime: {} sec".format(end_time - start_time))
```
## Print all of communication overhead
```
# Report the cumulative communication volume gathered during training.
# (val_acc printing kept disabled, as in the original run.)
# print('val_acc list')
# for acc in val_acc:
#     print(acc)
print('\n')
print('---total_sendsize_list---')
print("total_sendsize size: {} bytes".format(sum(total_sendsize_list)))
print('\n')
print('---total_receivesize_list---')
print("total receive sizes: {} bytes".format(sum(total_receivesize_list)))
print('\n')
for i in range(users):
    print('---client_sendsize_list(user{})---'.format(i))
    print("total client_sendsizes(user{}): {} bytes".format(i, sum(client_sendsize_list[i])))
    print('\n')
    print('---client_receivesize_list(user{})---'.format(i))
    print("total client_receive sizes(user{}): {} bytes".format(i, sum(client_receivesize_list[i])))
    print('\n')
print('---train_sendsize_list---')
print("total train_sendsizes: {} bytes".format(sum(train_sendsize_list)))
print('\n')
print('---train_receivesize_list---')
print("total train_receivesizes: {} bytes".format(sum(train_receivesize_list)))
print('\n')
root_path = '../../models/'
```
## Defining `ECG` Dataset Class
```
from torch.utils.data import Dataset, DataLoader
from torch.optim import Adam
class ECG(Dataset):
    """PyTorch dataset wrapping pre-split ECG segments stored in HDF5.

    Reads either train_ecg.hdf5 or test_ecg.hdf5 from
    <root_path>/ecg_data, loading the whole split into memory.
    """
    def __init__(self, train=True):
        # Pick the file name and dataset-key suffix for the requested split.
        fname, suffix = ('train_ecg.hdf5', 'train') if train else ('test_ecg.hdf5', 'test')
        with h5py.File(os.path.join(root_path, 'ecg_data', fname), 'r') as hdf:
            self.x = hdf['x_' + suffix][:]
            self.y = hdf['y_' + suffix][:]

    def __len__(self):
        return len(self.x)

    def __getitem__(self, idx):
        # Features as float tensors; labels keep their stored dtype.
        return torch.tensor(self.x[idx], dtype=torch.float), torch.tensor(self.y[idx])
```
## Making Batch Generator
```
batch_size = 32
```
### `DataLoader` for batch generating
`torch.utils.data.DataLoader(dataset, batch_size=1, shuffle=False)`
```
# Build the train/test datasets and their batch loaders.
train_dataset = ECG(train=True)
test_dataset = ECG(train=False)
trainloader = DataLoader(train_dataset, batch_size=batch_size, shuffle=True)
testloader = DataLoader(test_dataset, batch_size=batch_size)  # evaluation: no shuffle
```
### Number of total batches
```
train_total_batch = len(trainloader)  # number of training batches
print(train_total_batch)
test_batch = len(testloader)  # number of test batches
print(test_batch)
lr = 0.001
optimizer = Adam(ecg_net.parameters(), lr=lr)  # NOTE(review): unused in the visible server-side code — confirm
# NOTE(review): ecg_net already ends in Softmax; CrossEntropyLoss expects raw
# logits, so losses here are computed on probabilities — confirm intended.
criterion = nn.CrossEntropyLoss()
```
## Accuracy on the training set and for each class
```
# Load the final averaged weights and evaluate on the training set.
ecg_net.load_state_dict(global_weights)
ecg_net.eval()
ecg_net = ecg_net.to(device)
# train acc
with torch.no_grad():
    corr_num = 0
    total_num = 0
    train_loss = 0.0
    for j, trn in enumerate(trainloader):
        trn_x, trn_label = trn
        trn_x = trn_x.to(device)
        trn_label = trn_label.clone().detach().long().to(device)
        trn_output = ecg_net(trn_x)
        loss = criterion(trn_output, trn_label)
        train_loss += loss.item()
        model_label = trn_output.argmax(dim=1)
        # Count correct predictions in this batch.
        corr = trn_label[trn_label == model_label].size(0)
        corr_num += corr
        total_num += trn_label.size(0)
    print("train_acc: {:.2f}%, train_loss: {:.4f}".format(corr_num / total_num * 100, train_loss / len(trainloader)))
# test acc: same procedure on the held-out set
with torch.no_grad():
    corr_num = 0
    total_num = 0
    val_loss = 0.0
    for j, val in enumerate(testloader):
        val_x, val_label = val
        val_x = val_x.to(device)
        val_label = val_label.clone().detach().long().to(device)
        val_output = ecg_net(val_x)
        loss = criterion(val_output, val_label)
        val_loss += loss.item()
        model_label = val_output.argmax(dim=1)
        # Count correct predictions in this batch.
        corr = val_label[val_label == model_label].size(0)
        corr_num += corr
        total_num += val_label.size(0)
    accuracy = corr_num / total_num * 100
    test_loss = val_loss / len(testloader)
    print("test_acc: {:.2f}%, test_loss: {:.4f}".format( accuracy, test_loss))
# Per-class accuracy on the test set for the 5 beat classes.
class_correct = list(0. for i in range(5))
class_total = list(0. for i in range(5))
classes = ['N', 'L', 'R', 'A', 'V']
with torch.no_grad():
    for data in testloader:
        x, labels = data
        x = x.to(device)
        labels = labels.to(device)
        outputs = ecg_net(x)
        labels = labels.long()
        _, predicted = torch.max(outputs, 1)
        # Fix: the original `.squeeze()` turned a size-1 final batch into a
        # 0-d tensor, making `c[i]` raise; the comparison is already 1-D.
        correct_mask = (predicted == labels)
        for i in range(len(labels)):
            label = labels[i]
            class_correct[label] += correct_mask[i].item()
            class_total[label] += 1
for i in range(5):
    # Guard against a class absent from the test split (avoids ZeroDivisionError).
    denom = class_total[i] if class_total[i] else 1
    print('Accuracy of %5s : %2d %%' % (
        classes[i], 100 * class_correct[i] / denom))
# Let's quickly save our trained model:
PATH = './ecg_fd.pth'
torch.save(ecg_net.state_dict(), PATH)
end_time = time.time()  # store end time
print("WorkingTime: {} sec".format(end_time - start_time))
# sys.exit(0)
```
| github_jupyter |
## Manual Classification
```
import warnings
warnings.filterwarnings('ignore')
import nltk
nltk.download('stopwords')
nltk.download('punkt')
nltk.download('wordnet')
from nltk.corpus import stopwords
import pandas as pd
import numpy as np
from glove import Glove
from sklearn.preprocessing import LabelEncoder
from sklearn import metrics
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import OneHotEncoder
from keras.models import Sequential
from keras.layers import Dense
import geopandas
import os
import json
import h5py
# Shared preprocessing objects for the lithology-classification pipeline.
labelEncoder = LabelEncoder()  # maps lithology class names to integer ids
one_enc = OneHotEncoder()  # one-hot encodes the integer class ids
lemma = nltk.WordNetLemmatizer()  # lemmatizer for description tokens
def litho_Dataframe(path):
    '''Function that creates a single georeferenced dataframe with
    lithologic descriptions.
    Input:
        -path: path to groundwater explorer files
    Output:
        -DF: pandas dataframe with georeferenced lithologic descriptions'''
    shapefile = 'NGIS_Bore.shp'
    lithologs = 'NGIS_LithologyLog.csv'
    shapefile_df = geopandas.read_file(os.path.join(path, shapefile))
    #shapefile_df = shapefile_df.set_index('HydroCode')
    # Reproject bore locations to UTM zone 55S (EPSG:32755) for metric coordinates.
    shapefile_df['geometry'] = shapefile_df['geometry'].to_crs(epsg=32755)
    litho_df = pd.read_csv(os.path.join(path, lithologs))
    litho_df = litho_df.set_index('HydroCode')
    #litho_df['HydroCode'] = litho_df.index
    #litho_df['geometry'] = shapefile_df['geometry']
    # Attach each log interval to its bore's geometry via the HydroCode key.
    DF1= litho_df.merge(shapefile_df, on='HydroCode', how='left')
    DF2 = DF1[['Description', 'HydroCode', 'FromDepth', 'ToDepth',
               'TopElev', 'BottomElev', 'MajorLithCode', 'geometry']]
    DF2 = DF2.dropna(how='any')
    # Drop intervals whose elevations are the literal string 'None'.
    DF2 = DF2[(DF2['TopElev'] != 'None') & (DF2['BottomElev'] != 'None')]
    # Interval midpoint elevation.
    DF2['z'] = (pd.to_numeric(DF2.TopElev) + pd.to_numeric(DF2.BottomElev))/2
    DF = DF2.copy()
    DF['FromDepth'] = pd.to_numeric(DF.FromDepth)
    DF['ToDepth'] = pd.to_numeric(DF.ToDepth)
    DF['TopElev'] = pd.to_numeric(DF.TopElev)
    DF['BottomElev'] = pd.to_numeric(DF.BottomElev)
    print('number of original litho classes:', len(DF.MajorLithCode.unique()))
    return DF
# Path to the NSW groundwater-explorer extract (local machine).
Dir = '/mnt/d/Dropbox/Ranee_Joshi_PhD_Local/04_PythonCodes/dh2loop_old/shp_NSW'
DF=litho_Dataframe(Dir)
DF.to_csv('export.csv')  # cache the merged table for later runs
def manual_reclass(DF):
'''Function that uses a set of RE rules for manual classification
and aggregation
Inputs:
-DF: original dataframe_file
Outputs:
-DFint1: output dataframe containing 18 major lithological classes
manually obtained'''
DF['OWN'] = np.nan
DF['OWN'][DF.MajorLithCode == 'OPAL'] = 'opal'
DF['OWN'][DF.MajorLithCode == 'PRNX'] = 'pyroxenite'
DF['OWN'][DF.MajorLithCode == 'PDRY'] = 'shale'
DF['OWN'][DF.MajorLithCode == 'VTRC'] = 'volcanic'
DF['OWN'][DF.MajorLithCode == '14'] = 'pyrite'
DF['OWN'][DF.MajorLithCode == 'XCST'] = 'tuff'
DF['OWN'][DF.MajorLithCode == 'CLBN'] = 'coal'
DF['OWN'][DF.MajorLithCode == 'APTT'] = 'apatite'
DF['OWN'][DF.MajorLithCode == 'SDRK'] = 'sand'
DF['OWN'][DF.MajorLithCode == '07'] = 'lignite'
DF['OWN'][DF.MajorLithCode == '09'] = 'marble'
DF['OWN'][DF.MajorLithCode == 'PRLS'] = 'pyroclastic'
DF['OWN'][DF.MajorLithCode == 'LGNC'] = 'lignite'
DF['OWN'][DF.MajorLithCode == '13'] = 'peat'
DF['OWN'][DF.MajorLithCode == 'GRNC'] = 'granite'
DF['OWN'][DF.MajorLithCode == 'ALBT'] = 'bitumen'
DF['OWN'][DF.MajorLithCode == 'PRPC'] = 'porphyry'
DF['OWN'][DF.MajorLithCode == 'PGGY'] = 'clay'
DF['OWN'][DF.MajorLithCode == 'LRED'] = 'clay'
DF['OWN'][DF.MajorLithCode == 'STFF'] = 'clay'
DF['OWN'][DF.MajorLithCode == 'PGMT'] = 'pegmatite'
DF['OWN'][DF.MajorLithCode == 'LTIT'] = 'latite'
DF['OWN'][DF.MajorLithCode == 'CBSK'] = 'carbonaceous'
DF['OWN'][DF.MajorLithCode == 'FLDR'] = 'basalt'
DF['OWN'][DF.MajorLithCode == 'CSSD'] = 'sand gravel'
DF['OWN'][DF.MajorLithCode == 'HCMB'] = 'basalt'
DF['OWN'][DF.MajorLithCode == 'BRRC'] = 'basalt'
DF['OWN'][DF.MajorLithCode == 'DCRM'] = 'clay'
DF['OWN'][DF.MajorLithCode == 'BRCB'] = 'clay'
DF['OWN'][DF.MajorLithCode == 'MDDY'] = 'clay'
DF['OWN'][DF.MajorLithCode == 'SLRN'] = 'andesite'
DF['OWN'][DF.MajorLithCode == 'AGLC'] = 'argillite'
DF['OWN'][DF.MajorLithCode == 'ARGL'] = 'argillite'
DF['OWN'][DF.MajorLithCode == 'TRSC'] = 'sandstone'
DF['OWN'][DF.MajorLithCode == '08'] = 'limestone'
DF['OWN'][DF.MajorLithCode == 'CLPN'] = 'clay'
DF['OWN'][DF.MajorLithCode == 'PETY'] = 'clay'
DF['OWN'][DF.MajorLithCode == 'KNKR'] = 'clay'
DF['OWN'][DF.MajorLithCode == 'VGTN'] = 'soil'
DF['OWN'][DF.MajorLithCode == 'CSLN'] = 'coal'
DF['OWN'][DF.MajorLithCode == 'FORM'] = 'sand'
DF['OWN'][DF.MajorLithCode == 'CBLL'] = 'sand'
DF['OWN'][DF.MajorLithCode == 'TRCT'] = 'trachyte'
DF['OWN'][DF.MajorLithCode == 'MRBL'] = 'marble'
DF['OWN'][DF.MajorLithCode == 'BAR'] = 'cement'
DF['OWN'][DF.MajorLithCode == 'FRCL'] = 'clay'
DF['OWN'][DF.MajorLithCode == 'BUXT'] = 'bauxite'
DF['OWN'][DF.MajorLithCode == 'GRYL'] = 'drift'
DF['OWN'][(DF.MajorLithCode == 'VRGD') &
(DF.Description.str.contains('clay'))] = 'clay'
DF['OWN'][(DF.MajorLithCode == 'VRGD') &
(DF.Description.str.contains('sand'))] = 'sand'
DF['OWN'][(DF.MajorLithCode == 'LYSD') &
(DF.Description.str.contains('Clayey sand'))] = 'clay sand'
DF['OWN'][(DF.MajorLithCode == 'LYSD') &
(DF.Description.str.contains('loam'))] = 'sandy loam'
DF['OWN'][(DF.MajorLithCode == 'EVPR') &
(DF.Description.str.contains('basalt'))] = 'basalt'
DF['OWN'][(DF.MajorLithCode == 'DBLY') &
(DF.Description.str.contains('basalt'))] = 'basalt'
DF['OWN'][(DF.MajorLithCode == 'DBLY') &
(DF.Description.str.contains('clay'))] = 'clay'
DF['OWN'][(DF.MajorLithCode == 'DBLY') &
(DF.Description.str.contains('sand'))] = 'sand'
DF['OWN'][(DF.MajorLithCode == 'DBLY') &
(DF.Description.str.contains('shale'))] = 'shale'
DF['OWN'][(DF.MajorLithCode == 'TALC') &
(DF.Description.str.contains('clay'))] = 'clay'
DF['OWN'][(DF.MajorLithCode == 'TALC') &
(DF.Description.str.contains('silty'))] = 'silty clay'
DF['OWN'][(DF.MajorLithCode == 'MTLD') &
(DF.Description.str.contains('clay'))] = 'clay'
DF['OWN'][(DF.MajorLithCode == 'MTLD') &
(DF.Description.str.contains('silty'))] = 'silty clay'
DF['OWN'][(DF.MajorLithCode == 'CLBA') &
(DF.Description.str.contains('Clay'))] = 'clay'
DF['OWN'][(DF.MajorLithCode == 'CLBA') &
(DF.Description.str.contains('sand'))] = 'clay sand'
DF['OWN'][(DF.MajorLithCode == 'FREE') &
(DF.Description.str.contains('sand'))] = 'sand'
DF['OWN'][(DF.MajorLithCode == 'FREE') &
(DF.Description.str.contains('granite'))] = 'granite'
DF['OWN'][(DF.MajorLithCode == 'MDGD') &
(DF.Description.str.contains('sand'))] = 'sand'
DF['OWN'][(DF.MajorLithCode == 'MDGD') &
(DF.Description.str.contains('soil'))] = 'soil'
DF['OWN'][(DF.MajorLithCode == 'MDGD') &
(DF.Description.str.contains('stone'))] = 'mudstone'
DF['OWN'][(DF.MajorLithCode == 'GRSD') &
(DF.Description.str.contains('sand'))] = 'sand gravel'
DF['OWN'][(DF.MajorLithCode == 'GRSD') &
(DF.Description.str.contains('clay'))] = 'clay gravel'
DF['OWN'][(DF.MajorLithCode == 'AOLN') &
(DF.Description.str.contains('sand'))] = 'sand'
DF['OWN'][(DF.MajorLithCode == 'AOLN') &
(DF.Description.str.contains('basalt'))] = 'basalt'
DF['OWN'][(DF.MajorLithCode == 'FRLY') &
(DF.Description.str.contains('clay'))] = 'clay'
DF['OWN'][(DF.MajorLithCode == 'FRLY') &
(DF.Description.str.contains('sand'))] = 'sand'
DF['OWN'][(DF.MajorLithCode == 'FRLY') &
(DF.Description.str.contains('soil'))] = 'soil'
DF['OWN'][(DF.MajorLithCode == 'FRLY') &
((DF.Description.str.contains('sand')) &
(DF.Description.str.contains('clay')))] = 'clay sand'
DF['OWN'][(DF.MajorLithCode == 'FIRM') &
(DF.Description.str.contains('basalt'))] = 'basalt'
DF['OWN'][(DF.MajorLithCode == 'FIRM') &
(DF.Description.str.contains('sand'))] = 'sand'
DF['OWN'][(DF.MajorLithCode == 'FIRM') &
(DF.Description.str.contains('clay'))] = 'clay'
DF['OWN'][(DF.MajorLithCode == 'FIRM') &
(DF.Description.str.contains('sandstone'))] = 'sandstone'
DF['OWN'][(DF.MajorLithCode == 'FIRM') &
(DF.Description.str.contains('granite'))] = 'granite'
DF['OWN'][(DF.MajorLithCode == 'FIRM') &
(DF.Description.str.contains('shale'))] = 'shale'
DF['OWN'][(DF.MajorLithCode == 'FIRM') &
(DF.Description.str.contains('siltstone'))] = 'siltstone'
DF['OWN'][(DF.MajorLithCode == 'GREY') &
(DF.Description.str.contains('clay'))] = 'clay'
DF['OWN'][(DF.MajorLithCode == 'GREY') &
(DF.Description.str.contains('acke'))] = 'wacke'
DF['OWN'][(DF.MajorLithCode == 'GREY') &
(DF.Description.str.contains('shale'))] = 'shale'
DF['OWN'][(DF.MajorLithCode == 'GREY') &
(DF.Description.str.contains('sandstone'))] = 'sandstone'
DF['OWN'][(DF.MajorLithCode == 'BECH') &
(DF.Description.str.contains('sand'))] = 'sand'
DF['OWN'][(DF.MajorLithCode == 'BECH') &
(DF.Description.str.contains('clay'))] = 'sandy clay'
DF['OWN'][(DF.MajorLithCode == 'PKBN') &
(DF.Description.str.contains('granite'))] = 'granite'
DF['OWN'][(DF.MajorLithCode == 'BCLC') &
(DF.Description.str.contains('itumen'))] = 'bitumen'
DF['OWN'][(DF.MajorLithCode == 'BCLC') &
(DF.Description.str.contains('gravel'))] = 'gravel'
DF['OWN'][(DF.MajorLithCode == 'MFIC') &
(DF.Description.str.contains('monzo'))] = 'monzonite'
DF['OWN'][(DF.MajorLithCode == 'MZNT') &
(DF.Description.str.contains('Monzo'))] = 'monzonite'
DF['OWN'][(DF.MajorLithCode == 'DCIT') &
(DF.Description.str.contains('acite'))] = 'dacite'
DF['OWN'][(DF.MajorLithCode == 'DCIT') &
(DF.Description.str.contains('granite'))] = 'granite'
DF['OWN'][(DF.MajorLithCode == 'SYNT') &
(DF.Description.str.contains('yenite'))] = 'syenite'
DF['OWN'][(DF.MajorLithCode == 'AMPB') &
(DF.Description.str.contains('Amphibolite'))] = 'amphibolite'
DF['OWN'][(DF.MajorLithCode == 'BJCN') &
(DF.Description.str.contains('Basalt'))] = 'basalt'
DF['OWN'][(DF.MajorLithCode == 'BJCN') &
(DF.Description.str.contains('sand'))] = 'sand'
DF['OWN'][(DF.MajorLithCode == 'BJCN') &
(DF.Description.str.contains('clay'))] = 'clay'
DF['OWN'][(DF.MajorLithCode == 'BJCN') &
(DF.Description.str.contains('claystone'))] = 'claystone'
DF['OWN'][(DF.MajorLithCode == 'BJCN') &
(DF.Description.str.contains('granite'))] = 'granite'
DF['OWN'][(DF.MajorLithCode == 'CORD') &
(DF.Description.str.contains('slate'))] = 'slate'
DF['OWN'][(DF.MajorLithCode == 'CORD') &
(DF.Description.str.contains('schist'))] = 'schist'
DF['OWN'][(DF.MajorLithCode == 'CORD') &
(DF.Description.str.contains('Clay'))] = 'clay'
DF['OWN'][(DF.MajorLithCode == 'CORD') &
(DF.Description.str.contains('granite'))] = 'granite'
DF['OWN'][(DF.MajorLithCode == 'CORD') &
(DF.Description.str.contains('andstone'))] = 'sandstone'
DF['OWN'][(DF.MajorLithCode == 'HMTT') &
(DF.Description.str.contains('sandstone'))] = 'sandstone'
DF['OWN'][(DF.MajorLithCode == 'HMTT') &
(DF.Description.str.contains('volcano'))] = 'volcanic'
DF['OWN'][(DF.MajorLithCode == 'HMTT') &
(DF.Description.str.contains('conglomerate'))] = 'conglomerate'
DF['OWN'][(DF.MajorLithCode == 'HMTT') &
(DF.Description.str.contains('granite'))] = 'granite'
DF['OWN'][(DF.MajorLithCode == 'HMTT') &
(DF.Description.str.contains('andesite'))] = 'andesite'
DF['OWN'][DF.MajorLithCode == 'MMPC'] = np.nan
DF['OWN'][(DF.MajorLithCode == 'MMPC') &
(DF.Description.str.contains('etamorphic'))] = 'metamorphic'
DF['OWN'][(DF.MajorLithCode == 'MMPC') &
(DF.Description.str.contains('siltstone'))] = 'siltstone'
DF['OWN'][(DF.MajorLithCode == 'MMPC') &
(DF.Description.str.contains('ornfels'))] = 'hornfels'
DF['OWN'][(DF.MajorLithCode == 'MMPC') &
(DF.Description.str.contains('sand'))] = 'sand'
DF['OWN'][(DF.MajorLithCode == 'MMPC') &
(DF.Description.str.contains('clay'))] = 'clay'
DF['OWN'][(DF.MajorLithCode == 'MMPC') &
((DF.Description.str.contains('sand')) &
(DF.Description.str.contains('clay')))] = 'clay sand'
DF['OWN'][DF.MajorLithCode == 'CLCS'] = 'earth'
DF['OWN'][(DF.MajorLithCode == 'CLCS') &
(DF.Description.str.contains('shale'))] = 'shale'
DF['OWN'][DF.MajorLithCode == '04'] = 'gypsum'
DF['OWN'][(DF.MajorLithCode == '04') &
(DF.Description.str.contains('sand'))] = 'sand'
DF['OWN'][(DF.MajorLithCode == '04') &
(DF.Description.str.contains('clay'))] = 'clay'
DF['OWN'][DF.MajorLithCode == '01'] = 'coal'
DF['OWN'][(DF.MajorLithCode == '01') &
(DF.Description.str.contains('iltstone'))] = 'siltstone'
DF['OWN'][(DF.MajorLithCode == '01') &
(DF.Description.str.contains('udstone'))] = 'mudstone'
DF['OWN'][(DF.MajorLithCode == 'CLRC') &
(DF.Description.str.contains('schist'))] = 'schist'
DF['OWN'][(DF.MajorLithCode == 'SDDF') &
(DF.Description.str.contains('Sand'))] = 'sand'
DF['OWN'][(DF.MajorLithCode == 'SDDF') &
((DF.Description.str.contains('Sand')) &
(DF.Description.str.contains('silt')))] = 'silty sand'
DF['OWN'][(DF.MajorLithCode == 'SDDF') &
((DF.Description.str.contains('Sand')) &
(DF.Description.str.contains('clay')))] = 'clay sand'
DF['OWN'][(DF.MajorLithCode == 'SDDF') &
((DF.Description.str.contains('Sand')) &
(DF.Description.str.contains('gravel')))] = 'sand gravel'
DF['OWN'][(DF.MajorLithCode == 'SDDF') &
(DF.Description.str.contains('Carb'))] = 'carbonaceous'
DF['OWN'][(DF.MajorLithCode == 'SDDF') &
(DF.Description.str.contains('clay'))] = 'clay'
DF['OWN'][(DF.MajorLithCode == 'CRBC') &
(DF.Description.str.contains('Carb'))] = 'carbonaceous'
DF['OWN'][(DF.MajorLithCode == 'STRK') &
(DF.Description.str.contains('clay'))] = 'clay'
DF['OWN'][(DF.MajorLithCode == 'STRK') &
(DF.Description.str.contains('sand'))] = 'sand'
DF['OWN'][DF.MajorLithCode == 'BOTT'] = 'bitumen'
DF['OWN'][DF.MajorLithCode == 'MXTR'] = 'shale'
DF['OWN'][DF.MajorLithCode == 'OIL'] = 'shale'
DF['OWN'][DF.MajorLithCode == 'GRDR'] = 'granodiorite'
DF['OWN'][(DF.MajorLithCode == 'BOTT') &
(DF.Description.str.contains('Biotite'))] = 'biotite'
DF['OWN'][DF.MajorLithCode == 'KLNT'] = 'kaolinite'
DF['OWN'][DF.MajorLithCode == 'CBSS'] = 'carbonaceous'
DF['OWN'][DF.MajorLithCode == 'GPSM'] = 'gypsum'
DF['OWN'][DF.MajorLithCode == 'PBHZ'] = 'pebbles'
DF['OWN'][DF.MajorLithCode == 'KOLN'] = 'kaolinite'
DF['OWN'][DF.MajorLithCode == 'IROX'] = 'ironstone'
DF['OWN'][DF.MajorLithCode == 'ALVL'] = 'drift'
DF['OWN'][DF.MajorLithCode == 'SCOR'] = 'scoria'
DF['OWN'][DF.MajorLithCode == 'CHCL'] = 'charcoal'
DF['OWN'][DF.MajorLithCode == 'GNSS'] = 'gneiss'
DF['OWN'][DF.MajorLithCode == 'CBLS'] = 'water'
DF['OWN'][(DF.MajorLithCode == 'GNSS') &
(DF.Description.str.contains('Green clay'))] = 'clay'
DF['OWN'][(DF.MajorLithCode == 'GNSS') &
(DF.Description.str.contains('Genise'))] = np.nan
DF['OWN'][DF.MajorLithCode == 'QRTT'] = 'quartzite'
DF['OWN'][DF.MajorLithCode == 'BDLR'] = np.nan
DF['OWN'][DF.MajorLithCode == 'FOSL'] = np.nan
DF['OWN'][DF.MajorLithCode == 'CORL'] = np.nan
DF['OWN'][DF.MajorLithCode == 'GRPT'] = np.nan
DF['OWN'][DF.MajorLithCode == 'CRST'] = np.nan
DF['OWN'][DF.MajorLithCode == 'GRBL'] = np.nan
DF['OWN'][DF.MajorLithCode == 'AOIL'] = np.nan
DF['OWN'][DF.MajorLithCode == 'CLTE'] = np.nan
DF['OWN'][(DF.MajorLithCode == 'CLTE') &
(DF.Description.str.contains('Stones'))] = 'stones'
DF['OWN'][DF.MajorLithCode == 'DPSL'] = np.nan
DF['OWN'][DF.MajorLithCode == 'BORE'] = np.nan
DF['OWN'][DF.MajorLithCode == 'PYRT'] = 'Pyrite'
DF['OWN'][(DF.MajorLithCode == 'PYRT') &
(DF.Description.str.contains('basalt'))] = 'basalt'
DF['OWN'][(DF.MajorLithCode == 'PYRT') &
(DF.Description.str.contains('limestone'))] = 'limestone'
DF['OWN'][DF.MajorLithCode == 'ZOLT'] = 'zeolite'
DF['OWN'][(DF.MajorLithCode == 'SHLY')] = 'clay'
DF['OWN'][(DF.MajorLithCode == 'SLLY')] = 'clay'
DF['OWN'][(DF.MajorLithCode == 'SLTE')] = 'slate'
DF['OWN'][(DF.MajorLithCode == '1')] = 'coal'
DF['OWN'][(DF.Description.str.contains('shale')) &
(DF.MajorLithCode == '1')] = 'shale'
DF['OWN'][(DF.Description.str.contains('ironstone')) &
(DF.MajorLithCode == '1')] = 'ironstone'
DF['OWN'][(DF.Description.str.contains('siltstone')) &
(DF.MajorLithCode == '1')] = 'siltstone'
DF['OWN'][(DF.Description.str.contains('tuff')) &
(DF.MajorLithCode == '1')] = 'tuff'
DF['OWN'][(DF.MajorLithCode == 'COAL')] = 'coal'
DF['OWN'][(DF.Description.str.contains('shale')) &
(DF.MajorLithCode == 'COAL')] = 'shale'
DF['OWN'][(DF.Description.str.contains('sand')) &
(DF.MajorLithCode == 'COAL')] = 'sand'
DF['OWN'][(DF.Description.str.contains('clay')) &
(DF.MajorLithCode == 'COAL')] = 'clay'
DF['OWN'][(DF.Description.str.contains('basalt')) &
(DF.MajorLithCode == 'COAL')] = 'basalt'
DF['OWN'][(DF.Description.str.contains('gravel')) &
(DF.MajorLithCode == 'COAL')] = 'gravel'
DF['OWN'][(DF.Description.str.contains('sandstone')) &
(DF.MajorLithCode == 'COAL')] = 'sandstone'
DF['OWN'][(DF.Description.str.contains('silty')) &
(DF.MajorLithCode == 'COAL')] = 'silty'
DF['OWN'][((DF.Description.str.contains('siltstone')) |
(DF.Description.str.contains('Siltstone'))) &
(DF.MajorLithCode == 'COAL')] = 'siltstone'
DF['OWN'][(DF.Description.str.contains('claystone')) &
(DF.MajorLithCode == 'COAL')] = 'claystone'
DF['OWN'][(DF.Description.str.contains('mudstone')) &
(DF.MajorLithCode == 'COAL')] = 'mudstone'
DF['OWN'][(DF.MajorLithCode == 'CMNT')] = 'cement'
DF['OWN'][(DF.Description.str.contains('gravel')) &
(DF.MajorLithCode == 'CMNT')] = 'gravel'
DF['OWN'][(DF.Description.str.contains('sand')) &
(DF.MajorLithCode == 'CMNT')] = 'sand'
DF['OWN'][(DF.Description.str.contains('clay')) &
(DF.MajorLithCode == 'CMNT')] = 'clay'
DF['OWN'][(DF.MajorLithCode == 'SDMN')] = 'sedimentary'
DF['OWN'][(DF.MajorLithCode == 'DLMT')] = 'dolomite'
DF['OWN'][(DF.MajorLithCode == 'AGLM')] = 'agglomerate'
DF['OWN'][(DF.MajorLithCode == 'CRBN')] = 'carbonaceous'
DF['OWN'][(DF.MajorLithCode == 'BNTN')] = 'bentonite'
DF['OWN'][DF.MajorLithCode == 'IRNS'] = 'sand'
DF['OWN'][DF.MajorLithCode == 'WODY'] = 'sand'
DF['OWN'][DF.MajorLithCode == 'IGRK'] = 'igneous'
DF['OWN'][DF.MajorLithCode == 'RHLT'] = 'rhyolite'
DF['OWN'][DF.MajorLithCode == 'DLRT'] = 'dolerite'
DF['OWN'][DF.MajorLithCode == 'SLND'] = 'silty sand'
DF['OWN'][(DF.MajorLithCode == '8')] = 'limestone'
DF['OWN'][DF.MajorLithCode == 'TUFF'] = 'tuff'
DF['OWN'][DF.MajorLithCode == 'BLML'] = 'blue metal'
DF['OWN'][DF.MajorLithCode == 'SRPN'] = 'serpentine'
DF['OWN'][DF.MajorLithCode == 'DORT'] = 'diorite'
DF['OWN'][DF.MajorLithCode == 'SRFC'] = 'soil'
DF['OWN'][(DF.MajorLithCode == 'SOIL')] = 'soil'
DF['OWN'][(DF.Description.str.contains('None')) &
(DF.MajorLithCode == 'SOIL')] = np.nan
DF['OWN'][(DF.MajorLithCode == 'TPSL')] = 'topsoil'
DF['OWN'][((DF.Description.str.contains(' soil')) |
(DF.Description.str.contains('Soil'))) &
(DF.MajorLithCode == 'TPSL')] = 'soil'
DF['OWN'][(DF.Description.str.contains('None')) &
(DF.MajorLithCode == 'TPSL')] = np.nan
DF['OWN'][DF.MajorLithCode == 'SBSL'] = 'subsoil'
DF['OWN'][DF.MajorLithCode == 'VLCC'] = 'volcanic'
DF['OWN'][DF.MajorLithCode == 'DRFT'] = 'drift'
DF['OWN'][DF.MajorLithCode == 'CBSD'] = 'claystone'
DF['OWN'][DF.MajorLithCode == 'SDRC'] = 'siderite'
DF['OWN'][DF.MajorLithCode == 'CNSD'] = 'shale'
DF['OWN'][DF.MajorLithCode == 'SASH'] = 'shale'
DF['OWN'][DF.MajorLithCode == 'CHLK'] = 'chalk'
DF['OWN'][DF.MajorLithCode == 'JSPR'] = 'jasper'
DF['OWN'][DF.MajorLithCode == '15'] = 'quartz'
DF['OWN'][DF.MajorLithCode == 'IGVL'] = 'ironstone'
DF['OWN'][DF.MajorLithCode == 'LTRT'] = 'laterite'
DF['OWN'][DF.MajorLithCode == 'FMSR'] = 'sandstone'
DF['OWN'][DF.MajorLithCode == 'NORT'] = 'sandstone'
DF['OWN'][DF.MajorLithCode == 'PRUS'] = 'sandstone'
DF['OWN'][DF.MajorLithCode == 'RDDS'] = 'sandstone'
DF['OWN'][DF.MajorLithCode == 'SLSN'] = 'siltstone'
DF['OWN'][DF.MajorLithCode == 'CLRD'] = 'clay'
DF['OWN'][DF.MajorLithCode == 'SOLD'] = 'clay'
DF['OWN'][DF.MajorLithCode == 'TUGH'] = 'clay'
DF['OWN'][DF.MajorLithCode == 'STNY'] = 'stones clay'
DF['OWN'][DF.MajorLithCode == 'PUG'] = 'pug'
DF['OWN'][DF.MajorLithCode == 'YWBN'] = 'clay'
DF['OWN'][DF.MajorLithCode == '22'] = 'clay'
DF['OWN'][DF.MajorLithCode == 'STCK'] = 'clay'
DF['OWN'][DF.MajorLithCode == 'TNLR'] = 'clay'
DF['OWN'][DF.MajorLithCode == 'DBLU'] = 'clay'
DF['OWN'][DF.MajorLithCode == 'CLLM'] = 'clay loam'
DF['OWN'][DF.MajorLithCode == 'SPBK'] = 'limestone'
DF['OWN'][DF.MajorLithCode == 'SPSN'] = 'soapstone'
DF['OWN'][DF.MajorLithCode == 'LGNT'] = 'lignite'
DF['OWN'][DF.MajorLithCode == 'BNDS'] = 'sand'
DF['OWN'][DF.MajorLithCode == 'VSND'] = 'sand'
DF['OWN'][DF.MajorLithCode == 'MMCR'] = 'gravel'
DF['OWN'][(DF.Description.str.contains('sand')) &
(DF.MajorLithCode == 'MMCR')] = 'sand gravel'
# ---------------------------------------------------------------------------
# Lithology normalisation: translate MajorLithCode — refined, where needed,
# by keyword matches in the free-text Description column — into a simplified
# lithology label stored in DF['OWN'].
#
# Rewritten from a long run of chained-indexing assignments
# (DF['OWN'][mask] = value).  Chained indexing is the pandas
# "SettingWithCopy" anti-pattern: the write may land on a temporary copy and
# be silently lost.  DataFrame.loc performs the assignment on DF itself in a
# single, documented step.
# ---------------------------------------------------------------------------


def _kw(word):
    """Rule condition: Description contains `word`.

    Case-sensitive substring match — identical semantics to the
    Series.str.contains calls in the original assignment sequence.
    """
    return lambda desc: desc.str.contains(word)


def apply_own_lithology_rules(df):
    """Populate df['OWN'] from df.MajorLithCode and df.Description.

    Mutates `df` in place and returns None.

    Each rule is a tuple (code, condition, label):
      code      -- MajorLithCode value the rule applies to;
      condition -- None (unconditional for that code) or a callable taking
                   the Description Series and returning a boolean mask;
      label     -- value written to df['OWN'] (np.nan clears the entry).

    Rules are applied IN ORDER, so a later, more specific rule for the same
    code overwrites an earlier, more general one — this mirrors the original
    sequence of assignments exactly.  Rules for different codes never touch
    the same rows (every mask includes the code equality), so cross-code
    order is irrelevant.
    """
    rules = [
        ('FNCR', None, 'gravel'),
        ('RBBL', None, 'gravel'),
        ('TGHT', None, 'gravel'),
        ('DRTY', None, 'gravel'),
        ('SOPY', None, 'gravel'),
        ('RSTY', None, 'gravel'),
        ('PEA', None, 'gravel'),
        ('LAVA', None, 'lava'),
        ('CLEN', None, 'sandy clay'),
        ('MOST', None, 'sandy clay'),
        ('LBLU', None, 'sandy clay'),
        ('GRTT', None, 'gritty clay'),
        ('SEMS', None, 'sandy clay'),
        ('MARL', None, 'marl'),
        ('FLVL', None, 'volcanic'),
        ('FLSC', None, 'volcanic'),
        ('GRSN', None, np.nan),
        ('FPPP', None, 'porphyry'),
        ('CGLD', None, 'conglomerate'),
        ('RFLT', None, 'conglomerate'),
        ('CGLC', None, 'conglomerate'),
        ('SEMI', None, 'conglomerate'),
        ('CGLM', None, 'conglomerate'),
        ('SWRX', None, 'granite'),
        ('GRCK', None, 'greywacke'),
        ('GRIT', None, 'sand'),
        ('FRMN', None, 'basalt'),
        ('RSTN', None, 'basalt'),
        ('BSLC', None, 'sand gravel'),
        ('LIME', None, 'clay'),
        ('PRPR', None, 'porphyry'),
        ('GBBR', None, 'conglomerate'),
        ('GBBR', _kw('gravel'), 'sand gravel'),
        ('DENS', None, 'sand gravel'),
        ('BRCC', None, 'breccia'),
        ('MICA', None, 'mica'),
        ('BCVC', None, 'volcanic'),
        ('SLRR', None, 'sand'),
        ('ASHV', None, 'volcanic'),
        ('CVTY', None, 'cavity'),
        ('CLGR', None, 'clay sand gravel'),
        ('GRDL', None, 'clay gravel'),
        ('PHLT', None, 'phyllite'),
        ('SDLM', None, 'sandy loam'),
        ('SLBD', None, 'sandy loam'),
        ('CLCR', None, 'calcrete'),
        ('GRLT', None, 'gravel'),
        ('QZPP', None, 'porphyry'),
        ('QZPP', lambda d: d.str.contains('sandstone') | d.str.contains('bands'), 'sandstone'),
        ('HWRX', None, 'conglomerate'),
        ('ARKS', None, 'arkose'),
        ('SCST', None, 'schist'),
        ('VCRK', None, 'volcanic'),
        ('FSND', None, 'sandy clay'),
        ('ANDS', None, 'andesite'),
        ('SLCR', None, 'silcrete'),
        # NOTE: 'Silstone' (sic) reproduces the original keyword spelling.
        ('SLCR', _kw('Silstone'), 'siltstone'),
        ('SLCR', lambda d: ~d.str.contains('Silstone') & ~d.str.contains('Silcrete'), 'silty clay'),
        ('FLDP', None, 'feldspar'),
        ('CLCT', None, 'calcite'),
        ('FLOT', _kw('basalt'), 'basalt'),
        ('FLOT', lambda d: ~d.str.contains('basalt'), 'water'),
        ('ACVC', _kw('Acid'), 'volcanic'),
        ('SGCY', None, 'sand gravel'),
        ('SGCY', _kw('granite'), 'granite'),
        ('ALRD', None, 'clay'),
        ('ALWD', None, 'wood'),
        ('PFWD', None, 'wood'),
        ('SSLS', None, 'siltstone'),
        ('SSLS', _kw('clay'), 'sandy clay'),
        ('LBIL', None, 'shale'),
        ('LBIL', _kw('clay'), 'clay'),
        ('CRKD', None, 'slate'),
        ('SHFT', None, 'shale'),
        ('CNZC', None, 'clay sand'),
        ('CNZC', _kw('coal'), 'coal'),
        ('BCPD', _kw('sandstone'), 'sandstone'),
        ('BCPD', _kw('shale'), 'shale'),
        ('WTST', _kw('clay'), 'clay'),
        ('WTST', _kw('sand'), 'sand'),
        ('WTST', _kw('sandstone'), 'sandstone'),
        ('GNBN', None, 'granite'),
        ('RED', None, 'soil'),
        ('RODN', None, 'soil'),
        ('TRCN', None, 'soil'),
        ('SBGL', None, 'soil'),
        ('BLCK', None, 'soil'),
        ('LGRY', None, 'clay'),
        ('VSLT', None, 'silty clay'),
        ('SLCY', None, 'silty clay'),
        ('RDST', _kw('silty clay'), 'silty clay'),
        ('RDST', _kw('sandy clay'), 'sandy clay'),
        ('RDST', _kw('sandstone'), 'sandstone'),
        ('BRWN', _kw('clay'), 'clay'),
        ('BRWN', _kw('sand'), 'sand'),
        ('BRWN', _kw('sandy clay'), 'sandy clay'),
        # Exact-equality condition (whole Description is just the colour).
        ('BRWN', lambda d: d == 'Brown', np.nan),
        ('DRED', None, 'sandy clay'),
        ('VFIN', _kw('sand'), 'sand'),
        ('VFIN', _kw('clay'), 'clay gravel'),
        ('DBRN', _kw('clay'), 'clay'),
        ('DBRN', _kw('soil'), 'soil'),
        ('DBRN', _kw('sandy clay'), 'sandy clay'),
        ('DBRN', _kw('sandy loam'), 'sandy loam'),
        ('DBRN', _kw('gravel'), 'gravel'),
        ('DBRN', _kw('sand and gravel'), 'sand gravel'),
        ('DGRY', None, 'clay'),
        ('BLUE', None, 'sand'),
        ('LOMY', _kw('soil'), 'soil'),
        ('LOMY', lambda d: ~d.str.contains('soil'), 'clay loam'),
        ('DRY', _kw('gravel'), 'gravel'),
        ('DRY', _kw('sand'), 'sand'),
        ('DRY', _kw('gravel and clay'), 'clay gravel'),
        ('WET', None, 'gravel'),
        ('BCKS', None, 'basalt'),
        ('LOOS', None, 'boulders'),
        ('RIVR', None, 'gravel'),
        ('WHBN', _kw('sand'), 'sand'),
        ('WHBN', _kw('silt'), 'silty'),
        ('WHBN', _kw('clay'), 'clay'),
        ('LGHT', _kw('clay'), 'clay'),
        ('LGHT', lambda d: ~d.str.contains('clay'), np.nan),
        ('KHKI', _kw('siltstone'), 'siltstone'),
        ('KHKI', _kw('clay'), 'clay'),
        ('LRGE', _kw('gravel'), 'gravel'),
        ('LRGE', _kw('stone'), 'stones'),
        ('SMLL', _kw('gravel'), 'gravel'),
        ('SMLL', _kw('clay'), 'clay gravel'),
        ('SMLL', _kw('sand'), 'sand gravel'),
        ('CMPC', _kw('gravel'), 'gravel'),
        ('CMPC', _kw('shale'), 'shale'),
        ('CMPC', lambda d: d == 'Compacted', np.nan),
        ('PRPL', _kw('clay'), 'clay'),
        ('PRPL', lambda d: ~d.str.contains('clay'), np.nan),
        ('HARD', _kw('clay'), 'clay'),
        ('HARD', lambda d: ~d.str.contains('clay'), np.nan),
        ('SCLY', _kw('gravel'), 'clay gravel'),
        ('SCLY', lambda d: ~d.str.contains('gravel'), 'gritty clay'),
        ('CMND', _kw('gravel'), 'gravel'),
        ('CMND', lambda d: ~d.str.contains('gravel'), np.nan),
        ('WOOD', _kw('Wood'), 'wood'),
        ('WOOD', lambda d: ~d.str.contains('Wood'), 'clay'),
        ('WOOD', _kw('sand'), 'sand'),
        ('SCBN', _kw('Sand'), 'sand'),
        ('SCBN', _kw('gravel'), 'sand gravel'),
        ('SCBN', _kw('clay'), 'clay sand'),
        ('SSDS', _kw('sandstone'), 'sandstone'),
        ('SSDS', lambda d: ~d.str.contains('sandstone'), 'conglomerate'),
        ('ERTH', None, 'earth'),
        ('ERTH', _kw('gravel'), 'gravel'),
        ('ERTH', _kw('clay'), 'clay'),
        ('ERTH', _kw('boulder'), 'boulders'),
        ('ERTH', _kw('shale'), 'shale'),
        ('ERTH', _kw('loam'), 'loam'),
        ('SLTY', None, 'silty'),
        ('SLTY', _kw('sand'), 'silty sand'),
        ('SLTY', lambda d: d.str.contains('Silty/clay') | d.str.contains('Silty clay'), 'silty sand'),
        ('SLTY', _kw('loam'), 'silty loam'),
        ('SDLC', None, 'sand'),
        ('SDLC', _kw('gravel'), 'sand gravel'),
        ('SDLC', _kw('Sand clay'), 'clay sand'),
        ('CLYY', None, 'clay'),
        ('CLYY', _kw('gravel'), 'clay gravel'),
        ('CLYY', lambda d: d.str.contains('sand') & ~d.str.contains('gravel'), 'clay sand'),
        ('FRCD', None, np.nan),
        ('FRCD', _kw('shale'), 'shale'),
        ('FRCD', _kw('sandstone'), 'sandstone'),
        ('FRCD', _kw('basalt'), 'basalt'),
        ('FRCD', _kw('granite'), 'granite'),
        ('WRCK', None, np.nan),
        ('WRCK', _kw('shale'), 'shale'),
        ('WRCK', _kw('sandstone'), 'sandstone'),
        ('WRCK', _kw('volcanic'), 'volcanic'),
        ('WRCK', _kw('granite'), 'granite'),
        ('MLCD', None, np.nan),
        ('MLCD', lambda d: d.str.contains('clay') | d.str.contains('Clay'), 'clay'),
        ('GRBN', None, 'clay'),
        ('GRBN', _kw('basalt'), 'basalt'),
        ('GRBN', _kw('sand'), 'clay sand'),
        ('ORNG', None, 'clay'),
        ('ORNG', _kw('gravel'), 'gravel'),
        ('ORNG', _kw('silty clay'), 'silty clay'),
        ('ORNG', _kw('silt'), 'silty'),
        ('ORNG', _kw('sand'), 'sand'),
        ('ORNG', _kw('sandy clay'), 'sandy clay'),
        ('FINE', None, 'sand'),
        ('FINE', _kw('gravel'), 'gravel'),
        ('FINE', _kw('sandstone'), 'sandstone'),
        ('FINE', lambda d: d.str.contains('gravel') & d.str.contains('sand'), 'sand gravel'),
        ('FINE', lambda d: d.str.contains('gravel') & d.str.contains('clay'), 'clay gravel'),
        ('LBRN', None, 'clay'),
        ('LBRN', _kw('soil'), 'soil'),
        ('LBRN', lambda d: d.str.contains('gravel') & d.str.contains('sand'), 'sand gravel'),
        ('LBRN', lambda d: d.str.contains('sand') & d.str.contains('clay'), 'sandy clay'),
        ('LBRN', lambda d: d.str.contains('silt') & d.str.contains('clay'), 'silty clay'),
        ('MDUM', None, 'gravel'),
        ('MDUM', _kw('clay'), 'clay'),
        ('MDUM', _kw('sand'), 'sand'),
        ('MDUM', lambda d: d.str.contains('gravel') & d.str.contains('sand'), 'sand gravel'),
        ('CRSE', None, 'sand'),
        ('CRSE', _kw('gravel'), 'gravel'),
        ('CRSE', lambda d: d.str.contains('gravel') & d.str.contains('sand'), 'sand gravel'),
        ('CRSE', lambda d: d.str.contains('gravel') & d.str.contains('clay'), 'clay gravel'),
        ('BRKN', None, np.nan),
        ('BRKN', _kw('shale'), 'shale'),
        ('BRKN', _kw('sandstone'), 'sandstone'),
        ('BRKN', _kw('basalt'), 'basalt'),
        ('BRKN', _kw('granite'), 'granite'),
        ('BRKN', _kw('mudstone'), 'mudstone'),
        ('HDBD', None, np.nan),
        ('HDBD', _kw('shale'), 'shale'),
        ('HDBD', _kw('conglomerate'), 'conglomerate'),
        ('HDBD', _kw('basalt'), 'basalt'),
        ('HDBD', _kw('granite'), 'granite'),
        ('HDBD', _kw('clay'), 'clay'),
        ('SFBD', None, np.nan),
        ('SFBD', _kw('shale'), 'shale'),
        ('SFBD', _kw('basalt'), 'basalt'),
        ('SFBD', _kw('granite'), 'granite'),
        ('SFBD', _kw('clay'), 'clay'),
        ('SFBD', lambda d: d.str.contains('clay') & d.str.contains('sand'), 'sandy clay'),
        ('YWST', None, 'shale'),
        ('YWST', _kw('sandstone'), 'sandstone'),
        ('YWST', _kw('clay'), 'clay'),
        ('WDDY', None, np.nan),
        ('WDDY', _kw('sandstone'), 'sandstone'),
        ('WDDY', _kw('basalt'), 'basalt'),
        ('WDDY', _kw('granite'), 'granite'),
        ('WDDY', _kw('shale'), 'shale'),
        ('BLBN', None, np.nan),
        ('BLBN', _kw('basalt'), 'basalt'),
        ('BLBN', _kw('clay'), 'clay'),
        ('BLBN', _kw('gravel'), 'gravel'),
        ('BLBN', _kw('sand'), 'sand'),
        ('BLBN', lambda d: d.str.contains('gravel') & d.str.contains('sand'), 'sand gravel'),
        ('BLBN', lambda d: d.str.contains('silt') & d.str.contains('sand'), 'silty sand'),
        ('WTCB', None, 'clay'),
        ('WTCB', _kw('gravel'), 'clay gravel'),
        ('WTCB', lambda d: d.str.contains('gravel') | d.str.contains('sand'), 'clay sand gravel'),
        ('GYST', None, np.nan),
        ('GYST', _kw('sand'), 'sand'),
        ('GYST', _kw('sandstone'), 'sandstone'),
        ('GYST', _kw('shale'), 'shale'),
        ('GYST', _kw('siltstone'), 'siltstone'),
        ('GYST', lambda d: d.str.contains('gravel') & d.str.contains('sand'), 'sand gravel'),
        ('GYST', lambda d: d.str.contains('clay') & d.str.contains('sand'), 'sandy clay'),
        ('GYST', _kw('soil'), 'topsoil'),
        ('BKBN', None, 'clay'),
        ('BKBN', _kw('soil'), 'soil'),
        ('BKBN', _kw('topsoil'), 'topsoil'),
        ('BKBN', _kw('basalt'), 'basalt'),
        ('BKBN', _kw('volcanic'), 'volcanic'),
        ('BKBN', _kw('granite'), 'granite'),
        ('BKBN', _kw('rock'), np.nan),
        ('BKBN', lambda d: d.str.contains('clay') & d.str.contains('silt'), 'silty clay'),
        ('BKST', None, 'slate'),
        # NOTE: '&' (not '|') of the two soil spellings is as in the original.
        ('BKST', lambda d: d.str.contains('soil') & d.str.contains('Soil'), 'soil'),
        ('BKST', _kw('clay'), 'clay'),
        ('DCMP', None, np.nan),
        ('DCMP', _kw('basalt'), 'basalt'),
        ('DCMP', _kw('shale'), 'shale'),
        ('DCMP', _kw('granite'), 'granite'),
        ('BLST', None, np.nan),
        ('BLST', _kw('sand'), 'sand gravel'),
        ('BLST', _kw('sandstone'), 'sandstone'),
        ('BLST', _kw('shale'), 'shale'),
        ('WTRD', None, np.nan),
        ('WTRD', _kw('sandstone'), 'sandstone'),
        ('WTRD', _kw('shale'), 'shale'),
        ('WTRD', _kw('basalt'), 'basalt'),
        ('21', None, 'sandy clay'),
        ('21', lambda d: d.str.contains('gravel') & ~d.str.contains('clay'), 'sand gravel'),
        ('21', _kw('loam'), 'sandy loam'),
        ('21', _kw('Loam'), 'loam'),
        ('SCLM', None, 'sandy clay loam'),
        ('SCLM', lambda d: d.str.contains('Clay sandy') | ~d.str.contains('loam'), 'clay sand'),
        ('CHRT', None, 'chert'),
        ('CHRT', _kw('clay'), 'stones clay'),
        ('CHRT', _kw('Clay'), 'clay sand'),
        ('CHRT', _kw('None'), np.nan),
        ('SNDY', None, 'sand'),
        ('SNDY', _kw('soil'), 'soil'),
        ('SNDY', _kw('topsoil'), 'topsoil'),
        ('SNDY', _kw('loam'), 'sandy loam'),
        ('SNDY', _kw('Clay'), 'sandy clay'),
        ('SNDY', _kw('clay'), 'sandy clay'),
        ('SNDY', _kw('gravel'), 'sand gravel'),
        ('SNDY', _kw('shale'), 'shale'),
        ('FILL', None, np.nan),
        ('FILL', _kw('gravel'), 'gravel'),
        ('FILL', lambda d: d.str.contains('gravelly sand') | d.str.contains('sandy gravel') | d.str.contains('silt'), 'sand gravel'),
        ('FILL', lambda d: d.str.contains('ground') | d.str.contains('soil'), 'soil'),
        ('FILL', _kw('topsoil'), 'topsoil'),
        ('FILL', _kw('clay'), 'clay'),
        ('MUD', None, 'mud'),
        ('MUD', _kw('gravel'), 'mud gravel'),
        ('MUD', _kw('sand'), 'mud sand'),
        ('MUD', _kw('shale'), 'shale'),
        ('STON', None, 'stones'),
        ('STON', lambda d: d.str.contains('clay') | d.str.contains('Clay'), 'stones clay'),
        ('STON', _kw('gravel'), 'stones gravel'),
        ('WB', None, 'water'),
        ('WB', _kw('gravel'), 'gravel'),
        ('WB', _kw('sand'), 'sand'),
        ('WB', _kw('clay'), 'clay'),
        ('WB', lambda d: d.str.contains('gravel') & d.str.contains('sand'), 'sand gravel'),
        ('WB', _kw('basalt'), 'basalt'),
        ('WB', _kw('sandstone'), 'sandstone'),
        ('WB', _kw('shale'), 'shale'),
        ('WB', _kw('granite'), 'granite'),
        ('WB', _kw('rock'), np.nan),
        ('WB', lambda d: d.str.contains('clay') & d.str.contains('gravel'), 'clay gravel'),
        ('BRBN', None, 'clay'),
        ('BRBN', _kw('soil'), 'soil'),
        ('BRBN', _kw('sand'), 'sand'),
        ('BRBN', _kw('gravel'), 'gravel'),
        ('BRBN', lambda d: d.str.contains('gravel') & d.str.contains('sand'), 'sand gravel'),
        ('BRBN', lambda d: d.str.contains('clay') & d.str.contains('sand'), 'sandy clay'),
        ('BRBN', _kw('basalt'), 'basalt'),
        ('BRBN', _kw('sandstone'), 'sandstone'),
        ('BRBN', _kw('shale'), 'shale'),
        ('BRBN', _kw('rock'), np.nan),
        ('99', None, 'clay'),
        ('99', lambda d: d.str.contains('None') | d.str.contains('unknown') | (d == '('), np.nan),
        ('99', _kw('sand'), 'sand'),
        ('99', _kw('gravel'), 'gravel'),
        ('99', lambda d: d.str.contains('gravel') & d.str.contains('sand'), 'sand gravel'),
        ('99', lambda d: d.str.contains('gravel') & d.str.contains('clay'), 'clay gravel'),
        ('99', lambda d: d.str.contains('clay') & d.str.contains('sand'), 'sandy clay'),
        ('99', _kw('channel'), 'sand'),
        ('99', _kw('sandstone'), 'sandstone'),
        ('99', _kw('soil'), 'soil'),
        # 'opsoil' deliberately matches both 'topsoil' and 'Topsoil'.
        ('99', _kw('opsoil'), 'topsoil'),
        ('BNST', None, 'shale'),
        ('BNST', _kw('clay'), 'clay'),
        ('BNST', _kw('sand'), 'sand'),
        ('BNST', _kw('gravel'), 'gravel'),
        ('BNST', _kw('s&g'), 'sand gravel'),
        ('BNST', lambda d: d.str.contains('gravel') & d.str.contains('sand'), 'sand gravel'),
        ('BNST', lambda d: d.str.contains('gravel') & d.str.contains('clay'), 'clay gravel'),
        ('BNST', lambda d: d.str.contains('clay') & d.str.contains('sand'), 'sandy clay'),
        ('BNST', _kw('sandstone'), 'sandstone'),
        ('GRCL', None, 'gravel'),
        ('GRCL', _kw('clay'), 'clay gravel'),
        ('GRCL', _kw('sand'), 'sand gravel'),
        ('GRCL', lambda d: d.str.contains('clay') & d.str.contains('sand'), 'clay sand gravel'),
        ('23', None, 'clay sand'),
        ('23', _kw('sandy'), 'sandy clay'),
        ('23', _kw('gravel'), 'clay sand gravel'),
        ('19', None, 'silty clay'),
        ('19', lambda d: ~d.str.contains('clay'), 'silty sand'),
        ('19', lambda d: d.str.contains('clay') & d.str.contains('sand'), 'silty clay sand'),
        ('CLSD', None, 'clay sand'),
        ('CLSD', _kw('silt'), 'silty clay sand'),
        ('CLSN', None, 'claystone'),
        ('CLSN', lambda d: ~d.str.contains('claystone') & ~d.str.contains('Claystone') & d.str.contains('gravel'), 'clay gravel'),
        ('CLSN', lambda d: ~d.str.contains('claystone') & ~d.str.contains('Claystone') & d.str.contains('stone'), 'stones clay'),
        ('CLSN', _kw('limestone'), 'clay'),
        ('CLBD', None, 'clay gravel'),
        ('CLBD', _kw('boulder'), 'clay boulders'),
        ('ALVM', None, 'alluvium'),
        ('ALVM', _kw('soil'), 'soil'),
        ('ALVM', _kw('clay'), 'clay'),
        ('ALVM', _kw('sand'), 'sand'),
        ('ALVM', _kw('gravel'), 'gravel'),
        ('ALVM', _kw('basalt'), 'basalt'),
        ('ALVM', _kw('drift'), 'drift'),
        ('ALVM', _kw('conglomerate'), 'conglomerate'),
        ('ALVM', _kw('rock'), 'alluvium'),
        ('ALVM', lambda d: d.str.contains('sand') & d.str.contains('gravel'), 'sand gravel'),
        ('ALVM', lambda d: d.str.contains('clay') & d.str.contains('gravel'), 'clay gravel'),
        ('ALVM', lambda d: d.str.contains('clay') & d.str.contains('sand'), 'clay sand'),
        ('MDSN', None, 'mudstone'),
        ('MDSN', _kw('None'), np.nan),
        ('BDRK', None, 'bedrock'),
        ('BDRK', _kw('sand'), 'sandstone'),
        ('BDRK', _kw('granite'), 'granite'),
        ('BDRK', _kw('basalt'), 'basalt'),
        ('BDRK', _kw('volcanic'), 'volcanic'),
        ('BDRK', _kw('siltstone'), 'siltstone'),
        ('BDRK', _kw('mudstone'), 'mudstone'),
        ('BDRK', _kw('dolerite'), 'dolerite'),
        ('BDRK', _kw('shale'), 'shale'),
        ('BDRK', _kw('arkose'), 'arkose'),
        ('BDRK', _kw('porphyry'), 'porphyry'),
        ('BDRK', _kw('ignimbrite'), 'ignimbrite'),
        ('BDRK', lambda d: d.str.contains('Rhyolite') | d.str.contains('rhyolite'), 'rhyolite'),
        ('BDRK', _kw('diorite'), 'diorite'),
        ('20', None, 'clay sand'),
        ('20', _kw('shale'), 'shale'),
        ('20', lambda d: d.str.contains('Sandy') | d.str.contains('sandy clay'), 'sandy clay'),
        ('20', lambda d: (~d.str.contains('clay') | ~d.str.contains('Clay')) & d.str.contains('gravel'), 'sand gravel'),
        ('20', lambda d: (~d.str.contains('clay') | ~d.str.contains('Clay')) & d.str.contains('loam'), 'sandy loam'),
        ('IRSN', None, 'ironstone'),
        ('IRSN', _kw('None'), np.nan),
        ('SILT', None, 'silty'),
        ('SILT', _kw('soil'), 'soil'),
        ('SILT', _kw('topsoil'), 'topsoil'),
        ('SILT', _kw('gravel'), 'silty gravel'),
        ('SILT', _kw('clay'), 'silty clay'),
        ('SILT', _kw('sand'), 'sandy silt'),
        ('SILT', lambda d: d.str.contains('sand') & d.str.contains('clay'), 'silty clay sand'),
        ('LOAM', None, 'loam'),
        ('LOAM', _kw('soil'), 'soil'),
        ('LOAM', _kw('topsoil'), 'topsoil'),
        ('LOAM', _kw('None'), np.nan),
        ('LOAM', _kw('gravel'), 'silty gravel'),
        ('LOAM', lambda d: d.str.contains('silt') | d.str.contains('Silt'), 'silty loam'),
        ('LOAM', lambda d: d.str.contains('clay') | d.str.contains('Clay'), 'clay loam'),
        ('LOAM', lambda d: d.str.contains('sand') | d.str.contains('Sand'), 'sandy loam'),
        ('LMSN', None, 'limestone'),
        ('LMSN', _kw('None'), np.nan),
        ('SDCY', None, 'sandy clay'),
        ('SDCY', lambda d: d.str.contains('silt') | d.str.contains('Silt'), 'silty sandy clay'),
        ('SDCY', lambda d: d.str.contains('gravel') | d.str.contains('Gravelly'), 'clay sand gravel'),
        ('GRNT', None, 'granite'),
        ('GRNT', _kw('clay'), 'clay'),
        ('GRNT', _kw('soil'), 'soil'),
    ]
    for code, cond, label in rules:
        mask = df.MajorLithCode == code
        if cond is not None:
            mask = mask & cond(df.Description)
        # .loc writes to df itself; the original chained form
        # df['OWN'][mask] = label could write to a temporary copy.
        df.loc[mask, 'OWN'] = label


apply_own_lithology_rules(DF)
DF['OWN'][(DF.Description.str.contains('topsoil')) &
(DF.MajorLithCode == 'GRNT')] = 'topsoil'
DF['OWN'][(DF.Description.str.contains('sand')) &
(DF.MajorLithCode == 'GRNT')] = 'sand'
DF['OWN'][(DF.Description.str.contains('gravel')) &
(DF.MajorLithCode == 'GRNT')] = 'gravel'
DF['OWN'][((DF.Description.str.contains('boulder')) |
(DF.Description.str.contains('Boulder'))) &
(DF.MajorLithCode == 'GRNT')] = 'boulders'
DF['OWN'][(DF.MajorLithCode == 'BSLT')] = 'basalt'
DF['OWN'][(DF.Description == 'None') &
(DF.MajorLithCode == 'BSLT')] = np.nan
DF['OWN'][(DF.Description.str.contains('soil')) &
(DF.MajorLithCode == 'BSLT')] = 'soil'
DF['OWN'][(DF.Description.str.contains('topsoil')) &
(DF.MajorLithCode == 'BSLT')] = 'topsoil'
DF['OWN'][(DF.MajorLithCode == 'ROCK')] = np.nan
DF['OWN'][((DF.Description == 'granit') |
(DF.Description == 'Granit')) &
(DF.MajorLithCode == 'ROCK')] = 'granite'
DF['OWN'][(DF.Description.str.contains('basalt')) &
(DF.MajorLithCode == 'ROCK')] = 'basalt'
DF['OWN'][((DF.Description.str.contains('clay')) |
(DF.Description.str.contains('Clay'))) &
(DF.MajorLithCode == 'ROCK')] = 'clay'
DF['OWN'][((DF.Description.str.contains('boulder')) |
(DF.Description.str.contains('Boulder'))) &
(DF.MajorLithCode == 'ROCK')] = 'boulders'
DF['OWN'][((DF.Description.str.contains('sand')) |
(DF.Description.str.contains('Sand'))) &
(DF.MajorLithCode == 'ROCK')] = 'sand'
DF['OWN'][(DF.Description.str.contains('gravel')) &
(DF.MajorLithCode == 'ROCK')] = 'gravel'
DF['OWN'][(DF.Description.str.contains('sandstone')) &
(DF.MajorLithCode == 'ROCK')] = 'sandstone'
DF['OWN'][(DF.Description.str.contains('shale')) &
(DF.MajorLithCode == 'ROCK')] = 'shale'
DF['OWN'][(DF.Description.str.contains('slate')) &
(DF.MajorLithCode == 'ROCK')] = 'slate'
DF['OWN'][(DF.Description.str.contains('conglomerate')) &
(DF.MajorLithCode == 'ROCK')] = 'conglomerate'
DF['OWN'][((DF.Description.str.contains('volcanic')) |
(DF.Description.str.contains('Volcanic'))) &
(DF.MajorLithCode == 'ROCK')] = 'volcanic'
DF['OWN'][(DF.Description.str.contains('limestone')) &
(DF.MajorLithCode == 'ROCK')] = 'limestone'
DF['OWN'][((DF.Description.str.contains('mudstone')) |
(DF.Description.str.contains('Mudstone'))) &
(DF.MajorLithCode == 'ROCK')] = 'mudstone'
DF['OWN'][(DF.MajorLithCode == 'SHLE')] = 'shale'
DF['OWN'][((DF.Description == 'Slippery back') |
(DF.Description == 'slippery back')) &
(DF.MajorLithCode == 'SHLE')] = np.nan
DF['OWN'][(DF.MajorLithCode == 'GRVL')] = 'gravel'
DF['OWN'][((DF.Description.str.contains('Sand')) |
(DF.Description.str.contains('sand'))) &
(DF.MajorLithCode == 'GRVL')] = 'sand gravel'
DF['OWN'][((DF.Description.str.contains('clay')) |
(DF.Description.str.contains('Clay'))) &
(DF.MajorLithCode == 'GRVL')] = 'clay gravel'
DF['OWN'][(~DF.Description.str.contains('Gravel')) &
(~DF.Description.str.contains('gravel')) &
((DF.Description.str.contains('Stone')) |
(DF.Description.str.contains('stones'))) &
(DF.MajorLithCode == 'GRVL')] = 'stones'
DF['OWN'][(~DF.Description.str.contains('Gravel')) &
(~DF.Description.str.contains('gravel')) &
((DF.Description.str.contains('Pebble')) |
(DF.Description.str.contains('pebbles'))) &
(DF.MajorLithCode == 'GRVL')] = 'pebbles'
DF['OWN'][(DF.MajorLithCode == 'SDSN')] = 'sandstone'
DF['OWN'][(DF.Description == 'None') &
(DF.MajorLithCode == 'SDSN')] = np.nan
DF['OWN'][(~DF.Description.str.contains('andstone')) &
(~DF.Description.str.contains('and rock')) &
((DF.Description.str.contains('Shale')) |
(DF.Description.str.contains('shale'))) &
(DF.MajorLithCode == 'SDSN')] = 'shale'
DF['OWN'][(~DF.Description.str.contains('andstone')) &
(~DF.Description.str.contains('and rock')) &
((DF.Description.str.contains('Sand')) |
(DF.Description.str.contains('sand'))) &
(DF.MajorLithCode == 'SDSN')] = 'sand'
DF['OWN'][(DF.MajorLithCode == 'UNKN')] = np.nan
DF['OWN'][((DF.Description.str.contains('granit')) |
(DF.Description.str.contains('Granit'))) &
(DF.MajorLithCode == 'UNKN')] = 'granite'
DF['OWN'][((DF.Description.str.contains('Drift')) |
(DF.Description.str.contains('drift'))) &
(DF.MajorLithCode == 'UNKN')] = 'drift'
DF['OWN'][((DF.Description.str.contains('shale')) |
(DF.Description.str.contains('Shale'))) &
(DF.MajorLithCode == 'UNKN')] = 'shale'
DF['OWN'][((DF.Description.str.contains('conglomerate')) |
(DF.Description.str.contains('Conglomerate'))) &
(DF.MajorLithCode == 'UNKN')] = 'conglomerate'
DF['OWN'][((DF.Description.str.contains('basalt')) |
(DF.Description.str.contains('Basalt'))) &
(DF.MajorLithCode == 'UNKN')] = 'basalt'
DF['OWN'][((DF.Description.str.contains('wood')) |
(DF.Description.str.contains('Wood'))) &
(DF.MajorLithCode == 'UNKN')] = 'wood'
DF['OWN'][((DF.Description.str.contains('coal')) |
(DF.Description.str.contains('Coal'))) &
(DF.MajorLithCode == 'UNKN')] = 'coal'
DF['OWN'][((DF.Description.str.contains('clay')) |
(DF.Description.str.contains('Clay'))) &
(DF.MajorLithCode == 'UNKN')] = 'clay'
DF['OWN'][((DF.Description.str.contains('silt')) |
(DF.Description.str.contains('Silt'))) &
(DF.MajorLithCode == 'UNKN')] = 'silty'
DF['OWN'][((DF.Description.str.contains('sand')) |
(DF.Description.str.contains('Sand'))) &
(DF.MajorLithCode == 'UNKN')] = 'sand'
DF['OWN'][((DF.Description.str.contains('pebble')) |
(DF.Description.str.contains('Pebble'))) &
(DF.MajorLithCode == 'UNKN')] = 'pebbles'
DF['OWN'][((DF.Description.str.contains('gravel')) |
(DF.Description.str.contains('Gravel'))) &
(DF.MajorLithCode == 'UNKN')] = 'gravel'
DF['OWN'][((DF.Description.str.contains('boulder')) |
(DF.Description.str.contains('Boulder'))) &
(DF.MajorLithCode == 'UNKN')] = 'boulders'
DF['OWN'][((DF.Description.str.contains('limestone')) |
(DF.Description.str.contains('Limestone'))) &
(DF.MajorLithCode == 'UNKN')] = 'limestone'
DF['OWN'][((DF.Description.str.contains('wacke')) |
(DF.Description.str.contains('Wacke'))) &
(DF.MajorLithCode == 'UNKN')] = 'greywacke'
DF['OWN'][((DF.Description.str.contains('stones')) |
(DF.Description.str.contains('Stones'))) &
(DF.MajorLithCode == 'UNKN')] = 'stones'
DF['OWN'][((DF.Description.str.contains('soil')) |
(DF.Description.str.contains('Soil'))) &
(DF.MajorLithCode == 'UNKN')] = 'soil'
DF['OWN'][((DF.Description.str.contains('topsoil')) |
(DF.Description.str.contains('Topsoil'))) &
(DF.MajorLithCode == 'UNKN')] = 'topsoil'
DF['OWN'][((DF.Description.str.contains('sand')) |
(DF.Description.str.contains('Sand'))) &
((DF.Description.str.contains('gravel')) |
(DF.Description.str.contains('Gravel'))) &
(DF.MajorLithCode == 'UNKN')] = 'sand gravel'
DF['OWN'][((DF.Description.str.contains('clay')) |
(DF.Description.str.contains('Clay'))) &
((DF.Description.str.contains('gravel')) |
(DF.Description.str.contains('Gravel'))) &
(DF.MajorLithCode == 'UNKN')] = 'clay gravel'
DF['OWN'][((DF.Description.str.contains('clay')) |
(DF.Description.str.contains('Clay'))) &
((DF.Description.str.contains('sandy')) |
(DF.Description.str.contains('Gravel'))) &
(DF.MajorLithCode == 'UNKN')] = 'sandy clay'
DF['OWN'][((DF.Description.str.contains('mudstone')) |
(DF.Description.str.contains('Mudstone'))) &
(DF.MajorLithCode == 'UNKN')] = 'mudstone'
DF['OWN'][((DF.Description.str.contains('sandstone')) |
(DF.Description.str.contains('Sandstone'))) &
(DF.MajorLithCode == 'UNKN')] = 'sandstone'
DF['OWN'][((DF.Description.str.contains('Rhyolite')) |
(DF.Description.str.contains('rhyolite'))) &
(DF.MajorLithCode == 'UNKN')] = 'rhyolite'
DF['OWN'][((DF.Description.str.contains('Diorite')) |
(DF.Description.str.contains('diorite'))) &
(DF.MajorLithCode == 'UNKN')] = 'diorite'
DF['OWN'][((DF.Description.str.contains('Andesite')) |
(DF.Description.str.contains('andesite'))) &
(DF.MajorLithCode == 'UNKN')] = 'andesite'
DF['OWN'][((DF.Description.str.contains('silty clay')) |
(DF.Description.str.contains('Silty clay'))) &
(DF.MajorLithCode == 'UNKN')] = 'silty clay'
DF['OWN'][((DF.Description.str.contains('Volcanic')) |
(DF.Description.str.contains('volcanic'))) &
(DF.MajorLithCode == 'UNKN')] = 'volcanic'
DF['OWN'][(DF.MajorLithCode == 'CLAY')] = 'clay'
DF['OWN'][(DF.Description.str.contains('sand')) &
(DF.MajorLithCode == 'CLAY')] = 'sandy clay'
DF['OWN'][(DF.Description.str.contains('silt')) &
(DF.MajorLithCode == 'CLAY')] = 'silty clay'
DF['OWN'][(DF.Description.str.contains('gravel')) &
(DF.MajorLithCode == 'CLAY')] = 'clay gravel'
DF['OWN'][(DF.Description.str.contains('grit')) &
(DF.MajorLithCode == 'CLAY')] = 'gritty clay'
DF['OWN'][(DF.Description.str.contains('sand')) &
(DF.Description.str.contains('gravel')) &
(DF.MajorLithCode == 'CLAY')] = 'clay sand gravel'
DF['OWN'][((DF.Description.str.contains('stones')) |
(DF.Description.str.contains('pebbles')) |
(DF.Description.str.contains('boulder'))) &
(DF.MajorLithCode == 'CLAY')] = 'stones clay'
DF['OWN'][(DF.MajorLithCode == 'SAND')] = 'sand'
DF['OWN'][(DF.Description.str.contains('and clay')) &
(DF.MajorLithCode == 'SAND')] = 'clay sand'
DF['OWN'][(DF.Description.str.contains('silt')) &
(DF.MajorLithCode == 'SAND')] = 'silty sand'
DF['OWN'][(DF.Description.str.contains('gravel')) &
(DF.MajorLithCode == 'SAND')] = 'sand gravel'
DF['OWN'][((DF.Description.str.contains('stones')) |
(DF.Description.str.contains('pebbles'))) &
(DF.MajorLithCode == 'SAND')] = 'stones sand'
DF['OWN'][(DF.MajorLithCode == 'BLDR')] = 'boulders'
DF['OWN'][(DF.Description.str.contains('clay')) &
(DF.MajorLithCode == 'BLDR')] = 'clay boulders'
DF['OWN'][(DF.Description.str.contains('sand')) &
(DF.MajorLithCode == 'BLDR')] = 'sand boulders'
DF['OWN'][(DF.Description.str.contains('gravel')) &
(DF.MajorLithCode == 'SBLDR')] = 'gravel boulders'
DF['OWN'][DF.MajorLithCode == 'CLYS'] = 'clay'
DF['OWN'][(DF.Description.str.contains('shale')) &
(DF.MajorLithCode == 'CLYS')] = 'shale'
DF['OWN'][(DF.Description.str.contains('gravel')) &
(DF.MajorLithCode == 'CLYS')] = 'clay gravel'
DF['OWN'][(DF.Description.str.contains('stone')) &
(DF.MajorLithCode == 'CLYS')] = 'claystone'
DF['OWN'][DF.MajorLithCode == 'ALUM'] = np.nan
DF['OWN'][(DF.Description.str.contains('water')) &
(DF.MajorLithCode == 'ALUM')] = 'water'
DF['OWN'][DF.MajorLithCode == 'QRTZ'] = 'quartz'
DF['OWN'][(~DF.Description.str.contains('quartz')) &
(~DF.Description.str.contains('Quartz')) &
((DF.Description.str.contains('jasper')) |
(DF.Description.str.contains('Jasper'))) &
(DF.MajorLithCode == 'QRTZ')] = 'jasper'
DF['OWN'][(DF.Description.str.contains('sandstone')) &
(DF.MajorLithCode == 'QRTZ')] = 'sandstone'
DF['OWN'][(DF.Description.str.contains('soil')) &
(DF.MajorLithCode == 'QRTZ')] = 'soil'
DF['OWN'][(DF.Description.str.contains('gravel')) &
(DF.MajorLithCode == 'QRTZ')] = 'gravel'
DF['OWN'][((DF.Description.str.contains('gravel')) |
(DF.Description.str.contains('sand'))) &
(DF.MajorLithCode == 'QRTZ')] = 'sand gravel'
DF['OWN'][(DF.Description.str.contains('conglomerate')) &
(DF.MajorLithCode == 'QRTZ')] = 'conglomerate'
DF['OWN'][DF.MajorLithCode == 'RIDG'] = 'clay'
DF['OWN'][(DF.Description.str.contains('gravel')) &
(DF.MajorLithCode == 'RIDG')] = 'gravel'
DF['OWN'][((DF.Description.str.contains('gravel')) |
(DF.Description.str.contains('clay'))) &
(DF.MajorLithCode == 'RIDG')] = 'clay gravel'
DF['OWN'][((DF.Description.str.contains('boulder')) |
(DF.Description.str.contains('clay'))) &
(DF.MajorLithCode == 'RIDG')] = 'clay boulders'
# aggregation of litho classes into 18 major classes
DFint = DF.dropna(subset=['OWN'])
DFint1 = DFint.copy()
DFint1['reclass'] = np.nan
DFint1['reclass'][(DFint1.OWN == 'basalt') |
(DFint1.OWN == 'volcanic') |
(DFint1.OWN == 'lava') |
(DFint1.OWN == 'tuff') |
(DFint1.OWN == 'breccia') |
(DFint1.OWN == 'rhyolite') |
(DFint1.OWN == 'agglomerate') |
(DFint1.OWN == 'ignimbrite') |
(DFint1.OWN == 'zeolite') |
(DFint1.OWN == 'andesite') |
(DFint1.OWN == 'latite') |
(DFint1.OWN == 'trachyte') |
(DFint1.OWN == 'scoria') |
(DFint1.OWN == 'dacite') |
(DFint1.OWN == 'pyroclastic')] = 'volcanic'
DFint1['reclass'][(DFint1.OWN == 'diorite') |
(DFint1.OWN == 'granite') |
(DFint1.OWN == 'porphyry') |
(DFint1.OWN == 'dolerite') |
(DFint1.OWN == 'igneous') |
(DFint1.OWN == 'feldspar') |
(DFint1.OWN == 'granodiorite') |
(DFint1.OWN == 'syenite') |
(DFint1.OWN == 'monzonite') |
(DFint1.OWN == 'pyroxenite') |
(DFint1.OWN == 'quartz')] = 'intrusive'
DFint1['reclass'][(DFint1.OWN == 'sandstone') |
(DFint1.OWN == 'greywacke') |
(DFint1.OWN == 'arkose') |
(DFint1.OWN == 'wacke')] = 'sandstone'
DFint1['reclass'][(DFint1.OWN == 'shale') |
(DFint1.OWN == 'mudstone') |
(DFint1.OWN == 'claystone') |
(DFint1.OWN == 'siltstone') |
(DFint1.OWN == 'argillite')] = 'shale'
DFint1['reclass'][(DFint1.OWN == 'limestone') |
(DFint1.OWN == 'dolomite') |
(DFint1.OWN == 'calcrete') |
(DFint1.OWN == 'siderite') |
(DFint1.OWN == 'chalk') |
(DFint1.OWN == 'marl') |
(DFint1.OWN == 'calcite')] = 'limestone'
DFint1['reclass'][(DFint1.OWN == 'mica') |
(DFint1.OWN == 'schist') |
(DFint1.OWN == 'serpentine') |
(DFint1.OWN == 'gneiss') |
(DFint1.OWN == 'soapstone') |
(DFint1.OWN == 'slate') |
(DFint1.OWN == 'phyllite') |
(DFint1.OWN == 'amphibolite') |
(DFint1.OWN == 'hornfels') |
(DFint1.OWN == 'pegmatite') |
(DFint1.OWN == 'metamorphic') |
(DFint1.OWN == 'marble') |
(DFint1.OWN == 'quartzite') |
(DFint1.OWN == 'biotite')] = 'metamorphic'
DFint1['reclass'][(DFint1.OWN == 'carbonaceous') |
(DFint1.OWN == 'coal') |
(DFint1.OWN == 'lignite') |
(DFint1.OWN == 'wood') |
(DFint1.OWN == 'bitumen') |
(DFint1.OWN == 'charcoal')] = 'carbonaceous'
DFint1['reclass'][(DFint1.OWN == 'jasper') |
(DFint1.OWN == 'chert') |
(DFint1.OWN == 'silcrete') |
(DFint1.OWN == 'laterite') |
(DFint1.OWN == 'ironstone') |
(DFint1.OWN == 'cement') |
(DFint1.OWN == 'pyrite') |
(DFint1.OWN == 'Pyrite') |
(DFint1.OWN == 'opal') |
(DFint1.OWN == 'gypsum') |
(DFint1.OWN == 'bauxite') |
(DFint1.OWN == 'apatite')] = 'chemical'
DFint1['reclass'][(DFint1.OWN == 'soil') |
(DFint1.OWN == 'sandy loam') |
(DFint1.OWN == 'silty loam') |
(DFint1.OWN == 'loam') |
(DFint1.OWN == 'clay loam') |
(DFint1.OWN == 'sandy clay loam') |
(DFint1.OWN == 'topsoil') |
(DFint1.OWN == 'subsoil') |
(DFint1.OWN == 'earth')] = 'soil'
DFint1['reclass'][(DFint1.OWN == 'soil') |
(DFint1.OWN == 'topsoil') |
(DFint1.OWN == 'subsoil') |
(DFint1.OWN == 'earth')] = 'soil'
DFint1['reclass'][(DFint1.OWN == 'sandy loam') |
(DFint1.OWN == 'silty sandy clay') |
(DFint1.OWN == 'silty loam') |
(DFint1.OWN == 'loam') |
(DFint1.OWN == 'clay loam') |
(DFint1.OWN == 'sandy clay loam') |
(DFint1.OWN == 'clay') |
(DFint1.OWN == 'mud') |
(DFint1.OWN == 'pug') |
(DFint1.OWN == 'silty clay') |
(DFint1.OWN == 'bentonite') |
(DFint1.OWN == 'kaolinite') |
(DFint1.OWN == 'gritty clay') |
(DFint1.OWN == 'sandy clay') |
(DFint1.OWN == 'mud sand') |
(DFint1.OWN == 'clay sand') |
(DFint1.OWN == 'silty clay sand') |
(DFint1.OWN == 'silty gravel') |
(DFint1.OWN == 'stones clay') |
(DFint1.OWN == 'clay gravel') |
(DFint1.OWN == 'mud gravel') |
(DFint1.OWN == 'clay boulders') |
(DFint1.OWN == 'silty') |
(DFint1.OWN == 'sandy silt') |
(DFint1.OWN == 'drift')] = 'fine_sediments'
DFint1['reclass'][(DFint1.OWN == 'stones sand') |
(DFint1.OWN == 'sand gravel') |
(DFint1.OWN == 'sand boulders') |
(DFint1.OWN == 'clay sand gravel') |
(DFint1.OWN == 'gravel') |
(DFint1.OWN == 'stones gravel') |
(DFint1.OWN == 'sand') |
(DFint1.OWN == 'silty sand') |
(DFint1.OWN == 'stones') |
(DFint1.OWN == 'pebbles') |
(DFint1.OWN == 'boulders') |
(DFint1.OWN == 'blue metal')] = 'coarse_sediments'
DFint1['reclass'][(DFint1.OWN == 'conglomerate')] = 'conglomerate'
DFint1['reclass'][(DFint1.OWN == 'bedrock')] = 'bedrock'
DFint1['reclass'][(DFint1.OWN == 'alluvium')] = 'alluvium'
DFint1['reclass'][(DFint1.OWN == 'water')] = 'water'
DFint1['reclass'][(DFint1.OWN == 'cavity')] = 'cavity'
DFint1['reclass'][(DFint1.OWN == 'sedimentary')] = 'sedimentary'
DFint1['reclass'][(DFint1.OWN == 'peat')] = 'peat'
#DFint1['x'] = DFint1['geometry'].apply(lambda x: x.centroid.x)
#DFint1['y'] = DFint1['geometry'].apply(lambda x: x.centroid.y)
DFint1=geopandas.GeoDataFrame(DFint1, geometry='geometry')
DFint1['x'] = DFint1.centroid.map(lambda p: p.x)
DFint1['y'] = DFint1.centroid.map(lambda p: p.y)
print('number of litho classes :',
len(DFint1['reclass'].unique()))
print('unclassified descriptions:',
len(DFint1[DFint1['reclass'].isnull()]))
return DFint1
# Reclassify the lithological descriptions and export them to CSV.
# NOTE: the original called manual_reclass(DF) twice and discarded the
# first result; the redundant (and expensive) duplicate call was removed.
DF2 = manual_reclass(DF)
DF2.to_csv('export2.csv')
def save_file(DF, name):
    '''Persist a manually reclassified dataframe to disk as a pickle.

    Inputs:
    -DF: reclassified pandas dataframe
    -name: base filename (string); the '.pkl' extension is appended
    '''
    DF.to_pickle(f'{name}.pkl')
# Replace Dir with path of groundwaterExplorer files
# NOTE(review): hard-coded local path — parameterize before sharing.
Dir = '/mnt/d/Dropbox/Ranee_Joshi_PhD_Local/04_PythonCodes/dh2loop_old/shp_NSW'
# creating lithoDataframe from files in groundwaterExplorer
Dataframe = litho_Dataframe(Dir)
# reclassifying lithological descriptions
resultingDF = manual_reclass(Dataframe)
# saving manually reclassified dataframe (written as manualTest.pkl)
save_file(resultingDF, 'manualTest')
```
## MLP Classification
```
def load_geovec(path):
    '''Load a pre-trained GeoVec (GloVe) embedding model from an HDF5 file.

    Inputs:
    -path: path to an .h5 file containing a 'vectors' dataset (the word
     embedding matrix) and a 'dct' dataset (JSON-encoded word->index map)
    Outputs:
    -instance: Glove object with word vectors, biases and dictionary set'''
    instance = Glove()
    with h5py.File(path, 'r') as f:
        v = np.zeros(f['vectors'].shape, f['vectors'].dtype)
        f['vectors'].read_direct(v)
        # tobytes() replaces ndarray.tostring(), which was deprecated in
        # NumPy 1.19 and removed in NumPy 2.0 (identical output).
        dct = f['dct'][()].tobytes().decode('utf-8')
        dct = json.loads(dct)
    instance.word_vectors = v
    instance.no_components = v.shape[1]
    # biases are not stored in the file; zero-fill to keep Glove usable
    instance.word_biases = np.zeros(v.shape[0])
    instance.add_dictionary(dct)
    return instance
# Stopwords
# Corpus-specific words to remove in addition to NLTK's English stopwords.
extra_stopwords = [
    'also',
]
# combined stopword list used by tokenize() below
stop = stopwords.words('english') + extra_stopwords
def tokenize(text, min_len=1):
    '''Split a text into lowercase word tokens.

    Keeps only purely alphabetic tokens of at least ``min_len`` characters
    that are not in the stopword list.

    Input:
    -text: string to tokenize
    -min_len: minimum token length to keep
    Output:
    -list of filtered lowercase tokens'''
    words = [w.lower()
             for sentence in nltk.sent_tokenize(text)
             for w in nltk.word_tokenize(sentence)]
    kept = [w for w in words if w.isalpha() and len(w) >= min_len]
    return [w for w in kept if w not in stop]
def tokenize_and_lemma(text, min_len=0):
    '''Tokenize a text and lemmatise every resulting token.

    Inputs:
    -text: string to process
    -min_len: minimum token length, forwarded to tokenize()
    Outputs:
    -list of lemmatised tokens'''
    return [lemma.lemmatize(token)
            for token in tokenize(text, min_len=min_len)]
def get_vector(word, model, return_zero=False):
    '''Look up the embedding vector for a single token.

    Unknown words fall back to the 'unk' embedding, or — when
    ``return_zero`` is True — to a vector of tiny epsilon values so the
    caller can filter them out later.

    Inputs:
    -word: token (string)
    -model: embeddings model exposing ``dictionary`` and ``word_vectors``
    -return_zero: if True, return an epsilon vector for unknown words
    Outputs:
    -numpy array (a copy, safe to mutate)'''
    epsilon = 1.e-10
    # KeyError here (no 'unk' entry) matches the original behavior
    fallback_idx = model.dictionary['unk']
    if return_zero and word not in model.dictionary:
        n_comp = model.word_vectors.shape[1]
        return np.zeros(n_comp) + epsilon
    row = model.dictionary.get(word, fallback_idx)
    return model.word_vectors[row].copy()
def mean_embeddings(dataframe_file, model):
    '''Build per-description sentence embeddings by averaging word vectors.

    Inputs:
    -dataframe_file: path to a pickled pandas dataframe containing
     lithological descriptions and reclassified lithologies
    -model: word embeddings model generated using GloVe
    Outputs:
    -DF: pandas dataframe including a 'mean' sentence-embedding column
     and integer class codes ('code') for the 'reclass' labels'''
    DF = pd.read_pickle(dataframe_file)
    # keep one record per (x, y, z) location
    DF = DF.drop_duplicates(subset=['x', 'y', 'z'])
    DF['tokens'] = DF['Description'].apply(lambda x: tokenize_and_lemma(x))
    DF['length'] = DF['tokens'].apply(lambda x: len(x))
    # drop descriptions that produced no tokens at all
    DF = DF.loc[DF['length'] > 0]
    # one embedding vector per token, stacked into a (n_tokens, dim) array
    DF['vectors'] = DF['tokens'].apply(lambda x: np.asarray([get_vector(n, model) for n in x]))
    # row-wise mean, excluding rows that are entirely 1e-10.
    # NOTE(review): that epsilon value is only produced by
    # get_vector(..., return_zero=True), but return_zero defaults to False
    # above — confirm the filter can actually trigger.
    DF['mean'] = DF['vectors'].apply(lambda x: np.mean(x[~np.all(x == 1.e-10, axis=1)], axis=0))
    # integer codes for the lithology classes
    DF['reclass'] = pd.Categorical(DF.reclass)
    DF['code'] = DF.reclass.cat.codes
    # keep only rows whose mean embedding contains no NaN
    DF['drop'] = DF['mean'].apply(lambda x: (~np.isnan(x).any()))
    DF = DF[DF['drop']]
    return DF
# loading word embeddings model
# (This can be obtained from https://github.com/spadarian/GeoVec )
#modelEmb = Glove.load('/home/ignacio/Documents/chapter2/best_glove_300_317413_w10_lemma.pkl')
modelEmb = load_geovec('geovec_300d_v1.h5')
# getting the mean embeddings of the reclassified descriptions saved above
DF = mean_embeddings('manualTest.pkl', modelEmb)
def split_stratified_dataset(Dataframe, test_size, validation_size):
    '''Function that split dataset into test, training and validation subsets
    Inputs:
    -Dataframe: pandas dataframe with sentence mean_embeddings
    -test_size: decimal number to generate the test subset
    -validation_size: decimal number to generate the validation subset
    Outputs:
    -X: numpy array with embeddings
    -Y: numpy array with lithological classes
    -X_test: numpy array with embeddings for test subset
    -y_test: numpy array with lithological classes for test subset
    -Xt: numpy array with embeddings for training subset
    -yt: numpy array with lithological classes for training subset
    -Xv: numpy array with embeddings for validation subset
    -yv: numpy array with lithological classes for validation subset
    '''
    X = np.vstack(Dataframe['mean'].values)
    Y = Dataframe.code.values.reshape(len(Dataframe.code), 1)
    # stratified test split so class proportions are preserved
    X_train, X_test, y_train, y_test = train_test_split(X,
                                                        Y,
                                                        test_size=test_size,
                                                        stratify=Y,
                                                        random_state=42)
    print(X_train.shape)
    # fixed NameError: the variable is y_train (lowercase), not Y_train
    print(y_train.shape)
    # second split carves the validation subset out of the training data
    Xt, Xv, yt, yv = train_test_split(X_train,
                                      y_train,
                                      test_size=validation_size,
                                      stratify=None,
                                      random_state=1)
    return X, Y, X_test, y_test, Xt, yt, Xv, yv
def retrieve_predictions(classifier, x):
    '''Function that retrieves lithological classes using the trained classifier
    Inputs:
    -classifier: trained MLP classifier
    -x: numpy array containing embbedings
    Outputs:
    -codes_pred: numpy array containing lithological classes predicted'''
    preds = classifier.predict(x, verbose=0)
    # derive the class count from the prediction width instead of the
    # previous hard-coded 18, so this also works when the model is trained
    # on a different number of lithology classes
    new_onehot = np.zeros((x.shape[0], preds.shape[1]))
    # one-hot encode the argmax class of each prediction row
    new_onehot[np.arange(len(preds)), preds.argmax(axis=1)] = 1
    # map the one-hot rows back to integer class codes
    codes_pred = one_enc.inverse_transform(new_onehot)
    return codes_pred
def classifier_assess(classifier, x, y):
    '''Print macro-F1, accuracy and balanced accuracy of the classifier.

    Inputs:
    -classifier: trained MLP classifier
    -x: numpy array with embeddings
    -y: numpy array with true lithological class codes'''
    predicted = retrieve_predictions(classifier, x)
    f1 = metrics.f1_score(y, predicted, average='macro')
    acc = metrics.accuracy_score(y, predicted)
    bal_acc = metrics.balanced_accuracy_score(y, predicted)
    print('f1 score: ', f1,
          'accuracy: ', acc,
          'balanced_accuracy:', bal_acc)
def save_predictions(Dataframe, classifier, x, name):
    '''Attach classifier predictions to the dataframe and pickle it.

    Adds the per-class probability vector and the predicted class code,
    then saves the relevant columns as '<name>.pkl'.

    Inputs:
    -Dataframe: pandas dataframe with mean_embeddings
    -classifier: trained MLP model,
    -x: numpy array with embeddings,
    -name: string name to save dataframe
    Outputs:
    -save dataframe'''
    probabilities = classifier.predict(x, verbose=0)
    Dataframe['predicted_probabilities'] = probabilities.tolist()
    Dataframe['pred'] = retrieve_predictions(classifier, x).astype(np.int32)
    columns = ['x', 'y', 'FromDepth', 'ToDepth', 'TopElev', 'BottomElev',
               'mean', 'predicted_probabilities', 'pred', 'reclass', 'code']
    Dataframe[columns].to_pickle('{}.pkl'.format(name))
# subseting dataset for training classifier (10% test, 10% validation)
X, Y, X_test, Y_test, X_train, Y_train, X_validation, Y_validation = split_stratified_dataset(DF, 0.1, 0.1)
# one-hot encoding the lithological class codes for the softmax output
encodes = one_enc.fit_transform(Y_train).toarray()
# MLP model generation: three hidden ReLU layers of 100 units over
# 300-dimensional sentence embeddings
model = Sequential()
model.add(Dense(100, input_dim=300, activation='relu'))
model.add(Dense(100, activation='relu'))
model.add(Dense(100, activation='relu'))
# softmax output with one unit per lithology class
model.add(Dense(units=len(DF.code.unique()), activation='softmax'))
model.compile(loss='categorical_crossentropy',
              optimizer='adam',
              metrics=['accuracy'])
# training MLP model
model.fit(X_train, encodes, epochs=30, batch_size=100, verbose=2)
# saving MLP model
model.save('mlp_prob_model.h5')
# assessment of model performance on the validation subset
classifier_assess(model, X_validation, Y_validation)
# save lithological prediction likelihoods dataframe
save_predictions(DF, model, X, 'NSWpredictions')
# sanity check: reload the pickled predictions and print them
import pickle
with open('NSWpredictions.pkl', 'rb') as f:
    data = pickle.load(f)
print(data)
```
| github_jupyter |
```
# Post-race analysis: compare pre-race leg-time forecasts with realized times.
import pandas as pd
import numpy as np
from scipy.stats import lognorm
import matplotlib.pyplot as plt
import shared
# race configuration comes from the project-local `shared` helper module
ve_or_ju = shared.race_type()
year = shared.forecast_year()
import time
startTime = time.time()
# pre-race estimates published to the web; finish-time columns parsed as dates
estimates = pd.read_json(f'web-lib/for_web_{shared.race_id_str()}.json', orient="records", convert_dates=["fin_time_median", "fin_time_start95", "fin_time_end95"])
estimates.head().round(3)
display(estimates.info())
# Read the actual times after race and analyze estimates
results = pd.read_csv(f'data/results_with_dist_j{year}_{ve_or_ju}.tsv', delimiter="\t")
display(results.head().round(2))
results = results[["team-id", "leg-nro", "leg-time", "leg_distance"]]
# rescale leg times by 60 (presumably seconds -> minutes — TODO confirm)
results["leg-time"] = results["leg-time"] / 60
results = results.rename(index=str, columns={"team-id": "team_id", "leg-nro": "leg"})
results.head().round(3)
# join estimates with realized times; keep only rows with a finite leg time
with_result = pd.merge(estimates, results, how='left', on=['team_id', 'leg'])
with_result = with_result[np.isfinite(with_result["leg-time"])]
with_result.head().round(3)
# individual leg-time estimate back-transformed from the log-scale mean
with_result["ind_leg_time_estimate"] = np.exp(with_result["ind_log_mean"]) * with_result["leg_distance"]
with_result["ind_error_signed"] = with_result["leg-time"] - with_result["ind_leg_time_estimate"]
with_result["ind_error"] = np.abs(with_result["ind_leg_time_estimate"] - with_result["leg-time"])
with_result["ind_error_ratio"] = with_result["ind_error"] / with_result["leg-time"]
# True when the realized time falls outside the personal 95% interval
with_result["ind_interval_error"] = (with_result["personal_start95"] > with_result["leg-time"]) | (with_result["personal_end95"] < with_result["leg-time"])
with_result.sort_values(by=['ind_error']).tail(10).round(2)
# summary metrics for the text report (report strings are in Finnish)
reports = [f'Yksilöennusteen etumerkillinen keskivirhe: {np.mean(with_result["ind_error_signed"]).round(1)} minuuttia',
           f'Yksilöennusteen keskivirhe: {np.mean(with_result["ind_error"]).round(1)} minuuttia',
           f'Yksilöennusteen mediaanivirhe: {np.median(with_result["ind_error"]).round(1)} minuuttia',
           f'Virheen suhde loppuaikaan: {np.mean(with_result["ind_error_ratio"] * 100).round(1)} %',
           f'Yksilön aikaväliennuste väärin: {np.mean(with_result["ind_interval_error"] * 100).round(1)} %']
display(reports)
with_result["ind_interval"] = with_result["personal_end95"] - with_result["personal_start95"]
# interval width and error statistics grouped by number of previous runs
ind_by_num_runs = with_result[["num_runs", "ind_interval", 'ind_error', 'ind_interval_error', 'ind_error_signed']].groupby('num_runs').agg({'ind_interval': ["mean", "median"], 'ind_error': ["mean", "median"], 'ind_interval_error': ["mean", "count"], 'ind_error_signed': ["mean", "median"]}).round(2)
reports.append(ind_by_num_runs.to_string())
ind_by_num_runs
# the same error summary aggregated per leg
with_result[["leg", 'ind_error', 'ind_interval_error', "num_runs"]].groupby('leg').agg(["mean", "median", "count"]).round(2)
%%javascript
IPython.OutputArea.prototype._should_scroll = function(lines) {
return false;
}
// # To disable auto-scrolling, execute this javascript in a notebook cell before other cells are executed:
import seaborn as sns
import matplotlib.pyplot as plt
# signed individual error per team, coloured by number of previous runs
plt.figure(figsize=(16, 10))
plot = sns.scatterplot(x="team_id", y="ind_error_signed", hue="num_runs", data=with_result)
plot.axes.set_ylim(with_result.ind_error_signed.quantile(0.05), with_result.ind_error_signed.quantile(0.95))
# realized leg times, highlighting rows outside the 95% interval
plt.figure(figsize=(16, 10))
plot = sns.scatterplot(x="team_id", y="leg-time", hue="ind_interval_error", alpha=0.4, data=with_result)
plot.axes.set_ylim(with_result["leg-time"].min() - 5, with_result["leg-time"].quantile(0.95))
# signed error as a function of realized leg time
plt.figure(figsize=(16, 10))
plot = sns.scatterplot(x="leg-time", y="ind_error_signed", hue="ind_interval_error", data=with_result)
plot.axes.set_ylim(with_result.ind_error_signed.quantile(0.05), with_result.ind_error_signed.quantile(0.95))
plot.axes.set_xlim(with_result["leg-time"].min() - 1, with_result["leg-time"].quantile(0.9))
# overall distribution of the signed error
g = sns.FacetGrid(with_result, height=8, aspect=2, xlim=(-80,80), margin_titles=True, despine=True)
#g.map(sns.distplot, "ind_error", hist_kws={'alpha':0.8}).add_legend()
sns.histplot(with_result.ind_error_signed, bins=250, kde=True)
# signed-error distributions split by (capped) number of previous runs
with_result["capped_num_runs"] = np.clip(with_result.num_runs, 1, shared.num_pace_years + 1)
g = sns.FacetGrid(with_result, hue="capped_num_runs", height=8, aspect=2, xlim=(-80,80), margin_titles=True, despine=True)
g.map(sns.distplot, "ind_error_signed", hist=False, hist_kws={'alpha':0.8}).add_legend()
def calculate_relay_values(result_row):
    '''Cumulative relay values for one result row.

    Sums the leg times of the same team up to and including this row's leg
    ("real_relay_time") and rounds the mean of the runners' num_runs over
    those legs ("mean_num_runs").
    NOTE(review): reads the module-level ``with_result`` dataframe and
    re-filters it for every row, so applying it over the whole frame is
    quadratic — fine for this dataset size, but worth knowing.'''
    rows_so_far = with_result[(with_result.team_id == result_row.team_id) & (with_result["leg"] <= result_row["leg"])]
    return pd.Series({"real_relay_time": np.sum(rows_so_far["leg-time"]), "mean_num_runs": np.mean(rows_so_far["num_runs"]).round(0)})
# cumulative relay time and mean experience for every row (row-wise apply)
relay_values = with_result.apply(lambda row: calculate_relay_values(row), axis=1)
with_result["real_relay_time"] = relay_values["real_relay_time"]
with_result["mean_num_runs"] = relay_values["mean_num_runs"]
# absolute error of the relay-level (cumulative) forecast, back-transformed
# from the log-scale sum
with_result["relay_error"] = np.abs(np.exp(with_result["fin_sum_log_mean"]) - with_result["real_relay_time"])
with_result["relay_error_ratio"] = with_result["relay_error"] / with_result["real_relay_time"]
display(np.mean(with_result["relay_error"]))
display(np.mean(with_result["relay_error_ratio"]))
# relay-level summary lines for the Finnish text report
reports.append(f'Viestiennusteen keskivirhe: {np.mean(with_result["relay_error"]).round(1)} minuuttia')
reports.append(f'Viestiennusteen virheen mediaani: {np.median(with_result["relay_error"]).round(1)} minuuttia')
display(reports)
reports.append(with_result[["leg", 'relay_error']].groupby('leg').agg(["mean", "median", "count"]).round(1).to_string())
display(reports)
# relay error per team, coloured by leg and by mean experience
plt.figure(figsize=(16, 10))
plot = sns.scatterplot(x="team_id", y="relay_error", hue="leg", data=with_result)
plot.axes.set_ylim(0, with_result.relay_error.quantile(0.95))
plt.figure(figsize=(16, 10))
plot = sns.scatterplot(x="team_id", y="relay_error", hue="mean_num_runs", data=with_result)
plot.axes.set_ylim(0, with_result.relay_error.quantile(0.95))
# relay-error distributions, overall and per leg
g = sns.FacetGrid(with_result, hue="leg", height=8, aspect=2, xlim=(0,180), margin_titles=True, despine=True)
sns.distplot(with_result.relay_error, bins=250, kde=True)
g = sns.FacetGrid(with_result, hue="leg", height=8, aspect=2, xlim=(0,180), margin_titles=True, despine=True)
g.map(sns.distplot, "relay_error", hist=False, hist_kws={'alpha':0.8}).add_legend()
# convert cumulative minutes to wall-clock finish times via the race start
with_result["real_finish_time"] = pd.to_timedelta(with_result["real_relay_time"], unit="min") + shared.start_timestamp[ve_or_ju][year]
# interval-miss flags for the relay-level finish-time interval
with_result["real_interval_error"] = (with_result["real_finish_time"] < with_result["fin_time_start95"]) | (with_result["real_finish_time"] > with_result["fin_time_end95"])
with_result["real_interval_error_fast"] = (with_result["real_finish_time"] < with_result["fin_time_start95"])
with_result["real_interval_error_slow"] = (with_result["real_finish_time"] > with_result["fin_time_end95"])
with_result["real_interval_error_type"] = "No error"
with_result.loc[with_result["real_interval_error_fast"], "real_interval_error_type"] = "Fast runner"
with_result.loc[with_result["real_interval_error_slow"], "real_interval_error_type"] = "Slow runner"
np.mean(with_result["real_interval_error"])
reports.append(f'Viestin aikaväliennuste väärin: {np.mean(with_result["real_interval_error"] * 100).round(1)} %')
reports.append(f'Juoksija nopeampi kuin viestin aikaväliennuste: {np.mean(with_result["real_interval_error_fast"] * 100).round(1)} %')
reports.append(f'Juoksija hitaampi kuin viestin aikaväliennuste: {np.mean(with_result["real_interval_error_slow"] * 100).round(1)} %')
display(reports)
# interval width in minutes (timedelta -> float)
with_result["real_interval"] = with_result["fin_time_end95"] - with_result["fin_time_start95"]
with_result["real_interval"] = pd.to_timedelta(with_result["real_interval"].values).total_seconds() / 60
reports.append(with_result[["leg", 'real_interval', 'real_interval_error', 'real_interval_error_fast', 'real_interval_error_slow']].groupby('leg').agg(["mean"]).round(3).to_string())
display(reports)
by_num_runs = with_result[["mean_num_runs", "real_interval", 'real_interval_error', 'real_interval_error_fast', 'real_interval_error_slow']].groupby('mean_num_runs').agg(["mean"]).round(3)
reports.append(by_num_runs.to_string())
by_num_runs
# write the accumulated text report and log total runtime
shared.write_simple_text_report(reports, f'post_race_analysis_{shared.race_id_str()}.txt')
with_result.sample(10)
#out_of_interval = with_result[with_result["real_interval_error"]]
plt.figure(figsize=(20, 15))
plot = sns.scatterplot(x="team_id", y="real_relay_time", hue="real_interval_error_type", palette="bright", style="leg", alpha=0.5, data=with_result)
plot.axes.set_ylim(with_result.real_relay_time.min() - 5, with_result.real_relay_time.quantile(0.995))
g = sns.FacetGrid(with_result, row="leg", hue="real_interval_error", xlim=(0,with_result.team_id.max()), height=6, aspect=1, legend_out=False)
g.map(sns.regplot, "team_id", "fin_sum_log_std", scatter_kws={'alpha':0.1}, order=2).add_legend()
endTime = time.time()
shared.log_df(f"{shared.race_id_str()} runtime {round(((endTime - startTime)/ 60), 2)} mins")
| github_jupyter |
Wikipedia link: [linear congruential generator](https://en.wikipedia.org/wiki/Linear_congruential_generator)
Wikipedia description:
> A linear congruential generator (LCG) is an algorithm that yields a sequence of pseudo-randomized numbers calculated with a discontinuous piecewise linear equation. The method represents one of the oldest and best-known pseudorandom number generator algorithms. The theory behind them is relatively easy to understand, and they are easily implemented and fast, especially on computer hardware which can provide modulo arithmetic by storage-bit truncation.
The generator is defined by the recurrence relation:
X$_{n+1}$ = (aX$_{n}$+c) mod m
where X is the sequence of pseudorandom values and
- m is the modulus (0 $<$ m)
- a is the multiplier (0 $<$ a $<$ m)
- c is the increment (0 $\leq$ c $<$ m)
- X$_{o}$ is the seed value (0 $\leq$ X$_{o}$ < m)
all of which are integer values.
```
def lcg(m, a, c, seed):
    '''Linear congruential generator of pseudorandom integers.

    Implements the recurrence X_{n+1} = (a * X_n + c) mod m.

    In:
        m    = (+ int) modulus, m > 0
        a    = (+ int) multiplier, a > 0
        c    = (nonnegative int) increment, c >= 0
        seed = (nonnegative int) initial value X_0, seed >= 0
    Out:
        a generator yielding an infinite stream of ints in [0, m)
        (note: this returns a generator, not a single random number)
    '''
    # type and value checks — done eagerly here, in the wrapper, so that bad
    # parameters fail at call time rather than on the first next() call
    assert type(m) == int, "'m' must be an integer"
    assert m > 0, "'m' must be an int greater than 0"
    assert type(a) == int, "'a' must be an integer"
    assert a > 0, "'a' must be an int greater than 0"
    assert type(c) == int, "'c' must be an integer"
    assert c >= 0, "'c' must be an int greater than or equal to 0"
    assert type(seed) == int, "'seed' must be an integer"
    assert seed >= 0, "'seed' must be an int greater than or equal to 0"

    def _generate():
        # algorithm: repeatedly apply the recurrence and yield each new state
        X_n = seed
        while True:
            X_n = (a * X_n + c) % m
            yield X_n

    return _generate()
def auto_print(implementation, number):
    """Print the next `number` values drawn from the generator `implementation`."""
    printed = 0
    while printed < number:
        value = next(implementation)
        print(value)
        printed += 1
```
#### Basic Implementation
```
# Basic demo: small prime modulus m=31 with multiplier 7 and increment 5;
# print the first 20 values of the sequence.
basic = lcg(m=31, a=7, c=5, seed=1)
auto_print(basic, 20)
```
#### There's a Problem: Parameter Choice Matters
```
# Deliberately poor parameter choice (a=2, c=1 with m=31): the printed
# sequence falls into a short cycle, illustrating the markdown note that
# parameter choice matters.
problem = lcg(m=31, a=2, c=1, seed=0)
auto_print(problem, 20)
```
**Explanation:** a poor choice of parameters leads to a short period length. Once the period length, or cycle, is known, it is trivial to make predictions. This means the numbers generated are no longer random.
#### Lehmer Implementation (c=0)
```
# Lehmer-style LCG (c=0): m = 2^31 - 1 (a Mersenne prime, per the note
# below) with multiplier a = 16807.
lehmer = lcg(m=2147483647, a=16807, c=0, seed=1)
auto_print(lehmer, 20)
```
**Note:** this Lehmer implementation uses a Mersenne Prime (a prime that is 1 less than a power of two: 2$^n$ - 1).
## Advantages
- Fast
- Minimal memory to retain state
- Great for embedded systems and video games
## Disadvantages
- Serial correlation of sequence (see Marsaglia's Theorem)
- Short period of the low-order bits when m is chosen to be a power of 2
- Not suitable for a Monte Carlo simulation
- Must not be used for cryptographic applications
## Final Notes
The low-order bits of LCGs when m is a power of 2 should never be relied on for any degree of randomness whatsoever. Indeed, simply substituting 2$^n$ for the modulus term reveals that the low order bits go through very short cycles. In particular, any full-cycle LCG when m is a power of 2 will produce alternately odd and even results.
| github_jupyter |
##### Copyright 2020 The TensorFlow Probability Authors.
Licensed under the Apache License, Version 2.0 (the "License");
```
#@title Licensed under the Apache License, Version 2.0 (the "License"); { display-mode: "form" }
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
```
# TFP Release Notes notebook (0.12.1)
The intent of this notebook is to help TFP 0.12.1 "come to life" via some small snippets - little demos of things you can achieve with TFP.
<table class="tfo-notebook-buttons" align="left">
<td>
<a target="_blank" href="https://www.tensorflow.org/probability/examples/TFP_Release_Notebook_0_12_1"><img src="https://www.tensorflow.org/images/tf_logo_32px.png" />View on TensorFlow.org</a>
</td>
<td>
<a target="_blank" href="https://colab.research.google.com/github/tensorflow/probability/blob/main/tensorflow_probability/examples/jupyter_notebooks/TFP_Release_Notebook_0_12_1.ipynb"><img src="https://www.tensorflow.org/images/colab_logo_32px.png" />Run in Google Colab</a>
</td>
<td>
<a target="_blank" href="https://github.com/tensorflow/probability/blob/main/tensorflow_probability/examples/jupyter_notebooks/TFP_Release_Notebook_0_12_1.ipynb"><img src="https://www.tensorflow.org/images/GitHub-Mark-32px.png" />View source on GitHub</a>
</td>
<td>
<a href="https://storage.googleapis.com/tensorflow_docs/probability/tensorflow_probability/examples/jupyter_notebooks/TFP_Release_Notebook_0_12_1.ipynb"><img src="https://www.tensorflow.org/images/download_logo_32px.png" />Download notebook</a>
</td>
</table>
```
#@title Installs & imports { vertical-output: true }
!pip3 install -qU tensorflow==2.4.0 tensorflow_probability==0.12.1 tensorflow-datasets inference_gym
import tensorflow as tf
import tensorflow_probability as tfp
assert '0.12' in tfp.__version__, tfp.__version__
assert '2.4' in tf.__version__, tf.__version__
physical_devices = tf.config.list_physical_devices('CPU')
tf.config.set_logical_device_configuration(
physical_devices[0],
[tf.config.LogicalDeviceConfiguration(),
tf.config.LogicalDeviceConfiguration()])
tfd = tfp.distributions
tfb = tfp.bijectors
tfpk = tfp.math.psd_kernels
import matplotlib.pyplot as plt
import numpy as np
import scipy.interpolate
import IPython
import seaborn as sns
from inference_gym import using_tensorflow as gym
import logging
```
## Bijectors
### `Glow`
A bijector from the paper [Glow: Generative Flow with Invertible 1x1 Convolutions](https://arxiv.org/abs/1807.03039), by Kingma and Dhariwal.
Here is how to draw an image from a distribution (note that the distribution has not "learned" anything here).
```
image_shape = (32, 32, 4) # 32 x 32 RGBA image
glow = tfb.Glow(output_shape=image_shape,
coupling_bijector_fn=tfb.GlowDefaultNetwork,
exit_bijector_fn=tfb.GlowDefaultExitNetwork)
pz = tfd.Sample(tfd.Normal(0., 1.), tf.reduce_prod(image_shape))
# Calling glow on distribution p(z) creates our glow distribution over images.
px = glow(pz)
# Take samples from the distribution to get images from your dataset.
image = px.sample(1)[0].numpy()
# Rescale to [0, 1].
image = (image - image.min()) / (image.max() - image.min())
plt.imshow(image);
```
### `RayleighCDF`
Bijector for the [Rayleigh distribution's](https://en.wikipedia.org/wiki/Rayleigh_distribution) CDF. One use is sampling from the Rayleigh distribution, by taking uniform samples, then passing them through the inverse of the CDF.
```
# Inverse-CDF sampling: push uniform draws through the inverse of the
# Rayleigh CDF bijector to obtain Rayleigh-distributed samples.
bij = tfb.RayleighCDF()
uniforms = tfd.Uniform().sample(10_000)
plt.hist(bij.inverse(uniforms), bins='auto');
```
### `Ascending()` replaces `Invert(Ordered())`
```
# The two prints should agree: Ascending() is the replacement spelling for
# Invert(Ordered()), per the section heading above.
x = tfd.Normal(0., 1.).sample(5)
print(tfb.Ascending()(x))
print(tfb.Invert(tfb.Ordered())(x))
```
### Add `low` arg: `Softplus(low=2.)`
```
x = tf.linspace(-4., 4., 100)
for low in (-1., 0., 1.):
bij = tfb.Softplus(low=low)
plt.plot(x, bij(x));
```
### `tfb.ScaleMatvecLinearOperatorBlock` supports blockwise `LinearOperator`, multi-part args
```
op_1 = tf.linalg.LinearOperatorDiag(diag=[1., -1., 3.])
op_2 = tf.linalg.LinearOperatorFullMatrix([[12., 5.], [-1., 3.]])
scale = tf.linalg.LinearOperatorBlockDiag([op_1, op_2], is_non_singular=True)
bij = tfb.ScaleMatvecLinearOperatorBlock(scale)
bij([[1., 2., 3.], [0., 1.]])
```
## Distributions
### `Skellam`
Distribution over differences of two `Poisson` RVs. Note that samples from this distribution can be negative.
```
x = tf.linspace(-5., 10., 10 - -5 + 1)
rates = (4, 2)
for i, rate in enumerate(rates):
plt.bar(x - .3 * (1 - i), tfd.Poisson(rate).prob(x), label=f'Poisson({rate})', alpha=0.5, width=.3)
plt.bar(x.numpy() + .3, tfd.Skellam(*rates).prob(x).numpy(), color='k', alpha=0.25, width=.3,
label=f'Skellam{rates}')
plt.legend();
```
### `JointDistributionCoroutine[AutoBatched]` produce `namedtuple`-like samples
Explicitly specify `sample_dtype=[...]` for the old `tuple` behavior.
```
@tfd.JointDistributionCoroutineAutoBatched
def model():
x = yield tfd.Normal(0., 1., name='x')
y = x + 4.
yield tfd.Normal(y, 1., name='y')
draw = model.sample(10_000)
plt.hist(draw.x, bins='auto', alpha=0.5)
plt.hist(draw.y, bins='auto', alpha=0.5);
```
### `VonMisesFisher` supports `dim > 5`, `entropy()`
The [von Mises-Fisher distribution](https://en.wikipedia.org/wiki/Von_Mises%E2%80%93Fisher_distribution) is a distribution on the $n-1$ dimensional sphere in $\mathbb{R}^n$.
```
dist = tfd.VonMisesFisher([0., 1, 0, 1, 0, 1], concentration=1.)
draws = dist.sample(3)
print(dist.entropy())
tf.reduce_sum(draws ** 2, axis=1) # each draw has length 1
```
### `ExpGamma`, `ExpInverseGamma`
`log_rate` parameter added to `Gamma`.
Numerical improvements when sampling low-concentration `Beta`, `Dirichlet` & friends. Implicit reparameterization gradients in all cases.
```
plt.figure(figsize=(10, 3))
plt.subplot(121)
plt.hist(tfd.Beta(.02, .02).sample(10_000), bins='auto')
plt.title('Beta(.02, .02)')
plt.subplot(122)
plt.title('GamX/(GamX+GamY) [the old way]')
g = tfd.Gamma(.02, 1); s0, s1 = g.sample(10_000), g.sample(10_000)
plt.hist(s0 / (s0 + s1), bins='auto')
plt.show()
plt.figure(figsize=(10, 3))
plt.subplot(121)
plt.hist(tfd.ExpGamma(.02, 1.).sample(10_000), bins='auto')
plt.title('ExpGamma(.02, 1)')
plt.subplot(122)
plt.hist(tfb.Log()(tfd.Gamma(.02, 1.)).sample(10_000), bins='auto')
plt.title('tfb.Log()(Gamma(.02, 1)) [the old way]');
```
### `JointDistribution*AutoBatched` support reproducible sampling (with length-2 tuple/Tensor seeds)
```
@tfd.JointDistributionCoroutineAutoBatched
def model():
x = yield tfd.Normal(0, 1, name='x')
y = yield tfd.Normal(x + 4, 1, name='y')
print(model.sample(seed=(1, 2)))
print(model.sample(seed=(1, 2)))
```
### `KL(VonMisesFisher || SphericalUniform)`
```
# Build vMFs with the same mean direction, batch of increasing concentrations.
vmf = tfd.VonMisesFisher(tf.math.l2_normalize(tf.random.normal([10])),
concentration=[0., .1, 1., 10.])
# KL increases with concentration, since vMF(conc=0) == SphericalUniform.
print(tfd.kl_divergence(vmf, tfd.SphericalUniform(10)))
```
### `parameter_properties`
Distribution classes now expose a `parameter_properties(dtype=tf.float32, num_classes=None)` class method, which can enable automated construction of many classes of distributions.
```
# Inspect the declared parameter metadata for two distribution classes;
# Categorical additionally takes num_classes (and here a non-default dtype).
print('Gamma:', tfd.Gamma.parameter_properties())
print('Categorical:', tfd.Categorical.parameter_properties(dtype=tf.float64, num_classes=7))
```
### `experimental_default_event_space_bijector`
Now accepts additional args pinning some distribution parts.
```
@tfd.JointDistributionCoroutineAutoBatched
def model():
scale = yield tfd.Gamma(1, 1, name='scale')
obs = yield tfd.Normal(0, scale, name='obs')
model.experimental_default_event_space_bijector(obs=.2).forward(
[tf.random.uniform([3], -2, 2.)])
```
### `JointDistribution.experimental_pin`
Pins some joint distribution parts, returning `JointDistributionPinned` object representing the joint unnormalized density.
Working with the `experimental_default_event_space_bijector`, this makes doing variational inference or MCMC with sensible defaults much more straightforward. In the below example, the first two lines of `sample` make running MCMC a breeze.
```
dist = tfd.JointDistributionSequential([
tfd.HalfNormal(1.),
lambda scale: tfd.Normal(0., scale, name='observed')])
@tf.function
def sample():
bij = dist.experimental_default_event_space_bijector(observed=1.)
target_log_prob = dist.experimental_pin(observed=1.).unnormalized_log_prob
kernel = tfp.mcmc.TransformedTransitionKernel(
tfp.mcmc.HamiltonianMonteCarlo(target_log_prob,
step_size=0.6,
num_leapfrog_steps=16),
bijector=bij)
return tfp.mcmc.sample_chain(500,
current_state=tf.ones([8]), # multiple chains
kernel=kernel,
trace_fn=None)
draws = sample()
fig, (hist, trace) = plt.subplots(ncols=2, figsize=(16, 3))
trace.plot(draws, alpha=0.5)
for col in tf.transpose(draws):
sns.kdeplot(col, ax=hist);
```
### `tfd.NegativeBinomial.experimental_from_mean_dispersion`
Alternative parameterization. Email tfprobability@tensorflow.org or send us a PR to add similar class methods for other distributions.
```
# Alternative (mean, dispersion) parameterization of NegativeBinomial;
# sample and plot a histogram.
nb = tfd.NegativeBinomial.experimental_from_mean_dispersion(30., .01)
plt.hist(nb.sample(10_000), bins='auto');
```
### `tfp.experimental.distribute`
`DistributionStrategy`-aware joint distributions, allowing for cross-device likelihood computations. Sharded `Independent` and `Sample` distributions.
```
# Note: 2-logical devices are configured in the install/import cell at top.
strategy = tf.distribute.MirroredStrategy()
assert strategy.num_replicas_in_sync == 2
@tfp.experimental.distribute.JointDistributionCoroutine
def model():
root = tfp.experimental.distribute.JointDistributionCoroutine.Root
group_scale = yield root(tfd.Sample(tfd.Exponential(1), 3, name='group_scale'))
_ = yield tfp.experimental.distribute.ShardedSample(tfd.Independent(tfd.Normal(0, group_scale), 1),
sample_shape=[4], name='x')
seed1, seed2 = tfp.random.split_seed((1, 2))
@tf.function
def sample(seed):
return model.sample(seed=seed)
xs = strategy.run(sample, (seed1,))
print("""
Note that the global latent `group_scale` is shared across devices, whereas
the local `x` is sampled independently on each device.
""")
print('sample:', xs)
print('another sample:', strategy.run(sample, (seed2,)))
@tf.function
def log_prob(x):
return model.log_prob(x)
print("""
Note that each device observes the same log_prob (local latent log_probs are
summed across devices).
""")
print('log_prob:', strategy.run(log_prob, (xs,)))
@tf.function
def grad_log_prob(x):
return tfp.math.value_and_gradient(model.log_prob, x)[1]
print("""
Note that each device observes the same log_prob gradient (local latents have
independent gradients, global latents have gradients aggregated across devices).
""")
print('grad_log_prob:', strategy.run(grad_log_prob, (xs,)))
```
## PSD Kernels
### `GeneralizedMatern`
The `GeneralizedMatern` positive-semidefinite kernel generalizes `MaternOneHalf`, `MaternThreeHalves`, and `MaternFiveHalves`.
```
gm = tfpk.GeneralizedMatern(df=[0.5, 1.5, 2.5], length_scale=1., amplitude=0.5)
m1 = tfpk.MaternOneHalf(length_scale=1., amplitude=0.5)
m2 = tfpk.MaternThreeHalves(length_scale=1., amplitude=0.5)
m3 = tfpk.MaternFiveHalves(length_scale=1., amplitude=0.5)
xs = tf.linspace(-1.5, 1.5, 100)
gm_matrix = gm.matrix([[0.]], xs[..., tf.newaxis])
plt.plot(xs, gm_matrix[0][0])
plt.plot(xs, m1.matrix([[0.]], xs[..., tf.newaxis])[0])
plt.show()
plt.plot(xs, gm_matrix[1][0])
plt.plot(xs, m2.matrix([[0.]], xs[..., tf.newaxis])[0])
plt.show()
plt.plot(xs, gm_matrix[2][0])
plt.plot(xs, m3.matrix([[0.]], xs[..., tf.newaxis])[0])
plt.show()
```
### `Parabolic` (Epanechnikov)
```
epa = tfpk.Parabolic()
xs = tf.linspace(-1.05, 1.05, 100)
plt.plot(xs, epa.matrix([[0.]], xs[..., tf.newaxis])[0]);
```
## VI
### `build_asvi_surrogate_posterior`
Automatically construct a structured surrogate posterior for VI in a way that incorporates the graphical structure of the prior distribution. This utilizes the method described in the paper Automatic Structured Variational Inference (https://arxiv.org/abs/2002.00643).
```
# Import a Brownian Motion model from TFP's inference gym.
model = gym.targets.BrownianMotionMissingMiddleObservations()
prior = model.prior_distribution()
# Ground-truth posterior mean of the latent locations, used for plotting later.
# (Fixed: the original line had a duplicated `ground_truth = ground_truth =`.)
ground_truth = model.sample_transformations['identity'].ground_truth_mean
# Unnormalized posterior log density: observation log likelihood plus prior.
target_log_prob = lambda *values: model.log_likelihood(values) + prior.log_prob(values)
```
This models a Brownian Motion process with a Gaussian observation model. It consists of 30 timesteps, but the middle 10 timesteps are unobservable.
```
locs[0] ~ Normal(loc=0, scale=innovation_noise_scale)
for t in range(1, num_timesteps):
locs[t] ~ Normal(loc=locs[t - 1], scale=innovation_noise_scale)
for t in range(num_timesteps):
observed_locs[t] ~ Normal(loc=locs[t], scale=observation_noise_scale)
```
The goal is to infer the values of `locs` from noisy observations (`observed_locs`). Since the middle 10 timesteps are unobservable, `observed_locs` are `NaN` values at timesteps [10,19].
```
# The observed loc values in the Brownian Motion inference gym model
OBSERVED_LOC = np.array([
0.21592641, 0.118771404, -0.07945447, 0.037677474, -0.27885845, -0.1484156,
-0.3250906, -0.22957903, -0.44110894, -0.09830782, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, -0.8786016,
-0.83736074, -0.7384849, -0.8939254, -0.7774566, -0.70238715, -0.87771565,
-0.51853573, -0.6948214, -0.6202789
]).astype(dtype=np.float32)
# Plot the prior and the likelihood observations
plt.figure()
plt.title('Brownian Motion Prior Samples and Observations')
num_samples = 15
prior_samples = prior.sample(num_samples)
plt.plot(prior_samples, c='blue', alpha=0.1)
plt.plot(prior_samples[0][0], label="Prior Samples", c='blue', alpha=0.1)
plt.scatter(x=range(30),y=OBSERVED_LOC, c='black', alpha=0.5, label="Observations")
plt.legend(bbox_to_anchor=(1.05, 1), borderaxespad=0.);
logging.getLogger('tensorflow').setLevel(logging.ERROR) # suppress pfor warnings
# Construct and train an ASVI Surrogate Posterior.
asvi_surrogate_posterior = tfp.experimental.vi.build_asvi_surrogate_posterior(prior)
asvi_losses = tfp.vi.fit_surrogate_posterior(target_log_prob,
asvi_surrogate_posterior,
optimizer=tf.optimizers.Adam(learning_rate=0.1),
num_steps=500)
logging.getLogger('tensorflow').setLevel(logging.NOTSET)
# Construct and train a Mean-Field Surrogate Posterior.
factored_surrogate_posterior = tfp.experimental.vi.build_factored_surrogate_posterior(event_shape=prior.event_shape)
factored_losses = tfp.vi.fit_surrogate_posterior(target_log_prob,
factored_surrogate_posterior,
optimizer=tf.optimizers.Adam(learning_rate=0.1),
num_steps=500)
logging.getLogger('tensorflow').setLevel(logging.ERROR) # suppress pfor warnings
# Sample from the posteriors.
asvi_posterior_samples = asvi_surrogate_posterior.sample(num_samples)
factored_posterior_samples = factored_surrogate_posterior.sample(num_samples)
logging.getLogger('tensorflow').setLevel(logging.NOTSET)
```
Both the ASVI and the mean-field surrogate posterior distributions have converged, and the ASVI surrogate posterior had a lower final loss (negative ELBO value).
```
# Plot the loss curves.
plt.figure()
plt.title('Loss Curves for ASVI vs Mean-Field Surrogate Posteriors')
plt.plot(asvi_losses, c='orange', label='ASVI', alpha = 0.4)
plt.plot(factored_losses, c='green', label='Mean-Field', alpha = 0.4)
plt.ylim(-50, 300)
plt.legend(bbox_to_anchor=(1.3, 1), borderaxespad=0.);
```
The samples from the posteriors highlight how nicely the ASVI surrogate posterior captures the uncertainty for the timesteps without observations. On the other hand, the mean-field surrogate posterior struggles to capture the true uncertainty.
```
# Plot samples from the ASVI and Mean-Field Surrogate Posteriors.
plt.figure()
plt.title('Posterior Samples from ASVI vs Mean-Field Surrogate Posterior')
plt.plot(asvi_posterior_samples, c='orange', alpha = 0.25)
plt.plot(asvi_posterior_samples[0][0], label='ASVI Surrogate Posterior', c='orange', alpha = 0.25)
plt.plot(factored_posterior_samples, c='green', alpha = 0.25)
plt.plot(factored_posterior_samples[0][0], label='Mean-Field Surrogate Posterior', c='green', alpha = 0.25)
plt.scatter(x=range(30),y=OBSERVED_LOC, c='black', alpha=0.5, label='Observations')
plt.plot(ground_truth, c='black', label='Ground Truth')
plt.legend(bbox_to_anchor=(1.585, 1), borderaxespad=0.);
```
## MCMC
### `ProgressBarReducer`
Visualize progress of the sampler. (May have a nominal performance penalty; not currently supported under JIT compilation.)
```
kernel = tfp.mcmc.HamiltonianMonteCarlo(lambda x: -x**2 / 2, .05, 20)
pbar = tfp.experimental.mcmc.ProgressBarReducer(100)
kernel = tfp.experimental.mcmc.WithReductions(kernel, pbar)
plt.hist(tf.reshape(tfp.mcmc.sample_chain(100, current_state=tf.ones([128]), kernel=kernel, trace_fn=None), [-1]), bins='auto')
pbar.bar.close()
```
### `sample_sequential_monte_carlo` supports reproducible sampling
```
initial_state = tf.random.uniform([4096], -2., 2.)
def smc(seed):
return tfp.experimental.mcmc.sample_sequential_monte_carlo(
prior_log_prob_fn=lambda x: -x**2 / 2,
likelihood_log_prob_fn=lambda x: -(x-1.)**2 / 2,
current_state=initial_state,
seed=seed)[1]
plt.hist(smc(seed=(12, 34)), bins='auto');plt.show()
print(smc(seed=(12, 34))[:10])
print('different:', smc(seed=(10, 20))[:10])
print('same:', smc(seed=(12, 34))[:10])
```
### Added streaming computations of variance, covariance, Rhat
Note, the interfaces to these have changed somewhat in `tfp-nightly`.
```
def cov_to_ellipse(t, cov, mean):
    """Return (x, y) points of the one-standard-deviation ellipse of `cov`,
    centred on `mean`, evaluated at angles `t`."""
    variances = tf.linalg.diag_part(cov)
    # Closed-form eigenvalues of the symmetric 2x2 covariance matrix:
    # half-trace plus/minus the discriminant term.
    half_trace = 0.5 * tf.reduce_sum(variances)
    disc = tf.sqrt(0.25 * (variances[0] - variances[1])**2 + cov[0, 1]**2)
    major = half_trace + disc
    minor = half_trace - disc
    # Orientation of the major axis.
    theta = tf.math.atan2(major - cov[0, 0], cov[0, 1])
    cos_th = tf.cos(theta)
    sin_th = tf.sin(theta)
    # Rotate the axis-aligned ellipse by theta and translate to the mean.
    x = tf.sqrt(major) * cos_th * tf.cos(t) - tf.sqrt(minor) * sin_th * tf.sin(t)
    y = tf.sqrt(major) * sin_th * tf.cos(t) + tf.sqrt(minor) * cos_th * tf.sin(t)
    return x + mean[0], y + mean[1]
fig, axes = plt.subplots(nrows=4, ncols=5, figsize=(14, 8),
sharex=True, sharey=True, constrained_layout=True)
t = tf.linspace(0., 2 * np.pi, 200)
tot = 10
cov = 0.1 * tf.eye(2) + 0.9 * tf.ones([2, 2])
mvn = tfd.MultivariateNormalTriL(loc=[1., 2.],
scale_tril=tf.linalg.cholesky(cov))
for ax in axes.ravel():
rv = tfp.experimental.stats.RunningCovariance(
num_samples=0., mean=tf.zeros(2), sum_squared_residuals=tf.zeros((2, 2)),
event_ndims=1)
for idx, x in enumerate(mvn.sample(tot)):
rv = rv.update(x)
ax.plot(*cov_to_ellipse(t, rv.covariance(), rv.mean),
color='k', alpha=(idx + 1) / tot)
ax.plot(*cov_to_ellipse(t, mvn.covariance(), mvn.mean()), 'r')
fig.suptitle("Twenty tries to approximate the red covariance with 10 draws");
```
## Math, stats
### Bessel functions: ive, kve, log-ive
```
xs = tf.linspace(0.5, 20., 100)
ys = tfp.math.bessel_ive([[0.5], [1.], [np.pi], [4.]], xs)
zs = tfp.math.bessel_kve([[0.5], [1.], [2.], [np.pi]], xs)
for i in range(4):
plt.plot(xs, ys[i])
plt.show()
for i in range(4):
plt.plot(xs, zs[i])
plt.show()
```
### Optional `weights` arg to `tfp.stats.histogram`
```
edges = tf.linspace(-4., 4, 31)
samps = tfd.TruncatedNormal(0, 1, -4, 4).sample(100_000, seed=(123, 456))
_, (ax1, ax2) = plt.subplots(1, 2, figsize=(10, 3))
ax1.bar(edges[:-1], tfp.stats.histogram(samps, edges))
ax1.set_title('samples histogram')
ax2.bar(edges[:-1], tfp.stats.histogram(samps, edges, weights=1 / tfd.Normal(0, 1).prob(samps)))
ax2.set_title('samples, weighted by inverse p(sample)');
```
### `tfp.math.erfcinv`
```
# Round-trip check: erfcinv inverts erfc, so the printed z should
# reproduce the original x values.
x = tf.linspace(-3., 3., 10)
y = tf.math.erfc(x)
z = tfp.math.erfcinv(y)
print(x)
print(z)
```
| github_jupyter |
# GSD: Assessing ambiguous nts in 1011 collection genomes (TINY SET)
**DEVELOPMENT WITH JUST TWO OF THE 1011 genomes collection (TINY SET)**
Checks a collection of sequenced cerevisiae genomes from [Peter et al 2018](https://www.ncbi.nlm.nih.gov/pubmed/29643504) for ambiguous/gap-representing residues. Compares them to a roughly dozen genomes sequenced by the PacBio method from [Yue et al., 2017](https://www.ncbi.nlm.nih.gov/pubmed/28416820) as well as the SGD reference sequence from [here](https://downloads.yeastgenome.org/sequence/S288C_reference/chromosomes/fasta/).
References for sequence data:
- [Genome evolution across 1,011 Saccharomyces cerevisiae isolates. Peter J, De Chiara M, Friedrich A, Yue JX, Pflieger D, Bergström A, Sigwalt A, Barre B, Freel K, Llored A, Cruaud C, Labadie K, Aury JM, Istace B, Lebrigand K, Barbry P, Engelen S, Lemainque A, Wincker P, Liti G, Schacherer J. Nature. 2018 Apr;556(7701):339-344. doi: 10.1038/s41586-018-0030-5. Epub 2018 Apr 11. PMID: 29643504](https://www.ncbi.nlm.nih.gov/pubmed/29643504)
- [Contrasting evolutionary genome dynamics between domesticated and wild yeasts.
Yue JX, Li J, Aigrain L, Hallin J, Persson K, Oliver K, Bergström A, Coupland P, Warringer J, Lagomarsino MC, Fischer G, Durbin R, Liti G. Nat Genet. 2017 Jun;49(6):913-924. doi: 10.1038/ng.3847. Epub 2017 Apr 17. PMID: 28416820](https://www.ncbi.nlm.nih.gov/pubmed/28416820)
- [Life with 6000 genes. Goffeau A, Barrell BG, Bussey H, Davis RW, Dujon B, Feldmann H, Galibert F, Hoheisel JD, Jacq C, Johnston M, Louis EJ, Mewes HW, Murakami Y, Philippsen P, Tettelin H, Oliver SG. Science. 1996 Oct 25;274(5287):546, 563-7. PMID: 8849441](https://www.ncbi.nlm.nih.gov/pubmed/8849441)
-----
## Preparation
Get scripts and sequence data necessary.
**Before doing `Run All`, make sure the fifth cell is set to get the desired set from the 1011 genomes collection.**
```
!pip install pyfaidx
```
Get the genomes data by running these commands.
```
import pandas as pd
# Prepare for getting PacBio (Yue et al 2017 sequences)
#make a list of the strain designations
yue_et_al_strains = ["S288C","DBVPG6044","DBVPG6765","SK1","Y12",
"YPS128","UWOPS034614","CBS432","N44","YPS138",
"UFRJ50816","UWOPS919171"]
# Get & unpack the genome sequences from strains
for s in yue_et_al_strains:
!curl -LO http://yjx1217.github.io/Yeast_PacBio_2016/data/Nuclear_Genome/{s}.genome.fa.gz
!curl -OL http://yjx1217.github.io/Yeast_PacBio_2016/data/Mitochondrial_Genome/{s}.mt.genome.fa.gz
!gunzip -f {s}.genome.fa.gz
!gunzip -f {s}.mt.genome.fa.gz
# add the mitochondrial genome content onto the nuclear
!cat {s}.genome.fa {s}.mt.genome.fa > temp.genome.fa
!mv temp.genome.fa {s}.genome.fa
!rm *.mt.genome.fa
# add identifiers to each `chr` so results for each strain clear later
chromosome_id_prefix = "chr"
def add_strain_id_to_description_line(file,strain_id):
'''
Takes a file and edits every description line to add
strain_id after the caret.
Saves the fixed file
'''
import sys
output_file_name = "temp.txt"
# prepare output file for saving so it will be open and ready
with open(output_file_name, 'w') as output_file:
# read in the input file
with open(file, 'r') as input_handler:
# prepare to give feeback later or allow skipping to certain start
lines_processed = 0
for line in input_handler:
lines_processed += 1
if line.startswith(">"):
rest_o_line = line.split(">")
new_line = ">"+strain_id + rest_o_line[1]
else:
new_line = line
# Send text to output
output_file.write(new_line)
# replace the original file with edited
!mv temp.txt {file}
# Feedback
sys.stderr.write("\n{} chromosome identifiers tagged.".format(file))
for s in yue_et_al_strains:
add_strain_id_to_description_line(s+".genome.fa",s)
# Get SGD reference sequence that includes nuclear and mitochondrial sequence as one file,
# among others. I'll use file name for the reference genome worked out
# in `GSD Assessing_ambiguous_nts_in_nuclear_PB_genomes.ipynb`, so more of the
# previously worked out code will work.
!curl -OL https://downloads.yeastgenome.org/sequence/S288C_reference/genome_releases/S288C_reference_genome_Current_Release.tgz
!tar -xzf S288C_reference_genome_Current_Release.tgz
!rm S288C_reference_genome_Current_Release.tgz
!mv S288C_reference_genome_R64-2-1_20150113/S288C_reference_sequence_R64-2-1_20150113.fsa ./SGD_REF.genome.fa
!rm -rf S288C_reference_genome_R64-2-1_20150113
```
**Now to get the entire collection or a subset of the 1011 genomes, the next cell will need to be edited.** I'll probably leave it with a small set for typical running purposes. However, to make it run fast, try the 'super-tiny' set with just two.
```
# Method to get ALL the genomes. TAKES A WHILE!
#!curl -O http://1002genomes.u-strasbg.fr/files/1011Assemblies.tar.gz
#!tar xzf 1011Assemblies.tar.gz
# Small development set
#!curl -OL https://www.dropbox.com/s/f42tiygq9tr1545/medium_setGENOMES_ASSEMBLED.tar.gz
#!tar xzf medium_setGENOMES_ASSEMBLED.tar.gz
# Tiny development set
!curl -OL https://www.dropbox.com/s/txufq2jflkgip82/tiny_setGENOMES_ASSEMBLED.tar.gz
!tar xzf tiny_setGENOMES_ASSEMBLED.tar.gz
!mv tiny_setGENOMES_ASSEMBLED GENOMES_ASSEMBLED
```
Address `CDH.re.fa` if included:
Before processing the list of all of them, fix one that has a file name mismatch with what the description lines contain.
Specifically, the assembly file name is `CDH.re.fa`, but the FASTA-entries inside begin `CDH-3`.
Simple file name mismatch. So next cell will change that file name to match.
(I have been adding this particular file into the unpacked 'GENOMES_ASSEMBLED' directory from the 'SMALL' and , sometimes the 'TINY', set via drag and drop from my local computer at this point in order to test the next cell. GIVE IT A MINUTE TO UPLOAD BECAUSE IF YOU RUN THE NEXT CELL TOO SOON, IT WILL ONLY MOVE WHAT IS UPLOADED AND THEN THERE WILL STILL BE A FILE WITH ORIGINAL NAME AS REST UPLOADS.)
```
import os
import sys
file_with_issues = "CDH.re.fa"
if os.path.isfile("GENOMES_ASSEMBLED/"+file_with_issues):
sys.stderr.write("\nFile with name non-matching entries ('{}') observed and"
" fixed.".format(file_with_issues))
!mv GENOMES_ASSEMBLED/CDH.re.fa GENOMES_ASSEMBLED/CDH_3.re.fa
#pause and then check if file with original name is there still because
# it means this was attempted too soon and need to start over.
import time
time.sleep(12) #12 seconds
if os.path.isfile("GENOMES_ASSEMBLED/"+file_with_issues):
sys.stderr.write("\n***PROBLEM. TRIED THIS CELL BEFORE FINISHED UPLOADING.\n"
"DELETE FILES ASSOCIATED AND START ALL OVER AGAIN WITH UPLOAD STEP***.")
else:
sys.stderr.write("\nFile '{}' not seen and so nothing done"
". OKAY??".format(file_with_issues))
# Make a list of all `genome.fa` files, excluding `genome.fa.nhr` and `genome.fa.nin` and `genome.fansq`
# The excluding was only necessary because I had run some BLAST queries preiminarily in development. Normally,
# it would just be the `.re.fa` at the outset. (But keeping because removal ability could be useful.)
fn_to_check = "genome.fa"
genomes = []
import os
import fnmatch
for file in os.listdir('.'):
if fnmatch.fnmatch(file, '*'+fn_to_check):
if not file.endswith(".nhr") and not file.endswith(".nin") and not file.endswith(".nsq") :
genomes.append(file)
for file in os.listdir('GENOMES_ASSEMBLED'):
if fnmatch.fnmatch(file, '*'+".re.fa"):
if not file.endswith(".nhr") and not file.endswith(".nin") and not file.endswith(".nsq") :
genomes.append("GENOMES_ASSEMBLED/"+file)
genomes
```
Now you are prepared to analyze each genome.
## Assessing the genomes in regards to stretches of N base calls
### Total number of Ns present
```
from pyfaidx import Fasta

# Scan every genome for the query character; 'N' marks ambiguous base calls /
# assembly gaps. (Swap in the commented-out queries to check for 'X', or to
# run a positive control with 'A'.)
query = "N"
#query = "X"
#query = "A" # CONTROL. should occur!
genomes_with_ambiguous = [] # This to be used for reporting when amount to analyze starts
# to get large
cut_off_for_printing_status_each = 100  # above this many genomes, skip per-genome prints
for g in genomes:
    chrs = Fasta(g)
    ambiguous = []  # names of records in this genome that contain `query`
    for x in chrs:
        #print(x.name)
        if query in str(x):
            ambiguous.append(x.name)
    if ambiguous:
        genomes_with_ambiguous.append(g)
        #print(ambiguous)
    if len(genomes) < cut_off_for_printing_status_each:
        if not ambiguous:
            print ("No ambiguous nucleotides or gaps in {}.".format(g))
        else:
            print("There are occurrences of '{}' in {}.".format(query,g))
# Summary reporting. FIX: previously the `else` was attached to the combined
# condition `genomes_with_ambiguous and len(genomes) >= cut_off...`, so with a
# small genome list the "none found" message printed even when ambiguous
# genomes HAD been found. Splitting the two conditions fixes that.
# (Also corrected the "occurences" typos in the printed messages.)
if genomes_with_ambiguous:
    if len(genomes) >= cut_off_for_printing_status_each:
        print("There are occurrences of '{}' in {} genomes.".format(query,len(genomes_with_ambiguous)))
        if len(genomes_with_ambiguous) < 250:
            print("The following are the ones with '{}':\n{}.".format(query,genomes_with_ambiguous))
        else:
            print("That is a lot, and so they won't be listed here.\nAnalyses below will reveal more specifics.")
else:
    print ("No ambiguous nucleotides or gaps were found in any of the genomes.")
```
Another way to assess, just count all the letters present:
```
%%time
from pyfaidx import Fasta
import pandas as pd
import collections
# Tally every letter (nucleotide code) present in each genome, so unusual
# characters beyond A/G/T/C/N can be spotted and '% N' computed below.
nt_counts = {}
for g in genomes:
    # Derive a short strain id from the file name. For files under the
    # GENOMES_ASSEMBLED directory, `[18:]` strips the leading
    # "GENOMES_ASSEMBLED/" path prefix (18 characters).
    if ".genome.fa" in g:
        strain_id = g.split(".genome.fa")[0]
    else:
        strain_id = g.split(".re.fa")[0][18:]
    concatenated_seqs = ""
    chrs = Fasta(g)
    for x in chrs:
        #print(x.name)
        concatenated_seqs += str(x)  # join all records into one string for counting
    nt_counts[strain_id] = collections.Counter(concatenated_seqs)
# Rows = strains, columns = observed letters; absent letters become 0.
nt_count_df = pd.DataFrame.from_dict(nt_counts, orient='index').fillna(0)
nt_count_df["Total_nts"] = nt_count_df.sum(1)  # row sum = genome length
def percent_calc(items):
    '''
    Take a sequence whose first two items are (part, total) and return
    part/total as a fraction in [0, 1] -- NOT scaled by 100. (The previous
    docstring said "percentage", which was misleading; rendering as a
    percent is handled later by the dataframe style formatter '{:.2%}'.)
    '''
    part, total = items[0], items[1]
    return part / total
# Fraction of Ns per genome; rendered as a percentage by the style format below.
nt_count_df['% N'] = nt_count_df[['N','Total_nts']].apply(percent_calc, axis=1)
# `ascending=[0,0]`: sort both '% N' and 'Total_nts' in descending order.
nt_count_df = nt_count_df.sort_values(['% N', 'Total_nts'],ascending=[0,0])
nt_count_df_styled = nt_count_df.style.format({'Total_nts':'{:.2E}','% N':'{:.2%}'})
# Save tab-separated and pickled copies for reuse later or elsewhere.
nt_count_df.to_csv('PB_n_1011_collection_nt_count_tiny.tsv', sep='\t',index = True)
nt_count_df.to_pickle("PB_n_1011_collection_nt_count_tiny.pkl")
nt_count_df_styled
```
In development, I was surprised at first to see so many letters that weren't 'A','G','T','C','N', or lowercase versions of those. However, the extras match up with ones identified in [PatMatch documentation](https://www.yeastgenome.org/nph-patmatch) for pattern syntax for nucleotides. And so I assume they refer to that here. But I was still wondering why a handful are lowercase?
As an example, this code below shows displaying an example where the `t` lowercase is seen.
```
jovyan@jupyter-fomightez-2dcl-5fsq-5fdemo-2dbinder-2d24azsfxa:~/notebooks/GSD$ cat GENOMES_ASSEMBLED/CEF_4.re.fa|grep "t"
TTCGTARGAYAGACTCWTTCCCGTGtAAATRTTTGTGACAGYTACGTCTATTTTCTACTM
AYAGACTCWTTCCCGTGtAAATRTTTGTGACAGYTACGTCTATTTTCTACTMKATRTTTA
CCCCGAAAGGAGAAATATAatATATATATATATAATATGCATCCTTATTATAATATTATT
AATTAAATTAAATTAAATTAAATTAAATTAAATTAaattaGATGTTCATTAAATAAAAAT
jovyan@jupyter-fomightez-2dcl-5fsq-5fdemo-2dbinder-2d24azsfxa:~/notebooks/GSD$
```
Why lowercase?
Seeing them in place doesn't spark many ideas as to why there'd be a few cases, except those that seem along with other lowercases letters in a block, such as 'aatta'. Sometimes lowercase is used for those where identity might be questionable because support limiting or stretch in which it occurs offers low information content.
The small nature of the block of lowercase though suggests maybe just comes from quality scores from the Illumina reads?
### Examining the number and size of stretches of Ns present
Make a dataframe of the data and also make a summary one with the say, top five(?) number instances, for each. (Idea not yet implemented, yet.)
```
%%time
# count frequency of blocks of Ns in the genomes
import re
from collections import defaultdict
from pyfaidx import Fasta
import pandas as pd
import collections
min_number_Ns_in_row_to_collect = 3
# The doubled braces produce a literal regex quantifier, e.g. "N{3,}", i.e.
# runs of at least `min_number_Ns_in_row_to_collect` Ns; `re.I` also matches
# lowercase 'n'. Adapted from
# code worked out in `collapse_large_unknown_blocks_in_DNA_sequence.py`, which relied heavily on
# https://stackoverflow.com/a/250306/8508004
pattern_obj = re.compile("N{{{},}}".format(min_number_Ns_in_row_to_collect), re.I)
# Maps strain id -> {stretch length -> number of occurrences}.
len_match_dict_by_strain = {}
#genomes = ["N44.genome.fa"]
for g in genomes:
    len_match_dict = defaultdict(int)
    # Strain id from file name; `[18:]` strips the "GENOMES_ASSEMBLED/" prefix.
    if ".genome.fa" in g:
        strain_id = g.split(".genome.fa")[0]
    else:
        strain_id = g.split(".re.fa")[0][18:]
    records = Fasta(g)
    for record in records:
        for m in pattern_obj.finditer(str(record)):
            len_match_dict[len(m.group())] += 1
    #before passing final dictionary for strain to
    #collection, add an entry of zero for size of 1 so the
    #strain with no stretches will be in final dataframe
    if not len_match_dict:
        len_match_dict[1]=0
    len_match_dict_by_strain[strain_id] = len_match_dict
#stretches_size_freq_df = pd.DataFrame.from_dict(len_match_dict_by_strain, orient='index').fillna(0)
#stretches_size_freq_df = pd.DataFrame.from_dict(len_match_dict_by_strain).fillna(0).stack().reset_index() # if wanted all stretches of size to show count even if none present
stretches_size_freq_df = pd.DataFrame.from_dict(len_match_dict_by_strain).stack().reset_index()
stretches_size_freq_df.columns = ['stretch_size','strain','#_instances']
stretches_size_freq_df = stretches_size_freq_df[['strain','stretch_size','#_instances']]
# `ascending=[0,0]`: largest stretches / highest counts first.
stretches_size_freq_df = stretches_size_freq_df.sort_values(['stretch_size','#_instances'],ascending=[0,0]).reset_index(drop=True)
```
Because of use of `%%time` in above cell, easiest to display resulting dataframe using next cell.
```
stretches_size_freq_df
# Drop the huge stretches (>17000; the PacBio rDNA masking) and the
# zero-count placeholder rows so the remaining data plot on a useful scale.
stretches_size_freq_df = stretches_size_freq_df.drop(stretches_size_freq_df[(stretches_size_freq_df["stretch_size"] > 17000) | (stretches_size_freq_df["#_instances"] == 0.0)].index)
#df = df.drop(df[(df.score < 50) & (df.score > 20)].index)
stretches_size_freq_df
```
That can then be plotted for visualization.
```
%matplotlib inline
# above line works for JupyterLab which I was developing in. Try `%matplotlib notebook` for when in classic.
# Visualization
import seaborn as sns
sns.set() # from https://seaborn.pydata.org/examples/scatterplot_sizes.html; seems to restore style if it gets altered
p = sns.scatterplot(x="stretch_size", y="#_instances", hue="strain",data=stretches_size_freq_df)
# Set the scale of the x-and y-axes
p.set(xscale="log", yscale="log");
#p.get_legend().set_visible(False);
```
Starting to go towards the representation seen [here in Figure 1](https://www.researchgate.net/publication/12094921_Computational_comparison_of_two_draft_sequences_of_the_human_genome). For example, you can see the banding caused by using the log scale for the 'number of instances' axis. However, as there aren't that many, nor that wide a range of, stretches of N among the 1011 collection (at least from what I have seen in development), I don't think this path is going to yield a nice visual for the 1011 *cerevisiae genomes* collection.
### Examining the number and size of stretches of Ns present for the 1011 collection (Different tack)
Because of the extremes of the PacBio and the reference sequence, even if you just concern yourself with 'stretch_size', the data don't visualize well with those from the 1011 collection. That can be summarized best with a plot by running the next cell. (Code reduced to the minimum here but fleshed out later in this section.)
```
%matplotlib inline
import seaborn as sns
sns.set()
# collect blocks of Ns in the genomes
import re
from collections import defaultdict
from pyfaidx import Fasta
import pandas as pd
import collections
min_number_Ns_in_row_to_collect = 3
# Doubled braces give a literal quantifier, e.g. "N{3,}"; re.I matches 'n' too.
pattern_obj = re.compile("N{{{},}}".format(min_number_Ns_in_row_to_collect), re.I)
# One (strain, stretch length) tuple per matched run of Ns.
matches = []
for g in genomes:
    len_match_dict = defaultdict(int)  # NOTE(review): unused in this cell; leftover from the counting version
    if ".genome.fa" in g:
        strain_id = g.split(".genome.fa")[0]
    else:
        strain_id = g.split(".re.fa")[0][18:]  # strips the "GENOMES_ASSEMBLED/" prefix
    records = Fasta(g)
    for record in records:
        for m in pattern_obj.finditer(str(record)):
            matches.append((strain_id,len(m.group())))
# The reference has no N stretches; add a zero entry so it shows in the dataframe.
matches.append(("SGD_ref",0))
labels = ['strain', 'stretch_size']
stretch_df = pd.DataFrame.from_records(matches, columns=labels)
# Histogram of all stretch sizes (no density curve; rug marks raw points).
sns.distplot(stretch_df["stretch_size"], kde=False, rug=True);
```
See how most stuff is squashed all on low end relative the PacBio sequences at around 17500.
For visualization of the 1011 collection data adequately, probably best to separate out PacBio and SGD references and start treating strains as category and let Seaborn handle aggregating the data, as it does this so well. So we need a simpler dataframe than one in above section, one for each instance of a stretch of Ns found.
```
%%time
# collect blocks of Ns in the genomes
import re
from collections import defaultdict
from pyfaidx import Fasta
import pandas as pd
import collections
min_number_Ns_in_row_to_collect = 3
# Doubled braces give a literal quantifier, e.g. "N{3,}"; re.I matches
# lowercase 'n' too. Adapted from
# code worked out in `collapse_large_unknown_blocks_in_DNA_sequence.py`, which relied heavily on
# https://stackoverflow.com/a/250306/8508004
pattern_obj = re.compile("N{{{},}}".format(min_number_Ns_in_row_to_collect), re.I)
# One (strain, stretch length) tuple per matched run of Ns.
matches = []
#genomes = ["N44.genome.fa"]
for g in genomes:
    len_match_dict = defaultdict(int)  # NOTE(review): unused in this cell; leftover from the counting version
    if ".genome.fa" in g:
        strain_id = g.split(".genome.fa")[0]
    else:
        strain_id = g.split(".re.fa")[0][18:]  # strips the "GENOMES_ASSEMBLED/" prefix
    records = Fasta(g)
    for record in records:
        for m in pattern_obj.finditer(str(record)):
            matches.append((strain_id,len(m.group())))
#before using list of matches to make a dataframe, add
# entry for reference sequence that has no actual instances to
#collection, add an entry of zero for size of 1 so the
#strain with no stretches will be in final dataframe
matches.append(("SGD_ref",0))
#make a dataframe from the list
labels = ['strain', 'stretch_size']
stretch_df = pd.DataFrame.from_records(matches, columns=labels)
# Note that later, when trying to compare the distribution to the entire sequence set to what I see
# from just the collected mitochondrial genomes sequences, I realized I had not included a sort for the `stretch_df`
# and when there is a lot of data they come out ranging all over the place. The next command adds that. (Adding it
# here only to be consistent when the number of isolates analyzed increases in the 'small' and 'all' versions of
# this notebook. Just by chance this had come out for those from the 1011 set as if sorted and so no change will
# be really noticeable with this tiny set.)
# `ascending=[0]` means descending, i.e., largest stretches first.
stretch_df = stretch_df.sort_values(['stretch_size'],ascending=[0]).reset_index(drop=True)
stretch_df
len(stretch_df)
# Save tab-separated and pickled copies for reuse later or elsewhere.
stretch_df.to_csv('PB_n_1011_collection_all_stretchesN_df.tsv', sep='\t',index = False)
stretch_df.to_pickle("PB_n_1011_collection_all_stretchesN_df.pkl")
```
(If running on that whole set, store those files so you can read them in later, or elsewhere, as needed.)
```
%matplotlib inline
# above line works for JupyterLab which I was developing in. Try `%matplotlib notebook` for when in classic.
# Visualization
import seaborn as sns
sns.set()
p = sns.stripplot(x="stretch_size", y="strain", data=stretch_df);
# Set the scale of the x-and y-axes
p.set(xscale="log");
#p.get_legend().set_visible(False);
#p = sns.swarmplot(x="stretch_size", y="strain", data=df);
```
It is at least showing the rDNA masking in the PacBio data well.
But if you try swarmplot, you'll see it isn't showing the distribution well for those in the 1011 collection, as even with just these few there are too many points, it seems.
```
%matplotlib inline
# above line works for JupyterLab which I was developing in. Try `%matplotlib notebook` for when in classic.
# Visualization: swarmplot dodges points instead of overplotting them, which
# shows the distribution better -- until there are too many points per category.
import seaborn as sns
sns.set()
p = sns.swarmplot(x="stretch_size", y="strain", data=stretch_df);
# Set the scale of the x-and y-axes
p.set(xscale="log");
#p.get_legend().set_visible(False);
#p = sns.swarmplot(x="stretch_size", y="strain", data=df);
```
Can see that by limiting to the two (or three if included 'CDH_3', as I generally tried to do) from the 1011 collection.
```
# Remove the extreme PacBio stretches (>17000) and the SGD reference row so
# the remaining 1011-collection strains can be visualized on a common scale.
stretch_df = stretch_df.drop(stretch_df[(stretch_df["stretch_size"] > 17000) | (stretch_df["strain"] == "SGD_ref")].index)
# Later by alternatively running the code above instead of below & counting length of df, I confirmed that the above line doesn't remove any from the
# 1011 collection and that `(stretch_df["strain"] == "SGD_ref")` and `(stretch_df["stretch_size"] == 0)` result in the same set. <--Turned
# out not the case when dealing with just derived mito data. Went with more explicit command now and moved original to next line.
#stretch_df = stretch_df.drop(stretch_df[(stretch_df["stretch_size"] > 17000) | (stretch_df["stretch_size"] == 0)].index)
%matplotlib inline
# above line works for JupyterLab which I was developing in. Try `%matplotlib notebook` for when in classic.
# Visualization
import seaborn as sns
sns.set()
p = sns.swarmplot(x="stretch_size", y="strain", data=stretch_df);
# Set the scale of the x-and y-axes
p.set(xscale="log");
#p.get_legend().set_visible(False);
#p = sns.swarmplot(x="stretch_size", y="strain", data=df);
```
There are many more points at 50 than can be handled, so we get artifacts like those discussed [here](https://stackoverflow.com/questions/50817607/variable-spacing-of-discrete-categories-in-seaborn-swarmplot). It can be made better by increasing the height, although this still seems to happen no matter how tall the figure is finally made, as even the use of `plt.figure(figsize=(8,13))` causes curling.
```
%matplotlib inline
# above line works for JupyterLab which I was developing in. Try `%matplotlib notebook` for when in classic.
import matplotlib.pyplot as plt
# A taller figure gives the swarmplot more room to dodge points, though some
# 'curling' artifacts remain no matter the height.
plt.figure(figsize=(8,13))
# Visualization
import seaborn as sns
sns.set()
p = sns.swarmplot(x="stretch_size", y="strain", data=stretch_df);
# Set the scale of the x-and y-axes
p.set(xscale="log");
#p.get_legend().set_visible(False);
#p = sns.swarmplot(x="stretch_size", y="strain", data=df);
%matplotlib inline
# above line works for JupyterLab which I was developing in. Try `%matplotlib notebook` for when in classic.
# Visualization
import seaborn as sns
sns.set()
# Drop the extreme (>17000) and zero-size placeholder rows before plotting.
stretch_df = stretch_df.drop(stretch_df[(stretch_df["stretch_size"] > 17000) | (stretch_df["stretch_size"] == 0)].index)
# Histogram (no density curve); rug marks the individual data points.
sns.distplot(stretch_df["stretch_size"], kde=False, rug=True);
```
Nicer, but what pertains to which strain?
```
%matplotlib inline
# above line works for JupyterLab which I was developing in. Try `%matplotlib notebook` for when in classic.
# Visualization - based on https://towardsdatascience.com/histograms-and-density-plots-in-python-f6bda88f5ac0
import matplotlib.pyplot as plt
import seaborn as sns
sns.set()
#Limit to subset, if not set already
stretch_df = stretch_df.drop(stretch_df[(stretch_df["stretch_size"] > 17000) | (stretch_df["stretch_size"] == 0)].index)
# List of strains to plot
strains = set(stretch_df["strain"].tolist())
# Iterate through the strains
for strain in strains:
# Subset to the strain
subset = stretch_df[stretch_df['strain'] == strain]
# Draw the density plot
sns.distplot(subset['stretch_size'], hist = False, kde = True,# rug = True,
kde_kws = {'shade': True, 'linewidth': 3},
#rug_kws={'color': 'black'},
label = strain)
# Plot formatting
plt.legend(prop={'size': 16}, title = 'Strain')
plt.title('Density Plot with strains')
plt.xlabel('Stretch size (base)')
plt.ylabel('Density');
```
As discussed at the end of [this article](https://towardsdatascience.com/histograms-and-density-plots-in-python-f6bda88f5ac0), uncomment the two portions dealing with `rug` to see the actual data points. I don't leave them on here by default because there are many and you cannot see which points belong to which strain.
While this is nice, it will be difficult once I add a lot more in order to do anything with the 1011 collection or even a subset. Alternatives? [Raincloud plots](https://nbviewer.jupyter.org/github/pog87/PtitPrince/blob/master/RainCloud_Plot.ipynb) look promising for this situation.
```
!pip install ptitprince
import seaborn as sns
import matplotlib.pyplot as plt
#sns.set(style="darkgrid")
#sns.set(style="whitegrid")
#sns.set_style("white")
sns.set(style="whitegrid",font_scale=2)
#Limit to subset, if not set already
stretch_df = stretch_df.drop(stretch_df[(stretch_df["stretch_size"] > 17000) | (stretch_df["stretch_size"] == 0)].index)
#Want an image file of the figure saved?
savefigs = True
savefig_fn = 'n_stetch_distrbution_raincloud_tiny_subset.png'
f, ax = plt.subplots(figsize=(12, 8))
import ptitprince as pt
# Horizontal orientation shared by all three layered plots below.
ort = "h"
# Layer 1: half-violin ('cloud') showing the density of stretch sizes per strain.
ax=pt.half_violinplot(data=stretch_df, palette="Set2", bw=.2, linewidth=1,cut=0.,\
scale="area", width=1.5, inner=None,orient=ort,x='stretch_size', y="strain")
# Layer 2: the raw data points ('rain').
ax=sns.stripplot(data=stretch_df, palette="Set2", edgecolor="white",size=2,orient=ort,x='stretch_size',y="strain",jitter=1,zorder=0)
# Layer 3: a narrow boxplot summarizing each strain's distribution.
ax=sns.boxplot(data=stretch_df, color="black",orient=ort,width=.15,x='stretch_size',y="strain",zorder=10,showcaps=True,boxprops={'facecolor':'none', "zorder":10},showfliers=True,whiskerprops={'linewidth':2, "zorder":10},saturation=1)
# Finalize the figure
#ax.set(xlim=(0,1000)) #This can be used to 'zoom in', if you accept, i.e., note in the legend, that anything with a
# line continuing off means there are more points off the scale. Could indicating symbols be added like I did in
# plot expression plots?
#ax.set(ylim=(3.5, -.7)) # this can be used to actually plot fractions of them 'kind of'. `ax.set(ylim=(3.5, -.7))` results
# in plots of four, although they are spaced out.
sns.despine(left=True)
if savefigs:
    f.savefig(savefig_fn, bbox_inches='tight')
```
(See the 'small' or 'all' version of this notebook for some notes on settings within the code. Notes are within the code itself as well as in the markdown cell below the plot.)
After getting that raincloud plot to work, I tried with the group that included the PacBio set and SGD_ref, but the compression of the vast distances causes same issues as seen elsewhere above. For the record, here is an image of it:
```
# Display via code because the image then gets encoded as Base64 and is
# saved with the notebook.
from IPython.display import Image
img_path = "../../imgs/raincloud_suffers_same_fate.png"
Image(img_path)
```
----
```
import time
def executeSomething():
    # Keep-alive heartbeat: print a dot, then sleep before the next beat.
    #code here
    print ('.')
    time.sleep(480) # 480 seconds = 60 seconds times 8, i.e., 8 minutes
# Loop forever so the notebook session is not shut down for inactivity.
while True:
    executeSomething()
```
| github_jupyter |
# Part 2 - Plotting element cross sections
As shown in Part 1, OpenMC is able to plot neutron interaction cross sections for specific isotopes. However, we can also do the same for elements.
This python notebook allows users to plot neutron interaction cross sections for specific elements using OpenMC.
To plot elemental cross sections, the cross sections of each stable isotope of the element are combined.
```
from IPython.display import HTML
# Embed the walk-through video for this notebook.
video_embed = '<iframe width="560" height="315" src="https://www.youtube.com/embed/ELZNeIdSuMY" frameborder="0" allow="accelerometer; autoplay; clipboard-write; encrypted-media; gyroscope; picture-in-picture" allowfullscreen></iframe>'
HTML(video_embed)
```
This first code block plots the (n,2n) neutron multiplication cross section for all elements.
```
import plotly.graph_objects as go
# the create plot function contains all the OpenMC routines for accessing the cross sections
from plotting_utils import create_element_plot
# Elements whose (n,2n) neutron-multiplication cross sections can be plotted.
elements_of_interest = [
    'Ag', 'Al', 'Ar', 'As', 'Au', 'B', 'Ba', 'Be', 'Bi', 'Br', 'C', 'Ca', 'Cd', 'Ce', 'Cl',
    'Co', 'Cr', 'Cs', 'Cu', 'Dy', 'Er', 'Eu', 'F', 'Fe', 'Ga', 'Gd', 'Ge', 'H', 'He', 'Hf',
    'Hg', 'Ho', 'I', 'In', 'Ir', 'K', 'Kr', 'La', 'Li', 'Lu', 'Mg', 'Mn', 'Mo', 'N', 'Na',
    'Nb', 'Nd', 'Ni', 'O', 'P', 'Pa', 'Pb', 'Pd', 'Po', 'Pr', 'Rb',
    'Re', 'Rh', 'Rn', 'Ru', 'Sb', 'Sc', 'Se', 'Si', 'Sm', 'Sn', 'Sr', 'Ta', 'Tb',
    'Te', 'Th', 'Ti', 'Tl', 'Tm', 'U', 'V', 'W', 'Xe', 'Y', 'Zn', 'Zr'
]
reaction_of_interest = '(n,2n)'
# we could plot all the elements but that would take a long time so we just plot the first 15
number_of_elements_to_plot = 15
create_element_plot(
    elements=elements_of_interest[:number_of_elements_to_plot],
    reaction=reaction_of_interest
)
```
Tritium production is another important reaction in fusion as it affects the rate at which tritium can be bred. When designing breeder blankets we need to use materials which maximise both neutron multiplication AND tritium production.
The next code block plots the (n,Xt) tritium production reaction for all elements.
```
# Elements for the (n,Xt) tritium-production plot; 'Li' is listed first so it
# is included among the 15 elements plotted below.
elements_of_interest = [
    'Li', 'Ag', 'Al', 'Ar', 'As', 'Au', 'B', 'Ba', 'Be', 'Bi', 'Br', 'C', 'Ca', 'Cd', 'Ce', 'Cl',
    'Co', 'Cr', 'Cs', 'Cu', 'Dy', 'Er', 'Eu', 'F', 'Fe', 'Ga', 'Gd', 'Ge', 'H', 'He', 'Hf',
    'Hg', 'Ho', 'I', 'In', 'Ir', 'K', 'Kr', 'La', 'Lu', 'Mg', 'Mn', 'Mo', 'N', 'Na',
    'Nb', 'Nd', 'Ni', 'O', 'P', 'Pa', 'Pb', 'Pd', 'Po', 'Pr', 'Rb',
    'Re', 'Rh', 'Rn', 'Ru', 'Sb', 'Sc', 'Se', 'Si', 'Sm', 'Sn', 'Sr', 'Ta', 'Tb',
    'Te', 'Th', 'Ti', 'Tl', 'Tm', 'U', 'V', 'W', 'Xe', 'Y', 'Zn', 'Zr'
]
reaction_of_interest = '(n,Xt)' # The X is a wild card / catch all
# we could plot all the elements but that would take a long time so we just plot the first 15
number_of_elements_to_plot = 15
create_element_plot(
    elements=elements_of_interest[:number_of_elements_to_plot],
    reaction=reaction_of_interest
)
```
Lithium is the typical candidate tritium breeder material used in D-T fusion reactor designs.
The graph shows that Lithium has a high (n,Xt) cross section for low energy neutrons which decreases as neutron energy increases.
**Learning Outcomes for Part 2:**
- OpenMC can be used to plot interaction cross sections for specific elements.
- Tritium production is an important reaction to consider when selecting a breeder material.
- Lithium is a good material for tritium production.
| github_jupyter |
```
# Copyright 2021 Google LLC
# Use of this source code is governed by an MIT-style
# license that can be found in the LICENSE file or at
# https://opensource.org/licenses/MIT.
# Author(s): Kevin P. Murphy (murphyk@gmail.com) and Mahmoud Soliman (mjs@aucegypt.edu)
```
<a href="https://opensource.org/licenses/MIT" target="_parent"><img src="https://img.shields.io/github/license/probml/pyprobml"/></a>
<a href="https://colab.research.google.com/github/probml/pyprobml/blob/master/notebooks/figures//chapter9_figures.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# Cloning the pyprobml repo
```
# Fetch the pyprobml repository and change into its scripts directory.
!git clone https://github.com/probml/pyprobml
%cd pyprobml/scripts
```
# Installing required software (This may take few minutes)
```
# Install Octave (used below to run the pmtk3 MATLAB/Octave demo scripts).
!apt-get install octave -qq > /dev/null
!apt-get install liboctave-dev -qq > /dev/null
%%capture
%load_ext autoreload
%autoreload 2
# Warning banner prepended to scripts edited in the (ephemeral) Colab VM.
DISCLAIMER = 'WARNING : Editing in VM - changes lost after reboot!!'
from google.colab import files
def interactive_script(script, i=True):
    # Optionally open `script` in the Colab file viewer, then run it.
    # When `i` is true, the disclaimer header is prepended to the file (if not
    # already its first line) before viewing and running; otherwise just run.
    if i:
        s = open(script).read()
        if not s.split('\n', 1)[0]=="## "+DISCLAIMER:
            open(script, 'w').write(
                f'## {DISCLAIMER}\n' + '#' * (len(DISCLAIMER) + 3) + '\n\n' + s)
        files.view(script)
        %run $script
    else:
        %run $script
def show_image(img_path):
    # Load an image from disk, resize it to 600x600, and display it inline.
    from google.colab.patches import cv2_imshow
    import cv2
    img = cv2.imread(img_path, cv2.IMREAD_UNCHANGED)
    img=cv2.resize(img,(600,600))
    cv2_imshow(img)
```
## Figure 9.1:<a name='9.1'></a> <a name='fig:gda2d'></a>
(a) Some 2d data from 3 different classes. (b) Fitting 2d Gaussians to each class.
Figure(s) generated by [discrim_analysis_dboundaries_plot2.py](https://github.com/probml/pyprobml/blob/master/scripts/discrim_analysis_dboundaries_plot2.py)
```
# Generate Figure 9.1 (2d data from 3 classes and fitted 2d Gaussians).
interactive_script("discrim_analysis_dboundaries_plot2.py")
```
## Figure 9.2:<a name='9.2'></a> <a name='fig:LDA'></a>
Gaussian discriminant analysis fit to data in \cref fig:gda2d . (a) Unconstrained covariances induce quadratic decision boundaries. (b) Tied covariances induce linear decision boundaries.
Figure(s) generated by [discrim_analysis_dboundaries_plot2.py](https://github.com/probml/pyprobml/blob/master/scripts/discrim_analysis_dboundaries_plot2.py)
```
# Generate Figure 9.2 (GDA decision boundaries; same script as Figure 9.1).
interactive_script("discrim_analysis_dboundaries_plot2.py")
```
## Figure 9.3:<a name='9.3'></a> <a name='ldaGeom'></a>
Geometry of LDA in the 2 class case where $\boldsymbol \Sigma _1=\boldsymbol \Sigma _2=\mathbf I $.
```
# Display the pre-rendered Figure 9.3 image.
# NOTE(review): the literal braces in "{ldaGeom}" look like an unexpanded
# template placeholder -- confirm the file really has braces in its name.
show_image("/content/pyprobml/notebooks/figures/images/{ldaGeom}.png")
```
## Figure 9.4:<a name='9.4'></a> <a name='fisher'></a>
Example of Fisher's linear discriminant applied to data in 2d drawn from two classes. Dashed green line = first principal basis vector. Dotted red line = Fisher's linear discriminant vector. Solid black line joins the class-conditional means.
Figure(s) generated by [fisherLDAdemo.m](https://github.com/probml/pmtk3/blob/master/demos/fisherLDAdemo.m)
```
# Run the Octave demo for Figure 9.4; `>> _` appends console output to a scratch file.
!octave -W fisherLDAdemo.m >> _
```
## Figure 9.5:<a name='9.5'></a> <a name='fisherProj'></a>
Example of Fisher's linear discriminant. (a) Projection of points onto Fisher's vector in \cref fig:fisher shows good class separation. (b) Projection of points onto PCA vector in \cref fig:fisher shows poor class separation.
Figure(s) generated by [fisherLDAdemo.m](https://github.com/probml/pmtk3/blob/master/demos/fisherLDAdemo.m)
```
# Re-run the same Octave demo for Figure 9.5 (projections onto FLDA vs PCA vectors).
!octave -W fisherLDAdemo.m >> _
```
## Figure 9.6:<a name='9.6'></a> <a name='fisherVowel'></a>
(a) PCA projection of vowel data to 2d. (b) FLDA projection of vowel data to 2d. We see there is better class separation in the FLDA case. Adapted from Figure 4.11 of <a href='#HastieBook'>[HTF09]</a> .
Figure(s) generated by [fisherDiscrimVowelDemo.m](https://github.com/probml/pmtk3/blob/master/demos/fisherDiscrimVowelDemo.m)
```
# Run the Octave demo for Figure 9.6 (PCA vs FLDA projections of vowel data).
!octave -W fisherDiscrimVowelDemo.m >> _
```
## Figure 9.7:<a name='9.7'></a> <a name='NBclassCond'></a>
Class conditional densities $p(x_d=1|y=c)$ for two classes, corresponding to ``X windows'' and ``MS windows'', derived from a bag-of-words representation of some email documents, using a vocabulary of 600 words. The big spike at index 107 corresponds to the word ``subject'', which occurs in both classes with probability 1.
Figure(s) generated by [naiveBayesBowDemo.m](https://github.com/probml/pmtk3/blob/master/demos/naiveBayesBowDemo.m)
```
# Run the Octave demo for Figure 9.7 (naive Bayes bag-of-words class conditionals).
!octave -W naiveBayesBowDemo.m >> _
```
## Figure 9.8:<a name='9.8'></a> <a name='genVsDiscrim'></a>
The class-conditional densities $p(x|y=c)$ (left) may be more complex than the class posteriors $p(y=c|x)$ (right). Adapted from Figure 1.27 of <a href='#BishopBook'>[Bis06]</a> .
Figure(s) generated by [generativeVsDiscrim.m](https://github.com/probml/pmtk3/blob/master/demos/generativeVsDiscrim.m)
```
# Run the Octave demo for Figure 9.8 (class-conditional densities vs posteriors).
!octave -W generativeVsDiscrim.m >> _
```
## References:
<a name='BishopBook'>[Bis06]</a> C. Bishop "Pattern recognition and machine learning". (2006).
<a name='HastieBook'>[HTF09]</a> T. Hastie, R. Tibshirani and J. Friedman. "The Elements of Statistical Learning". (2009).
| github_jupyter |
# Tutorial: Die grundlegenden Werkzeuge des privatsphärenerhaltenden Deep Learning
Willkommen zum PySyfts Einführungs-Tutorial zum privatsphärenerhaltenden und dezentralisierten Deep Learning. Diese Reihe von Notebooks ist eine schrittweise Anleitung, mit der Sie die neuen Werkzeuge und Techniken kennenlernen können, die für das Deep Learning an geheimen oder privaten Daten und Modellen erforderlich sind, ohne sie dem Eigentum einer Instanz zu unterwerfen.
**Was wir behandeln werden**: Beachten Sie, dass wir nicht nur über das Dezentralisieren / Verschlüsseln von Daten sprechen, sondern auch darüber, wie PySyft verwendet werden kann, um das gesamte Ökosystem um Daten herum zu dezentralisieren, einschließlich der Datenbanken, in denen sich Daten befinden, gespeichert oder abgefragt werden, sowie die neuronalen Netzwerke, mit denen Informationen aus Daten extrahiert werden. Wenn neue Erweiterungen für PySyft erstellt werden, werden diese Notebooks um neue Tutorials erweitert, um die neuen Funktionen zu erläutern.
Autoren:
- Andrew Trask - Twitter: [@iamtrask](https://twitter.com/iamtrask)
Übersetzer:
- Vineet Jain - Github: [@vineetjai](https://github.com/vineetjai)
- Georgios Kaissis - Twitter: [@gkaissis](https://twitter.com/gkaissis)
## Gliederung:
- Teil 1: Die grundlegenden Werkzeuge des privaten Deep Learning
## Warum dieses Tutorial machen?
1) Wettbewerbsvorteil für Ihre Karierre - In den letzten 20 Jahren hat die digitale Revolution die Daten in immer größeren Mengen zugänglicher gemacht, da analoge Prozesse digitalisiert wurden. Mit neuen Regelungen wie z.B. der [Datenschutzgrundverordnung](https://eugdpr.org/), stehen Unternehmen zunehmend unter Einschränkungen Ihrer Freiheit bei der Verwendung - und vor allem bei der Analyse - persönlicher Informationen. Fazit: Datenwissenschafftler werden mit "Old School" -Werkzeugen nicht mehr auf so viele Daten zugreifen können, aber durch das Erlernen der Tools des privatspährenerhaltenden Deep Learning können SIE dieser Kurve voraus sein und einen Wettbewerbsvorteil für Ihre Karriere erzielen.
2) Unternehmerische Möglichkeiten - Es gibt eine ganze Reihe von Problemen in der Gesellschaft, die Deep Learning lösen kann, aber viele der wichtigsten wurden noch nicht untersucht, da dies den Zugriff auf sensible Informationen über Menschen erfordern würde (erwägen Sie die Verwendung von Deep Learning, um Menschen mit mentalen oder Beziehungsproblemen zu helfen!). Das Erlernen von privatspährenerhaltendem Deep Learning eröffnet Ihnen somit eine Vielzahl neuer Startmöglichkeiten, die anderen ohne diese Tools bisher nicht zur Verfügung standen.
3) Sozialer Fortschritt - Deep Learning kann verwendet werden, um eine Vielzahl von Problemen in der realen Welt zu lösen, aber Deep Learning an persönlichen Informationen ist Deep Learning über Menschen, für Menschen. Das Erlernen des Deep Learning für Daten, die Sie nicht besitzen, ist mehr als eine berufliche oder unternehmerische Gelegenheit. Es ist die Gelegenheit, einige der persönlichsten und wichtigsten Probleme im Leben der Menschen zu lösen - und dies in großem Maßstab.
## Wie bekomme ich Bonuspunkte?
- Gib PySyft einen Stern auf GitHub! - https://github.com/OpenMined/PySyft
- Mach ein Youtube-Video, in dem dieses Notebook's unterrichtet wird!
... Ok, los geht's!
# Teil -1: Voraussetzungen
- Lernen Sie PyTorch - wenn nicht, nehmen Sie am http://fast.ai Kurs teil und kommen Sie zurück
- Lesen Sie das PySyft Framework Paper https://arxiv.org/pdf/1811.04017.pdf! Auf diese Weise erhalten Sie einen umfassenden Hintergrund zum Aufbau von PySyft, wodurch die Dinge sinnvoller werden.
# Teil 0: Setup
Zu Beginn müssen Sie sicherstellen, dass Sie die richtigen Dinge installiert haben. Gehen Sie dazu zum PySyft Readme und folgen Sie den Anweisungen zum Einrichten. Der einfachste Weg für die meisten ist:
- Installieren Sie Python 3.6 oder höher
- Installieren Sie PyTorch 1.3
- Clone PySyft (git clone https://github.com/OpenMined/PySyft.git)
- cd PySyft
- pip install -r pip-dep/requirements.txt
- pip install -r pip-dep/requirements_udacity.txt
- python setup.py install udacity
- python setup.py test
Wenn ein Teil davon für Sie nicht funktioniert (oder einer der Tests fehlschlägt), überprüfen Sie zuerst die [README](https://github.com/OpenMined/PySyft.git) -Installationshilfe und öffnen Sie dann ein GitHub-Problem oder schreiben Sie den #beginner Channel in unserem Slack! [slack.openmined.org](http://slack.openmined.org/)
```
# Run this cell to see if things work
import sys
import torch
from torch.nn import Parameter
import torch.nn as nn
import torch.nn.functional as F
import syft as sy
# Hook PyTorch so PySyft can extend tensors with remote-execution methods.
hook = sy.TorchHook(torch)
# Create a plain tensor -- if this displays, the setup is working.
torch.tensor([1,2,3,4,5])
Wenn diese Zelle ausgeführt wird, kann es losgehen!
# Teil 1: Die grundlegenden Werkzeuge der privaten, dezentralen Datenwissenschaft
Die erste Frage, die Sie sich vielleicht stellen, lautet: Wie trainieren wir ein Modell an Daten, auf die wir keinen Zugriff haben?
Die Antwort ist überraschend einfach. Wenn Sie es gewohnt sind, in PyTorch zu arbeiten, sind Sie es gewohnt, mit `torch.Tensor` Objekten zu arbeiten. Tensor-Objekte wie diese!
```
x = torch.tensor([1,2,3,4,5])
y = x + x
print(y)
```
Natürlich ist die Verwendung dieser Tensoren wichtig, erfordert aber auch, dass Sie die Daten auf Ihrem lokalen Computer haben. Hier beginnt unsere Reise.
# Abschnitt 1.1 - Senden von Tensoren an Bobs Maschine
Während wir normalerweise Data Science / Deep Learning auf der Maschine durchführen würden, die die Daten hat, möchten wir diese Art der Berechnung jetzt auf einer anderen Maschine durchführen. Insbesondere können wir also nicht mehr einfach davon ausgehen, dass sich die Daten auf unserem lokalen Computer befinden.
Anstatt PyTorch Tensoren zu verwenden, werden wir jetzt mit pointers zu Tensoren arbeiten. Lassen Sie uns zunächst eine "virtuelle" Maschine erstellen, die einer "virtuellen" Person gehört - wir nennen sie Bob.
```
bob = sy.VirtualWorker(hook, id="bob")
```
Nehmen wir an, Bobs Maschine befindet sich auf einem anderen Planeten - vielleicht auf dem Mars! Aber im Moment ist die Maschine leer. Lassen Sie uns einige Daten erstellen, damit wir sie an Bob senden und mehr über pointer erfahren können!
```
x = torch.tensor([1,2,3,4,5])
y = torch.tensor([1,1,1,1,1])
```
Und jetzt - lass uns unsere Tensoren an Bob schicken !!
```
x_ptr = x.send(bob)
y_ptr = y.send(bob)
x_ptr
```
BOOM! Jetzt hat Bob zwei Tensoren! Glauben Sie mir nicht? Überzeugen Sie sich selbst!
```
bob._objects
z = x_ptr + x_ptr
z
bob._objects
```
Jetzt bemerken wir: Als wir `x.send(bob)` aufriefen, gab es ein neues Objekt zurück, das wir `x_ptr` nannten. Dies ist unser erster pointer auf einen Tensor. Pointer auf Tensoren enthalten KEINE Daten. Stattdessen enthalten sie einfach Metadaten zu einem Tensor (der Daten enthält), die auf einem anderen Computer gespeichert sind. Der Zweck dieser Tensoren besteht darin, uns eine intuitive Benutzerumgebung zu geben, mit der die andere Maschine angewiesen wird, Funktionen mit diesem Tensor zu berechnen. Werfen wir einen Blick auf die Metadaten, die die pointer enthalten.
```
x_ptr
```
Schauen wir die Metadaten an!
Es gibt zwei Hauptattribute, die für pointer spezifisch sind:
- `x_ptr.location : bob`, der Ort, also ein Verweis auf den Ort, auf den der Zeiger zeigt
- `x_ptr.id_at_location : <random integer>`, die ID des Tensors an diesem Ort
Sie werden im Format "<id_at_location>@<location>" gedruckt
Es gibt auch andere allgemeinere Merkmale:
- `x_ptr.id : <random integer>`, die ID unseres pointer Tensors, zufällig zugewiesen
- `x_ptr.owner : "me"`, der Arbeiter, dem der Zeigertensor gehört, in diesem Fall ist es der lokale Arbeiter mit dem Namen "ich"
```
x_ptr.location
bob
bob==x_ptr.location
x_ptr.id_at_location
x_ptr.owner
```
Sie fragen sich vielleicht, warum der lokale Arbeiter (Worker), dem der Zeiger gehört, auch ein VirtualWorker ist, obwohl wir ihn nicht erstellt haben. Genau wie es ein VirtualWorker-Objekt für Bob gibt, gibt es (standardmäßig) immer auch eines für uns selbst. Dieser Worker wird automatisch erstellt, wenn wir `hook = sy.TorchHook()` aufrufen. Daher müssen Sie ihn normalerweise nicht selbst erstellen.
```
me = sy.local_worker
me
me == x_ptr.owner
```
Und schließlich können wir, genau wie wir `.send()` für einen Tensor aufrufen können, `.get()` für einen pointer auf einen Tensor aufrufen, um ihn zurückzubekommen !!!
```
x_ptr
x_ptr.get()
y_ptr
y_ptr.get()
z.get()
bob._objects
```
Und wie Sie sehen können ... hat Bob die Tensoren nicht mehr !!! Sie sind zurück auf unsere Maschine umgezogen!
# Abschnitt 1.2 - Verwenden von Tensor-pointern
Das Senden und Empfangen von Tensoren von Bob ist zwar großartig, aber das ist kaum Deep Learning! Wir möchten in der Lage sein, Tensorberechnungen an entlegenen Tensoren durchzuführen. Glücklicherweise machen Tensor-pointer dies ziemlich einfach! Sie können einfach diese pointer wie normale Tensoren verwenden!
```
x = torch.tensor([1,2,3,4,5]).send(bob)
y = torch.tensor([1,1,1,1,1]).send(bob)
z=x+y
z
```
Et voilà!
Hinter den Kulissen ist etwas interessantes passiert. Anstatt zwischen x und y eine lokale Addition zu berechnen, wurde der Befehl serialisiert und an Bob gesendet, der die Berechnung durchführte, einen Tensor z erstellte und dann den pointer auf z an uns zurückschickte!
Wenn wir `.get()` für den Zeiger aufrufen, erhalten wir das Ergebnis zurück an unsere Maschine!
```
z.get()
```
### Torch-Funktionen
Diese API wurde auf alle Operationen von torch erweitert !!!
```
x
y
z = torch.add(x,y)
z
z.get()
```
### Variablen (einschließlich Backpropagation!)
```
x = torch.tensor([1,2,3,4,5.], requires_grad=True).send(bob)
y = torch.tensor([1,1,1,1,1.], requires_grad=True).send(bob)
z = (x + y).sum()
z.backward()
x = x.get()
x
x.grad
```
Wie Sie sehen können, ist die API sehr flexibel und kann nahezu alle Vorgänge ausführen, die Sie normalerweise in Torch für Remote-Daten ausführen würden. Dies bildet die Grundlage für unsere erweiterten Protokolle zur Wahrung der Privatsphäre wie Federated Learning, Secure Multi-Party Computation und Differential Privacy!
# Herzlichen Glückwunsch!!! - Zeit, der Community beizutreten!
Herzlichen Glückwunsch zum Abschluss dieses Notebook-Tutorials! Wenn es Ihnen gefallen hat und Sie sich der Bewegung zur Wahrung der Privatsphäre, zum dezentralisierten Besitz von KI und der KI-Lieferkette (Daten) anschließen möchten, können Sie dies auf folgende Weise tun!
### PySyft auf GitHub einen Stern geben!
Der einfachste Weg, unserer Community zu helfen, besteht darin, die GitHub-Repos mit Sternen auszuzeichnen! Dies hilft, das Bewusstsein für die coolen Tools zu schärfen, die wir bauen.
- [Gib PySyft einen Stern](https://github.com/OpenMined/PySyft)
### Mach mit bei Slack!
Der beste Weg, um über die neuesten Entwicklungen auf dem Laufenden zu bleiben, ist, sich unserer Community anzuschließen! Sie können dies tun, indem Sie das Formular unter [http://slack.openmined.org](http://slack.openmined.org) ausfüllen.
### Treten Sie einem Code-Projekt bei!
Der beste Weg, um zu unserer Community beizutragen, besteht darin, Entwickler zu werden! Sie können jederzeit zur PySyft GitHub Issues-Seite gehen und nach "Projects" filtern. Dies zeigt Ihnen alle Top-Level-Tickets und gibt einen Überblick darüber, an welchen Projekten Sie teilnehmen können! Wenn Sie nicht an einem Projekt teilnehmen möchten, aber ein wenig programmieren möchten, können Sie auch nach weiteren "einmaligen" Miniprojekten suchen, indem Sie nach GitHub-Problemen suchen, die als "good first issue" gekennzeichnet sind.
- [PySyft Projects](https://github.com/OpenMined/PySyft/issues?q=is%3Aopen+is%3Aissue+label%3AProject)
- [Good First Issue Tickets](https://github.com/OpenMined/PySyft/issues?q=is%3Aopen+is%3Aissue+label%3A%22good+first+issue%22)
### Spenden
Wenn Sie keine Zeit haben, zu unserer Codebase beizutragen, aber dennoch Unterstützung leisten möchten, können Sie auch Unterstützer unseres Open Collective werden. Alle Spenden fließen in unser Webhosting und andere Community-Ausgaben wie Hackathons und Meetups!
[OpenMined's Open Collective Page](https://opencollective.com/openmined)
| github_jupyter |
```
import rocksdb
db = rocksdb.DB("out/test.db", rocksdb.Options(create_if_missing=True))
db.put(b'a', b'data')
print(db.get(b'a'))
import os
os.path.basename('out/test.db')
# The following example python merge operator implements a counter
class AssocCounter(rocksdb.interfaces.AssociativeMergeOperator):
    """Associative merge operator that treats values as ASCII integers and sums them."""

    def merge(self, key, existing_value, value):
        # First write for this key: the incoming value becomes the counter.
        if not existing_value:
            return (True, value)
        total = int(existing_value) + int(value)
        return (True, str(total).encode('ascii'))

    def name(self):
        return b'AssocCounter'
opts = rocksdb.Options()
opts.create_if_missing = True
opts.merge_operator = AssocCounter()
db = rocksdb.DB('out/test_merge.db', opts)
db.merge(b"a", b"1")
db.merge(b"a", b"1")
# prints b'2'
print(db.get(b"a"))
import gc
del db
gc.collect()
# We provide a set of default operators rocksdb.merge_operators.UintAddOperator and rocksdb.merge_operators.StringAppendOperator:
from rocksdb.merge_operators import UintAddOperator, StringAppendOperator
import struct
opts = rocksdb.Options()
opts.create_if_missing = True
# you should also play with StringAppendOperator
opts.merge_operator = UintAddOperator()
db = rocksdb.DB('out/test_uint.db', opts)
db.put(b'a', struct.pack('Q', 5566))
for x in range(1000):
db.merge(b"a", struct.pack('Q', x))
# self.assertEqual(5566 + sum(range(1000)), struct.unpack('Q', self.db.get(b'a'))[0])
ret=struct.unpack('Q', db.get(b'a'))[0]
print(ret)
print(5566 + sum(range(1000)))
del db
gc.collect()
```
## PrefixExtractor
According to Prefix API a prefix_extractor can reduce IO for scans within a prefix range. A python prefix extractor must implement the rocksdb.interfaces.SliceTransform interface.
The following example presents a prefix extractor of a static size. So always the first 5 bytes are used as the prefix
```
import rocksdb
import gc
from itertools import takewhile
class StaticPrefix(rocksdb.interfaces.SliceTransform):
    """Prefix extractor with a fixed width: the first five bytes of a key."""

    def name(self):
        return b'static'

    def transform(self, src):
        # The prefix is the byte range [0, 5) of the key.
        return (0, 5)

    def in_domain(self, src):
        # Only keys at least five bytes long have a prefix.
        return len(src) >= 5

    def in_range(self, dst):
        # A valid prefix is exactly five bytes.
        return len(dst) == 5
opts = rocksdb.Options()
opts.create_if_missing=True
opts.prefix_extractor = StaticPrefix()
db = rocksdb.DB('out/test_prefix.db', opts)
db.put(b'00001.x', b'x')
db.put(b'00001.y', b'y')
db.put(b'00001.z', b'z')
db.put(b'00002.x', b'x')
db.put(b'00002.y', b'y')
db.put(b'00002.z', b'z')
db.put(b'00003.x', b'x')
db.put(b'00003.y', b'y')
db.put(b'00003.z', b'z')
prefix = b'00002'
it = db.iteritems()
it.seek(prefix)
# prints {b'00002.z': b'z', b'00002.y': b'y', b'00002.x': b'x'}
print(dict(takewhile(lambda item: item[0].startswith(prefix), it)))
db.put(b'00003.z', b'z')
# close
del db
gc.collect()
for e in (takewhile(lambda x: x<5, [1,4,6,4,1])):
print(e)
```
⊕ [7.3. struct — Interpret strings as packed binary data — Python 2.7.16 documentation](https://docs.python.org/2/library/struct.html)
```
import struct
print(len(struct.pack('Q', 5)))
print(len(struct.pack('Q', 500000)))
from sagas.storage.data_space import DataSpace, sys_db
def int_to_bytes(ob):
    """Encode str(ob) as ASCII bytes (raises UnicodeEncodeError for non-ASCII text)."""
    return bytes(str(ob), 'ascii')
sys_db.put(b'obj', b'property', b'val')
sys_db.get(b'obj', b'property')
sys_db.all_column_values(b'property')
import rocksdb
import os
rocksdb.list_column_families(
os.path.join('./db', 'sys.db'),
rocksdb.Options(),
)
os.path.exists(os.path.join('./db', 'sys.db'))
new_cf_column_family = sys_db.db.get_column_family(b'value')
sys_db.db.put( (new_cf_column_family, b'key'), b'value' )
print( sys_db.db.get( (new_cf_column_family, b'key') ) ) # b'value'
import rocksdb
default_column_family = sys_db.db.get_column_family(b'default')
batch = rocksdb.WriteBatch()
batch.put( (default_column_family, b'key2'), b'value2' )
batch.put( (new_cf_column_family, b'key3'), b'value3' )
batch.delete( (default_column_family, b'key') )
sys_db.db.write(batch)
from simple_pb2 import MyObj, Foo
obj=MyObj(name='hello')
print(obj)
from sagas.storage.data_space import DataSpace, sys_db
new_cf_column_family = sys_db.db.get_column_family(b'value')
sys_db.db.put( (new_cf_column_family, b'key'), obj.SerializeToString() )
val= sys_db.db.get( (new_cf_column_family, b'key') )
obj=MyObj()
obj.ParseFromString(val)
print(obj)
```
| github_jupyter |
```
from tensor2tensor import models
from tensor2tensor import problems
from tensor2tensor.layers import common_layers
from tensor2tensor.utils import trainer_lib
from tensor2tensor.utils import t2t_model
from tensor2tensor.utils import registry
from tensor2tensor.utils import metrics
from tensor2tensor.data_generators import problem
from tensor2tensor.data_generators import text_problems
from tensor2tensor.data_generators import translate
from tensor2tensor.utils import registry
@registry.register_problem
class TRANSLATION32k(translate.TranslateProblem):
    """Registered 32k-vocabulary translation problem with no extra training data."""

    @property
    def additional_training_datasets(self):
        """Hook for subclasses to contribute extra training datasets."""
        return list()
PROBLEM = 'translatio_n32k'
problem = problems.problem(PROBLEM)
import tensorflow as tf
import os
vocab_file = "t2t/data/vocab.translatio_n32k.32768.subwords"
ckpt_path = tf.train.latest_checkpoint(os.path.join('t2t/train-large'))
vocab_file, ckpt_path
from t import text_encoder
encoder = text_encoder.SubwordTextEncoder(vocab_file)
class Model:
    """TF1 inference graph for a tensor2tensor Transformer translation model.

    Builds token-id placeholders, wraps them into T2T's feature dict,
    instantiates the registered 'transformer' model in PREDICT mode, and
    exposes greedy and beam-search decode ops under the graph-level names
    'greedy' and 'beam' (so they can be fetched from a frozen graph).

    NOTE(review): depends on the module-level names PROBLEM, trainer_lib and
    registry being set up before construction — confirm against the calling
    notebook cell.
    """

    def __init__(self, HPARAMS = "transformer_big", DATA_DIR = 't2t/data'):
        # Token-id inputs and targets, shape (batch, time); 0 acts as padding.
        self.X = tf.placeholder(tf.int32, [None, None])
        self.Y = tf.placeholder(tf.int32, [None, None])
        # Non-padding length of each source; decoding runs at most 50 steps
        # past the longest source sequence in the batch.
        self.X_seq_len = tf.count_nonzero(self.X, 1, dtype=tf.int32)
        maxlen_decode = 50 + tf.reduce_max(self.X_seq_len)
        # T2T expects rank-4 feature tensors: (batch, time, 1, 1).
        x = tf.expand_dims(tf.expand_dims(self.X, -1), -1)
        y = tf.expand_dims(tf.expand_dims(self.Y, -1), -1)
        features = {
            "inputs": x,
            "targets": y,
            "target_space_id": tf.constant(1, dtype=tf.int32),
        }
        self.features = features
        Modes = tf.estimator.ModeKeys
        hparams = trainer_lib.create_hparams(HPARAMS, data_dir=DATA_DIR, problem_name=PROBLEM)
        translate_model = registry.model('transformer')(hparams, Modes.PREDICT)
        self.translate_model = translate_model
        # One forward pass creates the model variables so the decode calls
        # below can reuse them inside the same variable scope.
        logits, _ = translate_model(features)
        with tf.variable_scope(tf.get_variable_scope(), reuse=True):
            self.fast_result = translate_model._greedy_infer(features, maxlen_decode)["outputs"]
            self.beam_result = translate_model._beam_decode_slow(
                features, maxlen_decode, beam_size=5,
                top_beams=1, alpha=1.0)["outputs"]
        # Stable tensor names for later frozen-graph lookup.
        self.fast_result = tf.identity(self.fast_result, name = 'greedy')
        self.beam_result = tf.identity(self.beam_result, name = 'beam')
tf.reset_default_graph()
sess = tf.InteractiveSession()
model = Model()
var_lists = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES)
saver = tf.train.Saver(var_list = var_lists)
saver.restore(sess, ckpt_path)
import re
from unidecode import unidecode
def cleaning(string):
    """Transliterate *string* to ASCII, collapse runs of spaces, and strip the ends."""
    ascii_text = unidecode(string)
    single_spaced = re.sub(r'[ ]+', ' ', ascii_text)
    return single_spaced.strip()
from pprint import pprint
string = 'WHEN ALICIA GARZA wrote “Black lives matter” in a Facebook post nearly seven years ago, the activist from Oakland, California, never imagined that those words would come to define a global movement. The July 13, 2013, acquittal of George Zimmerman for the killing of Trayvon Martin, an unarmed Black teenager, sparked her post.'
pprint(string)
encoded = encoder.encode(cleaning(string)) + [1]
f, b = sess.run([model.fast_result, model.beam_result], feed_dict = {model.X: [encoded]})
encoder.decode(f[0]), encoder.decode(b[0])
string = 'KUALA LUMPUR, July 1 - Datuk Seri Anwar Ibrahim is not suitable to as the prime minister candidate as he is allegedly not "popular" among the Malays, Tun Dr Mahathir Mohamad claimed. The former prime minister reportedly said the PKR president needs someone like himself in order to acquire support from the Malays and win the election.'
encoded = encoder.encode(string) + [1]
f, b = sess.run([model.fast_result, model.beam_result], feed_dict = {model.X: [encoded]})
encoder.decode(f[0]), encoder.decode(b[0])
string = 'Religion usually means trust in God, or a supernatural and supernatural power like God, as well as practices and institutions associated with that belief. Religion and belief are two very relevant things. But Religion has a broader meaning, which refers to a system of cohesive belief, and this belief is about the divine aspect'
encoded = encoder.encode(string) + [1]
f, b = sess.run([model.fast_result, model.beam_result], feed_dict = {model.X: [encoded]})
encoder.decode(f[0]), encoder.decode(b[0])
string = 'Articulate (Well-spoken, Expressive) <> Hmmm yes Horacio Calcaterra is a sportsman that currently is playing for Sporting Cristal which is located in Torneo Descentralizado. <> He currently plays for Sporting Cristal in the Torneo Descentralizado.'
encoded = encoder.encode(string) + [1]
f, b = sess.run([model.fast_result, model.beam_result], feed_dict = {model.X: [encoded]})
encoder.decode(f[0]), encoder.decode(b[0])
string = ('TANGKAK - Tan Sri Muhyiddin Yassin said he did not want to touch on '
'political issues at the moment, instead focusing on the welfare of the '
"people and efforts to revitalize the affected country's economy following "
'the Covid-19 pandemic. The prime minister explained the matter when speaking '
'at a Leadership Meeting with Gambir State Assembly (DUN) leaders at the '
'Bukit Gambir Multipurpose Hall today.')
encoded = encoder.encode(string) + [1]
f, b = sess.run([model.fast_result, model.beam_result], feed_dict = {model.X: [encoded]})
encoder.decode(f[0]), encoder.decode(b[0])
string = "i am in medical school."
string = unidecode(string)
encoded = encoder.encode(string) + [1]
f, b = sess.run([model.fast_result, model.beam_result], feed_dict = {model.X: [encoded]})
encoder.decode(f[0]), encoder.decode(b[0])
string = 'Emmerdale is the debut studio album,songs were not released in the U.S <> These songs were not released in the U.S. edition of said album and were previously unavailable on any U.S. release.'
encoded = encoder.encode(string) + [1]
f, b = sess.run([model.fast_result, model.beam_result], feed_dict = {model.X: [encoded]})
encoder.decode(f[0]), encoder.decode(b[0])
# https://towardsdatascience.com/fuzzy-matching-at-scale-84f2bfd0c536
string = 'Fuzzy matching at scale. From 3.7 hours to 0.2 seconds. How to perform intelligent string matching in a way that can scale to even the biggest data sets. Data in the real world is messy. Dealing with messy data sets is painful and burns through time which could be spent analysing the data itself.'
encoded = encoder.encode(string) + [1]
f, b = sess.run([model.fast_result, model.beam_result], feed_dict = {model.X: [encoded]})
encoder.decode(f[0]), encoder.decode(b[0])
batch_size = 128
path = 't2t/tmp/test-en'
with open(os.path.join(path, 'left.txt')) as fopen:
left = fopen.read().split('\n')
with open(os.path.join(path, 'right.txt')) as fopen:
right = fopen.read().split('\n')
len(left), len(right)
p = sess.run(model.fast_result, feed_dict = {model.X: [encoder.encode(left[0]) + [1]]}).tolist()
results = []
for row in p:
results.append([i for i in row if i not in [0, 1]])
results
from tensor2tensor.utils import bleu_hook
bleu_hook.compute_bleu(reference_corpus = [encoder.encode(right[0])],
translation_corpus = results)
pad_sequences = tf.keras.preprocessing.sequence.pad_sequences
from tqdm import tqdm
results = []
for i in tqdm(range(0, len(left), batch_size)):
index = min(i + batch_size, len(left))
x = left[i: index]
encoded = [encoder.encode(l) + [1] for l in x]
batch_x = pad_sequences(encoded, padding='post')
p = sess.run(model.fast_result, feed_dict = {model.X: batch_x}).tolist()
result = []
for row in p:
result.append([i for i in row if i not in [0, 1]])
results.extend(result)
len(results)
rights = [encoder.encode(r) for r in right[:len(results)]]
bleu_hook.compute_bleu(reference_corpus = rights,
translation_corpus = results)
saver = tf.train.Saver(tf.trainable_variables())
saver.save(sess, 'transformer-large/model.ckpt')
strings = ','.join(
[
n.name
for n in tf.get_default_graph().as_graph_def().node
if ('Variable' in n.op
or 'Placeholder' in n.name
or 'greedy' in n.name
or 'beam' in n.name
or 'alphas' in n.name
or 'self/Softmax' in n.name)
and 'adam' not in n.name
and 'beta' not in n.name
and 'global_step' not in n.name
and 'modality' not in n.name
and 'Assign' not in n.name
]
)
strings.split(',')
def freeze_graph(model_dir, output_node_names):
    """Freeze the latest checkpoint in *model_dir* into `frozen_model.pb`.

    Loads the checkpoint's meta-graph, converts every variable reachable from
    the comma-separated *output_node_names* into constants, and serializes the
    resulting GraphDef next to the checkpoint.
    """
    if not tf.gfile.Exists(model_dir):
        raise AssertionError(
            "Export directory doesn't exists. Please specify an export "
            'directory: %s' % model_dir
        )
    ckpt_state = tf.train.get_checkpoint_state(model_dir)
    ckpt_path = ckpt_state.model_checkpoint_path
    model_folder = '/'.join(ckpt_path.split('/')[:-1])
    frozen_path = model_folder + '/frozen_model.pb'
    with tf.Session(graph = tf.Graph()) as sess:
        # clear_devices drops device pins so the graph loads anywhere.
        meta_saver = tf.train.import_meta_graph(
            ckpt_path + '.meta', clear_devices = True
        )
        meta_saver.restore(sess, ckpt_path)
        frozen_graph_def = tf.graph_util.convert_variables_to_constants(
            sess,
            tf.get_default_graph().as_graph_def(),
            output_node_names.split(','),
        )
        with tf.gfile.GFile(frozen_path, 'wb') as f:
            f.write(frozen_graph_def.SerializeToString())
        print('%d ops in the final graph.' % len(frozen_graph_def.node))
freeze_graph('transformer-large', strings)
def load_graph(frozen_graph_filename):
    """Deserialize a frozen GraphDef file and import it into a fresh tf.Graph."""
    with tf.gfile.GFile(frozen_graph_filename, 'rb') as handle:
        graph_def = tf.GraphDef()
        graph_def.ParseFromString(handle.read())
    with tf.Graph().as_default() as graph:
        # Imported nodes get the default 'import/' name prefix.
        tf.import_graph_def(graph_def)
    return graph
g = load_graph('transformer-large/frozen_model.pb')
x = g.get_tensor_by_name('import/Placeholder:0')
greedy = g.get_tensor_by_name('import/greedy:0')
beam = g.get_tensor_by_name('import/beam:0')
test_sess = tf.InteractiveSession(graph = g)
string = 'KUALA LUMPUR, July 1 - Datuk Seri Anwar Ibrahim is not suitable to as the prime minister candidate as he is allegedly not "popular" among the Malays, Tun Dr Mahathir Mohamad claimed. The former prime minister reportedly said the PKR president needs someone like himself in order to acquire support from the Malays and win the election.'
encoded = encoder.encode(string) + [1]
g, b = test_sess.run([greedy, beam], feed_dict = {x:[encoded]})
encoder.decode(g[0]), encoder.decode(b[0])
```
| github_jupyter |
## _*Using Algorithm Concatenation in Qiskit Aqua*_
This notebook demonstrates how to use the `Qiskit Aqua` library to realize algorithm concatenation. In particular, we experiment with chaining the executions of VQE and IQPE by first running VQE and then preparing IQPE's initial state using the variational form as produced by VQE upon its termination.
```
import numpy as np
from qiskit import BasicAer
from qiskit.transpiler import PassManager
from qiskit.aqua import Operator, QuantumInstance, run_algorithm
from qiskit.aqua.input import EnergyInput
from qiskit.aqua.algorithms import VQE
from qiskit.aqua.algorithms import IQPE
from qiskit.aqua.components.variational_forms import RYRZ
from qiskit.aqua.components.optimizers import SPSA
from qiskit.aqua.components.initial_states.var_form_based import VarFormBased
```
Here an Operator instance is created for our Hamiltonian, for which we are going to estimate the ground energy level. In this case the paulis are from a previously computed Hamiltonian for simplicity.
```
pauli_dict = {
'paulis': [{"coeff": {"imag": 0.0, "real": -1.052373245772859}, "label": "II"},
{"coeff": {"imag": 0.0, "real": 0.39793742484318045}, "label": "IZ"},
{"coeff": {"imag": 0.0, "real": -0.39793742484318045}, "label": "ZI"},
{"coeff": {"imag": 0.0, "real": -0.01128010425623538}, "label": "ZZ"},
{"coeff": {"imag": 0.0, "real": 0.18093119978423156}, "label": "XX"}
]
}
qubit_op = Operator.load_from_dict(pauli_dict)
```
We can now use the Operator without regard to how it was created. First we will use the ExactEigensolver to compute the reference ground energy level.
```
algorithm_cfg = {
'name': 'ExactEigensolver',
}
params = {
'algorithm': algorithm_cfg
}
algo_input = EnergyInput(qubit_op)
result_reference = run_algorithm(params, algo_input)
print('The reference ground energy level is {}.'.format(result_reference['energy']))
```
Having established the reference ground energy, we next carry on with our experiment. First we configure a VQE algorithm instance. The idea is that we can set a termination condition such that the VQE instance returns rather quickly with a rough estimation result.
```
random_seed = 0
np.random.seed(random_seed)
backend = BasicAer.get_backend('qasm_simulator')
var_form_depth = 3
var_form = RYRZ(algo_input.qubit_op.num_qubits, var_form_depth)
spsa_max_trials=10
optimizer = SPSA(max_trials=spsa_max_trials)
vqe_mode = 'paulis'
vqe = VQE(algo_input.qubit_op, var_form, optimizer, vqe_mode)
quantum_instance = QuantumInstance(backend)
result_vqe = vqe.run(quantum_instance)
print('VQE estimated the ground energy to be {}.'.format(result_vqe['energy']))
```
As previously indicated, the energy estimation result is rather rough--it is far from being an acceptable final estimation figure. But, it is close enough such that the accompanying variational form might be a reasonably good approximation to the ground eigenstate, which means the corresponding wave function can serve as the initial state for the IQPE execution that follows. We next prepare such an initial state.
```
state_in = VarFormBased(var_form, result_vqe['opt_params'])
```
With the VQE-generated quantum state wave function serving as the chaining piece and prepared as initial state, we now go ahead with configuring and running an IQPE instance.
```
num_time_slices = 50
num_iterations = 11
iqpe = IQPE(algo_input.qubit_op, state_in, num_time_slices, num_iterations,
expansion_mode='suzuki', expansion_order=2,
shallow_circuit_concat=True)
quantum_instance = QuantumInstance(backend, shots=100, seed=random_seed, pass_manager=PassManager(), seed_transpiler=random_seed)
result_iqpe = iqpe.run(quantum_instance)
print("Continuing with VQE's result, IQPE estimated the ground energy to be {}.".format(
result_iqpe['energy']))
```
As seen, the final ground energy estimation as produced by IQPE is much more accurate than the intermediate result as produced by VQE.
| github_jupyter |
```
from ipywidgets.widgets import Label, FloatProgress, FloatSlider, Button, Dropdown, FileUpload, Image, Layout, HBox, VBox, Box, Output, IntProgress, IntSlider
from IPython.display import display, clear_output
import numpy as np
from numpy import dot, exp, mgrid, pi, ravel, square, uint8, zeros
import math
import bqplot as bq
import time
import threading
from copy import deepcopy
import random
import cv2
import matplotlib.pyplot as plt
%matplotlib inline
import os
import io
from itertools import product
import PIL.Image as ImagePIL
from array import array
def medianFilter(img, progress):
    """Apply a 3x3 median filter to a 2-D image and write 'medianFilter.jpg'.

    Border pixels are left at zero; *progress* is an ipywidgets progress bar
    nudged per processed row.
    """
    noisy = img
    rows, cols = noisy.shape[0], noisy.shape[1]
    filtered = np.zeros([rows, cols])
    for i in range(1, rows - 1):
        progress.value += int((i + 1) / 100)
        for j in range(1, cols - 1):
            window = [noisy[i - 1, j - 1], noisy[i - 1, j], noisy[i - 1, j + 1],
                      noisy[i, j - 1], noisy[i, j], noisy[i, j + 1],
                      noisy[i + 1, j - 1], noisy[i + 1, j], noisy[i + 1, j + 1]]
            window.sort()
            # Middle of the 9 sorted neighbors is the median.
            filtered[i, j] = window[4]
    filtered = filtered.astype(np.uint8)
    cv2.imwrite('medianFilter.jpg', filtered)
def gen_gaussian_kernel(k_size, sigma):
    """Build a k_size x k_size Gaussian kernel centered on the middle cell.

    Note: the prefactor is 1 / (2*pi*sigma) as in the original notebook, so
    the kernel is not normalized to sum to 1.
    """
    half = k_size // 2
    x, y = mgrid[0 - half : k_size - half, 0 - half : k_size - half]
    return 1 / (2 * pi * sigma) * exp(-(square(x) + square(y)) / (2 * square(sigma)))
def gaussianFilter(image, k_size, sigma, progress):
    """Valid-mode Gaussian smoothing via an im2col matrix product.

    Writes the filtered uint8 image to 'gaussianFilter.jpg'; the output is
    (H - k + 1) x (W - k + 1).
    """
    h, w = image.shape[0], image.shape[1]
    out_h = h - k_size + 1
    out_w = w - k_size + 1
    # One flattened k*k patch per output pixel.
    patches = zeros((out_h * out_w, k_size * k_size))
    row = 0
    for i, j in product(range(out_h), range(out_w)):
        progress.value += int((row + 1) / 100)
        patches[row, :] = ravel(image[i : i + k_size, j : j + k_size])
        row += 1
    weights = ravel(gen_gaussian_kernel(k_size, sigma))
    result = dot(patches, weights).reshape(out_h, out_w).astype(uint8)
    cv2.imwrite('gaussianFilter.jpg', result)
def averaging(img, progress):
    """3x3 binomial (weighted-average) smoothing; writes 'averaging.jpg'.

    Border pixels stay zero. Expects a 3-D array; with single-channel input
    the size-1 channel broadcasts into the scalar output cells.
    """
    weights = (1/16) * np.matrix([[1,2,1],[2,4,2],[1,2,1]], dtype=int)
    rows, cols, ch = img.shape
    out = np.zeros([rows, cols])
    for r in range(1, rows - 1):
        progress.value += int((r + 1) / 100)
        for c in range(1, cols - 1):
            acc = (img[r - 1, c - 1] * weights[0, 0] + img[r - 1, c] * weights[0, 1]
                   + img[r - 1, c + 1] * weights[0, 2] + img[r, c - 1] * weights[1, 0]
                   + img[r, c] * weights[1, 1] + img[r, c + 1] * weights[1, 2]
                   + img[r + 1, c - 1] * weights[2, 0] + img[r + 1, c] * weights[2, 1]
                   + img[r + 1, c + 1] * weights[2, 2])
            out[r, c] = acc
    out = out.astype(np.uint8)
    cv2.imwrite('averaging.jpg', out)
def unsharpMaskingAndHighboostFiltering(img, k, progress):
    """Sharpen *img* by adding k times the blur-residual mask (k > 1 = highboost).

    Writes the result to 'unsharpMaskingAndHighboostFiltering.jpg'.
    """
    img = img.reshape(img.shape[0], img.shape[1], 1)
    smoothed = cv2.GaussianBlur(img, (5,5), 0)
    smoothed = smoothed.reshape(smoothed.shape[0], smoothed.shape[1], 1)
    # The mask is the high-frequency detail removed by blurring.
    detail = img - smoothed
    sharpened = img + k * detail
    progress.value = 100
    cv2.imwrite('unsharpMaskingAndHighboostFiltering.jpg', sharpened)
# Highboost filtering if k > 1
def sobelOperator(test, progress):
    """Edge-detect *test* with a 3x3 Sobel operator; writes 'sobelOperator.jpg'.

    The input is reduced to luminance with gamma-weighted Rec. 709
    coefficients, then per-pixel gradient magnitudes are computed.
    """
    image_file = test
    input_image = test
    [nx, ny, nz] = np.shape(input_image)
    # NOTE(review): all three "channels" read index 0 — indices 0, 1, 2 look
    # intended; equivalent for grayscale stored as 3-D, wrong for true RGB.
    r_img, g_img, b_img = input_image[:, :, 0], input_image[:, :, 0], input_image[:, :, 0]
    gamma = 1.400
    # Rec. 709 luma coefficients.
    r_const, g_const, b_const = 0.2126, 0.7152, 0.0722
    grayscale_image = r_const * r_img ** gamma + g_const * g_img ** gamma + b_const * b_img ** gamma
    # Horizontal and vertical Sobel kernels.
    Gx = np.array([[1.0, 0.0, -1.0], [2.0, 0.0, -2.0], [1.0, 0.0, -1.0]])
    Gy = np.array([[1.0, 2.0, 1.0], [0.0, 0.0, 0.0], [-1.0, -2.0, -1.0]])
    [rows, columns] = np.shape(grayscale_image)
    sobel_filtered_image = np.zeros(shape=(rows, columns))
    for i in range(rows - 2):
        # Progress heuristic carried over from the original notebook.
        progress.value += int((i+1)/100)
        for j in range(columns - 2):
            # Correlate both kernels against the 3x3 neighborhood.
            gx = np.sum(np.multiply(Gx, grayscale_image[i:i + 3, j:j + 3]))
            gy = np.sum(np.multiply(Gy, grayscale_image[i:i + 3, j:j + 3]))
            sobel_filtered_image[i + 1, j + 1] = np.sqrt(gx ** 2 + gy ** 2)
    cv2.imwrite('sobelOperator.jpg', sobel_filtered_image)
def saltAndPepperNoise(img, progress):
    """Stamp random plus-shaped white then black marks onto *img* in place.

    Each mark covers a 5-pixel plus (center plus its 4-neighborhood); counts
    are drawn uniformly from [100, 1000]. Writes 'saltAndPepperNoise.jpg'.
    """
    rows, cols, ch = img.shape

    def stamp(value, count):
        # Draw `count` plus-shaped marks of the given intensity.
        for i in range(count):
            progress.value += int((i + 1) / 50)
            y = random.randint(1, rows - 2)
            x = random.randint(1, cols - 2)
            img[y][x] = value
            img[y + 1][x] = value
            img[y - 1][x] = value
            img[y][x + 1] = value
            img[y][x - 1] = value

    stamp(255, random.randint(100, 1000))
    stamp(0, random.randint(100, 1000))
    cv2.imwrite('saltAndPepperNoise.jpg', img)
#img = cv2.imread('pic2.png')
#img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
#plt.imshow(img, 'gray')
#plt.imshow(saltAndPepperNoise(img))
# loc = Mean (“centre”) of the distribution.
# scale = Standard deviation (spread or “width”) of the distribution. Must be non-negative.
def gaussianNoise(real_image, progress):
    """Add standard-normal noise (cast to uint8) to *real_image*.

    Uses cv2.add, which saturates instead of wrapping on overflow; writes
    the noisy image to 'gaussianNoise.jpg'.
    """
    noise = np.random.normal(loc=0, scale=1, size=real_image.size)
    noise = noise.reshape(real_image.shape[0], real_image.shape[1], real_image.shape[2]).astype('uint8')
    noisy = cv2.add(real_image, noise)
    cv2.imwrite('gaussianNoise.jpg', noisy)
    progress.value = 100
#img = cv2.imread('pic2.png', 0)
#plt.imshow(img, 'gray')
#plt.imshow(gaussianNoise(img), 'gray')
def imhist(im):
    """Return the normalized 256-bin intensity histogram of a 2-D image."""
    rows, cols = im.shape
    counts = [0.0] * 256
    for r in range(rows):
        for c in range(cols):
            counts[im[r, c]] += 1
    # Divide by pixel count so the bins sum to 1.
    return np.array(counts) / (rows * cols)
def cumsum(h):
    """Return the inclusive prefix sums of *h* as a list."""
    running = 0
    prefix = []
    for value in h:
        running = running + value
        prefix.append(running)
    return prefix
def histeq(im, progress):
    """Histogram-equalize a 2-D uint8 image.

    Returns (equalized image, original histogram, equalized histogram, LUT).
    """
    # Normalized histogram -> CDF -> 8-bit lookup table.
    hist = imhist(im)
    cdf = np.array(cumsum(hist))
    lut = np.uint8(255 * cdf)
    height, width = im.shape
    out = np.zeros_like(im)
    for r in range(0, height):
        progress.value += int((r + 1) / 100)
        for c in range(0, width):
            out[r, c] = lut[im[r, c]]
    new_hist = imhist(out)
    return out, hist, new_hist, lut
def histogramEq(test, progress):
    """Histogram-equalize *test* (floats in [0, 1]); writes 'histogramEq.jpg'.

    Also plots the original histogram, the equalized histogram, and the
    intensity transfer function into the current matplotlib figure.
    """
    test = test.reshape(test.shape[0], test.shape[1], 1)
    # Scale [0, 1] floats up to 8-bit intensities.
    img = np.uint8(test * 255.0)
    # NOTE(review): every weighted term reads channel 0 — indices 0, 1, 2
    # (Rec. 709 weights) look intended; equivalent for grayscale, wrong for RGB.
    img = np.uint8((0.2126 * img[:,:,0]) + np.uint8(0.7152 * img[:,:,0]) + np.uint8(0.0722 * img[:,:,0]))
    new_img, h, new_h, sk = histeq(img, progress)
    fig = plt.figure()
    fig.add_subplot(221)
    plt.plot(h)
    plt.title('Original histogram')  # histogram before equalization
    fig.add_subplot(222)
    plt.plot(new_h)
    plt.title('New histogram')  # histogram of the equalized image
    fig.add_subplot(223)
    plt.plot(sk)
    plt.title('Transfer function')  # intensity mapping applied by histeq
    cv2.imwrite('histogramEq.jpg', new_img)
def fourierTransform(real_image, progress):
    """Compute a 2-D discrete Fourier transform of the image by direct
    summation and write the result to 'fourierTransform.jpg'.
    """
    # Downsample first: the quadruple loop below is O(M^2 * N^2).
    real_image = cv2.resize(real_image, (128, 128))
    M = real_image.shape[0] - 1
    N = real_image.shape[1] - 1
    dft_image = np.zeros((M, N), complex)
    # NOTE(review): Mu and v stay fixed at 1, so every (m, n) output entry
    # accumulates the same frequency component; a true DFT would use m and
    # n in the exponent. Confirm intent before fixing.
    Mu = 1
    v = 1
    # NOTE(review): j = 1 is the real unit, not the imaginary unit 1j, so
    # np.exp(exp_power) is a real (decaying) exponential here — verify.
    j = 1
    for m in range(M):
        progress.value += int((m+1)/100)
        for n in range(N):
            for x in range(M):
                for y in range(N):
                    exp_power = -j * 2 * math.pi * ((Mu * x)/M + (v * y)/N)
                    dft_image[m][n] += real_image[x][y] * np.exp(exp_power)
    #return dft_image
    cv2.imwrite('fourierTransform.jpg', dft_image)
def kernel(x, a):
    """Bicubic interpolation kernel u(x) with sharpness parameter *a*.

    Piecewise cubic: one polynomial on |x| <= 1, another on 1 < |x| <= 2,
    and zero beyond a distance of two samples.
    """
    t = np.abs(x)
    if t <= 1:
        return (a + 2) * t**3 - (a + 3) * t**2 + 1
    if t <= 2:
        return a * t**3 - (a * 5) * t**2 + (a * 8) * t - a * 4
    return 0
def pad(imag, col, r, c):
    """Return *imag* padded with a 2-pixel edge-replicated border.

    Parameters
    ----------
    imag : ndarray of shape (col, r, c) — height, width, channels.
    col, r, c : the image dimensions (kept for interface compatibility;
        np.pad reads them from the array itself).

    Returns
    -------
    float64 ndarray of shape (col + 4, r + 4, c). Edges repeat the nearest
    border row/column and corners repeat the corner pixel — the same result
    as the original nine manual slice assignments, via np.pad(mode='edge').
    """
    # astype(np.float64) matches the original np.zeros(...) float output.
    return np.pad(imag.astype(np.float64),
                  ((2, 2), (2, 2), (0, 0)),
                  mode='edge')
def bicubicInterpolation(img, ratio, a, progress):
    """Upscale a single-channel image by *ratio* using separable bicubic
    interpolation with kernel parameter *a*, saving the result to
    'bicubicInterpolation.jpg'.
    """
    # Force (H, W, 1) layout, then add a 2-pixel replicated border so the
    # 4x4 neighbourhood lookups below never index outside the image.
    img = img.reshape(img.shape[0], img.shape[1], 1)
    col, rows, ch =img.shape
    img= pad(img,col,rows,ch)
    # Output dimensions.
    dcol= math.floor(col*ratio)
    drows=math.floor(rows*ratio)
    matriximage=np.zeros((dcol,drows,1))
    v= 1/ratio  # step in source coordinates per destination pixel
    for c in range(ch):
        for column in range(dcol):
            progress.value += int((column + 1)/100)
            for row in range(drows):
                # Source-space position (+2 compensates the padding offset).
                x=row * v + 2
                y=column * v + 2
                # Fractional distances to the four nearest sample positions.
                x1= 1+x-math.floor(x)
                y1=1+y-math.floor(y)
                x2=x-math.floor(x)
                y2=y-math.floor(y)
                x3=math.floor(x)+1-x
                y3=math.floor(y)+1-y
                x4=math.floor(x)+2-x
                y4=math.floor(y)+2-y
                # Separable bicubic: row of x-weights * 4x4 neighbourhood *
                # column of y-weights gives the interpolated pixel value.
                mat_kernelx=np.matrix([[kernel(x1,a),kernel(x2,a),kernel(x3,a),kernel(x4,a)]])
                mat_near=np.matrix([[img[int(y-y1),int(x-x1),c],img[int(y-y2),int(x-x1),c],img[int(y+y3),int(x-x1),c],img[int(y+y4),int(x-x1),c]],
                                    [img[int(y-y1),int(x-x2),c],img[int(y-y2),int(x-x2),c],img[int(y+y3),int(x-x2),c],img[int(y+y4),int(x-x2),c]],
                                    [img[int(y-y1),int(x+x3),c],img[int(y-y2),int(x+x3),c],img[int(y+y3),int(x+x3),c],img[int(y+y4),int(x+x3),c]],
                                    [img[int(y-y1),int(x+x4),c],img[int(y-y2),int(x+x4),c],img[int(y+y3),int(x+x4),c],img[int(y+y4),int(x+x4),c]]])
                mat_kernely=np.matrix([[kernel(y1,a)],[kernel(y2,a)],[kernel(y3,a)],[kernel(y4,a)]])
                matriximage[column, row, c] = np.dot(np.dot(mat_kernelx, mat_near), mat_kernely)
    cv2.imwrite('bicubicInterpolation.jpg', matriximage)
# --- UI widget construction (ipywidgets) ------------------------------------
# Dropdown selecting which filter to apply; values 1-10 map to the filter
# functions defined above (0 = no filter).
options = Dropdown(
    options=[('None', 0), ('Median Filter', 1), ('Averaging filter', 2),
             ('Gaussian Filter', 3), ('Unsharp Masking and Highboost Filtering', 4), ('Sobel Operators', 5),
             ('Impulse Noise (Salt and Pepper)', 6), ('Gaussian Noise', 7), ('Histogram Eq.', 8),
             ('Fourier Transform', 9), ('Interpolation (Bicubic)', 10)],
    value=0,
    description='Select Filter:',
    layout=Layout(width='450px'))
outDropdown = Output()
# K slider for unsharp masking / highboost; hidden until that filter is chosen.
slider = IntSlider(
    value=1,
    min=1,
    max=10,
    step=1,
    description='K:',
    disabled=False,
    continuous_update=False,
    orientation='horizontal',
    readout=True,
    readout_format='d',
    layout=Layout(visibility='hidden', width='475px'))
outSlider = Output()
# Progress bar incremented by the filter functions while they run.
progressBar = IntProgress(
    value=0,
    min=0,
    max=100,
    description='Loading:',
    bar_style='success', # 'success', 'info', 'warning', 'danger' or ''
    style={'bar_color': '#1cf28c'},
    orientation='horizontal',
    layout=Layout(visibility='hidden', flex='1 1 0%', width='520px'))
btns = [
    #Button(description='Apply Filter', disabled=False, icon='plus', layout=Layout(flex='1 1 0%', width='120px'), button_style='primary'),
    #Button(description='Remove Filter', disabled=False, icon='remove', layout=Layout(flex='1 1 0%', width='120px'), button_style='warning'),
]
resetBtn = Button(description='Reset', disabled=False, icon='trash', layout=Layout(flex='1 1 0%', width='150px'), button_style='danger')
outResetBtn = Output()
# Horizontal container for the (currently commented-out) action buttons.
btns_box_layout = Layout(display='flex', flex_flow='row', align_items='stretch', width='30%')
btns_box = Box(children=btns, layout=btns_box_layout)
def work(progress, slider):
    """Background worker-thread task (currently a no-op placeholder)."""
    pass

def on_click_resetBtn(change):
    """Reset-button click callback (currently a no-op placeholder)."""
    with outResetBtn:
        pass
def on_slider_change(change):
    """Re-run unsharp masking / highboost with the new K value from the
    slider and refresh the 'after' preview image."""
    with outSlider:
        k = change['new']
        progressBar.value = 0
        # deepcopy so the filter never mutates the uploaded source image.
        unsharpMaskingAndHighboostFiltering(deepcopy(selected_img), k, progressBar)
        # Context manager closes the file handle (the original open() call
        # leaked it on every slider move).
        with open("unsharpMaskingAndHighboostFiltering.jpg", "rb") as f:
            image_after.value = f.read()
        slider.layout.visibility = 'visible'
def on_dropdown_change(change):
    """Apply the filter selected in the dropdown to the uploaded image and
    show the result in the 'after' preview.

    Each filter writes its output to '<name>.jpg'; that file is read back
    for display and its dimensions are printed. The K slider is only shown
    for the unsharp-masking filter (option 4), which uses it.
    """
    # Option number -> (output file base name, callable applying the filter).
    # deepcopy protects the uploaded source image from in-place mutation.
    filters = {
        1: ('medianFilter',
            lambda img: medianFilter(img, progressBar)),
        2: ('averaging',
            lambda img: averaging(img, progressBar)),
        3: ('gaussianFilter',
            lambda img: gaussianFilter(img, 5, 0.8, progressBar)),
        4: ('unsharpMaskingAndHighboostFiltering',
            lambda img: unsharpMaskingAndHighboostFiltering(img, 1, progressBar)),
        5: ('sobelOperator',
            lambda img: sobelOperator(img, progressBar)),
        6: ('saltAndPepperNoise',
            lambda img: saltAndPepperNoise(img, progressBar)),
        7: ('gaussianNoise',
            lambda img: gaussianNoise(img, progressBar)),
        8: ('histogramEq',
            lambda img: histogramEq(img, progressBar)),
        9: ('fourierTransform',
            lambda img: fourierTransform(img, progressBar)),
        10: ('bicubicInterpolation',
             lambda img: bicubicInterpolation(img, 2, -0.5, progressBar)),
    }
    with outDropdown:
        v = change['new']
        progressBar.layout.visibility = 'visible'
        if v == 0:
            # "None": hide the slider and show the placeholder image.
            slider.layout.visibility = 'hidden'
            progressBar.value = 0
            with open("No-filter.png", "rb") as f:
                image_after.value = f.read()
            return
        if v not in filters:
            return
        if v != 4:
            # Only the unsharp-masking filter (4) uses the K slider.
            slider.layout.visibility = 'hidden'
        progressBar.value = 0
        name, apply_filter = filters[v]
        apply_filter(deepcopy(selected_img))
        result_file = name + '.jpg'
        # Context managers close the result files (the original branches
        # leaked one file handle per filter application).
        with open(result_file, "rb") as f:
            image = f.read()
        new_image = cv2.imread(result_file)
        print("After: " , new_image.shape[0], " x ", new_image.shape[1], end="\r")
        image_after.value = image
        if v == 4:
            slider.layout.visibility = 'visible'
# Upload widget: the user supplies the image that the filters will process.
ImgUpload = FileUpload(accept='image/*', multiple=False)
outUpload = Output()
display(ImgUpload, outUpload)
def on_upload_change(change):
    """Handle a new image upload: load it, show the before/after previews
    plus the filter controls, and start the background worker thread."""
    with outUpload:
        # First (and, with multiple=False, only) uploaded file.
        file_name = list(ImgUpload.value.keys())[0]
        uploaded_file = ImgUpload.value[file_name]['content']
        global selected_img
        # Read the image from disk as grayscale and add a channel axis.
        # NOTE(review): this assumes a file with the uploaded name exists in
        # the working directory — the uploaded bytes themselves are only
        # used for the "before" preview. Confirm that is intended.
        selected_img = cv2.imread(str(file_name), 0)
        selected_img = selected_img.reshape(selected_img.shape[0], selected_img.shape[1], 1)
        image_before = Image(value=uploaded_file, width=256, height=256)
        # Context manager closes the placeholder file (original leaked it).
        with open("No-filter.png", "rb") as f:
            placeholder = f.read()
        global image_after
        image_after = Image(value=placeholder, width=256, height=256)
        print("Before: " , selected_img.shape[0], " x ", selected_img.shape[1])
        display(options, HBox([image_before, image_after]), slider, progressBar, outDropdown, outSlider, outResetBtn)
        thread.start()
# Background worker thread; started on the first upload (`work` is a no-op).
thread = threading.Thread(target=work, args=(progressBar, slider,))
# Wire the widget callbacks.
ImgUpload.observe(on_upload_change, 'value')
options.observe(on_dropdown_change, 'value')
slider.observe(on_slider_change, 'value')
resetBtn.on_click(on_click_resetBtn)
```
| github_jupyter |
```
#@title Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
```
<a href="https://colab.research.google.com/github/lmoroney/dlaicourse/blob/master/TensorFlow%20In%20Practice/Course%204%20-%20S%2BP/S%2BP%20Week%204%20Lesson%201.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
```
try:
# %tensorflow_version only exists in Colab.
%tensorflow_version 2.x
except Exception:
pass
import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt
print(tf.__version__)
def plot_series(time, series, format="-", start=0, end=None):
    """Plot series[start:end] against time[start:end] on the current axes.

    *format* is a matplotlib style string (e.g. "-", "o").
    """
    window = slice(start, end)
    plt.plot(time[window], series[window], format)
    plt.xlabel("Time")
    plt.ylabel("Value")
    plt.grid(True)
def trend(time, slope=0):
    """Return a straight-line trend: slope * time (elementwise for arrays)."""
    return time * slope
def seasonal_pattern(season_time):
    """Just an arbitrary pattern, you can change it if you wish.

    A cosine bump for season_time < 0.4, then an exponential decay.
    """
    cosine_part = np.cos(season_time * 2 * np.pi)
    decay_part = 1 / np.exp(3 * season_time)
    return np.where(season_time < 0.4, cosine_part, decay_part)
def seasonality(time, period, amplitude=1, phase=0):
    """Repeats the same pattern at each period."""
    # Normalized position within the current period, in [0, 1).
    season_time = ((time + phase) % period) / period
    return seasonal_pattern(season_time) * amplitude
def noise(time, noise_level=1, seed=None):
    """Return i.i.d. Gaussian noise, one sample per time step.

    Passing *seed* makes the noise reproducible.
    """
    rng = np.random.RandomState(seed)
    samples = rng.randn(len(time))
    return noise_level * samples
# --- Build a synthetic series: baseline + trend + yearly seasonality + noise
time = np.arange(4 * 365 + 1, dtype="float32")  # 4 years of daily steps
baseline = 10
# NOTE: this series is overwritten by the full construction below.
series = trend(time, 0.1)
baseline = 10
amplitude = 40
slope = 0.05
noise_level = 5

# Create the series
series = baseline + trend(time, slope) + seasonality(time, period=365, amplitude=amplitude)
# Update with noise
series += noise(time, noise_level, seed=42)

# Train/validation split at a fixed time index.
split_time = 1000
time_train = time[:split_time]
x_train = series[:split_time]
time_valid = time[split_time:]
x_valid = series[split_time:]

# Windowing hyperparameters.
window_size = 20
batch_size = 32
shuffle_buffer_size = 1000
def windowed_dataset(series, window_size, batch_size, shuffle_buffer):
    """Turn a 1-D series into shuffled, batched (input, target) windows.

    Each example pairs a window of `window_size` values with the same
    window shifted one step ahead (sequence-to-sequence targets).
    """
    expanded = tf.expand_dims(series, axis=-1)  # add a feature axis
    dataset = (
        tf.data.Dataset.from_tensor_slices(expanded)
        .window(window_size + 1, shift=1, drop_remainder=True)
        .flat_map(lambda window: window.batch(window_size + 1))
        .shuffle(shuffle_buffer)
        .map(lambda window: (window[:-1], window[1:]))
        .batch(batch_size)
        .prefetch(1)
    )
    return dataset
def model_forecast(model, series, window_size):
    """Run *model* over every sliding window of *series* and return the
    stacked predictions."""
    windows = (
        tf.data.Dataset.from_tensor_slices(series)
        .window(window_size, shift=1, drop_remainder=True)
        .flat_map(lambda window: window.batch(window_size))
        .batch(32)
        .prefetch(1)
    )
    return model.predict(windows)
# --- Learning-rate sweep: train briefly while increasing the LR each epoch
tf.keras.backend.clear_session()
tf.random.set_seed(51)
np.random.seed(51)
window_size = 30
train_set = windowed_dataset(x_train, window_size, batch_size=128, shuffle_buffer=shuffle_buffer_size)
# Conv front-end + two bidirectional LSTMs; the final Lambda rescales the
# small LSTM outputs toward the series' magnitude.
model = tf.keras.models.Sequential([
  tf.keras.layers.Conv1D(filters=32, kernel_size=5,
                      strides=1, padding="causal",
                      activation="relu",
                      input_shape=[None, 1]),
  tf.keras.layers.Bidirectional(tf.keras.layers.LSTM(32, return_sequences=True)),
  tf.keras.layers.Bidirectional(tf.keras.layers.LSTM(32, return_sequences=True)),
  tf.keras.layers.Dense(1),
  tf.keras.layers.Lambda(lambda x: x * 200)
])
# Grow the LR exponentially from 1e-8: a factor of 10 every 20 epochs.
lr_schedule = tf.keras.callbacks.LearningRateScheduler(
    lambda epoch: 1e-8 * 10**(epoch / 20))
optimizer = tf.keras.optimizers.SGD(lr=1e-8, momentum=0.9)
# Huber loss is robust to the occasional noisy spike in the series.
model.compile(loss=tf.keras.losses.Huber(),
              optimizer=optimizer,
              metrics=["mae"])
history = model.fit(train_set, epochs=100, callbacks=[lr_schedule])
# Loss vs. LR on a log axis: choose the LR just before the curve blows up.
plt.semilogx(history.history["lr"], history.history["loss"])
plt.axis([1e-8, 1e-4, 0, 30])
# --- Full training run with the learning rate chosen from the sweep above
tf.keras.backend.clear_session()
tf.random.set_seed(51)
np.random.seed(51)
#batch_size = 16
dataset = windowed_dataset(x_train, window_size, batch_size, shuffle_buffer_size)
model = tf.keras.models.Sequential([
  tf.keras.layers.Conv1D(filters=32, kernel_size=3,
                      strides=1, padding="causal",
                      activation="relu",
                      input_shape=[None, 1]),
  tf.keras.layers.LSTM(32, return_sequences=True),
  tf.keras.layers.LSTM(32, return_sequences=True),
  tf.keras.layers.Dense(1),
  tf.keras.layers.Lambda(lambda x: x * 200)
])
optimizer = tf.keras.optimizers.SGD(lr=1e-5, momentum=0.9)
model.compile(loss=tf.keras.losses.Huber(),
              optimizer=optimizer,
              metrics=["mae"])
history = model.fit(dataset,epochs=500)
# Forecast over the whole series, then keep only the validation stretch:
# the slice aligns each prediction with the time step it forecasts.
rnn_forecast = model_forecast(model, series[..., np.newaxis], window_size)
rnn_forecast = rnn_forecast[split_time - window_size:-1, -1, 0]
plt.figure(figsize=(10, 6))
plot_series(time_valid, x_valid)
plot_series(time_valid, rnn_forecast)
# Validation MAE of the forecast.
tf.keras.metrics.mean_absolute_error(x_valid, rnn_forecast).numpy()
import matplotlib.image as mpimg
import matplotlib.pyplot as plt

#-----------------------------------------------------------
# Retrieve a list of list results on training and test data
# sets for each training epoch
#-----------------------------------------------------------
mae=history.history['mae']
loss=history.history['loss']

epochs=range(len(loss)) # Get number of epochs

#------------------------------------------------
# Plot MAE and Loss
#------------------------------------------------
plt.plot(epochs, mae, 'r')
plt.plot(epochs, loss, 'b')
plt.title('MAE and Loss')
plt.xlabel("Epochs")
# NOTE(review): the y-axis shows MAE/loss values, not accuracy — the label
# looks like a copy-paste leftover from a classification notebook; confirm.
plt.ylabel("Accuracy")
plt.legend(["MAE", "Loss"])
plt.figure()

# Zoom in on the epochs after initial convergence.
epochs_zoom = epochs[200:]
mae_zoom = mae[200:]
loss_zoom = loss[200:]

#------------------------------------------------
# Plot Zoomed MAE and Loss
#------------------------------------------------
plt.plot(epochs_zoom, mae_zoom, 'r')
plt.plot(epochs_zoom, loss_zoom, 'b')
plt.title('MAE and Loss')
plt.xlabel("Epochs")
plt.ylabel("Accuracy")
plt.legend(["MAE", "Loss"])
plt.figure()
```
| github_jupyter |
<a href="https://colab.research.google.com/github/Sahp59/daa_2021_1/blob/master/2octubre.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
## Meteorología en México
En Sistema meteorológico nacional lleva el registro de la lluvias desde el año 1985 y lo pone a disposición de la población por medio de la pagina datos.gob.mx.
En la siguiente liga se encuentran 2 archivos separados por comas CSV correspondientes a los registros de lluvias
mensuales y anuales de los años 2017 y 2018. En las columnas se encuentran 13 valores, correspondientes a los promedios mensuales y al promedio anual.
En los renglones se encuentran 33, correspondientes a cada uno de los 32 estados y a nivel nacional.
https://drive.google.com/file/d/1lamkxgq2AsXRu81Y4JTNXLVld4og7nxt/view?usp=sharing
## Planteamiento del problema
Diseñar un algoritmo y programarlo para que:
1. Solicite por teclado el año, el estado y el mes, en base a esa información:
- muestre en pantalla el promedio de ese mes en ese estado en el año seleccionado.
- muestre en pantalla el promedio anual del estado seleccionado.
- muestre la suma de los 12 meses de ese estado en el año seleccionado.
2. Busque el mes que mas llovió en todos los estados durante esos dos años. Imprimir año, estado y mes.
3. Busque el mes que menos llovió en los dos años. Imprimir año, estado y mes.
```
import csv
from Arrays import Array3D
def main():
    """Load the 2017/2018 monthly rainfall CSVs into a 3-D array and answer
    the rainfall queries from the exercise statement (indices: year offset,
    state row, month column)."""
    archivos = Array3D(2,33,13) # 2 years, 33 states (incl. national), 13 columns
    r = 0
    c = 0
    # Load both yearly files; skip the label column (c == 0) and the header
    # row(s) (r < 1), store everything else in the 3-D array.
    for ano in range(2017,2019,1):
        with open(str(ano)+"Precip.csv" , newline='') as file:
            reader = csv.reader(file)
            for row in reader:
                for col in row:
                    if c == 0:
                        None
                    else:
                        if (r<1):
                            None
                        else:
                            # NOTE(review): rows are stored at r-2 while only
                            # r < 1 is skipped — verify the CSV really has two
                            # header rows. Values are stored as strings; confirm
                            # Array3D (or later code) converts them to numbers.
                            archivos.set_item(ano-2017,r-2,c,col)
                    c += 1
                r += 1
                c = 0
            r = 0
    print("exito al guardar todo el archivo")
    a = int(input('Año (2017 o 2018)'))
    e = int(input('Edo (1-32)'))
    m = int(input('Mes (1-12)'))
    # Year / state / month lookup: column 0 of a row holds the state name and
    # row 0 holds the month names (hence the m-m / e-e index tricks).
    print(f"En el estado {archivos.get_item(a-2017,e,m-m)} llovio un promedio de {archivos.get_item(a-2017,e,m)} centimetros cubicos en el mes de {archivos.get_item(a-2017,e-e,m)} de {a} ")
    # Average for the selected state/month.
    # NOTE(review): range(0,1,1) visits only year 0 and the divisor 33 does
    # not match the 2 years of data — these constants look wrong; verify.
    s = 0
    for an in range(0,1,1):
        i = archivos.get_item(an,e,m)
        s = s + i
        d = s/33
    print(f"Del año 1985 al 2019 en el mes de {archivos.get_item(a-2017,e-e,m)} del estado {archivos.get_item(a-2017,e,m-m)} hay un promedio de {d} centimetros cubicos")
    # Average over all months of the selected state.
    s = 0
    sp = 0
    for an in range(0,1,1):
        for me in range(1,13,1):
            i = archivos.get_item(an,e,me)
            s = s + i
            p = s/12
            sp = (sp + p)/33
    print(f"Del año 1985 al 2018 en todos los meses del estado de {archivos.get_item(a-2017,e,m-m)} hay un promedio de {sp}")
    # Overall average across years, states and months.
    # NOTE(review): repeatedly dividing the running average inside the loops
    # is not a standard mean; the printed "promedio total" is questionable.
    s = 0
    sp = 0
    spp = 0
    for an in range(0,1,1):
        for es in range(1,33,1):
            for me in range(1,13,1):
                i = archivos.get_item(an,es,me)
                s = s + i
                p = s/12
                sp = (sp + p)/32
                spp = (spp + sp)/33
    print(f"El promedio total de todos los años, los meses y los estados de Mexico es de {spp}")
main()
```
| github_jupyter |
<img src="./pictures/DroneApp_logo.png" style="float:right; max-width: 180px; display: inline" alt="INSA" />
<img src="./pictures/logo_sizinglab.png" style="float:right; max-width: 100px; display: inline" alt="INSA" />
# Propellers estimation models with dimensional analysis and linear regressions
*Written by Marc Budinger, Aitor Ochotorena (INSA Toulouse) and Scott Delbecq (ISAE-SUPAERO), Toulouse, France.*
Propeller characteristics can be expressed by the $C_T$ and $C_P$ coefficients. These coefficients are functions of the dimensions and operating conditions of the propellers. Dimensional analysis and linear regression of supplier data can be used to generate $C_T$ and $C_P$ prediction models.
*APC MR (Multi-Rotor) propellers*

## Contents
1. [Dimensional analysis and $\pi$ numbers](#section_1)
2. [Choice of a proper propeller model](#section_2)
3. [Static behaviour](#section_3)
<a id='section_1'></a>
## Dimensional analysis and $\pi$ numbers
The propeller performances can be expressed with 2 aerodynamic coefficients:
- The thrust: $F = C_{T} \rho n^2 D^4$
- The power: $P = C_{P} \rho n^3 D^5 $
The [dimensional analysis](https://web.mit.edu/16.unified/www/FALL/thermodynamics/notes/node86.html#SECTION06374000000000000000) and especially the [Buckingham $\pi$ theorem](https://en.wikipedia.org/wiki/Buckingham_%CF%80_theorem) enable to find this results.
#### Dimensional analysis of the propeller thrust
The thrust $F$ of a propeller depends of multiple parameters (geometrical dimensions, air properties, operational points):
$F=f(\rho_{air},n,D,p,V,\beta_{air})$
with the parameters express in the following table.
Parameter|M|L|T|
--|--|--|--
Thrust $T$ [N] | 1|1|-2
Mass volumic (Air) $\rho_{air}$ [kg/m$^3$]| 1 | -3 | 0|
Rotational speed $n$ [Hz] | 0 | 0 | -1|
Diameter $D$ [m] | 0| 1 | 0|
Pitch $p$ [m] | 0 | 1 | 0|
Drone speed $V$ [m/s] | 0 | 1 | -1|
Bulk modulus (Air) $\beta_{air}$ [m/s] | 1 | -1 | -2|
$=\pi_0$| | | |
$=\pi_1$| | | |
$=\pi_2$| | | |
$=\pi_3$| | | |
**Remark**: The dimension of a parameter $x$ is function of dimensions L, M and T : $[x]=M^aL^bT^c$. The previous table gives the value of $a$, $b$ and $c$ for each parameter of the problem.
> **Exercice**: Complete the table with 4 dimensionless $\pi$ numbers possible for the given problem. Explain the number of dimensionless number.
**Answer:**
Buckingham $\pi$ theorem: 7 parameters - 3 dimensions = 4 dimensionless $\pi$ numbers
Parameter|M|L|T|
--|--|--|--
Thrust $T$ [N] | 1|1|-2
Mass volumic (Air) $\rho_{air}$ [kg/m$^3$]| 1 | -3 | 0|
Rotational speed $n$ [Hz] | 0 | 0 | -1|
Diameter $D$ [m] | 0| 1 | 0|
Pitch $Pitch$ [m] | 0 | 1 | 0|
Drone speed $V$ [m/s] | 0 | 1 | -1|
Bulk modulus (Air) $\beta_{air}$ [Pa] | 1 | -1 | -2|
$C_t=\frac{T}{\rho n^2D^4}=\pi_0$| 0 | 0 | 0 |
$\frac{Pitch}{D}=\pi_1$| 0 | 0 | 0 |
$J=\frac{V}{nD}=\pi_2$| 0 | 0 | 0 |
$\frac{\rho n^2D^2}{\beta}=\pi_3$| 0 | 0 | 0 |
#### Effect of the rotational speed
APC suppliers give complete [propeller data](https://www.apcprop.com/technical-information/) for all their propellers. From the file *APC_STATIC-data-all-props.csv*, we find all static data provided by APC:
```
import pandas as pd

# Read the .csv file with propeller data (semicolon-separated APC export)
path='./data/'
df = pd.read_csv(path+'APC_STATIC-data-all-props.csv', sep=';')

# Print the head (first lines of the file)
df.head()
```
For next steps, we keep only the Multi-Rotor type propellers (MR).
```
# Data Filtering
# Keeping only multirotor (MR) type propellers
df_MR=df[df['TYPE']=='MR']
df_MR.head()
```
We plot the $C_p$ and $C_t$ for the a 10x4.5 propeller (COMP n° 8 in the previous table). We can notice that these coefficients are constant up to a certain value of speed of rotation. The manufacturer recommends using these propellers for a product speed of rotation $\times$ diameter less than a limit (depending on the type of propeller technology) and given [here](https://www.apcprop.com/technical-information/rpm-limits/):
Maximum speed(RPM) x prop diameter (inches) = 105,000
for MR type which gives a blade tip speed of 135 m/s. The dark spots on the following diagram correspond to this non-recommended operating zone.
> **Question:** Explain the origin of this operating limit comes from and the $\pi$ number that can express it.
**Answer:**
The $\frac{\rho_{air} n^2D^2}{\beta}$ dimensionless number is similar to Mach number $M_a=V/c$ with $c=\sqrt{\frac{\beta_{air}}{\rho_{air}}}$ the speed of sound:
$$\frac{\rho n^2D^2}{\beta_{air}}=M_a^2$$
```
import numpy as np
import matplotlib.pyplot as plt

# Keep only the component n°8 (10x4.5 propeller)
df_8=df_MR[df_MR['COMP']==8]
# Extract operating points beyond the recommended RPM x diameter limit
df_8_ND=df_8[df_8['N.D']>105000.]

# Plot the data.
# Fix: the legend labels were swapped relative to the plotted columns
# (the 'Cp' column was labelled 'Ct' and vice versa).
plt.plot(df_8['RPM'],df_8['Cp'], 'bo', label='Cp')
plt.plot(df_8['RPM'],df_8['Ct'], 'ro', label='Ct')
plt.plot(df_8_ND['RPM'], df_8_ND['Ct'], 'mo', label='Filtered Ct')
plt.plot(df_8_ND['RPM'], df_8_ND['Cp'], 'co', label='Filtered Cp')
plt.xlabel('Rotational Speed [RPM]')
plt.ylabel('Cp and Ct')
plt.legend()
plt.grid()
plt.show()
```
<a id='section_2'></a>
## Choice of a proper propeller model
The choice of a propeller is based on criteria of good static and dynamic performance. In the following table, we have gathered data values from different propeller families for a middle-range model:
- APC-E-9x4.5
- Graupner CAM 9x4
- Graupner Super Nylon 9x4
- KAVON FK 9x4
- KYOSHO 9x6
- MASTER AIRSCREW G/F 9x5
- APC MR 9x4.5
Data available [here](https://m-selig.ae.illinois.edu/props/volume-1/propDB-volume-1.html)
```
import pandas as pd

# Dynamic (in-flight) data for several 9-inch propeller families.
path='./data/'
df = pd.read_csv(path+'9x45-DynamicProps.csv',sep=';')
df.head()

import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
# plt.rc('text', usetex=True)
# plt.rc('font', family='serif')

# Three side-by-side panels: CT, CP and efficiency vs advance ratio J,
# one scatter series per propeller family (grouped by TYPE).
f, ((ax1, ax2,ax3)) = plt.subplots(1, 3,figsize=(11, 6))
# f.tight_layout()
groups = df.groupby('TYPE')
for name, group in groups:
    ax1.plot(group.J, group.CT, marker='o', linestyle='', ms=5,
            label=name)
ax1.set_title('Thrust coef. for different J')
ax1.set_xlabel('J[-]',fontsize=14)
ax1.set_ylabel('CT[-]',fontsize=14)
for name, group in groups:
    ax2.plot(group.J, group.CP, marker='o', linestyle='', ms=5,
            label='_nolegend_')
ax2.set_title('Power coef. for different J')
ax2.set_xlabel('J[-]',fontsize=14)
ax2.set_ylabel('CP[-]',fontsize=14)
for name, group in groups:
    ax3.plot(group.J, group.Eta, marker='o', linestyle='', ms=5,
            label='_nolegend_')
ax3.set_title('Efficiency vs Advance Ratio J')
ax3.set_xlabel('J[-]',fontsize=14)
ax3.set_ylabel('ETA[-]',fontsize=14)
# Single shared legend (labels only attached to the first panel's series).
f.legend(loc='lower center', bbox_to_anchor=(1.1, 0.125),prop={'size': 14},ncol=1)
#f.legend(loc=(axbox.x0,0),prop={'size': 8})
ax1.grid()
ax2.grid()
ax3.grid()
ax1.tick_params(axis = 'both', labelsize = 14)
ax2.tick_params(axis = 'both', labelsize = 14)
ax3.tick_params(axis = 'both', labelsize = 14)
f.tight_layout()
```
APC MR is showing best results regarding performance and efficiencies for dynamic behaviour for middle-range series.
<a id='section_3'></a>
## Static behaviour
In this section, we will focus on finding out continuous mathematical expressions for the performance of the propellers in the static behaviour, this is given in the scenario of hover and take-off.
### Linear regression
For next calculations, we keep only data with following criteria:
- Type 'MR' (Multi-Rotor)
- Maximum RPM < 105,000/prop diameter (inches)
```
# Keep only operating points within the manufacturer's limit
# (RPM x diameter product N.D < 105000 for MR propellers).
df_MR_ND=df_MR[df_MR['N.D']<105000.]
```
The APC static data correspond to the hover operational point where the speed V=0. The aerodynamic coefficients are thus only a function of $p/D$ (called 'ANGLE' in the .csv file) dimensionless number.
$C_t=\frac{F}{\rho_{air} n^2 D^3}=f(\frac{p}{D})$
$C_p=\frac{P}{\rho_{air} n^3 D^5}=g(\frac{p}{D})$
The following code uses the [StatsModels](https://www.statsmodels.org/stable/index.html) package in order to set up a $C_t$ estimator for the static case ($V=0$ or $J=0$).
```
# Determination of the least squares estimator with the OLS function
# of the StatsModels package
import statsmodels.api as sm

# Data: pitch/diameter ratio ('ANGLE') as regressor, Ct and Cp as responses
x = df_MR_ND['ANGLE'].values
y_Ct = df_MR_ND['Ct'].values
y_Cp = df_MR_ND['Cp'].values

# Matrix X (intercept column of ones + regressor) and response vectors Y
X = np.concatenate((np.ones(np.size(x)).reshape(np.size(x),1),x.reshape(np.size(x),1)), axis=1)
Y_Ct = y_Ct.reshape((np.size(x),1))
Y_Cp = y_Cp.reshape((np.size(x),1))

# OLS regression for both aerodynamic coefficients
model_Ct = sm.OLS(Y_Ct, X)
model_Cp = sm.OLS(Y_Cp, X)
result_Ct = model_Ct.fit()
result_Cp = model_Cp.fit()

# Ct Parameters (intercept, slope) and goodness of fit
# ----
print('Parameters: ', result_Ct.params)
print('R2: ', result_Ct.rsquared)

# Y vector prediction
Ct_OLS = result_Ct.predict(X)

# Plot the data
plt.plot(x, y_Ct, 'o', label='Reference data')
plt.plot(x, Ct_OLS, '-g', label='Data prediction')
plt.xlabel('Pitch/Diameter ratio')
plt.ylabel('Ct')
plt.title('Comparison of reference data and OLS regression')
plt.legend()
plt.grid()
plt.show()

print("Ct estimation model : Ct = %.2e + %.2e * p/D "%(result_Ct.params[0],result_Ct.params[1]))
```
> **Exercice**: Produce an OLS (Ordinary Least Square) regression of $C_p$ data.
```
y_Cp = df_MR_ND['Cp'].values

# OLS regression of Cp against p/D (same regressor matrix X as for Ct)
model_Cp = sm.OLS(Y_Cp, X)
# Fix: the model must be fitted here — the original cell rebuilt model_Cp
# but never called .fit(), silently reusing result_Cp from a previous cell.
result_Cp = model_Cp.fit()

# Cp Parameters (intercept, slope) and goodness of fit
# -----
print('Parameters: ', result_Cp.params)
print('R2: ', result_Cp.rsquared)

# Y vector prediction
Cp_OLS = result_Cp.predict(X)

# Plot the data
plt.plot(x, y_Cp, 'o', label='Reference data')
plt.plot(x, Cp_OLS, '-g', label='Data prediction')
plt.xlabel('Pitch/Diameter ratio')
plt.ylabel('Cp')
plt.title('Comparison of original data and OLS regression')
plt.legend()
plt.grid()
plt.show()

print("Cp estimation model : Cp = %.2e + %.2e * p/D "%(result_Cp.params[0],result_Cp.params[1]))
```
<a id='section_4'></a>
| github_jupyter |
# CoursewareHubのセットアップ--直接学認フェデレーションを利用する
---
VCノードにCoursewareHub環境を構築します。
## 概要
CoursewareHubの構成要素を以下に示します。

このNotebookでは上図で `CoursewareHub` を示す枠内にあるものを構築します。
> このNotebookでは IdP-proxyを利用せずに、直接CoursewareHubを学認のSPとして登録する手順について記しています。
各コンテナの役割や処理の詳細については https://github.com/NII-cloud-operation/CoursewareHub-LC_jupyterhub-deploy を参照してください。
### 事前に準備が必要となるものについて
このNotebookを実行するにあたって、あらかじめ準備が必要となるものについて以下に記します。
* CoursewareHubのサーバ証明書
* 学認クラウドゲートウェイのグループ名
#### CoursewareHubのサーバ証明書
CoursewareHubではHTTPSによる公開を行うためサーバ証明書とその秘密鍵を準備する必要があります。また必要に応じて中間CA証明書を準備してください。
#### 学認クラウドゲートウェイのグループ
このNotebookで構築するCoursewareHubでは[学認クラウドゲートウェイサービス](https://cg.gakunin.jp/)の[GakuNin mAP](https://meatwiki.nii.ac.jp/confluence/display/gakuninmappublic/Home)を利用して利用者のグループ管理を行います。
CoursewareHubの利用対象者からなるグループを、学認クラウドゲートウェイにて作成しておいてください。
### UnitGroup名
CoursewareHubの構築環境となるVCPのUnitGroup名を指定します。
VCノードを作成時に指定した値を確認するために `group_vars`ファイル名の一覧を表示します。
```
!ls -1 group_vars/
```
UnitGroup名を次のセルに指定してください。
```
# (例)
# ugroup_name = 'CoursewareHub'
ugroup_name =
```
### チェック
対象となるVCノードがAnsibleによって操作できることを確認します。
Ansibleの設定ファイルの場所を環境変数に設定しておきます。
```
from pathlib import Path
import os

# Point ANSIBLE_CONFIG at the local ansible.cfg (if present) so that the
# ansible invocations below pick up this notebook's configuration.
cfg_ansible = Path('ansible.cfg')
if cfg_ansible.exists():
    os.environ['ANSIBLE_CONFIG'] = str(cfg_ansible.resolve())
```
構築対象となる各VCノードにアクセスできることを確認します。
```
target_hub = f'{ugroup_name}_manager'
!ansible {target_hub} -m ping
target_nodes = f'{ugroup_name}_worker'
!ansible {target_nodes} -m ping
```
UnitGroup名に対応する group_varsファイルが存在していることを確認します。
```
!test -f group_vars/{ugroup_name}
```
## パラメータの設定
### ホスト名
CoursewareHubのホスト名(FQDN)を指定します。ここで指定したホスト名は、利用者からの入り口となる `auth-proxy` コンテナに割り当てられます。
```
# (例)
# master_fqdn = 'hub.example.org'
master_fqdn =
```
### 管理者情報の設定
管理者として登録するメールアドレスを指定します。CoursewareHubに管理者としてログインするにはここで指定したメールアドレスをログイン画面で入力します。
> このNotebookでは、CoursewareHubの学認への登録がまだ行われていないことを想定して、管理者をローカルユーザとして作成します。
```
# (例)
# teacher_email = 'admin@example.org'
teacher_email =
```
管理者のパスワードを入力してください。ここで指定するパスワードはローカルユーザとしてログインする際に入力するパスワードとなります。
> 学認への登録を行うまでは、学認のIdPを利用したログインができません。そのためローカルユーザとして登録するパスワードをここでは指定します。
```
from getpass import getpass
teacher_password = getpass()
```
### 学認フェデレーション
CoursewareHubと連携する学認フェデレーションに関するパラメータを設定します。
次のセルで参加するフェデレーションを指定してください。
```
# (例)
# federation = 'production' # 運用フェデレーション
# federation = 'test' # テストフェデレーション
federation =
```
参加するフェデレーションに応じたサーバとの連携を行います。各フェデレーションのホスト名を以下の表に示します。
| | 学認DS(Discovery Service) | クラウドゲートウェイ |
|-|--|--|
|運用フェデレーション|`ds.gakunin.nii.ac.jp`|`cg.gakunin.jp`|
|テストフェデレーション|`test-ds.gakunin.nii.ac.jp`|`sptest.cg.gakunin.jp`|
上記の表に示した以外のサーバを指定する場合は、次のセルのコメントを外してサーバのホスト名を設定してください。
```
# ds_fqdn = 'ds.gakunin.nii.ac.jp' # 学認DS(Discovery Service)
# cg_fqdn = 'cg.gakunin.jp' # クラウドゲートウェイ
```
### mAPのグループを指定する
CoursewareHubの利用を許可するユーザを、学認mAPのグループにより指定します。
CoursewareHubの利用を許可するグループのリストを次のセルで指定してください。指定する値は学認 mAP のグループIDの値となります。
```
# (例)
# cg_groups = [
# 'group1-id',
# 'group2-id',
#]
cg_groups = [
]
```
mAPから渡されるグループ情報のプレフィックスを指定します。利用しているフェデレーションによって異なる値を指定する必要があります。
* 運用フェデレーション
- `https://cg.gakunin.jp/gr/`
* テストフェデレーション
- `https://sptest.cg.gakunin.jp/gr/`
```
# Select the mAP group-ID prefix matching the chosen GakuNin federation.
if federation == 'production':
    cg_group_prefix = 'https://cg.gakunin.jp/gr/' # production federation
elif federation == 'test':
    cg_group_prefix = 'https://sptest.cg.gakunin.jp/gr/' # test federation
else:
    raise RuntimeError("federationが設定されていません")
print(cg_group_prefix)
```
### リソース制限の設定
各ユーザが利用するコンテナのリソース制限に関する設定を行います。
#### リソース制限の設定方法
CoursewareHubではユーザの役割に応じて利用方法が異なることがあります。例えば講師権限ユーザーは採点を行う場合などに複数のNotebookを同時に開くことがあります。そのような利用を行う場合、一般の受講者ユーザよりも多くのリソースが必要となります。また、講師権限を与えないが採点の補助をして欲しいTA(Teaching Assistant)といった役割のユーザーがいる場合があります。その場合、一般の受講者ユーザーとは異なるリソース制限設定を行うことが考えられます。
これらのことに対応するためにCoursewareHubでは、権限もしくは所属グループに応じたリソース制限の設定を行うことができます。
リソース制限の設定はYAMLファイルで行います。YAMLファイルの記述例を以下に示します。
```yaml
groups:
student:
mem_limit: 1G
cpu_limit: 1.0
priority: 10
teaching-assistant:
mem_limit: 2G
cpu_limit: 2.0
cpu_guarantee: 1G
cpu_guarantee: 0.5
priority: 1
admin:
mem_limit: 5G
default:
mem_limit: 1G
cpu_limit: 2.0
mem_guarantee: 1G
cpu_guarantee: 0.5
```
上の例では `student`グループ、`teaching-assistant`グループ、講師権限ユーザ(`admin`)、それ以外のユーザ(デフォルト設定)についてリソース制限の設定を行っています。複数のグループに所属するユーザについては、グループの`priority`が小さいほうのグループの設定が優先されます。上記の例では`student`グループ、`teaching-assistant`グループの両方に属するユーザは `priority`が`1`となっている`teaching-assistant`グループの設定が優先されます。
コンテナに対するリソース制限設定として以下の4つの属性を指定することができます。
* [mem_guarantee](https://jupyterhub.readthedocs.io/en/stable/api/spawner.html#jupyterhub.spawner.Spawner.mem_guarantee)
- コンテナの使用が保証されるメモリサイズの下限
* [mem_limit](https://jupyterhub.readthedocs.io/en/stable/api/spawner.html#jupyterhub.spawner.Spawner.mem_limit)
- コンテナが使用可能なメモリのサイズの上限
* [cpu_guarantee](https://jupyterhub.readthedocs.io/en/stable/api/spawner.html#jupyterhub.spawner.LocalProcessSpawner.cpu_guarantee)
- コンテナの使用が保証される CPU 使用率の下限
* [cpu_limit](https://jupyterhub.readthedocs.io/en/stable/api/spawner.html#jupyterhub.spawner.LocalProcessSpawner.cpu_limit)
- コンテナが使用可能な CPU 使用率の上限
JupyterHub にはユーザーグループを扱う仕組みがありますが、それを管理するためのユーザーインターフェースは存在しません。そのため、講師権限ユーザー向けにグループ操作を行うための Notebook を用意しました。
* 83_AddUsersToGroup.ipynb
- ユーザーを指定したグループに追加する Notebook
* 84_RemoveUsersFromGroup.ipynb
- ユーザーを指定したグループから削除する Notebook
* 85_ShowUserGroups.ipynb
- ユーザーの属するグループを確認する Notebook
構築したCoursewareHub環境において、講師権限ユーザーに提供される一連のNotebookのなかに上記のグループ管理用 Notebook が含まれています。
#### リソース制限の設定を行うYAMLファイルを作成する
前節で説明したYAMLファイルを作成します。
リソースファイルを作成するディレクトリを作成します。
```
from tempfile import mkdtemp
from pathlib import Path
rsc_pdir = Path('./rsc').absolute()
rsc_pdir.mkdir(exist_ok=True)
rsc_dir = Path(mkdtemp(dir=rsc_pdir))
```
YAMLファイルを作成します。
リソース制限の設定に対応する内容に次のセルを変更してください。その後、実行するとYAMLファイルが作成されます。
```
%%writefile {rsc_dir}/resource.yaml
groups:
group1:
mem_limit: 1G
cpu_limit: 1.0
priority: 20
admin:
mem_limit: 5G
default:
mem_limit: 1G
cpu_limit: 2.0
mem_guarantee: 1G
cpu_guarantee: 0.5
```
YAMLファイルの記述内容が妥当であるかをチェックします。
次のセルが正常に実行できることを確認してください。実行結果がエラーとなった場合はYAMLファイルの記述内容に問題があります。上のセルを unfreeze して記述内容を修正して、再実行してください。
```
import jsonschema
import json
import yaml
!ansible -c local {target_hub} -m get_url -a \
'url=https://raw.githubusercontent.com/NII-cloud-operation/CoursewareHub-LC_platform/master/jupyterhub/resources-schema.json \
dest={rsc_dir}/resources-schema.json'
with (rsc_dir / 'resources-schema.json').open() as f:
resources_config_schema = json.load(f)
with (rsc_dir / 'resource.yaml').open() as f:
resources_config = yaml.load(f, Loader=yaml.SafeLoader)
jsonschema.validate(resources_config, resources_config_schema)
print(json.dumps(resources_config, indent=2))
```
### JupyterHub
JupyterHubコンテナに設定する環境変数を指定します。
設定できる環境変数の説明を以下に示します。
* `CONCURRENT_SPAWN_LIMIT`
- ユーザーのNotebookサーバーの並列起動処理数の上限
- サーバー起動待ちの数が上限を超えると、新たな起動が拒否される
* `SPAWNER_CONSTRAINTS`
- ユーザーのNotebookサーバーコンテナの起動ノードの制約条件
- https://docs.docker.com/engine/swarm/services/#placement-constraints の --constraintの値を指定する
- 複数の値を指定する場合は `;` でつなげる
- `node.role==worker`を指定することを推奨する
+ ⇒single-user serverコンテナは workerノードで実行され、masterノードではシステムコンテナのみが実行されるようになる
* `SPAWNER_HTTP_TIMEOUT`
- JupyterHubが、ユーザーのNotebookサーバーへのHTTPアクセスが可能になるまで待つタイムアウト時間(秒)
* `SPAWNER_START_TIMEOUT`
- JupyterHubが、ユーザーのNotebookサーバーコンテナの起動を待つタイムアウト時間(秒)
* `CULL_SERVER`
- `yes`: ユーザーのNotebookサーバーのculling機能(アイドルになったサーバーの停止)を有効にする
- `no`: ユーザーのNotebookサーバーは自動で停止されない
- デフォルトは `no`
* `CULL_SERVER_IDLE_TIMEOUT`
- culling機能で停止されるNotebookサーバーのアイドル時間(秒)
- この時間を越えてアイドル状態のサーバーは停止される
- デフォルト値は `600`
* `CULL_SERVER_MAX_AGE`
- ユーザーNotebookサーバーの最大寿命(秒)
- アクティブでも停止される
- デフォルト、または`0`ではアクティブなサーバーは停止されない
* `CULL_SERVER_EVERY`
- サーバーのアイドル状態のチェック間隔(秒)
- チェックとチェックの間では、アイドル状態になっても停止されない
* `ADMIN_ACCESS`
- `yes`または`1`を設定すると、講師がAdminコントロールパネルから他のユーザーのNotebookサーバーにアクセスできるようになる
- デフォルトで有効
設定する環境変数名とその設定値を、次のセルの`dict`変数`jupyterhub_params`のキー、バリューに指定してください。
```
# (例)
# jupyterhub_params = {
# 'SPAWNER_CONSTRAINTS': 'node.role==worker', # 起動ノードの制約条件
# }
jupyterhub_params = {
'SPAWNER_CONSTRAINTS': 'node.role==worker',
}
```
### single-user Jupyter notebook server
JupyterHubが起動する single-user [Jupyter notebook](https://jupyter-notebook.readthedocs.io/) serverのコンテナイメージを指定します。
以下のようなものを指定できます。
* [niicloudoperation/notebook](https://github.com/NII-cloud-operation/Jupyter-LC_docker)
- JupyterNotebookに*Literate Computing for Reproducible Infrastructure*のためのツールを追加したもの
* harbor.vcloud.nii.ac.jp/vcp/coursewarehub:singleuser-disable-run-through
- `niicloudoperation/notebook`に対して[Jupyter-LC_run_through](https://github.com/NII-cloud-operation/Jupyter-LC_run_through)の機能などを無効にしたもの
```
# (例)
# singleuser_image = 'niicloudoperation/notebook'
# singleuser_image = 'harbor.vcloud.nii.ac.jp/vcp/coursewarehub:singleuser-disable-run-through'
singleuser_image = 'harbor.vcloud.nii.ac.jp/vcp/coursewarehub:singleuser-disable-run-through'
```
### データベース
CoursewareHubのデータを保存するデータベースに関するパラメータを指定します。
CoursewareHubが利用するデータベースの名前を指定してください。
```
# (例)
# db_name = 'jupyterhub'
db_name = 'jupyterhub'
```
データベースのユーザ名を指定してください。
```
# (例)
# db_user = 'jhauth'
db_user = 'jhauth'
```
データベースのパスワードを指定してください。
```
from getpass import getpass
db_password = getpass()
```
### オーバーレイネットワークの指定
CoursewareHubのコンテナはDocker Swarmのオーバーレイネットワークで実行されます。オーバーレイネットワークに割り当てるサブネットを指定します。
> 基本的にはデフォルト値からの変更は不要ですが、VCノードに割り当てられているIPアドレスと範囲が重複している場合は他のサブネットに変更して下さい。
```
# (例)
# cousewarehub_backend = '10.1.0.0/20'
cousewarehub_backend = '10.1.0.0/20'
```
指定されたサブネットがVCノードに割り当てられているIPアドレスと重なっていないことをチェックします。次のセルを実行してエラーとならないことを確認してください。
```
!ansible-playbook -v -e cousewarehub_backend={cousewarehub_backend} -l {ugroup_name} \
playbooks/check-subnet.yml
```
### パラメータの保存
ここまで指定したパラメータのうち、ansible playbookや他のNotebookでも参照する値を ansible の変数として `group_vars`ファイルに保存します。
```
import yaml
from pathlib import Path

gvars_path = Path(f'group_vars/{ugroup_name}')

# Load the existing group_vars file so values written by earlier
# notebooks are preserved and only the keys below are updated.
with gvars_path.open() as f:
    gvars = yaml.safe_load(f)

gvars.update({
    'master_fqdn': master_fqdn,
    'rsc_yml': f'{rsc_dir}/resource.yaml',
    'jupyterhub_params': jupyterhub_params,
    'singleuser_image': singleuser_image,
    'db_name': db_name,
    'db_user': db_user,
    'db_password': db_password,
    'cousewarehub_backend': cousewarehub_backend,
    'cg_groups': [cg_group_prefix + x for x in cg_groups],
    'federation': federation,
})

# These FQDNs are optional; store them only when the notebook user
# defined them earlier.
if 'ds_fqdn' in vars():
    gvars['ds_fqdn'] = ds_fqdn
if 'cg_fqdn' in vars():
    # Bug fix: this previously wrote ds_fqdn under the cg_fqdn key,
    # silently clobbering the CG FQDN with the DS FQDN.
    gvars['cg_fqdn'] = cg_fqdn

with gvars_path.open(mode='w') as f:
    yaml.safe_dump(gvars, stream=f)
```
## CoursewareHubのセットアップ
CoursewareHubの構成要素となる、各コンテナのセットアップを行います。
### auth-proxy
#### 証明書の配置
##### サーバ証明書
auth-proxyコンテナで使用するサーバ証明書の配置を確認します。
サーバ証明書を配置するディレクトリを作成します。
```
!ansible {target_hub} -b -m file -a \
'path={{{{base_dir}}}} state=directory owner={{{{ansible_user}}}}'
!ansible {target_hub} -b -m file -a \
'path={{{{base_dir}}}}/certs state=directory owner={{{{ansible_user}}}}'
```
作成したディレクトリ`/srv/cwh/certs`に証明書、秘密鍵を配置してください。出どころなどの情報を必要以上に残さないためにNotebookからの操作ではなく、ターミナルなどから **managerノードに ssh でログインして操作を行ってください**。
配置する証明書などのファイル名は以下のようにしてください。
* サーバ証明書
- `/srv/cwh/certs/auth-proxy.cer`
* サーバ証明書と中間CA証明書を連結したもの
- `/srv/cwh/certs/auth-proxy.chained.cer`
* 秘密鍵
- `/srv/cwh/certs/auth-proxy.key`
- パスフレーズを無しにする
sshでログインする manager ノードのIPアドレスを確認します。表示されたIPアドレスに対して、ユーザ名`vcp`と「VCノード作成」のNotebookで設定したSSHの秘密鍵を指定することで manager ノードにsshでログインできます。
```
!ansible {target_hub} -m debug -a 'var=vc_ipaddress'
```
**証明書などの配置を行った後に、これ以降の操作を行ってください。**
証明書が配置されていることを確認します。managerノードのサーバ証明書の内容を表示してみます。
```
!ansible {target_hub} -a \
'openssl x509 -noout -text -in {{{{certs_dir}}}}/auth-proxy.cer'
```
秘密鍵の内容を表示してみます。
```
!ansible {target_hub} -a \
'openssl rsa -noout -text -in {{{{certs_dir}}}}/auth-proxy.key'
```
中間CA証明書を連結したサーバ証明書の内容を表示してみます。
```
!ansible {target_hub} -a \
'openssl x509 -noout -text -in {{{{certs_dir}}}}/auth-proxy.chained.cer'
```
証明書の owner, group などの値を利用環境に合わせて以下のように設定します。
* owner: 33
* group: 33
この設定はコンテナ内では以下のように設定したことに相当します。
* owner: `www-data`
* group: `www-data`
```
cert_owner = 33
cert_group = 33
!ansible {target_hub} -b -m file -a \
'path={{{{certs_dir}}}} owner={cert_owner} \
group={cert_group} state=directory'
!ansible {target_hub} -b -m file -a \
'path={{{{certs_dir}}}}/auth-proxy.cer \
owner={cert_owner} group={cert_group}'
!ansible {target_hub} -b -m file -a \
'path={{{{certs_dir}}}}/auth-proxy.chained.cer \
owner={cert_owner} group={cert_group}'
!ansible {target_hub} -b -m file -a \
'path={{{{certs_dir}}}}/auth-proxy.key \
owner={cert_owner} group={cert_group} mode=0600'
```
##### メタデータ署名証明書の配置
メタデータの署名を検証するための証明書を配置します。
証明書を配布しているURLを指定してください。
```
# 運用フェデレーション
# metadata_signer_url = 'https://metadata.gakunin.nii.ac.jp/gakunin-signer-2017.cer'
#
# テストフェデレーション
# metadata_signer_url = 'https://metadata.gakunin.nii.ac.jp/gakunin-test-signer-2020.cer'
metadata_signer_url =
```
指定されたURLの証明書を配置します。
```
!ansible {target_hub} -b -m get_url -a \
'url={metadata_signer_url} dest={{{{certs_dir}}}}/gakunin-signer.cer \
owner={cert_owner} group={cert_group} mode=0644'
```
配置した証明書のFingerprintを表示してみます。
参考のため、証明書の fingerprintを記載しているURLを以下に示します。
* 運用フェデレーション
- https://meatwiki.nii.ac.jp/confluence/display/GakuNinShibInstall/signer
* テストフェデレーション
- https://www.gakunin.jp/join/test/rule
```
!ansible {target_hub} -a 'chdir={{{{base_dir}}}}/certs \
openssl x509 -in gakunin-signer.cer -sha256 -fingerprint -noout'
```
#### メタデータ
SAMLのメタデータを配置するディレクトリを用意します。
```
!ansible {target_hub} -b -m file -a \
'path={{{{base_dir}}}}/metadata state=directory owner=33 group=33'
```
##### SP検証環境のメタデータ(テストフェデレーション)
テストフェデレーションのSP検証環境メタデータを取得している場合は CoursewareHub環境への配置を行います。
> SP検証環境のメタデータは利用申請の際に送付されてくるので、通常はこの時点でメタデータファイルがありません。そのため**この節は実行せずにスキップしてください**。通常は、次に実行する「331-CoursewareHubの登録を学認へ申請する」のNotebookのなかで SP検証環境のメタデータを登録します。
>
> この節は、既に学認への登録を済ませている CoursewareHub を再構築する場合などに実行することを想定しています。
SP検証環境のメタデータを、このNotebook環境に配置してそのファイル名を次のセルに設定してください。
```
# (例)
# sptest_metadata = './sptestcgidp-metadata.xml'
sptest_metadata =
```
SP検証環境のメタデータを配置します。
```
if 'sptest_metadata' in vars():
!ansible {target_hub} -b -m copy -a \
'src={sptest_metadata} dest={{{{base_dir}}}}/metadata/cgidp-metadata.xml \
owner=33 group=33'
```
#### auth-proxyコンテナのセットアップ
サーバ証明書以外に
auth-proxyコンテナで必要となるファイルを準備する Ansible Playbook を実行します。
この節で実行する Playbook では以下の処理を行います。
* SimpleSAMLphp のcron実行のためのランダムキーの作成
* コンテナイメージの取得
ここで取得するauth-proxyのコンテナイメージは[NII-cloud-operation/CoursewareHub-LC_platform](https://github.com/NII-cloud-operation/CoursewareHub-LC_platform)の `auth-proxy-fed/`ディレクトリをVCP向けにカスタマイズして [VCPのコンテナレジストリ](https://harbor.vcloud.nii.ac.jp/)に格納したものです。VCPでカスタマイズした部分のソースは [./docker/app/auth-proxy-fed](docker/app/auth-proxy-fed/)にあります。
まず、実際に設定を変更する前にドライラン(チェックモード)でAnsibleを実行します。
```
!ansible-playbook -l {target_hub} -CDv playbooks/setup-auth-proxy.yml || true
```
実際に設定変更を行います。
```
!ansible-playbook -l {target_hub} playbooks/setup-auth-proxy.yml
```
auth-proxy コンテナイメージを取得したことを確認します。
```
!ansible {target_hub} -m shell -a 'docker images | grep auth-proxy'
```
auth-proxyコンテナのためにセットアップしたディレクトリの状態を確認します。
```
!ansible {target_hub} -a 'tree {{{{auth_proxy_dir}}}}'
```
### JupyterHub
#### restuserのインストール
JupyterHubコンテナからホスト環境のローカルユーザ情報を取得するために利用する[restuser](https://github.com/minrk/restuser)をインストールします。
ここでは Ansible Playbookを実行することで restuser のインストールを行います。実行する Playbook は
[NII-cloud-operation/CoursewareHub-LC_jupyterhub-deploy](https://github.com/NII-cloud-operation/CoursewareHub-LC_jupyterhub-deploy) の `roles/restuser/` にあるものをもとにしています。
実際に設定を変更する前にドライラン(チェックモード)でAnsibleを実行します。
```
!ansible-playbook -l {target_hub} -CDv playbooks/install-restuser.yml || true
```
実際にrestuserのインストールを行います。
```
!ansible-playbook -l {target_hub} playbooks/install-restuser.yml
```
インストールされたファイルを確認します。
```
!ansible {target_hub} -a 'tree /srv/restuser'
```
restuserはホスト環境のサービスとして実行します。インストールしたrestuserサービスの状態を確認します。次のセルの出力結果に`Active: active (running)` と表示されることを確認してください。
```
!ansible {target_hub} -b -a 'systemctl status restuser'
```
`restuser`によってユーザ情報が取得できることを確認します。HTTPの応答がOK(200)となり、以下の情報が取得できることを確認してください。
```
{"name": "vcp", "dir": "/home/vcp", "shell": "/bin/bash", "uid": 1000, "gid": 1000}
```
```
!ansible {target_hub} -b -m shell \
-a 'echo -e "POST /{{{{ansible_user}}}} HTTP/1.0\r\n" | nc -U /var/run/restuser.sock'
```
#### JupyterHubコンテナに関するファイルを準備する
JupyterHubコンテナを実行するために必要となるファイルを準備する Ansible Playbook を実行します。
Playbook では以下の処理を行います。
* コンテナイメージの取得
* ロゴファイルの配置
ここで取得するJupyterHubのコンテナイメージは[NII-cloud-operation/CoursewareHub-LC_platform](https://github.com/NII-cloud-operation/CoursewareHub-LC_platform)の `jupyterhub/`ディレクトリをビルドして [VCPのコンテナレジストリ](https://harbor.vcloud.nii.ac.jp/)に格納したものです。
まず、実際に設定を変更する前にドライラン(チェックモード)でAnsibleを実行します。
> ドライランではロゴファイルを配置するディレクトリが作成されないため、ファイルの配置でエラーとなりますがこの時点では問題ありません。
```
!ansible-playbook -l {target_hub} -CDv playbooks/setup-jupyterhub.yml || true
```
実際に設定変更を行います。
```
!ansible-playbook -l {target_hub} playbooks/setup-jupyterhub.yml
```
JupyterHubのコンテナイメージが取得できたことを確認します。
```
!ansible {target_hub} -a \
'docker images -f label=org.jupyter.service=jupyterhub'
```
JupyterHubコンテナのためにセットアップしたディレクトリの状態を確認します。
```
!ansible {target_hub} -a 'tree {{{{jupyterhub_dir}}}}'
```
### PostgreSQL
PostgreSQLコンテナに関するファイルを準備する Ansible Playbook を実行します。
この節で実行する Playbook では以下の処理を行います。
* PostgreSQLのデータを格納するディレクトリの作成
* 初期設定SQLファイルの配置
* コンテナイメージの取得
実際に設定を変更する前にドライラン(チェックモード)でAnsibleを実行します。
```
!ansible-playbook -l {target_hub} -CDv playbooks/setup-postgres.yml
```
実際に設定変更を行います。
```
!ansible-playbook -l {target_hub} playbooks/setup-postgres.yml
```
PostgreSQLコンテナのためにセットアップしたディレクトリの状態を確認します。
```
!ansible {target_hub} -b -a 'tree {{{{postgres_dir}}}}'
```
### single-user Jupyter Notebook server
各VCノードにsingle-user serverのコンテナイメージを取得します。
```
!ansible {ugroup_name} -a 'docker pull {{{{singleuser_image}}}}'
```
JupyterHubからコンテナとして起動する際に指定するタグ名をコンテナイメージに設定します。
```
!ansible {ugroup_name} -a \
'docker tag {{{{singleuser_image}}}} niicloudoperation/jupyterhub-singleuser'
```
各VCノードのコンテナイメージ一覧を確認します。
```
!ansible {ugroup_name} -m shell -a 'docker images | \
grep -e "niicloudoperation/jupyterhub-singleuser"'
```
## コンテナの起動
### docker-compose.yml の配置
複数のコンテナに関する設定をまとめて扱うために `docker-compose.yml` を利用します。ここまでに指定されたパラメータに応じた`docker-compose.yml`を生成し、構築環境に配置します。
まずは、チェックモードで確認を行います。
```
!ansible {target_hub} -CDv -m template \
-a 'src=template/docker-compose.yml dest={{{{base_dir}}}} backup=yes'
```
実際に`docker-compose.yml`の配置を行います。
```
!ansible {target_hub} -Dv -m template \
-a 'src=template/docker-compose.yml dest={{{{base_dir}}}} backup=yes'
```
### コンテナの起動
コンテナを起動します。
```
!ansible {target_hub} -a 'chdir={{{{base_dir}}}} \
docker stack deploy -c docker-compose.yml {{{{ugroup_name}}}}'
```
コンテナの起動状態を確認します。
```
!ansible {target_hub} -a 'docker stack ps {{{{ugroup_name}}}}'
```
全てのコンテナが起動するまで待ち合わせを行います。
> 次のセルの実行結果がエラーとなる場合は、その後のセルを実行してコンテナの実行状況やログを確認してください。
```
import time
import sys
out = !ansible {target_hub} -c local -a 'echo "{{{{vc_ipaddress}}}}"'
ip_addr = out[1]
for retry in range(18):
try:
!ansible {target_hub} -a \
'curl -s -k -I -f --resolve "{{{{master_fqdn}}}}:443:{ip_addr}" \
https://{master_fqdn}'
break
except RuntimeError:
print('retry', file=sys.stderr)
time.sleep(10)
else:
!ansible {target_hub} -a \
'curl -s -k -I -f --resolve "{{{{master_fqdn}}}}:443:{ip_addr}" \
https://{master_fqdn}'
```
起動後の状態を確認します。
> コンテナが起動に失敗し何度も再起動されていないことを確認してください。
```
!ansible {target_hub} -a 'docker stack ps {{{{ugroup_name}}}}'
```
`postgres` コンテナのログを表示してみます。
```
!ansible {target_hub} -a 'docker service logs {{{{ugroup_name}}}}_postgres'
```
`jupyterhub` コンテナのログを表示してみます。
> PostgreSQLに接続できないなどのエラーが表示されていないことを確認してください。
```
!ansible {target_hub} -a 'docker service logs {{{{ugroup_name}}}}_jupyterhub'
```
`auth-proxy` コンテナのログを表示してみます。
> 証明書設定に誤りがあるなどのエラーが表示されていないことを確認してください。
```
!ansible {target_hub} -a 'docker service logs {{{{ugroup_name}}}}_auth-proxy'
```
## 管理者の追加
### Systemユーザの作成
CoursewareHubではメールアドレスから一定のルールで導き出された名前を、ローカルユーザ名として利用します。管理ユーザのローカルユーザ名を確認します。
```
import hashlib
import re
def get_username_from_mail_address(mail_address):
    """Derive a CoursewareHub local account name from an e-mail address.

    The name is the lower-cased local part of the address with every
    character other than ASCII letters and digits removed, followed by
    the separator 'x' and the first 6 hex digits of the MD5 digest of
    the full address (which keeps distinct addresses from colliding
    after sanitization).
    """
    local_part = mail_address.split('@')[0].lower()
    sanitized = re.sub(r'[^a-z0-9]', '', local_part)
    digest = hashlib.md5(mail_address.encode('us-ascii')).hexdigest()
    return f'{sanitized}x{digest[:6]}'
teacher_id = get_username_from_mail_address(teacher_email)
print(teacher_id)
```
ホームディレクトリの親ディレクトリを作成します。
```
!ansible {target_hub} -b -m file -a 'path=/jupyter/users state=directory'
```
管理者のホームディレクトリを変数に設定しておきます。
```
teacher_homedir = f'/jupyter/users/{teacher_id}'
print(teacher_homedir)
```
`manager`ノードでユーザを作成します。
```
!ansible {target_hub} -b -m user -a 'name={teacher_id} home={teacher_homedir}'
```
ホームディレクトリが作成されていることを確認します。
```
!ansible {target_hub} -b -a 'ls -la {teacher_homedir}'
```
`worker`ノードにも同じ名前のユーザを作成します。ホームディレクトリはNFSになるので、`manager` のUID/GIDと同じ値でユーザを作成します。
まず、`manager` での UID/GID の値を確認します。
```
lines = !ansible {target_hub} -a 'id -u {teacher_id}'
teacher_uid = lines[1]
lines = !ansible {target_hub} -a 'id -g {teacher_id}'
teacher_gid = lines[1]
lines = !ansible {target_hub} -a 'id -g -n {teacher_id}'
teacher_group = lines[1]
(teacher_uid, teacher_gid, teacher_group)
```
`worker`ノードでグループを作成します。
> GID を指定するので、まずグループを作成します。
```
!ansible {target_nodes} -b -m group -a 'name={teacher_group} gid={teacher_gid}'
```
`worker`ノードでユーザを作成します。
```
!ansible {target_nodes} -b -m file \
-a 'path=/jupyter/users state=directory'
!ansible {target_nodes} -b -m user \
-a 'name={teacher_id} uid={teacher_uid} group={teacher_group} \
home={teacher_homedir}'
```
ユーザが作成されたことを確認します。
```
!ansible {target_hub} -a 'id {teacher_id}'
!ansible {target_nodes} -a 'id {teacher_id}'
```
### Prepare contents directory
コンテンツを格納するディレクトリ `info`, `textbook` を準備します。
```
for x in ['info', 'textbook']:
!ansible {target_hub} -b -m file \
-a 'path={teacher_homedir}/{x} state=directory \
owner={teacher_uid} group={teacher_group} mode=0777'
```
### Create SSH key and register
JupyterHubを構成するマシンへログインするための鍵の生成と登録を行います。
SSHの鍵ペアを作成します。
```
!ansible {target_hub} -b -a 'creates={teacher_homedir}/.ssh/id_rsa \
sudo -u {teacher_id} \
ssh-keygen -N "" -f {teacher_homedir}/.ssh/id_rsa'
```
鍵ファイルが作成されたことを確認します。
```
!ansible {target_hub} -b -m shell -a 'ls -l {teacher_homedir}/.ssh/id_rsa*'
```
作成した公開鍵を `authorized_keys` に登録します。まず、公開鍵の値を取得します。
```
lines = !ansible {target_hub} -b -a 'cat {teacher_homedir}/.ssh/id_rsa.pub'
pubkey = lines[1]
print(pubkey)
```
`authorized_keys`に登録します。
```
!ansible {target_hub} -b -m authorized_key -a 'user={teacher_id} key="{pubkey}"'
```
ユーザーのホームディレクトリに不適切なpermissionが設定されているとsshの鍵認証に失敗するので、妥当な値が設定されていることを保証しておきます。
```
!ansible {target_hub} -b -m file \
-a 'path={teacher_homedir} mode="0755" \
owner={teacher_id} group={teacher_group}'
```
### Grant sudo
JupyterHubを構成するマシン上でのsudo権限を与える設定ファイルを配置します。
事前のチェックを行います。
```
!ansible {target_hub} -CD -b -m lineinfile \
-a 'dest=/etc/sudoers.d/{teacher_id} create=yes\
line="{teacher_id} ALL=(ALL) NOPASSWD: ALL"'
```
実際に設定ファイルの配置を行います。
```
!ansible {target_hub} -b -m lineinfile \
-a 'dest=/etc/sudoers.d/{teacher_id} create=yes\
line="{teacher_id} ALL=(ALL) NOPASSWD: ALL"'
```
### Set ansible inventory
JupyterHubを構成するマシンを操作するためのインベントリを配布します。
まずは、インベントリ配布するための playbook をチェックモード実行します。
```
!ansible-playbook -CDv -l {target_hub} \
-e teacher_id={teacher_id} -e teacher_homedir={teacher_homedir} \
-e target_hub={target_hub} -e target_nodes={target_nodes} \
playbooks/deploy-inventory.yml \
|| true
```
実際にインベントリを構築環境の`~/ansible/inventory`に配布します。
```
!ansible-playbook -Dv -l {target_hub} \
-e teacher_id={teacher_id} -e teacher_homedir={teacher_homedir} \
-e target_hub={target_hub} -e target_nodes={target_nodes} \
playbooks/deploy-inventory.yml
```
### JupyterHubユーザの作成
初回のユーザー作成ではJupyterHub APIが使用できないので、直接DBを変更してユーザーを登録します。
まず、JupyterHubのユーザテーブルにユーザを追加するための SQL ファイルを作成します。
```
import random
from datetime import datetime
from tempfile import TemporaryDirectory
cookie_id = ''.join(random.choices("0123456789abcdef", k=32))
with TemporaryDirectory() as tmp_dir:
sql_file = Path(tmp_dir) / ('create_user_{:%Y%m%d_%H%M%S}.sql'.format(datetime.now()))
with sql_file.open(mode='w') as f:
f.write(f'''
INSERT INTO users (name, admin, cookie_id, last_activity)
VALUES ('{teacher_id}', TRUE, '{cookie_id}', '{datetime.now().isoformat()}');
''')
!cat {str(sql_file)}
!ansible {target_hub} -b -m copy -a 'src={str(sql_file)} dest=/jupyter/psql/init/'
```
PostgreSQLコンテナのコンテナIDと実行しているホストのIPアドレスを取得します。
```
lines = !ansible {target_hub} -b -a 'docker service ps {{{{ugroup_name}}}}_postgres -q'
sid = lines[1]
lines = !ansible {target_hub} -b -a \
'docker inspect --format "{{% raw %}} {{{{.NodeID}}}} {{{{.Status.ContainerStatus.ContainerID}}}} {{% endraw %}}" {sid}'
nodeid, cid = lines[1].split()
print(cid)
lines = !ansible {target_hub} -b -a \
'docker node inspect --format "{{% raw %}}{{{{.Status.Addr}}}} {{% endraw %}}" {nodeid}'
target_ip = lines[1].split()[0]
print(target_ip)
```
SQLファイルを実行します。
```
!ansible {target_ip} -b -a 'docker exec -i {cid} \
psql -d {{{{db_name}}}} -U {{{{db_user}}}} -f /docker-entrypoint-initdb.d/{sql_file.name}'
```
ユーザが登録されたことを確認します。
```
!ansible {target_ip} -a 'docker exec -i {cid} \
psql -d {{{{db_name}}}} -U {{{{db_user}}}} -c "SELECT * FROM users"'
```
ローカルユーザーのテーブルにも登録します。 SQLファイルを作成します。
```
from crypt import crypt
with TemporaryDirectory() as tmp_dir:
sql2_file = Path(tmp_dir) / ('create_localuser_{:%Y%m%d_%H%M%S}.sql'.format(datetime.now()))
with sql2_file.open(mode='w') as f:
f.write(f'''
INSERT INTO local_users VALUES(
nextval('local_users_id_seq'),
'{teacher_id}',
'{crypt(teacher_password)}',
'{teacher_email}'
);
''')
!ansible {target_hub} -b -m template -a 'src={str(sql2_file)} dest=/jupyter/psql/init/'
!ansible {target_hub} -b -a 'cat /jupyter/psql/init/{str(sql2_file.name)}'
```
SQLファイルを実行します。
```
!ansible {target_ip} -b -a 'docker exec -i {cid} \
psql -d {{{{db_name}}}} -U {{{{db_user}}}} -f /docker-entrypoint-initdb.d/{sql2_file.name}'
```
ユーザ登録のために一時的に作成したSQLファイルを削除します。
```
!ansible {target_hub} -b -m file -a \
'path=/jupyter/psql/init/{sql_file.name} state=absent'
!ansible {target_hub} -b -m file -a \
'path=/jupyter/psql/init/{sql2_file.name} state=absent'
```
## コンテンツの配備の準備
CoursewareHubのコンテンツを格納するディレクトリを作成し、コンテンツの配置を行うNotebookを管理者のホームディレクトリに配置します。
実際に設定を変更する前にドライラン(チェックモード)でAnsibleを実行します。
```
!ansible-playbook -l {target_hub} -CDv -e teacher_id={teacher_id} playbooks/manage-tools.yml
```
実際に設定変更を行います。
```
!ansible-playbook -l {target_hub} -e teacher_id={teacher_id} playbooks/manage-tools.yml
```
## SimpleSAMLphpの設定ファイルを更新する
構築環境のメタデータに設定されている技術的問い合わせ先などの情報を更新するために SimpleSAMLphpの設定ファイルを編集します。
まず、構築環境の auth-proxyコンテナからSimpleSAMLphpの`config.php`をローカル環境に取得します。
```
Path('./tmp').mkdir(exist_ok=True)
work_dir = Path(mkdtemp(dir='./tmp')).absolute()
simplesamlphp_config = work_dir / 'config.php'
!ansible-playbook -v -e simplesamlphp_config={simplesamlphp_config} -l {target_hub} \
playbooks/fetch-simplesaml-config.yml
```
次のセルの実行結果に表示されるリンクをクリックすることで、取得した `config.php`の内容を編集することができます。必要な項目を編集してください。技術的問い合わせ先を修正するには以下の項目を変更してください。
* `technicalcontact_name`
* `technicalcontact_email`
> ファイルの編集後にはCtrl-Sなどで編集結果を保存してください。
またSimpleSAMLphpの管理者パスワードを設定する以下の項目の設定を変更してください。
* `auth.adminpassword`
```
from notebook import notebookapp
from IPython.display import HTML
nb_conf = list(notebookapp.list_running_servers())[0]
p = Path(nb_conf['base_url']) / 'edit' / simplesamlphp_config.relative_to(nb_conf['notebook_dir'])
HTML(f'<a href={p} target="_blank">{p.name}</a>')
```
ローカル環境で編集した設定ファイル`config.php`を実行環境のauth-proxyコンテナに配置します。
```
!ansible-playbook -v -e simplesamlphp_config={simplesamlphp_config} -l {target_hub} \
playbooks/deploy-simplesaml-config.yml
```
次のセルを実行すると表示されるリンク先を表示するとメタデータが表示されます。技術的問い合わせ先などの値が更新されていることを確認してください。
```
print(f'https://{master_fqdn}/simplesaml/module.php/saml/sp/metadata.php/default-sp?output=xhtml')
```
作業ディレクトリを削除します。
```
!rm -rf {work_dir}
```
## CoursewareHubにアクセスする
構築環境にアクセスして、正しく動作していることを確認してください。
次のセルを実行すると、構築したCoursewareHubのアドレスを表示します。
```
print(f'https://{master_fqdn}')
```
このNotebookで管理者として登録したユーザのsingle-userサーバの環境には、CoursewareHubのコンテンツを配備するためのNotebook「CoursewareHubコンテンツの配備.ipynb」が用意されています。管理者としてCoursewareHubにログインした後に実行してください。
> ログイン画面に「学認フェデレーションへ」とのリンクが表示されていますが「331-CoursewareHubの登録を学認へ申請する.ipynb」を実行するまでは、学認IdPを利用したログインは行えません。
| github_jupyter |
# Post Training Quantization
<table class="tfo-notebook-buttons" align="left">
<td>
<a target="_blank" href="https://colab.research.google.com/github/tensorflow/tensorflow/blob/master/tensorflow/lite/tutorials/post_training_quant.ipynb"><img src="https://www.tensorflow.org/images/colab_logo_32px.png" />Run in Google Colab</a>
</td>
<td>
<a target="_blank" href="https://github.com/tensorflow/tensorflow/blob/master/tensorflow/lite/tutorials/post_training_quant.ipynb"><img src="https://www.tensorflow.org/images/GitHub-Mark-32px.png" />View source on GitHub</a>
</td>
</table>
## Overview
[TensorFlow Lite](https://www.tensorflow.org/lite/) now supports
converting weights to 8 bit precision as part of model conversion from
tensorflow graphdefs to TFLite's flat buffer format. Weight quantization
achieves a 4x reduction in the model size. In addition, TFLite supports on the
fly quantization and dequantization of activations to allow for:
1. Using quantized kernels for faster implementation when available.
2. Mixing of floating-point kernels with quantized kernels for different parts
of the graph.
Note that the activations are always stored in floating point. For ops that
support quantized kernels, the activations are quantized to 8 bits of precision
dynamically prior to processing and are de-quantized to float precision after
processing. Depending on the model being converted, this can give a speedup over
pure floating point computation.
In contrast to
[quantization aware training](https://github.com/tensorflow/tensorflow/tree/master/tensorflow/contrib/quantize)
, the weights are quantized post training and the activations are quantized dynamically
at inference in this method.
Therefore, the model weights are not retrained to compensate for quantization
induced errors. It is important to check the accuracy of the quantized model to
ensure that the degradation is acceptable.
In this tutorial, we train an MNIST model from scratch, check its accuracy in
tensorflow and then convert the saved model into a Tensorflow Lite flatbuffer
with weight quantization. We finally check the
accuracy of the converted model and compare it to the original saved model. We
run the training script mnist.py from
[Tensorflow official mnist tutorial](https://github.com/tensorflow/models/tree/master/official/mnist).
## Building an MNIST model
### Setup
```
! pip uninstall -y tensorflow
! pip install -U tf-nightly
import tensorflow as tf
tf.enable_eager_execution()
! git clone --depth 1 https://github.com/tensorflow/models
import sys
import os
if sys.version_info.major >= 3:
import pathlib
else:
import pathlib2 as pathlib
# Add `models` to the python path.
models_path = os.path.join(os.getcwd(), "models")
sys.path.append(models_path)
```
### Train and export the model
```
saved_models_root = "/tmp/mnist_saved_model"
# The above path addition is not visible to subprocesses, add the path for the subprocess as well.
# Note: channels_last is required here or the conversion may fail.
!PYTHONPATH={models_path} python models/official/mnist/mnist.py --train_epochs=1 --export_dir {saved_models_root} --data_format=channels_last
```
For the example, we only trained the model for a single epoch, so it only trains to ~96% accuracy.
### Convert to a TFLite model
The `savedmodel` directory is named with a timestamp. Select the most recent one:
```
saved_model_dir = str(sorted(pathlib.Path(saved_models_root).glob("*"))[-1])
saved_model_dir
```
Using the python `TFLiteConverter`, the saved model can be converted into a TFLite model.
First load the model using the `TFLiteConverter`:
```
import tensorflow as tf
tf.enable_eager_execution()
converter = tf.lite.TFLiteConverter.from_saved_model(saved_model_dir)
tflite_model = converter.convert()
```
Write it out to a tflite file:
```
tflite_models_dir = pathlib.Path("/tmp/mnist_tflite_models/")
tflite_models_dir.mkdir(exist_ok=True, parents=True)
tflite_model_file = tflite_models_dir/"mnist_model.tflite"
tflite_model_file.write_bytes(tflite_model)
```
To quantize the model on export, set the `post_training_quantize` flag:
```
# Note: If you don't have a recent tf-nightly installed, the
# "post_training_quantize" line will have no effect.
tf.logging.set_verbosity(tf.logging.INFO)
converter.post_training_quantize = True
tflite_quant_model = converter.convert()
tflite_model_quant_file = tflite_models_dir/"mnist_model_quant.tflite"
tflite_model_quant_file.write_bytes(tflite_quant_model)
```
Note how the resulting file, with `post_training_quantize` set, is approximately `1/4` the size.
```
!ls -lh {tflite_models_dir}
```
## Run the TFLite models
We can run the TensorFlow Lite model using the python TensorFlow Lite
Interpreter.
### load the test data
First let's load the mnist test data to feed to it:
```
import numpy as np
mnist_train, mnist_test = tf.keras.datasets.mnist.load_data()
images, labels = tf.to_float(mnist_test[0])/255.0, mnist_test[1]
# Note: If you change the batch size, then use
# `tf.lite.Interpreter.resize_tensor_input` to also change it for
# the interpreter.
mnist_ds = tf.data.Dataset.from_tensor_slices((images, labels)).batch(1)
```
### Load the model into an interpreter
```
interpreter = tf.lite.Interpreter(model_path=str(tflite_model_file))
interpreter.allocate_tensors()
input_index = interpreter.get_input_details()[0]["index"]
output_index = interpreter.get_output_details()[0]["index"]
tf.logging.set_verbosity(tf.logging.DEBUG)
interpreter_quant = tf.lite.Interpreter(model_path=str(tflite_model_quant_file))
interpreter_quant.allocate_tensors()
input_index = interpreter_quant.get_input_details()[0]["index"]
output_index = interpreter_quant.get_output_details()[0]["index"]
```
### Test the model on one image
```
for img, label in mnist_ds.take(1):
break
interpreter.set_tensor(input_index, img)
interpreter.invoke()
predictions = interpreter.get_tensor(output_index)
import matplotlib.pylab as plt
plt.imshow(img[0])
template = "True:{true}, predicted:{predict}"
_ = plt.title(template.format(true= str(label[0].numpy()),
predict=str(predictions[0,0])))
plt.grid(False)
```
### Evaluate the models
```
def eval_model(interpreter, mnist_ds):
    """Evaluate a TFLite interpreter over a dataset and return accuracy.

    NOTE(review): this relies on the module-level globals ``input_index``
    and ``output_index`` set in an earlier notebook cell, not on values
    derived from the ``interpreter`` argument — if the two interpreters'
    tensor indices ever differ, the wrong tensors would be used; confirm.

    Args:
        interpreter: a ``tf.lite.Interpreter`` with tensors already
            allocated.
        mnist_ds: an iterable of ``(img, label)`` batches (batch size 1,
            per the dataset construction above).

    Returns:
        float: fraction of examples for which the model's output matched
        the label.
    """
    total_seen = 0
    num_correct = 0
    for img, label in mnist_ds:
        total_seen += 1
        interpreter.set_tensor(input_index, img)
        interpreter.invoke()
        predictions = interpreter.get_tensor(output_index)
        # Assumes the model emits the predicted class directly (the
        # earlier cell uses predictions[0,0] as the class) — TODO confirm.
        if predictions == label.numpy():
            num_correct += 1
        # Progress report every 500 images.
        if total_seen % 500 == 0:
            print("Accuracy after %i images: %f" %
                  (total_seen, float(num_correct) / float(total_seen)))
    return float(num_correct) / float(total_seen)
print(eval_model(interpreter, mnist_ds))
```
We can repeat the evaluation on the weight quantized model to obtain:
```
print(eval_model(interpreter_quant, mnist_ds))
```
In this example, we have compressed the model with no difference in accuracy.
## Optimizing an existing model
We now consider another example. Resnets with pre-activation layers (Resnet-v2) are widely used for vision applications.
Pre-trained frozen graph for resnet-v2-101 is available at the
[Tensorflow Lite model repository](https://github.com/tensorflow/tensorflow/blob/master/tensorflow/lite/g3doc/models.md).
We can convert the frozen graph to a TFLite flatbuffer with quantization by:
```
archive_path = tf.keras.utils.get_file("resnet_v2_101.tgz", "https://storage.googleapis.com/download.tensorflow.org/models/tflite_11_05_08/resnet_v2_101.tgz", extract=True)
archive_path = pathlib.Path(archive_path)
archive_dir = str(archive_path.parent)
```
The `info.txt` file lists the input and output names. You can also find them using TensorBoard to visually inspect the graph.
```
! cat {archive_dir}/resnet_v2_101_299_info.txt
graph_def_file = pathlib.Path(archive_path).parent/"resnet_v2_101_299_frozen.pb"
input_arrays = ["input"]
output_arrays = ["output"]
converter = tf.lite.TFLiteConverter.from_frozen_graph(
str(graph_def_file), input_arrays, output_arrays, input_shapes={"input":[1,299,299,3]})
converter.post_training_quantize = True
resnet_tflite_file = graph_def_file.parent/"resnet_v2_101_quantized.tflite"
resnet_tflite_file.write_bytes(converter.convert())
!ls -lh {archive_dir}/*.tflite
```
The model size reduces from 171 MB to 43 MB.
The accuracy of this model on imagenet can be evaluated using the scripts provided for [TFLite accuracy measurement](https://github.com/tensorflow/tensorflow/tree/master/tensorflow/lite/tools/accuracy/ilsvrc).
The optimized model top-1 accuracy is 76.8, the same as the floating point model.
| github_jupyter |
# Lecture05 Logistic Regression
## Classification and Representation
To attempt classification, one method is to use linear regression and map all predictions greater than 0.5 as a 1 and all less than 0.5 as a 0. However, this method doesn't work well because classification is not actually a linear function.
**Logistic Regression Model**: $h_{\theta}(x) = g(\theta^Tx) = g(z) = \frac{1}{1+e^{-z}}$, where $g(z)$ is denoted as logistic function or sigmoid function.
* interpretation of hypothesis output: $h_{\theta}(x)$ = estimated probability that y=1 on input x.
* decision boundary: $y = 1 \Rightarrow h_{\theta}(x) \geq 0.5 \Rightarrow g(z) \geq 0.5 \Rightarrow \theta^Tx \geq 0.5$. Therefore, the descision boundary is the line that separates the area where y=0 and where y=1.
## Logistic Regression Model
Training Set: $(x^{(1)}, y^{(1)}), (x^{(2)}, y^{(2)}), \dots, (x^{(m)}, y^{(m)})$, m examples and x is a $(n+1) \times 1$ vector while $x_0 = 1$, $y \in \{0, 1\}$.
Since the $J(\theta) = Cost(h_{\theta}(x), y) = \frac{1}{2}(h_{\theta}(x) - y)^2$ is non-convex, the logistic regression model uses $Cost(h_{\theta}(x), y) = \left\{\begin{matrix} -log(h_{\theta}(x)) & if & y = 1 \\ -log(1 - h_{\theta}(x)) & if & y = 0 \end{matrix}\right.$ as the **cost function**.
* If our correct answer 'y' is 0, then the cost function will be 0 if our hypothesis function also outputs 0. If our hypothesis approaches 1, then the cost function will approach infinity.
* If our correct answer 'y' is 1, then the cost function will be 0 if our hypothesis function outputs 1. If our hypothesis approaches 0, then the cost function will approach infinity.
**Simplified cost function**: $Cost(h_{\theta}(x), y) = -y log(h_{\theta}(x)) - (1 - y) log(1 - h_{\theta}(x))$.
The $J(\theta) = \frac{1}{m}\sum^m_{i=1}Cost(h_{\theta}(x), y)$:
* to fit parameter $\theta$: $min_{\theta} J(\theta)$.
* to make prediction for given new x, output $h_{\theta}(x) = \frac{1}{1 + e^{-\theta^tx}}$.
**Gradient descent**: algorithm looks identical to linear regression.
**Optimization algorithm**: Conjugate gradient, BFGS, L-BFGS. They have some advantages which is no need to manually pick $\alpha$ and oftern faster than gradient descent, but they are more complex.
To use the library of the optimization algorithms, it needs to write a single function that returns both of these:
``` Octave
function [jVal, gradient] = costFunction(theta)
jVal = [...code to compute J(theta)...];
gradient = [...code to compute derivative of J(theta)...];
end
```
Then we can use octave's "fminunc()" optimization algorithm along with the "optimset()" function that creates an object containing the options we want to send to "fminunc()".
``` Octave
options = optimset('GradObj', 'on', 'MaxIter', 100);
initialTheta = zeros(2,1);
[optTheta, functionVal, exitFlag] = fminunc(@costFunction, initialTheta, options);
```
## Multiclass Classification: one-vs-all
Train a logistic regression classifier $h_{\theta}^i(x)$ for each class $i$ to predict the probability that $y=i$.
To make a prediction on a new $x$, pick the class $i$ that maximizes the $h_{\theta}(x)$.
| github_jupyter |
### X LINES OF PYTHON
# Physical units with `pint`
This notebook goes with [a blog post on the same subject](https://agilescientific.com/blog/2019/8/19/x-lines-of-python-physical-units).
Have you ever wished you could carry units around with your quantities — and have the computer figure out the best units and multipliers to use?
[`pint`](https://pint.readthedocs.io/en/0.9/tutorial.html) is a nice, compact library for doing just this, handling all your [dimensional analysis](https://en.wikipedia.org/wiki/Dimensional_analysis) needs. It can also detect units from strings. We can define our own units, it knows about multipliers (kilo, mega, etc), and it even works with `numpy` and `pandas`.
Install `pint` with `pip` or `conda`, e.g.
pip install pint
**NB** If you are running this on Google Colaboratory, you must uncomment these lines (delete the initial `#`) and run this first:
```
#!pip install pint
#!pip install git+https://github.com/hgrecco/pint-pandas#egg=Pint-Pandas-0.1.dev0
```
To use it in its typical mode, we import the library then instantiate a `UnitRegistry` object. The registry contains lots of physical units.
```
import pint
units = pint.UnitRegistry()
pint.__version__
```
## Attaching and printing units
```
thickness = 68 * units.m
thickness
```
In a Jupyter Notebook you see a 'pretty' version of the quantity. In the interpreter, you'll see something slightly different (the so-called `repr` of the class):
>>> thickness
<Quantity(68, 'meter')>
We can get at the magnitude, the units, and the dimensionality of this quantity:
```
thickness.magnitude, thickness.units, thickness.dimensionality
```
You can also use the following abbreviations for magnitude and units:
thickness.m, thickness.u
For printing, we can use Python's string formatting:
```
f'{thickness**2}'
```
But `pint` extends the string formatting options to include special options for `Quantity` objects. The most useful option is `P` for 'pretty', but there's also `L` for $\LaTeX$ and `H` for HTML. Adding a `~` (tilde) before the option tells `pint` to use unit abbreviations instead of the full names:
```
print(f'{thickness**2:P}')
print(f'{thickness**2:~P}')
print(f'{thickness**2:~L}')
print(f'{thickness**2:~H}')
```
## Doing maths
If we multiply by a scalar, `pint` produces the result you'd expect:
```
thickness * 2
```
Note that you must use units when you need them:
```
thickness + 10
# This is meant to produce an error...
```
Let's try defining an area of $60\ \mathrm{km}^2$, then multiplying it by our thickness. To make it more like a hydrocarbon volume, I'll also multiply by net:gross `n2g`, porosity `phi`, and saturation `sat`, all of which are dimensionless:
```
area = 60 * units.km**2
n2g = 0.5 * units.dimensionless # Optional dimensionless 'units'...
phi = 0.2 # ... but you can just do this.
sat = 0.7
volume = area * thickness * n2g * phi * sat
volume
```
We can convert to something more compact:
```
volume.to_compact()
```
Or be completely explicit about the units and multipliers we want:
```
volume.to('m**3') # Or use m^3
```
The `to_compact()` method can also take units, if you want to be more explicit; it applies multipliers automatically:
```
volume.to_compact('L')
```
Oil barrels are already defined (**careful**, they are abbreviated as `oil_bbl` not `bbl` — that's a 31.5 gallon barrel, about the same as a beer barrel).
```
volume.to_compact('oil_barrel')
```
If we use string formatting (see above), we can get pretty specific:
```
f"The volume is {volume.to_compact('oil_barrel'):~0.2fL}"
```
## Defining new units
`pint` defines hundreds of units ([here's the list](https://github.com/hgrecco/pint/blob/master/pint/default_en.txt)), and it knows about tonnes of oil equivalent... but it doesn't know about barrels of oil equivalent ([for more on conversion to BOE](https://en.wikipedia.org/wiki/Barrel_of_oil_equivalent)). So let's define a custom unit, using the USGS's conversion factor:
```
units.define('barrel_of_oil_equivalent = 6000 ft**3 = boe')
```
Let's suspend reality for a moment and imagine we now want to compute our gross rock volume in BOEs...
```
volume.to('boe')
volume.to_compact('boe')
```
## Getting units from strings
`pint` can also parse strings and attempt to convert them to `Quantity` instances:
```
units('2.34 km')
```
This looks useful! Let's try something less nicely formatted.
```
units('2.34*10^3 km')
units('-12,000.ft')
units('3.2 m')
```
You can also use the `Quantity` constructor, like this:
>>> qty = pint.Quantity
>>> qty('2.34 km')
2.34 kilometer
But the `UnitRegistry` seems to do the same things and might be more convenient.
## `pint` with `uncertainties`
Conveniently, `pint` works well with [`uncertainties`](https://pythonhosted.org/uncertainties/). Maybe I'll do an _X lines_ on that package in the future. Install it with `conda` or `pip`, e.g.
pip install uncertainties
```
from uncertainties import ufloat
area = ufloat(64, 5) * units.km**2 # 64 +/- 5 km**2
(thickness * area).to('Goil_bbl')
```
## `pint` with `numpy`
`pint` works fine with NumPy arrays:
```
import numpy as np
vp = np.array([2300, 2400, 2550, 3200]) * units.m/units.s
rho = np.array([2400, 2550, 2500, 2650]) * units.kg/units.m**3
z = vp * rho
z
```
For some reason, this sometimes doesn't render properly. But we can always do this:
```
print(z)
```
As expected, the magnitude of this quantity is just a NumPy array:
```
z.m
```
## `pint` with `pandas`
**Note** that this functionality is fairly new and is still settling down. YMMV.
To use `pint` (version 0.9 and later) with `pandas` (version 0.24.2 works; 0.25.0 does not work at the time of writing), we must first install `pintpandas`, which must be done from source; [get the code from GitHub](https://github.com/hgrecco/pint-pandas). Here's how I do it:
cd pint-pandas
python setup.py sdist
pip install dist/Pint-Pandas-0.1.dev0.tar.gz
You could also do:
pip install git+https://github.com/hgrecco/pint-pandas#egg=Pint-Pandas-0.1.dev0
Once you have done that, the following should evaluate to `True`:
```
pint._HAS_PINTPANDAS
```
To use this integration, we pass special `pint` data types to the `pd.Series()` object:
```
import pandas as pd
df = pd.DataFrame({
"Vp": pd.Series(vp.m, dtype="pint[m/s]"),
"Vs": pd.Series([1200, 1200, 1250, 1300], dtype="pint[m/s]"),
"rho": pd.Series(rho.m, dtype="pint[kg/m**3]"),
})
df
import bruges as bg
df['E'] = bg.rockphysics.moduli.youngs(df.Vp, df.Vs, df.rho)
df.E
```
We can't convert the units of a whole `Series` but we can do one:
```
df.loc[0, 'E'].to('GPa')
```
So to convert a whole series, we can use `Series.apply()`:
```
df.E.apply(lambda x: x.to('GPa'))
```
## Bonus: dataframe display with units
We *could* subclass dataframes to tweak their `_repr_html_()` method, which would allow us to make units show up in the Notebook representation of the dataframe...
```
class UnitDataFrame(pd.DataFrame):
    """DataFrame subclass whose notebook repr shows a row of pint units under the header."""

    def _repr_html_(self):
        """Return the standard HTML repr with an extra header row listing each column's units."""
        base = super()._repr_html_()
        # One empty cell for the index column, then every column's pint units,
        # each rendered with the '~H' (abbreviated HTML) format spec.
        unit_labels = ['']
        for col_dtype in self.dtypes:
            unit_labels.append(f"{col_dtype.units:~H}")
        style = "text-align: right; color: gray;"
        units_row = f'<tr style="{style}"><th>' + "</th><th>".join(unit_labels) + "</th></tr></thead>"
        return base.replace('</thead>', units_row)
# Build the same pint-typed frame as before, but with the units-aware subclass
# so the HTML repr shows a units row. The .m attribute strips pint units from
# the vp/rho Quantity arrays; the "pint[...]" dtype strings re-attach them.
df = UnitDataFrame({
    "Vp": pd.Series(vp.m, dtype="pint[m/s]"),
    "Vs": pd.Series([1200, 1200, 1250, 1300], dtype="pint[m/s]"),
    "rho": pd.Series(rho.m, dtype="pint[kg/m**3]"),
})
df
```
Cute.
----
© Agile Scientific 2019, licensed CC-BY
| github_jupyter |
(PTDN)=
# 1.6 Polinomios de Taylor y diferenciación numérica
```{admonition} Notas para contenedor de docker:
Comando de docker para ejecución de la nota de forma local:
nota: cambiar `<ruta a mi directorio>` por la ruta de directorio que se desea mapear a `/datos` dentro del contenedor de docker y `<versión imagen de docker>` por la versión más actualizada que se presenta en la documentación.
`docker run --rm -v <ruta a mi directorio>:/datos --name jupyterlab_optimizacion -p 8888:8888 -d palmoreck/jupyterlab_optimizacion:<versión imagen de docker>`
password para jupyterlab: `qwerty`
Detener el contenedor de docker:
`docker stop jupyterlab_optimizacion`
Documentación de la imagen de docker `palmoreck/jupyterlab_optimizacion:<versión imagen de docker>` en [liga](https://github.com/palmoreck/dockerfiles/tree/master/jupyterlab/optimizacion).
```
---
Nota generada a partir de la [liga1](https://www.dropbox.com/s/jfrxanjls8kndjp/Diferenciacion_e_Integracion.pdf?dl=0), [liga2](https://www.dropbox.com/s/mmd1uzvwhdwsyiu/4.3.2.Teoria_de_convexidad_Funciones_convexas.pdf?dl=0) e inicio de [liga3](https://www.dropbox.com/s/ko86cce1olbtsbk/4.3.1.Teoria_de_convexidad_Conjuntos_convexos.pdf?dl=0).
```{admonition} Al final de esta nota el y la lectora:
:class: tip
* Aprenderá que el método de diferenciación finita es un método inestable numéricamente respecto al redondeo.
* Conocerá las expresiones de los polinomios de Taylor para funciones de varias variables.
* Tendrá en su lista de programas del lenguaje *R* implementaciones para aproximar al gradiente y a la Hessiana de una función con los métodos de diferenciación finita.
* Utilizará las fórmulas para calcular errores absolutos y relativos para valores y vectores revisadas en {ref}`fórmulas para calcular errores absolutos y relativos <FORERRABSERRREL>`
```
## Problema: ¿Cómo aproximar una función $f$ en un punto $x_1$?
Si $f$ es continuamente diferenciable en $x_0$ y $f^{(1)}, f^{(2)}$ existen y están acotadas en $x_0$ entonces:
$$f(x_1) \approx f(x_0) + f^{(1)}(x_0)(x_1-x_0)$$
y se nombra **aproximación de orden 1**. Ver {ref}`Definición de función, continuidad y derivada <FCD>` para definición de continuidad, diferenciabilidad y propiedades.
```{admonition} Comentarios
* Lo anterior requiere de los valores: $x_0, x_1, f(x_0), f^{(1)}(x_0)$. Esta aproximación tiene un error de **orden** $2$ pues su error es **proporcional** al cuadrado del ancho del intervalo: $h=x_1-x_0$, esto es, si reducimos a la mitad $h$ entonces el error se reduce en una cuarta parte.
* Otra aproximación más simple sería:
$$f(x_1) \approx f(x_0)$$
lo cual sólo requiere del conocimiento de $f(x_0)$ y se nombra aproximación de **orden** $0$, sin embargo esta aproximación tiene un error de **orden** $1$ pues este es proporcional a $h$ , esto es, al reducir a la mitad $h$ se reduce a la mitad el error.
* Los errores anteriores los nombramos errores por **truncamiento**, ver {ref}`Fuentes del error <FuenErr>` y {ref}`Análisis del error <AnErr>` para un recordatorio de tal error. Utilizamos la notación "O grande" $\mathcal{O}(\cdot)$ para escribir lo anterior:
$$f(x)-f(x_0)=\mathcal{O}(h)$$
con la variable $h=x-x_0$. En este caso se representa a un error de orden $1$. Análogamente:
$$f(x)-(f(x_0)+f^{(1)}(x_0)(x-x_0)) = \mathcal{O}(h^2)$$
y se representa un error de orden $2$.
```
```{admonition} Observaciones
:class: tip
* No confundir órdenes de una aproximación con órdenes de error.
* Otras aproximaciones a una función se pueden realizar con:
* Interpoladores polinomiales (representación por Vandermonde, Newton, Lagrange).
```
## Aproximación a una función por el teorema de Taylor
En esta sección se presenta el teorema de Taylor, el cual, bajo ciertas hipótesis nos proporciona una expansión de una función alrededor de un punto. Este teorema será utilizado en **diferenciación e integración numérica**. El teorema es el siguiente:
```{admonition} Teorema de Taylor
Sea $f: \mathbb{R} \rightarrow \mathbb{R}$, $f \in \mathcal{C}^n([a,b])$ tal que $f^{(n+1)}$ existe en [a,b]. Si $x_0 \in [a,b]$ entonces $\forall x \in [a,b]$ se tiene: $f(x) = P_n(x) + R_n(x)$ donde:
$$P_n(x) = \displaystyle \sum_{k=0}^n \frac{f^{(k)}(x_0)(x-x_0)^k}{k!} \quad (f^{(0)} = f)$$
$$R_n(x) = \frac{f^{(n+1)}(\xi_x)(x-x_0)^{(n+1)}}{(n+1)!}$$
con $\xi_x$ entre $x_0, x$ y $x_0$ se llama centro. Ver {ref}`Definición de función, continuidad y derivada <FCD>` para definición del conjunto $\mathcal{C}^n([a,b])$.
```
```{admonition} Comentarios
* El teorema de Taylor nos indica que cualquier función suave (función en $\mathcal{C}^n)$ se le puede aproximar por un polinomio en el intervalo $[a,b]$, de hecho $f(x) \approx P_n(x)$.
* Si el residuo no tiene una alta contribución a la suma $P_n(x) + R_n(x)$ entonces es una buena aproximación **local** (alta contribución y buena aproximación depende de factores como elección de la norma y la aplicación).
* El teorema de Taylor es una generalización del [teorema del valor medio para derivadas](https://en.wikipedia.org/wiki/Mean_value_theorem).
* $P_n(x)$ se le llama polinomio de Taylor alrededor de $x_0$ de orden $n$ y $R_n(x)$ es llamado residuo de Taylor alrededor de $x_0$ de orden $n+1$, tiene otras expresiones para representarlo y la que se utiliza en el enunciado anterior es en su forma de Lagrange (ver [liga](https://en.wikipedia.org/wiki/Taylor%27s_theorem) para otras expresiones del residuo).
* $\xi_x$ es un punto entre $x_0, x$ desconocido y está en función de $x$ (por eso se le escribe un subíndice).
* Una forma del teorema de Taylor es escribirlo definiendo a la variable $h=x-x_0$:
$$f(x) = f(x_0+h) = P_n(h) + R_n(h) = \displaystyle \sum_{k=0}^n \frac{f^{(k)}(x_0)h^k}{k!} + \frac{f^{(n+1)}(\xi_h)h^{n+1}}{(n+1)!}$$
y si $f^{(n+1)}$ es acotada, escribimos: $R_n(h) = \mathcal{O}(h^{n+1})$.
```
### Ejemplo
Graficar la función y los polinomios de Taylor de grados $0,1,2,3$ y $4$ en una sola gráfica para el intervalo $[1,2]$ de la función $\frac{1}{x}$ con centro en $x_0=1.5$. ¿Cuánto es la aproximación de los polinomios en x=1.9?. Calcula el error relativo de tus aproximaciones.
**Solución**
Obtengamos los polinomios de Taylor de orden $n$ con $n \in \{0,1,2, 3\}$ y centro en $x_0=1.5$ para la función $\frac{1}{x}$ en el intervalo $[1,2]$. Los primeros tres polinomios de Taylor son:
$$P_0(x) = f(x_0) = \frac{2}{3} \quad \text{(constante)}$$
$$P_1(x) = f(x_0) + f^{(1)}(x_0)(x-x_0) = \frac{2}{3} - \frac{1}{x_0^2}(x-x_0) =\frac{2}{3} - \frac{1}{1.5^2}(x-1.5) \quad \text{(lineal)}$$
$$
\begin{eqnarray}
P_2(x) &=& f(x_0) + f^{(1)}(x_0)(x-x_0) + \frac{f^{(2)}(x_0)(x-x_0)^2}{2} \nonumber \\
&=& \frac{2}{3} - \frac{1}{x_0^2}(x-x_0) + \frac{1}{x_0^3}(x-x_0)^2 \nonumber \\
&=& \frac{2}{3} -\frac{1}{1.5^2}(x-1.5) + \frac{1}{1.5^3}(x-1.5)^2 \quad \text{(cuadrático)} \nonumber
\end{eqnarray}
$$
```
library(ggplot2)
options(repr.plot.width=6, repr.plot.height=6) #esta línea sólo se ejecuta para jupyterlab con R
Taylor_approx <- function(x,c,n){
    # Taylor polynomial of degree n for f(x) = 1/x, centered at c,
    # evaluated element-wise at x:
    #   P_n(x) = (1/c) * sum_{k=0}^{n} (-(x-c)/c)^k
    #
    # Args:
    #   x (double): numeric vector or scalar where the polynomial is evaluated.
    #   c (double): scalar center of the expansion (must be nonzero).
    #   n (integer): non-negative degree of the Taylor polynomial.
    #
    # Returns:
    #   double vector of length(x): polynomial evaluated at each entry of x.
    acc <- vector("double", length(x))
    for(j in seq_along(x)){
        term <- c^(-1)          # k = 0 term of the expansion
        acc[j] <- term
        # seq_len(n) is empty when n == 0; the original `1:n` yielded c(1, 0)
        # for n = 0 and silently produced a wrong degree-0 result.
        for(k in seq_len(n)){
            # Recurrence between consecutive terms: term_k = -(x-c)/c * term_{k-1}.
            term <- -1*c^(-1)*(x[j]-c)*term
            acc[j] <- acc[j] + term
        }
    }
    acc #accumulated sum
}
x0 <- 1.5                          # center of the Taylor expansions
x <- seq(from=1,to=2,by=.005)      # evaluation grid on [1, 2]
n <- c(0,1,2,3,4) #degrees of Taylor polynomials
f <- function(z)1/z                # function being approximated
y <- f(x)
# Degree-0 polynomial is the constant f(x0); replicate it over the whole grid
# (Taylor_approx is not called here because of its handling of n = 0).
y_Taylor_0 <- f(x0)*(vector("double", length(x))+1)
y_Taylor_1 <- Taylor_approx(x,x0,1)
y_Taylor_2 <- Taylor_approx(x,x0,2)
y_Taylor_3 <- Taylor_approx(x,x0,3)
y_Taylor_4 <- Taylor_approx(x,x0,4)
gg <- ggplot()
# Overlay f and its Taylor polynomials; the blue point marks the center (x0, f(x0)).
print(gg+
geom_line(aes(x=x,y=y,color='f(x)')) +
geom_line(aes(x=x,y=y_Taylor_0,color='constante'))+
geom_line(aes(x=x,y=y_Taylor_1,color='lineal')) +
geom_line(aes(x=x,y=y_Taylor_2,color='grado 2')) +
geom_line(aes(x=x,y=y_Taylor_3,color='grado 3')) +
geom_line(aes(x=x,y=y_Taylor_4,color='grado 4')) +
geom_point(aes(x=x0, y=f(x0)), color='blue',size=3))
```
```{admonition} Observación
:class: tip
Para cualquier aproximación calculada siempre es una muy buena idea reportar el error relativo de la aproximación si tenemos el valor del objetivo. No olvidar esto :)
```
**Para el cálculo del error utilizamos {ref}`fórmulas para calcular errores absolutos y relativos <FORERRABSERRREL>`:**
$$\text{ErrRel(aprox)} = \frac{|\text{aprox}-\text{obj}|}{|\text{obj}|}$$
**La siguiente función calcula un error relativo para un valor `obj`:**
```
compute_error_point_wise <- function(obj, approx){
    # Point-wise error of `approx` with respect to the target value `obj`:
    # relative error when |obj| is distinguishable from zero, absolute error
    # otherwise.
    # NOTE(review): the threshold eps * xmin underflows to the smallest
    # subnormal, so in practice this behaves as an "obj is nonzero" test —
    # kept as-is to preserve behavior; confirm the intended cutoff.
    absolute_difference <- abs(obj - approx)
    if (abs(obj) > .Machine$double.eps*.Machine$double.xmin){
        absolute_difference / abs(obj)
    } else {
        absolute_difference
    }
}
x_test_point <- 1.9                  # point where every polynomial is evaluated
objective <- f(x_test_point)         # exact target value: f(1.9) = 1/1.9
#Approximations
p1_approx <- Taylor_approx(x_test_point, x0, 1)
p2_approx <- Taylor_approx(x_test_point, x0, 2)
p3_approx <- Taylor_approx(x_test_point, x0, 3)
p4_approx <- Taylor_approx(x_test_point, x0, 4)
# Report the relative error of each polynomial degree; 1/x0 is the degree-0
# (constant) approximation f(x0).
print('error relativo polinomio constante')
print(compute_error_point_wise(objective, 1/x0))
print('error relativo polinomio lineal')
print(compute_error_point_wise(objective, p1_approx))
print('error relativo polinomio grado 2')
print(compute_error_point_wise(objective, p2_approx))
print('error relativo polinomio grado 3')
print(compute_error_point_wise(objective, p3_approx))
print('error relativo polinomio grado 4')
print(compute_error_point_wise(objective, p4_approx))
```
```{admonition} Ejercicio
:class: tip
Aproximar $f(1)$ con polinomios de Taylor de orden $0,1,2,3,4$ si $f(x)=-0.1x^4-0.15x^3-0.5x^2-0.25x+1.2$ con centro en $x0=0$. Calcula los errores relativos de tus aproximaciones. Realiza las gráficas de cada polinomio en el intervalo $[0,1]$ con `ggplot2`. Observa que $R_5(x)$ es cero.
```
(TEOTAYLORNVARIABLES)=
## Teorema de Taylor para una función $f: \mathbb{R}^n \rightarrow \mathbb{R}$
Sea $f: \mathbb{R}^n \rightarrow \mathbb{R}$ diferenciable en $\text{dom}f$. Si $x_0, x \in \text{dom}f$ y $x_0+t(x-x_0) \in \text{dom}f, \forall t \in (0,1),$ entonces $\forall x \in \text{dom}f$ se tiene $f(x) = P_0(x) + R_0(x)$ donde:
$$P_0(x) = f(x_0)$$
$$R_0(x) = \nabla f(x_0+t_x(x-x_0))^T(x-x_0)$$
para alguna $t_x \in (0,1)$ y $\nabla f(\cdot)$ gradiente de $f$, ver {ref}`Definición de función, continuidad y derivada <FCD>` para definición del gradiente de una función.
```{admonition} Observación
:class: tip
La aproximación anterior la nombramos **aproximación de orden $0$** para $f$ con centro en $x_0$. Si $\nabla f(\cdot)$ es acotado en $\text{dom}f$ entonces se escribe: $R_0(x)=\mathcal{O}(||x-x_0||)$.
```
Si además $f$ es continuamente diferenciable en $\text{dom}f$(su derivada es continua, ver {ref}`Definición de función, continuidad y derivada <FCD>` para definición de continuidad), $f^{(2)}$ existe en $\text{dom}f$, se tiene $f(x) = P_1(x) + R_1(x)$ donde:
$$P_1(x) = f(x_0) + \nabla f(x_0)^T(x-x_0)$$
$$R_1(x) = \frac{1}{2}(x-x_0)^T \nabla ^2f(x_0+t_x(x-x_0))(x-x_0)$$
para alguna $t_x \in (0,1)$ y $\nabla^2 f(\cdot)$ Hessiana de $f$ (ver {ref}`Definición de función, continuidad y derivada <FCD>` para definición de la matriz Hessiana).
```{admonition} Observación
:class: tip
La aproximación anterior la nombramos **aproximación de orden $1$** para $f$ con centro en $x_0$. Si $\nabla^2f(\cdot)$ es acotada en $\text{dom}f$ entonces se escribe: $R_1(x) = \mathcal{O}(||x-x_0||^2)$.
```
Si $f^{(2)}$ es continuamente diferenciable y $f^{(3)}$ existe y es acotada en $\text{dom}f$, se tiene $f(x)=P_2(x) + R_2(x)$ donde:
$$P_2(x) = f(x_0) + \nabla f(x_0)^T(x-x_0) + \frac{1}{2}(x-x_0)^T \nabla ^2f(x_0)(x-x_0)$$
```{admonition} Observación
:class: tip
* La aproximación anterior la nombramos **aproximación de orden $2$** para $f$ con centro en $x_0$. Para las suposiciones establecidas se tiene:
$$R_2(x)= \mathcal{O}(||x-x_0||^3).$$
* En este caso $f^{(3)}$ es un tensor.
```
```{admonition} Comentario
Tomando $h=x-x_0$, se reescribe el teorema como sigue, por ejemplo para la aproximación de orden $1$ incluyendo su residuo:
$$f(x) = f(x_0 + h) = \underbrace{f(x_0) + \nabla f(x_0)^Th}_{\textstyle P_1(h)} + \underbrace{\frac{1}{2}h^T \nabla ^2f(x_0+t_xh)h}_{\textstyle R_1(h)}.$$
Si $f^{(2)}$ es acotada en $\text{dom}f$, escribimos: $R_1(h)=\mathcal{O}(||h||^2)$.
```
(DIFNUMDIFFINITAS)=
## Diferenciación numérica por diferencias finitas
```{admonition} Comentario
En esta sección se revisan métodos numéricos para aproximar las derivadas. Otros métodos para el cálculo de las derivadas se realizan con el cómputo simbólico o algebraico, ver {ref}` Definición de función, continuidad y derivada <FCD>` para ejemplos.
```
Las fórmulas de aproximación a las derivadas por diferencias finitas pueden obtenerse con los polinomios de Taylor, presentes en el teorema del mismo autor, por ejemplo:
Sea $f \in \mathcal{C}^1([a,b])$ y $f^{(2)}$ existe y está acotada $\forall x \in [a,b]$ entonces, si $x+h \in [a,b]$ con $h>0$ por el teorema de Taylor:
$$f(x+h) = f(x) + f^{(1)}(x)h + f^{(2)}(\xi_{x+h})\frac{h^2}{2}$$
con $\xi_{x+h} \in [x,x+h]$ y al despejar $f^{(1)}(x)$ se tiene:
$$f^{(1)}(x) = \frac{f(x+h)-f(x)}{h} - f^{(2)}(\xi_{x+h})\frac{h}{2}.$$
y escribimos:
$$f^{(1)}(x) = \frac{f(x+h)-f(x)}{h} + \mathcal{O}(h).$$
La aproximación $\frac{f(x+h)-f(x)}{h}$ es una fórmula por diferencias hacia delante con error de orden $1$. Gráficamente se tiene:
<img src="https://dl.dropboxusercontent.com/s/r1ypkxkwa9g3pmk/dif_hacia_delante.png?dl=0" heigth="500" width="500">
Con las mismas suposiciones es posible obtener la fórmula para la aproximación por diferencias hacia atrás:
$$f^{(1)}(x) = \frac{f(x)-f(x-h)}{h} + \mathcal{O}(h), h >0.$$
<img src="https://dl.dropboxusercontent.com/s/mxmc8kohurlu9sp/dif_hacia_atras.png?dl=0" heigth="500" width="500">
Considerando $f \in \mathcal{C}^2([a,b]), f^{(3)}$ existe y está acotada $\forall x \in [a,b]$ si $x-h, x+h \in [a,b]$ y $h>0$ entonces:
$$f^{(1)}(x) = \frac{f(x+h)-f(x-h)}{2h} + \mathcal{O}(h^2), h >0.$$
y el cociente $\frac{f(x+h)-f(x-h)}{2h}$ es la aproximación por diferencias centradas con error de orden $2$. Gráficamente:
<img src="https://dl.dropboxusercontent.com/s/jwi0y1t5z58pydz/dif_centradas.png?dl=0" heigth="500" width="500">
```{admonition} Observaciones
:class: tip
* La aproximación por diferencias finitas a la primer derivada de la función tiene un error de orden $\mathcal{O}(h)$ por lo que una elección de $h$ igual a $.1 = 10^{-1}$ generará aproximaciones con alrededor de un dígito correcto.
* La diferenciación numérica por diferencias finitas **no es un proceso con una alta exactitud** pues los problemas del redondeo de la aritmética en la máquina se hacen presentes en el mismo (ver nota {ref}`Sistema de punto flotante <SPF>`). Como ejemplo de esta situación realicemos el siguiente ejemplo.
```
### Ejemplo
Realizar una gráfica de log(error relativo) vs log(h) (h en el eje horizontal) para aproximar la primera derivada de $f(x)=e^{-x}$ en $x=1$ con $h \in \{10^{-16}, 10^{-14}, \dots , 10^{-1}\}$ y diferencias hacia delante. Valor a aproximar: $f^{(1)}(1) = -e^{-1}$.
**Definimos la función**
```
# Function whose first derivative will be approximated: f(x) = exp(-x).
f <- function(x){
    exp(-x)
}
```
**Definimos la aproximación numérica por diferencias finitas a la primera derivada**
```
approx_first_derivative <- function(f, x, h){
    # Forward finite-difference approximation to the first derivative:
    #   f'(x) ~ (f(x+h) - f(x)) / h, with truncation error O(h).
    #
    # Args:
    #   f (function): function definition.
    #   x (float): point where the first derivative will be approximated.
    #   h (float): step size for forward differences, typically less than 1.
    #
    # Returns:
    #   float: forward-difference estimate of f'(x).
    (f(x + h) - f(x)) / h
}
```
**Puntos donde se evaluará la aproximación:**
```
x<-1
h<-10^(-1*(1:16))
```
**Aproximación numérica:**
```
approx_df <- approx_first_derivative(f,x,h)
```
**Derivada de la función:**
```
# Exact first derivative of f(x) = exp(-x); serves as the reference value
# against which the finite-difference approximations are compared.
df<-function(x){
    -exp(-x)
}
obj_df <- df(x)  # exact value f'(1) = -exp(-1)
```
**Cálculo de errores:**
```
res_relative_error <- compute_error_point_wise(obj_df, approx_df)
```
**Gráfica:**
```
gf <- ggplot()
print(gf+
geom_line(aes(x=log(h),y=log(res_relative_error)))+
ggtitle('Aproximación a la primera derivada por diferencias finitas'))
```
```{admonition} Ejercicio
:class: tip
Realizar una gráfica de log(error relativo) vs log(h) (h en el eje horizontal) con `ggplot2` para aproximar la segunda derivada de $f(x)=e^{-x}$ en $x=1$ con $h \in \{10^{-16}, 10^{-14}, \dots , 10^{-1}\}$ y diferencias hacia delante. Valor a aproximar: $f^{(2)}(1) = e^{-1}$. Usar:
$$\frac{d^2f(x)}{dx^2} = \frac{f(x+2h)-2f(x+h)+f(x)}{h^2} + \mathcal{O}(h)$$
Encontrar valor(es) de $h$ que minimiza(n) al error absoluto y relativo.
```
```{admonition} Comentario
Aproximaciones a la segunda derivada de una función $f: \mathbb{R} \rightarrow \mathbb{R}$ se pueden obtener con las fórmulas:
* $\frac{d^2f(x)}{dx^2} = \frac{f(x+2h)-2f(x+h)+f(x)}{h^2} + \mathcal{O}(h)$ por diferencias hacia delante.
* $\frac{d^2f(x)}{dx^2} = \frac{f(x)-2f(x-h)+f(x-2h)}{h^2} + \mathcal{O}(h)$ por diferencias hacia atrás.
* $\frac{d^2f(x)}{dx^2} = \frac{f(x+h)-2f(x)+f(x-h)}{h^2} + \mathcal{O}(h^2)$ por diferencias centradas.
Estas fórmulas se obtienen con el teorema de Taylor bajo las suposiciones correctas.
```
## Análisis del error por redondeo y truncamiento en la aproximación por diferencias finitas hacia delante
El ejemplo anterior muestra (vía una gráfica) que el método numérico de diferenciación numérica **no es estable numéricamente respecto al redondeo** (ver nota {ref}`Condición de un problema y estabilidad de un algoritmo <CPEA>` para definición de estabilidad de un algoritmo) y también se puede corroborar realizando un análisis del error. En esta sección consideramos la aproximación a la primer derivada por diferencias finitas hacia delante:
$$\frac{f(x+h)-f(x)}{h}$$
Suponemos que $\hat{f}(x)$ aproxima a $f(x)$ y por errores de redondeo $\hat{f}(x) = f(x)(1 + \epsilon_{f(x)})$ con $|\epsilon_{f(x)}| \leq \epsilon_{maq}$ error de redondeo al evaluar $f$ en $x$. $\hat{f}(x)$ es la aproximación en un SPFN (ver nota {ref}`Sistema de punto flotante <SPF>`). Además supóngase que $x,x+h,h \in \mathcal{Fl}$ . Entonces en la aproximación a la primer derivada por diferencias hacia delante:
$f^{(1)}(x) = \frac{f(x+h)-f(x)}{h} + \mathcal{O}(h)$ y calculando el error absoluto:
$$
\begin{eqnarray}
\text{ErrAbs}\left ( \frac{\hat{f}(x+h)-\hat{f}(x)}{h} \right ) &=& \left |f^{(1)}(x) - \frac{\hat{f}(x+h)-\hat{f}(x)}{h} \right | \nonumber \\
&=& \left | \frac{f(x+h)-f(x)}{h} + \mathcal{O}(h) - \left ( \frac{f(x+h)(1+\epsilon_{f(x+h)})-f(x)(1+\epsilon_{f(x)})}{h} \right ) \right | \nonumber \\
&=& \left |\mathcal{O}(h) - \frac{f(x+h)\epsilon_{f(x+h)}-f(x)\epsilon_{f(x)}}{h} \right | \nonumber \\
&\leq& \mathcal{O}(h) + \frac{C\epsilon_{maq}}{h}
\end{eqnarray}
$$
suponiendo en el último paso que $|f(x+h)\epsilon_{f(x+h)} - f(x)\epsilon_{f(x)}| \leq C \epsilon_{maq}$ con $C>0$ constante que acota a la función $f$ en el intervalo $[a,b]$. Obsérvese que $\frac{\hat{f}(x+h)-\hat{f}(x)}{h}$ es la aproximación a la primer derivada por diferencias hacia delante que se obtiene en la computadora, por lo que la cantidad $\left |f^{(1)}(x) - \frac{\hat{f}(x+h)-\hat{f}(x)}{h} \right |$ es el error absoluto de tal aproximación.
El error relativo es:
$$\text{ErrRel}\left (\frac{\hat{f}(x+h)-\hat{f}(x)}{h} \right) = \frac{\text{ErrAbs}\left ( \frac{\hat{f}(x+h)-\hat{f}(x)}{h} \right )}{|f^{(1)}(x)|} \leq \frac{\mathcal{O}(h) + \frac{C\epsilon_{maq}}{h}}{|f^{(1)}(x)|} = K_1h + K_2\frac{1}{h}$$
con $K_1, K_2 >0$ constantes.
Entonces la función $g(h) = \mathcal{O}(h) + \mathcal{O}\left (\frac{1}{h} \right)$ acota al error absoluto y al error relativo y se tiene:
* Si $h \rightarrow 0$ la componente $\mathcal{O} \left ( \frac{1}{h} \right )$ domina a la componente $\mathcal{O}(h)$, la cual tiende a $0$.
* Si $h \rightarrow \infty$ la componente $\mathcal{O}(h)$ domina a $\mathcal{O} \left ( \frac{1}{h} \right )$, la cual tiende a $0$.
Por lo anterior, existe un valor de $h$ que minimiza a los errores. Tal valor se observa en las gráficas anteriores y es igual a:
```
print(h[which.min(res_relative_error)])
```
```{admonition} Ejercicio
:class: tip
Obtener de forma analítica el valor de $h$ que minimiza la función $g(h)$ anterior. Tip: utilizar criterio de primera y segunda derivada para encontrar mínimo global.
```
### Conclusiones y comentarios
* La componente $\mathcal{O}(h)$ es el error por truncamiento, la cual resulta del teorema de Taylor. El teorema de Taylor nos indica que añadir términos en el polinomio de Taylor si la $x$ a aproximar es cercana al centro, las derivadas de $f$ son acotadas y $h \rightarrow 0$ entonces el error por truncamiento debe tender a $0$. Lo anterior no ocurre en la implementación numérica (corroborado de forma analítica y visual) del método por diferenciación numérica para la primer derivada por la presencia de la componente $\mathcal{O}\left(\frac{1}{h}\right)$ en los errores. Tal componente proviene del error por redondeo.
* Obsérvese que el error relativo máximo es del $100\%$ lo que indica que no se tiene ninguna cifra correcta en la aproximación:
```
print(max(res_relative_error))
```
y esto ocurre para un valor de $h$ igual a:
```
print(h[which.max(res_relative_error)])
```
```{admonition} Pregunta
:class: tip
¿Por qué se alcanza el máximo error relativo en el valor de $h=10^{-16}$?.
```
* Con lo anterior se tiene que la diferenciación numérica es un método **inestable numéricamente respecto al redondeo**. Ver nota {ref}`Condición de un problema y estabilidad de un algoritmo <CPEA>`.
* Un análisis de error similar se utiliza para el método de diferencias finitas por diferencias centradas para aproximar la primera derivada. En este caso el valor de $h$ que minimiza a los errores es del orden $h^* = 10^{-6}$.
## Diferenciación numérica para una función $f: \mathbb{R}^n \rightarrow \mathbb{R}$
Supongamos $f$ es dos veces diferenciable en $\text{intdom}f$. Si $f: \mathbb{R}^n \rightarrow \mathbb{R}$ entonces $\nabla f: \mathbb{R}^n \rightarrow \mathbb{R}^n$ y $\nabla ^2f: \mathbb{R}^n \rightarrow \mathbb{R}^{n\times n}$ (ver {ref}`Definición de función, continuidad y derivada <FCD>` para definición de derivadas en funciones $f: \mathbb{R}^n \rightarrow \mathbb{R}^m$). Ambas funciones al evaluarse resultan en un vector en $\mathbb{R}^n$ y en una matriz en $\mathbb{R}^{n\times n}$ respectivamente.
Podemos utilizar las fórmulas de aproximación en diferenciación numérica con diferencias finitas para el caso $f: \mathbb{R} \rightarrow \mathbb{R}$ revisadas anteriormente para aproximar al gradiente y a la Hessiana.
Para el caso del gradiente se tiene por **diferenciación hacia delante**:
$$\nabla f(x) =
\begin{array}{l}
\left[ \begin{array}{c}
\frac{\partial f(x)}{\partial x_1}\\
\vdots\\
\frac{\partial f(x)}{\partial x_n}
\end{array}
\right] \approx \nabla \hat{f}(x) =\left[
\begin{array}{c}
\frac{f(x+he_1) - f(x)}{h}\\
\vdots\\
\frac{f(x+he_n) - f(x)}{h}
\end{array}
\right]
\end{array} \in \mathbb{R}^n$$
con $e_j$ $j$-ésimo vector canónico que tiene un número $1$ en la posición $j$ y $0$ en las entradas restantes para $j=1,\dots,n$. Se cumple $||\nabla f(x) - \nabla \hat{f}(x)|| = \mathcal{O}(h)$. Y para el caso de la Hessiana:
$$\nabla^2f(x) = \left[\begin{array}{cccc}
\frac{\partial^2f(x)}{\partial x_1^2} &\frac{\partial^2f(x)}{\partial x_2 \partial x_1}&\dots&\frac{\partial^2f(x)}{\partial x_n \partial x_1}\\
\frac{\partial^2f(x)}{\partial x_1 \partial x_2} &\frac{\partial^2f(x)}{\partial x_2^2} &\dots&\frac{\partial^2f(x)}{\partial x_n \partial x_2}\\
\vdots &\vdots& \ddots&\vdots\\
\frac{\partial^2f(x)}{\partial x_1 \partial x_n} &\frac{\partial^2f(x)}{\partial x_2 \partial x_n}&\dots&\frac{\partial^2f(x)}{\partial x_n^2} \\
\end{array}
\right],
$$
$$
\nabla^2 \hat{f}(x) = \left[\begin{array}{cccc}
\frac{f(x+2he_1)-2f(x+he_1)+f(x)}{h^2} &\frac{f(x+he_1+he_2)-f(x+he_1)-f(x+he_2)+f(x)}{h^2}&\dots&\frac{f(x+he_1+he_n)-f(x+he_1)-f(x+he_n)+f(x)}{h^2}\\
\frac{f(x+he_1+he_2)-f(x+he_2)-f(x+he_1)+f(x)}{h^2}&\frac{f(x+2he_2)-2f(x+he_2)+f(x)}{h^2} &\dots&\frac{f(x+he_2+he_n)-f(x+he_2)-f(x+he_n)+f(x)}{h^2}\\
\vdots &\vdots& \ddots&\vdots\\
\frac{f(x+he_1+he_n)-f(x+he_n)-f(x+he_1)+f(x)}{h^2}&\frac{f(x+he_2+he_n)-f(x+he_n)-f(x+he_2)+f(x)}{h^2}&\dots&\frac{f(x+2he_n)-2f(x+he_n)+f(x)}{h^2}\\
\end{array}
\right]
$$
Se cumple: $||\nabla^2f(x)-\nabla^2\hat{f}(x)||=\mathcal{O}(h)$.
### Ejemplo
Aproximar $\nabla f(x), \nabla^2f(x)$ con diferencias hacia delante y $h \in \{10^{-16}, 10^{-14}, \dots , 10^{-1}\}$ para $f: \mathbb{R}^4 \rightarrow \mathbb{R}$, dada por $f(x) = (x_1^2-x_2^2)^2+x_1^2+(x_3^2-x_4^2)^2+x_3^2$ en el punto $x_0=(1.5,1.5,1.5,1.5)^T$. Realizar una gráfica de $\log(\text{Err}_{\text{rel}})$ vs $\log(h)$.
Para esta función se tiene:
$$\nabla f(x) =
\left[ \begin{array}{c}
4x_1(x_1^2-x_2^2) + 2x_1\\
-4x_2(x_1^2-x_2^2)\\
4x_3(x_3^2-x_4^2)+2x_3\\
-4x_4(x_3^2-x_4^2)
\end{array}
\right] ,
$$
$$\nabla^2f(x)=
\left[\begin{array}{cccc}
12x_1^2-4x_2^2+2 &-8x_1x_2&0&0\\
-8x_1x_2 &-4x_1^2+12x_2^2&0&0\\
0 &0&12x_3^2-4x_4^2+2&-8x_3x_4\\
0&0&-8x_3x_4&-4x_3^2+12x_4^2\\
\end{array}
\right]
$$
**Gradiente de f calculado de forma simbólica**
```
gf<-function(x){
c(4*x[1]*(x[1]^2-x[2]^2)+2*x[1],
-4*x[2]*(x[1]^2-x[2]^2),
4*x[3]*(x[3]^2-x[4]^2)+2*x[3],
-4*x[4]*(x[3]^2-x[4]^2))
}
```
**Punto en el que se evaluará**
```
x_0<-c(1.5,1.5,1.5,1.5)
print(gf(x_0))
```
$$\nabla f(x_0) =
\left[ \begin{array}{c}
3\\
0\\
3\\
0
\end{array}
\right],
$$
**Hessiana de f calculada de forma simbólica**
```
gf2<-function(x){
matrix(c(12*x[1]^2-4*x[2]^2+2,-8*x[1]*x[2],0,0,
-8*x[1]*x[2],-4*x[1]^2+12*x[2]^2,0,0,
0,0,12*x[3]^2-4*x[4]^2+2,-8*x[3]*x[4],
0,0,-8*x[3]*x[4],-4*x[3]^2+12*x[4]^2),nrow=4,ncol=4)
}
```
**Evaluación de la Hessiana**
```
print(gf2(x_0))
```
$$\nabla^2f(x_0)=
\left[\begin{array}{cccc}
20 &-18&0&0\\
-18&18&0&0\\
0 &0&20&-18\\
0&0&-18&18\\
\end{array}
\right]
$$
**Definición de función y punto en el que se calculan las aproximaciones**
```
f <- function(x){
(x[1]^2-x[2]^2)^2+x[1]^2+(x[3]^2-x[4]^2)^2+x[3]^2
}
x0 <- rep(1.5,4)
```
**Lo siguiente calcula el gradiente y la Hessiana de forma numérica con la aproximación por diferencias hacia delante**
```
inc_index<-function(vec,index,h){
'
Auxiliary function for gradient and Hessian computation.
Args:
vec (double): vector
index (int): index.
h (float): quantity that vec[index] will be increased.
Returns:
vec (double): vector with vec[index] increased by h.
'
vec[index]<-vec[index]+h
vec
}
gradient_approximation<-function(f,x,h=1e-8){
'
Numerical approximation of gradient for function f using forward differences.
Args:
f (expression): definition of function f.
x (double): vector that holds values where gradient will be computed.
h (float): step size for forward differences, tipically h=1e-8
Returns:
gf (array): numerical approximation to gradient of f.
'
n<-length(x)
gf<-vector("double",n)
for(i in 1:n){
gf[i]=(f(inc_index(x,i,h))-f(x))
}
gf/h
}
Hessian_approximation<-function(f,x,h=1e-6){
'
Numerical approximation of Hessian for function f using forward differences.
Args:
f (expression): definition of function f.
x (double): vector that holds values where Hessian will be computed.
h (float): step size for forward differences, tipically h=1e-6
Returns:
Hf (double): matrix of numerical approximation to Hessian of f.
'
n<-length(x)
Hf<-matrix(rep(0,n^2),nrow=n,ncol=n)
f_x<-f(x)
for(i in 1:n){
x_inc_in_i<-inc_index(x,i,h)
f_x_inc_in_i<-f(x_inc_in_i)
for(j in i:n){
dif<-f(inc_index(x_inc_in_i,j,h))-f_x_inc_in_i-f(inc_index(x,j,h))+f_x
Hf[i,j]<-dif
if(j!=i)
Hf[j,i]<-dif
}
}
Hf/h^2
}
```
**Conjunto de valores de h para diferencias hacia delante**
```
h<-10^(-1*(1:16))
```
**Para el cálculo del error utilizamos {ref}`fórmulas para calcular errores absolutos y relativos <FORERRABSERRREL>`:**
$$\text{ErrRel(aprox)} = \frac{||\text{aprox}-\text{obj}||}{||\text{obj}||}$$
**La siguiente función calcula un error relativo para un vector `obj`:**
```
Euclidian_norm<-function(vec){
'Compute Euclidian norm of vector'
sqrt(sum(vec*vec))
}
compute_error<-function(obj,approx){
'
Relative or absolute error between obj and approx based in Euclidian norm.
Approx is a numeric vector.
'
if (Euclidian_norm(obj) > .Machine$double.eps*.Machine$double.xmin){
Err<-Euclidian_norm(obj-approx)/Euclidian_norm(obj)
}else
Err<-Euclidian_norm(obj-approx)
Err
}
gf_numeric_approximations <- lapply(h,gradient_approximation,f=f,x=x0)
gf2_numeric_approximations <- lapply(h,Hessian_approximation,f=f,x=x0)
rel_err_gf <- sapply(gf_numeric_approximations,compute_error,obj=gf(x_0))
rel_err_gf2 <- sapply(gf2_numeric_approximations,compute_error,obj=gf2(x_0))
gg<-ggplot()
print(gg+
geom_line(aes(x=log(h),y=log(rel_err_gf)))+
ggtitle('Aproximación al gradiente por diferencias finitas'))
print(h[which.min(rel_err_gf)])
print(gg+
geom_line(aes(x=log(h),y=log(rel_err_gf2)))+
ggtitle('Aproximación a la Hessiana por diferencias finitas'))
print(h[which.min(rel_err_gf2)])
```
```{admonition} Ejercicio
:class: tip
Aproximar $\nabla f(x), \nabla^2f(x)$ con diferencias hacia delante y $h \in \{10^{-16}, 10^{-14}, \dots , 10^{-1}\}$ para $f: \mathbb{R}^3 \rightarrow \mathbb{R}$, dada por $f(x) = x_1x_2\exp(x_1^2+x_3^2-5)$ en el punto $x_0=(1,3,-2)^T$. Realizar una gráfica de $\log(\text{Err}_{\text{rel}})$ vs $\log(h)$.
```
```{admonition} Ejercicios
:class: tip
1. Resuelve los ejercicios y preguntas de la nota.
```
**Referencias**
1. R. L. Burden, J. D. Faires, Numerical Analysis, Brooks/Cole Cengage Learning, 2005.
2. M. T. Heath, Scientific Computing. An Introductory Survey, McGraw-Hill, 2002.
3. S. P. Boyd, L. Vandenberghe, Convex Optimization. Cambridge University Press, 2004.
| github_jupyter |
# Homework 4: Probability and Sampling
Reading: Textbook chapter [8](https://www.inferentialthinking.com/chapters/08/randomness.html).
Please complete this notebook by filling in the cells provided. Before you begin, execute the following cell to load the provided tests. Each time you start your server, you will need to execute this cell again to load the tests.
```
# Don't change this cell; just run it.
import numpy as np
from datascience import *
%matplotlib inline
import matplotlib.pyplot as plots
plots.style.use('fivethirtyeight')
from client.api.notebook import Notebook
ok = Notebook('hw04.ok')
_ = ok.auth(inline=True)
```
<font color="#E74C3C">**Important**: The `ok` tests don't usually tell you that your answer is correct. More often, they help catch careless mistakes. It's up to you to ensure that your answer is correct. If you're not sure, ask someone (not for the answer, but for some guidance about your approach).</font>
## 1. Sampling Basketball Players
This exercise uses salary data and game statistics for basketball players from the 2014-2015 NBA season. The data were collected from [basketball-reference](http://www.basketball-reference.com) and [spotrac](http://www.spotrac.com).
Run the next cell to load the two datasets.
```
player_data = Table.read_table('player_data.csv')
salary_data = Table.read_table('salary_data.csv')
player_data.show(3)
salary_data.show(3)
```
**Question 1.** We would like to relate players' game statistics to their salaries. Compute a table called `full_data` that includes one row for each player who is listed in both `player_data` and `salary_data`. It should include all the columns from `player_data` and `salary_data`, except the `"PlayerName"` column.
```
full_data = player_data.join('Name', salary_data, 'PlayerName')
full_data
full_data.num_rows
_ = ok.grade('q1_1')
```
Basketball team managers would like to hire players who perform well but don't command high salaries. From this perspective, a very crude measure of a player's *value* to their team is the number of points the player scored in a season for every **\$1000 of salary** (*Note*: the `Salary` column is in dollars, not thousands of dollars). For example, Al Horford scored 1156 points and has a salary of 12,000 thousands of dollars, so his value is $\frac{1156}{12000}$.
**Question 2.** Create a table called `full_data_with_value` that's a copy of `full_data`, with an extra column called `"Value"` containing each player's value (according to our crude measure). Then make a histogram of players' values. **Specify bins that make the histogram informative, and don't forget your units!**
Hint: Informative histograms contain a majority of the data and exclude outliers.
```
full_data_with_value = full_data.with_column("Value", full_data.column(9)/(full_data.column(10)/1000))
full_data_with_value.hist('Value', bins=np.arange(0,1.5,.1), unit = 'point per $1,000 dollar')
```
Now suppose we weren't able to find out every player's salary. (Perhaps it was too costly to interview each player.) Instead, we have gathered a *simple random sample* of 100 players' salaries. The cell below loads those data.
```
sample_salary_data = Table.read_table("sample_salary_data.csv")
sample_salary_data.show(3)
sample_full_data.num_rows
```
**Question 3.** Make a histogram of the values of the players in `sample_salary_data`, using the same method for measuring value we used in question 2. **Use the same bins, too.** *Hint:* This will take several steps.
```
# Use this cell to make your histogram.
sample_salary_data
sample_full_data = player_data.join('Name', sample_salary_data, 'PlayerName')
sample_full_data_with_value = sample_full_data.with_column("Value", sample_full_data.column(9)/
(sample_full_data.column(10)/1000))
sample_full_data_with_value.hist('Value', bins=np.arange(0,1.5,.1), unit = 'point per dollar')
```
Now let us summarize what we have seen. To guide you, we have written most of the summary already.
**Question 4.** Complete the statements below by filling in the [SQUARE BRACKETS]:
The plot in question 2 displayed a(n) [probability distribution] histogram of the population of [492] players. The areas of the bars in the plot sum to [0.97].
The plot in question 3 displayed a(n) [empirical distribution] histogram of the sample of [100] players. The areas of the bars in the plot sum to [0.97].
**Question 5.** For which range of values does the plot in question 3 better depict the distribution of the **population's player values**: 0 to 0.5, or above 0.5?
*0 to 0.5, because the sample's distribution between 0 and 0.5 looks very similar to the population's distribution of player values between 0 and 0.5.*
## 2. How Many Devices?
When a company produces medical devices, it must be sure that its devices will not fail. Sampling is used ubiquitously in the medical device industry to test how well devices work.
Suppose you work at a company that produces syringes, and you are responsible for ensuring the syringes work well. After studying the manufacturing process for the syringes, you have a hunch that they have a 1% failure rate. That is, you suspect that 1% of the syringes won't work when a doctor uses them to inject a patient with medicine.
To test your hunch, you would like to find at least one faulty syringe. You hire an expert consultant who can test a syringe to check whether it is faulty. But the expert's time is expensive, so you need to avoid checking more syringes than you need to.
**Important note:** This exercise asks you to compute numbers that are related to probabilities. For all questions, you can calculate your answer using algebra, **or** you can write and run a simulation to compute an approximately-correct answer. (For practice, we suggest trying both.) An answer based on an appropriate simulation will receive full credit. If you simulate, use at least **5,000** trials.
**Question 1.** Suppose there is indeed a 1% failure rate among all syringes. If you check 20 syringes chosen at random from among all syringes, what is the chance that you find at least 1 faulty syringe? (You may assume that syringes are chosen with replacement from a population in which 1% of syringes are faulty.) Name your answer `chance_to_find_syringe`.
```
# For your convenience, we have created a list containing
# 99 copies of the number 0 (to represent good syringes)
# and 1 copy of the number 1 (to represent a bad syringe).
# This may be useful if you run a simulation. Feel free
# to delete it.
faultiness = np.append(0*np.arange(99), 1)
faultiness
np.random.choice(faultiness, 20)
chance_to_find_syringe = 1 - pow(0.99,20)
chance_to_find_syringe
_ = ok.grade('q2_1')
```
**Question 2.** Continue to assume that there really is a 1% failure rate. Find the smallest number of syringes you can check so that you have at least a 50% chance of finding a faulty syringe. (Your answer should be an integer.) Name that number `num_required_for_50_percent`. **It's okay if your answer is off by as many as 11 for full credit.**
```
import math
num_required_for_50_percent = (math.log(1/2))/(math.log(99/100))
num_required_for_50_percent = int(np.round(num_required_for_50_percent))
num_required_for_50_percent
_ = ok.grade('q2_2')
```
**Question 3.** A doctor purchased 5 syringes and found 4 of them to be faulty. Assuming that there is indeed a 1% failure rate, what was the probability of **exactly 4** out of 5 syringes being faulty?
```
probability_of_four_faulty = 5*pow(0.01,4)*(0.99)
probability_of_four_faulty
_ = ok.grade('q2_3')
```
**Question 4.** Assuming that there is indeed a 1% failure rate, assign `order` to a **list** of the numbers 1 through 7, ordered by the size of the quantities described below from smallest to largest. For example, `order` will start with 2 because list item 2 ("Zero") is the smallest quantity.
1. One half
1. Zero
1. The chance that **zero** out of 5 syringes are faulty.
1. The chance that **at least 1** out of 5 syringes is faulty.
1. The chance that **exactly 4** out of 5 syringes are faulty.
1. The chance that **at least 4** out of 5 syringes are faulty.
1. The chance that **all 5** out of 5 syringes are faulty.
```
1-pow(.99,5)
order = make_array(2,7,5,6,4,1,3)
_ = ok.grade('q2_4')
```
## 3. Submission
Once you're finished, select "Save and Checkpoint" in the File menu and then execute the `submit` cell below. The result will contain a link that you can use to check that your assignment has been submitted successfully. If you submit more than once before the deadline, we will only grade your final submission.
```
_ = ok.submit()
```
## 4. Predicting Temperatures
<font color="#E74C3C">**Important**: The following problem is **optional** and for extra practice only. You don't have to complete it in order to get full score on the assignment (nor will you get extra points for these). However, we highly recommend working on it as practice for the midterm.</font>
In this exercise, we will try to predict the weather in California using the prediction method discussed in [section 7.1 of the textbook](https://www.inferentialthinking.com/chapters/07/1/applying-a-function-to-a-column.html). Much of the code is provided for you; you will be asked to understand and run the code and interpret the results.
The US National Oceanic and Atmospheric Administration (NOAA) operates thousands of climate observation stations (mostly in the US) that collect information about local climate. Among other things, each station records the highest and lowest observed temperature each day. These data, called "Quality Controlled Local Climatological Data," are publicly available [here](http://www.ncdc.noaa.gov/orders/qclcd/) and described [here](https://www.ncdc.noaa.gov/data-access/land-based-station-data/land-based-datasets/quality-controlled-local-climatological-data-qclcd).
`temperatures.csv` contains an excerpt of that dataset. Each row represents a temperature reading in Fahrenheit from one station on one day. (The temperature is actually the highest temperature observed at that station on that day.) All the readings are from 2015 and from California stations.
```
temperatures = Table.read_table("temperatures.csv")
temperatures
```
Try visualizing the temperature data over time on the next cell:
```
#Create a visualization here:
```
Each entry in the column "Date" is a number in MMDD format, meaning that the last two digits denote the day of the month, and the first 1 or 2 digits denote the month.
**Question 1.** Why does the data form vertical bands with gaps?
*Write your answer here, replacing this text.*
Let us solve that problem. We will convert each date to the number of days since the start of the year.<br>
**Question 2.** Implement the `get_day_in_month` function. The result should be an integer.<br>
_Hint:_ Use the [remainder operator](https://www.inferentialthinking.com/chapters/03/1/expressions.html).
```
def get_month(date):
"""The month in the year for a given date.
>>> get_month(315)
3
"""
return int(date / 100) # Divide by 100 and round down to the nearest integer
def get_day_in_month(date):
"""The day in the month for a given date.
>>> get_day_in_month(315)
15
"""
...
_ = ok.grade('q4_2')
```
Next, we'll compute the *day of the year* for each temperature reading, which is the number of days from January 1 until the date of the reading.
```
# You don't need to change this cell, but you are strongly encouraged
# to read all of the code and understand it.
days_in_month = make_array(31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31)
# A table with one row for each month. For each month, we have
# the number of the month (e.g. 3 for March), the number of
# days in that month in 2015 (e.g. 31 for March), and the
# number of days in the year before the first day of that month
# (e.g. 0 for January or 59 for March).
days_into_year = Table().with_columns(
"Month", np.arange(12)+1,
"Days until start of month", np.cumsum(days_in_month) - days_in_month)
# First, compute the month and day-of-month for each temperature.
months = temperatures.apply(get_month, "Date")
day_of_month = temperatures.apply(get_day_in_month, "Date")
with_month_and_day = temperatures.with_columns(
"Month", months,
"Day of month", day_of_month
)
# Then, compute how many days have passed since
# the start of the year to reach each date.
t = with_month_and_day.join('Month', days_into_year)
day_of_year = t.column('Days until start of month') + t.column('Day of month')
with_dates_fixed = t.drop(0, 6, 7).with_column("Day of year", day_of_year)
with_dates_fixed
```
**Question 3**. Set `missing` to an array of all the days of the year (integers from 1 through 365) that do not have any temperature readings in the `with_dates_fixed` dataset.<br>
*Hint:* One strategy is to start with a table of all days in the year, then use either the predicate `are.not_contained_in` ([docs](http://data8.org/datascience/predicates.html)) or the method `exclude` ([docs](http://data8.org/datascience/_autosummary/datascience.tables.Table.exclude.html#datascience.tables.Table.exclude)) to eliminate all of the days of the year that do have a temperature reading.
```
missing = ...
missing
_ = ok.grade('q4_3')
```
Using `with_dates_fixed`, we can make a better scatter plot.
```
with_dates_fixed.scatter("Day of year", "Temperature")
```
Let's do some prediction. For any reading on any day, we will predict its value using all the readings from the week before and after that day. A reasonable prediction is that the reading will be the average of all those readings. We will package our code in a function.
```
def predict_temperature(day):
"""A prediction of the temperature (in Fahrenheit) on a given day at some station.
"""
nearby_readings = with_dates_fixed.where("Day of year", are.between_or_equal_to(day - 7, day + 7))
return np.average(nearby_readings.column("Temperature"))
```
**Question 4.** Suppose you're planning a trip to Yosemite for Thanksgiving break this year, and you'd like to predict the temperature on November 26. Use `predict_temperature` to compute a prediction for a temperature reading on that day.
```
thanksgiving_prediction = ...
thanksgiving_prediction
_ = ok.grade('q4_4')
```
Below we have computed a predicted temperature for each reading in the table and plotted both. (It may take a **minute or two** to run the cell.)
```
with_predictions = with_dates_fixed.with_column(
"Predicted temperature",
with_dates_fixed.apply(predict_temperature, "Day of year"))
with_predictions.select("Day of year", "Temperature", "Predicted temperature")\
.scatter("Day of year")
```
**Question 5.** The scatter plot is called a *graph of averages*. In the [example in the textbook](https://www.inferentialthinking.com/chapters/07/1/applying-a-function-to-a-column.html#Example:-Prediction), the graph of averages roughly followed a straight line. Is this observation true for this graph? Using your knowledge about seasons, explain why or why not.
*Write your answer here, replacing this text.*
**Question 6.** According to the [Wikipedia article](https://en.wikipedia.org/wiki/Climate_of_California) on California's climate, "[t]he climate of California varies widely, from hot desert to subarctic." Suppose we limited our data to weather stations in a smaller area whose climate varied less from place to place (for example, the state of Vermont, or the San Francisco Bay Area).
If we made the same graph for that dataset, in what ways would you expect it to look different? Be specific.
*Write your answer here, replacing this text.*
| github_jupyter |
# Exhaustive Optimizer Log Example
When evaluating optimization performance it can be useful to observe the parametric surface of optimization results. The `ITKOptimizationMonitor` external module provides an event-based procedure through which data generated with an `itk.ExhaustiveOptimizer` may be preserved and referenced as an `itk.Image` object.
This example adapts the [ITKSphinxExamples Exhaustive Optimizer sample notebook](https://itk.org/ITKExamples/src/Numerics/Optimizers/ExhaustiveOptimizer/PlotExhaustiveOptimizer.html) to demonstrate how the `itk.CommandExhaustiveLog` class may be employed for data collection and surface visualization. Two 2D images are registered with an `itk.Euler2DTransform` evaluated with an `itk.MeanSquaresImageToImageMetricv4` object. Results are visualized over the entire optimization region as a 3D image and then as a 2D image slice.
```
import os
from urllib.request import urlretrieve
import itk
from itkwidgets import view
```
### Get sample data
```
os.makedirs('Input', exist_ok=True)
fixed_img_path = 'Input/apple.jpg'
moving_img_path = 'Input/orange.jpg'
if not os.path.exists(fixed_img_path):
url = 'https://data.kitware.com/api/v1/file/5cad1aec8d777f072b181870/download'
urlretrieve(url, fixed_img_path)
if not os.path.exists(moving_img_path):
url = 'https://data.kitware.com/api/v1/file/5cad1aed8d777f072b181879/download'
urlretrieve(url, moving_img_path)
fixed_img = itk.imread(fixed_img_path, itk.F)
moving_img = itk.imread(moving_img_path, itk.F)
```
### Define Registration Parameters
```
dimension = 2
FixedImageType = itk.Image[itk.F, dimension]
MovingImageType = itk.Image[itk.F, dimension]
TransformType = itk.Euler2DTransform[itk.D]
OptimizerType = itk.ExhaustiveOptimizerv4[itk.D]
MetricType = itk.MeanSquaresImageToImageMetricv4[FixedImageType, MovingImageType]
TransformInitializerType = \
itk.CenteredTransformInitializer[itk.MatrixOffsetTransformBase[itk.D,2,2],
FixedImageType, MovingImageType]
RegistrationType = itk.ImageRegistrationMethodv4[FixedImageType,MovingImageType]
transform = TransformType.New()
initializer = TransformInitializerType.New(
Transform=transform,
FixedImage=fixed_img,
MovingImage=moving_img,
)
initializer.InitializeTransform()
metric_results = dict()
metric = MetricType.New()
optimizer = OptimizerType.New()
optimizer.SetNumberOfSteps([10,10,1])
scales = optimizer.GetScales()
scales.SetSize(3)
scales.SetElement(0, 0.1)
scales.SetElement(1, 1.0)
scales.SetElement(2, 1.0)
optimizer.SetScales(scales)
registration = RegistrationType.New(Metric=metric,
Optimizer=optimizer,
FixedImage=fixed_img,
MovingImage=moving_img,
InitialTransform=transform,
NumberOfLevels=1)
```
### Add CommandExhaustiveLog Observer
Note that available `itk.CommandExhaustiveLog` template wrappings are limited to wrapped ITK image types. In order to visualize optimization over a transform with greater than three parameters it may be necessary to build ITK locally from source with extra image wrappings.
```
observer = itk.CommandExhaustiveLog[itk.F, transform.GetNumberOfParameters()].New()
observer.SetCenter(transform.GetParameters())
optimizer.AddObserver(itk.StartEvent(), observer) # Initialize
optimizer.AddObserver(itk.IterationEvent(), observer) # Collect data
```
### Run Registration
Observer data is updated with each `itk.IterationEvent` fired by the optimizer.
```
registration.Update()
print(f'MinimumMetricValue: {optimizer.GetMinimumMetricValue():.4f}\t'
f'MaximumMetricValue: {optimizer.GetMaximumMetricValue():.4f}\n'
f'MinimumMetricValuePosition: {list(optimizer.GetMinimumMetricValuePosition())}\t'
f'MaximumMetricValuePosition: {list(optimizer.GetMaximumMetricValuePosition())}\n'
f'StopConditionDescription: {optimizer.GetStopConditionDescription()}\t')
```
### Visualize 3D Parameter Space
Metric values within the exhaustive transform region may be visualized directly with `itkwidgets` as a 3D `itk.Image` object.
```
view(observer.GetDataImage())
```
### Visualize 2D Data Slice
Metric values may be visualized over a 2D slice of the 3D transform parameter domain using `itkwidgets`. In this case we will slice along the third transform dimension to visualize the surface containing the maximum metric value in the exhaustive domain.
See [Process a 2D Slice of a 3D Image](https://itk.org/ITKExamples/src/Filtering/ImageGrid/ProcessA2DSliceOfA3DImage/Documentation.html) in the [ITKSphinxExamples](https://itk.org/ITKExamples/index.html) for image slicing procedures.
```
max_point = optimizer.GetMaximumMetricValuePosition()
filter = itk.ExtractImageFilter[type(observer.GetDataImage()),itk.Image[itk.F,2]].New()
filter.SetInput(observer.GetDataImage())
filter.SetDirectionCollapseToSubmatrix()
dest_region = observer.GetDataImage().GetBufferedRegion()
size = dest_region.GetSize()
index = dest_region.GetIndex()
size[2] = 0 # Collapse along third dimension
index[2] = observer.GetDataImage().TransformPhysicalPointToIndex(max_point)[2]
dest_region.SetSize(size)
dest_region.SetIndex(index)
filter.SetExtractionRegion(dest_region)
filter.Update()
view(filter.GetOutput())
```
| github_jupyter |
# Your title for your regression project
## Project Planning
### Goals
(see background)...your customer is the zillow data science team. state your goals as if you were delivering this to zillow. They have asked for something from you (see 'background') and you are basically communicating in a more concise way, and very clearly, the goals as you understand them and as you have taken and acted upon through your research.
*Project Overview
Background:
Zillow wants to improve their Zestimate. The zestimate is estimated value of a home. Zillow theorizes that there is more information to be gained to improve its existing model. Because of that, Zillow wants you to develop a model to predict the error between the Zestimate and the sales price of a home. In predicting the error, you will discover features that will help them improve the Zestimate estimate itself. Your goal of this project is to develop a linear regression model that will best predict the log error of the Zestimate. The error is the difference of the sales price and the Zestimate. The log error is computed by taking the log function of that error. You don't need to worry about the fact that the error is of a logarithmic function. It is a continuous number that represents an error rate.*
### Deliverables
What should the zillow team expect to receive from you? Again, as you were communicating to them, not to your instructors.
*your deliverables:*
1. *A report (in the form of a presentation, both verbal and through slides) that summarizes your findings about the drivers of the Zestimate error. This will come from the analysis you do during the exploration phase of the pipeline. In the report, you will have charts that visually tell the story of what is driving the errors.*
2. *A Jupyter notebook, titled 'Regression_Proj_YourName', that contains a clearly labeled section and code clearly documented for each the stages below (project planning, data acquisition, data prep, exploration, and modeling). All of the work will take place in your jupyter notebook.*
### Data Dictionary & Details
**we will be using 2016 data so do NOT use properties_2017 or predictions_2017**
Data dictionary of fields you will use. Why? So that you can refer back and others can refer to the meanings as you are developing your model. This is about gaining knowledge in the domain space so that you will understand when data doesn't look right, be able to more effectively develop hypotheses, and use that domain knowledge to build a more robust model (among other reasons)
Define your fields. You may be able to do some ahead of time, but you may need to return to this section after you have explored the data and understand more about what each field means. Also, look for a data dictionary either in the database or in the original data source (kaggle.com).
You are free to use more fields than is mentioned, but I would recommend not expanding it to too many fields in the beginning as it will add complexity and you want to make sure you get an initial version completed before diving in deeper.
- logerror
- bathroomcnt
- bedroomcnt
- calculatedfinishedsquarefeet
- fullbathcnt
- garagecarcnt
- roomcnt
- yearbuilt
- taxvaluedollarcnt
- taxamount
### Initial Thoughts, Ideas, Hypotheses
Brainstorming ideas, hypotheses, related to how variables might impact or relate to each other, both within independent variables and between the independent variables and dependent variable, and also related to any ideas for new features you may have while first looking at the existing variables and challenge ahead of you.
### Prepare your environment
import libraries you will use throughout the project. You may need to add to this as you go. But try to keep all of your imports in this section
### Data Acquisition
#### Acquire via csv
- add the path to where your files are located in your env.py file.
- ensure that others can read the files based on their local env.py file.
- Filter out any from predictions_2016 that do not have a logerror.
- Generate a cohesive data set that includes the following fields:
- `logerror`
- `bathroomcnt`
- `bedroomcnt`
- `calculatedfinishedsquarefeet`
- `fullbathcnt`
- `garagecarcnt`
- `roomcnt`
- `yearbuilt`
- `taxvaluedollarcnt`
- `taxamount`
- (optional) `regionidzip`
recommendations for dealing with the large dataset
- remove any without a logerror
- sample until you have the right query (and then pull entire dataset)
- export to local csv
- read from local csv (sample, but larger sample than your first)
- save sql query to add into to_sql() function in python.
#### Summarize the data.
Why? So you can confirm the data look like what you would expect.
- peek at a few rows
- data types
- summary stats
- column names
- number of rows and columns
- ...
### Data Preparation
Goal: leave this section with a dataset that is ready to be analyzed.
#### Create a variable, `colnames`, that is a list of the column names.
Why? You will likely reference this variable later.
#### Identify the data types of each variable. Why?
You need to make sure they are what makes sense for the data and the meaning of the data that lies in that variable. If it does not, make necessary changes.
#### Identify the columns that have missing values and the number of missing values in each column.
Why? Missing values are going to cause issues down the line so you will need to handle those appropriately. For each variable with missing values, if it makes sense to replace those missing with a 0, do so. For those where that doesn't make sense, decide if you should drop the entire observations (rows) that contain the missing values, or drop the entire variable (column) that contains the missing values.
#### Create a list of the independent variable names (aka attributes) and assign it to `x_vars`
Why? During exploration, you will likely use this list to refer to the attribute names.
#### Clearly identify your dependent (target) variable.
- What is the name of the variable?
- Is it discrete or continuous?
#### View the distribution of the numeric variables
- plot a histogram and box plot of each variable.
- Why? To see the distribution, skewness, outliers, and unit scales. You will use this information in your decision of whether to normalize, standardize or neither.
#### BONUS: Data standardization
Create a new data frame that is the min-max normalization of the independent variable in the original data frame (+ the original dependent variable). You will normalize each of the numeric independent variables independently, i.e. using the min and max of each variable, not the min/max of the whole dataframe. Why? Regression is very sensitive to difference in units. It will be almost impossible to extract a meaningful linear regression model with such extreme differences in scale. For more context, see: https://medium.com/@rrfd/standardize-or-normalize-examples-in-python-e3f174b65dfc
You will use this dataframe in the future analysis.
### Data Exploration
Goal is to address each of the questions you posed in your planning & brainstorming through visual or statistical analysis.
When you have completed this step, you will have the findings from your analysis that will be used in your final report, the answers to your questions and your customer's questions that will reach the goal.
#### Split data into training and test datasets
#### Create a jointplot for each independent variable (normalized version) with the dependent variable.
Be sure you have Pearson's r and p-value annotated on each plot.
#### Create a feature plot using seaborn's PairGrid() of the interaction between each variable (dependent + independent).
You may want to use a normalized dataframe (if you did that) or adjust the axis scale (set to logarithmic, e.g) so you can more clearly view the interactions.
#### Create a heatmap of the correlation between each variable pair.
#### Summarize your conclusions from all of these steps.
#### Is the logerror significantly different for homes with 3 bedrooms vs those with 5 or more bedrooms?
Run a t-test to test this difference.
#### Do the same for another 2 samples you are interested in comparing (e.g. those with 1 bath vs. x baths)
```
```
### Data Modeling
#### Feature Engineering & Selection
#### Are there new features you could create based on existing features that might be helpful?
Come up with at least one possible new feature that is a calculation from 2+ existing variables.
#### Use statsmodels ordinary least squares to assess the importance of each feature with respect to the target
#### Summarize your conclusions and next steps from your analysis in above.
What will you try when developing your model? (which features to use/not use/etc)
#### Train & Test Model
#### Fit, predict (in-sample) & evaluate multiple linear regression models to find the best one.
- Make any changes as necessary to improve your model.
- Identify the best model after all training
- Compare the models by plotting the predictions of each model with actual values (see the plot right above section 12 in the 'Regression in Python' lesson).
(the predictions are a 1 x 83 two dimensional matrix, but we want just a single array of predictions. We can use the .ravel method to achieve this.)
`pd.DataFrame({'actual': y_train.final_grade,
'lm1': y_pred_lm1.ravel(),
'lm2': y_pred_lm2.ravel()})\
.melt(id_vars=['actual'], var_name='model', value_name='prediction')\
.pipe((sns.relplot, 'data'), x='actual', y='prediction', hue='model')
plt.plot([60, 100], [60, 100], c='black', ls=':')
plt.ylim(60, 100)
plt.xlim(60, 100)
plt.title('Predicted vs Actual Final Grade')`
#### Predict & evaluate on out-of-sample data (test)
- Plot the residuals from your out-of-sample predictions.
- Summarize your expectations about how you estimate this model will perform in production.
| github_jupyter |
[](https://colab.research.google.com/github/tulasiram58827/craft_tflite/blob/main/colabs/CRAFT_TFLITE.ipynb)
This notebook converts a TensorFlow saved model to its TFLite version.
## SetUp
```
"""
Authors
* Tulasi Ram
"""
!pip install onnx
!pip install git+https://github.com/onnx/onnx-tensorflow.git
import onnx
import numpy as np
import os
import cv2
import time
import torch
from torch.autograd import Variable
from skimage import io
import tensorflow as tf
from onnx_tf.backend import prepare
from google.colab.patches import cv2_imshow
tf.__version__
```
## Helper functions
```
def loadImage(img_file):
    """Read an image from disk and return it as an RGB numpy array.

    Multi-frame containers are reduced to their first frame, grayscale
    images are expanded to 3 channels, and an alpha channel (RGBA input)
    is dropped.
    """
    image = io.imread(img_file)  # skimage reads in RGB order
    if image.shape[0] == 2:
        # multi-frame image: keep only the first frame
        image = image[0]
    if image.ndim == 2:
        # single-channel input -> replicate into 3 RGB channels
        image = cv2.cvtColor(image, cv2.COLOR_GRAY2RGB)
    if image.shape[2] == 4:
        # discard the alpha channel
        image = image[:, :, :3]
    return np.array(image)
def normalizeMeanVariance(in_img, mean=(0.485, 0.456, 0.406), variance=(0.229, 0.224, 0.225)):
    """Channel-wise standardization of an RGB image on the 0-255 scale.

    Subtracts `mean` and divides by `variance`, both expressed as fractions
    of 255 (ImageNet-style preprocessing). Returns a new float32 array; the
    input array is left unmodified. Input should be in RGB order.
    """
    shift = np.array([m * 255.0 for m in mean], dtype=np.float32)
    scale = np.array([v * 255.0 for v in variance], dtype=np.float32)
    normalized = in_img.copy().astype(np.float32)
    normalized -= shift
    normalized /= scale
    return normalized
def denormalizeMeanVariance(in_img, mean=(0.485, 0.456, 0.406), variance=(0.229, 0.224, 0.225)):
    """Invert normalizeMeanVariance: map a standardized image back to uint8.

    Multiplies channel-wise by `variance`, adds `mean`, rescales to the
    0-255 range and clips into the valid byte range. Returns a new uint8
    array; the input array is left unmodified. Input should be RGB order.
    """
    restored = in_img.copy()
    restored *= variance
    restored += mean
    restored *= 255.0
    return np.clip(restored, 0, 255).astype(np.uint8)
def resize_aspect_ratio(img, square_size, interpolation, mag_ratio=1):
    """Resize `img` preserving its aspect ratio, padding to multiples of 32.

    The longer side is scaled by `mag_ratio`, capped at `square_size`.
    The resized image is placed in the top-left corner of a zero-filled
    float32 canvas whose sides are rounded up to the next multiple of 32
    (the network's stride requirement).

    Returns:
        (canvas, ratio, size_heatmap): the padded canvas, the applied
        scale factor, and half the padded (width, height) -- the expected
        resolution of the CRAFT score maps.
    """
    height, width, channel = img.shape

    # scale so the longer side becomes mag_ratio * max(h, w), capped at square_size
    longer_side = max(height, width)
    target_size = min(mag_ratio * longer_side, square_size)

    ratio = target_size / longer_side
    target_h = int(height * ratio)
    target_w = int(width * ratio)
    proc = cv2.resize(img, (target_w, target_h), interpolation=interpolation)

    # round each side up to the next multiple of 32
    target_h32 = target_h if target_h % 32 == 0 else target_h + (32 - target_h % 32)
    target_w32 = target_w if target_w % 32 == 0 else target_w + (32 - target_w % 32)

    # paste the resized image onto a zero canvas, top-left aligned
    resized = np.zeros((target_h32, target_w32, channel), dtype=np.float32)
    resized[:target_h, :target_w, :] = proc

    size_heatmap = (int(target_w32 / 2), int(target_h32 / 2))
    return resized, ratio, size_heatmap
def saveResult(img_file, img, boxes, dirname='./result/', verticals=None, texts=None):
    """Save a text-detection result image for one input image.

    Draws the axis-aligned bounding rectangle of each detected polygon on a
    copy of `img` and writes it to `dirname/res_<name>.jpg`. A companion
    `res_<name>.txt` file is created (empty -- per-box coordinate writing
    was removed from this version but the file is kept so downstream
    tooling that expects it still works).

    Args:
        img_file (str): image file name (used to derive output names).
        img (array): raw image content (BGR, as passed to cv2).
        boxes (array): detected boxes/polygons, one (N, 2) point set each.
        dirname (str): output directory; created if missing.
        verticals (list|None): optional per-box vertical-text flags.
        texts (list|None): optional recognized text to overlay per box.

    Returns:
        None
    """
    img = np.array(img)

    # output paths derive from the input image's base name
    filename, file_ext = os.path.splitext(os.path.basename(img_file))
    res_file = dirname + "res_" + filename + '.txt'
    res_img_file = dirname + "res_" + filename + '.jpg'

    if not os.path.isdir(dirname):
        os.mkdir(dirname)

    with open(res_file, 'w') as f:
        for i, box in enumerate(boxes):
            # flatten the polygon, then draw its axis-aligned bounding box
            poly = np.array(box).astype(np.int32).reshape((-1))
            poly = poly.reshape(-1, 2)
            min_co = tuple(np.min(poly, axis=0))
            max_co = tuple(np.max(poly, axis=0))
            cv2.rectangle(img, min_co, max_co, (0, 0, 255), 2)

            ptColor = (0, 255, 255)
            if verticals is not None:
                if verticals[i]:
                    ptColor = (255, 0, 0)

            if texts is not None:
                # draw the text twice (offset black + yellow) for a shadow effect
                font = cv2.FONT_HERSHEY_SIMPLEX
                font_scale = 0.5
                cv2.putText(img, "{}".format(texts[i]), (poly[0][0]+1, poly[0][1]+1), font, font_scale, (0, 0, 0), thickness=1)
                cv2.putText(img, "{}".format(texts[i]), tuple(poly[0]), font, font_scale, (0, 255, 255), thickness=1)

    # Save result image
    cv2.imwrite(res_img_file, img)
""" auxilary functions """
# unwarp corodinates
def warpCoord(Minv, pt):
    """Apply the 3x3 inverse perspective transform `Minv` to a 2D point.

    The point is lifted to homogeneous coordinates, transformed, then
    projected back by dividing through the homogeneous component.
    """
    homogeneous = np.matmul(Minv, (pt[0], pt[1], 1))
    x, y, w = homogeneous
    return np.array([x / w, y / w])
""" end of auxilary functions """
def getDetBoxes_core(textmap, linkmap, text_threshold, link_threshold, low_text):
    """Extract word-level rotated boxes from the CRAFT score maps.

    Args:
        textmap: per-pixel character-region score map (2D float array).
        linkmap: per-pixel affinity (link) score map, same shape as textmap.
        text_threshold: minimum peak text score for a component to be kept.
        link_threshold: binarization threshold for the link map.
        low_text: binarization threshold for the text map.

    Returns:
        (det, labels, mapper): list of 4-point boxes (float32, clockwise,
        starting at the top-left-most corner), the connected-component
        label image, and the component label id behind each box.
    """
    # prepare data
    linkmap = linkmap.copy()
    textmap = textmap.copy()
    img_h, img_w = textmap.shape

    """ labeling method """
    # binarize both maps, then label connected components of their union
    ret, text_score = cv2.threshold(textmap, low_text, 1, 0)
    ret, link_score = cv2.threshold(linkmap, link_threshold, 1, 0)

    text_score_comb = np.clip(text_score + link_score, 0, 1)
    nLabels, labels, stats, centroids = cv2.connectedComponentsWithStats(text_score_comb.astype(np.uint8), connectivity=4)

    det = []
    mapper = []
    for k in range(1,nLabels):  # label 0 is background
        # size filtering
        size = stats[k, cv2.CC_STAT_AREA]
        if size < 10: continue

        # thresholding: keep only components with a confident text peak
        if np.max(textmap[labels==k]) < text_threshold: continue

        # make segmentation map
        segmap = np.zeros(textmap.shape, dtype=np.uint8)
        segmap[labels==k] = 255
        segmap[np.logical_and(link_score==1, text_score==0)] = 0 # remove link area
        x, y = stats[k, cv2.CC_STAT_LEFT], stats[k, cv2.CC_STAT_TOP]
        w, h = stats[k, cv2.CC_STAT_WIDTH], stats[k, cv2.CC_STAT_HEIGHT]
        # dilate the component so the box covers the full character extents
        # (NOTE(review): uses `math`, which this notebook imports in a later
        # cell -- ensure `import math` ran before calling this function)
        niter = int(math.sqrt(size * min(w, h) / (w * h)) * 2)
        sx, ex, sy, ey = x - niter, x + w + niter + 1, y - niter, y + h + niter + 1
        # boundary check
        if sx < 0 : sx = 0
        if sy < 0 : sy = 0
        if ex >= img_w: ex = img_w
        if ey >= img_h: ey = img_h
        kernel = cv2.getStructuringElement(cv2.MORPH_RECT,(1 + niter, 1 + niter))
        segmap[sy:ey, sx:ex] = cv2.dilate(segmap[sy:ey, sx:ex], kernel)

        # make box: minimum-area rotated rectangle around segment pixels
        np_contours = np.roll(np.array(np.where(segmap!=0)),1,axis=0).transpose().reshape(-1,2)
        rectangle = cv2.minAreaRect(np_contours)
        box = cv2.boxPoints(rectangle)

        # align diamond-shape: nearly-square boxes snap to the axis-aligned rect
        w, h = np.linalg.norm(box[0] - box[1]), np.linalg.norm(box[1] - box[2])
        box_ratio = max(w, h) / (min(w, h) + 1e-5)
        if abs(1 - box_ratio) <= 0.1:
            l, r = min(np_contours[:,0]), max(np_contours[:,0])
            t, b = min(np_contours[:,1]), max(np_contours[:,1])
            box = np.array([[l, t], [r, t], [r, b], [l, b]], dtype=np.float32)

        # make clock-wise order, starting from the corner nearest the origin
        startidx = box.sum(axis=1).argmin()
        box = np.roll(box, 4-startidx, 0)
        box = np.array(box)

        det.append(box)
        mapper.append(k)

    return det, labels, mapper
def getPoly_core(boxes, labels, mapper, linkmap):
    """Refine each rectangular box into a curved polygon along the text line.

    For every box: warp its connected component into an axis-aligned frame,
    sample vertical pivot lines along the word, estimate local character
    height and slope at each pivot, extend the first/last pivots outward to
    cover the full character heatmap, then warp the polygon back to image
    space. Boxes that cannot be reliably polygonized yield None.

    Args:
        boxes: 4-point boxes from getDetBoxes_core.
        labels: connected-component label image from getDetBoxes_core.
        mapper: component label id behind each box.
        linkmap: affinity score map (kept for interface parity; unused here).

    Returns:
        List with one polygon (Nx2 float array) or None per input box.
    """
    # configs
    num_cp = 5              # number of pivot (control) points per word
    max_len_ratio = 0.7     # reject if a column span is close to the box height
    expand_ratio = 1.45     # vertical expansion applied to character height
    max_r = 2.0             # maximum search radius for the edge points
    step_r = 0.2            # search radius step

    polys = []
    for k, box in enumerate(boxes):
        # size filter for small instance
        w, h = int(np.linalg.norm(box[0] - box[1]) + 1), int(np.linalg.norm(box[1] - box[2]) + 1)
        if w < 10 or h < 10:
            polys.append(None); continue

        # warp image: map the rotated box into a w x h axis-aligned frame
        tar = np.float32([[0,0],[w,0],[w,h],[0,h]])
        M = cv2.getPerspectiveTransform(box, tar)
        word_label = cv2.warpPerspective(labels, M, (w, h), flags=cv2.INTER_NEAREST)
        try:
            Minv = np.linalg.inv(M)
        except:
            # singular transform: cannot map the polygon back, skip
            polys.append(None); continue

        # binarization for selected label only
        cur_label = mapper[k]
        word_label[word_label != cur_label] = 0
        word_label[word_label > 0] = 1

        """ Polygon generation """
        # find top/bottom contours: per column, first and last labeled row
        cp = []
        max_len = -1
        for i in range(w):
            region = np.where(word_label[:,i] != 0)[0]
            if len(region) < 2 : continue
            cp.append((i, region[0], region[-1]))
            length = region[-1] - region[0] + 1
            if length > max_len: max_len = length

        # pass if max_len is similar to h (component fills the box vertically)
        if h * max_len_ratio < max_len:
            polys.append(None); continue

        # get pivot points with fixed length: split the word into
        # 2*num_cp+1 vertical segments; odd segments carry the pivots
        tot_seg = num_cp * 2 + 1
        seg_w = w / tot_seg     # segment width
        pp = [None] * num_cp    # init pivot points
        cp_section = [[0, 0]] * tot_seg
        seg_height = [0] * num_cp
        seg_num = 0
        num_sec = 0
        prev_h = -1
        for i in range(0,len(cp)):
            (x, sy, ey) = cp[i]
            if (seg_num + 1) * seg_w <= x and seg_num <= tot_seg:
                # average previous segment
                if num_sec == 0: break
                cp_section[seg_num] = [cp_section[seg_num][0] / num_sec, cp_section[seg_num][1] / num_sec]
                num_sec = 0

                # reset variables
                seg_num += 1
                prev_h = -1

            # accumulate center points
            cy = (sy + ey) * 0.5
            cur_h = ey - sy + 1
            cp_section[seg_num] = [cp_section[seg_num][0] + x, cp_section[seg_num][1] + cy]
            num_sec += 1

            if seg_num % 2 == 0: continue # No polygon area

            # within an odd segment, keep the tallest column as the pivot
            if prev_h < cur_h:
                pp[int((seg_num - 1)/2)] = (x, cy)
                seg_height[int((seg_num - 1)/2)] = cur_h
                prev_h = cur_h

        # processing last segment
        if num_sec != 0:
            cp_section[-1] = [cp_section[-1][0] / num_sec, cp_section[-1][1] / num_sec]

        # pass if num of pivots is not sufficient or segment width is smaller than character height
        if None in pp or seg_w < np.max(seg_height) * 0.25:
            polys.append(None); continue

        # calc median maximum of pivot points (half the expanded char height)
        half_char_h = np.median(seg_height) * expand_ratio / 2

        # calc gradient and apply to make slanted vertical pivots
        new_pp = []
        for i, (x, cy) in enumerate(pp):
            dx = cp_section[i * 2 + 2][0] - cp_section[i * 2][0]
            dy = cp_section[i * 2 + 2][1] - cp_section[i * 2][1]
            if dx == 0:     # gradient is zero: pivot is perfectly vertical
                new_pp.append([x, cy - half_char_h, x, cy + half_char_h])
                continue
            rad = - math.atan2(dy, dx)
            c, s = half_char_h * math.cos(rad), half_char_h * math.sin(rad)
            new_pp.append([x - s, cy - c, x + s, cy + c])

        # get edge points to cover character heatmaps: push the first/last
        # pivot outward until the probe line no longer crosses the component
        isSppFound, isEppFound = False, False
        grad_s = (pp[1][1] - pp[0][1]) / (pp[1][0] - pp[0][0]) + (pp[2][1] - pp[1][1]) / (pp[2][0] - pp[1][0])
        grad_e = (pp[-2][1] - pp[-1][1]) / (pp[-2][0] - pp[-1][0]) + (pp[-3][1] - pp[-2][1]) / (pp[-3][0] - pp[-2][0])
        for r in np.arange(0.5, max_r, step_r):
            dx = 2 * half_char_h * r
            if not isSppFound:
                line_img = np.zeros(word_label.shape, dtype=np.uint8)
                dy = grad_s * dx
                p = np.array(new_pp[0]) - np.array([dx, dy, dx, dy])
                cv2.line(line_img, (int(p[0]), int(p[1])), (int(p[2]), int(p[3])), 1, thickness=1)
                if np.sum(np.logical_and(word_label, line_img)) == 0 or r + 2 * step_r >= max_r:
                    spp = p
                    isSppFound = True
            if not isEppFound:
                line_img = np.zeros(word_label.shape, dtype=np.uint8)
                dy = grad_e * dx
                p = np.array(new_pp[-1]) + np.array([dx, dy, dx, dy])
                cv2.line(line_img, (int(p[0]), int(p[1])), (int(p[2]), int(p[3])), 1, thickness=1)
                if np.sum(np.logical_and(word_label, line_img)) == 0 or r + 2 * step_r >= max_r:
                    epp = p
                    isEppFound = True
            if isSppFound and isEppFound:
                break

        # pass if boundary of polygon is not found
        if not (isSppFound and isEppFound):
            polys.append(None); continue

        # make final polygon: top edge left-to-right, then bottom edge
        # right-to-left, all warped back to image coordinates via Minv
        poly = []
        poly.append(warpCoord(Minv, (spp[0], spp[1])))
        for p in new_pp:
            poly.append(warpCoord(Minv, (p[0], p[1])))
        poly.append(warpCoord(Minv, (epp[0], epp[1])))
        poly.append(warpCoord(Minv, (epp[2], epp[3])))
        for p in reversed(new_pp):
            poly.append(warpCoord(Minv, (p[2], p[3])))
        poly.append(warpCoord(Minv, (spp[2], spp[3])))

        # add to final result
        polys.append(np.array(poly))

    return polys
def getDetBoxes(textmap, linkmap, text_threshold, link_threshold, low_text, poly=False):
    """Run CRAFT post-processing: box extraction plus optional polygons.

    Returns (boxes, polys). `polys` has one entry per box; each entry is
    None unless `poly=True` and polygonization succeeds for that box.
    """
    boxes, labels, mapper = getDetBoxes_core(
        textmap, linkmap, text_threshold, link_threshold, low_text)
    polys = getPoly_core(boxes, labels, mapper, linkmap) if poly else [None] * len(boxes)
    return boxes, polys
def adjustResultCoordinates(polys, ratio_w, ratio_h, ratio_net = 2):
    """Rescale detection coordinates from heatmap space back to the input image.

    The score maps are `ratio_net` times smaller than the network input,
    which was itself resized by factors 1/ratio_w and 1/ratio_h; every
    non-None polygon is multiplied by the combined factor in place.
    """
    if not len(polys):
        return polys
    polys = np.array(polys)
    scale = (ratio_w * ratio_net, ratio_h * ratio_net)
    for idx in range(len(polys)):
        if polys[idx] is not None:
            polys[idx] *= scale
    return polys
```
### Export to Tensorflow Graph
```
# Export model to tensorflow
!wget https://github.com/tulasiram58827/craft_tflite/blob/main/models/craft.onnx?raw=true -O craft.onnx
onnx_model = onnx.load('/content/craft.onnx')
tf_rep = prepare(onnx_model)
tf_rep.export_graph('craft.pb')
print("Model converted to tensorflow graph succesfully.")
```
### Convert to TFLite
```
!gdown https://drive.google.com/uc?id=1HvujMMNCG49esPpZOHEnPFY6k-khbrmZ
!unzip /content/image_files.zip
dataset_path = '/content/image_files/'
loaded = tf.saved_model.load('/content/craft.pb')
concrete_func = loaded.signatures[tf.saved_model.DEFAULT_SERVING_SIGNATURE_DEF_KEY]
concrete_func.inputs[0].set_shape([None, 3, 800, 600])
converter = tf.lite.TFLiteConverter.from_concrete_functions([concrete_func])
converter.optimizations = [tf.lite.Optimize.DEFAULT]
def representative_data_gen():
    """Yield calibration samples for TFLite int8 quantization.

    Reads up to 10 images from the notebook-global `dataset_path`, applies
    the same preprocessing as inference (resize to 600x800, mean/variance
    normalization, HWC -> NCHW, batch dimension), and yields each tensor
    wrapped in a single-element list as the TFLite converter expects.
    """
    for file in os.listdir(dataset_path)[:10]:
        file_path = dataset_path+file
        image = loadImage(file_path)
        image = cv2.resize(image, dsize=(600, 800), interpolation=cv2.INTER_LINEAR)
        # NOTE(review): img_resized and the ratios are computed but unused here
        img_resized, target_ratio, size_heatmap = resize_aspect_ratio(image, 800, interpolation=cv2.INTER_LINEAR, mag_ratio=1)
        ratio_h = ratio_w = 1 / target_ratio

        # preprocessing
        x = normalizeMeanVariance(image)
        x = torch.from_numpy(x).permute(2, 0, 1)    # [h, w, c] to [c, h, w]
        x = Variable(x.unsqueeze(0))                # [c, h, w] to [b, c, h, w]
        x = x.cpu().detach().numpy()
        yield [x]
quantization = "float16" #@param ["dr", "int8", "float16"]
if quantization == 'float16':
converter.target_spec.supported_types = [tf.float16]
elif quantization == 'int8':
converter.representative_dataset = representative_data_gen
converter.target_spec.supported_ops = [
tf.lite.OpsSet.SELECT_TF_OPS # enable TensorFlow ops.
]
tflite_model = converter.convert()
open(f'craft_model_800_{quantization}.tflite', 'wb').write(tflite_model)
print("Converted to tensorflow lite succesfully.")
!ls -lh craft_model_800_{quantization}.tflite
!ls -lh craft.pb/saved_model.pb
```
### TFLite Inference
```
def run_tflite_model(input_data):
    """Run one forward pass of the exported CRAFT TFLite model.

    Loads the .tflite file named after the notebook-global `quantization`
    setting, feeds `input_data` (NCHW float32, shaped as fixed during
    conversion) and returns the two model outputs: the score map and the
    intermediate feature tensor.
    """
    # Load the TFLite model and allocate tensors.
    interpreter = tf.lite.Interpreter(model_path=f"craft_model_800_{quantization}.tflite")
    interpreter.allocate_tensors()

    # Look up the model's input/output tensor metadata.
    input_details = interpreter.get_input_details()
    output_details = interpreter.get_output_details()
    input_shape = input_details[0]['shape']  # kept for reference/debugging

    # Bind the input tensor and execute the graph.
    interpreter.set_tensor(input_details[0]['index'], input_data)
    interpreter.invoke()

    # get_tensor() copies data out of the interpreter's buffers
    # (tensor() would return a pointer instead).
    y = interpreter.get_tensor(output_details[0]['index'])
    feature = interpreter.get_tensor(output_details[1]['index'])
    return y, feature
```
### Processing the inference results.
```
def cvt2HeatmapImg(img):
    """Render a score map with values in [0, 1] as a jet-colormap BGR image."""
    scaled = (np.clip(img, 0, 1) * 255).astype(np.uint8)
    return cv2.applyColorMap(scaled, cv2.COLORMAP_JET)
import math
#Use any sample image.
image_path = '/content/image_files/000.jpg'
start_time = time.time()
image = loadImage(image_path)
image = cv2.resize(image, dsize=(600, 800), interpolation=cv2.INTER_LINEAR)
img_resized, target_ratio, size_heatmap = resize_aspect_ratio(image, 800, interpolation=cv2.INTER_LINEAR, mag_ratio=1)
ratio_h = ratio_w = 1 / target_ratio
# preprocessing
x = normalizeMeanVariance(image)
x = torch.from_numpy(x).permute(2, 0, 1) # [h, w, c] to [c, h, w]
x = Variable(x.unsqueeze(0)) # [c, h, w] to [b, c, h, w]
# forward pass
x = x.cpu().detach().numpy()
y, feature = run_tflite_model(x)
y = torch.from_numpy(y)
feature = torch.from_numpy(feature)
# make score and link map
score_text = y[0,:,:,0].cpu().data.numpy()
score_link = y[0,:,:,1].cpu().data.numpy()
text_threshold = 0.7
link_threshold = 0.4
low_text = 0.4
poly = False
# Post-processing
boxes, polys = getDetBoxes(score_text, score_link, text_threshold, link_threshold, low_text, poly)
# coordinate adjustment
boxes = adjustResultCoordinates(boxes, ratio_w, ratio_h)
polys = adjustResultCoordinates(polys, ratio_w, ratio_h)
for k in range(len(polys)):
if polys[k] is None: polys[k] = boxes[k]
# render results (optional)
render_img = score_text.copy()
render_img = np.hstack((render_img, score_link))
ret_score_text = cvt2HeatmapImg(render_img)
saveResult(image_path, image[:,:,::-1], polys, dirname='output')
filename, file_ext = os.path.splitext(os.path.basename(image_path))
print("Total time taken to run CRAFT tflite model......", time.time()-start_time)
image = cv2.imread('/content/outputres_000.jpg')
cv2_imshow(image)
```
| github_jupyter |
# SEIRS+ Community Testing, Tracing, & Isolation Demo
**This notebook provides a demonstration of the functionality of the Extended SEIRS+ Network Model and offers a sandbox for easily changing simulation parameters and scenarios.**
For a more thorough walkthrough of the model and use of this package, refer to the README.
### Installing and Importing the model code
All of the code needed to run the model is imported from the ```models``` module of this package.
#### Install the package using ```pip```
The package can be installed on your machine by entering this in the command line:
```sudo pip install seirsplus```
Then, the ```models``` module can be imported into your scripts as shown here:
```
from seirsplus.models import *
import networkx
```
#### *Alternatively, manually copy the code to your machine*
*You can use the model code without installing a package by copying the ```models.py``` module file to a directory on your machine. In this case, the easiest way to use the module is to place your scripts in the same directory as the module, and import the module as shown here:*
```python
from models import *
```
### Generating interaction networks
This model simulates SEIRS epidemic dynamics for populations with a structured interaction network (as opposed to standard deterministic SIR/SEIR/SEIRS models, which assume uniform mixing of the population). As such, a graph specifying the interaction network for the population must be specified, where each node represents an individual in the population and edges connect individuals who have regular interactions.
The interaction network can be specified by a ```networkx``` Graph object or a 2D numpy array representing the adjacency matrix, either of which can be defined and generated by any method.
*Here, we use a ```custom_exponential_graph()``` generation function included in this package, which generates power-law graphs that have degree distributions with two exponential tails. For more information on this custom graph type and its generation, see the README.*
**_Note:_** *Simulation time increases with network size. Small networks simulate quickly, but have more stochastic volatility. Networks with ~10,000 are large enough to produce per-capita population dynamics that are generally consistent with those of larger networks, but small enough to simulate quickly. We recommend using networks with ~10,000 nodes for prototyping parameters and scenarios, which can then be run on larger networks if more precision is required (for more on this, see README).*
```
numNodes = 10000
baseGraph = networkx.barabasi_albert_graph(n=numNodes, m=9)
# Baseline normal interactions:
G_normal = custom_exponential_graph(baseGraph, scale=100)
plot_degree_distn(G_normal, max_degree=40)
```
Epidemic scenarios of interest often involve interaction networks that change in time. Multiple interaction networks can be defined and used at different times in the model simulation, as will be shown below.
*Here we generate a graph representing interactions during social distancing, where each individual drops some portion of their normal interactions with others. Again, we use the ```custom_exponential_graph()``` to generate this graph; for more information, see the README.*
```
# Social distancing interactions:
G_distancing = custom_exponential_graph(baseGraph, scale=10)
plot_degree_distn(G_distancing, max_degree=40)
```
This SEIRS+ model features dynamics corresponding to testing individuals for the disease and moving individuals with detected infection into a state where their rate of recovery, mortality, etc may be different. In addition, given that this model considers individuals in an interaction network, a separate graph defining the interactions for individuals with detected cases can be specified.
*Here we generate a graph representing the interactions that individuals have when they test positive for the disease. In this case, a significant portion of each individual's normal interaction edges are removed from the graph, as if the individual is quarantined upon detection of infection. Again, we use the ```custom_exponential_graph()``` to generate this graph; for more information, see the README.*
For more information on how testing, contact tracing, and detected cases are handled in this model, see the README.
```
# Quarantine interactions:
G_quarantine = custom_exponential_graph(baseGraph, scale=5)
plot_degree_distn(G_quarantine, max_degree=40)
```
### Initializing the model parameters
All model parameter values, including the normal and quarantine interaction networks, are set in the call to the ```SEIRSNetworkModel``` constructor. The normal interaction network ```G``` and the basic SEIR parameters ```beta```, ```sigma```, and ```gamma``` are the only required arguments. All other arguments represent parameters for optional extended model dynamics; these optional parameters take default values that turn off their corresponding dynamics when not provided in the constructor. For clarity and ease of customization in this notebook, all available model parameters are listed below.
For more information on parameter meanings, see the README.
*The parameter values shown correspond to rough estimates of parameter values for the COVID-19 epidemic.*
```
model = SEIRSNetworkModel(G =G_normal,
beta =0.155,
sigma =1/5.2,
gamma =1/12.39,
mu_I =0.0004,
mu_0 =0,
nu =0,
xi =0,
p =0.5,
Q =G_quarantine,
beta_D =0.155,
sigma_D =1/5.2,
gamma_D =1/12.39,
mu_D =0.0004,
theta_E =0,
theta_I =0,
phi_E =0,
phi_I =0,
psi_E =1.0,
psi_I =1.0,
q =0.5,
initI =numNodes/100,
initE =0,
initD_E =0,
initD_I =0,
initR =0,
initF =0)
```
### Checkpoints
Model parameters can be easily changed during a simulation run using checkpoints. A dictionary holds a list of checkpoint times (```checkpoints['t']```) and lists of new values to assign to various model parameters at each checkpoint time. Any model parameter listed in the model constructor can be updated in this way. Only model parameters that are included in the checkpoints dictionary have their values updated at the checkpoint times; all other parameters keep their pre-existing values.
*The checkpoints shown here correspond to starting social distancing and testing at time ```t=20``` (the graph ```G``` is updated to ```G_distancing``` and locality parameter ```p``` is decreased to ```0.1```; testing params ```theta_E```, ```theta_I```, ```phi```, and ```phi_I``` are set to non-zero values) and then stopping social distancing at time ```t=100``` (```G``` and ```p``` changed back to their "normal" values; testing params remain non-zero).*
```
checkpoints = {'t': [20, 100],
'G': [G_distancing, G_normal],
'p': [0.1, 0.5],
'theta_E': [0.02, 0.02],
'theta_I': [0.02, 0.02],
'phi_E': [0.2, 0.2],
'phi_I': [0.2, 0.2]}
```
### Running the simulation
```
model.run(T=300, checkpoints=checkpoints)
```
### Visualizing the results
The ```SEIRSNetworkModel``` class has a ```plot()``` convenience function for plotting simulation results on a matplotlib axis. This function generates a line plot of the frequency of each model state in the population by default, but there are many optional arguments that can be used to customize the plot.
The ```SEIRSNetworkModel``` class also has convenience functions for generating a full figure out of model simulation results (optional arguments can be provided to customize the plots generated by these functions).
- ```figure_basic()``` calls the ```plot()``` function with default parameters to generate a line plot of the frequency of each state in the population.
- ```figure_infections()``` calls the ```plot()``` function with default parameters to generate a stacked area plot of the frequency of only the infection states ($E$, $I$, $D_E$, $D_I$) in the population.
For more information on the built-in plotting functions, see the README.
```
model.figure_infections(vlines=checkpoints['t'], ylim=0.15)
```
#### Reference simulation visualizations
We can also visualize the results of other simulation(s) as a reference for comparison of our main simulation.
Here we simulate a model where no distancing or testing takes place, so that we can compare the effects of these interventions:
```
ref_model = SEIRSNetworkModel(G=G_normal, beta=0.155, sigma=1/5.2, gamma=1/12.39, mu_I=0.0004, p=0.5,
Q=G_quarantine, beta_D=0.155, sigma_D=1/5.2, gamma_D=1/12.39, mu_D=0.0004,
theta_E=0, theta_I=0, phi_E=0, phi_I=0, psi_E=1.0, psi_I=1.0, q=0.5,
initI=numNodes/100)
ref_model.run(T=300)
```
Now we can visualize our main simulation together with this reference simulation by passing the model object of the reference simulation to the appropriate figure function argument (note: a second reference simulation could also be visualized by passing it to the ```dashed_reference_results``` argument):
```
model.figure_infections(vlines=checkpoints['t'], ylim=0.2, shaded_reference_results=ref_model)
```
As further demonstration, we might also wish to compare the results of these network model simulations to a deterministic model simulation of the same SEIRS parameters (with no interventions in this case):
```
ref_model_determ = SEIRSModel(beta=0.147, sigma=1/5.2, gamma=1/12.39, mu_I=0.0004, initI=100, initN=10000)
ref_model_determ.run(T=300)
model.figure_infections(vlines=checkpoints['t'], ylim=0.2,
shaded_reference_results=ref_model, shaded_reference_label='network: no interventions',
dashed_reference_results=ref_model_determ, dashed_reference_label='deterministic: no interventions')
```
| github_jupyter |
# Assignment 2
## Data
This is a historical dataset on the modern Olympic Games, including all the Games from Athens 1896 to Rio 2016. The data was taken from Kaggle. The `athlete_events` Dataset contains $271,116$ rows and $15$ columns.
**Source:**
Griffin, R, H (2018) 120 years of Olympic history: athletes and results, athlete_events, Found at: https://www.kaggle.com/heesoo37/120-years-of-olympic-history-athletes-and-results#athlete_events.csv
**ATTRIBUTES:**
**athlete_events.csv**
| Column Name | Data Type | Description/Notes |
|:----:|:----:|:----|
| ID | integer | Unique number for each athlete |
| Name | string | Athlete’s name |
| Sex | string | M or F |
| Age | integer | |
| Height | integer | In centimeters |
| Weight | integer | In kilograms |
| Team | string | Team name |
| NOC | string | National Olympic Committee, 3 letter code (Matches with `NOC` from noc_regions.csv) |
| Games | string | Year and season |
| Year | integer | |
| Season | string | Summer or Winter |
| City | string | Host city |
| Sport | string | |
| Event | string | |
| Medal | string | Gold, Silver, Bronze, or NA |
Download athlete_events.csv from the link above and load it into a DataFrame called `athlete_events`:
```
# Your answer goes here
```
Use `info()` method on this DataFrame to get a sense of the data:
```
# Your answer goes here
```
## Question 1
Clearly not every athlete receives a medal in the Olympics. How many records have a non-null value for `Medal` field? In other words, how many medals were given according to this dataset?
```
# Your answer goes here
```
Use the `head()` method to inspect the first few rows of the data:
```
# Your answer goes here
```
## Question 2
What is the minimum and maximum `Year`?
```
# Your answer goes here
# Your answer goes here
```
## Question 3
Is the following statement True or False?
> Average age of female athletes who attended the olympic games after 1990 has raised when compared to the era before then.
```
# Your answer goes here
# Your answer goes here
```
## Question 4
How many Gold medals were given to men from 1970 to 2000 (including both years)?
```
# Your answer goes here
```
## Question 5
How many athletes attended the olympic games in 1968?
Hint: You can use the method `.unique()` on any pandas Series to get the unique values. Any column subset of a DataFrame returns a Series object.
```
# Your answer goes here
```
## Question 6
Who won the event "Swimming Men's 100 metres Breaststroke" in 2004? Please note that in the Event description "metres" is spelled in British!
```
# Your answer goes here
```
| github_jupyter |
# Natural Language Processing (NLP)
In this tutorial we'll learn how we can process natural language documents into a form understandable by computers.
Up till now, we've only seen **structured data**. Structured data, are data that are organized in a known *schema*. We know what features there are and in most cases what each feature corresponds to. Some examples of structured data are DataFrames, spreadsheets and relational databases.
In contrast, natural language is, by its nature, **unstructured**, meaning that we don't know the *schema* of the data. Finding a *structure* that describes the data is necessary in order to feed them into a ML model. This will be our main goal during this tutorial.
For NLP we will be using python's [Natural Language Toolkit](http://www.nltk.org/), as well as several functions from scikit-learn. More information about the library's functionality can be found in the [NLTK book](http://www.nltk.org/book/).
```
import numpy as np
import nltk
import matplotlib.pyplot as plt
%matplotlib inline
```
NLTK is a vast library that includes multiple sub-modules. When installed, NLTK is stripped down to its most basic functionality. To access the more complex sub-modules, we need to manually download each one. This can be done through `nltk.download()`. We'll see how this is used in practice.
## Loading corpora
There are three main ways of loading a document corpus in python:
- **Download through a python library**.
If we want to download a well-known dataset, chances are we can do it through a library like scikit-learn or nltk. For instance, the [reuters dataset](https://archive.ics.uci.edu/ml/datasets/reuters-21578+text+categorization+collection), can be retrieved through ntlk:
```python
nltk.download('reuters') # download the dataset
from nltk.corpus import reuters # import it
print('The dataset has a total of {} categories:'.format(len(reuters.categories())))
print(reuters.categories()[:10]) # print first 10 category names
print(reuters.fileids('category name')) # print the file ids for a category, given its name
```
- **Download directly from the internet**.
If a document is available on the internet, we can retrieve it using urllib:
```python
import urllib
url = 'http://www.gutenberg.org/files/2554/2554-0.txt' # document's url
response = urllib.urlopen(url) # send the request
raw = response.read().decode('utf8') # store the document in a string
print(raw[:500]) # print its 500 first characters
```
- **Load from a local file**.
If we've already downloaded the document in our computer, we'll just need to load it. We've seen how do so in a previous tutorial.
```python
with open('mydoc.txt', 'r') as f: # open 'mydoc.txt' for reading
document = ''
for line in f: # read each line of the text file one at a time
document += line # store it to a string
```
## Typical NLP workflow
The goal of any NLP workflow is to **remove** any **unnecessary content** from the document and **structure** it as best as possible.
Suppose we've loaded a document using one of the previous methods.
```
document = """
Commerce Secretary Malcolm Baldrige
said he supported efforts to persuade newly-industrialized
countries (NICS) to revalue currencies that are tied to the
dollar in order to help the United States cut its massive trade
deficit.
"We do need to do something with those currencies or we
will be substituting Japanese products for Taiwanese products,"
or those of other nations with currencies tied to the dollar,
Baldrige told a House banking subcommittee.
The U.S. dollar has declined in value against the Yen and
European currencies, but has changed very little against the
currencies of some developing countries such as South Korea and
Taiwan because they are linked to the value of the dollar.
As a result, efforts to reduce the value of the dollar over
the past year and a half have done little to improve the trade
deficits with those countries.
Baldrige told a House Banking subcommittee that the
Treasury Department was attempting to persuade those countries
to reach agreement with the United States on exchange rates.
"""
```
### String operations
After loading the document, there are a few things we might want to do. First of all, we should convert all the characters to **lowercase**. This is done because the computer treats upper and lower case letters differently (e.g. *Cat* and *cat* are two totally different words for the computer). Afterwards, we'll remove all punctuation from the string and **split** it into words. The goal is to create a list of all the words appearing in the document.
For the first task (i.e. converting the document to lowercase), we can use python's built-in string method `.lower()`.
```
# Lowercase the whole document so that e.g. "Cat" and "cat" map to one term.
document = document.lower()
print(document)
```
For the second task, we'll use string method `.replace()` to remove each punctuation mark. Instead of identifying them manually, they are available in a package called *string*, stored in a variable called `punctuation`.
```
import string

# string.punctuation holds every ASCII punctuation mark.
print(string.punctuation)

# Delete every punctuation mark in a single pass; str.maketrans with three
# arguments builds a table that maps each punctuation character to None.
document = document.translate(str.maketrans('', '', string.punctuation))
print(document)
```
As for the task of splitting the document into words, one thought could be to use the string method `.split()`. However, besides spaces (`' '`), we need to separate tabs (`\t`), new lines (`\n`), etc. An easier way is available through nltk.
Because words are typically referred to as **tokens**, this process is called **tokenization**.
```
# 'punkt' contains the pre-trained models required by the word tokenizer.
nltk.download('punkt') # required for the tokenizer
# Split the document into a list of word tokens (handles spaces, tabs,
# newlines and attached punctuation better than str.split would).
words = nltk.word_tokenize(document)
print(words)
```
### Stopword removal
The next step involves reducing the number of tokens, with a minimal loss of the semantic content of the document. Why this is necessary will become apparent later on. The easiest thing to do is to remove the **stopwords** (i.e. common words that exist in almost every document and don't contribute to its semantic content). Some examples of stopwords are *"and"*, *"this"*, *"that"*, *"me"*, etc.
This can be easily done again through nltk.
```
nltk.download('stopwords')  # download a file containing english stopwords
from nltk.corpus import stopwords

# Build the stopword set once: the original comprehension called
# stopwords.words('english') for EVERY token, re-reading the word list each
# time; a set also gives O(1) membership tests.
english_stopwords = set(stopwords.words('english'))

# Keep only tokens that are not stopwords.
filtered_words = [word for word in words if word not in english_stopwords]
print(filtered_words)
print('length before stopword removal:', len(words))
print('length after stopword removal:', len(filtered_words))
```
This way, we cut the size of the document by around half, without removing any of the words that define its meaning.
## Stemming & Lemmatization
Words in a document may appear in different lexical forms (e.g. *play*, *plays*, *playing*, *played*). While we, as humans, understand that all of these words relay the same meaning, the computer recognizes them as different. To resolve this, there are a couple of techniques (i.e. stemming and lemmatization) that aim to reduce a word to its inflectional form.
**Stemming** refers to the process of reducing words to their *stem*. This is most commonly done by cutting their suffixes.
**Lemmatization** is a procedure where a word is reduced to its *lemma* (or dictionary form).
Both have a similar goal, but approach it differently. Stemming is usually the crude heuristic of chopping off parts of the word, while lemmatization uses a dictionary and attempts to perform a morphological analysis of the word.
```
# required downloads for the stemmer/lemmatizer
nltk.download('wordnet')
nltk.download('rslp')

# we'll be using the wordnet lemmatizer and the porter stemmer
from nltk.stem import WordNetLemmatizer
from nltk.stem.porter import PorterStemmer

wordnet_lemmatizer = WordNetLemmatizer()
porter_stemmer = PorterStemmer()

# Apply lemmatization and stemming to every (stopword-filtered) token.
lem_words = [wordnet_lemmatizer.lemmatize(word) for word in filtered_words]
stem_words = [porter_stemmer.stem(word) for word in filtered_words]

# Print a three-column comparison table: original / stemmed / lemmatized.
print('\n{:<20} {:<20} {:<20}'.format('Original', 'Stemmed', 'Lemmatized'))
print('-'*60)
for original, stemmed, lemmatized in zip(filtered_words, stem_words, lem_words):
    print('{:<20} {:<20} {:<20}'.format(original, stemmed, lemmatized))
```
Feel free to try out different stemmers or lemmatizers and select whichever you prefer.
Once we have completed all linguistic pre-processing steps, we can move on to the next task: to structure the input (i.e. to convert the document to a form recognizable by ML algorithms). The algorithms we've seen up till now could only handle **vectors** (i.e. a series of numbers - each corresponding to the value of a specific feature) as input. Each vector will be derived from the terms of a document and should be able to *describe* it. In this sense, documents with similar subjects should have vectors close to one another. Likewise, unrelated documents' vectors should be far apart.
### Bag of Words
Bag-of-words is probably the simplest method of *vectorizing* documents. First, it finds all unique terms in a corpus of documents; think of this as a list of the *features* of the corpus (where each term is a feature). Then, for each document, it counts the number of appearances and forms a vector from the unique terms of the first step.
Like we did in previous tutorials, we won't be using scikit-learn's implementation, to better understand how it works.
```
doc = 'four three two one four two three four three four' # a random string

# collections.Counter tallies how many times each token occurs.
from collections import Counter

tokens = doc.split()
cnt = Counter(tokens)
print(cnt)
```
To fully understand how bag-of-words works, we need a corpus of documents. Let's create such, and pre-process them using the steps we saw previously:
```
# Our new corpus
documents = ["Lionel Messi is the best football player in the world! Messi plays for Barcelona Football Club. Barcelona Football Club plays in the Spanish Primera Division.",
             "Lionel Messi a football player, playing for Barcelona Football Club, a Spanish football team.",
             "Barcelona is a city in a northern spanish province called Catalonia. It is the largest city in Catalonia and the second most populated spanish city.",
             "Python is a programming language. Python is an object-oriented programming language. Unlike COBOL, Python is a interpreted programming language.",
             "COBOL is a compiled computer programming language designed for business use. This programming language is imperative, procedural and, since 2002, object-oriented. But Python is better."]

# Build the stopword set once instead of re-reading the NLTK word list for
# every single token (the original did the lookup inside the comprehension).
_english_stopwords = set(stopwords.words('english'))

def preprocess_document(document):
    """Lowercase, strip punctuation, tokenize, drop stopwords and stem.

    Parameters: document -- the raw text of a single document.
    Returns: list of stemmed tokens.
    """
    document = document.lower()
    for punct_mark in string.punctuation:
        document = document.replace(punct_mark, '')
    # The document is already lowercase here, so tokenize it directly
    # (the original lowered it a second time, a no-op).
    words = nltk.word_tokenize(document)
    filtered_words = [word for word in words if word not in _english_stopwords]
    stemmed_words = [porter_stemmer.stem(word) for word in filtered_words]
    return stemmed_words

preprocessed_documents = [preprocess_document(doc) for doc in documents]
for doc in preprocessed_documents:
    print(doc)
```
The first step is to create a list of all unique terms in our corpus. This is called a **vocabulary**.
```
# Build the corpus vocabulary: total appearance count of every term.
# Starting from an empty Counter also handles an empty corpus gracefully
# (the original indexed preprocessed_documents[0] and would raise IndexError).
total_counter = Counter()
for doc_terms in preprocessed_documents:
    total_counter += Counter(doc_terms)
print(total_counter)
```
The above depicts our corpus' vocabulary along with the total number of appearances of each term. Terms appearing only **once** in the whole corpus aren't very useful, as they can't be used to see how similar two documents are. In order to reduce the size of the vocabulary we might want to remove terms that appear very few times.
```
threshold = 1
# Keep only terms occurring more than `threshold` times in the whole corpus;
# iteration order of a Counter is insertion order, so the vocabulary order
# is unchanged.
vocabulary = [term for term, count in total_counter.items() if count > threshold]
print(vocabulary)
```
Likewise, terms that appear in almost **every** document aren't very useful and could be removed as well.
We will now complete our bag-of-words model by counting the appearance of each term in every document and placing them all in an array where each row would represent a single document and each column a term. An example of such an array with $N$ documents and $M$ terms, is the following:
$$
X = \left( \begin{array}{cccc}
x_{11} & x_{12} & ... & x_{1M} \\
x_{21} & x_{22} & ... & x_{2M} \\
... & ... & ... & ... \\
x_{N1} & x_{N2} & ... & x_{NM} \\
\end{array} \right)
$$
Each element $x_{ij}$ represents **how many times term $t_j$ appears in document $d_i$**.
```
# N-by-M bag-of-words matrix: one row per document, one column per
# vocabulary term; cell (i, j) counts term j's appearances in document i.
bag_array = np.zeros((len(preprocessed_documents), len(vocabulary)))
document_counters = [Counter(doc) for doc in preprocessed_documents]
for row, counter in enumerate(document_counters):
    for col, term in enumerate(vocabulary):
        bag_array[row, col] += counter[term]
print(vocabulary, '\n')
print(bag_array)
```
Now, we've finally reached a point where we have an array we can feed into a ML algorithm! The above array would typically serve as our input array $X$ and depending on whether or not we have labels for each document, we would either have a supervised or an unsupervised problem. This array from this point on should be treated as any other input to a **structured** ML problem (e.g. we could shuffle it, split it into training/test sets, perform any feature scaling/selection/extraction, etc.).
It's time to discuss a couple of interesting properties of problems involving NLP.
1. The first thing to note is that input arrays when dealing with natural language tend to have a **high dimensionality**, especially when using models like bag-of-words, where they treat each unique term as a new feature. This is the reason why, during this tutorial, almost all our pre-processing operations had the goal of reducing the number of terms in the corpus. Think about it:
- While **lowercasing** our words had the goal of mapping two different words that had the same semantic meaning (e.g. *Cat* and *cat*) to a common term, it had the much desired effect of removing a lot of would-be terms (those that have uppercase characters in them) from the vocabulary.
- The same can be said for **stemming/lemmatization**.
- **Stopword removal** had the goal of removing several terms from the vocabulary, that are too common to convey any semantic information.
- Removing the **least-commonly appearing terms** also had the goal of reducing the vocabulary size.
- As would removing the **most-common terms**, which was discussed but not performed.
2. Another interesting thing to note about bag-of-words arrays, are how **sparse** they are (i.e. they have a lot of zero elements). This becomes even more apparent in large corpora consisting of documents talking about multiple topics. This property should be taken into account when designing the rest of the ML workflow (pre-processing steps, algorithm, etc.).
3. When counting the number of appearances of every term in each document, no type of scaling was performed regarding the size of the document. This would have the effect of *large* documents having higher values (in the bag-of-words array) than *smaller* ones, which could be an issue if the corpus contains documents of highly variable length. This third point will lead us into our next technique which aims at alleviating this issue.
## TF-IDF
For a better weight assignment to each term, we'll use **TF-IDF** (Term Frequency - Inverse Document Frequency).
As implied by its name, tf-idf consists of 2 terms. The first one is **Term Frequency (TF)**:
$$ tf(t_j, d_i) = \frac{f(t_j, d_i)}{\sum_{t} f(t, d_i)}$$
Where $t_j$ is a term in document $d_i$. *TF* essentially represents the frequency with which a term appears in a document. Terms with a large frequency are assigned a larger weight (and as a result are more important) than terms with a small frequency. This alleviates the problem of documents of varying length, as each term is *normalized* with the total number of terms in the document it appears in.
```
print(bag_array.sum(axis=1), '\n')  # number of terms per document

# Term frequency: appearances of a term divided by the document's length.
# keepdims=True keeps the row sums as a column vector so a single vectorized
# division replaces the Python loop (which also recomputed the per-row sums
# on every iteration).
freq_array = bag_array / bag_array.sum(axis=1, keepdims=True)
print(np.round(freq_array, 3))
```
The second term in tf-idf is the **Inverse Document Frequency**:
$$ idf(t_j) = log \left( \frac{N}{df(t_j)} \right)$$
Where $N$ is the number of documents and $df(t_j)$ is the number of documents where term $t_j$ appears in. *IDF* is a measure of the **information** that each term conveys. If a term appears in every document, then that term's *IDF* would become equal to $0$ (the fraction would have a value of $1$). Likewise, if a term appears only in a few of the documents, its *IDF* would be relatively large.
```
# Document frequency of each term: np.count_nonzero with axis=0 counts, for
# every column (term), in how many rows (documents) it is non-zero.
# NOTE(review): despite its name, `non_zero_elements_per_row` is a
# per-COLUMN count -- consider renaming if this cell is reworked.
non_zero_elements_per_row = np.count_nonzero(bag_array, axis=0)
# idf(t) = log10(number of documents / document frequency of t);
# len(bag_array) is the number of documents (rows).
idf = np.log10(float(len(bag_array))/non_zero_elements_per_row)
print(np.round(idf, 3))
```
To fully grasp how exactly the denominator is calculated, you can look at an equivalent code:
```python
non_zero_elements_per_row = np.zeros((len(bag_array[0])))
for i in range(len(bag_array)):
for j in range(len(bag_array[0])):
if bag_array[i,j] > 0.0:
non_zero_elements_per_row[j] += 1
```
Finally, we can calculate tf-idf as the product of its two terms:
$$tf{\text -}idf(t_j, d_i) = tf(t_j, d_i) \cdot idf(t_j)$$
```
# tf-idf is the element-wise product of the tf matrix with the idf vector;
# the 1-D idf broadcasts across the documents (rows) axis.
tf_idf = freq_array * idf # tf-idf is the product of tf with idf
print(np.round(tf_idf, 3))
```
Let's now try to see if our model makes sense.
We'll calculate the distances amongst each of the five documents, to see which ones are close to one another.
```
# Pairwise L1 (manhattan) distances between the documents' tf-idf vectors.
n_docs = len(tf_idf)
distances = np.zeros((n_docs, n_docs))
for i in range(n_docs):
    for j in range(n_docs):
        distances[i, j] = np.abs(tf_idf[i] - tf_idf[j]).sum()
print(distances)
```
As expected, the first two documents are close to one another, as their distance is small (around $0.12$). The same can be said for the last two documents (distance of around $0.1$). In contrast, all unrelated documents have a distance larger than $0.7$.
The tf-idf array, like the bag-of-words array before, can be input in any ML workflow.
We'll attempt to do so now. Because we don't have any labels available for the documents, we are forced to take an unsupervised approach. We'll use a k-means algorithm we discussed in the previous tutorial, which will aim at separating the documents into clusters. Since we are aware of the context of the documents, we can empirically evaluate the results. We'd expect the first two documents to end up in the same cluster and the last two in another.
```
from sklearn.cluster import KMeans

# Cluster the 5 documents into 3 groups; a fixed random_state makes the
# centroid initialization (and thus the cluster assignment) reproducible.
km = KMeans(3, random_state=99)
km.fit_predict(tf_idf)
```
As expected, the first two documents ended up in the same cluster, as did the last two. The 3rd document which was unrelated got its own cluster. As a second example we'll see a clustering technique that doesn't require us to explicitly select the number of clusters.
# Hierarchical Clustering
Hierarchical clustering algorithms approach clustering differently to the centroid-based ones (like k-means). Their goal is to hierarchically cluster their input, with each cluster composed of sub-clusters, finally forming a tree-like structure.

There are two categories of hierarchical clustering algorithms:
- **Agglomerative**, or bottom-up hierarchical clustering algorithms, start by assuming that each example is its own cluster and then begin merging clusters
- **Divisive**, or top-down algorithms, start by assuming that every example belongs to a single cluster and then begin splitting this cluster into sub-clusters.
Hierarchical clustering algorithms make use of the distances between the training examples to merge or split clusters. Unlike k-means, in hierarchical clustering algorithms, it isn't necessary to specify the number of clusters ($k$) a-priori.
The algorithm we'll see, belongs to the first category and uses [Ward's minimum variance method](https://en.wikipedia.org/wiki/Ward%27s_method) for merging clusters together. This recursively looks for the pair of clusters, that when merged, will amount to the least increase to the total internal variance of the clusters. *(Note: with the term internal variance, we mean the variance of the the examples within a certain cluster. Total internal variance is the sum of the internal variances for all clusters.)*
Initially, each example is considered to be one cluster. Then it looks for the pair of clusters that would lead to the least increase in total internal variance, if merged (i.e. Ward's method). Those two clusters are merged into one and the procedure is repeated until 2 clusters are left.
The above can be simply implemented with [scikit-learn](https://docs.scipy.org/doc/scipy/reference/generated/scipy.cluster.hierarchy.linkage.html):
```
from scipy.cluster.hierarchy import dendrogram, linkage

Z = linkage(tf_idf, 'ward') # agglomerative clustering with Ward's linkage
dendrogram(Z) # plots a dendrogram with the results of the clustering algorithm
print() # suppresses the cell's return-value text so only the plot shows
```
As expected, the documents that are close to one another, ended up in the same cluster.
Let's try to apply what we've learned to a more complex example.
## NLP/Clustering example
For the example we'll use the [20 Newsgroups](http://qwone.com/~jason/20Newsgroups/) dataset, which is available through [sklearn](http://scikit-learn.org/stable/datasets/twenty_newsgroups.html).
```
from sklearn.datasets import fetch_20newsgroups

# Downloads (on first use) and loads the entire 20 Newsgroups corpus.
newsgroups_train = fetch_20newsgroups(subset='all')
```
For the results to be better interpretable, we'll just include 3 categories from the dataset, each consisting of 5 documents. To make our lives easier, the categories will be rather distinct.
```
from functools import reduce  # also used again in a later cell

categ = ['alt.atheism', 'comp.graphics', 'rec.sport.baseball']

# Fetch the first 5 documents of each category (headers/footers stripped),
# then flatten the per-category lists into a single list of documents.
per_category = [fetch_20newsgroups(categories=[c], remove=('headers', 'footers'))['data'][:5]
                for c in categ]
data = reduce(lambda acc, docs: acc + docs, per_category)
print('Input shape:', len(data), '\n')
print(data[0][:500])
```
This time around, instead of preprocessing the documents manually, we'll use scikit-learn's [TfidfVectorizer](http://scikit-learn.org/stable/modules/generated/sklearn.feature_extraction.text.TfidfVectorizer.html), which can support every [pre-processing](http://scikit-learn.org/stable/modules/feature_extraction.html#customizing-the-vectorizer-classes) we previously saw (stopwords, stemming, lemmatizing, etc.). We'll also use two extra parameters: `max_df=x`, which ignores any terms that appear in documents with a frequency larger than `x` (i.e. the most common terms) and `min_df=y`, which ignores terms that appear less than `y` times in the corpus (i.e. the least common terms). For this example we won't be performing any stemming/lemmatizing, but the vectorizer does support this functionality (see link above).
```
from sklearn.feature_extraction.text import TfidfVectorizer

# max_df=0.5   -> drop terms appearing in more than 50% of the documents
# min_df=2     -> drop terms appearing less than 2 times in the corpus
# stop_words   -> drop english stopwords
vectorizer = TfidfVectorizer(max_df=0.5, min_df=2, stop_words='english')

# fit_transform returns a sparse matrix; .toarray() densifies it
tf_idf_array = vectorizer.fit_transform(data).toarray()
print('TF-IDF array shape:', tf_idf_array.shape)

Z = linkage(tf_idf_array, 'ward')
labels = ['a'] * 5 + ['g'] * 5 + ['b'] * 5 # 'a' = atheism, 'g' = graphics, 'b' = baseball
dendrogram(Z, labels=labels, color_threshold=0)
# Color each x-axis label according to its true category for readability.
colors = {'a': 'r', 'g': 'g', 'b': 'b'}
for l in plt.gca().get_xticklabels():
    l.set_color(colors[l.get_text()])
print() # suppresses textual output
```
The algorithm did a good enough job of placing similar documents in the same clusters, as we can see from above. Another thing to note is that hierarchical algorithms are capable of identifying hierarchies within groups of similar documents. Also in a truly unsupervised setting the labels/colors in the x-axis would **not** be available.
Let's try to do the same thing with **k-means**, this time with a few more documents per category
```
# Re-fetch the same categories, this time with 100 documents per category,
# and flatten the per-category lists into one list.
data = reduce(lambda x,y: x+y, [fetch_20newsgroups(categories=[x], remove=('headers', 'footers'))['data'][:100] for x in categ])
print('Total number of documents:', len(data))
```
Now, we'll run k-means for multiple values of $k$, like we did in the previous tutorial, to see if it can identify any distinct number of clusters.
```
from sklearn.cluster import KMeans
from sklearn.metrics import silhouette_score

# vectorizer keeps the same parameters as before; the result is left sparse
tf_idf_array = vectorizer.fit_transform(data)

# Run k-means for k = 2..9 and record the silhouette score of each run.
silhouette_scores = []
for k in range(2, 10):
    km = KMeans(k)
    preds = km.fit_predict(tf_idf_array)
    silhouette_scores.append(silhouette_score(tf_idf_array, preds))
```
Finally, we'll plot the silhouette scores and identify the best $k$. This will represent the number of clusters our documents belong in.
```
# Plot silhouette score against k and highlight/annotate the best k.
plt.plot(range(2, 10), silhouette_scores, zorder=-1)
best_k = np.argmax(silhouette_scores) + 2 # +2 because range() begins from k=2 and not 0
plt.scatter(best_k, silhouette_scores[best_k-2], color='#ff7f0e') # for the same reason the best k is -2 from the list index
plt.xlim([2,9])
plt.annotate("best k", xy=(best_k, silhouette_scores[best_k-2]),
             xytext=(5, silhouette_scores[best_k-2]), arrowprops=dict(arrowstyle="->")) # add annotation
print('Maximum average silhouette score for k =', best_k)
```
With the silhouette criterion we found 3 clusters in our dataset, which we know to be the correct number.
Let's print the assignments made by k-means:
```
# Re-fit k-means with the best k found above and inspect the assignments.
km = KMeans(best_k)
km.fit(tf_idf_array)
print(km.labels_)
```
Because we haven't shuffled our dataset, we know that the first 100 documents belong in the first category, the next 100 in the second and so on. We can see that k-means did pretty well in clustering the documents.
In a truly unsupervised problem we wouldn't have any means of evaluating the results of the clustering. One thing we can do is to print the top terms in each cluster. This way we can have an idea on the context of the documents in each cluster.
```
# Print the 20 highest-weighted vocabulary terms of each cluster centroid.
terms = vectorizer.get_feature_names()
# argsort ascending, then reverse each row so the heaviest terms come first.
order_centroids = km.cluster_centers_.argsort()[:, ::-1]
for i in range(best_k):
    top_terms = (terms[ind] for ind in order_centroids[i, :20])
    print("Cluster %d:" % i + ''.join(' %s' % t for t in top_terms))
```
This way we can get an intuition on what the documents of each cluster are about.
We can even use a larger value for $k$ to see if there are any sub-categories in our documents.
```
# Over-cluster with k=8 to look for sub-categories within the documents.
km = KMeans(8)
km.fit(tf_idf_array)
order_centroids = km.cluster_centers_.argsort()[:, ::-1]
# BUG FIX: the original looped range(7) and silently skipped the 8th
# cluster; iterate over all 8 fitted clusters.
for i in range(8):
    out = "Cluster %d:" % i
    for ind in order_centroids[i, :20]:
        out += ' %s' % terms[ind]
    print(out)
```
## Final remarks
The methodologies we saw in this tutorial for converting natural language documents to a structured form, are called [Vector Space Model](https://en.wikipedia.org/wiki/Vector_space_model). The vector used to represent a document is called an **embedding**.
One thing you should take away when dealing with these types of models, is that **reducing** the size of the vocabulary (i.e. the number of dimensions) with NLP methods (stemming/lemmatization, frequent/rarest term removal, etc.), usually outperforms doing the same thing with standard ML pre-processing (feature selection, PCA, etc.). Both methods should be used with a measure; you don't want to sacrifice any important features for the sake of dimensionality reduction.
As a final word, unsupervised NLP problems can be **very** difficult. The categories we selected for this tutorial are easily distinguishable; a fact that helped us achieve a good performance. Don't expect this to always be the case.
| github_jupyter |
<a href="https://colab.research.google.com/github/annms1/Mnist/blob/main/Fashion_mnist.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
```
import tensorflow as tf
import matplotlib.pyplot as plt
import numpy as np
from keras.callbacks import LearningRateScheduler

print(tf.__version__)

# Load Fashion-MNIST and scale pixel intensities from [0, 255] into [0, 1].
mnist = tf.keras.datasets.fashion_mnist
(train_img, train_labels), (test_img, test_labels) = mnist.load_data()
train_img = train_img/255.0
test_img = test_img/255.0
len(train_img)

# Human-readable names for the 10 Fashion-MNIST label indices (0-9).
class_names = ["T-shirt/top", "Trouser", "Pullover", "Dress", "Coat", "Sandal", "Shirt", "Sneaker", "Bag", "Ankle boot"]

# Show one sample image with a colorbar...
plt.figure()
plt.imshow(train_img[0], cmap=plt.cm.binary)
plt.colorbar()
plt.gca().grid(False)

# ...and a 5x5 grid of the first 25 training images with their labels.
plt.figure(figsize=(10, 10))
for i in range(25):
    plt.subplot(5, 5, i + 1)
    plt.grid(False)
    plt.xticks([])
    plt.yticks([])
    plt.imshow(train_img[i], cmap=plt.cm.binary)
    plt.xlabel(class_names[train_labels[i]])
train_img.shape
def model_predictions(model, test_img, sub1, sub2):
    """Return predicted class names for the images test_img[sub1:sub2].

    model      : fitted Keras model whose predict() returns per-class scores.
    test_img   : array of images to predict on.
    sub1, sub2 : slice bounds into test_img.
    """
    ls = []
    pred = model.predict(test_img[sub1:sub2])
    # BUG FIX: `pred` is indexed from 0, so offset by sub1.  The original
    # indexed pred[i] with i starting at sub1, reading the wrong rows (and
    # raising IndexError) whenever sub1 > 0.
    for i in range(sub1, sub2):
        ls.append(class_names[np.argmax(pred[i - sub1])])
    return ls
def listing(range_1, range_2, labels):
    """Print the class name of labels[i] for each i in [range_1, range_2)."""
    ls = []
    for i in range(range_1, range_2):
        ls.append(labels[i])
        # BUG FIX: print the label that was just appended (ls[-1]).  The
        # original printed class_names[ls[i]], which raises IndexError for
        # any call with range_1 > 0 (ls is shorter than i at that point).
        print(class_names[ls[-1]])
    return
# Print the true class names of the first 20 test labels.
listing(0, 20, test_labels)

# Show the first test image with its true class name.
plt.imshow(test_img[0], cmap=plt.cm.binary)
print(class_names[test_labels[0]])

# Show the third training image with its true class name.
plt.imshow(train_img[2], cmap=plt.cm.binary)
print(class_names[train_labels[2]])
def get_lr(init_lr, lr_decay, name):
    """Build a one-element callbacks list with an exponentially decaying LR.

    init_lr  : learning rate at epoch 0.
    lr_decay : decay constant; lr(epoch) = init_lr * exp(-lr_decay * epoch).
    name     : unused -- kept only for backward compatibility with existing
               call sites (the original code immediately overwrote it).
    """
    def exp_decay(epoch):
        # Exponential decay schedule evaluated once per epoch by Keras.
        return init_lr * np.exp(-lr_decay * epoch)

    return [LearningRateScheduler(exp_decay)]
tf.random.set_seed(30)

# Baseline fully-connected model: Flatten -> 100 -> 64 -> 10 (softmax).
model_1 = tf.keras.Sequential([
    tf.keras.layers.Flatten(),
    tf.keras.layers.Dense(100, activation=tf.nn.relu),
    tf.keras.layers.Dense(64, activation=tf.nn.relu),
    tf.keras.layers.Dense(10, activation=tf.nn.softmax)
])

model_1.compile(loss=tf.keras.losses.SparseCategoricalCrossentropy(),
                optimizer=tf.keras.optimizers.Adam(),
                metrics=["accuracy"])

# Train for 8 epochs with an exponentially decaying learning rate schedule.
history_1 = model_1.fit(train_img, train_labels, epochs=8, validation_data=(test_img, test_labels), callbacks=get_lr(0.0015, 0.04, "callback_1"))
test_loss, test_acc = model_1.evaluate(test_img, test_labels)

history_1.history.keys()

# Plot train/validation loss against the learning rate used at each epoch.
plt.figure(figsize=(8, 7))
plt.plot(history_1.history["loss"], history_1.history["lr"])
plt.plot(history_1.history["val_loss"], history_1.history["lr"])
plt.show()
tf.random.set_seed(30)

# Small CNN: two conv stages followed by the same dense head as model_1.
cnn = tf.keras.Sequential()
cnn.add(tf.keras.layers.Conv2D(32, (3, 3), input_shape=(28, 28, 1), activation="relu"))
cnn.add(tf.keras.layers.MaxPooling2D(pool_size=(2,2)))
cnn.add(tf.keras.layers.Conv2D(32, (2, 2), activation="relu"))
cnn.add(tf.keras.layers.Flatten())
cnn.add(tf.keras.layers.Dense(100, activation="relu"))
cnn.add(tf.keras.layers.Dense(64, activation="relu"))
cnn.add(tf.keras.layers.Dense(10, activation="softmax"))

cnn.compile(loss=tf.keras.losses.SparseCategoricalCrossentropy(),
            optimizer=tf.keras.optimizers.Adam(),
            metrics=["accuracy"])

history_2 = cnn.fit(train_img, train_labels, batch_size=32, epochs=8, validation_data=(test_img, test_labels), callbacks=get_lr(0.0012, 0.027, "callback_2"))
test_loss, test_acc = cnn.evaluate(test_img, test_labels)

# Compare predicted vs. true class names for the first 20 test images.
model_predictions(cnn, test_img, 0, 20), listing(0, 20, test_labels)
history_2.history.keys()

# Plot train/validation loss against the learning rate used at each epoch.
plt.figure(figsize=(9, 8))
plt.plot(history_2.history["lr"], history_2.history["loss"])
plt.plot(history_2.history["lr"], history_2.history["val_loss"])
plt.show()
# Convert the network's output scores to hard class indices.
cnn_preds = np.round(cnn.predict(test_img))
# A vectorized row-wise argmax replaces the original manual append loop;
# np_ls is kept around because later cells may still reference it.
np_ls = np.argmax(cnn_preds, axis=1)
cnn_preds = np_ls
```
| github_jupyter |
### Ch7 Figure1
```
# Let's say the Knowledge Explorer wants to see a summary of all the men and women who shop on their running shoe website. They want to show a breakdown by age groups. The data analyst might ask about the age brackets. They could create one every five years or every ten years. If they create one every five years then you could have eighteen or nineteen brackets. They’ll also probably be skewed in the middle. There are probably fewer runners younger than eighteen and older than ninety.
data = []
shoe = ['lightweight trainer', 'cross-trainer', 'trail', 'stability', 'cushioned stability', 'neutral', 'motion control', 'walking', 'racing']

# Simulate 3000 shoppers.  NOTE(review): `rd` is presumably the `random`
# module imported elsewhere in the notebook -- confirm before reuse.
for i in range(3000):
    # ~70% of shoppers fall in the 18-90 age range, the rest anywhere in 0-100
    if rd.random() < .7:
        age = rd.randint(18,90)
    else:
        age = rd.randint(0,100)
    # 'stability' dominates (~80%); otherwise pick from a subset of types
    if rd.random() <= .8:
        shoe_type = 'stability'
    else:
        shoe_type = shoe[rd.choice([0,1,2,5,6,8])]
    # the young skew towards lightweight trainers...
    if age <= 21:
        if rd.random() < .4:
            shoe_type = 'lightweight trainer'
        else:
            shoe_type = shoe[rd.choice([0,1,2,5,6,7,8])]
    # ...and older shoppers towards neutral/comfort-oriented types
    if age >= 65:
        if rd.random() < .4:
            shoe_type='neutral'
        else:
            shoe_type = shoe[rd.choice([0,1,3,4,7])]
    if age >= 90:
        if rd.random() > .9:
            shoe_type = 'cross-trainer'
        else:
            shoe_type = shoe[rd.choice([0,1,3,4,7])]
    data.append([i, shoe_type, age])

df = pd.DataFrame(data, columns = ['id', 'type', 'age'])
# df.to_csv('csv_output/ch7_fig2.csv', index=False)

# Load the previously saved simulated data back from disk.
df = pd.read_csv('csv_output/ch7_fig2.csv')
df.head()

df = pd.read_csv('csv_output/ch7_fig2.csv')
# Count shoppers per (type, age) pair and pivot into a type-by-age table.
type_age_count = df.groupby(['type', 'age']).id.count().reset_index()
df2 = pd.pivot_table(type_age_count, values='id', columns='type', index='age').fillna(0)

%matplotlib inline
sns.set_style("whitegrid")

# Left half of the 2x2 figure: the most granular (per-age) heatmap,
# column-normalized so each shoe type sums to 1.
f, ax = plt.subplots(2,2, figsize=(10,12))
ax1 = plt.subplot2grid((2,2),(0,0), rowspan=2)
sns.heatmap(df2/df2.sum(), ax=ax1, cmap='GnBu');
ax1.set_title('type by age heatmap');
def set_group1(x):
    """Bucket an age into the coarse brackets 0~18, 19~65, 66~90, 90+."""
    if x > 90:
        return '90+'
    if x > 65:
        return '66~90'
    if x > 18:
        return '19~65'
    return '0~18'
def set_group2(x):
    """Bucket an age into the brackets 0~20, 21~40, 41~60, 61~80, 80+.

    Fixes an off-by-one in the original bounds: `41 < x <= 60` and
    `61 < x <= 80` left ages 41 and 61 unmatched, so both fell through
    to the '80+' bucket.
    """
    if x <= 20:
        return '0~20'
    elif 20 < x <= 40:
        return '21~40'
    elif 40 < x <= 60:
        return '41~60'
    elif 60 < x <= 80:
        return '61~80'
    else:
        return '80+'
# Bucket ages with grouping #1 (0~18 / 19~65 / 66~90 / 90+) and draw the
# column-normalised type-by-bracket heatmap in the upper-right panel.
df['group1'] = df.age.apply(set_group1);
group1_count = df.groupby(['type', 'group1']).id.count().reset_index()
df3 = pd.pivot_table(group1_count, values='id', columns='type', index='group1').fillna(0)
sns.heatmap(df3/df3.sum(), ax=ax[0][1], cmap='GnBu');
ax[0][1].set_title('type by age group1 heatmap');
# Same with grouping #2 (20-year buckets) in the lower-right panel.
df['group2'] = df.age.apply(set_group2);
group1_count = df.groupby(['type', 'group2']).id.count().reset_index()
df4 = pd.pivot_table(group1_count, values='id', columns='type', index='group2').fillna(0)
sns.heatmap(df4/df4.sum(), ax=ax[1][1], cmap='GnBu');
ax[1][1].set_title('type by age group2 heatmap');
f.tight_layout()
f.savefig('svg_output/ch7_fig2.svg', format='svg')
```
Looking at the heatmap of the most granular level (each age as a cell) and heatmaps of different age groupings: The left one shows lightweight trainers are most popular around 18~21, while motion control, racing, stability and trail are more popular for ages 21~65. However, because of the different age cutoffs, the plots on the right might carry a very different message. In the upper right, it seems that lightweight trainers are equally popular in all three age buckets (0~18, 19~65, 66~90), but looking at the bottom right, and the original one, they are clearly more popular among only 18~21. This is because the first bucket on the upper right divides the most popular range right in half.
```
%load_ext rpy2.ipython
%%R -w 480 -h 800 -u px
df = read.csv('csv_output/ch7_fig2.csv')
head(df)
require(ggplot2)
ggplot(df, aes(type, age)) + geom_tile(aes(fill = age), colour = "white") + scale_fill_gradient(low = "white", high = "steelblue") + ggtitle('type by age heatmap') + theme_bw() + theme(panel.grid.major = element_blank(), panel.grid.minor = element_blank(), axis.text.x = element_text(angle = 30, hjust= 1))
# ggsave("svg_output/ch7_fig2_R.svg")
```
| github_jupyter |
# Spot detection with napari
### Overview
In this activity, we will perform spot detection on some in situ sequencing data ([Feldman and Singh et al., Cell, 2019](https://www.cell.com/cell/fulltext/S0092-8674(19)31067-0s)). In doing so, we will combine methods from [scipy](https://www.scipy.org/), [scikit-image](https://scikit-image.org/), and [cellpose](https://github.com/MouseLand/cellpose). The goal is to familiarize you with performing analysis that integrates the scientific python ecosystem and napari.
### Data source
The data were downloaded from the [OpticalPooledScreens github repository](https://github.com/feldman4/OpticalPooledScreens).
### Next steps
Following this activity, we will use the workflow generated in this activity to create a napari spot detection plugin.
# Load the data
In the cells below load the data using the scikit-image `imread()` function. For more information about the `imread()` function, please see the [scikit-image docs](https://scikit-image.org/docs/dev/api/skimage.io.html#skimage.io.imread). We are loading two images:
- `nuclei`: an image of cell nuclei
- `spots`: an image of in situ sequencing spots
```
from skimage import io
# Download the two demo images straight from the tutorial repository;
# skimage.io.imread accepts URLs as well as local paths.
nuclei_url = 'https://raw.githubusercontent.com/kevinyamauchi/napari-spot-detection-tutorial/main/data/nuclei_cropped.tif'
nuclei = io.imread(nuclei_url)
spots_url = 'https://raw.githubusercontent.com/kevinyamauchi/napari-spot-detection-tutorial/main/data/spots_cropped.tif'
spots = io.imread(spots_url)
```
# View the data
We will use napari to view our data. To do so, we first must create the viewer. Once the Viewer is created, we can add images to the viewer via the Viewer's `add_image()` method.
```
import napari
# create the napari viewer
viewer = napari.Viewer();
# add the nuclei image to the viewer
viewer.add_image(nuclei);
```
In the cell below, add the spots image to the viewer as was done above for the nuclei image. After loading the data, inspect it in the viewer and adjust the layer settings to your liking (e.g., contrast limits, colormap). You can pan/zoom around the image by click/dragging to pan and scrolling with your mousewheel or trackpad to zoom.
**Hint**: you can adjust a layer's opacity to see the change how much you see of the layers that are "under" it.
```
# add the spots image to the viewer
```
# Create an image filter
You may have noticed that the spots image contains background and autofluorescence from the cells. To improve spot detection, we will apply a high pass filter to improve the contrast of the spots.
```
import numpy as np
from scipy import ndimage as ndi
def gaussian_high_pass(image: np.ndarray, sigma: float = 2) -> np.ndarray:
    """Apply a gaussian high pass filter to an image.

    Parameters
    ----------
    image : np.ndarray
        The image to be filtered.
    sigma : float
        The sigma (width) of the gaussian filter to be applied.
        The default value is 2.

    Returns
    -------
    high_passed_im : np.ndarray
        The image with the high pass filter applied, as a float array.
    """
    # Work in float: microscopy images are typically unsigned integers, and
    # `image - low_pass` on an unsigned dtype wraps around below zero,
    # corrupting the filtered result.
    image = np.asarray(image, dtype=float)
    low_pass = ndi.gaussian_filter(image, sigma)
    high_passed_im = image - low_pass
    return high_passed_im
```
In the cell below, apply the gaussian high pass filter to the `spots` image and add the image to the viewer.
```
# Use the gaussian_high_pass function to filter the spots image
# add the filtered image to the viewer
# hint: set the opacity < 1 in order to see the layers underneath
```
# Detect spots
Next, we will create a function to detect the spots in the spot image. This function should take the raw image, apply the gaussian high pass filter from above and then use one of the blob detection algorithms from sci-kit image to perform the blob detection. The `detect_spots()` function should return a numpy array containing the coordinates of each spot and a numpy array containing the diameter of each spot.
Some hints:
- See the [blob detection tutorial from scikit-image](https://scikit-image.org/docs/dev/auto_examples/features_detection/plot_blob.html). - We recommend the [blob_log detector](https://scikit-image.org/docs/dev/api/skimage.feature.html#skimage.feature.blob_log), but feel free to experiment!
- See the "Note" from the blob_log docs: "The radius of each blob is approximately $\sqrt{2}\sigma$ for a 2-D image"
```
import numpy as np
from skimage.feature import blob_log
def detect_spots(
    image: np.ndarray,
    high_pass_sigma: float = 2,
    spot_threshold: float = 0.01,
    blob_sigma: float = 2
):
    """High pass filter an image and detect spots with a LoG blob detector.

    Parameters
    ----------
    image : np.ndarray
        The image in which to detect the spots.
    high_pass_sigma : float
        The sigma (width) of the gaussian filter to be applied.
        The default value is 2.
    spot_threshold : float
        The threshold to be passed to the blob detector.
        The default value is 0.01.
    blob_sigma: float
        The expected sigma (width) of the spots. This parameter
        is passed to the "max_sigma" parameter of the blob
        detector.

    Returns
    -------
    points_coords : np.ndarray
        An NxD array with the coordinate for each detected spot.
        N is the number of spots and D is the number of dimensions.
    sizes : np.ndarray
        An array of size N, where N is the number of detected spots
        with the diameter of each spot.
    """
    # Filter the image with a gaussian high pass (inlined so this function
    # does not depend on the helper defined in another cell); float math
    # avoids unsigned-integer wraparound.
    filtered = np.asarray(image, dtype=float)
    filtered = filtered - ndi.gaussian_filter(filtered, high_pass_sigma)
    # Detect the spots on the filtered image; each blob_log row is
    # [coord_0, ..., coord_(D-1), sigma].
    blobs = blob_log(
        filtered,
        max_sigma=blob_sigma,
        threshold=spot_threshold,
    )
    # Convert the blob detector output to the desired arrays: coordinates,
    # and diameters via radius ~ sqrt(D) * sigma (sqrt(2)*sigma in 2-D).
    points_coords = blobs[:, :image.ndim]
    sizes = 2 * np.sqrt(image.ndim) * blobs[:, -1]
    return points_coords, sizes
```
In the cell below, apply `detect_spots()` to our `spots` image. To visualize the results, add the spots to the viewer as a [Points layer](https://napari.org/tutorials/fundamentals/points.html). If you would like to see an example of using a points layer, see [this example](https://github.com/napari/napari/blob/master/examples/add_points.py). To test out your function, vary the detection parameters and see how they affect the results. Note that each time you run the cell, the new results are added as an addition Points layer, allowing you to compare results from different parameters. To make it easier to compare results, you can try modifying the layer opacity or toggling the visibility with the "eye" icon in the layer list.
```
# detect the spots
# add the detected spots to the viewer as a Points layer
```
## Conclusion
In this activity, we have interactively prototyped a spot detection function using a combination of jupyter notebook, scipy, scikit-image, and napari. In the next activity, we will take the spot detection function we created and turn it into a napari plugin.
| github_jupyter |
```
import pandas as pd
from sklearn.metrics import average_precision_score, roc_auc_score, f1_score, roc_curve
import argparse
# Initiate argument parser
# Lightweight stand-in for an argparse namespace: holds the path to the
# ground-truth labels CSV and to the model's prediction CSV.
class args:
    gt_df='/notebooks/data/datasets/pipistrel/Hackathon/SingleFrame_ObjectProposalClassification/test/labels.csv'
    detfile='./train_logs_b256_e15_sea_train_test/predictions_fm.csv'
# Load ground truth and map the string class labels to {0.0, 1.0}.
gt_df = pd.read_csv(args.gt_df)
classT = {'nature': 0.0,'boat': 1.0}
gt_df['class'] = [classT[item] for item in gt_df['class']]
# Load detections; keep only the basename of each file path.
det_df = pd.read_csv(args.detfile)
det_df['filename'] = det_df['filename'].apply(lambda x: x.split('/')[-1])
# det_df = det_df[det_df['filename'].str.contains("_")]
det_df = det_df[['confidence','filename','true_labels']]
# df = pd.merge(left=gt_df, right=det_df, how='left', left_on='filename', right_on='filename', )
# df = df.dropna()
# Threshold-free metrics computed directly from the confidence scores.
AP=average_precision_score(det_df['true_labels'], det_df['confidence'], average="macro", sample_weight=None)
print("Average precision:",str(round(AP, 2)))
roc_auc = roc_auc_score(det_df['true_labels'], det_df['confidence'], average="macro", sample_weight=None)
print("AUROC", roc_auc)
# from sklearn.metrics import accuracy_score
# print(accuracy_score(det_df['true_labels'], det_df['confidence'] > 200))
# NOTE(review): `precision_recall_curve` is only imported further down this
# cell, so a fresh top-to-bottom run raises NameError here -- move that
# import above this line.
precision, recall, _ = precision_recall_curve(det_df['true_labels'], det_df['confidence'])
# In matplotlib < 1.5, plt.fill_between does not have a 'step' argument
step_kwargs = {'step': 'post'}
# Mark the highest recall that still achieves perfect precision.
plt.vlines(recall[precision.argmax()], 0, 1, colors="r", label='{:.2f} recall for 1.0 precision'.format(recall[precision.argmax()]))
plt.step(recall, precision, color='b', alpha=0.2,
         where='post')
plt.fill_between(recall, precision, alpha=0.2, color='b', **step_kwargs)
plt.xlabel('Recall')
plt.ylabel('Precision')
plt.ylim([0.0, 1.05])
plt.xlim([0.0, 1.0])
plt.title('2-class Precision-Recall curve: AP={0:0.2f}'.format(
    AP));
plt.legend()
plt.show()
# ROC curve with the no-skill diagonal for reference.
plt.figure()
lw = 2
fpr, tpr, _ = roc_curve(det_df['true_labels'], det_df['confidence'])
plt.plot(fpr, tpr, color='darkorange', lw=lw)
plt.vlines(fpr[tpr.argmax()], 0, 1, colors='r', label='{:.2f} fpr for 1.0 tpr'.format(fpr[tpr.argmax()]))
plt.plot([0, 1], [0, 1], color='navy', lw=lw, linestyle='--')
plt.xlim([0.0, 1.0])
plt.ylim([0.0, 1.05])
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate')
plt.title('ROC={0:0.2f}'.format(roc_auc))
plt.legend()
plt.show()
fpr[tpr.argmax()]
roc_curve(det_df['true_labels'], det_df['confidence'])
import matplotlib.pyplot as plt
%matplotlib inline
# Confidence-score distributions for the two classes.
plt.hist(det_df[det_df['true_labels'] == 0.]['confidence'], 50, facecolor=(0, 1, 0, 0.5),
         label="Normal samples", density=True)
plt.hist(det_df[det_df['true_labels'] == 1.]['confidence'], 50, facecolor=(1, 0, 0, 0.5),
         label="Anomalous samples", density=True)
plt.legend()
plt.show()
from sklearn.metrics import f1_score, precision_score, recall_score, precision_recall_curve
# Thresholded metrics at a fixed operating point.
THRESHOLD = 4.5
print("F1", f1_score(det_df['true_labels'], det_df['confidence'] > THRESHOLD, average="macro", sample_weight=None))
print("PR", precision_score(det_df['true_labels'], det_df['confidence'] > THRESHOLD, average="macro", sample_weight=None))
print("RC", recall_score(det_df['true_labels'], det_df['confidence'] > THRESHOLD, average="macro", sample_weight=None))
# NOTE(review): the remaining lines are exploratory leftovers; `df` (the
# merge above is commented out) and `detfile` are undefined at this point.
(df['class'] == 1 - df['true_labels']).all()
det_df[['confidence','filename','true_labels']]
?pd.merge
detfile
gt_df.head()
det_df
```
| github_jupyter |
```
import argparse
import pathlib
import random
from collections import deque
from typing import List, Dict, Tuple, Union
from data import Dataset
from data_utils import *
from sampling import *
from models import BetaBernoulli
import matplotlib.pyplot as plt
import numpy as np
from tqdm import tqdm
from utils import mean_reciprocal_rank
import pickle
%matplotlib inline
import matplotlib;matplotlib.rcParams['font.size'] = 10
import matplotlib;matplotlib.rcParams['font.family'] = 'serif'
LINEWIDTH = 13.97
LOG_FREQ = 10
RUNS = 100
method_list = ['random_arm', 'random_data', 'random_arm_informed', 'random_data_informed', 'ts_uniform', 'ts_informed']
method_format = {'random_arm': ('Random Arm', 'b', '.', '--'),
'random_data': ('Random Data', 'g', '^', '--'),
'random_arm_informed': ('Random Arm Informative', 'b', '.', '-'),
'random_data_informed': ('Random Data Informative', 'g', '^', '-'),
'ts_uniform': ('TS Uninformative', 'k', '*', '-'),
'ts_informed': ('TS Informative', 'r', '+', '-'),
}
DATASET_LIST = ['cifar100', 'dbpedia', '20newsgroup', 'svhn', 'imagenet'] #'imagenet',
metric = 'groupwise_accuracy'
group_method = 'score_equal_size' # score_equal_size, predicted_class
pseudocount = 2
TOPK = False
output = pathlib.Path("../output/%s/" % metric )
num_groups_dict = {}
for dataset in DATASET_LIST:
if group_method == 'score_equal_size':
num_groups_dict[dataset] = 10
if group_method == 'predicted_class':
num_groups_dict[dataset] = NUM_CLASSES_DICT[dataset]
# load results and compute ground truth
ground_truth = {}
l2_error = {}
l1_ece = {} # L1 ECE
l2_ece = {} # L2 ECE
avg_num_agreement = {}
mrr = {}
ece = {}
# Load the cached per-method results for every dataset; `topk` selects the
# experiment variant encoded in the cached file names.
for dataset_name in DATASET_LIST:
    if TOPK:
        # Bug fix: this read `TOPK_DICT[args.dataset_name]`, but no `args`
        # namespace exists in this notebook scope -- the loop variable
        # `dataset_name` is what was intended.
        topk = TOPK_DICT[dataset_name]
    else:
        topk = 1
    experiment_name = '%s_groupby_%s_top%d_pseudocount%.2f' % (dataset_name, group_method, topk, pseudocount)
    ground_truth[dataset_name] = pickle.load(open(output / experiment_name / "ground_truth.pkl", "rb"))
    l2_error[dataset_name] = pickle.load(open(output / experiment_name / "l2_error.pkl", "rb"))
    l1_ece[dataset_name] = pickle.load(open(output / experiment_name / "l1_ece.pkl", "rb"))
    l2_ece[dataset_name] = pickle.load(open(output / experiment_name / "l2_ece.pkl", "rb"))
    ece[dataset_name] = pickle.load(open(output / experiment_name / "ece.pkl", "rb"))
    # Ranking-quality metrics only exist for the identification tasks.
    if metric in ['most_accurate', 'least_accurate']:
        avg_num_agreement[dataset_name] = pickle.load(open(output / experiment_name / "avg_num_agreement.pkl", "rb"))
        mrr[dataset_name] = pickle.load(open(output / experiment_name / "mrr.pkl", "rb"))
# plot
method_format = {#'random_arm': ('Random Arm', 'g', '.', '--'),
'random_data': ('Random Uninformative', 'b', '^', '--'),
#'random_arm_informed': ('Random Arm Informed', 'g', '.', '-'),
'random_data_informed': ('Random Informative', 'b', '^', '-'),
'ts_uniform': ('TS Uninformative', 'k', '*', '-'),
'ts_informed': ('TS Informative', 'r', '+', '-'),}
fig, axes = plt.subplots(nrows=1, ncols=len(DATASET_LIST), figsize=(LINEWIDTH,2.2),
gridspec_kw = {'wspace':0.25, 'hspace':0})
for i, dataset_name in enumerate(DATASET_LIST):
start = 10
num_steps = l2_error[dataset_name]['ts_informed'].shape[1] - 1
stepsize = 1
for method_name in method_format:
legend_name, color, marker, linestyle = method_format[method_name]
xrange = np.arange(num_steps) * LOG_FREQ + LOG_FREQ
idx = np.arange(num_steps)[start:(start+num_steps)][::stepsize]
axes[i].plot(xrange[idx],
np.mean(l2_error[dataset_name][method_name], axis=0)[idx],
label=legend_name, linewidth=3, color=color, linestyle=linestyle)
if method_name != 'random_data':
axes[i].fill_between(xrange[idx],
np.quantile(l2_error[dataset_name][method_name], 0.125, axis=0)[idx],
np.quantile(l2_error[dataset_name][method_name], 0.875, axis=0)[idx],
color=color, alpha=.2)
#plt.ylim([0, 0.3])
axes[i].set_xlabel('#Labeled')
axes[i].set_xscale('log')
axes[i].set_title(DATASET_NAMES[dataset_name])
axes[i].spines['right'].set_visible(False)
axes[i].spines['top'].set_visible(False)
vals = axes[i].get_yticks()
axes[i].set_yticklabels(['{:.3f}'.format(x) for x in vals])
vals = axes[i].get_yticks()
axes[i].set_yticklabels(['${:.2f}$'.format(x) for x in vals])
axes[-1].legend(fontsize=10, loc='upper right', bbox_to_anchor=(1.4, 1))
axes[0].set_ylabel('RMSE')
plt.tight_layout()
dataset_list = DATASET_LIST
dataset_list = ['cifar100','dbpedia']
# dataset_list = ['20newsgroup','svhn']
def print_table(dataset_list):
    """Print a LaTeX `tabular` of mean RMSE (l2_error) at label budgets
    N = 20/50/100 for each dataset and method.

    Relies on notebook globals `l2_error`, `DATASET_NAMES` and `LOG_FREQ`.
    Only 2- or 3-dataset layouts are supported by the column specs below.
    """
    # Shadow the notebook-level `method_format`: only the three methods
    # shown in the paper's table.
    method_format = {'random_data': ('Random Uninformative', 'b', '^', '--'),
                     'random_data_informed': ('Random Informative', 'b', '^', '-'),
                     'ts_informed': ('TS Informative', 'r', '+', '-'),}
    N_list = [20,50,100]
    num_datasets = len(dataset_list)
    # Column spec depends on how many datasets share the row.
    if num_datasets == 2:
        print('\\begin{tabular}{@{}ccccccccc@{}}')
    elif num_datasets == 3:
        print('\\begin{tabular}{@{}ccccccccccccc@{}}')
    print('\\toprule ')
    for dataset in dataset_list:
        print('& \phantom{a} & \multicolumn{3}{c}{%s}' % DATASET_NAMES[dataset])
    print('\\\ ')
    if num_datasets == 2:
        print('\cmidrule{3-5} \cmidrule{7-9}')
    elif num_datasets == 3:
        print('\cmidrule{3-5} \cmidrule{7-9} \cmidrule{11-13}')
    for dataset in dataset_list:
        print('& \phantom{a} & N=%d & N=%d & N=%d' % tuple(N_list))
    print('\\\ \\midrule')
    for method in method_format:
        method_name = method_format[method][0]
        print('\multicolumn{2}{c}{%20s}' % method_name, end = '')
        for dataset_name in dataset_list:
            # Mean over runs; the index maps budget N to its logged step.
            y = l2_error[dataset_name][method]
            y = np.mean(y, axis=0)
            for N in N_list:
                val = y[(N-LOG_FREQ)//LOG_FREQ]
                print('& %.3f ' % val, end = '')
            if dataset_name != dataset_list[-1]:
                print('&', end = '')
        print('\\\\ \n', end = '');
    print('\\bottomrule')
    print('\\end{tabular}')
print_table(['cifar100','dbpedia'])
print_table(['20newsgroup','svhn', 'imagenet'])
def print_narrow_table(dataset_list):
method_format = {'random_data': ('UPrior', 'b', '^', '--'),
'random_data_informed': ('IPrior', 'b', '^', '-'),
'ts_informed': ('IPrior+TS', 'r', '+', '-'),}
print('\\begin{tabular}{@{}cccccc@{}}')
print('\\toprule ')
print(' & {N/K} & {N}', end = '')
for method in method_format:
print('& {%10s}' % method_format[method][0], end = '')
print('\\\ ')
for i, dataset in enumerate(dataset_list):
N_list = [2, 5, 10]
print('\\midrule')
for idx, N in enumerate(N_list):
if idx == 0:
name_string = DATASET_NAMES[dataset]
else:
name_string = ''
vals = []
for method in method_format:
y = l2_error[dataset][method]
y = np.mean(y, axis=0)
vals.append(y[(N * num_groups_dict[dataset])//LOG_FREQ] * 100)
if N == 0:
vals[1] = vals[2]
print('{%20s} & %d & %d & %.2f &%.2f &%.2f \\\\ \n' % \
(name_string, N, N * num_groups_dict[dataset], vals[0], vals[1], vals[2]), end = '')
print('\\bottomrule')
print('\\end{tabular}')
print_narrow_table(DATASET_LIST)
def print_narrow_table(dataset_list):
method_format = {'random_data': ('UPrior', 'b', '^', '--'),
'ts_uniform': ('UPrior+TS', 'b', '^', '--'),
'random_data_informed': ('IPrior', 'b', '^', '-'),
'ts_informed': ('IPrior+TS', 'r', '+', '-'),}
print('\\begin{tabular}{@{}ccccccc@{}}')
print('\\toprule ')
print(' & {N/K} & {N}', end = '')
for method in method_format:
print('& {%10s}' % method_format[method][0], end = '')
print('\\\ ')
for i, dataset in enumerate(dataset_list):
N_list = [2, 5, 10]
print('\\midrule')
for idx, N in enumerate(N_list):
if idx == 0:
name_string = DATASET_NAMES[dataset]
else:
name_string = ''
vals = []
for method in method_format:
y = l2_error[dataset][method]
y = np.mean(y, axis=0)
vals.append(y[(N * num_groups_dict[dataset])//LOG_FREQ] * 100)
if N == 0:
vals[1] = vals[2]
print('{%20s} & %d & %d & %.2f &%.2f &%.2f &%.2f \\\\ \n' % \
(name_string, N, N * num_groups_dict[dataset], vals[0], vals[1], vals[2], vals[3]), end = '')
print('\\bottomrule')
print('\\end{tabular}')
print_narrow_table(DATASET_LIST)
from scipy.stats import wilcoxon
def statistical_test(method_0, method_1):
    """Wilcoxon signed-rank test on the per-run RMSE gap between two methods.

    For each dataset and each per-group labelling budget, print the
    (dataset, N) pairs where the difference is NOT significant at p <= 0.05.
    Relies on notebook globals `dataset_list`, `l2_error`,
    `num_groups_dict` and `LOG_FREQ`.
    """
    budgets = [0, 2, 5, 10]
    for dataset in dataset_list:
        gap = l2_error[dataset][method_0] - l2_error[dataset][method_1]
        for n_per_group in budgets:
            col = (n_per_group * num_groups_dict[dataset]) // LOG_FREQ
            _, p_value = wilcoxon(gap[:, col])
            if p_value > 0.05:
                print(dataset, n_per_group, method_0, method_1)
    print("DONE")
statistical_test('random_data', 'random_data_informed')
statistical_test('random_data', 'ts_informed')
statistical_test('random_data_informed', 'ts_informed')
def print_ece_estimation_error_table(dataset_list):
    """Print a LaTeX `tabular` of the mean relative ECE estimation error (%)
    at several label budgets; the IPrior+TS column is typeset in bold.

    Relies on notebook globals `ece`, `ground_truth`, `DATASET_NAMES`,
    `num_groups_dict` and `LOG_FREQ`.
    """
    method_format = {'random_data': ('UPrior', 'b', '^', '--'),
                     'random_data_informed': ('IPrior', 'b', '^', '-'),
                     'ts_informed': ('IPrior+TS', 'r', '+', '-'),}
    print('\\begin{tabular}{@{}cccccc@{}}')
    print('\\toprule ')
    print(' & {N/K} & {N}', end = '')
    for method in method_format:
        print('& {%10s}' % method_format[method][0], end = '')
    print('\\\ ')
    for i, dataset in enumerate(dataset_list):
        # N is labels *per group*; the total label count is N * num_groups.
        N_list = [2, 5, 10, 20, 50, 100]
        print('\\midrule')
        for idx, N in enumerate(N_list):
            if idx == 0:
                name_string = DATASET_NAMES[dataset]
            else:
                name_string = ''
            vals = []
            for method in method_format:
                # Relative error of the ECE estimate vs. ground truth, in %.
                y = (ece[dataset][method] - ground_truth[dataset]['ece']) / ground_truth[dataset]['ece']
                y = np.mean(np.abs(y), axis=0)
                vals.append(y[(N * num_groups_dict[dataset])//LOG_FREQ] * 100)
            # NOTE(review): dead branch -- N_list never contains 0.
            if N == 0:
                vals[1] = vals[2]
            print('{%20s} & %d & %d & %.1f &%.1f &\\textbf{%.1f} \\\\ \n' % \
                  (name_string, N, N * num_groups_dict[dataset], vals[0], vals[1], vals[2]), end = '')
    print('\\bottomrule')
    print('\\end{tabular}')
print_ece_estimation_error_table(DATASET_LIST)
```
| github_jupyter |
# Contains code for deploying the trained model
```
import sagemaker
from sagemaker import get_execution_role
from sagemaker.amazon.amazon_estimator import get_image_uri
role = get_execution_role()
sess = sagemaker.Session()
training_image = get_image_uri(sess.boto_region_name, 'image-classification', repo_version="latest")
# bucket = 'project-completion-udacity'
# dataset_name = 'nsfw_dataset'
# s3_output_location = 's3://project-completion-udacity/nsfw_dataset/output'
# image_classifier = sagemaker.estimator.Estimator(
# training_image,
# role,
# train_instance_count = 1,
# train_instance_type = 'ml.p2.xlarge',
# output_path=s3_output_location,
# sagemaker_session=sess
# )
```
# the only change you need to make is to change job_name with the name of your own training job.
```
# Recover the trained model artifact from the finished training job and
# register it as a SageMaker model (change job_name to your own job).
job_name = 'IC-nsfw-dataset-1568090782'
import boto3
from time import gmtime, strftime
import time
sage = boto3.Session().client(service_name='sagemaker')
# Timestamp suffix keeps model names unique across runs.
timestamp = time.strftime('-%Y-%m-%d-%H-%M-%S', time.gmtime())
model_name="DEMO-full-image-classification-model" + timestamp
print(model_name)
# Look up where the training job stored its model artifact on S3.
info = sage.describe_training_job(TrainingJobName=job_name)
model_data = info['ModelArtifacts']['S3ModelArtifacts']
print(model_data)
# Serving container: the built-in image-classification algorithm image.
hosting_image = get_image_uri(boto3.Session().region_name, 'image-classification')
primary_container = {
    'Image': hosting_image,
    'ModelDataUrl': model_data,
}
create_model_response = sage.create_model(
    ModelName = model_name,
    ExecutionRoleArn = role,
    PrimaryContainer = primary_container)
print(create_model_response['ModelArn'])
```
<h2> For endpoint configuration
```
# %%time
# # Deploying a model to an endpoint takes a few minutes to complete
# deployed_endpoint = image_classifier.deploy(
# initial_instance_count = 1,
# instance_type = 'ml.t2.medium'
# )
from time import gmtime, strftime
timestamp = time.strftime('-%Y-%m-%d-%H-%M-%S', time.gmtime())
endpoint_config_name = 'my-endpoint' + timestamp
endpoint_config_response = sage.create_endpoint_config(
EndpointConfigName = endpoint_config_name,
ProductionVariants=[{
'InstanceType':'ml.t2.medium',
'InitialInstanceCount':1,
'ModelName':model_name,
'VariantName':'AllTraffic'}])
print('Endpoint configuration name: {}'.format(endpoint_config_name))
print('Endpoint configuration arn: {}'.format(endpoint_config_response['EndpointConfigArn']))
```
## code for lauanching the endpoint.
```
endpoint_name = 'myprojectcapstone'
print('Endpoint name: {}'.format('myprojectcapstone'))
import sagemaker as sm
endpoint_params = {
'EndpointName': endpoint_name,
'EndpointConfigName': endpoint_config_name,
}
endpoint_response = sess.sagemaker_client.create_endpoint(**endpoint_params)
print('EndpointArn = {}'.format(endpoint_response['EndpointArn']))
endpoint_dec = sess.wait_for_endpoint(endpoint_name)
```
## Testing the deployed endpoint
```
import json
import numpy as np
import os
def classify_deployed(file_name, classes, endpoint_name):
    """Send an image file to the deployed SageMaker endpoint and return the
    (class name, probability) pair for the highest-scoring class.

    Relies on the notebook-level SageMaker session ``sess``.
    """
    with open(file_name, 'rb') as image_file:
        payload = bytearray(image_file.read())
    response = sess.sagemaker_runtime_client.invoke_endpoint(
        EndpointName=endpoint_name,
        ContentType='application/x-image',
        Body=payload)
    # The endpoint returns a JSON list of per-class probabilities.
    result = json.loads(response['Body'].read())
    print(result)
    best_prob_index = int(np.argmax(result))
    print(best_prob_index)
    return classes[best_prob_index], result[best_prob_index]
# NOTE(review): this sample path points into a *flowers* dataset while the
# class list is the NSFW model's labels -- confirm the intended test image.
file_name = 'flowes/test/daisy/10555826524_423eb8bf71_n.jpg'
classes = ['animated' , 'nude' ,'porn' , 'safe_for_work' , 'semi_nude']
class_predicted , class_score = classify_deployed(file_name, classes , endpoint_name)
print(class_predicted , class_score)
```
## Deleting the endpoint.
```
sess.sagemaker_client.delete_endpoint(EndpointName = endpoint_name)
```
| github_jupyter |
# Idealized entrainment
This notebook visualizes the results of GOTM simulations in an idealized wind stress-driven entrainment case with no rotation, in which the mixed layer gradually entrains into an underlying non-turbulent region with constant stable stratification.
The results are compared with the scaling derived from laboratory experiments of [Price (1979)](https://doi.org/10.1017/S0022112079002366), in which the time evolution of the mixed layer depth follows
$h_m(t) = (2R_v)^{1/4}u_*\left(\frac{t}{N_0}\right)^{1/2}$
where $R_v\approx0.6$ is the bulk Richardson number, $u_*$ the water side surface friction velocity, $N_0$ the initial buoyancy frequency.
See, e.g., section 5.1 of [Umlauf and Burchard (2005)](https://doi.org/10.1016/j.csr.2004.08.004) for more details.
```
import sys
import numpy as np
import string
import matplotlib.pyplot as plt
# add the path of gotmtool
sys.path.append("../gotmtool")
from gotmtool import *
from gotmtool.diags import *
```
## Load data
First, store the information of the four simulations into lists and dictionaries.
```
runlabel = '_Nz250_Nt18000'
casenames = [
'GLS-C01A' + runlabel,
'KPP-CVMix' + runlabel,
'KPPLT-VR12' + runlabel,
'KPPLT-LF17' + runlabel,
]
labels = {
casenames[0]: 'GLS-C01A',
casenames[1]: 'KPP-CVMix',
casenames[2]: 'KPPLT-VR12',
casenames[3]: 'KPPLT-LF17',
}
colors = {
casenames[0]: 'tab:blue',
casenames[1]: 'tab:olive',
casenames[2]: 'tab:orange',
casenames[3]: 'tab:red',
}
linestyles = {
casenames[0]: '-',
casenames[1]: '-',
casenames[2]: '-',
casenames[3]: '-',
}
casedir = '../gotm/run/Entrainment'
save_fig = True
```
Load data for the four simulations into a dictionary.
```
# Load each GOTM case via gotmtool's Simulation wrapper; keep both the
# Simulation objects and the loaded datasets keyed by case name.
sims = {}
dataset = {}
for case in casenames:
    sim = Simulation(path=casedir+'/'+case)
    sims[case] = sim
    dataset[case] = sim.load_data()
```
## Figure 1
Vertical profiles of (a) down-wind velocity $u$, (b) turbulent viscosity $\nu_m$, and (c) squared buoyancy frequency $N^2$ at the end of the 30-hour simulations.
```
fig, axarr = plt.subplots(1,3, sharey='row')
fig.set_size_inches([8,4])
for case in casenames:
data = dataset[case]
data.u[:,-1,0,0].plot(y='z', ax=axarr[0], color=colors[case])
data.num[:,-1,0,0].plot(y='zi', ax=axarr[1], color=colors[case])
data.NN[:,-1,0,0].plot(y='zi', ax=axarr[2], color=colors[case], label=labels[case])
for i in np.arange(3):
axarr[i].set_title('')
axarr[i].set_ylabel('')
axarr[0].set_ylim([-40,0])
axarr[0].set_ylabel('Depth (m)')
axarr[0].set_xlabel('$u$ (m s$^{-1}$)')
axarr[1].set_xlabel('$\\nu_m$ (m$^2$ s$^{-1}$)')
axarr[2].set_xlabel('$N^2$ (s$^{-2}$)')
axarr[2].ticklabel_format(axis="x", style="sci", scilimits=(-2,2))
axarr[2].legend(loc='upper right')
for i, ax in enumerate(axarr):
ax.text(0.82, 0.05, '('+string.ascii_lowercase[i]+')', transform=ax.transAxes,
fontsize=12, va='bottom')
if save_fig:
plt.tight_layout()
figname = 'entrainment_pfl.pdf'
fig.savefig(figname, dpi=300)
```
## Figure 2
A comparison of the time evolution of the mixed layer depth $h_m$ in GOTM simulations defined by the depth at which $N^2$ reaches its maximum in color, and the prediction of [Price (1979)](https://doi.org/10.1017/S0022112079002366) in black.
First, compute the predicted mixed layer depth.
```
# Price (1979) mixed layer depth prediction:
#   h_m(t) = (2 R_v)^(1/4) * u_star * sqrt(t / N_0),  with R_v = 0.6.
data0 = dataset[casenames[0]]
# Initial buoyancy frequency N_0 -- presumably sampled below the initial
# mixed layer (index 2 of the NN profile); TODO confirm against the setup.
N0 = np.sqrt(data0.NN[2,-1,0,0].values)
# Water-side surface friction velocity.
ustar = data0.u_taus[-1,0,0].values
# Elapsed simulation time in seconds.
sec = ((data0.time - data0.time[0])/np.timedelta64(1, 's')).values
mld_val = np.sqrt(np.sqrt(2.*0.6))*ustar*np.sqrt(sec/N0)
mld_p79 = xr.DataArray(mld_val, dims=['time'], coords={'time': data0.time},
                       attrs={'long_name': 'mixed layer depth (Price 1979)',
                              'units': 'm'})
```
Then, plot the figure. See also Fig. 5a of [Umlauf and Burchard (2005)](https://doi.org/10.1016/j.csr.2004.08.004).
```
fig = plt.figure(figsize=[5,3.5])
hours = sec/3600.
plt.plot(hours, mld_p79.values, color='k', linestyle='-', linewidth=1.5, label='P79')
for case in casenames:
data = dataset[case]
NN = data.NN[:,:,0,0]
mld = get_bld_maxNN(NN)
# plt.plot(hours, mld.values, color=colors[case], linestyle=linestyles[case], label=labels[case])
plt.scatter(hours, mld.values, s=10, color=colors[case], label=labels[case])
plt.ylabel('$h_m$ (m)')
plt.xlabel('Time (hours)')
plt.xlim([hours[0], hours[-1]])
plt.ylim([0,35])
plt.legend(loc='lower right')
plt.grid()
if save_fig:
plt.tight_layout()
figname = 'entrainment_ts.pdf'
fig.savefig(figname, dpi=300)
```
| github_jupyter |
# Testing the experiment classes
```
# from common.experiment import KubemlExperiment, History, TrainOptions, TrainRequest
import pandas as pd
import glob
import numpy as np
import pickle
import matplotlib.pyplot as plt
import torch
import torch.nn as nn
from torchvision import transforms
from torchvision import models
import torch.utils.data as tdata
from torchinfo import summary
from torch import nn
class LeNet(nn.Module):
    """LeNet-5 style convolutional network for 28x28 single-channel input.

    The architecture follows the 1998 LeCun paper; layers are kept as
    individually named attributes (conv1, relu1, ...) so existing
    checkpoints keep loading. Credits to
    https://github.com/ChawDoe/LeNet5-MNIST-PyTorch for the convenience
    of the network definition and the train loop found there.
    """

    def __init__(self):
        super().__init__()
        self.conv1 = nn.Conv2d(1, 6, 5)
        self.relu1 = nn.ReLU()
        self.pool1 = nn.MaxPool2d(2)
        self.conv2 = nn.Conv2d(6, 16, 5)
        self.relu2 = nn.ReLU()
        self.pool2 = nn.MaxPool2d(2)
        self.fc1 = nn.Linear(256, 120)
        self.relu3 = nn.ReLU()
        self.fc2 = nn.Linear(120, 84)
        self.relu4 = nn.ReLU()
        self.fc3 = nn.Linear(84, 10)
        self.relu5 = nn.ReLU()

    def forward(self, x):
        # Two conv -> relu -> pool stages, then flatten into the classifier.
        out = self.pool1(self.relu1(self.conv1(x)))
        out = self.pool2(self.relu2(self.conv2(out)))
        out = out.view(out.shape[0], -1)
        out = self.relu3(self.fc1(out))
        out = self.relu4(self.fc2(out))
        return self.relu5(self.fc3(out))
n = LeNet()
summary(n, input_size=(32, 1, 28, 28))
mode = models.resnet.resnet34()
summary(mode)
'''
Properly implemented ResNet-s for CIFAR10 as described in paper [1].
The implementation and structure of this file is hugely influenced by [2]
which is implemented for ImageNet and doesn't have option A for identity.
Moreover, most of the implementations on the web is copy-paste from
torchvision's resnet and has wrong number of params.
Proper ResNet-s for CIFAR10 (for fair comparision and etc.) has following
number of layers and parameters:
name | layers | params
ResNet20 | 20 | 0.27M
ResNet32 | 32 | 0.46M
ResNet44 | 44 | 0.66M
ResNet56 | 56 | 0.85M
ResNet110 | 110 | 1.7M
ResNet1202| 1202 | 19.4m
which this implementation indeed has.
Reference:
[1] Kaiming He, Xiangyu Zhang, Shaoqing Ren, Jian Sun
Deep Residual Learning for Image Recognition. arXiv:1512.03385
[2] https://github.com/pytorch/vision/blob/master/torchvision/models/resnet.py
If you use this implementation in you work, please don't forget to mention the
author, Yerlan Idelbayev.
'''
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.nn.init as init
from torch.autograd import Variable
__all__ = ['ResNet', 'resnet20', 'resnet32', 'resnet44', 'resnet56', 'resnet110', 'resnet1202']
def _weights_init(m):
classname = m.__class__.__name__
#print(classname)
if isinstance(m, nn.Linear) or isinstance(m, nn.Conv2d):
init.kaiming_normal_(m.weight)
class LambdaLayer(nn.Module):
    """Wrap an arbitrary callable so it can live inside an nn.Module graph."""

    def __init__(self, lambd):
        super().__init__()
        # Stored as a plain attribute; the callable has no parameters.
        self.lambd = lambd

    def forward(self, x):
        # Delegate straight to the wrapped callable.
        return self.lambd(x)
class BasicBlock(nn.Module):
    """Residual block: two 3x3 conv+BN layers plus an identity/downsampling shortcut."""
    # Channel expansion factor of the block's output (1 for basic blocks).
    expansion = 1
    def __init__(self, in_planes, planes, stride=1, option='A'):
        super(BasicBlock, self).__init__()
        # First conv carries the (possibly strided) spatial downsampling.
        self.conv1 = nn.Conv2d(in_planes, planes, kernel_size=3, stride=stride, padding=1, bias=False)
        self.bn1 = nn.BatchNorm2d(planes)
        self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=1, padding=1, bias=False)
        self.bn2 = nn.BatchNorm2d(planes)
        # Identity shortcut unless spatial size or channel count changes.
        self.shortcut = nn.Sequential()
        if stride != 1 or in_planes != planes:
            if option == 'A':
                """
                For CIFAR10 ResNet paper uses option A.
                """
                # Parameter-free shortcut: subsample spatially by taking every
                # other pixel, then zero-pad the channel dim by planes//4 on
                # each side (presumably in_planes == planes//2 here, so the
                # padded result has `planes` channels — confirm at call sites).
                self.shortcut = LambdaLayer(lambda x:
                            F.pad(x[:, :, ::2, ::2], (0, 0, 0, 0, planes//4, planes//4), "constant", 0))
            elif option == 'B':
                # Projection shortcut: 1x1 strided conv + BN (adds parameters).
                self.shortcut = nn.Sequential(
                     nn.Conv2d(in_planes, self.expansion * planes, kernel_size=1, stride=stride, bias=False),
                     nn.BatchNorm2d(self.expansion * planes)
                )
    def forward(self, x):
        out = F.relu(self.bn1(self.conv1(x)))
        out = self.bn2(self.conv2(out))
        # Residual addition happens before the final ReLU.
        out += self.shortcut(x)
        out = F.relu(out)
        return out
class ResNet(nn.Module):
    """CIFAR-10 ResNet: conv stem, three residual stages (16/32/64 channels),
    global average pooling, and a linear classification head.
    """

    def __init__(self, block, num_blocks, num_classes=10):
        super(ResNet, self).__init__()
        self.in_planes = 16
        # 3x3 stem keeping the 32x32 spatial resolution.
        self.conv1 = nn.Conv2d(3, 16, kernel_size=3, stride=1, padding=1, bias=False)
        self.bn1 = nn.BatchNorm2d(16)
        # Stages 2 and 3 halve the spatial size via their first block.
        self.layer1 = self._make_layer(block, 16, num_blocks[0], stride=1)
        self.layer2 = self._make_layer(block, 32, num_blocks[1], stride=2)
        self.layer3 = self._make_layer(block, 64, num_blocks[2], stride=2)
        self.linear = nn.Linear(64, num_classes)
        self.apply(_weights_init)

    def _make_layer(self, block, planes, num_blocks, stride):
        """Build one stage: the first block uses `stride`, the rest stride 1."""
        stage = []
        for s in [stride] + [1] * (num_blocks - 1):
            stage.append(block(self.in_planes, planes, s))
            self.in_planes = planes * block.expansion
        return nn.Sequential(*stage)

    def forward(self, x):
        h = F.relu(self.bn1(self.conv1(x)))
        h = self.layer3(self.layer2(self.layer1(h)))
        # Global average pool over the remaining spatial extent.
        h = F.avg_pool2d(h, h.size()[3])
        return self.linear(h.view(h.size(0), -1))
def resnet20():
    # 3 blocks per stage: 6*3 + 2 = 20 layers (~0.27M params).
    return ResNet(BasicBlock, [3, 3, 3])
def resnet32():
    # 5 blocks per stage: 6*5 + 2 = 32 layers (~0.46M params).
    return ResNet(BasicBlock, [5, 5, 5])
def resnet44():
    # 7 blocks per stage: 6*7 + 2 = 44 layers (~0.66M params).
    return ResNet(BasicBlock, [7, 7, 7])
def resnet56():
    # 9 blocks per stage: 6*9 + 2 = 56 layers (~0.85M params).
    return ResNet(BasicBlock, [9, 9, 9])
def resnet110():
    # 18 blocks per stage: 6*18 + 2 = 110 layers (~1.7M params).
    return ResNet(BasicBlock, [18, 18, 18])
def resnet1202():
    # 200 blocks per stage: 6*200 + 2 = 1202 layers (~19.4M params).
    return ResNet(BasicBlock, [200, 200, 200])
# Instantiate the 56-layer CIFAR-10 ResNet and print its layer summary.
model = resnet56()
summary(model)
```
# Load the dataset
```
# Load the CIFAR-10 training split from pre-exported .npy arrays.
x_train, labels = np.load('./datasets/cifar10/cifar10_x_train.npy'), np.load('./datasets/cifar10/cifar10_y_train.npy')
# Channel-wise normalization using the standard CIFAR-10 means/stds.
normalize = transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010))
# Training-time augmentation: random horizontal flip plus a 32x32 crop
# taken after 4px zero-padding.
train_transf = transforms.Compose([
    transforms.ToPILImage(),
    transforms.RandomHorizontalFlip(),
    transforms.RandomCrop(32, 4),
    transforms.ToTensor(),
    normalize
])
# Validation pipeline: tensor conversion and normalization only.
val_transf = transforms.Compose([
    transforms.ToTensor(),
    normalize
])
from torch.nn.functional import nll_loss, cross_entropy
def train(model: nn.Module, device,
          train_loader: tdata.DataLoader,
          optimizer: torch.optim.Optimizer, epoch) -> float:
    """Run one training epoch and return the mean batch loss.

    Args:
        model: network to train (switched to train mode).
        device: torch device that batches are moved to.
        train_loader: yields (data, target) mini-batches.
        optimizer: optimizer stepping the model's parameters.
        epoch: current epoch number, used only for progress logging.

    Returns:
        Average cross-entropy loss over all batches of the epoch.
    """
    # Fixes vs. the original: removed the unused `criterion` instance
    # (the functional `cross_entropy` is what is actually used), the
    # dead commented-out optimizer/state code, and the pointless
    # pre-initialization of `loss` before the loop.
    model.train()
    total_loss = 0.0
    for batch_idx, (data, target) in enumerate(train_loader):
        data, target = data.to(device), target.to(device)
        optimizer.zero_grad()
        output = model(data)
        loss = cross_entropy(output, target)
        total_loss += loss.item()
        loss.backward()
        optimizer.step()
        # Log progress every 30 batches.
        if batch_idx % 30 == 0:
            print('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}'.format(
                epoch, batch_idx * len(data), len(train_loader.dataset),
                100. * batch_idx / len(train_loader), loss.item()))
    return total_loss / len(train_loader)
def validate(model, device, val_loader: tdata.DataLoader) -> (float, float):
    """Evaluate the model on a validation loader.

    Args:
        model: network to evaluate (switched to eval mode).
        device: torch device that batches are moved to.
        val_loader: yields (data, target) mini-batches.

    Returns:
        Tuple of (accuracy in percent, mean per-batch cross-entropy loss).
    """
    # Fix vs. the original: removed the unused `criterion` instance and
    # reused the computed `accuracy` in the printout instead of
    # recomputing the same expression.
    model.eval()
    test_loss = 0.0
    correct = 0
    with torch.no_grad():
        for data, target in val_loader:
            data, target = data.to(device), target.to(device)
            output = model(data)
            _, predicted = torch.max(output.data, 1)
            test_loss += cross_entropy(output, target).item()  # sum up batch loss
            correct += predicted.eq(target).sum().item()
    test_loss /= len(val_loader)
    accuracy = 100. * correct / len(val_loader.dataset)
    print('\nTest set: Average loss: {:.4f}, Accuracy: {}/{} ({:.0f}%)\n'.format(
        test_loss, correct, len(val_loader.dataset), accuracy))
    return accuracy, test_loss
```
# Functions to load and save state
```
import os
def load_state(optimizer):
    """Restore a previously pickled optimizer state from 'state.pkl', if present."""
    if not os.path.isfile('state.pkl'):
        print('no state found')
        return
    with open('state.pkl', 'rb') as f:
        # Merge the saved per-parameter state into the live optimizer.
        update_state(optimizer, pickle.load(f))
def update_state(optimizer, state):
    """Load saved per-parameter `state` into `optimizer`.

    Keeps the optimizer's current param_groups (hyperparameters) and only
    swaps in the saved state (e.g. momentum buffers).
    """
    merged = {
        'param_groups': optimizer.state_dict()['param_groups'],
        'state': state,
    }
    optimizer.load_state_dict(merged)
def save_state(optimizer):
    """Pickle the optimizer's per-parameter state to 'state.pkl'."""
    print('saving optimizer state')
    with open('state.pkl', 'wb') as fh:
        state = optimizer.state_dict()['state']
        pickle.dump(state, fh)
from torchvision.models.resnet import resnet18
# Fix the RNG seed for reproducible weight initialization.
torch.manual_seed(42)
# Prefer the GPU when available.
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
# device = torch.device("cpu")
# Create the model
# model = create_model(init=True).to(device)
# NOTE(review): this is torchvision's ImageNet-style resnet18, not the
# CIFAR-specific ResNet defined earlier in this notebook — confirm which
# architecture is intended here.
model = resnet18().to(device)
class MnistData(tdata.Dataset):
    """Dataset wrapping pre-loaded image arrays and integer labels.

    NOTE(review): despite the class name, the normalization constants are
    the standard CIFAR-10 channel means/stds, and the instance created
    below is fed CIFAR-10 arrays — the name looks like a leftover; confirm.
    """
    def __init__(self, feat, labels):
        # feat: array of images; labels: array of class ids (flattened to 1-D).
        self.feat = feat
        self.labels = labels.flatten()
        self.normalize = transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010))
        # Per-sample pipeline: to tensor, then channel-wise normalization.
        self.transf = transforms.Compose([
            transforms.ToTensor(),
            self.normalize
        ])
    def __len__(self):
        return len(self.feat)
    def __getitem__(self, idx):
        # Labels are cast to int64 as required by cross-entropy targets.
        return self.transf(self.feat[idx]), self.labels[idx].astype(np.int64)
# Build the training dataset/loader and run a short training session,
# checkpointing the optimizer state between epochs.
train_data = MnistData(x_train, labels)
from torch import optim
train_loader = torch.utils.data.DataLoader(train_data, batch_size=128)
# val_loader = torch.utils.data.DataLoader(val_data, batch_size=128)
for epoch in range(4):
    # Create a fresh optimizer each epoch and restore its saved state
    # (e.g. momentum buffers) from disk.
    optimizer = optim.SGD(model.parameters(), lr=0.1, momentum=0.9, weight_decay=1e-4)
    load_state(optimizer)
    print('Epoch', epoch)
    # BUG FIX: the original re-created the optimizer again right here,
    # which silently discarded the state just restored by load_state().
    # Train with the restored optimizer instead.
    train(model, device, train_loader, optimizer, epoch)
    save_state(optimizer)
    # validate(model, device, val_loader)
optimizer.__dict__
```
| github_jupyter |
# Ada-DiffAct Model using Keras
## libraries to be used
```
import numpy as np
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers
%load_ext tensorboard
import datetime
```
## Custom Activation Functions
```
# Uniform initializers for the three trainable activation coefficients:
# k0 and k1 start in [-1, 2]; the quadratic term k2 starts near zero.
initializer0 = keras.initializers.RandomUniform(minval=-1, maxval=2)
initializer1 = keras.initializers.RandomUniform(minval=-1, maxval=2)
initializer2 = keras.initializers.RandomUniform(minval=-0.05, maxval=0.05)
class Diffact(keras.layers.Layer):
    """Trainable quadratic activation: k0 + k1*x + k2*x^2.

    Each coefficient is a single trainable scalar shared across the whole
    input tensor; the polynomial is applied element-wise.
    """

    def __init__(self):
        super().__init__()
        # Scalar (shape ()) coefficients, each with its own initializer.
        self.k0 = self.add_weight(name='k0', shape=(), initializer=initializer0, trainable=True)
        self.k1 = self.add_weight(name='k1', shape=(), initializer=initializer1, trainable=True)
        self.k2 = self.add_weight(name='k2', shape=(), initializer=initializer2, trainable=True)

    def call(self, inputs):
        # Element-wise quadratic, same term order as the original.
        return self.k0 + inputs * self.k1 + (inputs * inputs) * self.k2
```
## Model Graph
training on MNIST, trial 1
```
num_classes = 10
input_shape = (28, 28, 1)
# the data, split between train and test sets
(x_train, y_train), (x_test, y_test) = keras.datasets.mnist.load_data()
# Scale images to the [0, 1] range
x_train = x_train.astype("float32") / 255
x_test = x_test.astype("float32") / 255
# Make sure images have shape (28, 28, 1)
x_train = np.expand_dims(x_train, -1)
x_test = np.expand_dims(x_test, -1)
print("x_train shape:", x_train.shape)
print(x_train.shape[0], "train samples")
print(x_test.shape[0], "test samples")
# convert class vectors to binary class matrices (one-hot encoding)
y_train = keras.utils.to_categorical(y_train, num_classes)
y_test = keras.utils.to_categorical(y_test, num_classes)
# Functional-API CNN: two conv stages with the trainable Diffact activation,
# then dropout and a softmax head.
inputs = keras.Input(shape=input_shape)
x = layers.Conv2D(32, kernel_size=(3, 3))(inputs)
diffact = Diffact()
x = diffact(x)
x = layers.MaxPooling2D(pool_size=(2, 2))(x)
x = layers.Conv2D(64, kernel_size=(3, 3))(x)
# NOTE(review): the SAME Diffact instance is applied after both conv layers,
# so both stages share one (k0, k1, k2) coefficient set — confirm whether a
# second independent instance was intended here.
x = diffact(x)
x = layers.MaxPooling2D(pool_size=(2, 2))(x)
x = layers.Flatten()(x)
x = layers.Dropout(0.5)(x)
outputs = layers.Dense(num_classes, activation="softmax")(x)
model = keras.Model(inputs=inputs, outputs=outputs, name="mnist_diffact")
model.summary()
batch_size = 128
epochs = 15
model.compile(loss="categorical_crossentropy", optimizer="adam", metrics=["accuracy"])
# TensorBoard logging under a timestamped run directory.
log_dir = "mnistlogs/fit/" + datetime.datetime.now().strftime("%Y%m%d-%H%M%S")
tensorboard_callback = tf.keras.callbacks.TensorBoard(log_dir=log_dir, histogram_freq=1)
model.fit(x_train, y_train, batch_size=batch_size, epochs=epochs, validation_split=0.1, callbacks=[tensorboard_callback])
score = model.evaluate(x_test, y_test, verbose=0)
print("Test loss:", score[0])
print("Test accuracy:", score[1])
print("AF coefficients (weights) {}".format(diffact.get_weights()))
%tensorboard --logdir mnistlogs/fit --port=6051
```
| github_jupyter |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.