code
stringlengths
38
801k
repo_path
stringlengths
6
263
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # Example of converting an .ipynb file to .pdf via LaTeX

# The following file contains sample code that contains:
#
# * a function `makeSampleFrame` to create an arbitrary DataFrame with an integer column and a day-of-the-week column
# * a dictionary comprehension to create a dictionary with DataFrames as values and integers as keys.
# * a dictionary comprehension to group the dictionary by the day-of-the-week.
#
# The grouping performs the aggregation:
# $$
# \bar{Y}_j = \frac{1}{N_j}\sum_{i} Y_{ij}
# $$

# +
import pandas as pd
import numpy.random as npr


def makeSampleFrame():
    """Build a 1000-row sample DataFrame.

    Columns:
        MeterID   -- random integers in the inclusive range 0..100
        DayOfWeek -- random day names drawn with replacement
        value     -- standard-normal floats
    """
    # npr.random_integers() was removed from NumPy; randint() is the
    # replacement, and its upper bound is exclusive, so 101 keeps the
    # original inclusive 0..100 range.
    return pd.DataFrame({
        'MeterID': npr.randint(0, 101, size=1000),
        'DayOfWeek': npr.choice(
            a=['Monday', 'Tuesday', 'Wednesday', 'Thursday',
               'Friday', 'Saturday', 'Sunday'],
            size=1000,
            replace=True),
        'value': npr.normal(size=1000),
    })


# Dictionary of sample frames keyed by an integer index.
dictDFs = {i: makeSampleFrame() for i in range(2)}

# Group each frame by day and meter (typo fixed: "gouped" -> "grouped").
groupedDictDFs = {i: dictDFs[i].groupby(['DayOfWeek', 'MeterID'])
                  for i in range(2)}

groupedDictDFs[0]  # test

# Use GroupBy.mean() directly: the original passed np.mean without ever
# importing numpy as np (NameError), and passing the raw NumPy callable to
# aggregate() is deprecated in modern pandas anyway.
dictAggDFs = {i: groupedDictDFs[i].mean().reset_index() for i in range(2)}

dictAggDFs[1]
Code/Miscellaneous/.ipynb_checkpoints/Grouping in dictionary comprehensions-checkpoint.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# Sampling-distribution demo: draws repeated samples (with replacement) from a
# fixed electorate and compares the spread of the sample proportion of votes
# for candidate A at two sample sizes.

# +
import matplotlib.pyplot as plot
import numpy as np
from random import randint

# +
# Build the electorate: 3000 votes for candidate A and 7000 for candidate B.
vetor = []
for i in range(3000):
    vetor.append('A')
for j in range(7000):
    vetor.append('B')

# +
tentativas = 2000  # number of resampling trials
n1 = 30            # small sample size
n2 = 700           # large sample size

# lists collecting the sample proportion of A-votes, one per sample size
prop_amostral_a_1 = []
prop_amostral_a_2 = []
# -

# Draw the samples with replacement.  The index upper bound is derived from
# the population size instead of the former hard-coded 9999, so the code stays
# correct if the vote counts above are ever changed.
ultimo_indice = len(vetor) - 1
for i in range(tentativas):
    amostra_1 = [vetor[randint(0, ultimo_indice)] for _ in range(n1)]
    amostra_2 = [vetor[randint(0, ultimo_indice)] for _ in range(n2)]
    prop_amostral_a_1.append(amostra_1.count('A') / n1)
    prop_amostral_a_2.append(amostra_2.count('A') / n2)

# +
print("Amostra de tamanho: " + str(n1))
plot.hist(prop_amostral_a_1, bins=15)
plot.xlabel('Probabilidade')
plot.show()
print('Média: ' + str(round(np.average(prop_amostral_a_1), 5)))
print('Desvio Padrão: ' + str(round(np.std(prop_amostral_a_1), 5)))

# +
print("Amostra de tamanho: " + str(n2))
plot.hist(prop_amostral_a_2, bins=15)
plot.show()
print('Média: ' + str(round(np.average(prop_amostral_a_2), 5)))
print('Desvio Padrão: ' + str(round(np.std(prop_amostral_a_2), 5)))
prob_amostral.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + [markdown] slideshow={"slide_type": "slide"} # # Decoding the Frustrations and Vulnerabilities of Modern Text Processing # ## <NAME> # + [markdown] slideshow={"slide_type": "slide"} # Hello! # # This is my 2nd pycon, and my first conference talk! # # I'm here because... # + [markdown] slideshow={"slide_type": "slide"} # ### Text processing is awesome!!! # + [markdown] slideshow={"slide_type": "fragment"} # # Until... # ![](https://pbs.twimg.com/media/DE35D56UwAALgoJ.jpg:large) # + [markdown] slideshow={"slide_type": "slide"} # # Join me in part of my adventure # ![adventure time](http://www.indiewire.com/wp-content/uploads/2015/03/adventure-time.png?w=780) # + [markdown] slideshow={"slide_type": "slide"} # But first lets learn about strings # + [markdown] slideshow={"slide_type": "slide"} # ### Python 2: # * Unicode: # ``` python # print(u"...") # ``` # * Bytes: # ``` python # print("...") # ``` # # ### Python 3: # * Unicode: # ``` python # print("...") # ``` # * Bytes: # ``` python # print(b"...") # ``` # + [markdown] slideshow={"slide_type": "slide"} # Easy right? # + [markdown] slideshow={"slide_type": "fragment"} # # No... # ![](https://media.tenor.com/images/014788c2a75ff6380650f34ca92e04df/tenor.gif) # + [markdown] slideshow={"slide_type": "slide"} # Even with all this uncode support in python3, we *still* come across errors # + [markdown] slideshow={"slide_type": "fragment"} # Python 2 would do implicit conversions, but now with python3, although there's more support, things are more strict and everything has to be explicit. # + [markdown] slideshow={"slide_type": "slide"} # # Bytes vs. Unicode? # + [markdown] slideshow={"slide_type": "slide"} # So, Unicode strings are cool right?! 
# + [markdown] slideshow={"slide_type": "fragment"} # 😎😎😎👍👍👍🔥🔥🔥😍😍😍 # + [markdown] slideshow={"slide_type": "fragment"} # こんにちは # + [markdown] slideshow={"slide_type": "fragment"} # Ḩ̱̙͔͍͉̦̖̮̩͍̝̣ͪ͐͋͐͊ę̭̟̫͇͔̖̠̲̣̬̯͇͍͎̖̟̘̩ͣ͗̽ͯ͢ͅl̸̢̢̢̯̣͇͉̜̥͖̺̭̤̪̣̤̬͚͚̠̪ͣ̅͋͌̒͐̾ͬ̋̄̇͌̊̄͂̽̿ͪͯl̷̵̳͚̠͕͓̈̈́̒͗̄̋ͭ̃̔ͬ͑ͩ̂ǫ̹̼̟͙̋̃͗̏̊̃̎͑ͮ͑̄̏̚̕ ̶̷͓͔͍͇̗̞̫̩͎̗̟̰̗̩̳̗̜̇̅̾͐ͮ̈̔͒̓̈́ͣ̽̂̉͊̀W̴̧͉̱̩̤̞̻ͯ́͋̓̏̓̌ͦ̃ͮ̂̀̕͟o̸̸͕̭͚͇̫̜̮̲͙͐͒͂̒͑̂ͧ̕͢͡ͅŗ̳̳͚̱͈̺̩̹̰͓́͒̔̌ͯͪ̀ͥͣ̐͂ͪͤ͗ͅl̵̪̹̫̠̺̱̯̤͓̞̝̗̆̈́̄̏́͟d̢͇̰̩̹̋͛͌̂ͩ̓ͤ̓́̀͘ͅ # + [markdown] slideshow={"slide_type": "slide"} # But what can we do with byte strings? # + [markdown] slideshow={"slide_type": "fragment"} # They can do some useful things, like # + slideshow={"slide_type": "fragment"} s = b"abc" a = s[0] print(a, chr(a)) # + [markdown] slideshow={"slide_type": "slide"} # How do I make them? # * Make a bytes literal string # * `b"this is a bytes string"` # * Use the `bytes()` Constructor # * `"string".encode(encoding = ..., errors = ...)` # * And more # + [markdown] slideshow={"slide_type": "fragment"} # Converting back... # * `b"bytes".decode(encoding = ..., errors = ...)` # * `str(object = b"", encoding = ..., errors = ...)` # + [markdown] slideshow={"slide_type": "slide"} # However something really cool... # + slideshow={"slide_type": "fragment"} b = bytes.fromhex("2e f0f2 F1") print(b) print(b.hex()) # + [markdown] slideshow={"slide_type": "fragment"} # Handy! # + [markdown] slideshow={"slide_type": "slide"} # So let's pretend you're me... # + [markdown] slideshow={"slide_type": "fragment"} # And you're trying to use these byte things for science! # + [markdown] slideshow={"slide_type": "fragment"} # Trying to smash the stack # + [markdown] slideshow={"slide_type": "slide"} # If you don't know what stack smashing is (aka buffer overflow exploits) # # It's about overflowing a buffer to overwrite memory at a specific location with a malicious value. # + [markdown] slideshow={"slide_type": "slide"} # So you write this... 
# ``` bash # $ python3 -c 'print("3"*72 + "\xdb\x07\x40\x00")' > smash # ``` # + [markdown] slideshow={"slide_type": "slide"} # But it works for all my friends in Python2! # ``` bash # $ python2 -c 'print("3"*72 + "\xdb\x07\x40\x00")' > smash # ``` # + [markdown] slideshow={"slide_type": "slide"} # # # Why? # + [markdown] slideshow={"slide_type": "fragment"} # * In Python3, sys.stdout is a Text I/O object # * And sys.stdout isn't bytes writeable! # * So when you print a string to stdout in Python3, it will first be encoded into utf8, thus not printing the bytes that we want # * ![](https://cdn1.iconfinder.com/data/icons/emoticon-set-volume-4/512/emoticon-60-512.png) # + [markdown] slideshow={"slide_type": "slide"} # This creates problems whenever anything is trying to write as bytes through print. # + [markdown] slideshow={"slide_type": "fragment"} # For example: # # ``` python # sys.stdout = codecs.getwriter("utf-8")(sys.stdout.detach()) # ``` # Would be the main solution to: # + [markdown] slideshow={"slide_type": "fragment"} # *"Halp a unicode dragon is breathing fire"* # ![](http://www.chrisdillustration.com/wp-content/uploads/2017/03/dragon_fire_vector_LRGsliderthumb.jpg) # + [markdown] slideshow={"slide_type": "slide"} # ``` python # sys.stdout = codecs.getwriter("utf-8")(sys.stdout.detach()) # ``` # This gets the streamwriter for the utf8 encoding, then detaches the sys.stdout buffer and sets that as the stream... # + [markdown] slideshow={"slide_type": "slide"} # What's a better fix? # + [markdown] slideshow={"slide_type": "fragment"} # ``` python # import sys; sys.stdout.buffer.write(b"3"*72 + b"\xdb\x07\x40\x00") # ``` # + [markdown] slideshow={"slide_type": "fragment"} # Writes bytes directly to the stdout buffer! # + [markdown] slideshow={"slide_type": "slide"} # There's another problem though... # # Like, what's this encoded in? 
# + [markdown] slideshow={"slide_type": "fragment"} # By default, the character encoding is dependant on your system, taken from `locale.getpreferredencoding()` # # And of course Windows likes to do things differently. # + [markdown] slideshow={"slide_type": "slide"} # But if you're really stuck, you should check out the `errors` argument. # # * strict - default # * ignore # * replace # * xmlcharrefreplace # * backslashreplace # * namereplace - \N{...} (names escape sequences # * surrogateescape - Replaces using the surrogate unicode characters # # So it lets you do things like ignoring unicode errors entirely! # # Or escaping/replacing them, as you should be... # # https://docs.python.org/3/library/codecs.html#error-handlers # + [markdown] slideshow={"slide_type": "slide"} # So we went over `u''` and `b''` string literals... # + [markdown] slideshow={"slide_type": "fragment"} # Can we break text processing in other ways? # + [markdown] slideshow={"slide_type": "fragment"} # What about f-strings? # + [markdown] slideshow={"slide_type": "slide"} # Well first, lets take a step back in time into `str.format` # + [markdown] slideshow={"slide_type": "fragment"} # But to do that, What is a format string exploit? # + [markdown] slideshow={"slide_type": "fragment"} # It's about taking advantage of string formatting to create malicious execution # + [markdown] slideshow={"slide_type": "slide"} # ``` c # #include <stdio.h> # # int main(int argc, char *argv[]){ # char *s; # scanf("%s", &s); # printf(s); # } # ``` # + [markdown] slideshow={"slide_type": "slide"} # ``` c # #include <stdio.h> # # int main(int argc, char *argv[]){ # char *s; # scanf("%s", &s); # printf("%s", s); # } # ``` # + [markdown] slideshow={"slide_type": "slide"} # Does Python have them? # + [markdown] slideshow={"slide_type": "fragment"} # Yes! 
# + [markdown] slideshow={"slide_type": "fragment"} # http://cse.unsw.edu.au/~evank/format_string/ # + [markdown] slideshow={"slide_type": "slide"} # ``` python # {.__init__.__globals__[USERS][1].password} # ``` # + [markdown] slideshow={"slide_type": "fragment"} # ``` python # {.__init__.__globals__[app].secret_key} # # ``` # + [markdown] slideshow={"slide_type": "slide"} # How do we fix them? # # http://lucumr.pocoo.org/2016/12/29/careful-with-str-format/ # + [markdown] slideshow={"slide_type": "slide"} # So now what about these new fancy f-strings? # + [markdown] slideshow={"slide_type": "fragment"} # Well, they're pretty cool. # + slideshow={"slide_type": "fragment"} print(f"{input()}") # + [markdown] slideshow={"slide_type": "slide"} # Can we break them? # + [markdown] slideshow={"slide_type": "slide"} # Well to start with: # * f-strings don't use or have access to locals and globals. # * Recursive interpolation is not supported # + slideshow={"slide_type": "slide"} # What about this? # What happens here? a = "a" b = "b" f"{a}".format(a = b) # + [markdown] slideshow={"slide_type": "fragment"} # f-strings are evaluted during parsing, and thus get evaluated before we can even get to execute the .format # + slideshow={"slide_type": "slide"} # What will this do? s = r"esc\ape" f"{s.replace('\\', '')}" # + [markdown] slideshow={"slide_type": "fragment"} # f-strings can't contain backslashes :( # + [markdown] slideshow={"slide_type": "slide"} # Can they contain colons? # + slideshow={"slide_type": "fragment"} l = (0, 1, 2) f"{l[:1][0]:10}" # + [markdown] slideshow={"slide_type": "slide"} # What about a lambda? # + slideshow={"slide_type": "fragment"} l = [3, 2, 1] f"lambda : {lambda x: x**2}" # + [markdown] slideshow={"slide_type": "slide"} # f-strings inside f-strings? # + slideshow={"slide_type": "fragment"} a = "hello" print(f"""{a.replace(f"{a.replace('a', 'c')}", "b")}""") # + [markdown] slideshow={"slide_type": "slide"} # # nesting? 
# + slideshow={"slide_type": "fragment"} f"result: {value:{width:{0}}.{precision:1}}" # + [markdown] slideshow={"slide_type": "slide"} # So, it's pretty hard to break things using user input like we did with .format # that is provided you don't eval or exec anything that user input is in control of. # + [markdown] slideshow={"slide_type": "fragment"} # But who would do that right? # + [markdown] slideshow={"slide_type": "fragment"} # ``` python # os.system(f"echo {message_from_user}") # myquery = sql(f"SELECT {column} FROM {table};") # myresponse = html(f"<html><body>{response.body}</body></html>") # ``` # + [markdown] slideshow={"slide_type": "slide"} # how do we fix **that**? # + [markdown] slideshow={"slide_type": "fragment"} # PEP501 Proposes i-strings, which are like f-strings but can be escaped before rendered # + [markdown] slideshow={"slide_type": "slide"} # So I hope now you have some extra knowledge # + [markdown] slideshow={"slide_type": "slide"} # In how to avoid this: # # ![](https://pbs.twimg.com/media/DE35GiCUMAAXbCu.jpg) # + [markdown] slideshow={"slide_type": "slide"} # and this: # # ![](http://diysolarpanelsv.com/images/dragon-fire-clipart-4.jpg) # + [markdown] slideshow={"slide_type": "slide"} # # <NAME> # ## Thanks! # ### Questions? # ### Tweet at me: @ekohilas # ### All source on github.com/ekohilas
slides.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # name: python3 # --- # + [markdown] id="view-in-github" colab_type="text" # <a href="https://colab.research.google.com/github/VindhyaHV/AppliedAI_Assignments/blob/main/SGD%20Classifier%20with%20Logloss%20and%20L2%20regularization%20Using%20SGD.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a> # + [markdown] id="7eiDWcM_MC3H" # # <font color='red'>Implement SGD Classifier with Logloss and L2 regularization Using SGD without using sklearn</font> # + [markdown] id="yfe2NTQtLq11" # **There will be some functions that start with the word "grader" ex: grader_weights(), grader_sigmoid(), grader_logloss() etc, you should not change those function definition.<br><br>Every Grader function has to return True.** # + [markdown] id="Fk5DSPCLxqT-" # <font color='red'> Importing packages</font> # + id="42Et8BKIxnsp" import numpy as np import pandas as pd from sklearn.datasets import make_classification from sklearn.model_selection import train_test_split from sklearn.preprocessing import StandardScaler from sklearn import linear_model # + [markdown] id="NpSk3WQBx7TQ" # <font color='red'>Creating custom dataset</font> # + id="BsMp0oWzx6dv" # please don't change random_state X, y = make_classification(n_samples=50000, n_features=15, n_informative=10, n_redundant=5, n_classes=2, weights=[0.7], class_sep=0.7, random_state=15) # make_classification is used to create custom dataset # Please check this link (https://scikit-learn.org/stable/modules/generated/sklearn.datasets.make_classification.html) for more details # + colab={"base_uri": "https://localhost:8080/"} id="L8W2fg1cyGdX" outputId="35ba2ff3-b62e-4bca-f22d-0d6cc561a043" X.shape, y.shape # + [markdown] id="x99RWCgpqNHw" # <font color='red'>Splitting data into train and test </font> # 
+ id="0Kh4dBfVyJMP" #please don't change random state X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.25, random_state=15) # + id="gONY1YiDq7jD" # Standardizing the data. scaler = StandardScaler() X_train = scaler.fit_transform(X_train) X_test = scaler.transform(X_test) # + colab={"base_uri": "https://localhost:8080/"} id="0DR_YMBsyOci" outputId="6b7593be-1c21-44a1-a48f-fae8f8ea8d74" X_train.shape, y_train.shape, X_test.shape, y_test.shape # + [markdown] id="BW4OHswfqjHR" # # <font color='red' size=5>SGD classifier</font> # + colab={"base_uri": "https://localhost:8080/"} id="3HpvTwDHyQQy" outputId="6059a987-1444-441b-ce07-db8e66d0db5d" # alpha : float # Constant that multiplies the regularization term. # eta0 : double # The initial learning rate for the ‘constant’, ‘invscaling’ or ‘adaptive’ schedules. clf = linear_model.SGDClassifier(eta0=0.0001, alpha=0.0001, loss='log', random_state=15, penalty='l2', tol=1e-3, verbose=2, learning_rate='constant') clf # Please check this documentation (https://scikit-learn.org/stable/modules/generated/sklearn.linear_model.SGDClassifier.html) # + colab={"base_uri": "https://localhost:8080/"} id="YYaVyQ2lyXcr" outputId="47aced7e-8bfe-4c02-bf16-3b92809be59f" clf.fit(X=X_train, y=y_train) # fitting our model # + colab={"base_uri": "https://localhost:8080/"} id="EAfkVI6GyaRO" outputId="6c759922-ca39-4546-e038-b04dc074d13a" clf.coef_, clf.coef_.shape, clf.intercept_ #print(type(clf.coef_)) #clf.coef_ will return the weights #clf.coef_.shape will return the shape of weights #clf.intercept_ will return the intercept term # + [markdown] id="_-CcGTKgsMrY" # # # ``` # # This is formatted as code # ``` # # ## <font color='red' size=5> Implement Logistic Regression with L2 regularization Using SGD: without using sklearn </font> # # # + [markdown] id="W1_8bdzitDlM" # # # # 1. We will be giving you some functions, please write code in that functions only. # # 2. 
After every function, we will be giving you expected output, please make sure that you get that output. # # # # # + [markdown] id="zU2Y3-FQuJ3z" # # <br> # # * Initialize the weight_vector and intercept term to zeros (Write your code in <font color='blue'>def initialize_weights()</font>) # # * Create a loss function (Write your code in <font color='blue'>def logloss()</font>) # # $log loss = -1*\frac{1}{n}\Sigma_{for each Yt,Y_{pred}}(Ytlog10(Y_{pred})+(1-Yt)log10(1-Y_{pred}))$ # - for each epoch: # # - for each batch of data points in train: (keep batch size=1) # # - calculate the gradient of loss function w.r.t each weight in weight vector (write your code in <font color='blue'>def gradient_dw()</font>) # # $dw^{(t)} = x_n(y_n − σ((w^{(t)})^{T} x_n+b^{t}))- \frac{λ}{N}w^{(t)})$ <br> # # - Calculate the gradient of the intercept (write your code in <font color='blue'> def gradient_db()</font>) <a href='https://drive.google.com/file/d/1nQ08-XY4zvOLzRX-lGf8EYB5arb7-m1H/view?usp=sharing'>check this</a> # # $ db^{(t)} = y_n- σ((w^{(t)})^{T} x_n+b^{t}))$ # # - Update weights and intercept (check the equation number 32 in the above mentioned <a href='https://drive.google.com/file/d/1nQ08-XY4zvOLzRX-lGf8EYB5arb7-m1H/view?usp=sharing'>pdf</a>): <br> # $w^{(t+1)}← w^{(t)}+α(dw^{(t)}) $<br> # # $b^{(t+1)}←b^{(t)}+α(db^{(t)}) $ # - calculate the log loss for train and test with the updated weights (you can check the python assignment 10th question) # - And if you wish, you can compare the previous loss and the current loss, if it is not updating, then # you can stop the training # - append this loss in the list ( this will be used to see how loss is changing for each epoch after the training is over ) # # + [markdown] id="ZR_HgjgS_wKu" # <font color='blue'>Initialize weights </font> # + id="GecwYV9fsKZ9" def initialize_weights(dim): ''' In this function, we will initialize our weights and bias''' #initialize the weights to zeros array of (1,dim) dimensions #you use 
zeros_like function to initialize zero, check this link https://docs.scipy.org/doc/numpy/reference/generated/numpy.zeros_like.html #initialize bias to zero w = np.zeros_like(dim) b = 0 return w,b # + colab={"base_uri": "https://localhost:8080/"} id="A7I6uWBRsKc4" outputId="a60f47f5-aa85-4723-8424-f25528fe78f0" dim=X_train[0] w,b = initialize_weights(dim) print('w =',(w)) print('b =',str(b)) print(w.shape) # + [markdown] id="4MI5SAjP9ofN" # <font color='cyan'>Grader function - 1 </font> # + colab={"base_uri": "https://localhost:8080/"} id="Pv1llH429wG5" outputId="39fb6bd7-1e50-499d-8981-d3ca1fb0a91c" dim=X_train[0] w,b = initialize_weights(dim) def grader_weights(w,b): assert((len(w)==len(dim)) and b==0 and np.sum(w)==0.0) return True grader_weights(w,b) # + [markdown] id="QN83oMWy_5rv" # <font color='blue'>Compute sigmoid </font> # + [markdown] id="qPv4NJuxABgs" # $sigmoid(z)= 1/(1+exp(-z))$ # + id="nAfmQF47_Sd6" def sigmoid(z): ''' In this function, we will return sigmoid of z''' # compute sigmoid(z) and return sigma=1/(1+np.exp(-z)) return sigma # + [markdown] id="9YrGDwg3Ae4m" # <font color='cyan'>Grader function - 2</font> # + colab={"base_uri": "https://localhost:8080/"} id="P_JASp_NAfK_" outputId="6946e28a-acc0-4bab-f4f9-21dcc6f6cb93" def grader_sigmoid(z): val=sigmoid(z) assert(val==0.8807970779778823) return True grader_sigmoid(2) # + [markdown] id="gS7JXbcrBOFF" # <font color='blue'> Compute loss </font> # + [markdown] id="lfEiS22zBVYy" # $log loss = -1*\frac{1}{n}\Sigma_{for each Yt,Y_{pred}}(Ytlog10(Y_{pred})+(1-Yt)log10(1-Y_{pred}))$ # + id="VaFDgsp3sKi6" #https://www.programmersought.com/article/24172229251/ def log10_(input): if input==0: input+=1e-10 return np.log10(input) def logloss(y_true,y_pred): '''In this function, we will compute log loss ''' n = len(y_true) loss = 0 for i in range(len(y_true)): loss+=y_true[i]*log10_(y_pred[i])+(1-y_true[i])*log10_(1-y_pred[i]) loss=(-1/n)*loss return loss # + [markdown] id="Zs1BTXVSClBt" # <font 
color='cyan'>Grader function - 3 </font> # + colab={"base_uri": "https://localhost:8080/"} id="LzttjvBFCuQ5" outputId="a775843c-a1fb-4afc-e40a-b7f7c88fd4e2" def grader_logloss(true,pred): loss=logloss(true,pred) assert(loss==0.07644900402910389) return True true=[1,1,0,1,0] pred=[0.9,0.8,0.1,0.8,0.2] grader_logloss(true,pred) # + [markdown] id="tQabIadLCBAB" # <font color='blue'>Compute gradient w.r.to 'w' </font> # + [markdown] id="YTMxiYKaCQgd" # $dw^{(t)} = x_n(y_n − σ((w^{(t)})^{T} x_n+b^{t}))- \frac{λ}{N}w^{(t)}$ <br> # + id="NMVikyuFsKo5" # + [markdown] id="RUFLNqL_GER9" # <font color='cyan'>Grader function - 4 </font> # + colab={"base_uri": "https://localhost:8080/"} id="WI3xD8ctGEnJ" outputId="91fea164-9465-43b0-ebc6-1b9497e9f11d" def grader_dw(x,y,w,b,alpha,N): grad_dw=gradient_dw(x,y,w,b,alpha,N) assert(np.sum(grad_dw)==2.613689585) return True grad_x=np.array([-2.07864835, 3.31604252, -0.79104357, -3.87045546, -1.14783286, -2.81434437, -0.86771071, -0.04073287, 0.84827878, 1.99451725, 3.67152472, 0.01451875, 2.01062888, 0.07373904, -5.54586092]) grad_y=0 grad_w,grad_b=initialize_weights(grad_x) alpha=0.0001 N=len(X_train) grader_dw(grad_x,grad_y,grad_w,grad_b,alpha,N) # + [markdown] id="LE8g84_GI62n" # <font color='blue'>Compute gradient w.r.to 'b' </font> # + [markdown] id="fHvTYZzZJJ_N" # $ db^{(t)} = y_n- σ((w^{(t)})^{T} x_n+b^{t})$ # + [markdown] id="pbcBzufVG6qk" # <font color='cyan'>Grader function - 5 </font> # + id="_S1xhSfDFi-g" colab={"base_uri": "https://localhost:8080/"} outputId="a6ea922c-0e7e-4b2b-8344-85682edaa04e" def grader_db(x,y,w,b): grad_db=gradient_db(x,y,w,b) assert(grad_db==-0.5) return True grad_x=np.array([-2.07864835, 3.31604252, -0.79104357, -3.87045546, -1.14783286, -2.81434437, -0.86771071, -0.04073287, 0.84827878, 1.99451725, 3.67152472, 0.01451875, 2.01062888, 0.07373904, -5.54586092]) grad_y=0 grad_w,grad_b=initialize_weights(grad_x) alpha=0.0001 N=len(X_train) grader_db(grad_x,grad_y,grad_w,grad_b) # + [markdown] 
id="aJrGlQADwwPN" # predicting y_train / y_test # + id="4nhU_LAm0z-B" def pred(w,b, X): N = len(X) predict = [] for i in range(N): z=np.dot(w,X[i])+b if sigmoid(z) >= 0.5: # sigmoid(w,x,b) returns 1/(1+exp(-(dot(x,w)+b))) predict.append(1) else: predict.append(0) return np.array(predict) # + [markdown] id="TCK0jY_EOvyU" # <font color='blue'> Implementing logistic regression</font> # + id="tSJOk6ByuQYa" import matplotlib.pyplot as plt # + id="JGsQwVdpS8yR" def train(X_train,y_train,X_test,y_test,epochs,alpha,eta0): ''' In this function, we will implement logistic regression''' #Here eta0 is learning rate #implement the code as follows # initalize the weights (call the initialize_weights(X_train[0]) function) # for every epoch # for every data point(X_train,y_train) #compute gradient w.r.to w (call the gradient_dw() function) #compute gradient w.r.to b (call the gradient_db() function) #update w, b # predict the output of x_train[for all data points in X_train] using w,b #compute the loss between predicted and actual values (call the loss function) # store all the train loss values in a list # predict the output of x_test[for all data points in X_test] using w,b #compute the loss between predicted and actual values (call the loss function) # store all the test loss values in a list # you can also compare previous loss and current loss, if loss is not updating then stop the process and return w,b w,b=initialize_weights(X_train[0]) y_pred = [] y_test_pred = [] train_loss = [] test_loss = [] for i in range(epochs): for i in range(len(X_train)): dw = gradient_dw(X_train[i],y_train[i],w,b,alpha,N) db = gradient_db(X_train[i],y_train[i],w,b) w=w+(eta0*dw) b=b+(eta0*db) y_pred = pred(w,b,X_train) y_test_pred = pred(w,b,X_test) train_loss.append(logloss(y_train,y_pred)) test_loss.append(logloss(y_test,y_test_pred)) return w,b,train_loss,test_loss # + id="sUquz7LFEZ6E" alpha=0.0001 eta0=0.0001 N=len(X_train) epochs=14 
w,b,train_loss,test_loss=train(X_train,y_train,X_test,y_test,epochs,alpha,eta0) # + [markdown] id="l4Zf_wPARlwY" # <font color='red'>Goal of assignment</font> # + [markdown] id="l3eF_VSPSH2z" # Compare your implementation and SGDClassifier's the weights and intercept, make sure they are as close as possible i.e difference should be in terms of 10^-3 # + id="nx8Rs9rfEZ1R" colab={"base_uri": "https://localhost:8080/"} outputId="cf94d2fb-2e4e-438e-aa90-9cfb69bf21c4" # these are the results we got after we implemented sgd and found the optimal weights and intercept print(clf.coef_, clf.coef_.shape, clf.intercept_) print('==========================================') print(w,b) print('==========================================') print(abs(clf.coef_-w)) # + [markdown] id="230YbSgNSUrQ" # <font color='blue'>Plot epoch number vs train , test loss </font> # # * epoch number on X-axis # * loss on Y-axis # + id="1O6GrRt7UeCJ" colab={"base_uri": "https://localhost:8080/", "height": 312} outputId="c72b3683-4d22-4420-bbff-d28ecc721c52" plt.plot(np.arange(epochs),train_loss) plt.plot(np.arange(epochs),test_loss) plt.xlabel('epochs') plt.ylabel('train_loss,test_loss') plt.grid() plt.title('epochs v/s loss') # + id="FUN8puFoEZtU" colab={"base_uri": "https://localhost:8080/"} outputId="64fb20a6-145c-4e9c-aa65-f5c99a3cf60b" print(1-np.sum(y_train - pred(w,b,X_train))/len(X_train)) print(1-np.sum(y_test - pred(w,b,X_test))/len(X_test))
SGD Classifier with Logloss and L2 regularization Using SGD.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Optimal probabilistic clustering - Part II # > ... # # - toc: true # - branch: master # - badges: true # - comments: true # - categories: [Clustering, Entropy, Membership Entropy] # - image: images/post_image_optimal_clustering.png # - hide: false # - search_exclude: false # - author: <NAME> # + import numpy as np from scipy.linalg import norm from scipy.spatial.distance import cdist class OPC: def __init__(self, n_clusters=10, max_iter=150, m=2, error=1e-5, random_state=42): self.u, self.centers = None, None self.n_clusters = n_clusters self.max_iter = max_iter self.m = m self.error = error self.random_state = random_state def fit(self, X, initial_centers=None): N = X.shape[0] C = self.n_clusters centers = initial_centers # u = np.random.dirichlet(np.ones(C), size=N) r = np.random.RandomState(self.random_state) u = r.rand(N,C) u = u / np.tile(u.sum(axis=1)[np.newaxis].T,C) iteration = 0 while iteration < self.max_iter: u2 = u.copy() if iteration==0 and not centers is None: centers = centers print(centers.shape) print("-------------------------------------------") else: centers = self.next_centers(X, u) u = self.next_u(X, centers) iteration += 1 # Stopping rule if norm(u - u2) < self.error: break self.u = u self.centers = centers return self def next_centers(self, X, u): um = u ** self.m return (X.T @ um / np.sum(um, axis=0)).T def next_u(self, X, centers): return self._predict(X, centers) def _predict(self, X, centers): power = float(2 / (self.m - 1)) temp = cdist(X, centers) ** power denominator_ = temp.reshape((X.shape[0], 1, -1)).repeat(temp.shape[-1], axis=1) denominator_ = temp[:, :, np.newaxis] / denominator_ return 1 / denominator_.sum(2) def predict(self, X): if len(X.shape) == 1: X = np.expand_dims(X, axis=0) u = self._predict(X, 
self.centers) return np.argmax(u, axis=-1) # - # + ######################################## Part I #from fcmeans import FCM def run_cluster(n_clusters, features, initial_centers=None, random_state=42): # membership probabilities model = OPC(n_clusters=n_clusters, random_state=random_state, max_iter=1000, error=1e-9) model = model.fit(features, initial_centers=initial_centers) p = model.u centers = model.centers # representative cluster representative_cluster = np.argmax(p, 1) # membership entropy Sx = -np.sum(p*np.log(p), 1) / np.log(n_clusters) # total membership entropy (across the entire feature space) S = np.sum(Sx) return centers, p, representative_cluster, Sx, S # - # Check if I'm introducing a regularization in inferring the optimal number of clusters regularization = 1.0 # ## Experimental results import numpy as np # (n,k,m) n observations, k clusters, at least m observations per cluster def construct_random_partition(n, k, m, seed=None): rand = np.random.RandomState(seed=seed) parts = rand.choice(range(1, n-k*(m-1)), k-1, replace=False) parts.sort() parts = np.append(parts, n-k*(m-1)) parts = np.append(parts[0], np.diff(parts)) - 1 + m return parts partition = construct_random_partition(n=200, k=5, m=2, seed=40) print(partition) # **Generation of random datasets** def generate_random_dataset(partition, n_features, std, seed): random_state = np.random.RandomState(seed=seed) dataset = list() for n in partition: # cluster centre coordinates cluster_centre = random_state.uniform(-1, 1, n_features) # observation coordinates for observation in range(0, n): dataset.append(cluster_centre+std*random_state.standard_normal(n_features)) dataset = np.array(dataset) # shuffles the observations dataset = dataset[random_state.permutation(dataset.shape[0]), :] return np.array(dataset) dataset = generate_random_dataset(partition=partition, n_features=2, std=0.05, seed=42) # We will, at each iteration, collect the mean-intracluster entropy Si = list() iteration = 0 centers = 
None n_clusters_trials = np.arange(2, 10, 1) # Some helpful functions # + ### Minimization of membership entropy def minimize_membership_entropy(n_clusters_trials, dataset, regularization=0, random_state=42): total_entropies = list() for trial in n_clusters_trials: _, _, _, _, total_entropy = run_cluster(n_clusters=trial, features=dataset, random_state=random_state) total_entropies.append(total_entropy+regularization*trial) optimal_nclusters = n_clusters_trials[np.argmin(total_entropies)] return optimal_nclusters, total_entropies ### Cluster quality def calculate_cluster_quality(p, representative_cluster, PRINT=True): Si = dict() for clust in set(representative_cluster): probs = p[np.argmax(p, 1)==clust, :] entropy = -np.sum(probs*np.log(probs), 1) / np.log(probs.shape[1]) Si.update({clust: np.mean(entropy)}) if PRINT: [print("Mean membership entropy across cluster {0} = {1}".format(i, np.round(Si[i], 3))) for i in Si.keys()] return Si # - # ### Iteration 1 # **1.1) Minimization of membership entropy** optimal_nclusters, total_entropies = minimize_membership_entropy(n_clusters_trials, dataset, regularization) print("Optimal number of clusters =", optimal_nclusters) # **1.2) Clustering** centers, p, representative_cluster, Sx, S = run_cluster(optimal_nclusters, dataset) # **1.3) Cluster quality** Si.append(calculate_cluster_quality(p, representative_cluster)) # **1.4) Plot** # + import matplotlib from matplotlib import cm import matplotlib.pyplot as plt def make_rgb_transparent(rgb, alpha): bg_rgb = [1, 1, 1] return [alpha * c1 + (1 - alpha) * c2 for (c1, c2) in zip(rgb, bg_rgb)] colormap = cm.get_cmap('Accent') edgecolors = list() facecolors = list() for i in range(0, optimal_nclusters): edgecolors.append(make_rgb_transparent(rgb=colormap(1.0*i/(optimal_nclusters-1)), alpha=1)) facecolors.append(make_rgb_transparent(rgb=colormap(1.0*i/(optimal_nclusters-1)), alpha=0.65)) # + fig, axes = plt.subplots(1, 2, figsize=(10, 4)) axes[0].plot([optimal_nclusters, 
optimal_nclusters], [0, np.max(total_entropies)], color=(0.8,0.6,0.6), linewidth=2) axes[0].plot(n_clusters_trials, total_entropies, color=(0.46,0.46,0.46), linewidth=2) axes[0].set_xlabel('Number of clusters') axes[0].set_ylabel('Total membership entropy') color_seq = list() for j in range(0, dataset.shape[0]): color_seq.append(make_rgb_transparent(edgecolors[representative_cluster[j]], 1-Sx[j])) for i in range(0, optimal_nclusters): axes[1].scatter([], [], label=str(i), color=edgecolors[i]) axes[1].scatter(dataset[:,0], dataset[:,1], marker='.', s=60, edgecolors=(0.6,0.6,0.6,0.5), c=color_seq) axes[1].scatter(centers[:,0], centers[:,1], color=(0.8,0.2,0.2, 0.8), marker="v") axes[1].set_xlabel('X') axes[1].set_ylabel('Y') axes[1].set_xlim(-1.2,1.2) axes[1].set_ylim(-1.2,1.2) axes[1].legend(loc="best") plt.tight_layout() plt.show() # - # **1.5) Finds clusters with an below-average mean membership entropy** print("Intra-cluster mean membership entropy") Si[iteration] bad_clusters = np.array(list(Si[iteration].keys()))[list(Si[iteration].values()) > np.mean(list(Si[iteration].values()))] print("Clusters with above-average membership entropy") bad_clusters good_clusters = np.array(list(set(Si[iteration].keys()).difference(set(bad_clusters)))) good_clusters centers_good_clusters = centers[good_clusters,:] # **1.6) Collects observations in the above selected clusters** # + inds = [] for cluster in bad_clusters: inds += list(np.where(representative_cluster==cluster)[0]) inds = np.squeeze(np.array(inds)) dataset_bad_clusters = dataset[inds,:] # - optimal_nclusters, total_entropies = minimize_membership_entropy(n_clusters_trials, dataset_bad_clusters, regularization) print("Optimal number of clusters =", optimal_nclusters) new_centers, p, representative_cluster, Sx, S = run_cluster(optimal_nclusters, dataset) trial_centers = np.vstack((centers_good_clusters, new_centers)) centers, p, representative_cluster, Sx, S = run_cluster(centers.shape[0], dataset, 
initial_centers=trial_centers) # + optimal_nclusters = centers.shape[0] edgecolors = list() facecolors = list() for i in range(0, optimal_nclusters): edgecolors.append(make_rgb_transparent(rgb=colormap(1.0*i/(optimal_nclusters-1)), alpha=1)) facecolors.append(make_rgb_transparent(rgb=colormap(1.0*i/(optimal_nclusters-1)), alpha=0.65)) fig, axes = plt.subplots(1, 2, figsize=(10, 4)) color_seq = list() for j in range(0, dataset.shape[0]): color_seq.append(make_rgb_transparent(edgecolors[representative_cluster[j]], 1-Sx[j])) for i in range(0, optimal_nclusters): axes[1].scatter([], [], label=str(i), color=edgecolors[i]) axes[1].scatter(dataset[:,0], dataset[:,1], marker='.', s=60, edgecolors=(0.6,0.6,0.6,0.5), c=color_seq) axes[1].scatter(centers[:,0], trial_centers[:,1], color=(0.8,0.2,0.2, 0.8), marker="v") axes[1].set_xlabel('X') axes[1].set_ylabel('Y') axes[1].set_xlim(-1.2,1.2) axes[1].set_ylim(-1.2,1.2) axes[1].legend(loc="best") plt.tight_layout() plt.show() # - len(edgecolors) # Initialize fcmeans with different seeds the do statistics would probably help # # # # **References:** # {% bibliography --cited %}
__writing/.ipynb_checkpoints/__writing-optimal_probabilistic_clustering_part2_test-checkpoint.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # ## Boosting: Fit and evaluate a model # # Using the Titanic dataset from [this](https://www.kaggle.com/c/titanic/overview) Kaggle competition. # # In this section, we will fit and evaluate a simple Gradient Boosting model. # ### Read in Data # + import joblib import pandas as pd from sklearn.ensemble import GradientBoostingClassifier from sklearn.model_selection import GridSearchCV import warnings warnings.filterwarnings('ignore', category=FutureWarning) warnings.filterwarnings('ignore', category=DeprecationWarning) train_features = pd.read_csv('../Data/train_features.csv') train_labels = pd.read_csv('../Data/train_labels.csv', header=None) # - # ### Hyperparameter tuning # # ![GB](img/gb.png) def print_results(results): print('BEST PARAMS: {}\n'.format(results.best_params_)) means = results.cv_results_['mean_test_score'] stds = results.cv_results_['std_test_score'] for mean, std, params in zip(means, stds, results.cv_results_['params']): print('{} (+/-{}) for {}'.format(round(mean,3), round(std *2, 3), params)) # + gb = GradientBoostingClassifier() parameters = { 'n_estimators' : [5, 50, 250, 500], 'max_depth': [1, 3, 5, 7, 9], 'learning_rate': [0.01, 0.1, 1, 10, 100] } cv = GridSearchCV(gb, parameters, cv=5) cv.fit(train_features, train_labels.values.ravel()) print_results(cv) # - # ### Write out pickled model joblib.dump(cv.best_estimator_, '../Pickled_Models/GB_model.pkl')
ML - Applied Machine Learning - Algorithms/06.Boosting/02.Boosting - Fit and evaluate a model.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python [default] # language: python # name: python3 # --- # <center> # <img src="../../img/ods_stickers.jpg"> # ## Открытый курс по машинному обучению # </center> # Авторы материала: Data Science интерн Ciklum, студент магистерской программы CSDS UCU Виталий Радченко, программист-исследователь Mail.ru Group, старший преподаватель Факультета Компьютерных Наук ВШЭ Юрий Кашницкий. Материал распространяется на условиях лицензии [Creative Commons CC BY-NC-SA 4.0](https://creativecommons.org/licenses/by-nc-sa/4.0/). Можно использовать в любых целях (редактировать, поправлять и брать за основу), кроме коммерческих, но с обязательным упоминанием автора материала. # # <center> Тема 5. Композиции алгоритмов, случайный лес</center> # ## <center> Часть 3. Важность признаков</center> # Очень часто вы хотите понять свой алгоритм, почему он именно так, а не иначе дал определенный ответ. Или если не понять его полностью, то хотя бы какие переменные больше всего влияют на результат. Из случайного леса можно довольно просто получить данную информацию. # # ### Суть метода # # По данной картинке интуитивно понятно, что важность признака «Возраст» в задаче кредитного скоринга выше, чем важность признака «Доход» . Формализуется это с помощью понятия прироста информации. # <img src="../../img/credit_scoring_toy_tree.gif" align='center'> # # Если построить много деревьев решений (случайный лес), то чем выше в среднем признак в дереве решений, тем он важнее в данной задаче классификации/регрессии. При каждом разбиении в каждом дереве улучшение критерия разделения (в нашем случае коэффициент Джини) — это показатель важности, связанный с переменной разделения, и накапливается он по всем деревьям леса отдельно для каждой переменной. # # Давайте немного углубимся в детали. 
Среднее снижение точности, вызываемое переменной, определяется во время фазы вычисления out-of-bag ошибки. Чем больше уменьшается точность предсказаний из-за исключения (или перестановки) одной переменной, тем важнее эта переменная, и поэтому переменные с бо́льшим средним уменьшением точности более важны для классификации данных. Среднее уменьшение коэффициента Джини (или ошибки mse в задачах регрессии) является мерой того, как каждая переменная способствует однородности узлов и листьев в окончательной модели случайного леса. Каждый раз, когда отдельная переменная используется для разбиения узла, коэффициент Джини для дочерних узлов рассчитывается и сравнивается с коэффициентом исходного узла. Коэффициент Джини является мерой однородности от 0 (однородной) до 1 (гетерогенной). Изменения в значении критерия разделения суммируются для каждой переменной и нормируются в конце вычисления. Переменные, которые приводят к узлам с более высокой чистотой, имеют более высокое снижение коэффициента Джини. # # А теперь представим все вышеописанное в виде формул. 
# $$ \large VI^{T} = \frac{\sum_{i \in \mathfrak{B}^T}I \Big(y_i=\hat{y}_i^{T}\Big)}{\Big |\mathfrak{B}^T\Big |} - \frac{\sum_{i \in \mathfrak{B}^T}I \Big(y_i=\hat{y}_{i,\pi_j}^{T}\Big)}{\Big |\mathfrak{B}^T\Big |} $$ # # $ \large \hat{y}_i^{(T)} = f^{T}(x_i) $ — предсказание класса перед перестановкой/удалением признака # $ \large \hat{y}_{i,\pi_j}^{(T)} = f^{T}(x_{i,\pi_j}) $ — предсказание класса после перестановки/удаления признака # $ \large x_{i,\pi_j} = (x_{i,1}, \dots , x_{i,j-1}, \quad x_{\pi_j(i),j}, \quad x_{i,j+1}, \dots , x_{i,p})$ # Заметим, что $ \large VI^{(T)}(x_j) = 0 $, если $ \large X_j $ не находится в дереве $ \large T $ # # Расчет важности признаков в ансамбле: # — ненормированные # $$ \large VI(x_j) = \frac{\sum_{T=1}^{N}VI^{T}(x_j)}{N} $$ # # — нормированные # $$ \large z_j = \frac{VI(x_j)}{\frac{\hat{\sigma}}{\sqrt{N}}} $$ # **Пример.** # # Рассмотрим результаты анкетирования посетителей хостелов с сайтов Booking.com и TripAdvisor.com. Признаки — средние оценки по разным факторам (перечислены ниже) — персонал, состояние комнат и т.д. Целевой признак — рейтинг хостела на сайте. 
# from __future__ import division, print_function # отключим всякие предупреждения Anaconda import warnings warnings.filterwarnings('ignore') # %matplotlib inline from matplotlib import pyplot as plt import seaborn as sns # russian headres from matplotlib import rc font = {'family': 'Verdana', 'weight': 'normal'} rc('font', **font) import pandas as pd import numpy as np from sklearn.ensemble.forest import RandomForestRegressor # + hostel_data = pd.read_csv("../../data/hostel_factors.csv") features = {"f1":u"Персонал", "f2":u"Бронирование хостела ", "f3":u"Заезд в хостел и выезд из хостела", "f4":u"Состояние комнаты", "f5":u"Состояние общей кухни", "f6":u"Состояние общего пространства", "f7":u"Дополнительные услуги", "f8":u"Общие условия и удобства", "f9":u"Цена/качество", "f10":u"ССЦ"} forest = RandomForestRegressor(n_estimators=1000, max_features=10, random_state=0) forest.fit(hostel_data.drop(['hostel', 'rating'], axis=1), hostel_data['rating']) importances = forest.feature_importances_ indices = np.argsort(importances)[::-1] # Plot the feature importancies of the forest num_to_plot = 10 feature_indices = [ind+1 for ind in indices[:num_to_plot]] # Print the feature ranking print("Feature ranking:") for f in range(num_to_plot): print("%d. %s %f " % (f + 1, features["f"+str(feature_indices[f])], importances[indices[f]])) plt.figure(figsize=(15,5)) plt.title(u"Важность конструктов") bars = plt.bar(range(num_to_plot), importances[indices[:num_to_plot]], color=([str(i/float(num_to_plot+1)) for i in range(num_to_plot)]), align="center") ticks = plt.xticks(range(num_to_plot), feature_indices) plt.xlim([-1, num_to_plot]) plt.legend(bars, [u''.join(features["f"+str(i)]) for i in feature_indices]); # - # На рисунке выше видно, что люди больше всего обращают внимание на персонал и соотношение цена/качество и на основе впечатления от данных вещей пишут свои отзывы. 
Но разница между этими признаками и менее влиятельными признаками не очень значительная, и выкидывание какого-то признака приведет к уменьшению точности нашей модели. Но даже на основе нашего анализа мы можем дать рекомендации отелям в первую очередь лучше готовить персонал и/или улучшить качество до заявленной цены. #
jupyter/topic05_bagging_rf/topic5_part3_feature_importance.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: vrb-know # language: python # name: vrb-know # --- import pandas as pd import numpy as np # + # Source: https://www.fema.gov/openfema-data-page/disaster-declarations-summaries disas = pd.read_csv('DisasterDeclarationsSummaries.csv' ) print(disas.shape) disas.head() # - disas.tail() disas.info() # Keep # Two character code that defines if this is a major disaster, fire management, or emergency declaration. For more information on the disaster process, please visit https://www.fema.gov disas['declarationType'].value_counts() # Denotes whether the Individuals and Households program was declared for this disaster. For more information on the program, please visit https://www.fema.gov # This is given out when people need temporary housing # As such this category should definetly be kept as it relates to # distruction of property disas['ihProgramDeclared'].value_counts() disas['iaProgramDeclared'].value_counts() # Keep disas['incidentType'].value_counts() # ### Do these program columns give us much useful information? 
# Share of incidents of a given type for which a FEMA program was declared.
# The ratios recorded in the comments below were obtained by swapping the
# program column ('ih'/'ia'/'pa'/'hm' + 'ProgramDeclared').
n_program = disas[disas['incidentType'] == 'Biological']['hmProgramDeclared'].sum()
n_incident = len(disas[disas['incidentType'] == 'Biological']['hmProgramDeclared'])
# ih issued when Biological == 0.5139253809774041
# ia issued when Biological == 0
# pa issued when Biological == 0.9997372569626904
# hm issued when Biological == 0
n_program/n_incident

n_program = disas[disas['incidentType'] == 'Hurricane']['hmProgramDeclared'].sum()
n_incident = len(disas[disas['incidentType'] == 'Hurricane']['hmProgramDeclared'])
# ih issued when Hurricane == 0.13192304489048054
# ia issued when Hurricane == 0.1860581327558924
# pa issued when Hurricane == 0.976263846089781
# hm issued when Hurricane == 0.3223952694261681
n_program/n_incident

n_program = disas[disas['incidentType'] == 'Fire']['hmProgramDeclared'].sum()
n_incident = len(disas[disas['incidentType'] == 'Fire']['hmProgramDeclared'])
# ih issued when Fire == 0.0442604226156482
# ia issued when Fire == 0.09994288977727013
# pa issued when Fire == 0.9486007995431183
# hm issued when Fire == 0.31267846944603084
n_program/n_incident

# Remove unnecessary columns
disas_trimmed = disas[[
    'state',
    'declarationType',
    'declarationDate',
    'incidentType',
    'declarationTitle',
    'incidentBeginDate',
    'incidentEndDate',
    'fipsStateCode',
    'fipsCountyCode',
    'designatedArea'
]]

disas_trimmed.head()

disas_trimmed['fipsCountyCode'].value_counts()

# County-level FIPS lookup table; the first 4 rows of the file are preamble.
codes = pd.read_csv('../travel_revenue/all-geocodes-v2018.csv', skiprows=4)
print(codes.shape)
codes.head()

# State-level FIPS lookup table.
states = pd.read_csv('../travel_revenue/StatesFIPSCodes.csv')
print(states.shape)
states.head()


def get_state_name(state_code):
    """Return the state name for a given state FIPS code.

    Parameters
    ----------
    state_code : int
        Two-digit state FIPS code.

    Returns
    -------
    str
        State name as listed in the ``states`` lookup table.

    Raises
    ------
    IndexError
        If the code is not present in the lookup table.
    """
    query_string = f"{state_code} == `STATE_FIPS`"
    # BUG FIX: the original referenced an undefined name ``state_fips``
    # (NameError on every call); the table loaded above is ``states``.
    return states.query(query_string)['STATE_NAME'].iloc[0]


def convert_fips_city(state_code, county_code):
    """Convert a (state FIPS, county FIPS) pair to a '<County>, <State>' string.

    Parameters
    ----------
    state_code : int
        Two-digit state FIPS code.
    county_code : int
        Three-digit county FIPS code.

    Returns
    -------
    str
        e.g. ``'Appling County, Georgia'``.

    Raises
    ------
    IndexError
        If either code is missing from the lookup tables.
    """
    query_string = (
        f"{int(state_code)} == `State Code (FIPS)` "
        f"& {int(county_code)} == `County Code (FIPS)`"
    )
    # The original executed this query twice and discarded the first result;
    # run it once and keep the matched area name.
    county = codes.query(query_string)[
        'Area Name (including legal/statistical area description)'
    ].iloc[0]
    # Reuse the state-name helper instead of duplicating its query inline.
    state = get_state_name(state_code)
    return str(county + ', ' + state)


disas_trimmed.info()

convert_fips_city(13, 1)

# +
# %%time
loc_full = []
for i, row in disas_trimmed.iterrows():
    try:
        # Use column names instead of positional row[7]/row[8]: clearer and
        # robust to column reordering in disas_trimmed.
        loc_full.append(convert_fips_city(row['fipsStateCode'], row['fipsCountyCode']))
    except Exception:
        # Best-effort lookup: codes missing from either table become NaN.
        loc_full.append(np.nan)
# -

len(loc_full) == len(disas_trimmed)

disas_trimmed.insert(10, 'loc_full', loc_full)
disas_trimmed.head()

# Very few NaNs
disas_trimmed['loc_full'].isna().sum() / len(disas_trimmed['loc_full'])

pd.pivot_table(disas_trimmed, index=['loc_full', ], aggfunc='count')#['declarationTitle']

pv = pd.pivot_table(disas_trimmed, index=['loc_full', 'incidentType'], aggfunc='count')['declarationTitle']
pv.head(10)

pd.DataFrame(pv)

disas_trimmed['incidentType'].nunique()

# Wide table: one row per location, one column per incident type, cell = count.
final_df = pv.unstack()
print(final_df.shape)
final_df.head()

final_df.to_csv('cols_disaster_stats.csv')
data/nat_disasters_cost/exploring_csv.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + # Keras from keras.models import Sequential from keras.layers import Bidirectional, LSTM, Dense, Embedding, SpatialDropout1D from keras.optimizers import adam from keras.callbacks import EarlyStopping from keras.preprocessing.sequence import pad_sequences from keras.preprocessing.text import Tokenizer from sklearn.preprocessing import LabelEncoder import keras # Regular Expression import re # NLTK from nltk.tokenize import word_tokenize from nltk import FreqDist from nltk.stem import WordNetLemmatizer # EDA from string import punctuation import pandas as pd import numpy as np # - # Data Load df = pd.read_csv("../input/movie-review-sentiment-analysis-kernels-only/train.tsv", sep=" ") df.head() len(df) df.isnull().sum() df['Sentiment'].value_counts() # Preprocessing df['Phrase'] = df['Phrase'].apply(lambda x: x.lower()) df['Phrase'] = df['Phrase'].apply((lambda x: re.sub('[^A-z\s]','',x))) lemma=WordNetLemmatizer() def clean_text(text): text_corpus=[] for i in range(0,len(text)): review = str(text[i]) review = [lemma.lemmatize(w) for w in word_tokenize(str(review))] review = ' '.join(review) text_corpus.append(review) return text_corpus # + #df['Phrase'] = df['Phrase'].map(lambda x : x if len(x.split(" ")) > 1 else None) # - df['clean_text'] = clean_text(df['Phrase'].values) df.head() # Total Words aa = ' '.join(list(df['clean_text'])) aa = list(set(aa.split(" "))) len(aa) from sklearn.utils import shuffle df = shuffle(df) df.head() # Tokenizer vocabulary_size = len(aa) tokenizer = Tokenizer(num_words=vocabulary_size, split=' ') tokenizer.fit_on_texts(df['clean_text'].values) sequences = tokenizer.texts_to_sequences(df['clean_text'].values) data = pad_sequences(sequences)#, maxlen=45) from keras.utils.np_utils import to_categorical # Encoder 
encoder = LabelEncoder() encoder = encoder.fit_transform(df['Sentiment']) target = to_categorical(encoder) data.shape, target.shape from keras.backend import zeros embeddings_index = dict() f = open('../input/glove6b300dtxt/glove.6B.300d.txt', encoding='utf-8') for line in f: values = line.split() word = values[0] coefs = np.asarray(values[1:], dtype='float32') embeddings_index[word] = coefs f.close() embedding_matrix = np.zeros((vocabulary_size, 300)) for word, index in tokenizer.word_index.items(): if index > vocabulary_size - 1: break else: embedding_vector = embeddings_index.get(word) if embedding_vector is not None: embedding_matrix[index] = embedding_vector from keras.layers import GRU, Dropout # Model model = Sequential() model.add(Embedding(vocabulary_size, 300, input_length = data.shape[1], weights = [embedding_matrix], trainable=True)) model.add(SpatialDropout1D(0.2)) model.add(Bidirectional(LSTM(256, return_sequences=True))) model.add(Bidirectional(GRU(256))) model.add(Dropout(0.5)) model.add(Dense(target.shape[1], activation='softmax')) model.compile(loss = 'categorical_crossentropy', optimizer='adam',metrics = ['accuracy']) print(model.summary()) early_stopping_filter = EarlyStopping(monitor='val_loss', patience=2) #model.fit(data, target, validation_split=0.1, epochs=4, callbacks=[early_stopping_filter], batch_size=256) model.fit(data, target, epochs=4, callbacks=[early_stopping_filter], batch_size=256) # # Testing testdf = pd.read_csv("../input/movie-review-sentiment-analysis-kernels-only/test.tsv", sep=" ") testdf.head() testdf['Phrase'] = testdf['Phrase'].apply(lambda x: x.lower()) testdf['Phrase'] = testdf['Phrase'].apply((lambda x: re.sub('[^A-z\s]','',x))) testdf['clean_test'] = clean_text(testdf['Phrase'].values) test_sequences = tokenizer.texts_to_sequences(testdf['clean_test'].values) test_data = pad_sequences(test_sequences, maxlen=data.shape[1]) y_pred = model.predict_classes(test_data, verbose=1) submissiondf = pd.DataFrame({'PhraseId': 
testdf['PhraseId'], 'Sentiment': y_pred}) submissiondf.head() submissiondf.to_csv("sampleSubmission.csv", index=False)
code/movie-review-sentiment-analysis-first-kernel-sub.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + import numpy as np x = np.array([[1, 2, 3], [4, 5, 6]]) print("x:\n{}".format(x)) # + from scipy import sparse # 대각선 원소는 1이고 나머지는 0인 2차원 NumPy배열을 만듭니다 eye = np.eye(4) print("NumPy 배열:\n{}".format(eye)) # - # NumPy 배열을 CSR 포맷의 SciPy 희소 행렬로 변환합니다. # 0이 아닌 원소만 저장됩니다. sparse_matrix = sparse.csr_matrix(eye) print("SciPy의 CSR 행렬:\n{}".format(sparse_matrix)) data = np.ones(4) row_indices = np.arange(4) col_indices = np.arange(4) eye_coo = sparse.coo_matrix((data, (row_indices, col_indices))) print("COO 표현:\n{}".format(eye_coo)) # + # %matplotlib inline import matplotlib.pyplot as plt # -10에서 10까지 100개의 간격으로 나뉜 배열을 생성합니다 x = np.linspace(-10, 10, 100) # 사인(sin) 함수를 사용하여 y 배열을 생성합니다. y = np.sin(x) # 플롯(plot) 함수는 한 배열의 값을 다른 배열에 대응해서 선 그래프를 그립니다. plt.plot(x, y, marker="x") # - print("hello") # + from IPython.display import display import pandas as pd data = {'Name': ["John", "Anna", "Peter", "Linda"], 'Location': ["New York", "Paris", "Berlin", "London"], 'Age': [24, 13, 53, 33] } data_pandas = pd.DataFrame(data) display(data_pandas) # - display(data_pandas[data_pandas.Age > 30])
chapter01/exercise.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3 (ipykernel)
#     language: python
#     name: python3
# ---

# To flex both our plotting and function writing muscles, let's write a function to do some plotting! Your function should:
#
# * take a data frame as produced by the function above as input
# * allow the user to choose between a strip, violin, or box plot
# * set one of the above three be the default
# * have a docstr so users can get help() on it
# * produce the plot requested by the user (of course!)

def make_plot(in_data, plot_type = 'violin') :
    '''
    Draw a strip, box, or violin (the default) plot of reaction times.

    Parameters
    ----------
    in_data : pandas.DataFrame
        Must contain columns 'RTs', 'sex', and 'strain' (any length).
    plot_type : str, optional
        One of 'strip', 'box', or 'violin' (default 'violin').

    Raises
    ------
    ValueError
        If plot_type is not one of the three supported kinds.
    '''
    # Validate up front so a typo gives a clear message instead of a
    # seaborn stack trace.
    valid_kinds = ('strip', 'box', 'violin')
    if plot_type not in valid_kinds:
        raise ValueError(
            "plot_type must be one of {}, got {!r}".format(valid_kinds, plot_type))
    import seaborn as sns
    sns.catplot(y = "RTs", x="strain", data = in_data, hue = "sex", kind=plot_type);

# NOTE(review): `our_data` is not defined in this file — presumably created by
# an earlier notebook cell; confirm before running standalone.
make_plot(our_data, "box")

help(make_plot)
examples/tut020ExerciseFunction.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: test-geo # language: python # name: test-geo # --- # + import pandas as pd import geopandas as gpd from shapely.geometry import Point, mapping, shape from imp import reload from numpy import mean from planet_utils import search from numpy.random import randint, choice import random from datetime import datetime import folium import json import requests import os import cartopy.crs as ccrs from cartopy import feature #from retrying import retry from IPython.display import Image import matplotlib.pyplot as plt import matplotlib.lines as mlines from matplotlib.patches import Circle import numpy as np from multiprocessing.dummy import Pool as ThreadPool plt.rcParams['figure.figsize'] = (20,20) # %matplotlib inline #print(os.environ["PL_API_KEY"]) NUM_RANDOM_DATES = 10 NUM_RANDOM_LOCATIONS = 20 # - # # API Search Candidate Selection Protocol # The goal of this notebook is to develop the pathway from a set of single-measurement points to a set of cropped PlanetScope imagery for a given date band. 
# # ## Extract 2017 Measurement Locations snowdata = pd.read_csv("../data/snow_summary_all_2009_2017_locs.csv", parse_dates = ["snow_appearance_date", "snow_disappearance_date", "date_min", "date_max"]) snowdata = snowdata[snowdata.year >= 2017] snowdata['geometry'] = [Point(xy) for xy in zip(snowdata.longitude, snowdata.latitude)] snowdata = gpd.GeoDataFrame(snowdata) snowdata.crs = {'init' : 'epsg:4326'} # + locations = snowdata.dropna(subset=["longitude", 'latitude']).drop_duplicates("Location") # - locations = locations.loc[choice(locations.index, NUM_RANDOM_LOCATIONS, replace=False)] len(locations) # ## Add bounding boxes boxes = locations[['Location', 'geometry']].copy() boxes.geometry = [g.buffer(0.005, cap_style=3) for g in boxes.geometry] # ## Search a = search.SimpleSearch(boxes.geometry.values[0], datetime(2018, 5, 1), datetime(2018, 5, 10)).query() cc = ([abs(pd.to_datetime(r['acquired']) - datetime(2016, 5, 1)) for r in a.properties.values]) a.id reload(search) dates = locations[['Location', "snow_appearance_date", "snow_disappearance_date"]] searcher = search.Search(boxes, dates, dry=False, key='Location', start_col='snow_appearance_date', end_col="snow_disappearance_date") results = searcher.query() # ## Parse Results # Choose `NUM_RANDOM_DATES` dates from results for each loc # + loc_img_ids = {} for group in results.groupby('loc_id'): if (len(group[1]) >= NUM_RANDOM_DATES): loc_img_ids[group[0]] = list(set(choice(group[1].id.values, NUM_RANDOM_DATES, replace=False))) else: loc_img_ids[group[0]] = list(set(group[1].id.values)) # - CLIP_API_URL = "https://api.planet.com/compute/ops/clips/v1/" IMAGEDIR = "../images/" PL_API_KEY = os.environ["PL_API_KEY"] # + def clip_request_and_download(loc, image): @retry(wait_fixed=5000) def _check_clip_op(id): r = requests.get("{_base}/{id}".format(_base = CLIP_API_URL, id=id), auth=(PL_API_KEY, "")) if r.json()['state'] != "succeeded": print("...waiting") raise Exception("Not Yet") else: print("response found.") 
return(r.json()) geom = boxes.loc[loc].geometry payload = { "aoi" : mapping(geom), "targets" : [{ "item_id" : image, "item_type" : "PSScene4Band", "asset_type" : 'analytic' }] } r = requests.post(CLIP_API_URL, auth=(PL_API_KEY, ""), json=payload) print(r.json()) response = _check_clip_op(r.json()['id']) image_url = response['_links']['results'][0] local_filename = os.path.join(IMAGEDIR, "{loc}_{img}.zip".format(loc=loc, img=image)) r = requests.get(image_url, stream=True, auth=(PL_API_KEY, "")) with open(local_filename, 'wb') as f: for chunk in r.iter_content(chunk_size=1024): if chunk: # filter out keep-alive new chunks f.write(chunk) return local_filename # - len(loc_img_ids) reload(download) files = {} for loc_id, img_ids in loc_img_ids.items(): box = boxes.loc[loc_id].geometry dl = download.CroppedDownload(loc_id, box, img_ids[], IMAGEDIR) files[loc_id] = dl.run()
_historical/notebooks/pipeline-all-with-download.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Ridge Regression Demo # Ridge extends LinearRegression by providing L2 regularization on the coefficients when predicting response y with a linear combination of the predictors in X. It can reduce the variance of the predictors, and improves the conditioning of the problem. # # The Ridge Regression function implemented in the cuml library allows the user to change the fit_intercept, normalize, solver and alpha parameters. Here is a brief on RAPIDS' Ridge Regression's parameters: # 1. alpha:float or double. Regularization strength - must be a positive float. Larger values specify stronger regularization. Array input will be supported later. # 1. solver:‘eig’ or ‘svd’ or ‘cd’ (default = ‘eig’). Eig uses a eigendecomposition of the covariance matrix, and is much faster. SVD is slower, but is guaranteed to be stable. CD or Coordinate Descent is very fast and is suitable for large problems. # 1. fit_intercept:boolean (default = True). If True, Ridge tries to correct for the global mean of y. If False, the model expects that you have centered the data. # 1. normalize:boolean (default = False). If True, the predictors in X will be normalized by dividing by it’s L2 norm. If False, no scaling will be done. # # The methods that can be used with the Ridge Regression are: # 1. fit: Fit the model with X and y. # 1. get_params: Sklearn style return parameter state # 1. predict: Predicts the y for X. # 1. set_params: Sklearn style set parameter state to dictionary of params. # # The model accepts only numpy arrays or cudf dataframes as the input. In order to convert your dataset to cudf format please read the cudf documentation on https://rapidsai.github.io/projects/cudf/en/latest/. 
It is important to understand that the 'svd' solver will run slower than the 'eig' solver however, the 'svd' solver is more stable and robust. Therefore, we would recomend that you use the 'eig' solver when a slight error is acceptable. For additional information please refer to the documentation on https://rapidsai.github.io/projects/cuml/en/latest/index.html # + import numpy as np import pandas as pd import cudf import os from cuml import Ridge as cuRidge from sklearn.linear_model import Ridge as skRidge from sklearn.datasets import make_regression from sklearn.metrics import mean_squared_error # Select a particular GPU to run the notebook os.environ["CUDA_VISIBLE_DEVICES"]="0" # - # # Helper Functions # + # helps to calculate the time required by a cell to run from timeit import default_timer class Timer(object): def __init__(self): self._timer = default_timer def __enter__(self): self.start() return self def __exit__(self, *args): self.stop() def start(self): """Start the timer.""" self.start = self._timer() def stop(self): """Stop the timer. 
Calculate the interval in seconds.""" self.end = self._timer() self.interval = self.end - self.start # - # check if mortgage dataset is present and then extract the data from it, else just create a random dataset for regression import gzip # change the path of the mortgage dataset if you have saved it in a different directory def load_data(nrows, ncols, cached = 'data/mortgage.npy.gz'): train_rows = int(nrows*0.8) if os.path.exists(cached): print('use mortgage data') with gzip.open(cached) as f: X = np.load(f) # the 4th column is 'adj_remaining_months_to_maturity' # used as the label X = X[:,[i for i in range(X.shape[1]) if i!=4]] y = X[:,4:5] rindices = np.random.randint(0,X.shape[0]-1,nrows) X = X[rindices,:ncols] y = y[rindices] df_y_train = pd.DataFrame({'fea%d'%i:y[0:train_rows,i] for i in range(y.shape[1])}) df_y_test = pd.DataFrame({'fea%d'%i:y[train_rows:,i] for i in range(y.shape[1])}) else: print('use random data') # create a random regression dataset X,y = make_regression(n_samples=nrows,n_features=ncols,n_informative=ncols, random_state=0) df_y_train = pd.DataFrame({'fea0':y[0:train_rows,]}) df_y_test = pd.DataFrame({'fea0':y[train_rows:,]}) df_X_train = pd.DataFrame({'fea%d'%i:X[0:train_rows,i] for i in range(X.shape[1])}) df_X_test = pd.DataFrame({'fea%d'%i:X[train_rows:,i] for i in range(X.shape[1])}) return df_X_train, df_X_test, df_y_train, df_y_test # # Run tests # + # %%time # nrows = number of samples # ncols = number of features of each sample nrows = 2**20 ncols = 399 #split the dataset into training and testing sets, in the ratio of 80:20 respectively X_train, X_test, y_train, y_test = load_data(nrows,ncols) print('training data',X_train.shape) print('training label',y_train.shape) print('testing data',X_test.shape) print('testing label',y_test.shape) # - # %%time # use the sklearn ridge regression model to fit the training dataset skridge = skRidge(fit_intercept=False, normalize=True, alpha=0.1) skridge.fit(X_train, y_train) # %%time # 
calculate the mean squared error of the sklearn ridge regression model on the testing dataset sk_predict = skridge.predict(X_test) error_sk = mean_squared_error(y_test,sk_predict) # %%time # convert the pandas dataframe to cudf format X_cudf = cudf.DataFrame.from_pandas(X_train) X_cudf_test = cudf.DataFrame.from_pandas(X_test) y_cudf = y_train.values y_cudf = y_cudf[:,0] y_cudf = cudf.Series(y_cudf) # %%time # run the cuml ridge regression model to fit the training dataset. Eig is the faster algorithm, but svd is more accurate curidge = cuRidge(fit_intercept=False, normalize=True, solver='svd', alpha=0.1) curidge.fit(X_cudf, y_cudf) # %%time # calculate the mean squared error of the testing dataset using the cuml ridge regression model cu_predict = curidge.predict(X_cudf_test).to_array() error_cu = mean_squared_error(y_test,cu_predict) # print the mean squared error of the sklearn and cuml model to analyse them print("SKL MSE(y):") print(error_sk) print("CUML MSE(y):") print(error_cu)
cuml/ridge_regression_demo.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # %%capture ## compile PyRoss for this notebook import os owd = os.getcwd() os.chdir('../../') # %run setup.py install os.chdir(owd) import numpy as np import pyross # Get individual contact matrices CH, CW, CS, CO = pyross.contactMatrix.UK() # Generate class with contact matrix for SIR model with UK contact structure generator = pyross.contactMatrix.ContactMatrixFunction(CH, CW, CS, CO) # get constant contact matrix function (this is plug-and-play for model.simulate) C = generator.constant_contactMatrix() # compare: Does the constant contact matrix function yield the sum of the individual contact matrices? (CH + CW + CS + CO == C(123)).all() # ## Create matrix for temporal intervention # + times= [1, 2, 3] # temporal boundaries between different contact-behaviour # prefactors for CW, CS, CO: interventions = [[0.9,0.9,0.8], # before first time [0.5,0.4,0.3], # between first and second time [0.8,0.7,0.6], # between second and third time [0.9,0.3,0.5]] # for times larger than third time # generate corresponding contact matrix function C = generator.interventions_temporal(times=times,interventions=interventions) # + # Check: Does the contact matrix function give what it should? 
# times at which to evaluate contact matrix function test_times = [-0.1, 0.5, 1.1, 2.9, 5] # for each time, we here enter the corresponding row from the "interventions" matrix by hand test_indices = [0, 0, 1, 2, 3] for i,t in enumerate(test_times): j = test_indices[i] lhs = CH + interventions[j][0]**2*CW + interventions[j][1]**2*CS + interventions[j][2]**2*CO rhs = C(t) print(np.allclose(lhs, rhs)) # - # ## Create matrix for population-threshold driven intervention # + # For the SIR model, we have 3*M population numbers (S,Ia,Is) # We now consider M = 1 # (Possible extension: Include class R) # thresholds for switching thresholds = np.array([ [0,20,0], [0,40,0], [0, 100, 0]]) # interventions interventions = [[0.5,0.2,0.3], # before first time [0.2,0.1,0.1], # between first and second time [0.4,0.5,0.2], # between second and third time [0.7,0.1,0.1]] # for times larger than third time # generate contact matrix function C = generator.interventions_threshold(thresholds=thresholds,interventions=interventions) # Note that this contact matrix function now takes 4 arguments! # C == C(t, S, Ia, Is) # + # Check: Does the contact matrix function give what it should? test_populations = [ [ 10,10,10] , [0, 10, 10], [10, 50,100], [100,200,0]] # The function is written so that for a "population vector" state = (S,Ia,Is), # the intervention with the largest index i such that # state[j] >= thresholds[index,j] for all j # is chosen. # Put differently, we look for the first row (from above) in the matrix "thresholds" such that # all population numbers exceed the threshold. # for each time, we here enter the corresponding row from the "interventions" matrix by hand test_indices = [0, 0, 2, 3] # -
examples/contactMatrix/ex01-SIR.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # name: python3 # --- # + [markdown] id="view-in-github" colab_type="text" # <a href="https://colab.research.google.com/github/marilynle/DS-Unit-3-Sprint-2-SQL-and-Databases/blob/master/module2-sql-for-analysis/Assignment_module_2.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a> # + id="U0kjNHjIoe1c" colab_type="code" outputId="5129ee51-25d1-4159-b885-d24cd876fb66" colab={"base_uri": "https://localhost:8080/"} # !pip install psycopg2-binary # + id="t-uKWs49pDZJ" colab_type="code" colab={} import pandas as pd import psycopg2 import sqlite3 # + id="xmGOyOu4tkC8" colab_type="code" colab={} df = pd.read_csv('https://raw.githubusercontent.com/marilynle/DS-Unit-3-Sprint-2-SQL-and-Databases/master/module2-sql-for-analysis/titanic.csv') # + id="4q_8Xh66tkIs" colab_type="code" outputId="b826c613-1496-43df-bb93-b6462ba5236e" colab={"base_uri": "https://localhost:8080/", "height": 204} df.head() # + id="SLfuJbYDpvVV" colab_type="code" colab={} # setting up and inserting the titanic data into a PostgreSQL database # + id="FgxegLREti2s" colab_type="code" colab={} database = 'wnpxjdxe' user = 'wnpxjdxe' password = '<PASSWORD>' host = 'rajje.db.elephantsql.com' # + id="jFcKngoYvQmj" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 357} outputId="fa2482cf-ed2d-4a43-da6b-2f24a844972f" # !wget https://github.com/LambdaSchool/DS-Unit-3-Sprint-2-SQL-and-Databases/blob/master/module2-sql-for-analysis/titanic.csv?raw=true # + id="f5D87wL1vgKY" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 119} outputId="1a8eca81-0539-4bbb-c76f-0077f0125477" # !mv 'titanic.csv?raw=true' titanic.csv # !ls -alh # + id="sylN3lymIlgL" colab_type="code" colab={} pg_conn = psycopg2.connect(database=database, 
user=user, password=password, host=host) pg_curs = pg_conn.cursor() # + id="BBM3SmE8qYUI" colab_type="code" colab={} # set up a new table for the Titanic data (titanic.csv) create_titanic_table = """ CREATE TABLE titanic( index serial PRIMARY KEY, survived INT, p_class INT, name VARCHAR (100) NOT NULL, sex VARCHAR (100) NOT NULL, age INT, siblings_spouses_aboard INT, parents_children_aboard INT, fare INT ); """ # + id="X83Txn5BG9Nw" colab_type="code" colab={} pg_curs.execute(create_titanic_table) # + id="v9Gv7YrCIywF" colab_type="code" colab={} show_tables = """ SELECT * FROM pg_catalog.pg_tables WHERE schemaname != 'pg_catalog' AND schemaname != 'information_schema'; """ # + id="SB2OZ6RaJGhU" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="0e300ecd-b4ae-4b38-bc94-06fbfa4d05a0" pg_curs.execute(show_tables) pg_curs.fetchall() # + id="Lcl_rDrwqYaa" colab_type="code" colab={} # Once it is set up, write a insert_titanic.py script that uses psycopg2 to connect to and upload the data from the csv, and add the file to your repo pg_curs.close() #closed the cursor pg_conn.commit() #commited changes to database
module2-sql-for-analysis/Assignment_module_2.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + _cell_guid="b1076dfc-b9ad-4769-8c92-a6c4dae69d19" _kg_hide-input=true _uuid="8f2839f25d086af736a60e9eeb907d3b93b6e0e5" import numpy as np import pandas as pd import matplotlib.pyplot as plt import seaborn as sns from sklearn.preprocessing import StandardScaler , MinMaxScaler , RobustScaler from sklearn.model_selection import train_test_split , cross_val_score from sklearn.linear_model import LogisticRegression from sklearn.ensemble import RandomForestClassifier from sklearn.svm import SVC import os import warnings warnings.filterwarnings('ignore') print(os.listdir("../input/")) # + _cell_guid="79c7e3d0-c299-4dcb-8224-4455121ee9b0" _uuid="d629ff2d2480ee46fbb7e2d37f6b5fab8052498a" train = pd.read_csv('../input/train.csv' , index_col = 'PassengerId') label = train['Survived'] test = pd.read_csv('../input/test.csv', index_col = 'PassengerId') index = test.index # - train.head(3) train.info() # > <h3>Survived</h3> # Target variable for this dataset is Survived . So let us do some analysis on this field first. sns.countplot(label) # More than half (around 60%) of the passengers died. # <h3> Male and Female</h3> fig, ax =plt.subplots(1,3 , figsize=(10, 6) , sharex='col', sharey='row') a = sns.countplot(x = 'Sex' , data=train , ax = ax[0] , order=['male' , 'female']) b = sns.countplot(x = 'Sex' , data= train[label == 1] , ax = ax[1] , order=['male' , 'female']) c = sns.countplot(x = 'Sex' , data= train[ ((train['Age'] < 21) & (label == 1)) ] , order=['male' , 'female']) ax[0].set_title('All passenger') ax[1].set_title('Survived passenger') ax[2].set_title('Survived passenger under age 21') # 1. Majority of passengers were male on Titanic.<br> # 2. 
Most of the female survived.<br> # <h3>Passanger Class</h3> fig, ax =plt.subplots(1,3 , figsize=(10, 6) , sharex='col', sharey='row') a = sns.countplot(x = 'Pclass' , data=train , ax = ax[0] , order=[1 ,2,3]) b = sns.countplot(x = 'Pclass' , data= train[label == 1] , ax = ax[1] , order=[1 ,2,3]) c = sns.countplot(x = 'Pclass' , data= train[ ((train['Age'] < 21) & (label == 1)) ] , order=[1,2,3]) ax[0].set_title('All passanger') ax[1].set_title('Survived passanger') ax[2].set_title('Survived passanger under age 21') # 1. Most of the poor people died (ie From passenger class 3) . # 2. Most poor people who survived were under age 21 # > <h3>Embarked</h3> fig, ax =plt.subplots(1,3 , figsize=(10, 6) , sharex='col', sharey='row') a = sns.countplot(x = 'Embarked' , data=train , ax = ax[0] , order=['S' ,'Q','C']) b = sns.countplot(x = 'Embarked' , data= train[label == 1] , ax = ax[1] , order=['S' ,'Q','C']) c = sns.countplot(x = 'Embarked' , data= train[ ((train['Age'] < 21) & (label == 1)) ] , order=['S' ,'Q','C']) ax[0].set_title('All passanger') ax[1].set_title('Survived passanger') ax[2].set_title('Survived passanger under age 21') # 1. Most people boarded from Southampton since it the starting port of Titanic. # 2. Most of the people who boarded from Southampton died. # <h3> Feature Engineering</h3> # <h4>Deck </h4> # A deck is a permanent covering over a compartment or a hull of a ship. On a boat or ship, the primary or upper deck is the horizontal structure that forms the "roof" of the hull, strengthening it and serving as the primary working surface.<br> # It also gives information in which part of the ship a particular passenger might be when the ship was shinking. 
# More information can be found here [here](https://en.wikipedia.org/wiki/RMS_Titanic) in Dimensions and layout section<br> # We can get this information from the first letter of Cabin name if it not NaN train['Deck'] = train.Cabin.str.get(0) test['Deck'] = test.Cabin.str.get(0) train['Deck'] = train['Deck'].fillna('NOTAVL') test['Deck'] = test['Deck'].fillna('NOTAVL') #Replacing T deck with closest deck G because there is only one instance of T train.Deck.replace('T' , 'G' , inplace = True) train.drop('Cabin' , axis = 1 , inplace =True) test.drop('Cabin' , axis = 1 , inplace =True) # <h4>Lets count the missing values in train and test</h4> train.isna().sum() test.isna().sum() # In training set there is missing value in **Embarked** and **Age**<br> # In training set there is missing value in **Fare** and **Age** # <h4>Let's fill the missing values in Embarked with the most frequent value in train set</h4> train.loc[train.Embarked.isna() , 'Embarked'] = 'S' # <h4>In the above bar graph we saw that Pclass ,sex , Embarked were the determing factor for the servival of a passenger we will group them using these features and fill the median age in the corresponding missing values in the group</h4> age_to_fill = train.groupby(['Pclass' , 'Sex' , 'Embarked'])[['Age']].median() age_to_fill for cl in range(1,4): for sex in ['male' , 'female']: for E in ['C' , 'Q' , 'S']: filll = pd.to_numeric(age_to_fill.xs(cl).xs(sex).xs(E).Age) train.loc[(train.Age.isna() & (train.Pclass == cl) & (train.Sex == sex) &(train.Embarked == E)) , 'Age'] =filll test.loc[(test.Age.isna() & (test.Pclass == cl) & (test.Sex == sex) &(test.Embarked == E)) , 'Age'] =filll # Lets check if the above for loop is correct or not.<br> # There shouldn't be any difference between the previous median of groups and after filling its median in place of NaN train.groupby(['Pclass' , 'Sex' , 'Embarked'])[['Age']].median() # YAY ! 
There isn't any difference # Fare is string with number at the end , Two consecutive ticket number means they are bougth from same place or they got same deck on the ship... train.Ticket = pd.to_numeric(train.Ticket.str.split().str[-1] , errors='coerce') test.Ticket = pd.to_numeric(test.Ticket.str.split().str[-1] , errors='coerce') # Lets fill the missing Ticket value in train data with median Ticket value and one missing fare value in test data with median fare in train Ticket_median = train.Ticket.median() train.Ticket.fillna(Ticket_median , inplace =True) test.Fare.fillna(train.Fare.median() , inplace =True) train.isna().sum() test.isna().sum() # Lets create one feature variable **Status** in the society . This features can be derive from the name features like 'Dr' , 'Rev' , 'Col' , 'Major' etc train['Status'] = train['Name'].str.split(',').str.get(1).str.split('.').str.get(0).str.strip() test['Status'] = test['Name'].str.split(',').str.get(1).str.split('.').str.get(0).str.strip() importan_person = ['Dr' , 'Rev' , 'Col' , 'Major' , 'Mlle' , 'Don' , 'Sir' , 'Ms' , 'Capt' , 'Lady' , 'Mme' , 'the Countess' , 'Jonkheer' , 'Dona'] for person in importan_person: train.Status.replace(person, 'IMP' , inplace =True) test.Status.replace(person, 'IMP' , inplace =True) train.Status.unique() test.Status.unique() train.head() test.head() test.drop(['Name' , 'Ticket' ] ,axis = 1, inplace = True) train.drop(['Survived','Ticket' ,'Name' ], inplace =True , axis =1) cat_col = ['Pclass' , 'Sex' , 'Embarked' , 'Status' , 'Deck'] train.Pclass.replace({ 1 :'A' , 2:'B' , 3:'C' } , inplace =True) test.Pclass.replace({ 1 :'A' , 2:'B' , 3:'C' } , inplace =True) train = pd.get_dummies(train , columns=cat_col) test = pd.get_dummies(test , columns=cat_col) print(train.shape , test.shape) # Lets scale the data # + scaler = MinMaxScaler() train= scaler.fit_transform(train) test = scaler.transform(test) # - # <h3>Machine Learning</h3> model = RandomForestClassifier(bootstrap= True , 
min_samples_leaf= 3, n_estimators = 500 , min_samples_split = 10, max_features = "sqrt", max_depth= 6) cross_val_score(model , train , label , cv=5) model = LogisticRegression() cross_val_score(model , train , label , cv=5) from sklearn.svm import SVC model = SVC(C=4) cross_val_score(model , train , label , cv=5) model.fit(train , label) pre = model.predict(test) ans = pd.DataFrame({'PassengerId' : index , 'Survived': pre}) ans.to_csv('submit.csv', index = False) ans.head() # <h3>If you liked this kernel please consider upvoting it.<h3>
titanic/titanic-top-10-percent-simple-solution-and-eda.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # Housing Market

# ### Introduction:
#
# This time we will create our own dataset with fictional numbers to describe a house market. As we are going to create random data don't try to reason about the numbers.
#
# ### Step 1. Import the necessary libraries

import pandas as pd
import numpy as np

# ### Step 2. Create 3 different Series, each of length 100, as follows:
# 1. The first a random number from 1 to 4
# 2. The second a random number from 1 to 3
# 3. The third a random number from 10,000 to 30,000

# ### Step 3. Let's create a DataFrame by joining the Series by column

# ### Step 4. Change the name of the columns to bedrs, bathrs, price_sqr_meter

# ### Step 5. Create a one column DataFrame with the values of the 3 Series and assign it to 'bigcolumn'

# ### Step 6. Oops, it seems it is going only until index 99. Is it true?

# ### Step 7. Reindex the DataFrame so it goes from 0 to 299
05_Merge/Housing Market/Solutions.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # ## Recognized Formats import pandas as pd from beakerx_base import * from beakerx_tabledisplay import * TableDisplay.timeZoneGlobal = "Europe/London" "no errors" # 1 (Array of Integers parameter) TableDisplay({"a":100, "b":200, "c":300}) # 2 (2D Array of Integers parameter) TableDisplay([{"a":1}, {"a":10, "b":20}]) # 3 (Array of Decimals parameter) TableDisplay({"a":1/10, "b":1/20, "c":0.33}) # 4 (2D Array of Decimals parameter) TableDisplay([{"a":1/10}, {"a":1/100, "b":3.12345}]) # 5 (Array of Strings parameter) TableDisplay({"a":'string aaa', "b":'string bbb', "c":'string ccc'}) # 6 (2D Array of Strings parameter) TableDisplay([{"a":'a'}, {"a":'1a', "b":'2b'}]) # 7 (Array of Integer Arrays parameter) TableDisplay({"a":[1, 2, 3], "b":[10, 20, 30], "c":[100, 200, 300]}) # 8 (2D Array of Integer Arrays parameter) TableDisplay([ {"a":[1, 2, 3]}, {"a":[10, 20, 30], "b":[100, 200, 300]}]) # 9 (2D Array of Integer,Decimal,String,Array Arrays parameter) row1 = {"a":100, "b":200, "c":300} row2 = {"a":1/10, "b":1/20, "c":0.33} row3 = {"a":'a a a', "b":'b b b', "c":'c c c'} row4 = {"a":[1, 2, 3], "b":[10, 20, 30], "c":[100, 200, 300]} TableDisplay([row1, row2, row3, row4]) # 10 ([Integer,Decimal,String,Array] parameter) TableDisplay({"a":100, "b":1/20, "c":'c c c', "d":[100, 200, 300]}) # 11 (2D Arrays of [Integer,Decimal,String,Array] parameter) row1 = {"a":10, "b":1/10, "c":'c', "d":[100, 200]} row2 = {"a":100, "b":1/20, "c":'c c c', "d":[100, 200, 300]} TableDisplay([row1, row2]) # 12 (numbers as name of Array keys (Array parameter)) TableDisplay({10:20, 1/10:1/20, 'c':'c c c', '[100, 200]':[100, 200, 300]}) # 13 (numbers as name of Array keys (2D Array parameter) row1 = {40:40, 1/40:1/40, 'c':'c'} row2 = {40:20, 1/40:1/20, 'c':'c c c', '[100, 
200]':[100, 200, 300]} TableDisplay([row1, row2]) # 14 beakerx_tabledisplay.pandas_display_table() pd.DataFrame(data=np.zeros((5,5)), index=pd.Int64Index([0, 1, 2, 3, 4], dtype='int64')) # ## Set index to DataFrame # 15 df = pd.read_csv('../../resources/data/interest-rates-small.csv') df.set_index(['m3']) df # 16 df = pd.read_csv('../../resources/data/interest-rates-small.csv') df.index = df['time'] df # ## The length of types should be same as number of cols. # 17 # expected result is error message colNames = ["column 1", "column 2", "column 3"] row1 = [6, 6, 0.5] row2 = [3, 3, 2.0] table46 = TableDisplay(pd.DataFrame([row1, row2], columns=colNames), colNames, ["integer", "double" ]) table46
autotests/ipynb/python/TableInputDataTest.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # %matplotlib inline # + import os.path import numpy as np import pandas as pd import matplotlib.pyplot as plt import seaborn as sns # - golden_ratio = 0.5 * (1 + np.sqrt(5)) golden_size = lambda width: (width, width / golden_ratio) n_splits = 3 name = "default" summary_dir = "../logs" names = ["default", "adam", "adam-beta-0.5"] splits = range(n_splits) def get_basename(name, split_num): return f"{name}.split{split_num:d}" def make_plot_data(names, splits): df_list = [] for name in names: for split_num in splits: basename = get_basename(name, split_num) csv_path = os.path.join(summary_dir, f"{basename}.csv") df = pd.read_csv(csv_path).assign(name=name, split=split_num) df_list.append(df) data = pd.concat(df_list, axis="index", sort=True) return data data = make_plot_data(names, splits).rename(columns=dict(acc="train", val_acc="validation")) data data = data.rename(columns=dict(acc="train", val_acc="validation")) data data = data.assign(name=data.name.replace({"default": "rmsprop"})) new_data = pd.melt(data, id_vars=["name", "split", "epoch"], value_vars=["train", "validation"], value_name="accuracy", var_name="partition") new_data # + fig, ax = plt.subplots() g = sns.lineplot(x="epoch", y="accuracy", hue="name", # style="partition", # units="split", estimator=None, data=new_data, ax=ax) ax.set_yscale('log') ax.set_ylim(0.8, 1.0) plt.show() # - g = sns.relplot(x="epoch", y="accuracy", hue="name", # units="split", estimator=None, ci="sd", # col="partition", height=5, aspect=golden_ratio, data=new_data, kind="line") # facet_kws=dict(margin_titles=True)) # g.set_titles(template='') # # .set_titles(row_template=r"amplitude $\sigma={{{row_name:.2f}}}$", # # col_template=r"lengthscale $\ell={{{col_name:.3f}}}$") # g.despine(top=True, right=True, 
left=True, bottom=True, trim=False) # g.set(xticks=[], yticks=[]) # g.set_axis_labels("", "") # g.fig.subplots_adjust(wspace=.01, hspace=.01) names = ["default", "adam", "adam-beta-0.5"] pretty_names = ["RMSProp", r"Adam ($\beta=0.9$)", r"Adam ($\beta=0.5$)"] pretty_name_mapping = dict(zip(names, pretty_names)) pretty_name_mapping new_data.query("partition == 'validation'")
scratch/results.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

import networkx as nx
from selenium import webdriver
import time
import random


def r_time():
    """Sleep for a random, human-like delay (mean 2 s, sd 1 s).

    ``random.gauss(2, 1)`` returns a negative sample roughly 2% of the
    time, and ``time.sleep`` raises ValueError for negative arguments,
    so the delay is clamped at zero.
    """
    time.sleep(max(0.0, random.gauss(2, 1)))


# Launch a browser and run a Yandex search, pausing between requests so
# the traffic looks less bot-like.
driver = webdriver.Firefox()

r_time()

driver.get('https://yandex.com/search/?text=машиностроение')

r_time()

# Uncomment if a captcha checkbox blocks the results page.
# driver.find_element_by_class_name('CheckboxCaptcha-Anchor').click()

# Each organic search result is rendered as a 'serp-item' element.
serps = driver.find_elements_by_class_name('serp-item')

serps[0]

import pandas as pd

# Skeleton table for the scraped company records.
columns = ['name', 'url', 'description', 'phones', 'mail', 'category', 'address']
dataFrame = pd.DataFrame(columns=columns)

# Seed two empty placeholder rows. pd.concat replaces DataFrame.append,
# which was deprecated in pandas 1.4 and removed in 2.0; the resulting
# frame is identical to two append(..., ignore_index=True) calls.
empty_row = {column: '' for column in columns}
dataFrame = pd.concat([dataFrame, pd.DataFrame([empty_row, empty_row])],
                      ignore_index=True)

dataFrame

# NOTE(review): assumes the 'temporary_data' directory already exists —
# to_csv does not create it.
dataFrame.to_csv('temporary_data/first.csv')

G = nx.DiGraph()
lib/research/research.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # Sparse Approximations
#
#
# The `gp.MarginalSparse` class implements sparse, or inducing point, GP approximations. It works identically to `gp.Marginal`, except it additionally requires the locations of the inducing points (denoted `Xu`), and it accepts the argument `sigma` instead of `noise` because these sparse approximations assume white IID noise.
#
# Three approximations are currently implemented, FITC, DTC and VFE. For most problems, they produce fairly similar results. These GP approximations don't form the full covariance matrix over all $n$ training inputs. Instead they rely on $m < n$ *inducing points*, which are "strategically" placed throughout the domain. These approximations reduce the $\mathcal{O}(n^3)$ complexity of GPs down to $\mathcal{O}(nm^2)$ --- a significant speed up. The memory requirements scale down a bit too, but not as much. They are commonly referred to as *sparse* approximations, in the sense of being data sparse. The downside of sparse approximations is that they reduce the expressiveness of the GP. Reducing the dimension of the covariance matrix effectively reduces the number of covariance matrix eigenvectors that can be used to fit the data.
#
# A choice that needs to be made is where to place the inducing points. One option is to use a subset of the inputs. Another possibility is to use K-means. The location of the inducing points can also be an unknown and optimized as part of the model. These sparse approximations are useful for speeding up calculations when the density of data points is high and the lengthscale is larger than the separations between inducing points.
# # For more information on these approximations, see [Quinonero-Candela+Rasmussen, 2006](http://www.jmlr.org/papers/v6/quinonero-candela05a.html) and [Titsias 2009](https://pdfs.semanticscholar.org/9c13/b87b5efb4bb011acc89d90b15f637fa48593.pdf). # ## Examples # # For the following examples, we use the same data set as was used in the `gp.Marginal` example, but with more data points. # + import pymc3 as pm import theano import theano.tensor as tt import numpy as np import matplotlib.pyplot as plt # %matplotlib inline # + # set the seed np.random.seed(1) n = 2000 # The number of data points X = 10*np.sort(np.random.rand(n))[:,None] # Define the true covariance function and its parameters ℓ_true = 1.0 η_true = 3.0 cov_func = η_true**2 * pm.gp.cov.Matern52(1, ℓ_true) # A mean function that is zero everywhere mean_func = pm.gp.mean.Zero() # The latent function values are one sample from a multivariate normal # Note that we have to call `eval()` because PyMC3 built on top of Theano f_true = np.random.multivariate_normal(mean_func(X).eval(), cov_func(X).eval() + 1e-8*np.eye(n), 1).flatten() # The observed data is the latent function plus a small amount of IID Gaussian noise # The standard deviation of the noise is `sigma` σ_true = 2.0 y = f_true + σ_true * np.random.randn(n) ## Plot the data and the unobserved latent function fig = plt.figure(figsize=(12,5)); ax = fig.gca() ax.plot(X, f_true, "dodgerblue", lw=3, label="True f"); ax.plot(X, y, 'ok', ms=3, alpha=0.5, label="Data"); ax.set_xlabel("X"); ax.set_ylabel("The true f(x)"); plt.legend(); # - # ### Initializing the inducing points with K-means # # We use the NUTS sampler and the `FITC` approximation. 
with pm.Model() as model: ℓ = pm.Gamma("ℓ", alpha=2, beta=1) η = pm.HalfCauchy("η", beta=5) cov = η**2 * pm.gp.cov.Matern52(1, ℓ) gp = pm.gp.MarginalSparse(cov_func=cov, approx="FITC") # initialize 20 inducing points with K-means # gp.util Xu = pm.gp.util.kmeans_inducing_points(20, X) σ = pm.HalfCauchy("σ", beta=5) y_ = gp.marginal_likelihood("y", X=X, Xu=Xu, y=y, noise=σ) trace = pm.sample(1000) # + X_new = np.linspace(-1, 11, 200)[:,None] # add the GP conditional to the model, given the new X values with model: f_pred = gp.conditional("f_pred", X_new) # To use the MAP values, you can just replace the trace with a length-1 list with `mp` with model: pred_samples = pm.sample_posterior_predictive(trace, vars=[f_pred], samples=1000) # + # plot the results fig = plt.figure(figsize=(12,5)); ax = fig.gca() # plot the samples from the gp posterior with samples and shading from pymc3.gp.util import plot_gp_dist plot_gp_dist(ax, pred_samples["f_pred"], X_new); # plot the data and the true latent function plt.plot(X, y, 'ok', ms=3, alpha=0.5, label="Observed data"); plt.plot(X, f_true, "dodgerblue", lw=3, label="True f"); plt.plot(Xu, 10*np.ones(Xu.shape[0]), "cx", ms=10, label="Inducing point locations") # axis labels and title plt.xlabel("X"); plt.ylim([-13,13]); plt.title("Posterior distribution over $f(x)$ at the observed values"); plt.legend(); # - # ### Optimizing inducing point locations as part of the model # # For demonstration purposes, we set `approx="VFE"`. Any inducing point initialization can be done with any approximation. 
# + Xu_init = 10*np.random.rand(20) with pm.Model() as model: ℓ = pm.Gamma("ℓ", alpha=2, beta=1) η = pm.HalfCauchy("η", beta=5) cov = η**2 * pm.gp.cov.Matern52(1, ℓ) gp = pm.gp.MarginalSparse(cov_func=cov, approx="VFE") # set flat prior for Xu Xu = pm.Flat("Xu", shape=20, testval=Xu_init) σ = pm.HalfCauchy("σ", beta=5) y_ = gp.marginal_likelihood("y", X=X, Xu=Xu[:, None], y=y, noise=σ) mp = pm.find_MAP() # + mu, var = gp.predict(X_new, point=mp, diag=True) sd = np.sqrt(var) # draw plot fig = plt.figure(figsize=(12,5)); ax = fig.gca() # plot mean and 2σ intervals plt.plot(X_new, mu, 'r', lw=2, label="mean and 2σ region"); plt.plot(X_new, mu + 2*sd, 'r', lw=1); plt.plot(X_new, mu - 2*sd, 'r', lw=1); plt.fill_between(X_new.flatten(), mu - 2*sd, mu + 2*sd, color="r", alpha=0.5) # plot original data and true function plt.plot(X, y, 'ok', ms=3, alpha=1.0, label="observed data"); plt.plot(X, f_true, "dodgerblue", lw=3, label="true f"); Xu = mp["Xu"] plt.plot(Xu, 10*np.ones(Xu.shape[0]), "cx", ms=10, label="Inducing point locations") plt.xlabel("x"); plt.ylim([-13,13]); plt.title("predictive mean and 2σ interval"); plt.legend(); # -
docs/source/notebooks/GP-SparseApprox.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3.6 # language: python # name: python36 # --- # Copyright (c) Microsoft Corporation. All rights reserved. # # Licensed under the MIT License. # ![Impressions](https://PixelServer20190423114238.azurewebsites.net/api/impressions/MachineLearningNotebooks/how-to-use-azureml/deployment/production-deploy-to-aks/production-deploy-to-aks.png) # # Deploying a web service to Azure Kubernetes Service (AKS) # This notebook shows the steps for deploying a service: registering a model, creating an image, provisioning a cluster (one time action), and deploying a service to it. # We then test and delete the service, image and model. from azureml.core import Workspace from azureml.core.compute import AksCompute, ComputeTarget from azureml.core.webservice import Webservice, AksWebservice from azureml.core.model import Model import azureml.core print(azureml.core.VERSION) # # Get workspace # Load existing workspace from the config file info. # + from azureml.core.workspace import Workspace ws = Workspace.from_config() print(ws.name, ws.resource_group, ws.location, ws.subscription_id, sep = '\n') # - # # Register the model # Register an existing trained model, add descirption and tags. 
# + #Register the model from azureml.core.model import Model model = Model.register(model_path = "sklearn_regression_model.pkl", # this points to a local file model_name = "sklearn_regression_model.pkl", # this is the name the model is registered as tags = {'area': "diabetes", 'type': "regression"}, description = "Ridge regression model to predict diabetes", workspace = ws) print(model.name, model.description, model.version) # - # # Create the Environment # Create an environment that the model will be deployed with # + from azureml.core import Environment from azureml.core.conda_dependencies import CondaDependencies conda_deps = CondaDependencies.create(conda_packages=['numpy','scikit-learn==0.19.1','scipy'], pip_packages=['azureml-defaults', 'inference-schema']) myenv = Environment(name='myenv') myenv.python.conda_dependencies = conda_deps # - # #### Use a custom Docker image # # You can also specify a custom Docker image to be used as base image if you don't want to use the default base image provided by Azure ML. Please make sure the custom Docker image has Ubuntu >= 16.04, Conda >= 4.5.\* and Python(3.5.\* or 3.6.\*). # # Only supported with `python` runtime. 
# ```python # # use an image available in public Container Registry without authentication # myenv.docker.base_image = "mcr.microsoft.com/azureml/o16n-sample-user-base/ubuntu-miniconda" # # # or, use an image available in a private Container Registry # myenv.docker.base_image = "myregistry.azurecr.io/mycustomimage:1.0" # myenv.docker.base_image_registry.address = "myregistry.azurecr.io" # myenv.docker.base_image_registry.username = "username" # myenv.docker.base_image_registry.password = "password" # ``` # # Write the Entry Script # Write the script that will be used to predict on your model # + # %%writefile score.py import os import pickle import json import numpy from sklearn.externals import joblib from sklearn.linear_model import Ridge def init(): global model # AZUREML_MODEL_DIR is an environment variable created during deployment. # It is the path to the model folder (./azureml-models/$MODEL_NAME/$VERSION) # For multiple models, it points to the folder containing all deployed models (./azureml-models) model_path = os.path.join(os.getenv('AZUREML_MODEL_DIR'), 'sklearn_regression_model.pkl') # deserialize the model file back into a sklearn model model = joblib.load(model_path) # note you can pass in multiple rows for scoring def run(raw_data): try: data = json.loads(raw_data)['data'] data = numpy.array(data) result = model.predict(data) # you can return any data type as long as it is JSON-serializable return result.tolist() except Exception as e: error = str(e) return error # - # # Create the InferenceConfig # Create the inference config that will be used when deploying the model # + from azureml.core.model import InferenceConfig inf_config = InferenceConfig(entry_script='score.py', environment=myenv) # - # # Model Profiling # # Profile your model to understand how much CPU and memory the service, created as a result of its deployment, will need. Profiling returns information such as CPU usage, memory usage, and response latency. 
It also provides a CPU and memory recommendation based on the resource usage. You can profile your model (or more precisely the service built based on your model) on any CPU and/or memory combination where 0.1 <= CPU <= 3.5 and 0.1GB <= memory <= 15GB. If you do not provide a CPU and/or memory requirement, we will test it on the default configuration of 3.5 CPU and 15GB memory. # # In order to profile your model you will need: # - a registered model # - an entry script # - an inference configuration # - a single column tabular dataset, where each row contains a string representing sample request data sent to the service. # # Please, note that profiling is a long running operation and can take up to 25 minutes depending on the size of the dataset. # # At this point we only support profiling of services that expect their request data to be a string, for example: string serialized json, text, string serialized image, etc. The content of each row of the dataset (string) will be put into the body of the HTTP request and sent to the service encapsulating the model for scoring. # # Below is an example of how you can construct an input dataset to profile a service which expects its incoming requests to contain serialized json. In this case we created a dataset based one hundred instances of the same request data. In real world scenarios however, we suggest that you use larger datasets with various inputs, especially if your model resource usage/behavior is input dependent. # You may want to register datasets using the register() method to your workspace so they can be shared with others, reused and referred to by name in your script. # You can try get the dataset first to see if it's already registered. 
# +
import json
from azureml.core import Datastore
from azureml.core.dataset import Dataset
from azureml.data import dataset_type_definitions

dataset_name='sample_request_data'
dataset_registered = False

# Reuse a previously registered dataset if one exists under this name.
try:
    sample_request_data = Dataset.get_by_name(workspace = ws, name = dataset_name)
    dataset_registered = True
except Exception:
    # Catch Exception rather than a bare `except:` so KeyboardInterrupt /
    # SystemExit still propagate; get_by_name raises when the dataset is absent.
    print("The dataset {} is not registered in workspace yet.".format(dataset_name))

if not dataset_registered:
    input_json = {'data': [[1, 2, 3, 4, 5, 6, 7, 8, 9, 10],
        [10, 9, 8, 7, 6, 5, 4, 3, 2, 1]]}
    # create a string that can be put in the body of the request
    serialized_input_json = json.dumps(input_json)
    # One hundred identical sample requests, one per line.
    sample_request_data = '\n'.join([serialized_input_json] * 100)
    file_name = "{}.txt".format(dataset_name)
    # Context manager guarantees the file handle is closed even on error.
    with open(file_name, 'w') as f:
        f.write(sample_request_data)

    # upload the txt file created above to the Datastore and create a dataset from it
    data_store = Datastore.get_default(ws)
    data_store.upload_files(['./' + file_name], target_path='sample_request_data')
    datastore_path = [(data_store, 'sample_request_data' +'/' + file_name)]
    sample_request_data = Dataset.Tabular.from_delimited_files(
        datastore_path,
        separator='\n',
        infer_column_types=True,
        header=dataset_type_definitions.PromoteHeadersBehavior.NO_HEADERS)
    sample_request_data = sample_request_data.register(workspace=ws,
                                                       name=dataset_name,
                                                       create_new_version=True)
# -

# Now that we have an input dataset we are ready to go ahead with profiling. In this case we are testing the previously introduced sklearn regression model on 1 CPU and 0.5 GB memory. The memory usage and recommendation presented in the result is measured in Gigabytes. The CPU usage and recommendation is measured in CPU cores.
# + from datetime import datetime from azureml.core import Environment from azureml.core.conda_dependencies import CondaDependencies from azureml.core.model import Model, InferenceConfig environment = Environment('my-sklearn-environment') environment.python.conda_dependencies = CondaDependencies.create(pip_packages=[ 'azureml-defaults', 'inference-schema[numpy-support]', 'joblib', 'numpy', 'scikit-learn==0.19.1', 'scipy' ]) inference_config = InferenceConfig(entry_script='score.py', environment=environment) # if cpu and memory_in_gb parameters are not provided # the model will be profiled on default configuration of # 3.5CPU and 15GB memory profile = Model.profile(ws, 'sklearn-%s' % datetime.now().strftime('%m%d%Y-%H%M%S'), [model], inference_config, input_dataset=sample_request_data, cpu=1.0, memory_in_gb=0.5) # profiling is a long running operation and may take up to 25 min profile.wait_for_completion(True) details = profile.get_details() # - # # Provision the AKS Cluster # This is a one time setup. You can reuse this cluster for multiple deployments after it has been created. If you delete the cluster or the resource group that contains it, then you would have to recreate it. # # > Note that if you have an AzureML Data Scientist role, you will not have permission to create compute resources. Talk to your workspace or IT admin to create the compute targets described in this section, if they do not already exist. 
# + from azureml.core.compute import ComputeTarget from azureml.core.compute_target import ComputeTargetException # Choose a name for your AKS cluster aks_name = 'my-aks-9' # Verify that cluster does not exist already try: aks_target = ComputeTarget(workspace=ws, name=aks_name) print('Found existing cluster, use it.') except ComputeTargetException: # Use the default configuration (can also provide parameters to customize) prov_config = AksCompute.provisioning_configuration() # Create the cluster aks_target = ComputeTarget.create(workspace = ws, name = aks_name, provisioning_configuration = prov_config) if aks_target.get_status() != "Succeeded": aks_target.wait_for_completion(show_output=True) # - # # Create AKS Cluster in an existing virtual network (optional) # See code snippet below. Check the documentation [here](https://docs.microsoft.com/en-us/azure/machine-learning/service/how-to-enable-virtual-network#use-azure-kubernetes-service) for more details. # + # from azureml.core.compute import ComputeTarget, AksCompute # # Create the compute configuration and set virtual network information # config = AksCompute.provisioning_configuration(location="eastus2") # config.vnet_resourcegroup_name = "mygroup" # config.vnet_name = "mynetwork" # config.subnet_name = "default" # config.service_cidr = "10.0.0.0/16" # config.dns_service_ip = "10.0.0.10" # config.docker_bridge_cidr = "172.17.0.1/16" # # Create the compute target # aks_target = ComputeTarget.create(workspace = ws, # name = "myaks", # provisioning_configuration = config) # - # # Enable SSL on the AKS Cluster (optional) # See code snippet below. 
Check the documentation [here](https://docs.microsoft.com/en-us/azure/machine-learning/service/how-to-secure-web-service) for more details # + # provisioning_config = AksCompute.provisioning_configuration(ssl_cert_pem_file="cert.pem", ssl_key_pem_file="key.pem", ssl_cname="www.contoso.com") # - # %%time aks_target.wait_for_completion(show_output = True) print(aks_target.provisioning_state) print(aks_target.provisioning_errors) # ## Optional step: Attach existing AKS cluster # # If you have existing AKS cluster in your Azure subscription, you can attach it to the Workspace. # + # # Use the default configuration (can also provide parameters to customize) # resource_id = '/subscriptions/92c76a2f-0e1c-4216-b65e-abf7a3f34c1e/resourcegroups/raymondsdk0604/providers/Microsoft.ContainerService/managedClusters/my-aks-0605d37425356b7d01' # create_name='my-existing-aks' # # Create the cluster # attach_config = AksCompute.attach_configuration(resource_id=resource_id) # aks_target = ComputeTarget.attach(workspace=ws, name=create_name, attach_configuration=attach_config) # # Wait for the operation to complete # aks_target.wait_for_completion(True) # - # # Deploy web service to AKS # + tags=["sample-deploy-to-aks"] # Set the web service configuration (using default here) aks_config = AksWebservice.deploy_configuration() # # Enable token auth and disable (key) auth on the webservice # aks_config = AksWebservice.deploy_configuration(token_auth_enabled=True, auth_enabled=False) # + tags=["sample-deploy-to-aks"] # %%time aks_service_name ='aks-service-1' aks_service = Model.deploy(workspace=ws, name=aks_service_name, models=[model], inference_config=inf_config, deployment_config=aks_config, deployment_target=aks_target) aks_service.wait_for_deployment(show_output = True) print(aks_service.state) # - # # Test the web service using run method # We test the web sevice by passing data. # Run() method retrieves API keys behind the scenes to make sure that call is authenticated. 
# + # %%time import json test_sample = json.dumps({'data': [ [1,2,3,4,5,6,7,8,9,10], [10,9,8,7,6,5,4,3,2,1] ]}) test_sample = bytes(test_sample,encoding = 'utf8') prediction = aks_service.run(input_data = test_sample) print(prediction) # - # # Test the web service using raw HTTP request (optional) # Alternatively you can construct a raw HTTP request and send it to the service. In this case you need to explicitly pass the HTTP header. This process is shown in the next 2 cells. # + # # if (key) auth is enabled, retrieve the API keys. AML generates two keys. # key1, Key2 = aks_service.get_keys() # print(key1) # # if token auth is enabled, retrieve the token. # access_token, refresh_after = aks_service.get_token() # + # construct raw HTTP request and send to the service # # %%time # import requests # import json # test_sample = json.dumps({'data': [ # [1,2,3,4,5,6,7,8,9,10], # [10,9,8,7,6,5,4,3,2,1] # ]}) # test_sample = bytes(test_sample,encoding = 'utf8') # # If (key) auth is enabled, don't forget to add key to the HTTP header. # headers = {'Content-Type':'application/json', 'Authorization': 'Bearer ' + key1} # # If token auth is enabled, don't forget to add token to the HTTP header. # headers = {'Content-Type':'application/json', 'Authorization': 'Bearer ' + access_token} # resp = requests.post(aks_service.scoring_uri, test_sample, headers=headers) # print("prediction:", resp.text) # - # # Clean up # Delete the service, image and model. # %%time aks_service.delete() model.delete()
how-to-use-azureml/deployment/production-deploy-to-aks/production-deploy-to-aks.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # In the North, we trust! # # **The European Social Survey (ESS)** is a biennial cross-national survey of attitudes and behaviour. Since its beginning in 2001, the study has been conducted 7 times. The results are published online. # # In this brief study, we are interested in which what factors have seen the greatest changes in the ESS across the years. We observe that trust to political authorities is one of these. We examine trust to politicians and the European Parliament, and show that there has been a decrease in trust particularly towards the EU and in Central and Southern Europe. However, Northern European respondents report notably higher levels of trust. We speculate if the decrease in trust towards authoroties is related to Generalized Social Trust towards other people but judging by visual inspection of the data, this does not seem to be the case. # # This notebook will guide you through the analysis. Please run each cell so that the code will run and figures will be shown. # # Note: please unzip the file data.zip to the same folder with this notebook. We have had to zip the data because of file size constraints of Github. # ### Ingest # + import pandas as pd import numpy as np import zipfile filename = 'ESS1-7e01.csv' #Read file contents in pandas Data Frame zf = zipfile.ZipFile('data.zip') df = pd.read_csv(zf.open('ESS1-7e01.csv'), sep=',', low_memory=False) #df = pd.read_csv(filename, sep=',', low_memory=False) # - # ### Drop uninteresting variables # + #The data set contains some variables which are not particularly interesting for us. Let us drop some of them. 
df = df.drop(columns=['edition', 'idno', 'name', 'cproddat', 'cedition', 'cname', 'cseqno']) #Let's also drop weights for now df = df.drop(columns=['dweight', 'pspwght', 'pweight']) # - # ### Data encoding and missing values # # Most of the questions in the survey are categorical or binary tickboxes but they are encoded as numbers. # We would like to treat nominal variables differently to ordinal variables. # However, it is difficult to recognize which variables are nominal and which ordinal based on the encoded values. # # Many questions are Likert-like. Because the ESS survey is time series data, we can analyze trends based on Likert-like and binary values. # + #Some question include additional missing value options also encoded as numbers. #These are encoded with numbers 6, 7, 8, 9, 55, 66, 77, 88, 99, 555, 666, etc. #We well replace ESS missing data encodings with NaN. The below values don't appear naturally. #However, we are still left with missing value encodings [6, 7, 6, 9]. df.replace(to_replace=[99, 88, 77, 66, 55, 999, 888, 777, 666, 555, 9999, 8888, 7777, 6666, 5555], value=np.nan, inplace=True) # + #Replace missing data encodings with NaN in variables with less unique values for col in list(df): if 6 not in df[col].unique() and 7 in df[col].unique() and 8 in df[col].unique() and 9 in df[col].unique(): df[col].replace(to_replace=[7, 8, 9], value=np.nan, inplace=True) for col in list(df): if 5 not in df[col].unique() and 6 in df[col].unique() and 7 in df[col].unique() and 8 in df[col].unique() and 9 in df[col].unique(): df[col].replace(to_replace=[6, 7, 8, 9], value=np.nan, inplace=True) # - # ### Drop values with insufficient response rate # # We still have a lot of data. We probably don't need all of it. Let's drop variables which have more than 50% missing values. 
# + df = df[df.columns[df.isnull().mean() < 0.5]] #Let's save this thinned data a file so we don't need to continue to process such big files (and more importantly, so that we can share this). df.to_csv('ESS1-7e01_mod.csv') # - # ## Load the preprocessed dataset (start here if you don't have the original ESS data) # # Github, through which we are sharing this notebook, has file size constraints. Because of this, we are loading in a dataset which had multiple variables dropped, through the aforementioned steps. zf = zipfile.ZipFile('data_mod.zip') df = pd.read_csv(zf.open('ESS1-7e01_mod.csv'), sep=',', low_memory=False) df = df.drop(columns=['Unnamed: 0']) # ### Aggregate variables to a more insightful level # # We have a lot of data but nothing specific to look for. # Perhaps we will find something interesting if we look at which variables have seen the greatest absolute change since the beginning of ESS. #First, let's see aggregate a mean for each variable per each ESS round and country. df.groupby(['essround','cntry']).agg('mean').unstack().T # + #Since the question are with different scales, we'll hopefully get a more accurate idea by taking the percentage of change from one year to another. df.groupby(['essround','cntry']).agg('mean').unstack().pct_change() # + #Let's only look at the change between the first and the last ESS round. cum_changes = df.groupby(['essround','cntry']).agg('mean').unstack().pct_change(6)[6:].T # - # ### European aggregation # # To look at Europe as a whole, let's again aggregate these averages to European level. # + #We take the mean for each variable on level 0, which is the country variable in this DataFrame. sorted_changes = cum_changes.mean(level=0).sort_values(by=[7]) #Fill infinite values with NaN. sorted_changes = sorted_changes.replace([np.inf, -np.inf], np.nan).dropna() #Let's change the name to something more appropriate. 
sorted_changes.columns = ['pct_change'] #Calculate absolute change and make it a new column, and sort based on that. sorted_changes['abs_pct_change'] = sorted_changes['pct_change'].abs() sorted_changes.sort_values(by='abs_pct_change', ascending=False) #Retrieve the 20 variables where we see the greatest change across Europe top20 = sorted_changes.nlargest(20, 'abs_pct_change') top20 = top20[['abs_pct_change', 'pct_change']] #Make the table prettier. top20.style.bar(subset=['pct_change', 'abs_pct_change'], align='mid', color=['#d65f5f', '#5fba7d']) # - # ### Codebook exempts for the most changed variables # # Let's examine what do the most changed values mean by looking at the ESS codebook. # # **dscrna**: "On what grounds is your group discriminated against?", multiple choice tickbox question where this variable is binary indicator of whether the respondent did not tick any other boxes. Because there is a negative change, the respondents are thus more able to tick one other box stating a factor which has lead them to experience discrimination. Therefore, experiences of discrimation based on a group characteristic has increased over the years. # # **dscrntn**: "On what grounds is your group discriminated against? - Nationality". Binary tickbox. Experiences of discrimination based on nationality have increased. # # **dscrgnd**: "On what grounds is your group discriminated against? - Gender". Binary tickbox. Experiences of discrimination based on gender have increased. # # **uempla**: "Using this card, which of these descriptions applies to what you have been doing for the last 7 days? - Unemployed and actively looking for a job." Binary tickbox. Unemployment and job-seeking activities have increased. # # **dscrrlg**: "On what grounds is your group discriminated against? - Religion". Binary tickbox. Experiences of discrimination based on religion have increased. # # **dscrrce**: "On what grounds is your group discriminated against? - Race". Binary tickbox. 
Experiences of discrimination based on race have increased.
#
# **hswrk**: "Using this card, which of these descriptions applies to what you have been doing for the last 7 days? -
# Doing housework, looking after children or other persons." Binary tickbox. Housework activities have decreased.
#
# **hswrkp**: "Which of the descriptions on this card applies to what he/she has been doing for the last 7 days? -
# Doing housework, looking after children or other persons" Binary tickbox. Partner's housework activities have decreased.
#
# **rtrdp**: "Which of the descriptions on this card applies to what he/she has been doing for the last 7 days? -
# Retired" Binary tickbox. More partners have been retired.
#
# **uemplap**: "Using this card, which of these descriptions applies to what he/she has been doing for the last 7 days? - Unemployed and actively looking for a job." Binary tickbox. Partner's unemployment and job-seeking activities have increased.
#
# **rtrd**: "Which of the descriptions on this card applies to what you have been doing for the last 7 days? -
# Retired" Binary tickbox. More respondents have been retired.
#
# **dscrage**: "On what grounds is your group discriminated against? - Age". Binary tickbox. Experiences of discrimination based on age have increased.
#
# **edulvla**: "What is the highest level of education you have achieved?" Ordinal scale. Respondents' level of education has increased.
#
# **freehms**: "Using this card, please say to what extent you agree or disagree with each of the following statements -
# Gay men and lesbians should be free to live their own life as they wish" Likert-like scale. Respondents agree with the statement more.
#
# **uemplip**: "Which of the descriptions on this card applies to what he/she has been doing for the last 7 days? -
# Unemployed, wanting a job but not actively looking for a job" Binary tickbox. Number of partners who are unemployed, wanting a job but not seeking one has increased.
# # **trstplt**: "Using this card, please tell me on a score of 0-10 how much you personally trust each of the institutions I read out. 0 means you do not trust an institution at all, and 10 means you have complete trust. Firstly... # ... politicians?" Likert-like scale. Trust to politicians decreased. # # **dsbld**: "Using this card, which of these descriptions applies to what you have been doing for the last 7 days? # Permanently sick or disabled" Binary tickbox. Number of disabled increased. # # **trstep**: "Using this card, please tell me on a score of 0-10 how much you personally trust each of the institutions I read out. 0 means you do not trust an institution at all, and 10 means you have complete trust. Firstly... # ... the European Parliament?" Likert-like scale. Trust to European Parliament decreased. # # **stfhlth**: "Still using this card, please say what you think overall about the state of health services in [country] nowadays?" # Likert-like scale. Perception of health services quality has increased. # # **iphlppl**: "Now I will briefly describe some people. Please listen to each description and tell me how much each person is or is not like you. Use this card for your answer. # It's very important to her/him to help the people around her/him. She/he wants to care for their well-being." Likert-like scale. Self-identification towards helpful people decreased. # + #A lot of stuff, a lot of explaining! #We would like to visualize some of these changes. #Since we're going to draw these graphs a lot, let's make a function out of it. 
import matplotlib.pyplot as plt

def draw_change(var, group, stat):
    """Plot the trend of `var` across ESS rounds, one line per level of `group`.

    Aggregates the module-level DataFrame `df` with `stat` (e.g. 'mean', 'std')
    per (essround, group) cell, then overlays a dashed grey line showing the
    cross-group average per round for reference.
    """
    fig, ax = plt.subplots(figsize=(15,7))
    df.groupby(['essround',group])[var].agg(stat).unstack().plot(ax=ax)
    df.groupby(['essround',group])[var].agg(stat).unstack().T.agg('mean').plot(ax=ax, style='--', colormap='gray', title=var)
    plt.show()
# -

# A little caveat with the list of most changed variables is the emphasis that the above method puts on binary variables. Because we are looking at the changes as percentages, change from the binary scale 1 to 0 is rather drastic. Ideally, we'd eliminate binary variables from this examination. Hence we are focusing on Likert-like variables which where the above examination made more sense.
#
# ### Finding the insight
#
# Now that we have bunch of digestible data and a function that let's us explore them, we need to start exploring.
# Even if the task is to find "one insight", we cannot find an interesting insight without stumbling around multiple other possibilities for insights.
#
# First, we want to test something that is common knowledge. Education levels have risen across the world so we should see that in the ESS data. Further, we should see that Northern and Western European have higher levels of education compared to Central and South Europe.

# +
#There were a lot of interesting observations! Let's look at some on country-level.

#First, education:
draw_change('edulvla', 'cntry', 'mean')
# -

# So many countries makes the graph a bit of a mess. Let's group some of them together.
# We are assuming, a priori, that some countries are similar.
#
# Alternatively, we could do e.g. a cluster analysis and see if our perception of similar countries is in accordance with the data.
# But let's not question the status quo right now and let's go with traditional geography-inspired distinctions: # + def labelRegion(cntry): if cntry in ['DK', 'FI', 'SE', 'NO']: return 'north' if cntry in ['HU', 'PL', 'SI']: return 'central' if cntry in ['PT', 'ES']: return 'south' if cntry in ['DE', 'CH', 'FR', 'BE', 'NL']: return 'west' if cntry in ['GB', 'IE']: return 'uki' df['region'] = df.apply (lambda row: labelRegion(row['cntry']),axis=1) # + #Let's look at education again - but regionally draw_change('edulvla', 'region', 'mean') # - # We see what we know; Northern Europe is highly educated whereas South is not as much. However, we see that education levels have been increasing across the board. # + #Let's look at values; acceptance of homosexuality draw_change('freehms', 'region', 'mean') # - # We notice that people disagree less with the statement that "Gays and lesbians should be free to live their life as they wish. However, central European nations are still more opposed to this compared to other European geographies. # # ## End the truisms: Insights into Trust # # Trust is another interesting variable. From listening to a plenty of behavioural economics podcasts, I have been lead to believe countries with higher levels of Social Trust have higher GDPs. Unfortunately, we don't have GDP information in this data - but the geographical grouping also reflects the wealth of those nations. # # After some exploration, we choose Trust as to focal point for our insight. Focusing on this gives as plenty of room where we would go with further analyses. # # First, let's look how much people can trust politicians and the European Parliament. 
# + #Trust is interesting, let's look how much people can trust politicians and the European Parliament draw_change('trstplt', 'region', 'mean') draw_change('trstep', 'region', 'mean') # - draw_change('trstplt', 'cntry', 'mean') draw_change('trstep', 'cntry', 'mean') # Some observation: The British have approximately mean levels of trust to politicians but the lowest trust to the EP. The trust of the Portuguese towards EP has decline drastically since mid-ESS history (around 2010, after the Great Recession hit). Scandinavians continue to trust everyone. # # We also see that confidence intervals or drawing sigmas around the mean would help us understand whether there actually has been a difference throughout time. We must remember that *n* of samples is quite high so we might assume even from this that even smallish changes in the mean level indicate a true change. # ## On the Theory of Trust # # We saw a decline in trust towards political authorities. If we speculate a bit further, could increasing lack of trust be the reason for the turmoil in Europe? # # Some researches (Beilmann, 2017; Breen, 2016) have argued for Generalized Social Trust Index which is measured by three questions in ESS: # * Trust: ‘Would you say that most people can be trusted, or that you can’t be too careful in dealing with people?’ (0 = ‘You can't be too careful’ – 10 = ‘Most people can be trusted’); # * Fairness: ‘Do you think that most people would try to take advantage of you if they got the chance, or would they try to be fair?’ (0 = ‘Most people would try to take advantage of me’ – 10 = ‘Most people would try to be fair’); # * Helpfulness: ‘Would you say that most of the time people try to be helpful or that they are mostly looking out for themselves?’ (0 = ‘People mostly look out for themselves’ – 10 = ‘People mostly try to be helpful’). # # Do we observe a decline in Generalized Social Trust Index, or are the European trust issues specifically related to political authority? 
Can the rising tide of extremist ideologies, increasing inequality, marginalizing rhetoric and the echo chambers of social media be manifestations of diminished Social Trust? Let us see.
#
# References:
# *<NAME>. (2017). Social Capital and Individualism–Collectivism at the Individual Level (Doctoral dissertation).
# <NAME>., & <NAME>. (2016). Changing Values, Attitudes and Behaviours in Ireland: An Analysis of European Social Survey Data in Ireland, 2002-2012. Cambridge Scholars Publishing.*

# +
#Let's calculate social trust, as defined in the literature:
#the plain mean of the trust (ppltrst), fairness (pplfair) and
#helpfulness (pplhlp) items.
df['socialTrust'] = ((df.ppltrst + df.pplfair + df.pplhlp) / 3)
# -

draw_change('socialTrust', 'cntry', 'mean')

draw_change('socialTrust', 'region', 'mean')

# Alright, we don't really see a real change in social trust over the years. Maybe slight upward trend.

# Maybe the decline in social cohesion is actually exhibited through increased deviation in how much people can trust others?

draw_change('socialTrust', 'cntry', 'std')

draw_change('socialTrust', 'region', 'std')

# It would seem that there is a slight decline in the dispersion of social trust, i.e. people are having more similar levels of trust. Central Europe has the greatest variability in social trust, possibly because there is greater inequality present.
# The North, on the other hand, continues to be a place where everyone trusts everyone.
#
# ## Conclusion
#
# We showed how the European Social Survey can tell us what changes in attitudes are happening across Europe as years are passing. In particular, we examined trust which was among the most changed variables within the ESS. We saw a particular decline in trust towards the European Parliament, especially in the UK/Ireland and Southern Europe. However, we did not see a decline in Generalized Trust Index, suggesting that the increased distrust is directed mainly towards political authority.
analysis.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # --- # <div class="contentcontainer med left" style="margin-left: -50px;"> # <dl class="dl-horizontal"> # <dt>Title</dt> <dd> Violin Element</dd> # <dt>Dependencies</dt> <dd>Bokeh</dd> # <dt>Backends</dt> <dd><a href='./Violin.ipynb'>Matplotlib</a></dd> <dd><a href='../bokeh/Violin.ipynb'>Bokeh</a></dd> # </dl> # </div> import numpy as np import holoviews as hv hv.extension('matplotlib') # A ``Violin`` element is used to visualise the distribution of a dataset by displaying its probability density. It is very similar to the ``BoxWhisker`` element but provides a more faithful representation even for bi- or multimodal data. The probability density is shown by the area akin to a vertical and mirrored ``Distribution`` element. The thick black bar in the centre represents the interquartile range, the thin black line extended from it represents the 95% confidence intervals, and the white line is the median. # # The data of a ``Violin`` Element may have any number of key dimensions representing the grouping of the value dimension and a single value dimensions representing the distribution of values within each group. See the [Tabular Datasets](../../../user_guide/07-Tabular_Datasets.ipynb) user guide for supported data formats, which include arrays, pandas dataframes and dictionaries of arrays. # # In the simplest case a ``Violin`` can be used to display a single distribution of values, such as a NumPy array of normally distributed values: np.random.seed(37) violin = hv.Violin(np.random.randn(1000), vdims='Value') violin # The Violin element supports multiple options for indicating the distribution values in addition to the default ``inner`` value of 'box'. The 'medians' option indicates the median as a single line instead. 
Additionally the ``bandwidth`` option may be used to control the kernel density estimate: violin.relabel(group='Medians').options(inner='medians', cut=0.1, bandwidth=0.1) # The ``Violin`` element is particularly useful to compare multiple distribution across different categories. As a simple example we can create a dataset of values with randomly assigned Group and Category values and compare the distributions. # %%opts Violin [fig_size=300 aspect=2.5 show_legend=False] groups = [chr(65+g) for g in np.random.randint(0, 3, 200)] hv.Violin((groups, np.random.randint(0, 5, 200), np.random.randn(200)), ['Group', 'Category'], 'Value') # For full documentation and the available style and plot options, use ``hv.help(hv.Violin).``
examples/reference/elements/matplotlib/Violin.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# +
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.metrics.pairwise import cosine_similarity


def get_cosine(*args):
    """Return the pairwise cosine-similarity matrix of the given texts.

    Each positional argument is one document; the result is a square
    len(args) x len(args) array of similarities.
    """
    vectors = get_vectors(*args)
    return cosine_similarity(vectors)


def get_vectors(*args):
    """Vectorise the given texts into rows of token counts (one row per text)."""
    text = list(args)
    # Bug fix: the original passed the corpus as CountVectorizer's first
    # positional parameter (`input`, a mode selector taking
    # 'content'/'filename'/'file'), not as data. The corpus belongs in
    # fit/transform; fit_transform does both in one pass.
    vectorizer = CountVectorizer()
    return vectorizer.fit_transform(text).toarray()
# -

x = get_cosine('sad as', 'sad the','sad it hte')

print(x)
Untitled.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 (ipykernel) # language: python # name: python3 # --- # + [markdown] tags=[] # # Iris Classification # - # In this project we are going to classify whether a species of Iris flower is **Setosa**, **Virginica** or **Versicolour**. The dataset for this project is collected from [UCI Machine Learning Repository](https://archive.ics.uci.edu/ml/datasets/iris). There are 150 rows and 5 columns in this dataset, 4 columns are the feature columns and 1 column is the target column. # # The four numeric features columns are: # 1. sepal length in cm # 2. sepal width in cm # 3. petal length in cm # 4. petal width in cm # # and, 1 target column is: # 1. species # - Iris Setosa # - Iris Versicolour # - Iris Virginica # # # # Import libraries # + import pandas as pd from sklearn.pipeline import make_pipeline from sklearn.preprocessing import StandardScaler from sklearn.model_selection import train_test_split, RandomizedSearchCV from sklearn.tree import DecisionTreeClassifier from sklearn.ensemble import RandomForestClassifier, AdaBoostClassifier, GradientBoostingClassifier from sklearn.naive_bayes import GaussianNB from sklearn.linear_model import LogisticRegression, SGDClassifier from sklearn.neighbors import KNeighborsClassifier from sklearn.svm import SVC from sklearn.metrics import accuracy_score from scipy.stats import randint import matplotlib.pyplot as plt import seaborn as sb import pickle import warnings warnings.simplefilter("ignore") # - data = pd.read_csv('../data/raw/iris.data') data.head() # The dataset don't have any column names. So, we have to give these columns names. As, it is discussed above there are 4 feature columns **sepal-length, sepal-width, petal-length, petal-width** and 1 target column **species**. 
# Name the columns: four numeric features plus the class label.
data.columns = ['sepal_length', 'sepal_width', 'petal_length', 'petal_width', 'species']
data.head()

data.describe()

data.info()

# # Data Preparation

# Check if there is any null value in the data
data.isnull().any().mean()

# There aren't any null values in the data.
# Now let's check the data types of these values.

data.dtypes

# All four feature columns are of **float** type but the target column is of **object** type, so we convert it to a more suitable type.

# Let's change the data type of the species column to **category**
data['species'] = data['species'].astype('category')
data.dtypes

# Split data into features **X** and class **y**
X = data.drop('species', axis=1)
y = data['species']

X.head()

y.head()

# ### Split data into training, testing data and validation data

# First split: 60% train / 40% held out. Second split: of that 40%,
# 80% becomes validation and 20% becomes the final test set
# (i.e. overall 60% train / 32% validation / 8% test).
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.4, random_state=42)
X_val, X_test, y_val, y_test = train_test_split(X_test, y_test, test_size=0.2, random_state=1)

# # Modeling

# Initialize the candidate classifiers with their default hyper-parameters.
dt_clf = DecisionTreeClassifier()
rf_clf = RandomForestClassifier()
ab_clf = AdaBoostClassifier()
gb_clf = GradientBoostingClassifier()
nb_clf = GaussianNB()
lr_clf = LogisticRegression()
sgd_clf = SGDClassifier()
knn_clf = KNeighborsClassifier()
svc_clf = SVC()


# A function to train the model, predict the outcomes of the validation set and then calculate the accuracy score
def train_predict(model):
    """
    INPUT: model (any sklearn-style estimator with fit/predict)
    FUNCTION: Train the model on the training split, predict the
        outcomes of the VALIDATION split (X_val/y_val, read from the
        enclosing notebook scope) and calculate the accuracy score
    OUTPUT: accuracy score on the validation split
    """
    model.fit(X_train, y_train)
    pred = model.predict(X_val)
    score = accuracy_score(y_val, pred)
    return score


# The above function is used to train and evaluate each candidate model

# +
models = [dt_clf, rf_clf, ab_clf, gb_clf, nb_clf, lr_clf, sgd_clf, knn_clf, svc_clf]

# Wrap every model in a scaling pipeline so feature standardisation is
# fitted on the training split only, then record its validation accuracy.
scores = {}
for model in models:
    pipe = make_pipeline(
        StandardScaler(),
        model
    )
    score = train_predict(pipe)
    scores[model.__class__.__name__] = score
scores
# -

# #### Plot the accuracy scores

def plot_scores(scores, fig_name, fig=(19, 10)):
    """Draw a labelled bar chart of classifier accuracies and save it.

    INPUT:
        scores: mapping of classifier name -> accuracy score
        fig_name: file name (without extension) for the saved figure
        fig: figure size in inches (width, height)
    OUTPUT:
        None; the figure is saved under ../reports/figures/
    """
    # Keep the figure object under its own name instead of re-binding the
    # `fig` (figure-size) parameter, which shadowed it confusingly.
    figure = plt.figure(figsize=fig)
    base_color = sb.color_palette()[0]
    names = list(scores.keys())
    values = list(scores.values())
    plots = sb.barplot(x=names, y=values, color=base_color)
    # Annotate each bar with its height (the accuracy), just below the top.
    for p in plots.patches:
        plots.annotate(format(p.get_height(), '.2f'),
                       (p.get_x() + p.get_width() / 2., p.get_height()),
                       ha='center', va='center', size=15,
                       xytext=(0, -12), textcoords='offset points')
    plt.xlabel('Names of the Classifiers', size=18)
    plt.savefig(f'../reports/figures/{fig_name}.png')


plot_scores(scores, 'before_optimisation')

# Models with the higher accuracy are:
# - AdaBoostClassifier
# - GradientBoostingClassifier
# - SVC
#
# So, let's optimise these classifiers to increase the accuracy

# #### Set the parameters for RandomizedSearchCV

# +
ab_param_distribution = {
    'n_estimators': randint(50, 500),
    'learning_rate': [1.0, 2.5, 3.8, 4.2],
    'random_state': randint(1, 50)
}

gb_param_distribution = {
    'learning_rate': [0.5, 1.0, 1.5, 2.0],
    'n_estimators': randint(150, 500),
    'criterion': ['friedman_mse', 'squared_error', 'mse', 'mae']
}

svc_param_distribution = {
    'C': [1.5, 2.5, 3.0, 4.0],
    'kernel': ['linear', 'poly', 'rbf', 'sigmoid'],
    'degree': randint(3, 10),
    'random_state': randint(1, 50)
}
# -

# Create a function and initialize the RandomizedSearchCV to search across the different parameters for the best parameters

def searchCV(model, params):
    """
    INPUT: model, parameter distributions for the randomized search
    FUNCTION: Search for the best parameters (5 sampled candidates,
        fitted on the training split) and score the refitted best
        estimator on the test split
    OUTPUT: accuracy score on the test split, best parameters found
    """
    search = RandomizedSearchCV(estimator=model, n_iter=5, param_distributions=params, random_state=0)
    search.fit(X_train, y_train)
    score = search.score(X_test, y_test)
    return score, search.best_params_


# Let's give our best three models to the above function to check the scores

new_scores = {}
best_params = {}
models = {ab_clf: ab_param_distribution, gb_clf: gb_param_distribution, svc_clf: svc_param_distribution}
for model, params in models.items():
    score, best_param = searchCV(model, params)
    new_scores[model.__class__.__name__] = score
    best_params[model.__class__.__name__] = best_param

# Print the scores and the best parameters
plot_scores(new_scores, 'after_optimisation', fig=(12, 6))

# After optimising the models we get the 100% accuracy in the validation set by the **AdaBoostClassifier** and **GradientBoostingClassifier**

# # Model Evaluation

# Initialize our best candidate which is AdaBoostClassifier
clf = AdaBoostClassifier(learning_rate=1.0, n_estimators=97, random_state=1)


# Function to fit the train data, make prediction on the test data and return the accuracy score
def pipeline_fit_predict(model):
    """
    INPUT: model
    FUNCTION: fit the training data into model, predict the target
        values for the test data, calculate the accuracy score and
        return the score
    OUTPUT: accuracy score on the test split
    """
    model.fit(X_train, y_train)
    pred = model.predict(X_test)
    score = accuracy_score(y_test, pred)
    return score


score = pipeline_fit_predict(clf)
score

# After performing optimisation techniques on the model we get the 100% accuracy score on the test set

# # Save Model

model_name = 'iris_classification.model'
# Use a context manager so the file handle is closed (and the pickle
# flushed to disk) deterministically; the previous inline open() leaked it.
with open(f"../Models/{model_name}", 'wb') as model_file:
    pickle.dump(clf, model_file)
notebooks/Iris-Classification.ipynb
// --- // jupyter: // jupytext: // text_representation: // extension: .cpp // format_name: light // format_version: '1.5' // jupytext_version: 1.14.4 // kernelspec: // display_name: C++14 // language: C++14 // name: xeus-cling-cpp14 // --- // Welcome to initial Jupyter testing #include "../include/posit.h" sizeof(int_least_bits<10>::type) using P=Posit<int8_t,8,3,uint16_t,PositSpec::WithNan>; P a(10.0f); P b(20.0f); (float)(a+b) (float)(a-b) (float)inv(a) (float)(a*b) a < b
jupyter/first.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # List # **list** , 是Python中的基本数据结构之一 # + # 创建一个列表 list_01 = [] list_02 = [1, 2, 3] # 一个list中可以包含各种类型的数据,以逗号分隔 list_03 = [1, 'hello', (), [], {}] print(type(list_01)) print(type(list_02)) print(type(list_03)) # + # 访问列表元素: 通过索引来访问单个元素,可以切片,可以遍历 # 访问整个list print(list_03) # 访问指定索引号元素 print(list_03[1]) # 切片访问 print(list_03[1:3]) # - # 添加list元素 list_03.append('Hi!') print(list_03) # + # 更新lsit元素 print(list_03) list_03[2] = 'world' print(list_03) # - # 删除list元素 del list_03[0] print(list_03) # 拼接list list_a = [1, 2, 3, '!'] list_b = [4, 5, 6] print(list_a + list_b) # list的乘法 print(4 * list_a) # 遍历list for i in list_a: print(i, end=' ') # 生成器 print([i * i for i in list_b]) # + # 判断元素是否在list中 3 in list_a # - # ## list函数及方法 # 可作用于list的函数 list_a = [1, 2, 3, 4] print('此list的长度为:', len(list_a)) # 返回list长度 print('此list中的最大值为:', max(list_a)) # 返回list中最大值 print('此list中的最小值为:', min(list_a)) # 返回list中最小值 # + # list的方法 list_b = ['h', 'e', 'l', 'l', '0'] # list.append() list末尾添加元素 list_b.append('!') print(list_b) # list.count(obj) 统计某个元素出现的次数 print(list_b.count('h')) # list.extend(seq) 末尾添加序列 list_b.extend(['w', 'o', 'r', 'l', 'd']) print(list_b) # list.index(obj) 返回某个元素第一次出现位置的索引 print(list_b.index('l')) # list.insert(index, obj) 在指定位置插入元素 list_b.insert(1,'insert') print(list_b) # list.pop(index=-1) 移除元素并返回它的值,默认移除最后一个 print(list_b.pop(), list_b.pop(0)) print(list_b) # list.remove(obj) 移除list中匹配的第一个元素 list_b.remove('l') print(list_b) # list.reverse() 倒序排列 list_b.reverse() print(list_b) # list.sort() 排序 # list.copy() 复制列表 (会指向一个新的list,直接赋值会指向同一个list) list_copy = list_b.copy() print(list_copy) # list.clear() 清空列表 list_b.clear() print(list_b) # -
01_python_basic/03 list.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python [conda env:py3] * # language: python # name: conda-env-py3-py # --- # # Deep Neural Networks # # 1. Turn your classifier into deep network # 2. Use the optimizer to compute gradients # 3. Understand regularization # # ## 1. Rectified Linear Units (ReLUs) # # Insert `H` number of ReLUs between two linear functions # # ![two-layer-network.png](attachment:two-layer-network.png) # # ## 2. Multilayer Neural Networks # # In this lesson, you'll learn how to build multilayer neural networks with TensorFlow. Adding a hidden layer to a network allows it to model more complex functions. Also, using a non-linear activation function on the hidden layer lets it model non-linear functions. # # Next, you'll see how a ReLU hidden layer is implemented in TensorFlow. # # **Note**: Depicted above is a "2-layer" neural network: # # 1. The first layer effectively consists of the set of weights and biases applied to X and passed through ReLUs. The output of this layer is fed to the next one, but is not observable outside the network, hence it is known as a **hidden layer**. # # 2. The second layer consists of the weights and biases applied to these intermediate outputs, followed by the softmax function to generate probabilities. # # # ## 3. Quiz: TensorFlow ReLU # # A Rectified linear unit (ReLU) is type of [activation function](https://en.wikipedia.org/wiki/Activation_function) that is defined as `f(x) = max(0, x)`. The function returns `0` if `x` is negative, otherwise it returns `x`. TensorFlow provides the ReLU function as `tf.nn.relu()`, as shown below. 
# # ```python # # Hidden Layer with ReLU activation function # hidden_layer = tf.add(tf.matmul(features, hidden_weights), hidden_biases) # hidden_layer = tf.nn.relu(hidden_layer) # # output = tf.add(tf.matmul(hidden_layer, output_weights), output_biases) # # ``` # # The above code applies the `tf.nn.relu()` function to the `hidden_layer`, effectively turning off any negative weights and acting like an on/off switch. Adding additional layers, like the `output layer`, after an activation function turns the model into a nonlinear function. This nonlinearity allows the network to solve more complex problems. # # ### Quiz # # ![relu-network.png](attachment:relu-network.png) # # In this quiz, you'll use TensorFlow's ReLU function to turn the linear model below into a nonlinear model. # # + # Solution is available in the other "solution.py" tab import tensorflow as tf output = None hidden_layer_weights = [ [0.1, 0.2, 0.4], [0.4, 0.6, 0.6], [0.5, 0.9, 0.1], [0.8, 0.2, 0.8]] out_weights = [ [0.1, 0.6], [0.2, 0.1], [0.7, 0.9]] # Weights and biases weights = [ tf.Variable(hidden_layer_weights), tf.Variable(out_weights)] biases = [ tf.Variable(tf.zeros(3)), tf.Variable(tf.zeros(2))] # Input features = tf.Variable([[1.0, 2.0, 3.0, 4.0], [-1.0, -2.0, -3.0, -4.0], [11.0, 12.0, 13.0, 14.0]]) # TODO: Create Model hidden_layer = tf.add(tf.matmul(features, weights[0]), biases[0]) hidden_layer = tf.nn.relu(hidden_layer) logits = tf.add(tf.matmul(hidden_layer, weights[1]), biases[1]) # TODO: save and print session results on variable output with tf.Session() as sess: sess.run(tf.global_variables_initializer()) output = sess.run(logits) print(output) # - # ## 4. The Chain Rule # # Compute the derivative using the derivatives of each step # # ## 5. Back Prop # # Back prop usually takes 2X the memory needed by forward prop # # ## 6. Deep Neural Network in TensorFlow # # You've seen how to build a logistic classifier using TensorFlow. 
Now you're going to see how to use the logistic classifier to build a deep neural network.
#
# ### Step by Step
#
# In the following walkthrough, we'll step through TensorFlow code written to classify the letters in the MNIST database. If you would like to run the network on your computer, the file is provided [here](https://d17h27t6h515a5.cloudfront.net/topher/2017/February/58a61a3a_multilayer-perceptron/multilayer-perceptron.zip). You can find this and many more examples of TensorFlow at [Aymeric Damien's GitHub repository](https://github.com/aymericdamien/TensorFlow-Examples).
#
# ### Code
#
# ### TensorFlow MNIST
#
# ```python
# from tensorflow.examples.tutorials.mnist import input_data
# mnist = input_data.read_data_sets(".", one_hot=True, reshape=False)
#
# ```
#
# You'll use the MNIST dataset provided by TensorFlow, which batches and One-Hot encodes the data for you.
#
# ### Learning Parameters
#
# ```python
# import tensorflow as tf
#
# # Parameters
# learning_rate = 0.001
# training_epochs = 20
# batch_size = 128  # Decrease batch size if you don't have enough memory
# display_step = 1
#
# n_input = 784  # MNIST data input (img shape: 28*28)
# n_classes = 10  # MNIST total classes (0-9 digits)
#
# ```
#
# The focus here is on the architecture of multilayer neural networks, not parameter tuning, so here we'll just give you the learning parameters.
#
# ### Hidden Layer Parameters
#
# ```python
# n_hidden_layer = 256 # layer number of features
#
# ```
#
# The variable `n_hidden_layer` determines the size of the hidden layer in the neural network. This is also known as the width of a layer.
# # ### Weights and Biases # # ```python # # Store layers weight & bias # weights = { # 'hidden_layer': tf.Variable(tf.random_normal([n_input, n_hidden_layer])), # 'out': tf.Variable(tf.random_normal([n_hidden_layer, n_classes])) # } # biases = { # 'hidden_layer': tf.Variable(tf.random_normal([n_hidden_layer])), # 'out': tf.Variable(tf.random_normal([n_classes])) # } # # ``` # # Deep neural networks use multiple layers with each layer requiring it's own weight and bias. The `'hidden_layer'` weight and bias is for the hidden layer. The `'out'` weight and bias is for the output layer. If the neural network were deeper, there would be weights and biases for each additional layer. # # ### Input # # ```python # # tf Graph input # x = tf.placeholder("float", [None, 28, 28, 1]) # y = tf.placeholder("float", [None, n_classes]) # # x_flat = tf.reshape(x, [-1, n_input]) # # ``` # # The MNIST data is made up of 28px by 28px images with a single channel. The `tf.reshape()` function above reshapes the 28px by 28px matrices in `x` into row vectors of 784px. # # ### Multilayer Perceptron # # ![multi-layer.png](attachment:multi-layer.png) # # ```python # # Hidden layer with RELU activation # layer_1 = tf.add(tf.matmul(x_flat, weights['hidden_layer']),\ # biases['hidden_layer']) # layer_1 = tf.nn.relu(layer_1) # # Output layer with linear activation # logits = tf.add(tf.matmul(layer_1, weights['out']), biases['out']) # # ``` # # You've seen the linear function `tf.add(tf.matmul(x_flat, weights['hidden_layer']), biases['hidden_layer'])` before, also known as `xw + b`. Combining linear functions together using a ReLU will give you a two layer network. 
# # ### Optimizer # # ```python # # Define loss and optimizer # cost = tf.reduce_mean(\ # tf.nn.softmax_cross_entropy_with_logits(logits=logits, labels=y)) # optimizer = tf.train.GradientDescentOptimizer(learning_rate=learning_rate)\ # .minimize(cost) # # ``` # # This is the same optimization technique used in the Intro to TensorFLow lab. # # ### Session # # ```python # # Initializing the variables # init = tf.global_variables_initializer() # # # Launch the graph # with tf.Session() as sess: # sess.run(init) # # Training cycle # for epoch in range(training_epochs): # total_batch = int(mnist.train.num_examples/batch_size) # # Loop over all batches # for i in range(total_batch): # batch_x, batch_y = mnist.train.next_batch(batch_size) # # Run optimization op (backprop) and cost op (to get loss value) # sess.run(optimizer, feed_dict={x: batch_x, y: batch_y}) # # ``` # # The MNIST library in TensorFlow provides the ability to receive the dataset in batches. Calling the `mnist.train.next_batch()` function returns a subset of the training data. # # # ## 7. Deeper Neural Network # # ![layers.png](attachment:layers.png) # # That's it! Going from one layer to two is easy. Adding more layers to the network allows you to solve more complicated problems. # # ## 8. Save and Restore TensorFlow Models # # Training a model can take hours. But once you close your TensorFlow session, you lose all the trained weights and biases. If you were to reuse the model in the future, you would have to train it all over again! # # Fortunately, TensorFlow gives you the ability to save your progress using a class called `tf.train.Saver`. This class provides the functionality to save any `tf.Variable` to your file system. # # ### a) Saving Variables # # Let's start with a simple example of saving `weights` and `bias` Tensors. For the first example you'll just save two variables. Later examples will save all the weights in a practical model. 
# # ```python # import tensorflow as tf # # # The file path to save the data # save_file = './model.ckpt' # # # Two Tensor Variables: weights and bias # weights = tf.Variable(tf.truncated_normal([2, 3])) # bias = tf.Variable(tf.truncated_normal([3])) # # # Class used to save and/or restore Tensor Variables # saver = tf.train.Saver() # # with tf.Session() as sess: # # Initialize all the Variables # sess.run(tf.global_variables_initializer()) # # # Show the values of weights and bias # print('Weights:') # print(sess.run(weights)) # print('Bias:') # print(sess.run(bias)) # # # Save the model # saver.save(sess, save_file) # # ``` # # # > Weights: # # > [[-0.97990924 1.03016174 0.74119264] # # > [-0.82581609 -0.07361362 -0.86653847]] # # > Bias: # # > [ 1.62978125 -0.37812829 0.64723819] # # The Tensors `weights` and `bias` are set to random values using the `tf.truncated_normal()` function. The values are then saved to the `save_file` location, "model.ckpt", using the `tf.train.Saver.save()` function. (The ".ckpt" extension stands for "checkpoint".) # # If you're using TensorFlow 0.11.0RC1 or newer, a file called "model.ckpt.meta" will also be created. This file contains the TensorFlow graph. # # ### b) Loading Variables # # Now that the Tensor Variables are saved, let's load them back into a new model. 
# # ```python # # Remove the previous weights and bias # tf.reset_default_graph() # # # Two Variables: weights and bias # weights = tf.Variable(tf.truncated_normal([2, 3])) # bias = tf.Variable(tf.truncated_normal([3])) # # # Class used to save and/or restore Tensor Variables # saver = tf.train.Saver() # # with tf.Session() as sess: # # Load the weights and bias # saver.restore(sess, save_file) # # # Show the values of weights and bias # print('Weight:') # print(sess.run(weights)) # print('Bias:') # print(sess.run(bias)) # # ``` # # > Weights: # # > [[-0.97990924 1.03016174 0.74119264] # # > [-0.82581609 -0.07361362 -0.86653847]] # # > Bias: # # > [ 1.62978125 -0.37812829 0.64723819] # # You'll notice you still need to create the `weights` and `bias` Tensors in Python. The `tf.train.Saver.restore()` function loads the saved data into `weights` and `bias`. # # Since `tf.train.Saver.restore()` sets all the TensorFlow Variables, you don't need to call `tf.global_variables_initializer()`. # # ### c) Save a Trained Model # Let's see how to train a model and save its weights. 
# # First start with a model: # # ```python # # Remove previous Tensors and Operations # tf.reset_default_graph() # # from tensorflow.examples.tutorials.mnist import input_data # import numpy as np # # learning_rate = 0.001 # n_input = 784 # MNIST data input (img shape: 28*28) # n_classes = 10 # MNIST total classes (0-9 digits) # # # Import MNIST data # mnist = input_data.read_data_sets('.', one_hot=True) # # # Features and Labels # features = tf.placeholder(tf.float32, [None, n_input]) # labels = tf.placeholder(tf.float32, [None, n_classes]) # # # Weights & bias # weights = tf.Variable(tf.random_normal([n_input, n_classes])) # bias = tf.Variable(tf.random_normal([n_classes])) # # # Logits - xW + b # logits = tf.add(tf.matmul(features, weights), bias) # # # Define loss and optimizer # cost = tf.reduce_mean(\ # tf.nn.softmax_cross_entropy_with_logits(logits=logits, labels=labels)) # optimizer = tf.train.GradientDescentOptimizer(learning_rate=learning_rate)\ # .minimize(cost) # # # Calculate accuracy # correct_prediction = tf.equal(tf.argmax(logits, 1), tf.argmax(labels, 1)) # accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32)) # # ``` # # Let's train that model, then save the weights: # # ```python # import math # # save_file = './train_model.ckpt' # batch_size = 128 # n_epochs = 100 # # saver = tf.train.Saver() # # # Launch the graph # with tf.Session() as sess: # sess.run(tf.global_variables_initializer()) # # # Training cycle # for epoch in range(n_epochs): # total_batch = math.ceil(mnist.train.num_examples / batch_size) # # # Loop over all batches # for i in range(total_batch): # batch_features, batch_labels = mnist.train.next_batch(batch_size) # sess.run( # optimizer, # feed_dict={features: batch_features, labels: batch_labels}) # # # Print status for every 10 epochs # if epoch % 10 == 0: # valid_accuracy = sess.run( # accuracy, # feed_dict={ # features: mnist.validation.images, # labels: mnist.validation.labels}) # print('Epoch {:<3} - 
Validation Accuracy: {}'.format( # epoch, # valid_accuracy)) # # # Save the model # saver.save(sess, save_file) # print('Trained Model Saved.') # # ``` # # > Epoch 0 - Validation Accuracy: 0.06859999895095825 # # > Epoch 10 - Validation Accuracy: 0.20239999890327454 # # > Epoch 20 - Validation Accuracy: 0.36980000138282776 # # > Epoch 30 - Validation Accuracy: 0.48820000886917114 # # > Epoch 40 - Validation Accuracy: 0.5601999759674072 # # > Epoch 50 - Validation Accuracy: 0.6097999811172485 # # > Epoch 60 - Validation Accuracy: 0.6425999999046326 # # > Epoch 70 - Validation Accuracy: 0.6733999848365784 # # > Epoch 80 - Validation Accuracy: 0.6916000247001648 # # > Epoch 90 - Validation Accuracy: 0.7113999724388123 # # > Trained Model Saved. # # ### d) Load a Trained Model # # Let's load the weights and bias from memory, then check the test accuracy. # # ```python # saver = tf.train.Saver() # # # Launch the graph # with tf.Session() as sess: # saver.restore(sess, save_file) # # test_accuracy = sess.run( # accuracy, # feed_dict={features: mnist.test.images, labels: mnist.test.labels}) # # print('Test Accuracy: {}'.format(test_accuracy)) # # ``` # # > Test Accuracy: 0.7229999899864197 # # That's it! You now know how to save and load a trained model in TensorFlow. Let's look at loading weights and biases into modified models in the next section. # # ## 9. Loading the Weights and Biases into a New Model # # Sometimes you might want to adjust, or "finetune" a model that you have already trained and saved. # # However, loading saved Variables directly into a modified model can generate errors. Let's go over how to avoid these problems. # # ### a) Naming Error # # TensorFlow uses a string identifier for Tensors and Operations called `name`. If a name is not given, TensorFlow will create one automatically. TensorFlow will give the first node the name `<Type>`, and then give the name `<Type>_<number>` for the subsequent nodes. 
Let's see how this can affect loading a model with a different order of `weights` and `bias`: # # ```python # import tensorflow as tf # # # Remove the previous weights and bias # tf.reset_default_graph() # # save_file = 'model.ckpt' # # # Two Tensor Variables: weights and bias # weights = tf.Variable(tf.truncated_normal([2, 3])) # bias = tf.Variable(tf.truncated_normal([3])) # # saver = tf.train.Saver() # # # Print the name of Weights and Bias # print('Save Weights: {}'.format(weights.name)) # print('Save Bias: {}'.format(bias.name)) # # with tf.Session() as sess: # sess.run(tf.global_variables_initializer()) # saver.save(sess, save_file) # # # Remove the previous weights and bias # tf.reset_default_graph() # # # Two Variables: weights and bias # bias = tf.Variable(tf.truncated_normal([3])) # weights = tf.Variable(tf.truncated_normal([2, 3])) # # saver = tf.train.Saver() # # # Print the name of Weights and Bias # print('Load Weights: {}'.format(weights.name)) # print('Load Bias: {}'.format(bias.name)) # # with tf.Session() as sess: # # Load the weights and bias - ERROR # saver.restore(sess, save_file) # # ``` # # The code above prints out the following: # # > Save Weights: Variable:0 # # > Save Bias: Variable_1:0 # # > Load Weights: Variable_1:0 # # > Load Bias: Variable:0 # # > ... # # > InvalidArgumentError (see above for traceback): Assign requires shapes of both tensors to match. # # > ... # # You'll notice that the `name` properties for `weights` and `bias` are different than when you saved the model. This is why the code produces the "Assign requires shapes of both tensors to match" error. The code `saver.restore(sess, save_file)` is trying to load weight data into `bias` and bias data into `weights`. 
# # Instead of letting TensorFlow set the name property, let's set it manually: # # ```python # import tensorflow as tf # # tf.reset_default_graph() # # save_file = 'model.ckpt' # # # Two Tensor Variables: weights and bias # weights = tf.Variable(tf.truncated_normal([2, 3]), name='weights_0') # bias = tf.Variable(tf.truncated_normal([3]), name='bias_0') # # saver = tf.train.Saver() # # # Print the name of Weights and Bias # print('Save Weights: {}'.format(weights.name)) # print('Save Bias: {}'.format(bias.name)) # # with tf.Session() as sess: # sess.run(tf.global_variables_initializer()) # saver.save(sess, save_file) # # # Remove the previous weights and bias # tf.reset_default_graph() # # # Two Variables: weights and bias # bias = tf.Variable(tf.truncated_normal([3]), name='bias_0') # weights = tf.Variable(tf.truncated_normal([2, 3]) ,name='weights_0') # # saver = tf.train.Saver() # # # Print the name of Weights and Bias # print('Load Weights: {}'.format(weights.name)) # print('Load Bias: {}'.format(bias.name)) # # with tf.Session() as sess: # # Load the weights and bias - No Error # saver.restore(sess, save_file) # # print('Loaded Weights and Bias successfully.') # # ``` # # > Save Weights: weights_0:0 # # > Save Bias: bias_0:0 # # > Load Weights: weights_0:0 # # > Load Bias: bias_0:0 # # > Loaded Weights and Bias successfully. # # That worked! The Tensor names match and the data loaded correctly. # ## 10. Regularization # # 1. Early Termination(Best Way) # 2. L2 Regularization # ## 11. [TensorFlow Dropout](https://www.tensorflow.org/api_docs/python/tf/nn/dropout) # # ![dropout-node.jpeg](attachment:dropout-node.jpeg) # # Dropout is a regularization technique for reducing overfitting. The technique temporarily drops units ([artificial neurons](https://en.wikipedia.org/wiki/Artificial_neuron)) from the network, along with all of those units' incoming and outgoing connections. Figure 1 illustrates how dropout works. 
# # TensorFlow provides the `tf.nn.dropout()` function, which you can use to implement dropout. # # Let's look at an example of how to use `tf.nn.dropout()`. # # ```python # keep_prob = tf.placeholder(tf.float32) # probability to keep units # # hidden_layer = tf.add(tf.matmul(features, weights[0]), biases[0]) # hidden_layer = tf.nn.relu(hidden_layer) # hidden_layer = tf.nn.dropout(hidden_layer, keep_prob) # # logits = tf.add(tf.matmul(hidden_layer, weights[1]), biases[1]) # # ``` # # The code above illustrates how to apply dropout to a neural network. # # The `tf.nn.dropout()` function takes in two parameters: # # 1. `hidden_layer`: the tensor to which you would like to apply dropout # 2. `keep_prob`: the probability of keeping (i.e. not dropping) any given unit # # `keep_prob` allows you to adjust the number of units to drop. In order to compensate for dropped units, `tf.nn.dropout()` multiplies all units that are kept (i.e. not dropped) by `1/keep_prob`. # # During training, a good starting value for `keep_prob` is `0.5`. # # During testing, use a `keep_prob` value of `1.0` to keep all units and maximize the power of the model. # # ### Quiz 1 # # Take a look at the code snippet below. Do you see what's wrong? # # There's nothing wrong with the syntax, however the test accuracy is extremely low. # # ```python # ... # # keep_prob = tf.placeholder(tf.float32) # probability to keep units # # hidden_layer = tf.add(tf.matmul(features, weights[0]), biases[0]) # hidden_layer = tf.nn.relu(hidden_layer) # hidden_layer = tf.nn.dropout(hidden_layer, keep_prob) # # logits = tf.add(tf.matmul(hidden_layer, weights[1]), biases[1]) # # ... # # with tf.Session() as sess: # sess.run(tf.global_variables_initializer()) # # for epoch_i in range(epochs): # for batch_i in range(batches): # .... 
#
#             sess.run(optimizer, feed_dict={
#                 features: batch_features,
#                 labels: batch_labels,
#                 keep_prob: 0.5})
#
#     validation_accuracy = sess.run(accuracy, feed_dict={
#         features: test_features,
#         labels: test_labels,
#         keep_prob: 0.5})
#
# ```
#
# What's wrong with the above code?
#
# > `keep_prob` should be set to 1.0 when evaluating validation accuracy
#
# That's correct! You should only drop units while training the model. During validation or testing, you should keep all of the units to maximize accuracy.
#
# ### Quiz 2
#
# This quiz will be starting with the code from the ReLU Quiz and applying a dropout layer. Build a model with a ReLU layer and dropout layer using the `keep_prob` placeholder to pass in a probability of `0.5`. Print the logits from the model.
#
# Note: Output will be different every time the code is run. This is caused by dropout randomizing the units it drops.
#

# +
# Solution is available in the other "solution.py"
import tensorflow as tf
from test import *

# Fix the graph-level random seed once so runs are repeatable.
# (The original cell set the same seed twice and computed an unused
# `rate = 1 - keep_prob` left over from a commented-out alternative;
# both redundancies have been removed.)
tf.set_random_seed(123456)

hidden_layer_weights = [
    [0.1, 0.2, 0.4],
    [0.4, 0.6, 0.6],
    [0.5, 0.9, 0.1],
    [0.8, 0.2, 0.8]]
out_weights = [
    [0.1, 0.6],
    [0.2, 0.1],
    [0.7, 0.9]]

# Weights and biases (index 0 = hidden layer, index 1 = output layer)
weights = [
    tf.Variable(hidden_layer_weights),
    tf.Variable(out_weights)]
biases = [
    tf.Variable(tf.zeros(3)),
    tf.Variable(tf.zeros(2))]

# Input: three 4-feature samples
features = tf.Variable([[0.0, 2.0, 3.0, 4.0], [0.1, 0.2, 0.3, 0.4], [11.0, 12.0, 13.0, 14.0]])

# TODO: Create Model with Dropout
# keep_prob is fed at run time: 0.5 while training, 1.0 for evaluation.
keep_prob = tf.placeholder(tf.float32)
hidden_layer = tf.add(tf.matmul(features, weights[0]), biases[0])
hidden_layer = tf.nn.relu(hidden_layer)
# Kept units are scaled by 1/keep_prob to compensate for dropped ones.
hidden_layer = tf.nn.dropout(hidden_layer, keep_prob)
logits = tf.add(tf.matmul(hidden_layer, weights[1]), biases[1])

# TODO: save and print session results as variable named "output"
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    output = sess.run(logits, feed_dict={keep_prob: 0.5})
    print(output)
# -
Deep Neural Networks/Deep Neural Networks.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# ## Reco-Gym - Pure Organic vs Pure Bandit
# #### Varying the number of online users
#
# The purpose of this notebook is to show that a "Pure Organic" algorithm i.e. an algorithm that orders recommendations according to a next (organic) item prediction method is able to harness relatively plentiful organic data i.e. data the users provide themselves by organically interacting with the website. Although the data is plentiful, it is only a proxy of the data of interest for making recommendations which is indeed bandit data.
#
# In contrast a "Pure Bandit" algorithm receives feedback from the recommendations however it is only able to discover that poor recommendations are poor by trying them extensively and performing poorly.
#

# enables the %%R magic
# %load_ext rpy2.ipython

# +
# %matplotlib inline
import numpy as np
import gym, reco_gym
import pandas as pd
from reco_gym import env_1_args
from copy import deepcopy

env_1_args['random_seed'] = 5

# initialize the gym for the first time by calling .make() and .init_gym()
env = gym.make('reco-gym-v1')
env_1_args['num_products']=80

# +
env.init_gym(env_1_args)

# create environments that differ only in the amount of noise "number of flips"
env_dict = {}
for f in (int(env.num_products*2/8),): # number of flips (difference between organic and bandit behaviour)
    env_dict[f] = deepcopy(env)
    env_dict[f].generate_beta(f)

# +
# Project-local agents (outside this notebook's view).
from agents import BanditCount, bandit_count_args
from agents import OrganicCount, organiccount_args

# Both agents must agree with the environment on the catalogue size.
bandit_count_args['num_products'] = env_1_args['num_products']
organiccount_args['num_products'] = env_1_args['num_products']
# -

d_agent = {'Pure Organic': OrganicCount(organiccount_args), 'Pure Bandit': BanditCount(bandit_count_args)}

# Fixed amounts of offline organic data and evaluation traffic;
# only the number of online (bandit) users is varied below.
offline_organic = 10000
eval_size = 20000

# Collect one row per (agent, training size, flips) combination with the
# median click-through rate and its 2.5%/97.5% interval bounds.
l=list()
for samples in (1, 10, 100, 1000, 10000):
    for name in d_agent.keys():
        agent = d_agent[name]
        for f in env_dict.keys():
            # Deep copies keep each run independent of the others.
            c05, c0025, c0975 = reco_gym.test_agent(deepcopy(env_dict[f]), deepcopy(agent), samples, eval_size, offline_organic)
            df={}
            df['agent'] = name
            df['training'] = samples
            df['ctr_05'] = [c05]
            df['ctr_0025'] = [c0025]
            df['ctr_0975'] = [c0975]
            df['flips'] = 'flips=%03d' %(f)
            df['f'] = f
            l.append(pd.DataFrame().from_dict(df))
res = pd.concat(l)

# + magic_args="-i res -w 5 -h 5 --units in -r 200" language="R"
# # import df from global environment
# # make default figure size 5 by 5 inches with 200 dpi resolution
#
#
# library(ggplot2)
# library(data.table)
# library(scales)
# res<-data.table(res)
# res[agent=='Pure Bandit',training:=1.1*training] # dodge
#
# ggplot(res) + geom_errorbar(aes(x=training, ymin=ctr_0025, ymax=ctr_0975, colour=agent), width=0) + facet_grid(flips ~ .) + theme_bw() + ylab('Click through rate') + xlab('Number of Online Users') + scale_x_log10() + scale_y_continuous(labels=percent) + scale_colour_manual(values=c('blue', 'orange'))
#
# -

# We see that this plot reproduces the following prediction from our paper (with Pure Bandit and Pure Organic only):
#
# "Pure Organic" behavior is determined by the amount of noise in $\sigma$ and is unaffected by the number of bandit events (this is not strictly true in the simulation and a small increase is evident on pure organic, as the user also provides an organic signal). If $\sigma$ is sufficiently small that the actions are correctly ordered it will perform well, in contrast if it is large it will perform poorly. The performance does not change as the number of bandit events increase as these are not used. In contrast the "Pure Bandit" algorithm is unable to make predictions without large numbers of bandit events. It performs poorly when this data is scarce and well when it is plentiful.
#
# <img src="images/section3_n_bandit_events.png" alt="Drawing" style="width: 650px;"/>
Pure Organic vs Bandit - Number of Online Users.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python [conda root] # language: python # name: conda-root-py # --- # TODO: Figure out ravel() 1d array problem import pandas as pd import numpy as np import os from nltk import word_tokenize, sent_tokenize from nltk.corpus import stopwords from sklearn.feature_extraction.text import CountVectorizer from __future__ import division import seaborn as sns import matplotlib.pyplot as plt # %matplotlib inline from sklearn.feature_extraction.text import TfidfVectorizer labels = pd.read_csv('stocknews//labels.csv', header=None) # http://stackoverflow.com/questions/28382735/python-pandas-does-not-read-the-first-row-of-csv-file # # `pd.read_csv` was cutting off the first row of labels # Confirm size of labels to make sure data loaded correctly labels.shape str_tokens = pd.read_csv('stocknews//tokens_str.csv', dtype=str, keep_default_na=False) str_tokens.head() str_tokens.shape def replace_num(element): return ' '.join([('numero' if k.isdigit() else k) for k in element.split()]) # http://stackoverflow.com/questions/6905636/python-conditional-list-joins # Instead of a digit, use `'NUMERO'` as it is resistant to stemming/lemmatizing. It's possible that headlines with numbers might contain some information. This will prevent preprocessing from discarding the information. 
# Join all per-headline token columns of a row into a single document string.
# (Assumes every column of str_tokens holds headline tokens — TODO confirm.)
str_tokens['merged'] = str_tokens.iloc[:, 0:].apply(lambda x: ' '.join(x.dropna().values.tolist()), axis=1)

# +
# type(str_tokens['merged'][0])
# -

# Replace all-digit tokens with the stem-proof placeholder defined earlier.
str_tokens['merged'] = str_tokens['merged'].apply(replace_num)

from nltk.stem.porter import PorterStemmer
pstem = PorterStemmer()

def stm(element):
    """Porter-stem every whitespace-separated token of *element* and re-join with spaces."""
    return ' '.join([pstem.stem(k) for k in element.split()])

str_tokens['merged'] = str_tokens['merged'].apply(stm)

str_tokens['merged']

# Split labels and tokens into 4 folds
# Fold 1 = [:499]
# Fold 2 = [499:996]
# Fold 3 = [996:1493]
# Fold 4 = [1493:]
# NOTE(review): only a single train/test split at row 1493 is actually used below.

# ## Split data into train and test sets

train_text = str_tokens.merged[0:1493] # train features
test_text = str_tokens.merged[1493:] # test features
train_labels = labels[0:1493].values # train labels
test_labels = labels[1493:].values # test labels

# Unigram tf-idf features, capped at 40k terms; sublinear_tf dampens raw counts.
vectorizer = TfidfVectorizer( max_features = 40000, ngram_range = ( 1, 1 ), sublinear_tf = True )

# Only need text, not labels
train_x = vectorizer.fit_transform( train_text )
test_x = vectorizer.transform( test_text )

train_x.shape
test_x.shape

# ### Passive-Aggressive Classifier
from sklearn.linear_model import PassiveAggressiveClassifier
classifier = PassiveAggressiveClassifier(n_iter=8)
train_labels.shape
train_labels.ravel()
classifier.fit(train_x, train_labels.ravel())
classifier.score(test_x, test_labels.ravel())

# ### SGD (logistic-loss) Classifier
from sklearn.linear_model import SGDClassifier
# BUG FIX: this cell previously built PassiveAggressiveClassifier(loss='log', ...),
# but PassiveAggressiveClassifier only supports 'hinge'/'squared_hinge' and
# fit() would raise ValueError. The SGDClassifier import right above shows the
# intent was logistic regression via SGD, so instantiate that instead.
# NOTE(review): n_iter was renamed max_iter in newer scikit-learn releases.
classifier = SGDClassifier(loss='log', n_iter=8)
classifier.fit(train_x, train_labels.ravel())
classifier.score(test_x, test_labels.ravel())

# ### Ridge Classifier
from sklearn.linear_model import RidgeClassifier
clf = RidgeClassifier()
clf.fit(train_x, train_labels.ravel())
clf.score(test_x, test_labels.ravel())

# ### Gaussian Naive Bayes
from sklearn.naive_bayes import GaussianNB
gnb = GaussianNB()
# GaussianNB does not accept sparse input, hence the .toarray() densification.
gnb.fit(train_x.toarray(), train_labels.ravel())
gnb.score(test_x.toarray(), test_labels.ravel())

# ### Support Vector Classifier
from sklearn.svm import SVC
supportvc = SVC()
supportvc.fit(train_x.toarray(), train_labels.ravel())
supportvc.score(test_x.toarray(), test_labels.ravel())
BOW_Stemmed_Unigrams.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # <script async src="https://www.googletagmanager.com/gtag/js?id=UA-59152712-8"></script> # <script> # window.dataLayer = window.dataLayer || []; # function gtag(){dataLayer.push(arguments);} # gtag('js', new Date()); # # gtag('config', 'UA-59152712-8'); # </script> # # # sbPoynETNRPy: An Einstein Toolkit Thorn for Computing the 4-Velocity Time-Component $u^0$, the Magnetic Field Measured by a Comoving Observer $b^{\mu}$, and the Poynting Vector $S^i$ # # ## Author: <NAME> # ### Formatting improvements courtesy <NAME> # # [comment]: <> (Abstract: TODO) # # **Notebook Status:** <font color='green'><b> Validated </b></font> # # **Validation Notes:** This module has been validated against the hand-written smallbPoynET in WVUThorns_diagnostics (a trsuted code), which itself is based on expressions in IllinoisGRMHD... which was validated against the original GRMHD code of the Illinois NR group. # # ## Introduction: # In the [previous tutorial notebook](Tutorial-u0_smallb_Poynting-Cartesian.ipynb), we constructed within SymPy full expressions for the 4-velocity time-component $u^0$, the magnetic field (measured by a comoving observer) $b^{\mu}$, and the Poynting vector $S^i$. # # Here we will work through the steps necessary to construct an Einstein Toolkit diagnostic thorn (module) that uses ADMBase and HydroBase variables as input into the NRPy+-generated SymPy expressions for $b^{\mu}$, $b^2$, and the Poynting Vector $S^i$, outputting to gridfunctions `smallb4U[]`, `smallb2etk` (the "etk" suffix must be appended because base gridfunction names ending in numbers are not allowed in NRPy+), and `SPoyn[]`, respectively. # <a id='toc'></a> # # # Table of Contents # $$\label{toc}$$ # # This tutorial is organized as follows # # 1. 
[Step 1](#initializenrpy): Call on NRPy+ to convert the SymPy expressions for $b^{\mu}$, $b^2$, and the Poynting Vector $S^i$ to C code kernels # 1. [Step 2](#etk): Build up the needed Einstein Toolkit infrastructure to implement the NRPy+-generated C code kernels # 1. [Step 2.a](#etkc): Write the C code functions called by the Einstein Toolkit scheduler that incorporate the above ".h" files # 1. [Step 2.b](#cclfiles): CCL files - Define how this module interacts and interfaces with the larger Einstein Toolkit infrastructure # 1. [Step 2.c](#etksys): Inform the Einstein Toolkit build system of the C code # 1. [Step 3](#latex_pdf_output): Output this notebook to $\LaTeX$-formatted PDF file # <a id='initializenrpy'></a> # # # Step 1: Call on NRPy+ to convert the SymPy expressions for $b^{\mu}$, $b^2$, and the Poynting Vector $S^i$ to C code kernels \[Back to [top](#toc)\] # $$\label{initializenrpy}$$ # Step 1a: import all needed modules from NRPy+: import NRPy_param_funcs as par import indexedexp as ixp import grid as gri from outputC import * import sympy as sp # Step 1b: Initialize parameters (stub; there are none for this module) thismodule = __name__ # We will to disable verbose output in the NRPy+ outputC function. This is an important step in this case, because our final expressions are very large. Verbose output, when enabled, will print (in comments) the input SymPy expressions to the top of the file *without* CSE, resulting here in an *enormous* output file. 
# # We will also declare the additional gridfunctions we need for this thorn: # # **Inputs from ADMBase:** # * the physical metric $\gamma_{ij}$ # * the spacetime gauge quantities $\alpha$ and $\beta^i$ # # **Inputs from HydroBase:** # * the Valencia 3-velocity $v^i_{(n)}$ # * the densitized magnetic field of a normal observer $\tilde{B}^i$ # # **Output gridfunctions:** # * the magnetic field as observed in a frame comoving with the plasma $b^\mu$ (`smallb4U[]}`) # * twice the magnetic pressure $2 P_{\rm mag} = b_\mu b^\mu = b^2$ (`smallb2etk`) # * the Poynting vector $S^i$ (`SPoyn[]`) # + # Step 1c: Set spatial dimension (must be 3 for BSSN) DIM = 3 par.set_parval_from_str("grid::DIM",DIM) # Step 1d: declare the additional gridfunctions (i.e., functions whose values are declared # at every grid point, either inside or outside of our SymPy expressions) needed # for this thorn # INPUT GRIDFUNCTIONS: gammaDD = ixp.register_gridfunctions_for_single_rank2("AUX","gammaDD", "sym01") # The AUX or EVOL designation is *not* # used in diagnostic modules. betaU = ixp.register_gridfunctions_for_single_rank1("AUX","betaU") # The AUX or EVOL designation is *not* # used in diagnostic modules. alpha = gri.register_gridfunctions("AUX","alpha") # The AUX or EVOL designation is *not* # used in diagnostic modules. ValenciavU = ixp.register_gridfunctions_for_single_rank1("AUX","ValenciavU") # The AUX or EVOL designation is *not* # used in diagnostic modules. BU = ixp.register_gridfunctions_for_single_rank1("AUX","BU") # The AUX or EVOL designation is *not* # used in diagnostic modules. # OUTPUT GRIDFUNCTIONS: smallb4U = ixp.register_gridfunctions_for_single_rank1("AUX","smallb4U",DIM=4) # The AUX or EVOL designation is *not* # used in diagnostic modules. smallb2etk = gri.register_gridfunctions("AUX","smallb2etk") # The AUX or EVOL designation is *not* # used in diagnostic modules. 
PoynSU = ixp.register_gridfunctions_for_single_rank1("AUX","PoynSU") # The AUX or EVOL designation is *not* # used in diagnostic modules. # Step 1f: Call the NRPy+ module to set up the SymPy expressions for the output, as well as the C code for computing u^0 import u0_smallb_Poynting__Cartesian.u0_smallb_Poynting__Cartesian as u0etc u0etc.compute_u0_smallb_Poynting__Cartesian(gammaDD,betaU,alpha,ValenciavU,BU) # Step 1g: Set the gridfunction memory access type to "ETK": par.set_parval_from_str("GridFuncMemAccess","ETK") # Step 1h: Make output directories: # !mkdir sbPoynETNRPy 2>/dev/null # 2>/dev/null: Don't throw an error or warning if the directory already exists. # !mkdir sbPoynETNRPy/src 2>/dev/null # 2>/dev/null: Don't throw an error or warning if the directory already exists. # Step 1i: Output routine for computing u0: with open("sbPoynETNRPy/src/u0.h", "w") as file: file.write(str(u0etc.computeu0_Cfunction)) print("Wrote to file \""+file.name+"\"") # Step 1j: Use NRPy+'s outputC to convert the SymPy expressions for smallb4U, smallb2etk, and PoynSU to C code: #outputC([u0etc.smallb4U[0],u0etc.smallb4U[1],u0etc.smallb4U[2],u0etc.smallb4U[3],u0etc.smallb2etk, outputC([u0etc.smallb4U[0],u0etc.smallb4U[1],u0etc.smallb4U[2],u0etc.smallb4U[3],u0etc.smallb2etk, u0etc.PoynSU[0],u0etc.PoynSU[1],u0etc.PoynSU[2]], [gri.gfaccess("","smallb4U0"),gri.gfaccess("","smallb4U1"),gri.gfaccess("","smallb4U2"),gri.gfaccess("","smallb4U3"), gri.gfaccess("","smallb2etk"), gri.gfaccess("","PoynSU0"),gri.gfaccess("","PoynSU1"),gri.gfaccess("","PoynSU2")], filename="sbPoynETNRPy/src/smallb4U_smallb2etk_PoynSU.h", params="outCverbose=False") # <- Force outCverbose=False for this # module to avoid gigantic C file filled with the # non-CSE expressions for the Weyl scalars. 
# - # <a id='etk'></a> # # # Step 2: Build up the needed Einstein Toolkit infrastructure to implement the NRPy+-generated C code kernels \[Back to [top](#toc)\] # $$\label{etk}$$ # # <a id='etkc'></a> # # ## Step 2.a: Write the C code functions called by the Einstein Toolkit scheduler that incorporate the above ".h" files \[Back to [top](#toc)\] # $$\label{etkc}$$ # + # %%writefile sbPoynETNRPy/src/sbPoynETNRPy.c #include <math.h> #include <stdio.h> #include <stdlib.h> #include <string.h> #include "cctk.h" #include "cctk_Arguments.h" #include "cctk_Parameters.h" void sbPoynETNRPy_lowlevel(const cGH* restrict const cctkGH,const int *cctk_lsh, const CCTK_REAL *gammaDD00GF,const CCTK_REAL *gammaDD01GF,const CCTK_REAL *gammaDD02GF, const CCTK_REAL *gammaDD11GF,const CCTK_REAL *gammaDD12GF,const CCTK_REAL *gammaDD22GF, const CCTK_REAL *alphaGF, const CCTK_REAL *betaU0GF,const CCTK_REAL *betaU1GF,const CCTK_REAL *betaU2GF, const CCTK_REAL *vel,const CCTK_REAL *Bvec, CCTK_REAL *smallb4U0GF,CCTK_REAL *smallb4U1GF,CCTK_REAL *smallb4U2GF,CCTK_REAL *smallb4U3GF, CCTK_REAL *smallb2etkGF, CCTK_REAL *PoynSU0GF,CCTK_REAL *PoynSU1GF,CCTK_REAL *PoynSU2GF) { DECLARE_CCTK_PARAMETERS; #pragma omp parallel for for(int i2=0;i2<cctk_lsh[2];i2++) for(int i1=0;i1<cctk_lsh[1];i1++) for(int i0=0;i0<cctk_lsh[0];i0++) { const CCTK_REAL gammaDD00 = gammaDD00GF[CCTK_GFINDEX3D(cctkGH, i0,i1,i2)]; const CCTK_REAL gammaDD01 = gammaDD01GF[CCTK_GFINDEX3D(cctkGH, i0,i1,i2)]; const CCTK_REAL gammaDD02 = gammaDD02GF[CCTK_GFINDEX3D(cctkGH, i0,i1,i2)]; const CCTK_REAL gammaDD11 = gammaDD11GF[CCTK_GFINDEX3D(cctkGH, i0,i1,i2)]; const CCTK_REAL gammaDD12 = gammaDD12GF[CCTK_GFINDEX3D(cctkGH, i0,i1,i2)]; const CCTK_REAL gammaDD22 = gammaDD22GF[CCTK_GFINDEX3D(cctkGH, i0,i1,i2)]; const CCTK_REAL alpha = alphaGF[CCTK_GFINDEX3D(cctkGH, i0,i1,i2)]; const CCTK_REAL betaU0 = betaU0GF[CCTK_GFINDEX3D(cctkGH, i0,i1,i2)]; const CCTK_REAL betaU1 = betaU1GF[CCTK_GFINDEX3D(cctkGH, i0,i1,i2)]; const CCTK_REAL betaU2 = 
betaU2GF[CCTK_GFINDEX3D(cctkGH, i0,i1,i2)]; // Valencia 3-velocity may be adjusted due to the velocity ceiling. CCTK_REAL ValenciavU0 = vel[CCTK_GFINDEX4D(cctkGH, i0,i1,i2, 0)]; CCTK_REAL ValenciavU1 = vel[CCTK_GFINDEX4D(cctkGH, i0,i1,i2, 1)]; CCTK_REAL ValenciavU2 = vel[CCTK_GFINDEX4D(cctkGH, i0,i1,i2, 2)]; const CCTK_REAL BU0 = Bvec[CCTK_GFINDEX4D(cctkGH, i0,i1,i2, 0)]; const CCTK_REAL BU1 = Bvec[CCTK_GFINDEX4D(cctkGH, i0,i1,i2, 1)]; const CCTK_REAL BU2 = Bvec[CCTK_GFINDEX4D(cctkGH, i0,i1,i2, 2)]; CCTK_REAL u0; #include "u0.h" #include "smallb4U_smallb2etk_PoynSU.h" } } extern void sbPoynETNRPy(CCTK_ARGUMENTS) { DECLARE_CCTK_PARAMETERS; DECLARE_CCTK_ARGUMENTS; if(sbPoynETNRPy_calc_every<=0 || cctk_iteration%sbPoynETNRPy_calc_every!=0) { return; } /* Calculate smallb4U[], smallb2etk, and PoynSU[]: */ sbPoynETNRPy_lowlevel(cctkGH,cctk_lsh, gxx,gxy,gxz,gyy,gyz,gzz, alp, betax,betay,betaz, vel,Bvec, smallb4U0,smallb4U1,smallb4U2,smallb4U3, smallb4_sq, PoynSU0,PoynSU1,PoynSU2); } # - # <a id='cclfiles'></a> # # ## Step 2.b: CCL files - Define how this module interacts and interfaces with the larger Einstein Toolkit infrastructure \[Back to [top](#toc)\] # $$\label{cclfiles}$$ # # Writing a module ("thorn") within the Einstein Toolkit requires that three "ccl" files be constructed, all in the root directory of the thorn: # # 1. `interface.ccl`: defines the gridfunction groups needed, and provides keywords denoting what this thorn provides and what it should inherit from other thorns. # 1. `param.ccl`: specifies free parameters within the thorn. # 1. `schedule.ccl`: allocates storage for gridfunctions, defines how the thorn's functions should be scheduled in a broader simulation, and specifies the regions of memory written to or read from gridfunctions. # # Let's start with `interface.ccl`. 
The [official Einstein Toolkit (Cactus) documentation](http://einsteintoolkit.org/usersguide/UsersGuide.html) defines what must/should be included in an `interface.ccl` file [**here**](http://einsteintoolkit.org/usersguide/UsersGuidech12.html#x17-178000D2.2). # + # %%writefile sbPoynETNRPy/interface.ccl # With "implements", we give our thorn its unique name. implements: sbPoynETNRPy # By "inheriting" other thorns, we tell the Toolkit that we # will rely on variables/function that exist within those # functions. inherits: ADMBase Boundary Grid HydroBase MethodofLines # Tell the Toolkit that we want the various Weyl scalars # and invariants to be visible to other thorns by using # the keyword "public". Note that declaring these # gridfunctions *does not* allocate memory for them; # that is done by the schedule.ccl file. public: CCTK_REAL smallb4U_group type=GF timelevels=3 { smallb4U0,smallb4U1,smallb4U2,smallb4U3 } "smallb4U 4-vector" public: CCTK_REAL smallb4_sq_group type=GF timelevels=3 { smallb4_sq } "smallb^{mu} squared == twice the magnetic pressure" public: CCTK_REAL PoynSU_group type=GF timelevels=3 { PoynSU0,PoynSU1,PoynSU2 } "Poynting 3-vector" # - # We will now write the file `param.ccl`. This file allows the listed parameters to be set at runtime. We also give allowed ranges and default values for each parameter. More information on this file's syntax can be found in the [official Einstein Toolkit documentation](http://einsteintoolkit.org/usersguide/UsersGuidech12.html#x17-183000D2.3). # # The first parameter specifies how many time levels need to be stored. Generally when using the ETK's adaptive-mesh refinement (AMR) driver [Carpet](https://carpetcode.org/), three timelevels are needed so that the diagnostic quantities can be properly interpolated and defined across refinement boundaries. # # The second parameter determines how often we will calculate $b^\mu$, $b^2$, and $S^i$. 
# # The third parameter sets the maximum allowed Lorentz factor when computing $u^0$ (i.e., $\Gamma_{\rm max}$, as defined in the [previous tutorial notebook](Tutorial-u0_smallb_Poynting-Cartesian.ipynb)). # + # %%writefile sbPoynETNRPy/param.ccl shares: HydroBase USES CCTK_INT timelevels restricted: CCTK_INT timelevels "Number of active timelevels" STEERABLE=RECOVER { 0:3 :: "" } 3 restricted: CCTK_INT sbPoynETNRPy_calc_every "Compute these quantities every sbPoynETNRPy_calc_every iterations." STEERABLE=ALWAYS { *:* :: "" } 1 restricted: CCTK_REAL GAMMA_SPEED_LIMIT "Maximum Lorentz factor." { 1:* :: "Positive > 1, though you'll likely have troubles in GRMHD far above 10, or far above 2000 in GRFFE." } 10.0 # - # Finally, we will write the file `schedule.ccl`; its official documentation is found [here](http://einsteintoolkit.org/usersguide/UsersGuidech12.html#x17-186000D2.4). # # This file registers the function we wish to call, `sbPoynETNRPy`, with the Einstein Toolkit scheduler. # + # %%writefile sbPoynETNRPy/schedule.ccl STORAGE: smallb4U_group[timelevels] STORAGE: smallb4_sq_group[timelevels] STORAGE: PoynSU_group[timelevels] schedule group sbPoynETNRPy_group in MoL_PseudoEvolution after ADMBase_SetADMVars { } "Schedule sbPoynETNRPy group" schedule sbPoynETNRPy in sbPoynETNRPy_group { LANG: C READS: admbase::gxx(Everywhere) READS: admbase::gxy(Everywhere) READS: admbase::gxz(Everywhere) READS: admbase::gyy(Everywhere) READS: admbase::gyz(Everywhere) READS: admbase::gzz(Everywhere) READS: admbase::alpha(Everywhere) READS: admbase::betax(Everywhere) READS: admbase::betay(Everywhere) READS: admbase::betaz(Everywhere) READS: HydroBase::vel(Everywhere) READS: HydroBase::Bvec(Everywhere) WRITES: sbPoynETNRPy::smallb4U0(Everywhere) WRITES: sbPoynETNRPy::smallb4U1(Everywhere) WRITES: sbPoynETNRPy::smallb4U2(Everywhere) WRITES: sbPoynETNRPy::smallb4U3(Everywhere) WRITES: sbPoynETNRPy::smallb4_sq(Everywhere) WRITES: sbPoynETNRPy::PoynSU0(Everywhere) WRITES: 
sbPoynETNRPy::PoynSU1(Everywhere) WRITES: sbPoynETNRPy::PoynSU2(Everywhere) } "Call sbPoynETNRPy main function, to compute $b^mu$, $b^2$, and $S^i$" # - # <a id='etksys'></a> # # ## Step 2.c: Inform the Einstein Toolkit build system of the C code \[Back to [top](#toc)\] # $$\label{etksys}$$ # # The `make.code.defn` lists the source files that need to be compiled. Naturally, this thorn has only the one C file $-$ written above $-$ to compile: # + # %%writefile sbPoynETNRPy/src/make.code.defn SRCS = sbPoynETNRPy.c # - # <a id='latex_pdf_output'></a> # # # Step 3: Output this notebook to $\LaTeX$-formatted PDF file \[Back to [top](#toc)\] # $$\label{latex_pdf_output}$$ # # The following code cell converts this Jupyter notebook into a proper, clickable $\LaTeX$-formatted PDF file. After the cell is successfully run, the generated PDF may be found in the root NRPy+ tutorial directory, with filename # [Tutorial-ETK_thorn-u0_smallb_Poynting.pdf](Tutorial-ETK_thorn-u0_smallb_Poynting.pdf) (Note that clicking on this link may not work; you may need to open the PDF file through another means.) # !jupyter nbconvert --to latex --template latex_nrpy_style.tplx --log-level='WARN' Tutorial-ETK_thorn-u0_smallb_Poynting.ipynb # !pdflatex -interaction=batchmode Tutorial-ETK_thorn-u0_smallb_Poynting.tex # !pdflatex -interaction=batchmode Tutorial-ETK_thorn-u0_smallb_Poynting.tex # !pdflatex -interaction=batchmode Tutorial-ETK_thorn-u0_smallb_Poynting.tex # !rm -f Tut*.out Tut*.aux Tut*.log
Tutorial-ETK_thorn-u0_smallb_Poynting.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + [markdown] _cell_guid="ff2fd268-839a-4483-8681-3c26914f7d9b" _uuid="89210ac8934aeaeeaaa2b194439c712aded9e688" # # Set-up # + _cell_guid="75fd2f9a-7297-4203-b8f7-1502f66e0c9e" _kg_hide-output=true _uuid="54cda7088daf8d27c5627cb80f9bef15fa30f6e7" # DATA MANIPULATION import numpy as np # linear algebra import pandas as pd # data processing import datetime # manipulating date formats import random # VIZUALIZATION import matplotlib.pyplot as plt # basic plotting import seaborn # for prettier plots # %matplotlib inline # - # # Read data # + _cell_guid="6eccd50b-c197-4fbb-8f21-dac64e7165d4" _uuid="2d3b84468f9166e001f7ddf20700ac61030101ee" oil=pd.read_csv('../input/oil.csv', parse_dates=['date']) oil.head() # - # Apparently, since oil is a stock-like time series it only reports values on working days. 
# # Missing data (oil) tmp = pd.DataFrame({'date':pd.date_range(oil.date.min(), oil.date.max())}) oil = pd.merge(oil, tmp, how='right') oil.sort_values('date', inplace=True) oil.reset_index(inplace=True) oil.head(10) oil.dcoilwtico.isnull().sum() # + oil_ts = pd.Series(oil.dcoilwtico.values, index=oil.date) afilter='2017' plt.figure(figsize=(15,5)) plt.plot(oil_ts[afilter]) for date in oil_ts[oil_ts.isnull()][afilter].index: plt.axvspan(date, date, color='red', alpha=0.5) plt.show() # - # # Imputation # + ts1 = oil_ts.interpolate() # linear ts2 = oil_ts.interpolate(method='time') ts3 = oil_ts.interpolate(method='spline', order=4) plt.figure(figsize=(15,5)) plt.plot(ts1[afilter], label='linear') plt.plot(ts2[afilter], label='time') plt.plot(ts3[afilter], label='spline4') plt.plot(oil_ts[afilter], label='original') plt.legend() plt.show() # + # Compare interpolations miss_dates = oil[oil.dcoilwtico.isnull()]['date'] miss_dates pd.DataFrame({'linear':ts1[miss_dates], 'time':ts2[miss_dates], 'spline':ts3[miss_dates]}).head(10) # + # Chose a method to replace the missing values ts1[0] = ts1[1] # First record oil = ts1.to_frame('dcoilwtico') oil.reset_index(inplace=True) oil.head(10) # - oil.to_csv('../input/processed/oil.csv',index=False,float_format="%.3f")
notebooks/1.oil_inputation.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # <b><h1> Analysis for OpenIMSCore # %matplotlib inline import matplotlib.pyplot as plt import pandas as pd import seaborn as sns #import matplotlib import numpy as np #sns.set(font_scale=1.3, style="ticks") def select_and_rename(df, mapping): """ Helper: Selects columns of df using the keys of the mapping dict. It renames the columns to the values of the mappings dict. """ # select subset of columns dff = df[list(mapping.keys())] # rename for k, v in mapping.items(): #print("Renaming: {} -> {}".format(k, v)) dff.rename(columns={k: v}, inplace=True) #print(dff.head()) return dff # <b>Read result File #df = pd.read_csv("/home/arif/gitRepos/ah-tng-bench-experiments/results-archive/vaaa_results_20191126_second_run/result_ec_metrics.csv") #df = pd.read_csv("/home/arif/gitRepos/ah-tng-bench-experiments/results-archive/20191202_run_local_lab/result_ec_metrics.csv") df = pd.read_csv("/Users/arifhossen/ArifDevelopmentWork/gitRepos/ah-tng-bench-experiments/results-archive/openimscore_results_20191213_v2/result_ec_metrics.csv") #df.head df.columns.tolist() # + #df.columns.tolist() # - 'metric__mp.input.vdu01.0__CallRate_C', 'metric__mp.input.vdu01.0__CallRate_P', 'metric__mp.input.vdu01.0__CurrentCall', 'metric__mp.input.vdu01.0__DeadCallMsgs_C', 'metric__mp.input.vdu01.0__DeadCallMsgs_P', 'metric__mp.input.vdu01.0__FailedCallRejected_C', 'metric__mp.input.vdu01.0__FailedCallRejected_P', 'metric__mp.input.vdu01.0__FailedCall_C', 'metric__mp.input.vdu01.0__FailedCall_P', 'metric__mp.input.vdu01.0__FailedMaxUDPRetrans_C', 'metric__mp.input.vdu01.0__FailedMaxUDPRetrans_P', 'metric__mp.input.vdu01.0__FailedTcpConnect_C', 'metric__mp.input.vdu01.0__FailedTcpConnect_P', 'metric__mp.input.vdu01.0__FailedUnexpectedMessage_C', 
'metric__mp.input.vdu01.0__FailedUnexpectedMessage_P', 'metric__mp.input.vdu01.0__FatalErrors_C', 'metric__mp.input.vdu01.0__FatalErrors_P', 'metric__mp.input.vdu01.0__IncomingCall_C', 'metric__mp.input.vdu01.0__IncomingCall_P', 'metric__mp.input.vdu01.0__OutgoingCall_C', 'metric__mp.input.vdu01.0__OutgoingCall_P', 'metric__mp.input.vdu01.0__Retransmissions_C', 'metric__mp.input.vdu01.0__Retransmissions_P', 'metric__mp.input.vdu01.0__SuccessfulCall_C', 'metric__mp.input.vdu01.0__SuccessfulCall_P', 'metric__mp.input.vdu01.0__TargetRate', 'metric__mp.input.vdu01.0__TotalCallCreated', 'metric__mp.input.vdu01.0__Warnings_C', 'metric__mp.input.vdu01.0__Warnings_P', 'metric__mp.output.vdu01.0__CallRate_C', 'metric__mp.output.vdu01.0__CallRate_P', 'metric__mp.output.vdu01.0__CurrentCall', 'metric__mp.output.vdu01.0__DeadCallMsgs_C', 'metric__mp.output.vdu01.0__DeadCallMsgs_P', 'metric__mp.output.vdu01.0__FailedCallRejected_C', 'metric__mp.output.vdu01.0__FailedCallRejected_P', 'metric__mp.output.vdu01.0__FailedCall_C', 'metric__mp.output.vdu01.0__FailedCall_P', 'metric__mp.output.vdu01.0__FailedMaxUDPRetrans_C', 'metric__mp.output.vdu01.0__FailedMaxUDPRetrans_P', 'metric__mp.output.vdu01.0__FailedTcpConnect_C', 'metric__mp.output.vdu01.0__FailedTcpConnect_P', 'metric__mp.output.vdu01.0__FailedUnexpectedMessage_C', 'metric__mp.output.vdu01.0__FailedUnexpectedMessage_P', 'metric__mp.output.vdu01.0__FatalErrors_C', 'metric__mp.output.vdu01.0__FatalErrors_P', 'metric__mp.output.vdu01.0__IncomingCall_C', 'metric__mp.output.vdu01.0__IncomingCall_P', 'metric__mp.output.vdu01.0__OutgoingCall_C', 'metric__mp.output.vdu01.0__OutgoingCall_P', 'metric__mp.output.vdu01.0__Retransmissions_C', 'metric__mp.output.vdu01.0__Retransmissions_P', 'metric__mp.output.vdu01.0__SuccessfulCall_C', 'metric__mp.output.vdu01.0__SuccessfulCall_P', 'metric__mp.output.vdu01.0__TargetRate', 'metric__mp.output.vdu01.0__TotalCallCreated', 'metric__mp.output.vdu01.0__TotalCallCreated', 
'metric__mp.output.vdu01.0__Warnings_P', openims_map_01 = { "param__func__de.upb.openimscore.0.1__cpu_bw": "cpu", "param__func__de.upb.openimscore.0.1__mem_max": "vnf_memory", #"metric__mp.input.vdu01.0__CallRate_C": "In_CallRate", #"metric__mp.input.vdu01.0__OutgoingCall_C": "In_OutgoingCall", #"metric__mp.input.vdu01.0__IncomingCall_C": "In_IncomingCall", #"metric__mp.input.vdu01.0__SuccessfulCall_C": "In_SuccCall", #"metric__mp.input.vdu01.0__TargetRate": "In_TargetRate", #"param__func__mp.input__cmd_start": "req_size", "metric__mp.output.vdu01.0__CallRate_C": "OutCallRate", "metric__mp.output.vdu01.0__FailedCallRejected_C": "FailedCallRejected", "metric__mp.output.vdu01.0__FailedCall_C": "FailedCall", "metric__mp.output.vdu01.0__FailedMaxUDPRetrans_C": "FailedMaxUDP", "metric__mp.output.vdu01.0__FailedTcpConnect_C": "FailedTcpConn", "metric__mp.output.vdu01.0__FailedUnexpectedMessage_C": "FailedUnMsg", #"metric__mp.output.vdu01.0__FatalErrors_C": "FatalError", #"metric__mp.output.vdu01.0__IncomingCall_C": "IncomingCall", "metric__mp.output.vdu01.0__OutgoingCall_C": "OutgoingCall", "metric__mp.output.vdu01.0__Retransmissions_C": "Retransmissions", "metric__mp.output.vdu01.0__SuccessfulCall_C": "SuccessfulCall", "metric__mp.output.vdu01.0__TargetRate": "TargetRate", "metric__mp.output.vdu01.0__TotalCallCreated": "TotalCallCreated", #"param__func__mp.input__cpu_bw": "probe_cpu_bw", #"param__func__mp.input__mem_max": "probe_memory", #"metric__mp.input.vdu01.0__ab_total_transfer_byte": "transf_bytes", #"metric__mp.input.vdu01.0__ab_transfer_rate_kbyte_per_second": "req_transf_rate", #"metric__vnf0.vdu01.0__stat__input__rx_bytes": "if_rx_bytes", #"metric__vnf0.vdu01.0__stat__input__rx_dropped": "if_in_rx_dropped", #"metric__vnf0.vdu01.0__stat__input__rx_errors": "if_in_rx_errors", #"metric__vnf0.vdu01.0__stat__input__rx_packets": "if_in_rx_packets", #"metric__vnf0.vdu01.0__stat__input__tx_bytes": "if_tx_bytes", #"metric__vnf0.vdu01.0__stat__input__tx_dropped": 
"if_in_tx_dropped", #"metric__vnf0.vdu01.0__stat__input__tx_errors": "if_in_tx_errors", #"metric__vnf0.vdu01.0__stat__input__tx_packets": "if_in_tx_packets", } def cleanup(df): """ Cleanup of df data. Dataset specific. """ def _replace(df, column, str1, str2): if column in df: df[column] = df[column].str.replace(str1, str2) def _to_num(df, column): if column in df: df[column] = pd.to_numeric(df[column]) #_replace(df, "req_size", "ab -c 1 -t 60 -n 9999 -e /tngbench_share/ab_logs.csv -s 60 -k -i http://20.0.0.254:80/", "-") # + df["vnf"] = "openims" dfs_raw = [df] map_list = [openims_map_01] dfs = list() # clean data frames for (df, m) in zip(dfs_raw, map_list): tmp = select_and_rename(df.copy(), m) #cleanup(tmp) dfs.append(tmp) dfs[0].head(10) # - dfs[0].tail(10) df01 = select_and_rename(df, openims_map_01) # + #df01.head(20) # - metrics = ["SuccessfulCall", "Retransmissions", "TotalCallCreated","FailedCall"] hue_col = "vnf_memory" x_col = "cpu" #dff = df01.loc[df01["memory"] == 1024] #dff = dff.loc[dff["ruleset"] == "big"] #dff = dff.loc[dff[x_col] >= .5] for m in metrics: sns.catplot(x=x_col, y=m, hue=hue_col, data=df01, kind="strip", height=4, aspect=1, linewidth=1) # <br> # + metrics = ["Radius-Auth-Response", "Probe-Radius-Auth-Response"] hue_col = "memory" x_col = "cpu_bw" #dff = df01.loc[df01["memory"] == 1024] #dff = dff.loc[dff["ruleset"] == "big"] #dff = dff.loc[dff[x_col] >= .5] for m in metrics: sns.lmplot(x=x_col, y=m, hue=hue_col, data=df01) # -
analysis/openimscore/openimscore-analysis.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# Progressive income-tax calculator.
# The original notebook had the same slab logic written twice (once as a bare
# if-chain, once via shihas()); consolidated here into one table-driven
# implementation. Each tuple is (slab lower bound, marginal rate in percent,
# tax accumulated by all lower slabs), ordered highest-first so the first
# matching slab wins.
_SLABS = [
    (1500000, 30, 187500),
    (1250000, 25, 125000),
    (1000000, 20, 75000),
    (750000, 15, 37500),
    (500000, 10, 12500),
    (250000, 5, 0),
]


def shihas(a, b, c, d):
    """Return the marginal tax (a - b) * c% plus the fixed amount d.

    Parameters
    ----------
    a : total salary
    b : lower bound of the slab the salary falls in
    c : marginal rate for that slab, in percent
    d : tax accumulated by all lower slabs
    """
    tax = (a - b) * c / 100 + d
    return tax


def compute_tax(salary):
    """Return the income tax owed on *salary* using the progressive slab table.

    Income up to 250000 is untaxed; each slab taxes only the income above its
    lower bound, on top of the fixed tax accumulated by the slabs below it.
    """
    for lower, rate, base in _SLABS:
        if salary > lower:
            return shihas(salary, lower, rate, base)
    return 0.0


# Guarded so importing this module does not block on stdin.
if __name__ == "__main__":
    x = int(input("enter your salary"))
    print(compute_tax(x))
day8.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # Random Forest Regressor

# +
# The original notebook used `pd` and `np` before importing them anywhere,
# which raises NameError on a fresh kernel; all imports are collected here.
import numpy as np
import pandas as pd

from sklearn.ensemble import RandomForestRegressor
from sklearn.metrics import mean_squared_error, r2_score
from sklearn.model_selection import KFold
from sklearn.preprocessing import StandardScaler
# -

# Player market-value data; drop the non-numeric name column up front.
df = pd.read_csv('df_pos.csv', index_col=0)
df = df.drop('player_name', axis=1)
# Same data without the social-media follower count.
df_nf = df.drop('follower', axis=1)


def _cross_validate_rf(data, n_splits=10):
    """K-fold cross-validation of a RandomForestRegressor predicting 'value'.

    Replaces the two copy-pasted CV cells of the original notebook and fixes
    two defects they shared:

    * the scaler is now fit on the training fold only and reused to
      transform the test fold — the original fit a *fresh* scaler on the
      test fold, leaking test statistics and scaling the folds differently;
    * `model.score` is now given the same scaled features used for the
      predictions — the original passed unscaled features to a model
      trained on scaled ones, so `scores` measured something else entirely.

    Args:
        data: DataFrame containing the target column 'value' plus numeric
            feature columns.
        n_splits: number of CV folds (default 10, as in the original).

    Returns:
        (scores_rm, scores): a (2, n_splits) array of per-fold R2 (row 0)
        and MSE (row 1), and an (n_splits,) array of `model.score` values.
    """
    scores_rm = np.zeros([2, n_splits])
    scores = np.zeros(n_splits)
    cv = KFold(n_splits, shuffle=True, random_state=0)
    for i, (idx_train, idx_test) in enumerate(cv.split(data)):
        df_train = data.iloc[idx_train]
        df_test = data.iloc[idx_test]

        # Fit the scaler on the training fold only, then apply it to both.
        scaler = StandardScaler().fit(df_train.drop('value', axis=1).astype(float))
        X_train = scaler.transform(df_train.drop('value', axis=1).astype(float))
        y_train = df_train['value']
        X_test = scaler.transform(df_test.drop('value', axis=1).astype(float))

        model = RandomForestRegressor(n_estimators=1000, random_state=42)
        model.fit(X_train, y_train)
        pred = model.predict(X_test)

        rsquared = r2_score(df_test['value'], pred)
        mse = mean_squared_error(df_test['value'], pred)
        scores_rm[0, i] = rsquared
        scores_rm[1, i] = mse
        # Consistent with `pred`: score on the scaled test features.
        scores[i] = model.score(X_test, df_test['value'])

        print("R2 = {:.8f}".format(rsquared))
        print("mse = {:.8f}".format(mse))
        print("R2 Score = {:.8f}".format(scores[i]))
    print("모델 성능 : R2 = {}, mse = {}".format(scores_rm[0].mean(), scores_rm[1].mean()))
    print("R2 Score = {}".format(scores.mean()))
    return scores_rm, scores


# ## No Follower

# +
_cross_validate_rf(df_nf)
# -

# ## With follower count

# +
_cross_validate_rf(df)
# -
Analysis/8.1_RandomForestRegressor_prev.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Week 13 Warm-Up # ## Analyzing the 4-year BLS Data # >- Here we will be analyzing the combined cleaned BLS data that you created from Quiz 9 called, `bls4yr` # >- If you haven't exported your cleaned file you can do that with the following code: `bls4yr.to_excel('bls4yr.xlsx',index=False)` # ### Q1. Create a DataFrame named `bls_amean` # >- This will be a subset of the `bls4yr` DataFrame you created in Quiz 9 # >- Include these fields from `bls4yr` # >>- year, occ_title, a_mean # >- Only include the records for "major" from the `group` field # >>- You should have 88 total records in this new dataframe (There are 22 major groups, 4 years of data, so 88 total records) # ### Q2: What is the overall average `a_mean` (annual average salary) over the 4 years of data? # >- Round to two decimals # ### Q3. What is the average annual salary for each major occupational title (occ_title)? # >- Show the number of observations (count) used in the average calculation as well as the mean # >- Sort the results in descending order # >>- What occupational title had the highest average salary? # ### Q4. Try to plot the annual average salary (`a_mean`) across the 4 years of data # >- In this case you want to plot the average salary across all occupations for each year (i.e., one data point per year representing the overall average salary for that year) # >- We will go over plotting today in class but give this a try on your own first # # ### Q5. If you got the plot by year, try to plot by year and occ_title # >- This will show the annual salaries over time for each major occ_title
Week 13/Week13_WarmUp_EDA_bls4yr_student.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# +
# `import scipy` alone does not guarantee the `special` submodule is
# loaded on older SciPy versions; import it explicitly.
import scipy.special


def negative_binomial(k, n, p):
    """Probability that the n-th success occurs on the k-th Bernoulli trial.

    Args:
        k (int): total number of trials
        n (int): number of successes
        p (float): probability of success on each trial

    Returns:
        (float): probability of the n-th success occurring on the k-th trial
    """
    assert n >= 1, "n must be a positive integer"
    assert type(n) == int, "n must a positive integer"
    assert k >= 0, "k must be an integer greater than or equal to zero"
    # Fixed: the original re-checked `type(n)` here, so `k` was never
    # actually type-checked despite the message.
    assert type(k) == int, "k must be an integer greater than or equal to zero"
    assert 0 < p <= 1, "p must be greater than 0 and less than or equal to 1"
    # Choose which n-1 of the first k-1 trials are successes; the k-th
    # trial itself is the n-th success.
    return scipy.special.comb(
        k - 1, n - 1, exact=True
    ) * p**n * (1 - p)**(k - n)


def find_min_k(n, p, c):
    """Minimum number of Bernoulli trials k such that P(>= n successes) >= c.

    Args:
        n (int): desired number of successes
        p (float): independent probability of success for each trial
        c (float): desired probability of at least n successes

    Returns:
        (int): minimum number of trials (k) needed
        (float): probability of at least n successes within k trials
    """
    assert n >= 1, "n must be a positive integer"
    assert type(n) == int, "n must a positive integer"
    assert 0 < p <= 1, "p must be greater than 0 and less than or equal to 1"
    assert 0 < c < 1, "c must be betweeen 0 and 1"
    prob_of_n_successes = 0.0
    k = n  # fewer than n trials can never contain n successes
    while True:
        # Fixed: the original evaluated negative_binomial(k, n, p) twice per
        # iteration (once into an unused variable `x`).
        prob_of_n_successes += negative_binomial(k, n, p)
        if prob_of_n_successes >= c:
            return k, prob_of_n_successes
        k += 1


# + inputHidden=false outputHidden=false
# Demo plot.  Guarded under __main__ so importing this module performs no
# plotting; matplotlib/numpy are imported lazily for the same reason.
if __name__ == "__main__":
    import matplotlib.pyplot as plt
    import numpy as np

    c = 0.5
    p = 0.6
    n = 10

    k, pstar = find_min_k(n, p, c)

    n_tries = list(range(n, k + 10))
    prob_of_n_tries = [negative_binomial(kk, n, p) for kk in n_tries]

    plt.hlines(c, n, k + 10)
    # Fixed: `scipy.cumsum` was removed from SciPy's top-level namespace;
    # use NumPy's cumsum instead.
    plt.scatter(n_tries, np.cumsum(prob_of_n_tries))
    plt.scatter(k, pstar, facecolor='red', marker=(5, 1), s=160)
    plt.title("Probability of " + str(n) + " Successes in K or Fewer Trials")
    plt.xlabel("Cumulative Trials")
    plt.ylabel("Probability")
    plt.show()
minimum_k.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + [markdown] nbsphinx="hidden" # This notebook is part of the `nbsphinx` documentation: https://nbsphinx.readthedocs.io/. # - # # Cell Execution Timeout # # By default, `nbconvert` (which is used to execute the notebooks during the Sphinx build process) will give a cell 30 seconds to execute before it times out. # # If you would like to change the amount of time given for a cell, you can change the timeout length for all notebooks by setting the following option in [conf.py](conf.py): # # ```python # nbsphinx_timeout = 60 # ``` # # Or change the timeout length on a per-notebook basis by adding this to the notebook's JSON metadata: # # ``` # "nbsphinx": { # "timeout": 60 # }, # ``` # # The timeout is given in seconds, use `-1` to disable the timeout. # # Alternatively, you can manually execute the notebook in question and save the results, see [the pre-executed example notebook](pre-executed.ipynb).
doc/timeout.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + [markdown] id="WqHiDh2mxsfu" # What's in a (sub)word? # In this colab, we'll work with subwords, or words made up of the pieces of larger words, and see how that impacts our network and related embeddings. # We’ve worked with full words before for our sentiment models, and found some issues right at the start of the lesson when using character-based tokenization. Subwords are another approach, where individual words are broken up into the more commonly appearing pieces of themselves. This helps avoid marking very rare words as OOV when you use only the most common words in a corpus. # # As shown in the video, this can further expose an issue affecting all of our models up to this point, in that they don’t understand the full context of the sequence of words in an input. The next lesson on recurrent neural networks will help address this issue. # # https://video.udacity-data.com/topher/2020/March/5e6fb669_subwords/subwords.png # # Subword Datasets # # There are a number of already created subwords datasets available online. If you check out the IMDB dataset on TFDS https://www.tensorflow.org/datasets/catalog/imdb_reviews, for instance, by scrolling down you can see datasets with both 8,000 subwords as well as 32,000 subwords in a corpus (along with regular full-word datasets). 
# # But how to creat TensorFlow’s SubwordTextEncoder and its build_from_corpus function to create one from the reviews dataset we used previously is shown below: # # + id="Q8Wa_ZlX-mPH" import numpy as np import pandas as pd import tensorflow as tf from sklearn.model_selection import train_test_split # from tensorflow.keras.preprocessing.text import Tokenizer from tensorflow.keras.preprocessing.sequence import pad_sequences from tensorflow.keras.layers import Dense, Input, GlobalAveragePooling1D, Flatten from tensorflow.keras.layers import LSTM, Embedding from tensorflow.keras.models import Model # + colab={"base_uri": "https://localhost:8080/"} id="XJAxrOLi-02C" outputId="8a3c6f3e-9cef-4318-ad66-bc5827aaa1f6" # !wget --no-check-certificate \ # https://drive.google.com/uc?id=13ySLC_ue6Umt9RJYSeM2t-V0kCv-4C-P \ # -O /tmp/sentiment.csv # https://www.kaggle.com/marklvl/sentiment-labelled-sentences-data-set # + id="FC2Leqipxsf5" path = tf.keras.utils.get_file('sentiment.csv', 'https://drive.google.com/uc?id=13ySLC_ue6Umt9RJYSeM2t-V0kCv-4C-P') # + colab={"base_uri": "https://localhost:8080/"} id="_qzVPXLwNRdF" outputId="d5ce05ee-ec89-4903-9583-2610cdcbfa01" print (path) # + id="Dr-EDUKP_HBl" df = pd.read_csv(path) # + colab={"base_uri": "https://localhost:8080/", "height": 195} id="l11LJeKONXM7" outputId="8507b1cf-f101-422d-f782-0c68ededec6a" df.head() # + [markdown] id="8zut9Wng_R3B" # @todo : We can use the existing Amazon and Yelp reviews dataset with `tensorflow_datasets`'s `SubwordTextEncoder` functionality. `SubwordTextEncoder.build_from_corpus()` will create a tokenizer we can use this functionality to get subwords from a much larger corpus of text as well. # + [markdown] id="wr1FKFBWK2b_" # The Amazon and Yelp dataset we are using isn't super large, so we'll create a subword `vocab_size` of only the 1,000 most common words, as well as cutting off each subword to be at most 5 characters. 
Documentation [here](https://www.tensorflow.org/datasets/api_docs/python/tfds/features/text/SubwordTextEncoder#build_from_corpus). # + id="CcEJIxWwK2b_" # note this is the code in the past examples without using subwords #tokenizer = Tokenizer(num_words = vocab_size, oov_token=oov_tok) #tokenizer.fit_on_texts(training_sentences) # + id="lVuGTmm3K2cA" sentences = df['text'].tolist() # + id="nUzd-5YbMggG" labels = df['sentiment'].tolist() # + id="KFV_wbG_xsgC" import tensorflow_datasets as tfds # + id="aElsgxia_43g" MAX_VOCAB_SIZE = 1000 MAX_SUBWORD_LENGTH = 5 tokenizer = tfds.deprecated.text.SubwordTextEncoder.build_from_corpus(sentences, MAX_VOCAB_SIZE, MAX_SUBWORD_LENGTH) # + colab={"base_uri": "https://localhost:8080/"} id="4JzH9ptyyNne" outputId="d7aecc3e-50ff-444b-fe34-976e26a71aea" # Check that the tokenizer works appropriately num = 5 print(sentences[num]) encoded = tokenizer.encode(sentences[num]) print(encoded) # + colab={"base_uri": "https://localhost:8080/"} id="0XNZWGKqBDc3" outputId="e3ac90a5-3282-42b0-9abe-d5fe74805d3a" # Separately print out each subword, decoded for i in encoded: print(tokenizer.decode([i])) # + id="rAmql34aGfeV" ''' Replace sentence data with encoded subwords Now, we'll re-create the dataset to be used for training by actually encoding each of the individual sentences. This is equivalent to text_to_sequences with the Tokenizer we used in earlier exercises. ''' for i, sentence in enumerate(sentences): sentences[i] = tokenizer.encode(sentence) # + colab={"base_uri": "https://localhost:8080/"} id="jNnee_csG5Iz" outputId="91e9dd4b-8a9d-41f4-ba68-3eb9d3f03a1b" # Check the sentences are appropriately replaced print(sentences[1]) # + id="INIFSAcEHool" # Before training, we still need to pad the sequences, as well as split into training and test sets. 
max_length = 50 trunc_type='post' padding_type='post' # Pad all sentences sentences_padded = pad_sequences(sentences, maxlen=max_length, padding=padding_type, truncating=trunc_type) # Separate out the sentences and labels into training and test sets training_size = int(len(sentences) * 0.8) training_sentences = sentences_padded[0:training_size] testing_sentences = sentences_padded[training_size:] training_labels = labels[0:training_size] testing_labels = labels[training_size:] # Make labels into numpy arrays for use with the network later training_labels_final = np.array(training_labels) testing_labels_final = np.array(testing_labels) # + colab={"base_uri": "https://localhost:8080/"} id="jxuDONtjMnVl" outputId="1f683b30-21ad-4462-9869-a70f6c081c23" # Create the model # We get to choose embedding dimensionality D = 16 # 20 #V = len(word_index) # Hidden state dimensionality # M = 15 i = Input(shape=(max_length,)) # T = 121 x = Embedding(MAX_VOCAB_SIZE, embedding_dim)(i) # V = 7246 D = 20 x = GlobalAveragePooling1D()(x) x = Dense(6, activation='relu')(x) x = Dense(1, activation='sigmoid')(x) model = Model(i, x) model.summary() # + colab={"base_uri": "https://localhost:8080/"} id="eDKcL64IPcfy" outputId="6b5882a1-b38e-4b30-d915-3156411de8f3" embedding_dim = 16 model = tf.keras.Sequential([ tf.keras.layers.Embedding(MAX_VOCAB_SIZE, embedding_dim, input_length=max_length), tf.keras.layers.GlobalAveragePooling1D(), tf.keras.layers.Dense(6, activation='relu'), tf.keras.layers.Dense(1, activation='sigmoid') ]) model.summary() # + colab={"base_uri": "https://localhost:8080/"} id="VqkMNtIeP3oz" outputId="1a12860e-a8f6-49da-b892-755860eb84fb" num_epochs = 30 model.compile(loss='binary_crossentropy',optimizer='adam',metrics=['accuracy']) history = model.fit(training_sentences, training_labels_final, epochs=num_epochs, validation_data=(testing_sentences, testing_labels_final)) # + colab={"base_uri": "https://localhost:8080/", "height": 541} id="uy8KIMPIQlvH" 
outputId="638764a1-64a0-400c-d8e5-9027f1d0e909" # Does there appear to be a difference in how validation accuracy and loss is trending compared to with full words? import matplotlib.pyplot as plt def plot_graphs(history, string): plt.plot(history.history[string]) plt.plot(history.history['val_'+string]) plt.xlabel("Epochs") plt.ylabel(string) plt.legend([string, 'val_'+string]) plt.show() plot_graphs(history, "accuracy") plot_graphs(history, "loss") # + colab={"base_uri": "https://localhost:8080/"} id="VCwKC5LdPPAo" outputId="c3b19482-7f37-409a-c346-49184ff90c1b" # First get the weights of the embedding layer e = model.layers[1] weights = e.get_weights()[0] print(weights.shape) # shape: (MAX_VOCAB_SIZE, embedding_dim) # + [markdown] id="tn1GY2F8xsgu" # Note that the below code does have a few small changes to handle the different way text is encoded in our dataset compared to before with the built in `Tokenizer`. You may get an error like "Number of tensors (999) do not match the number of lines in metadata (992)." As long as you load the vectors first without error and wait a few seconds after this pops up, you will be able to click outside the file load menu and still view the visualization. 
# + id="LXKqy9Z1RSmt" import io # Write out the embedding vectors and metadata out_v = io.open('vecs.tsv', 'w', encoding='utf-8') out_m = io.open('meta.tsv', 'w', encoding='utf-8') for word_num in range(0, MAX_VOCAB_SIZE - 1): word = tokenizer.decode([word_num]) embeddings = weights[word_num] out_m.write(word + "\n") out_v.write('\t'.join([str(x) for x in embeddings]) + "\n") out_v.close() out_m.close() # + colab={"base_uri": "https://localhost:8080/", "height": 17} id="v04wBMybRoGx" outputId="b35375d5-b249-4bac-d91d-84e669d4fb28" # Download the files try: from google.colab import files except ImportError: pass else: files.download('vecs.tsv') files.download('meta.tsv') # + # Predicting Sentiment in New Reviews Use the model to predict a review fake_reviews = ['I love this phone', 'I hate spaghetti', 'Everything was cold', 'Everything was hot exactly as I wanted', 'Everything was green', 'the host seated us immediately', 'they gave us free chocolate cake', 'not sure about the wilted flowers on the table', 'only works when I stand on tippy toes', 'bats are all wearing underwears with tie', 'bat was runnig the show', 'does not work when I stand on my head'] print(fake_reviews) # Create the sequences padding_type='post' sample_sequences = tokenizer.texts_to_sequences(fake_reviews) fakes_padded = pad_sequences(sample_sequences, padding=padding_type, maxlen=max_length) print('\nHOT OFF THE PRESS! 
HERE ARE SOME NEWLY MINTED, ABSOLUTELY GENUINE REVIEWS!\n') classes = model.predict(fakes_padded) # The closer the class is to 1, the more positive the review is deemed to be for x in range(len(fake_reviews)): print(fake_reviews[x]) print(classes[x]) print('\n') # Try adding reviews of your own # Add some negative words (such as "not") to the good reviews and see what happens # For example: # they gave us free chocolate cake and did not charge us # + [markdown] id="6ZV30fXmK2cA" # You’ve already learned an amazing amount of material on Natural Language Processing with TensorFlow in this # lesson.You started with Tokenization by: # # You’ve already learned an amazing amount of material on Natural Language # Processing with TensorFlow in this lesson. # You started with Tokenization by: # Tokenizing input text # Creating and padding sequences # Incorporating out of vocabulary words # Generalizing tokenization and sequence methods to real world datasets # # From there, you moved onto Embeddings, where you: # # transformed tokenized sequences into embeddings # developed a basic sentiment analysis model # visualized the embeddings vector # tweaked hyperparameters of the model to improve it # and diagnosed potential issues with using pre-trained subword tokenizers when the network doesn’t have sequence context # In the next lesson, you’ll dive into Recurrent Neural Networks, which will be able to understand the sequence of # inputs, and you'll learn how to generate new text.
3. NLP/AZ/Text Classification/01_RNN/03_nlp_subwords_01.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python [conda env:test_csaf] # language: python # name: conda-env-test_csaf-py # --- # + import csaf_f16.ngoals as f16g from csaf_f16.acas import AcasScenarioViewer import re from datetime import datetime import matplotlib.pyplot as plt # %matplotlib inline # %config InlineBackend.figure_format='retina' # - # ## Scenario 1: Rejoin # # ### Summary # # Ownship is attempting to fly north at a constant airspeed, while intruder is attempting a eastward turn while position to west of the intruder. Both aircraft start at the same north / south position (flying in formation). # # # ### Coordinates # # * **Altitude** - Discrete - {15k, 35k} ft - Both aircraft are positioned at the same altitude # * **Airspeed** - Discrete - {600, 1k} ft/s - The ownship is given this airspeed # * **Relative East / West Distance** - Continuous - [7k, 10k] ft - Intruder is placed at the origin and intruder is placed at (0, relative distance) # * **Relative Intruder Airspeed** - Continuous - [-150, 300] ft/s - Ownship is giving the above airspeed and intruder is given ownship's airspeed + relative value # * **Relative Intruder Heading Angle** - Continuous - [-pi, pi] rad - Ownship is facing North and intruder is given this heading angle ar = f16g.AcasRejoinScenario() sys = ar.generate_system((15E3, 1000.0, 12000.0, 0.0, 0.0)) trajs = sys.simulate_tspan((0.0, 20.0)) av = AcasScenarioViewer(trajs, ar) f, ax = av.summary_plot(bounds = ((-3000, 12000), (-1000, 21000))) ax.annotate(s='', xy=(0.0,0.0), xytext=(12000.0,0), arrowprops=dict(arrowstyle='<->')) ax.annotate(s='horizontal distance', xy=(6000.0,500.0), ha='center') # ## Scenario 2: Airport # # ### Summary # # Ownship and intruder are turning into one another. The ownship has ACAS-Xu autopilot, while the intruder does not. 
Both aircraft start at the same North / South position. # # ### Coordinates # # * **Altitude** - Discrete - {15k, 35k} ft - Both aircraft are positioned at the same altitude # * **Airspeed** - Discrete - {600, 1k} ft/s - The ownship is given this airspeed # * **Relative East / West Distance** - Continuous - [7k, 10k] ft - Intruder is placed at the origin and intruder is placed at (0, relative distance) # * **Relative Intruder Airspeed** - Continuous - [-150, 300] ft/s - Ownship is giving the above airspeed and intruder is given ownship's airspeed + relative value # * **Relative Intruder Heading Angle** - Continuous - [-pi, pi] rad - Ownship is facing North and intruder is given this heading angle ar = f16g.AcasAirportScenario() sys = ar.generate_system((15E3, 1000.0, 12000.0, 0.0, 0.0)) trajs = sys.simulate_tspan((0.0, 20.0)) av = AcasScenarioViewer(trajs, ar) f, ax = av.summary_plot(bounds = ((-6000, 6000), (-1000, 21000))) ax.annotate(s='', xy=(-6000.0,0.0), xytext=(6000.0,0.0), arrowprops=dict(arrowstyle='<->')) ax.annotate(s='horizontal distance', xy=(0.0,500.0), ha='center') # ## Scenario 3: Head On # # ### Summary # # Both aircraft are at the same east / west position and can be positioned head on. Both attempt to fly straight at a constant airspeed. 
# # ### Coordinates # # * **Altitude** - Discrete - {15k, 35k} ft - Both aircraft are positioned at the same altitude # * **Airspeed** - Discrete - {600, 1k} ft/s - The ownship is given this airspeed # * **Relative North / South Distance** - Continuous - [7k, 10k] ft - Intruder is placed at the origin and intruder is placed at (relative distance, 0) # * **Relative Intruder Airspeed** - Continuous - [-150, 300] ft/s - Ownship is giving the above airspeed and intruder is given ownship's airspeed + relative value # * **Relative Intruder Heading Angle** - Continuous - [-pi, pi] rad - Ownship is facing North and intruder is given this heading angle ar = f16g.AcasHeadOnScenario() sys = ar.generate_system((15E3, 1000.0, 12000.0, 0.0, -3.141592)) trajs = sys.simulate_tspan((0.0, 10.0)) av = AcasScenarioViewer(trajs, ar) f, ax = av.summary_plot(bounds = ((-3000, 3000), (-1000, 14000))) ax.annotate(s='', xy=(-1000.0,0.0), xytext=(-1000.0,12000.0), arrowprops=dict(arrowstyle='<->')) ax.annotate(s='vertical distance', xy=(-2500.0,6000.0), ha='center') # ## Goal Falsification # # Constrained Bayesion optimization is used to attempt to falsify the 250 ft well clear region. # # #### Constraints # # * minimum distance to the intruder is set to 7000 ft. # * the airspeed of the intruder is constrained to be "reasonable" (meaning not to trip a simulation error). This works out to be between [550, 1100] ft/s. # # #### Kernel # # * an standard periodic kernel is used with ARD turned on to created different lengthscales. 20 ft/s is used for speed quantities, 200 ft is used for distances, and 0.01 rad is used for angular. 
def video_title(scen): """generate a filename for a scenario object""" scen_sc = re.sub('(?!^)([A-Z]+)', r'_\1',scen.__class__.__name__).lower() return f"{scen_sc}_{datetime.now().strftime('%m_%d_%H_%M')}" #g = f16g.AcasRejoin() #g = f16g.AcasHeadOnGoal() g = f16g.AcasAirportGoal() g.test(max_time=60*5) # plot GPyOpt's Convergence Information g.optimizer.plot_convergence() # + # run the best example and plot the results (using AcasScenarioViewer) from csaf_f16.acas import AcasScenarioViewer x0 = g.optimizer.x_opt scen = g.scenario_type() trajs, p = g.run_sim(x0) viewer = AcasScenarioViewer(trajs, scen) fig, ax = viewer.summary_plot() ax.set_xlim(-10000, 10000) ax.set_ylim(0, 25000) plt.show() # - g.optimizer.x_opt # + #from IPython.display import HTML #from matplotlib import animation #viewer = AcasScenarioViewer(trajs, ar) #anim = viewer.summary_video(bounds=((-15000, 15000), (-5000, 25000)), msize=0.0005) # use this to render a video player in the notebooks #HTML(anim.to_jshtml()) # use this to write it as gif to cwd #writervideo = animation.FFMpegWriter(fps=30) #anim.save(f"{video_title(g.scenario_type())}.gif", writer=writervideo) # - import numpy as np sidxs= np.argsort(g.optimizer.Y.flatten()) Xt, Yt = g.optimizer.X[sidxs[:10]], g.optimizer.Y[sidxs[:10]] for idx, xi in enumerate(Xt): print("-----") print(idx, xi) scen = g.scenario_type() trajs, p = g.run_sim(xi) viewer = AcasScenarioViewer(trajs, scen) fig, ax = viewer.summary_plot() ax.set_xlim(-10000, 10000) ax.set_ylim(0, 25000) plt.show() print("-----")
notebooks/f16-scenarios-new.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 2 # language: python # name: python2 # --- # Internet use and religion in Europe, part four # ----------------------------------------- # # This notebook presents explorations of the association between Internet use and religion in Europe, using data from the European Social Survey (http://www.europeansocialsurvey.org). # # Copyright 2015 <NAME> # # MIT License: http://opensource.org/licenses/MIT # + from __future__ import print_function, division import string import random import cPickle as pickle import numpy as np import pandas as pd import statsmodels.formula.api as smf import thinkstats2 import thinkplot import matplotlib.pyplot as plt import ess # colors by colorbrewer2.org BLUE1 = '#a6cee3' BLUE2 = '#1f78b4' GREEN1 = '#b2df8a' GREEN2 = '#33a02c' PINK = '#fb9a99' RED = '#e31a1c' ORANGE1 = '#fdbf6f' ORANGE2 = '#ff7f00' PURPLE1 = '#cab2d6' PURPLE2 = '#6a3d9a' YELLOW = '#ffff99' BROWN = '#b15928' # %matplotlib inline # - # Open the store containing resampled DataFrames. 
store.close() store = pd.HDFStore('ess.resamples.h5') # Make the country objects reload(ess) country_map = ess.make_countries(store) # For each resampled frame, run both models and store the results in the Country objects keys = store.keys() len(keys) # + reload(ess) FORMULA1 = ('hasrelig_f ~ inwyr07_f + yrbrn60_f + yrbrn60_f2 + ' 'edurank_f + hincrank_f +' 'tvtot_f + rdtot_f + nwsptot_f + netuse_f') num = 101 ess.process_all_frames(store, country_map, num, smf.logit, FORMULA1, model_num=1) # + reload(ess) FORMULA2 = ('rlgdgr_f ~ inwyr07_f + yrbrn60_f + yrbrn60_f2 + ' 'edurank_f + hincrank_f +' 'tvtot_f + rdtot_f + nwsptot_f + netuse_f') ess.process_all_frames(store, country_map, num, smf.ols, FORMULA2, model_num=2) # - store.close() with open('ess4.pkl', 'wb') as fp: pickle.dump(country_map, fp) with open('ess4.pkl', 'rb') as fp: country_map = pickle.load(fp) # + plot_counter = 1 def save_plot(flag=True): """Saves plots in png format. flag: boolean, whether to save or not """ global plot_counter if flag: root = 'ess4.%2.2d' % plot_counter thinkplot.Save(root=root, formats=['png']) plot_counter += 1 # - # Make a plot showing confidence interval for the given parameters xlabel1 = 'Difference in percentage points of hasrelig' xlabel2 = 'Difference in religiosity (0-10 scale)' xlim = [-25, 15] # First let's check on the estimated parameters for the age variables. t = ess.extract_ranges(country_map, 'yrbrn60_f', 'hasrelig_f') ess.plot_cis(t, GREEN2) thinkplot.Config(title='Year born', xlabel=xlabel1, xlim=xlim) save_plot() # In almost every country, year born is associated with less religiosity. 
t = ess.extract_ranges(country_map, 'inwyr07_f', 'hasrelig_f') ess.plot_cis(t, GREEN1) thinkplot.Config(title='Interview year', xlabel=xlabel1, xlim=xlim) save_plot() t = ess.extract_ranges(country_map, 'edurank_f', 'hasrelig_f') ess.plot_cis(t, ORANGE2) thinkplot.Config(title='Education (relative rank)', xlabel=xlabel1, xlim=xlim) save_plot() t = ess.extract_ranges(country_map, 'hincrank_f', 'hasrelig_f') ess.plot_cis(t, ORANGE1) thinkplot.Config(title='Income (relative rank)', xlabel=xlabel1, xlim=xlim) save_plot() t = ess.extract_ranges(country_map, 'tvtot_f', 'hasrelig_f') ess.plot_cis(t, RED) thinkplot.Config(title='Television watching', xlabel=xlabel1, xlim=xlim) save_plot() t = ess.extract_ranges(country_map, 'rdtot_f', 'hasrelig_f') ess.plot_cis(t, BLUE1) thinkplot.Config(title='Radio listening', xlabel=xlabel1, xlim=xlim) save_plot() t = ess.extract_ranges(country_map, 'nwsptot_f', 'hasrelig_f') ess.plot_cis(t, BLUE2) thinkplot.Config(title='Newspaper reading', xlabel=xlabel1, xlim=xlim) save_plot() t = ess.extract_ranges(country_map, 'netuse_f', 'hasrelig_f') ess.plot_cis(t, PURPLE2) thinkplot.Config(title='Internet use', xlabel=xlabel1, xlim=xlim) save_plot() reload(ess) cdfnames = ['yrbrn60_f', 'netuse_f', 'edurank_f', 'tvtot_f', 'hincrank_f', 'rdtot_f', 'nwsptot_f', 'inwyr07_f' ] ess.plot_cdfs(country_map, ess.extract_ranges, cdfnames=cdfnames) thinkplot.Config(xlabel='Difference in percentage points', xlim=[-20, 10], ylabel='CDF', legend=True, loc='upper left') save_plot() t = ess.extract_ranges(country_map, 'netuse_f', 'hasrelig_f') ess.plot_scatter(t, BLUE) thinkplot.Config(title='', xlabel=xlabel1, ylabel='Fraction affiliated', xlim=[-10, 5], ylim=[0, 1]) save_plot() t = ess.extract_ranges(country_map, 'netuse_f', 'rlgdgr_f') ess.plot_scatter(t, BLUE) thinkplot.Config(title='', xlabel=xlabel1, ylabel='Mean religiosity', xlim=[-10, 5], ylim=[0, 7.5]) save_plot() t = ess.extract_ranges(country_map, 'netuse_f', 'netuse_f') ess.plot_scatter(t, BLUE) 
thinkplot.Config(title='', xlabel=xlabel1, ylabel='Mean Internet use', xlim=[-10, 5], ylim=[0, 7.5]) save_plot() # Make similar figures for the second model, with degree of religiosity as the dependent variable. xlim = [-2.5, 1.0] t = ess.extract_ranges2(country_map, 'yrbrn60_f', 'rlgdgr_f') ess.plot_cis(t, GREEN2) thinkplot.Config(title='Year born', xlabel=xlabel2, xlim=xlim) save_plot() t = ess.extract_ranges2(country_map, 'inwyr07_f', 'rlgdgr_f') ess.plot_cis(t, GREEN1) thinkplot.Config(title='Education rank', xlabel=xlabel2, xlim=xlim) save_plot() t = ess.extract_ranges2(country_map, 'edurank_f', 'rlgdgr_f') ess.plot_cis(t, ORANGE2) thinkplot.Config(title='Education rank', xlabel=xlabel2, xlim=xlim) save_plot() t = ess.extract_ranges2(country_map, 'hincrank_f', 'hasrelig_f') ess.plot_cis(t, ORANGE1) thinkplot.Config(title='Income rank', xlabel=xlabel2, xlim=xlim) save_plot() t = ess.extract_ranges2(country_map, 'tvtot_f', 'hasrelig_f') ess.plot_cis(t, RED) thinkplot.Config(title='Television watching', xlabel=xlabel2, xlim=xlim) save_plot() t = ess.extract_ranges2(country_map, 'rdtot_f', 'hasrelig_f') ess.plot_cis(t, BLUE1) thinkplot.Config(title='Radio listening', xlabel=xlabel2, xlim=xlim) save_plot() t = ess.extract_ranges2(country_map, 'nwsptot_f', 'hasrelig_f') ess.plot_cis(t, BLUE2) thinkplot.Config(title='Newspaper reading', xlabel=xlabel2, xlim=xlim) save_plot() t = ess.extract_ranges2(country_map, 'netuse_f', 'hasrelig_f') ess.plot_cis(t, PURPLE2) thinkplot.Config(title='Internet use', xlabel=xlabel2, xlim=xlim) save_plot() cdfnames = ['netuse_f', 'edurank_f', 'tvtot_f', 'hincrank_f', 'rdtot_f', 'nwsptot_f', 'inwyr07_f', 'yrbrn60_f'] ess.plot_cdfs(country_map, ess.extract_ranges2, cdfnames=cdfnames) thinkplot.Config(xlabel=xlabel2, xlim=[-2, 0.7], ylabel='CDF', loc='upper left') save_plot() # Here's the scatter plot of effect size on rlgdgr versus mean value of rlgdgr # # rlgdgr is on a 0 to 10 scale, so it is mildly astonishing that national means vary 
as much as they do, from 2.5 to 7. t = ess.extract_ranges2(country_map, 'netuse_f', 'hasrelig_f') ess.plot_scatter(t, BLUE) thinkplot.Config(title='', xlabel=xlabel2, ylabel='Fraction affiliated', xlim=[-2.5, 0.5], ylim=[0, 1] ) save_plot() t = ess.extract_ranges2(country_map, 'netuse_f', 'rlgdgr_f') ess.plot_scatter(t, BLUE) thinkplot.Config(title='', xlabel=xlabel2, ylabel='Mean religiosity', xlim=[-2.5, 0.5], ylim=[0, 7.5] ) save_plot() t = ess.extract_ranges2(country_map, 'netuse_f', 'netuse_f') ess.plot_scatter(t, PURPLE2) thinkplot.Config(title='', xlabel=xlabel2, ylabel='Mean Internet use', xlim=[-2.5, 0.5], ylim=[0, 7.5]) save_plot() # + reload(ess) varnames = ['inwyr07_f', 'yrbrn60_f', 'netuse_f', 'edurank_f', 'tvtot_f', 'hincrank_f', 'rdtot_f', 'nwsptot_f'] ts = ess.make_table(country_map, varnames, ess.extract_ranges) ess.print_table(ts) # - ts = ess.make_table(country_map, varnames, ess.extract_ranges2) ess.print_table(ts)
ess4.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# Committee/ensemble evaluation of a pre-trained SYCLOP decoder over a set of
# saccade trajectories: for each trajectory, build an MNIST dataset of
# low-resolution glimpses, run the decoder, and combine the per-trajectory
# predictions (entropy selection / summed soft-max "super prediction").

# +
from misc import HP
import argparse
import random
import time
import pickle
import copy
import SYCLOP_env as syc
from misc import *
import sys
import os
import cv2
import argparse
import tensorflow.keras as keras
from keras_networks import rnn_model_102, rnn_model_multicore_201, rnn_model_multicore_202
from curriculum_utils import create_mnist_dataset, bad_res102
# NOTE(review): `np` and `plt` are used throughout but not imported explicitly
# here — presumably they come in via `from misc import *`; confirm.


# +
def generate_trajectory(n_steps, max_q, acceleration_mode):
    """Random saccade trajectory of n_steps 2-D positions, starting at the
    center of the max_q grid.

    acceleration_mode=True integrates a random-walk velocity (qdot);
    otherwise each step is an independent uniform jump in [-5, 5]^2.
    """
    starting_point = np.array([max_q[0] // 2, max_q[1] // 2])
    steps = []
    qdot = 0
    for j in range(n_steps):
        # append a copy (starting_point is mutated in place below)
        steps.append(starting_point * 1)
        if acceleration_mode:
            qdot += np.random.randint(-1, 2, 2)
            starting_point += qdot
        else:
            starting_point += np.random.randint(-5, 6, 2)
    return np.array(steps)


# +
def split_dataset_xy(dataset):
    """Split a dataset of (image, q, ..., label) tuples into
    ((images[..., newaxis], q[:, :n_timesteps, :]), labels).

    NOTE(review): relies on the module-level global `n_timesteps` defined
    further down — this function must not be called before that assignment.
    """
    dataset_x1 = [uu[0] for uu in dataset]
    dataset_x2 = [uu[1] for uu in dataset]
    dataset_y = [uu[-1] for uu in dataset]
    return (np.array(dataset_x1)[..., np.newaxis], np.array(dataset_x2)[:, :n_timesteps, :]), np.array(dataset_y)


#parse hyperparameters
lsbjob = os.getenv('LSB_JOBID')
lsbjob = '' if lsbjob is None else lsbjob

hp = HP()
hp.save_path = 'saved_runs'
hp.description = ''
parser = argparse.ArgumentParser()
parser.add_argument('--tau_int', default=4., type=float, help='Integration timescale for adaaptation')
parser.add_argument('--resize', default=1.0, type=float, help='resize of images')
parser.add_argument('--run_name_suffix', default='', type=str, help='suffix for runname')
parser.add_argument('--eval_dir', default=None, type=str, help='eval dir')
parser.add_argument('--dqn_initial_network', default=None, type=str, help='dqn_initial_network')
parser.add_argument('--decoder_initial_network', default=None, type=str, help='decoder_initial_network')
parser.add_argument('--decoder_arch', default='default', type=str, help='decoder_network architecture: default / multicore_201')
parser.add_argument('--decoder_n_cores', default=1, type=int, help='decoder number of cores')
parser.add_argument('--decoder_learning_rate', default=1e-3, type=float, help='decoder learning rate')
parser.add_argument('--decoder_dropout', default=0.0, type=float, help='decoder dropout')
parser.add_argument('--decoder_rnn_type', default='gru', type=str, help='gru or rnn')
parser.add_argument('--decoder_rnn_units', default=100, type=int, help='decoder rnn units')
parser.add_argument('--decoder_rnn_layers', default=1, type=int, help='decoder rnn units')
parser.add_argument('--decoder_ignore_position', dest='decoder_ignore_position', action='store_true')
parser.add_argument('--no-decoder_ignore_position', dest='decoder_ignore_position', action='store_false')
parser.add_argument('--syclop_learning_rate', default=2.5e-3, type=float, help='syclop (RL) learning rate')
parser.add_argument('--color', default='grayscale', type=str, help='grayscale/rgb')
parser.add_argument('--speed_reward', default=0.0, type=float, help='speed reward, typically negative')
parser.add_argument('--intensity_reward', default=0.0, type=float, help='speed penalty reward')
parser.add_argument('--loss_reward', default=-1.0, type=float, help='reward for loss, typically negative')
parser.add_argument('--resolution', default=28, type=int, help='resolution')
parser.add_argument('--max_eval_episodes', default=10000, type=int, help='episodes for evaluation mode')
parser.add_argument('--steps_per_episode', default=9, type=int, help='time steps in each episode in ')
parser.add_argument('--fit_verbose', default=1, type=int, help='verbose level for model.fit ')
parser.add_argument('--steps_between_learnings', default=100, type=int, help='steps_between_learnings')
parser.add_argument('--num_epochs', default=100, type=int, help='steps_between_learnings')
parser.add_argument('--alpha_increment', default=0.01, type=float, help='reward for loss, typically negative')
parser.add_argument('--beta_t1', default=400000, type=int, help='time rising bete')
parser.add_argument('--beta_t2', default=700000, type=int, help='end rising beta')
parser.add_argument('--beta_b1', default=0.1, type=float, help='beta initial value')
parser.add_argument('--beta_b2', default=1.0, type=float, help='beta final value')
parser.add_argument('--curriculum_enable', dest='curriculum_enable', action='store_true')
parser.add_argument('--no-curriculum_enable', dest='curriculum_enable', action='store_false')
parser.add_argument('--conv_fe', dest='conv_fe', action='store_true')
parser.add_argument('--no-conv_fe', dest='conv_fe', action='store_false')
parser.add_argument('--acceleration_mode', dest='acceleration_mode', action='store_true')
parser.add_argument('--no-acceleration_mode', dest='acceleration_mode', action='store_false')

parser.set_defaults(eval_mode=False, decode_from_dvs=False,test_mode=False,rising_beta_schedule=True,decoder_ignore_position=False,
                    curriculum_enable=True, conv_fe=False, acceleration_mode=True)

# parse_args('') ignores the real CLI and takes all defaults (notebook mode)
config = parser.parse_args('')
# config = parser.parse_args()
config = vars(config)
# NOTE(review): 'upadte_from_dict' is a (misspelled) method of the project HP
# class — the typo is in its definition, not here.
hp.upadte_from_dict(config)
hp.this_run_name = sys.argv[0] + '_noname_' + hp.run_name_suffix + '_' + lsbjob + '_' + str(int(time.time()))

#define model
n_timesteps = hp.steps_per_episode

##
# deploy_logs()
##

# if hp.decoder_arch == 'multicore_201':
#     decoder = rnn_model_multicore_201(n_cores=hp.decoder_n_cores,lr=hp.decoder_learning_rate,ignore_input_B=hp.decoder_ignore_position,dropout=hp.decoder_dropout,rnn_type=hp.decoder_rnn_type,
#                                input_size=(hp.resolution,hp.resolution, 1),rnn_layers=hp.decoder_rnn_layers,conv_fe=hp.conv_fe, rnn_units=hp.decoder_rnn_units, n_timesteps=hp.steps_per_episode)
# if hp.decoder_arch == 'multicore_202':
#     decoder = rnn_model_multicore_202(n_cores=hp.decoder_n_cores, lr=hp.decoder_learning_rate,
#                                       ignore_input_B=hp.decoder_ignore_position, dropout=hp.decoder_dropout,
#                                       rnn_type=hp.decoder_rnn_type,
#                                       input_size=(hp.resolution, hp.resolution, 1),
#                                       rnn_layers=hp.decoder_rnn_layers, conv_fe=hp.conv_fe,
#                                       rnn_units=hp.decoder_rnn_units, n_timesteps=hp.steps_per_episode)
# elif hp.decoder_arch == 'default':
#     decoder = rnn_model_102(lr=hp.decoder_learning_rate,ignore_input_B=hp.decoder_ignore_position,dropout=hp.decoder_dropout,rnn_type=hp.decoder_rnn_type,
#                             input_size=(hp.resolution,hp.resolution, 1),rnn_layers=hp.decoder_rnn_layers,conv_fe=hp.conv_fe,rnn_units=hp.decoder_rnn_units, n_timesteps=hp.steps_per_episode)

# load a decoder trained by a previous curriculum run instead of building one
decoder_initial_network = 'saved_runs/trajectory_curriculum101.py_noname__200012_1624129553_1/final_decoder.nwk'
decoder = keras.models.load_model(decoder_initial_network)

#define dataset
(images, labels), (images_test, labels_test) = keras.datasets.mnist.load_data(path="mnist.npz")

#fit one epoch in a time
# scheduler = Scheduler(hp.lambda_schedule)
# for epoch in range(hp.num_epochs):
#     lambda_epoch = scheduler.step(epoch)
# -

hp.acceleration_mode

alpha = 0
hp.num_trials = 30
# trajectories = []
# NOTE(review): `trajectories` is read below but its initialization is
# commented out — this cell only runs if `trajectories` was loaded/created in
# an earlier session (e.g. from committee103_traj_30.pkl).
train_pred_pred = []
val_pred_pred = []
for trial in range(hp.num_trials):
    # this_trajectory=generate_trajectory(hp.steps_per_episode,[72,72],hp.acceleration_mode)
    this_trajectory = trajectories[trial]
    train_dataset, test_dataset = create_mnist_dataset(images, labels, 6, sample=hp.steps_per_episode, bad_res_func=bad_res102, return_datasets=True,
                                                       q_0=this_trajectory, alpha=0.0, random_trajectories=True, acceleration_mode=hp.acceleration_mode)
    train_dataset_x, train_dataset_y = split_dataset_xy(train_dataset)
    test_dataset_x, test_dataset_y = split_dataset_xy(test_dataset)
    q_prime = train_dataset_x[1][0]
    # print('epoch', epoch, ' CONTROL!!!',' first q --', q_prime.reshape([-1]))
    print("evaluating trajectory ", trial)
    train_preds = decoder.predict(
        train_dataset_x,
        batch_size=64,
        verbose=hp.fit_verbose,
        # We pass some validation for
        # monitoring validation loss and metrics
        # at the end of each epoch
    )
    val_preds = decoder.predict(
        test_dataset_x,
        batch_size=64,
        verbose=hp.fit_verbose,
        # We pass some validation for
        # monitoring validation loss and metrics
        # at the end of each epoch
    )
    accuracy = np.mean(np.argmax(val_preds, axis=1) == test_dataset_y)
    print('accuracy:', accuracy)
    # trajectories.append(this_trajectory+0.)
    # +0.0 forces a copy of the prediction arrays
    train_pred_pred.append(train_preds + 0.0)
    val_pred_pred.append(val_preds + 0.0)

# NOTE(review): `preds` is not defined in this script — stale cell, probably
# meant `val_preds` (last trial) or one element of `val_pred_pred`.
accuracy = np.mean(np.argmax(preds, axis=1) == test_dataset_y)

accuracy

# Per-trial prediction entropy and label matrices: ent[s, t] is the entropy of
# trial t's soft-max for sample s; lablab[s, t] the argmax label.
# NOTE(review): `pred_pred` is not defined — presumably `val_pred_pred`.
ent = np.zeros([np.shape(test_dataset_y)[0], hp.num_trials])
lablab = np.zeros([np.shape(test_dataset_y)[0], hp.num_trials])
for jj, preds in enumerate(pred_pred):
    ent[:, jj] = np.sum(-preds * np.log(preds), axis=1)
    lablab[:, jj] = np.argmax(preds, axis=1)

# per-sample index of the most confident (lowest-entropy) trial
ii = np.argmin(ent, axis=1)

best_lbl = []
for jj, uu in enumerate(ii):
    best_lbl.append(lablab[jj, uu])

# accuracy of the minimum-entropy committee member
np.mean(best_lbl == test_dataset_y)

np.mean(lablab == test_dataset_y.reshape([-1, 1]))

# per-trial accuracies and the single best trial
accuracies = np.mean(lablab == test_dataset_y.reshape([-1, 1]), axis=0)

best_ii = np.argmax(np.mean(lablab == test_dataset_y.reshape([-1, 1]), axis=0))

np.mean(ii == best_ii)

# fraction of samples that at least one trial classifies correctly (oracle)
np.mean(np.any(lablab == test_dataset_y.reshape([-1, 1]), axis=1))

best_ent = np.min(ent, axis=1)

_ = plt.hist(best_ent, bins=20)

_ = plt.hist(best_ent[best_lbl != test_dataset_y], bins=20)

_ = plt.hist(best_ent[best_lbl == test_dataset_y], bins=20)

# committee vote: sum soft-max vectors over trials, then argmax
super_pred = np.sum(pred_pred, axis=0)
super_label = np.argmax(super_pred, axis=1)
np.mean(super_label == test_dataset_y)

super_label.shape

with open('committee103_traj_30.pkl', 'wb') as f:
    pickle.dump(trajectories, f)

trajectories

trajectories[25]


def super_pred_fun(pred, T=1):
    """Temperature-scaled committee vote.

    Sharpen/flatten each trial's soft-max with temperature T (T=1 leaves it
    unchanged; large T approaches a uniform re-weighting), renormalize, and
    sum over trials. `pred` is an iterable of (n_samples, n_classes) arrays.
    """
    logits = np.log(pred)
    pred_T = np.exp(1. / T * logits)
    pred_T = pred_T / np.sum(pred_T, axis=-1)[..., np.newaxis]
    super_pred = np.sum(pred_T, axis=0)
    return super_pred


super_pred = super_pred_fun(train_pred_pred)

super_pred = super_pred_fun(val_pred_pred, T=1000)
super_label = np.argmax(super_pred, axis=1)
print(np.mean(super_label == test_dataset_y))

np.linspace(0.1, 5.0, 100)

# committee size ablation: 15 / 5 / 2 members
super_pred = super_pred_fun(val_pred_pred[:15], T=1000)
super_label = np.argmax(super_pred, axis=1)
print(np.mean(super_label == test_dataset_y))

super_pred = super_pred_fun(val_pred_pred[:5], T=1000)
super_label = np.argmax(super_pred, axis=1)
print(np.mean(super_label == test_dataset_y))

super_pred = super_pred_fun(val_pred_pred[:2], T=1000)
super_label = np.argmax(super_pred, axis=1)
print(np.mean(super_label == test_dataset_y))

# +
# x = np.linspace(0, 2*np.pi, 64)
# y = np.cos(x)
# pl.figure()
# pl.plot(x,y)

# plot all trajectories, colored by their normalized accuracy
n = hp.num_trials
# colors = plt.cm.jet(accuracies)
colors = plt.cm.jet((accuracies - np.min(accuracies)) / (np.max(accuracies) - np.min(accuracies)))
#
for trial in range(hp.num_trials):
    plt.plot(trajectories[trial][:, 0], trajectories[trial][:, 1], color=colors[trial])
# plt.colorbar()
# -

colors = plt.cm.jet((accuracies - np.min(accuracies)) / (np.max(accuracies) - np.min(accuracies)))

# same plot with a red-to-green colormap and thicker lines
n = hp.num_trials
# colors = plt.cm.jet(accuracies)
colors = plt.cm.RdYlGn((accuracies - np.min(accuracies)) / (np.max(accuracies) - np.min(accuracies)))
#
for trial in range(hp.num_trials):
    plt.plot(trajectories[trial][:, 0], trajectories[trial][:, 1], color=colors[trial], linewidth=3)

plt.cm.jet(1.0)

# +
# NOTE(review): `multiline` is not defined or imported in this notebook —
# this demo cell only runs if a multiline/LineCollection helper is pasted in.
n_lines = hp.num_trials
x = np.arange(100)
yint = np.arange(0, n_lines * 10, 10)
ys = np.array([x + b for b in yint])
xs = np.array([x for i in range(n_lines)])  # could also use np.tile
colors = np.arange(n_lines)

fig, ax = plt.subplots()
lc = multiline(xs, ys, yint, cmap='bwr', lw=2)

axcb = fig.colorbar(lc)
axcb.set_label('Y-intercept')
ax.set_title('Line Collection with mapped colors')

# +
# Stacking experiment: train an MLP on the concatenated soft-max outputs of
# the first 5 committee members (5 trials x 10 classes = 50 features).
# Set the input shape
input_shape = (50,)
# print(f'Feature shape: {input_shape}')

# Create the model
model = keras.Sequential()
model.add(keras.layers.Dense(300, input_shape=input_shape, activation='relu'))
model.add(keras.layers.Dropout(0.4))
model.add(keras.layers.Dense(100, activation='relu'))
model.add(keras.layers.Dropout(0.4))
model.add(keras.layers.Dense(50, activation='relu'))
model.add(keras.layers.Dropout(0.2))
model.add(keras.layers.Dense(10, activation='softmax'))

# Configure the model and start training
model.compile(loss='sparse_categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
model.fit(np.transpose(train_pred_pred[:5], [1, 2, 0]).reshape([-1, 50]), train_dataset_y.astype(int), epochs=100, batch_size=250, verbose=1, validation_split=0.2)
# -
committee103.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# In this Notebook I want to compare learnability of networks by different set of samples (some samples from diagonal frames and some samples from outer frames)

# +
import os
import sys
model_path = "/Users/neda/HiCPlus_pytorch/src/models"
sys.path.insert(0, model_path)
import model
import numpy as np
import matplotlib.pyplot as plt
import pickle
import gzip
from torch.utils import data
import torch
import torch.optim as optim
from torch.autograd import Variable
from time import gmtime, strftime
import torch.nn as nn
from scipy.stats.stats import pearsonr
import argparse

use_gpu = 0
down_sample_ratio = 16
epochs = 50
HiC_max_value = 100 # ?????
batch_size = 256
# -

# frame index table: columns 1 and 2 appear to hold the two genomic bin
# coordinates of each frame (diagonal offset = col2 - col1)
indices = np.load("/Users/neda/HiCPlus_pytorch/data/divided-data/GM12878_primary/10kb_resolution/chr1-17-index.npy", "r")
indices = indices.astype("int64")


def d_indices(d):
    """Indices of frames whose diagonal offset equals d (col1 + d == col2)."""
    return np.where(indices[:,1] + d == indices[:,2])


def corr_highVSlow(index, data1, data2):
    """Pearson correlation between the flattened frames data1[index] and data2[index]."""
    return pearsonr(data1[index,0,:,:].flatten(), data2[index,0,:,:].flatten())[0]


# +
# defining training data
# shift size indicate location of frames responding to matrix diagonal
get_minimum = 0
shift_size = 50

low_resolution_samples = np.load("/Users/neda/HiCPlus_pytorch/data/divided-data/GM12878_primary/10kb_resolution/chr1-17(down16)(rep2).npy", "r").astype(np.float32) * down_sample_ratio
low_resolution_samples = np.expand_dims(low_resolution_samples, axis=1)

high_resolution_samples = np.load("/Users/neda/HiCPlus_pytorch/data/divided-data/GM12878_primary/10kb_resolution/chr1-17.npy", "r").astype(np.float32)
high_resolution_samples = np.expand_dims(high_resolution_samples, axis=1)

# keep only frames at diagonal offset shift_size
high_resolution_samples = high_resolution_samples[d_indices(shift_size)[0],:,:,:]
low_resolution_samples = low_resolution_samples[d_indices(shift_size)[0],:,:,:]

if get_minimum == 1:
    # optional clipping of contact counts at HiC_max_value
    high_resolution_samples = np.minimum(high_resolution_samples, HiC_max_value)
    low_resolution_samples = np.minimum(low_resolution_samples, HiC_max_value)

sample_size = high_resolution_samples.shape[-1]
half_padding = int(model.half_padding)
num_samples = low_resolution_samples.shape[0]

"""
lowres_set = torch.from_numpy(low_resolution_samples[39*256:40*256,])
hires_set = torch.from_numpy(high_resolution_samples[39*256:40*256,])
print("high and low loss: ", _loss(Variable(lowres_set), Variable(hires_set)).item())
zero_data = torch.from_numpy(np.zeros((256,1,sample_size,sample_size), dtype = 'float32'))
print("high and zero loss: ", _loss(Variable(zero_data), Variable(hires_set)).item())
"""

# crop the high-res target to the network's output size (valid convolutions)
high_resolution_samples = high_resolution_samples[:,:,half_padding:(sample_size-half_padding),half_padding:(sample_size-half_padding)]

# dummy all-zero label tensors: TensorDataset requires a target, but the
# targets come from hires_loader, so these labels are never used
lowres_set = data.TensorDataset(torch.from_numpy(low_resolution_samples), torch.from_numpy(np.zeros(low_resolution_samples.shape[0])))
lowres_loader = torch.utils.data.DataLoader(lowres_set, batch_size=batch_size, shuffle=False)

hires_set = data.TensorDataset(torch.from_numpy(high_resolution_samples), torch.from_numpy(np.zeros(high_resolution_samples.shape[0])))
hires_loader = torch.utils.data.DataLoader(hires_set, batch_size=batch_size, shuffle=False)

# Train two independent networks (Net0, Net1) on the same data so their
# weight-space interpolation can be examined below.
for t in range(2):
    # defining network
    Net = model.Net(40, 28)
    if use_gpu:
        Net = Net.cuda()
    optimizer = optim.SGD(Net.parameters(), lr = 0.00001)
    _loss = nn.MSELoss()
    Net.train()
    running_loss = 0.0
    losslist = []
    for epoch in range(0, epochs):
        # iterate over two lists and their indices using enumerate together with zip
        # lowres_loader is list of batches
        for i, (v1, v2) in enumerate(zip(lowres_loader, hires_loader)):
            # probably it is for skipping last incomplete batch
            if (i == len(lowres_loader) - 1):
                continue
            # v1 is (data, label): v1[0] is the batch tensor (256x1x40x40),
            # v1[1] is the dummy all-zero label vector created above
            _lowRes, _ = v1
            _highRes, _ = v2
            # print "_lowres:", _lowRes, "\n shape: ", _lowRes.shape
            _lowRes = Variable(_lowRes)
            _highRes = Variable(_highRes)
            if use_gpu:
                _lowRes = _lowRes.cuda()
                _highRes = _highRes.cuda()
            optimizer.zero_grad()
            y_prediction = Net(_lowRes)
            loss = _loss(y_prediction, _highRes)
            loss.backward()
            optimizer.step()
            #print(loss.item())
            running_loss += loss.item()
        # NOTE(review): dividing by i (last batch index) not the batch count,
        # and the skipped last batch means this average is slightly off
        print ('-------', i, epoch, running_loss/i, strftime("%Y-%m-%d %H:%M:%S", gmtime()))
        losslist.append(running_loss/i)
        running_loss = 0.0
    globals()["Net" + str(t)] = Net  # stash as Net0 / Net1 (globals() hack)
# -

# Linear interpolation in weight space between Net0 and Net1.
# NOTE(review): temp_Net is an ALIAS of Net0 (no copy), so assigning
# temp_param.data also overwrites Net0's parameters — after the first alpha
# iteration param1.data is no longer Net0's original weights. A deep copy
# (copy.deepcopy(Net0)) is presumably intended; confirm before trusting the
# resulting loss curve.
temp_Net = Net0
lowres_set = torch.from_numpy(low_resolution_samples)
hires_set = torch.from_numpy(high_resolution_samples)
loss_list = []
for alpha in np.arange(0,1.01,0.01):
    for (temp_param, param1, param2) in zip(temp_Net.parameters(), Net0.parameters(), Net1.parameters()):
        temp_param.data = (alpha * param1.data) + ((1 - alpha) * param2.data)
    y_prediction = temp_Net(Variable(lowres_set))
    # NOTE(review): appends the loss *tensor* (keeps the autograd graph
    # alive); .item() is probably intended
    loss_list.append(_loss(y_prediction, Variable(hires_set)))

plt.plot(loss_list)
# NOTE(review): plt.axis expects [xmin, xmax, ymin, ymax]; passing a
# 1001-element array will error — plt.xticks(...) was probably intended
plt.axis(np.arange(0,1.001,0.001))
plt.show()

# mean of correlation based on location of frames responding to diagonal of matrix
mean_int = {}
for i in range(-200,201,25):
    mean_int[i] = []
    for j in d_indices(i)[0]:
        mean_int[i].append(np.mean(low_resolution_samples[j,]))

corr_list = {}
for i in range(-200,201,25):
    corr_list[i] = []
    for j in d_indices(i)[0]:
        # skip all-zero frames (correlation undefined)
        if np.sum(low_resolution_samples[j,:,:]) != 0 and np.sum(high_resolution_samples[j,:,:]) != 0:
            corr_list[i].append(pearsonr(low_resolution_samples[j,0,:,:].flatten(),high_resolution_samples[j,0,:,:].flatten())[0])

mean_corr_list = [np.mean(corr_list[i]) for i in range(-200,201,25)]

import matplotlib.pyplot as plt
plt.scatter(range(-200,201,25), mean_corr_list)
plt.show()

for param in temp_Net.parameters():
    print(param.data)
src/notebooks/.ipynb_checkpoints/HiC max value reason-checkpoint.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# + [markdown] deletable=true editable=true
# # Sine LSTM prediction
# The aim of this notebook is to train a LSTM model being able to predict the values of a sine.

# + deletable=true editable=true
import numpy as np
from matplotlib import pyplot as pl
from keras.models import Sequential
from keras.layers import Dense, Activation, Dropout
from keras.layers import LSTM
from keras.optimizers import RMSprop
from keras.utils import np_utils
from sklearn import metrics as me
# NOTE(review): Dropout, RMSprop, np_utils, `me` and the `pl` alias are
# imported but never used below.

# %matplotlib inline

# + deletable=true editable=true
import math

step_radians = 0.01
steps_of_history = 200   # length of the input window fed to the LSTM
steps_in_future = 1      # stride between consecutive windows / prediction horizon
index = 0                # NOTE(review): unused

# Training set: sliding windows of 200 sine values, each labeled with the
# value immediately following the window.
x = np.sin(np.arange(0, 20*math.pi, step_radians))

seq = []
next_val = []

for i in range(0, len(x) - steps_of_history, steps_in_future):
    seq.append(x[i: i + steps_of_history])
    next_val.append(x[i + steps_of_history])

# reshape to (samples, timesteps, features) as expected by Keras LSTMs
seq = np.reshape(seq, [-1, steps_of_history, 1])
next_val = np.reshape(next_val, [-1, 1])
print(np.shape(seq))

trainX = np.array(seq)
trainY = np.array(next_val)

# Testing set: a later, disjoint stretch of the same sine
x = np.sin(np.arange(20*math.pi, 24*math.pi, step_radians))

seq = []

for i in range(0, len(x) - steps_of_history, steps_in_future):
    seq.append(x[i: i + steps_of_history])

seq = np.reshape(seq, [-1, steps_of_history, 1])
testX = np.array(seq)

# + deletable=true editable=true
batch_size = 234

# create and fit the LSTM network
model = Sequential()
# NOTE(review): 200 hard-codes steps_of_history — keep the two in sync
model.add(LSTM(4, input_shape=(200, 1)))
model.add(Dense(1))
model.compile(loss='mean_squared_error', optimizer='adam')
model.summary()

# + deletable=true editable=true
model.fit(trainX, trainY, epochs=25, batch_size=batch_size, verbose=1)

# + deletable=true editable=true
import matplotlib.pyplot as plt

# Predict the future values
predictY = model.predict(testX)
print(predictY)

# Plot the results
plt.figure(figsize=(20,4))
plt.suptitle('Prediction')
plt.title('History='+str(steps_of_history)+', Future='+str(steps_in_future))
# The data is shifted by 200 values because the 1st prediction is made from the
# 200 previous values. Therefore, we have to drop the first 200 values of the
# test series from the plot so actual and predicted align.
plt.plot(x[200:], 'r-', label='Actual')
plt.plot(predictY, 'gx', label='Predicted')
plt.legend()
# -
src/SimpleLSTM.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # [![image](https://colab.research.google.com/assets/colab-badge.svg)](https://githubtocolab.com/giswqs/leafmap/blob/master/examples/notebooks/44_attribute_table.ipynb) # [![image](https://mybinder.org/badge_logo.svg)](https://gishub.org/leafmap-binder) # # Uncomment the following line to install [leafmap](https://leafmap.org) if needed. # + # # !pip install leafmap # - import leafmap # Add vector data to the map and use the GUI to open attribute table. # + m = leafmap.Map() url = "https://raw.githubusercontent.com/giswqs/leafmap/master/examples/data/countries.geojson" m.add_geojson( url, layer_name="Countries", fill_colors=['red', 'yellow', 'green', 'orange'], info_mode=False, ) in_geojson = 'https://raw.githubusercontent.com/giswqs/leafmap/master/examples/data/cable_geo.geojson' m.add_geojson(in_geojson, layer_name="Cable lines", info_mode=False) m # - # ![](https://i.imgur.com/IIoLVSG.gif)
examples/notebooks/44_attribute_table.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# Generate word2vec embeddings for the titles and synopses of the train/test
# split stored in ../trainTest.pk, and pickle the result as <split>.pk.

# +
import sys
sys.path.append('..')
import pickle
import gensim.downloader as api
import nltk
from nltk.tokenize import word_tokenize, wordpunct_tokenize
from nltk.corpus import stopwords
import string
import numpy as np

nltk.download('punkt')
nltk.download('stopwords')

# trainTest.pk holds three consecutively-pickled objects: the class list,
# then the train split, then the test split.
with open('../trainTest.pk', 'rb') as f:
    classes = pickle.load(f)
    train = pickle.load(f)
    test = pickle.load(f)
# -

# pre-trained 300-d Google News word2vec vectors (downloads on first use)
wv = api.load('word2vec-google-news-300')


# +
# translation table that deletes every ASCII punctuation character
_punct_table = str.maketrans('', '', string.punctuation)


def remove_punctuation(s: str) -> str:
    """Return s with all ASCII punctuation characters removed.

    Uses str.translate with a precomputed table — one C-level pass instead
    of a quadratic character-by-character string build.
    """
    return s.translate(_punct_table)


# frozenset for O(1) membership tests; stopwords.words() returns a list and
# must not be re-fetched per token
english_stopwords = frozenset(stopwords.words('english'))


def remove_stopwords(_list):
    """Return the tokens from _list that are not English stopwords.

    Fix: the previous version called stopwords.words('english') (a fresh
    ~180-element list) for every single token; we now test against the
    precomputed english_stopwords set.
    """
    return [token for token in _list if token not in english_stopwords]


def preprocess(s: str):
    """Lowercase, strip punctuation, tokenize, and drop stopwords."""
    return remove_stopwords(word_tokenize(remove_punctuation(s.lower())))


def get_embeddings(words):
    """Stack the word2vec vectors of the in-vocabulary words into an array.

    Out-of-vocabulary words are silently skipped, so the result may have
    fewer rows than len(words) (or be empty).
    """
    result = [wv[word] for word in words if word in wv]
    return np.array(result)


# +
# Embed every record of the chosen split. Each record is
# (class, title, synopsis, id).
dataset_name = "test"
dataset = train if dataset_name == "train" else test
title_embeddings = []
synopsis_embeddings = []
for i, (label, title, synopsis, _id) in enumerate(dataset):
    title_embeddings.append(get_embeddings(preprocess(title)))
    synopsis_embeddings.append(get_embeddings(preprocess(synopsis)))
    if i % 200 == 0:
        print(i)  # progress marker

print('title shape, synopsis shape')
print(len(title_embeddings), len(synopsis_embeddings))
# -

# persist the embeddings for this split
import pickle
pickle_filename = "{}.pk".format(dataset_name)
with open(pickle_filename, 'wb') as f:
    pickle.dump([title_embeddings, synopsis_embeddings], f)

# round-trip sanity check
import pickle
with open(pickle_filename, 'rb') as f:
    loaded_t, loaded_s = pickle.load(f)

# manual spot-check of the pipeline on one training title
title = train[20][1]
print('title:', title)
title = preprocess(title)
print(title)
title_embeddings = []
for word in title:
    if word in wv:
        print('appending:', word)
        title_embeddings.append(wv[word])
title_embeddings = np.array(title_embeddings)
# print(title_embeddings.shape)
core/generate_embeddings/generate_w2v_embedddings.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + _uuid="8f2839f25d086af736a60e9eeb907d3b93b6e0e5" _cell_guid="b1076dfc-b9ad-4769-8c92-a6c4dae69d19" # This Python 3 environment comes with many helpful analytics libraries installed # It is defined by the kaggle/python Docker image: https://github.com/kaggle/docker-python # For example, here's several helpful packages to load import numpy as np # linear algebra import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) import seaborn as sns import matplotlib.pyplot as plt import category_encoders as ce #from scipy import stats pd.set_option('display.max_columns', None) #pd.set_option('display.max_rows', None) pd.options.display.float_format = '{:.5f}'.format # %matplotlib inline #sns.set(style='whitegrid', palette='muted', font_scale=1.5) import warnings warnings.filterwarnings('ignore') # Input data files are available in the read-only "../input/" directory # For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory import os for dirname, _, filenames in os.walk('/kaggle/input'): for filename in filenames: print(os.path.join(dirname, filename)) # You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All" # You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session # + #pd.set_option('display.max_rows', 20) # - # !pip install openpyxl data = pd.read_excel("/kaggle/input/housing-data/housingData_7R.xlsx") # + #data = df.copy() # - df = data.copy() # + #data = df.copy() # - data.head() data.shape data.columns data = data.drop('Unnamed: 0', axis = 1) data.describe(include = "all") data['adDescription'].head() data['adDealType'].value_counts() # + 
_kg_hide-output=false data['Deal_Type'] = data['adDealType'].copy() # + target_map = { 10: 'rental', 20: 'sell'} data.Deal_Type.replace(target_map, inplace = True) # - data['Deal_Type'].head() data.columns len(data.columns) # + original_col = data.columns new_col = ['ID', 'DealType', 'Canton', 'ZipCode', 'City','PublishedDate', 'AvailableDate', 'PriceText', 'Description', 'LangDetected', 'NumRooms', 'Floor', 'YearOfConstruction', 'NumApartments', 'Floor1', 'NumApartments1', 'LivingSpace', 'BuildingArea', 'UsefulArea', 'CoordE', 'CoordN' ,'Deal_Type'] data = data.rename(columns=dict(zip(original_col,new_col))) data.head() # - def percentage_missing(df): pm = (df.isnull().sum()/len(data))*100 return pm # + # replace all column names (in place) #new_cols = [‘column_x’, ‘column_y’, ‘column_z’] #df.columns = new_cols # - # ## EDA data.head() data.columns sns.set_style('darkgrid') plt.figure(figsize=(8, 5)) sns.countplot(data['Deal_Type']) plt.title('Target variable distribution') plt.show() data.shape data.isnull().sum().sort_values(ascending = False) #Plot missing values in data ax = data.isna().sum().sort_values().plot(kind = 'barh', figsize = (9, 10)) plt.title('Percentage of Missing Values Per Column Dataset', fontdict={'size':15}) for p in ax.patches: percentage ='{:,.0f}%'.format((p.get_width()/data.shape[0])*100) width, height =p.get_width(),p.get_height() x=p.get_x()+width+0.02 y=p.get_y()+height/2 ax.annotate(percentage,(x,y)) #To find the percentage of missing rows in each column total = data.isnull().sum().sort_values(ascending=False) percentage = ((data.isnull().sum()/data.isnull().count()).sort_values(ascending=False)*100) missing_data = pd.concat([total, percentage], axis=1, keys=['Total', 'Percentage']) missing_data.head(20) # ## Treating Missing Values and Outliers data.isnull().sum().sort_values(ascending=False) mis_columns = ['NumApartments', 'NumApartments1', 'BuildingArea', 'UsefulArea' ] data.corr() data['NumApartments'].head() 
data['NumApartments'].value_counts() mis_columns NumApartments = data['NumApartments'].copy() BuildingArea = data['BuildingArea'].copy() UsefulArea = data['UsefulArea'].copy() data.columns # + outliers = [] def detect_outliers_zscore(data): thres = 3 mean = np.mean(data) std = np.std(data) # print(mean, std) for i in data: z_score = (i-mean)/std if (np.abs(z_score) > thres): outliers.append(i) return outliers# Driver code #sample_outliers = detect_outliers_zscore(data['adNumApartments.1']) #print("Outliers from Z-scores method: ", sample_outliers) # - sns.boxplot(data['NumApartments']) plt.show() sns.distplot(data['NumApartments']) plt.show() data = data.drop(columns = mis_columns, axis = 1) data.columns data.isnull().sum().any() data.isnull().sum().sort_values(ascending = False) data['City'].value_counts() City = data['City'].copy() data['City'].describe() data['City'].head() #Filling Null Values data['City'].fillna('Zürich', inplace = True) #Fillna #data['City'].ffill(axis = 0) #ForwardFill data['City'].mode() data.isnull().sum().sort_values(ascending = False) # + # ptype_encode = {} # ptype_encode_values = range(16,0,-1) # for i,k in zip(type_count.index,ptype_encode_values): # ptype_encode[i]=k # ptype_encode # data['adCity'] = data['adCity'].map(ptype_encode) # - # #### **Exploring the City copied from data['City']** City.head() City = pd.DataFrame(data = City) City.head() City.info() encoder=ce.TargetEncoder(cols='City') data['City'].head() data['City'].head(20) data['City'].describe() data.describe(include = 'all') # + #sns.scatterplot(x = data['City'], y = data['PriceText']) #plt.show() # - # ## Exploring the target Column # ### PriceText PriceText = data['PriceText'].copy() PriceText = pd.DataFrame(data = PriceText) PriceText.head() data['PriceText'].head() data['PriceText'] = data['PriceText'].replace(',','', regex = True) data['PriceText'].head() data['PriceText'] = data['PriceText'].replace('CHF','', regex = True) data['PriceText'].head() 
data['PriceText'].describe() data['PriceText'].isnull().sum() data['PriceText'].value_counts() data['PriceText'] = data['PriceText'].replace('On request',np.nan) data['PriceText'].value_counts() data['PriceText'].isnull().sum() ax = data.isna().sum().sort_values().plot(kind = 'barh', figsize = (9, 10)) plt.title('Percentage of Missing Values Per Column Dataset', fontdict={'size':15}) for p in ax.patches: percentage ='{:,.0f}%'.format((p.get_width()/data.shape[0])*100) width, height =p.get_width(),p.get_height() x=p.get_x()+width+0.02 y=p.get_y()+height/2 ax.annotate(percentage,(x,y)) data['PriceText'] = data['PriceText'].replace('CHF','', regex = True) data['PriceText'].head() data['PriceText'].value_counts() data['PriceText'] = data['PriceText'].replace('EUR','', regex = True) data['PriceText'].head() data['PriceText'].dtype data['PriceText'].value_counts() data['PriceText'] = data['PriceText'].apply(pd.to_numeric) data['PriceText'].dtype data['PriceText'].describe() sns.distplot(data['PriceText']) sns.boxplot(data['PriceText']) outliers = [] def detect_outliers_zscore(data): thres = 3 mean = np.mean(data) std = np.std(data) # print(mean, std) for i in data: z_score = (i-mean)/std if (np.abs(z_score) > thres): outliers.append(i) return outliers# Driver code sample_outliers = detect_outliers_zscore(data['PriceText']) print("Outliers from Z-scores method: ", sample_outliers) # Since, PriceText contains outliers, drop the rows data = data[data["PriceText"]<min(sample_outliers)].reset_index(drop = True) data['PriceText'].describe() sns.distplot(data['PriceText']) sample_outliers = detect_outliers_zscore(data['PriceText']) #print("Outliers from Z-scores method: ", sample_outliers) data = data[data["PriceText"]<min(sample_outliers)].reset_index(drop = True) data["PriceText"].describe() sns.boxplot(data['PriceText']) plt.boxplot(data["PriceText"]) plt.title("Detecting outliers using Boxplot") plt.xlabel('Sample') sns.distplot(data['PriceText']) # **Encoding the 
categorical column [''City''] with the target column** City.shape, data['PriceText'].shape, data['City'].shape City = data['City'].copy() City.shape, data['PriceText'].shape, data['City'].shape #Fit and Transform Train Data City = encoder.fit_transform(City,data['PriceText']) City.head() City = City.apply(pd.to_numeric) sns.distplot(City) plt.show() # + #City.skew() # + #City.dtype # - plt.boxplot(City) plt.show() data['City'] = City.copy() data.isnull().sum() # Dropping the description for now, Will work on it later Description = data['Description'].copy() data = data.drop('Description', axis = 1) data.describe() data.info() data.columns # + _kg_hide-input=false data.shape # - data.columns # ### LangDetected data['LangDetected'].value_counts() obj_col = [] obj_col.append('LangDetected') # ### Canton data['Canton'].head() data['Canton'].value_counts() # Run it at the later end because it will add to the number of columns and there is still plenty of work to do # + #data = pd.get_dummies(data = data['Canton'], drop_first = True) # - obj_col.append('Canton') # ### ZipCode data.ZipCode.head() data.ZipCode.value_counts() # Is this column really relevant to the model???? 
data.columns # ## Date_Columns date_col = ['PublishedDate', 'AvailableDate'] data[date_col].head() data['AvailableDate'].value_counts() data['AvailableDate'].head() # + #sofort = 'right away' #nach Vereinbarung = by arrangement # - data['PublishedDate'].head() data.columns data['Description'] = Description.copy() data[['PublishedDate', 'AvailableDate', 'DealType', 'PriceText', 'Description']].head() data['PublishedDate'].head() AvlDate = data['AvailableDate'].copy() PubDate = data['PublishedDate'].copy() #AvlDate = pd.to_datetime(AvlDate) AvlDate AvlDate.value_counts() replacement_mapping_dict = { "nach Vereinbarung": np.nan, "sofort": np.nan, "By arrangement": np.nan, "Immediately": np.nan, "not available": np.nan } AvlDate = AvlDate.replace(replacement_mapping_dict) AvlDate.value_counts() len(data) (AvlDate.isnull().sum()/len(data))*100 # + #AvlDate.fillna("Others", inplace = True) # - AvlDate.isnull().sum() # + #AvlDate = pd.to_datetime(AvlDate) # - AvlDate.describe() percentage_missing(AvlDate) AvlDate = pd.DataFrame(AvlDate) ax = AvlDate.isna().sum().sort_values().plot(kind = 'barh', figsize = (9, 10)) plt.title('Percentage of Missing Values Per Column Dataset', fontdict={'size':15}) for p in ax.patches: percentage ='{:,.0f}%'.format((p.get_width()/data.shape[0])*100) width, height =p.get_width(),p.get_height() x=p.get_x()+width+0.02 y=p.get_y()+height/2 ax.annotate(percentage,(x,y)) # Maybe I will need to drop the avilable dates for now because of too much null value # ### Pend Pend Pend Pend # ### PublishedDate data['PublishedDate'].head() data['PublishedDate'] = pd.to_datetime(data['PublishedDate']) data['PublishedDate'].head() # + #Extracting the year, month & day from the published datetime column # Year data['PublishedYear'] = data['PublishedDate'].dt.year # Month data['PublishedMonth'] = data['PublishedDate'].dt.month # Day data['PublishedDay'] = data['PublishedDate'].dt.day # - # ## We Move to other columns again****** data.columns NumRooms = 
data['NumRooms'].copy() data['NumRooms'].head() (data.NumRooms.isnull().sum()/len(data))*100 data['NumRooms'].value_counts() data['NumRooms'] = data['NumRooms'].replace('None', np.nan) data['NumRooms'].head() data['NumRooms'] = data['NumRooms'].apply(pd.to_numeric) data['NumRooms'].isnull().sum() percentage_missing(data['NumRooms']) data['NumRooms'].value_counts() data['NumRooms'].dtype sns.boxplot(data['NumRooms']) data['NumRooms'].describe() def detect_outliers_zscore(data): thres = 3 mean = np.mean(data) std = np.std(data) # print(mean, std) for i in data: z_score = (i-mean)/std if (np.abs(z_score) > thres): outliers.append(i) return outliers# Driver code sample_outliers = detect_outliers_zscore(data['NumRooms']) #print("Outliers from Z-scores method: ", sample_outliers) data = data[data["NumRooms"]<min(sample_outliers)].reset_index(drop = True) sns.boxplot(data['NumRooms']) plt.title("Detecting outliers using Boxplot") plt.xlabel('Sample') data['NumRooms'].describe() sns.distplot(data['NumRooms']) data['NumRooms'].isnull().sum() # + #data['NumRooms'].groupby() # - percentage_missing(data['NumRooms']) data.columns # ### Floor Column data[['Floor', 'Floor1']].head() data = data.drop('Floor1', axis = 1) Floor = data['Floor'].copy() data['Floor'].value_counts() #To remove the digits from the floor column data['Floor'] = data['Floor'].replace('\d+', '', regex = True) data['Floor'].value_counts() # * untergeschoss = basement # * Stock oder höher = floor or higher # # * This is the equivalent of this words in English # The replace fucntion to replace the words in german in the english format and correct the format of the incorrect value replacement_mapping_dict = { ". Stock": 'Stock', ". floor ": 'Floor', "Untergeschoss": 'basement', ". 
Stock oder höher": 'floor or higher', "EG": 'ground floor' } data['Floor'] = data['Floor'].replace(replacement_mapping_dict) data['Floor'].value_counts() obj_col.append('Floor') data[['Floor', 'NumRooms']].head() data.columns # ## Things to try out # # * **data = data.sort_values(by=['adPublishedDate']).reset_index(drop=True)** # * Trying to sort the data by dates... 'adPublishedDate', 'adAvailableDate' # * How many days is between the published date and available date # * Extract days and month from the date columns # * Get the number of years between the year of construction and the Available Year of the house & Year of Publishing the year for an ad # ### Year of Construction Year = data['YearOfConstruction'].copy() data['YearOfConstruction'].head() data['YearOfConstruction'].value_counts() data['YearOfConstruction'] = data['YearOfConstruction'].replace('not available', 0) data['YearOfConstruction'] = data['YearOfConstruction'].apply(pd.to_numeric) data['YearOfConstruction'].value_counts() sns.distplot(data['YearOfConstruction']) sns.boxplot(data['YearOfConstruction']) plt.show() data['YearOfConstruction'].describe() data.columns data['YearOfConstruction'].describe() # ### Living Space LivingSpace = data['LivingSpace'].copy() data['LivingSpace'].head() data['LivingSpace'].dtype data['LivingSpace'].describe() data['LivingSpace'].value_counts() data['LivingSpace'] = data['LivingSpace'].replace('not available', 0) data['LivingSpace'].value_counts() data['LivingSpace'] = data['LivingSpace'].replace(',', '', regex=True) data['LivingSpace'].value_counts() sns.distplot(data['LivingSpace']) data['LivingSpace'] = data['LivingSpace'].apply(pd.to_numeric) sns.boxplot(data['LivingSpace']) plt.show() data['LivingSpace'].describe() sample_outliers = detect_outliers_zscore(data['LivingSpace']) # + #print(sample_outliers) # + #data = data[data["LivingSpace"]<min(sample_outliers)].reset_index(drop = True) # - data['LivingSpace'].describe() # + #sample_outliers = 
detect_outliers_zscore(data['LivingSpace']) #print(sample_outliers) # - sns.boxplot(data['LivingSpace']) sns.distplot(data['LivingSpace']) data['LivingSpace'].describe() Livingspace = data['LivingSpace'].copy() Livingspace = Livingspace.apply(pd.to_numeric) Livingspace = pd.DataFrame(Livingspace) Livingspace.head() Livingspace['LivingSpace'].head() Livingspace.shape sample_outliers = detect_outliers_zscore(Livingspace['LivingSpace']) #print(sample_outliers) #data = data[data["LivingSpace"]<min(sample_outliers)].reset_index(drop = True) Livingspace = Livingspace[Livingspace["LivingSpace"]<min(sample_outliers)].reset_index(drop = True) Livingspace.shape LivingSpace.shape # Should you fix the outliers or leave it in this scenerio??? data.columns # #### Some Columns with outliers that you can work on maybe it would improve the model # * YearOfObservation # * LivingSpace # ### Coordinates data[['CoordN', 'CoordE']].head() fig, ax = plt.subplots(figsize=(16,8)) ax.scatter(data['CoordE'], data['CoordN']) ax.set_xlabel('Eastern Coordinates') ax.set_ylabel('Northern Coordinates') plt.show() data[['CoordN', 'CoordE']].value_counts() data['CoordN'].dtype # ## Final Preparation for Modelling data.columns data[['DealType', 'Deal_Type']].head() target_map = { 10: 0, 20: 1} data['DealType'].replace(target_map, inplace = True) data['DealType'].head() data = data.drop('Deal_Type', axis = 1) data.columns data.shape obj_col data.columns drop_cols = ['ID','AvailableDate', 'Description', 'PublishedDate'] data.head() d = data.copy() data = pd.get_dummies(data = data,columns = obj_col, drop_first = True) data.head() # + #Remember this for some visualization #count_plot = sns.catplot(x="adNumApartments.1", kind="count" ,data=data, hue="DealType") #plt.hist(data['adNumApartments.1']) #plt.show() # - data.isnull().sum() data = data.drop(columns = drop_cols, axis = 1) data.shape data.info() data.describe() # # Modelling y = data['PriceText'] X = data.drop(['PriceText'], axis = 1) from 
sklearn.model_selection import train_test_split x_train, x_test, y_train, y_test = train_test_split(X,y, test_size=0.2, random_state=42) x_train.shape, y_train.shape, x_test.shape, y_test.shape from sklearn.model_selection import KFold, cross_val_score, StratifiedKFold, train_test_split from sklearn.metrics import mean_squared_error, make_scorer from sklearn.linear_model import LinearRegression from sklearn.ensemble import GradientBoostingRegressor, RandomForestRegressor from xgboost import XGBRegressor from lightgbm import LGBMRegressor from catboost import CatBoostRegressor #Initialize the model lr = LinearRegression() gbr = GradientBoostingRegressor() rfr = RandomForestRegressor() xgb = XGBRegressor() ctb = CatBoostRegressor(silent=True) lgb = LGBMRegressor(n_estimators=4000, learning_rate=0.1, subsample=0.3, max_depth=-1, min_child_samples=30, colsample_bytree=1.0) # + #train the model lr.fit(x_train, y_train) gbr.fit(x_train, y_train) rfr.fit(x_train, y_train) xgb.fit(x_train, y_train) ctb.fit(x_train, y_train) lgb.fit(x_train, y_train) # - lr_pred = lr.predict(x_test) gbr_pred = gbr.predict(x_test) rfr_pred = rfr.predict(x_test) xgb_pred = xgb.predict(x_test) ctb_pred = ctb.predict(x_test) lgb_pred = lgb.predict(x_test) lr_rms = np.sqrt(mean_squared_error(y_test, lr_pred)) gbr_rms = np.sqrt(mean_squared_error(y_test, gbr_pred)) rfr_rms = np.sqrt(mean_squared_error(y_test, rfr_pred)) xgb_rms = np.sqrt(mean_squared_error(y_test, xgb_pred)) ctb_rms = np.sqrt(mean_squared_error(y_test, ctb_pred)) lgb_rms = np.sqrt(mean_squared_error(y_test, lgb_pred)) lr_tr_score = lr.score(x_train, y_train) gbr_tr_score = gbr.score(x_train, y_train) rfr_tr_score = rfr.score(x_train, y_train) xgb_tr_score = xgb.score(x_train, y_train) ctb_tr_score = ctb.score(x_train, y_train) lgb_tr_score = lgb.score(x_train, y_train) lr_te_score = lr.score(x_test, y_test) gbr_te_score = gbr.score(x_test, y_test) rfr_te_score = rfr.score(x_test, y_test) xgb_te_score = xgb.score(x_test, y_test) 
ctb_te_score = ctb.score(x_test, y_test) lgb_te_score = lgb.score(x_test, y_test) # + # 'Train Score': [lr_tr_score, gbr_tr_score, rfr_tr_score, xgb_tr_score, ctb_tr_score, lgb_tr_score], # 'Test Score': [lr_te_score, gbr_te_score, rfr_te_score, xgb_te_score, ctb_te_score, lgb_te_score] # + dict_ = {'Algorithm':['LinearRegression', 'GradientBoostingRegressor', 'RandomForestRegressor', 'XGBoost', 'CatBoost', 'LightGBM'], 'RMSE_Score':[lr_rms, gbr_rms, rfr_rms, xgb_rms, ctb_rms, lgb_rms]} df = pd.DataFrame(dict_, index=[0,1,2,3,4,5]) df # + dict1 = {'Algorithm':['LinearRegression', 'GradientBoostingRegressor', 'RandomForestRegressor', 'XGBoost', 'CatBoost', 'LightGBM'], 'RMSE_Score':[lr_rms, gbr_rms, rfr_rms, xgb_rms, ctb_rms, lgb_rms], 'Train Score': [lr_tr_score, gbr_tr_score, rfr_tr_score, xgb_tr_score, ctb_tr_score, lgb_tr_score], 'Test Score': [lr_te_score, gbr_te_score, rfr_te_score, xgb_te_score, ctb_te_score, lgb_te_score]} df = pd.DataFrame(dict1, index=[0,1,2,3,4,5]) df # - lgb.fit(x_train, y_train,eval_set=[(x_test,y_test),(x_train,y_train)], verbose=1, early_stopping_rounds=100) errcb2=[] y_pred_totcb2=[] fold=StratifiedKFold(n_splits = 10) i=1 for train_index, test_index in fold.split(X,y): X_train, X_test = X.iloc[train_index], X.iloc[test_index] y_train, y_test = y.iloc[train_index], y.iloc[test_index] m2 = CatBoostRegressor(n_estimators = 1000, eval_metric = 'RMSE', od_wait = 200, learning_rate = 0.2, bootstrap_type = 'Bernoulli' ,metric_period = 100, max_depth = 8, use_best_model = True) m2.fit(X_train,y_train,eval_set=[(X_train,y_train),(X_test, y_test)], early_stopping_rounds = 200, verbose = 100,) preds=m2.predict(X_test) print("RMSE: ", np.sqrt(mean_squared_error(y_test,preds))) errcb2.append((np.sqrt(mean_squared_error(y_test,preds)))) #np.sqrt(mean_squared_error(y_test, lr_pred) #p2 = m2.predict(df_test) #y_pred_totcb2.append(p2) np.mean(errcb2) errcb2=[] y_pred_totcb2=[] fold=StratifiedKFold(n_splits = 10) i=1 for train_index, test_index in 
fold.split(X,y): X_train, X_test = X.iloc[train_index], X.iloc[test_index] y_train, y_test = y.iloc[train_index], y.iloc[test_index] m2 = CatBoostRegressor(n_estimators = 4000, eval_metric = 'RMSE', od_wait = 200, learning_rate = 0.1, bootstrap_type = 'Bernoulli' ,metric_period = 100, max_depth = 8, use_best_model = True) m2.fit(X_train,y_train,eval_set=[(X_train,y_train),(X_test, y_test)], early_stopping_rounds = 200, verbose = 100,) preds=m2.predict(X_test) print("RMSE: ", np.sqrt(mean_squared_error(y_test,preds))) errcb2.append((np.sqrt(mean_squared_error(y_test,preds)))) #np.sqrt(mean_squared_error(y_test, lr_pred) #p2 = m2.predict(df_test) #y_pred_totcb2.append(p2) np.mean(errcb2) metric = make_scorer(mean_squared_error, greater_is_better = True) # + gbr_cv = cross_val_score(gbr, X, y, scoring = metric, cv = 5) gbr_cv = [np.around(i, decimals=3) for i in gbr_cv] rfr_cv = cross_val_score(rfr, X, y, scoring = metric, cv = 5) rfr_cv = [np.around(i, decimals=3) for i in rfr_cv] lr_cv = cross_val_score(lr, X, y, scoring = metric, cv = 5) lr_cv = [np.around(i, decimals=3) for i in lr_cv] ctb_cv = cross_val_score(ctb, X, y, scoring = metric, cv = 5) ctb_cv = [np.around(i, decimals=3) for i in ctb_cv] xgb_cv = cross_val_score(xgb, X, y, scoring = metric, cv = 5) xgb_cv = [np.around(i, decimals=3) for i in xgb_cv] lgb_cv = cross_val_score(lgb, X, y, scoring = metric, cv = 5) lgb_cv = [np.around(i, decimals=3) for i in lgb_cv] # - gbr_cv1 = np.sqrt(gbr_cv) rfr_cv1 = np.sqrt(rfr_cv) lr_cv1 = np.sqrt(lr_cv) xgb_cv1 = np.sqrt(xgb_cv) ctb_cv1 = np.sqrt(ctb_cv) lgb_cv1 = np.sqrt(lgb_cv) gbr_cv1 = [np.around(i, decimals=3) for i in gbr_cv1] rfr_cv1 = [np.around(i, decimals=3) for i in rfr_cv1] lr_cv1 = [np.around(i, decimals=3) for i in lr_cv1] xgb_cv1 = [np.around(i, decimals=3) for i in xgb_cv1] ctb_cv1 = [np.around(i, decimals=3) for i in ctb_cv1] lgb_cv1 = [np.around(i, decimals=3) for i in lgb_cv1] dict_ = {'Algorithm': ['GradientBoostingRegressor', 
'RandomForestRegressor', 'LogisticRegression', 'XGBoost', 'CatBoost', 'LightGBM'], 'cv_score':[gbr_cv1, rfr_cv1, lr_cv1, xgb_cv1, ctb_cv1, lgb_cv1], 'cv_mean':[np.mean(gbr_cv1), np.mean(rfr_cv1), np.mean(lr_cv1), np.mean(xgb_cv1), np.mean(ctb_cv1), np.mean(lgb_cv1)], 'cv_std':[np.std(gbr_cv1), np.std(rfr_cv1), np.std(lr_cv1), np.std(xgb_cv1), np.std(ctb_cv1), np.std(lgb_cv1)] } df = pd.DataFrame(dict_, index = [0,1,2,3,4,5]) df
Housing.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# Requests for handling HTTP get and other requests
import requests
import time  # imported so we can sleep between requests and not get blocked
import pandas as pd

# BeautifulSoup is distributed as the package "beautifulsoup4" but imported as bs4.
# NOTE(fix): this import was commented out, yet BeautifulSoup(...) is called in
# the cells below -- it must be active or the parsing code raises NameError.
from bs4 import BeautifulSoup

# +
# if bs4 not found install it with
# # !pip install beautifulsoup4
# https://www.crummy.com/software/BeautifulSoup/
# -

# !dir  # with ! I can run command line commands

# centrs = "https://www.ss.com/lv/real-estate/flats/riga/centre/sell/"
# url = "https://www.ss.com/lv/real-estate/homes-summer-residences/riga-region/all/sell/"
url = "https://www.ss.com/lv/real-estate/flats/riga/centre/sell/"

req = requests.get(url)  # so here we make a call to webpage via HTTP GET request and get something back

req.status_code  # we could add if to check for 200

req.text[:200]

req.text.count("Dzirn")  # we could parse it ourselves but why bother when we have libraries ?

req.text.index("Carn")

type(req)

# +
# req.json() will not work since this page is not JSON!
# - req.text[req.text.index("Carn"):req.text.index("Carn")+100] # we could do parsing by hand but usually we want to use a premade parser # parser means structuring your text into some structure soup = BeautifulSoup(req.text, 'lxml') # lxml is improved parser, a little bit more lenient # soup = BeautifulSoup(req.text) # could use Python's built in HTML parser soup.title type(soup) # https://www.crummy.com/software/BeautifulSoup/bs4/doc/#find-all # https://developer.mozilla.org/en-US/docs/Web/HTML/Element/table tables = soup.find_all("table") # finds ALL elements matching our filter len(tables) # find finds just one match headline = soup.find("tr", {"id":"head_line"}) # this is a shorter way of finding by tr AND this element having particular id headline type(headline) headline.text # i could use split here but we can go deeper! headtds = headline.find_all("td") # get list of all td tags inside our headline tag only(not the whole document!) # td stands for table data # https://developer.mozilla.org/en-US/docs/Web/HTML/Element/td headtds len(headtds) # so we have 8 table data elements headline.text # this returns ALL text in headline and its children and grandchildren headtds[0].text # we do not need the first one headcolumns = [el.text for el in headtds[1:]] #.text gets us content even from children and grandchildren headcolumns # lets combine the above cells into a function which will always get us columns def getColList(soup): column_list = ["description","url"] # we decided to that we need these two column names no matter the html headline = soup.find("tr", {"id":"head_line"}) headtds = headline.find_all("td") headcolumns = [el.text for el in headtds[1:]] # this will get all column names starting with 2nd in HTML column_list += headcolumns return column_list column_names = getColList(soup) column_names # to be used later when creating our table trows = soup.find_all('tr') # tr stands for table row len(trows) # + # if i had no other choices I could find starting 
and end index by hand and hard code it # but that is liable to change between pages # - trows[9] # we coudl do it by hand trows[6] # so could take trows[6:35] downside is that this is fixed with magic numbers trows[9]["id"], trows[9].get("id") # get is safer since not all elements have ids # hardest part in this # how to filter only specific rows # we need to find something that is unique to our apartment rows but not to the extra rows # remember we id is not guaranteed, so "" gives us default value when there is no id apt_rows = [row for row in trows if row.get('id',"").startswith("tr_") and not row.get('id',"").startswith("tr_bnr") ] len(apt_rows) apt_rows[-1] apt_rows[0] # lets make a function from the above doodle and make it work on most pages on SS def getRowList(soup): trows = soup.find_all('tr') aprows = [row for row in trows if row.get('id',"").startswith("tr_") and not row.get('id',"").startswith("tr_bnr") ] return aprows apt_rows = getRowList(soup) len(apt_rows) first_ad = apt_rows[0] first_ad row_tds = first_ad.find_all('td') # get all table data elements from first ad/row len(row_tds) row_tds row_tds[0].text row_tds[1].text row_tds[1].attrs # we can see elements attributes img = row_tds[1].find("img") img img.get("src") a = row_tds[1].find('a') # a tag is called anchor tag a a.attrs a.get('href'), a['href'] row_tds[2].text # 3rd element has the description row_tds[-1].text # last element has the price column_names # from row get dictionary with values def getRow(row,colist=column_names): row_tds = row.find_all('td') rowDict = {} if len(row_tds) < 3: # a little sanity check print("Hmm bad row") return rowDict # we start with description rowDict[colist[0]] = row_tds[2].text # so the big assumption is that we always get description in 3rd column # then url rowDict[colist[1]] = "https://ss.com" + row_tds[1].find('a').get('href') for td,key in zip(row_tds[3:],colist[2:]): rowDict[key] = td.text return rowDict apt_rows[0] # first row of ads 
getRow(apt_rows[0]) getRow(first_ad) # so if we know how to work on single row then we can do process multiple rows def getRows(rowlist,colist=column_names): return [getRow(row, colist=colist) for row in rowlist] # so return a list of dictionaries row_ads = getRows(apt_rows) row_ads[-3:] # last 3 ads type(row_ads) # list of dictionaries for each ad dtemp = pd.DataFrame(row_ads, columns=column_names) dtemp.shape dtemp.head() dtemp.to_excel("rigas_rajons30.xlsx") # so with this function I can get full dataframe from a single page on ss.com not only apartments def getDFfromURL(url): # print("getting data from", url) req = requests.get(url) if req.status_code != 200: print("Request Fail with", req.status_code) return None # maybe return empty dataframe here soup = BeautifulSoup(req.text, 'lxml') column_names = getColList(soup) rowlist = getRowList(soup) rows = getRows(rowlist,colist=column_names) return pd.DataFrame(rows, columns=column_names) centrs = url centrs imanta = "https://www.ss.com/lv/real-estate/flats/riga/imanta/sell/" idf = getDFfromURL(imanta) idf.head() df = getDFfromURL(centrs) # this will involve calling web page url df = getDFfromURL(url) df.shape df.shape df.head() # + # https://developer.mozilla.org/en-US/docs/Web/HTML/Element/a anchors = soup.find_all("a") len(anchors) # anchors are popular in pages # - anchors[:5] navs = [anchor for anchor in anchors if anchor.get("name") == "nav_id"] # get will get us None if no "name" exists len(navs) navs navs[0] navs[0].attrs navs[0]['href'] # could use regular expression here but we can use split afterpage = navs[0]['href'].split("/page")[-1] # -1 might be safer than 1 in case there is region with name page afterpage beforedot = afterpage.split(".html")[0] beforedot lastpage = int(beforedot) lastpage # + # so now we put everything we just did in a single function # - def getAllLocalUrls(url): """Get a list of all urls including paginated pages""" results = [url] # default is just the url if no extra pages 
found, teiksim Bolderājai... req = requests.get(url) if req.status_code != 200: print(f"Bad response! {req.status_code}") return [] soup = BeautifulSoup(req.text, 'lxml') # we just need a one element prevanchor = soup.find('a', {"rel":"prev"}) # find finds first match only if prevanchor == None: # means there is only one page of ads return results href = prevanchor.attrs.get('href') lastPageNum = int(href.split('/page')[-1].split('.html')[0]) print("Last page is",lastPageNum) nurls = [f"{url}page{n}.html" for n in range(2,lastPageNum+1)] results += nurls return results url centrs = url url local_urls = getAllLocalUrls(url) len(local_urls),local_urls[:3],local_urls[-1] local_urls dlast = getDFfromURL(local_urls[-1]) dlast.shape dlast def get_all_ads_df(start_url, save_excel_path=None): df_list=[] # so we will save our dataframes in a list local_urls = getAllLocalUrls(start_url) for url in local_urls: print(f"Gathering data from {url}") df_list.append(getDFfromURL(url)) time.sleep(0.3) # we need this to play nice! to avoid rate limit or IP ban!! 
# https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.concat.html big_df = pd.concat(df_list) # then make a big dataframe from all the small dataframes if save_excel_path: big_df.to_excel(save_excel_path) return big_df centrs = url centrs centra_df = get_all_ads_df(centrs, save_excel_path="centrs_dzivokli_nov12.xlsx") url riga_region = get_all_ads_df(url, "riga_region_sep28.xlsx") riga_region.shape riga_region.head(10) riga_region.tail(10) agens = "https://www.ss.com/lv/real-estate/flats/riga/agenskalns/sell/" df = get_all_ads_df(agens, "agens_27_09.xlsx") df.head() centra_df df = centra_df # alias df.shape df.shape df.columns df.info() df = get_all_ads_df(centrs, "centrs_27_09.xlsx") df = centra_df df.shape df.head() df.tail() df.shape #we can do more post processing - feature engineering using existing columns df[['Floor','MaxFloor']] = df.Stāvs.str.split("/",expand=True) df.head() df = riga_region # just a shortcut df.info() df.head() df.info() df["price"] = df.Cena.str.split(' ').str[0] # create a new column from price df.head() df.price = df.price.str.replace(",","").astype('int32') # so clean string of , and then convert to integer df.head() df.info() df["floor"] = df.Floor.astype('int32') df.m2 = df.m2.astype('int32') df.info() df.MaxFloor = df.MaxFloor.astype('int32') df.head() df.head() sorted_df = df.sort_values(by="price",ascending=False) sorted_df.head() df.sort_values(by="price",ascending=True).head() df.describe() df[df.floor > 2] df.sort_values(by="MaxFloor",ascending=False).head() df.head() df.columns grouped_by_street = df.groupby(by="Iela")['price'].agg(["count", "min", "max", "mean", ]) grouped_by_street grouped_by_floor = df.groupby(by="floor")['price'].agg(["count", "min", "max", "mean", ]) grouped_by_floor len(grouped_by_floor) grouped_by_floor.hist(bins=len(grouped_by_floor)) grouped_by_pag = df.groupby(by="Pagasts")['price'].agg(["count", "min", "max", "mean", ]) grouped_by_pag grouped_by_pag.hist() 
grouped_by_pag.plot(x="Pagasts", y="mean") df.to_excel("c:/temp/my_apartments.xlsx") # i could this with absolute path from datetime import datetime as dt now = dt.now() now.month, now.day, now.hour, now.second # url = "https://www.ss.com/lv/real-estate/flats/riga/sarkandaugava/hand_over/" url = centrs region = url.split("riga/")[-1].split("/")[0] now = dt.now() save_path = f"../data/{region}_{now.day}_{now.month}__{now.hour}_{now.second}.xlsx" save_path # + url = "https://www.ss.com/lv/real-estate/flats/riga/sarkandaugava/hand_over/" region = url.split("riga/")[-1].split("/")[0] now = dt.now() save_path = f"../data/{region}_{now.day}_{now.month}__{now.hour}_{now.second}.xlsx" df = get_all_ads_df(url, save_path) # - url = "https://www.ss.com/lv/transport/cars/vaz/riga_f/" df = get_all_ads_df(url, "../data/vaz_18_03.xlsx") # # TODO # # Try with different starting address not only centrs # ## Maybe combine regions # ## See how it would work with maybe cars # ## Data engineering make new columns based on existing ones, clean some columns # ### Changing floors 2/6 to columns 2 and 6, clean up Euro signs # ## Sorting, Describing, Grouping by regions etc df = pd.read_excel("centrs_10_03.xlsx") df.head() df.shape df["price"] = df.Cena.str.split(' ').str[0] df.head() df.price = df.price.str.replace(",","").astype('int32') # so clean string of , and then convert to integer df.head() df.price.hist() df[['Floor','MaxFloor']] = df.Stāvs.str.split("/",expand=True) df.info() #typecasting example df.Floor = df.Floor.astype('int32') df.MaxFloor = df.MaxFloor.astype('int32') df.head() d23 = df[df.MaxFloor > 20] d23 df.describe().T # T is for transposing rows and columns df.describe().T.to_csv("center_stats.csv") import plotly.express as px fig = px.histogram(df, x="price", nbins=100) fig.show() import seaborn as sns # seaborn sits on top of matplotlib with better defaults import matplotlib.pyplot as plt df.info() # + # Basic correlogram sns.pairplot(df) # + # Basic correlogram 
sns.pairplot(df) # - volvo_url = "https://www.ss.com/lv/transport/cars/volvo/sell/" volvos = get_all_ads_df(volvo_url, "../data/volvo_03_18.xlsx") volvos = pd.read_excel("volvo_270921.xlsx") volvos.head() volvos.shape volvos.head(10) gr_auto['Price'] = volvos["price"] = volvos.Cena.str.split(' ').str[0] volvos.price = volvos.price.str.replace(",","").astype('int32') volvos.head() # + gr_auto = volvos.groupby(["Gads","Modelis"])["price"].agg(["count", "min", "max", "mean", ]) #.head(n = 10) gr_auto # - gr_auto.loc[gr_auto.index.get_level_values(0).str.contains("2015")] # how to filter multi level index gr_auto.loc[gr_auto.index.get_level_values(1).str.contains("S60")] gr_auto.loc[gr_auto.index.get_level_values(1).str.contains("S60")].to_excel("S60.xlsx") print(dt.today()) suzuki_url = "https://www.ss.com/lv/transport/cars/suzuki/" # sdf = get_all_ads_df(suzuki_url, f"suzuki_{dt.today()}.xlsx") sdf = get_all_ads_df(suzuki_url, f"suzuki.xlsx") sdf.head() sdf.info() sdf["Tilp."] = sdf["Tilp."].str.replace("\w","", regex=True) sdf.head() sdf["Nobrauk."] = sdf["Nobrauk."].str.replace("\D","", regex=True) # so no digits removed sdf.head() sdf["Cena"] = sdf["Cena"].str.replace("\D","", regex=True) # so no digits removed sdf.head() sdf = sdf.dropna(how="any", axis=0) sdf sdf = sdf[sdf["Nobrauk."].str.len() > 0] # got rid of those empty rows finally sdf sdf["Cena"] = sdf["Cena"].astype("float32") gr_auto = sdf.groupby(["Gads","Modelis"])["Cena"].agg(["count","min", "max", "mean"]) gr_auto
Diena_15_Web_Scraping/Web Scraping Apartments.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # <div style='background: #FF7B47; padding: 10px; border: thin solid darblue; border-radius: 5px; margin-bottom: 2vh'> # # # Session 01 - Notebook # # Like most session notebooks in this course, this notebook is divided into two parts. Part one is a 'manual' that will allow you to code along with the new code that we introduce at the beginning of each session. The second part is the actual lab/assignment part, where you will work through a few practical tasks and write small but useful programs. # <div style='background: #FF7B47; padding: 10px; border: thin solid darblue; border-radius: 5px'> # # ## Part 1 - Manual # <div style='background: lightsalmon; padding: 10px; border: thin solid darblue; border-radius: 5px'> # A.1 - "hello, world!" # + # your code here # - # <div style='background: lightsalmon; padding: 10px; border: thin solid darblue; border-radius: 5px'> # A.2 - basic datatypes - strings and numeric variables # + # your code here # - # <div style='background: lightsalmon; padding: 10px; border: thin solid darblue; border-radius: 5px'> # A.3 - basic operations # + # your code here # - # <div style='background: lightsalmon; padding: 10px; border: thin solid darblue; border-radius: 5px'> # A.4 - advanced data types to store collections of data # + # your code here - lists # + # your code here - dictionaries # - # <div style='background: lightsalmon; padding: 10px; border: thin solid darblue; border-radius: 5px'> # A.5 - for loops # + # your code here # - # <div style='background: lightsalmon; padding: 10px; border: thin solid darblue; border-radius: 5px'> # A.6 - Python If ... 
Else # + # your code here # - # <div style='background: lightsalmon; padding: 10px; border: thin solid darblue; border-radius: 5px'> # A.6 - Functions # + # Your code here # - # <div style='background: #6A9EB4; padding: 10px; border: thin solid darblue; border-radius: 5px'> # # ## Part 2 - Lab # # During today's lab you will write code that will help the College to perform the house lottery more efficiently and assist the house administrations in a variety of tasks. # <div style='background: #ADCAD6; padding: 10px; border: thin solid darblue; border-radius: 5px'> # # ## Task #1 - automatize the house lottery # # In the precirculated template folder, you will find the file students.csv with all rising sophomores that will enter the house lottery, i.e. they will get assigned to one of the twelve undergraduate houses. So far, the college has done this task manually but they hope that you can help them to automtize that process. Please load the csv and add another column 'house_id'. Pyhton's csv package will come in handy to load the csv file and treat each row as a list. Having loaded the file, add a random house id to each student and save that information in a new csv file. You might find the python package 'random' quite useful to automatize the lottery process. We've imported the package for you and provided an example. 
# # <div style='background: #ADCAD6; padding: 10px; border: thin solid darblue; border-radius: 5px'> # Examples and precirculated code: # + # house ids lookup tables house_to_id = { 'Adams House': 0, 'Cabot House': 1, 'Currier House' : 2, 'Dunster House': 3, 'Eliot House': 4, 'Kirkland House': 5, 'Leverett House': 6, 'Lowell House': 7, 'Mather House': 8, 'Pforzheimer House':9, 'Quincy House': 10, 'Winthrop House': 11 } id_to_house = { 0: 'Adams House', 1: 'Cabot House', 2: 'Currier House', 3: 'Dunster House', 4: 'Eliot House', 5: 'Kirkland House', 6: 'Leverett House', 7: 'Lowell House', 8: 'Mather House', 9: 'Pforzheimer House', 10: 'Quincy House', 11: 'Winthrop House' } # - # importing useful python packages import random import csv # + # some example code snippets how to load a csv file and how to write into one # read file_read = open("data/students.csv", "r") reader = csv.reader(file_read) for row in reader: print(row) break # breaking after first element feel free to check out the entire data stucture file_read.close() # write - notice that the file doesn't have to exist beforehand! csv write will create the file automatically, which is very useful! file_write = open('students_with_house.csv', 'w', newline='') writer = csv.writer(file_write) # we just write one row here. It might be useful to put this line into a loop when automatizing things writer.writerow(['first_name', 'last_name', 'HUID','email', 'house_id']) file_write.close() # - # example - generate a random integer between 1 and 10. example_random = random.randint(1,10) print(example_random) # <div style='background: #ADCAD6; padding: 10px; border: thin solid darblue; border-radius: 5px'> # Your turn - load the csv file, create a random number for each student between 0-11 and store all students in a new csv file with their respective house assignments. A for loop might come in handy. 
# +
# your code here

# +
# solution
# reader & writer
# context managers guarantee both files are closed even if a row raises mid-loop
with open("data/students.csv", "r") as file_read, \
     open('data/students_with_house.csv', 'w', newline='') as file_write:
    reader = csv.reader(file_read)
    writer = csv.writer(file_write)
    for student_line in reader:
        # lottery: append a uniformly random house id (0-11) to each student row
        student_line.append(random.randint(0,11))
        # print(student_line)
        writer.writerow(student_line)
# -

# <div style='background: #ADCAD6; padding: 10px; border: thin solid darkblue; border-radius: 5px'>
# Write a small program that makes sure that you've successfully created and populated a csv with all students and their assigned houses.

# +
# your code here
# bug fix: the solution above writes to data/students_with_house.csv, but this
# check previously opened students_with_house.csv - the header-only example
# file in the working directory - so it never inspected the real output
with open('data/students_with_house.csv', 'r') as file_test:
    for line in csv.reader(file_test):
        print(line)
# -

# <div style='background: #ADCAD6; padding: 10px; border: thin solid darkblue; border-radius: 5px'>
#
# ## Task #2 - generate a file for a house on demand
#
# OK, you've helped the college out with the lottery but now the house administrators are struggling a bit because they have all 2000 students in one file but only care about the students that were assigned to their particular house. Write a small program that solves that task on demand and generates a csv for them with only their students. You can write a program that does this task on demand for a given house, or you can generate a csv for each house in advance.
#

# +
# your code here

# +
# on demand solution

# pick a house
house = 'Adams House'
house_id = house_to_id[house]
print(house_id)

# bug fix: the lottery solution saved its output under data/, so read it there
# (students_with_house.csv in the working directory is the header-only example)
with open('data/students_with_house.csv', 'r') as file_read, \
     open(house + '_students.csv', 'w', newline='') as file_write:
    reader = csv.reader(file_read)
    writer = csv.writer(file_write)
    for row in reader:
        print(row)
        # check if house id (5th column) is the same as the selected house
        # NOTE(review): if the csv ever carries a textual header in column 4,
        # int() will raise here - TODO confirm the file layout
        if int(row[4]) == house_id:
            writer.writerow(row)

# +
# automated solution for all houses

# write a function
def gen_csv(house):
    """Write <house>_students.csv containing only the students assigned to *house*.

    Parameters
    ----------
    house : str - a house name that appears in the house_to_id lookup table
    """
    house_id = house_to_id[house]
    # 'with' closes both files even when a malformed row raises mid-copy
    with open('data/students_with_house.csv', 'r') as file_read, \
         open(house + '_students.csv', 'w', newline='') as file_write:
        reader = csv.reader(file_read)
        writer = csv.writer(file_write)
        for row in reader:
            # check if house id is the same as selected house
            if int(row[4]) == house_id:
                writer.writerow(row)
# -

# now loop over all houses and call the function for each house
# all_houses = [a,b,c,d]
for house in house_to_id:
    gen_csv(house)

# <div style='background: #CBE0A4; padding: 10px; border: thin solid darkblue; border-radius: 5px'>
#
# ## Bonus Tasks
#
# 1. calculate vacant rooms per house
# 2. write a program that computes the number of students assigned per house in a given csv
# 3. write a function that checks whether there are problems with the numbers of students assigned to each house
# 4. write code that assigns students randomly but in such a way that there are no capacity issues.

# <div style='background: #CBE0A4; padding: 10px; border: thin solid darkblue; border-radius: 5px'>
# Some house administrators have complained that the list of students is too long to accommodate all new sophomores assigned to their houses. Since some houses are bigger and others are smaller, we cannot simply generate integers and get away with the randomly generated number of students in each house. Rather, we have to check more carefully whether there is still capacity.
# Below, find two useful dictionaries that should help you to solve this task.

# +
# bonus is house with exact capacities
house_capacity = {
    'Adams House': 411,
    'Cabot House': 362,
    'Currier House' : 356,
    'Dunster House': 428,
    'Eliot House': 450,
    'Kirkland House': 400,
    'Leverett House': 480,
    'Lowell House': 450,
    'Mather House': 426,
    'Pforzheimer House':360,
    'Quincy House': 420,
    'Winthrop House': 500
}

# number of occupied rooms after seniors have left
house_occupied = {
    'Adams House': 236,
    'Cabot House': 213,
    'Currier House' : 217,
    'Dunster House': 296,
    'Eliot House': 288,
    'Kirkland House': 224,
    'Leverett House': 233,
    'Lowell House': 242,
    'Mather House': 217,
    'Pforzheimer House':195,
    'Quincy House': 253,
    'Winthrop House': 310
}

# +
# collect the house names once; iterating the dict yields its keys in
# insertion order, so this matches the tables above
house_names = [house for house in house_occupied]
print(house_names)
# -

# <div style='background: #CBE0A4; padding: 10px; border: thin solid darkblue; border-radius: 5px'>Let's start by writing a small program that helps us to calculate the vacant rooms for each house. Try to use a dictionary structure that contains all information for each house. Feel free to also write a few lines that check how many vacant rooms there are in total.
# +
# vacant rooms per house: capacity minus rooms still occupied
vacant_rooms = {}
for house in house_occupied:
    vacant_rooms[house] = house_capacity[house] - house_occupied[house]
print(vacant_rooms)

# +
# check how many rooms we have in total
total = sum(vacant_rooms.values())
print(total)
# -

# <div style='background: #CBE0A4; padding: 10px; border: thin solid darkblue; border-radius: 5px'>Let's now write a small function that calculates the number of students assigned per house with our old method and returns a dictionary with that information

def calculate_students_per_house(filename):
    """Count how many students were assigned to each house.

    Parameters
    ----------
    filename : str - csv file whose 5th column (index 4) holds a numeric house id

    Returns
    -------
    dict mapping house name -> number of students assigned to it
    """
    counts = {}
    # bug fix: use a context manager so the file is closed even if a
    # malformed row raises mid-read (the old open()/close() pair leaked)
    with open(filename, "r") as file_read:
        for row in csv.reader(file_read):
            house_name = id_to_house[int(row[4])]
            counts[house_name] = counts.get(house_name, 0) + 1
    return counts

# bug fix: the lottery output lives under data/, not in the working directory
print(calculate_students_per_house('data/students_with_house.csv'))

# <div style='background: #CBE0A4; padding: 10px; border: thin solid darkblue; border-radius: 5px'>Next, let's check by how much we were off for each house with our random approach.

def house_assignment_check(assignements_per_house):
    """Report, per house, whether more students were assigned than rooms are vacant.

    Parameters
    ----------
    assignements_per_house : dict mapping house name -> number of assigned students
    """
    for house in house_capacity:
        # robustness: a house that received zero students is simply absent from
        # the counts dict, so default to 0 instead of raising KeyError
        difference = vacant_rooms[house] - assignements_per_house.get(house, 0)
        if difference < 0:
            print(f'There is a problem with {house} - we have assigned {abs(difference)} too many students')
        else:
            print(f'no assignment problems with {house}')

assigned_students_random = calculate_students_per_house('data/students_with_house.csv')
house_assignment_check(assigned_students_random)

# <div style='background: #CBE0A4; padding: 10px; border: thin solid darkblue; border-radius: 5px'>Finally, let's write a function that assigns houses more carefully. We can still generate random integers to assign a house, but we need to check whether that house still has capacity.
# For that reason, please create a function called assign_house() that you include into the code that we've written before

# +
# solution
def assign_house():
    """Draw random house ids until one with spare capacity is found.

    Side effects: increments house_occupied for the chosen house and prints a
    line per attempt. Returns the chosen house id (0-11).

    Raises
    ------
    RuntimeError
        if every house is already full (the previous recursive version would
        instead have crashed with a RecursionError in that case).
    """
    # bug fix: iterate instead of recursing - with ~2000 students the recursive
    # retry could exceed Python's recursion limit once most houses fill up
    while any(house_capacity[name] - house_occupied[name] > 0 for name in house_capacity):
        house_id = random.randint(0,11)
        house_name = id_to_house[house_id]
        # check capacity
        if (house_capacity[house_name] - house_occupied[house_name] > 0):
            # update occupancy
            house_occupied[house_name] += 1
            # bug fix: the old message read 'assigned student toAdams House'
            print(f'assigned student to {house_name}')
            return house_id
        print(f'no more capacity in {house_name}')
    raise RuntimeError('all houses are full - cannot assign another student')


# +
# reader & writer
# context managers close both files even if assign_house() raises mid-run
with open("data/students.csv", "r") as file_read, \
     open('data/students_with_house_correct_capacity.csv', 'w', newline='') as file_write:
    reader = csv.reader(file_read)
    writer = csv.writer(file_write)
    for row in reader:
        correct_house = assign_house()
        row.append(correct_house)
        writer.writerow(row)

# +
# let's now check whether this worked better using our small function from before
# bug fix: the file was just written under data/, but was previously re-read
# without the data/ prefix, which raises FileNotFoundError
students_per_house_correct = calculate_students_per_house('data/students_with_house_correct_capacity.csv')
students_per_house_correct
house_assignment_check(students_per_house_correct)
# -
Session_01/solution/Session_01_notebook_master.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # name: python3 # --- # + colab={"base_uri": "https://localhost:8080/"} id="3rTbXuHMIf7S" executionInfo={"status": "ok", "timestamp": 1631388959659, "user_tz": 240, "elapsed": 2947, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a/default-user=s64", "userId": "13953621006807715822"}} outputId="1bb46376-ce0a-4cf6-dbd3-d23d1e012267" import pandas as pd import numpy as np import os import random import progressbar import multiprocessing import pickle import matplotlib.pyplot as plt data_path = 'gdrive/My Drive/Summer Research/hmp2-data-stanford/Preprocessed/' data_choices = { data_path: '', data_path+'Denoised/': 'Denoised', data_path+'WT Domain/': 'T*S', data_path+'Normalized/': 'Normalized', data_path+'Normalized/'+'Denoised/': 'Denoised Normalized', data_path+'Normalized/'+'WT Domain/': 'T*S Normalized', } hmp_datas = ['cytokine_abundance','gut_16s_abundance','Lipidomics', #'metabolome_abundance', 'Metabolomics','nares_16s_abundance', #'proteome_abundance', 'Proteomics','RNAseq_abundance', 'Targ.proteomics', 'Transcriptomics_VST_excl_3participants'] from sklearn.ensemble import RandomForestClassifier from sklearn.metrics import (plot_confusion_matrix, plot_precision_recall_curve, plot_roc_curve, auc) from sklearn.model_selection import cross_validate, StratifiedKFold # #!pip install delayed # #!pip uninstall scikit-learn # #!pip install scikit-learn # #!pip install -U imbalanced-learn #from imblearn.over_sampling import (ADASYN, BorderlineSMOTE, KMeansSMOTE, # RandomOverSampler, SMOTE, SMOTEN, SMOTENC, # SVMSMOTE) #from imblearn.under_sampling import (AllKNN, ClusterCentroids, # CondensedNearestNeighbour, # EditedNearestNeighbours, # InstanceHardnessThreshold, # NearMiss, NeighbourhoodCleaningRule, # OneSidedSelection, RandomUnderSampler, # 
#                                     RepeatedEditedNearestNeighbours,
#                                     TomekLinks)
#from imblearn.combine import SMOTEENN, SMOTETomek
from imblearn.pipeline import Pipeline

# !pip install proc
import seaborn as sb
from statistics import mean, stdev

from google.colab import drive
drive.mount('/content/gdrive')

# + [markdown] id="3WugkQ_xbiRz"
# Oversampling and undersampling

# + id="fOo8Yclhbker" executionInfo={"status": "ok", "timestamp": 1631388959660, "user_tz": 240, "elapsed": 5, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a/default-user=s64", "userId": "13953621006807715822"}}
def resampling(args):
    """Return an imbalanced-learn resampler for the given method name.

    args : str - 'SMOTEENN' or 'SMOTETomek'.

    NOTE(review): SMOTEENN / SMOTETomek are only imported in the commented-out
    block above, so calling this currently raises NameError; any other `args`
    value leaves `resampler` unbound (UnboundLocalError) - confirm intent.
    """
    if args == 'SMOTEENN':
        resampler = SMOTEENN(n_jobs=-1)
    elif args == 'SMOTETomek':
        resampler = SMOTETomek(n_jobs=-1)
    return resampler


# + [markdown] id="XTrcf7W2JzW_"
# Random Forest model

# + id="9x-N1YJTJ0rv" executionInfo={"status": "ok", "timestamp": 1631388959660, "user_tz": 240, "elapsed": 4, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a/default-user=s64", "userId": "13953621006807715822"}}
def RFModel(X, y, cv):
    """Cross-validate a 100-tree random forest on (X, y).

    cv : an sklearn splitter (run_model passes a 10-fold StratifiedKFold).
    Returns the cross_validate dict: the six requested score arrays plus the
    fitted estimator of every fold (return_estimator=True).
    """
    model = RandomForestClassifier(n_estimators=100, criterion='gini', max_depth=None, warm_start=True)
    #K-fold Cross Validation
    scores = cross_validate(model, X, y, cv=cv, scoring=('accuracy', 'balanced_accuracy', 'precision', 'recall', 'roc_auc', 'f1'), n_jobs=-1, verbose=0, return_estimator=True)
    return scores


# + id="RM20f6voMTSV" executionInfo={"status": "ok", "timestamp": 1631388959817, "user_tz": 240, "elapsed": 161, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a/default-user=s64", "userId": "13953621006807715822"}}
def metrics(scores, X, y, cv, resampling_method, data_choice, hmp_data_choice):
    """Write per-fold CV scores to csv and save ROC / PR / confusion-matrix plots.

    scores : dict returned by RFModel (must contain per-fold estimators)
    cv : the same splitter (same random_state) used to produce `scores`, so
         cv.split(X, y) reproduces the folds in the same order
    resampling_method : str - currently unused in this function
    data_choice, hmp_data_choice : str - components of the output file names
    """
    # `dir` shadows the builtin of the same name (left unchanged here)
    dir = 'gdrive/My Drive/Summer Research/Figures/HMP Data/Random Forest/'
    file_name = hmp_data_choice + '/' + data_choice
    try:
        os.makedirs(dir+hmp_data_choice+'/')
    except FileExistsError:
        pass
    # drop the non-score entries so only numeric columns end up in the csv
    rem_list = ['estimator', 'fit_time', 'score_time']
    csv_scores = dict([(key, val) for key, val in scores.items() if key not in rem_list])
    df = pd.DataFrame.from_dict(csv_scores)
    df.to_csv(dir+file_name+' Metrics.csv', index=False)
    #TODO: generate PR, ROC, Confusion matrix graphs
    tprs = []
    aucs = []
    mean_fpr = np.linspace(0, 1, 100)
    # one flattened 2x2 confusion matrix per fold; width 10 assumes n_splits=10
    cm = np.zeros((4,10))
    fig, ax = plt.subplots(figsize=(10,10))
    fig2, ax2 = plt.subplots(figsize=(10,10))
    fig3, ax3 = plt.subplots(figsize=(10,10))
    fig4, ax4 = plt.subplots(figsize=(10,10))
    for i, (train, test) in enumerate(cv.split(X, y)):
        # evaluate fold i's fitted estimator on fold i's held-out samples
        viz = plot_roc_curve(scores['estimator'][i], X[test], y[test], name='ROC fold {}'.format(i), alpha=0.3, lw=1, ax=ax)
        interp_tpr = np.interp(mean_fpr, viz.fpr, viz.tpr)
        interp_tpr[0] = 0.0
        tprs.append(interp_tpr)
        aucs.append(viz.roc_auc)
        p = plot_precision_recall_curve(scores['estimator'][i], X[test], y[test], name='P v. R fold {}'.format(i), alpha=0.5, lw=1.5, ax=ax2)
        c = plot_confusion_matrix(scores['estimator'][i], X[test], y[test], normalize='all', ax=ax4)
        cm[:,i] = np.array(c.confusion_matrix).reshape(4,)
    plt.close(fig=fig4)
    #ROC Curve
    ax.plot([0, 1], [0, 1], linestyle='--', lw=2, color='r', label='Chance', alpha=.8)
    mean_tpr = np.mean(tprs, axis=0)
    mean_tpr[-1] = 1.0
    mean_auc = auc(mean_fpr, mean_tpr)
    std_auc = np.std(aucs)
    ax.plot(mean_fpr, mean_tpr, color='b', label=r'Mean ROC (AUC = %0.2f $\pm$ %0.2f)' % (mean_auc, std_auc), lw=2, alpha=.8)
    std_tpr = np.std(tprs, axis=0)
    tprs_upper = np.minimum(mean_tpr + std_tpr, 1)
    tprs_lower = np.maximum(mean_tpr - std_tpr, 0)
    ax.fill_between(mean_fpr, tprs_lower, tprs_upper, color='grey', alpha=.2, label=r'$\pm$ 1 std. dev.')
    ax.set(xlim=[-0.05, 1.05], ylim=[-0.05, 1.05], title="ROC Curve")
    ax.legend(loc="lower right")
    fig.savefig(dir+file_name+' ROC.png', bbox_inches='tight')
    plt.close(fig=fig)
    #PR Curve
    ax2.set(xlim=[-0.05, 1.05], ylim=[-0.05, 1.05], title="Precision v. Recall Curve")
    ax2.legend(loc="lower left")
    fig2.savefig(dir+file_name+' PR.png', bbox_inches='tight')
    plt.close(fig=fig2)
    #Confusion Matrix
    # per-cell mean and standard deviation across the folds
    c1, c2, c3, c4 = cm[0,:], cm[1,:], cm[2,:], cm[3,:]
    means = np.array([[mean(c1), mean(c2)],[mean(c3), mean(c4)]])
    stds = np.array([[stdev(c1), stdev(c2)],[stdev(c3), stdev(c4)]])  # NOTE(review): unused
    labels = np.array([["{:.2%} $\pm$ {:.2%}".format(mean(c1), stdev(c1)), "{:.2%} $\pm$ {:.2%}".format(mean(c2), stdev(c2))], ["{:.2%} $\pm$ {:.2%}".format(mean(c3), stdev(c3)), "{:.2%} $\pm$ {:.2%}".format(mean(c4), stdev(c4))]])
    plt.figure(figsize=(12,8))
    g = sb.heatmap(100*means, fmt='', annot=labels, cmap='Greens', xticklabels=['Predicted IS', 'Predicted IR'], yticklabels=['IS', 'IR'], ax=ax3, cbar_kws={'format': '%.0f%%'})
    g.set_yticklabels(labels=g.get_yticklabels(), va='center')
    g.set_title('Confusion Matrix')
    fig3.savefig(dir+file_name+' Confusion Matrix.png', bbox_inches='tight')
    plt.close(fig=fig3)
    plt.close('all')


# + id="RNIP13mDHevO" executionInfo={"status": "ok", "timestamp": 1631388959818, "user_tz": 240, "elapsed": 5, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a/default-user=s64", "userId": "13953621006807715822"}}
def run_model(data_choice, hmp_datas):
    """Load one preprocessed HMP csv, cross-validate the forest, save the metrics.

    data_choice : str - directory key into the global data_choices dict
    hmp_datas : str - a single dataset name (csv basename) in that directory
    """
    cv = StratifiedKFold(n_splits=10, shuffle=True, random_state=1)
    resampling_method = 'SMOTETomek'
    df = pd.read_csv(data_choice+hmp_datas+'.csv', index_col=False).drop(['SampleID'], axis=1)
    #Get rid of Unknown IR_IS_classifications, encode IS as 0, IR as 1
    # NOTE(review): assigning the row-dropped Series back through .loc aligns on
    # the index and leaves NaN at the 'Unknown' positions; the .fillna(0) below
    # then encodes those rows as class 0 (IS) instead of removing them - confirm
    df.loc[:,'IR_IS_classification'] = (df.loc[:,'IR_IS_classification']).drop([i for i in range(len(df)) if df['IR_IS_classification'][i] == 'Unknown'] , axis=0).replace({'IS':0, 'IR':1})
    #Remove blank entries
    remove_blanks = [i for i in range(len(df)) if df['Age'][i] == '' or df['BMI'][i] == '' or df['SSPG'][i] == '']
    df.loc[:,['Age', 'BMI', 'SSPG']] = df.loc[:,['Age', 'BMI', 'SSPG']].drop(remove_blanks, axis=0)
    #Convert Race to numbers (one indicator column per category)
    is_asian = [int(df['Race'][i] == 'A') for i in range(len(df))]
    is_black = [int(df['Race'][i] == 'B') for i in range(len(df))]
    is_cauc = [int(df['Race'][i] == 'C') for i in range(len(df))]
    is_hisp = [int(df['Race'][i] == 'H') for i in range(len(df))]
    df['Asian?'], df['Black?'], df['Caucasian?'], df['Hispanic?'] = pd.DataFrame(is_asian), pd.DataFrame(is_black), pd.DataFrame(is_cauc), pd.DataFrame(is_hisp)
    #Convert Sex to numbers
    is_m = [int(df['Sex'][i] == 'M') for i in range(len(df))]
    is_f = [int(df['Sex'][i] == 'F') for i in range(len(df))]
    df['Male?'], df['Female?'] = pd.DataFrame(is_m), pd.DataFrame(is_f)
    df = df.drop(['Race', 'Sex'], axis=1).fillna(0)
    X = np.array(df.drop(['IR_IS_classification'], axis=1))
    y = np.array(df.loc[:,'IR_IS_classification'])
    scores = RFModel(X, y, cv)
    metrics(scores, X, y, cv, resampling_method, data_choices[data_choice], hmp_datas)


# + colab={"base_uri": "https://localhost:8080/"} id="l5EOYQZYHhSF" executionInfo={"status": "ok", "timestamp": 1631389121047, "user_tz": 240, "elapsed": 161232, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a/default-user=s64", "userId": "13953621006807715822"}} outputId="0cc76068-e267-4eb2-beab-6ad4497d78ec"
# Run every (preprocessing variant, dataset) pair as its own process; this
# cell launches the first half of the datasets, the next cell the second half.
hmp_datas = ['cytokine_abundance', 'gut_16s_abundance', 'Lipidomics', 'Metabolomics', 'nares_16s_abundance', 'Proteomics', 'RNAseq_abundance', 'Targ.proteomics', 'Transcriptomics_VST_excl_3participants']

widgets = [' [', progressbar.Timer(format= 'elapsed time: %(elapsed)s'), '] ', progressbar.Bar('#'),' (', progressbar.ETA(), ') ', progressbar.Counter(format='%(value)d/%(max_value)d') ]
# 6 preprocessing variants (data_choices) x 9 datasets = total units of work
bar = progressbar.ProgressBar(max_value=6*len(hmp_datas), widgets=widgets).start()
count = 0
processes = list()
for i in data_choices:
    for j in range(len(hmp_datas)//2):
        p = multiprocessing.Process(target=run_model, args=(i, hmp_datas[j]))
        processes.append(p)
        p.start()
for p in processes:
    # advance the progress bar as each worker finishes
    p.join()
    count += 1
    bar.update(count)

# + id="vjfcz9ZSHoj3" colab={"base_uri": "https://localhost:8080/"} executionInfo={"status": "ok", "timestamp": 1631389492714, "user_tz": 240, "elapsed": 371675, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a/default-user=s64", "userId": "13953621006807715822"}} outputId="9359ee26-6d40-4787-f1dc-35372496c385"
# second half of the datasets; `count` keeps accumulating into the same bar
processes.clear()
for i in data_choices:
    for j in range(len(hmp_datas)//2, len(hmp_datas)):
        p = multiprocessing.Process(target=run_model, args=(i, hmp_datas[j]))
        processes.append(p)
        p.start()
for p in processes:
    p.join()
    count += 1
    bar.update(count)
Code/HMP Classification/Random Forest.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Specifying factors for GMST/GSAT conversion and amount of total Earth energy in the ocean # # Requires fair >= 1.6.0c3 and < 2.0.0 # NBVAL_IGNORE_OUTPUT import openscm_runner # NBVAL_IGNORE_OUTPUT print(openscm_runner.__version__) # + import os.path import numpy as np import matplotlib.pyplot as plt import pandas as pd import pyam from fair.forward import fair_scm from scmdata import ScmRun from tqdm import tqdm_notebook from openscm_runner.run import run from openscm_runner.adapters import FAIR # - fair = FAIR() # NBVAL_IGNORE_OUTPUT fair.get_version() # NBVAL_IGNORE_OUTPUT df_fair = ScmRun( os.path.join( "..", "tests", "test-data", "rcmip_scen_ssp_world_emissions.csv" ), lowercase_cols=True, ) # NBVAL_IGNORE_OUTPUT x = run( climate_models_cfgs={ "FAIR": [ {}, # passing an empty list of an empty dict will run with defaults {"ohu_factor": 0.95, "gmst_factor": np.linspace(0.90, 1.00, 351)}, ], }, scenarios=df_fair, output_variables=( "Surface Air Ocean Blended Temperature Change", "Heat Content|Ocean", "Heat Uptake|Ocean", ), ) # Note in the plots below that 'model' is the IAM that produced the scenario. In all cases, the climate model is FaIR. # NBVAL_IGNORE_OUTPUT x.get_unique_meta("climate_model", no_duplicates=True) # In the below we plot two runs per scenario. The first is with default OHU and GMST factors (0.92 and 1/1.04) and the second is with the specified factors (0.95 for OHU and a time-varying one for GMST). 
# NBVAL_IGNORE_OUTPUT
# Blended air/ocean surface temperature; one line per ensemble member (run_id)
temperature_axes = plt.figure(figsize=(12, 7)).add_subplot(111)
temperature_runs = x.filter(variable="Surface Air Ocean Blended Temperature Change")
temperature_runs.lineplot(hue="scenario", style="model", ax=temperature_axes, time_axis="year", units="run_id", estimator=None)

# NBVAL_IGNORE_OUTPUT
# Ocean heat uptake for the same runs
uptake_axes = plt.figure(figsize=(12, 7)).add_subplot(111)
uptake_runs = x.filter(variable="Heat Uptake|Ocean")
uptake_runs.lineplot(hue="scenario", style="model", ax=uptake_axes, time_axis="year", units="run_id", estimator=None)

# NBVAL_IGNORE_OUTPUT
# Cumulative ocean heat content
content_axes = plt.figure(figsize=(12, 7)).add_subplot(111)
content_runs = x.filter(variable="Heat Content|Ocean")
content_runs.lineplot(hue="scenario", style="model", ax=content_axes, time_axis="year", units="run_id", estimator=None)
notebooks/fair-gmst-ohu-factors.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

import numpy as np
import pandas as pd
import seaborn as sb
#import category_encoders as ce
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split
from sklearn.ensemble import RandomForestClassifier
from sklearn.svm import SVC
from sklearn.metrics import f1_score,confusion_matrix
from sklearn.metrics import accuracy_score
from sklearn.feature_selection import SelectKBest, chi2, f_regression

# NOTE(review): absolute Windows path - consider a relative path / pathlib
dataset = pd.read_csv("C:\\Users\\akash\\Downloads\\forest-cover-type-prediction\\train1.csv")
dataset.head()

# target is the Cover_Type column; every other column is a feature
Y = dataset.Cover_Type
X = dataset.drop(['Cover_Type'], axis=1)

# +
from sklearn.model_selection import train_test_split

# Split dataset into training set and test set
X_train, X_test, y_train, y_test = train_test_split(X, Y, test_size=0.3,random_state=109) # 70% training and 30% test

# +
# Create a random forest classifier (bug fix: the old comment said "svm")
# baseline: trained on ALL features, and named accordingly (it previously
# reused the RandForest_K_best name before any feature selection happened)
RandForest_all_features = RandomForestClassifier().fit(X_train, y_train)
# -

y_pred = RandForest_all_features.predict(X_test)
accuracy = accuracy_score(y_test, y_pred)
print('Accuracy: ',accuracy)

# univariate selection: keep the 20 features with the best f_regression score
UnivariateFeatureSelection = SelectKBest(f_regression, k=20).fit(X_train, y_train)
x_train_k_best = UnivariateFeatureSelection.transform(X_train)
x_test_k_best = UnivariateFeatureSelection.transform(X_test)

print("Shape of original data: ", X_train.shape)
print("Shape of corpus with best features: ", x_train_k_best.shape)

# retrain on the reduced feature set only
RandForest_K_best = RandomForestClassifier().fit(x_train_k_best, y_train)

y_pred = RandForest_K_best.predict(x_test_k_best)
accuracy = accuracy_score(y_test, y_pred)
print('Accuracy: ',accuracy)

feature_names = list(X_train.columns.values)

# +
mask = UnivariateFeatureSelection.get_support() #list of booleans
# bug fix: the old loop variable was named `bool`, shadowing the builtin;
# a comprehension keeps the selected feature names in order
new_features = [feature for selected, feature in zip(mask, feature_names) if selected] # The list of your K best features
new_features
# -
Feature Selection 1.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # CHEM 1000 - Spring 2022 # Prof. <NAME>, University of Pittsburgh # # ## Recitation Ch 8 - Numeric Integration # # **Learning Objectives** # # - Understand how to perform numeric integration (using a few different techniques) # - Understand why some methods work better than others # # **Attribution** # # Much of this material has been adapted from [Mathematical Python by <NAME>](https://github.com/patrickwalls/mathematical-python/blob/master/integration/riemann-sums.ipynb) # # ### Numeric Integration # # Sometimes we need to use numeric integration rather than analytical indefinite or definite integrals: # - The function / data we want to integrate doesn't have a formula (e.g., NMR spectra, MRI images, real-world data, etc.) # - The function we want to integrate has no known analytical solution # - (e.g., electron density on a grid, machine learning models # - The function has an analytical form, but "it's complicate" and maybe it's faster or better to do the integration numerically # # Remember that we initially expressed integrals as the area under a curve - a limit of a sum of rectangles. # # These are known as [Riemann sums](https://en.wikipedia.org/wiki/Riemann_sum), which important because they provide an easy way to approximate a definite integral # # $$ # \int_a^b f(x) \, dx \approx \sum_{i=1}^N f(x_i^ * ) (x_i - x_{i-1}) \ # $$ # # Notice that the product $f(x_i^ * ) (x_i - x_{i-1})$ for each $i$ is the area of a rectangle of height $f(x_i^ * )$ and width $x_i - x_{i-1}$. We can think of a Riemann sum as the area of $N$ rectangles with heights determined by the graph of $y=f(x)$. 
# # The value $x_i^*$ chosen in each subinterval is arbitrary however there are certain obvious choices: # # * A *left* Riemann sum is when each $x_i^* = x_{i-1}$ is the left endpoint of the subinterval $[x_{i-1},x_i]$ # * A *right* Riemann sum is when each $x_i^* = x_i$ is the right endpoint of the subinterval $[x_{i-1},x_i]$ # * A *midpoint* Riemann sum is when each $x_i^* = (x_{i-1} + x_i)/2$ is the midpoint of the subinterval $[x_{i-1},x_i]$ # # Let's visualize rectangles in the left, right and midpoint Riemann sums for the function # # $$ # f(x) = \frac{1}{1 + x^2} # $$ # # over the interval $[0,5]$ with a partition of size $N=10$. # + # let's plot this import numpy as np import matplotlib.pyplot as plt # %matplotlib inline # %config InlineBackend.figure_format = 'retina' plt.style.use('../lectures/chem1000.mplstyle') # + # our function to integrate def f(x): return 1/(1+x**2) a = 0; b = 5; N = 10 # range and partion size n = 10 # Use n*N+1 points to plot the function smoothly x = np.linspace(a,b,N+1) y = f(x) X = np.linspace(a,b,n*N+1) Y = f(X) # set up three plots plt.figure(figsize=(15,5)) # left endpoints plt.subplot(1,3,1) plt.plot(X,Y,'b') x_left = x[:-1] # Left endpoints y_left = y[:-1] plt.plot(x_left,y_left,'b.',markersize=10) plt.bar(x_left,y_left,width=(b-a)/N,alpha=0.2,align='edge',edgecolor='b') plt.title('Left Riemann Sum, N = {}'.format(N)) plt.ylabel('f(x)') plt.xlabel('x') # midpoints plt.subplot(1,3,2) plt.plot(X,Y,'b') x_mid = (x[:-1] + x[1:])/2 # Midpoints y_mid = f(x_mid) plt.plot(x_mid,y_mid,'b.',markersize=10) plt.bar(x_mid,y_mid,width=(b-a)/N,alpha=0.2,edgecolor='b') plt.title('Midpoint Riemann Sum, N = {}'.format(N)) # right endpoints plt.subplot(1,3,3) plt.plot(X,Y,'b') x_right = x[1:] # right endpoints y_right = y[1:] plt.plot(x_right,y_right,'b.',markersize=10) plt.bar(x_right,y_right,width=-(b-a)/N,alpha=0.2,align='edge',edgecolor='b') plt.title('Right Riemann Sum, N = {}'.format(N)) plt.ylabel('f(x)') plt.xlabel('x') # - # Notice 
# that when the function $f(x)$ is decreasing on $[a,b]$ the left endpoints give an overestimate of the integral $\int_a^b f(x) dx$ and right endpoints give an underestimate. The opposite is true when the function is increasing.
#
# Let's compute the value of each of the Riemann sums:

# +
dx = (b-a)/N
print("Partition with",N,"subintervals.")

# sample locations for the three endpoint rules
x_left = np.linspace(a,b-dx,N)
x_midpoint = np.linspace(dx/2,b - dx/2,N)
x_right = np.linspace(dx,b,N)

# one rectangle sum per rule
left_riemann_sum = np.sum(f(x_left) * dx)
print("Left Riemann Sum:", left_riemann_sum)
midpoint_riemann_sum = np.sum(f(x_midpoint) * dx)
print("Midpoint Riemann Sum:", midpoint_riemann_sum)
right_riemann_sum = np.sum(f(x_right) * dx)
print("Right Riemann Sum:", right_riemann_sum)
# -

# We know the exact value
#
# $$
# \int_0^5 \frac{1}{1 + x^2} dx = \arctan(5)
# $$
#
# and we can compare the Riemann sums to the value:

I = np.arctan(5)
print(I)

print("Left Riemann Sum Error:",np.abs(left_riemann_sum - I))
print("Midpoint Riemann Sum:",np.abs(midpoint_riemann_sum - I))
print("Right Riemann Sum:",np.abs(right_riemann_sum - I))

# Much like the central difference formula for numeric derivatives, the midpoint Riemann Sum is much, much more accurate.

def riemann_sum(f,a,b,N=50):
    '''Midpoint Riemann sum of f(x) over the interval [a,b].

    f : function - numpy function of one variable
    a , b : numbers - Endpoints of the interval [a,b]
    N : integer - Number of subintervals of equal length in the partition of [a,b]
    '''
    dx = (b - a)/N
    # N+1 partition edges; averaging consecutive edges yields the N midpoints
    edges = np.linspace(a,b,N+1)
    midpoints = (edges[:-1] + edges[1:])/2
    # total area of the N midpoint rectangles
    return np.sum(f(midpoints)*dx)

riemann_sum(np.sin, 0, np.pi/2, 100)

# ### Exercise 1
#
# We know that:
#
# $$
# \int_{0}^{1} \frac{4}{1+x^{2}} d x=\pi
# $$
#
# Use a Riemann sum to calculate $\pi$.
#
# - How big does N need to be to calculate $\pi$ to 5 decimal places?
# - How many intervals do you need to get 10 decimal places correct?

# +
def f(x):
    """Integrand whose integral over [0,1] is exactly pi."""
    return 4/(1 + x**2)

# 3.14159265358979
# 3.14159
# 3.1415926536 (rounding up)
riemann_sum(f, 0, 1, 28000)
# -

# how big is N for 5 decimal places?
N = 107 # 3.1415999322458608

# how big is n for 10 decimal places?
N = 28000 # gives 3.141592653696086 -- still not accurate enough

# ### Trapezoid Rule
#
# The [trapezoid rule](https://en.wikipedia.org/wiki/Trapezoidal_rule) gives a better approximation of a definite integral by summing the areas of the trapezoids connecting the points
#
# $$
# (x_{i-1},0), (x_i,0), (x_{i-1},f(x_{i-1})), (x_i,f(x_i))
# $$
#
# for each subinterval $[x_{i-1},x_i]$ of a partition. Note that the area of each trapezoid is the sum of a rectangle and a triangle
#
# $$
# (x_i - x_{i-1}) f(x_{i-1}) + \frac{1}{2}(x_i - x_{i-1}) (f(x_i) - f(x_{i-1})) = \frac{1}{2}(f(x_i) + f(x_{i-1}))(x_i - x_{i-1})
# $$
#
# For example, we can use a single trapezoid to approximate:
#
# $$
# \int_0^1 e^{-x^2} \, dx
# $$
#
# First, let's plot the curve $y = e^{-x^2}$ and the trapezoid on the interval $[0,1]$:

# +
x = np.linspace(-0.5,1.5,100)
y = np.exp(-x**2)
plt.plot(x,y)

# the single trapezoid through (0, f(0)) and (1, f(1))
x0 = 0; x1 = 1;
y0 = np.exp(-x0**2); y1 = np.exp(-x1**2);
plt.fill_between([x0,x1],[y0,y1])

plt.xlim([-0.5,1.5]); plt.ylim([0,1.5]);
plt.show()
# -

# trapezoid area: average height times width
A = 0.5*(y1 + y0)*(x1 - x0)
print("Trapezoid area:", A)

# Let's compare this to the midpoint Riemann sum (i.e., with one rectangle)

# +
def f(x):
    """Integrand exp(-x^2) for the trapezoid comparison."""
    return np.exp(-x**2)

riemann_sum(f, 0, 1, 1)
# -

# The limit is about:
riemann_sum(f, 0, 1, 1000)

# So one trapezoid sounds like a good idea, but it's around the same accuracy as the midpoint Riemann sum.
#
# Let's try some side-by-side comparisons with an implementation:

def trapz(f,a,b,N=50):
    '''Approximate the integral of f(x) from a to b by the trapezoid rule.

    The trapezoid rule approximates the integral \int_a^b f(x) dx by the sum:
    (dx/2) \sum_{k=1}^N (f(x_k) + f(x_{k-1}))
    where x_k = a + k*dx and dx = (b - a)/N.

    Parameters
    ----------
    f : function - numpy function of a single variable
    a , b : numbers - Interval of integration [a,b]
    N : integer - Number of subintervals of [a,b]
    '''
    x = np.linspace(a,b,N+1) # N+1 points make N subintervals
    y = f(x)
    y_right = y[1:] # right endpoints
    y_left = y[:-1] # left endpoints
    dx = (b - a)/N
    # pairwise sums of neighbouring heights, averaged and scaled by the width
    return (dx/2) * np.sum(y_right + y_left)

print('Riemann: ', riemann_sum(f, 0, 1, 10))
print('trapezoids: ', trapz(f, 0, 1, 10))

print('Riemann: ', riemann_sum(np.sin, 0, np.pi/2, 10))
print('trapezoids: ', trapz(np.sin, 0, np.pi/2, 10))

# In short, they're really close in accuracy. There's even a numpy (np) method for performing integration:
#
# `np.trapz(y-values, dx=spacing)`
#
# The trick is to make sure that the spacing is consistent between your function and what you tell:
#
# `np.trapz(.., dx=..)`
#
# Note that since `np.trapz()` just asks for $y$ values, it can be used on anything (e.g., real-world data)

spacing = 0.05
x = np.arange(0, np.pi/2, spacing)
# bug fix: `data` was undefined here (NameError); np.trapz integrates sampled
# y-values, so pass sin evaluated on the grid above, as in the example before
np.trapz(np.sin(x), dx=spacing)

# ### Exercise 2
#
# We used Riemann sums to approximate $\pi$ above, let's try with trapezoids
#
# - How many intervals do you need to get 5 decimal place accuracy?
# - How many intervals do you need to get 10 decimal place accuracy?

# +
def f(x):
    """Integrand whose integral over [0,1] is exactly pi."""
    return 4/(1 + x**2)

trapz(f, 0, 1, 43083)
# want 3.1415926535 8979
# -

# how big is N for 5 decimal places?
N = 251 # gives 3.141590008129139

# how big is n for 10 decimal places?
N = 43083 # gives 3.141592653500001

# ### Simpson's Rule
#
# [Simpson's rule](https://en.wikipedia.org/wiki/Simpson%27s_rule) uses a quadratic polynomial on each subinterval of a partition to approximate the function $f(x)$ and to compute the definite integral. This is an improvement over the trapezoid rule which approximates $f(x)$ by a straight line on each subinterval of a partition.
# # Image from Wikipedia - function f(x) is in blue and the quadratic approximation P() is in red: # <a href="https://commons.wikimedia.org/wiki/File:Simpsons_method_illustration.svg#/media/File:Simpsons_method_illustration.svg"><img src="https://upload.wikimedia.org/wikipedia/commons/thumb/c/ca/Simpsons_method_illustration.svg/1200px-Simpsons_method_illustration.svg.png" alt="Simpsons method illustration.svg" width="300"></a> # # In short, you approximate the integral by taking a small parabola using the two end points and the midpoint (i.e., three points exactly determines a curve). # # The formula for Simpson's rule is: # # $$ # S_N(f) = \frac{\Delta x}{3} \sum_{i=1}^{N/2} \left( f(x_{2i-2}) + 4 f(x_{2i-1}) + f(x_{2i}) \right) # $$ # # where $N$ is an ***even*** number of subintervals of $[a,b]$, $\Delta x = (b - a)/N$ and $x_i = a + i \Delta x$. def simps(f,a,b,N=50): '''Approximate the integral of f(x) from a to b by Simpson's rule. Simpson's rule approximates the integral \int_a^b f(x) dx by the sum: (dx/3) \sum_{k=1}^{N/2} (f(x_{2i-2} + 4f(x_{2i-1}) + f(x_{2i})) where x_i = a + i*dx and dx = (b - a)/N. Parameters ---------- f : function - numpy function of a single variable a , b : numbers - Interval of integration [a,b] N : (even) integer - Number of subintervals of [a,b] ''' if N % 2 == 1: raise ValueError("N must be an even integer.") dx = (b-a)/N x = np.linspace(a,b,N+1) y = f(x) return dx/3 * np.sum(y[0:-1:2] + 4*y[1::2] + y[2::2]) # Let's try it out on $\sin x$ first: print('Riemann: ', riemann_sum(np.sin, 0, np.pi/2, 4)) print('trapezoids: ', trapz(np.sin, 0, np.pi/2, 4)) print('Simpson: ', simps(np.sin, 0, np.pi/2, 4)) # Notice that I intentionally used a small number of intervals here (4) and Simpson's rule is already accurate to over 3 decimal places, much better than either other method. # # In some sense, Simpson's rule is a weighted average of the estimates from the two other methods. 
# # There are other numeric integration methods, but Riemann / Trapezoid and Simpson's rule are good, useful methods. # + # from the homework def f(x): return np.sin(x**2) print('Riemann: ', riemann_sum(f, 0, np.pi/2, 50)) print('trapezoids: ', trapz(f, 0, np.pi/2, 50)) print('Simpson: ', simps(f, 0, np.pi/2, 50)) # - # ### Exercise 3 # # We approximated $\pi$ using Riemann sums and trapezoids above. # # Now try with Simpson's rule. # # - How many intervals do you need to get 5 decimal place accuracy? # - How many intervals do you need to get 10 decimal place accuracy? # + def f(x): return 4/(1 + x**2) simps(f, 0, 1, 30) # want 3.1415926536 or 3.14159265358979 # - # how big is N for 5 decimal places? N = 6 # gives 3.141591780936043 # how big is n for 10 decimal places? N = 28 # gives 3.1415926535074465 N = 30 # gives 3.141592653535359 # ### Exercise 4 # # Go back and compare: # # - For Riemann sums, trapezoids, and Simpson's rule, how many intervals did you need for 5 decimal place accuracy? # - For all three methods, how many do you need for 10 decimal place accuracy? # 5 decimal places - how big does N need to be? riemann = 107 trapz = 251 simps = 6 # 10 decimal places - how big does N need to be? riemann = 28000 trapz = 43083 simps = 28 # ------- # This notebook is from Prof. <NAME>, University of Pittsburgh # https://github.com/ghutchis/chem1000 # # Most of this material has been adapted from [Mathematical Python by <NAME>](https://github.com/patrickwalls/mathematical-python/tree/master/scipy) # # <a rel="license" href="http://creativecommons.org/licenses/by/4.0/"><img alt="Creative Commons License" style="border-width:0" src="https://i.creativecommons.org/l/by/4.0/88x31.png" /></a>
recitation/08-numeric-integration.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Table of Contents # <p><div class="lev1"><a href="#Task-1.-Compiling-Ebola-Data"><span class="toc-item-num">Task 1.&nbsp;&nbsp;</span>Compiling Ebola Data</a></div> # <div class="lev1"><a href="#Task-2.-RNA-Sequences"><span class="toc-item-num">Task 2.&nbsp;&nbsp;</span>RNA Sequences</a></div> # <div class="lev1"><a href="#Task-3.-Class-War-in-Titanic"><span class="toc-item-num">Task 3.&nbsp;&nbsp;</span>Class War in Titanic</a></div></p> # + # all imports here import pandas as pd import seaborn as sns import numpy as np import matplotlib.pyplot as plt import os import glob import re import math import calendar import datetime import random from calendar import monthrange from sklearn import datasets, linear_model, ensemble from sklearn.model_selection import train_test_split # %matplotlib inline # - DATA_FOLDER = './Data' # Use the data folder provided in Tutorial 02 - Intro to Pandas.("/") # ## Task 1. Compiling Ebola Data # # The `DATA_FOLDER/ebola` folder contains summarized reports of Ebola cases from three countries (Guinea, Liberia and Sierra Leone) during the recent outbreak of the disease in West Africa. For each country, there are daily reports that contain various information about the outbreak in several cities in each country. # # Use pandas to import these data files into a single `Dataframe`. # Using this `DataFrame`, calculate for *each country*, the *daily average per month* of *new cases* and *deaths*. # Make sure you handle all the different expressions for *new cases* and *deaths* that are used in the reports. 
# # ### Path extracting # - First, we need to collect into an array every file name where the data is located # - We will separate the data of each country at first, because they do not follow the same conventions and change the columns name that we are interested in into the same ones for each country. # - We'll finally merge them as requested for the rest of the computations. # + country_folder_names = ['guinea_data', 'liberia_data', 'sl_data'] country_paths = [DATA_FOLDER + '/ebola/' + name for name in country_folder_names] # all files about the ebola task (by country) all_ebola_files = [glob.glob(os.path.join(path, "*.csv")) for path in country_paths] # - # ### Data reading # Using the paths we extracted above, we read and then merge the data into 3 `Dataframes`, one for each country. Finally we add a column to each `Dataframe` containing the name of the country the data comes from # + guinea_files = all_ebola_files[0] guinea_df = pd.concat([pd.read_csv(path) for path in guinea_files], axis=0).rename( columns={'Date': 'Date', 'Description': 'Info', 'Totals': 'Total'}) guinea_df['Country'] = 'Guinea' #'Total deaths of confirmed' | 'Total cases of confirmed' liberia_files = all_ebola_files[1] liberia_df = pd.concat([pd.read_csv(path) for path in liberia_files], axis=0).rename( columns={'Date': 'Date', 'Variable': 'Info', 'National': 'Total'}) liberia_df['Country'] = 'Liberia' # 'Total death/s in confirmed cases' | 'Total confirmed cases' sl_files = all_ebola_files[2] sl_df = pd.concat([pd.read_csv(path) for path in sl_files], axis=0).rename( columns={'date': 'Date', 'variable': 'Info', 'National': 'Total'}) sl_df['Country'] = 'Sierra Leone' #'death_confirmed' | 'cum_confirmed' || totals # - # We then merge the three Dataframes into one and replace missing values by 0. 
# + ebola_df = pd.concat( [ guinea_df, liberia_df, sl_df ], axis=0 ) # replace missing values by 0 ebola_df.fillna('0', inplace=True) # - # The values in the date column are not all in the same format, therefore we need to uniformize their format.<br/> # Then, we set the index of the `Dataframe` into a combination of the country and the date. # + # unify dates ebola_df['Date'] = pd.to_datetime(ebola_df['Date']) # build index ebola_df.index = [ebola_df['Country'], ebola_df['Date']] ebola_df.index.rename(['Country', 'Date_index'], inplace=True) # - # displaying some ebola_df[:5] # ### Assumption # - We will assume that for each row the total sum is contained in the column named "Total". Therefore if there is a difference between the value in the column "Total" and the sum of the values in the other columns, the ground truth resides in the value in the column "Total". # Based on this assumption, only the columns "Info" and the "Total" will be used. ebola_df = ebola_df[['Total', 'Info']] # displaying some rows ebola_df[:5] # ### Assumptions # We will assume from now on that: # - Only data of confirmed cases and deaths is reliable and important. Therefore we will ignore 'suspected' and 'probable' cases in our calculations. # - The descriptions 'Total deaths of confirmed', 'Total death/s in confirmed cases' and 'death_confirmed', 'Total cases of confirmed', 'Total confirmed cases' and 'cum_confirmed' are cumulative totals. # - The descriptions 'Total deaths of confirmed', 'Total death/s in confirmed cases' and 'death_confirmed' contain data about the number of death of people that we were sure had Ebola. # - The descriptions 'Total cases of confirmed', 'Total confirmed cases' and 'cum_confirmed' contain data about the number of cases of people that we are sure have Ebola. # ### Reasoning # We could have chosen the daily counts instead of cumulative counts. 
However, they were not always given in all countries which means that we couldn't have used a uniform method of computation for every country. <br> # Moreover, using culumative is sufficient and easier. It is also more consistent. # Following our assumptions, we need to keep the results concerned by the mentionned descriptions only.<br><br> # Furthermore, some entries containing the data do not only constitute of numbers, but of '%' or ',', those characters need to be removed. # + deaths_info_to_keep = ['Total deaths of confirmed', 'Total death/s in confirmed cases', 'death_confirmed'] cases_info_to_keep = ['Total cases of confirmed', 'Total confirmed cases', 'cum_confirmed'] ebola_df['Total'] = pd.to_numeric(ebola_df['Total'].replace(',|%', '', regex=True)) ebola_df['Deads'] = np.where(ebola_df['Info'].isin(deaths_info_to_keep), ebola_df['Total'], 0) ebola_df['Cases'] = np.where(ebola_df['Info'].isin(cases_info_to_keep), ebola_df['Total'], 0) # displaying some data the dataframe ebola_df.head(20) # + countries = ['Guinea', 'Liberia', 'Sierra Leone'] infos = ['Deads', 'Cases'] # we don't need the "Total" and "Info" columns anymore ebola_infos_df = ebola_df[infos] # + # plotting data by country for country in countries: ebola_infos_df.loc[country].groupby(['Date_index']).agg(sum).plot(title=country) # - # As we can see above, lots of data is missing where y = 0, we will need to ignore those point in the future. Moreover, some points seem to be incorrect unless one can resuscitate. # # # ### Methods # # To get better estimations and ignore unrealistic values, we will create and train a model to detect outliers and create another one to create an extrapolation. 
We will use a RANSAC Regressor to detect the outliers and an ExtraTreesRegressor to extrapolate the values.<br/> # # # #### Alternatives # # We could have used a LinearRegressor instead of the ExtraTreesRegressor, this would have lead to a better approximation of the trend and it could also compensate human error induced during data fetching. However, we are more interested in the exact daily average given by the data knowing that outliers are taken care of by the RANSAC Regressor and thus the error is minimal enough to be ignored. # + day_offset = datetime.datetime(2014, 1, 1,) def days_in_month(year, month): return monthrange(year, month)[1] def days_in_interval(start, end): return (end.total_seconds() - start.total_seconds()) / (3600 * 24) # example : delta of 35 days would return "February" def days_delta_to_month(days): return calendar.month_name[math.floor(days/30) + 1] # - # Since data is sometimes missing, we then extract the intervals for each country where we have data about the number of cases, and where we have the number of deads. # For each of those intervals: # - we get the length in days for each month in the interval. # - we create pairs for each month in the interval, where the first item is the earliest day of that month contained in the interval, and the second one, the latest one. 
def build_interval_by_month(start, end): assert(start.year == end.year) # works within the same year only assert(start.month < end.month ) # can't go backwards or same month interval = [] # corner case #1 : start.day is not the first of the month interval.append([ datetime.datetime(start.year, start.month, start.day), datetime.datetime(start.year, start.month, days_in_month(start.year, start.month)) ]) for month_idx in range(start.month + 1, end.month): interval.append([ datetime.datetime(start.year, month_idx, 1), datetime.datetime(start.year, month_idx, days_in_month(start.year, month_idx)) ]) # corner case #2 : end.day in not necessary the last of the month interval.append([ datetime.datetime(end.year, end.month, 1), datetime.datetime(end.year, end.month, end.day) ]) return [[date-day_offset for date in dates] for dates in interval] # + intervals_of_interest = {} for country in countries: intervals_of_interest[country] = {} for info in infos: agg_data = ebola_infos_df.loc[country].groupby(['Date_index']).agg(sum) agg_data_greater_zero = agg_data[agg_data[info]>0] start = agg_data_greater_zero.index[0] end = agg_data_greater_zero.index[-1] intervals_of_interest[country][info] = build_interval_by_month(start, end) # - # To train our models, we created some new features to complement the only one we have. The primary feature being the date in seconds of the record since a previously decided day offset (beginning of the year) divided by 100'000. def get_features(dates): X = pd.DataFrame( data=[date.total_seconds() for date in dates], index=range(len(dates)), columns=['Date_index'] ) X["Date_index"] = X["Date_index"] / 100000. X["log"] = X["Date_index"].apply(np.log) X["Date1/2"] = X["Date_index"]**(1/2.) 
X["Date^2"] = X["Date_index"]**2 return X # note : it also plots the predictions def get_model(country, info): agg_data = ebola_infos_df.loc[country].groupby(['Date_index']).agg(sum) agg_data_greater_zero = agg_data[agg_data[info] > 0] delta = pd.DataFrame(agg_data_greater_zero[info].index)["Date_index"] - day_offset X = get_features(delta.tolist()) reg1 = linear_model.RANSACRegressor(random_state=1024) reg2 = ensemble.ExtraTreesRegressor(random_state=1411) reg1.fit(X, agg_data_greater_zero[info]) inlier_mask = reg1.inlier_mask_ outlier_mask = np.logical_not(inlier_mask) x_train, x_test, y_train, y_test = train_test_split(X[inlier_mask], agg_data_greater_zero[info][inlier_mask], test_size=0.0) reg2.fit(x_train, y_train) pred_df = pd.DataFrame(reg2.predict(X)).rename(columns={0:"Prediction"}) pred_df["Real Values (zeroes filtered)"] = agg_data_greater_zero[info].values pred_df["Date"] = agg_data_greater_zero.index pred_df.plot(x="Date", title=country+' - '+info) return reg2 def plot_info_per_month(intervals, plot_name): intervals_df = pd.DataFrame( data=[interval[0] for interval in intervals], index=[interval[1] for interval in intervals], ) intervals_df.index.name = "Months" intervals_df.plot(kind="bar", title=plot_name, legend=False) return intervals_df # + pred = [] final_df = pd.DataFrame() for country in countries: for info in infos: model = get_model(country, info) intervals = [] for interval in intervals_of_interest[country][info]: features = get_features(interval) pred.append(model.predict(features)) if(interval[0] == interval[1]): intervals.append([pred[-1][1] - pred[-2][1], days_delta_to_month(interval[0].days)]) else: intervals.append([ (pred[-1][1] - pred[-1][0]) / days_in_interval(interval[0], interval[1]), days_delta_to_month(interval[0].days) ]) temp_df = plot_info_per_month(intervals, country + " - " + info + " - daily average per month") temp_df['Country'] = country temp_df['Info'] = info final_df = pd.concat([final_df, temp_df], axis=0) 
final_df.index = [final_df.index, final_df['Country'], final_df['Info']] final_df = final_df[[0]].rename( columns={0: 'Count'}) final_df.groupby(['Country', 'Info', 'Months']).head() # - # ## Task 2. RNA Sequences # # In the `DATA_FOLDER/microbiome` subdirectory, there are 9 spreadsheets of microbiome data that was acquired from high-throughput RNA sequencing procedures, along with a 10<sup>th</sup> file that describes the content of each. # # Use pandas to import the first 9 spreadsheets into a single `DataFrame`. # Then, add the metadata information from the 10<sup>th</sup> spreadsheet as columns in the combined `DataFrame`. # Make sure that the final `DataFrame` has a unique index and all the `NaN` values have been replaced by the tag `unknown`. # ### Our solution # # -> We import the metadata into a dataframe # # # -> We import each data files (MIDx.xls) into dataframes (1 file = 1 dataframe) <br/> # note: we handle the first column as row indices and then use it to concat the different dataframes # # # -> For each data files, we add their respective index that we get from the dataframe of the metadata <br/> # note: it's said we must add it as columns, we assumed it was as column names for each data source which makes lots of sense # # # -> We concat rows of the MIDs dataframes in one single dataframe # # # -> We replace NaN values by "unknown" # reading all filenames in the folder ending with .xls task2_files = glob.glob(os.path.join(DATA_FOLDER+'/microbiome/', "*.xls")) task2_files.sort() task2_files # Thanks to the sort, we can see that the metadata is at the end of the list. Hence we extract it using this info. 
# separating data file paths (0 to 8) and metadata file path (last = 9) metadata_file = task2_files[-1] mids_files = task2_files[:-1] # importing metadata file into a dataframe and showing it # + metadata_df = pd.read_excel(metadata_file, index_col=None) metadata_df # - # note: we can check the that the order of the rows is the same as the order of the files (MID1 -> MID9). # This makes it easy to associate file with its corresponding colum name using indices : # + mids_df = [] for idx, file in enumerate(mids_files): mids_df.append(pd.read_excel(file, index_col=0, header=None)) mids_df[idx].columns = [[index] for index in metadata_df.loc[range(len(metadata_df)), ["GROUP", "SAMPLE"]].T[idx]] mids_df[3][:5] # - # (above : showing dataframe samples of file MID4.xls) # concat of the 9 dataframes into one (concat by row) and NA are being replaced by "unknown" # + mids_df_concat = pd.concat(mids_df, axis=1) mids_df_concat.fillna(value="unknown", inplace=True) mids_df_concat # - # (above : showing final dataframe) # ## Task 3. Class War in Titanic # # Use pandas to import the data file `Data/titanic.xls`. It contains data on all the passengers that travelled on the Titanic. from IPython.core.display import HTML HTML(filename=DATA_FOLDER+'/titanic.html') # For each of the following questions state clearly your assumptions and discuss your findings: # 1. Describe the *type* and the *value range* of each attribute. Indicate and transform the attributes that can be `Categorical`. # 2. Plot histograms for the *travel class*, *embarkation port*, *sex* and *age* attributes. For the latter one, use *discrete decade intervals*. # 3. Calculate the proportion of passengers by *cabin floor*. Present your results in a *pie chart*. # 4. For each *travel class*, calculate the proportion of the passengers that survived. Present your results in *pie charts*. # 5. Calculate the proportion of the passengers that survived by *travel class* and *sex*. 
Present your results in *a single histogram*. # 6. Create 2 equally populated *age categories* and calculate survival proportions by *age category*, *travel class* and *sex*. Present your results in a `DataFrame` with unique index. # ### Data loading data = pd.read_excel(DATA_FOLDER+'/titanic.xls') data.head(5) data.describe(include='all') # ## 1. Describe the *type* and the *value range* of each attribute. Indicate and transform the attributes that can be `Categorical`. # ### Data description # - *pclass*: is an Integer between 1 and 3 that represents in which class that passenger was in. It can be categorical because we cannot say that 3rd class is twice as far as 2nd class from 1st class. # - *survived*: is an Integer that tells us whether the passenger survived (1) or died (0). Even though it should be categorical, it is easier to let it non-categorical as it is then possible to sum the values to get the total number of people who survived. # - *name*: is a String that contains the name of the passenger. The title could be extracted from this attribute and be transformed into a categorical attribut but we don't use it here. # - *sex*: is String that is either 'female' or 'male'. It contains the information about the sex of the passenger. It can be categorical. # - *age*: is a Double that represents the age in year of the passenger. It could be transformed into ranges and which could then be considered as categorical. # - *sibsp*: is an Integer that contains the number of siblings and spouse also embarked aboard the Titanic. # - *parch*: is an Integer that contains the number of children and parents also embarked aboard the Titanic. # - *ticket*: is a String that contains the ticket number of the passenger. # - *fare*: is an Integer that represents the price that the passenger paid for its ticket. # - *cabin*: is a String that contains the deck letter and the cabin number of the passenger. 
# - *embarked*: is a character {S, Q, C} that represent the Port where the passenger embarked. # - *boat*: is the id of the boat that the passenger took after the Titanic crashed into the iceberg. # - *body*: is the number attributed to a dead body when it was found. # - *home.dest*: represents two informations: before the / is the home of the person and after the / is the destination of the person. data['pclass'] = data.pclass.astype('category') data['sex'] = data.sex.astype('category') data['embarked'] = data.embarked.astype('category') # ## 2. Plot histograms for the *travel class*, *embarkation port*, *sex* and *age* attributes. For the latter one, use *discrete decade intervals*. sns.countplot(x="pclass", data=data, palette="Greens_d") sns.countplot(x="embarked", data=data, palette="Greens_d") sns.countplot(x="sex", data=data, palette="Greens_d") # For the discrete interval, we decide to divide the ages in 9 intervals: 1 for each decades from 0 to 90 years old. Thanks to the use of panda cut function we do not have to bother about NAs. sns.countplot(x=pd.cut(data.age, [0,10,20,30,40,50,60,70,80,90], right=False), palette="Greens_d") # ## 3. Calculate the proportion of passengers by *cabin floor*. Present your results in a *pie chart*. # Let's first plot a pie for every values we have for the cabin floors. data['cabin'].astype(str).str[0].value_counts(sort=False).plot(kind='pie') # As we can see above, around 3/4 of the data is missing for this attribute. We need to ignore the missing values to have a better view of the pie chart. Note that the cabin floor T might seem to be a typo, we will assume it refers to the tank top that one can find on a titanic deckplan and hence keep the value. data['cabin'].astype(str).str[0].value_counts()[1:].plot(kind='pie') # ## 4. For each *travel class*, calculate the proportion of the passengers that survived. Present your results in *pie charts*. 
df=data.groupby("pclass").survived.agg(["sum", lambda x: len(x) - sum(x)]) df.columns=["survived", "dead"] df=df.T df.columns=["1st class", "2nd class", "3rd class"] df.plot(kind="pie", subplots=True, figsize=(18, 6)) # ## 5. Calculate the proportion of the passengers that survived by *travel class* and *sex*. Present your results in *a single histogram*. # Seaborn allows us to plot such a graph. The black bars show the variance. sns.barplot(x="sex", y="survived", hue="pclass", data=data); # ### 6. Create 2 equally populated *age categories* and calculate survival proportions by *age category*, *travel class* and *sex*. Present your results in a `DataFrame` with unique index. # To split the datas in two equally populated age categories, we use the median and group the data on both side of the median. Obviously some datas will reside in the median and hence we will not get two perfectly equally populated categories but we assume that considering the number of datas we have, we condiser a small delta is acceptable df = data[['pclass', 'sex', 'age', 'survived']][data['age'].isnull() != True] median_age = df[['age']].median() df['age_cat'] = np.where(df[['age']] > median_age, '>{}'.format(median_age[0]), '<{}'.format(median_age[0])) df = df[['pclass', 'sex', 'age_cat', 'survived']] df.groupby(['age_cat', 'pclass', 'sex']).agg(np.mean)
01 - Pandas and Data Wrangling/Homework 1.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + [markdown] id="view-in-github" colab_type="text" # <a href="https://colab.research.google.com/github/fronkzoza001/Feelme_FEC-Dataset/blob/main/FEC_ResNet50.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a> # + colab={"base_uri": "https://localhost:8080/"} id="aNssSDY3uLnb" outputId="e638aa24-18ee-408d-ca90-f8c062ec258b" from google.colab import drive drive.mount('/content/drive') # + id="_6bc4zfngfbq" import numpy as np import pandas as pd import matplotlib.pyplot as plt import seaborn as sn import skimage.io import keras.backend as K import tensorflow as tf from tensorflow.keras.preprocessing.image import ImageDataGenerator from tensorflow.keras.applications import ResNet50 from tensorflow.keras.layers import Dense, Flatten, Dropout,BatchNormalization ,Activation from tensorflow.keras.models import Model, Sequential from keras.applications.nasnet import NASNetLarge from tensorflow.keras.callbacks import ReduceLROnPlateau, ModelCheckpoint, EarlyStopping from tensorflow.keras.optimizers import Adam # + id="h_ah2rjGgfbq" train_datagen = ImageDataGenerator(rescale = 1./255, validation_split = 0.2, rotation_range=5, width_shift_range=0.2, height_shift_range=0.2, shear_range=0.2, #zoom_range=0.2, horizontal_flip=True, vertical_flip=True, fill_mode='nearest') valid_datagen = ImageDataGenerator(rescale = 1./255, validation_split = 0.2) test_datagen = ImageDataGenerator(rescale = 1./255 ) # + [markdown] id="tEBPw_ZCLrz-" # # How to choose batch size # we choose batch size = 64 because of time which is faster than 32 about 5% and accuracy is acceptable at 12% less than 32. 
# https://www.bualabs.com/archives/729/what-is-batch-size-in-deep-neural-networks-how-to-adjust-machine-learning-model-accuracy-deep-learning-hyperparameter-tuning-ep-2/

# + id="kG1AXLH1gfbr" colab={"base_uri": "https://localhost:8080/", "height": 358} outputId="a2558295-667f-4569-9da1-2992fa9eb217"
# flow_from_directory needs the root *directory* of per-class subfolders (the
# original pointed at '../input/train.csv', a file). It must also be the same
# directory as the validation generator below, so that validation_split=0.2
# (set on train_datagen) partitions one dataset into 'training'/'validation'.
train_dataset = train_datagen.flow_from_directory(directory = '../input/fer2013/train',
                                                  target_size = (48,48),
                                                  class_mode = 'categorical',
                                                  subset = 'training',
                                                  batch_size = 64)

# + id="owH99tN-gfbr"
valid_dataset = valid_datagen.flow_from_directory(directory = '../input/fer2013/train',
                                                  target_size = (48,48),
                                                  class_mode = 'categorical',
                                                  subset = 'validation',
                                                  batch_size = 64)

# + id="YQwSdEHSgfbr"
test_dataset = test_datagen.flow_from_directory(directory = '../input/fer2013/test',
                                                target_size = (48,48),
                                                class_mode = 'categorical',
                                                batch_size = 64)

# + id="xQr2qFCggfbs"
# Sanity check: load one test image and add a batch axis -> (1, 48, 48, 3).
from keras.preprocessing import image
img = image.load_img("../input/fer2013/test/angry/PrivateTest_10131363.jpg",target_size=(48,48))
img = np.array(img)
plt.imshow(img)
print(img.shape)
img = np.expand_dims(img, axis=0)
from keras.models import load_model
print(img.shape)

# + id="bUVhOVeNgfbs"
# ImageNet-pretrained ResNet50 backbone without its classification head.
base_model = tf.keras.applications.ResNet50(input_shape=(48,48,3),include_top=False,weights="imagenet")

# + id="qqA1CIo2gfbs"
# Freezing Layers: train only the last 4 layers of the backbone.
for layer in base_model.layers[:-4]:
    layer.trainable=False

# + id="mCZgeravgfbs"
# Building Model: backbone + three Dense(32) blocks + 7-way softmax head.
model=Sequential()
model.add(base_model)
model.add(Dropout(0.5))
model.add(Flatten())
model.add(BatchNormalization())
model.add(Dense(32,kernel_initializer='he_uniform'))
model.add(BatchNormalization())
model.add(Activation('relu'))
model.add(Dropout(0.5))
model.add(Dense(32,kernel_initializer='he_uniform'))
model.add(BatchNormalization())
model.add(Activation('relu'))
model.add(Dropout(0.5))
model.add(Dense(32,kernel_initializer='he_uniform'))
model.add(BatchNormalization())
model.add(Activation('relu'))
model.add(Dense(7,activation='softmax'))

# + id="nzr_yFXMgfbt"
# Model Summary
model.summary()

# + id="dEYlrQ-Wgfbt"
from tensorflow.keras.utils import plot_model
from IPython.display import Image
plot_model(model, to_file='convnet.png', show_shapes=True,show_layer_names=True)
Image(filename='convnet.png')

# + id="U-6vlLPzgfbt"
def f1_score(y_true, y_pred): #taken from old keras source code
    """Batch-wise F1 score computed from rounded (0/1-clipped) predictions."""
    true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))
    possible_positives = K.sum(K.round(K.clip(y_true, 0, 1)))
    predicted_positives = K.sum(K.round(K.clip(y_pred, 0, 1)))
    precision = true_positives / (predicted_positives + K.epsilon())
    recall = true_positives / (possible_positives + K.epsilon())
    f1_val = 2*(precision*recall)/(precision+recall+K.epsilon())
    return f1_val

# + id="zNM8ShGvgfbt"
METRICS = [
      # CategoricalAccuracy is the correct accuracy for a 7-class softmax
      # output; BinaryAccuracy (used originally) thresholds each of the 7
      # outputs at 0.5 independently and inflates the reported accuracy.
      # The name 'accuracy' is kept so history.history['accuracy'] still works.
      tf.keras.metrics.CategoricalAccuracy(name='accuracy'),
      tf.keras.metrics.Precision(name='precision'),
      tf.keras.metrics.Recall(name='recall'),
      tf.keras.metrics.AUC(name='auc'),
      f1_score,
]

# + id="OLxC_411gfbu"
# Callbacks: halve the LR after 20 stagnant epochs, checkpoint every epoch,
# stop early after 20 epochs without improvement.
lrd = ReduceLROnPlateau(monitor = 'val_loss',patience = 20,verbose = 1,factor = 0.50, min_lr = 1e-10)
mcp = ModelCheckpoint('model.h5')
es = EarlyStopping(verbose=1, patience=20)

# + id="WrKh6Lk8gfbu"
model.compile(optimizer='Adam', loss='categorical_crossentropy',metrics=METRICS)

# + id="BZH4SrS4gfbu"
history=model.fit(train_dataset,validation_data=valid_dataset,epochs = 60,verbose = 1,callbacks=[lrd,mcp,es])

# + id="i0e7ijvegfbu"
#%% PLOTTING RESULTS (Train vs Validation FOLDER 1)

def Train_Val_Plot(acc,val_acc,loss,val_loss,auc,val_auc,precision,val_precision,f1,val_f1):
    """Plot training-vs-validation curves for five metrics side by side."""

    fig, (ax1, ax2,ax3,ax4,ax5) = plt.subplots(1,5, figsize= (20,5))
    fig.suptitle(" MODEL'S METRICS VISUALIZATION ")

    ax1.plot(range(1, len(acc) + 1), acc)
    ax1.plot(range(1, len(val_acc) + 1), val_acc)
    ax1.set_title('History of Accuracy')
    ax1.set_xlabel('Epochs')
    ax1.set_ylabel('Accuracy')
    ax1.legend(['training', 'validation'])

    ax2.plot(range(1, len(loss) + 1), loss)
    ax2.plot(range(1, len(val_loss) + 1), val_loss)
    ax2.set_title('History of Loss')
    ax2.set_xlabel('Epochs')
    ax2.set_ylabel('Loss')
    ax2.legend(['training', 'validation'])

    ax3.plot(range(1, len(auc) + 1), auc)
    ax3.plot(range(1, len(val_auc) + 1), val_auc)
    ax3.set_title('History of AUC')
    ax3.set_xlabel('Epochs')
    ax3.set_ylabel('AUC')
    ax3.legend(['training', 'validation'])

    ax4.plot(range(1, len(precision) + 1), precision)
    ax4.plot(range(1, len(val_precision) + 1), val_precision)
    ax4.set_title('History of Precision')
    ax4.set_xlabel('Epochs')
    ax4.set_ylabel('Precision')
    ax4.legend(['training', 'validation'])

    ax5.plot(range(1, len(f1) + 1), f1)
    ax5.plot(range(1, len(val_f1) + 1), val_f1)
    ax5.set_title('History of F1-score')
    ax5.set_xlabel('Epochs')
    ax5.set_ylabel('F1 score')
    ax5.legend(['training', 'validation'])

    plt.show()

Train_Val_Plot(history.history['accuracy'],history.history['val_accuracy'],
               history.history['loss'],history.history['val_loss'],
               history.history['auc'],history.history['val_auc'],
               history.history['precision'],history.history['val_precision'],
               history.history['f1_score'],history.history['val_f1_score']
              )
FEC_ResNet50.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# Let us import some Python libraries that will help us load, manipulate, analyse and perform machine learning algorithms on the data.

import pandas as pd #Data Manipulation
import numpy as np
import seaborn as sns
# %matplotlib inline
import matplotlib.pyplot as plt
from sklearn.preprocessing import StandardScaler #For scaling the dataset
from sklearn.cluster import AgglomerativeClustering# Clustering Algorithms
from scipy.cluster.hierarchy import dendrogram, linkage

# Let's load the data into Python so that we can explore it more thouroughly

# Load the transaction snapshot, drop the stale index column and take a
# 40 000-row random sample to keep the O(n^2) linkage computation feasible.
# NOTE: StandardScaler is imported above but never applied, so the dummy
# columns and the raw numeric columns are clustered on unscaled values.
data = pd.read_csv('Copy of snapshot_full_df.csv', keep_default_na=False, low_memory=False).drop('Unnamed: 0', axis=1).sample(n=40000)
data['tran_date'] = pd.to_datetime(data['tran_date'])
# One-hot encode each categorical column (prefixing dummies with the
# source column name), then drop the originals and the date column.
categorical = ['store_region', 'store_grading', 'sku_department', 'sku_subdepartment', 'sku_category', 'sku_subcategory']
for i in categorical:
    data = data.join(pd.get_dummies(data[i], prefix=i))
data.drop(categorical,axis=1,inplace=True)
data.drop('tran_date', axis=1, inplace=True)
data.dropna(inplace=True)
data.shape

# ** Additional Column added at the end of the DatFrame with the cluster number for each observation point.**
#
# ***Let us now plot the data using a***

# generate the linkage matrix# gener
# Ward linkage: merges that minimize the within-cluster variance increase.
Z = linkage(data,method='ward')

# **Dendogram of the last 18 merges**

plt.title('Hierarchical Clustering Dendrogram (truncated)')
plt.xlabel('sample index or (cluster size)')
plt.ylabel('distance')
dendrogram(
    Z,
    truncate_mode='lastp',  # show only the last p merged clusters
    p=18,  # show only the last p merged clusters
    leaf_rotation=90.,
    leaf_font_size=12.,
    show_contracted=True,  # to get a distribution impression in truncated branches
)
plt.show()

# **Full Dendogram**

plt.figure(figsize=(25,10))
plt.title('Hierarchical Clustering Dendrogram')
plt.xlabel('sample index')
plt.ylabel('distance')
dendrogram(
    Z,
    leaf_rotation=90.,  # rotates the x axis labels
    leaf_font_size=8.,  # font size for the x axis labels
)
plt.show()

# +
#Fitting the Agglomerative Clustering Algorithm to our dataset
# Six flat clusters; labels_ gives each sampled row its cluster id.
model = AgglomerativeClustering(n_clusters=6, affinity = 'euclidean', linkage = 'ward')
model_clusters = model.fit(data)
data['Cluster'] = model_clusters.labels_
cluster_centers = model.n_clusters
# -

# # Clusters
# ** Below we will analyse each cluster by looking at the average values(mean) of each variable per cluster**

# Boolean masks per cluster id, then one feature-only frame per cluster
# (the helper 'Cluster' column is dropped again before averaging).
clust_1 = data['Cluster']==0
clust_2 = data['Cluster']==1
clust_3 = data['Cluster']==2
clust_4 = data['Cluster']==3
clust_5 = data['Cluster']==4
clust_6 = data['Cluster']==5

cluster1 = data[clust_1].drop('Cluster',axis = 1)
cluster2 = data[clust_2].drop('Cluster',axis = 1)
cluster3 = data[clust_3].drop('Cluster',axis = 1)
cluster4 = data[clust_4].drop('Cluster',axis = 1)
cluster5 = data[clust_5].drop('Cluster',axis = 1)
cluster6 = data[clust_6].drop('Cluster',axis = 1)

# ### *Visualizing the clusters*

# +
from scipy.cluster.hierarchy import fcluster
# Cut the same linkage tree into k=4 flat clusters just for this plot
# (independent of the 6-cluster AgglomerativeClustering fit above).
k=4
clusts = fcluster(Z, k, criterion='maxclust')

#Scatter Plot of the data
# NOTE(review): this plots only the first two columns of the frame, which
# after the dummy-encoding are arbitrary features — verify they are the
# intended axes.
plt.figure(figsize=(10, 8))
plt.scatter(data.values[:,0], data.values[:,1], c=clusts)  # plot points with cluster dependent colors
plt.show()
# -

# ** Let's create a dataframe of all the clusters and their average values.
# We will use this dataframe to understand which values are dominant in each clusters**

# Per-cluster mean of every feature, one labelled column per cluster.
cluster1_df = pd.DataFrame(cluster1.mean(),columns=['Cluster 1'])
cluster2_df = pd.DataFrame(cluster2.mean(),columns=['Cluster 2'])
cluster3_df = pd.DataFrame(cluster3.mean(),columns=['Cluster 3'])
cluster4_df = pd.DataFrame(cluster4.mean(),columns=['Cluster 4'])
cluster5_df = pd.DataFrame(cluster5.mean(),columns=['Cluster 5'])
cluster6_df = pd.DataFrame(cluster6.mean(),columns=['Cluster 6'])

# FIX: pd.concat's 'join_axes' argument was removed in pandas 1.0.
# Reindexing on cluster1_df's index reproduces the old behaviour exactly
# (align every column to the first cluster's feature index).
cluster_mix = pd.concat([cluster1_df, cluster2_df, cluster3_df, cluster4_df, cluster5_df, cluster6_df],
                        axis=1).reindex(cluster1_df.index)

# # Conclusion
# **Below we we will look at the each feature and which have the highest average values, in attempt to analyse each cluster and its constituents.**

# **Cluster 1**: show only this cluster's column, sorted by dominance.
cluster_mix.sort_values(by='Cluster 1', ascending=False).drop(['Cluster 2','Cluster 3','Cluster 4','Cluster 5','Cluster 6'], axis=1)

# **Cluster 2**

cluster_mix.sort_values(by='Cluster 2', ascending=False).drop(['Cluster 1','Cluster 3','Cluster 4', 'Cluster 5', 'Cluster 6'], axis=1)

# **Cluster 3**

cluster_mix.sort_values(by='Cluster 3', ascending=False).drop(['Cluster 1','Cluster 2','Cluster 4', 'Cluster 5', 'Cluster 6'], axis=1)

# **Cluster 4**

cluster_mix.sort_values(by='Cluster 4', ascending=False).drop(['Cluster 1','Cluster 2','Cluster 3', 'Cluster 5', 'Cluster 6'], axis=1)

# **Cluster 5**

cluster_mix.sort_values(by='Cluster 5', ascending=False).drop(['Cluster 1','Cluster 2','Cluster 3', 'Cluster 4', 'Cluster 6'], axis=1)

# **Cluster 6**
# FIX: 'Cluster 4' was missing from the drop list, so this summary showed
# two columns instead of one, unlike the five summaries above.
cluster_mix.sort_values(by='Cluster 6', ascending=False).drop(['Cluster 1','Cluster 2','Cluster 3', 'Cluster 4', 'Cluster 5'], axis=1)
notebooks/milton_notebook.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # name: python3 # --- # + [markdown] id="view-in-github" colab_type="text" # <a href="https://colab.research.google.com/github/Iallen520/lhy_DL_Hw/blob/master/hw14_life_long_learning.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a> # + [markdown] id="LD5roJkIvoRj" colab_type="text" # ## LifeLong Machine Learning # # ### 助教的投影片連結 # [投影片](https://docs.google.com/presentation/d/13JmcOZ9i_m5xJbRBKNMAKE1fIzGhyaeLck3frY0B2xY/edit?usp=sharing) # # ### 定義 # 老師的影片有詳細說明定義 這裡不細提 詳細可以參考 [lifelong learning](https://youtu.be/7qT5P9KJnWo) # # # ### 方法 # 在2019年底,有人提出了一個大匯整將lifelong learning 的方法,從2016- 2019 年初 的模型做了歸類,大致上可以分成三種大方法 # * Replay-based methods # * Regularization-based methods # * Parameter isolation methods # # <img src="https://i.ibb.co/VDFJkWG/2019-12-29-17-25.png" width="100%"> # # 在這次的作業之中,我們要走過一次regularization-based methods 裡面的 prior-focused的兩種方法 分別是 EWC 和 MAS 這兩種方法 # # 圖片出處 [Continual Learning in Neural # Networks](https://arxiv.org/pdf/1910.02718.pdf) # # 若有任何問題,歡迎來信至助教信箱 <EMAIL> # # # # # + [markdown] id="On1VZz4HUIJw" colab_type="text" # # Import Libraries # + id="dLnpJTNtje_J" colab_type="code" colab={} import torch import torch.nn as nn import torch.optim as optim import torch.nn.functional as F import torch.utils.data as data import torch.utils.data.sampler as sampler import torchvision from torchvision import datasets, transforms import numpy as np import os import random from copy import deepcopy import json device = torch.device("cuda" if torch.cuda.is_available() else "cpu") # + [markdown] id="KRGf4QAKFzz9" colab_type="text" # # 模型 # + [markdown] id="zZvlf0Wv7YdU" colab_type="text" # >因為本次作業強調的是lifelong learning 的訓練方法,並非疊模型,所以今天我們所舉的例子,都會使用同一個模型來做訓練只是應用上不同lifelong learning的訓練方法, 在這次的作業的例子內 我們使用的是 一個 六層的 
# fully-connected network with ReLU activations. The same architecture is
# reused for every lifelong-learning method below; only the training
# objective changes.

# + [markdown] id="fkc3Z-4JYaPe" colab_type="text"
# ## Basic Model

# + id="g8aQRs7ss3nx" colab_type="code" colab={}
class Model(nn.Module):
    """Six-layer fully-connected classifier for flattened 3x32x32 inputs.

    Layer widths: 3072 -> 1024 -> 512 -> 256 -> 128 -> 128 -> 10, with a
    ReLU after each hidden layer and raw logits from the final layer.
    """

    def __init__(self):
        super().__init__()
        self.fc1 = nn.Linear(3*32*32, 1024)
        self.fc2 = nn.Linear(1024, 512)
        self.fc3 = nn.Linear(512, 256)
        self.fc4 = nn.Linear(256, 128)
        self.fc5 = nn.Linear(128, 128)
        self.fc6 = nn.Linear(128, 10)
        self.relu = nn.ReLU()

    def forward(self, x):
        # Flatten to (batch, 3072), run the five hidden layers with ReLU
        # in between, and return the 10-way logits from fc6.
        out = x.view(-1, 3*32*32)
        for hidden in (self.fc1, self.fc2, self.fc3, self.fc4, self.fc5):
            out = self.relu(hidden(out))
        return self.fc6(out)


# + [markdown] id="AfeD8zfAccZe" colab_type="text"
# The two regularization-based methods, EWC and MAS, are introduced next.

# + [markdown] id="qwri0rVpjd6h" colab_type="text"
# ## EWC

# + [markdown] id="e3NePROJyWyW" colab_type="text"
# ### Elastic Weight Consolidation
#
# When learning task B after task A, EWC minimizes
#
# $$\mathcal{L}_B = \mathcal{L}(\theta) + \sum_{i} \frac{\lambda}{2} F_i (\theta_{i} - \theta_{A,i}^{*})^2$$
#
# where $\mathcal{L}(\theta)$ is the usual task-B loss (cross entropy for
# classification), $\theta_{A,i}^{*}$ is the value of parameter $i$ saved
# after training task A, and $\theta_i$ is the current value. The model
# architecture is fixed throughout; only parameter values change.
#
# $F_i$ is the "guard" of parameter $i$: the larger it is, the more
# important that parameter was for task A and the less it should move.
# # In practice F_i is taken from the diagonal of the empirical Fisher
# # information matrix
# # $$ F = [ \nabla \log(p(y_n | x_n, \theta_{A}^{*}) \nabla \log(p(y_n | x_n, \theta_{A}^{*})^T ] $$
# # estimated with data $x_n$ from the previous task: take the gradient of
# # the log-likelihood with respect to each parameter and square it
# # ( (parameter.grad)^2 ), accumulated via PyTorch's backward().
# # See section 2.4 of "Continual Learning in Neural Networks"
# # (https://arxiv.org/pdf/1910.02718.pdf) for the derivation.
# # For Your Information: Elastic Weight Consolidation,
# # https://arxiv.org/pdf/1612.00796.pdf

# + id="K511GmRzyYWa" colab_type="code" colab={}
class EWC(object):
    """Elastic Weight Consolidation (Kirkpatrick et al., PNAS 2017).

    Estimates a diagonal Fisher-information matrix from the given
    dataloaders (data of previously learned tasks) and exposes
    ``penalty(model)``, the quadratic regularizer added to the new
    task's loss.

    Reference: https://arxiv.org/abs/1612.00796
    """
    def __init__(self, model: nn.Module, dataloaders: list, device):
        self.model = model
        self.dataloaders = dataloaders
        self.device = device
        # All trainable parameters of the model.
        self.params = {n: p for n, p in self.model.named_parameters() if p.requires_grad}
        self._means = {}  # anchor values θ_A* saved after the previous task
        self._precision_matrices = self._calculate_importance()  # EWC Fisher (F) matrix
        for n, p in self.params.items():
            # Snapshot the current parameter values as θ_A*.
            self._means[n] = p.clone().detach()

    def _calculate_importance(self):
        precision_matrices = {}
        for n, p in self.params.items():
            # Initialize the Fisher (F) matrix with zeros.
            precision_matrices[n] = p.clone().detach().fill_(0)
        self.model.eval()
        dataloader_num=len(self.dataloaders)
        # NOTE(review): len(loader) counts batches, not samples, so this is
        # a per-batch average of the squared gradients — confirm intended.
        number_data = sum([len(loader) for loader in self.dataloaders])
        for dataloader in self.dataloaders:
            for data in dataloader:
                self.model.zero_grad()
                input = data[0].to(self.device)
                # NOTE(review): .view(1, -1) flattens the whole batch into a
                # single output row before taking the argmax label.
                output = self.model(input).view(1, -1)
                # Use the model's own prediction as the label.
                label = output.max(1)[1].view(-1)
                ############################################################################
                ##### Build the EWC Fisher (F) matrix: accumulate the squared      #####
                ##### gradient of the log-likelihood for every parameter.          #####
                ############################################################################
                loss = F.nll_loss(F.log_softmax(output, dim=1), label)
                loss.backward()
                for n, p in self.model.named_parameters():
                    precision_matrices[n].data += p.grad.data ** 2 / number_data

        precision_matrices = {n: p for n, p in precision_matrices.items()}
        return precision_matrices

    def penalty(self, model: nn.Module):
        # sum_i F_i * (θ_i - θ_{A,i}*)^2
        loss = 0
        for n, p in model.named_parameters():
            _loss = self._precision_matrices[n] * (p - self._means[n]) ** 2
            loss += _loss.sum()
        return loss


# + [markdown] id="jmsPw6avjl5B" colab_type="text"
# ## MAS

# + [markdown] id="C7fSYpALrAVw" colab_type="text"
# ### Memory Aware Synapses
#
# Same loss shape as EWC, but the importance weight $F_i$ is replaced by
#
# $$\Omega_i = || \frac{\partial \ell_2^2(M(x_k; \theta))}{\partial \theta_i} ||$$
#
# i.e. the absolute gradient of the squared l2 norm of the model's output
# vector, averaged over samples $x_k$ of the previous tasks. Only the
# "global" variant (norm of the final output) is implemented here.
#
# For Your Information: https://arxiv.org/pdf/1711.09601.pdf

# + id="btFvFJMmqxE0" colab_type="code" colab={}
class MAS(object):
    """Memory Aware Synapses (Aljundi et al., ECCV 2018).

    Like EWC but the per-parameter importance Ω_i is the mean absolute
    gradient of the squared l2 norm of the model output.
    """
    def __init__(self, model: nn.Module, dataloaders: list, device):
        self.model = model
        self.dataloaders = dataloaders
        # All trainable parameters of the model.
        self.params = {n: p for n, p in self.model.named_parameters() if p.requires_grad}
        self._means = {}  # anchor values saved after the previous task
        self.device = device
        self._precision_matrices = self.calculate_importance()  # MAS Omega (Ω) matrix
        for n, p in self.params.items():
            self._means[n] = p.clone().detach()

    def calculate_importance(self):
        print('Computing MAS')
        precision_matrices = {}
        for n, p in self.params.items():
            # Initialize the Omega (Ω) matrix with zeros.
            precision_matrices[n] = p.clone().detach().fill_(0)

        self.model.eval()
        dataloader_num = len(self.dataloaders)
        num_data = sum([len(loader) for loader in self.dataloaders])
        for dataloader in self.dataloaders:
            for data in dataloader:
                self.model.zero_grad()
                output = self.model(data[0].to(self.device))
                #######################################################################################
                ##### Build the MAS Omega (Ω) matrix: backprop the (mean) squared l2 norm of the #####
                ##### output vector and accumulate each parameter's absolute gradient.           #####
                #######################################################################################
                output.pow_(2)
                loss = torch.sum(output,dim=1)
                loss = loss.mean()
                loss.backward()

                for n, p in self.model.named_parameters():
                    precision_matrices[n].data += p.grad.abs() / num_data  ## difference with EWC

        precision_matrices = {n: p for n, p in precision_matrices.items()}
        return precision_matrices

    def penalty(self, model: nn.Module):
        # sum_i Ω_i * (θ_i - θ_{A,i}*)^2
        loss = 0
        for n, p in model.named_parameters():
            _loss = self._precision_matrices[n] * (p - self._means[n]) ** 2
            loss += _loss.sum()
        return loss


# + [markdown] id="NWQ_JBlbFnKV" colab_type="text"
# # Data

# + [markdown] id="vdep_aMvUYqI" colab_type="text"
# ## Preprocessing
# - Convert MNIST (1*28*28) to (3*32*32)
# - Convert USPS (1*16*16) to (3*32*32)
# - Normalize the images

# + id="xVHrWsHfIPtY" colab_type="code" colab={}
class Convert2RGB(object):
    # Replicate a grayscale channel until the image has num_channel channels.
    def __init__(self, num_channel):
        self.num_channel = num_channel

    def __call__(self, img):
        # If the channel of img is not equal to desired size,
        # then expand the channel of img to desired size.
        img_channel = img.size()[0]
        img = torch.cat([img] * (self.num_channel - img_channel + 1), 0)
        return img


class Pad(object):
    # Symmetrically pad a square image up to the desired size.
    def __init__(self, size, fill=0, padding_mode='constant'):
        self.size = size
        self.fill = fill
        self.padding_mode = padding_mode

    def __call__(self, img):
        # If the H and W of img is not equal to desired size,
        # then pad the channel of img to desired size.
        img_size = img.size()[1]
        assert ((self.size - img_size) % 2 == 0)
        padding = (self.size - img_size) // 2
        padding = (padding, padding, padding, padding)
        return F.pad(img, padding, self.padding_mode, self.fill)


def get_transform():
    # Tensor -> pad to 32x32 -> 3 channels -> normalize each channel to [-1, 1].
    transform = transforms.Compose([transforms.ToTensor(),
                                    Pad(32),
                                    Convert2RGB(3),
                                    transforms.Normalize(mean=[0.5,0.5,0.5],std=[0.5,0.5,0.5])])
    return transform


# + [markdown] id="PW4r9Jd-etyG" colab_type="text"
# ## Datasets
# - MNIST : 28*28*1 grayscale, 10 classes
# - SVHN  : 32*32*3 RGB, 10 classes
# - USPS  : 16*16*1 grayscale, 10 classes

# + id="HPIeRDtIox0M" colab_type="code" colab={}
class Data():
    """Downloads the three training sets and applies the shared transform."""
    def __init__(self, path):
        transform = get_transform()
        self.MNIST_dataset = datasets.MNIST(root = os.path.join(path, "MNIST"),
                                            transform=transform,
                                            train = True,
                                            download = True)
        self.SVHN_dataset = datasets.SVHN(root = os.path.join(path, "SVHN"),
                                          transform=transform,
                                          split='train',
                                          download = True)
        self.USPS_dataset = datasets.USPS(root = os.path.join(path, "USPS"),
                                          transform=transform,
                                          train = True,
                                          download = True)

    def get_datasets(self):
        # Task order used throughout: SVHN -> MNIST -> USPS.
        a = [(self.SVHN_dataset, "SVHN"),(self.MNIST_dataset, "MNIST"),(self.USPS_dataset, "USPS")]
        return a


# + [markdown] id="eMtV82EPjsld" colab_type="text"
# ## Dataloader
# - *.train_loader: training split
# - *.val_loader: validation split

# + id="29-5g8ZHjs_3" colab_type="code" colab={}
class Dataloader():
    """Wraps one task's dataset with a train/val split and an endless train iterator."""
    def __init__(self, dataset, batch_size, split_ratio=0.1):
        self.dataset = dataset[0]
        self.name = dataset[1]
        train_sampler, val_sampler = self.split_dataset(split_ratio)
        self.train_dataset_size = len(train_sampler)
        self.val_dataset_size = len(val_sampler)
        self.train_loader = data.DataLoader(self.dataset, batch_size = batch_size, sampler=train_sampler)
        self.val_loader = data.DataLoader(self.dataset, batch_size = batch_size, sampler=val_sampler)
        self.train_iter = self.infinite_iter()

    def split_dataset(self, split_ratio):
        # Shuffle indices once, hold out split_ratio of them for validation.
        data_size = len(self.dataset)
        split = int(data_size * split_ratio)
        indices = list(range(data_size))
        np.random.shuffle(indices)
        train_idx, valid_idx = indices[split:], indices[:split]
        train_sampler = sampler.SubsetRandomSampler(train_idx)
        val_sampler = sampler.SubsetRandomSampler(valid_idx)
        return train_sampler, val_sampler

    def infinite_iter(self):
        # Generator that cycles through the train loader forever.
        it = iter(self.train_loader)
        while True:
            try:
                ret = next(it)
                yield ret
            except StopIteration:
                it = iter(self.train_loader)


# + [markdown] id="vzG5BWtHGA3p" colab_type="text"
# # Utilities

# + [markdown] id="vMBoCSH5MBLN" colab_type="text"
# ## Save model

# + id="uCZuQrWiMGmH" colab_type="code" colab={}
def save_model(model, optimizer, store_model_path):
    # Persist model weights (.ckpt) and optimizer state (.opt).
    torch.save(model.state_dict(), f'{store_model_path}.ckpt')
    torch.save(optimizer.state_dict(), f'{store_model_path}.opt')


# + [markdown] id="Nde98xvAMxAd" colab_type="text"
# ## Load model

# + id="FGzZ2Yp2MxK-" colab_type="code" colab={}
def load_model(model, optimizer, load_model_path):
    # Restore model weights and optimizer state saved by save_model().
    print(f'Load model from {load_model_path}')
    model.load_state_dict(torch.load(f'{load_model_path}.ckpt'))
    optimizer.load_state_dict(torch.load(f'{load_model_path}.opt'))
    return model, optimizer


# + [markdown] id="eoz6awEcOIAz" colab_type="text"
# ## Build model & optimizer

# + id="TvWqv_JlOOix" colab_type="code" colab={}
def build_model(data_path, batch_size, learning_rate):
    # create model
    model = Model().to(device)
    optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate)
    # NOTE(review): the caller passes config.load_model_path ("./model") as
    # data_path, so the datasets are downloaded under that directory.
    data = Data(data_path)
    datasets = data.get_datasets()
    tasks = []
    for dataset in datasets:
        tasks.append(Dataloader(dataset, batch_size))
    return model, optimizer, tasks


# + [markdown] id="74De0sS-O50R" colab_type="text"
# # Training

# + [markdown] id="_q9Co3vuGWfu" colab_type="text"
# ## Plain training (baseline)

# + id="TBnE9GbiO8Ob" colab_type="code" colab={}
def normal_train(model, optimizer, task, total_epochs, summary_epochs):
    """Run summary_epochs SGD steps of plain cross-entropy training.

    Returns the (mutated) model and optimizer plus the mean loss of every
    50-step window.
    """
    model.train()
    model.zero_grad()
    ceriation = nn.CrossEntropyLoss()
    losses = []
    loss = 0.0
    for epoch in range(summary_epochs):
        imgs, labels = next(task.train_iter)
        imgs, labels = imgs.to(device), labels.to(device)
        outputs = model(imgs)
        ce_loss = ceriation(outputs, labels)
        optimizer.zero_grad()
        ce_loss.backward()
        optimizer.step()
        loss += ce_loss.item()
        if (epoch + 1) % 50 == 0:
            # Report and reset the running mean every 50 steps.
            loss = loss / 50
            print ("\r", "train task {} [{}] loss: {:.3f} ".format(task.name, (total_epochs + epoch + 1), loss), end=" ")
            losses.append(loss)
            loss = 0.0
    return model, optimizer, losses


# + [markdown] id="m2FlojHR_4qb" colab_type="text"
# ## EWC training

# + id="nLHALesw_61i" colab_type="code" colab={}
def ewc_train(model, optimizer, task, total_epochs, summary_epochs, ewc, lambda_ewc):
    """Like normal_train but adds lambda_ewc * ewc.penalty(model) to the loss."""
    model.train()
    model.zero_grad()
    ceriation = nn.CrossEntropyLoss()
    losses = []
    loss = 0.0
    for epoch in range(summary_epochs):
        imgs, labels = next(task.train_iter)
        imgs, labels = imgs.to(device), labels.to(device)
        outputs = model(imgs)
        ce_loss = ceriation(outputs, labels)
        total_loss = ce_loss
        # Quadratic pull towards the previous task's parameters, weighted by F.
        ewc_loss = ewc.penalty(model)
        total_loss += lambda_ewc * ewc_loss
        optimizer.zero_grad()
        total_loss.backward()
        optimizer.step()
        loss += total_loss.item()
        if (epoch + 1) % 50 == 0:
            loss = loss / 50
            print ("\r", "train task {} [{}] loss: {:.3f} ".format(task.name, (total_epochs + epoch + 1), loss), end=" ")
            losses.append(loss)
            loss = 0.0
    return model, optimizer, losses


# + [markdown] id="0B6to7GuqvPX" colab_type="text"
# ## MAS training

# + id="fWhZz9uZquew" colab_type="code" colab={}
def mas_train(model, optimizer, task, total_epochs, summary_epochs, mas_tasks, lambda_mas,alpha=0.8):
    """Like normal_train but adds MAS penalties from up to two previous tasks.

    The most recent task's penalty is weighted by alpha and the one before
    it by (1 - alpha). NOTE(review): mas_tasks.reverse() mutates the list in
    place on every step, so its order alternates between iterations —
    confirm this is intended.
    """
    model.train()
    model.zero_grad()
    ceriation = nn.CrossEntropyLoss()
    losses = []
    loss = 0.0
    for epoch in range(summary_epochs):
        imgs, labels = next(task.train_iter)
        imgs, labels = imgs.to(device), labels.to(device)
        outputs = model(imgs)
        ce_loss = ceriation(outputs, labels)
        total_loss = ce_loss
        mas_tasks.reverse()
        if len(mas_tasks) > 1:
            preprevious = 1 - alpha
            scalars = [alpha,preprevious]
            for mas,scalar in zip(mas_tasks[:2],scalars):
                mas_loss = mas.penalty(model)
                total_loss += lambda_mas * mas_loss * scalar
        elif len(mas_tasks) == 1:
            mas_loss = mas_tasks[0].penalty(model)
            total_loss += lambda_mas * mas_loss
        else:
            pass
        optimizer.zero_grad()
        total_loss.backward()
        optimizer.step()
        loss += total_loss.item()
        if (epoch + 1) % 50 == 0:
            loss = loss / 50
            print ("\r", "train task {} [{}] loss: {:.3f} ".format(task.name, (total_epochs + epoch + 1), loss), end=" ")
            losses.append(loss)
            loss = 0.0
    return model, optimizer, losses


# + [markdown] id="6cuHVXxAfHrA" colab_type="text"
# ## Validation

# + id="ZBp-n3FrfOCe" colab_type="code" colab={}
def val(model, task):
    # Top-1 accuracy on the task's validation split.
    model.eval()
    correct_cnt = 0
    for imgs, labels in task.val_loader:
        imgs, labels = imgs.to(device), labels.to(device)
        outputs = model(imgs)
        _, pred_label = torch.max(outputs.data, 1)
        correct_cnt += (pred_label == labels.data).sum().item()
    return correct_cnt / task.val_dataset_size


# + [markdown] id="dFEYmPAlx_SX" colab_type="text"
# ## Main training procedure

# + id="cJ54vDP2yC2S" colab_type="code" colab={}
def train_process(model, optimizer, tasks, config):
    """Train the model on the tasks sequentially with the configured method.

    The first task is always trained plainly; later tasks use the
    regularizer selected by config.mode. After every summary window the
    validation accuracy of every task seen so far is recorded.
    Returns (task_loss, acc) dictionaries keyed by task name.
    """
    task_loss, acc = {}, {}
    for task_id, task in enumerate(tasks):
        print ('\n')
        total_epochs = 0
        task_loss[task.name] = []
        acc[task.name] = []
        if config.mode == 'basic' or task_id == 0:
            while (total_epochs < config.num_epochs):
                model, optimizer, losses = normal_train(model, optimizer, task, total_epochs, config.summary_epochs)
                task_loss[task.name] += losses
                for subtask in range(task_id + 1):
                    acc[tasks[subtask].name].append(val(model, tasks[subtask]))
                total_epochs += config.summary_epochs
                if total_epochs % config.store_epochs == 0 or total_epochs >= config.num_epochs:
                    save_model(model, optimizer, config.store_model_path)
        if config.mode == 'ewc' and task_id > 0:
            # Fisher matrix is estimated on the validation data of all
            # previously seen tasks.
            old_dataloaders = []
            for old_task in range(task_id):
                old_dataloaders += [tasks[old_task].val_loader]
            ewc = EWC(model, old_dataloaders, device)
            while (total_epochs < config.num_epochs):
                model, optimizer, losses = ewc_train(model, optimizer, task, total_epochs, config.summary_epochs, ewc, config.lifelong_coeff)
                task_loss[task.name] += losses
                for subtask in range(task_id + 1):
                    acc[tasks[subtask].name].append(val(model, tasks[subtask]))
                total_epochs += config.summary_epochs
                if total_epochs % config.store_epochs == 0 or total_epochs >= config.num_epochs:
                    save_model(model, optimizer, config.store_model_path)
        if config.mode == 'mas' and task_id > 0:
            # One MAS object per previous task; each is estimated on the
            # accumulated validation loaders seen up to that task.
            # NOTE(review): indentation reconstructed from a collapsed
            # source — confirm MAS(...) belongs inside this loop.
            old_dataloaders = []
            mas_tasks = []
            for old_task in range(task_id):
                old_dataloaders += [tasks[old_task].val_loader]
                mas = MAS(model, old_dataloaders, device)
                mas_tasks += [mas]
            while (total_epochs < config.num_epochs):
                model, optimizer, losses = mas_train(model, optimizer, task, total_epochs, config.summary_epochs, mas_tasks, config.lifelong_coeff)
                task_loss[task.name] += losses
                for subtask in range(task_id + 1):
                    acc[tasks[subtask].name].append(val(model, tasks[subtask]))
                total_epochs += config.summary_epochs
                if total_epochs % config.store_epochs == 0 or total_epochs >= config.num_epochs:
                    save_model(model, optimizer, config.store_model_path)
        if config.mode == 'scp' and task_id > 0:
            pass
            ########################################
            ##  TODO block ( PART 2 )             ##
            ##  Implement the SCP (Sliced Cramer  ##
            ##  Preservation) branch — or another ##
            ##  regularization method — analogous ##
            ##  to the EWC/MAS branches above.    ##
            ########################################
    return task_loss, acc


# + [markdown] id="7PbfgB3n9eoT" colab_type="text"
# # Configuration

# + id="3kWSZ4w39gzj" colab_type="code" colab={}
class configurations(object):
    """Hyper-parameters; these values are the assignment's defaults."""
    def __init__(self):
        self.batch_size = 256
        self.num_epochs = 10000
        self.store_epochs = 250
        self.summary_epochs = 250
        self.learning_rate = 0.0005
        self.load_model = False
        self.store_model_path = "./model"
        self.load_model_path = "./model"
        self.data_path = "./data"
        self.mode = None
        self.lifelong_coeff = 0.5


# + [markdown] id="w464f4KOLUh6" colab_type="text"
# # Main
# - set the EWC / MAS hyper-parameter lambda
# - train

# + id="AJwVkorvLaSh" colab_type="code" colab={} tags=[]
"""
the order is svhn -> mnist -> usps
===============================================
"""
# import tqdm
if __name__ == '__main__':
    mode_list = ['mas','ewc','basic']
    ## hint: choose lambda carefully / ewc: 80~400, mas: 0.1 - 10
    ############################################################################
    #####                     TODO block ( PART 1 )                        #####
    ############################################################################
    coeff_list = [0, 0 ,0 ]  ## tune lambda here: [mas, ewc, baseline=0]
    ############################################################################
    #####                     TODO block ( PART 1 )                        #####
    ############################################################################
    config = configurations()
    count = 0
    for mode in mode_list:
        config.mode = mode
        config.lifelong_coeff = coeff_list[count]
        print("{} training".format(config.mode))
        model, optimizer, tasks = build_model(config.load_model_path, config.batch_size, config.learning_rate)
        print ("Finish build model")
        if config.load_model:
            model, optimizer = load_model(model, optimizer, config.load_model_path)
        # Accuracy histories are dumped to ./<mode>_acc.txt for plotting.
        task_loss, acc = train_process(model, optimizer, tasks, config)
        with open(f'./{config.mode}_acc.txt', 'w') as f:
            json.dump(acc, f)
        count += 1

# + [markdown] id="DSJX338dA2He" colab_type="text"
# # Plot the results

# + id="4X_RvV4my5Jl" colab_type="code" colab={}
# %matplotlib inline
import matplotlib.pyplot as plt


def plot_result(mode_list, task1, task2, task3):
    """Plot each method's accuracy curve for the three tasks, stacked vertically.

    Later tasks start at an x-offset equal to the number of summary windows
    that elapsed before their training began.
    """
    #draw the lines
    count = 0
    for reg_name in mode_list:
        label = reg_name
        with open(f'./{reg_name}_acc.txt', 'r') as f:
            acc = json.load(f)
        if count == 0:
            color= 'red'
        elif count == 1:
            color= 'blue'
        else:
            color = 'purple'
        ax1=plt.subplot(3, 1, 1)
        plt.plot(range(len(acc[task1])),acc[task1],color,label=label)
        ax1.set_ylabel(task1)
        ax2=plt.subplot(3, 1, 2,sharex=ax1,sharey=ax1)
        plt.plot(range(len(acc[task3]),len(acc[task1])),acc[task2],color,label=label)
        ax2.set_ylabel(task2)
        ax3=plt.subplot(3, 1, 3,sharex=ax1,sharey=ax1)
        ax3.set_ylabel(task3)
        plt.plot(range(len(acc[task2]),len(acc[task1])),acc[task3],color,label=label)
        count += 1
    plt.ylim((0.02,1.02))
    plt.legend()
    plt.show()
    return


mode_list = ['ewc','mas','basic']
plot_result(mode_list,'SVHN','MNIST','USPS')

# + [markdown] id="43QGlXTxyzw_" colab_type="text"
# An ICLR 2020 paper uses these two methods as baselines, gives a geometric
# view of each, and proposes a new method (SCP): "Sliced Cramer Synaptic
# Consolidation for Preserving Deeply Learned Representations",
# https://openreview.net/pdf?id=BJge3TNKwH

# + [markdown] id="cbMUPaN_zAs7" colab_type="text"
# # Advanced
# Implement another regularization method (the scaffolding below targets
# SCP; SI, Riemannian Walk or IMM are also acceptable). Mirror the classes
# and training functions above and produce the same evaluation plots.

# + id="m3aOQ2XI-Prm" colab_type="code" colab={}
def sample_spherical(npoints, ndim=3):
    # npoints unit vectors sampled uniformly from the (ndim-1)-sphere,
    # returned as the columns of an (ndim, npoints) array.
    vec = np.random.randn(ndim, npoints)
    vec /= np.linalg.norm(vec, axis=0)
    return vec


# + id="bcjtln1T6U7T" colab_type="code" colab={}
class SCP(object):
    """Sliced Cramer Preservation (student TODO; importance matrix unimplemented).

    OPEN REVIEW VERSION:
    https://openreview.net/forum?id=BJge3TNKwH
    """
    def __init__(self, model: nn.Module, dataloaders: list, L: int, device):
        self.model = model
        self.dataloaders = dataloaders
        self.params = {n: p for n, p in self.model.named_parameters() if p.requires_grad}
        self._means = {}
        self.L= L  # number of unit-sphere slices ξ to sample
        self.device = device
        self._precision_matrices = self.calculate_importance()
        for n, p in self.params.items():
            self._means[n] = p.clone().detach()

    def calculate_importance(self):
        print('Computing SCP')
        precision_matrices = {}
        for n, p in self.params.items():
            precision_matrices[n] = p.clone().detach().fill_(0)

        self.model.eval()
        dataloader_num = len(self.dataloaders)
        num_data = sum([len(loader) for loader in self.dataloaders])
        for dataloader in self.dataloaders:
            for data in dataloader:
                self.model.zero_grad()
                output = self.model(data[0].to(self.device))
                ####################################################################################
                ##  TODO block ( PART 2 )                                                         ##
                ##  Build SCP's Gamma (Γ) matrix (analogous to MAS's Ω / EWC's F):                ##
                ##  1. Average the output vectors over all data to get φ(:, θ_A*).               ##
                ##  2. Sample L vectors ξ uniformly from the unit sphere                          ##
                ##     (hint: sample_spherical()).                                                ##
                ##  3. For each ξ, take the scalar ρ = ξ · φ(:, θ_A*), call backward() on ρ,     ##
                ##     square each parameter's gradient ∇ρ and average over the L slices;         ##
                ##     the per-parameter results assembled together form Γ.                       ##
                ##     (hint: zero_grad after every backward, or gradients accumulate)            ##
                ####################################################################################
        precision_matrices = {n: p for n, p in precision_matrices.items()}
        return precision_matrices

    def penalty(self, model: nn.Module):
        loss = 0
        for n, p in model.named_parameters():
            _loss = self._precision_matrices[n] * (p - self._means[n]) ** 2
            loss += _loss.sum()
        return loss


# + id="tp6EbyrhXoAH" colab_type="code" colab={}
def scp_train(model, optimizer, task, total_epochs, summary_epochs, scp_tasks, lambda_scp,alpha=0.65):
    # TODO (PART 2): follow the structure of mas_train / ewc_train above.
    losses = []
    loss = 0.0
    ###############################
    #####  TODO block (PART 2) ####
    ###############################
    return model, optimizer, losses


# + id="V1PxP2W3ZcrV" colab_type="code" colab={}
# if __name__ == "__main__":
#     pass
###############################
#####  TODO block (PART 2) ####
##  Combine the new method   ##
##  like the main block.     ##
###############################
hw14/hw14_life_long_learning.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3.8.5 64-bit
#     language: python
#     name: python38564bite08c0b10238845f5b3a369da6da33380
# ---

# # Graph-based image segmentation
# ---
#
# This notebook presents a usage example of the classes:
# * `segmentation.GraphBasedSegmentation()`: implements the graph-based segmentation algorithm proposed by Felzenszwalb et al. ([paper](http://people.cs.uchicago.edu/~pff/papers/seg-ijcv.pdf)).
# * `segmentation.DisjointSetForest()`: the data-structure used by the algorithm (not really used outside the other class).

# ## Description of the algorithm
#
# Let's consider a grayscale image of size `NxM`, where N is the height of the image and M its width. Each pixel of the image has a value between 0 and 255.
#
# The algorithm treats each pixel as a node of a graph `G = (V,E)` where:
# - `V` is the set of vertices
# - `E` the set of edges (i.e. connections between a pair of nodes).
#
# Each edge has an associated weight which - in the case of this implementation - is the *intensity distance* between the considered pair of nodes.
#
# The pixels are connected in an 8-grid fashion: this means that each pixel is connected to its 8 neighbors.
#
# We start by considering each node of the graph as a singleton component (at the beginning we have `height * width` components) and we merge them in an agglomerative way according to the so-called *boundary evidence*.
#
# That is: at each step of the algorithm, two components are compared and, if a boundary evidence is present, they are kept separated, otherwise they are merged into one single component. Doing so, we can reduce the number of components step-by-step, reaching a final result in which each component represents a logical instance in the image.
#
# The boundary evidence is defined as the inequality between two quantities:
# - `Dif(C1, C2)`: represents the within-difference between the two components `C1` and `C2`
# - `MInt(C1, C2)`: represents the inter-component difference
#
# For more details see the [paper](http://people.cs.uchicago.edu/~pff/papers/seg-ijcv.pdf).
#
# For our purpose it is important to notice that the threshold depends on a parameter `k` which can be considered as a *tuning* parameter:
# - larger `k`: preference for larger components
# - smaller `k`: preference for smaller components

# ### Disjoint-set forest
#
# The components are well represented using the *disjoint-set forest* data structure:
# - it stores a collection of disjoint non-overlapping sets (or a partition of a set into disjoint subsets).
# - provides operations for:
#     - adding new sets
#     - merging sets (replacing them by their union)
#     - finding a representative member of a set.
#
#
# It works as follows:
# - the set is initialized with `N * M` nodes, each of which is initially considered as a single component (or subset)
# - each component has a representative/parent, which is the member having greater rank
# - when two components are merged into a single one, the one having greater rank is considered the parent of the other
# - the size of a component indicates the number of nodes inside it
#
# ### Graph
#
# The implementation of the `GraphBasedSegmentation` class is useful to convert an input image into a graph which can be exploited to run the graph-based segmentation algorithm.
#
# The class provides the following methods:
# - `_preprocessing()`: converts an input RGB image to a grayscale Numpy array and applies some preprocessing operations.
# - `_get_diff(pixel1, pixel2)`: returns the difference in terms of intensity between the given pixels.
# - `_create_edge(pixel1, pixel2)`: creates the edge between two pixels of the input image.
# - `_threshold(k, size)`: defines the threshold for a component having size = size with the constant k (`t = k/size`).
# - `build_graph()`: builds the graph (list of tuples) from the input image, adding the necessary edges.
# - `sort()`: sorts the graph according to the weights of the edges (non-decreasing order).
# - `segment(k, min_size)`: segments the image applying the algorithm and removes the components with `size < min_size`.
# - `generate_image()`: generates the segmented image.
# - `find_boundaries()`: finds the boundaries of the regions of the segmented image.
# - `draw_boxes()`: draws rectangular boxes around each region.

import segmentation as seg

# We can import the module in which the classes are implemented and use them in the following way:
# * we create an instance of the class passing it the input image, call it `ig` (_in the example below we have an array of instances_)
# * we segment the image adjusting the `k` and `min_size` parameters along with some preprocessing operation
# * after the segmentation we can generate the output image (having different colors for each segmented region)
# * we can also draw boxes around the found regions

# +
import os

# Collect all .jpg/.png images from the input directory.
directory = '../img/input/'

images = []
for filename in os.listdir(directory):
    if filename.endswith(".jpg") or filename.endswith(".png"):
        images.append(os.path.join(directory, filename))
    else:
        continue

# Segment every collected image with the same hyper-parameters
# (k controls the preference for larger components, min_size prunes tiny ones).
igs = [None] * len(images)
for i in range(len(images)):
    print("\nImage {}\n".format(i))
    igs[i] = seg.GraphBasedSegmentation(images[i])
    igs[i].segment(k=4000, min_size=100, preprocessing=True, contrast=1.5, gaussian_blur=2.3)
    igs[i].generate_image()
    igs[i].draw_boxes()

# +
import matplotlib.pyplot as plt

# Show a 3-row grid: row 1 originals, row 2 segmentations, row 3 boxed regions.
n_images = len(images)

fig = plt.figure(figsize=(30,15))
for i in range(n_images):
    # row 1: original image
    image = igs[i].img
    label = i + 1
    sp = fig.add_subplot(3, n_images, i+1)
    sp.set_title(label)
    plt.axis('off')
    plt.imshow(image)
    # row 2: segmented image (offset by one row of n_images subplots)
    image = igs[i].segmented_img
    label = i + 1
    sp = fig.add_subplot(3, n_images, i+1*n_images+1)
    sp.set_title(label)
    plt.axis('off')
    plt.imshow(image)
    # row 3: image with bounding boxes (offset by two rows)
    image = igs[i].boxed_img
    label = i + 1
    sp = fig.add_subplot(3, n_images, i+2*n_images+1)
    sp.set_title(label)
    plt.axis('off')
    plt.imshow(image)

plt.show()
notebooks/graph_based_segmentation.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # <figure> # <IMG SRC="https://upload.wikimedia.org/wikipedia/commons/thumb/d/d5/Fachhochschule_Südwestfalen_20xx_logo.svg/320px-Fachhochschule_Südwestfalen_20xx_logo.svg.png" WIDTH=250 ALIGN="right"> # </figure> # # # Machine Learning # ### Sommersemester 2021 # Prof. Dr. <NAME> # # Entscheidungsbäume und Random Forests # Um die Funktionsweise von Entscheidungsbäumen und dem CART Algorithmus zu demonstrieren, verwenden wir ein einfaches Beispiel mit nur sehr wenigen Datenpunkten. # Bei den in der Code-Zelle unten angegebenen Wetterdaten `temperatur` und `niederschlag` handelt es sich um Monatsmittelwerte. # Der Datensatz hat also nur 12 Punkte. # # Als Klassen assoziieren wir zu den Monaten je eine Jahreszeit. # Vereinfachend zählen wir die Monate Dezember bis Februar zum Winter, März bis Mai zum Frühling, u.s.w. # + import numpy as np temperatur = np.array([0.6,3.9,6.6,12.4,16.0,17.8,20.2,20.0,15.1,10.7,5.3,3.8]) niederschlag = np.array([72,30,75,35,50,50,40,35,45,28,30,80]) wetter_namen = ['Temperatur', 'Niederschlag'] wetter_label_names = np.array(['Winter', 'Frühling', 'Sommer', 'Herbst']) wetter_label = np.array([0,0,1,1,1,2,2,2,3,3,3,0]) wetter_tabelle = np.column_stack((temperatur,niederschlag)) # - # Nun können wir aus dem Modul `sklearn.tree` die Klasse `DecisionTreeClassifier` verwenden. # + from sklearn.tree import DecisionTreeClassifier wetter_tree = DecisionTreeClassifier(max_depth=3) wetter_tree.fit(wetter_tabelle, wetter_label) # - # Die Vorhersagen von Entscheidungsbäumen sind intuitiv verständlich, vor allem, wenn man eine graphische Repräsentation des Baumes vorliegen hat. # Wir können den Baum über die graphviz Bibliothek erzeugen. # Eine Python-Implementierung von graphviz bietet die Bibliothek `pydot`. 
# # ```python # import sys # # !conda install --yes --prefix {sys.prefix} pydot # ``` # Um den Baum anzuzeigen, exportieren wir zunächst das trainierte Modell in eine `dot`-Datei über die *sklearn* Funktion `export_graphviz`. # Danach transformieren wir die `dot`-Datei mit der Funktion `write_png` in eine Grafik. # Diese können wir dann mit der *pyplot* Funktion `imshow` anzeigen. # + from sklearn.tree import export_graphviz import matplotlib.pyplot as plt import matplotlib.image as img import pydot # %matplotlib inline with open("wetter.dot", 'w') as f: f = export_graphviz(wetter_tree, feature_names=wetter_namen, class_names=wetter_label_names, filled=True, rounded=True, out_file=f) (graph,) = pydot.graph_from_dot_file("wetter.dot") graph.write_png("wetter.png") fig, ax = plt.subplots(figsize=(12, 12)) ax.imshow(img.imread('wetter.png')) plt.show() # - # Die Funktion `plot_decision_regions` aus der folgenden Code-Zelle stammt aus den Materialien zum Buch *Python Machine Learning* von <NAME>, ebenfalls ein sehr empfehlenswertes Buch zum Thema Maschinelles Lernen. # Wir verwenden die Funktion zum Anzeigen der Entscheidungsgrenzen in einfachen Modellen. # + # <NAME>, 2015 (http://sebastianraschka.com) # Python Machine Learning - Code Examples # # Chapter 3 - A Tour of Machine Learning Classifiers Using Scikit-Learn # # <NAME>. Python Machine Learning. Packt Publishing Ltd., 2015. 
# GitHub Repo: https://github.com/rasbt/python-machine-learning-book # # License: MIT # https://github.com/rasbt/python-machine-learning-book/blob/master/LICENSE.txt # #[https://github.com/rasbt/python-machine-learning-book/blob/master/code/optional-py-scripts/ch03.py] from matplotlib.colors import ListedColormap import matplotlib.pyplot as plt def plot_decision_regions(X, y, classifier, labelnames=None, test_idx=None, resolution=0.02): # setup marker generator and color map markers = ('s', 'x', 'o', '^', 'v') colors = ('red', 'blue', 'lightgreen', 'gray', 'cyan') cmap = ListedColormap(colors[:len(np.unique(y))]) # plot the decision surface x1_min, x1_max = X[:, 0].min() - 1, X[:, 0].max() + 1 x2_min, x2_max = X[:, 1].min() - 1, X[:, 1].max() + 1 xx1, xx2 = np.meshgrid(np.arange(x1_min, x1_max, resolution), np.arange(x2_min, x2_max, resolution)) Z = classifier.predict(np.array([xx1.ravel(), xx2.ravel()]).T) Z = Z.reshape(xx1.shape) plt.contourf(xx1, xx2, Z, alpha=0.4, cmap=cmap) plt.xlim(xx1.min(), xx1.max()) plt.ylim(xx2.min(), xx2.max()) if labelnames is None: # #len(labelnames)!=len(np.unique(y))? labelnames = [i for i in np.unique(y)] for idx, cl in enumerate(np.unique(y)): plt.scatter(x=X[y == cl, 0], y=X[y == cl, 1], alpha=0.8, c=[cmap(idx)], marker=markers[idx], label=labelnames[cl]) # highlight test samples if test_idx: # plot all samples if not versiontuple(np.__version__) >= versiontuple('1.9.0'): X_test, y_test = X[list(test_idx), :], y[list(test_idx)] warnings.warn('Please update to NumPy 1.9.0 or newer') else: X_test, y_test = X[test_idx, :], y[test_idx] plt.scatter(X_test[:, 0], X_test[:, 1], c='', alpha=1.0, linewidths=1, marker='o', s=55, label='test set') # - # Nun können wir darstellen, wie die Monate nach den Kriterien *Temperatur* und *Niederschlag* den 4 Jahreszeiten zugeordnet werden. 
plot_decision_regions(wetter_tabelle, wetter_label, classifier=wetter_tree, labelnames=wetter_label_names)
plt.xlabel('Temperatur')
plt.ylabel('Niederschlag')
plt.legend(loc='upper right')
plt.show()

# What would you have to change so that all 12 data points are classified exactly?
# What is the drawback of that variant?

# ## The CART algorithm

# For our own implementation of the CART algorithm we first join the data points
# and labels into a single matrix.
# This makes splitting the data sets a bit easier.

data = np.column_stack((wetter_tabelle,wetter_label))

# We measure the *purity* of a set of data points with the *Gini index*.
#
# The function `_gini` first determines the set of labels that occur in the data.
# In our matrix the labels are stored in the last column (`[:,-1]`).
# Since a set contains each value at most once, `c` holds each of the season
# codes exactly once.
#
# For every season we determine the number of data points belonging to it:
# `d[:,-1]==cls` is `True` for all rows of the matrix that belong to season `cls`.
# With `d[d[:,-1]==cls]` we reduce the matrix to the rows of season `cls` and
# count them via `len`. This count is divided by the total number of data
# points `len(d)`.

# Quick check: boolean row selection for class 1.
data[tuple([data[:,-1]==1])]

def _gini(d):
    """Return the Gini impurity of the label column (last column) of matrix `d`.

    An empty set is treated as maximally impure and yields 1.
    """
    if len(d)==0: return 1
    # Fix: derive the class set from the subset `d` itself instead of the
    # module-level `data` matrix.  For subsets of `data` the result is
    # numerically identical (classes absent from `d` contribute p = 0),
    # but `_gini` no longer silently depends on a global variable.
    c = set(d[:,-1])
    g=1
    for cls in c:
        # proportion of points in `d` belonging to class `cls`
        p=len(d[d[:,-1]==cls])/len(d)
        g -= p*p
    return g

# Computing the *Gini value* for the full data set gives the expected result:
# $1-4\left(\frac{3}{12}\right)^2=1-0.25=0.75$

_gini(data)

# The function `_split` now determines the feature and the cut value at which
# the data set can be split best (i.e. with maximal information gain).
# # Dazu läuft die Funktion in einer äußeren Schleife über alle Merkmale, also über alle Spaltennummern, bis auf die letzte (dort stehen die Labels). # In jeder Spalte sortieren wir zunächst die Werte und bestimmen dann die möglichen Schnittpunkte. # Diese Schnittpunkte liegen immer genau zwischen zwei aufeinander folgenden Werten. # Mit `(last+i)/2` berechnen wir diesen Mittelwert und tragen in dann in die Liste `cuts` ein. # # Anschließend läuft die Funktion über alle möglichen Schnittpunkte und wertet jeweils die Kostenfunktion für den Informationsgewinn aus. # # Der global beste Wert und die das zugehörige Merkmal werden ermittelt und als Resultat zurückgegeben. def _split(data): J_min=1 best_val=0 best_feature=0 N_data = len(data) for feature in range(data.shape[1]-1): cuts = [] f_sorted = np.sort(data[:,feature]) last = f_sorted[0] for i in f_sorted[1:]: cuts.append((last+i)/2) last = i for val in cuts: true_set=data[data[:,feature]<=val] false_set=data[data[:,feature]>val] gini_t = _gini(true_set) gini_f = _gini(false_set) J = (gini_t*len(true_set)/N_data+gini_f*len(false_set)/N_data) if J<=J_min: J_min = J best_val = val best_feature=feature return best_feature,best_val # Die Funktion `_split` sollte nun in einer weiteren Funktion verwendet werden um den Entscheidungsbaum rekursiv aufzubauen. # Für unser einfaches Beispiel ist es aber ebensogut möglich, die Schritte des Algorithmus "per Hand" durchzuspielen. # # Als ersten *Split* bestimmt die Funktion das Merkmal 0 (Temperatur) und den Wert 16.9. # Die Kinder dieses Splits haben die *Gini-Werte* 0.67 (Temperatur kleiner oder gleich 16.9) und 0 (Temperatur größer 16.9). # Damit wurde die Klasse der Sommermonate optimal abgedeckt. # # Da nur der linke Teilbaum einen *Gini-Wert* größer 0 hat, muss nur das linke Kind expandiert werden. # Nun wird ebenfalls die Temperatur als Grundlage gewählt und die Wintermonate separiert. 
# Im letzten Split werden die Frühjahr- und Herbstmonate über die Niederschlagsmenge getrennt. # # Der Entscheidungsbaum besitzt die selben Entscheidungsgrenzen, wie der von sklearn erzeugte. # Allerdings besitzen die beiden Bäume eine andere Struktur, da die sklearn-Version zunächst die Wintermonate abspaltet. # Dies hat ausschließlich mit der Reihenfolge der Berechnungen zu tun, das die *Gini-Werte* und Teilmengengrößen in beiden Fällen identisch sind. # + best_feature,best_val = _split(data) print("Split column", best_feature, "at", best_val) left_set=data[data[:,best_feature]<best_val] right_set=data[data[:,best_feature]>=best_val] print("Gini left: %.2f | Gini right %.2f" % (_gini(left_set), _gini(right_set))) best_feature,best_val = _split(left_set) print("Split column", best_feature, "at", best_val) left_set1=left_set[left_set[:,best_feature]<best_val] right_set1=left_set[left_set[:,best_feature]>=best_val] print("Gini left: %.2f | Gini right %.2f" % (_gini(left_set1), _gini(right_set1))) best_feature,best_val = _split(right_set1) print("Split column", best_feature, "at", best_val) left_set2=right_set1[right_set1[:,best_feature]<best_val] right_set2=right_set1[right_set1[:,best_feature]>=best_val] print("Gini left: %.2f | Gini right %.2f" % (_gini(left_set2), _gini(right_set2))) # - # ## Kreditvergabe mit Entscheidungsbäumen # In den folgenden Code-Zellen verwenden wir sklearn, um den aus dem letzten Arbeitsblatt bekannten Datensatz `kredit.csv` zu verarbeiten. # Statt mit logistischer Regression wollen wir nun *gute* und *schlechte* Kredite über einen Entscheidungsbaum und später über *Random Forests* zu klassifizieren. # # Da die Schritte größtenteils selbsterklärend und aus vorherigen Beispielen bekannt sind, wird hier auf eine ausführliche Beschreibung verzichtet. 
import pandas as pd df = pd.read_csv("kredit.csv") df.head(10) from sklearn.model_selection import train_test_split X_train, X_test, y_train, y_test = train_test_split(df.iloc[:,1:],df.iloc[:,0],test_size=0.3, random_state=0) # + import numpy as np import matplotlib.pyplot as plt import matplotlib.image as img import pydot from sklearn.tree import DecisionTreeClassifier, export_graphviz from sklearn.metrics import accuracy_score # %matplotlib inline model = DecisionTreeClassifier(max_depth=3) #model = DecisionTreeClassifier(criterion="entropy") model.fit(X_train,y_train) scr = model.score(X_test, y_test) print("Die Vorhersagegenauigkeit des Entscheidungsbaumes mit Tiefe 3 beträgt %.3f" % scr) # - with open("kredite.dot", 'w') as f: f = export_graphviz(model, feature_names=df.columns.values[1:], class_names=['schlecht','gut'], filled=True, rounded=True, out_file=f) (graph,) = pydot.graph_from_dot_file("kredite.dot") graph.write_png("kredite.png") fig, ax = plt.subplots(figsize=(12, 12)) ax.imshow(img.imread('kredite.png')) plt.show() from sklearn.model_selection import train_test_split X_train, X_test, y_train, y_test = train_test_split(df.iloc[:,1:],df.iloc[:,0],test_size=0.3, random_state=0) model = DecisionTreeClassifier() model.fit(X_train,y_train) scr = model.score(X_test, y_test) print("Die Vorhersagegenauigkeit des Entscheidungsbaumes beträgt %.3f" % scr) # Nun verwenden wir einen *Random Forrest* Klassifizierer mit einem Ensemble aus 100 Entscheidungsbaum-Instanzen. # + from sklearn.ensemble import RandomForestClassifier forrest = RandomForestClassifier(n_estimators=100) forrest.fit(X_train,y_train) scr = forrest.score(X_test, y_test) print("Die Vorhersagegenauigkeit des Random Forests beträgt %.3f" % scr)
u4/08_Entscheidungsbaeume.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- from sklearn.datasets import load_iris import pandas as pd dataset = load_iris() # + # 판다스로 데이터 확인하기 labels = pd.DataFrame(dataset.target) labels.columns=['labels'] data = pd.DataFrame(dataset.data) data.columns=dataset['feature_names'] data = pd.concat([data,labels],axis=1) data # - X = dataset['data'] Y = dataset['target'] # label - 학습에 사용X print("Shape of X :", X.shape) print("Shape of Y :", Y.shape)
BASELINE_code/Assignment2_IRIS_Clustering/IRIS_Clustering_BASELINE.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- import pandas as pd olympicData = pd.read_csv('../static/assets/data/olympics_data.csv') olympicData.head(10) # + # Selecting only required columns filtered_df = olympicData[['Country','Year','Season','Event','Medal']] # Dropping NaN values in 'Medal' column as we are inerested in only medal count filtered_df = filtered_df.dropna() # Filtering the dataFrame for year >= 1960 filtered_df = filtered_df[filtered_df['Year'] >= 1960] # Drop duplicate rows for team events. filtered_df = filtered_df.drop_duplicates(['Country','Year','Season','Event','Medal']) filtered_df = filtered_df.rename(columns={'Medal': 'Medals', 'Country': 'Nation'}).reset_index(drop = True) filtered_df.head(10) # - filtered_df.shape # + # Selecting required columns filtered_df = filtered_df[['Year','Season','Nation','Medals']].sort_values(['Year','Nation'], ascending = [True,True]).reset_index(drop = True) filtered_df.head(10) # - filtered_df.shape # + # Filter the dataFrame for Gold medals gold_medals_df = filtered_df[filtered_df['Medals'] == 'Gold'] # Rename 'Medal' column as 'Gold' gold_medals_df = gold_medals_df.rename(columns={'Medals': 'Gold'}) # Sort the dataframe by Year gold_medals_df = gold_medals_df.sort_values(['Year']) # Groupby 'Year' and 'NOC' and find count of Gold medals gold_medals_df = gold_medals_df.groupby(['Year','Season','Nation'], sort = False).count()['Gold'] gold_medals_df = pd.DataFrame(gold_medals_df) # Sort the dataframe by 'Year'(ascending) and 'Gold'(descending) gold_medals_df = gold_medals_df.sort_values(['Year','Gold','Nation'], ascending = [True, False,True]) gold_medals_df.reset_index(inplace=True) gold_medals_df.head(10) # + # Filter the dataFrame for Silver medals silver_medals_df = filtered_df[filtered_df['Medals'] == 'Silver'] # Rename 'Medal' 
column as 'Silver' silver_medals_df = silver_medals_df.rename(columns={'Medals': 'Silver'}) # Sort the dataframe by Year silver_medals_df = silver_medals_df.sort_values(['Year']) # Groupby 'Year' and 'NOC' and find count of Silver medals silver_medals_df = silver_medals_df.groupby(['Year','Season','Nation'], sort = False).count()['Silver'] silver_medals_df = pd.DataFrame(silver_medals_df) # Sort the dataframe by 'Year'(ascending) and 'Silver'(descending) silver_medals_df = silver_medals_df.sort_values(['Year','Silver','Nation'], ascending = [True, False,True]) silver_medals_df.reset_index(inplace=True) silver_medals_df.head(10) # + # Filter the dataFrame for Bronze medals bronze_medals_df = filtered_df[filtered_df['Medals'] == 'Bronze'] # Rename 'Medal' column as 'Bronze' bronze_medals_df = bronze_medals_df.rename(columns={'Medals': 'Bronze'}) # Sort the dataframe by Year bronze_medals_df = bronze_medals_df.sort_values(['Year']) # Groupby 'Year' and 'NOC' and find count of Bronze medals bronze_medals_df = bronze_medals_df.groupby(['Year','Season','Nation'], sort = False).count()['Bronze'] bronze_medals_df = pd.DataFrame(bronze_medals_df) # Sort the dataframe by 'Year'(ascending) and 'Silver'(descending) bronze_medals_df = bronze_medals_df.sort_values(['Year','Bronze','Nation'], ascending = [True, False,True]) bronze_medals_df.reset_index(inplace=True) bronze_medals_df.head(10) # + # Merge Gold and Silver gold_silver_df = gold_medals_df.merge(silver_medals_df, how='outer').fillna(0) gold_silver_df.head(10) # + # Merge Bronze to Gold and Silver all_medals_df = gold_silver_df.merge(bronze_medals_df, how='outer').fillna(0) all_medals_df.head(10) # + all_medals_df['Medals'] = all_medals_df['Gold'] + all_medals_df['Silver'] + all_medals_df['Bronze'] all_medals_df = all_medals_df.sort_values(['Year','Medals','Nation'], ascending = [True,False,True]) all_medals_df = all_medals_df.reset_index(drop=True) all_medals_df.head(10) # + all_medals_df['Medals'] = 
pd.to_numeric(all_medals_df['Medals'],downcast='integer') all_medals_df['Gold'] = pd.to_numeric(all_medals_df['Gold'],downcast='integer') all_medals_df['Silver'] = pd.to_numeric(all_medals_df['Silver'],downcast='integer') all_medals_df['Bronze'] = pd.to_numeric(all_medals_df['Bronze'],downcast='integer') all_medals_df # + # Saving it as csv file all_medals_df.to_csv('../static/assets/output/GSB-Medals-1960-2016.csv',index = False) # + import csv import json with open('../static/assets/output/GSB_MedalsByYear.csv') as f: reader = csv.DictReader(f) rows = list(reader) with open('../static/js/GSB-Medals-1960-2016.json', 'w') as f: json.dump(rows, f) # -
Flask-API/Python code/Medals-1960-2016.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # Importing the libraries import numpy as np import matplotlib.pyplot as plt import pandas as pd # Importing the dataset dataset = pd.read_csv('Mall_Customers.csv') dataset.head() #We are going to cluster our customers based on Annual Income and Spending Scores X = dataset.iloc[:, [3, 4]].values # #### We shall consider Agglomerative (Bottom Up) Approach for Hierarchical Clustering. # Using the dendrogram to find the optimal number of clusters. Dendogram works as memory capturing measure of # dissimilarity between clusters. import scipy.cluster.hierarchy as sch dendogram = sch.dendrogram(sch.linkage(X, method ='ward')) # Ward is for minimizing within-cluster variance plt.title('Dendogram') plt.xlabel('Customers') plt.ylabel('Euclidean Distance') plt.show() # #### For Threshold in dendogram we consider the longest vertical line that doesn't cross any extended horizontal line. # #### Here following this process, it gives us 5 clusters. # Fitting Hierarchical Clustering to the dataset, using euclidean distance linkage from sklearn.cluster import AgglomerativeClustering hc = AgglomerativeClustering(n_clusters = 5, affinity = 'euclidean', linkage= 'ward') y_hc = hc.fit_predict(X) #Visualising the clusters plt.scatter(X[y_hc == 0, 0], X[y_hc == 0, 1], s=100, c= 'red', label= 'Careful') plt.scatter(X[y_hc == 1, 0], X[y_hc == 1, 1], s = 100, c = 'blue', label = 'Standard') plt.scatter(X[y_hc == 2, 0], X[y_hc == 2, 1], s = 100, c = 'green', label = 'Target') plt.scatter(X[y_hc == 3, 0], X[y_hc == 3, 1], s = 100, c = 'cyan', label = 'Careless') plt.scatter(X[y_hc == 4, 0], X[y_hc == 4, 1], s = 100, c = 'magenta', label = 'Sensible') plt.title('Clusters of customers') plt.xlabel('Annual Income (k$)') plt.ylabel('Spending Score (1-100)') plt.legend() plt.show()
Clustering/Hierarchical Clustering/P15.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # name: python3 # --- # + [markdown] id="view-in-github" colab_type="text" # <a href="https://colab.research.google.com/github/islam-mirajul/Spinach-Recognition-Using-Convolutional-neural-networks/blob/main/Spinach-Recognition(Xception).ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a> # + id="HJtbJ9tfyUSh" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="f112425d-64c9-42fc-aa1b-c11e7c177df9" import tensorflow as tf print("TF Version:", tf.__version__) # + id="yLd_S3GTzeH4" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="e2657fb8-ff81-4382-c3ff-4e40d2473a43" import tensorflow_hub as hub print("TF hub Version:", hub.__version__) # + id="RBGpJaWjz1gM" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="0a4c4b05-e639-4c31-f10c-c97dcfc27d25" print("GPU", "availabe" if tf.config.list_physical_devices("GPU") else "not availabe") # + id="dLn96sL3z4hM" import warnings warnings.filterwarnings('always') warnings.filterwarnings('ignore') # + id="Zfd9ZSeGz64b" colab={"base_uri": "https://localhost:8080/", "height": 476} outputId="8c6ad4d0-5a31-4428-a1f0-a4418035aebb" from tensorflow.python.client import device_lib device_lib.list_local_devices() # + id="HKZ0PvXjz9Nj" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="679811ce-23a0-4255-cdb1-2cfec962ec0f" import os import numpy as np np.random.seed(777) import keras import keras.backend as K from keras.preprocessing.image import ImageDataGenerator from keras.optimizers import Adam, SGD, RMSprop from keras.layers import Flatten, Dense, Activation, Dropout, Conv2D, MaxPooling2D, BatchNormalization, GlobalAveragePooling2D from keras.models import Model from keras.applications.xception import Xception, 
preprocess_input from keras.layers.core import Dropout, Dense from keras.models import Model, load_model from keras.initializers import he_normal, glorot_normal from keras import optimizers from keras.preprocessing.image import ImageDataGenerator from keras.models import Sequential from keras.regularizers import l2 import matplotlib.pyplot as plt # %matplotlib inline plt.rcParams["axes.grid"] = False plt.rcParams.update({'font.size': 20}) # + id="o5I9DF3u0KJ7" output_classes = 5 batch_size = 16 img_height, img_width = 224, 224 input_shape = (img_height, img_width, 3) epochs = 10 nb_train_samples =3028 nb_test_samples =757 # + id="6qdsWdHU0TcC" train_dir = '/content/drive/My Drive/Spinach Classification/Train set' test_dir = '/content/drive/My Drive/Spinach Classification/Test set' # + id="ShxbBHUJ0bcr" colab={"base_uri": "https://localhost:8080/", "height": 238} outputId="a7325fe1-41f7-47eb-f93b-a3a8949a9d0c" for root,dirs,files in os.walk(train_dir): print (root, len(files)) print("*"*30) for root,dirs,files in os.walk(test_dir): print (root, len(files)) # + id="h-AdQ_Eo0eBu" colab={"base_uri": "https://localhost:8080/", "height": 51} outputId="f403c7c3-1e03-4164-ca8c-6b7785e559cf" random_seed = np.random.seed(1142) train_datagen = ImageDataGenerator( rescale=1. / 255, featurewise_center=True, featurewise_std_normalization=True) train_generator = train_datagen.flow_from_directory( train_dir, target_size=(img_height, img_width), batch_size=batch_size, seed = random_seed, shuffle = True, class_mode='categorical') test_datagen = ImageDataGenerator(rescale=1. 
/ 255) test_generator = test_datagen.flow_from_directory( test_dir, target_size=(img_height, img_width), batch_size=batch_size, seed = random_seed, shuffle = False, class_mode='categorical') # + id="PghZ1GjS0roG" sgd_opt = SGD(lr = 0.02, decay=75e-6, momentum=0.9, nesterov=True) adam_opt = Adam(lr=1e-5, beta_1=0.9, beta_2=0.999, epsilon=1e-08, decay=1e-5) sgd_opt = SGD(lr=1e-06, momentum=0.0, decay=0.0, nesterov=False) rmsp_opt = RMSprop(lr=1e-4, decay=0.9) eve_opt = Eve(lr=1e-4, decay=1E-4, beta_1=0.9, beta_2=0.999, beta_3=0.999, small_k=0.1, big_K=10, epsilon=1e-08) # + id="lp_t_0Wf0xXY" colab={"base_uri": "https://localhost:8080/", "height": 51} outputId="3bb6f4e6-1f2f-44bc-c193-1b870b2fbaa1" model = Sequential() model.add(Xception(weights = "imagenet", include_top=False, pooling = 'avg')) model.add(Dropout(rate=0.5)) model.add(Dense(units=output_classes, activation='softmax')) model.layers[0].trainable = True # + id="PtaNlO6Z0zvq" colab={"base_uri": "https://localhost:8080/", "height": 255} outputId="6a19d60a-b5f3-4c0f-e7fe-c6ada9adccfa" model.compile(optimizer= adam_opt, loss = 'categorical_crossentropy', metrics=['accuracy']) model.summary() # + id="_Zgo-mWd03Ts" colab={"base_uri": "https://localhost:8080/", "height": 357} outputId="7af3f3b4-b4bd-485c-8701-40447565720f" history = model.fit_generator( train_generator, steps_per_epoch = nb_train_samples // batch_size, epochs = epochs, validation_data = test_generator, validation_steps = nb_test_samples // batch_size) # + [markdown] id="F7SbqnHEVMJK" # # + id="Ts42H29E05Wf" colab={"base_uri": "https://localhost:8080/", "height": 880} outputId="74fc5e92-167a-4367-9587-32d6e68b2ee3" plt.style.use("ggplot") plt.plot(history.history['accuracy']) plt.plot(history.history['val_accuracy']) plt.title('Model Accuracy') plt.ylabel('Accuracy') plt.xlabel('Epoch') plt.legend(['Training Acc', 'Test Acc'], loc='lower right') plt.show() plt.plot(history.history['loss']) plt.plot(history.history['val_loss']) plt.title('Model 
Loss') plt.ylabel('Loss') plt.xlabel('Epoch') plt.legend(['Training Loss', 'Test Loss'], loc='upper right') plt.show() plt.figure() N = epochs plt.plot(np.arange(0, N), history.history["loss"], label="train_loss") plt.plot(np.arange(0, N), history.history["val_loss"], label="test_loss") plt.plot(np.arange(0, N), history.history["accuracy"], label="train_acc") plt.plot(np.arange(0, N), history.history["val_accuracy"], label="test_acc") plt.title("Training Loss and Accuracy") plt.xlabel("Epoch #") plt.ylabel("Loss/Accuracy") plt.legend(loc="upper left") # + id="FKEMAR4b314c" colab={"base_uri": "https://localhost:8080/", "height": 51} outputId="d873ac89-846c-44f5-d549-075a75477103" score = model.evaluate_generator(test_generator, steps=5) print ('Validation Score: ', score[0]) print ('Validation Accuracy: ',score[1]) # + id="DYrOBvPF5KJa" colab={"base_uri": "https://localhost:8080/", "height": 51} outputId="1bbcbf9d-5884-47f7-c4c1-0fa05a178dcb" filename = test_generator.filenames truth = test_generator.classes label = test_generator.class_indices indexlabel = dict((value, key) for key, value in label.items()) predicts = model.predict_generator(test_generator, steps=test_generator.samples/test_generator.batch_size, verbose=1) predict_class = np.argmax(predicts, axis=1) errors = np.where(predict_class != truth)[0] print("No of errors = {}/{}".format(len(errors),test_generator.samples)) # + id="BmLaqK874hio" colab={"base_uri": "https://localhost:8080/", "height": 840} outputId="77373e42-c50e-4d94-f913-e24bd66cc477" plt.rcParams["axes.grid"] = False plt.rcParams.update({'font.size': 20}) from sklearn.metrics import confusion_matrix cm = confusion_matrix(truth,predict_class) labels = [] for k,v in indexlabel.items(): labels.append(v) import itertools def plot_confusion_matrix(cm, classes, normalize=False, title='Confusion matrix', cmap=plt.cm.Blues): accuracy = np.trace(cm) / float(np.sum(cm)) misclass = 1 - accuracy if normalize: cm = cm.astype('float') / 
cm.sum(axis=1)[:, np.newaxis] print("Normalized confusion matrix") else: print('Confusion Matrix') print(cm) # fig = plt.figure() plt.imshow(cm, interpolation='nearest', cmap=cmap) plt.title(title) plt.colorbar() tick_marks = np.arange(len(classes)) plt.xticks(tick_marks, classes, rotation=90) plt.yticks(tick_marks, classes) fmt = '.2f' if normalize else 'd' thresh = cm.max() / 2. for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])): plt.text(j, i, format(cm[i, j], fmt), horizontalalignment="center", color="white" if cm[i, j] > thresh else "black") plt.tight_layout() plt.ylabel('True label') plt.xlabel('Predicted label\naccuracy={:0.4f}; misclass={:0.4f}'.format(accuracy, misclass)) ##plt.savefig('plots/12.DenseNet-DataSet-CM.png', bbox_inches='tight', dpi = 100) plt.figure(figsize=(10,10)) plot_confusion_matrix(cm, classes=labels, title='Confusion Matrix') # + id="GgVnGfeA7kVc" colab={"base_uri": "https://localhost:8080/", "height": 306} outputId="eaf6676e-d342-4ab2-954e-34836837c243" from sklearn.preprocessing import LabelEncoder from sklearn.metrics import confusion_matrix, classification_report y_pred = predicts y_pred_probabilities=y_pred y_pred = np.argmax(y_pred,axis = 1) y_actual = test_generator.classes classnames=[] for classname in test_generator.class_indices: classnames.append(classname) confusion_mtx = confusion_matrix(y_actual, y_pred) print(confusion_mtx) target_names = classnames print(classification_report(y_actual, y_pred, target_names=target_names)) # + id="u6ZLKw_ZI3ED" colab={"base_uri": "https://localhost:8080/", "height": 68} outputId="6df12f7d-d966-4e53-9f3c-b4df4382a5ab" total=sum(sum(cm)) accuracy = (cm[0,0]+cm[1,1]) / total print ('Accuracy : ', accuracy) sensitivity = cm[0,0]/(cm[0,0]+cm[1,0]) print('Sensitivity : ', sensitivity ) Specificity = cm[1,1]/(cm[1,1]+cm[0,1]) print('Specificity : ', Specificity ) # + id="pNHwquRL9AtF" colab={"base_uri": "https://localhost:8080/", "height": 85} 
outputId="ab09f2f2-b250-4419-b53c-af87d5b5233a" from sklearn.metrics import roc_curve, roc_auc_score from sklearn.metrics import accuracy_score, precision_score, f1_score, confusion_matrix from sklearn.metrics import classification_report, recall_score from sklearn.metrics import precision_recall_curve from sklearn.metrics import average_precision_score th = 0.3 acc = accuracy_score(truth,predict_class > th) prec = precision_score(truth,predict_class > th,average="weighted") f1 = f1_score(truth,predict_class > th,average="weighted") recall = recall_score(truth,predict_class > th,average="weighted") print('Accuracy: {:.4f}'.format(acc)) print('Precision: {:.4f}'.format(prec)) print('Recall: {:.4f}'.format(recall)) print('F1: {:.4f}'.format(f1)) # + id="xEN7L-X2EgOy" model.save('/content/drive/My Drive/Spinach model/Xception-spinach-model.h5') model.save_weights('/content/drive/My Drive/Spinach model/Xception-spinach-Weights.h5')
Spinach-Recognition(Xception).ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# +
from pathlib import Path

import numpy as np
import pandas as pd

import typing as tp
import yaml
import random
import os
import sys

import soundfile as sf
import librosa

import cv2
import matplotlib.pyplot as plt

import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.utils.data as data

import resnest.torch as resnest_torch
from torchvision import models

from sklearn.model_selection import StratifiedKFold
from sklearn.metrics import f1_score
from radam import RAdam

# FIX: both names are used by the augmentation loop below but were never
# imported, so every iteration died with a NameError that the bare
# `except:` silently swallowed -- the run printed 'err!!!' for every file
# and wrote nothing.
from tqdm import tqdm
from scipy.io.wavfile import write

pd.options.display.max_rows = 500
pd.options.display.max_columns = 500

# +
# ROOT = Path.cwd().parent
# INPUT_ROOT = ROOT / "input"
INPUT_ROOT = Path("/home/knikaido/work/Cornell-Birdcall-Identification/data")
RAW_DATA = INPUT_ROOT / "birdsong-recognition"
TRAIN_AUDIO_DIR = RAW_DATA / "train_audio"
TRAIN_RESAMPLED_AUDIO_DIRS = [
    INPUT_ROOT / "birdsong-resampled-train-audio-{:0>2}".format(i) for i in range(5)
]
TEST_AUDIO_DIR = RAW_DATA / "test_audio"
# -

# train = pd.read_csv(RAW_DATA / "train.csv")
train = pd.read_csv(TRAIN_RESAMPLED_AUDIO_DIRS[0] / "train_mod.csv")
train.head().T

TRAIN_RESAMPLED_AUDIO_DIRS = [ INPUT_ROOT / "birdsong-resampled-perc" ]

# +
# Collect every resampled wav that actually exists on disk into a frame of
# (ebird_code, resampled_filename, file_path) rows.
tmp_list = []
for audio_d in TRAIN_RESAMPLED_AUDIO_DIRS:
    if not audio_d.exists():
        continue
    for ebird_d in audio_d.iterdir():
        if ebird_d.is_file():
            continue
        for wav_f in ebird_d.iterdir():
            tmp_list.append([ebird_d.name, wav_f.name, wav_f.as_posix()])

train_wav_path_exist = pd.DataFrame(
    tmp_list, columns=["ebird_code", "resampled_filename", "file_path"])

del tmp_list

train_all = pd.merge(
    train, train_wav_path_exist, on=["ebird_code", "resampled_filename"], how="inner")

print(train.shape)
print(train_wav_path_exist.shape)
print(train_all.shape)

train_all.head()


# -

def my_makedirs(path):
    """Ensure the parent directory of *path* exists.

    FIX: this helper was called in the loop below but never defined,
    which was one of the hidden NameErrors that made the whole run fail
    silently.
    """
    os.makedirs(os.path.dirname(path), exist_ok=True)


# +
save_dir = '/home/knikaido/work/Cornell-Birdcall-Identification/data/birdsong-resampled-perc-aug/'

# Split each training wav into harmonic / percussive components and save the
# percussive part under save_dir, mirroring the source folder layout.
for i, file_name in tqdm(enumerate(train_wav_path_exist['file_path'])):
    try:
        y, sr = sf.read(file_name)
        y_harm, y_perc = librosa.effects.hpss(y, margin=(1.0,5.0))
        save_path = save_dir + file_name.split('/', 7)[-1]
        my_makedirs(save_path)
        write(save_path, sr, y_perc)
    # FIX: narrowed from a bare `except:` so Ctrl-C can still interrupt the
    # loop; a per-file failure is reported but does not abort the run.
    except Exception:
        print('err!!!' + file_name)

#     print(save_dir + file_name.split('/', 7)[-1])
#     print(file_name.split('/', 7))
#     if(i==3): break
0829_make_augment.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: 'Python 3.8.3 64-bit (''3.8.3'': pyenv)'
#     language: python
#     name: python38364bit383pyenv4fc1801d817447c6b9c76f7bacd532a1
# ---

# +
# default_exp sensors
# -

# # Sensors
# > reads sensor data over UART

# #export
import time
import serial # pip install pyserial
import numpy


# +
# #export
class Sensors(object):
    """Reads sensor data line-by-line over a UART serial port.

    Parameters
    ----------
    baudrate : int
        Serial baud rate (default 19200).
    port : str
        Serial device path (default "/dev/ttyTHS0").
    """

    def __init__(self, baudrate=19200, port="/dev/ttyTHS0"):
        self.ser = serial.Serial(
            port=port,
            baudrate=baudrate,
            bytesize=serial.EIGHTBITS,
            parity=serial.PARITY_NONE,
            stopbits=serial.STOPBITS_ONE,
        )
        self.ser.flushInput()
        # FIX: initialize self.data so loopReadSerial can print it even
        # before the first byte arrives (readSerial only assigns it when
        # data is waiting).
        self.data = ""
        time.sleep(1)  # Wait a second to let the port initialize

    def readSerial(self):
        """Read one line into self.data if bytes are waiting on the port."""
        # FIX: the original referenced the bare name `ser`, which does not
        # exist in this scope -- it must be the instance attribute self.ser.
        if self.ser.inWaiting() > 0:
            self.data = self.ser.readline().decode()

    def loopReadSerial(self):
        """Continuously read and print incoming lines until interrupted.

        Closes the port on exit (Ctrl-C, error, or otherwise).
        """
        try:
            print("starting transmission")
            # FIX: `ser.write` -> `self.ser.write` (NameError otherwise).
            self.ser.write("UART Demonstration Program\r\n".encode())
            while True:
                self.readSerial()
                print(self.data)
        except KeyboardInterrupt:
            print("Exiting Program")
        except Exception as exception_error:
            print("Error occurred. Exiting Program")
            print("Error: " + str(exception_error))
        finally:
            self.ser.close()
# -

peripherals = Sensors(baudrate=19200,port="/dev/ttyTHS0")
peripherals.loopReadSerial()
06_sensors.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
# ---

# # Test of `psaw` package

from psaw import PushshiftAPI
import json

# +
api = PushshiftAPI()

gen = api.search_submissions(subreddit = "Overwatch")

# Cap the number of submissions pulled from the (potentially endless) stream.
max_response_cache = 1000
cache = []

for submission in gen:
    cache.append(submission)
    # Drop this cap to actually return all results. Wouldn't recommend it
    # though: could take a while, but you do you.
    if len(cache) >= max_response_cache:
        break
# -

# ## `Submission` object API
#
# The methods `search_submissions` and `search_comments` return `submission`
# and `comment` objects whose properties can be accessed using dot notation.
# All properties are also available as a dictionary via the `.d_` attribute.

s = cache[0]

s.author

s.d_

print(json.dumps(s.d_))
jupyter/psaw_test.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Compare latent space to high-dimensional space import os import pandas as pd import sys import numpy as np from pandas.core.common import flatten import pickle import umap from pathlib import Path import datetime import scipy import matplotlib.pyplot as plt import seaborn as sns import matplotlib from preprocessing_functions import pad_spectro, calc_zscore, create_padded_data from evaluation_functions import get_knn, make_nn_stats_dict, make_statstabs # + wd = os.getcwd() DATA = os.path.join(os.path.sep, str(Path(wd).parents[0]), "data", "processed") FIGURES = os.path.join(os.path.sep, str(Path(wd).parents[0]), "reports", "figures") # + dftype = "reduced" spec_df = pd.read_pickle(os.path.join(os.path.sep, DATA, "df_focal_"+dftype+".pkl")) labels = spec_df.call_lable.values calltypes = sorted(list(set(labels))) specs = spec_df.spectrograms specs = [calc_zscore(x) for x in specs] original_embedding = create_padded_data(specs) embedding = np.loadtxt(os.path.join(os.path.sep, DATA,'basic_UMAP_3D_reduced_default_params.csv'), delimiter=";") # - # ## Compare embedding score S for different k's # + # Make 5 UMAPs n_times=5 embeddings_dict={} for i in range(n_times): reducer = umap.UMAP(min_dist=0, metric= "euclidean", n_components = 3) embeddings_dict[i] = reducer.fit_transform(original_embedding) # + # UMAP space #ks = [1,3,5,8,10,15,20,30,40] ks = [1,3,5,8,10,15,20,30,40,50,100,200] metrics_tab = np.zeros((len(ks)*n_times,2)) i=0 for embedding in embeddings_dict.values(): indices, distances = get_knn(np.max(np.asarray(ks)),embedding) for k_num,k in enumerate(ks): these_indices = [x[0:k] for x in indices] nn_stats_dict = make_nn_stats_dict(calltypes, labels, these_indices) stats_tab, stats_tab_norm = make_statstabs(nn_stats_dict, calltypes, labels, 
k) metrics_tab[i,0] = np.mean(np.diagonal(stats_tab.iloc[:-1,])) metrics_tab[i,1] = np.mean(np.diagonal(stats_tab_norm)) i+=1 metrics_tab_df = pd.DataFrame(metrics_tab) metrics_tab_df['k'] = ks*n_times metrics_tab_df.columns = ['S', 'Snorm', 'k'] means_df = metrics_tab_df.groupby('k').mean() stds_df = metrics_tab_df.groupby('k').std() # Original space metrics_tab_f = np.zeros((len(ks),2)) indices, distances = get_knn(np.max(np.asarray(ks)),original_embedding) for k_num,k in enumerate(ks): these_indices = [x[0:k] for x in indices] nn_stats_dict = make_nn_stats_dict(calltypes, labels, these_indices) stats_tab, stats_tab_norm = make_statstabs(nn_stats_dict, calltypes, labels,k) metrics_tab_f[k_num,0] = np.mean(np.diagonal(stats_tab.iloc[:-1,])) metrics_tab_f[k_num,1] = np.mean(np.diagonal(stats_tab_norm)) metrics_tab_f_df = pd.DataFrame(metrics_tab_f) metrics_tab_f_df['k'] = ks metrics_tab_f_df.columns = ['S', 'Snorm', 'k'] # + # Original space metrics_tab_f = np.zeros((len(ks),2)) indices, distances = get_knn(np.max(np.asarray(ks)),original_embedding) for k_num,k in enumerate(ks): these_indices = [x[0:k] for x in indices] nn_stats_dict = make_nn_stats_dict(calltypes, labels, these_indices) stats_tab, stats_tab_norm = make_statstabs(nn_stats_dict, calltypes, labels,k) metrics_tab_f[k_num,0] = np.mean(np.diagonal(stats_tab.iloc[:-1,])) metrics_tab_f[k_num,1] = np.mean(np.diagonal(stats_tab_norm)) metrics_tab_f_df = pd.DataFrame(metrics_tab_f) metrics_tab_f_df['k'] = ks metrics_tab_f_df.columns = ['S', 'Snorm', 'k'] # - metrics_tab_f_df means_df # + var ='S' plt.plot(ks, metrics_tab_f_df[var], marker='o',linewidth=2, markersize=4, color ='red',label='Original') plt.plot(ks, means_df[var], color = 'red', marker='o', linestyle='dotted', linewidth=2, markersize=4,label='UMAP') plt.errorbar(ks, means_df[var], yerr=stds_df[var], linestyle='dotted', capsize=5, color = 'red') plt.legend(numpoints=2) plt.ylabel('Embedding score '+var) plt.xlabel('K nearest neighbors') 
plt.ylim(40,72) outname=os.path.join(os.path.sep, FIGURES, datetime.datetime.today().strftime('%Y-%m-%d')+'_comparison_to_original.jpg') print(outname) plt.savefig(outname) # - # ## Original space evaluation matrix from evaluation_functions import nn, sil nn_stats = nn(original_embedding, np.asarray(labels), k=5) print("Log final metric (unweighted):",nn_stats.get_S()) print("Abs final metric (unweighted):",nn_stats.get_Snorm()) outname = os.path.join(os.path.sep, FIGURES, 'heatS_UMAP_'+dftype+'_original.png') print(outname) nn_stats.plot_heat_S(outname=outname) outname = os.path.join(os.path.sep, FIGURES, 'heatSnorm_UMAP_'+dftype+'_original.png') print(outname) nn_stats.plot_heat_Snorm(outname=outname) nn_stats_umap = nn(embedding, np.asarray(labels), k=5) import string # + fig, axes = plt.subplots(2,2, figsize=(10,10)) norm_palette = sns.diverging_palette(h_neg=275,s=80,l=55, h_pos=150, as_cmap=True) abs_palette = sns.color_palette("Greens", as_cmap=True) p1 = sns.heatmap(nn_stats.get_statstab(), annot=True, vmin=0, vmax=100, center=50, cmap=abs_palette, cbar=None, ax=axes[0][0], annot_kws={"size":14}) p1.set_xlabel("Neighbor label") p1.set_ylabel("Datapoint label") p1.axhline(y=len(set(labels)), color="black") p1.text(-0.1, 1.1, string.ascii_uppercase[0], transform=axes[0][0].transAxes, size=20, weight='bold') p1.text(0.4, 1.1, "Original", transform=axes[0][0].transAxes, size=20) p2 = sns.heatmap(nn_stats_umap.get_statstab(), annot=True, vmin=0, vmax=100, center=50, cmap=abs_palette, cbar=None, ax=axes[0][1], annot_kws={"size":14}) p2.set_xlabel("Neighbor label") p2.set_ylabel("Datapoint label") p2.axhline(y=len(set(labels)), color="black") p2.text(-0.1, 1.1, string.ascii_uppercase[1], transform=axes[0][1].transAxes, size=20, weight='bold') p2.text(0.4, 1.1, "UMAP", transform=axes[0][1].transAxes, size=20) p3 = sns.heatmap(nn_stats.get_statstabnorm(), annot=True, vmin=-13, vmax=13, center=0, cmap=norm_palette, cbar=None, ax=axes[1][0], annot_kws={"size":14}) 
p3.set_xlabel("Neighbor label") p3.set_ylabel("Datapoint label") p3.text(-0.1, 1.1, string.ascii_uppercase[2], transform=axes[1][0].transAxes, size=20, weight='bold') p4 = sns.heatmap(nn_stats_umap.get_statstabnorm(), annot=True, vmin=-13, vmax=13, center=0, cmap=norm_palette, cbar=None, ax=axes[1][1], annot_kws={"size":14}) p4.set_xlabel("Neighbor label") p4.set_ylabel("Datapoint label") p4.text(-0.1, 1.1, string.ascii_uppercase[3], transform=axes[1][1].transAxes, size=20, weight='bold') plt.tight_layout() plt.savefig(os.path.join(os.path.sep, FIGURES, 'paper_eval_matrices.png'), facecolor="white") # - # ## Original space Silhouette plot sil_stats = sil(original_embedding, labels) # + outname = os.path.join(os.path.sep, FIGURES, 'silplot_UMAP_'+dftype+'_original.png') print(outname) sil_stats.plot_sil(outname=outname) # - sil_stats.get_avrg_score() sil_stats_UMAP = sil(embedding, labels) from sklearn.metrics import silhouette_samples, silhouette_score import string # + labeltypes = sorted(list(set(labels))) n_clusters = len(labeltypes) fig, (ax2, ax1) = plt.subplots(1, 2) fig.set_size_inches(15, 7) ax1.set_xlim([-1, 1]) ax1.set_ylim([0, embedding.shape[0] + (n_clusters + 1) * 10]) #cluster_labels = umap_df['HDBSCAN'] cluster_labels = labels silhouette_avg = silhouette_score(embedding, cluster_labels) # Compute the silhouette scores for each sample sample_silhouette_values = silhouette_samples(embedding, cluster_labels) y_lower = 10 pal = sns.color_palette("Set2", n_colors=len(labeltypes)) color_dict = dict(zip(labeltypes, pal)) for i, cluster_label in enumerate(labeltypes): ith_cluster_silhouette_values = sample_silhouette_values[cluster_labels == cluster_label] ith_cluster_silhouette_values.sort() size_cluster_i = ith_cluster_silhouette_values.shape[0] y_upper = y_lower + size_cluster_i ax1.fill_betweenx(np.arange(y_lower, y_upper), 0, ith_cluster_silhouette_values, facecolor=color_dict[cluster_label], edgecolor=color_dict[cluster_label], alpha=0.7) 
ax1.text(-0.05, y_lower + 0.5 * size_cluster_i, cluster_label) # Compute the new y_lower for next plot y_lower = y_upper + 10 # 10 for the 0 samples ax1.set_xlabel("Silhouette value") ax1.set_ylabel("Cluster label") # The vertical line for average silhouette score of all the values ax1.axvline(x=silhouette_avg, color="red", linestyle="--") ax1.text(-0.1, 1.1, string.ascii_uppercase[1], transform=ax1.transAxes, size=20, weight='bold') ax1.text(0.4, 1.1, "UMAP", transform=ax1.transAxes, size=20) ##### **************************************************** ax2.set_xlim([-1, 1]) ax2.set_ylim([0, original_embedding.shape[0] + (n_clusters + 1) * 10]) #cluster_labels = umap_df['HDBSCAN'] cluster_labels = labels silhouette_avg = silhouette_score(original_embedding, cluster_labels) # Compute the silhouette scores for each sample sample_silhouette_values = silhouette_samples(original_embedding, cluster_labels) y_lower = 10 pal = sns.color_palette("Set2", n_colors=len(labeltypes)) color_dict = dict(zip(labeltypes, pal)) for i, cluster_label in enumerate(labeltypes): ith_cluster_silhouette_values = sample_silhouette_values[cluster_labels == cluster_label] ith_cluster_silhouette_values.sort() size_cluster_i = ith_cluster_silhouette_values.shape[0] y_upper = y_lower + size_cluster_i ax2.fill_betweenx(np.arange(y_lower, y_upper), 0, ith_cluster_silhouette_values, facecolor=color_dict[cluster_label], edgecolor=color_dict[cluster_label], alpha=0.7) ax2.text(-0.05, y_lower + 0.5 * size_cluster_i, cluster_label) # Compute the new y_lower for next plot y_lower = y_upper + 10 # 10 for the 0 samples #ax2.set_title("Original") ax2.set_xlabel("Silhouette value") ax2.set_ylabel("Cluster label") # The vertical line for average silhouette score of all the values ax2.axvline(x=silhouette_avg, color="red", linestyle="--") ax2.text(-0.1, 1.1, string.ascii_uppercase[0], transform=ax2.transAxes, size=20, weight='bold') ax2.text(0.4, 1.1, "Original", transform=ax2.transAxes, size=20) #plt.show() 
plt.savefig(os.path.join(os.path.sep,FIGURES,'paper_SIL_UMAP_original.png'), facecolor="white") # - # ## Original space between and within distances from scipy.spatial.distance import pdist, squareform # + distmat_original = squareform(pdist(original_embedding, metric='euclidean')) labels = np.asarray(labels) self_dists={} other_dists={} for calltype in calltypes: x=distmat_original[np.where(labels==calltype)] x = np.transpose(x) y = x[np.where(labels==calltype)] self_dists[calltype] = y[np.triu_indices(n=y.shape[0], m=y.shape[1],k = 1)] y = x[np.where(labels!=calltype)] other_dists[calltype] = y[np.triu_indices(n=y.shape[0], m=y.shape[1], k = 1)] # + plt.figure(figsize=(8, 8)) i=1 xmin=20 xmax=90 nbins=50 for calltype in calltypes: plt.subplot(4, 2, i) n, bins, patches = plt.hist(x=self_dists[calltype], label="within", density=True, #bins='auto', color='green', bins=np.linspace(xmin, xmax, nbins), color='green', alpha=0.5, rwidth=0.85) plt.vlines(x=np.mean(self_dists[calltype]),ymin=0,ymax=0.5,color='green', linestyles='dotted') n, bins, patches = plt.hist(x=other_dists[calltype], label="between", density=True, bins=np.linspace(xmin, xmax, nbins), color='purple', #bins='auto', color='red', alpha=0.5, rwidth=0.85) plt.vlines(x=np.mean(other_dists[calltype]),ymin=0,ymax=0.5,color='purple', linestyles='dotted') s, pval = scipy.stats.ttest_ind(self_dists[calltype],other_dists[calltype]) if pval<0.01: pval = "p<0.01" else: pval = "p="+str(round(pval,2)) if i==2: plt.legend(loc='upper left', bbox_to_anchor=(1.1,1.05)) plt.grid(axis='y', alpha=0.75) #plt.title(calltype+" ("+pval+")") plt.title(calltype) #plt.xlim(xmin,xmax) plt.ylim(0, 0.2) if i==len(calltypes): plt.ylabel('Density') plt.xlabel('Euclidean distance in original space') i=i+1 #plt.subplot(4,2,i) #plt.legend() plt.tight_layout() outname=os.path.join(os.path.sep, FIGURES, 'paper_distanceswithinwithout_reduced_original.jpg') print(outname) plt.savefig(outname) # + # Do for UMAP space for comparison distmat = 
squareform(pdist(embedding, metric='euclidean')) self_dists_embedded={} other_dists_embedded={} for calltype in calltypes: x=distmat[np.where(labels==calltype)] x = np.transpose(x) y = x[np.where(labels==calltype)] self_dists_embedded[calltype] = y[np.triu_indices(n=y.shape[0], m=y.shape[1],k = 1)] y = x[np.where(labels!=calltype)] other_dists_embedded[calltype] = y[np.triu_indices(n=y.shape[0], m=y.shape[1], k = 1)] # + index_number = [1,2,5,6,9,10,13,3,4,7,8,11,12,15] plt.figure(figsize=(16, 8)) xmin=20 xmax=90 nbins=50 ncols=4 nrows=4 dens = True for i, calltype in zip(index_number[0:8], calltypes): plt.subplot(nrows, ncols, i) n, bins, patches = plt.hist(x=self_dists[calltype], label="within", density=dens, bins=np.linspace(xmin, xmax, nbins), color='green', alpha=0.5)#, rwidth=0.85) plt.vlines(x=np.mean(self_dists[calltype]),ymin=0,ymax=0.5,color='green', linestyles='dotted') n, bins, patches = plt.hist(x=other_dists[calltype], label="between", density=dens, bins=np.linspace(xmin, xmax, nbins), color='purple', alpha=0.5)#, rwidth=0.85) plt.vlines(x=np.mean(other_dists[calltype]),ymin=0,ymax=0.5,color='purple', linestyles='dotted') plt.grid(axis='y', alpha=0.75) plt.title(calltype) #plt.xlim(20,80) plt.ylim(0, 0.1) plt.yticks([0,0.05,0.1], ['0.00','0.05','0.10']) if i==13: plt.xlabel('Euclidean distance') if i in [1,5,9,13]: plt.ylabel('Density') xmin=0 xmax=12 for i, calltype in zip(index_number[7:], calltypes): plt.subplot(nrows, ncols, i) n, bins, patches = plt.hist(x=self_dists_embedded[calltype], label="within", density=dens, bins=np.linspace(xmin, xmax, nbins), color='green', alpha=0.5)#, rwidth=0.85) plt.vlines(x=np.mean(self_dists_embedded[calltype]),ymin=0,ymax=0.5,color='green', linestyles='dotted') n, bins, patches = plt.hist(x=other_dists_embedded[calltype], label="between", density=dens, bins=np.linspace(xmin, xmax, nbins), color='purple', alpha=0.5)#,rwidth=0.85) plt.vlines(x=np.mean(other_dists_embedded[calltype]),ymin=0,ymax=0.5,color='purple', 
linestyles='dotted') if i==4: plt.legend(loc='upper left', bbox_to_anchor=(1,1.05)) plt.grid(axis='y', alpha=0.75) plt.title(calltype) plt.ylim(0, 0.5) if i==15: plt.xlabel('Euclidean distance') if i in [3,7,11,15]: plt.ylabel('Density') plt.figtext(0.2,1, "Original", size=20) plt.figtext(0.68,1, "UMAP", size=20) plt.figtext(0.01,1, "A", size=20, weight='bold') plt.figtext(0.47,1, "B", size=20, weight='bold') plt.tight_layout() outname=os.path.join(os.path.sep, FIGURES, 'paper_distanceswithinwithout_reduced_original.jpg') plt.savefig(outname, facecolor="white", bbox_inches='tight') # - # # Evaluation of structure preservation in embedding # ## Nearest neighbor preservation from evaluation_functions import get_knn embedding_filename = os.path.join(os.path.sep, DATA,'basic_UMAP_3D_'+dftype+'_default_params.csv') print(embedding_filename) embedding = np.loadtxt(embedding_filename, delimiter=";") k=10 indices, distances = get_knn(k,embedding) indices_original, distance_original = get_knn(k, original_embedding) for k in [1,2,3,4,5,6,7,8,9,10,15,20,30]: indices, distances = get_knn(k,embedding) indices_original, distance_original = get_knn(k, original_embedding) count_agree = 0 count_disagree = 0 for i in range(indices_original.shape[0]): for j in range(indices_original.shape[1]): # if original neighbor is also among nearest neighbors in UMAP space (indices)... 
if (indices_original[i,j] in indices[i,:]): count_agree += 1 # ...increase agreement_count else: count_disagree += 1 print(k, ": ", count_agree, "/", count_disagree) print(k, ": ", count_agree/(count_agree+count_disagree)) for k in [50,100]: indices, distances = get_knn(k,embedding) indices_original, distance_original = get_knn(k, original_embedding) count_agree = 0 count_disagree = 0 for i in range(indices_original.shape[0]): for j in range(indices_original.shape[1]): if (indices_original[i,j] in indices[i,:]): count_agree += 1 else: count_disagree += 1 print(k, ": ", count_agree, "/", count_disagree) print(k, ": ", count_agree/(count_agree+count_disagree)) # + # EXTRA: Check how often neighbors in original space are at least among the k*2 nearest neighbors in UMAP space for k in [1,2,3,4,5,6,7,8,9,10,15,20,30, 50, 100]: indices, distances = get_knn(k*2,embedding) indices_original, distance_original = get_knn(k, original_embedding) count_agree = 0 count_disagree = 0 for i in range(indices_original.shape[0]): for j in range(indices_original.shape[1]): if (indices_original[i,j] in indices[i,:]): count_agree += 1 else: count_disagree += 1 print(k, ": ", count_agree, "/", count_disagree) print(k, ": ", count_agree/(k*original_embedding.shape[0])) # - # ## Mantel Test # Mantel test assesses the correlation between distance matrices --> use it to compare Euclidean distance in original space vs. in UMAP space. 
# from skbio.stats.distance import mantel from scipy.spatial.distance import pdist, squareform distmat = squareform(pdist(embedding, metric='euclidean')) original_distmat = squareform(pdist(original_embedding, metric='euclidean')) r,p,n = mantel(original_distmat, distmat, permutations=100, method='pearson') print("Correlation coefficient: ", r) print("P-value: ",p) r,p,n = mantel(original_distmat, distmat, permutations=100, method='spearman') print("Correlation coefficient: ", r) print("P-value: ",p) r,p,n = mantel(original_distmat, distmat, method='pearson') print("Correlation coefficient: ", r) print("P-value: ",p) # + #r,p,n = mantel(original_distmat, distmat, method='spearman') #print("Correlation coefficient: ", r) #print("P-value: ",p) # - # ## Becht et al. figure # Box plots represent distances across pairs of points in the embeddings, binned using 50 equal-width bins over the pairwise distances in the original space using 10,000 randomly selected points, leading to 49,995,000 pairs of pairwise distances. The last row of graphs represents counts of pairwise distances in each bin of distances from the original space as histograms. The value of the Pearson correlation coefficient computed over the pairs of pairwise distances is reported. For the box plots, the central bar represents the median, and the top and bottom boundary of the boxes represent the 75th and 25th percentiles, respectively. The whiskers represent 1.5 times the interquartile range above (or, respectively, below) the top (or, respectively, bottom) box boundary, truncated to the data range if applicable. 
# # https://www.nature.com/articles/nbt.4314#Sec7 # # https://www.biorxiv.org/content/10.1101/2019.12.19.877522v1 # # # + # Generate 50 equal-width bins from the pairwise-distances in original space # and assign each pairwise distance a bin (or make mask for each bin) # Use the same mask to filter the paiwirse distances in embedded space # Boxplot of the average distance in that area # Plot all 50 boxplots next to each other # + # Concatenate all relevant ones all_dists = [] for i in range(original_distmat.shape[0]): all_dists.append(original_distmat[i,0:i]) all_dists_vec = np.hstack(all_dists).flatten() # + all_dists_embedded = [] for i in range(distmat.shape[0]): all_dists_embedded.append(distmat[i,0:i]) all_dists_embedded_vec = np.hstack(all_dists_embedded).flatten() # - fig = plt.figure(1, figsize=(9, 6)) n, bins, batches = plt.hist(all_dists_vec, bins=50, color='grey') plt.title('Original space') plt.xlabel('Pairwise distance') fig.savefig(os.path.join(os.path.sep,FIGURES,'dist_hist_original.png'), bbox_inches='tight') fig = plt.figure(1, figsize=(9, 6)) plt.title('UMAP space') plt.xlabel('Pairwise distance') n, bins, batches = plt.hist(all_dists_embedded_vec, bins=50, color='grey') fig.savefig(os.path.join(os.path.sep,FIGURES,'dist_hist_umap.png'), bbox_inches='tight') bins = np.linspace(np.min(all_dists_vec), np.max(all_dists_vec), 50) bin_labels = np.digitize(all_dists_vec, bins) av_bin_labels = set(bin_labels) bin_data_dict = {} for bin_label in av_bin_labels: bin_data_dict[bin_label] = all_dists_embedded_vec[np.where(bin_labels==bin_label)] for i in np.arange(1,51,1): if i not in av_bin_labels: # print(i) bin_data_dict[i] = np.asarray([]) # + fig = plt.figure(1, figsize=(9, 6)) # Create an axes instance ax = fig.add_subplot(111) # Create the boxplot bp = ax.boxplot(bin_data_dict.values(), patch_artist=True, showfliers=False) ax.set_xlabel('Bin') ax.set_ylabel('Euclidean distance in UMAP space') ax.set_xticks(np.arange(5,50,5)) 
ax.set_xticklabels(np.arange(5,50,5)) t = ax.text(2, 12, '$r$=0.34', fontsize=20) # Save the figure fig.savefig(os.path.join(os.path.sep,FIGURES,'dist_corr_no_outliers.png'), bbox_inches='tight') # + all_means = [np.mean(bin_data_dict[x]) for x in bin_data_dict.keys()] plt.plot(np.arange(1,51,1), all_means) plt.ylim(0,12) # + import string plt.figure(figsize=(12,8)) ax1 = plt.subplot(2,2,3) n, bins, batches = plt.hist(all_dists_vec, bins=50, color='grey', density=True) plt.title('Original space') plt.xlabel('Pairwise distance') ax1.set_ylabel('Density') ax1.text(-0.1, 1.1, string.ascii_uppercase[1], transform=ax1.transAxes, size=20, weight='bold') ax2 = plt.subplot(2,2,4) ax2.set_title('UMAP space') ax2.set_ylabel('Density') ax2.set_xlabel('Pairwise distance') n, bins, batches = plt.hist(all_dists_embedded_vec, bins=50, color='grey', density=True) ax2.text(-0.1, 1.1, string.ascii_uppercase[2], transform=ax2.transAxes, size=20, weight='bold') ax3 = plt.subplot(2,1,1) bp = ax3.boxplot(bin_data_dict.values(), patch_artist=True, showfliers=False) ax3.set_xlabel('Bin') ax3.set_ylabel('Euclidean distance in UMAP space') ax3.set_xticks(np.arange(5,50,5)) ax3.set_xticklabels(np.arange(5,50,5)) t = ax3.text(2, 12, '$r$=0.35', fontsize=20) ax3.text(-0.04, 1.1, string.ascii_uppercase[0], transform=ax3.transAxes, size=20, weight='bold') plt.tight_layout() plt.savefig(os.path.join(os.path.sep,FIGURES, 'becht_plot.jpg'), facecolor="white") # -
notebooks/03_compare_with_highdim_space.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python (machine_learning) # language: python # name: machine_learning # --- from bs4 import BeautifulSoup import os import re from IPython.core.debugger import set_trace from transformers import BertTokenizerFast import copy from tqdm import tqdm import html from pprint import pprint import glob import json import time # # Data Preprocessing project_root = "/home/wangyucheng/workplace/notebook/research/nested_ner" ori_data_dir = os.path.join(project_root, "ori_data") preprocessed_data_dir = os.path.join(project_root, "preprocessed_data") exp_name = "genia" genia_path = os.path.join(ori_data_dir, "GENIA_term_3.02", "GENIAcorpus3.02.xml") soup = BeautifulSoup(open(genia_path, "r", encoding = "utf-8"), "lxml") article_list = soup.select("set > article") len(article_list) model_path = "/home/wangyucheng/opt/transformers_models_h5/bert-base-cased" tokenizer = BertTokenizerFast.from_pretrained(model_path, add_special_tokens = False, do_lower_case = False) def get_char_ind2tok_ind(tok2char_span): char_num = None for tok_ind in range(len(tok2char_span) - 1, -1, -1): if tok2char_span[tok_ind][1] != 0: char_num = tok2char_span[tok_ind][1] break char_ind2tok_ind = [0 for _ in range(char_num)] # 除了空格,其他字符均有对应token for tok_ind, sp in enumerate(tok2char_span): for char_ind in range(sp[0], sp[1]): char_ind2tok_ind[char_ind] = tok_ind return char_ind2tok_ind def convert_to_dict(article): ''' article: article tag return: article_dict: { "id": medline_id, "text": article_text, "entity_list": [(lex, sem, span), ] } ''' article_cp = copy.copy(article) # extract tag fr a copy, avoid removing it from the dom tree medline_id = article_cp.select_one("articleinfo").extract().select_one("bibliomisc").get_text() art_text = article_cp.get_text() article_dict = { "id": medline_id, "text": art_text, } segs = 
re.sub("(<[^>]+>)", r"⺀\1⺀", str(article_cp)).split("⺀") # s中某些符号会被转义,在这里要转义回来,如> &lg; # 如果不转义回来,char pos的计算会错误,如把>算作4个字符(&lg;) # 因为get_text()会自动转义回去 segs = [html.unescape(s) for s in segs if s != ""] # count tokens' position str_w_pos = "" all_char_num = 0 for s in segs: if re.match("<[^>]+>", s): str_w_pos += s continue char_num = len(s) char_pos = [str(all_char_num + i) for i in range(char_num)] if len(char_pos) > 0: str_w_pos += " " + " ".join(char_pos) + " " all_char_num += char_num # print(str_w_pos) # set_trace() # parse terms' spans soup = BeautifulSoup(str_w_pos, "lxml") cons_w_pos_list = soup.select("cons") ori_cons_list = article_cp.select("cons") assert len(cons_w_pos_list) == len(ori_cons_list) # 检查是否影响了原来的标注 term_list = [] offset_map = tokenizer.encode_plus(art_text, return_offsets_mapping = True, add_special_tokens = False)["offset_mapping"] char_ind2tok_ind = get_char_ind2tok_ind(offset_map) for ind, cons in enumerate(cons_w_pos_list): sem_text = "[UNK]" if "sem" not in cons.attrs else cons["sem"] # subtype subtype = re.search("G#[^\s()]+", sem_text) if subtype is not None: subtype = subtype.group().split("#")[1] lex = "[UNK]" if "lex" not in cons.attrs else re.sub("_", " ", cons["lex"]) # position pos_num = cons.get_text().strip().split(" ") span = (int(pos_num[0]), int(pos_num[-1]) + 1) cons_text = ori_cons_list[ind].get_text() term = { "text": cons_text, "lex": lex, "sem": sem_text, "subtype": subtype, "char_span": span, "tok_span": (char_ind2tok_ind[span[0]], char_ind2tok_ind[span[1] - 1] + 1), } term_list.append(term) article_dict["entity_list"] = term_list return article_dict def get_tok2char_span_map(text): tok2char_span = tokenizer.encode_plus(text, return_offsets_mapping = True, add_special_tokens = False)["offset_mapping"] return tok2char_span # check spans for art in tqdm(article_list): art_dict = convert_to_dict(art) art_text = art_dict["text"] tok2char_span = get_tok2char_span_map(art_text) for term in art_dict["entity_list"]: # # check 
char span # char_span = term["char_span"] # pred_text = art_text[char_span[0]:char_span[1]] # assert pred_text == term["text"] # check tok span # # voc 里必须加两个token:hypo, mineralo tok_span = term["tok_span"] char_span_list = tok2char_span[tok_span[0]:tok_span[1]] pred_text = art_text[char_span_list[0][0]:char_span_list[-1][1]] assert pred_text == term["text"] def collapse(article_dict): ''' only keep 5 types: RNA, DNA, protein, cell_type, cell_line ''' new_term_list = [] save_types = {"RNA", "DNA", "protein", "cell_line", "cell_type"} for term in article_dict["entity_list"]: subtype = term["subtype"] if subtype is None: continue type_ = subtype.split("_")[0] if subtype not in {"cell_type", "cell_line"} else subtype if type_ in save_types: term["type"] = type_ new_term_list.append(term) article_dict["entity_list"] = new_term_list # + # for art in tqdm(article_list): # art_dict = convert_to_dict(art) # pprint(art_dict["term_list"]) # print() # collapse(art_dict) # for term in art_dict["term_list"]: # if "type" not in term: # set_trace() # pprint(art_dict["term_list"]) # # print("------------------")) # - # convert to dict article_dict_list = [] for art in tqdm(article_list): art_dict = convert_to_dict(art) collapse(art_dict) article_dict_list.append(art_dict) # split into train and eval set train_num = int(len(article_dict_list) * 0.9) train_data, eval_data = article_dict_list[:train_num], article_dict_list[train_num:] print(len(train_data), len(eval_data)) # # Output exp_path = os.path.join(preprocessed_data_dir, exp_name) if not os.path.exists(exp_path): os.mkdir(exp_path) train_save_path = os.path.join(preprocessed_data_dir, exp_name, "train_data.json") eval_save_path = os.path.join(preprocessed_data_dir, exp_name, "eval_data.json") json.dump(train_data, open(train_save_path, "w", encoding = "utf-8"), ensure_ascii = False) json.dump(eval_data, open(eval_save_path, "w", encoding = "utf-8"), ensure_ascii = False) tags = ["RNA", "DNA", "protein", "cell_line", 
"cell_type"] tag_path = os.path.join(preprocessed_data_dir, exp_name, "tags.json") json.dump(tags, open(tag_path, "w", encoding = "utf-8"), ensure_ascii = False)
yucheng_ner/preprocess/Build_GENIA(mine).ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: dspy3
#     language: python
#     name: dspy3
# ---

# # Advanced Notebook

# +
# %matplotlib inline
import numpy as np
import pandas as pd
# FIX: `pandas.tools.plotting` was a deprecated alias removed in pandas 0.25;
# scatter_matrix lives in `pandas.plotting`.
from pandas.plotting import scatter_matrix

# NOTE(review): sklearn.datasets.load_boston was deprecated in scikit-learn 1.0
# and removed in 1.2 — confirm the pinned scikit-learn version before upgrading.
from sklearn.datasets import load_boston

import matplotlib as mpl
import matplotlib.pyplot as plt
import seaborn as sns

sns.set_context('poster')
sns.set_style('whitegrid')
plt.rcParams['figure.figsize'] = 12, 8  # plotsize

import warnings
warnings.filterwarnings('ignore')
# -

# Assemble the Boston housing data into a single frame: features + MEDV target,
# plus a categorical copy of ZN for the qgrid demo below.
df_dict = load_boston()
features = pd.DataFrame(data=df_dict.data, columns=df_dict.feature_names)
target = pd.DataFrame(data=df_dict.target, columns=['MEDV'])
df = pd.concat([features, target], axis=1)
df['Zone'] = df['ZN'].astype('category')
df.head()

# ## QGrid
#
# Interactive pandas dataframes: https://github.com/quantopian/qgrid

import qgrid
qgrid_widget = qgrid.show_grid(df[['CRIM',
                                   'Zone',
                                   'INDUS',
#                                    'CHAS',
                                   'NOX',
#                                    'RM',
                                   'AGE',
#                                    'DIS',
                                   'RAD',
                                   'TAX',
#                                    'PTRATIO',
#                                    'B',
                                   'LSTAT',
                                   'MEDV',
                                   ]], show_toolbar=True)
qgrid_widget

# Edits made interactively in the widget above are captured here.
df2 = qgrid_widget.get_changed_df()
df2.head()

# # BQPlot
#
# Examples here are shamelessly stolen from the amazing: https://github.com/maartenbreddels/jupytercon-2017/blob/master/jupytercon2017-widgets.ipynb

# mixed feelings about this import — it shadows matplotlib's `plt` alias above
import bqplot.pyplot as plt
import numpy as np

x = np.linspace(0, 2, 50)
y = x**2

fig = plt.figure()
scatter = plt.scatter(x, y)
plt.show()

# bqplot charts are live widgets: mutating traits animates the plot in place.
fig.animation_duration = 500
scatter.y = 2 * x**.5

scatter.selected_style = {'stroke':'red', 'fill': 'orange'}
plt.brush_selector();

scatter.selected

scatter.selected = [1,2,10,40]

# ## ipyvolume

import ipyvolume as ipv

N = 1000
x, y, z = np.random.random((3, N))
fig = ipv.figure()
scatter = ipv.scatter(x, y, z, marker='box')
ipv.show()

# Like bqplot, the 3-D scatter is live — trait assignments update the view.
scatter.x = scatter.x - 0.5
scatter.x = x
scatter.color = "green"
scatter.size = 5

scatter.color = np.random.random((N,3))
scatter.size = 2

ex = ipv.datasets.animated_stream.fetch().data
ex.shape

ex[:, ::, ::4].shape

ipv.figure()
ipv.style.use('dark')
quiver = ipv.quiver(*ipv.datasets.animated_stream.fetch().data[:,::,::4], size=5)
ipv.animation_control(quiver, interval=200)
ipv.show()

ipv.style.use('light')

ipv.style.use('light')

quiver.geo = "cat"

N = 1000*1000
x, y, z = np.random.random((3, N)).astype('f4')
ipv.figure()
s = ipv.scatter(x, y, z, size=0.2)
ipv.show()

ipv.save("3d-example-plot.html")

# !open 3d-example-plot.html
notebooks/Advanced-Notebook-Tricks.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3 (ipykernel)
#     language: python
#     name: python3
# ---

# # Batch anomaly detection with the Anomaly Detector API

# ### Use this Jupyter notebook to start visualizing anomalies as a batch with the Anomaly Detector API in Python.
#
# This notebook shows you how to send a batch anomaly detection request, and visualize the anomalies found throughout the example data set. The graph created at the end of this notebook will display the following:
# * Anomalies found throughout the data set, highlighted.
# * The expected values versus the values contained in the data set.
# * Anomaly detection boundaries

# # Azure Stream Analytics Query
#
# If you are exporting telemetry from IoT Hub/Central to ASA then this query will be a useful starting point
#
# ```sql
# WITH Telemetry AS (
# SELECT
#     deviceId as DeviceId,
#     enrichments.deviceName as DeviceName,
#     telemetry.latitude as Latitude,
#     telemetry.longitude as Longitude,
#     MAX(telemetry.temperature) AS Temperature,
#     AVG(telemetry.humidity) AS Humidity,
#     AVG(telemetry.pressure) AS Pressure,
#     System.Timestamp() as Timestamp,
#     Count(*) as Count
# FROM [weather-eh] TIMESTAMP BY enqueuedTime
# GROUP BY
#     deviceId,
#     enrichments.deviceName,
#     telemetry.latitude,
#     telemetry.longitude,
#     TumblingWindow(minute,4)
# )
#
# SELECT DeviceId, DeviceName, LEFT(CAST(Timestamp as nvarchar(max)), 16) as timestamp, Temperature AS value INTO [anomaly-data] FROM Telemetry
# ```

# +
# To start sending requests to the Anomaly Detector API, paste your Anomaly Detector resource access key below,
# and replace the endpoint variable with the endpoint for your region or your on-premise container endpoint.
# Endpoint examples:
# https://westus2.api.cognitive.microsoft.com/anomalydetector/v1.0/timeseries/entire/detect
# http://127.0.0.1:5000/anomalydetector/v1.0/timeseries/entire/detect

# SECURITY WARNING (review): the API key and storage-account connection string
# are hard-coded in the notebook. These credentials should be rotated and moved
# to environment variables or Azure Key Vault before this notebook is shared.
apikey = 'ac1a8b946fb647d29e65d8e0b6a6a5c5'
endpoint = 'https://weather-anomaly.cognitiveservices.azure.com/anomalydetector/v1.0/timeseries/entire/detect'
blob_conn_str="DefaultEndpointsProtocol=https;AccountName=weatherstgglovebox;AccountKey=<KEY>;EndpointSuffix=core.windows.net"

# IoT Central Blob Storage Logging
# (FIX: a second, identical `endpoint` assignment was removed here.)
blob_filter = "6a4d8c94-7f2d-4ff9-b15a-38749d4ebe61/18/2022/04/"
input_container = "weather-tracker"
device_id = "rpi44"

# +
import requests
import json
import pandas as pd
import numpy as np
import warnings
warnings.filterwarnings('ignore')

# Import library to display results
import matplotlib.pyplot as plt
# %matplotlib inline

# +
from bokeh.plotting import figure,output_notebook, show
from bokeh.palettes import Blues4
from bokeh.models import ColumnDataSource,Slider
import datetime
from bokeh.io import push_notebook
from dateutil import parser
from ipywidgets import interact, widgets, fixed
from IPython.display import clear_output
output_notebook()
# -

def detect(endpoint, apikey, request_data):
    """POST a batch detection request to the Anomaly Detector API.

    Returns the parsed JSON body on HTTP 200; otherwise prints the status
    code and raises with the response text.
    """
    headers = {'Content-Type': 'application/json', 'Ocp-Apim-Subscription-Key': apikey}
    response = requests.post(endpoint, data=json.dumps(request_data), headers=headers)
    if response.status_code == 200:
        return json.loads(response.content.decode("utf-8"))
    else:
        print(response.status_code)
        raise Exception(response.text)

# # format_json_data iterates through the newline-delimited JSON blob data and
# appends one {timestamp, value} record per input line to the `data` list.
# NOTE (FIX): the original comment claimed a dictionary was used to dedup
# duplicate timestamps — the code appends to a list and does NOT dedup.

# Append one {timestamp, value} dict per JSON line; duplicates are kept.
def format_json_data(telemetry_data, data):
    parse_json_records = [json.loads(str(item)) for item in telemetry_data.strip().split('\n')]
    for item in parse_json_records:
        di = {}
        di['timestamp'] = item["timestamp"]
        di['value'] = item['value']
        data.append(di)

# +
from azure.storage.blob import BlobServiceClient
from azure.storage.blob import ContainerClient
import json

input_container="anomaly-data"

def process_raw_data():
    """Read every blob for `device_id` from the anomaly-data container and
    feed its contents into the module-level `data` list."""
    container = ContainerClient.from_connection_string(
        conn_str=blob_conn_str,
        container_name=input_container)
    blob_name = ''
    json_paths = []
    blob_list = container.list_blobs(name_starts_with = device_id)
    for blob in blob_list:
        # read raw data from blob storage
        blob_name = blob.name
        blob_client = container.get_blob_client(blob_name)
        filestream = blob_client.download_blob()
        filecontents = filestream.content_as_text()
        if filecontents:
            format_json_data(filecontents, data)
# -

def build_figure(sample_data, sensitivity):
    """Run detection at the given sensitivity and render a bokeh chart of
    values, expected values, margins, and highlighted anomalies."""
    sample_data['sensitivity'] = sensitivity
    result = detect(endpoint, apikey, sample_data)
    columns = {'expectedValues': result['expectedValues'], 'isAnomaly': result['isAnomaly'],
               'isNegativeAnomaly': result['isNegativeAnomaly'], 'isPositiveAnomaly': result['isPositiveAnomaly'],
               'upperMargins': result['upperMargins'], 'lowerMargins': result['lowerMargins'],
               'timestamp': [parser.parse(x['timestamp']) for x in sample_data['series']],
               'value': [x['value'] for x in sample_data['series']]}
    response = pd.DataFrame(data=columns)
    values = response['value']
    label = response['timestamp']
    anomalies = []
    anomaly_labels = []
    index = 0
    anomaly_indexes = []
    # FIX: typo "Sensitvity" corrected in the chart title.
    p = figure(x_axis_type='datetime', title="Batch Anomaly Detection ({0} Sensitivity)".format(sensitivity),
               width=800, height=600)
    # Keep only anomalies that actually fall outside the expected band.
    for anom in response['isAnomaly']:
        if anom == True and (values[index] > response.iloc[index]['expectedValues'] + response.iloc[index]['upperMargins'] or
                             values[index] < response.iloc[index]['expectedValues'] - response.iloc[index]['lowerMargins']):
            anomalies.append(values[index])
            anomaly_labels.append(label[index])
            anomaly_indexes.append(index)
        index = index+1
    upperband = response['expectedValues'] + response['upperMargins']
    lowerband = response['expectedValues'] -response['lowerMargins']
    band_x = np.append(label, label[::-1])
    band_y = np.append(lowerband, upperband[::-1])
    boundary = p.patch(band_x, band_y, color=Blues4[2], fill_alpha=0.5, line_width=1, legend='Boundary')
    p.line(label, values, legend='Value', color="#2222aa", line_width=1)
    p.line(label, response['expectedValues'], legend='ExpectedValue', line_width=1,
           line_dash="dotdash", line_color='olivedrab')
    anom_source = ColumnDataSource(dict(x=anomaly_labels, y=anomalies))
    anoms = p.circle('x', 'y', size=5, color='tomato', source=anom_source)
    p.legend.border_line_width = 1
    p.legend.background_fill_alpha = 0.1
    show(p, notebook_handle=True)

def read_data():
    """Load the bundled temperature.json sample file into `data`."""
    with open('temperature.json',mode='r') as file:
        filecontents = file.read()
        format_json_data(filecontents, data)

def add_entries_from_log(telemetry_data):
    """Parse IoT Central export lines, keeping only records for `device_id`."""
    parse_json_records = [json.loads(str(item)) for item in telemetry_data.strip().split('\n') ]
    for item in parse_json_records:
        if item["deviceId"] == device_id:
            di = {}
            di['timestamp'] = item["enqueuedTime"]
            di['value'] = item['telemetry']['temperature']
            # print(di)
            data.append(di)

def read_iot_central_log():
    """Read every IoT Central log blob under `blob_filter` into `data`."""
    input_container = "weather-tracker"
    container = ContainerClient.from_connection_string(
        conn_str=blob_conn_str,
        container_name=input_container)
    blob_name = ''
    json_paths = []
    blob_list = container.list_blobs(name_starts_with = blob_filter )
    for blob in blob_list:
        # read raw data from blob storage
        blob_name = blob.name
        # print(blob.name + '\n')
        blob_client = container.get_blob_client(blob_name)
        filestream = blob_client.download_blob()
        filecontents = filestream.content_as_text()
        if filecontents:
            add_entries_from_log(filecontents)

# ## Visualizing anomalies throughout your data
#
# The following cells call the Anomaly Detector API with two different example time series data sets, and different sensitivities for anomaly detection. Varying the sensitivity of the Anomaly Detector API can improve how well the response fits your data.

# ### Example 1: time series with an hourly sampling frequency
#

# +
data = []

# Get data from blob storage
# process_raw_data()
read_iot_central_log()

# Clean data
# convert list object to a pandas dataframe
df = pd.DataFrame(data)

# Convert datetime string to dataframe timestamp type
# https://cumsum.wordpress.com/2022/02/26/pandas-typeerror-only-valid-with-datetimeindex-timedeltaindex-or-periodindex-but-got-an-instance-of-index/
df.index = pd.to_datetime(df['timestamp'])

# Resample into 10 minute buckets
# https://towardsdatascience.com/using-the-pandas-resample-function-a231144194c4
res = df.resample('10min').max()

# remove existing timestamp column convert the datetime index to a timestamp column
# https://pandas.pydata.org/docs/reference/api/pandas.DataFrame.drop.html
# https://stackoverflow.com/questions/44773714/convert-index-to-column-pandas-dataframe
res = res.drop(columns=['timestamp']).reset_index()

# Convert timestamp column of type timestamp to type string
res['timestamp']=res['timestamp'].astype(str)

# convert dataframe to a list of json objects
ts_dict = res.to_dict('records')

# Create data for anomaly detection
sample_data = {}
sample_data['series'] = ts_dict
sample_data['granularity'] = 'minutely'
sample_data['customInterval'] = 10
# sample_data['period'] = 24

# 95 sensitivity
build_figure(sample_data,95)
# -

# 90 sensitivity
build_figure(sample_data,90)

#85 sensitivity
build_figure(sample_data,85)
AnomalyDetector/Batch anomaly detection with the Anomaly Detector API.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# +
import os
import folium

print(folium.__version__)

# +
import pandas as pd

# FIX: pd.DataFrame.from_csv was deprecated in pandas 0.21 and removed in 1.0.
# read_csv(index_col=0, parse_dates=True) is the documented drop-in equivalent
# of from_csv's defaults.
df = pd.read_csv(
    os.path.join('data', 'highlight_flight_trajectories.csv'),
    index_col=0,
    parse_dates=True,
)
# -

# Let us take a glance at the data.
# Each row represents the trajectory of a flight,
# and the last column contains the coordinates of the flight path in `GeoJSON` format.

df

# +
m = folium.Map(
    location=[40, 10],
    zoom_start=4,
    control_scale=True,
    prefer_canvas=True
)


def style_function(feature):
    """Default rendering for a flight path: thin dashed blue line."""
    return {
        'fillColor': '#ffaf00',
        'color': 'blue',
        'weight': 1.5,
        'dashArray': '5, 5'
    }


def highlight_function(feature):
    """Hover rendering: thicker dashed green line."""
    return {
        'fillColor': '#ffaf00',
        'color': 'green',
        'weight': 3,
        'dashArray': '5, 5'
    }


# One GeoJson layer per flight, with a dep/dest popup attached.
for index, row in df.iterrows():
    c = folium.GeoJson(
        row['geojson'],
        name=('{}{}'.format(row['dep'], row['dest'])),
        overlay=False,
        style_function=style_function,
        highlight_function=highlight_function
    )
    folium.Popup('{}\n{}'.format(row['dep'], row['dest'])).add_to(c)
    c.add_to(m)

folium.LayerControl().add_to(m)
m.save(os.path.join('results', 'Highlight_Function.html'))

m
examples/Highlight_Function.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3.7.2 64-bit
#     language: python
#     name: python3
# ---

import torch
import torch.nn as nn
import torchvision
import d2l.torch as d2l

batch_size = 256
train_iter, test_iter = d2l.load_data_fashion_mnist(batch_size)

# +
input_dims, hidden_dims, output_dims = 28*28, 256, 10

# FIX (consistency): nn.Parameter always registers its tensor with
# requires_grad=True, so the explicit requires_grad flags on the factory
# calls were redundant — and inconsistently applied (b2 lacked one).
# Rely on nn.Parameter uniformly; behavior is unchanged.
w1 = nn.Parameter(torch.randn(input_dims, hidden_dims) * 0.01)
b1 = nn.Parameter(torch.zeros(hidden_dims))
w2 = nn.Parameter(torch.randn(hidden_dims, output_dims) * 0.01)
b2 = nn.Parameter(torch.zeros(output_dims))

params = [w1, b1, w2, b2]
# -

def relu(X):
    """Elementwise ReLU: max(0, X)."""
    a = torch.zeros_like(X)
    return torch.max(a, X)

def net(X):
    """Single-hidden-layer MLP: flatten -> affine -> ReLU -> affine (logits)."""
    return (
        relu(X.reshape(-1, input_dims)@w1 + b1)@w2 + b2
    )

loss = nn.CrossEntropyLoss()

epochs, lr = 10, 0.1
optimizer = torch.optim.SGD(params, lr)
d2l.train_ch3(net, train_iter, test_iter, loss, epochs, optimizer)
chapter 4 MLP/MLP from zero.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # name: python3 # --- # + [markdown] id="XdMI8iNub6Ye" # <img src="https://storage.googleapis.com/arize-assets/arize-logo-white.jpg" width="200"/> # # # Getting Started with the Arize Platform- Demand Forecasting for a Retail Company # # # **In this example, you are part of a team for a retail company that maintains and monitors a demand forecasting regression model that predicts the one week unit quantity demanded for items in your stores.** The business objective of your ML model is so that your store fronts can supply them exactly the number of items demanded on time, as predicted by your model. # # **You have been alerted their are calls about stores overshelfing and unhappy customers in the last month due to mispredictions by your demand forecasting model, so you turn to Arize to gain insight as to why**. # # # In this walkthrough, we are going to investigate your production demand forecasting model model. We will first set-up monitors and dashboard to provide better insights to into when these events happened and what happened on the days we had unhappy customers. Then, we will go into a deep dive to investigate the root causes of those mispredictions, and what kind of insights ML Engineer can gain from using Arize features. # # **The steps to this tutorial will be:** # # 1. Tracking your model to the Arize platform # 2. Set-up Performance Monitor and Dashboard to better understand our model performance # 3. Understand when underprediction and overprediction events happen, and what they represent # 4. 
Discover feature drifts corresponding to time periods of performance degredation, and takeaways for ML Engineers to fix # # The goal of this is to see how the Arize platform can help your team quickly dive into issues critical to your operations through: # - (1) Model observability & business insights # - (2) Model performance troubleshooting. # + [markdown] id="_Ilv3i0Qqbfu" # # Part 0: Setup and Getting the Data # The first step is to load our pre-existing dataset which includes training and production environments for our demand forecast example. Using a pre-existing dataset illustrates how simple it is to get started with the Arize platform. # - # ## Install Dependencies and Import Libraries 📚 # + colab={"base_uri": "https://localhost:8080/"} id="nKTMZgVLqm1a" outputId="9371de4a-b8b1-4667-c72b-3ccba533bf24" # !pip install arize -q # !pip install tables --upgrade -q from arize.utils.types import ModelTypes, Environments from arize.pandas.logger import Client, Schema import pandas as pd import datetime, uuid, tempfile from datetime import timedelta # + [markdown] id="UUrO-FG8q2e7" # ## **🌐 Download the Data** # In this walkthrough, we’ll be sending real historical data (with privacy conscious changes to feature names and values). Note, that while feature names and values are made explicit in this dataset, you can achieve the same level of ML Observability using obfuscated features. 
# # | Feature | Type | Description ||| # |:-|:-|:-|---|---| # | `item_size`| `int`| shelf item physical size ||| # | `supplier_id`| `int`| unique identifier of item supplier ||| # | `avg_historical_sales`| `float`| average sales of item in the last 6 month ||| # | `cur_projected_sales`| `float`| sales projected based on seasonality from another times series model ||| # | `item_new_release_flag`| `int (0 or 1)`| flag indicating if item was released in the last 2 month ||| # | `item_stickyness_factor`| `float`| a number that represents whether an item will likely be purchased by the same customer again ||| # | `item_release_year`| `int`| the year which item has been released ||| # | `shelf_life_weeks`| `int`| how long the item is intended to be on sale for ||| # # # ## Inspect the Data # # The data represents a regression model trained to forecast demand for an item one week in advance. The dataset contains one month of data and the performance will be evaluated by comparing: # # * **`prediction`**: Predicted number of items to demanded by customers this week (also the number of items we will shelf in operations) # * **`actual`**: Actual number of items recorded as demanded within a week # + colab={"base_uri": "https://localhost:8080/", "height": 419} id="EkJTxfbKrDhV" outputId="010a204a-a194-4dc2-d003-afc2aedd8ff3" # import dataset into two dataframes for logging val_data = pd.read_csv("https://storage.googleapis.com/arize-assets/fixtures/demand_forecast_val.csv") prod_data = pd.read_csv("https://storage.googleapis.com/arize-assets/fixtures/demand_forecast_prod.csv") feature_column_names = val_data[['item_size', 'supplier_id', 'avg_historical_sales', 'cur_projected_sales', 'item_new_release_flag', 'item_stickyness_factor', 'item_release_year', 'shelf_life_weeks']].columns print("✅ Dependencies installed and data successfully downloaded!") # - # select features, prediction, and actual columns only prod_data[list(feature_column_names) + ['predictions', 'actuals']] # + 
[markdown] id="EIM5Q70nb8vy" # # Step 1. Sending Data into Arize 💫 # First, copy the Arize `API_KEY` and `ORG_KEY` from your admin page linked below! # # [![Button_Open.png](https://storage.googleapis.com/arize-assets/fixtures/Button_Open.png)](https://app.arize.com/admin) # # <img src="https://storage.googleapis.com/arize-assets/fixtures/copy-keys.jpeg" width="600"> # + colab={"base_uri": "https://localhost:8080/"} id="JCiFPCYi64R-" outputId="9a4f6eb7-6055-4c14-acc1-9ed319a6d510" ORGANIZATION_KEY = "ORGANIZATION_KEY" API_KEY = "API_KEY" arize_client = Client(organization_key=ORGANIZATION_KEY, api_key=API_KEY) model_id = "demand-forecast-demo-model" # This is the model name that will show up in Arize model_version = "v1.0" # Version of model - can be any string if ORGANIZATION_KEY == "ORGANIZATION_KEY" or API_KEY == "API_KEY": raise ValueError("❌ NEED TO CHANGE ORGANIZATION AND/OR API_KEY") else: print("✅ Arize setup complete!") # + [markdown] id="f812QnU9ynm-" # ### Using the Python SDK # For our dataset, we have pre-formatted the feature names and dataframes for logging to Arize using our Python SDK through `arize.pandas.logger`. The `Schema` of your model specifies a mapping from column names for your logging DataFrame. # # Here's a summary below: # # | Schema Argument Name | Description ||| # |:- |:-|---|---| # | `feature_column_names`| names of the columns representing features ||| # | `prediction_id_column_name`| list of unique ids you can use to use to match each record ||| # | `prediction_label_column_name`| predictions column name ||| # | `actual_label_column_name`| actuals column name ||| # | `timestamp_column_name`| timestamps for when predictions were made ||| # # For more details on how to send data in production to Arize, check out some of our other logging tutorials and SDK documentations in Gitbook. 
# # [![Buttons_OpenOrange.png](https://storage.googleapis.com/arize-assets/fixtures/Buttons_OpenOrange.png)](https://docs.arize.com/arize/sdks-and-integrations/python-sdk/arize.pandas) # - # ## Log Validation & Production Data to Arize # + colab={"base_uri": "https://localhost:8080/"} id="z7j94jt8cBlx" outputId="609c8818-9e55-4258-df15-923f4b36f9f0" # Define a Schema() object for Arize to pick up data from the correct columns for logging validation_schema = Schema( feature_column_names=feature_column_names, prediction_id_column_name="prediction_ids", prediction_label_column_name="predictions", actual_label_column_name="actuals", ) # Logging to Arize platform using arize_client.log val_response = arize_client.log( dataframe=val_data, model_id=model_id, model_version=model_version, batch_id="baseline", model_type=ModelTypes.NUMERIC, environment=Environments.VALIDATION, schema=validation_schema, ) production_schema = Schema( feature_column_names=feature_column_names, prediction_id_column_name="prediction_ids", timestamp_column_name="prediction_ts", prediction_label_column_name="predictions", actual_label_column_name="actuals", ) prod_response = arize_client.log( dataframe=prod_data, model_id=model_id, model_version=model_version, model_type=ModelTypes.NUMERIC, environment=Environments.PRODUCTION, schema=production_schema, ) # Checking responses to make sure our data was successfully ingested if val_response.status_code != 200 or prod_response.status_code != 200: print(f"logging failed with response code {response.status_code}, {response.text}") else: print(f"✅ You have successfully logged data to Arize") # + [markdown] id="0tmeTIX1aSfC" # # Step 2. Confirm Data in Arize ✅ # Note that the Arize performs takes about 10 minutes to index the data. While the model should appear immediately, the data will not show up untill the indexing is done. Feel free to go grab a cup of coffee as Arize works its magic! 
🔮 # # **The next sections (Part 2 and Part 3) are screen captures for tutorials to setting-up the model we just sent in.** # # Feel free to follow and mirror our instructions to set-up the dashboards yourself, or simply read the guide below to see how Arize can quickly generate value for demand forecasting models. # # **⚠️ DON'T SKIP:** # In order to move on to the next step, make sure your actuals and training/production sets are loaded into the platform. To check: # 1. Navigate to models from the left bar, locate and click on model **demand-forecast-tutorial** # 2. On the **Overview Tab**, make sure you can see Predictions and Actuals under the **Model Health** section. Once production actuals have been fully recorded on Arize, the row title will change from **0 Actuals** to **Actuals** with summary statistics such as cardinality listed in the tables. # 3. Verify the list of **Features** below **Actuals**. # # <img src="https://storage.googleapis.com/arize-assets/fixtures/demand-forecast-waiting-actuals.png" width="800"> # + [markdown] id="JT6lV0qJNlmW" # # Step 3. Improving Model Observability & Business Insight # Now that our data has been logged to the Arize platform, let's investigate the low performances events we have been hearing about! # + [markdown] id="uL6RgKSpNpLN" # ## **Baseline Configurations** # We will first need to set-up a baseline distribution by clicking on the **Config** button. This will serve as the reference distribution and benchmark for our production data. We will use a validation set we sent in, but you can choose a production window or training set as our reference distribution. # # # Here are the steps to follow: # 1. Click on **Config** button on top right # 2. Click on **Configure Baseline** button to select a reference distribution # 3. Select **Validation** for **Version v1.0** # # # # ⚠️ We recommend doing this first for all of models you track to Arize. 
# # <img src="https://storage.googleapis.com/arize-assets/fixtures/demand-forecast-baseline-configgif.gif" width="1200"> # + [markdown] id="Ixkj8771aYoO" # ## **Understanding Error Biases** # Each prediction of our model in production translates to an operational decision by our retail company. In this case, **demand forecasting** is only important in so far as it can allow better **supply management**. # # In our example retail demand forecasting company, **under-forecasting** is much more problematic since customers aren’t delivered on what they wanted, we could lose out on customer lifetime value. Thus, we want to monitor our **Mean Error** in addition to **Mean Absolute Error**. # # <img src="https://storage.googleapis.com/arize-assets/fixtures/forecasting-bias-problem.png" width="600"> # # ## **Monitoring Biases with Performance Monitors** # # Even if our model is only trained on loss functions of **Mean Squared Error** or **Mean Absolute Error**, we sometimes still care about the **Mean Error** because it often tells us about the biases in our predictions, and these biases has a tangibly different impact on our business. # # Let’s set-up an Arize **Performance Monitor** to visualize and monitor our performance following these steps... # # # 1. Navigate to the **Monitors** tab # 2. Click on **Create Model Performance Monitor** # 3. Select **`Mean Error`** as your **Evaluation Metric** # 4. Select to trigger alert when **metrics is below -7.5** # # # # **You can click on the gifs to replay it** # [<img src="https://storage.googleapis.com/arize-assets/fixtures/demand-forecast-perf-monitor.gif" width="1200">](https://storage.googleapis.com/arize-assets/fixtures/demand-forecast-perf-monitor.gif) # # ### **Summary** # Performance Dashboards can help you monitor production using the metric important to your business function, such as Mean Error in this case. 
You can access them under performance tab and set-up alerts for when metric dips below or above a certain number. # + [markdown] id="OjSOk64uNuse" # ## **Arize Dashboard Configurations** # Now that we understand the importance of **Mean Error** as a measure of prediction bias, we also want to monitor and visualize it along with **Mean Absolute Error** side by side. Many Data Scientists reading this section will immediately understand that Mean Error alone isn't the informative, because there are often **coinciding event** of **both** over and under prediction, resulting in a zero-sum Mean Error. # # We can avoid this with a side-by-side time series chart in our custom times series widget on our Dashboard. # # ### **1. Performance Dashboard** # Performance Dashboard is a customizable feature where you can monitor time series data, feature/prediction distribution, and model metrics all in one place. You can even monitor only a slice or subset of your production data based on your model performance metric. # # Following these steps # 1. Click on **Create Dashboard** and select **Regression Performance Dashboard** # # This will create a dashboard with many useful default widget already created for your regression model. # # 2. Under the card **Model Evaluation Metric By Day**, we delete **MAPE** curve and change **RMSE** to **Mean Error**. # 3. Save the widget, and publish changes to our dashboard. # # ### **2. Setting-up initial dashboard** # [<img src="https://storage.googleapis.com/arize-assets/fixtures/demand-forecast-dashboard.gif" width="1200">](https://storage.googleapis.com/arize-assets/fixtures/demand-forecast-dashboard.gif) # # ### **3. Creating Data Metric Times Series Widget** # Let's also create a data metric time series widget to visualize the average values of our predictions vs actuals. In this way, we can **visualize errors along with actuals** to validate the magnitude of our prediction error. 
# # # <img src="https://storage.googleapis.com/arize-assets/fixtures/demand-forecast-data-metrics-create.png" width="1200"> # # We want create a new card right under our `Model Evaluation Metrics by Day` card by doing the following: # # 1. Select **Times Series** Widges # 2. Select **Data Metrics** # 3. Choose `Prediction/Actual` and `Average` for curves # # ### **4. Interpreting Our Dashboard** # Here's the final product of what our dashboard would look like, and when those error biases happen. # # <img src="https://storage.googleapis.com/arize-assets/fixtures/demand-forecast-visualize-bias.png" width="1200"> # # # Now we can clearly visualize prediction biases and overall model accuracy with two charts. # # 1. The top chart compares the errors using MAE and ME, showing us the magnitude and direction of our error biases # 2. The second shows us the averages of our predictions and actuals, giving us additional information to identify validate the over or under estimation event. # # + [markdown] id="eST3BeIjOOut" # ## **Observability & Business Insight Summary** # [The Arize platform](https://app.arize.com/) provides the tools for engineers, product teams, and even data scientists to quickly gain business insight for better strategical decision making. # # In this section we... # 1. Set-up a **Baseline** from our validation set to continiously compare it our production data. # 2. Created a **Mean Error Performance Monitor** so that we will be alerted whenever we detects a negative bias (i.e turned away customers) # 3. Customized a times series widget on **Dashboard** to visualize **Mean Error** side by side with **Mean Absolute Error** to understand both the magnitude and direction of our prediction biases. # # # + [markdown] id="p1K_1zp8dFS3" # # **Step 4. Empowering ML Engineers to Troubleshoot** # Now that we have identified when our underprediction and overprediction event happened, lets go into a deep dive to understand why they happened. 
# # Arize can also be used to triage your ML model performance. The model performance troubleshooting tools are designed by ML engineers for engineers to help you understand and solve your model performance issues. # + [markdown] id="TmoN5A8NdJDF" # ## **Investigating production windows with low performance** # In our **Drift Tab** overview, we clearly see two time periods where the distribution has changed. You can click on the dates to see the feature distribution and drift of that particular day. You might have noticed that the the first drift corresponds to when we observed an over-estimating event in Part 2, and the second corresponds to an under-estimating event. # # <img src="https://storage.googleapis.com/arize-assets/fixtures/demand-forecast-whats-drift-tab.png" width="800"> # # Let's sort by **Drift(PSI)** and investigate several of these features and click on one of the days of the second prediction drift, we can see that a number of features have drifted in these days. # # [<img src="https://storage.googleapis.com/arize-assets/fixtures/demand-forecast-drift.gif" width="1200">](https://storage.googleapis.com/arize-assets/fixtures/demand-forecast-drift.gif) # + [markdown] id="uQ5I14nh0Oa7" # ## **Deep Dive into Root Cause** # ⚠️ Two features `item_new_release_flag` and `item_size` seem to have high drift # 1. Click on one of the days during the second drift period (underpredicting period) # 2. Click into either feature through the red alert button # 3. Observe that their feature drift timeline coincided with prediction drift # # <img src="https://storage.googleapis.com/arize-assets/fixtures/demand-forecast-new-release-drift.png" width="800"> # # <img src="https://storage.googleapis.com/arize-assets/fixtures/demand-forecast-item-size-drift.png" width="800"> # # + [markdown] id="leCWCB8TdN2B" # ## **Turning Insight into Action** # # We used the **Drift Tab** to investigate dates of feature drifts and prediction drifts. 
Not all feature drifts are inherently malignant and impact our model performance -- only some do.
The platform is designed to help ML engineers and data science practitioners surface and fix issues with ML models in production faster with: # - Automated ML monitoring and model monitoring # - Workflows to troubleshoot model performance # - Real-time visualizations for model performance monitoring, data quality monitoring, and drift monitoring # - Model prediction cohort analysis # - Pre-deployment model validation # - Integrated model explainability # # ### Website # Visit Us At: https://arize.com/model-monitoring/ # # ### Additional Resources # - [What is ML observability?](https://arize.com/what-is-ml-observability/) # - [Playbook to model monitoring in production](https://arize.com/the-playbook-to-monitor-your-models-performance-in-production/) # - [Using statistical distance metrics for ML monitoring and observability](https://arize.com/using-statistical-distance-metrics-for-machine-learning-observability/) # - [ML infrastructure tools for data preparation](https://arize.com/ml-infrastructure-tools-for-data-preparation/) # - [ML infrastructure tools for model building](https://arize.com/ml-infrastructure-tools-for-model-building/) # - [ML infrastructure tools for production](https://arize.com/ml-infrastructure-tools-for-production-part-1/) # - [ML infrastructure tools for model deployment and model serving](https://arize.com/ml-infrastructure-tools-for-production-part-2-model-deployment-and-serving/) # - [ML infrastructure tools for ML monitoring and observability](https://arize.com/ml-infrastructure-tools-ml-observability/) # # Visit the [Arize Blog](https://arize.com/blog) and [Resource Center](https://arize.com/resource-hub/) for more resources on ML observability and model monitoring.
arize/examples/tutorials/Use_Cases/demand_forecast_usecase1.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# +
# default_exp core
# -

# # OHLCV Preprocessing
#
# > This module holds all the functions necessary to preprocess the ohlcv data in a dataframe. The goal is to calculate what the highest percentage change was over the following n days. This will serve as a prediction target for ML purposes.

#hide
from nbdev.showdoc import *

import numpy as np
import pandas as pd


def json_parse(filename):
    """Load a JSON file from the data/ directory and flatten its 'result'
    records into a DataFrame (one row per OHLCV candle)."""
    data = pd.read_json('data/' + filename)
    # pd.json_normalize replaces the deprecated pandas.io.json.json_normalize
    data = pd.json_normalize(data['result'])
    return data


filename = 'btc-ltc-daily.json'
data = json_parse(filename)
data.head()

# truncate timestamp from T column (only call once, otherwise reload json into dataframe)
data['T'] = data['T'].str[:-9]

data[0:30]

len(data.index)

highs = data.rolling(window=30, min_periods=0)['H'].max().shift(-30)
highs


# > Calculates the highest percentage change in the next n days compared to the close ('C').

def high_pct_change(data, n):
    """Return a Series giving, per row, the percentage change between the
    close ('C') and the maximum high ('H') over the following n rows.

    rolling(...).max() at row i covers rows [i-n+1, i]; shifting by -n
    re-aligns that window so row i sees the maximum over rows [i+1, i+n].
    """
    highs = data.rolling(window=n, min_periods=0)['H'].max().shift(-n)
    pcts = highs / data['C'] - 1
    return pcts


pcts = high_pct_change(data, 30)
pcts.head()


def round_pct(pct, interval=.25, cutoff=1):
    """Round a single percentage down to the nearest `interval`, clamping
    negative values to 0 and anything at or above `cutoff` to `cutoff`."""
    if pct >= cutoff:
        pct = cutoff
    elif pct < 0:
        pct = 0
    else:
        pct = interval * (pct // interval)
    return pct


pct = round_pct(.45)
pct

# > Takes list of percents and rounds them down based on interval. Anything above the cutoff is set to the cutoff. For example, if interval is 0.25 and cutoff is 1: 0.12 -> 0, 0.26 -> 0.25, 0.73 -> 0.50, 0.99 -> 0.75, 2.25 -> 1.00.
def round_pcts(pcts, interval=.25, cutoff=1):
    """Apply `round_pct` element-wise to a Series of percentages.

    Bug fix: `interval` and `cutoff` were previously accepted but never
    forwarded to `round_pct`, so non-default values were silently ignored.
    """
    return pcts.apply(lambda p: round_pct(p, interval, cutoff))


rounded_pcts = round_pcts(pcts)
rounded_pcts.head()

# Append the rounded prediction-target column 'P' to the frame.
# Note: `data_plus_predict_col` aliases `data`, so this mutates `data` too.
data_plus_predict_col = data
data_plus_predict_col['P'] = rounded_pcts
data_plus_predict_col.head()

# e.g. 'btc-ltc-daily.json' -> 'data/btc-ltc-daily.csv'
data_plus_predict_col.to_csv('data/' + filename[:-4] + 'csv')
00_core.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python [conda env:env]
#     language: python
#     name: conda-env-env-py
# ---

# # Data Preparation
# This notebook cleans and prepares the data for training, validation, and testing.

# %matplotlib inline
import numpy as np
import matplotlib.pyplot as plt
import os
import os.path
import pickle
import glob
import PIL


# ### Filter out filler pages
# Remove filler pages that do not contain sheet music information (e.g. title, foreword). The file filler.txt indicates which pages are filler.

def getFillerList(filler_file, feat_dir):
    '''
    Parse filler.txt and return a dict whose keys are page IDs (e.g. '00822-3')
    of pages that should be treated as filler and skipped.

    Each relevant line of filler.txt is '<relpath>\t<removeField>', where
    removeField is 'r'/'rl' (remove all pages) or a comma-separated list of
    page numbers; negative numbers index from the end of the score.
    '''
    d = {}  # set of pages to remove (dict used as a set)
    with open(filler_file, 'r') as f:
        for line in f:
            parts = line.strip().split('\t')
            if len(parts) > 1:
                relpath = parts[0]  # e.g. Bach/00748
                scoreID = os.path.basename(relpath)  # e.g. 00748
                removeField = parts[1].strip('"')  # e.g. "0,1,-2,-1" or "r" or "rl"
                numPages = getNumPages(relpath, feat_dir)
                if removeField in ('r', 'rl'):  # remove all pages
                    for pkl_file in glob.glob('{}/{}/*.pkl'.format(feat_dir, relpath)):
                        pageID = os.path.splitext(os.path.basename(pkl_file))[0]  # e.g. 00822-3
                        d[pageID] = 1
                else:
                    for pageNumStr in removeField.split(','):
                        pageNum = int(pageNumStr)
                        # negative page numbers count from the end of the score
                        if pageNum < 0:
                            pageNum = numPages + pageNum
                        d['{}-{}'.format(scoreID, pageNum)] = 1
    return d


def getNumPages(relpath, indir):
    '''Return the number of .pkl feature files (pages) for a given score.'''
    return len(glob.glob('{}/{}/*.pkl'.format(indir, relpath)))


def getNonFillerFeatures(filler_file, feat_dir):
    '''
    Collect bootleg score features from all pages that are (a) not filler
    and (b) have a valid bootleg score matrix.

    Returns a dict mapping pieceDir -> list with one entry per page: a boolean
    bootleg score matrix, or None for filler pages / pages without features.
    '''
    filler = getFillerList(filler_file, feat_dir)
    feats = {}
    for pieceDir in glob.glob('{}/*/*/'.format(feat_dir)):  # e.g. score_feat/Bach/00748/
        pieceID = pieceDir.split('/')[-2]
        accum = []  # features from all pages in this score (or None if filler/no features extracted)
        numPages = len(glob.glob('{}/*.pkl'.format(pieceDir)))
        for i in range(numPages):
            pkl_file = '{}/{}-{}.pkl'.format(pieceDir, pieceID, i)
            pageID = '{}-{}'.format(pieceID, i)  # e.g. 00748-2
            if pageID in filler:  # filler page, skip
                accum.append(None)
                continue
            with open(pkl_file, 'rb') as f:
                bscore = pickle.load(f)['bscore']
            if bscore is None:  # if None, no features were computed
                accum.append(None)
            else:
                accum.append(bscore == 1)  # convert from float to bool to compress memory
        if len(accum) > 0:
            feats[pieceDir] = accum
    return feats


filler_file = 'cfg_files/filler.txt'
score_feat_dir = 'score_feat'
feats = getNonFillerFeatures(filler_file, score_feat_dir)  # key: pieceDir, value: list of bscore matrices


# ### Investigate Feature Statistics

def getFeatureStats(feats):
    '''Print summary statistics and plot histograms describing the dataset:
    features per page, pages per composer, and note events per composer.'''
    # count number of features per page
    featsPerPage = []
    for pieceDir in feats:
        for elem in feats[pieceDir]:
            if elem is not None:
                featsPerPage.append(elem.shape[1])
    featsPerPage = np.array(featsPerPage)
    printStats(featsPerPage, "Number of Features Per Page")

    # plot histogram
    plt.subplot(2, 1, 1)
    plt.hist(featsPerPage, bins=100)
    plt.xlabel('Number of Events In Single Page')
    plt.ylabel('Frequency')
    plt.show()

    # count total number of pages by composer
    pages = {}
    for pieceDir in feats:  # e.g. score_feat/Bach/00748/
        composer = pieceDir.split('/')[-3]
        if composer not in pages:
            pages[composer] = 0
        pages[composer] += len([1 for elem in feats[pieceDir] if elem is not None])
    pageCnts = [pages[composer] for composer in pages]
    composers = [composer[0:5] for composer in pages]  # truncate names for axis labels
    printStats(pageCnts, "Total Number of Pages by Composer")

    # plot histogram
    x_pos = np.arange(len(pageCnts))
    plt.bar(x_pos, pageCnts)
    plt.xticks(x_pos, composers)
    plt.ylabel('Total # Pages')
    plt.show()

    # count total number of note events by composer
    noteEvents = {}
    for pieceDir in feats:  # e.g. score_feat/Bach/00748/
        composer = pieceDir.split('/')[-3]
        if composer not in noteEvents:
            noteEvents[composer] = 0
        for elem in feats[pieceDir]:
            if elem is not None:
                noteEvents[composer] += elem.shape[1]
    noteEventCnts = [noteEvents[composer] for composer in noteEvents]
    printStats(noteEventCnts, "Total Number of Note Events by Composer")

    # plot histogram
    x_pos = np.arange(len(composers))
    plt.bar(x_pos, noteEventCnts)
    plt.xticks(x_pos, composers)
    plt.ylabel('Total # Note Events')
    plt.show()
    return


def printStats(arr, title=None):
    '''Print mean/std/min/max of an array, preceded by an optional title.'''
    if title:
        print(title)
    print('Mean: {}'.format(np.mean(arr)))
    print('Std: {}'.format(np.std(arr)))
    print('Min: {}'.format(np.min(arr)))
    print('Max: {}'.format(np.max(arr)))


getFeatureStats(feats)

# Note that there is a wide variation in the number of features per page and a significant class imbalance in the data. We will define a proxy task which tries to classify short fixed-length chunks of bootleg score features, and resample the classes to ensure class balance.

# ### Split Data into Train, Validation, & Test
# Below we separate the data into train, validation, & test sets. We split the data by piece (as opposed to page) to ensure total separation. This data corresponds to the original task of classifying a single page of sheet music.
def splitTrainValidTest(d, train=.6, validation=.2, test=.2, savefile=None):
    '''
    Split the pieces in d into train/validation/test dicts. The split is by
    piece (not page) to guarantee total separation between sets. Optionally
    saves the full dict plus the shuffled piece lists to savefile.
    '''
    # use a tolerance rather than exact float equality (e.g. .7+.15+.15 != 1.0)
    assert abs(train + validation + test - 1.0) < 1e-9

    # shuffle (seeded for reproducibility)
    np.random.seed(0)
    pieceDirs = list(d.keys())
    np.random.shuffle(pieceDirs)

    # split
    breakpt1 = int(len(pieceDirs) * train)
    breakpt2 = breakpt1 + int(len(pieceDirs) * validation)
    pieceDirs_train = pieceDirs[0:breakpt1]
    pieceDirs_valid = pieceDirs[breakpt1:breakpt2]
    pieceDirs_test = pieceDirs[breakpt2:]

    # save
    d_train = getDataSubset(d, pieceDirs_train)
    d_valid = getDataSubset(d, pieceDirs_valid)
    d_test = getDataSubset(d, pieceDirs_test)
    if savefile:
        saveToPickle([d, pieceDirs_train, pieceDirs_valid, pieceDirs_test], savefile)

    return d_train, d_valid, d_test


def getDataSubset(dAll, toKeep):
    '''Return the sub-dict of dAll restricted to the keys in toKeep.'''
    dSubset = {}
    for pieceDir in toKeep:
        dSubset[pieceDir] = dAll[pieceDir]
    return dSubset


def saveToPickle(d, outfile):
    '''Serialize an object to a pickle file.'''
    with open(outfile, 'wb') as f:
        pickle.dump(d, f)


def loadPickle(infile):
    '''Load an object from a pickle file.'''
    with open(infile, 'rb') as f:
        d = pickle.load(f)
    return d


save_pages_file = '{}/data.pages.pkl'.format(score_feat_dir)
d_train, d_valid, d_test = splitTrainValidTest(feats, train=.6, validation=.2, test=.2, savefile=save_pages_file)


# ### Format data in chunks
# Below we prepare the data for the proxy task, which assumes a fixed-length (L=64) chunk of bootleg features. To ensure class balance, we randomly sample the same number of chunks from each class.

def getComposer2IndexMapping(feat_dir):
    '''Return (composer -> index dict, sorted list of composer names).'''
    composers = []
    for composerDir in sorted(glob.glob('{}/*/'.format(feat_dir))):
        composer = composerDir.split('/')[-2]
        composers.append(composer)
    c_to_i = {c: i for i, c in enumerate(composers)}
    return c_to_i, composers


def getChunkedData_uniformSampling(d, chunkSize, c_to_i):
    '''
    Uniform sampling with 50% overlap. Doesn't keep page location info.
    Returns (fragments, labels, pieceDir -> (startChunkIdx, endChunkIdx)).

    NOTE(review): np.hstack assumes no None entries in d[pieceDir]; unlike
    getChunkedData_balanced this function does not filter them — confirm
    before using on data containing filler pages.
    '''
    frags = []
    labels = []
    pieceDir2idxRange = {}
    for pieceDir in d:
        merged = np.hstack(d[pieceDir])
        composerIdx = c_to_i[pieceDir.split('/')[-3]]
        startChunkIdx = len(frags)
        for startIdx in range(0, merged.shape[1], chunkSize // 2):
            endIdx = startIdx + chunkSize
            if endIdx <= merged.shape[1]:
                frags.append(merged[:, startIdx:endIdx])
                labels.append(composerIdx)
        endChunkIdx = len(frags)
        pieceDir2idxRange[pieceDir] = (startChunkIdx, endChunkIdx)
    frags = np.array(frags)
    labels = np.array(labels)
    return frags, labels, pieceDir2idxRange


def getChunkedData_balanced(d, chunkSize, samplesPerComposer, composers):
    '''
    Samples the same number of windows from each composer to avoid class
    imbalance. Also keeps the page location for debugging and error analysis.
    '''
    np.random.seed(0)
    frags = []
    labels = []
    pagelocs = []  # list of (pieceDir, startPage, endPage) tuples
    for composerIdx, composer in enumerate(composers):
        samples, locinfo = sampleFromComposer(d, chunkSize, samplesPerComposer, composer)
        frags.extend(samples)
        labels.extend([composerIdx] * samplesPerComposer)
        pagelocs.extend(locinfo)
    frags = np.array(frags)
    labels = np.array(labels)
    # shuffle all three lists with the same permutation
    shuffled_idxs = np.arange(len(labels))
    np.random.shuffle(shuffled_idxs)
    frags = frags[shuffled_idxs]
    labels = labels[shuffled_idxs]
    pagelocs = [pagelocs[i] for i in shuffled_idxs]
    return frags, labels, pagelocs


def sampleFromComposer(d, chunkSize, N, composer):
    '''
    Samples N windows of length chunkSize from the bootleg score data from the
    specified composer (sampling is with replacement). Returns the sampled
    data, along with piece & page location for error analysis.
    '''
    # generate list of all valid sample locations
    allLocs = []  # list of (pieceNum, offset) tuples
    validPieceDirs = []  # pieceDirs that correspond to this composer
    bscores = []  # list of bootleg scores from each piece
    for pieceDir in d:
        if pieceDir.split('/')[-3] == composer:
            pieceNum = len(validPieceDirs)
            validPageFeats = [elem for elem in d[pieceDir] if elem is not None]
            if len(validPageFeats) == 0:
                continue
            merged = np.hstack(validPageFeats)
            for col_offset in range(merged.shape[1] - chunkSize + 1):
                allLocs.append((pieceNum, col_offset))
            validPieceDirs.append(pieceDir)
            bscores.append(merged)

    # generate samples
    frags = []
    pagelocs = []
    sample_idxs = np.random.choice(len(allLocs), N)
    for sample_idx in sample_idxs:
        pieceNum, offset = allLocs[sample_idx]
        pieceDir = validPieceDirs[pieceNum]
        frag = bscores[pieceNum][:, offset:offset + chunkSize]
        locStart = determinePageLocation(d, pieceDir, offset)  # (pageLocFloat, pageNum, pageOffset)
        locEnd = determinePageLocation(d, pieceDir, offset + chunkSize - 1)
        frags.append(frag)
        pagelocs.append((pieceDir, locStart, locEnd))
    return frags, pagelocs


def determinePageLocation(d, pieceDir, offset):
    '''
    Determines the page location of the given bootleg score column offset.
    Returns (pageLocFloat, pageNum, pageOffset), where pageLocFloat expresses
    the location as a fractional page number; (-1, -1, -1) if the offset is
    past the end of the piece.
    '''
    accum = 0
    pageloc = (-1, -1, -1)  # (pageLocFloat, pageNum, pageOffset)
    for pageIdx, elem in enumerate(d[pieceDir]):
        if elem is not None:
            numEvents = elem.shape[1]
            if accum + numEvents > offset:
                # guard against ZeroDivisionError on single-event pages
                frac = (offset - accum) / (numEvents - 1) if numEvents > 1 else 0.0
                pageFloat = pageIdx + frac
                pageloc = (pageFloat, pageIdx, offset - accum)
                break
            accum += numEvents
    return pageloc


composer2idx, composers = getComposer2IndexMapping(score_feat_dir)
chunkSize = 128  # pick a chunk size a bit larger than 64 to allow for additional data augmentation
samplesPerComposer = 3600
X_train, y_train, pageinfo_train = getChunkedData_balanced(d_train, chunkSize, samplesPerComposer, composers)
X_valid, y_valid, pageinfo_valid = getChunkedData_balanced(d_valid, chunkSize, samplesPerComposer//3, composers)
X_test, y_test, pageinfo_test = getChunkedData_balanced(d_test, chunkSize, samplesPerComposer//3, composers)

X_train.shape, X_valid.shape, X_test.shape

save_chunks_file = f'{score_feat_dir}/data.chunks{chunkSize}.pkl'
saveToPickle([X_train, y_train, pageinfo_train, X_valid, y_valid, pageinfo_valid, X_test, y_test, pageinfo_test], save_chunks_file)

# ### Verify data
# Compare the bootleg score data chunks against the original png images to verify that the data has been processed correctly.
def visualizeBootlegScore(bs, lines=(13, 15, 17, 19, 21, 35, 37, 39, 41, 43)):
    '''Render a bootleg score matrix; `lines` gives the staff-line rows to
    highlight in red. (Default changed from a mutable list to a tuple.)'''
    plt.figure(figsize=(10, 10))
    plt.imshow(1 - bs, cmap='gray', origin='lower')
    for l in range(1, bs.shape[0], 2):
        plt.axhline(l, c='grey')
    for l in lines:
        plt.axhline(l, c='r')


def getCorrespondingImage(tup, png_dir='data/png', showNext=False):
    '''Open the png page image corresponding to a (pieceDir, startLoc, endLoc)
    page-info tuple; showNext opens the following page instead.'''
    pieceDir, startLoc, endLoc = tup
    composer = pieceDir.split('/')[-3]
    pieceID = pieceDir.split('/')[-2]
    startpage = startLoc[1]
    if showNext:
        startpage += 1
    pngfile = '{}/{}/{}/{}-{}.png'.format(png_dir, composer, pieceID, pieceID, startpage)
    im = PIL.Image.open(pngfile)
    return im


i = 27
visualizeBootlegScore(X_train[i])
pageinfo_train[i]

im = getCorrespondingImage(pageinfo_train[i], showNext=True)

# # Prepare data for fastai
# In the sections below, we will prepare the data for use with the fastai library. This is adapted from the fast.ai [ULMFit tutorial](https://github.com/fastai/course-nlp/blob/master/nn-vietnamese.ipynb).

# %reload_ext autoreload
# %autoreload 2

from fastai import *
from fastai.text import *
import glob

bs = 48

# +
#torch.cuda.set_device(0)
# -

data_path = Config.data_path()
name = 'bscore_lm'
path = data_path/name
path.mkdir(exist_ok=True, parents=True)


# ### Target Language Model Databunch
# Convert the bootleg score features into string representations of decimal integers. Generate one document per pdf.

path_docs = path/'docs_target'
path_docs.mkdir(exist_ok=True)


def convertBinaryToInt(X):
    '''Pack each column of a binary matrix X (features x frames) into a single
    unsigned integer (bit i of the word = row i of the column); returns a list
    of ints, one per column.'''
    mask = np.power(2, np.arange(X.shape[0])).reshape((1, -1))
    # ravel (not squeeze) so a single-column matrix still yields a 1-D array;
    # squeeze would produce a 0-d array that cannot be converted to a list
    ints = (mask @ X).ravel().astype(np.uint64)
    return list(ints)


def isValid(matrixList):
    '''Return True if at least one page in the list has extracted features.'''
    return any(elem is not None for elem in matrixList)


def generateBootlegStringFiles(bscore_feats_file, outdir):
    '''
    Converts the bootleg score features to string decimal representation, and
    writes them to text files in the specified directory: one file per piece,
    one line per page, terminated with a '</doc>' sentinel.
    '''
    with open(bscore_feats_file, 'rb') as f:
        bscore_feats = pickle.load(f)[0]
    for pieceDir in bscore_feats:  # e.g. score_feat/Bach/00748/
        pid = pieceDir.split('/')[-2]  # 00748
        outfile = outdir / (pid + '.txt')
        if isValid(bscore_feats[pieceDir]):  # has at least one valid page of features
            with open(outfile, 'w') as fout:
                for binaryMatrix in bscore_feats[pieceDir]:
                    if binaryMatrix is not None:
                        ints = convertBinaryToInt(binaryMatrix)
                        pageStr = ' '.join([str(v) for v in ints])
                        fout.write(pageStr)
                        fout.write('\n\n')
                fout.write('</doc>')


generateBootlegStringFiles(save_pages_file, path_docs)

path_docs.ls()[:5]

# +
basicTokenizer = Tokenizer(pre_rules=[], post_rules=[])
lm_target_data = (TextList.from_folder(path_docs, processor=[OpenFileProcessor(), TokenizeProcessor(tokenizer=basicTokenizer), NumericalizeProcessor()])
                  .split_by_rand_pct(0.1, seed=42)
                  .label_for_lm()
                  .databunch(bs=bs, num_workers=1))
lm_target_data.save(path/'lm_target_databunch')
# -

# ### IMSLP Language Model Databunch
# Same as above, but using the entire IMSLP piano bootleg score dataset.

# +
# #!git clone https://github.com/HMC-MIR/piano_bootleg_scores.git
# -

imslp_bscores_filelist = 'imslp_bscores.list'

# !find piano_bootleg_scores/imslp_bootleg_dir-v1/ -name *.pkl > {imslp_bscores_filelist}

path_docs = path/'docs_imslp'
path_docs.mkdir(exist_ok=True)


def imslp2text(filelist, outdir):
    '''Convert every bootleg score .pkl listed in filelist to a text file in outdir.'''
    with open(filelist, 'r') as f:
        for line in f:
            bscorefile = line.strip()  # path/to/dest/283513.pkl
            fileid = os.path.splitext(os.path.basename(bscorefile))[0]  # e.g. 283513
            outfile = outdir/f'{fileid}.txt'
            bscore2textfile(bscorefile, outfile)


def bscore2textfile(infile, outfile, min_thresh=100):
    '''
    Converts a bootleg score .pkl file to text and writes to the specified
    output file. Pages with at most min_thresh events are skipped to avoid
    filler pages.
    '''
    with open(infile, 'rb') as f:
        d = pickle.load(f)
    with open(outfile, 'w') as fout:
        for l in d:  # each page
            if len(l) > min_thresh:  # to avoid filler pages
                pageStr = ' '.join([str(i) for i in l])
                fout.write(pageStr)
                fout.write('\n\n')
        fout.write('</doc>')


imslp2text(imslp_bscores_filelist, path_docs)

path_docs.ls()[:5]

# +
lm_imslp_data = (TextList.from_folder(path_docs, processor=[OpenFileProcessor(), TokenizeProcessor(tokenizer=basicTokenizer), NumericalizeProcessor()])
                 .split_by_rand_pct(0.1, seed=42)
                 .label_for_lm()
                 .databunch(bs=bs, num_workers=2))
lm_imslp_data.save(path/'lm_imslp_databunch')
# -

# ### Classification data
# Here we prepare the train.csv, valid.csv, and test.csv files for the proxy classification task.

def generateBootlegCSVFiles(bscore_feats_file, idx2composer, outfile_train, outfile_valid, outfile_test):
    '''
    Generates train.csv and test.csv from bootleg score fragments. Each line
    is 'label,text' where text is the space-separated decimal word sequence.
    '''
    with open(bscore_feats_file, 'rb') as f:
        X_train, y_train, _, X_valid, y_valid, _, X_test, y_test, _ = pickle.load(f)
    y_train = [idx2composer[idx] for idx in y_train]
    y_valid = [idx2composer[idx] for idx in y_valid]
    y_test = [idx2composer[idx] for idx in y_test]
    # the three splits are written identically, so share one helper
    _writeChunkCSV(X_train, y_train, outfile_train)
    _writeChunkCSV(X_valid, y_valid, outfile_valid)
    _writeChunkCSV(X_test, y_test, outfile_test)
    return


def _writeChunkCSV(X, labels, outfile):
    '''Write one 'label,text' csv file for a (N, feat, time) fragment array.'''
    with open(outfile, 'w') as fout:
        fout.write('label,text\n')
        for i in range(X.shape[0]):
            ints = convertBinaryToInt(X[i, :, :])
            pageStr = ' '.join([str(v) for v in ints])
            fout.write(f'{labels[i]},')
            fout.write(pageStr)
            fout.write('\n')


save_chunks_file = 'score_feat/data.chunks256.pkl'
csv_train_file = path/'train256.csv'
csv_valid_file = path/'valid256.csv'
csv_test_file = path/'test256.csv'
generateBootlegCSVFiles(save_chunks_file, composers, csv_train_file, csv_valid_file, csv_test_file)


# We also generate csv files for evaluating on the original page classification task.

def generateFullPageCSVFiles(bscore_pages_file, outfile_train, outfile_valid, outfile_test):
    '''
    Generates csv files for the original task of classifying full pages of music.
    '''
    with open(bscore_pages_file, 'rb') as f:
        d, pieceDirs_train, pieceDirs_valid, pieceDirs_test = pickle.load(f)
    generateFullPageCSV(d, pieceDirs_train, outfile_train)
    generateFullPageCSV(d, pieceDirs_valid, outfile_valid)
    generateFullPageCSV(d, pieceDirs_test, outfile_test)


def generateFullPageCSV(d, keys, outfile):
    '''Write one 'label,text' line per non-empty page for the given pieces.'''
    with open(outfile, 'w') as fout:
        fout.write('label,text\n')
        for pieceDir in keys:  # e.g. score_feat/Bach/00748/
            composer = pieceDir.split('/')[-3]
            for m in d[pieceDir]:  # d[pieceDir] -> list of binary bootleg score matrices, one per page
                if m is not None:
                    ints = convertBinaryToInt(m)
                    textStr = ' '.join([str(v) for v in ints])
                    fout.write(f'{composer},{textStr}\n')


csv_train_file = path/'train.fullpage.csv'
csv_valid_file = path/'valid.fullpage.csv'
csv_test_file = path/'test.fullpage.csv'
generateFullPageCSVFiles(save_pages_file, csv_train_file, csv_valid_file, csv_test_file)


# Finally, we also generate csv files to facilitate evaluating fixed-length classifiers on the full page classification task. These classifiers will be applied to multiple windows of features, and the predictions will be averaged.

def generateEnsembleCSV(bscore_pages_file, chunkSz, outfile_test):
    '''
    Generates a csv file to facilitate evaluating fixed-length classifiers on
    the full page classification task. Each line in the file corresponds to a
    fixed-length window of samples within a page. The predictions from all
    windows within a single page can then be averaged and evaluated.
    '''
    with open(bscore_pages_file, 'rb') as f:
        d, pieceDirs_train, pieceDirs_valid, pieceDirs_test = pickle.load(f)
    with open(outfile_test, 'w') as fout:
        fout.write('id,label,text\n')
        for pieceDir in pieceDirs_test:  # e.g. score_feat/Bach/00748/
            pieceID = pieceDir.split('/')[-2]
            composer = pieceDir.split('/')[-3]
            for i, m in enumerate(d[pieceDir]):  # d[pieceDir] -> list of binary bootleg score matrices, one per page
                if m is not None and m.shape[1] > 0:
                    if m.shape[1] <= chunkSz:  # only 1 window
                        ints = convertBinaryToInt(m)
                        textStr = ' '.join([str(v) for v in ints])
                        idString = f'{pieceID}_{i}_0'  # id: pieceID_pageIdx_chunkIdx
                        fout.write(f'{idString},{composer},{textStr}\n')
                    else:  # multiple windows
                        numWindows = int(np.ceil(m.shape[1]/(chunkSz/2))) - 1  # hop by half the chunk size
                        for j in range(numWindows - 1):
                            startIdx = chunkSz // 2 * j
                            endIdx = startIdx + chunkSz
                            ints = convertBinaryToInt(m[:, startIdx:endIdx])
                            textStr = ' '.join([str(v) for v in ints])
                            idString = f'{pieceID}_{i}_{j}'  # id: pieceID_pageIdx_chunkIdx
                            fout.write(f'{idString},{composer},{textStr}\n')
                        # handle last window (anchored to the end of the page)
                        ints = convertBinaryToInt(m[:, -chunkSz:])
                        textStr = ' '.join([str(v) for v in ints])
                        idString = f'{pieceID}_{i}_{numWindows-1}'
                        fout.write(f'{idString},{composer},{textStr}\n')


csv_test_file = path/'test.ensemble256.csv'
generateEnsembleCSV(save_pages_file, 256, csv_test_file)

# # Data Preparation for Transformer models
# Here we prepare the data for training and testing the Transformer-based models. Instead of using decimal string representations, we represent each 62-bit bootleg score feature as a sequence of 8 one-byte characters. Rather than generating these from scratch, we will simply convert the existing files to the new format.
# ### Prep data for language modeling

def generateLMTrainFiles(indir, out_train, out_valid, val_frac=0.1):
    '''Split the text files in indir into train/validation sets (by file, with
    a fixed seed) and write each set as one byte-character file.'''
    # split train/validation by file
    filelist = sorted(glob.glob('{}/*.txt'.format(indir)))
    np.random.seed(0)
    np.random.shuffle(filelist)
    endIdx = int(len(filelist) * (1 - val_frac)) + 1
    train_files = filelist[0:endIdx]
    valid_files = filelist[endIdx:]
    # convert to binary string representation
    convertToByteChars(train_files, out_train)
    convertToByteChars(valid_files, out_valid)


def convertToByteChars(filelist, outfile):
    '''
    Split each 62-bit bootleg score feature into 8 bytes, and express each
    byte as a single character. Consecutive bootleg score feature `words'
    will be separated by space.
    '''
    with open(outfile, 'w') as fout:
        for infile in filelist:
            with open(infile, 'r') as fin:
                for line in fin:
                    line = line.strip()
                    if len(line) > 0:
                        if line == '</doc>':
                            pass  # skip the end-of-document sentinel
                        else:
                            converted = convertLineToCharSeq(line)
                            fout.write(f'{converted}\n')
            fout.write('\n')


def convertLineToCharSeq(line):
    '''Convert a space-separated line of decimal words to byte-char words.'''
    ints = [int(p) for p in line.split()]
    result = ' '.join([int2charseq(i) for i in ints])
    return result


def int2charseq(int64):
    '''Encode a 64-bit integer as 8 characters, one per byte, least-significant
    byte first, offset into the CJK unicode range.'''
    chars = ''
    for i in range(8):
        numshift = i * 8
        charidx = (int64 >> numshift) & 255
        chars += chr(19968 + charidx)  # 19968 ensures that all chars are chinese characters (not newline, space, etc)
    # fix: the previous ''.join(chars) was a no-op on an already-built str
    return chars


bpe_path = path/'bpe_data'
bpe_path.mkdir(exist_ok=True, parents=True)

# Convert target data
lm_train_file = bpe_path/'bpe_lm_target_train.txt'
lm_valid_file = bpe_path/'bpe_lm_target_valid.txt'
dir_to_convert = path/'docs_target'
generateLMTrainFiles(dir_to_convert, lm_train_file, lm_valid_file)

# Convert IMSLP data
lm_train_file = bpe_path/'bpe_lm_imslp_train.txt'
lm_valid_file = bpe_path/'bpe_lm_imslp_valid.txt'
dir_to_convert = path/'docs_imslp'
generateLMTrainFiles(dir_to_convert, lm_train_file, lm_valid_file)


# ### Prep data for classification

def convertSingleCSVFile(infile, outfile):
    '''
    Convert .csv file with decimal string representation of bootleg score
    features to a .csv file with byte character representation.
    '''
    with open(infile, 'r') as f:
        lines = f.readlines()
    with open(outfile, 'w') as fout:
        for i, line in enumerate(lines):
            if i == 0:
                fout.write(line)  # header
            else:
                parts = line.strip().split(',')
                feats = parts.pop()  # last field is the feature text
                charseq = convertLineToCharSeq(feats)
                strToWrite = ','.join(parts) + ',' + charseq + '\n'
                fout.write(strToWrite)


def convertAllCSVFiles(indir, outdir):
    '''Convert every .csv in indir, writing <name>.char.csv files to outdir.'''
    assert indir != outdir
    os.makedirs(outdir, exist_ok=True)
    for infile in glob.glob(f'{indir}/*.csv'):
        print(f'Converting {os.path.basename(infile)}')
        basename = os.path.splitext(os.path.basename(infile))[0]
        outfile = f'{outdir}/{basename}.char.csv'
        convertSingleCSVFile(infile, outfile)


convertAllCSVFiles(str(path), str(bpe_path))
02_prepData.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# +
# Pima Indians Diabetes Database Analysis
# The datasets consist of several medical predictor (independent) variables and one target (dependent) variable, Outcome. Independent variables include the number of pregnancies the patient has had, their BMI, insulin level, age, and so on
# Source - https://www.kaggle.com/datasets/uciml/pima-indians-diabetes-database

# %reload_ext autoreload
# %autoreload 2
# %matplotlib inline

import pandas as pd
import torch.nn.functional as F
import torch
import matplotlib.pyplot as plt
import numpy as np
import torch.nn as nn
from torch.utils.data import TensorDataset, DataLoader
from torchvision import datasets, transforms
import seaborn as sns

dataset = '/Users/1flo/Downloads/Machine-learning/2022Practice/Pima_Diabetes/diabetes.csv'
df = pd.read_csv(dataset)
dfh = df.head()

device = 'cuda' if torch.cuda.is_available() else 'cpu'
print(' Using {} device'.format(device))
# -

dfh

df.shape

df.columns

# NOTE(review): despite the names, these are the feature matrix (X) and the
# label column (y), not a train/test split — columns 0-7 are predictors,
# column 8 is Outcome. The names are kept because later cells reference them.
train_data = df.iloc[:, 0:8]
test_data = df.iloc[:, [8]]

# +
# Define and set up the dataset using pytorch in-built functions
train_data = torch.tensor(np.asarray(train_data), dtype=torch.float32)
test_data = torch.tensor(np.asarray(test_data), dtype=torch.float32)
combined_ds = TensorDataset(train_data, test_data)
# -

print(train_data.dtype)
print(test_data.dtype)

combined_ds[0:3]

# +
# Define and set up the DataLoader (combined_dl yields (features, label) batches)
batch_size = 5
train_ds = DataLoader(train_data, batch_size, shuffle=True)
test_ds = DataLoader(test_data, batch_size, shuffle=True)
combined_dl = DataLoader(combined_ds, batch_size, shuffle=True)
for xi, yi in combined_dl:
    print(xi)
    print(yi)
    break

# +
# Define the model; nn.Linear creates the weight and bias tensors automatically.


class DiabetesDetectionNet(nn.Module):
    """Simple 3-layer MLP: input features -> hidden1 -> hidden2 -> class logits."""

    def __init__(self, input_size, hidden1_size, hidden2_size, num_classes):
        super(DiabetesDetectionNet, self).__init__()
        self.fc1 = nn.Linear(input_size, hidden1_size)
        self.relu1 = nn.ReLU()
        self.fc2 = nn.Linear(hidden1_size, hidden2_size)
        self.relu2 = nn.ReLU()
        # No activation on the output layer: CrossEntropyLoss expects raw logits.
        self.fc3 = nn.Linear(hidden2_size, num_classes)

    def forward(self, x):
        out = self.fc1(x)
        out = self.relu1(out)
        out = self.fc2(out)
        out = self.relu2(out)
        out = self.fc3(out)
        return out
# -

model = DiabetesDetectionNet(8, 100, 50, 2)
print(model)

# +
# Define the loss function
import torch.nn.functional as F

loss_fn = nn.CrossEntropyLoss()
# -

# +
# Define Optimizer/Algorithm - SGD (GD) & Train the model
opt1 = torch.optim.SGD(model.parameters(), lr=0.001)
opt2 = torch.optim.Adam(model.parameters(), lr=0.001)


def Fit(num_epochs, model, loss_fn, opt, combined_dl):
    """Train ``model`` for ``num_epochs`` passes over ``combined_dl``.

    Args:
        num_epochs: number of epochs to run.
        model: network producing (batch, num_classes) logits.
        loss_fn: nn.CrossEntropyLoss instance.
        opt: optimizer bound to ``model.parameters()``.
        combined_dl: DataLoader yielding (features, labels) batches; labels
            arrive as float tensors of shape (batch, 1) holding 0/1 values.

    Prints the last batch loss every 10 epochs.
    """
    for epoch in range(num_epochs):
        for xb, yb in combined_dl:
            pred = model(xb)
            # BUGFIX: CrossEntropyLoss requires class indices as a LongTensor
            # of shape (batch,), but the loader yields float labels of shape
            # (batch, 1), which raises at runtime. Squeeze the trailing dim
            # and cast to long.
            loss = loss_fn(pred, yb.squeeze(1).long())
            loss.backward()
            opt.step()
            opt.zero_grad()
        if (epoch+1) % 10 == 0:
            print('Epoch [{}/{}], Loss: {:.4f}'.format(epoch+1, num_epochs, loss.item()))
# -

Fit(500, model, loss_fn, opt2, combined_dl)

# +
print("targets are", test_data)
print("predictions are", model(train_data))
# The model emits per-class logits; take argmax over dim=1 (or softmax then
# threshold) to get a hard 0/1 prediction comparable with the targets.
# -

model(train_data)
2022Practice/Pima_Diabetes/.ipynb_checkpoints/Pima_Diabetes-Take2-checkpoint.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # Facial Keypoint Detection
#
# This project will be all about defining and training a convolutional neural network to perform facial keypoint detection, and using computer vision techniques to transform images of faces. The first step in any challenge like this will be to load and visualize the data you'll be working with.
#
# Let's take a look at some examples of images and corresponding facial keypoints.
#
# <img src='images/key_pts_example.png' width=50% height=50%/>
#
# Facial keypoints (also called facial landmarks) are the small magenta dots shown on each of the faces in the image above. In each training and test image, there is a single face and **68 keypoints, with coordinates (x, y), for that face**. These keypoints mark important areas of the face: the eyes, corners of the mouth, the nose, etc. These keypoints are relevant for a variety of tasks, such as face filters, emotion recognition, pose recognition, and so on. Here they are, numbered, and you can see that specific ranges of points match different portions of the face.
#
# <img src='images/landmarks_numbered.jpg' width=30% height=30%/>
#
# ---

# ## Load and Visualize Data
#
# The first step in working with any dataset is to become familiar with your data; you'll need to load in the images of faces and their keypoints and visualize them! This set of image data has been extracted from the [YouTube Faces Dataset](https://www.cs.tau.ac.il/~wolf/ytfaces/), which includes videos of people in YouTube videos. These videos have been fed through some processing steps and turned into sets of image frames containing one face and the associated keypoints.
#
# #### Training and Testing Data
#
# This facial keypoints dataset consists of 5770 color images. All of these images are separated into either a training or a test set of data.
#
# * 3462 of these images are training images, for you to use as you create a model to predict keypoints.
# * 2308 are test images, which will be used to test the accuracy of your model.
#
# The information about the images and keypoints in this dataset are summarized in CSV files, which we can read in using `pandas`. Let's read the training CSV and get the annotations in an (N, 2) array where N is the number of keypoints and 2 is the dimension of the keypoint coordinates (x, y).
#
# ---

# First, before we do anything, we have to load in our image data. This data is stored in a zip file and in the below cell, we access it by it's URL and unzip the data in a `/data/` directory that is separate from the workspace home directory.

# -- DO NOT CHANGE THIS CELL --
# !mkdir /data
# !wget -P /data/ https://s3.amazonaws.com/video.udacity-data.com/topher/2018/May/5aea1b91_train-test-data/train-test-data.zip
# !unzip -n /data/train-test-data.zip -d /data

# +
# import the required libraries
import glob
import os
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
import cv2
# -

# Then, let's load in our training data and display some stats about that data to make sure it's been loaded in correctly!

# +
key_pts_frame = pd.read_csv('/data/training_frames_keypoints.csv')

n = 0
image_name = key_pts_frame.iloc[n, 0]
# BUGFIX: DataFrame.as_matrix() was removed in pandas 1.0; to_numpy() is the
# supported replacement and returns the same ndarray.
key_pts = key_pts_frame.iloc[n, 1:].to_numpy()
key_pts = key_pts.astype('float').reshape(-1, 2)

print('Image name: ', image_name)
print('Landmarks shape: ', key_pts.shape)
print('First 4 key pts: {}'.format(key_pts[:4]))
# -

# print out some stats about the data
print('Number of images: ', key_pts_frame.shape[0])

# ## Look at some images
#
# Below, is a function `show_keypoints` that takes in an image and keypoints and displays them. As you look at this data, **note that these images are not all of the same size**, and neither are the faces! To eventually train a neural network on these images, we'll need to standardize their shape.


def show_keypoints(image, key_pts):
    """Show image with keypoints.

    Args:
        image: array-like image accepted by ``plt.imshow``.
        key_pts: (N, 2) array of (x, y) keypoint coordinates.
    """
    plt.imshow(image)
    plt.scatter(key_pts[:, 0], key_pts[:, 1], s=20, marker='.', c='m')


# +
# Display a few different types of images by changing the index n

# select an image by index in our data frame
n = 0
image_name = key_pts_frame.iloc[n, 0]
key_pts = key_pts_frame.iloc[n, 1:].to_numpy()  # as_matrix() removed in pandas 1.0
key_pts = key_pts.astype('float').reshape(-1, 2)

plt.figure(figsize=(5, 5))
show_keypoints(mpimg.imread(os.path.join('/data/training/', image_name)), key_pts)
plt.show()
# -

# ## Dataset class and Transformations
#
# To prepare our data for training, we'll be using PyTorch's Dataset class. Much of this this code is a modified version of what can be found in the [PyTorch data loading tutorial](http://pytorch.org/tutorials/beginner/data_loading_tutorial.html).
#
# #### Dataset class
#
# ``torch.utils.data.Dataset`` is an abstract class representing a
# dataset. This class will allow us to load batches of image/keypoint data, and uniformly apply transformations to our data, such as rescaling and normalizing images for training a neural network.
#
#
# Your custom dataset should inherit ``Dataset`` and override the following
# methods:
#
# -  ``__len__`` so that ``len(dataset)`` returns the size of the dataset.
# -  ``__getitem__`` to support the indexing such that ``dataset[i]`` can
#    be used to get the i-th sample of image/keypoint data.
#
# Let's create a dataset class for our face keypoints dataset. We will
# read the CSV file in ``__init__`` but leave the reading of images to
# ``__getitem__``. This is memory efficient because all the images are not
# stored in the memory at once but read as required.
#
# A sample of our dataset will be a dictionary
# ``{'image': image, 'keypoints': key_pts}``. Our dataset will take an
# optional argument ``transform`` so that any required processing can be
# applied on the sample. We will see the usefulness of ``transform`` in the
# next section.
#

# +
from torch.utils.data import Dataset, DataLoader


class FacialKeypointsDataset(Dataset):
    """Face Landmarks dataset."""

    def __init__(self, csv_file, root_dir, transform=None):
        """
        Args:
            csv_file (string): Path to the csv file with annotations.
            root_dir (string): Directory with all the images.
            transform (callable, optional): Optional transform to be applied
                on a sample.
        """
        self.key_pts_frame = pd.read_csv(csv_file)
        self.root_dir = root_dir
        self.transform = transform

    def __len__(self):
        return len(self.key_pts_frame)

    def __getitem__(self, idx):
        """Return sample ``idx`` as ``{'image': ndarray, 'keypoints': (N, 2) float array}``."""
        image_name = os.path.join(self.root_dir,
                                  self.key_pts_frame.iloc[idx, 0])

        image = mpimg.imread(image_name)

        # if image has an alpha color channel, get rid of it
        if image.shape[2] == 4:
            image = image[:, :, 0:3]

        # BUGFIX: as_matrix() was removed in pandas 1.0; to_numpy() is the
        # supported replacement.
        key_pts = self.key_pts_frame.iloc[idx, 1:].to_numpy()
        key_pts = key_pts.astype('float').reshape(-1, 2)
        sample = {'image': image, 'keypoints': key_pts}

        if self.transform:
            sample = self.transform(sample)

        return sample
# -

# Now that we've defined this class, let's instantiate the dataset and display some images.
# + # Construct the dataset face_dataset = FacialKeypointsDataset(csv_file='/data/training_frames_keypoints.csv', root_dir='/data/training/') # print some stats about the dataset print('Length of dataset: ', len(face_dataset)) # + # Display a few of the images from the dataset num_to_display = 3 for i in range(num_to_display): # define the size of images fig = plt.figure(figsize=(20,10)) # randomly select a sample rand_i = np.random.randint(0, len(face_dataset)) sample = face_dataset[rand_i] # print the shape of the image and keypoints print(i, sample['image'].shape, sample['keypoints'].shape) ax = plt.subplot(1, num_to_display, i + 1) ax.set_title('Sample #{}'.format(i)) # Using the same display function, defined earlier show_keypoints(sample['image'], sample['keypoints']) # - # ## Transforms # # Now, the images above are not of the same size, and neural networks often expect images that are standardized; a fixed size, with a normalized range for color ranges and coordinates, and (for PyTorch) converted from numpy lists and arrays to Tensors. # # Therefore, we will need to write some pre-processing code. # Let's create four transforms: # # - ``Normalize``: to convert a color image to grayscale values with a range of [0,1] and normalize the keypoints to be in a range of about [-1, 1] # - ``Rescale``: to rescale an image to a desired size. # - ``RandomCrop``: to crop an image randomly. # - ``ToTensor``: to convert numpy images to torch images. # # # We will write them as callable classes instead of simple functions so # that parameters of the transform need not be passed everytime it's # called. For this, we just need to implement ``__call__`` method and # (if we require parameters to be passed in), the ``__init__`` method. # We can then use a transform like this: # # tx = Transform(params) # transformed_sample = tx(sample) # # Observe below how these transforms are generally applied to both the image and its keypoints. 
# +
import torch
from torchvision import transforms, utils

# transforms


class Normalize(object):
    """Convert a color image to grayscale and normalize the color range to [0,1]."""

    def __call__(self, sample):
        image, key_pts = sample['image'], sample['keypoints']

        image_copy = np.copy(image)
        key_pts_copy = np.copy(key_pts)

        # convert image to grayscale
        image_copy = cv2.cvtColor(image, cv2.COLOR_RGB2GRAY)

        # scale color range from [0, 255] to [0, 1]
        image_copy = image_copy/255.0

        # scale keypoints to be centered around 0 with a range of [-1, 1]
        # assumes coordinates have roughly mean = 100 and std = 50,
        # so normalized pts are (pts - 100) / 50
        key_pts_copy = (key_pts_copy - 100)/50.0

        return {'image': image_copy, 'keypoints': key_pts_copy}


class Rescale(object):
    """Rescale the image in a sample to a given size.

    Args:
        output_size (tuple or int): Desired output size. If tuple, output is
            matched to output_size. If int, smaller of image edges is matched
            to output_size keeping aspect ratio the same.
    """

    def __init__(self, output_size):
        assert isinstance(output_size, (int, tuple))
        self.output_size = output_size

    def __call__(self, sample):
        image, key_pts = sample['image'], sample['keypoints']

        h, w = image.shape[:2]
        if isinstance(self.output_size, int):
            # match the smaller edge to output_size, keeping aspect ratio
            if h > w:
                new_h, new_w = self.output_size * h / w, self.output_size
            else:
                new_h, new_w = self.output_size, self.output_size * w / h
        else:
            new_h, new_w = self.output_size

        new_h, new_w = int(new_h), int(new_w)

        # cv2.resize takes (width, height)
        img = cv2.resize(image, (new_w, new_h))

        # scale the pts, too
        key_pts = key_pts * [new_w / w, new_h / h]

        return {'image': img, 'keypoints': key_pts}


class RandomCrop(object):
    """Crop randomly the image in a sample.

    Args:
        output_size (tuple or int): Desired output size. If int, square crop
            is made.
    """

    def __init__(self, output_size):
        assert isinstance(output_size, (int, tuple))
        if isinstance(output_size, int):
            self.output_size = (output_size, output_size)
        else:
            assert len(output_size) == 2
            self.output_size = output_size

    def __call__(self, sample):
        image, key_pts = sample['image'], sample['keypoints']

        h, w = image.shape[:2]
        new_h, new_w = self.output_size

        # BUGFIX: use an inclusive upper bound so that an image whose size
        # equals the crop size is handled; np.random.randint(0, 0) raises
        # ValueError because the half-open interval [0, 0) is empty.
        top = np.random.randint(0, h - new_h + 1)
        left = np.random.randint(0, w - new_w + 1)

        image = image[top: top + new_h,
                      left: left + new_w]

        # shift keypoints into the cropped coordinate frame
        key_pts = key_pts - [left, top]

        return {'image': image, 'keypoints': key_pts}


class ToTensor(object):
    """Convert ndarrays in sample to Tensors."""

    def __call__(self, sample):
        image, key_pts = sample['image'], sample['keypoints']

        # if image has no grayscale color channel, add one
        if len(image.shape) == 2:
            # add that third color dim
            image = image.reshape(image.shape[0], image.shape[1], 1)

        # swap color axis because
        # numpy image: H x W x C
        # torch image: C X H X W
        image = image.transpose((2, 0, 1))

        return {'image': torch.from_numpy(image),
                'keypoints': torch.from_numpy(key_pts)}
# -

# ## Test out the transforms
#
# Let's test these transforms out to make sure they behave as expected. As you look at each transform, note that, in this case, **order does matter**. For example, you cannot crop an image using a value smaller than the original image (and the original images vary in size!), but, if you first rescale the original image, you can then crop it to any size smaller than the rescaled size.
# + # test out some of these transforms rescale = Rescale(100) crop = RandomCrop(50) composed = transforms.Compose([Rescale(250), RandomCrop(224)]) # apply the transforms to a sample image test_num = 500 sample = face_dataset[test_num] fig = plt.figure() for i, tx in enumerate([rescale, crop, composed]): transformed_sample = tx(sample) ax = plt.subplot(1, 3, i + 1) plt.tight_layout() ax.set_title(type(tx).__name__) show_keypoints(transformed_sample['image'], transformed_sample['keypoints']) plt.show() # - # ## Create the transformed dataset # # Apply the transforms in order to get grayscale images of the same shape. Verify that your transform works by printing out the shape of the resulting data (printing out a few examples should show you a consistent tensor size). # + # define the data tranform # order matters! i.e. rescaling should come before a smaller crop data_transform = transforms.Compose([Rescale(250), RandomCrop(224), Normalize(), ToTensor()]) # create the transformed dataset transformed_dataset = FacialKeypointsDataset(csv_file='/data/training_frames_keypoints.csv', root_dir='/data/training/', transform=data_transform) # + # print some stats about the transformed data print('Number of images: ', len(transformed_dataset)) # make sure the sample tensors are the expected size for i in range(5): sample = transformed_dataset[i] print(i, sample['image'].size(), sample['keypoints'].size()) # - # ## Data Iteration and Batching # # Right now, we are iterating over this data using a ``for`` loop, but we are missing out on a lot of PyTorch's dataset capabilities, specifically the abilities to: # # - Batch the data # - Shuffle the data # - Load the data in parallel using ``multiprocessing`` workers. # # ``torch.utils.data.DataLoader`` is an iterator which provides all these # features, and we'll see this in use in the *next* notebook, Notebook 2, when we load data in batches to train a neural network! # # --- # # # ## Ready to Train! 
# # Now that you've seen how to load and transform our data, you're ready to build a neural network to train on this data. # # In the next notebook, you'll be tasked with creating a CNN for facial keypoint detection.
1. Load and Visualize Data.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     name: python3
# ---

# + id="iZeVHGPovyw4" colab_type="code"
# !pip install eli5

# + id="BExswzIxzPwi" colab_type="code"
import pandas as pd
import numpy as np

from sklearn.tree import DecisionTreeRegressor
from sklearn.ensemble import RandomForestRegressor

from sklearn.metrics import mean_absolute_error
from sklearn.model_selection import cross_val_score

import eli5
from eli5.sklearn import PermutationImportance

from ast import literal_eval
from tqdm import tqdm_notebook

# + id="ROoKhDEc0VE7" colab_type="code"
# cd "/content/drive/My Drive/Colab Notebooks/dw_matrix"

# + id="iczq2lc10cEJ" colab_type="code"
df = pd.read_csv('data/men_shoes.csv', low_memory=False)

# + id="9vtZIXYJ0dFi" colab_type="code"
df.columns


# + id="f87vTzIR1xWp" colab_type="code"
def run_model(feats, model=DecisionTreeRegressor(max_depth=5)):
    """Cross-validate ``model`` on the global ``df`` using feature columns ``feats``.

    Returns:
        (mean, std) of the negative mean-absolute-error CV scores.

    NOTE: the default estimator instance is created once and shared across
    calls; that is safe here because cross_val_score clones the estimator.
    """
    X = df[feats].values
    y = df['prices.amountmin'].values

    scores = cross_val_score(model, X, y, scoring='neg_mean_absolute_error')
    return np.mean(scores), np.std(scores)


# + id="sbf7g2qs2Z_R" colab_type="code"
df['brand_cat'] = df['brand'].map(lambda x: str(x).lower()).factorize()[0]

run_model(['brand_cat'])

# + id="iJ2Cc9V62pI4" colab_type="code"
model = RandomForestRegressor(max_depth=5, n_estimators=100, random_state=0)
run_model(['brand_cat'], model)

# + id="5fcKzwqd3VBJ" colab_type="code"
df.features.head().values

# + id="Qlc-_xE23aYs" colab_type="code"
str_dict = '[{"key":"Gender","value":["Men"]},{"key":"Shoe Size","value":["M"]},{"key":"Shoe Category","value":["Men\'s Shoes"]},{"key":"Color","value":["Multicolor"]},{"key":"Manufacturer Part Number","value":["8190-W-NAVY-7.5"]},{"key":"Brand","value":["Josmo"]}]'

literal_eval(str_dict)[0]['value'][0]


# + id="FTt7zg2v__gS" colab_type="code"
def parse_features(x):
    """Parse one JSON-like ``features`` string into a ``{key: first_value}`` dict.

    Keys and values are lowercased and stripped; NaN inputs yield an empty dict.
    """
    output_dict = {}

    if str(x) == 'nan':
        return output_dict

    features = literal_eval(x.replace('\\"', '"'))
    for item in features:
        key = item['key'].lower().strip()
        value = item['value'][0].lower().strip()

        output_dict[key] = value

    return output_dict


df['features_parsed'] = df['features'].map(parse_features)

# + id="p3cvflXsBsbj" colab_type="code"
# Collect the set of all feature keys seen across the dataset.
keys = set()
df['features_parsed'].map(lambda x: keys.update(x.keys()))
len(keys)


# + id="eUxqHH84COmW" colab_type="code"
def get_name_feat(key):
    """Column name used for a parsed feature key."""
    return 'feat_' + key


# Expand every parsed feature key into its own feat_<key> column.
for key in tqdm_notebook(keys):
    df[get_name_feat(key)] = df.features_parsed.map(
        lambda feats: feats[key] if key in feats else np.nan)

# + id="1JTf7JvsHso1" colab_type="code"
df.columns

# + id="DCBiUauAIUWB" colab_type="code"
# Share of non-null values (in percent) for every parsed feature column.
keys_stat = {}
for key in keys:
    keys_stat[key] = df[df[get_name_feat(key)].notnull()].shape[0] / df.shape[0] * 100

# + id="L0LgM2UbIhpd" colab_type="code"
{k: v for k, v in keys_stat.items() if v > 30}

# + id="6cbmceY8Ij3c" colab_type="code"
# Factorize every parsed feature column into a numeric *_cat companion column.
# (The loop covers brand/color/gender/manufacturer part number/material/sport/
# style as well, so the previous per-column factorize lines were redundant and
# have been removed.)
for key in keys:
    df[get_name_feat(key) + '_cat'] = df[get_name_feat(key)].factorize()[0]

# + id="AbNJ3jchPDa6" colab_type="code"
# How often does the top-level brand column agree with the parsed feature?
df['brand'] = df['brand'].map(lambda x: str(x).lower())
df[df.brand == df.feat_brand].shape

# + id="C8KtXWgrEoSo" colab_type="code"
feats_cat = [x for x in df.columns if 'cat' in x]
feats_cat

# + id="skdy0xYh0h5C" colab_type="code"
feats = ['brand_cat', 'feat_brand_cat', 'feat_gender_cat', 'feat_material_cat', 'feat_style_cat', 'feat_sport_cat']
#feats += feats_cat
#feats = list(set(feats))

model = RandomForestRegressor(max_depth=5, n_estimators=100)
run_model(feats, model)

# + id="eMcKUrqw5DkA" colab_type="code"
X = df[feats].values
y = df['prices.amountmin'].values

m = RandomForestRegressor(max_depth=5, n_estimators=100, random_state=0)
m.fit(X, y)

perm = PermutationImportance(m, random_state=1).fit(X, y);
eli5.show_weights(perm, feature_names=feats)

# + id="6-9820nF6Zbs" colab_type="code"
df[df['brand'] == 'nike'].features_parsed.head().values

# + id="Us0nf91HWNYv" colab_type="code"
# ls

# + id="d5GJ2lvJWQUR" colab_type="code"
# ls matrix_one/
# + id="UA5aAdV1WUJM" colab_type="code" colab={} # !git add
matrix_one/day5.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     name: python3
# ---

# NOTE(review): Bulgarian-language documentation notebook (deplacy demo page);
# the Bulgarian markdown headings are the page's content and are kept as-is.
# English gloss: "за синтактичен анализ" = "for syntactic analysis", "с" = "with".
# Each cell below is an independent install-and-run demo that parses the same
# sample sentence ("A hungry bear doesn't dance." proverb) with a different toolkit.

# + [markdown] colab_type="text"
# # [deplacy](https://koichiyasuoka.github.io/deplacy/) за синтактичен анализ

# + [markdown] colab_type="text"
# ## с [Trankit](https://github.com/nlp-uoregon/trankit)
#

# + colab_type="code"
# !pip install deplacy trankit transformers
import trankit
nlp=trankit.Pipeline("bulgarian")
doc=nlp("Гладна мечка хоро не играе.")
import deplacy
deplacy.render(doc)
deplacy.serve(doc,port=None)
# import graphviz
# graphviz.Source(deplacy.dot(doc))

# + [markdown] colab_type="text"
# ## с [Camphr-Udify](https://camphr.readthedocs.io/en/latest/notes/udify.html)
#

# + colab_type="code"
# !pip install deplacy camphr 'unofficial-udify>=0.3.0' en-udify@https://github.com/PKSHATechnology-Research/camphr_models/releases/download/0.7.0/en_udify-0.7.tar.gz
import pkg_resources,imp
imp.reload(pkg_resources)
import spacy
nlp=spacy.load("en_udify")
doc=nlp("Гладна мечка хоро не играе.")
import deplacy
deplacy.render(doc)
deplacy.serve(doc,port=None)
# import graphviz
# graphviz.Source(deplacy.dot(doc))

# + [markdown] colab_type="text"
# ## с [UDPipe 2](http://ufal.mff.cuni.cz/udpipe/2)
#

# + colab_type="code"
# !pip install deplacy
def nlp(t):
    """Parse text ``t`` with the remote LINDAT UDPipe REST service (model bg)."""
    import urllib.request,urllib.parse,json
    with urllib.request.urlopen("https://lindat.mff.cuni.cz/services/udpipe/api/process?model=bg&tokenizer&tagger&parser&data="+urllib.parse.quote(t)) as r:
        return json.loads(r.read())["result"]
doc=nlp("Гладна мечка хоро не играе.")
import deplacy
deplacy.render(doc)
deplacy.serve(doc,port=None)
# import graphviz
# graphviz.Source(deplacy.dot(doc))

# + [markdown] colab_type="text"
# ## с [CLASSLA](https://github.com/clarinsi/classla-stanfordnlp)
#

# + colab_type="code"
# !pip install deplacy classla
import classla
classla.download("bg")
nlp=classla.Pipeline("bg")
doc=nlp("Гладна мечка хоро не играе.")
import deplacy
deplacy.render(doc)
deplacy.serve(doc,port=None)
# import graphviz
# graphviz.Source(deplacy.dot(doc))

# + [markdown] colab_type="text"
# ## с [NLP-Cube](https://github.com/Adobe/NLP-Cube)
#

# + colab_type="code"
# !pip install deplacy nlpcube
from cube.api import Cube
nlp=Cube()
nlp.load("bg")
doc=nlp("Гладна мечка хоро не играе.")
import deplacy
deplacy.render(doc)
deplacy.serve(doc,port=None)
# import graphviz
# graphviz.Source(deplacy.dot(doc))

# + [markdown] colab_type="text"
# ## с [Stanza](https://stanfordnlp.github.io/stanza)
#

# + colab_type="code"
# !pip install deplacy stanza
import stanza
stanza.download("bg")
nlp=stanza.Pipeline("bg")
doc=nlp("Гладна мечка хоро не играе.")
import deplacy
deplacy.render(doc)
deplacy.serve(doc,port=None)
# import graphviz
# graphviz.Source(deplacy.dot(doc))

# + [markdown] colab_type="text"
# ## с [spaCy-COMBO](https://github.com/KoichiYasuoka/spaCy-COMBO)
#

# + colab_type="code"
# !pip install deplacy spacy_combo
import spacy_combo
nlp=spacy_combo.load("bg_btb")
doc=nlp("Гладна мечка хоро не играе.")
import deplacy
deplacy.render(doc)
deplacy.serve(doc,port=None)
# import graphviz
# graphviz.Source(deplacy.dot(doc))

# + [markdown] colab_type="text"
# ## с [spaCy-jPTDP](https://github.com/KoichiYasuoka/spaCy-jPTDP)
#

# + colab_type="code"
# !pip install deplacy spacy_jptdp
import spacy_jptdp
nlp=spacy_jptdp.load("bg_btb")
doc=nlp("Гладна мечка хоро не играе.")
import deplacy
deplacy.render(doc)
deplacy.serve(doc,port=None)
# import graphviz
# graphviz.Source(deplacy.dot(doc))

# + [markdown] colab_type="text"
# ## с [Turku-neural-parser-pipeline](https://turkunlp.org/Turku-neural-parser-pipeline/)
#

# + colab_type="code"
# !pip install deplacy ufal.udpipe configargparse 'tensorflow<2' torch==0.4.1 torchtext==0.3.1 torchvision==0.2.1
# !test -d Turku-neural-parser-pipeline || git clone --depth=1 https://github.com/TurkuNLP/Turku-neural-parser-pipeline
# !cd Turku-neural-parser-pipeline && git submodule update --init --recursive && test -d models_bg_btb || python fetch_models.py bg_btb
import sys,subprocess
# Run the pipeline as a subprocess on CPU and capture its CoNLL-U output.
nlp=lambda t:subprocess.run([sys.executable,"full_pipeline_stream.py","--gpu","-1","--conf","models_bg_btb/pipelines.yaml"],cwd="Turku-neural-parser-pipeline",input=t,encoding="utf-8",stdout=subprocess.PIPE).stdout
doc=nlp("Гладна мечка хоро не играе.")
import deplacy
deplacy.render(doc)
deplacy.serve(doc,port=None)
# import graphviz
# graphviz.Source(deplacy.dot(doc))

# + [markdown] colab_type="text"
# ## с [spacy-udpipe](https://github.com/TakeLab/spacy-udpipe)
#

# + colab_type="code"
# !pip install deplacy spacy-udpipe
import spacy_udpipe
spacy_udpipe.download("bg")
nlp=spacy_udpipe.load("bg")
doc=nlp("Гладна мечка хоро не играе.")
import deplacy
deplacy.render(doc)
deplacy.serve(doc,port=None)
# import graphviz
# graphviz.Source(deplacy.dot(doc))
doc/bg.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # name: python3 # --- # + [markdown] id="aOtjaFH_s_k7" # # Data Structure # # + [markdown] id="noqHl-LL2f-k" # ## Array # **[Python Time Complexity Table](https://wiki.python.org/moin/TimeComplexity)**: Internally, a Python list is represented as an array; <br> # - The largest costs come from: # 1. __growing beyond the current allocation size__ (because everything must move), or # 2. from __inserting or deleting somewhere near the beginning__ (because everything after that must move). # - If you need to add/remove at both ends, consider using a __`collections.deque`__ instead. # <br> # # **Array Property:** # $$ # \begin{array}{c|l|c} # \hline \text { Operations } & \text { Working Destails } & \text { Complexity } \\ # \hline \text { indexing/lookup } & \text { You can look up any element in your array instantly } & \text { O(1) } \\ # \hline \text { Append } & \begin{array}{l} # \text { Adding a new element at the end of the array (might fail } \\ # \text { if not having enough space) } # \end{array} & \text { O(1) } \\ # \hline \text { insert } & \text { You have to shift all the rest of the elements down. } & \mathrm{O}(\mathrm{n}) \\ # \hline \text { deletion } & \begin{array}{l} # \text { Everything needs to be moved up when you delete an } \\ # \text { element. 
# }
# \end{array} & \text { O(n) } \\
# \hline
# \end{array}$$

# + [markdown] id="-UJfqtUT3EsC"
# ## List

# + [markdown] id="KnJwqfHU3FIW"
# ### Linked List
# Linked list is an unordered List:
# - need to maintain the relative positioning of the items
# - don't need to maintain positioning in contiguous memory
# >![link list](http://drive.google.com/uc?export=view&id=1UyYTk8mGcAA21YRfZ_x16xIXZyoP2qc6)

# + colab={"base_uri": "https://localhost:8080/"} id="n1ok-tZm3MpW"
class Node:
    """Node is the basic building block for the linked list implementation"""

    def __init__(self, init_data):
        self.data = init_data  # payload carried by this node
        self.next = None       # reference to the next node; None marks the end of the chain

    def get_data(self):
        return self.data

    def get_next(self):
        return self.next

    def set_data(self, new_data):
        self.data = new_data

    def set_next(self, new_next):
        self.next = new_next


class UnorderedList:
    """
    The unordered list will be built from a collection of nodes, each linked
    to the next by explicit references.
    """

    def __init__(self):
        self.head = None  # head will be assigned with a node

    def is_empty(self):
        return self.head == None  # True if no node in Linked List

    def add(self, item):
        # O(1): the new node is pushed onto the front of the chain.
        temp = Node(item)
        temp.set_next(self.head)
        self.head = temp

    # all methods below need the traversal technique (traverse the linked list)
    def size(self):  # O(n)
        """Return the number of nodes in the list."""
        current = self.head
        count = 0
        while current != None:
            count = count + 1
            current = current.get_next()
        # BUG FIX: the original had a bare `return` here, so size() always
        # returned None even though `count` had been computed.
        return count

    def search(self, item):  # O(n)
        """If item is in the Linked List or not"""
        current = self.head
        found = False
        while current != None and not found:
            if current.get_data() == item:
                found = True
            else:
                current = current.get_next()
        return found

    def remove(self, item):  # O(n)
        # NOTE(review): assumes `item` is present — removing a missing item
        # walks off the end of the chain (original behavior, kept unchanged).
        current = self.head
        previous = None
        found = False
        while not found:
            if current.get_data() == item:
                found = True
            else:
                previous = current
                current = current.get_next()
        if previous == None:
            # Removing the head node: re-point head past it.
            self.head = current.get_next()
        else:
            previous.set_next(current.get_next())

# + id="s9vasV5YCXAO"


# + id="fIqbi7xQCMqO"


# + [markdown] id="xHpGgiV-tCBb"
# ## Stack
#
# \begin{array}{c|l|c}
# \hline \text { push } & \text { add } 1 \text { item to the top of stack } & \text { O(1) } \\
# \hline \text { pop } & \text { pop the 1st item from the top of stack } & \text { O(1) } \\
# \hline \text { peek } & \text{retrieve the 1st item from the top of stack} & \text { O(1) } \\
# \hline
# \end{array}

# + id="y7QSYiR9tFKr"
class Stack:
    """LIFO stack backed by a Python list (top of stack = end of list)."""

    def __init__(self):
        self.items = []

    def is_empty(self):
        return self.items == []

    def push(self, item):
        self.items.append(item)  # O(1)

    def pop(self):
        return self.items.pop()  # O(1)

    def peek(self):
        """peek the top most element"""
        return self.items[len(self.items)-1]  # O(1)

    def size(self):
        return len(self.items)


# + [markdown] id="d23tbRrxsxaa"
# ## Queue
# - **First-In First-Out (FIFO)**
# 1. 用 **Array** (数组)实现: 要处理扩容缩容的问题
#    (implement with an **array**: must handle growing/shrinking the backing storage)
# 2.
# 用 **Link List** (链表, default)实现: 没有上面的扩容问题,但需要更多的Memory存储节点指针(pointer)。
#    (implement with a **linked list** (the default): no resizing problem, but extra memory is needed for node pointers)
#
# **Python Queue** Implementation:
# [`class queue.Queue(maxsize=0)`](https://docs.python.org/3/library/queue.html#queue.Queue)
#
# **Queue Property (Link List version):**
# \begin{array}{c|l|c}
# \hline \text { enqueue } & \text { insert } 1 \text { item to the end of queue } & \text { O(1) } \\
# \hline \text { dequeue } & \text { pop 1st item from the start of queue } & \text { O(1) } \\
# \hline \text { peek } & & \text { O(1) } \\
# \hline
# \end{array}

# + id="9x3lILksuItG"
"""Use Array to Creat Queue (It by default use Link List instead)"""
class Queue:
    """FIFO queue on top of a plain Python list.

    The rear of the queue is list index 0 and the front is the last list
    element, so enqueue costs O(n) (every element shifts) while dequeue
    is O(1).
    """

    def __init__(self):
        self.items = []

    def is_empty(self):
        return len(self.items) == 0

    def enqueue(self, item):
        # O(n): inserting at index 0 shifts every existing element.
        self.items.insert(0, item)

    def dequeue(self):
        # O(1): the front of the queue lives at the end of the list.
        return self.items.pop()

    def size(self):
        return len(self.items)


q = Queue()
for word in ("hello", "world", "!"):
    q.enqueue(word)
q.dequeue()

# + [markdown] id="GSaQ5JjBF6iw"
# **Hot Potato Example**

# + id="5Tf7W533F-a9"
# Survival game: players count off around a circle; every `num`-th player is
# eliminated until a single player — the winner — remains.
def hot_potato(name_list, num):
    """Return the last name left after repeated every-`num`-th eliminations."""
    circle = Queue()
    for name in name_list:
        circle.enqueue(name)
    while circle.size() > 1:
        # Pass the potato num-1 times (front of the circle moves to the back)...
        for _ in range(num - 1):
            circle.enqueue(circle.dequeue())
        # ...then whoever is now holding it is out.
        circle.dequeue()
    return circle.dequeue()


name_list = ["Bill", "David", "Susan", "Jane", "Kent", "Brad"]
hot_potato(name_list, 7)

# + [markdown] id="DCI1K7FZKsvi"
# **Printing Tasks**

# + id="ae6tDeT5RRs5"
import random


class Printer:
    """Simulated lab printer that works through one Task at a time."""

    def __init__(self, ppm):
        self.page_rate = ppm        # printing speed, pages per minute
        self.current_task = None    # Task being printed, or None when idle
        self.time_remaining = 0     # seconds left on the current task

    def tick(self):
        """Advance the simulated clock by one second."""
        if self.current_task is None:
            return
        self.time_remaining = self.time_remaining - 1
        if self.time_remaining <= 0:
            self.current_task = None

    def is_busy(self):
        return self.current_task is not None

    def start_next(self, new_task):
        # Printing time in seconds = pages * (60 / pages-per-minute).
        self.current_task = new_task
        self.time_remaining = new_task.get_pages() * 60 / self.page_rate


class Task:
    """A print job carrying its creation time and a random 1-20 page count."""

    def __init__(self, time):
        self.timestamp = time                # second at which the task joined the queue
        self.pages = random.randrange(1, 21)

    def get_stamp(self):
        return self.timestamp

    def get_pages(self):
        return self.pages

    def wait_time(self, current_time):
        return current_time - self.timestamp


# + id="nLTo-MKydtKK"
def simulation(num_seconds, pages_per_minute):
    """Run one printing-room simulation and report the mean queue wait."""
    lab_printer = Printer(pages_per_minute)
    print_queue = Queue()
    waiting_times = []
    for current_second in range(num_seconds):
        # On average one new job arrives every 180 seconds.
        if new_print_task():
            task = Task(current_second)
            print_queue.enqueue(task)
        if not lab_printer.is_busy() and not print_queue.is_empty():
            next_task = print_queue.dequeue()
            waiting_times.append(next_task.wait_time(current_second))
            lab_printer.start_next(next_task)
        lab_printer.tick()
    average_wait = sum(waiting_times) / len(waiting_times)
    print("Average Wait %6.2f sec %3d tasks remaining."%(average_wait, print_queue.size()))


def new_print_task():
    """Bernoulli draw: a new job arrives with probability 1/180 this second."""
    return random.randrange(1, 181) == 180


for _ in range(10):
    simulation(3600, 5)

# + [markdown] id="FEjKTJBds_F7"
# ## Deque
# ie. double-ended queue (pronounced "deck")
#
# It has two ends, a front and a rear, and the items remain positioned in the collection
# - New items can be added at either the front or the rear.
# - Likewise, existing items can be removed from either end.
#
# This hybrid linear structure provides all the capabilities of **stacks** and **queues** in a single data structure.
# - it does not require the LIFO and FIFO orderings that are enforced by those data structures. It is up to you to make consistent use of the addition and removal operations.

# + id="_w4PpNEItytF"
class Deque:
    """Double-ended queue on a Python list: rear = index 0, front = last element."""

    def __init__(self):
        self.items = []

    def is_empty(self):
        return len(self.items) == 0

    def add_front(self, item):
        self.items.append(item)        # O(1)

    def add_rear(self, item):
        self.items.insert(0, item)     # O(n)

    def remove_front(self):
        return self.items.pop()        # O(1)

    def remove_rear(self):
        return self.items.pop(0)       # O(n)

    def size(self):
        return len(self.items)


# + [markdown] id="vEVibLRD1JVr"
# **Palindrome Checker**

# + id="MU5T5YMb2Kyq"
def pal_checker(a_string):
    """Return True when a_string reads the same forwards and backwards."""
    char_deque = Deque()
    for ch in a_string:
        char_deque.add_rear(ch)
    # Compare the two ends inward; any mismatch settles the question.
    while char_deque.size() > 1:
        if char_deque.remove_front() != char_deque.remove_rear():
            return False
    return True


print(pal_checker("lsdkjfskf"))
print(pal_checker("radar"))

# + id="j_Hk6f5s2wHV"
Python/data_struct_dated.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # name: python3 # --- # + [markdown] colab_type="text" id="73ztBH5yK_bS" # # Multiple changepoint detection and Bayesian model selection # # + colab={} colab_type="code" id="n5om5yiB_zvF" #@title Licensed under the Apache License, Version 2.0 (the "License"); { display-mode: "form" } # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # + [markdown] colab_type="text" id="Qianaf6u_7G_" # <table class="tfo-notebook-buttons" align="left"> # <td> # <a target="_blank" href="https://colab.research.google.com/github/tensorflow/probability/blob/master/tensorflow_probability/examples/jupyter_notebooks/Multiple_changepoint_detection_and_Bayesian_model_selection.ipynb"><img src="https://www.tensorflow.org/images/colab_logo_32px.png" />Run in Google Colab</a> # </td> # <td> # <a target="_blank" href="https://github.com/tensorflow/probability/blob/master/tensorflow_probability/examples/jupyter_notebooks/Multiple_changepoint_detection_and_Bayesian_model_selection.ipynb"><img src="https://www.tensorflow.org/images/GitHub-Mark-32px.png" />View source on GitHub</a> # </td> # </table> # + [markdown] colab_type="text" id="-o9zA5TO_-hx" # ## Imports # + colab={} colab_type="code" id="No2QPkJ1_9z9" import numpy as np import tensorflow.compat.v2 as tf tf.enable_v2_behavior() import tensorflow_probability as tfp from tensorflow_probability import distributions as tfd from matplotlib import pylab 
as plt # %matplotlib inline import scipy.stats # + [markdown] colab_type="text" id="UoIGcwDcLK8s" # ## Task: changepoint detection with multiple changepoints # + [markdown] colab_type="text" id="MkPCuGGp464l" # Consider a changepoint detection task: events happen at a rate that changes over time, driven by sudden shifts in the (unobserved) state of some system or process generating the data. # # For example, we might observe a series of counts like the following: # + colab={"height": 285} colab_type="code" id="kmk8w7-vuKSm" outputId="26a4f9b6-1ceb-4c01-ecdc-705a3ce5ff41" true_rates = [40, 3, 20, 50] true_durations = [10, 20, 5, 35] observed_counts = np.concatenate([ scipy.stats.poisson(rate).rvs(num_steps) for (rate, num_steps) in zip(true_rates, true_durations) ]).astype(np.float32) plt.plot(observed_counts) # + [markdown] colab_type="text" id="TWx9cuas0EcE" # These could represent the number of failures in a datacenter, number of visitors to a webpage, number of packets on a network link, etc. # # Note it's not entirely apparent how many distinct system regimes there are just from looking at the data. Can you tell where each of the three switchpoints occurs? # + [markdown] colab_type="text" id="09nB0iTzky85" # ## Known number of states # # We'll first consider the (perhaps unrealistic) case where the number of unobserved states is known a priori. Here, we'd assume we know there are four latent states. # # We model this problem as a switching (inhomogeneous) Poisson process: at each point in time, the number of events that occur is Poisson distributed, and the *rate* of events is determined by the unobserved system state $z_t$: # # $$x_t \sim \text{Poisson}(\lambda_{z_t})$$ # # The latent states are discrete: $z_t \in \{1, 2, 3, 4\}$, so $\lambda = [\lambda_1, \lambda_2, \lambda_3, \lambda_4]$ is a simple vector containing a Poisson rate for each state. 
# To model the evolution of states over time, we'll define a simple transition model $p(z_t | z_{t-1})$: let's say that at each step we stay in the previous state with some probability $p$, and with probability $1-p$ we transition to a different state uniformly at random. The initial state is also chosen uniformly at random, so we have:
#
# $$
# \begin{align*}
# z_1 &\sim \text{Categorical}\left(\left\{\frac{1}{4}, \frac{1}{4}, \frac{1}{4}, \frac{1}{4}\right\}\right)\\
# z_t | z_{t-1} &\sim \text{Categorical}\left(\left\{\begin{array}{cc}p & \text{if } z_t = z_{t-1} \\ \frac{1-p}{4-1} & \text{otherwise}\end{array}\right\}\right)
# \end{align*}$$
#
# These assumptions correspond to a [hidden Markov model](http://mlg.eng.cam.ac.uk/zoubin/papers/ijprai.pdf) with Poisson emissions. We can encode them in TFP using `tfd.HiddenMarkovModel`. First, we define the transition matrix and the uniform prior on the initial state:

# + colab={"height": 135} colab_type="code" id="0qs_l4p4nygq"
num_states = 4

initial_state_logits = np.zeros([num_states], dtype=np.float32) # uniform distribution

# "Sticky" transitions: stay in the current state with prob 1 - daily_change_prob,
# otherwise jump uniformly to one of the other (num_states - 1) states.
daily_change_prob = 0.05
transition_probs = daily_change_prob / (num_states-1) * np.ones(
    [num_states, num_states], dtype=np.float32)
np.fill_diagonal(transition_probs,
                 1-daily_change_prob)

print("Initial state logits:\n{}".format(initial_state_logits))
print("Transition matrix:\n{}".format(transition_probs))

# + [markdown] colab_type="text" id="vWshnDRepxaT"
# Next, we build a `tfd.HiddenMarkovModel` distribution, using a trainable variable to represent the rates associated with each system state. We parameterize the rates in log-space to ensure they are positive-valued.

# + colab={} colab_type="code" id="bvEpqBxvoleY"
# Define variable to represent the unknown log rates.
# Initialized near log(mean observed count), plus noise to break the symmetry
# between the (otherwise interchangeable) latent states.
trainable_log_rates = tf.Variable(
  np.log(np.mean(observed_counts)) + tf.random.normal([num_states]),
  name='log_rates')

hmm = tfd.HiddenMarkovModel(
  initial_distribution=tfd.Categorical(
      logits=initial_state_logits),
  transition_distribution=tfd.Categorical(probs=transition_probs),
  observation_distribution=tfd.Poisson(log_rate=trainable_log_rates),
  num_steps=len(observed_counts))

# + [markdown] colab_type="text" id="4JA6D9EsqNTe"
# Finally, we define the model's total log density, including a weakly-informative LogNormal prior on the rates, and run an optimizer to compute the [maximum a posteriori](https://en.wikipedia.org/wiki/Maximum_a_posteriori_estimation) (MAP) fit to the observed count data.

# + colab={} colab_type="code" id="6mirKxnNqJSu"
# Weakly-informative prior on the (positive) Poisson rates.
rate_prior = tfd.LogNormal(5, 5)

def log_prob():
  # Joint log density: prior on the rates plus the HMM marginal likelihood of
  # the counts (the discrete states are summed out inside hmm.log_prob).
  return (tf.reduce_sum(rate_prior.log_prob(tf.math.exp(trainable_log_rates))) +
          hmm.log_prob(observed_counts))

optimizer = tf.keras.optimizers.Adam(learning_rate=0.1)

@tf.function(autograph=False)
def train_op():
  # One gradient step on the negative MAP objective; returns the loss and the
  # current rates in natural (not log) space.
  with tf.GradientTape() as tape:
    neg_log_prob = -log_prob()
  grads = tape.gradient(neg_log_prob, [trainable_log_rates])[0]
  optimizer.apply_gradients([(grads, trainable_log_rates)])
  return neg_log_prob, tf.math.exp(trainable_log_rates)

# + colab={"height": 236} colab_type="code" id="gSjyTtkDrOHu"
for step in range(201):
  loss, rates = [t.numpy() for t in train_op()]
  if step % 20 == 0:
    print("step {}: log prob {} rates {}".format(step, -loss, rates))
print("Inferred rates: {}".format(rates))
print("True rates: {}".format(true_rates))

# + [markdown] colab_type="text" id="9kGRv8gwrtP5"
# It worked! Note that the latent states in this model are identifiable only up to permutation, so the rates we recovered are in a different order, and there's a bit of noise, but generally they match pretty well.
# + [markdown] colab_type="text" id="43AfcMTjvs7a" # ### Recovering the state trajectory # # Now that we've fit the model, we might want to reconstruct *which* state the model believes the system was in at each timestep. # # This is a *posterior inference* task: given the observed counts $x_{1:T}$ and model parameters (rates) $\lambda$, we want to infer the sequence of discrete latent variables, following the posterior distribution $p(z_{1:T} | x_{1:T}, \lambda)$. In a hidden Markov model, we can efficiently compute marginals and other properties of this distribution using standard message-passing algorithms. In particular, the `posterior_marginals` method will efficiently compute (using the [forward-backward algorithm](https://en.wikipedia.org/wiki/Forward%E2%80%93backward_algorithm)) the marginal probability distribution $p(Z_t = z_t | x_{1:T})$ over the discrete latent state $Z_t$ at each timestep $t$. # + colab={} colab_type="code" id="IpTbdyah-IyX" # Runs forward-backward algorithm to compute marginal posteriors. posterior_dists = hmm.posterior_marginals(observed_counts) posterior_probs = posterior_dists.probs_parameter().numpy() # + [markdown] colab_type="text" id="cOYMlvssFDwx" # Plotting the posterior probabilities, we recover the model's "explanation" of the data: at which points in time is each state active? 
# + colab={"height": 731} colab_type="code" id="oZ7C937t-Xh3" outputId="6dba937e-9a29-4a6b-e8dd-dbea76dd1d23" def plot_state_posterior(ax, state_posterior_probs, title): ln1 = ax.plot(state_posterior_probs, c='blue', lw=3, label='p(state | counts)') ax.set_ylim(0., 1.1) ax.set_ylabel('posterior probability') ax2 = ax.twinx() ln2 = ax2.plot(observed_counts, c='black', alpha=0.3, label='observed counts') ax2.set_title(title) ax2.set_xlabel("time") lns = ln1+ln2 labs = [l.get_label() for l in lns] ax.legend(lns, labs, loc=4) ax.grid(True, color='white') ax2.grid(False) fig = plt.figure(figsize=(10, 10)) plot_state_posterior(fig.add_subplot(2, 2, 1), posterior_probs[:, 0], title="state 0 (rate {:.2f})".format(rates[0])) plot_state_posterior(fig.add_subplot(2, 2, 2), posterior_probs[:, 1], title="state 1 (rate {:.2f})".format(rates[1])) plot_state_posterior(fig.add_subplot(2, 2, 3), posterior_probs[:, 2], title="state 2 (rate {:.2f})".format(rates[2])) plot_state_posterior(fig.add_subplot(2, 2, 4), posterior_probs[:, 3], title="state 3 (rate {:.2f})".format(rates[3])) plt.tight_layout() # + [markdown] colab_type="text" id="_QhFHJ01NPVj" # In this (simple) case, we see that the model is usually quite confident: at most timesteps it assigns essentially all probability mass to a single one of the four states. Luckily, the explanations look reasonable! 
# + [markdown] colab_type="text" id="92psCOwMGiQp" # We can also visualize this posterior in terms of the rate associated with the *most likely* latent state at each timestep, condensing the probabilistic posterior into a single explanation: # + colab={} colab_type="code" id="PsXpBrH3DKbl" most_probable_states = np.argmax(posterior_probs, axis=1) most_probable_rates = rates[most_probable_states] # + colab={"height": 312} colab_type="code" id="CCIwVTnyOcsW" outputId="26db7d37-bfb8-4609-a5d3-e96fdef53fa7" fig = plt.figure(figsize=(10, 4)) ax = fig.add_subplot(1, 1, 1) ax.plot(most_probable_rates, c='green', lw=3, label='inferred rate') ax.plot(observed_counts, c='black', alpha=0.3, label='observed counts') ax.set_ylabel("latent rate") ax.set_xlabel("time") ax.set_title("Inferred latent rate over time") ax.legend(loc=4) # + [markdown] colab_type="text" id="4MhfH3a4OBGV" # Technical note: instead of the most probable state at each individual timestep, $z^*_t = \text{argmax}_{z_t} p(z_t | x_{1:T})$, we could have asked for the most probable latent *trajectory*, $z^* = \text{argmax}_z p(z | x_{1:T})$ (or even samples from the posterior over trajectories!), taking dependence between timesteps into account. To illustrate the difference, suppose a rock-paper-scissors player plays rock 40% of the time, but never twice in a row: rock may be the most likely marginal state at every point in time, but "rock, rock, rock...'' is definitely *not* the most likely trajectory -- in fact, it has zero probability! # # TODO(davmre): once `tfp.HiddenMarkovModel` implements the [Viterbi algorithm](https://en.wikipedia.org/wiki/Viterbi_algorithm) to find highest-probability trajectories, update this section to use it. # + [markdown] colab_type="text" id="7ytq0tN7tteU" # ## Unknown number of states # # In real problems, we may not know the 'true' number of states in the system we're modeling. 
This may not always be a concern: if you don't particularly care about the identities of the unknown states, you could just run a model with more states than you know the model will need, and learn (something like) a bunch of duplicate copies of the actual states. But let's assume you do care about inferring the 'true' number of latent states. # # We can view this as a case of [Bayesian model selection](http://alumni.media.mit.edu/~tpminka/statlearn/demo/): we have a set of candidate models, each with a different number of latent states, and we want to choose the one that is most likely to have generated the observed data. To do this, we compute the marginal likelihood of the data under each model (we could also add a prior on the models themselves, but that won't be necessary in this analysis; the [Bayesian Occam's razor](https://www.cs.princeton.edu/courses/archive/fall09/cos597A/papers/MacKay2003-Ch28.pdf) turns out to be sufficient to encode a preference towards simpler models). # # Unfortunately, the true marginal likelihood, which integrates over both the discrete states $z_{1:T}$ and the (vector of) rate parameters $\lambda$, $$p(x_{1:T}) = \int p(x_{1:T}, z_{1:T}, \lambda) dz d\lambda,$$ is not tractable for this model. For convenience, we'll approximate it using a so-called "[empirical Bayes](https://www.cs.ubc.ca/~schmidtm/Courses/540-W16/L19.pdf)" or "type II maximum likelihood" estimate: instead of fully integrating out the (unknown) rate parameters $\lambda$ associated with each system state, we'll optimize over their values: # # $$\tilde{p}(x_{1:T}) = \max_\lambda \int p(x_{1:T}, z_{1:T}, \lambda) dz$$ # # This approximation may overfit, i.e., it will prefer more complex models than the true marginal likelihood would. 
# We could consider more faithful approximations, e.g., optimizing a variational lower bound, or using a Monte Carlo estimator such as [annealed importance sampling](https://www.tensorflow.org/probability/api_docs/python/tfp/mcmc/sample_annealed_importance_chain); these are (sadly) beyond the scope of this notebook. (For more on Bayesian model selection and approximations, chapter 7 of the excellent [Machine Learning: a Probabilistic Perspective
# ](https://www.cs.ubc.ca/~murphyk/MLbook/) is a good reference.)
#
# In principle, we could do this model comparison simply by rerunning the optimization above many times with different values of `num_states`, but that would be a lot of work. Here we'll show how to consider multiple models in parallel, using TFP's `batch_shape` mechanism for vectorization.

# + [markdown] colab_type="text" id="dtClNe6fyZAD"
# **Transition matrix and initial state prior**: rather than building a single model description, now we'll build a *batch* of transition matrices and prior logits, one for each candidate model up to `max_num_states`. For easy batching we'll need to ensure that all computations have the same 'shape': this must correspond to the dimensions of the largest model we'll fit. To handle smaller models, we can 'embed' their descriptions in the topmost dimensions of the state space, effectively treating the remaining dimensions as dummy states that are never used.

# + colab={"height": 270} colab_type="code" id="vqyTuY5hrmdR"
max_num_states = 10

def build_latent_state(num_states, max_num_states, daily_change_prob=0.05):
  """Build the prior logits and transition matrix for one candidate model.

  A model with `num_states` active states is embedded in a `max_num_states`
  space: the extra states get ~zero prior mass and are absorbing, so they can
  never be entered or left. Within the active block, each state keeps
  probability 1 - daily_change_prob of staying put and spreads
  daily_change_prob uniformly over the other active states.
  """
  # exp(-100) ~= 0: states outside the current model are a-priori impossible.
  initial_state_logits = np.full([max_num_states], -100., dtype=np.float32)
  initial_state_logits[:num_states] = 0.

  # Start from the identity (dummy states self-loop forever), then overwrite
  # the active top-left sub-block with the sticky transition probabilities.
  transition_probs = np.eye(max_num_states, dtype=np.float32)
  if num_states > 1:
    active_block = transition_probs[:num_states, :num_states]
    active_block[...] = daily_change_prob / (num_states - 1)
    np.fill_diagonal(active_block, 1 - daily_change_prob)
  return initial_state_logits, transition_probs

# For each candidate model, build the initial state prior and transition matrix.
batch_initial_state_logits = []
batch_transition_probs = []
for num_states in range(1, max_num_states + 1):
  initial_state_logits, transition_probs = build_latent_state(
      num_states=num_states, max_num_states=max_num_states)
  batch_initial_state_logits.append(initial_state_logits)
  batch_transition_probs.append(transition_probs)
batch_initial_state_logits = np.array(batch_initial_state_logits)
batch_transition_probs = np.array(batch_transition_probs)

print("Shape of initial_state_logits: {}".format(batch_initial_state_logits.shape))
print("Shape of transition probs: {}".format(batch_transition_probs.shape))
print("Example initial state logits for num_states==3:\n{}".format(batch_initial_state_logits[2, :]))
print("Example transition_probs for num_states==3:\n{}".format(batch_transition_probs[2, :, :]))

# + [markdown] colab_type="text" id="k9NMBMBq2UQw"
# Now we proceed similarly as above. This time we'll use an extra batch dimension in `trainable_rates` to separately fit the rates for each model under consideration.
# + colab={} colab_type="code" id="Ok-3Nzt1suyw" trainable_log_rates = tf.Variable( (np.log(np.mean(observed_counts)) * np.ones([batch_initial_state_logits.shape[0], max_num_states]) + tf.random.normal([1, max_num_states])), name='log_rates') hmm = tfd.HiddenMarkovModel( initial_distribution=tfd.Categorical( logits=batch_initial_state_logits), transition_distribution=tfd.Categorical(probs=batch_transition_probs), observation_distribution=tfd.Poisson(log_rate=trainable_log_rates), num_steps=len(observed_counts)) # + [markdown] colab_type="text" id="eC5vFBX12PvA" # In computing the total log prob, we are careful to sum over only the priors for the rates actually used by each model component: # # + colab={} colab_type="code" id="ly0mT_mqdubx" rate_prior = tfd.LogNormal(5, 5) def log_prob(): prior_lps = rate_prior.log_prob(tf.math.exp(trainable_log_rates)) prior_lp = tf.stack( [tf.reduce_sum(prior_lps[i, :i+1]) for i in range(max_num_states)]) return prior_lp + hmm.log_prob(observed_counts) # + colab={} colab_type="code" id="PR5zL24UDkPW" @tf.function(autograph=False) def train_op(): with tf.GradientTape() as tape: neg_log_prob = -log_prob() grads = tape.gradient(neg_log_prob, [trainable_log_rates])[0] optimizer.apply_gradients([(grads, trainable_log_rates)]) return neg_log_prob, tf.math.exp(trainable_log_rates) # + [markdown] colab_type="text" id="yPqvJ9TS5F98" # Now we optimize the *batch* objective we've constructed, fitting all candidate models simultaneously: # + colab={"height": 388} colab_type="code" id="hAb22rYe1K_O" outputId="724a59a2-dbb9-4385-c7ae-eb3733445538" for step in range(201): loss, rates = [t.numpy() for t in train_op()] if step % 20 == 0: print("step {}: loss {}".format(step, loss)) # + colab={"height": 312} colab_type="code" id="_Jsthql_IxhW" outputId="e9f65ce2-2752-4ae9-8653-8829d6644e93" num_states = np.arange(1, max_num_states+1) plt.plot(num_states, -loss) plt.ylim([-400, -200]) plt.ylabel("marginal likelihood $\\tilde{p}(x)$") 
plt.xlabel("number of latent states") plt.title("Model selection on latent states") # + [markdown] colab_type="text" id="Kq7SKiR-6c1l" # Examining the likelihoods, we see that the (approximate) marginal likelihood prefers a three- or four-state model (the specific ordering may vary between runs of this notebook). This seems quite plausible -- the 'true' model had four states, but from just looking at the data it's hard to rule out a three-state explanation. # # + [markdown] colab_type="text" id="u0tqU6Lo6pFD" # We can also extract the rates fit for each candidate model: # + colab={"height": 253} colab_type="code" id="lnXTiGX4d6e4" outputId="7c26811f-ad32-4cad-dc61-1d0f315174d8" for i, learned_model_rates in enumerate(rates): print("rates for {}-state model: {}".format(i+1, learned_model_rates[:i+1])) # + [markdown] colab_type="text" id="8eArj7lke9Ei" # And plot the explanations each model provides for the data: # + colab={} colab_type="code" id="XEuhytSKcn4g" posterior_probs = hmm.posterior_marginals( observed_counts).probs_parameter().numpy() most_probable_states = np.argmax(posterior_probs, axis=-1) # + colab={"height": 874} colab_type="code" id="g3RiZCjzuL8o" outputId="43717bab-1eff-4ed5-83e3-8fea9048fd4f" fig = plt.figure(figsize=(14, 12)) for i, learned_model_rates in enumerate(rates): ax = fig.add_subplot(4, 3, i+1) ax.plot(learned_model_rates[most_probable_states[i]], c='green', lw=3, label='inferred rate') ax.plot(observed_counts, c='black', alpha=0.3, label='observed counts') ax.set_ylabel("latent rate") ax.set_xlabel("time") ax.set_title("{}-state model".format(i+1)) ax.legend(loc=4) plt.tight_layout() # + [markdown] colab_type="text" id="sw25-htzfxLZ" # It's easy to see how the one-, two-, and (more subtly) three-state models provide inadequate explanations. Interestingly, all models above four states provide essentially the same explanation! 
This is likely because our 'data' is relatively clean and leaves little room for alternative explanations; on messier real-world data we would expect the higher-capacity models to provide progressively better fits to the data, with some tradeoff point where the improved fit is outweighed by model complexity.

# + [markdown] colab_type="text" id="fY5E0BaPI7lz"
# ### Extensions
#
# The models in this notebook could be straightforwardly extended in many ways. For example:
#
# - allowing latent states to have different probabilities (some states may be common vs rare)
# - allowing nonuniform transitions between latent states (e.g., to learn that a machine crash is usually followed by a system reboot, which is usually followed by a period of good performance, etc.)
# - other emission models, e.g. `NegativeBinomial` to model varying dispersions in count data, or continuous distributions such as `Normal` for real-valued data.
#
tensorflow_probability/examples/jupyter_notebooks/Multiple_changepoint_detection_and_Bayesian_model_selection.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

import requests

# These are the search queries for the [Spotify Web API](https://developer.spotify.com/web-api/search-item/)

# BUG FIX: the request below was commented out, so `response` was never
# defined and `response.json()` raised a NameError. Issue the request first.
response = requests.get('https://api.spotify.com/v1/search?query=Lil&type=artist&limit=50&market=US')
Lil_data = response.json()
Lil_data.keys()

Lil_data['artists'].keys()

# **1) With "<NAME>" and "<NAME>" there are a lot of "Lil" musicians. Do a search and print a list of 50 that are playable in the USA (or the country of your choice), along with their popularity score.**

# Print each artist's name and Spotify popularity score.
Lil_artists = Lil_data['artists']['items']
for artist in Lil_artists:
    print(artist['name'], artist['popularity'])

# **2 a) What genres are most represented in the search results?**

# Finding all the genres and combining into one list.

# +
Lil_artists = Lil_data['artists']['items']
for artist in Lil_artists:
    print(artist['name'], artist['popularity'])
    # Join the genre list into one comma-separated string for display.
    if len(artist['genres']) == 0:
        print("No genres listed")
    else:
        genres = ", ".join(artist['genres'])
        print("Genres: ", genres)

# +
# Flatten every artist's genre list into a single list (duplicates kept on
# purpose so we can count occurrences below).
Lil_artists = Lil_data['artists']['items']
Lil_genres_list = []
for genres in Lil_artists:
    Lil_genres_list = genres["genres"] + Lil_genres_list
print(Lil_genres_list)
# -

# Counting the genres.

# Pair each unique genre with its occurrence count.
Genre_list = [[x, Lil_genres_list.count(x)] for x in set(Lil_genres_list)]
print(Genre_list)

# Sorting the genres by occurrences.

# +
# NOTE(review): this first sorted() call is redundant — its result is
# discarded (kept only because the notebook cell displayed it as output).
sorted(Genre_list, key = lambda x: int(x[1]), reverse=True)

Sorted_by_occurences_Genre_list = sorted(Genre_list, key = lambda x: int(x[1]), reverse=True)
print("The most frequent genre of the musicians called Lil is", Sorted_by_occurences_Genre_list[0])
# -

# **2 b) Edit your previous printout to also display a list of their genres in the format "GENRE_1, GENRE_2, GENRE_3".
If there are no genres, print "No genres listed".** Lil_artists = Lil_data['artists']['items'] for artist in Lil_artists: if artist['genres'] == []: print(artist['name'], artist['popularity'], "No genres listed.") else: print(artist['name'], artist['popularity'], artist['genres']) # + Lil_artists = Lil_data['artists']['items'] #Genres all_genres = [] #The Loop for artist in Lil_artists: #print("All Genres we have heard of:", all_genres) #print('Current artist has', artist['genres']) all_genres = all_genres + artist['genres'] print(all_genres) all_genres.count('dirty south rap') # your_list # - #This shows duplicates for genre in all_genres: genre_count = all_genres.count(genre) print(genre, "shows up", genre_count, "times.") #Unique list of all genres: #Unique List = set(list_with_duplicates) unique_genres = set(all_genres) for genre in unique_genres: genre_count = all_genres.count(genre) print(genre, "shows up", genre_count, "times.") # + #There is a library tha comes with Python called Collections #Inside of this library is Counter # - import collections # + from collections import Counter counts = Counter(all_genres) counts.most_common(1) # - # print(counts['crunk']) # + from collections import Counter counts = Collections.Counter(all_genres) counts.most_common(1) # - # #how to automate all of the results response = requests.get('https://api.spotify.com/v1/search?query=Lil&type=artist&limit=50&market=US') small_data = response.json() small_data['artists'] len(small_data['artists']) print("test") # **3 a) Use a for loop to determine who BESIDES <NAME> has the highest popularity rating.** for artist in Lil_artists: if artist['popularity'] >= 72 and artist['name'] != '<NAME>': print(artist['name']) # + #Better solution: most_popular_name = "" most_popular_score = 0 for artist in Lil_artists: #print("Comparing", artist['popularity'], 'to', most_popular_score) if artist['popularity'] > most_popular_score: print("checking for Lil Wayne") if artist['name'] == '<NAME>': 
print('go away') else: #The change you are keeping track of #a.k.a. what you are keeping track of print('not Lil Wayne, updating our notebook') most_popular_name = artist['name'] most_popular_score = artist['popularity'] print(most_popular_name, most_popular_score) # + ####### This doesn't work #name = '<NAME>' #target_score = 72 #1 INITIAL CONDITION #second_best_artists = [] #second_best_artists = [Lil Yachty] #Aggregation Problem #When you're looping through a series of serious objects #and sometimes you want to add one of those objects #to a different list #for artist in artists: # print('Looking at', artist['name']) #2 COndition #wehen we want someone on the list # if artist['popularity'] == 72: # print('!!! The artist is popularity is 72.') # second_best_artists.append(second_best_artists) # - Lil_data['artists'].keys() # **3 b) Is it the same artist who has the largest number of followers?** type(artist['followers']) artist['followers'] # Creating a list of the popularity values, so we can sort them and say which one is the highest) Lil_artists = Lil_data['artists']['items'] List_of_Followers = [] for artist in Lil_artists: List_of_Followers.append(artist['followers']['total']) print(List_of_Followers) # Deciding which one is highest: List_of_Followers.sort(reverse=True) print(List_of_Followers) Highest_Number_of_Followers = (List_of_Followers[0]) print(Highest_Number_of_Followers) for artist in Lil_artists: if artist['followers']['total'] > List_of_Followers[0] and artist['name'] != '<NAME>': print(artist['name'], "has more followers than <NAME>.") else: print("Their are no artists with more followers that <NAME>.") break # **4) Print a list of Lil's that are more popular than <NAME>.** # Establishing how high <NAME>'s popularity is. 
**Would this be possible in one go?** for artist in Lil_artists: if artist['name'] == "<NAME>": print(artist['popularity']) # + for artist in Lil_artists: if artist['popularity'] > 62: print(artist['name'], artist['popularity']) # - # **5) Pick two of your favorite Lils to fight it out, and use their IDs to print out their top tracks.** # Tip: You're going to be making two separate requests, be sure you DO NOT save them into the same variable. for artist in Lil_artists: print(artist['name'], artist['id']) response = requests.get('https://api.spotify.com/v1/artists/5einkgXXrjhfYCyac1FANB/top-tracks?country=US') Lil_Scrappy_data = response.json() type(Lil_Scrappy_data) response = requests.get('https://api.spotify.com/v1/artists/5qK5bOC6wLtuLhG5KvU17c/top-tracks?country=US') Lil_Mama_data = response.json() type(Lil_Mama_data) Lil_Scrappy_data.keys() Lil_Mama_data.keys() type(Lil_Scrappy_data.keys()) type(Lil_Mama_data.keys()) # + Scrappy_tracks = Lil_Scrappy_data['tracks'] for tracks in Scrappy_tracks: print(tracks['name']) # + Mama_tracks = Lil_Mama_data['tracks'] for tracks in Mama_tracks: print(tracks['name']) # - # **6 Will the world explode if a musicians swears? Get an average popularity for their explicit songs vs. their non-explicit songs. How many minutes of explicit songs do they have? Non-explicit?** # Number of Explicit Tracks for Lil Scrappy. 
# + explicit_track_scrappy = 0 non_explicit_track_scrappy = 0 unknown_scrappy = 0 for tracks in Scrappy_tracks: if tracks['explicit'] == True: explicit_track_scrappy = explicit_track_scrappy + 1 elif tracks['explicit'] == False: non_explicit_track_scrappy = non_explicit_track_scrappy + 1 else: unknown_scrappy = unknown_scrappy + 1 explicit_track_pop_total = 0 non_explicit_track_pop_total = 0 for tracks in Scrappy_tracks: if tracks['explicit'] == True: explicit_track_pop_total = explicit_track_pop_total + tracks['popularity'] elif tracks['explicit'] == False: non_explicit_track_pop_total = non_explicit_track_pop_total + tracks['popularity'] explicit_track_duration_total = 0 non_explicit_track_duration_total = 0 for tracks in Scrappy_tracks: if tracks['explicit'] == True: explicit_track_duration_total = explicit_track_duration_total + tracks['duration_ms'] elif tracks['explicit'] == False: non_explicit_track_duration_total = non_explicit_track_duration_total + tracks['duration_ms'] print("The average rating of explicit songs by Lil Scrappy is", round(explicit_track_pop_total / explicit_track_scrappy), ".") print("The average rating of non-explicit songs by Lil Scrappy is", round(non_explicit_track_pop_total / non_explicit_track_scrappy), ".") print("The duration of explicit song material of Lil Scrappy is", round(explicit_track_duration_total / 1000), "minutes, and of non explicit material is", round(non_explicit_track_duration_total / 1000), "minutes.") # - # And this is the same for Lil Mama: # + explicit_track_Mama = 0 non_explicit_track_Mama = 0 unknown = 0 for tracks in Mama_tracks: if tracks['explicit'] == True: explicit_track_Mama = explicit_track_Mama + 1 elif tracks['explicit'] == False: non_explicit_track_Mama = non_explicit_track_Mama + 1 else: unknown = unknown + 1 explicit_track_pop_total_Mama = 0 non_explicit_track_pop_total_Mama = 0 for tracks in Mama_tracks: if tracks['explicit'] == True: explicit_track_pop_total_Mama = explicit_track_pop_total_Mama + 
tracks['popularity'] elif tracks['explicit'] == False: non_explicit_track_pop_total_Mama = non_explicit_track_pop_total_Mama + tracks['popularity'] explicit_track_duration_total_Mama = 0 non_explicit_track_duration_total_Mama = 0 for tracks in Mama_tracks: if tracks['explicit'] == True: explicit_track_duration_total_Mama = explicit_track_duration_total_Mama + tracks['duration_ms'] elif tracks['explicit'] == False: non_explicit_track_duration_total_Mama = non_explicit_track_duration_total_Mama + tracks['duration_ms'] print("The average rating of explicit songs by Lil Mama is", round(explicit_track_pop_total_Mama / explicit_track_Mama), ".") print("The average rating of non-explicit songs by Lil Mama is", round(non_explicit_track_pop_total_Mama / non_explicit_track_Mama), ".") print("The duration of explicit song material of Lil Mama is", round(explicit_track_duration_total_Mama / 1000), "minutes, and of non explicit material is", round(non_explicit_track_duration_total_Mama / 1000), "minutes.") # - # **7 a) Since we're talking about Lils, what about Biggies? How many total "Biggie" artists are there? How many total "Lil"s? If you made 1 request every 5 seconds, how long would it take to download information on all the Lils vs the Biggies?** response = requests.get('https://api.spotify.com/v1/search?query=Biggie&type=artist&limit=50&market=US') Biggie_data = response.json() response = requests.get('https://api.spotify.com/v1/search?query=Lil&type=artist&limit=50&market=US') Lil_data = response.json() Biggie_artists = Biggie_data['artists']['total'] Lil_artists = Lil_data['artists']['total'] print("There are", Biggie_artists, "artists named Biggie on Spotify and", Lil_artists, "named Lil",) Total_Download_Time_Biggie = Biggie_artists / 50 * 5 Total_Download_Time_Lil = Lil_artists / 50 * 5 print("It would take", round(Total_Download_Time_Biggie), "seconds to download all the Biggie artists and", round(Total_Download_Time_Lil), "seconds to download the Lil artists." 
) # **8) Out of the top 50 "Lil"s and the top 50 "Biggie"s, who is more popular on average?** Lil_artists_popularity = Lil_data['artists']['items'] popularity_total = 0 for popularity in Lil_artists_popularity: popularity_total = popularity_total + popularity['popularity'] print("The average rating for the top 50 artists called Lil is:", round(popularity_total / 50)) Biggie_artists_popularity = Biggie_data['artists']['items'] Biggie_popularity_total = 0 for popularity2 in Biggie_artists_popularity: Biggie_popularity_total = Biggie_popularity_total + popularity2['popularity'] print("The average rating for the top 50 artists called Biggie is:", round(Biggie_popularity_total / 49) ) Biggie_popularity = Biggie_data['artists']['items'] for artist in Biggie_popularity: print(artist['name'], artist['popularity'])
05/.ipynb_checkpoints/Spotify_Homework_5_Skinner_Class_solutions-checkpoint.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/chiranjeet14/ML_Projects/blob/master/Competition/churn_rate_prediction.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>

# +
import sklearn

# Additional packages -- uncomment in a fresh environment:
# # !pip3 install category_encoders pandas_profiling catboost xgboost boto3 lazypredict > /dev/null

# +
import gc
import time
import warnings

import numpy as np
import pandas as pd
import requests

warnings.filterwarnings("ignore")
gc.enable()

# + language="bash"
# # !mkdir -p /home/jovyan/work/dataset


# +
def download_file_from_google_drive(id, destination):
    """Download a publicly shared Google Drive file to *destination*.

    Large files trigger Google's "can't virus-scan" interstitial page; the
    confirm token from its cookie is replayed to fetch the real payload.
    """
    URL = "https://docs.google.com/uc?export=download"
    session = requests.Session()
    response = session.get(URL, params={'id': id}, stream=True)
    token = get_confirm_token(response)
    if token:
        response = session.get(URL, params={'id': id, 'confirm': token}, stream=True)
    save_response_content(response, destination)


def get_confirm_token(response):
    """Return Google's download-warning cookie value, or None if absent."""
    for key, value in response.cookies.items():
        if key.startswith('download_warning'):
            return value
    return None


def save_response_content(response, destination):
    """Stream the response body to disk in 32 KiB chunks."""
    CHUNK_SIZE = 32768
    with open(destination, "wb") as f:
        for chunk in response.iter_content(CHUNK_SIZE):
            if chunk:  # filter out keep-alive new chunks
                f.write(chunk)


# +
def model_score(actual_value, predicted_values):
    """Print confusion matrix, accuracy, classification report, recall and
    ROC AUC for a set of predictions (binary-classification metrics)."""
    from sklearn.metrics import (accuracy_score, classification_report,
                                 confusion_matrix, recall_score, roc_auc_score)
    print('Confusion Matrix :')
    print(confusion_matrix(actual_value, predicted_values))
    print('Accuracy Score :', accuracy_score(actual_value, predicted_values))
    print('Report : ')
    print(classification_report(actual_value, predicted_values))
    print('Recall Score : ')
    print(recall_score(actual_value, predicted_values))
    print('ROC AUC Score : ')
    print(roc_auc_score(actual_value, predicted_values))


# +
train_file_id = '12M1iRXbgtpON2cDlu0bvwePhgHcc9d9I'
test_file_id = '12CnSdigZ1wYDEsVJfD8kWlOrlx38Dt-u'
train_file_destination = '/home/jovyan/work/dataset/churn_train.csv'
test_file_destination = '/home/jovyan/work/dataset/churn_test.csv'

download_file_from_google_drive(train_file_id, train_file_destination)
download_file_from_google_drive(test_file_id, test_file_destination)

# +
rows_to_read = None  # set to an int to subsample while iterating on the pipeline
df_train = pd.read_csv(train_file_destination, na_values=['?', 'Unknown'], nrows=rows_to_read)
df_test = pd.read_csv(test_file_destination, na_values=['?', 'Unknown'], nrows=rows_to_read)

target_feature = 'churn_risk_score'

# + [markdown]
# ## Preprocessing
# ### Drop identifier-like columns that carry no predictive signal

# +
columns_to_drop = ['customer_id', 'Name', 'security_no', 'referral_id', 'last_visit_time']
df_train.drop(columns_to_drop, axis=1, inplace=True, errors='ignore')
df_test.drop(columns_to_drop, axis=1, inplace=True, errors='ignore')


# +
def preprocess_date(dataFrame, field):
    """Expand the date column *field* into calendar-derived features.

    The raw column is left in place; callers drop it afterwards.
    """
    dataFrame[field] = pd.to_datetime(dataFrame[field])
    d = dataFrame[field].dt
    dataFrame['joining_year'] = d.year
    dataFrame['joining_month'] = d.month
    dataFrame['joining_day'] = d.day
    dataFrame['joining_dayOfWeek'] = d.dayofweek
    dataFrame['joining_weekdayName'] = d.day_name()
    # .dt.week / .dt.weekofyear were deprecated and later removed from pandas;
    # isocalendar().week is the supported replacement, and both original
    # columns held the same value anyway.
    iso_week = d.isocalendar().week.astype(int)
    dataFrame['joining_week'] = iso_week
    dataFrame['joining_weekOfYear'] = iso_week
    dataFrame['joining_dayOfYear'] = d.dayofyear
    dataFrame['joining_daysInMonth'] = d.days_in_month
    return dataFrame


# +
df_train = preprocess_date(df_train, 'joining_date')
df_test = preprocess_date(df_test, 'joining_date')
df_train.drop('joining_date', axis=1, inplace=True, errors='ignore')
df_test.drop('joining_date', axis=1, inplace=True, errors='ignore')


# +
def get_categorical_features(dataFrame):
    """Names of object-dtype (string) columns."""
    return [f for f in dataFrame.columns if dataFrame[f].dtype == 'object']


def get_non_categorical_features(dataFrame, categorical_feats):
    """All column names not listed in *categorical_feats*."""
    return [f for f in dataFrame.columns if f not in categorical_feats]


def remove_element_from_list(list_of_elements, element):
    """Remove *element* in place if present; returns the (mutated) list."""
    if element in list_of_elements:
        list_of_elements.remove(element)
    return list_of_elements


# +
categorical_features = get_categorical_features(df_train)
non_categorical_features = get_non_categorical_features(df_train, categorical_features)
non_categorical_features = remove_element_from_list(non_categorical_features, target_feature)


# +
def convert_to_categorical(dataFrame, field):
    """Cast column(s) *field* (a name or list of names) to 'category' dtype."""
    dataFrame[field] = dataFrame[field].astype('category')
    return dataFrame


def convert_to_numeric(dataFrame, field):
    """Coerce column(s) *field* to numeric; unparseable values become NaN."""
    dataFrame[field] = dataFrame[field].apply(pd.to_numeric, errors='coerce')
    return dataFrame


# +
df_train = convert_to_categorical(df_train, categorical_features)
df_train = convert_to_categorical(df_train, [target_feature])
df_train = convert_to_numeric(df_train, non_categorical_features)

df_test = convert_to_categorical(df_test, categorical_features)
df_test = convert_to_numeric(df_test, non_categorical_features)

# + [markdown]
# ### Replacing categorical_features NaN values


# +
def fill_NaN_values(dataFrame, field, value):
    """Fill missing values of one column in place; returns the frame."""
    dataFrame[field].fillna(value, inplace=True)
    return dataFrame


# +
# Impute each sparse categorical column with its own split's mode (the same
# five columns as the original ten copy-pasted calls).
_sparse_categoricals = ['region_category', 'joined_through_referral',
                        'preferred_offer_types', 'medium_of_operation', 'gender']
for _df in (df_train, df_test):
    for _col in _sparse_categoricals:
        fill_NaN_values(_df, _col, _df[_col].mode()[0])

# + [markdown]
# ### Transforming target_feature

# +
# Remap target labels: -1 (an invalid marker in the raw data) -> 1, and
# 5 -> 0, so the classes form a contiguous 0..4 range for the classifier.
# The inverse (0 -> 5) is applied again before writing the submission.
df_train[target_feature] = df_train[target_feature].apply(
    lambda x: 1 if x == -1 else 0 if x == 5 else x)
df_train[target_feature].unique()

# + [markdown]
# ### Label Encoding categorical_features
#
# ref: http://kagglesolutions.com/r/feature-engineering--label-encoding
#
# Fit the encoders on train and test concatenated, so categories that only
# appear in the test split still receive a code.

# +
df_train2 = df_train.assign(dataType="train-data")
df_test2 = df_test.assign(dataType="test-data")

# The 'dataType' indicator column lets us split the frames apart again.
df_combined = pd.concat([df_train2, df_test2], ignore_index=True, axis=0)

# +
from sklearn import preprocessing

label_object = {}
for col in categorical_features:
    labelencoder = preprocessing.LabelEncoder()
    labelencoder.fit(df_combined[col].astype(str))
    # transform() suffices after fit(); the original's fit + fit_transform
    # fitted each encoder twice.
    df_combined[col] = labelencoder.transform(df_combined[col].astype(str))
    label_object[col] = labelencoder

# +
# Split back into train and test, then drop the indicator.
df_test, df_train = (df_combined[df_combined["dataType"].eq("test-data")],
                     df_combined[df_combined["dataType"].eq("train-data")])
df_train.drop('dataType', axis=1, inplace=True, errors='ignore')
df_test.drop('dataType', axis=1, inplace=True, errors='ignore')

# +
# Sanity check: decode a few encoded values back to the original labels.
label_object['medium_of_operation'].inverse_transform(df_train['medium_of_operation'][:5])

# + [markdown]
# ### Checking if the dataset is balanced/imbalanced

# +
# https://www.kaggle.com/rafjaa/resampling-strategies-for-imbalanced-datasets
target_count = df_train['churn_risk_score'].value_counts()
target_count.plot(kind='bar', title='Churn Proportions')

# +
labels = df_train[target_feature].values
df_train.drop(target_feature, axis=1, inplace=True, errors='ignore')
df_test.drop(target_feature, axis=1, inplace=True, errors='ignore')

# + [markdown]
# ### Replacing non_categorical_features NaN values - MICE imputation

# +
from sklearn.experimental import enable_iterative_imputer  # noqa: F401 (side-effect import)
from sklearn.impute import IterativeImputer as MICE

dataset_impute = df_train.copy()
dataset_impute_complete = MICE(max_iter=150, verbose=10,
                               random_state=10).fit_transform(dataset_impute.values)
df_train = pd.DataFrame(data=dataset_impute_complete,
                        columns=dataset_impute.columns,
                        index=dataset_impute.index)
df_train.head()

# +
print("NaN Values:", df_train.isna().any().sum())

# +
# The test split keeps cheap mean imputation (MICE above was fit on train only).
df_test.fillna(df_test.mean(), inplace=True)
print("NaN Values:", df_test.isna().any().sum())

# + [markdown]
# ### Splitting Data into train-cv

# +
from sklearn.model_selection import train_test_split

X_train, X_cv, y_train, y_cv = train_test_split(df_train, labels, test_size=0.15, shuffle=True)

# + [markdown]
# ### Scaling data

# +
from sklearn.preprocessing import StandardScaler

# Fit the scaler on the training split only, to avoid leakage into CV/test.
scaler = StandardScaler()
X_train_scaled = scaler.fit_transform(X_train)
X_cv_scaled = scaler.transform(X_cv)
X_test_scaled = scaler.transform(df_test)

# + [markdown]
# ### Modelling & Cross-Validation

# +
import sklearn.utils as skutils

# Keyword arguments: positional use of compute_class_weight was deprecated
# and removed in modern scikit-learn.
class_weights = skutils.class_weight.compute_class_weight(
    class_weight='balanced', classes=np.unique(y_train), y=y_train)
class_weights = dict(enumerate(class_weights))

sample_weights = skutils.class_weight.compute_sample_weight('balanced', y_train)

# +
import xgboost as xgb
from sklearn.metrics import f1_score, make_scorer
from sklearn.model_selection import RandomizedSearchCV

clf = xgb.XGBClassifier()

# XGB hyper-parameter search space.
param_grid = {
    'learning_rate': np.arange(0.05, 0.31, 0.05),
    'subsample': np.arange(0.3, 1, 0.1),
    'colsample_bytree': np.arange(0.3, 1, 0.1),
    'colsample_bylevel': np.arange(0.3, 1, 0.1),
    'max_depth': np.arange(5, 16, 1, dtype=int),
    'min_child_weight': np.arange(1, 10, 1, dtype=int),
    'reg_alpha': np.arange(0.1, 1, 0.1),
    'reg_lambda': np.arange(1, 5, 0.5),
    'gamma': np.arange(0.1, 2, 0.1),
    'n_estimators': np.arange(100, 3000, 100, dtype=int),
}

# NOTE(review): passing early_stopping_rounds through fit() is deprecated in
# xgboost >= 1.6 -- move it to the XGBClassifier constructor when upgrading.
fit_params = {
    'eval_metric': 'mlogloss',
    'early_stopping_rounds': 10,
    'eval_set': [(X_cv_scaled, y_cv)],
    'verbose': False,
}

# Weighted F1 as the selection metric for the multi-class target; see
# https://stats.stackexchange.com/questions/431022/error-while-performing-multiclass-classification-using-gridsearch-cv
multiclass_scorer = make_scorer(f1_score, average='weighted')

rs_clf = RandomizedSearchCV(clf, param_grid, n_iter=50, n_jobs=-1, verbose=2,
                            scoring=multiclass_scorer, random_state=42)

print("Randomized search..")
search_time_start = time.time()
rs_clf.fit(X_train_scaled, y_train, **fit_params)
print("Randomized search time:", time.time() - search_time_start)

print("Best score: {}".format(rs_clf.best_score_))
print("Best params: ")
for param_name in sorted(rs_clf.best_params_.keys()):
    print('%s: %r' % (param_name, rs_clf.best_params_[param_name]))

# + [markdown]
# ### Predicting on test Data

# +
trained_model = rs_clf  # select the model used for the submission

# Re-read the raw test file only to recover the customer_id column that was
# dropped during preprocessing.
read = pd.read_csv(test_file_destination)
predictions_trained_model_test = trained_model.predict(X_test_scaled)

# +
submission = pd.DataFrame({
    "customer_id": read["customer_id"],
    "churn_risk_score": predictions_trained_model_test,
})
# Undo the earlier relabelling: predicted class 0 maps back to score 5.
submission[target_feature] = submission[target_feature].apply(lambda x: 5 if x == 0 else x)
submission[target_feature].value_counts()

# +
submission.to_csv('submission.csv', index=False)
Competition/churn_rate_prediction.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python [conda env:21CMMC] # language: python # name: conda-env-21CMMC-py # --- # # MCMC Introduction # + init_cell=true # %matplotlib inline import matplotlib.pyplot as plt import numpy as np from py21cmmc import analyse from py21cmmc import mcmc import py21cmmc as p21mc # %load_ext autoreload # %autoreload 2 # - # In this notebook we demonstrate how to do the simplest possible MCMC to fit just two astrophysical parameters to a series of coeval brightness temperature boxes without noise, and then visualise the results. # # This tutorial will introduce the basic components of using ``py21cmmc`` for doing MCMC to fit parameters. For more advanced topics, see the relevant tutorials, guides or FAQs. p21mc.__version__ # ## The Structure of 21MCMC # As a bit of a primer, we discuss some of the implementation structure for how likelihoods are actually evaluated in 21CMMC. Understanding this structure will help to identify any issues that may arise, construct useful likelihoods, utilise the various options that the built-in structures have, and eventually to create your own (see XXX for more details of how to do this). # # The structure of an MCMC routine in 21CMMC is based on that offered by the ``cosmoHammer`` library, but with heavy modifications. This structure is based on a single master ``LikelihoodComputationChain`` (let's just call it the ``Chain``), which houses a number of what we'll call ``core``s and ``likelihood``s. These are instances of Python classes which can be named arbitrarily (and in principle do not need to be subclassed from any particular object), but follow a minimal API, which we'll outline shortly. In short, any ``Chain`` requires at _least_ one ``core``, and at least one ``likelihood``. Multiples of each are allowed and will work together seamlessly. 
# # Any MCMC should be run using the ``run_mcmc`` function in 21CMMC. While the MCMC can be run manually by setting up a ``Chain`` with its ``core``s and ``likelihood``s, there are various pitfalls and gotchas associated with this that usually make it easier just to use the in-built function. This function will take a list of ``core``s and ``likelihood``s, along with some specifications of how the MCMC is to proceed, set up a ``Chain``, run the MCMC sampling, and return the ``Chain`` object to you. # # Thus, almost all of the flexibility of 21CMMC lies in the construction of the various ``core`` and ``likelihood`` modules. # ### Core and Likelihood Modules # Let's briefly discuss these important ``core`` and ``likelihood`` modules -- the philosophy behind them, and what they can and can't do. We'll leave a discussion of how to go about implementing your own for another tutorial. # # The basic idea behind ``core`` and ``likelihood`` modules is that the ``core``s are supposed to do the calculations to fill up what is called a ``context`` dictionary on every iteration of the MCMC. The ``likelihood``s then use the contents of this ``context`` to evaluate a likelihood (this ends up being the sum of the likelihood returned by each ``likelihood`` module). # # In practice, there is no hard-and-fast limit to the scope of the ``core`` or ``likelihood``: the ``core`` could evaluate the likelihood itself, store it in the ``context``, and the ``likelihood`` could merely access and return it. Likewise, the ``core`` could do nothing, and let the ``likelihood`` perform the entire calculation. # # For practical and philosophical reasons, however there are a few properties of each which dictate how _best_ to separate the work that each kind of module does: # # 1. All ``core`` modules constructively interfere. That is, they are invoked in sequence, and the output of one propagates as potential input to the next. 
The various quantities that are computed by the ``core``s are not over-written (unless specifically directed to), but are rather accumulated. # 2. Conversely, all ``likelihood`` modules are invoked after all of the ``core`` modules have been invoked. Each likelihood module is expected to have access to all information from the sequence of ``core``s, and is expected not to modify that information. The operation of each ``likelihood`` is thus in principle independent of each of the other ``likelihood``s. This implies that the total posterior is the sum of each of the likelihoods, which are considered statistically independent. # 3. Due to the first two points, we consider ``core``s as *constructive* and ``likelihood``s as *reductive*. That is, it is most useful to put calculations that _build_ data given a set of parameters in ``core``s, and operations that _reduce_ that data to some final form (eg. a power spectrum) in the ``likelihoods``. This is because a given dataset, produced by the accumulation of ``core``s, may yield a number of independent likelihoods, while these ``likelihood``s may be equally valid for a range of different data models (eg. inclusion or exclusion of various systematic effects). # 4. Point 3 implies that it is cleanest if all operations that explicitly require the model parameters occur in ``core``s. The reduction of data should not in general be model-dependent. In practice, the current parameters _are_ available in the ``likelihood``s, but we consider it cleaner if this can be separated. # 5. In general, as both the ``core``s and ``likelihood``s are used to build a probabilistic _model_ which can be used to evaluate the likelihood of given _data_, both of their output should in principle be deterministic with respect to input parameters. Nevertheless, one may consider the process as a forward-model, and a forward-model is able to _produce_ mock data. 
Indeed, 21CMMC adds a framework to the base ``cosmoHammer`` for producing such mock data, which are inherently stochastic. A useful way to conceptualize the separation of ``core`` and ``likelihood`` is to ensure that all stochasticity can in principle be added in the ``core``, and that the ``likelihood`` should have no aspect of randomness in any of its calculations. The ``likelihood`` should reduce real/mock data in the same way that it reduces model data. # # Given these considerations, the ``core`` modules that are implemented within ``21CMMC`` perform ``21cmFAST`` simulations and add these large datasets to the ``context``, while the various ``likelihoods`` will use this "raw" data and compress it down to a likelihood -- either by taking a power spectrum, global average or some other reduction. # # Some of the features of ``core``s as implemented in 21CMMC are the following. Each ``core``: # * has access to the entire ``Chain`` in which it is embedded (if it is indeed embedded), which enables sharing of information between ``core``s (and ensuring that they are consistent with one another, if applicable). # * has access to the names of the parameters which are currently being constrained. # * is enabled for equality testing with other ``core``s. # * has a special method (and runtime parameters) for storing arbitrary data in the ``Chain`` on a per-iteration basis (see the advanced MCMC FAQ for more info). # * has an optional method for converting a data model into a "mock" (i.e. incorporating a random component), which can be used to simulate mock data for consistency testing. # # Some of the features of ``likelihood``s as implemented in 21CMMC are that each ``likelihood``: # * also has access to the ``Chain`` and parameter names, as well as equality testing, like the ``core``s. # * computes the likelihood in two steps: first reducing model data to a "final form", and then computing the likelihood from this form (eg. 
reducing a simulation cube to a 1D power spectrum, and then computing a $\chi^2$ likelihood on the power spectrum). This enables two things: (i) production and saving of reduced mock data, which can be used directly for consistency tests, and (ii) the ability to use _either_ raw data or reduced data as input to the likelihood. # * has methods for loading data and noise from files. # * has the ability to check that a list of ``required_cores`` are loaded in the ``Chain``. # ## Running MCMC # Enough discussion, let's create our ``core`` and ``likelihood``. In this tutorial we use a single ``core`` -- one which evaluates the coeval brightness temperature field at an arbitrary selection of redshifts, and a single ``likelihood`` -- one which reduces the 3D field(s) into a 1D power spectrum/spectra and evaluates the likelihood based on a $\chi$-square fit to data. # + init_cell=true core = p21mc.CoreCoevalModule( # All core modules are prefixed by Core* and end with *Module redshift = [7,8,9], # Redshifts of the coeval fields produced. user_params = dict( # Here we pass some user parameters. Also cosmo_params, astro_params and flag_options HII_DIM = 50, # are available. These specify only the *base* parameters of the data, *not* the BOX_LEN = 125.0 # parameters that are fit by MCMC. ), regenerate=False # Don't regenerate init_boxes or perturb_fields if they are already in cache. ) # For other available options, see the docstring. # Now the likelihood... datafiles = ["data/simple_mcmc_data_%s.npz"%z for z in core.redshift] likelihood = p21mc.Likelihood1DPowerCoeval( # All likelihood modules are prefixed by Likelihood* datafile = datafiles, # All likelihoods have this, which specifies where to write/read data noisefile= None, # All likelihoods have this, specifying where to find noise profiles. logk=False, # Should the power spectrum bins be log-spaced? 
min_k=0.1, # Minimum k to use for likelihood max_k=1.0, # Maximum "" simulate = True, # Simulate the data, instead of reading it in. # will be performed. ) # For other available options, see the docstring # - # Now we have all we need to start running the MCMC. The most important part of the call to ``run_mcmc`` is the specification of ``params``, which specifies which are the parameters *to be fit*. This is passed as a dictionary, where the keys are the parameter names, and *must* come from either ``cosmo_params`` or ``astro_params``, and be of ``float`` type. The values of the dictionary are length-4 lists: ``(guess, min, max, width)``. The first specifies where the best guess of the true value lies, and the initial walkers will be chosen around here. The ``min/max`` arguments provide upper and lower limits on the parameter, outside of which the likelihood will be ``-infinity``. The ``width`` affects the initial distribution of walkers around the best-guess (it does *not* influence any kind of "prior"). # # Finally, the ``model_name`` merely affects the file name of the output chain data, along with the ``datadir`` argument. # + model_name = "SimpleTest" chain = mcmc.run_mcmc( core, likelihood, # Use lists if multiple cores/likelihoods required. These will be eval'd in order. datadir='data', # Directory for all outputs model_name=model_name, # Filename of main chain output params=dict( # Parameter dict as described above. HII_EFF_FACTOR = [30.0, 10.0, 50.0, 3.0], ION_Tvir_MIN = [4.7, 4, 6, 0.1], ), walkersRatio=3, # The number of walkers will be walkersRatio*nparams burninIterations=0, # Number of iterations to save as burnin. Recommended to leave as zero. sampleIterations=150, # Number of iterations to sample, per walker. threadCount=6, # Number of processes to use in MCMC (best as a factor of walkersRatio) continue_sampling=False # Whether to contine sampling from previous run *up to* sampleIterations. 
) # - # ## Analysis # ### Accessing chain data # The full chain data, as well as any stored data (as "blobs") is available within the chain as the ``samples`` attribute. # If access to this "chain" object is lost (eg. the MCMC was run via CLI and is finished), an exact replica of the store object can be read in from file. Unified access is provided through the ``get_samples`` function in the ``analyse`` module. Thus all these are equivalent: # + samples1 = chain.samples samples2 = analyse.get_samples(chain) samples3 = analyse.get_samples("data/%s"%model_name) # Equivalent: # samples = analyse.get_samples("data/%s"%model_name) # - print(np.all(samples1.accepted == samples2.accepted)) print(np.all(samples1.accepted == samples3.accepted)) # Do note that while the first two methods return exactly the same object, occupying the same memory: samples1 is samples2 # this is not true when reading in the samples from file: print(samples1 is samples3) del samples3; samples=samples1 # Remove unnecessary memory, and rename to samples # Several methods are defined on the sample object (which has type ``HDFStore``), to ease interactions. For example, one can access the various dimensions of the chain: niter = samples.size nwalkers, nparams = samples.shape # We can also check what the parameter names of the run were, and their initial "guess" (this is the first value passed to the "parameters" dictionary in ``run_mcmc``: print(samples.param_names) print([samples.param_guess[k] for k in samples.param_names]) # Or one can view how many iterations were accepted for each walker: samples.accepted, np.mean(samples.accepted/niter) # We can also see what extra data we saved along the way by getting the blob names (see below for more details on this): samples.blob_names # Finally, we can get the actual chain data, using ``get_chain`` and friends. 
However, this is best done dynamically, as the ``samples`` object itself does *not* hold the chain in memory, but rather transparently reads it from file when accessed. # ### Trace Plot # Often, for diagnostic purposes, the most useful plot to start with is the trace plot. This enables quick diagnosis of burnin time and walkers that haven't converged. The function in ``py21cmmc`` by default plots the log probability along with the various parameters that were fit. It also supports setting a starting iteration, and a thinning amount. analyse.trace_plot(samples, include_lnl=True, start_iter=0, thin=1, colored=False, show_guess=True); # ### Corner Plot # One of the most useful plots to come out of an MCMC analysis is the "corner" plot, which shows the correlation of each parameter with every other parameter. The function in ``py21cmmc`` will by default also show the original "guess" for each parameter as a blue line, and also show the log-likelihood as a pseudo-"parameter", though this can be turned off. samples.param_guess analyse.corner_plot(samples); # ### Model Comparison Plot # Another plot of interest is a "model comparison" plot -- i.e. comparing the range of outputted models to the data itself. These will differ significantly depending on the kind of data produced by the likelihood function, and thus they depend also on the actual data used. We thus do not provide a general function for plotting this. We do however show how one might go about this task in the function below. # # First, however, we show how one might interact with the data and saved models/blobs. 
# To extract all blob data from the samples:

blobs = samples.get_blobs()

# For simplicity, let's extract each kind of blob from the blob structured array:

# +
k = blobs['k_z7']
model_power = blobs['delta_z7'], blobs['delta_z8'], blobs['delta_z9']

print(k.shape, model_power[0].shape)
nk = k.shape[-1]
# -

# Here we notice that ``k`` should be the same on each iteration, so we take just the first:

print(np.all(k[0] == k[1]))
k = k[0]

# Finally, we also want to access the *data* to which the models have been compared. Since we have access to the original ``likelihood`` object, we can easily pull this from it. However, we equivalently could have read it in from file (this file is *not* always present, only if ``datafile`` is present in the likelihood constructor):

# +
p_data = np.array([d['delta'] for d in likelihood.data])
k_data = np.array([d['k'] for d in likelihood.data])

# Equivalent
# data = np.genfromtxt("simple_mcmc_data.txt")
# k_data = data[:,0]
# p_data = data[:,1:]
# -

# Now, let's define a function which will plot our model comparison:

# +
def model_compare_plot(samples, p_data, k_data, thin=1, start_iter=0):
    """Plot the sampled model power spectra against the input data, per redshift.

    Parameters
    ----------
    samples : samples object (HDFStore-like)
        MCMC samples; must expose ``get_blobs`` and ``blob_names``.
    p_data : array-like, shape (nz, nk)
        The "observed" power spectrum at each redshift.
    k_data : array-like, shape (nk,)
        The k-bins of the data.
    thin : int
        Keep only every ``thin``-th iteration.
    start_iter : int
        Number of initial iterations to discard (burn-in).
    """
    # Only the stored blobs are needed for this plot; the original also fetched
    # the (unused) parameter chain here, which has been removed.
    blobs = samples.get_blobs(thin=thin, discard=start_iter, flat=True)

    # k-bins are identical on every iteration (verified above), so take the first.
    k = blobs['k_z7'][0]
    model_power = [blobs[name] for name in samples.blob_names if name.startswith("delta_")]
    print(k.shape)

    nz = len(model_power)

    fig, ax = plt.subplots(1, nz, sharex=True, sharey=True, figsize=(6 * nz, 4.5),
                           subplot_kw={"xscale": 'log', "yscale": 'log'},
                           gridspec_kw={"hspace": 0.05, 'wspace': 0.05},
                           squeeze=False)

    for i in range(nz):
        this_power = model_power[i]
        this_data = p_data[i]

        # Label only the first model curve so the legend gets a single "models" entry.
        label = "models"
        for pp in this_power:
            ax[0, i].plot(k, pp, color='k', alpha=0.2, label=label, zorder=1)
            if label:
                label = None

        mean = np.mean(this_power, axis=0)
        std = np.std(this_power, axis=0)
        md = np.median(this_power, axis=0)

        ax[0, i].fill_between(k, mean - std, mean + std, color="C0", alpha=0.6)
        ax[0, i].plot(k, md, color="C0", label="median model")
        # Error bars assume a flat 15% uncertainty on the data.
        ax[0, i].errorbar(k_data, this_data, yerr=(0.15 * this_data),
                          color="C1", label="data", ls="None", markersize=5, marker='o')

        # NOTE(review): k is a wavenumber, so the unit is Mpc^{-1}; the original
        # label said Mpc^{-3}.
        ax[0, i].set_xlabel(r"$k$ [Mpc$^{-1}$]", fontsize=15)
        ax[0, i].text(0.1, 0.86, "z=%s" % core.redshift[i],
                      transform=ax[0, i].transAxes, fontsize=15, fontweight='bold')

    ax[0, 0].legend(fontsize=12)
    # plt.ylim((3.1e2, 3.5e3))
    # Raw string: "\D" is an invalid escape sequence in a plain string literal.
    ax[0, 0].set_ylabel(r"$\Delta^2_{21}$", fontsize=15)
    # plt.savefig(join(direc, modelname+"_power_spectrum_plot.pdf"))


# -

model_compare_plot(samples, p_data, k_data[0], thin=10)
docs/tutorials/mcmc_intro.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 2 # language: python # name: python2 # --- # We wanted to check our hypothesis that increasing the augmentation will always give us a better score. Started two models, one with 8 rotations instead of 4 and one turning on random crops as well. These both used a very large proportion of RAM because of the high augmentation factor. # cd .. # %run check_test_score.py -v run_settings/alexnet_based_16aug.json # %run check_test_score.py -v run_settings/alexnet_based_40aug.json
notebooks/augmentation/More Augmentation Results.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# Load a JSON document from disk and send it, UTF-8 encoded, to a TCP server.

import json
import socket

# +
# Read and display the test payload.
with open('./test.json', 'r', encoding='utf-8') as f:
    temp_json = json.load(f)

print(type(temp_json))
print(temp_json)
# -

# Serialize without escaping non-ASCII characters so the payload stays human-readable.
temp_json_dump = json.dumps(temp_json, ensure_ascii=False)
type(temp_json_dump), temp_json_dump  # notebook cell: displays the serialized string

# Encode once, up front, instead of re-encoding at send time.
payload = temp_json_dump.encode('utf-8')
payload  # notebook cell: displays the raw bytes

# The context manager guarantees the socket is closed even if connect/sendall raises.
with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
    return_s = s.connect(('192.168.0.2', 12321))
    print(return_s)  # socket.connect() returns None on success
    s.sendall(payload)
test/socket_json/json_socket_test.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# Classify an image with a pre-trained ResNet50 network, print the top
# prediction, and synthesize it as speech with gTTS.

from tensorflow.keras.preprocessing.image import load_img
from tensorflow.keras.preprocessing.image import img_to_array
from tensorflow.keras.applications.resnet50 import ResNet50
from tensorflow.keras.preprocessing import image
from tensorflow.keras.applications.resnet50 import preprocess_input
from tensorflow.keras.applications.resnet50 import decode_predictions

# Pre-trained ImageNet weights are downloaded on first use.
classifier = ResNet50()

# ResNet50 expects 224x224 RGB input.
img = load_img('im1.jpg', target_size=(224, 224))
img  # notebook cell: displays the loaded image

# Turn the PIL image into a batch of one — shape (1, 224, 224, 3) — and apply
# the ResNet50-specific channel preprocessing.
batch = preprocess_input(img_to_array(img).reshape((1, 224, 224, 3)))

# Top ImageNet class for the single image in the batch.
top_class = decode_predictions(classifier.predict(batch))[0][0]
print("This is a : " + top_class[1])
sayit = "This is a " + top_class[1]

# !pip install gtts
from gtts import gTTS
import os

# Save the spoken prediction and hand the file to the OS — presumably relies
# on the Windows file-association behavior of os.system to play the mp3;
# on other platforms this would just try to execute the file (verify target OS).
speech = gTTS(text=sayit)
speech.save("prediction.mp3")
os.system("prediction.mp3")
Exercise01/Exercise01.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3.6 # language: python # name: python36 # --- # Copyright (c) Microsoft Corporation. All rights reserved. # # Licensed under the MIT License. # ![Impressions](https://PixelServer20190423114238.azurewebsites.net/api/impressions/MachineLearningNotebooks/how-to-use-azureml/automated-machine-learning/classification-bank-marketing/auto-ml-classification-bank-marketing.png) # # Automated Machine Learning # _**Regression on remote compute using Computer Hardware dataset with model explanations**_ # # ## Contents # 1. [Introduction](#Introduction) # 1. [Setup](#Setup) # 1. [Train](#Train) # 1. [Results](#Results) # 1. [Explanations](#Explanations) # ## Introduction # # In this example we use the Hardware Performance Dataset to showcase how you can use AutoML for a simple regression problem. After training AutoML models for this regression data set, we show how you can compute model explanations on your remote compute using a sample explainer script. # # If you are using an Azure Machine Learning Notebook VM, you are all set. Otherwise, go through the [configuration](../../../configuration.ipynb) notebook first if you haven't already to establish your connection to the AzureML Workspace. # # In this notebook you will learn how to: # 1. Create an `Experiment` in an existing `Workspace`. # 2. Configure AutoML using `AutoMLConfig`. # 3. Train the model using remote compute. # 4. Explore the results. # 5. Setup remote compute for computing the model explanations for a given AutoML model. # 6. Start an AzureML experiment on your remote compute to compute explanations for an AutoML model. # 7. Download the feature importance for engineered features and visualize the explanations for engineered features. # 8. Download the feature importance for raw features and visualize the explanations for raw features. 
# # ## Setup # # As part of the setup you have already created an Azure ML `Workspace` object. For AutoML you will need to create an `Experiment` object, which is a named object in a `Workspace` used to run experiments. # + import logging from matplotlib import pyplot as plt import pandas as pd import os import azureml.core from azureml.core.experiment import Experiment from azureml.core.workspace import Workspace from azureml.core.dataset import Dataset from azureml.train.automl import AutoMLConfig # + ws = Workspace.from_config() # choose a name for experiment experiment_name = 'automl-regression-computer-hardware' experiment=Experiment(ws, experiment_name) output = {} output['SDK version'] = azureml.core.VERSION output['Subscription ID'] = ws.subscription_id output['Workspace'] = ws.name output['Resource Group'] = ws.resource_group output['Location'] = ws.location output['Experiment Name'] = experiment.name pd.set_option('display.max_colwidth', -1) outputDf = pd.DataFrame(data = output, index = ['']) outputDf.T # - # ### Create or Attach existing AmlCompute # You will need to create a compute target for your AutoML run. In this tutorial, you create AmlCompute as your training compute resource. # #### Creation of AmlCompute takes approximately 5 minutes. # If the AmlCompute with that name is already in your workspace this code will skip the creation process. # As with other Azure services, there are limits on certain resources (e.g. AmlCompute) associated with the Azure Machine Learning service. Please read this article on the default limits and how to request more quota. # + from azureml.core.compute import AmlCompute from azureml.core.compute import ComputeTarget # Choose a name for your cluster. amlcompute_cluster_name = "automlcl" found = False # Check if this compute target already exists in the workspace. 
cts = ws.compute_targets if amlcompute_cluster_name in cts and cts[amlcompute_cluster_name].type == 'AmlCompute': found = True print('Found existing compute target.') compute_target = cts[amlcompute_cluster_name] if not found: print('Creating a new compute target...') provisioning_config = AmlCompute.provisioning_configuration(vm_size = "STANDARD_D2_V2", # for GPU, use "STANDARD_NC6" #vm_priority = 'lowpriority', # optional max_nodes = 6) # Create the cluster. compute_target = ComputeTarget.create(ws, amlcompute_cluster_name, provisioning_config) print('Checking cluster status...') # Can poll for a minimum number of nodes and for a specific timeout. # If no min_node_count is provided, it will use the scale settings for the cluster. compute_target.wait_for_completion(show_output = True, min_node_count = None, timeout_in_minutes = 20) # For a more detailed view of current AmlCompute status, use get_status(). # - # ### Conda Dependecies for AutoML training experiment # # Create the conda dependencies for running AutoML experiment on remote compute. # + from azureml.core.runconfig import RunConfiguration from azureml.core.conda_dependencies import CondaDependencies import pkg_resources # create a new RunConfig object conda_run_config = RunConfiguration(framework="python") # Set compute target to AmlCompute conda_run_config.target = compute_target conda_run_config.environment.docker.enabled = True cd = CondaDependencies.create(conda_packages=['numpy','py-xgboost<=0.80']) conda_run_config.environment.python.conda_dependencies = cd # - # ### Setup Training and Test Data for AutoML experiment # # Here we create the train and test datasets for hardware performance dataset. We also register the datasets in your workspace using a name so that these datasets may be accessed from the remote compute. 
# + # Data source data = "https://automlsamplenotebookdata.blob.core.windows.net/automl-sample-notebook-data/machineData.csv" # Create dataset from the url dataset = Dataset.Tabular.from_delimited_files(data) # Split the dataset into train and test datasets train_dataset, test_dataset = dataset.random_split(percentage=0.8, seed=223) # Register the train dataset with your workspace train_dataset.register(workspace = ws, name = 'hardware_performance_train_dataset', description = 'hardware performance training data', create_new_version=True) # Register the test dataset with your workspace test_dataset.register(workspace = ws, name = 'hardware_performance_test_dataset', description = 'hardware performance test data', create_new_version=True) # Drop the labeled column from the train dataset X_train = train_dataset.drop_columns(columns=['ERP']) y_train = train_dataset.keep_columns(columns=['ERP'], validate=True) # Drop the labeled column from the test dataset X_test = test_dataset.drop_columns(columns=['ERP']) # Display the top rows in the train dataset X_train.take(5).to_pandas_dataframe() # - # ## Train # # Instantiate an `AutoMLConfig` object to specify the settings and data used to run the experiment. # # |Property|Description| # |-|-| # |**task**|classification or regression| # |**primary_metric**|This is the metric that you want to optimize. Regression supports the following primary metrics: <br><i>spearman_correlation</i><br><i>normalized_root_mean_squared_error</i><br><i>r2_score</i><br><i>normalized_mean_absolute_error</i>| # |**iteration_timeout_minutes**|Time limit in minutes for each iteration.| # |**iterations**|Number of iterations. 
In each iteration AutoML trains a specific pipeline with the data.| # |**n_cross_validations**|Number of cross validation splits.| # |**X**|(sparse) array-like, shape = [n_samples, n_features]| # |**y**|(sparse) array-like, shape = [n_samples, ], targets values.| # # **_You can find more information about primary metrics_** [here](https://docs.microsoft.com/en-us/azure/machine-learning/service/how-to-configure-auto-train#primary-metric) # + automl_settings = { "iteration_timeout_minutes": 5, "iterations": 10, "n_cross_validations": 2, "primary_metric": 'spearman_correlation', "preprocess": True, "max_concurrent_iterations": 1, "verbosity": logging.INFO, } automl_config = AutoMLConfig(task = 'regression', debug_log = 'automl_errors_model_exp.log', run_configuration=conda_run_config, X = X_train, y = y_train, **automl_settings ) # - # Call the `submit` method on the experiment object and pass the run configuration. Execution of local runs is synchronous. Depending on the data and the number of iterations this can run for a while. # In this example, we specify `show_output = True` to print currently running iterations to the console. remote_run = experiment.submit(automl_config, show_output = True) remote_run # ## Results # #### Widget for Monitoring Runs # # The widget will first report a "loading" status while running the first iteration. After completing the first iteration, an auto-updating graph and table will be shown. The widget will refresh once per minute, so you should see the graph update as child runs complete. # # **Note:** The widget displays a link at the bottom. Use this link to open a web interface to explore the individual run details from azureml.widgets import RunDetails RunDetails(remote_run).show() # ## Explanations # This section will walk you through the workflow to compute model explanations for an AutoML model on your remote compute. 
# # ### Retrieve any AutoML Model for explanations # # Below we select the some AutoML pipeline from our iterations. The `get_output` method returns the a AutoML run and the fitted model for the last invocation. Overloads on `get_output` allow you to retrieve the best run and fitted model for *any* logged metric or for a particular *iteration*. automl_run, fitted_model = remote_run.get_output(iteration=5) # ### Setup model explanation run on the remote compute # The following section provides details on how to setup an AzureML experiment to run model explanations for an AutoML model on your remote compute. # #### Sample script used for computing explanations # View the sample script for computing the model explanations for your AutoML model on remote compute. with open('train_explainer.py', 'r') as cefr: print(cefr.read()) # #### Substitute values in your sample script # The following cell shows how you change the values in the sample script so that you can change the sample script according to your experiment and dataset. # + import shutil # create script folder script_folder = './sample_projects/automl-regression-computer-hardware' if not os.path.exists(script_folder): os.makedirs(script_folder) # Copy the sample script to script folder. shutil.copy('train_explainer.py', script_folder) # Create the explainer script that will run on the remote compute. script_file_name = script_folder + '/train_explainer.py' # Open the sample script for modification with open(script_file_name, 'r') as cefr: content = cefr.read() # Replace the values in train_explainer.py file with the appropriate values content = content.replace('<<experimnet_name>>', automl_run.experiment.name) # your experiment name. content = content.replace('<<run_id>>', automl_run.id) # Run-id of the AutoML run for which you want to explain the model. 
content = content.replace('<<target_column_name>>', 'ERP') # Your target column name content = content.replace('<<task>>', 'regression') # Training task type # Name of your training dataset register with your workspace content = content.replace('<<train_dataset_name>>', 'hardware_performance_train_dataset') # Name of your test dataset register with your workspace content = content.replace('<<test_dataset_name>>', 'hardware_performance_test_dataset') # Write sample file into your script folder. with open(script_file_name, 'w') as cefw: cefw.write(content) # - # #### Create conda configuration for model explanations experiment # We need `azureml-explain-model`, `azureml-train-automl` and `azureml-core` packages for computing model explanations for your AutoML model on remote compute. # + from azureml.core.runconfig import RunConfiguration from azureml.core.conda_dependencies import CondaDependencies import pkg_resources # create a new RunConfig object conda_run_config = RunConfiguration(framework="python") # Set compute target to AmlCompute conda_run_config.target = compute_target conda_run_config.environment.docker.enabled = True azureml_pip_packages = [ 'azureml-train-automl', 'azureml-core', 'azureml-explain-model' ] # specify CondaDependencies obj conda_run_config.environment.python.conda_dependencies = CondaDependencies.create( conda_packages=['scikit-learn', 'numpy','py-xgboost<=0.80'], pip_packages=azureml_pip_packages) # - # #### Submit the experiment for model explanations # Submit the experiment with the above `run_config` and the sample script for computing explanations. # + # Now submit a run on AmlCompute for model explanations from azureml.core.script_run_config import ScriptRunConfig script_run_config = ScriptRunConfig(source_directory=script_folder, script='train_explainer.py', run_config=conda_run_config) run = experiment.submit(script_run_config) # Show run details run # - # %%time # Shows output of the run on stdout. 
run.wait_for_completion(show_output=True) # ### Feature importance and explanation dashboard # In this section we describe how you can download the explanation results from the explanations experiment and visualize the feature importance for your AutoML model. # #### Setup for visualizing the model explanation results # For visualizing the explanation results for the *fitted_model* we need to perform the following steps:- # 1. Featurize test data samples. # # The *automl_explainer_setup_obj* contains all the structures from above list. from azureml.train.automl.automl_explain_utilities import AutoMLExplainerSetupClass, automl_setup_model_explanations explainer_setup_class = automl_setup_model_explanations(fitted_model, 'regression', X_test=X_test) # #### Download engineered feature importance from artifact store # You can use *ExplanationClient* to download the engineered feature explanations from the artifact store of the *automl_run*. You can also use ExplanationDashboard to view the dash board visualization of the feature importance values of the engineered features. from azureml.explain.model._internal.explanation_client import ExplanationClient from azureml.contrib.explain.model.visualize import ExplanationDashboard client = ExplanationClient.from_run(automl_run) engineered_explanations = client.download_model_explanation(raw=False) print(engineered_explanations.get_feature_importance_dict()) ExplanationDashboard(engineered_explanations, explainer_setup_class.automl_estimator, explainer_setup_class.X_test_transform) # #### Download raw feature importance from artifact store # You can use *ExplanationClient* to download the raw feature explanations from the artifact store of the *automl_run*. You can also use ExplanationDashboard to view the dash board visualization of the feature importance values of the raw features. 
raw_explanations = client.download_model_explanation(raw=True) print(raw_explanations.get_feature_importance_dict()) ExplanationDashboard(raw_explanations, explainer_setup_class.automl_pipeline, explainer_setup_class.X_test_raw)
how-to-use-azureml/automated-machine-learning/model-explanation-remote-amlcompute/auto-ml-model-explanations-remote-compute.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 (ipykernel) # language: python # name: python3 # --- # (NVM)= # # 1.3 Normas vectoriales y matriciales # ```{admonition} Notas para contenedor de docker: # # Comando de docker para ejecución de la nota de forma local: # # nota: cambiar `<ruta a mi directorio>` por la ruta de directorio que se desea mapear a `/datos` dentro del contenedor de docker y `<versión imagen de docker>` por la versión más actualizada que se presenta en la documentación. # # `docker run --rm -v <ruta a mi directorio>:/datos --name jupyterlab_optimizacion -p 8888:8888 -d palmoreck/jupyterlab_optimizacion:<versión imagen de docker>` # # password para jupyterlab: `<PASSWORD>` # # Detener el contenedor de docker: # # `docker stop jupyterlab_optimizacion` # # Documentación de la imagen de docker `palmoreck/jupyterlab_optimizacion:<versión imagen de docker>` en [liga](https://github.com/palmoreck/dockerfiles/tree/master/jupyterlab/optimizacion). # # ``` # --- # Nota generada a partir de [liga](https://www.dropbox.com/s/5bc6tn39o0qqg35/1.3.Condicion_estabilidad_y_normas.pdf?dl=0) # ```{admonition} Al final de esta nota la comunidad lectora: # :class: tip # # * Aprenderá las definiciones de algunas normas vectoriales y matriciales más utilizadas en las Matemáticas para la medición de errores, residuales, en general cercanía a cantidades de interés. # # * Comprenderá la interpretación que tiene una norma matricial. # # ``` # Una norma define una medida de distancia en un conjunto y da nociones de tamaño, vecindad, convergencia y continuidad. 
# ## Normas vectoriales # Sea $\mathbb{R}^n$ el conjunto de $n$-tuplas o vectores columna o $1$-arreglo de orden $1$, esto es: # # $$x \in \mathbb{R}^n \iff x = \left[\begin{array}{c} # x_1\\ # x_2\\ # \vdots\\ # x_n # \end{array} \right] \text{ con } x_i \in \mathbb{R}$$ # # Una norma vectorial en $\mathbb{R}^n$ es una función $g: \mathbb{R}^n \rightarrow \mathbb{R}$ que satisface las siguientes propiedades: # # * $g$ es no negativa: $g(x) \geq 0 \forall x \in \mathbb{R}^n$. # # * $g$ es definida: $g(x) = 0 \iff x = 0$. # # * $g$ satisface la desigualdad del triángulo: # # $$g(x+y) \leq g(x) + g(y) \forall x,y \in \mathbb{R}^n.$$ # # # * $g$ es homogénea: $g(\alpha x)=|\alpha|g(x), \forall \alpha \in \mathbb{R}, \forall x \in \mathbb{R}^n$. # # Notación: $g(x) = ||x||$. # ```{admonition} Definición # # Un conjunto $V \neq \emptyset$ en el que se le han definido las operaciones $(+, \cdot)$ se le nombra **espacio vectorial** sobre $\mathbb{R}$ si satisface las siguientes propiedades $\forall x, y, z \in V$, $\forall a,b \in \mathbb{R}$: # # * x + (y + z) = (x + y) + z # * x + y = y + x # * $\exists 0 \in V$ tal que $x + 0 = 0 + x = x$ $\forall x \in V$. # * $\forall x \in V$ $\exists -x \in V$ tal que $x + (-x) = 0$. # * a(bx) = (ab)x. # * $1x = x$ con $1 \in \mathbb{R}$. # * $a(x + y) = ax + ay$. # * $(a+b)x = ax + bx$. # ``` # ```{admonition} Comentarios y propiedades # # * Una norma es una generalización del valor absoluto de $\mathbb{R}$: $|x|, x \in \mathbb{R}.$ # # * Un espacio vectorial con una norma definida en éste se le llama **espacio vectorial normado**. # # * Una norma es una medida de la longitud de un vector. # # * Con una norma es posible definir conceptos como distancia entre vectores: $x,y \in \mathbb{R}^n: \text{dist}(x,y) = ||x-y||$. # # * Existen varias normas en $\mathbb{R}^n$ siendo las más comunes: # # * La norma $\mathcal{l}_2$, Euclidiana o norma $2$: $||x||_2$. # # * La norma $\mathcal{l}_1$ o norma $1$: $||x||_1$. 
# # * La norma $\infty$ o de Chebyshev o norma infinito: $||x||_\infty$. # # Las normas anteriores pertenecen a una familia parametrizada por una constante $p, p \geq 1$ cuyo nombre es norma $\mathcal{l}_p$: # # $$ ||x||_p = \left(\displaystyle \sum_{i=1}^n|x_i|^p \right )^{1/p}.$$ # # # * Un resultado para $x \in \mathbb{R}^n$ es la **equivalencia** entre normas: # # $$\exists \alpha, \beta > 0 \text{ tales que }: \alpha||x||_a \leq ||x||_b \leq \beta ||x||_a \forall x \in \mathbb{R}^n$$ # # donde: $||\cdot||_a, ||\cdot||_b$ son normas cualesquiera en $\mathbb{R}^n$. Por la propiedad anterior decimos que si se cumple convergencia en la norma $||\cdot||_a$ entonces también se cumple convergencia en la norma $||\cdot||_b$. # ``` # (EGNP)= # ## Ejemplos de gráficas de normas en el plano. import numpy as np import matplotlib.pyplot as plt # ### Norma $2$: $\{ x \in \mathbb{R}^2 \text{ tales que } ||x||_2 < 1\}$ f=lambda x: np.sqrt(x[:,0]**2 + x[:,1]**2) #definición de norma2 density=1e-5 density_p=int(2.5*10**3) x=np.arange(-1,1,density) y1=np.sqrt(1-x**2) y2=-np.sqrt(1-x**2) x_p=np.random.uniform(-1,1,(density_p,2)) ind=f(x_p)<1 x_p_subset=x_p[ind] plt.scatter(x_p_subset[:,0],x_p_subset[:,1],marker='.') plt.title('Puntos en el plano que cumplen $||x||_2 < 1$') plt.grid() plt.show() # ### Norma $1$: $\{ x \in \mathbb{R}^2 \text{ tales que } ||x||_1 \leq 1\}$ f=lambda x:np.abs(x[:,0]) + np.abs(x[:,1]) #definición de norma1 density=1e-5 density_p=int(2.5*10**3) x1=np.arange(0,1,density) x2=np.arange(-1,0,density) y1=1-x1 y2=1+x2 y3=x1-1 y4=-1-x2 x_p=np.random.uniform(-1,1,(density_p,2)) ind=f(x_p)<=1 x_p_subset=x_p[ind] plt.plot(x1,y1,'b',x2,y2,'b',x1,y3,'b',x2,y4,'b') plt.scatter(x_p_subset[:,0],x_p_subset[:,1],marker='.') plt.title('Puntos en el plano que cumplen $||x||_1 \leq 1$') plt.grid() plt.show() # ### Norma $\infty$: $\{ x \in \mathbb{R}^2 \text{ tales que } ||x||_\infty \leq 1\}$ f=lambda x:np.max(np.abs(x),axis=1) #definición de norma infinito point1 = (-1, 
-1) point2 = (-1, 1) point3 = (1, 1) point4 = (1, -1) point5 = point1 arr = np.row_stack((point1, point2, point3, point4, point5)) density_p=int(2.5*10**3) x_p=np.random.uniform(-1,1,(density_p,2)) ind=f(x_p)<=1 x_p_subset=x_p[ind] plt.scatter(x_p_subset[:,0],x_p_subset[:,1],marker='.') plt.plot(arr[:,0], arr[:,1]) plt.title('Puntos en el plano que cumplen $||x||_{\infty} \leq 1$') plt.grid() plt.show() # ```{admonition} Observación # :class: tip # # La norma $\infty$ se encuentra en la familia de las normas-p como límite: # # $$||x||_\infty = \displaystyle \lim_{p \rightarrow \infty} ||x||_p.$$ # # ``` # ```{admonition} Comentario # # En la norma $\mathcal{l}_2$ o Euclidiana $||x||_2$ tenemos una desigualdad muy importante, la desigualdad de **Cauchy-Schwartz**: # # $$|x^Ty| \leq ||x||_2||y||_2 \forall x,y \in \mathbb{R}^n$$ # # la cual relaciona el producto interno estándar para $x,y \in \mathbb{R}^n$: $<x,y> = x^Ty = \displaystyle \sum_{i=1}^nx_iy_i$ con la norma $\mathcal{l}_2$ de $x$ y la norma $\mathcal{l}_2$ de $y$. Además se utiliza lo anterior para definir el ángulo (sin signo por el intervalo en el que está $\cos^{-1}$) entre $x,y$: # # $$\measuredangle x,y = \cos ^{-1}\left(\frac{x^Ty}{||x||_2||y||_2} \right )$$ # # para $\cos^{-1}(u) \in [0,\pi]$ y se nombra a $x,y$ ortogonales si $x^Ty=0$. Obsérvese que $||x||_2 = \sqrt{x^Tx}$. # # ``` # ### Ejemplo # También se utilizan matrices para definir normas. # ```{admonition} Definición # # Recuérdese que una matriz es un arreglo $2$-dimensional de datos o $2$ arreglo de orden $2$. Se utiliza la notación $A \in \mathbb{R}^{m\times n}$ para denotar: # # # $$A = \left[\begin{array}{cccc} # a_{11} &a_{12}&\dots&a_{1n}\\ # a_{21} &a_{22}&\dots&a_{2n}\\ # \vdots &\vdots& \vdots&\vdots\\ # a_{n1} &a_{n2}&\dots&a_{nn}\\ # \vdots &\vdots& \vdots&\vdots\\ # a_{m-11} &a_{m-12}&\dots&a_{m-1n}\\ # a_{m1} &a_{m2}&\dots&a_{mm} # \end{array} # \right] # $$ # # con $a_{ij} \mathbb{R} \forall i=1,\dots,m, j=1,\dots,n$. 
Y se utilizan las siguientes notaciones para describir a la matriz $A$: # # * $A=(a_1,\dots a_n), a_j \in \mathbb{R}^m (=\mathbb{R}^{m\times1}) \forall j=1,\dots,n$. # # * $A=\left ( \begin{array}{c} # a_1^T\\ # \vdots\\ # a_m^T # \end{array} \right ), a_i \in \mathbb{R}^n (=\mathbb{R}^{n\times1}) \forall i=1,\dots,m$. # # La multiplicación de una matriz de tamaño $m\times n$ por un vector se define como: # # $$y=Ax=\displaystyle \sum_{j=1}^n a_jx_j$$ # # con $a_j \in \mathbb{R}^m, x \in \mathbb{R}^n$. Obsérvese que $x \in \mathbb{R}^n, Ax \in \mathbb{R}^m$. # # ``` # Un ejemplo de norma-$2$ ponderada es: $\{x \in \mathbb{R}^2 \text{ tales que } ||x||_D \leq 1, ||x||_D = ||Dx||_2, \text{con matriz diagonal } D \text{ y entradas positivas}\}$: d1_inv=1/5 d2_inv=1/3 f=lambda x: np.sqrt((d1_inv*x[:,0])**2 + (d2_inv*x[:,1])**2) #definición de norma2 density=1e-5 density_p=int(2.5*10**3) x=np.arange(-1/d1_inv,1/d1_inv,density) y1=1.0/d2_inv*np.sqrt(1-(d1_inv*x)**2) y2=-1.0/d2_inv*np.sqrt(1-(d1_inv*x)**2) x_p=np.random.uniform(-1/d1_inv,1/d1_inv,(density_p,2)) ind=f(x_p)<=1 x_p_subset=x_p[ind] plt.plot(x,y1,'b',x,y2,'b') plt.scatter(x_p_subset[:,0],x_p_subset[:,1],marker='.') plt.title('Puntos en el plano que cumplen $||x||_D \leq 1$') plt.grid() plt.show() # en este caso $D=\left[\begin{array}{cc} \frac{1}{25} &0\\ 0 &\frac{1}{9} \end{array}\right ] = \left[\begin{array}{cc} \frac{1}{d_1} &0\\ 0 &\frac{1}{d_2} \end{array}\right ]$ # (NMAT)= # ## Normas matriciales # ### Inducidas # De las normas matriciales más importantes se encuentran las **inducidas** por normas vectoriales. 
Estas normas matriciales se definen en términos de los vectores en $\mathbb{R}^n$ a los que se les aplica la multiplicación $Ax$: # # Dadas las normas vectoriales $||\cdot||_{(n)}, ||\cdot||_{(m)}$ en $\mathbb{R}^n$ y $\mathbb{R}^m$ respectivamente, la norma matricial inducida $||A||_{(m,n)}$ para $A \in \mathbb{R}^{m \times n}$ es el **menor número** $C$ para el cual la desigualdad: # # $$||Ax||_{(m)} \leq C||x||_{(n)}$$ # # se cumple $\forall x \in \mathbb{R}^n$. Esto es: # $$||A||_{(m,n)} = \displaystyle \sup_{x \in \mathbb{R}^n-\{0\}} \frac{||Ax||_{(m)}}{||x||_{(n)}}$$ # Ver {ref}`Nota sobre sup e inf <SI>` para definición de $\sup$. # ```{admonition} Comentarios # # * $||A||_{(m,n)}$ representa el **máximo** factor por el cual $A$ puede modificar el tamaño de $x$ sobre todos los vectores $x \in \mathbb{R}^n$, es una medida de un tipo de **worst case stretch factor**. # # * Así definidas, la norma $||\cdot||_{(m,n)}$ es la norma matricial inducida por las normas vectoriales $||\cdot||_{(m)}, ||\cdot||_{(n)}$. # # * Son definiciones equivalentes: # # $$||A||_{(m,n)} = \displaystyle \sup_{x \in \mathbb{R}^n-\{0\}} \frac{||Ax||_{(m)}}{||x||_{(n)}} = \displaystyle \sup_{||x||_{(n)} \leq 1} \frac{||Ax||_{(m)}}{||x||_{(n)}} = \displaystyle \sup_{||x||_{(n)}=1} ||Ax||_{(m)}$$ # # ``` # ### Ejemplo # La matriz $A=\left[\begin{array}{cc} # 1 &2\\ # 0 &2 # \end{array}\right ]$ mapea $\mathbb{R}^2$ a $\mathbb{R}^2$, en particular se tiene: # # * $A$ mapea $e_1 = \left[\begin{array}{c} # 1 \\ # 0 # \end{array}\right ]$ a la columna $a_1 = \left[\begin{array}{c} # 1 \\ # 0 # \end{array}\right ]$ de $A$. # # * $A$ mapea $e_2 = \left[\begin{array}{c} # 0 \\ # 1 # \end{array}\right ]$ a la columna $a_2 = \left[\begin{array}{c} # 2 \\ # 2 # \end{array}\right ]$ de $A$. 
# # Considerando $||A||_p := ||A||_{(p,p)}$ con $p=1, p=2, p=\infty$ se tiene: # <img src="https://dl.dropboxusercontent.com/s/3fqz9uspfwdurjf/normas_matriciales.png?dl=0" heigth="500" width="500"> # # # ```{admonition} Observación # :class: tip # # Al observar la segunda gráfica se tiene la siguiente afirmación: la acción de una matriz sobre una circunferencia es una elipse con longitudes de semiejes iguales a $|d_i|$. En general la acción de una matriz sobre una hiper esfera es una hiperelipse. Por lo que los vectores unitarios en $\mathbb{R}^n$ que son más amplificados por la acción de una matriz diagonal $D \in \mathbb{R}^{m\times n}$ con entradas iguales a $d_i$ son aquellos que se mapean a los semiejes de una hiperelipse en $\mathbb{R}^m$ de longitud igual a $\max\{|d_i|\}$ y así tenemos: si $D$ es una matriz diagonal con entradas $d_i$ entonces $||D||_2 = \displaystyle \max_{i=1,\dots,m}\{|d_i|\}$. # # ``` # ### Ejemplo A=np.array([[1,2],[0,2]]) density=1e-5 x1=np.arange(0,1,density) x2=np.arange(-1,0,density) x1_y1 = np.column_stack((x1,1-x1)) x2_y2 = np.column_stack((x2,1+x2)) x1_y3 = np.column_stack((x1,x1-1)) x2_y4 = np.column_stack((x2,-1-x2)) apply_A = lambda vec : np.transpose(A@np.transpose(vec)) A_to_vector_1 = apply_A(x1_y1) A_to_vector_2 = apply_A(x2_y2) A_to_vector_3 = apply_A(x1_y3) A_to_vector_4 = apply_A(x2_y4) plt.subplot(1,2,1) plt.plot(x1_y1[:,0],x1_y1[:,1],'b', x2_y2[:,0],x2_y2[:,1],'b', x1_y3[:,0],x1_y3[:,1],'b', x2_y4[:,0],x2_y4[:,1],'b') e1 = np.array([[0,0], [1, 0]]) e2 = np.array([[0, 0], [0, 1]]) plt.plot(e2[:,0], e2[:,1],'g', e1[:,0], e1[:,1],'b') plt.xlabel('Vectores con norma 1 menor o igual a 1') plt.grid() plt.subplot(1,2,2) plt.plot(A_to_vector_1[:,0],A_to_vector_1[:,1],'b', A_to_vector_2[:,0],A_to_vector_2[:,1],'b', A_to_vector_3[:,0],A_to_vector_3[:,1],'b', A_to_vector_4[:,0],A_to_vector_4[:,1],'b') A_to_vector_e2 = apply_A(e2) plt.plot(A_to_vector_e2[:,0],A_to_vector_e2[:,1],'g') plt.grid() plt.title('Efecto de la matriz A 
sobre los vectores con norma 1 menor o igual a 1') plt.show() print(np.linalg.norm(A,1)) # ```{admonition} Ejercicio # :class: tip # # Obtener las otras dos gráficas con Python usando norma $2$ y norma $\infty$. Para el caso de la norma $2$ el vector en color azul está dado por la descomposición en valores singulares (SVD) de A. En específico la primer columna de la matriz $U$ multiplicado por el primer valor singular. En el ejemplo resulta en: # # $$\sigma_1U[:,0] \approx 2.9208*\left[ \begin{array}{c} # 0.74967 \\ # 0.66180 # \end{array} \right] \approx \left[\begin{array}{c} # 2.189\\ # 1.932 # \end{array} # \right] # $$ # # y el vector $v$ que será multiplicado por la matriz $A$ es la primer columna de $V$ dada por: # # $$V[:,0] \approx \left[ # \begin{array}{c} # 0.2566\\ # 0.9664 # \end{array} # \right] # $$ # ``` # ### Resultados computacionales que son posibles probar # # 1.$||A||_1 = \displaystyle \max_{j=1,\dots,n}\sum_{i=1}^n|a_{ij}|$. # # 2.$||A||_\infty = \displaystyle \max_{i=1,\dots,n}\sum_{j=1}^n|a_{ij}|$. # # 3.$\begin{eqnarray}||A||_2 = \sqrt{\lambda_{\text{max}}(A^TA)} &=& \max \left \{\sqrt{\lambda}\in \mathbb{R} | \lambda \text{ es eigenvalor de } A^TA \right \} \nonumber \\ &=& \max \left \{ \sigma \in \mathbb{R} | \sigma \text{ es valor singular de A } \right \} \nonumber \\ &=& \sigma_{\text{max}}(A) \end{eqnarray}$. # por ejemplo para la matriz anterior se tiene: print(np.linalg.norm(A,2)) _,s,_ = np.linalg.svd(A) print(np.max(s)) # ## Otras normas matriciales # * Norma de Frobenius: $||A||_F = \text{tr}(A^TA)^{1/2} = \left ( \displaystyle \sum_{i=1}^m \sum_{j=1}^n a_{ij}^2 \right ) ^{1/2}$. # # * Norma "sum-absolute-value": $||A||_{sav} = \displaystyle \sum_{i=1}^m \sum_{j=1}^n |a_{ij}|$. # # * Norma "max-absolute-value": $||A||_{mav} = \displaystyle \max \left\{|a_{ij}| \text{ para } i=1,\dots,m , j=1,\dots,n \right \}$. 
# # # ```{admonition} Comentarios # # * El producto interno estándar en $\mathbb{R}^{m\times n}$ es: $<A,B> = tr(A^TB) = \displaystyle \sum_{i=1}^m \sum_{j=1}^n a_{ij}b_{ij}$. # # * La norma $2$ (también llamada norma espectral o $\mathcal{l}_2$) y la norma de Frobenius cumplen la propiedad de **consistencia**: # # $$||Ax|| \leq ||A|| ||x|| \forall x \in \mathbb{R}^n, \forall A \in \mathbb{R}^{m\times n}.$$ # # $$||AB|| \leq ||A|| ||B|| \forall A,B \text{ matrices con dimensiones correspondientes para su multiplicación}.$$ # # # ``` # ```{admonition} Observación # :class: tip # # La propiedad de consistencia también es cumplida por las normas-$p$ matriciales. # ``` # --- # (SI)= # ## Nota sobre $\sup$ e $\inf$ # Si $C \subseteq \mathbb{R}$ entonces $a \subseteq \mathbb{R}$ es una **cota superior** en $C$ si # # $$ x \leq a, \forall x \in C.$$ # # En $\mathbb{R}$ el conjunto de cotas superiores es $\emptyset, \mathbb{R}$ ó un intervalo de la forma $[b,\infty]$. En el último caso, $b$ se llama **mínima cota superior o supremo del conjunto** $C$ y se denota $\sup C$. Por convención $\sup\emptyset = -\infty$ y $\sup C=\infty$ si $C$ no es acotado por arriba. # ```{admonition} Observación # :class: tip # # Si $C$ es finito, $\sup C$ es el máximo de los elementos de $C$ y típicamente se denota como $\max C$. # ``` # Análogamente, $a \in \mathbb{R}$ es una **cota inferior** en $C \subseteq \mathbb{R}$ si # # $$a \leq x, \forall x \in C.$$ # El **ínfimo o máxima cota inferior** de $C$ es $\inf C = -\sup (-C)$. Por convención $\inf \emptyset = \infty$ y si $C$ no es acotado por debajo entonces $\inf C = -\infty$. # ```{admonition} Observación # :class: tip # # Si $C$ es finito, $\inf C$ es el mínimo de sus elementos y se denota como $\min C$. # # ``` # --- # ```{admonition} Ejercicios # :class: tip # # 1. Resuelve los ejercicios y preguntas de la nota. 
# # ``` # **Preguntas de comprehensión** # # 1)Menciona $5$ propiedades que un conjunto debe cumplir para que sea considerado un espacio vectorial. # # 2)Menciona las propiedades que debe cumplir una función para que se considere una norma. # # 3)¿Qué es una norma matricial inducida? ¿qué mide una norma matricial inducida? # # 4)¿La norma de Frobenius es una norma matricial inducida? # # 5)¿A qué son iguales $\text{sup}(\emptyset)$, $\text{inf}(\emptyset)$ ? (el conjunto $\emptyset$ es el conjunto vacío) # # **Referencias** # 1. <NAME>, <NAME>, Numerical linear algebra, SIAM, 1997. # # 2. <NAME>, <NAME>,Matrix Computations. John Hopkins University Press, 2013
libro_optimizacion/temas/1.computo_cientifico/1.3/Normas_vectoriales_y_matriciales.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + # default_exp models.TSTPlus # - # # TSTPlus (Time Series Transformer) # # > This is an unofficial PyTorch implementation created by <NAME> (<EMAIL>) based on TST (Zerveas, 2020) and Transformer (Vaswani, 2017). # **References:** # # * <NAME>., <NAME>., <NAME>., <NAME>., & <NAME>. (2020). [A Transformer-based Framework for Multivariate Time Series Representation Learning. arXiv preprint arXiv:2010.02803v2.](https://arxiv.org/pdf/2010.02803) # * No official implementation available as far as I know (Oct 10th, 2020) # # * <NAME>., <NAME>., <NAME>., <NAME>., <NAME>., <NAME>., ... & <NAME>. (2017). [Attention is all you need. In Advances in neural information processing systems (pp. 5998-6008).](https://papers.nips.cc/paper/7181-attention-is-all-you-need.pdf) # # * <NAME>., <NAME>., <NAME>., & <NAME>. (2020). Realformer: Transformer Likes Informed Attention. arXiv preprint arXiv:2012.11747. # # This implementation is adapted to work with the rest of the `tsai` library, and contain some hyperparameters that are not available in the original implementation. I included them for experimenting. # ## Tips on how to use transformers: # * In general, transformers require a lower lr compared to other time series models when used with the same datasets. It's important to use `learn.lr_find()` to learn what a good lr may be. In general, I've found lr between 1e-4 to 3e-4 work well. # # * The paper authors recommend to standardize data by feature. This can be done by adding `TSStandardize(by_var=True)` as a batch_tfm when creating the `TSDataLoaders`. # # * When using TST with a long time series, you may use `max_w_len` to reduce the memory size and thus avoid gpu issues. By default it's set to 512. # # * I've tried different types of positional encoders. 
In my experience, the default one works just fine. # # * In some of the cases I've used it, you may need to increase the res_dropout > .1 and/ or fc_dropout > 0 in order to achieve a good performance. # ## Imports #export from tsai.imports import * from tsai.utils import * from tsai.models.layers import * from tsai.models.utils import * from tsai.data.core import * # ## Positional encoders #export def SinCosPosEncoding(q_len, d_model, normalize=True): pe = torch.zeros(q_len, d_model, device=default_device()) position = torch.arange(0, q_len).unsqueeze(1) div_term = torch.exp(torch.arange(0, d_model, 2) * -(math.log(10000.0) / d_model)) pe[:, 0::2] = torch.sin(position * div_term) pe[:, 1::2] = torch.cos(position * div_term) if normalize: pe = pe - pe.mean() pe = pe / (pe.std() * 10) return pe.to(device=device) pe = SinCosPosEncoding(1000, 512).detach().cpu().numpy() plt.pcolormesh(pe, cmap='viridis') plt.title('SinCosPosEncoding') plt.colorbar() plt.show() pe.mean(), pe.std(), pe.min(), pe.max(), pe.shape #export def Coord2dPosEncoding(q_len, d_model, exponential=False, normalize=True, eps=1e-3, verbose=False, device=default_device()): x = .5 if exponential else 1 i = 0 for i in range(100): cpe = 2 * (torch.linspace(0, 1, q_len).reshape(-1, 1) ** x) * (torch.linspace(0, 1, d_model).reshape(1, -1) ** x) - 1 pv(f'{i:4.0f} {x:5.3f} {cpe.mean():+6.3f}', verbose) if abs(cpe.mean()) <= eps: break elif cpe.mean() > eps: x += .001 else: x -= .001 i += 1 if normalize: cpe = cpe - cpe.mean() cpe = cpe / (cpe.std() * 10) return cpe.to(device=device) cpe = Coord2dPosEncoding(1000, 512, exponential=True, normalize=True).cpu().numpy() plt.pcolormesh(cpe, cmap='viridis') plt.title('Coord2dPosEncoding') plt.colorbar() plt.show() plt.plot(cpe.mean(0)) plt.show() plt.plot(cpe.mean(1)) plt.show() cpe.mean(), cpe.std(), cpe.min(), cpe.max() #export def Coord1dPosEncoding(q_len, exponential=False, normalize=True, device=default_device()): cpe = (2 * (torch.linspace(0, 1, 
q_len).reshape(-1, 1)**(.5 if exponential else 1)) - 1) if normalize: cpe = cpe - cpe.mean() cpe = cpe / (cpe.std() * 10) return cpe.to(device=device) cpe = Coord1dPosEncoding(1000, exponential=True, normalize=True).detach().cpu().numpy() plt.pcolormesh(cpe, cmap='viridis') plt.title('Coord1dPosEncoding') plt.colorbar() plt.show() plt.plot(cpe.mean(1)) plt.show() cpe.mean(), cpe.std(), cpe.min(), cpe.max(), cpe.shape cpe = Coord1dPosEncoding(1000, exponential=True, normalize=True).detach().cpu().numpy() plt.pcolormesh(cpe, cmap='viridis') plt.title('Coord1dPosEncoding') plt.colorbar() plt.show() plt.plot(cpe.mean(1)) plt.show() cpe.mean(), cpe.std(), cpe.min(), cpe.max() # ## TST #export class ScaledDotProductAttention(Module): def __init__(self, d_k:int, res_attention:bool=False): self.d_k,self.res_attention = d_k,res_attention def forward(self, q:Tensor, k:Tensor, v:Tensor, prev:Optional[Tensor]=None, key_padding_mask:Optional[Tensor]=None, attn_mask:Optional[Tensor]=None): ''' Input shape: q : [bs x n_heads x q_len x d_k] k : [bs x n_heads x d_k x seq_len] v : [bs x n_heads x seq_len x d_k] key_padding_mask: [bs x seq_len] attn_mask : [seq_len x seq_len] Output shape: context: [bs x n_heads x q_len x d_v] attn : [bs x n_heads x q_len x seq_len] ''' # MatMul (q, k) - similarity scores for all pairs of positions in an input sequence scores = torch.matmul(q, k) # scores : [bs x n_heads x q_len x seq_len] # Scale scores = scores / (self.d_k ** 0.5) # Add previous scores (optional) if prev is not None: scores = scores + prev # Attention mask (optional) if attn_mask is not None: # attn_mask with shape [q_len x seq_len] - only used when q_len == seq_len if attn_mask.dtype == torch.bool: scores.masked_fill_(attn_mask, float('-inf')) else: scores += attn_mask # Key padding mask (optional) if key_padding_mask is not None: # key_padding_mask with shape [bs x seq_len] scores.masked_fill_(key_padding_mask.unsqueeze(1).unsqueeze(2), float('-inf')) # SoftMax attn = 
F.softmax(scores, dim=-1) # attn : [bs x n_heads x q_len x seq_len] # MatMul (attn, v) context = torch.matmul(attn, v) # context: [bs x n_heads x q_len x d_v] if self.res_attention: return context, attn, scores else: return context, attn # + B = 16 C = 3 H = 1 D = 128 M = 1500 N = 512 d_k = D // N xb = torch.randn(B, C, M) # Attention # q lin = nn.Linear(M, N, bias=False) Q = lin(xb).transpose(1,2) to_q = nn.Linear(C, D, bias=False) q = to_q(Q) # k, v context = xb.transpose(1,2) to_kv = nn.Linear(C, D * 2, bias=False) k, v = to_kv(context).chunk(2, dim = -1) k = k.transpose(-1, -2) q, k, v = q.unsqueeze(1), k.unsqueeze(1), v.unsqueeze(1) output, attn = ScaledDotProductAttention(d_k=d_k)(q, k, v) output.shape, attn.shape, Q.shape, q.shape, k.shape, v.shape # - q = torch.rand([16, 3, 50, 8]) k = torch.rand([16, 3, 50, 8]).transpose(-1, -2) v = torch.rand([16, 3, 50, 6]) attn_mask = torch.triu(torch.ones(50, 50)) # shape: q_len x q_len key_padding_mask = torch.zeros(16, 50) key_padding_mask[[1, 3, 6, 15], -10:] = 1 key_padding_mask = key_padding_mask.bool() output, attn = ScaledDotProductAttention(d_k=8)(q, k, v, attn_mask=attn_mask, key_padding_mask=key_padding_mask) output.shape, attn.shape #export class MultiHeadAttention(Module): def __init__(self, d_model:int, n_heads:int, d_k:int, d_v:int, res_attention:bool=False): r""" Input shape: Q, K, V:[batch_size (bs) x q_len x d_model], mask:[q_len x q_len] """ self.n_heads, self.d_k, self.d_v = n_heads, d_k, d_v self.W_Q = nn.Linear(d_model, d_k * n_heads, bias=False) self.W_K = nn.Linear(d_model, d_k * n_heads, bias=False) self.W_V = nn.Linear(d_model, d_v * n_heads, bias=False) self.W_O = nn.Linear(n_heads * d_v, d_model, bias=False) self.res_attention = res_attention # Scaled Dot-Product Attention (multiple heads) if self.res_attention: self.sdp_attn = ScaledDotProductAttention(self.d_k, self.res_attention) else: self.sdp_attn = ScaledDotProductAttention(self.d_k) def forward(self, Q:Tensor, K:Tensor, V:Tensor, 
prev:Optional[Tensor]=None, key_padding_mask:Optional[Tensor]=None, attn_mask:Optional[Tensor]=None): bs = Q.size(0) # Linear (+ split in multiple heads) q_s = self.W_Q(Q).view(bs, -1, self.n_heads, self.d_k).transpose(1,2) # q_s : [bs x n_heads x q_len x d_k] k_s = self.W_K(K).view(bs, -1, self.n_heads, self.d_k).permute(0,2,3,1) # k_s : [bs x n_heads x d_k x q_len] - transpose(1,2) + transpose(2,3) v_s = self.W_V(V).view(bs, -1, self.n_heads, self.d_v).transpose(1,2) # v_s : [bs x n_heads x q_len x d_v] # Scaled Dot-Product Attention (multiple heads) if self.res_attention: context, attn, scores = self.sdp_attn(q_s, k_s, v_s, prev=prev, key_padding_mask=key_padding_mask, attn_mask=attn_mask) else: context, attn = self.sdp_attn(q_s, k_s, v_s, key_padding_mask=key_padding_mask, attn_mask=attn_mask) # context: [bs x n_heads x q_len x d_v], attn: [bs x n_heads x q_len x q_len] # Concat context = context.transpose(1, 2).contiguous().view(bs, -1, self.n_heads * self.d_v) # context: [bs x q_len x n_heads * d_v] # Linear output = self.W_O(context) # context: [bs x q_len x d_model] if self.res_attention: return output, attn, scores else: return output, attn # output: [bs x q_len x d_model] t = torch.rand(16, 50, 128) output, attn = MultiHeadAttention(d_model=128, n_heads=3, d_k=8, d_v=6)(t, t, t, key_padding_mask=key_padding_mask, attn_mask=attn_mask) output.shape, attn.shape # + t = torch.rand(16, 50, 128) att_mask = (torch.rand((50, 50)) > .85).float() att_mask[att_mask == 1] = -float("Inf") mha = MultiHeadAttention(d_model=128, n_heads=3, d_k=8, d_v=6) output, attn = mha(t, t, t, attn_mask=att_mask) test_eq(torch.isnan(output).sum().item(), 0) test_eq(torch.isnan(attn).sum().item(), 0) loss = output[:2, :].sum() test_eq(torch.isnan(loss).sum().item(), 0) loss.backward() for n, p in mha.named_parameters(): test_eq(torch.isnan(p.grad).sum().item(), 0) # + t = torch.rand(16, 50, 128) attn_mask = (torch.rand((50, 50)) > .85) # True values will be masked mha = 
MultiHeadAttention(d_model=128, n_heads=3, d_k=8, d_v=6) output, attn = mha(t, t, t, attn_mask=att_mask) test_eq(torch.isnan(output).sum().item(), 0) test_eq(torch.isnan(attn).sum().item(), 0) loss = output[:2, :].sum() test_eq(torch.isnan(loss).sum().item(), 0) loss.backward() for n, p in mha.named_parameters(): test_eq(torch.isnan(p.grad).sum().item(), 0) # - #exporti class _TSTEncoderLayer(Module): def __init__(self, q_len:int, d_model:int, n_heads:int, d_k:Optional[int]=None, d_v:Optional[int]=None, d_ff:int=256, res_dropout:float=0.1, bias:bool=True, activation:str="gelu", res_attention:bool=False, pre_norm:bool=False): assert not d_model%n_heads, f"d_model ({d_model}) must be divisible by n_heads ({n_heads})" d_k = ifnone(d_k, d_model // n_heads) d_v = ifnone(d_v, d_model // n_heads) # Multi-Head attention self.res_attention = res_attention self.self_attn = MultiHeadAttention(d_model, n_heads, d_k, d_v, res_attention=res_attention) # Add & Norm self.dropout_attn = nn.Dropout(res_dropout) self.batchnorm_attn = nn.BatchNorm1d(q_len) # Position-wise Feed-Forward self.ff = nn.Sequential(nn.Linear(d_model, d_ff, bias=bias), self._get_activation_fn(activation), nn.Linear(d_ff, d_model, bias=bias)) # Add & Norm self.dropout_ffn = nn.Dropout(res_dropout) self.batchnorm_ffn = nn.BatchNorm1d(q_len) self.pre_norm = pre_norm def forward(self, src:Tensor, prev:Optional[Tensor]=None, key_padding_mask:Optional[Tensor]=None, attn_mask:Optional[Tensor]=None) -> Tensor: # Multi-Head attention sublayer if self.pre_norm: src = self.batchnorm_attn(src) # Norm: batchnorm ## Multi-Head attention if self.res_attention: src2, attn, scores = self.self_attn(src, src, src, prev, key_padding_mask=key_padding_mask, attn_mask=attn_mask) else: src2, attn = self.self_attn(src, src, src, key_padding_mask=key_padding_mask, attn_mask=attn_mask) self.attn = attn ## Add & Norm src = src + self.dropout_attn(src2) # Add: residual connection with residual dropout if not self.pre_norm: src = 
self.batchnorm_attn(src) # Norm: batchnorm # Feed-forward sublayer if self.pre_norm: src = self.batchnorm_ffn(src) # Norm: batchnorm ## Position-wise Feed-Forward src2 = self.ff(src) ## Add & Norm src = src + self.dropout_ffn(src2) # Add: residual connection with residual dropout if not self.pre_norm: src = self.batchnorm_ffn(src) # Norm: batchnorm if self.res_attention: return src, scores else: return src def _get_activation_fn(self, activation): if callable(activation): return activation() elif activation.lower() == "relu": return nn.ReLU() elif activation.lower() == "gelu": return nn.GELU() raise ValueError(f'{activation} is not available. You can use "relu", "gelu", or a callable') t = torch.rand(16, 50, 128) encoder = _TSTEncoderLayer(q_len=50, d_model=128, n_heads=8, d_k=None, d_v=None, d_ff=512, res_dropout=0.1, activation='gelu') output = encoder(t, key_padding_mask=key_padding_mask, attn_mask=attn_mask) output.shape cmap='viridis' figsize=(6,5) plt.figure(figsize=figsize) plt.pcolormesh(encoder.attn[0][0].detach().cpu().numpy(), cmap=cmap) plt.title('Self-attention map') plt.colorbar() plt.show() #exporti class _TSTEncoder(Module): def __init__(self, q_len, d_model, n_heads, d_k=None, d_v=None, d_ff=None, res_dropout=0.1, activation='gelu', res_attention=False, n_layers=1, pre_norm:bool=False): self.layers = nn.ModuleList([_TSTEncoderLayer(q_len, d_model, n_heads=n_heads, d_k=d_k, d_v=d_v, d_ff=d_ff, res_dropout=res_dropout, activation=activation, res_attention=res_attention, pre_norm=pre_norm) for i in range(n_layers)]) self.res_attention = res_attention def forward(self, src:Tensor, key_padding_mask:Optional[Tensor]=None, attn_mask:Optional[Tensor]=None): output = src scores = None if self.res_attention: for mod in self.layers: output, scores = mod(output, prev=scores, key_padding_mask=key_padding_mask, attn_mask=attn_mask) return output else: for mod in self.layers: output = mod(output, key_padding_mask=key_padding_mask, attn_mask=attn_mask) return 
output #exporti class _TSTBackbone(Module): def __init__(self, c_in:int, seq_len:int, max_seq_len:Optional[int]=512, n_layers:int=3, d_model:int=128, n_heads:int=16, d_k:Optional[int]=None, d_v:Optional[int]=None, d_ff:int=256, res_dropout:float=0.1, act:str="gelu", key_padding_mask:bool=True, attn_mask:Optional[Tensor]=None, res_attention:bool=True, pre_norm:bool=False, pe:str='zeros', learn_pe:bool=True, verbose:bool=False, **kwargs): # Input encoding q_len = seq_len self.new_q_len = False if max_seq_len is not None and seq_len > max_seq_len: # Control temporal resolution self.new_q_len = True q_len = max_seq_len tr_factor = math.ceil(seq_len / q_len) total_padding = (tr_factor * q_len - seq_len) padding = (total_padding // 2, total_padding - total_padding // 2) self.W_P = nn.Sequential(Pad1d(padding), Conv1d(c_in, d_model, kernel_size=tr_factor, stride=tr_factor)) pv(f'temporal resolution modified: {seq_len} --> {q_len} time steps: kernel_size={tr_factor}, stride={tr_factor}, padding={padding}.\n', verbose) elif kwargs: self.new_q_len = True t = torch.rand(1, 1, seq_len) q_len = Conv1d(1, 1, **kwargs)(t).shape[-1] self.W_P = Conv1d(c_in, d_model, **kwargs) # Eq 2 pv(f'Conv1d with kwargs={kwargs} applied to input to create input encodings\n', verbose) else: self.W_P = nn.Linear(c_in, d_model) # Eq 1: projection of feature vectors onto a d-dim vector space self.seq_len = q_len # Positional encoding self.W_pos = self._positional_encoding(pe, learn_pe, q_len, d_model) # Residual dropout self.res_dropout = nn.Dropout(res_dropout) # Encoder self.encoder = _TSTEncoder(q_len, d_model, n_heads, d_k=d_k, d_v=d_v, d_ff=d_ff, res_dropout=res_dropout, pre_norm=pre_norm, activation=act, res_attention=res_attention, n_layers=n_layers) self.transpose = Transpose(-1, -2, contiguous=True) self.key_padding_mask = key_padding_mask self.attn_mask = attn_mask def forward(self, x:Tensor) -> Tensor: # x: [bs x nvars x q_len] # Padding mask x, key_padding_mask = 
self._key_padding_mask(x) # Input encoding if self.new_q_len: u = self.W_P(x).transpose(2,1) # Eq 2 # u: [bs x d_model x q_len] transposed to [bs x q_len x d_model] else: u = self.W_P(x.transpose(2,1)) # Eq 1 # u: [bs x q_len x nvars] converted to [bs x q_len x d_model] # Positional encoding u = self.res_dropout(u + self.W_pos) # Encoder z = self.encoder(u, key_padding_mask=key_padding_mask, attn_mask=self.attn_mask) # z: [bs x q_len x d_model] z = self.transpose(z) # z: [bs x d_model x q_len] return z def _positional_encoding(self, pe, learn_pe, q_len, d_model): # Positional encoding if pe == None: W_pos = torch.zeros((q_len, d_model), device=default_device()) # pe = None and learn_pe = False can be used to measure impact of pe learn_pe = False elif pe == 'zero': W_pos = torch.zeros((q_len, 1), device=default_device()) elif pe == 'zeros': W_pos = torch.zeros((q_len, d_model), device=default_device()) elif pe == 'normal' or pe == 'gauss': W_pos = torch.zeros((q_len, 1), device=default_device()) torch.nn.init.normal_(W_pos, mean=0.0, std=0.1) elif pe == 'uniform': W_pos = torch.zeros((q_len, 1), device=default_device()) nn.init.uniform_(W_pos, a=0.0, b=0.1) elif pe == 'lin1d': W_pos = Coord1dPosEncoding(q_len, exponential=False, normalize=True) elif pe == 'exp1d': W_pos = Coord1dPosEncoding(q_len, exponential=True, normalize=True) elif pe == 'lin2d': W_pos = Coord2dPosEncoding(q_len, d_model, exponential=False, normalize=True) elif pe == 'exp2d': W_pos = Coord2dPosEncoding(q_len, d_model, exponential=True, normalize=True) elif pe == 'sincos': W_pos = SinCosPosEncoding(q_len, d_model, normalize=True) else: raise ValueError(f"{pe} is not a valid pe (positional encoder. 
Available types: 'gauss'=='normal', \ 'zeros', 'zero', uniform', 'lin1d', 'exp1d', 'lin2d', 'exp2d', 'sincos', None.)") return nn.Parameter(W_pos, requires_grad=learn_pe) def _key_padding_mask(self, x): mask = torch.isnan(x) x[mask] = 0 if self.key_padding_mask and mask.any(): mask = TSMaskTensor((mask.float().mean(1)==1).bool()) # key_padding_mask: [bs x q_len] return x, mask else: return x, None # + hide_input=true #hide # class TSTPlus(Module): # def __init__(self, c_in:int, c_out:int, seq_len:int, max_seq_len:Optional[int]=512, # n_layers:int=3, d_model:int=128, n_heads:int=16, d_k:Optional[int]=None, d_v:Optional[int]=None, # d_ff:int=256, res_dropout:float=0.1, act:str="gelu", res_attention:bool=True, # pe:str='zeros', learn_pe:bool=True, flatten:bool=True, fc_dropout:float=0., # concat_pool:bool=True, bn:bool=False, custom_head:Optional=None, # y_range:Optional[tuple]=None, verbose:bool=False, **kwargs): # r"""TST (Time Series Transformer) is a Transformer that takes continuous time series as inputs. # As mentioned in the paper, the input must be standardized by_var based on the entire training set. # Args: # c_in: the number of features (aka variables, dimensions, channels) in the time series dataset. # c_out: the number of target classes. # seq_len: number of time steps in the time series. # max_seq_len: useful to control the temporal resolution in long time series to avoid memory issues. Default=512. # d_model: total dimension of the model (number of features created by the model) # n_heads: parallel attention heads. # d_k: size of the learned linear projection of queries and keys in the MHA. Usual values: 16-512. Default: None -> (d_model/n_heads) = 32. # d_v: size of the learned linear projection of values in the MHA. Usual values: 16-512. Default: None -> (d_model/n_heads) = 32. # d_ff: the dimension of the feedforward network model. # res_dropout: amount of residual dropout applied in the encoder. 
# act: the activation function of intermediate layer, relu or gelu. # res_attention: if True Residual MultiHeadAttention is applied. # num_layers: the number of sub-encoder-layers in the encoder. # pe: type of positional encoder. # Available types (for experimenting): None, 'exp1d', 'lin1d', 'exp2d', 'lin2d', 'sincos', 'gauss' or 'normal', # 'uniform', 'zero', 'zeros' (default, as in the paper). # learn_pe: learned positional encoder (True, default) or fixed positional encoder. # flatten: this will flatten the encoder output to be able to apply an mlp type of head (default=True) # fc_dropout: dropout applied to the final fully connected layer. # concat_pool: indicates whether global adaptive concat pooling will be used instead of global adaptive pooling. # bn: indicates if batchnorm will be applied to the head. # custom_head: custom head that will be applied to the network. It must contain all kwargs (pass a partial function) # y_range: range of possible y values (used in regression tasks). # kwargs: nn.Conv1d kwargs. If not {}, a nn.Conv1d with those kwargs will be applied to original time series. 
# Input shape: # x: bs (batch size) x nvars (aka features, variables, dimensions, channels) x seq_len (aka time steps) # attn_mask: q_len x q_len # """ # self.c_out, self.seq_len = c_out, seq_len # # Input encoding # q_len = seq_len # self.new_q_len = False # if max_seq_len is not None and seq_len > max_seq_len: # Control temporal resolution # self.new_q_len = True # q_len = max_seq_len # tr_factor = math.ceil(seq_len / q_len) # total_padding = (tr_factor * q_len - seq_len) # padding = (total_padding // 2, total_padding - total_padding // 2) # self.W_P = nn.Sequential(Pad1d(padding), Conv1d(c_in, d_model, kernel_size=tr_factor, stride=tr_factor)) # pv(f'temporal resolution modified: {seq_len} --> {q_len} time steps: kernel_size={tr_factor}, stride={tr_factor}, padding={padding}.\n', verbose) # elif kwargs: # self.new_q_len = True # t = torch.rand(1, 1, seq_len) # q_len = Conv1d(1, 1, **kwargs)(t).shape[-1] # self.W_P = Conv1d(c_in, d_model, **kwargs) # Eq 2 # pv(f'Conv1d with kwargs={kwargs} applied to input to create input encodings\n', verbose) # else: # self.W_P = nn.Linear(c_in, d_model) # Eq 1: projection of feature vectors onto a d-dim vector space # # Positional encoding # if pe == None: # W_pos = torch.zeros((q_len, d_model), device=default_device()) # pe = None and learn_pe = False can be used to measure impact of pe # learn_pe = False # elif pe == 'zero': W_pos = torch.zeros((q_len, 1), device=default_device()) # elif pe == 'zeros': W_pos = torch.zeros((q_len, d_model), device=default_device()) # elif pe == 'normal' or pe == 'gauss': # W_pos = torch.zeros((q_len, 1), device=default_device()) # torch.nn.init.normal_(W_pos, mean=0.0, std=0.1) # elif pe == 'uniform': # W_pos = torch.zeros((q_len, 1), device=default_device()) # nn.init.uniform_(W_pos, a=0.0, b=0.1) # elif pe == 'lin1d': W_pos = Coord1dPosEncoding(q_len, exponential=False, normalize=True) # elif pe == 'exp1d': W_pos = Coord1dPosEncoding(q_len, exponential=True, normalize=True) # elif pe == 
'lin2d': W_pos = Coord2dPosEncoding(q_len, d_model, exponential=False, normalize=True) # elif pe == 'exp2d': W_pos = Coord2dPosEncoding(q_len, d_model, exponential=True, normalize=True) # elif pe == 'sincos': W_pos = SinCosPosEncoding(q_len, d_model, normalize=True) # else: raise ValueError(f"{pe} is not a valid pe (positional encoder. Available types: 'gauss'=='normal', \ # 'zeros', 'zero', uniform', 'lin1d', 'exp1d', 'lin2d', 'exp2d', 'sincos', None.)") # self.W_pos = nn.Parameter(W_pos, requires_grad=learn_pe) # # Residual dropout # self.res_dropout = nn.Dropout(res_dropout) # # Encoder # encoder_layer = TSTEncoderLayer(q_len, d_model, n_heads, d_k=d_k, d_v=d_v, d_ff=d_ff, res_dropout=res_dropout, activation=act, # res_attention=res_attention) # self.encoder = TSTEncoder(encoder_layer, n_layers, res_attention=res_attention) # self.transpose = Transpose(-1, -2, contiguous=True) # # Head # self.head_nf = d_model # self.c_out = c_out # self.seq_len = q_len # if custom_head: self.head = custom_head(self.head_nf, c_out, q_len) # custom head passed as a partial func with all its kwargs # else: self.head = self.create_head(self.head_nf, c_out, q_len, flatten=flatten, concat_pool=concat_pool, fc_dropout=fc_dropout, bn=bn, y_range=y_range) # def create_head(self, nf, c_out, seq_len, flatten=True, concat_pool=False, fc_dropout=0., bn=False, y_range=None): # if flatten: # nf *= seq_len # layers = [Flatten()] # else: # if concat_pool: nf *= 2 # layers = [GACP1d(1) if concat_pool else GAP1d(1)] # layers += [LinBnDrop(nf, c_out, bn=bn, p=fc_dropout)] # if y_range: layers += [SigmoidRange(*y_range)] # return nn.Sequential(*layers) # def forward(self, x:Tensor, attn_mask:Optional[Tensor]=None) -> Tensor: # x: [bs x nvars x q_len], attn_mask: [q_len x q_len] # # Input encoding # if self.new_q_len: u = self.W_P(x).transpose(2,1) # Eq 2 # u: [bs x d_model x q_len] transposed to [bs x q_len x d_model] # else: u = self.W_P(x.transpose(2,1)) # Eq 1 # u: [bs x q_len x d_model] 
transposed to [bs x q_len x d_model] # # Positional encoding # u = self.res_dropout(u + self.W_pos) # # Encoder # z = self.encoder(u, attn_mask=attn_mask) # z: [bs x q_len x d_model] # z = self.transpose(z) # z: [bs x d_model x q_len] # # Classification/ Regression head # return self.head(z) # def show_pe(self, cmap='viridis', figsize=None): # plt.figure(figsize=figsize) # plt.pcolormesh(self.W_pos.detach().cpu().T, cmap=cmap) # plt.title('Positional Encoding') # plt.colorbar() # plt.show() # plt.figure(figsize=figsize) # plt.title('Positional Encoding - value along time axis') # plt.plot(F.relu(self.W_pos.data).mean(1).cpu()) # plt.plot(-F.relu(-self.W_pos.data).mean(1).cpu()) # plt.show() # - #export class TSTPlus(nn.Sequential): def __init__(self, c_in:int, c_out:int, seq_len:int, max_seq_len:Optional[int]=512, n_layers:int=3, d_model:int=128, n_heads:int=16, d_k:Optional[int]=None, d_v:Optional[int]=None, d_ff:int=256, res_dropout:float=0.1, act:str="gelu", key_padding_mask:bool=True, attn_mask:Optional[Tensor]=None, res_attention:bool=False, pre_norm:bool=False, pe:str='zeros', learn_pe:bool=True, flatten:bool=True, fc_dropout:float=0., concat_pool:bool=True, bn:bool=False, custom_head:Optional=None, y_range:Optional[tuple]=None, verbose:bool=False, **kwargs): r"""TST (Time Series Transformer) is a Transformer that takes continuous time series as inputs. As mentioned in the paper, the input must be standardized by_var based on the entire training set. Args: c_in: the number of features (aka variables, dimensions, channels) in the time series dataset. c_out: the number of target classes. seq_len: number of time steps in the time series. max_seq_len: useful to control the temporal resolution in long time series to avoid memory issues. Default=512. d_model: total dimension of the model (number of features created by the model). Default: 128 (range(64-512)) n_heads: parallel attention heads. Default:16 (range(8-16)). 
d_k: size of the learned linear projection of queries and keys in the MHA. Usual values: 16-512. Default: None -> (d_model/n_heads) = 32. d_v: size of the learned linear projection of values in the MHA. Usual values: 16-512. Default: None -> (d_model/n_heads) = 32. d_ff: the dimension of the feedforward network model. Default: 512 (range(256-512)) res_dropout: amount of residual dropout applied in the encoder. act: the activation function of intermediate layer, relu or gelu. key_padding_mask: a boolean padding mask will be applied to attention if True to those steps in a sample where all features are nan. attn_mask: a boolean mask will be applied to attention if a tensor of shape [min(seq_len, max_seq_len) x min(seq_len, max_seq_len)] if provided. res_attention: if True Residual MultiHeadAttention is applied. pre_norm: if True normalization will be applied as the first step in the sublayers. Defaults to False num_layers: number of layers (or blocks) in the encoder. Default: 3 (range(1-4)) pe: type of positional encoder. Available types (for experimenting): None, 'exp1d', 'lin1d', 'exp2d', 'lin2d', 'sincos', 'gauss' or 'normal', 'uniform', 'zero', 'zeros' (default, as in the paper). learn_pe: learned positional encoder (True, default) or fixed positional encoder. flatten: this will flatten the encoder output to be able to apply an mlp type of head (default=True) fc_dropout: dropout applied to the final fully connected layer. concat_pool: indicates whether global adaptive concat pooling will be used instead of global adaptive pooling. bn: indicates if batchnorm will be applied to the head. custom_head: custom head that will be applied to the network. It must contain all kwargs (pass a partial function) y_range: range of possible y values (used in regression tasks). kwargs: nn.Conv1d kwargs. If not {}, a nn.Conv1d with those kwargs will be applied to original time series. 
Input shape: x: bs (batch size) x nvars (aka features, variables, dimensions, channels) x seq_len (aka time steps) attn_mask: q_len x q_len """ # Backbone backbone = _TSTBackbone(c_in, seq_len=seq_len, max_seq_len=max_seq_len, n_layers=n_layers, d_model=d_model, n_heads=n_heads, d_k=d_k, d_v=d_v, d_ff=d_ff, res_dropout=res_dropout, act=act, key_padding_mask=key_padding_mask, attn_mask=attn_mask, res_attention=res_attention, pe=pe, learn_pe=learn_pe, verbose=verbose, **kwargs) # Head self.head_nf = d_model self.c_out = c_out self.seq_len = backbone.seq_len if custom_head: head = custom_head(self.head_nf, c_out, self.seq_len) # custom head passed as a partial func with all its kwargs else: head = self.create_head(self.head_nf, c_out, self.seq_len, flatten=flatten, concat_pool=concat_pool, fc_dropout=fc_dropout, bn=bn, y_range=y_range) super().__init__(OrderedDict([('backbone', backbone), ('head', head)])) def create_head(self, nf, c_out, seq_len, flatten=True, concat_pool=False, fc_dropout=0., bn=False, y_range=None): if flatten: nf *= seq_len layers = [Flatten()] else: if concat_pool: nf *= 2 layers = [GACP1d(1) if concat_pool else GAP1d(1)] layers += [LinBnDrop(nf, c_out, bn=bn, p=fc_dropout)] if y_range: layers += [SigmoidRange(*y_range)] return nn.Sequential(*layers) def show_pe(self, cmap='viridis', figsize=None): plt.figure(figsize=figsize) plt.pcolormesh(self.backbone.W_pos.detach().cpu().T, cmap=cmap) plt.title('Positional Encoding') plt.colorbar() plt.show() plt.figure(figsize=figsize) plt.title('Positional Encoding - value along time axis') plt.plot(F.relu(self.backbone.W_pos.data).mean(1).cpu()) plt.plot(-F.relu(-self.backbone.W_pos.data).mean(1).cpu()) plt.show() # + from tsai.models.utils import build_ts_model bs = 8 c_in = 9 # aka channels, features, variables, dimensions c_out = 2 seq_len = 1_500 xb = torch.randn(bs, c_in, seq_len).to(device) # standardize by channel by_var based on the training set xb = (xb - xb.mean((0, 2), keepdim=True)) / 
xb.std((0, 2), keepdim=True) # Settings max_seq_len = 256 d_model = 128 n_heads = 16 d_k = d_v = None # if None --> d_model // n_heads d_ff = 256 res_dropout = 0.1 activation = "gelu" n_layers = 3 fc_dropout = 0.1 pe = None learn_pe = True kwargs = {} model = TSTPlus(c_in, c_out, seq_len, max_seq_len=max_seq_len, d_model=d_model, n_heads=n_heads, d_k=d_k, d_v=d_v, d_ff=d_ff, res_dropout=res_dropout, activation=activation, n_layers=n_layers, fc_dropout=fc_dropout, pe=pe, learn_pe=learn_pe, **kwargs).to(device) test_eq(model(xb).shape, [bs, c_out]) test_eq(model[0], model.backbone) test_eq(model[1], model.head) model2 = build_ts_model(TSTPlus, c_in, c_out, seq_len, max_seq_len=max_seq_len, d_model=d_model, n_heads=n_heads, d_k=d_k, d_v=d_v, d_ff=d_ff, res_dropout=res_dropout, activation=activation, n_layers=n_layers, fc_dropout=fc_dropout, pe=pe, learn_pe=learn_pe, **kwargs).to(device) test_eq(model2(xb).shape, [bs, c_out]) test_eq(model2[0], model2.backbone) test_eq(model2[1], model2.head) print(f'model parameters: {count_parameters(model)}') # - model = TSTPlus(c_in, c_out, seq_len, pre_norm=True).to(device) test_eq(model(xb).shape, [bs, c_out]) from tsai.models.TST import TST c_in = 9 # aka channels, features, variables, dimensions c_out = 2 seq_len = 150 test_eq(count_parameters(TSTPlus(c_in, c_out, seq_len)), count_parameters(TST(c_in, c_out, seq_len))) # + bs = 8 c_in = 9 # aka channels, features, variables, dimensions c_out = 2 seq_len = 5000 xb = torch.randn(bs, c_in, seq_len) # standardize by channel by_var based on the training set xb = (xb - xb.mean((0, 2), keepdim=True)) / xb.std((0, 2), keepdim=True) model = TSTPlus(c_in, c_out, seq_len, res_attention=True) test_eq(model(xb).shape, [bs, c_out]) print(f'model parameters: {count_parameters(model)}') # - custom_head = partial(create_pool_head, concat_pool=True) model = TSTPlus(c_in, c_out, seq_len, max_seq_len=max_seq_len, d_model=d_model, n_heads=n_heads, d_k=d_k, d_v=d_v, d_ff=d_ff, 
res_dropout=res_dropout, activation=activation, n_layers=n_layers, fc_dropout=fc_dropout, pe=pe, learn_pe=learn_pe, flatten=False, custom_head=custom_head, **kwargs) test_eq(model(xb).shape, [bs, c_out]) print(f'model parameters: {count_parameters(model)}') custom_head = partial(create_pool_plus_head, concat_pool=True) model = TSTPlus(c_in, c_out, seq_len, max_seq_len=max_seq_len, d_model=d_model, n_heads=n_heads, d_k=d_k, d_v=d_v, d_ff=d_ff, res_dropout=res_dropout, activation=activation, n_layers=n_layers, fc_dropout=fc_dropout, pe=pe, learn_pe=learn_pe, flatten=False, custom_head=custom_head, **kwargs) test_eq(model(xb).shape, [bs, c_out]) print(f'model parameters: {count_parameters(model)}') # + bs = 8 c_in = 9 # aka channels, features, variables, dimensions c_out = 2 seq_len = 60 xb = torch.randn(bs, c_in, seq_len) # standardize by channel by_var based on the training set xb = (xb - xb.mean((0, 2), keepdim=True)) / xb.std((0, 2), keepdim=True) # Settings max_seq_len = 120 d_model = 128 n_heads = 16 d_k = d_v = None # if None --> d_model // n_heads d_ff = 256 res_dropout = 0.1 act = "gelu" n_layers = 3 fc_dropout = 0.1 pe='zeros' learn_pe=True kwargs = {} # kwargs = dict(kernel_size=5, padding=2) model = TSTPlus(c_in, c_out, seq_len, max_seq_len=max_seq_len, d_model=d_model, n_heads=n_heads, d_k=d_k, d_v=d_v, d_ff=d_ff, res_dropout=res_dropout, act=act, n_layers=n_layers, fc_dropout=fc_dropout, pe=pe, learn_pe=learn_pe, **kwargs) test_eq(model(xb).shape, [bs, c_out]) print(f'model parameters: {count_parameters(model)}') body, head = model[0], model[1] test_eq(body(xb).ndim, 3) test_eq(head(body(xb)).ndim, 2) head # - model.show_pe() model = TSTPlus(3, 2, 10) xb = torch.randn(4, 3, 10) yb = torch.randint(0, 2, (4,)) test_eq(model.backbone._key_padding_mask(xb)[1], None) random_idxs = np.random.choice(len(xb), 2, False) xb[random_idxs, :, -5:] = float('nan') xb[random_idxs, 0, 1] = float('nan') test_eq(model.backbone._key_padding_mask(xb.clone())[1].data, 
(torch.isnan(xb).float().mean(1)==1).bool()) test_eq(model.backbone._key_padding_mask(xb.clone())[1].data.shape, (4,10)) print(torch.isnan(xb).sum()) pred = model(xb.clone()) loss = CrossEntropyLossFlat()(pred, yb) loss.backward() torch.isnan(xb), model.backbone._key_padding_mask(xb)[1].data #export @delegates(TSTPlus.__init__) class MultiTSTPlus(nn.Sequential): _arch = TSTPlus def __init__(self, feat_list, c_out, seq_len, max_seq_len:Optional[int]=512, custom_head=None, **kwargs): r""" MultiTST is a class that allows you to create a model with multiple branches of TST. Args: * feat_list: list with number of features that will be passed to each body. """ self.feat_list = [feat_list] if isinstance(feat_list, int) else feat_list self.device = ifnone(device, default_device()) # Backbone branches = nn.ModuleList() self.head_nf = 0 for feat in self.feat_list: m = build_ts_model(self._arch, c_in=feat, c_out=c_out, seq_len=seq_len, max_seq_len=max_seq_len, **kwargs) with torch.no_grad(): self.head_nf += m[0](torch.randn(1, feat, ifnone(seq_len, 10)).to(self.device)).shape[1] branches.append(m.backbone) backbone = _Splitter(self.feat_list, branches) # Head self.c_out = c_out q_len = min(seq_len, max_seq_len) self.seq_len = q_len if custom_head is None: head = self._arch.create_head(self, self.head_nf, c_out, q_len) else: head = custom_head(self.head_nf, c_out, q_len) layers = OrderedDict([('backbone', nn.Sequential(backbone)), ('head', nn.Sequential(head))]) super().__init__(layers) self.to(self.device) #exporti class _Splitter(Module): def __init__(self, feat_list, branches): self.feat_list, self.branches = feat_list, branches def forward(self, x): x = torch.split(x, self.feat_list, dim=1) for i, branch in enumerate(self.branches): out = branch(x[i]) if i == 0 else torch.cat([out, branch(x[i])], dim=1) return out bs = 8 c_in = 7 # aka channels, features, variables, dimensions c_out = 2 seq_len = 10 xb2 = torch.randn(bs, c_in, seq_len) model1 = MultiTSTPlus([2, 5], c_out, 
seq_len, ) model2 = MultiTSTPlus(7, c_out, seq_len) test_eq(model1(xb2).shape, model2(xb2).shape) test_eq(count_parameters(model1) > count_parameters(model2), True) model1 = MultiTSTPlus([2, 5], c_out, seq_len, y_range=(0.5, 5.5)) body, head = split_model(model1) test_eq(body(xb2).ndim, 3) test_eq(head(body(xb2)).ndim, 2) head model = MultiTSTPlus([2, 5], c_out, seq_len, pre_norm=True) bs = 8 n_vars = 3 seq_len = 12 c_out = 2 xb = torch.rand(bs, n_vars, seq_len) net = MultiTSTPlus(n_vars, c_out, seq_len) change_model_head(net, create_pool_plus_head, concat_pool=False) print(net(xb).shape) net.head bs = 8 n_vars = 3 seq_len = 12 c_out = 10 xb = torch.rand(bs, n_vars, seq_len) new_head = partial(conv_lin_3d_head, d=(5 ,2)) net = MultiTSTPlus(n_vars, c_out, seq_len, custom_head=new_head) print(net(xb).shape) net.head #hide out = create_scripts(); beep(out)
nbs/108c_models.TSTPlus.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # Milestone Project 2 - Complete Walkthrough Solution
# This notebook walks through a proposed solution to the Blackjack Game milestone project. The approach to solving and
# the specific code used are only suggestions - there are many different ways to code this out, and yours is likely to
# be different!

# ## Game Play
# To play a hand of Blackjack the following steps must be followed:
# 1. Create a deck of 52 cards
# 2. Shuffle the deck
# 3. Ask the Player for their bet
# 4. Make sure that the Player's bet does not exceed their available chips
# 5. Deal two cards to the Dealer and two cards to the Player
# 6. Show only one of the Dealer's cards, the other remains hidden
# 7. Show both of the Player's cards
# 8. Ask the Player if they wish to Hit, and take another card
# 9. If the Player's hand doesn't Bust (go over 21), ask if they'd like to Hit again.
# 10. If a Player Stands, play the Dealer's hand. The dealer will always Hit until the Dealer's value meets or exceeds 17
# 11. Determine the winner and adjust the Player's chips accordingly
# 12. Ask the Player if they'd like to play again

# ## Playing Cards
# A standard deck of playing cards has four suits (Hearts, Diamonds, Spades and Clubs) and thirteen ranks (2 through 10,
# then the face cards Jack, Queen, King and Ace) for a total of 52 cards per deck. Jacks, Queens and Kings all have a
# rank of 10. Aces have a rank of either 11 or 1 as needed to reach 21 without busting. As a starting point in your
# program, you may want to assign variables to store a list of suits, ranks, and then use a dictionary to map ranks to
# values.

# ## The Game
# ### Imports and Global Variables
# ** Step 1: Import the random module. This will be used to shuffle the deck prior to dealing. Then, declare variables
# to store suits, ranks and values. You can develop your own system, or copy ours below. Finally, declare a Boolean
# value to be used to control <code>while</code> loops. This is a common practice used to control the flow of the game.**
#
#     suits = ('Hearts', 'Diamonds', 'Spades', 'Clubs')
#     ranks = ('Two', 'Three', 'Four', 'Five', 'Six', 'Seven', 'Eight', 'Nine', 'Ten', 'Jack', 'Queen', 'King', 'Ace')
#     values = {'Two':2, 'Three':3, 'Four':4, 'Five':5, 'Six':6, 'Seven':7, 'Eight':8, 'Nine':9, 'Ten':10, 'Jack':10,
#               'Queen':10, 'King':10, 'Ace':11}

# +
import random

# Module-level game data shared by the classes below: the four suits, the
# thirteen ranks, and the Blackjack value of each rank (an Ace starts at 11;
# the Hand class later adjusts it down to 1 when needed to avoid busting).
suits = ('Hearts', 'Diamonds', 'Spades', 'Clubs')
ranks = ('Two', 'Three', 'Four', 'Five', 'Six', 'Seven', 'Eight', 'Nine', 'Ten', 'Jack', 'Queen', 'King', 'Ace')
values = {'Two':2, 'Three':3, 'Four':4, 'Five':5, 'Six':6, 'Seven':7, 'Eight':8, 'Nine':9, 'Ten':10, 'Jack':10,
         'Queen':10, 'King':10, 'Ace':11}

# Flag that controls the gameplay while-loop (set to False when the Player stands).
playing = True
# -

# ### Class Definitions
# Consider making a Card class where each Card object has a suit and a rank, then a Deck class to hold all 52 Card
# objects, and can be shuffled, and finally a Hand class that holds those Cards that have been dealt to each player
# from the Deck.

# **Step 2: Create a Card Class**<br>
# A Card object really only needs two attributes: suit and rank. You might add an attribute for "value" - we chose to
# handle value later when developing our Hand class.<br>In addition to the Card's \_\_init\_\_ method, consider adding
# a \_\_str\_\_ method that, when asked to print a Card, returns a string in the form "Two of Hearts"

class Card:
    """A single playing card identified by its suit and rank."""

    def __init__(self,suit,rank):
        self.suit = suit  # e.g. 'Hearts'
        self.rank = rank  # e.g. 'Two'

    def __str__(self):
        # Human-readable form, e.g. "Two of Hearts".
        return self.rank + ' of ' + self.suit

# **Step 3: Create a Deck Class**<br>
# Here we might store 52 card objects in a list that can later be shuffled. First, though, we need to *instantiate* all
# 52 unique card objects and add them to our list. So long as the Card class definition appears in our code, we can
# build Card objects inside our Deck \_\_init\_\_ method. Consider iterating over sequences of suits and ranks to build
# out each card. This might appear inside a Deck class \_\_init\_\_ method:
#
#     for suit in suits:
#         for rank in ranks:
#
# In addition to an \_\_init\_\_ method we'll want to add methods to shuffle our deck, and to deal out cards during
# gameplay.<br><br>
# OPTIONAL: We may never need to print the contents of the deck during gameplay, but having the ability to see the
# cards inside it may help troubleshoot any problems that occur during development. With this in mind, consider adding
# a \_\_str\_\_ method to the class definition.

class Deck:
    """A full 52-card deck built from the module-level suits/ranks; supports shuffling and dealing."""

    def __init__(self):
        self.deck = []  # start with an empty list
        for suit in suits:
            for rank in ranks:
                self.deck.append(Card(suit,rank))  # build Card objects and add them to the list

    def __str__(self):
        deck_comp = ''  # start with an empty string
        for card in self.deck:
            deck_comp += '\n '+card.__str__()  # add each Card object's print string
        return 'The deck has:' + deck_comp

    def shuffle(self):
        # Shuffle the deck in place using random.shuffle.
        random.shuffle(self.deck)

    def deal(self):
        # Remove and return the top (last) card of the deck.
        single_card = self.deck.pop()
        return single_card

# TESTING: Just to see that everything works so far, let's see what our Deck looks like!

test_deck = Deck()
print(test_deck)

# Great! Now let's move on to our Hand class.

# **Step 4: Create a Hand Class**<br>
# In addition to holding Card objects dealt from the Deck, the Hand class may be used to calculate the value of those
# cards using the values dictionary defined above. It may also need to adjust for the value of Aces when appropriate.
class Hand:
    """First version of Hand: holds dealt Cards and tracks their running total value."""

    def __init__(self):
        self.cards = []  # start with an empty list as we did in the Deck class
        self.value = 0  # start with zero value
        self.aces = 0  # add an attribute to keep track of aces

    def add_card(self,card):
        self.cards.append(card)
        self.value += values[card.rank]

    def adjust_for_ace(self):
        # Placeholder for now - the Ace adjustment is implemented in the revised class below.
        pass

# TESTING: Before we tackle the issue of changing Aces, let's make sure we can add two cards to a player's hand and
# obtain their value:

test_deck = Deck()
test_deck.shuffle()
test_player = Hand()
test_player.add_card(test_deck.deal())
test_player.add_card(test_deck.deal())
test_player.value

# Let's see what these two cards are:

for card in test_player.cards:
    print(card)

# Great! Now let's tackle the Aces issue. If a hand's value exceeds 21 but it contains an Ace, we can reduce the Ace's
# value from 11 to 1 and continue playing.

class Hand:
    """Final version of Hand: also counts Aces and downgrades them from 11 to 1 to avoid busting."""

    def __init__(self):
        self.cards = []  # start with an empty list as we did in the Deck class
        self.value = 0  # start with zero value
        self.aces = 0  # add an attribute to keep track of aces

    def add_card(self,card):
        self.cards.append(card)
        self.value += values[card.rank]
        if card.rank == 'Ace':
            self.aces += 1  # add to self.aces

    def adjust_for_ace(self):
        # Each pass turns one Ace from 11 into 1 (i.e. subtracts 10), repeating
        # until the hand no longer busts or there are no adjustable Aces left.
        while self.value > 21 and self.aces:
            self.value -= 10
            self.aces -= 1

# We added code to the add_card method to bump self.aces whenever an ace is brought into the hand, and added code to
# the adjust_for_aces method that decreases the number of aces any time we make an adjustment to stay under 21.

# **Step 5: Create a Chips Class**<br>
# In addition to decks of cards and hands, we need to keep track of a Player's starting chips, bets, and ongoing
# winnings. This could be done using global variables, but in the spirit of object oriented programming, let's make a
# Chips class instead!
class Chips:
    """Track the Player's bankroll (total) and the current bet."""

    def __init__(self):
        self.total = 100  # This can be set to a default value or supplied by a user input
        self.bet = 0

    def win_bet(self):
        """Credit the current bet to the total."""
        self.total += self.bet

    def lose_bet(self):
        """Debit the current bet from the total."""
        self.total -= self.bet

# A NOTE ABOUT OUR DEFAULT TOTAL VALUE:<br>
# Alternatively, we could have passed a default total value as a parameter in the \_\_init\_\_. This would have let us pass in an override value at the time the object was created rather than wait until later to change it. The code would have looked like this:
#
#     def __init__(self,total=100):
#         self.total = total
#         self.bet = 0
#
# Either technique is fine, it only depends on how you plan to start your game parameters.

# ### Function Definitions
# A lot of steps are going to be repetitive. That's where functions come in! The following steps are guidelines - add or remove functions as needed in your own program.

# **Step 6: Write a function for taking bets**<br>
# Since we're asking the user for an integer value, this would be a good place to use <code>try</code>/<code>except</code>. Remember to check that a Player's bet can be covered by their available chips.

def take_bet(chips):
    """Prompt until the user enters an integer bet no larger than chips.total.

    Mutates chips.bet in place; returns None.
    """
    while True:
        try:
            chips.bet = int(input('How many chips would you like to bet? '))
        except ValueError:
            print('Sorry, a bet must be an integer!')
        else:
            if chips.bet > chips.total:
                print("Sorry, your bet can't exceed",chips.total)
            else:
                break

# We used a <code>while</code> loop here to continually prompt the user for input until we received an integer value that was within the Player's betting limit.

# A QUICK NOTE ABOUT FUNCTIONS:<br>
# If we knew in advance what we were going to call our Player's Chips object, we could have written the above function like this:
#
#     def take_bet():
#         while True:
#             try:
#                 player_chips.bet = int(input('How many chips would you like to bet? 
')) # except ValueError: # print('Sorry, a bet must be an integer!') # else: # if player_chips.bet > player_chips.total: # print("Sorry, your bet can't exceed",player_chips.total) # else: # break # # and then we could call the function without passing any arguments. This is generally not a good idea! It's better to have functions be self-contained, able to accept any incoming value than depend on some future naming convention. Also, this makes it easier to add players in future versions of our program! # **Step 7: Write a function for taking hits**<br> # Either player can take hits until they bust. This function will be called during gameplay anytime a Player requests a hit, or a Dealer's hand is less than 17. It should take in Deck and Hand objects as arguments, and deal one card off the deck and add it to the Hand. You may want it to check for aces in the event that a player's hand exceeds 21. def hit(deck,hand): hand.add_card(deck.deal()) hand.adjust_for_ace() # **Step 8: Write a function prompting the Player to Hit or Stand**<br> # This function should accept the deck and the player's hand as arguments, and assign playing as a global variable.<br> # If the Player Hits, employ the hit() function above. If the Player Stands, set the playing variable to False - this will control the behavior of a <code>while</code> loop later on in our code. def hit_or_stand(deck,hand): global playing # to control an upcoming while loop while True: x = input("Would you like to Hit or Stand? Enter 'h' or 's' ") if x[0].lower() == 'h': hit(deck,hand) # hit() function defined above elif x[0].lower() == 's': print("Player stands. Dealer is playing.") playing = False else: print("Sorry, please try again.") continue break # **Step 9: Write functions to display cards**<br> # When the game starts, and after each time Player takes a card, the dealer's first card is hidden and all of Player's cards are visible. 
At the end of the hand all cards are shown, and you may want to show each hand's total value. Write a function for each of these scenarios. # + def show_some(player,dealer): print("\nDealer's Hand:") print(" <card hidden>") print('',dealer.cards[1]) print("\nPlayer's Hand:", *player.cards, sep='\n ') def show_all(player,dealer): print("\nDealer's Hand:", *dealer.cards, sep='\n ') print("Dealer's Hand =",dealer.value) print("\nPlayer's Hand:", *player.cards, sep='\n ') print("Player's Hand =",player.value) # - # QUICK NOTES ABOUT PRINT STATEMENTS:<br> # # * The asterisk <code>*</code> symbol is used to print every item in a collection, and the <code>sep='\n '</code> argument prints each item on a separate line. # # * In the fourth line where we have # # print('',dealer.cards[1]) # # the empty string and comma are there just to add a space. # # - Here we used commas to separate the objects being printed in each line. If you want to concatenate strings using the <code>+</code> symbol, then you have to call each Card object's \_\_str\_\_ method explicitly, as with # # print(' ' + dealer.cards[1].__str__()) # # **Step 10: Write functions to handle end of game scenarios**<br> # Remember to pass player's hand, dealer's hand and chips as needed. # + def player_busts(player,dealer,chips): print("Player busts!") chips.lose_bet() def player_wins(player,dealer,chips): print("Player wins!") chips.win_bet() def dealer_busts(player,dealer,chips): print("Dealer busts!") chips.win_bet() def dealer_wins(player,dealer,chips): print("Dealer wins!") chips.lose_bet() def push(player,dealer): print("Dealer and Player tie! It's a push.") # - # ### And now on to the game!! while True: # Print an opening statement print('Welcome to BlackJack! Get as close to 21 as you can without going over!\n\ Dealer hits until she reaches 17. 
Aces count as 1 or 11.') # Create & shuffle the deck, deal two cards to each player deck = Deck() deck.shuffle() player_hand = Hand() player_hand.add_card(deck.deal()) player_hand.add_card(deck.deal()) dealer_hand = Hand() dealer_hand.add_card(deck.deal()) dealer_hand.add_card(deck.deal()) # Set up the Player's chips player_chips = Chips() # remember the default value is 100 # Prompt the Player for their bet take_bet(player_chips) # Show cards (but keep one dealer card hidden) show_some(player_hand,dealer_hand) while playing: # recall this variable from our hit_or_stand function # Prompt for Player to Hit or Stand hit_or_stand(deck,player_hand) # Show cards (but keep one dealer card hidden) show_some(player_hand,dealer_hand) # If player's hand exceeds 21, run player_busts() and break out of loop if player_hand.value > 21: player_busts(player_hand,dealer_hand,player_chips) break # If Player hasn't busted, play Dealer's hand until Dealer reaches 17 if player_hand.value <= 21: while dealer_hand.value < 17: hit(deck,dealer_hand) # Show all cards show_all(player_hand,dealer_hand) # Run different winning scenarios if dealer_hand.value > 21: dealer_busts(player_hand,dealer_hand,player_chips) elif dealer_hand.value > player_hand.value: dealer_wins(player_hand,dealer_hand,player_chips) elif dealer_hand.value < player_hand.value: player_wins(player_hand,dealer_hand,player_chips) else: push(player_hand,dealer_hand) # Inform Player of their chips total print("\nPlayer's winnings stand at",player_chips.total) # Ask to play again new_game = input("Would you like to play another hand? Enter 'y' or 'n' ") if new_game[0].lower()=='y': playing=True continue else: print("Thanks for playing!") break # And that's it! Remember, these steps may differ significantly from your own solution. That's OK! Keep working on different sections of your program until you get the desired results. It takes a lot of time and patience! 
As always, feel free to post questions and comments to the QA Forums. # # Good job!
Complete-Python-3-Bootcamp-master/07-Milestone Project - 2/03-Milestone Project 2 - Complete Walkthrough Solution.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + import pandas as pd import json import seaborn as sns import matplotlib.pyplot as plt pd.set_option('display.max_columns', None) # + with open('compiled_results_kley.json', 'r') as f: data = json.load(f) df_games = pd.json_normalize(data, record_path='games', meta=['id_match', 'date'], errors='ignore') df_games.index = df_games['id_match'] df_games = df_games[df_games['date'] > '2020-04-07'] df_games.drop(columns=['id_match', 'date'], inplace=True) display(df_games.head(3)) print(df_games.shape) # - sns.distplot(df_games[df_games['winner'] == 'player']['last_turn'], label='wins') sns.distplot(df_games[df_games['winner'] == 'opponent']['last_turn'], label='losses') plt.legend() plt.show()
mtgo_data_analysis.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     name: python3
# ---

# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/gist/Panchajanya1999/8d6372a0a2e5f77cebe80e36b2d18d1c/pigeonhole.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>

# + id="m157O_3_szr1"
# PigeonHole Sort
def pigeonhole_sort(array):
    """Sort a list of integers in place using pigeonhole sort.

    Runs in O(n + size) time where size = max - min + 1, so it is only
    appropriate when the value range is comparable to the list length.

    Parameters
    ----------
    array : list[int]
        The list to sort; modified in place. Returns None.
    """
    # BUGFIX: an empty list previously crashed on min()/max(); it is
    # already trivially sorted, so just return.
    if not array:
        return

    # Find the minimum & maximum element of the array
    minimum = min(array)
    maximum = max(array)

    # Size is the size of the pigeonholes to be made
    size = maximum - minimum + 1

    # Pigeonholes to be allocated: one counting slot per possible value
    pigeonholes = [0] * size

    # put the pigeons in the holes by taking each pigeon in buckets
    for i in array:
        pigeonholes[i - minimum] += 1

    # Walk the holes in order and write the values back into the array
    j = 0
    for count in range(size):
        while pigeonholes[count] > 0:
            pigeonholes[count] -= 1
            array[j] = count + minimum
            j += 1


# + colab={"base_uri": "https://localhost:8080/"} id="OaFUxZF6PHn4" outputId="e92568eb-afe4-42a0-aef4-501476158e6c"
# runnable code hooking the driver code
a = [ 10, 9, 8, 7, 6, 5, 4, 3, 2, 1]

# Print the original code
print("The original array was : ")
print(a)

# Sort the unsorted original array by hooking driver code
print("The sorted code is :")
pigeonhole_sort(a)
print(a)
1st-sem-pg/python/pigeonhole.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + [markdown] colab_type="text" id="view-in-github" # <a href="https://colab.research.google.com/github/zerotodeeplearning/ztdl-masterclasses/blob/master/notebooks/Word_Embeddings_with_Gensim.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a> # + [markdown] colab_type="text" id="2bwH96hViwS7" # ## Learn with us: www.zerotodeeplearning.com # # Copyright © 2021: Zero to Deep Learning ® Catalit LLC. # + colab={} colab_type="code" id="bFidPKNdkVPg" # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
# + [markdown] colab_type="text" id="DvoukA2tkGV4" # # Word Embeddings with Gensim # + colab={} colab_type="code" id="C9yGuqMlt3uv" import pandas as pd import numpy as np import matplotlib.pyplot as plt import tensorflow as tf import gzip import os import gensim import gensim.downloader as api # - info = api.info() info.keys() info['models'].keys() glove_model = api.load('glove-wiki-gigaword-50') glove_model.most_similar(positive=['good'], topn=5) glove_model.most_similar(positive=['two'], topn=5) glove_model.most_similar(positive=['king', 'woman'], negative=['man'], topn=3) glove_model.n_similarity('this is a cat'.split(), 'this is a dog'.split(),) glove_model.n_similarity('this is a cat'.split(), 'the sky is blue'.split(),) glove_model.n_similarity('i really like learning new concepts'.split(), 'i am delighted to understand new ideas'.split(),) glove_model.n_similarity('i really like learning new concepts'.split(), "the table is green and the light is blue".split(),) glove_size = len(glove_model['cat']) glove_size # + plt.subplot(211) plt.plot(glove_model['two']) plt.plot(glove_model['three']) plt.plot(glove_model['four']) plt.title("A few numbers") plt.ylim(-2, 5) plt.subplot(212) plt.plot(glove_model['cat']) plt.plot(glove_model['dog']) plt.plot(glove_model['rabbit']) plt.title("A few animals") plt.ylim(-2, 5) plt.tight_layout() # - # ### Exercise 1 # # Let's take a look at the embeddings in a lower dimensional space. Use a PCA dimensionality reduction technique to project the first 300 words in the `glove_model` vector onto a 3D space. # # - select the first 300 words and the corresponding vectors using `glove_model.vocab` and `glove_model.vectors` # - Instantiate a PCA with 3 principal components and use the method `.fit_transform` to project the vectors onto a 3D space. 
# - use `px.scatter_3d` from `plotly.express` to visualize the first 300 words and see how they are arranged in the embedded space from sklearn.decomposition import PCA import plotly.express as px
notebooks/Word_Embeddings_with_Gensim.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3.9.0 64-bit ('emma-watson') # metadata: # interpreter: # hash: 64aefe5b869f1290ff4de3ee336b91b598f3cf379f1c85f644156f79c1a37abc # name: python3 # --- # # "BERT applied to Multiple Choice" # > "Step by step about a specific fine tuning task:)" # # - toc: true # - branch: master # - author: <NAME> # - badges: true # - hide_binder_badge: false # - hide_colab_badge: false # - comments: true # - categories: [masters, nlp, knowledge-distill] # - hide: false # - search_exclude: false # # Drilling down Multiple Choice downstream task # # > Note: I have learned how to use bibtex citations with fastpages! Therefore, all my next post are going to follow these kind of formatting whenever possible. If you are interested, check [this](https://drscotthawley.github.io/devblog4/2020/07/01/Citations-Via-Bibtex.html) out. # # When I started studying Language Models, I remember when I've found the following image from Open AI transformer paper {% cite Radford2018ImprovingLU %} : # # # ![](images/downstream-gpt.png "Example of fine-tuning tasks from GPT paper") # However, the only difference is that the **input data** should be _slightly_ different: # # > For these tasks, we are given a context # document $z$, a question $q$, and a set of possible answers ${a_k}$. We concatenate the document context # and question with each possible answer, adding a delimiter token in between to get [$z$; $q$; $ \$ $; $a_k$]. Each of these sequences are **processed independently with our model and then normalized via a softmax layer to produce an output distribution over possible answers**. # Therefore, these inputs could be optimized via [Categorical Cross Entropy Loss](https://pytorch.org/docs/stable/generated/torch.nn.CrossEntropyLoss.html), where $C$ is the number of options available. For a specific question. 
# 
# # From GPT to BERT
# 
# As we will see with [Hugging Face's transformer library](https://huggingface.co/transformers/), when we consider application from a fine tuning task, the approach of BERT can be derived directly from the technique presented by {% cite Radford2018ImprovingLU %}.
# It is possible to check it from [documentation](https://huggingface.co/transformers/model_doc/bert.html#transformers.BertForMultipleChoice)
# 
# > Bert Model with a multiple choice classification head on top (a linear layer on top of the pooled output and a softmax) e.g. for RocStories/SWAG tasks.

#collapse
import numpy as np
import torch
from transformers import BertTokenizer, BertForMultipleChoice

# NOTE(review): these calls download pretrained weights from the Hugging Face
# hub on first use — they require network access.
tokenizer = BertTokenizer.from_pretrained("bert-base-uncased")
model = BertForMultipleChoice.from_pretrained("bert-base-uncased")

question = "George wants to warm his hands quickly by rubbing them. Which skin surface will produce the most heat?"
option_a = "dry palms"
option_b = "wet palms"
option_c = "palms covered with oil"
option_d = "palms covered with lotion"

# In this case, option A is the correct one. Furthermore, the batch size here would be 1

labels = torch.tensor(0).unsqueeze(0)  # index of the correct option, with a batch dim

# Notice that the question is the same for each option

# +
# Tokenize each (question, option) pair; padding=True pads all four
# sequences to the same length so they can be stacked.
encoding = tokenizer(
    [question, question, question, question],
    [option_a, option_b, option_c, option_d],
    return_tensors='pt',
    padding=True
)

# unsqueeze(0) adds the batch dimension BertForMultipleChoice expects.
outputs = model(**{k: v.unsqueeze(0) for k,v in encoding.items()}, labels=labels)
# -

# > Important: Notice that if we have a dataset such as SquaD where each question comes with a context, we could append this context to either the question text or the option text and we would then have the tuple cited by [Open AI transformer paper](https://www.cs.ubc.ca/~amuham01/LING530/papers/radford2018improving.pdf)
# 
# The output is a linear layer which would still be trained through a Cross Entropy loss. Then, as stated by the documentation, we still need to apply softmax to the logits

loss = outputs.loss
logits = outputs.logits

# # Linear Logits output:

# hide-input
logits

# Logits after the softmax function. Since this model did not learn anything, the result below is expected:

# +
# hide-input
# NOTE(review): calling softmax without an explicit dim= argument is
# deprecated in newer PyTorch versions — presumably dim=-1 is intended; confirm.
torch.nn.functional.softmax(logits)
# -

# # Conclusion
# 
# Congratulations! Adding up with the [first part](https://abarbosa94.github.io/personal_blog/masters/nlp/knowledge-distill/2020/09/19/Distilling-BERT.html), you have learned the end-to-end BERT Flow :)
# 
# # References
# 
# {% bibliography --cited %}
_notebooks/2021-02-21-Downstream-BERT-QA.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # name: python3 # --- # + [markdown] id="view-in-github" colab_type="text" # <a href="https://colab.research.google.com/github/brenoslivio/Statistics-Python/blob/main/1-DescriptiveAnalysis/1_DescriptiveAnalysis.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a> # + [markdown] id="Mk33yY8stlEd" # # Descriptive Analysis # + [markdown] id="ImoMuyWWtu-a" # *There are many ways to analyse data, but one of the most known methods in Statistics is probably descriptive analysis, which seeks to describe or summarize past and present data, helping to create accessible data insights. It's the field of Statistics that deals with the description and summarization of data. It will be given some practical examples of this kind of analysis using a dataset. Recommended text for the subject are the Chapters 2 and 3 from **Introductory Statistics** by **<NAME>**, which will be our main source of knowledge in this notebook.* # + [markdown] id="7vHn5AdQthNh" # # # --- # # # + [markdown] id="8YcFlMMrZn7p" # ## Table of contents # # # 1. [Data type](#type) # # 1.1 [Qualitative variables](#qualita) # # * [Nominal](#nominal) # # * [Ordinal](#ordinal) # # * [Scales](#scale1) # # 1.2 [Quantitative variables](#quantita) # # * [Discrete](#discrete) # # * [Continuous](#cont) # # * [Scales](#scale2) # # 2. [Measures of position](#mespos) # # 2.1 [Quantiles](#quantiles) # # * [Percentiles](#percentiles) # # * [Deciles](#deciles) # # * [Quartiles](#quartiles) # # * [Median](#median) # # 2.2 [Mean](#mean) # # 2.3 [Mode](#mode) # # 3. [Measures of dispersion](#mesdis) # # 3.1 [Variance](#variance) # # 3.2 [Standard deviation](#sd) # # 3.3 [Range](#range) # # 3.4 [Interquantile range](#iqr) # # 4. 
[Table of frequencies](#table) # # 4.1 [Absolute](#absolute) # # 4.2 [Relative](#relative) # # 4.3 [Simple](#simple) # # 4.4 [Cumulative](#cumulative) # # 4.5 [Complete frequency table examples](#freqex) # # 5. [Graphs](#graphs) # # 5.1 [Pie chart](#pie) # # 5.2 [Bar chart](#bar) # # 5.3 [Pareto chart](#pareto) # # 5.4 [Boxplot](#boxplot) # # 5.5 [Scatter plot](#dispersion) # # 5.6 [Line chart](#line) # # 6. [References](#refs) # # + [markdown] id="YjQZ8oMkzNvR" # # # --- # # # + [markdown] id="2HqNyurdzXpR" # ## Data type <a name="type"></a> # # We can divide our data in some types which are definitely important to treat and understand the data to visualize and plot graphs. # # + [markdown] id="NGKi7JBv23Zy" # ### Qualitative variables <a name="qualita"></a> # # Data like this don't have a proper mathematical meaning and they are usually used as labels for variables. They represent quality and the values can be associated with categories. Some can be sorted, but arithmetic operations are not aplicable. # + [markdown] id="3iV1mQlY3ioK" # #### Nominal <a name="nominal"></a> # + [markdown] id="Nz0Qbud0kvy3" # We have examples as labeling data. They don't indicate a order relation. # # $$\text{e.g.}\;\text{Sex:}\;\{Male, Female\}; \text{Bank Id:}\;\{44641, 22442\}.$$ # + [markdown] id="iAS3CaJN3it3" # #### Ordinal <a name="ordinal"></a> # + [markdown] id="omrO8klSkwy9" # Like nominal data, they are labels but can indicate a kind of order. # # $$\text{e.g.}\;\text{Fever:}\;\{High, Medium, Low\}; \text{Height:}\;\{Tall, Short\}.$$ # + [markdown] id="WsfFlKvqGBSs" # #### Scales <a name="scale1"></a> # + [markdown] id="2lfqBNx8GEMm" # We have also nominal and ordinal scales. # # **Nominal scales**: We can use operations such as $=$ and $\neq$. # # **Ordinal scales**: We can use operations such as $=,\neq, \lt, \gt, \le, \ge $. 
# + [markdown] id="2FJD_vgX23jf" # ### Quantitative variables <a name="quantita"></a> # # This type of data have proper mathematical meaning and we can use for number operations. They are numeric and have some associated unit. They also can be sorted. # + [markdown] id="4ujbKVqB4aZq" # #### Discrete <a name="discrete"></a> # + [markdown] id="hLTGKY7Bl-zf" # We have usually $\mathbb{Z}$ values, which are countable. Binary variables (boolean) are also discrete. # # $$\text{e.g.}\;\{5, 26, 51\}; \{0, 1\}$$ # + [markdown] id="n0O2T-PE4aZz" # #### Continuous <a name="cont"></a> # + [markdown] id="DDcezdH6mKEI" # In this case we have $\mathbb{R}$ values, which can be infinite. Continuous variables are usually defined as measurements. # # $$\text{e.g.}\;\text{Weight:}\;\{2.57kg, 5.45kg, 3.89kg\}; \text{Distance:}\;\{6.7km, 2.3km\}.$$ # + [markdown] id="aU9DDbo2GfmQ" # #### Scales <a name="scale2"></a> # + [markdown] id="5qlzYP4XGyGn" # We also have interval and rational scales # # **Interval scales**: We can use operations such as $=, \neq, \lt, \gt, \le, \ge, +, -$ e.g. ºC or ºF. # # **Rational scales**: We can use operations such as $=, \neq, \lt, \gt, \le, \ge, +, -, *, /$ e.g. Kelvin temperature, money. This scale have an absolute zero. # + [markdown] id="MwXOJ6JrQBpK" # ## Loading the data # # With the explanation about data types, we will load a dataset to use as example to take different measurements and observations. We load with a Pandas data frame. 
# + id="tYBTuSCpPxAD" outputId="3931c8e6-713b-4a3b-830a-e22f050446cf" colab={"base_uri": "https://localhost:8080/", "height": 206} import pandas as pd import numpy as np dfPoll = pd.read_csv("https://raw.githubusercontent.com/brenoslivio/Statistics-Python/main/1-DescriptiveAnalysis/dataset.csv", dtype={ "Qual sua idade?": np.int32, "Qual sua altura em metros?": np.float64, "Seu peso em kg.": np.int32, "Sexo?": str, "Grau de escolaridade?": str, "Em qual estado você nasceu?": str, "Em quantos irmãos vocês são (contando contigo)?": np.int32, "Quantos membros tem sua família? (Quantos moram contigo, ou 1 caso more sozinho)": np.int32, "Você trabalha/estuda atualmente?": str, "Qual atividade realiza com mais frequência?": str, }, na_values="", ) dfPoll.columns =['age', 'height_m', 'weight_kg', 'sex', 'schooling', 'state', 'brothersplus_you', 'family_members', 'work_study', 'freq_activity'] dfPoll.sample(5) # Five random sample to show # + [markdown] id="uaaVvrjr48X4" # ## Measures of position <a name="mespos"></a> # + [markdown] id="0TpZaLgm6Q9X" # #### Mean <a name="mean"></a> # + [markdown] id="39KpH2wuXiKU" # We can only use for quantitative data for using numeric data. 
We get the average value of the data:
# 
# $$\overline{X} = \displaystyle\sum_{i=1}^{N}\frac{X_i}{N}$$
# 
# Let's get the mean of the ages in our dataset:

# + id="yLAMia7c_fLA" outputId="ffdf04ad-a34e-414a-a08c-add36dd57bd1" colab={"base_uri": "https://localhost:8080/"}
sum(dfPoll['age'])/len(dfPoll['age']) # Like the formula

# + id="Tq2Pk1zE_3Ou" outputId="2ccadd46-fe1d-4d3f-f4e2-f80753425549" colab={"base_uri": "https://localhost:8080/"}
dfPoll['age'].mean() # Pandas method to return the mean

# + [markdown] id="jJqnByob6Ntm"
# #### Median <a name="median"></a>

# + [markdown] id="AX7pBHNpYQT1"
# We find the value in the center position, but for this we must analyse if the data is odd or even:
# 
# $$median(X) = \left\{ \begin{array}{cl}
# \frac{1}{2}(X_r + X_{r+1}) & : \ X\;\text{is even}\;(n = 2r)\\
# X_{r+1} & : \ X\;\text{is odd}\;(n = 2r + 1)
# \end{array} \right.$$
# 
# If we have assymetric data, $median(X) \neq \overline{X}$. On a normal distribution we can have a much better approximation.
# 
# Median can also work better for outliers/discrepant values.
# 
# Getting the median for a variable of the dataset:

# + id="8nfzoWFNAWRi" outputId="b8b0945c-9a93-4da0-cfbc-abc840cadbd3" colab={"base_uri": "https://localhost:8080/"}
# Manual median: sort, reset to a clean positional index, then pick the
# middle element (or average the two middle elements for an even count).
height = dfPoll['height_m'].sort_values(ascending=True)
height = height.reset_index(drop=True)

median = 0

if(len(height) % 2 == 0): # if data is even
    median = (height[int((len(height)/2)-1)] + height[int(len(height)/2)])/2
else: # if data is odd
    median = height[int(len(height)/2)]

median

# + id="IcLh1xQMAWm2" outputId="c6e5bd23-2405-4762-f394-4d9fc5da056d" colab={"base_uri": "https://localhost:8080/"}
dfPoll['height_m'].median() # direct approach

# + [markdown] id="G2Sy8cTC6Ux2"
# ### Mode <a name="mode"></a>

# + [markdown] id="amwq0zERWmGB"
# Returns the value that appears more times on a given data. We can use for any data type. 
# 
# Let's find the mode for the column state:

# + id="Za3RqD2iG-po" outputId="7688b759-a143-453f-e764-9855ec2b537e" colab={"base_uri": "https://localhost:8080/"}
state = dfPoll['state'].value_counts().to_frame() # Creating a data frame that count the frequency that all values appear
state.reset_index(level=0, inplace=True)
state.columns = ['State', 'Count']

# BUGFIX: value_counts() sorts by frequency in descending order, so the mode
# is the FIRST row. The previous `state.max()` returned column-wise maxima
# (the alphabetically last state name paired with the highest count), which
# is not the mode.
state.iloc[0]

# + id="TcKZgQsyG-_w" outputId="f3eb0b41-bcd5-48fc-a910-4c583068d124" colab={"base_uri": "https://localhost:8080/"}
dfPoll['state'].mode() # direct approach

# + [markdown] id="8pszjfsA5ZFg"
# ### Quantiles <a name="quantiles"></a>
# 
# Quantiles are the idea of splitting the data distribution that we are working with. We have methods like Percentiles, Deciles and Quartiles. The list must be ordered.

# + [markdown] id="BYEL8p4c55X0"
# #### Percentiles <a name="percentiles"></a>

# + [markdown] id="NPA0yRoNNNKz"
# We can divide the distribution in 100 parts with $(0 < p \leq 100)$. To find the P-th percentile for $N$ we can use an ordinal rank given by:
# 
# $$ n = \frac{p}{100}\times (N - 1) + 1.$$
# 
# If $n$ is whole, we would have that $P_{p} = x_n$
# 
# So if we have an ordered list like $\{3, 7, 9, 24, 30\}$, with $N = 5$. 
# 
# And we want the 30th Percentile, we would have:
# 
# $$n = \frac{30}{100}\times (5 - 1) + 1 = 2.2 $$
# 
# We don't have a whole number so we will use:
# 
# $$P_{p} = x_{ni} + n_{f} \times (x_{ni+1} - x_{ni})$$
# 
# $ni$ would be the integer part of n, $n_f$ is the fractional part of n, so we would have:
# 
# $$P_{30th} = x_{2} + 0.2 \times (x_{3} - x_{2}) = 7 + 0.2\times(9 - 7) = 7.4$$
# 
# Let's see the idea using a code to find the 39th Percentile for the weights:

# + id="w9TlXlAAfTBb" outputId="fde7300d-16b1-4767-ae24-037aec3eec3c" colab={"base_uri": "https://localhost:8080/"}
# Manual percentile via the ordinal-rank formula above.
weight = dfPoll['weight_kg'].sort_values(ascending=True)
weight = weight.reset_index(drop=True)

p = 39

n = (p/100)*(len(weight) - 1) + 1

P = 0

if(n % 1 == 0):
    # BUGFIX: n is a float here (e.g. 3.0); indexing a RangeIndex Series
    # with a float label is deprecated/removed in modern pandas, so cast
    # to int as the else-branch already does.
    P = weight[int(n) - 1]
else:
    P = weight[int(n) - 1] + (n % 1)*(weight[int(n)] - weight[int(n) - 1])

P

# + id="Lu0_rBo5fTyX" outputId="2889e513-2216-4bfc-ad2d-2de1de9dbdc5" colab={"base_uri": "https://localhost:8080/"}
dfPoll['weight_kg'].quantile(0.39) # direct approach

# + [markdown] id="ooI1N7QC6Co1"
# #### Deciles <a name="deciles"></a>

# + [markdown] id="jixbwFvWSN5A"
# Uses the idea of percentiles, but splitting the data distribution into 10 parts. So each part represents $\frac{1}{10}$ of the sample or population. So finding the 4th Decile is finding the 40th Percentile.
# 
# So finding 4th Decile for the weights:

# + id="iw6R31qziiUB" outputId="639b632a-ea29-484f-d491-ca5ae7709b13" colab={"base_uri": "https://localhost:8080/"}
dfPoll['weight_kg'].quantile(0.4)

# + [markdown] id="EVzUZu1J6Co-"
# #### Quartiles <a name="quartiles"></a>

# + [markdown] id="Z-E1Py6ScKDo"
# Quartiles split the data in four more or less quarters. 
# # 1st Quartile $Q_1$ $\rightarrow$ value that have 25% of values below or to the left of it; # # 2nd Quartile $Q_2$ $\rightarrow$ 50%, median; # # 3rd Quartile $Q_3$ $\rightarrow$ value that have 75% of values below or to the left of it; # # Again we can use the concept of Percentile to find the $Q_1 = P_{25th}, Q_2 = P_{50th}$ and $Q_3 = P_{75th}$. # # Let's check if the median is really $Q_2$: # + id="YNoQ5A8b7fen" outputId="06327bdf-45db-42e0-bb63-7d5d6c7edce1" colab={"base_uri": "https://localhost:8080/"} Q2 = dfPoll['weight_kg'].quantile(0.50) Q2 # + id="VzBERrp37zG2" outputId="caec4b10-694b-4a06-818c-7ac57d63a9ba" colab={"base_uri": "https://localhost:8080/"} dfPoll['weight_kg'].median() # + [markdown] id="fpfPBhsC6dYw" # ## Measures of dispersion <a name="mesdis"></a> # + [markdown] id="BJAyGQOg6w0f" # ### Variance <a name="variance"></a> # + [markdown] id="eMuYImG25RPW" # Variance is the difference of the squared expected value and the expected value, so we would have: # # $$\sigma^2 = E[X^2] - E[X] = \frac{1}{N}\displaystyle\sum_{i=1}^{N}(x_i - \mu)^2$$ # # And for sample ([unbiased estimator](https://en.wikipedia.org/wiki/Bessel%27s_correction)): # # $$\sigma^2 = \frac{1}{N - 1}\displaystyle\sum_{i=1}^{N}(x_i - \mu)^2$$ # # It's the mean of the quadratic distance in relation to the mean of the data. 
# + id="A8jT6G-ZBzGL" outputId="e886028b-bb08-426d-a39d-2c84df74ee76" colab={"base_uri": "https://localhost:8080/"} mu = dfPoll['family_members'].mean() sum((dfPoll['family_members'] - mu)**2)/(len(dfPoll['family_members'])) # + id="DWiKaG2jBzS6" outputId="2004b096-d8b6-4b84-a154-8ca27f36a506" colab={"base_uri": "https://localhost:8080/"} dfPoll['family_members'].var(ddof = 0) # direct approach for population, degrees of freedom # + [markdown] id="oKhzpPNv6xMt" # ### Standard deviation <a name="sd"></a> # # For the standard deviation, we must analyse if we are dealing with a population or sample, so for the population: # # $$\sigma = \sqrt{\sigma^2} = \sqrt{\frac{1}{N}\displaystyle\sum_{i=1}^{N}(x_i - \mu)^2}$$ # # And for sample (unbiased estimator): # # $$\sigma = \sqrt{\sigma^2} = \sqrt{\frac{1}{N - 1}\displaystyle\sum_{i=1}^{N}(x_i - \mu)^2}$$ # + id="bdRDSGxORysI" outputId="1eac8479-ece5-44b2-9323-0e48cc635029" colab={"base_uri": "https://localhost:8080/"} import math mu = dfPoll['family_members'].mean() math.sqrt(sum((dfPoll['family_members'] - mu)**2)/(len(dfPoll['family_members']))) # + id="LjUKcOnKSMzj" outputId="7073eba9-f91e-4dc3-b4b9-3e1476d4287a" colab={"base_uri": "https://localhost:8080/"} dfPoll['family_members'].std(ddof = 0) # direct approach for population # + [markdown] id="eGW8lkpoSfbG" # Note that for both variance and standard deviation we must use `ddof = 0` if we want to deal with a population. [By default](https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.DataFrame.var.html) is `ddof = 1`, which is used for samples, with the equation demonstrated. 
# + [markdown] id="QNGpOWbI9TiR"
# ### Interquartile range <a name="iqr"></a>
#
#

# + [markdown] id="bj4dRCnH8Hsw"
# It's simply the difference between the 3rd quartile $Q_3$ and the 1st quartile $Q_1$:
#
# $$IQR = Q_3 - Q_1$$
#
# Finding the IQR for the height:

# + id="Paamv6kqTq27" outputId="6cb005e7-b0ea-4f14-8499-fa063e5cd99e" colab={"base_uri": "https://localhost:8080/"}
height = dfPoll['height_m']
Q1 = height.quantile(0.25)
Q3 = height.quantile(0.75)

IQR = Q3 - Q1
IQR

# + [markdown] id="N1iot1pFVcdj"
# Usually we use the interquartile range to find discrepant values called outliers. We can use the following idea:
#
# $$outliers = x_i\;<\;(Q_1 - k.IQR)\;\vee\;x_i\;>\;(Q_3 + k.IQR)$$
#
# In many places we would find $k = 1.5$, and it is usually enough for treating the data, but there are (somewhat rare) exceptions.
#
# Let's code it for finding outliers in height's column:

# + id="3QakRmYOUO96" outputId="2e067ed9-33db-4d99-cbfd-59481cd4874c" colab={"base_uri": "https://localhost:8080/", "height": 81}
outliers = dfPoll.loc[
    (dfPoll.loc[:, "height_m"] < (Q1 - 1.5 * IQR))
    | (dfPoll.loc[:, "height_m"] > (Q3 + 1.5 * IQR)),
    :,]
outliers

# + [markdown] id="0ObzV9qf7E53"
# ### Range <a name="range"></a>

# + [markdown] id="0TTOpbmv8llK"
# It's the difference between the maximum value and the minimum value of the data:
#
# $$R(X) = max(X) - min(X)$$

# + id="3MaQoqBwXtKa" outputId="ec0180e9-2d79-4b41-8b4e-efdcaa77d15c" colab={"base_uri": "https://localhost:8080/"}
R = dfPoll['brothersplus_you'].max() - dfPoll['brothersplus_you'].min()
R

# + [markdown] id="YisCzs11-Urz"
# ## Table of frequencies <a name="table"></a>
#
# Each variable can have different values. For example, we can have the variable `Fruit` with the situation that some values are 'Apple', 'Banana' and many others. We can analyse the frequency with which these values appear.
# + [markdown] id="XsWBEr21Sgus" # | Fruit | Simple absolute frequency $f_i$ | Simple relative frequency $n_i$ | Cumulative frequency $F_i$ | Relative cumulative frequency $N_i$ | # |-|-|-|-|-| # | Apple | 3 | $\frac{3}{15}$ | 3 | $\frac{3}{15}$ | # | Orange | 3 | $\frac{3}{15}$ | 3 + 3 = 6 | $\frac{6}{15}$ | # | Banana | 7 | $\frac{7}{15}$ | 3 + 3 + 7 = 13 | $\frac{13}{15}$ | # | Pear | 2 | $\frac{2}{15}$ | 3 + 3 + 7 + 2 = 15 | $\frac{15}{15}$ | # | $\sum$ | 15 | 1 | | | # + [markdown] id="tWc9_0EB_2yE" # ### Absolute <a name="absolute"></a> # + [markdown] id="4hJcQESpJ-5g" # It's the number of times that a value appears. If we sum all the absolute frequencies we can find the total number of data $N$, so with $f_i$ being an absolute frequency for a value we have: # # $$\displaystyle\sum_{i=1}^{n}f_i = N$$ # # We can count the frequency for each value with the following code: # + id="MkECPccHizGr" outputId="9ecf054f-daa9-47af-889d-04dc2ae1097f" colab={"base_uri": "https://localhost:8080/"} dfPoll['freq_activity'].value_counts() # + [markdown] id="ETsosUOJjfHR" # And summing them give us the number of rows for the column: # + id="4dlGz_jgjfPZ" outputId="4157ad89-b5ca-4ca3-d477-82161f00e548" colab={"base_uri": "https://localhost:8080/"} sum(dfPoll['freq_activity'].value_counts()) # + [markdown] id="yo0nTmsm_2yK" # ### Relative <a name="relative"></a> # + [markdown] id="VbfRlqCJK8ap" # It's proportion of the frequency of a value $f_i$ compared to all data $N$. The sum of the relative frequencies must be 1: # # $$n_i = \frac{f_i}{N}$$ # # The same idea of counting the times a value appears but in relation to all data: # + id="erO2SS3TizHs" outputId="73607aeb-8717-464c-ebba-d437897edde9" colab={"base_uri": "https://localhost:8080/"} df = dfPoll['freq_activity'].value_counts() df / len(dfPoll['freq_activity']) # + [markdown] id="4_GL406S_2yP" # ### Simple <a name="simple"></a> # + [markdown] id="guojaYKlM2Iu" # It works independently for each value. 
So we would find a frequency based only on a value and the total of the data. # + [markdown] id="pSInRnXv_2yT" # ### Cumulative <a name="cumulative"></a> # # # + [markdown] id="NMMpmJSxNbOl" # Cumulative frequency depends on all values because we must sum them until we have the data $N$. It works for Absolute and Relative frequencies. So, in the first row we would have $f_1$ or $n_1$, second row $f_1 + f_2$ or $n_1 + n_2$ and so on. # # Coding we would have: # + id="ldHDVDhMizHi" outputId="253a333b-25a5-40fd-9b82-7b38e9929f73" colab={"base_uri": "https://localhost:8080/"} dfPoll['freq_activity'].value_counts().cumsum() # + [markdown] id="0ehAISERBksK" # ### Complete frequency table examples <a name="freqex"></a> # # + [markdown] id="s8JDmo-LkG5A" # Let's make a complete table of frequencies for the variable `freq_activity`: # + id="_clBkSx5kQHM" outputId="c027d012-224c-4eee-fdd6-1fd27896c67f" colab={"base_uri": "https://localhost:8080/", "height": 269} dfFrequencies = dfPoll['freq_activity'].value_counts().to_frame() dfFrequencies['relative'] = dfPoll['freq_activity'].value_counts()/len(dfPoll['freq_activity']) dfFrequencies['cumulative'] = dfPoll['freq_activity'].value_counts().cumsum() dfFrequencies['relcum'] = (dfPoll['freq_activity'].value_counts()/len(dfPoll['freq_activity'])).cumsum() dfFrequencies = dfFrequencies.reset_index() row = {'index':'∑', 'freq_activity': sum(dfFrequencies['freq_activity']), 'relative': sum(dfFrequencies['relative'])} dfFrequencies = dfFrequencies.append(row, ignore_index = True) dfFrequencies.columns = ['Activity', 'Simple absolute frequency', 'Simple relative frequency', 'Cumulative absolute frequency', 'Cumulative relative frequency'] dfFrequencies = dfFrequencies.fillna('-') dfFrequencies # + [markdown] id="N_Xh3G5hwt1W" # Let's make a complete table of frequencies for the variable `state`: # + id="JjUHx6t0wt1p" outputId="73fb90e5-4785-42ac-88d3-3a0485d757a1" colab={"base_uri": "https://localhost:8080/", "height": 363} 
# Same complete frequency table as above, now for `state`.
dfFrequencies = dfPoll['state'].value_counts().to_frame()
dfFrequencies['relative'] = dfPoll['state'].value_counts()/len(dfPoll['state'])
dfFrequencies['cumulative'] = dfPoll['state'].value_counts().cumsum()
dfFrequencies['relcum'] = (dfPoll['state'].value_counts()/len(dfPoll['state'])).cumsum()
dfFrequencies = dfFrequencies.reset_index()
# Totals row (∑); cumulative columns become '-' via the fillna below.
row = {'index':'∑', 'state': sum(dfFrequencies['state']), 'relative': sum(dfFrequencies['relative'])}
# DataFrame.append was deprecated in pandas 1.4 and removed in 2.0;
# pd.concat with ignore_index reproduces the old behaviour.
dfFrequencies = pd.concat([dfFrequencies, pd.DataFrame([row])], ignore_index = True)
dfFrequencies.columns = ['State', 'Simple absolute frequency', 'Simple relative frequency',
                         'Cumulative absolute frequency', 'Cumulative relative frequency']
dfFrequencies = dfFrequencies.fillna('-')
dfFrequencies

# + [markdown] id="ZOr0-0LYAK3g"
# ## Graphs <a name="graphs"></a>

# + [markdown] id="dECWcxBDANlN"
# ### Pie chart <a name="pie"></a>

# + [markdown] id="doHnAnnwXM7N"
# Pie charts use the concept of relative frequency, considering how we want to represent a piece of the circle as a value of a variable, usually showing the percentage that it occupies. Pie charts are a good manner to represent qualitative nominal data, because they also avoid the idea of showing some trending.
#
# Let's do it for `schooling` variable:

# + id="LaSkPXEQTpVn" outputId="77c86f2f-279f-4153-d66e-5e6ab3418cb9" colab={"base_uri": "https://localhost:8080/", "height": 922}
import matplotlib.pyplot as plt
import matplotlib as mpl
# NOTE(review): matplotlib >= 3.6 renames this style to 'seaborn-v0_8-dark';
# keep the old name while the pinned matplotlib predates the rename.
mpl.style.use('seaborn-dark')

schooling = dfPoll['schooling'].value_counts()

fig = plt.gcf()
fig.set_size_inches(16, 16)
plt.pie(schooling, labels = schooling.index, autopct = '%1.2f%%', shadow = True, startangle = 90)
plt.title("Grau de escolaridade", fontweight = 'bold', fontsize = '20', loc = 'left', bbox={'facecolor':'0.8', 'pad':5})
plt.axis('equal')
plt.show()

# + [markdown] id="8HcOLDGBedit"
# There are some cases in which we want to represent quantitative data in a pie chart. One way is to create categories assigning a range for each one.
A variable known for using in this way is weights, creating a range of weights to represent each category in a pie chart. # # Let's do it for the heights: # # + id="mWgRf1WNb3Rb" outputId="0575282d-fdbe-4fdc-9b60-ce607d66f9c8" colab={"base_uri": "https://localhost:8080/", "height": 922} ranges = [1.2, 1.6, 1.7, 1.75, 1.8, 1.85] height = dfPoll['height_m'].groupby(pd.cut(dfPoll['height_m'], ranges)).count() fig = plt.gcf() fig.set_size_inches(16, 16) plt.pie(height, labels = height.index, autopct = '%1.2f%%', shadow = True, startangle = 90) plt.title("Faixas de altura", fontweight = 'bold', fontsize = '20', loc = 'left', bbox={'facecolor':'0.8', 'pad':5}) plt.axis('equal') plt.show() # + [markdown] id="Ghoy5ys-g4VQ" # Activities: # + id="Q6NS878EzKD7" outputId="b544a2c9-d705-45e4-b979-bcb72c5e9600" colab={"base_uri": "https://localhost:8080/", "height": 922} freq = dfPoll['freq_activity'].value_counts() fig = plt.gcf() fig.set_size_inches(16, 16) plt.pie(freq, labels = freq.index, autopct = '%1.2f%%', shadow = True, startangle = 90) plt.title("Atividades mais frequentes", fontweight = 'bold', fontsize = '20', loc = 'left', bbox={'facecolor':'0.8', 'pad':5}) plt.axis('equal') plt.show() # + [markdown] id="O14R9fNRg_O1" # Number of people who work/study: # + id="1mY2Dkm5Yj8O" outputId="67a7cb23-420f-4384-c123-26150ffa5751" colab={"base_uri": "https://localhost:8080/", "height": 922} work = dfPoll['work_study'].value_counts() fig = plt.gcf() fig.set_size_inches(16, 16) plt.pie(work, labels = work.index, autopct = '%1.2f%%', shadow = True, startangle = 90) plt.title("Trabalha/estuda", fontweight = 'bold', fontsize = '20', loc = 'left', bbox={'facecolor':'0.8', 'pad':5}) plt.axis('equal') plt.show() # + [markdown] id="2PmB39aXAQjp" # ### Bar chart <a name="bar"></a> # + [markdown] id="Y_cmeRLRf3zi" # For bar charts generally we count the absolute frequency for each value on a variable and represent using bars. 
The bigger the bar, the bigger the frequency the value appears. # + id="BOGzlyYyrdEG" outputId="a4c7dbf0-334f-42f1-f711-01aca6ffe1e2" colab={"base_uri": "https://localhost:8080/", "height": 508} sex = dfPoll['sex'].value_counts().to_frame() sex.reset_index(level=0, inplace=True) sex.columns = ['Sexo', 'Quantidade'] sex.plot.bar(x = 'Sexo', y = 'Quantidade', rot = 0, title = "Número de homens e mulheres"); fig = plt.gcf() fig.set_size_inches(8, 8) plt.show(block=True); # + [markdown] id="KxAUAs3hhDGs" # Age range: # + id="2nCT7lO1SDBC" outputId="522c4f02-550b-45e6-a86b-05a6e28dded4" colab={"base_uri": "https://localhost:8080/", "height": 508} ranges = [10, 20, 30, 40, 50, 60] age = dfPoll['age'].groupby(pd.cut(dfPoll['age'], ranges)).count().to_frame() age.columns = ['Quantidade'] age.reset_index(level=0, inplace=True) age.columns = ['Faixa etária', 'Quantidade'] age.plot.bar(x = 'Faixa etária', y = 'Quantidade', rot = 0, title = "Número de pessoas por faixa etária"); fig = plt.gcf() fig.set_size_inches(8, 8) plt.show(block=True); # + [markdown] id="a_Ks-GsjhLnl" # States: # + id="DM_EES0EUh3_" outputId="934f573b-9b7a-4f79-f0a5-b729ffe8d777" colab={"base_uri": "https://localhost:8080/", "height": 508} state = dfPoll['state'].value_counts().to_frame() state.reset_index(level=0, inplace=True) state.columns = ['Estado', 'Quantidade'] state.plot.bar(x = 'Estado', y = 'Quantidade', rot = 0, title = "Número de pessoas por estado"); fig = plt.gcf() fig.set_size_inches(8, 8) plt.show(block=True); # + [markdown] id="9lTwfUlrhNDy" # Number of brothers including the person: # + id="j5_N7m1RX3dk" outputId="ae84130f-301a-4adf-f2ca-eb1ff964fd48" colab={"base_uri": "https://localhost:8080/", "height": 508} brothers = dfPoll['brothersplus_you'].value_counts().to_frame() brothers.reset_index(level=0, inplace=True) brothers.columns = ['Irmãos', 'Quantidade'] brothers.plot.bar(x = 'Irmãos', y = 'Quantidade', rot = 0, title = "Pessoas com um número de irmãos contando elas mesmas"); fig 
= plt.gcf() fig.set_size_inches(8, 8) plt.show(block=True); # + [markdown] id="xAWObdvE_DSg" # ### Pareto chart <a name="pareto"></a> # + [markdown] id="R3XL5tFs_IWg" # A Pareto chart combines both bar and line graph, where individual values are represented in descending order by bars, and the cumulative total is represented by the line. It's usually used for quality control. With the complete table of frequencies made before we can do this graph without much problem. # # Let's do it for `freq_activity`: # + id="E7p5FLOp0W8e" outputId="61ad12be-7b73-43b4-8b11-a48d567fc351" colab={"base_uri": "https://localhost:8080/", "height": 479} import matplotlib.pyplot as plt from matplotlib.ticker import PercentFormatter freq = dfPoll['freq_activity'].value_counts().to_frame() freq.columns = ['Quantidade'] freq = freq.sort_values(by='Quantidade',ascending=False) freq["cumpercentage"] = freq["Quantidade"].cumsum()/freq["Quantidade"].sum()*100 fig, ax = plt.subplots() ax.bar(freq.index, freq["Quantidade"], color="C0") ax2 = ax.twinx() ax2.plot(freq.index, freq["cumpercentage"], color="C1", marker="D", ms=7) ax2.yaxis.set_major_formatter(PercentFormatter()) ax.tick_params(axis="y", colors="C0") ax2.tick_params(axis="y", colors="C1") fig = plt.gcf() fig.set_size_inches(14, 8) plt.show() # + [markdown] id="tY7WE4490XN8" # Doing Pareto chart for variable `state`: # + id="6r9wVIEU_Iev" outputId="e5c502b3-31cc-481c-91a4-e11750b3f518" colab={"base_uri": "https://localhost:8080/", "height": 479} import matplotlib.pyplot as plt from matplotlib.ticker import PercentFormatter state = dfPoll['state'].value_counts().to_frame() state.columns = ['Quantidade'] state = state.sort_values(by='Quantidade',ascending=False) state["cumpercentage"] = state["Quantidade"].cumsum()/state["Quantidade"].sum()*100 fig, ax = plt.subplots() ax.bar(state.index, state["Quantidade"], color="C0") ax2 = ax.twinx() ax2.plot(state.index, state["cumpercentage"], color="C1", marker="D", ms=7) 
ax2.yaxis.set_major_formatter(PercentFormatter()) ax.tick_params(axis="y", colors="C0") ax2.tick_params(axis="y", colors="C1") fig = plt.gcf() fig.set_size_inches(14, 8) plt.show() # + [markdown] id="qlr0pNKgAQjx" # ### Boxplot <a name="boxplot"></a> # + [markdown] id="yvMy6rR8_Au4" # (Image from [Wikipedia](https://en.wikipedia.org/wiki/Box_plot)) # ![Boxplot](https://upload.wikimedia.org/wikipedia/commons/thumb/1/1a/Boxplot_vs_PDF.svg/550px-Boxplot_vs_PDF.svg.png) # # Boxplot is probably one of the graphs that need more explanations. It's important to have the knowledge of the measures of position and dispersion to make a boxplot, given that it combines them. # # The main points of the boxplot are the quantiles. We use the $Q_1$ which is $P_{25th}$, $P_{50th} = median(x)$ and $Q_3 = P_{75th}$. Beyond that we use the concept of outliers demonstrated before. Now we can plot a proper boxplot. # # Doing it for heights (as we did before calculating the outliers): # + id="QyNvDWREv8TE" outputId="f93770db-6869-4537-cd23-b30272244971" colab={"base_uri": "https://localhost:8080/", "height": 496} fig = plt.gcf() fig.set_size_inches(8, 8) dfPoll.boxplot(column=['height_m']) # + [markdown] id="cNmjm9nkjNTQ" # As expected the outlier was the same we encountered before. # # For age: # + id="8JEglLPoxWqo" outputId="67d23331-85ef-4c69-d4bd-86b318cb449b" colab={"base_uri": "https://localhost:8080/", "height": 496} fig = plt.gcf() fig.set_size_inches(8, 8) dfPoll.boxplot(column=['age']) # + id="aNTANJnYXVKc" outputId="a687f215-3c3d-4397-8842-511a6c7d7c83" colab={"base_uri": "https://localhost:8080/", "height": 496} fig = plt.gcf() fig.set_size_inches(8, 8) dfPoll.boxplot(column=['weight_kg']) # + [markdown] id="BPHc1Ft_AQj3" # ### Scatter plot <a name="dispersion"></a> # + [markdown] id="xMdzdHc1kDgA" # Usually we use scatter plots to find relationships between variables. In case we see a correlation we could even do a trending line (which is not part of the scatter plot). 
Weight and height generally has some degree of correlation. # # Let's do it for them: # + id="6yWcVejyx07q" outputId="fec6c1fa-b462-4221-db0c-c163444ab4f9" colab={"base_uri": "https://localhost:8080/", "height": 588} x = dfPoll['height_m'] y = dfPoll['weight_kg'] plt.scatter(x, y, c = 'DarkBlue') z = np.polyfit(x, y, 1) p = np.poly1d(z) plt.plot(x,p(x),"r--") fig = plt.gcf() fig.set_size_inches(12, 10) plt.show() # + [markdown] id="2fGQWpMbAQj8" # ### Line chart <a name="line"></a> # # # + [markdown] id="O_dFL-cppXQV" # A line chart is often used to visualize a trend in data over intervals of time – a time series – thus the line is often drawn chronologically. We don't have any data to represent in this way, but we can see how a variable change when other variable grows like the scatter plot. # # Let's do it for age and height: # + id="rscmMPzmzprl" outputId="5bd54a0b-dd12-4859-9446-c1e59f942714" colab={"base_uri": "https://localhost:8080/", "height": 602} df = dfPoll df = df.sort_values(by=['age', 'height_m']) df.plot(x = 'age', y = 'height_m', kind = 'line') fig = plt.gcf() fig.set_size_inches(12, 10) plt.show() # + [markdown] id="dJsBdIHfpxv_" # Note that we had to sort the values to make some sense of the data. Scatterplot the values are sorted by default to observe a possible correlation. # + [markdown] id="JUAGT57c4t8C" # ## References <a name="refs"></a> # + [markdown] id="spmxpXYK5Rju" # <NAME>. Introductory Statistics. 4th Edition. Academic Press. February 2017 # + [markdown] id="cmpLHf_859CK" # <NAME>. Noções de Probabilidade e Estatística. 7th edition. Edusp. 2015.
1-DescriptiveAnalysis/1_DescriptiveAnalysis.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 2
#     language: python
#     name: python2
# ---

# %matplotlib inline
# NOTE(review): the original bare "% matplotlib inline" line is a syntax error
# when this jupytext script runs as plain Python; sibling notebooks in this
# repository use the commented "# %matplotlib inline" form.

import numpy as np
import matplotlib.pyplot as plt

from sklearn.decomposition import PCA

# Load the event traces and keep a single trace (third column) to analyse.
fname = 'events.csv'
events = np.loadtxt(fname, delimiter=',')

event = events[:, 2]  # look at one trace

# ### Create a new data variable X by scanning the trace

# Slide a window of `window_size` samples along the trace; each window becomes
# one row of X (same rows as the original nested-loop version, built by slicing).
window_size = 50
X = np.asarray([event[i:i + window_size]
                for i in range(len(event) - window_size)])

# ### Perform PCA on scanned trace

# +
n_components = 5

pca = PCA(n_components = n_components)
scores = pca.fit_transform(X)
variance = pca.explained_variance_ratio_

# One subplot per principal component, annotated with its explained variance.
fig = plt.figure(figsize = (12, 15))
for i in range(n_components):
    fig.add_subplot(n_components, 1, 1 + i)
    plt.plot(scores[:, i])
    plt.title('PCA: ' + str(i + 1) + ', Explained variance: ' + str(variance[i] * 100)[:4] + '%')

plt.tight_layout()
# -
.ipynb_checkpoints/PCA_on_traces-checkpoint.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # SLU 05 - Covariance and Correlation: Example notebook # + import pandas as pd import numpy as np # just to get rid of an annoying numpy warning import warnings warnings.filterwarnings("ignore", message="numpy.dtype size changed") import seaborn as sns from matplotlib import pyplot as plt # %matplotlib inline # - # In this notebook, you will see examples on how to calculate: # - Covariance # - Pearson correlation # - Spearman correlation # - Correlation matrix # # You will also find examples of: # - Spurious correlations # - Observational and Experimental Data # ## Loading and previewing the data # + housing_data = pd.read_csv('data/HousingData.csv') housing_data.head() # - # # Covariance # # Calculating the covariance between `crime` and `poverty`: housing_data['crime'].cov(housing_data['poverty']) # # Pearson correlation # # Calculating the Pearson correlation between `crime` and `poverty`: housing_data['crime'].corr(housing_data['poverty'], method='pearson') # # Spearman correlation # # Calculating the Spearman correlation between `crime` and `poverty`: housing_data['crime'].corr(housing_data['poverty'], method='spearman') # # Correlation matrix # Using the standard Pandas method: housing_data.corr() # With visualization: sns.heatmap(housing_data.corr(), annot=True) # # Spurious correlation in Observational data # # The Pearson correlation between `crime` and `road_quality` is high: housing_data['crime'].corr(housing_data['roads_quality'], method='pearson') # However, this is likely a spurious correlation; It's unlikely that people are being directly driven to crime because of bad roads: Rather, it is poverty that results in more crime: housing_data['crime'].corr(housing_data['poverty'], method='pearson') # And it is also poverty that causes 
less investment in road infrastructure: housing_data['poverty'].corr(housing_data['roads_quality'], method='pearson')
S01 - Bootcamp and Binary Classification/SLU05 - Covariance and Correlation/Examples notebook.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + [markdown] toc=true # <h1>Table of Contents<span class="tocSkip"></span></h1> # <div class="toc"><ul class="toc-item"><li><span><a href="#Objectives" data-toc-modified-id="Objectives-1"><span class="toc-item-num">1&nbsp;&nbsp;</span>Objectives</a></span></li><li><span><a href="#Decision-Trees-at-a-High-Level" data-toc-modified-id="Decision-Trees-at-a-High-Level-2"><span class="toc-item-num">2&nbsp;&nbsp;</span>Decision Trees at a High Level</a></span><ul class="toc-item"><li><span><a href="#Simple-Example-of-a-Decision-Tree" data-toc-modified-id="Simple-Example-of-a-Decision-Tree-2.1"><span class="toc-item-num">2.1&nbsp;&nbsp;</span>Simple Example of a Decision Tree</a></span><ul class="toc-item"><li><span><a href="#Picturing-Decisions-as-a-Tree" data-toc-modified-id="Picturing-Decisions-as-a-Tree-2.1.1"><span class="toc-item-num">2.1.1&nbsp;&nbsp;</span>Picturing Decisions as a Tree</a></span></li></ul></li><li><span><a href="#Overview-of-Algorithm's-Steps" data-toc-modified-id="Overview-of-Algorithm's-Steps-2.2"><span class="toc-item-num">2.2&nbsp;&nbsp;</span>Overview of Algorithm's Steps</a></span></li></ul></li><li><span><a href="#Entropy/Information-Gain-and-Gini" data-toc-modified-id="Entropy/Information-Gain-and-Gini-3"><span class="toc-item-num">3&nbsp;&nbsp;</span>Entropy/Information Gain and Gini</a></span><ul class="toc-item"><li><span><a href="#Entropy" data-toc-modified-id="Entropy-3.1"><span class="toc-item-num">3.1&nbsp;&nbsp;</span>Entropy</a></span><ul class="toc-item"><li><span><a href="#Information-Gain" data-toc-modified-id="Information-Gain-3.1.1"><span class="toc-item-num">3.1.1&nbsp;&nbsp;</span>Information Gain</a></span></li></ul></li><li><span><a href="#Gini-Impurity" data-toc-modified-id="Gini-Impurity-3.2"><span 
class="toc-item-num">3.2&nbsp;&nbsp;</span>Gini Impurity</a></span></li></ul></li><li><span><a href="#With-sklearn" data-toc-modified-id="With-sklearn-4"><span class="toc-item-num">4&nbsp;&nbsp;</span>With <code>sklearn</code></a></span><ul class="toc-item"><li><span><a href="#Setting-up-Data" data-toc-modified-id="Setting-up-Data-4.1"><span class="toc-item-num">4.1&nbsp;&nbsp;</span>Setting up Data</a></span></li><li><span><a href="#Training-the-Model" data-toc-modified-id="Training-the-Model-4.2"><span class="toc-item-num">4.2&nbsp;&nbsp;</span>Training the Model</a></span></li><li><span><a href="#Predictions-and-Evaluation" data-toc-modified-id="Predictions-and-Evaluation-4.3"><span class="toc-item-num">4.3&nbsp;&nbsp;</span>Predictions and Evaluation</a></span></li></ul></li><li><span><a href="#Important-Terminology-Related-to-Decision-Trees" data-toc-modified-id="Important-Terminology-Related-to-Decision-Trees-5"><span class="toc-item-num">5&nbsp;&nbsp;</span>Important Terminology Related to Decision Trees</a></span></li><li><span><a href="#Challenges-with-Decision-Trees" data-toc-modified-id="Challenges-with-Decision-Trees-6"><span class="toc-item-num">6&nbsp;&nbsp;</span>Challenges with Decision Trees</a></span><ul class="toc-item"><li><span><a href="#Decision-Trees-are-Prone-to-Overfitting" data-toc-modified-id="Decision-Trees-are-Prone-to-Overfitting-6.1"><span class="toc-item-num">6.1&nbsp;&nbsp;</span>Decision Trees are Prone to Overfitting</a></span></li><li><span><a href="#Bias-Variance-with-Decision-Trees" data-toc-modified-id="Bias-Variance-with-Decision-Trees-6.2"><span class="toc-item-num">6.2&nbsp;&nbsp;</span>Bias-Variance with Decision Trees</a></span><ul class="toc-item"><li><span><a href="#Stopping-Criterion---Pruning-Parameters" data-toc-modified-id="Stopping-Criterion---Pruning-Parameters-6.2.1"><span class="toc-item-num">6.2.1&nbsp;&nbsp;</span>Stopping Criterion - Pruning Parameters</a></span></li></ul></li></ul></li><li><span><a 
href="#Feature-Importances" data-toc-modified-id="Feature-Importances-7"><span class="toc-item-num">7&nbsp;&nbsp;</span>Feature Importances</a></span></li><li><span><a href="#Conclusions" data-toc-modified-id="Conclusions-8"><span class="toc-item-num">8&nbsp;&nbsp;</span>Conclusions</a></span><ul class="toc-item"><li><span><a href="#Pros" data-toc-modified-id="Pros-8.1"><span class="toc-item-num">8.1&nbsp;&nbsp;</span>Pros</a></span></li><li><span><a href="#Cons" data-toc-modified-id="Cons-8.2"><span class="toc-item-num">8.2&nbsp;&nbsp;</span>Cons</a></span></li></ul></li></ul></div> # + import pandas as pd import numpy as np import seaborn as sns import matplotlib.pyplot as plt from sklearn.tree import DecisionTreeClassifier, plot_tree from sklearn.model_selection import train_test_split from sklearn.metrics import accuracy_score, plot_roc_curve, plot_confusion_matrix from sklearn.datasets import load_iris # %matplotlib inline # + [markdown] heading_collapsed=true # # Objectives # + [markdown] hidden=true # - Describe the decision tree modeling algorithm # - Use attribute selection methods to build different trees # - Explain the pros and cons of decision trees # - Interpret the feature importances of a fitted model # + [markdown] heading_collapsed=true # # Decision Trees at a High Level # + [markdown] hidden=true # > **Decision trees** are a supervised learning model that makes uses past data to form a graph/pathway which leads to the model making _decisions_ on it's predictions. # + [markdown] hidden=true # I like to think of decision trees to be a bunch of forks in the road. 
# + [markdown] hidden=true # <a title="<NAME> / Fork in the road" href="https://commons.wikimedia.org/wiki/File:Fork_in_the_road_-_geograph.org.uk_-_1355424.jpg"><img width="512" alt="Fork in the road - geograph.org.uk - 1355424" src="https://upload.wikimedia.org/wikipedia/commons/7/71/Fork_in_the_road_-_geograph.org.uk_-_1355424.jpg"></a> # + [markdown] hidden=true # Every time we make a decision, we split up, or _partition_, the data based on the features. # + [markdown] heading_collapsed=true hidden=true # ## Simple Example of a Decision Tree # + [markdown] hidden=true # Let's say we have this set of data: # # Work Status | Age | Favorite Website # ------------|-------|------------------------- # Student | Young | A # Working | Young | B # Working | Old | C # Working | Young | B # Student | Young | A # Student | Young | A # # # + [markdown] hidden=true # This can help us answer a couple questions: # # - If someone is a young worker, what website do we recommend? # - If someone is an old worker, what website then? # + [markdown] heading_collapsed=true hidden=true # ### Picturing Decisions as a Tree # + [markdown] hidden=true # ![](img/simple_decision_tree.png) # + [markdown] hidden=true # > Note our tree would look different depending on where we made our decisions. # + [markdown] heading_collapsed=true hidden=true # ## Overview of Algorithm's Steps # + [markdown] hidden=true # > Here's a great visual of a decision tree http://www.r2d3.us/visual-intro-to-machine-learning-part-1/ # + [markdown] hidden=true # 1. Organize data features and target # 2. Make a *decision* (a split) based on some *metric* using the features # * Data are split into partitions via *branches* # 3. Continue on with each partition, and do more splits for each using the features in that partition # 4. Keep doing that until a **stopping condition** is hit # - Number of data points in a final partition # - Layers deep # 5. 
To make predictions, run through the decision nodes (the forks in the road) # + [markdown] hidden=true # Now we have to determine what metric we use to make our split/decision! # + [markdown] heading_collapsed=true # # Entropy/Information Gain and Gini # + [markdown] hidden=true # The goal is to have our ultimate classes be fully "ordered" (for a binary dependent variable, we'd have the 1's in one group and the 0's in the other). # + [markdown] hidden=true # ![](img/information_gain_split.png) # + [markdown] hidden=true # One way to assess the value of a split is to measure how *disordered* our groups are, and there is a notion of *entropy* that measures precisely this. # + [markdown] heading_collapsed=true hidden=true # ## Entropy # + [markdown] hidden=true # The entropy of the whole dataset is given by: # # $\large E = -\Sigma^n_i p_i\log_2(p_i)$, # # where $p_i$ is the probability of belonging to the $i$th group, where $n$ is the number of groups (i.e. target values). # + [markdown] hidden=true # **Entropy will always be between 0 and 1. The closer to 1, the more disordered your group.** # + [markdown] heading_collapsed=true hidden=true # ### Information Gain # + [markdown] hidden=true # To calculate the entropy of a *split*, we're going to want to calculate the entropy of each of the groups made by the split, and then calculate a weighted average of those groups' entropies––weighted, that is, by the size of the groups. # + [markdown] hidden=true # For a given split, the **information gain** is simply the entropy of the parent group less the entropy of the split. # + [markdown] hidden=true # > We can essentially say **information gain** is the **_difference_** of the **parent's entropy** and the **_average_** of the **children's entropy** # + [markdown] hidden=true # For a given parent, then, we maximize our model's performance by *minimizing* the split's entropy. # # What we'd like to do then is: # # 1. Look at the entropies of all possible splits # 2. 
Choose the split with the lowest entropy # # In practice there are far too many splits for it to be practical for a person to calculate all these different entropies ... # # ... but we can make computers do these calculations for us! # # Moreover, we can **iterate** this algorithm on the resultant groups until we reach pure groups! # + [markdown] hidden=true # ----- # + [markdown] hidden=true # **Question**: Are we in fact guaranteed, proceeding in this way, to reach pure groups, no matter what our data looks like? # + [markdown] hidden=true # **Observation**: This algorithm looks for the best split **locally**. There is no regard for how an overall tree might look. That's what makes this algorithm ***greedy***. # + [markdown] heading_collapsed=true hidden=true # ## Gini Impurity # + [markdown] hidden=true # An alternative metric to entropy comes from the work of Corrado Gini. The Gini Impurity is defined as: # # $\large G = 1 - \Sigma_ip_i^2$, or, equivalently, $\large G = \Sigma_ip_i(1-p_i)$. # # where, again, $p_i$ is the probability of belonging to the $i$th group. # + [markdown] hidden=true # **Gini Impurity will always be between 0 and 0.5. 
The closer to 0.5, the more disordered your group.** # + [markdown] heading_collapsed=true # # With `sklearn` # + [markdown] heading_collapsed=true hidden=true # ## Setting up Data # + hidden=true iris = load_iris() print(iris.target_names) print(iris.feature_names) # + hidden=true # petal length and width features feature_used = iris.feature_names[2:] X = iris.data[:, 2:] y = iris.target X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=2) X_train.shape, X_test.shape # + [markdown] heading_collapsed=true hidden=true # ## Training the Model # + [markdown] hidden=true # > Check out the [DecisionTreeClassifier](https://scikit-learn.org/stable/modules/generated/sklearn.tree.DecisionTreeClassifier.html) documentation # + hidden=true tree_clf = DecisionTreeClassifier(max_depth=2) tree_clf.fit(X_train , y_train) # + [markdown] heading_collapsed=true hidden=true # ## Predictions and Evaluation # + hidden=true y_pred = tree_clf.predict(X_test) y_pred # + hidden=true acc = accuracy_score(y_test,y_pred) * 100 print("Accuracy: {0}".format(acc)) # + hidden=true plot_confusion_matrix(tree_clf,X_test,y_test) # + hidden=true f,ax = plt.subplots(figsize=(10,10)) plot_tree(tree_clf, ax=ax); # + [markdown] heading_collapsed=true # # Important Terminology Related to Decision Trees # + [markdown] hidden=true # - **Root Node:** Represents entire population or sample. # - **Decision Node:** Node that is split. # - **Leaf/ Terminal Node:** Node with no children. # - **Pruning:** Removing nodes. # - **Branch / Sub-Tree:** A sub-section of a decision tree. # - **Parent and Child Node:** A node divided into sub-nodes is the parent; the sub-nodes are its children. 
# # <img src='./img/decision_leaf.webp' width=600 /> # + [markdown] heading_collapsed=true # # Challenges with Decision Trees # + [markdown] heading_collapsed=true hidden=true # ## Decision Trees are Prone to Overfitting # + hidden=true # Using more data features this time feature_used = iris.feature_names[:] X = iris.data[:, :] y = iris.target X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=2) X_train.shape, X_test.shape # + hidden=true # Allow it to run the full default hyperparameters tree_clf = DecisionTreeClassifier() tree_clf.fit(X_train, y_train) # + hidden=true # Accuracy on training data tree_clf.score(X_train, y_train) # + hidden=true # Accuracy on test data tree_clf.score(X_test, y_test) # + [markdown] heading_collapsed=true hidden=true # ## Bias-Variance with Decision Trees # + [markdown] hidden=true # The CART algorithm will repeatedly partition data into smaller and smaller subsets until those final subsets are homogeneous in terms of the outcome variable. In practice this often means that the final subsets (known as the leaves of the tree) each consist of only one or a few data points. # # This tends to result in low-bias, high variance models. # + hidden=true # Allow it to run the full default hyperparameters tree_clf = DecisionTreeClassifier() tree_clf.fit(X_train, y_train) # Accuracy on training data & test data print('Training:', tree_clf.score(X_train, y_train)) print('Testing:', tree_clf.score(X_test, y_test)) # + hidden=true f,ax = plt.subplots(figsize=(10,10)) plot_tree(tree_clf, ax=ax); # + [markdown] heading_collapsed=true hidden=true # ### Stopping Criterion - Pruning Parameters # + [markdown] hidden=true # The recursive binary splitting procedure described above needs to know when to stop splitting as it works its way down the tree with the training data. 
# # **min_samples_leaf:** The most common stopping procedure is to use a minimum count on the number of training instances assigned to each leaf node. If the count is less than some minimum then the split is not accepted and the node is taken as a final leaf node. # # **max_leaf_nodes:** # Reduce the number of leaf nodes. # # **max_depth:** # Reduce the depth of the tree to build a generalized tree. # # **min_impurity_split :** # A node will split if its impurity is above the threshold, otherwise it will be a leaf. # + hidden=true #generally, think about what seems reasonable. #kind of figure out how to make the tree make a tree. # + hidden=true # Stop it from running too long tree_clf = DecisionTreeClassifier(max_depth=3) tree_clf.fit(X_train, y_train) # Accuracy on training data & test data print('Training:', tree_clf.score(X_train, y_train)) print('Testing:', tree_clf.score(X_test, y_test)) # + [markdown] heading_collapsed=true # # Feature Importances # + [markdown] hidden=true # The fitted tree has an attribute called `ct.feature_importances_`. What does this mean? Roughly, the importance (or "Gini importance") of a feature is a sort of weighted average of the impurity decrease at internal nodes that make use of the feature. The weighting comes from the number of samples that depend on the relevant nodes. # # > The importance of a feature is computed as the (normalized) total reduction of the criterion brought by that feature. It is also known as the Gini importance. See [`sklearn`'s documentation](https://scikit-learn.org/stable/modules/generated/sklearn.tree.DecisionTreeClassifier.html#sklearn.tree.DecisionTreeClassifier.feature_importances_). 
# + hidden=true dt = DecisionTreeClassifier() dt.fit(X, y) for fi, feature in zip(dt.feature_importances_, feature_used): print(fi, feature) # + [markdown] hidden=true # More on feature importances [here](https://towardsdatascience.com/the-mathematics-of-decision-trees-random-forest-and-feature-importance-in-scikit-learn-and-spark-f2861df67e3). # + [markdown] heading_collapsed=true # # Conclusions # + [markdown] hidden=true # - The decision tree is a "white-box" type of ML algorithm. It shares internal decision-making logic, which is not available in the black-box type of algorithms such as Neural Network. # - Its training time is faster compared to other algorithms such as neural networks. # - The decision tree is a non-parametric method, which does not depend upon probability distribution assumptions. # - Decision trees can handle high-dimensional data with good accuracy. # + [markdown] heading_collapsed=true hidden=true # ## Pros # + [markdown] hidden=true # - Easy to interpret and visualize # - Can easily capture non-linear patterns # - Require little data preprocessing from the user (no need to normalize data) # - Can be used for feature engineering such as variable selection and predicting missing values # - Make no assumptions about distribution because its non-parametric # + [markdown] heading_collapsed=true hidden=true # ## Cons # + [markdown] hidden=true # - Sensitive to noisy data (overfit) # - Trouble with imbalanced datasets
Phase_3/ds-decision_trees-main/decision_tree_modeling.ipynb