code
stringlengths
38
801k
repo_path
stringlengths
6
263
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 (ipykernel) # language: python # name: python3 # --- # # Customer Segmentation # # We will be using E-commerce dataset of user purchases and try develop a model that allow us to do two things: # # 1. Classifying customers into segments. # 2. Anticipate the purchases that will be made by a new customer, during the following year and # this, from its first purchase by assigning them appropriate cluster/segment # # ## ** Import dependancies ** #Natural Language Toolkit # !pip install --user -U nltk # + _cell_guid="705714b1-870b-4f34-b5bf-cd027dafaefe" _kg_hide-input=true _uuid="bb40a7b23734d82876d812fab6daecd83a46368c" from __future__ import division import pandas as pd import numpy as np import matplotlib as mpl import matplotlib.pyplot as plt import seaborn as sns import datetime, nltk, warnings import matplotlib.cm as cm import itertools from pathlib import Path from sklearn.preprocessing import StandardScaler from sklearn.cluster import KMeans from sklearn.metrics import silhouette_samples, silhouette_score from sklearn import preprocessing, model_selection, metrics, feature_selection from sklearn.model_selection import GridSearchCV, learning_curve from sklearn.svm import SVC from sklearn.metrics import confusion_matrix from sklearn import neighbors, linear_model, svm, tree, ensemble # from wordcloud import WordCloud, STOPWORDS from sklearn.ensemble import AdaBoostClassifier from sklearn.decomposition import PCA from IPython.display import display, HTML #import plotly.plotly as py #import plotly.graph_objs as go #from plotly.offline import init_notebook_mode,iplot #init_notebook_mode(connected=True) #warnings.filterwarnings("ignore") #plt.rcParams["patch.force_edgecolor"] = True #plt.style.use('fivethirtyeight') #mpl.rc('patch', edgecolor = 'dimgray', linewidth=1) # %matplotlib inline # 
# + [markdown] heading_collapsed=true
# ## ** Data Preparation **

# + _cell_guid="1063f9e0-e494-4873-939f-8aa5ca40cc89" _kg_hide-input=true _uuid="227fc0cb1d5216d52e057e1d2d7debd0e29abe46" hidden=true
# Read the raw transactions file.  Both identifier columns are kept as
# strings so that leading zeros and the cancellation prefix 'C' survive.
# BUGFIX: the original passed dtype={'InvoiceID': str}, but the dataset
# column is named 'InvoiceNo', so that dtype key was silently ignored.
df_initial = pd.read_csv('data.csv', encoding="ISO-8859-1",
                         dtype={'CustomerID': str, 'InvoiceNo': str})
print('Dataframe dimensions:', df_initial.shape)

# Parse invoice timestamps once, up front.
df_initial['InvoiceDate'] = pd.to_datetime(df_initial['InvoiceDate'])

# + hidden=true
# show first lines
display(df_initial[:5])
# -

# ## ** Exploratory Data Analysis **

# ### Identify null values

# Summarize column types together with the absolute and relative number of
# null values.  DataFrame.append was deprecated and removed in pandas 2.0,
# so the three one-row frames are stacked with pd.concat instead.
tab_info = pd.concat([
    pd.DataFrame(df_initial.dtypes).T.rename(index={0: 'column type'}),
    pd.DataFrame(df_initial.isnull().sum()).T.rename(index={0: 'null values (nb)'}),
    pd.DataFrame(df_initial.isnull().sum() / df_initial.shape[0] * 100).T.rename(
        index={0: 'null values (%)'}),
])
print('-' * 10 + " Display information about column types and number of null values " + '-' * 10)
display(tab_info)

# ### Remove data entries
# Note:
# * If you look at the CustomerID column then there are $\sim$25% data entries which are
# null.
# * That means there are $\sim$25% of data entries which aren't assigned to any
# customer(s).
# * It is impossible for us to map values for the customer and these entries. These are
# useless for the current exercise.
# * Because of all the above points we are deleting these data entries.
#
# + _cell_guid="f9de6b67-a588-43ab-8f51-b28efdee9e32" _kg_hide-input=true _uuid="9b915fa18b311e8f93ac862bd49d08d90e03ca48"
# Drop the rows that have no CustomerID: they cannot be attributed to any
# customer and are useless for segmentation.
df_initial.dropna(axis = 0, subset = ['CustomerID'], inplace = True)
print('Dataframe dimensions:', df_initial.shape)

# Re-check column types and null counts after the drop.
# DataFrame.append was removed in pandas 2.0 -> use pd.concat.
tab_info = pd.concat([
    pd.DataFrame(df_initial.dtypes).T.rename(index={0: 'column type'}),
    pd.DataFrame(df_initial.isnull().sum()).T.rename(index={0: 'null values (nb)'}),
    pd.DataFrame(df_initial.isnull().sum() / df_initial.shape[0] * 100).T.rename(
        index={0: 'null values (%)'}),
])
display(tab_info)

# + _cell_guid="baf1ff2e-646b-468b-b7b4-68343f388387" _uuid="6b988d1dee3deecafd54f2b3555d1f84b509d213"
# Remove exact duplicate rows.
print('Duplicate data entries: {}'.format(df_initial.duplicated().sum()))
df_initial.drop_duplicates(inplace = True)
# -

# ### Exploring data attributes
# ---
# #### ** Exploring the data attribute : Country **

# + _cell_guid="44abc17d-8858-457a-94e1-f012143fda87" _kg_hide-input=true _uuid="c4141f12a8b2c733539a75d398cadf0817ca0969"
# Count the distinct countries that appear in (customer, invoice) pairs.
temp = df_initial[['CustomerID', 'InvoiceNo', 'Country']].groupby(
    ['CustomerID', 'InvoiceNo', 'Country']).count()
temp = temp.reset_index(drop = False)
countries = temp['Country'].value_counts()
# typo fix: 'cuntries' -> 'countries'
print('No. of countries in dataframe: {}'.format(len(countries)))

# +
# Number of order lines per country, sorted descending.
temp_no_of_order_per_count = df_initial[['CustomerID', 'Country']].groupby(['Country']).count()
temp_no_of_order_per_count = temp_no_of_order_per_count.reset_index(drop = False)
# typo fix: 'Contry-wise' -> 'Country-wise'; the bare Python-2 `print`
# statement that followed was a no-op in Python 3 and has been removed.
print('-' * 10 + " Country-wise order calculation " + '-' * 10)
print(temp_no_of_order_per_count.sort_values(
    by='CustomerID', ascending=False).rename(
        index=str, columns={"CustomerID": "Country wise number of order"}))
# -

# #### ** Exploring the data attribute : Customers and products **

# The dataframe contains $\sim$400,000 entries. What are the number of users and products in these entries ?
# + _cell_guid="677f103d-d199-480c-bca4-fc08f7aa2e92" _kg_hide-input=true _uuid="dc2f4b48b76615721e6718efbd31fcd3faf16bec" pd.DataFrame([{'products': len(df_initial['StockCode'].value_counts()), 'transactions': len(df_initial['InvoiceNo'].value_counts()), 'customers': len(df_initial['CustomerID'].value_counts()), }], columns = ['products', 'transactions', 'customers'], index = ['quantity']) # - # As you can see that this dataset contain the recods of 4372 users who bought 3684 different items. # There are $\sim$22,000 transactions which are carried out. # Now we need to explore the number of products purchased in every transaction # + _cell_guid="72d6dede-4280-4afd-b61b-085ea8c73d67" _kg_hide-input=true _uuid="dd0d84bd4275a04e361b5b41924d11b7f6f2e9ff" temp = df_initial.groupby(by=['CustomerID', 'InvoiceNo'], as_index=False)['InvoiceDate'].count() nb_products_per_basket = temp.rename(columns = {'InvoiceDate':'Number of products'}) nb_products_per_basket[:10].sort_values('CustomerID') # - # Points to be noted here: # # * There are some users who bought only comes one time on the E-commerce platform and purchased one # item. The example of this kind of user is customerID 12346. # # * There are some users who frequently buy large number of items per order. The example of this kind of # user is customerID 12347. # # * If you notice Invoiceno data attribute then you can find out that there is prefix 'C' for one # invoice. This 'C' indicates that the particular transaction has been cancelled. 
# #### ** Analysis of cancelled orders ** # We need to count the number of transactions corresponding to cancelled orders # + _cell_guid="9b0e32d8-fc9c-4301-ac18-9c1d7cc5b54f" _kg_hide-input=true _uuid="076fba25ed8a2b38fddd83ff862fa21e7f790a11" nb_products_per_basket['order_cancelled'] = nb_products_per_basket['InvoiceNo'].apply( lambda x:int('C' in x)) display(nb_products_per_basket[:5]) n1 = nb_products_per_basket['order_cancelled'].sum() n2 = nb_products_per_basket.shape[0] percentage = (n1/n2)*100 print('Number of orders cancelled: {}/{} ({:.2f}%) '.format(n1, n2, percentage)) # - # Note that the number of cancelled transactions are quite large ( $\sim$16% of the total number of transactions). # Now, let's look at the first few lines of the dataframe # + _cell_guid="2f985d42-e0b9-4281-8f30-43c2c13955b9" _kg_hide-input=true _uuid="54f5b8a4bc832c1c396419223f43c41b9b0b27de" display(df_initial.sort_values('CustomerID')[:5]) # - # From the above output, we see that when an order is canceled, we have another transactions in the dataframe, mostly identical except for the **Quantity** and **InvoiceDate** variables. I decide to check if this is true for all the entries. 
# To do this, I decide to locate the entries that indicate a negative quantity and check if there is *systematically* an order indicating the same quantity (but positive), with the same description (**CustomerID**, **Description** and **UnitPrice**):

# + _cell_guid="06e26580-014e-432d-ab59-b5ceebb816cb" _kg_hide-input=false _uuid="b16ddfdd36696a4a92ba15acd387e7eca0757f31"
# For every cancellation (negative Quantity), look for a purchase by the same
# customer with the exact opposite quantity and the same description.
# BUGFIX: the original indexed rows positionally (col[0], col[1], col[2]);
# with the column order selected below, col[2] is 'StockCode', so the code
# compared a stock code against the Description column and the counterpart
# test could never match.  Label-based access is explicit and correct.
df_check = df_initial[df_initial['Quantity'] < 0][['CustomerID','Quantity',
                                                   'StockCode','Description','UnitPrice']]
for index, col in df_check.iterrows():
    if df_initial[(df_initial['CustomerID'] == col['CustomerID']) &
                  (df_initial['Quantity'] == -col['Quantity']) &
                  (df_initial['Description'] == col['Description'])].shape[0] == 0:
        print(df_check.loc[index])
        print(15*'-'+'>'+' HYPOTHESIS NOT FULFILLED')
        break
# -

# We see that the initial hypothesis is not fulfilled because of the existence of a '_Discount_' entry. I check again the hypothesis but this time discarding the '_Discount_' entries:

# + _cell_guid="50c6589e-4387-4a3e-a1ea-18bee5bb1dba" _kg_hide-input=true _uuid="3b375d17a84505d71b20dbbc43124aa6597a2ef2"
# Same check, ignoring 'Discount' lines (same label-based fix as above).
df_check = df_initial[(df_initial['Quantity'] < 0) & (df_initial['Description'] != 'Discount')][
    ['CustomerID','Quantity','StockCode','Description','UnitPrice']]
for index, col in df_check.iterrows():
    if df_initial[(df_initial['CustomerID'] == col['CustomerID']) &
                  (df_initial['Quantity'] == -col['Quantity']) &
                  (df_initial['Description'] == col['Description'])].shape[0] == 0:
        print(index, df_check.loc[index])
        print(15*'-'+'>'+' HYPOTHESIS NOT FULFILLED')
        break
# -

# Once more, we find that the initial hypothesis is not verified. Hence, cancellations do not necessarily correspond to orders that would have been made beforehand.
#
# At this point, I decide to create a new variable in the dataframe that indicates if part of the command has been canceled. For the cancellations without counterparts, a few of them are probably due to the fact that the buy orders were performed before December 2010 (the point of entry of the database).
# Below, I make a census of the cancel orders and check for the existence of counterparts:

# + _cell_guid="af540729-739b-45b3-858f-f1facf9f8ae6" _kg_hide-input=true _uuid="6f5c10794e09eb3d0dc83889d74b2029d6d24756"
# Work on a copy so df_initial stays intact; QuantityCanceled will record,
# on the ORIGINAL purchase line, how many units were later cancelled.
df_cleaned = df_initial.copy(deep = True)
df_cleaned['QuantityCanceled'] = 0

entry_to_remove = [] ; doubtfull_entry = []

for index, col in df_initial.iterrows():
    # Only cancellation lines (negative quantity) other than discounts matter.
    if (col['Quantity'] > 0) or col['Description'] == 'Discount': continue
    # Candidate counterparts: same customer & product, purchased strictly
    # BEFORE the cancellation, with a positive quantity.
    df_test = df_initial[(df_initial['CustomerID'] == col['CustomerID']) &
                         (df_initial['StockCode']  == col['StockCode']) &
                         (df_initial['InvoiceDate'] < col['InvoiceDate']) &
                         (df_initial['Quantity'] > 0)].copy()
    # Cancelation WITHOUT counterpart
    if (df_test.shape[0] == 0):
        doubtfull_entry.append(index)
    # Cancelation WITH a counterpart
    elif (df_test.shape[0] == 1):
        index_order = df_test.index[0]
        df_cleaned.loc[index_order, 'QuantityCanceled'] = -col['Quantity']
        entry_to_remove.append(index)
    # Various counterparts exist in orders: we delete the last one
    elif (df_test.shape[0] > 1):
        # Most recent purchase first; attach the cancellation to the first
        # purchase large enough to cover the cancelled quantity.
        df_test.sort_index(axis=0 ,ascending=False, inplace = True)
        for ind, val in df_test.iterrows():
            if val['Quantity'] < -col['Quantity']: continue
            df_cleaned.loc[ind, 'QuantityCanceled'] = -col['Quantity']
            entry_to_remove.append(index)
            break
# -

# In the above function, I checked the two cases:
# 1. a cancel order exists without counterpart
# 2. there's at least one counterpart with the exact same quantity
#
# The index of the corresponding cancel order are respectively kept in the `doubtfull_entry` and `entry_to_remove` lists whose sizes are:

# + _cell_guid="f1d3a68d-fa59-4671-8be1-cdbb646ce13f" _kg_hide-input=true _uuid="d98a0917de35db7afe31c69c324cd32e934edd52"
print("entry_to_remove: {}".format(len(entry_to_remove)))
print("doubtfull_entry: {}".format(len(doubtfull_entry)))
# -

# Among these entries, the lines listed in the doubtfull_entry list correspond to the entries indicating a cancellation but for which there is no command beforehand.
# In practice, I decide to delete all of these entries, which count respectively for $\sim$ 1.4% and 0.2% of the dataframe entries.
#
# Now I check the number of entries that correspond to cancellations and that have not been deleted with the previous filter:

# + _cell_guid="50f6c074-08cc-4c55-8285-d674d0d84b45" _kg_hide-input=true _uuid="c523275748742927f689725305e90ae6fdeb4136"
# Drop both the matched cancellations and the doubtful ones, then list the
# residual negative-quantity lines (excluding the 'D' = Discount stock code).
df_cleaned.drop(entry_to_remove, axis = 0, inplace = True)
df_cleaned.drop(doubtfull_entry, axis = 0, inplace = True)
remaining_entries = df_cleaned[(df_cleaned['Quantity'] < 0) & (df_cleaned['StockCode'] != 'D')]
print("nb of entries to delete: {}".format(remaining_entries.shape[0]))
remaining_entries[:5]
# -

# If one looks, for example, at the purchases of the consumer of one of the above entries and corresponding to the same product as that of the cancellation, one observes:

# + _cell_guid="86494812-a35a-49a8-a80d-5b2ef3b0913f" _uuid="5ded98b83a85e0fd038b1a5c5edb67d7773d41ee"
# BUGFIX: CustomerID is loaded with dtype=str, so comparing against the
# integer 14048 can never match -- compare against the string instead.
df_cleaned[(df_cleaned['CustomerID'] == '14048') & (df_cleaned['StockCode'] == '22464')]
# -

# We see that the quantity canceled is greater than the sum of the previous purchases.
#
# ---
#
# #### **Analysis of the StockCode**
#
# Above, it has been seen that some values of the ** StockCode ** variable indicate a particular transaction (i.e. D for _Discount_).
# I check the contents of this variable by looking for the set of codes that would contain only letters:

# + _cell_guid="e00212c8-5c1e-4dda-a392-cbc68c1964b1" _kg_hide-input=true _uuid="57e546917a0ea9a59a0e1dc3e0f9179c7efa66b5"
# BUGFIX: the original pattern '^[a-zA-Z]+' only anchors the start, so any
# code BEGINNING with a letter matched.  The stated intent is codes made of
# letters ONLY, so the pattern is anchored at both ends.
list_special_codes = df_cleaned[df_cleaned['StockCode'].str.contains('^[a-zA-Z]+$', regex=True)]['StockCode'].unique()
list_special_codes

# + _cell_guid="0a225335-7d6e-4c5b-a874-801e3f329f10" _kg_hide-input=true _uuid="ce078be30fea360c161b449a8cb666d98808e936"
# Show one sample description for each special (letters-only) stock code.
for code in list_special_codes:
    print("{:<15} -> {:<30}".format(code, df_cleaned[df_cleaned['StockCode'] == code]['Description'].unique()[0]))
# -

# We see that there are several types of peculiar transactions, connected e.g. to port charges or bank charges.
#
#
# ___
# #### Analysis of Basket Price
#
#
# I create a new variable that indicates the total price of every purchase:

# + _cell_guid="3cbf20c0-0a44-49dc-96c3-ffd5455ddf0b" _kg_hide-input=true _uuid="5f070241e41d989ed3de0769d9f35f330d086415"
# Net line value = unit price x (ordered quantity minus cancelled quantity).
df_cleaned['TotalPrice'] = df_cleaned['UnitPrice'] * (df_cleaned['Quantity'] - df_cleaned['QuantityCanceled'])
df_cleaned.sort_values('CustomerID')[:5]
# -

# Each entry of the dataframe indicates prices for a single kind of product. Hence, orders are split over several lines.
# I collect all the purchases made during a single order to recover the total order price:

# + _cell_guid="5e4530b2-addf-4dc7-9ca8-065c26b73023" _kg_hide-input=true _uuid="653fd7be2e985cf4578af4306f40948926fb60b3"
# sum of purchases / user & order
temp = df_cleaned.groupby(by=['CustomerID', 'InvoiceNo'], as_index=False)['TotalPrice'].sum()
basket_price = temp.rename(columns = {'TotalPrice':'Basket Price'})

# date of the order: average of the line timestamps, computed via the
# integer (nanosecond) representation and converted back to datetime.
df_cleaned['InvoiceDate_int'] = df_cleaned['InvoiceDate'].astype('int64')
temp = df_cleaned.groupby(by=['CustomerID', 'InvoiceNo'], as_index=False)['InvoiceDate_int'].mean()
df_cleaned.drop('InvoiceDate_int', axis = 1, inplace = True)
basket_price.loc[:, 'InvoiceDate'] = pd.to_datetime(temp['InvoiceDate_int'])

# selection of significant entries (strictly positive basket totals)
basket_price = basket_price[basket_price['Basket Price'] > 0]
basket_price.sort_values('CustomerID')[:6]
# -

# In order to have a global view of the type of order performed in this dataset, I determine how the purchases are divided according to total prices:

# + _cell_guid="25f72313-bc56-4a10-99b1-243f147b1756" _kg_hide-input=true _uuid="b1b30be7aa80d7a5287e6fd783b5d8cdbff4032d"
# Purchase count: number of baskets falling in each price bracket.
price_range = [0, 50, 100, 200, 500, 1000, 5000, 50000]
count_price = []
for i, price in enumerate(price_range):
    if i == 0: continue
    val = basket_price[(basket_price['Basket Price'] < price) &
                       (basket_price['Basket Price'] > price_range[i-1])]['Basket Price'].count()
    count_price.append(val)

# Representation of the number of purchases / amount (pie chart).
plt.rc('font', weight='bold')
f, ax = plt.subplots(figsize=(11, 6))
colors = ['yellowgreen', 'gold', 'wheat', 'c', 'violet', 'royalblue','firebrick']
labels = [ '{}<.<{}'.format(price_range[i-1], s) for i,s in enumerate(price_range) if i != 0]
sizes  = count_price
explode = [0.0 if sizes[i] < 100 else 0.0 for i in range(len(sizes))]
ax.pie(sizes, explode = explode, labels=labels, colors = colors,
       autopct = lambda x:'{:1.0f}%'.format(x) if x > 1 else '',
       shadow = False, startangle=0)
ax.axis('equal')
f.text(0.5, 1.01, "Distribution of order amounts", ha='center', fontsize = 18);
# -

# It can be seen that the vast majority of orders concern relatively large purchases given that $\sim$65% of purchases give prices in excess of £ 200.

# ### Analysis of the product categories
#
# In the data-frame, products are uniquely identified through the **StockCode** variable. A short description of the products is given in the **Description** variable. In this section, I intend to use the content of this latter variable in order to group the products into different categories.

# #### Products Description
#
#
# As a first step, I extract from the **Description** variable the information that will prove useful. To do this, I use the following function:

# + _cell_guid="62aada7f-1d61-493e-a044-08fcc7bdfb81" _kg_hide-input=true _uuid="4ae364672f6cede623fd0e032e34d967e4f32ee1"
# POS-tag filter: keep tokens NLTK tags as nouns (NN, NNS, NNP, NNPS).
is_noun = lambda pos: pos[:2] == 'NN'

def keywords_inventory(dataframe, colonne = 'Description'):
    """Extract noun keywords from a text column.

    Tokenizes and POS-tags each description, keeps the nouns, stems them,
    and for each stem selects the shortest surface form as the canonical
    keyword (this systematically picks the singular form).

    Returns (category_keys, keywords_roots, keywords_select, count_keywords):
    the list of canonical keywords, a dict stem -> set of surface forms,
    a dict stem -> canonical keyword, and a dict stem -> occurrence count.
    """
    stemmer = nltk.stem.SnowballStemmer("english")
    keywords_roots  = dict()  # collect the words / root
    keywords_select = dict()  # association: root <-> keyword
    category_keys   = []
    count_keywords  = dict()
    icount = 0
    for s in dataframe[colonne]:
        if pd.isnull(s): continue
        lines = s.lower()
        tokenized = nltk.word_tokenize(lines)
        nouns = [word for (word, pos) in nltk.pos_tag(tokenized) if is_noun(pos)]
        for t in nouns:
            t = t.lower() ; racine = stemmer.stem(t)
            if racine in keywords_roots:
                keywords_roots[racine].add(t)
                count_keywords[racine] += 1
            else:
                keywords_roots[racine] = {t}
                count_keywords[racine] = 1
    # For stems with several surface forms, keep the shortest one.
    for s in keywords_roots.keys():
        if len(keywords_roots[s]) > 1:
            min_length = 1000
            for k in keywords_roots[s]:
                if len(k) < min_length:
                    clef = k ; min_length = len(k)
            category_keys.append(clef)
            keywords_select[s] = clef
        else:
            category_keys.append(list(keywords_roots[s])[0])
            keywords_select[s] = list(keywords_roots[s])[0]
    print("number of keywords in variable '{}': {}".format(colonne,len(category_keys)))
    return category_keys, keywords_roots, keywords_select, count_keywords
# -

# This function takes as input the dataframe and analyzes the content of the **Description** column by performing the following operations:
#
# - extract the names (proper, common) appearing in the products description
# - for each name, I extract the root of the word and aggregate the set of names associated with this particular root
# - count the number of times each root appears in the dataframe
# - when several words are listed for the same root, I consider that the keyword associated with this root is the shortest name (this systematically selects the singular when there are singular/plural variants)
#
# The first step of the analysis is to retrieve the list of products:

# + _cell_guid="f4da3052-c465-47bf-9652-a10a8ac51eb6" _kg_hide-input=true _uuid="1239a65ae122b1c020db626e5167451a950d8226"
df_produits = pd.DataFrame(df_initial['Description'].unique()).rename(columns = {0:'Description'})
# -

# Once this list is created, I use the function I previously defined in order to analyze the description of the various products:

# The POS tagger model must be downloaded once before keywords_inventory runs.
import nltk
nltk.download('averaged_perceptron_tagger')

# + _cell_guid="f52a4134-c9c7-4d17-8510-8f55b1530cbb" _kg_hide-input=true _uuid="38c4872616b2c40bf69982070165cc9db3d0ea69"
keywords, keywords_roots, keywords_select, count_keywords = keywords_inventory(df_produits)
# -

# The execution of this function returns three variables:
# - `keywords`: the list of extracted keywords
# - `keywords_roots`: a dictionary where the keys are the keywords roots and the values are the lists of words associated with those roots
# - `count_keywords`: dictionary listing the number of times every word is used
#
# At this point, I convert the `count_keywords` dictionary into a list, to sort the keywords according to their occurrence:

# + _cell_guid="e033781a-8038-4302-93ed-78966554b7cc" _kg_hide-input=true
# _uuid="66fb955b137916f16d838f95a6f5bbe5e4952334"
# Flatten stem -> count into [keyword, count] pairs sorted by frequency.
list_products = []
for k,v in count_keywords.items():
    list_products.append([keywords_select[k],v])
list_products.sort(key = lambda x:x[1], reverse = True)
# -

# Using it, I create a representation of the most common keywords:

# + _cell_guid="d6c78812-343e-41af-8e4e-ef0292dc4f7e" _kg_hide-input=true _uuid="fcdf4d98e372a1d65c931b7a6d5c29f269938022"
# Horizontal bar chart of the 125 most frequent keywords.
liste = sorted(list_products, key = lambda x:x[1], reverse = True)

plt.rc('font', weight='normal')
fig, ax = plt.subplots(figsize=(7, 25))
y_axis = [i[1] for i in liste[:125]]
x_axis = [k for k,i in enumerate(liste[:125])]
x_label = [i[0] for i in liste[:125]]
plt.xticks(fontsize = 15)
plt.yticks(fontsize = 13)
plt.yticks(x_axis, x_label)
plt.xlabel("Number of occurences", fontsize = 18, labelpad = 10)
ax.barh(x_axis, y_axis, align = 'center')
ax = plt.gca()
ax.invert_yaxis()

plt.title("Words occurence",bbox={'facecolor':'k', 'pad':5}, color='w',fontsize = 25)
plt.show()
# -

# ### Defining product categories
#
# The list that was obtained contains more than 1400 keywords and the most frequent ones appear in more than 200 products. However, while examining the content of the list, I note that some names are useless. Others do not carry information, like colors. Therefore, I discard these words from the analysis that follows and also, I decide to consider only the words that appear more than 13 times.

# + _cell_guid="43300478-3b5a-4c7a-9466-34c384ccae60" _kg_hide-input=true _uuid="5f42482995f36f15688d8cba7f903e1277da5f92"
# Keep only informative keywords: drop colors/tags, very short words,
# rare words (< 13 occurrences) and words containing '+' or '/'.
list_products = []
for k,v in count_keywords.items():
    word = keywords_select[k]
    if word in ['pink', 'blue', 'tag', 'green', 'orange']: continue
    if len(word) < 3 or v < 13: continue
    if ('+' in word) or ('/' in word): continue
    list_products.append([word, v])
list_products.sort(key = lambda x:x[1], reverse = True)
print('Preserved words:', len(list_products))
# -

# #### Data encoding
#
# Now I will use these keywords to create groups of product.
# Firstly, I define the $X$ matrix as:
#
# | | word 1 | ... | word j | ... | word N |
# |:-:|---|---|---|---|---|
# | product 1 | $a_{1,1}$ | | | | $a_{1,N}$ |
# | ... | | | ... | | |
# |product i | ... | | $a_{i,j}$ | | ... |
# |... | | | ... | | |
# | product M | $a_{M,1}$ | | | | $a_{M,N}$ |

# where the $a_ {i, j}$ coefficient is 1 if the description of the product $i$ contains the word $j$, and 0 otherwise.

# + _cell_guid="23c77363-438b-4694-9b52-960c0ab3aa82" _kg_hide-input=true _uuid="d6faac7eb01d2251fb221569b75e007c8d5146aa"
# One-hot encode each product description over the preserved keywords.
# Descriptions are upper-case in the data, hence key.upper() for the test.
liste_produits = df_cleaned['Description'].unique()
#print(liste_produits[0:2])
X = pd.DataFrame()
for key, occurence in list_products:
    X.loc[:, key] = list(map(lambda x:int(key.upper() in x), liste_produits))
#print(X[0:1])
# -

# * The $X$ matrix indicates the words contained in the description of the products using the *one-hot-encoding* principle.
#
#
#
# * In practice, I have found that introducing the price range results in more balanced groups in terms of element numbers.
# Hence, I add 6 extra columns to this matrix, where I indicate the price range of the products: # + _cell_guid="739c9cb1-3278-4d3a-a412-25dd2e8bc7c4" _kg_hide-input=true _uuid="b26a6bc55b61d0bcaccaf7c303b90d2a04636ac4" threshold = [0, 1, 2, 3, 5, 10] label_col = [] for i in range(len(threshold)): if i == len(threshold)-1: col = '.>{}'.format(threshold[i]) else: col = '{}<.<{}'.format(threshold[i],threshold[i+1]) #print(i) #print(col) label_col.append(col) X.loc[:, col] = 0 for i, prod in enumerate(liste_produits): prix = df_cleaned[ df_cleaned['Description'] == prod]['UnitPrice'].mean() #print (prix) j = 0 while prix > threshold[j]: j+=1 if j == len(threshold): break X.loc[i, label_col[j-1]] = 1 # - # and to choose the appropriate ranges, I check the number of products in the different groups: # + _cell_guid="8587b9a3-d3a6-41ed-ac8a-9e0cdc9af31e" _kg_hide-input=true _uuid="6ad9fc6ed72057c63594d9d97d89742202f064f7" print("{:<8} {:<20} \n".format('range', 'number of products') + 20*'-') for i in range(len(threshold)): if i == len(threshold)-1: col = '.>{}'.format(threshold[i]) else: col = '{}<.<{}'.format(threshold[i],threshold[i+1]) print("{:<10} {:<20}".format(col, X.loc[:, col].sum())) # - # #### Creating clusters of products # # In this section, I will group the products into different classes. In the case of matrices with binary encoding, the most suitable metric for the calculation of distances is the [Hamming's metric](https://en.wikipedia.org/wiki/Distance_de_Hamming). Note that the **kmeans** method of sklearn uses a Euclidean distance that can be used, but it is not to the best choice in the case of categorical variables. However, in order to use the Hamming's metric, we need to use the [kmodes](https://pypi.python.org/pypi/kmodes/) package which is not available on the current plateform. Hence, I use the **kmeans** method even if this is not the best choice. 
#
# In order to define (approximately) the number of clusters that best represents the data, I use the silhouette score:

# + _cell_guid="1c81a7c3-8980-4941-9082-7bf5cf92fc14" _kg_hide-input=true _uuid="4ce8586584935e81b9e403ea7a1dd4b2e4c9992e"
# FIX: np.asmatrix / np.matrix is deprecated in NumPy; a plain ndarray is
# fully supported by KMeans, silhouette_score and PCA downstream.
matrix = np.asarray(X)

# Scan a range of cluster counts and report the average silhouette score.
for n_clusters in range(3,10):
    kmeans = KMeans(init='k-means++', n_clusters = n_clusters, n_init=30)
    kmeans.fit(matrix)
    clusters = kmeans.predict(matrix)
    silhouette_avg = silhouette_score(matrix, clusters)
    print("For n_clusters =", n_clusters, "The average silhouette_score is :", silhouette_avg)
# -

# In practice, the scores obtained above can be considered equivalent since, depending on the run, scores of $ 0.1 \pm 0.05 $ will be obtained for all clusters with `n_clusters` $> $ 3 (we obtain slightly lower scores for the first cluster). On the other hand, I found that beyond 5 clusters, some clusters contained very few elements. I therefore choose to separate the dataset into 5 clusters. In order to ensure a good classification at every run of the notebook, I iterate until we obtain the best possible silhouette score, which is, in the present case, around 0.15:

# + _cell_guid="b29b3bb2-ef8d-4d7f-a19e-52045018ab8e" _kg_hide-input=true _uuid="72dd5bdab528264518bf034b84f66f3c29500fca"
# Re-run k-means (k=5) until the silhouette score clears the 0.145 bar,
# so every notebook run ends with a comparably good partition.
n_clusters = 5
silhouette_avg = -1
while silhouette_avg < 0.145:
    kmeans = KMeans(init='k-means++', n_clusters = n_clusters, n_init=30)
    kmeans.fit(matrix)
    clusters = kmeans.predict(matrix)
    silhouette_avg = silhouette_score(matrix, clusters)
    #km = kmodes.KModes(n_clusters = n_clusters, init='Huang', n_init=2, verbose=0)
    #clusters = km.fit_predict(matrix)
    #silhouette_avg = silhouette_score(matrix, clusters)
    print("For n_clusters =", n_clusters, "The average silhouette_score is :", silhouette_avg)
# -

# #### Characterizing the content of clusters
#
# I check the number of elements in every class:

# + _cell_guid="ad66161e-87ba-42ca-8128-bda16b470a34" _kg_hide-input=true _uuid="83591dd72975afde6a85d70429add02bd278c125"
pd.Series(clusters).value_counts()
# -

# ** a: _Silhouette intra-cluster score_ **
#
# In order to have an insight on the quality of the classification, we can represent the silhouette scores of each element of the different clusters. This is the purpose of the next figure which is taken from the [sklearn documentation](http://scikit-learn.org/stable/auto_examples/cluster/plot_kmeans_silhouette_analysis.html):

# + _cell_guid="c2b26710-2269-47e5-8f0d-98f6eac0015a" _kg_hide-input=true _uuid="e9e48bbbc3bb0ffa8134175aefb1fe808dea33e8"
def graph_component_silhouette(n_clusters, lim_x, mat_size, sample_silhouette_values, clusters):
    """Plot per-sample silhouette values, stacked per cluster.

    n_clusters: number of clusters; lim_x: [xmin, xmax] for the plot;
    mat_size: total number of samples; sample_silhouette_values: output of
    silhouette_samples(); clusters: cluster label per sample.
    """
    #plt.rcParams["patch.force_edgecolor"] = True
    plt.style.use('fivethirtyeight')
    mpl.rc('patch', edgecolor = 'dimgray', linewidth=1)

    fig, ax1 = plt.subplots(1, 1)
    fig.set_size_inches(8, 8)
    ax1.set_xlim([lim_x[0], lim_x[1]])
    ax1.set_ylim([0, mat_size + (n_clusters + 1) * 10])
    y_lower = 10
    for i in range(n_clusters):
        # Aggregate the silhouette scores for samples belonging to cluster i, and sort them
        ith_cluster_silhouette_values = sample_silhouette_values[clusters == i]
        ith_cluster_silhouette_values.sort()
        size_cluster_i = ith_cluster_silhouette_values.shape[0]
        y_upper = y_lower + size_cluster_i
        #color = cm.spectral(float(i) / n_clusters)  facecolor=color, edgecolor=color,
        ax1.fill_betweenx(np.arange(y_lower, y_upper), 0, ith_cluster_silhouette_values,
                          alpha=0.8)
        # Label the silhouette plots with their cluster numbers at the middle
        ax1.text(-0.03, y_lower + 0.5 * size_cluster_i, str(i), color = 'red',
                 fontweight = 'bold',
                 bbox=dict(facecolor='white', edgecolor='black', boxstyle='round, pad=0.3'))
        # Compute the new y_lower for next plot
        y_lower = y_upper + 10

# + _cell_guid="8ee417cb-29d9-4913-8e12-0c020ecde059" _kg_hide-input=true _uuid="cf9b40ed0d865401e4a05e06eec2bff97ace7059"
# define individual silouhette scores
sample_silhouette_values = silhouette_samples(matrix, clusters)
# and do the graph
graph_component_silhouette(n_clusters, [-0.07, 0.33], len(X), sample_silhouette_values, clusters)
# -

# ** b: _Word Cloud_**
#
# Now we can have a look at the type of objects that each cluster represents. In order to obtain a global view of their contents, I determine which keywords are the most frequent in each of them

# + _cell_guid="159600b4-def2-4e4d-be54-6d84938721c4" _kg_hide-input=true _uuid="848009ae647f20366e148eeae96a4aa02f975618"
# Per-cluster keyword frequencies: count how many descriptions in each
# cluster contain each preserved keyword (skipping a few generic words).
liste = pd.DataFrame(liste_produits)
liste_words = [word for (word, occurence) in list_products]

occurence = [dict() for _ in range(n_clusters)]

for i in range(n_clusters):
    liste_cluster = liste.loc[clusters == i]
    for word in liste_words:
        if word in ['art', 'set', 'heart', 'pink', 'blue', 'tag']: continue
        occurence[i][word] = sum(liste_cluster.loc[:, 0].str.contains(word.upper()))
# -

# and I output the result as wordclouds:

# + _cell_guid="2995e126-925b-436f-8b68-55d874637b1e" _kg_hide-input=true _uuid="3d88a32f7998249ce42e267bb912acc7420f0c47"
def random_color_func(word=None, font_size=None, position=None,
                      orientation=None, font_path=None, random_state=None):
    # Builds an HSL color; relies on the module-level `tone` set in the
    # loop below to pick the hue for the current cluster.
    h = int(360.0 * tone / 255.0)
    s = int(100.0 * 255.0 / 255.0)
    l = int(100.0 * float(random_state.randint(70, 120)) / 255.0)
    return "hsl({}, {}%, {}%)".format(h, s, l)

def make_wordcloud(liste, increment):
    # Draw one word cloud (top-150 keywords of a cluster) into subplot
    # `increment` of the module-level `fig`.
    # NOTE(review): the `from wordcloud import WordCloud, STOPWORDS` line is
    # commented out in the file header, so WordCloud is undefined here at
    # runtime unless that import is restored -- confirm before running.
    ax1 = fig.add_subplot(4,2,increment)
    words = dict()
    trunc_occurences = liste[0:150]
    for s in trunc_occurences:
        words[s[0]] = s[1]
    wordcloud = WordCloud(width=1000,height=400, background_color='lightgrey',
                          max_words=1628,relative_scaling=1,
                          color_func = random_color_func,
                          normalize_plurals=False)
    wordcloud.generate_from_frequencies(words)
    ax1.imshow(wordcloud, interpolation="bilinear")
    ax1.axis('off')
    plt.title('cluster n{}'.format(increment-1))

fig = plt.figure(1, figsize=(14,14))
color = [0, 160, 130, 95, 280, 40, 330, 110, 25]
for i in range(n_clusters):
    list_cluster_occurences = occurence[i]
    tone = color[i]  # define the color of the words
    liste = []
    for key, value in list_cluster_occurences.items():
        liste.append([key, value])
    liste.sort(key = lambda x:x[1], reverse = True)
    make_wordcloud(liste, i+1)
# -

# From this representation, we can see that for example, one of the clusters contains objects that could be associated with gifts (keywords: Christmas, packaging, card, ...). Another cluster would rather contain luxury items and jewelry (keywords: necklace, bracelet, lace, silver, ...). Nevertheless, it can also be observed that many words appear in various clusters and it is therefore difficult to clearly distinguish them.
#
# ** c: _Principal Component Analysis_ **
#
# In order to ensure that these clusters are truly distinct, I look at their composition. Given the large number of variables of the initial matrix, I first perform a PCA:

# + _cell_guid="85af75c5-bf90-4b8c-9687-caff723d0027" _kg_hide-input=true _uuid="33859d205bcf40477d166b679fb97996ba2d5f48"
pca = PCA()
pca.fit(matrix)
pca_samples = pca.transform(matrix)
# -

# and then check for the amount of variance explained by each component:

# + _cell_guid="41cd8738-4923-43cd-b26a-fba6d53070e8" _kg_hide-input=true _uuid="abce52d76e801aa6197603925618fb032aec78af"
# Cumulative (step) + individual (bars) explained-variance plot.
fig, ax = plt.subplots(figsize=(14, 5))
sns.set(font_scale=1)
plt.step(range(matrix.shape[1]), pca.explained_variance_ratio_.cumsum(), where='mid',
         label='cumulative explained variance')
sns.barplot(np.arange(1,matrix.shape[1]+1), pca.explained_variance_ratio_, alpha=0.5, color = 'g',
            label='individual explained variance')
plt.xlim(0, 100)
ax.set_xticklabels([s if int(s.get_text())%2 == 0 else '' for s in ax.get_xticklabels()])

plt.ylabel('Explained variance', fontsize = 14)
plt.xlabel('Principal components', fontsize = 14)
plt.legend(loc='upper left', fontsize = 13);
# -

# We see that the number of components required to explain the data is extremely important: we need more than 100 components to explain 90% of the variance of the data.
In practice, I decide to keep only a limited number of components since this decomposition is only performed to visualize the data: # # + _cell_guid="0e49b019-d1b7-4815-8c5e-7cf27fa5b5b2" _kg_hide-input=true _uuid="fb0afe8523c2634860fdaf67d734c1dc0897c4c0" pca = PCA(n_components=50) matrix_9D = pca.fit_transform(matrix) mat = pd.DataFrame(matrix_9D) mat['cluster'] = pd.Series(clusters) # + _cell_guid="675cc670-4512-4983-8b58-e82f3cf2bf3d" _kg_hide-input=true _uuid="b9d872fa5038458f3424dfc585a5a823efc7ff7f" import matplotlib.patches as mpatches sns.set_style("white") sns.set_context("notebook", font_scale=1, rc={"lines.linewidth": 2.5}) LABEL_COLOR_MAP = {0:'r', 1:'gold', 2:'b', 3:'k', 4:'c', 5:'g'} label_color = [LABEL_COLOR_MAP[l] for l in mat['cluster']] fig = plt.figure(figsize = (12,10)) increment = 0 for ix in range(4): for iy in range(ix+1, 4): increment += 1 ax = fig.add_subplot(3,3,increment) ax.scatter(mat[ix], mat[iy], c= label_color, alpha=0.4) plt.ylabel('PCA {}'.format(iy+1), fontsize = 12) plt.xlabel('PCA {}'.format(ix+1), fontsize = 12) ax.yaxis.grid(color='lightgray', linestyle=':') ax.xaxis.grid(color='lightgray', linestyle=':') ax.spines['right'].set_visible(False) ax.spines['top'].set_visible(False) if increment == 9: break if increment == 9: break comp_handler = [] for i in range(5): comp_handler.append(mpatches.Patch(color = LABEL_COLOR_MAP[i], label = i)) plt.legend(handles=comp_handler, bbox_to_anchor=(1.1, 0.97), title='Cluster', shadow = True, frameon = True, framealpha = 1,fontsize = 13, bbox_transform = plt.gcf().transFigure) #facecolor = 'lightgrey', plt.tight_layout() # - # ## Customer categories # # Steps for generatin # # * Formatting data # * Grouping products # * Splitting of the dataset # * Grouping orders # * Creating customer categories # * Data encoding # * Creating categories # # # ### Formatting data # # In the previous section, the different products were grouped in five clusters. 
In order to prepare the rest of the analysis, a first step consists in introducing this information into the dataframe. To do this, I create the categorical variable **categ_product** where I indicate the cluster of each product : # + _cell_guid="b66c0817-22c1-42cf-b944-8e3db918a671" _kg_hide-input=true _uuid="<KEY>" corresp = dict() for key, val in zip (liste_produits, clusters): corresp[key] = val df_cleaned['categ_product'] = df_cleaned.loc[:, 'Description'].map(corresp) df_cleaned[['InvoiceNo', 'Description', 'categ_product']][:10] # - # #### Grouping products # # In a second step, I decide to create the **categ_N** variables (with $ N \in [0: 4]$) that contains the amount spent in each product category: # + _cell_guid="882978a7-8d65-468e-982a-dd689515d415" _kg_hide-input=true _uuid="7b29ea4de78cdf757e031a0d810b24c4a3641057" for i in range(5): col = 'categ_{}'.format(i) df_temp = df_cleaned[df_cleaned['categ_product'] == i] price_temp = df_temp['UnitPrice'] * (df_temp['Quantity'] - df_temp['QuantityCanceled']) price_temp = price_temp.apply(lambda x:x if x > 0 else 0) df_cleaned.loc[:, col] = price_temp df_cleaned[col].fillna(0, inplace = True) df_cleaned[['InvoiceNo', 'Description', 'categ_product', 'categ_0', 'categ_1', 'categ_2', 'categ_3','categ_4']][:10] # - # Up to now, the information related to a single order was split over several lines of the dataframe (one line per product). I decide to collect the information related to a particular order and put in in a single entry. 
I therefore create a new dataframe that contains, for each order, the amount of the basket, as well as the way it is distributed over the 5 categories of products: # + _cell_guid="9a5249e1-7ac8-43f9-a759-db4df4934e71" _kg_hide-input=true _uuid="9d0c9edb309503999f38ccc40fd09d1a311a5019" # sum of purchases / user & order temp = df_cleaned.groupby(by=['CustomerID', 'InvoiceNo'], as_index=False)['TotalPrice'].sum() basket_price = temp.rename(columns = {'TotalPrice':'Basket Price'}) # percentage of the price of the order / product category for i in range(5): col = 'categ_{}'.format(i) temp = df_cleaned.groupby(by=['CustomerID', 'InvoiceNo'], as_index=False)[col].sum() basket_price.loc[:, col] = temp # date of the order df_cleaned['InvoiceDate_int'] = df_cleaned['InvoiceDate'].astype('int64') temp = df_cleaned.groupby(by=['CustomerID', 'InvoiceNo'], as_index=False)['InvoiceDate_int'].mean() df_cleaned.drop('InvoiceDate_int', axis = 1, inplace = True) basket_price.loc[:, 'InvoiceDate'] = pd.to_datetime(temp['InvoiceDate_int']) # selection of significant entries: basket_price = basket_price[basket_price['Basket Price'] > 0] basket_price.sort_values('CustomerID', ascending = True)[:5] # - # #### Splitting of data over time # # The dataframe `basket_price` contains information for a period of 12 months. Later, one of the objectives will be to develop a model capable of characterizing and anticipating the habits of the customers visiting the site and this, from their first visit. 
In order to be able to test the model in a realistic way, I split the data set by retaining the first 10 months to develop the model and the following two months to test it: # + _cell_guid="c30fa4af-6617-4e25-b297-23efae5e1dcb" _kg_hide-input=true _uuid="7be642cc67d95c7b149747f66a8ba8845a17350e" print(basket_price['InvoiceDate'].min(), '->', basket_price['InvoiceDate'].max()) # + _cell_guid="29948719-cecf-48d3-ab66-ca76214b058a" _kg_hide-input=true _uuid="854a1781838e9d4f9e2a27e6a0b65c5e86d7a1b0" date_cut_off = datetime.datetime(2011,10,1) set_entrainement = basket_price[basket_price['InvoiceDate'] < date_cut_off] set_test = basket_price[basket_price['InvoiceDate'] >= date_cut_off] basket_price = set_entrainement.copy(deep = True) # - # #### Consumer Order Combinations # # In a second step, I group together the different entries that correspond to the same user. I thus determine the number of purchases made by the user, as well as the minimum, maximum, average amounts and the total amount spent during all the visits: # + _cell_guid="876435e3-49c7-4f98-a58f-55d8f0620ef8" _kg_hide-input=true _uuid="a8144451cb606fde4e691486ea9b9d2ccbeae7b9" # of visits and stats on cart amount / users transactions_per_user=basket_price.groupby(by=['CustomerID'])['Basket Price'].agg(['count','min', 'max','mean','sum']) for i in range(5): col = 'categ_{}'.format(i) transactions_per_user.loc[:,col] = basket_price.groupby(by=['CustomerID'])[col].sum() /\ transactions_per_user['sum']*100 transactions_per_user.reset_index(drop = False, inplace = True) basket_price.groupby(by=['CustomerID'])['categ_0'].sum() transactions_per_user.sort_values('CustomerID', ascending = True)[:5] # - # Finally, I define two additional variables that give the number of days elapsed since the first purchase (** FirstPurchase **) and the number of days since the last purchase (** LastPurchase **): # + _cell_guid="60a47c3b-b36a-460a-835b-7f6d3c7af48c" _kg_hide-input=true 
_uuid="29dab7aeb0f6d8d1e898b9d72efa2a6e07f1d3e0" last_date = basket_price['InvoiceDate'].max().date() first_registration = pd.DataFrame(basket_price.groupby(by=['CustomerID'])['InvoiceDate'].min()) last_purchase = pd.DataFrame(basket_price.groupby(by=['CustomerID'])['InvoiceDate'].max()) test = first_registration.applymap(lambda x:(last_date - x.date()).days) test2 = last_purchase.applymap(lambda x:(last_date - x.date()).days) transactions_per_user.loc[:, 'LastPurchase'] = test2.reset_index(drop = False)['InvoiceDate'] transactions_per_user.loc[:, 'FirstPurchase'] = test.reset_index(drop = False)['InvoiceDate'] transactions_per_user[:5] # - # A customer category of particular interest is that of customers who make only one purchase. One of the objectives may be, for example, to target these customers in order to retain them. In part, I find that this type of customer represents 1/3 of the customers listed: # + _cell_guid="f0a8717e-bf13-4847-a2d8-d5c518ef7580" _kg_hide-input=true _uuid="27a45dcd6d0ac2382f07ef6b091a50fc7006a389" n1 = transactions_per_user[transactions_per_user['count'] == 1].shape[0] n2 = transactions_per_user.shape[0] print("No. customers with single purchase: {:<2}/{:<5} ({:<2.2f}%)".format(n1,n2,n1/n2*100)) # - # --- # ### Creation of customers categories # #### Data encoding # # The dataframe `transactions_per_user` contains a summary of all the commands that were made. Each entry in this dataframe corresponds to a particular client. 
I use this information to characterize the different types of customers and only keep a subset of variables: # + _cell_guid="cdc3d67f-1337-4cf6-8a36-7b99d4adc160" _kg_hide-input=true _uuid="1769df5bd3f987760e0493c6979c9031e18cd47e" list_cols = ['count','min','max','mean','categ_0','categ_1','categ_2','categ_3','categ_4'] #_____________________________________________________________ selected_customers = transactions_per_user.copy(deep = True) matrix = np.asmatrix(selected_customers[list_cols]) # - # In practice, the different variables I selected have quite different ranges of variation and before continuing the analysis, I create a matrix where these data are standardized: # + _cell_guid="84be0db9-a24a-4b9b-b541-e1437404ff37" _kg_hide-input=true _uuid="5e15c98b0da14a1fe5473ca2fa6c5722968386b6" scaler = StandardScaler() scaler.fit(matrix) print('variables mean values: \n' + 90*'-' + '\n' , scaler.mean_) scaled_matrix = scaler.transform(matrix) # - # In the following, I will create clusters of customers. In practice, before creating these clusters, it is interesting to define a base of smaller dimension allowing to describe the `scaled_matrix` matrix. In this case, I will use this base in order to create a representation of the different clusters and thus verify the quality of the separation of the different groups. 
I therefore perform a PCA beforehand: # + _cell_guid="302061b0-4e19-4b5b-a969-1f45c27873e1" _kg_hide-input=true _uuid="30b6a47f96e223efeb0ab49517b38c192627927f" pca = PCA() pca.fit(scaled_matrix) pca_samples = pca.transform(scaled_matrix) # - # and I represent the amount of variance explained by each of the components: # + _cell_guid="ed6ab99d-fee5-44c3-958c-58b786263e82" _kg_hide-input=true _uuid="bb95db84567cb2a3a32652bcf2b3ea5c14b5b550" fig, ax = plt.subplots(figsize=(14, 5)) sns.set(font_scale=1) plt.step(range(matrix.shape[1]), pca.explained_variance_ratio_.cumsum(), where='mid', label='cumulative explained variance') sns.barplot(np.arange(1,matrix.shape[1]+1), pca.explained_variance_ratio_, alpha=0.5, color = 'g', label='individual explained variance') plt.xlim(0, 10) ax.set_xticklabels([s if int(s.get_text())%2 == 0 else '' for s in ax.get_xticklabels()]) plt.ylabel('Explained variance', fontsize = 14) plt.xlabel('Principal components', fontsize = 14) plt.legend(loc='best', fontsize = 13); # - # ----- # #### Creating customer categories # # At this point, I define clusters of clients from the standardized matrix that was defined earlier and using the `k-means` algorithm from` scikit-learn`. 
I choose the number of clusters based on the silhouette score and I find that the best score is obtained with 11 clusters: # + _cell_guid="ce1adb5f-a0cf-4af4-99fa-585fa71cf89f" _kg_hide-input=true _uuid="09711facb0d6dd1e4027724a55b8a5fa0155b616" n_clusters = 11 kmeans = KMeans(init='k-means++', n_clusters = n_clusters, n_init=100) kmeans.fit(scaled_matrix) clusters_clients = kmeans.predict(scaled_matrix) silhouette_avg = silhouette_score(scaled_matrix, clusters_clients) print('silhouette score: {:<.3f}'.format(silhouette_avg)) # - # At first, I look at the number of customers in each cluster: # + _cell_guid="7e64fcb6-4827-4bb9-883e-f31e9c595dba" _kg_hide-input=true _uuid="b1f770e4ac40cc0e868efb12f895653fb5127599" pd.DataFrame(pd.Series(clusters_clients).value_counts(), columns = ['number of clients']).T # - # ** a / _Report via the PCA_ ** # # There is a certain disparity in the sizes of different groups that have been created. Hence I will now try to understand the content of these clusters in order to validate (or not) this particular separation. 
At first, I use the result of the PCA: # + _cell_guid="051ee4e8-78a1-48d9-aa43-84e089a57df3" _kg_hide-input=true _uuid="716227b8a446ac77c9397893964c782333a02e6a" pca = PCA(n_components=6) matrix_3D = pca.fit_transform(scaled_matrix) mat = pd.DataFrame(matrix_3D) mat['cluster'] = pd.Series(clusters_clients) # - # in order to create a representation of the various clusters: # + _cell_guid="6e2ff63a-1bfe-4c7a-b0e4-7fa387e2c560" _kg_hide-input=true _uuid="cececaffe4efaff3333c64426b028b793cc677b5" import matplotlib.patches as mpatches sns.set_style("white") sns.set_context("notebook", font_scale=1, rc={"lines.linewidth": 2.5}) LABEL_COLOR_MAP = {0:'r', 1:'tan', 2:'b', 3:'k', 4:'c', 5:'g', 6:'deeppink', 7:'skyblue', 8:'darkcyan', 9:'orange', 10:'yellow', 11:'tomato', 12:'seagreen'} label_color = [LABEL_COLOR_MAP[l] for l in mat['cluster']] fig = plt.figure(figsize = (12,10)) increment = 0 for ix in range(6): for iy in range(ix+1, 6): increment += 1 ax = fig.add_subplot(4,3,increment) ax.scatter(mat[ix], mat[iy], c= label_color, alpha=0.5) plt.ylabel('PCA {}'.format(iy+1), fontsize = 12) plt.xlabel('PCA {}'.format(ix+1), fontsize = 12) ax.yaxis.grid(color='lightgray', linestyle=':') ax.xaxis.grid(color='lightgray', linestyle=':') ax.spines['right'].set_visible(False) ax.spines['top'].set_visible(False) if increment == 12: break if increment == 12: break #_______________________________________________ # I set the legend: abreviation -> airline name comp_handler = [] for i in range(n_clusters): comp_handler.append(mpatches.Patch(color = LABEL_COLOR_MAP[i], label = i)) plt.legend(handles=comp_handler, bbox_to_anchor=(1.1, 0.9), title='Cluster', shadow = True, frameon = True, framealpha = 1, fontsize = 13, bbox_transform = plt.gcf().transFigure) #facecolor = 'lightgrey', plt.tight_layout() # - # From this representation, it can be seen, for example, that the first principal component allow to separate the tiniest clusters from the rest. 
# More generally, we see that there is always a representation in which two clusters will appear to be distinct.
#
# ** b/ _Score of the silhouette intra-cluster_ **
#
# As with product categories, another way to look at the quality of the separation is to look at silhouette scores within different clusters:

# + _cell_guid="89a83e55-2681-4c31-9e39-52fe2cd0e5cf" _kg_hide-input=true _uuid="3a207d8cfdc08bebeffb5cd8ddb00dfb959efb4a"
#____________________________________
# define individual silhouette scores
# (computed once; the original cell evaluated silhouette_samples twice
# with identical arguments, doubling a fairly expensive O(n^2) computation)
sample_silhouette_values = silhouette_samples(scaled_matrix, clusters_clients)
#__________________
# and do the graph
graph_component_silhouette(n_clusters, [-0.15, 0.55], len(scaled_matrix),
                           sample_silhouette_values, clusters_clients)
# -

# ** c/ _Customers morphotype_**
#
# At this stage, I have verified that the different clusters are indeed disjoint (at least, in a global way). It remains to understand the habits of the customers in each cluster. To do so, I start by adding to the `selected_customers` dataframe a variable that defines the cluster to which each client belongs:

# + _cell_guid="1c45a7fd-b564-4595-b725-0d0ae03a25e7" _uuid="52f45c0a955a33a52c812aaf9e60e5a82f5bd2da"
# tag every customer with the k-means cluster it was assigned to above
selected_customers.loc[:, 'cluster'] = clusters_clients
# -

# Then, I average the contents of this dataframe by first selecting the different groups of clients. This gives access to, for example, the average baskets price, the number of visits or the total sums spent by the clients of the different clusters.
# I also determine the number of clients in each group (variable ** size **):

# + _cell_guid="edaa1b78-74be-44cf-952d-f5148824b5d8" _kg_hide-input=true _uuid="0fb5e92a263ea5290cfe72fb03257ea6eff632ce"
# per-cluster averages of every customer variable, plus the cluster head-count
merged_df = pd.DataFrame()
for i in range(n_clusters):
    test = pd.DataFrame(selected_customers[selected_customers['cluster'] == i].mean())
    test = test.T.set_index('cluster', drop = True)
    test['size'] = selected_customers[selected_customers['cluster'] == i].shape[0]
    merged_df = pd.concat([merged_df, test])
#_____________________________________________________
merged_df.drop('CustomerID', axis = 1, inplace = True)
print('number of customers:', merged_df['size'].sum())

merged_df = merged_df.sort_values('sum')
# -

# Finally, I re-organize the content of the dataframe by ordering the different clusters: first, in relation to the amount spent in each product category and then, according to the total amount spent:

# + _cell_guid="9e510c5d-cfa4-4d50-ad2e-93fa01cb319e" _kg_hide-input=true _uuid="5c6079b1b9dcb0d894e406255d351680219a5b35"
# clusters dominated by a single product category (spending share > 45%) come first
liste_index = []
for i in range(5):
    column = 'categ_{}'.format(i)
    liste_index.append(merged_df[merged_df[column] > 45].index.values[0])
# append the remaining clusters WITHOUT mutating `liste_index`: the original
# code aliased the two names (`liste_index_reordered = liste_index`) so the
# subsequent `+=` silently modified both lists
liste_index_reordered = liste_index + [s for s in merged_df.index if s not in liste_index]
merged_df = merged_df.reindex(index = liste_index_reordered)
merged_df = merged_df.reset_index(drop = False)
display(merged_df[['cluster', 'count', 'min', 'max', 'mean', 'sum', 'categ_0',
                   'categ_1', 'categ_2', 'categ_3', 'categ_4', 'size']])
# -

# ** d / _Customers morphology graphical representation_ **
#
# Finally, I created a representation of the different morphotypes.
To do this, I define a class to create "Radar Charts" (which has been adapted from this [kernel](https://www.kaggle.com/yassineghouzam/don-t-know-why-employees-leave -read-this)): # + _cell_guid="e544d9cd-9589-46e8-9af5-3ad59a193b6b" _kg_hide-input=true _uuid="3d7b1bbb5feb7c4e5ab1e48a27104b5fa51cbac4" def _scale_data(data, ranges): (x1, x2) = ranges[0] d = data[0] return [(d - y1) / (y2 - y1) * (x2 - x1) + x1 for d, (y1, y2) in zip(data, ranges)] class RadarChart(): def __init__(self, fig, location, sizes, variables, ranges, n_ordinate_levels = 6): angles = np.arange(0, 360, 360./len(variables)) ix, iy = location[:] ; size_x, size_y = sizes[:] axes = [fig.add_axes([ix, iy, size_x, size_y], polar = True, label = "axes{}".format(i)) for i in range(len(variables))] _, text = axes[0].set_thetagrids(angles, labels = variables) for txt, angle in zip(text, angles): if angle > -1 and angle < 181: txt.set_rotation(angle - 90) else: txt.set_rotation(angle - 270) for ax in axes[1:]: ax.patch.set_visible(False) ax.xaxis.set_visible(False) ax.grid("off") for i, ax in enumerate(axes): grid = np.linspace(*ranges[i],num = n_ordinate_levels) grid_label = [""]+["{:.0f}".format(x) for x in grid[1:-1]] ax.set_rgrids(grid, labels = grid_label, angle = angles[i]) ax.set_ylim(*ranges[i]) self.angle = np.deg2rad(np.r_[angles, angles[0]]) self.ranges = ranges self.ax = axes[0] def plot(self, data, *args, **kw): sdata = _scale_data(data, self.ranges) self.ax.plot(self.angle, np.r_[sdata, sdata[0]], *args, **kw) def fill(self, data, *args, **kw): sdata = _scale_data(data, self.ranges) self.ax.fill(self.angle, np.r_[sdata, sdata[0]], *args, **kw) def legend(self, *args, **kw): self.ax.legend(*args, **kw) def title(self, title, *args, **kw): self.ax.text(0.9, 1, title, transform = self.ax.transAxes, *args, **kw) # - # This allows to have a global view of the content of each cluster: # + _cell_guid="d0066031-e494-407f-9e9f-b79167244fd1" _kg_hide-input=true 
_uuid="3074b64fa2e091118783baa4452fe7ccc22c82cb"
fig = plt.figure(figsize=(50,50))
# radar axes: number of visits, mean/total spending, and the 5 category shares
attributes = ['count', 'mean', 'sum', 'categ_0', 'categ_1', 'categ_2', 'categ_3', 'categ_4']
ranges = [[0.01, 10], [0.01, 1500], [0.01, 10000], [0.01, 75], [0.01, 75], [0.01, 75],
          [0.01, 75], [0.01, 75]]
index = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12]

n_groups = n_clusters ; i_cols = 3
i_rows = n_groups//i_cols
size_x, size_y = (1/i_cols), (1/i_rows)

for ind in range(n_clusters):
    # grid position of the current radar chart (3 charts per row, filled top-down)
    ix = ind%3 ; iy = i_rows - ind//3
    pos_x = ix*(size_x + 0.05) ; pos_y = iy*(size_y + 0.05)
    location = [pos_x, pos_y]  ; sizes = [size_x, size_y]
    #______________________________________________________
    data = np.array(merged_df.loc[index[ind], attributes])
    radar = RadarChart(fig, location, sizes, attributes, ranges)
    radar.plot(data, color = 'b', linewidth=5.0)
    radar.fill(data, alpha = 0.2, color = 'b')
    radar.title(title = 'cluster n{}'.format(index[ind]), color = 'r')
    # NOTE: the original ended the loop body with `ind += 1`, a no-op since
    # `for` rebinds `ind` on every iteration; it has been removed
# -

# It can be seen, for example, that the first 5 clusters correspond to a strong preponderance of purchases in a particular category of products. Other clusters will differ from basket averages (** mean **), the total sum spent by the clients (** sum **) or the total number of visits made (** count **).
#
# ____
#
# ## Classification of customers
#
# In this part, the objective will be to adjust a classifier that will classify consumers in the different client categories that were established in the previous section. The objective is to make this classification possible at the first visit. To fulfill this objective, I will test several classifiers implemented in `scikit-learn`.
# First, in order to simplify their use, I define a class that allows to interface several of the functionalities common to these different classifiers:

# + _cell_guid="13991b6e-7070-4e99-985a-8955cd995840" _kg_hide-input=true _uuid="33233378b919c14b0b43dd7bb8f5b7023ccb089a"
class Class_Fit(object):
    """Thin wrapper around a scikit-learn classifier.

    Bundles construction, training, prediction and hyper-parameter grid
    search behind one uniform interface, so that the different classifiers
    compared below can all be driven by identical code.
    """
    def __init__(self, clf, params=None):
        # `clf` is the classifier *class* (not an instance); optional
        # `params` are forwarded to its constructor.
        if params:
            self.clf = clf(**params)
        else:
            self.clf = clf()

    def train(self, x_train, y_train):
        """Fit the underlying classifier on the training data."""
        self.clf.fit(x_train, y_train)

    def predict(self, x):
        """Return the class predicted for each sample of `x`."""
        return self.clf.predict(x)

    def grid_search(self, parameters, Kfold):
        """Prepare a `Kfold`-cross-validated grid search over `parameters`."""
        self.grid = GridSearchCV(estimator = self.clf, param_grid = parameters, cv = Kfold)

    def grid_fit(self, X, Y):
        """Run the grid search (must be preceded by a `grid_search` call)."""
        self.grid.fit(X, Y)

    def grid_predict(self, X, Y):
        """Predict `X` with the best grid-search estimator and print the score.

        NOTE(review): the printed label says "Precision" but the value is the
        *accuracy* (metrics.accuracy_score) — confirm before relying on it.
        """
        self.predictions = self.grid.predict(X)
        print("Precision: {:.2f} % ".format(100*metrics.accuracy_score(Y, self.predictions)))
# -

# Since the goal is to define the class to which a client belongs and this, as soon as its first visit, I only keep the variables that describe the content of the basket, and do not take into account the variables related to the frequency of visits or variations of the basket price over time:

selected_customers.head()

# + _cell_guid="5bdd768a-8cb4-49dc-84fc-33c8b84a3362" _kg_hide-input=true _uuid="df64b250e989c1a33fe31921ee308056fc5a57b5"
# basket-content variables only: mean basket price + the 5 category spending shares
columns = ['mean', 'categ_0', 'categ_1', 'categ_2', 'categ_3', 'categ_4' ]
X = selected_customers[columns]
Y = selected_customers['cluster']
# -

# Finally, I split the dataset in train and test sets:

# + _cell_guid="c9c4174c-bd28-47df-9919-e951f45bd7f2" _kg_hide-input=true _uuid="43c2d31561df475b5c56f5123c3d1080ed3990a2"
X_train, X_test, Y_train, Y_test = model_selection.train_test_split(X, Y, train_size = 0.8)
# -

# ___
# ### Support Vector Machine Classifier (SVC)
#
# The first classifier I use is the SVC classifier. In order to use it, I create an instance of the `Class_Fit` class and then call` grid_search()`.
# When calling this method, I provide as parameters:
# - the hyperparameters for which I will seek an optimal value
# - the number of folds to be used for cross-validation

# + _cell_guid="eb88bafc-1335-47e2-bc5d-469a551e54b6" _kg_hide-input=true _uuid="31ad3b4f07ec0b501f70909560735d21e3f5a8da"
# linear SVC with a log-spaced grid over the regularisation strength C
svc = Class_Fit(clf = svm.LinearSVC)
svc.grid_search(parameters = [{'C':np.logspace(-2,2,10)}], Kfold = 5)
# -

# Once this instance is created, I adjust the classifier to the training data:

# + _cell_guid="f8f0b1ec-6464-4a34-924c-85810c03a8ad" _kg_hide-input=true _uuid="174508011f559d2be87e39718f6798090edef992"
svc.grid_fit(X = X_train, Y = Y_train)
# -

# then I can test the quality of the prediction with respect to the test data:

# + _cell_guid="5031fd84-a7a4-4ce1-add7-03e4af50cb15" _kg_hide-input=true _uuid="314d70285360f73a73734e8583d1cb7e58c7f71d"
svc.grid_predict(X_test, Y_test)
# -

# Note that for every run precision value may change

# ___
# #### Confusion matrix
#
# The accuracy of the results seems to be correct. Nevertheless, let us remember that when the different classes were defined, there was an imbalance in size between the classes obtained. In particular, one class contains around 40% of the clients. It is therefore interesting to look at how the predictions and real values compare within the different classes.
# This is the subject of the confusion matrices and to represent them, I use the code of the [sklearn documentation](http://scikit-learn.org/stable/auto_examples/model_selection/plot_confusion_matrix.html):

# + _cell_guid="3d334a90-3134-4e66-babd-7f2ea02efb11" _kg_hide-input=true _uuid="50e9d977bebb38f3568c91ab1856061774d115c6"
def plot_confusion_matrix(cm, classes, normalize=False, title='Confusion matrix', cmap=plt.cm.Blues):
    """Render the confusion matrix `cm` as an annotated heat map.

    cm        : square array as returned by sklearn's confusion_matrix
    classes   : tick labels, one per class
    normalize : if True, divide each row by its sum (per-true-class ratios)
    """
    if normalize:
        # row-normalise so that every true class sums to 1
        cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]
        print("Normalized confusion matrix")
    else:
        print('Confusion matrix, without normalization')
    plt.imshow(cm, interpolation='nearest', cmap=cmap)
    plt.title(title)
    plt.colorbar()
    tick_marks = np.arange(len(classes))
    plt.xticks(tick_marks, classes, rotation=0)
    plt.yticks(tick_marks, classes)
    # floats when normalised, integer counts otherwise
    fmt = '.2f' if normalize else 'd'
    # flip the annotation colour on dark cells so the text stays readable
    thresh = cm.max() / 2.
    for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):
        plt.text(j, i, format(cm[i, j], fmt), horizontalalignment="center",
                 color="white" if cm[i, j] > thresh else "black")
    #_________________________________________________
    plt.tight_layout()
    plt.ylabel('True label')
    plt.xlabel('Predicted label')
# -

# from which I create the following representation:

# + _cell_guid="eea554d8-a672-4f1e-a6cf-ef4bf6a6950b" _kg_hide-input=true _uuid="edb4632a50b983e80ab28fca93b80e9117d14fc6"
# one label per customer cluster (11 clusters were retained above)
class_names = [i for i in range(11)]
cnf_matrix = confusion_matrix(Y_test, svc.predictions)
np.set_printoptions(precision=2)
plt.figure(figsize = (8,8))
plot_confusion_matrix(cnf_matrix, classes=class_names, normalize = False, title='Confusion matrix')
# -
#
# ----------------
# #### Learning curve
#
# A typical way to test the quality of a fit is to draw a learning curve. In particular, this type of curves allow to detect possible drawbacks in the model, linked for example to over- or under-fitting. This also shows to which extent the model could benefit from a larger data sample.
# In order to draw this curve, I use the [scikit-learn documentation code again](http://scikit-learn.org/stable/auto_examples/model_selection/plot_learning_curve.html#sphx-glr- self-examples-model-selection-pad-learning-curve-py)

# + _cell_guid="76b2a6bb-aa79-406f-95ff-48fb3946c5a8" _kg_hide-input=true _uuid="199d688e55c37ba527adb63c31a0c243a45ba8da"
def plot_learning_curve(estimator, title, X, y, ylim=None, cv=None,
                        n_jobs=-1, train_sizes=np.linspace(.1, 1.0, 10)):
    """Generate a simple plot of the test and training learning curve.

    estimator   : fitted-or-fittable sklearn estimator to evaluate
    title       : figure title
    X, y        : training data and labels
    ylim        : optional (bottom, top) pair passed to plt.ylim
    cv          : cross-validation splitting strategy (passed to learning_curve)
    n_jobs      : parallel jobs for the CV runs (-1 = all cores)
    train_sizes : fractions of the training set at which scores are computed
                  (NB: default is a module-level np array, used read-only)
    Returns the `plt` module so the caller can further tweak the figure.
    """
    plt.figure()
    plt.title(title)
    if ylim is not None:
        plt.ylim(*ylim)
    plt.xlabel("Training examples")
    plt.ylabel("Score")
    train_sizes, train_scores, test_scores = learning_curve(
        estimator, X, y, cv=cv, n_jobs=n_jobs, train_sizes=train_sizes)
    train_scores_mean = np.mean(train_scores, axis=1)
    train_scores_std = np.std(train_scores, axis=1)
    test_scores_mean = np.mean(test_scores, axis=1)
    test_scores_std = np.std(test_scores, axis=1)
    plt.grid()
    # shaded bands: +/- one standard deviation across the CV folds
    plt.fill_between(train_sizes, train_scores_mean - train_scores_std,
                     train_scores_mean + train_scores_std, alpha=0.1, color="r")
    plt.fill_between(train_sizes, test_scores_mean - test_scores_std,
                     test_scores_mean + test_scores_std, alpha=0.1, color="g")
    plt.plot(train_sizes, train_scores_mean, 'o-', color="r", label="Training score")
    plt.plot(train_sizes, test_scores_mean, 'o-', color="g", label="Cross-validation score")
    plt.legend(loc="best")
    return plt
# -

# from which I represent the learning curve of the SVC classifier:

# + _cell_guid="5e33dcc9-e613-4098-b282-70cd63c59768" _kg_hide-input=true _uuid="b89f7776830508bb4a4424c8cdecbe9b6765f43c"
# NOTE(review): ylim is given as (1.01, 0.6), i.e. top value first, so the
# y-axis is drawn inverted — confirm this is intentional
g = plot_learning_curve(svc.grid.best_estimator_, "SVC learning curves", X_train, Y_train,
                        ylim = [1.01, 0.6], cv = 5,
                        train_sizes = [0.05, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1])
# -

# On this curve, we can see that the train and cross-validation curves converge towards the same limit when the sample size increases.
This is typical of modeling with low variance and proves that the model does not suffer from overfitting. Also, we can see that the accuracy of the training curve is correct which is synonymous of a low bias. Hence the model does not underfit the data. # # ___ # ## Revised approach # # ### Logistic Regression # # I now consider the logistic regression classifier. As before, I create an instance of the `Class_Fit` class, adjust the model on the training data and see how the predictions compare to the real values: # + _cell_guid="c37cd77b-f96d-483b-bb57-14db65f97039" _kg_hide-input=true _uuid="9ee59b5060327224a4356be75ee1221b4e9fb0ab" lr = Class_Fit(clf = linear_model.LogisticRegression) lr.grid_search(parameters = [{'C':np.logspace(-2,2,20)}], Kfold = 5) lr.grid_fit(X = X_train, Y = Y_train) lr.grid_predict(X_test, Y_test) # - # Then, I plot the learning curve to have a feeling of the quality of the model: # + _cell_guid="8c4c95f0-8116-4501-ae86-7df54fa6a8dd" _kg_hide-input=true _uuid="203206e691175e8ff82cf72cfb34e82ffd2acf0c" g = plot_learning_curve(lr.grid.best_estimator_, "Logistic Regression learning curves", X_train, Y_train, ylim = [1.01, 0.7], cv = 5, train_sizes = [0.05, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1]) # - # ---- # ### k-Nearest Neighbors # + _cell_guid="93db008f-7525-4705-aa66-ca3e407bfa3f" _kg_hide-input=true _uuid="c898bc2cc957afd96dd317aecc160b23efe3c973" knn = Class_Fit(clf = neighbors.KNeighborsClassifier) knn.grid_search(parameters = [{'n_neighbors': np.arange(1,50,1)}], Kfold = 5) knn.grid_fit(X = X_train, Y = Y_train) knn.grid_predict(X_test, Y_test) # + _cell_guid="80b0d359-06f2-4f75-b394-c77eaefb6a37" _kg_hide-input=true _uuid="c10d7fca1e4070dd6a97e5d9f5879e3632d6e83c" g = plot_learning_curve(knn.grid.best_estimator_, "Nearest Neighbors learning curves", X_train, Y_train, ylim = [1.01, 0.7], cv = 5, train_sizes = [0.05, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1]) # - # ### Decision Tree # + 
_cell_guid="0a4cd7ef-f03c-4c7a-8b78-0dc26dab137b" _kg_hide-input=true _uuid="48dbd3e263d5b8113b8ef64677e6df4c20076a7c" tr = Class_Fit(clf = tree.DecisionTreeClassifier) tr.grid_search(parameters = [{'criterion' : ['entropy', 'gini'], 'max_features' :['sqrt', 'log2']}], Kfold = 5) tr.grid_fit(X = X_train, Y = Y_train) tr.grid_predict(X_test, Y_test) # + _cell_guid="896f795a-1cbb-4d08-9019-a1ab6963fc5c" _kg_hide-input=true _uuid="66fdbf731275232a3df7ef73fe54aeeef8dd0c33" g = plot_learning_curve(tr.grid.best_estimator_, "Decision tree learning curves", X_train, Y_train, ylim = [1.01, 0.7], cv = 5, train_sizes = [0.05, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1]) # - # ### Random Forest # + _cell_guid="191ce09c-a67d-434e-99e2-f186d9b21095" _kg_hide-input=true _uuid="5fd4df1e26bd7cdf10dfcf245fddafff86790f8c" rf = Class_Fit(clf = ensemble.RandomForestClassifier) param_grid = {'criterion' : ['entropy', 'gini'], 'n_estimators' : [20, 40, 60, 80, 100], 'max_features' :['sqrt', 'log2']} rf.grid_search(parameters = param_grid, Kfold = 5) rf.grid_fit(X = X_train, Y = Y_train) rf.grid_predict(X_test, Y_test) # + _cell_guid="8ea75385-53a0-42d2-8f14-0b0d89032b41" _kg_hide-input=true _uuid="8bd7e9d31969f8b5fc548ad856012d3027faa9a5" g = plot_learning_curve(rf.grid.best_estimator_, "Random Forest learning curves", X_train, Y_train, ylim = [1.01, 0.7], cv = 5, train_sizes = [0.05, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1]) # - # ### AdaBoost Classifier # + _cell_guid="91c38cf5-f042-47d8-b74e-f7a0f9718f26" _kg_hide-input=true _uuid="26780fd696188d6b3f8cd9dad2f187003148ee19" ada = Class_Fit(clf = AdaBoostClassifier) param_grid = {'n_estimators' : [10, 20, 30, 40, 50, 60, 70, 80, 90, 100]} ada.grid_search(parameters = param_grid, Kfold = 5) ada.grid_fit(X = X_train, Y = Y_train) ada.grid_predict(X_test, Y_test) # + _cell_guid="4d188502-2d63-49f6-96fc-51d08e381b39" _kg_hide-input=true _uuid="fbc49326d70e7ac314cc9c92f5baba69dc3cf7b8" g = 
plot_learning_curve(ada.grid.best_estimator_, "AdaBoost learning curves", X_train, Y_train, ylim = [1.01, 0.4], cv = 5, train_sizes = [0.05, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1]) # - # ### Gradient Boosting Classifier # + _cell_guid="d3cd367f-d2e9-498d-bb82-c1137dd21fdf" _kg_hide-input=true _uuid="f85e385c75291843a7c8db847aee48070f60e7ee" gb = Class_Fit(clf = ensemble.GradientBoostingClassifier) param_grid = {'n_estimators' : [10, 20, 30, 40, 50, 60, 70, 80, 90, 100]} gb.grid_search(parameters = param_grid, Kfold = 5) gb.grid_fit(X = X_train, Y = Y_train) gb.grid_predict(X_test, Y_test) # + _cell_guid="2110efe3-4466-4bcb-a74a-0950253d7f60" _kg_hide-input=true _uuid="eeca4c1c096b8a40ea910e2472d5fb6ca44ebb19" g = plot_learning_curve(gb.grid.best_estimator_, "Gradient Boosting learning curves", X_train, Y_train, ylim = [1.01, 0.7], cv = 5, train_sizes = [0.05, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1]) # - # ------ # ## Best approach # ### Let's vote ! # # Finally, the results of the different classifiers presented in the previous sections can be combined to improve the classification model. This can be achieved by selecting the customer category as the one indicated by the majority of classifiers. To do this, I use the `VotingClassifier` method of the `sklearn` package. 
As a first step, I adjust the parameters of the various classifiers using the *best* parameters previously found: # + _cell_guid="532dcc6f-33a1-4726-a07a-e4bb2ef0b396" _kg_hide-input=true _uuid="40c82b43dd34bb9b7685c76bea0f5ca7e761c1d4" rf_best = ensemble.RandomForestClassifier(**rf.grid.best_params_) gb_best = ensemble.GradientBoostingClassifier(**gb.grid.best_params_) svc_best = svm.LinearSVC(**svc.grid.best_params_) tr_best = tree.DecisionTreeClassifier(**tr.grid.best_params_) knn_best = neighbors.KNeighborsClassifier(**knn.grid.best_params_) lr_best = linear_model.LogisticRegression(**lr.grid.best_params_) # - # Then, I define a classifier that merges the results of the various classifiers: # + _cell_guid="7d80dd22-3e03-4069-b27d-cfad906a5acd" _kg_hide-input=true _uuid="9aedb584c04d4291460c4d30d215e6b2a33d879c" votingC = ensemble.VotingClassifier(estimators=[('rf', rf_best),('gb', gb_best), ('knn', knn_best)], voting='soft') # - # and train it: # + _cell_guid="bdd0e288-9e46-477b-94a1-a4bfa7fd9b93" _kg_hide-input=true _uuid="a89f838c7552464dd07c477ec3d9fd1aae91718c" votingC = votingC.fit(X_train, Y_train) # - # Finally, we can create a prediction for this model: # + _cell_guid="40d3aad9-7cdf-45b1-8848-863fd003dbe1" _kg_hide-input=true _uuid="d582d291f5e82fb666b2e7bc80acd2851c9d87c5" predictions = votingC.predict(X_test) print("Precision: {:.2f} % ".format(100*metrics.accuracy_score(Y_test, predictions))) # - # Note that when defining the `votingC` classifier, I only used a sub-sample of the whole set of classifiers defined above and only retained the *Random Forest*, the *k-Nearest Neighbors* and the *Gradient Boosting* classifiers. In practice, this choice has been done with respect to the performance of the classification carried out in the next section. # # ___ # ## Testing predictions # # In the previous section, a few classifiers were trained in order to categorize customers. Until that point, the whole analysis was based on the data of the first 10 months. 
In this section, I test the model the last two months of the dataset, that has been stored in the `set_test` dataframe: # + _cell_guid="3b0d4452-2d4d-4640-af80-ae4d95a18ebe" _kg_hide-input=true _uuid="230cb86d7b613e90c2e5a6b386779c8847d83d58" basket_price = set_test.copy(deep = True) # - # In a first step, I regroup reformattes these data according to the same procedure as used on the training set. However, I am correcting the data to take into account the difference in time between the two datasets and weights the variables ** count ** and ** sum ** to obtain an equivalence with the training set: # + _cell_guid="a71444d2-fde7-422a-98c8-018d2ed8faff" _kg_hide-input=true _uuid="ad3b38fbda3b8da6f9ea86c0edbcf02002d1f277" transactions_per_user=basket_price.groupby(by=['CustomerID'])['Basket Price'].agg(['count','min','max','mean','sum']) for i in range(5): col = 'categ_{}'.format(i) transactions_per_user.loc[:,col] = basket_price.groupby(by=['CustomerID'])[col].sum() /\ transactions_per_user['sum']*100 transactions_per_user.reset_index(drop = False, inplace = True) basket_price.groupby(by=['CustomerID'])['categ_0'].sum() #_______________________ # Correcting time range transactions_per_user['count'] = 5 * transactions_per_user['count'] transactions_per_user['sum'] = transactions_per_user['count'] * transactions_per_user['mean'] transactions_per_user.sort_values('CustomerID', ascending = True)[:5] # - # Then, I convert the dataframe into a matrix and retain only variables that define the category to which consumers belong. 
At this level, I recall the method of normalization that had been used on the training set: # + _cell_guid="ad4089ab-f703-483c-8d03-65f3c282c49a" _kg_hide-input=true _uuid="926c8e08380d645553e2c939c1bffd485e52b02e" list_cols = ['count','min','max','mean','categ_0','categ_1','categ_2','categ_3','categ_4'] #_____________________________________________________________ matrix_test = np.matrix(transactions_per_user[list_cols]) scaled_test_matrix = scaler.transform(matrix_test) # - # Each line in this matrix contains a consumer's buying habits. At this stage, it is a question of using these habits in order to define the category to which the consumer belongs. These categories have been established in Section 4. ** At this stage, it is important to bear in mind that this step does not correspond to the classification stage itself**. Here, we prepare the test data by defining the category to which the customers belong. However, this definition uses data obtained over a period of 2 months (via the variables ** count **, ** min **, ** max ** and ** sum **). The classifier defined in Section 5 uses a more restricted set of variables that will be defined from the first purchase of a client. # # Here it is a question of using the available data over a period of two months and using this data to define the category to which the customers belong. Then, the classifier can be tested by comparing its predictions with these categories. In order to define the category to which the clients belong, I recall the instance of the `kmeans` method used in section 4. 
The` predict` method of this instance calculates the distance of the consumers from the centroids of the 11 client classes and the smallest distance will define the belonging to the different categories: # + _cell_guid="7627cb8b-d69c-45b2-9460-e37f3440a2cf" _kg_hide-input=true _uuid="25274ee1b5fcb9ada2496fef0479ed26b8c90fea" Y = kmeans.predict(scaled_test_matrix) # - # Finally, in order to prepare the execution of the classifier, it is sufficient to select the variables on which it acts: # + _cell_guid="dc376e15-18a7-45d8-a941-804945f13d0c" _kg_hide-input=true _uuid="db8446dff217a860253c13c56bb6e30e03270e54" columns = ['mean', 'categ_0', 'categ_1', 'categ_2', 'categ_3', 'categ_4' ] X = transactions_per_user[columns] # - # It remains only to examine the predictions of the different classifiers that have been trained earlier # + _cell_guid="1226d84c-0b7b-4d0d-b55a-c9c39c9f0d9a" _kg_hide-input=true _uuid="a77b2c4f0fba4a51a4d8915e3cd684e3afd3b6fa" classifiers = [(svc, 'Support Vector Machine'), (lr, 'Logostic Regression'), (knn, 'k-Nearest Neighbors'), (tr, 'Decision Tree'), (rf, 'Random Forest'), (gb, 'Gradient Boosting')] #______________________________ for clf, label in classifiers: print(30*'_', '\n{}'.format(label)) clf.grid_predict(X, Y) # - # Finally, as anticipated in revised approach, it is possible to improve the quality of the classifier by combining their respective predictions. At this level, I chose to mix *Random Forest*, *Gradient Boosting* and *k-Nearest Neighbors* predictions because this leads to a slight improvement in predictions: # + _cell_guid="13682ddc-d732-4839-a314-ec1bcd00cf2a" _kg_hide-input=true _uuid="fb82ea972aae88f12a6466a785c7be659984579c" predictions = votingC.predict(X) print("Precision: {:.2f} % ".format(100*metrics.accuracy_score(Y, predictions))) # -
example_project/Cust_segmentation_online_retail.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # 1. Description # This notebook will produce the sepsis group (4,226 cases) and the non-sepsis group (23,170 cases) described in the paper "Prediction of the ICU mortality based on the missing events.". # # 2. Before running... # Before proceeding the followings, plaease solve the python environment accordingly first. This program requires the following libraries. import pandas as pd # 1.2.1 import numpy as np # 1.20.2 # Then please put the eICU files at the appropriate directory so that the program reaches the input files. To test if you correctly set the input files, run the below. If you could, the cell would not end without errors. df_patient = pd.read_csv("data/patient.csv") df_apachePredVar = pd.read_csv("data/apachePredVar.csv") df_apacheApsVar = pd.read_csv("data/apacheApsVar.csv") df_apachePatientResult = pd.read_csv("data/apachePatientResult.csv") # For getting the eICU files, please see "https://www.usa.philips.com/healthcare/solutions/enterprise-telehealth/eri" or "https://eicu-crd.mit.edu/gettingstarted/access/" # # 3. IDs for the sepsis group (4,226) # ## 3.1 Get all patients # Load df_patient = pd.read_csv("data/patient.csv") len(df_patient) # Save df_patient["patientunitstayid"].to_csv("ids/001_all_200859.csv", header=False, index=False) # ## 3.2 Select the sepsis group # Load df_apachePredVar = pd.read_csv("data/apachePredVar.csv") len(df_apachePredVar) # kick out the cases without admitdiagnosis. 
df_apachePredVar = df_apachePredVar.dropna(subset=['admitdiagnosis'])
len(df_apachePredVar)

# SEPSIS groups in eRI are recorded with a name starting with the word "SEPSIS"
df_apachePredVar.admitdiagnosis.head(10)

# Show the distinct admit diagnoses that contain "SEPSIS"
# (comment/code mismatch fixed: this exploratory cell uses str.contains,
#  while the actual selection below uses str.startswith)
df_apachePredVar[df_apachePredVar["admitdiagnosis"].str.contains("SEPSIS")]["admitdiagnosis"].unique()

# Select the sepsis group
df_sepsis = df_apachePredVar[df_apachePredVar["admitdiagnosis"].str.startswith("SEPSIS")]
# BUG FIX: the original displayed len(df_apachePredVar) here, i.e. the
# pre-selection count; the intended value is the size of the sepsis group
# (21,980, matching the filename below).
len(df_sepsis)

# Save
df_sepsis["patientunitstayid"].to_csv("ids/002_sepsis_21980.csv", header=False, index=False)

# ## 3.3 Select the cases where APS-related variables are not missing

# Load
df_apacheApsVar = pd.read_csv("data/apacheApsVar.csv")
len(df_apacheApsVar)

# The definition of APS
aps=[
    "eyes", "motor", "verbal", "wbc", "temperature", "respiratoryrate",
    "sodium", "heartrate", "meanbp", "ph", "hematocrit", "pao2", "pco2", "fio2"
]

# Exclude the cases that have a -1 (missing) value in any APS variable
for i in aps:
    df_apacheApsVar = df_apacheApsVar[df_apacheApsVar[i] != -1]
len(df_apacheApsVar)

# Merge
df_sepsis_aps = pd.merge(df_sepsis, df_apacheApsVar, on="patientunitstayid").drop_duplicates()
len(df_sepsis_aps)

# Save
df_sepsis_aps["patientunitstayid"].to_csv("ids/003_sepsis_4672.csv", header=False, index=False)

# ## 3.4 Select the cases where "actual mortality" is not missing

# Load
df_apachePatientResult = pd.read_csv("data/apachePatientResult.csv")
df_apachePatientResult = df_apachePatientResult[["patientunitstayid", "actualicumortality"]]

# Merge
df_sepsis_aps_mortality = pd.merge(df_sepsis_aps, df_apachePatientResult, on="patientunitstayid", how="inner").drop_duplicates()
len(df_sepsis_aps_mortality)

# Save
df_sepsis_aps_mortality["patientunitstayid"].to_csv("ids/004_sepsis_aps_4226.csv", header=False, index=False)

# # 4. IDs for the non-sepsis group (23,170)

# Load
df_apacheApsVar = pd.read_csv("data/apacheApsVar.csv")
len(df_apacheApsVar)

# Exclude the cases that have a -1 (missing) value in any APS variable
for i in aps:
    df_apacheApsVar = df_apacheApsVar[df_apacheApsVar[i] != -1]
len(df_apacheApsVar)

# Load
df_apachePatientResult = pd.read_csv("data/apachePatientResult.csv")
df_apachePatientResult = df_apachePatientResult[["patientunitstayid", "actualicumortality"]]

# Merge
df_aps_mortality = pd.merge(df_apacheApsVar, df_apachePatientResult, on="patientunitstayid", how="inner").drop_duplicates()
# BUG FIX: the original displayed len(df_sepsis_aps_mortality) here (a
# copy-paste from section 3.4); the dataframe just built is df_aps_mortality.
len(df_aps_mortality)

# Select IDs
ids_aps = set(df_aps_mortality["patientunitstayid"])
len(ids_aps)

# Select 4,226 IDs
ids_sepsis_aps = set(df_sepsis_aps_mortality["patientunitstayid"])
len(ids_sepsis_aps)

# Get non-sepsis group IDs (ids_sepsis_aps is already a set; extra set() removed)
ids_nonSepsis_aps = ids_aps - ids_sepsis_aps
len(ids_nonSepsis_aps)

# Save
df_non_sepsis = pd.DataFrame(list(ids_nonSepsis_aps))
df_non_sepsis.to_csv("ids/005_non_sepsis_aps_23170.csv", header=False, index=False)
1_the_sepsis_group_and_non_sepsis_group.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Coding Exercises (Part 2) # ## Full Data Workflow A-Z: Importing Data # ### Exercise 10: Importing Data from messy csv-files # Now, you will have the opportunity to analyze your own dataset. <br> # __Follow the instructions__ and insert your code! You are either requested to # - Complete the Code and __Fill in the gaps__. Gaps are marked with "__---__" and are __placeholders__ for your code fragment. # - Write Code completely __on your own__ # In some exercises, you will find questions that can only be answered, if your code is correct and returns the right output! The correct answer is provided below your coding cell. There you can check whether your code is correct. # If you need a hint, check the __Hints Section__ at the end of this Notebook. Exercises and Hints are numerated accordingly. # If you need some further help or if you want to check your code, you can also check the __solutions notebook__. # ### Have Fun! # -------------------------------------------------------------------------------------------------------------- # ## Option 1: Self_guided # __Import__ the cars Dataset from the messy csv-file __cars_raw.csv__ into a Pandas DataFrame. Use appropriate __parameters__ in the __pd.read_csv()__ method to bring the DataFrame into a clean format. __Columns__ should have the following __labels__: labels = ['mpg', 'cylinders', 'displacement', 'horsepower', 'weight', 'acceleration', 'model year', 'origin', 'name'] # Finally, __save__ and __export__ the dataset as new csv-file (__cars_imp.csv__). # -------------------------------- # ## Option 2: Guided and Instructed # # STOP HERE, IF YOU WANT TO DO THE EXERCISE ON YOUR OWN! 
# +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ # 77. __Import__ Pandas (pd)! # 78. __Import__ the csv-file __cars_raw.csv__ with the appropriate pandas method and __inspect__ the data! # Use appropriate __parameters__ in the __pd.read_csv()__ method to clean the format. The following issues need to be solved: # 79. __Remove__ the __first row(s)__ containing nonsense content. # 80. __Remove__ the __last row(s)__ containing nonsense content. # 81. Define that there are __no appropriate column labels/headers__ in the data. # 82. __Set__ the following __column labels/headers__: labels = ['mpg', 'cylinders', 'displacement', 'horsepower', 'weight', 'acceleration', 'model year', 'origin', 'name'] #complete the code and run the cell! pd.read_csv(---) # 83. Once you are happy with the import, __save__ the DataFrame in the variable __cars__! #complete the code and run the cell! cars = pd.read_csv(---) # run the cell! cars.head() # run the cell! cars.tail() # 84. __Export__ and __save__ cars as new csv-file (__cars_imp.csv__). Do __not__ export any __RangeIndex__! # __Reimport__ cars_imp.csv and check! #run the cell! pd.read_csv("cars_imp.csv") # # Well Done! # ---------------------------- # # Hints (Spoiler!) # 77. at this point, you should know this ;-) ! # 78. pd.read_csv("filename") # 79. parameter skiprows # 80. parameter skipfooter # 81. header = N--- # 82. parameter names # 83. see hints 79-82 # 84. to_csv() method
04 - Data Analysis With Pandas/assignments/Exercise_10.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python [conda env:pytorch_py37] # language: python # name: conda-env-pytorch_py37-py # --- # analyze error # !python tools/coco_error_analysis.py training_dir/packdet_R_50_FPN_1x_fe-128-12-2_m4_sep/inference/coco_2014_minival/bbox.json ./result --ann datasets/coco/annotations/instances_minival2014.json # + pycharm={"name": "#%%\n"} # classwise AP # !python tools/coco_eval.py training_dir/packdet_R_50_FPN_1x_fe-128-12-2_m4_sep/inference-2/coco_2014_minival/bbox.json \ # --ann datasets/coco/annotations/instances_minival2014.json --classwise
tools/eval_example.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # ### *cis*- and *trans*-QTL mapping with tensorQTL # # This notebook provides examples for running *cis*- and *trans*-QTL mapping with tensorQTL, using open-access data from the [GEUVADIS](https://www.ebi.ac.uk/arrayexpress/experiments/E-GEUV-1/) project. # # #### Requirements # An environment configured with a GPU and ~50GB of memory. # # #### Test dataset # # *Note: these files are provided for testing/benchmarking purposes only. They do not constitute an official release from the GEUVADIS project, and no quality-control was applied.* # # Genotypes in PLINK and VCF format, and normalized expression data are available [here](https://personal.broadinstitute.org/francois/geuvadis/). # # Alternatively, to download the files required for these examples, uncomment and run the cell below. 
# + # # !wget https://personal.broadinstitute.org/francois/geuvadis/GEUVADIS.445_samples.GRCh38.20170504.maf01.filtered.nodup.bed # # !wget https://personal.broadinstitute.org/francois/geuvadis/GEUVADIS.445_samples.GRCh38.20170504.maf01.filtered.nodup.bim # # !wget https://personal.broadinstitute.org/francois/geuvadis/GEUVADIS.445_samples.GRCh38.20170504.maf01.filtered.nodup.fam # # !wget https://personal.broadinstitute.org/francois/geuvadis/GEUVADIS.445_samples.covariates.txt # # !wget https://personal.broadinstitute.org/francois/geuvadis/GEUVADIS.445_samples.expression.bed.gz # + import pandas as pd import torch import tensorqtl from tensorqtl import genotypeio, cis, trans print('PyTorch {}'.format(torch.__version__)) print('Pandas {}'.format(pd.__version__)) # define paths to data plink_prefix_path = 'GEUVADIS.445_samples.GRCh38.20170504.maf01.filtered.nodup' expression_bed = 'GEUVADIS.445_samples.expression.bed.gz' covariates_file = 'GEUVADIS.445_samples.covariates.txt' prefix = 'GEUVADIS.445_samples' # load phenotypes and covariates phenotype_df, phenotype_pos_df = tensorqtl.read_phenotype_bed(expression_bed) covariates_df = pd.read_csv(covariates_file, sep='\t', index_col=0).T # PLINK reader for genotypes pr = genotypeio.PlinkReader(plink_prefix_path) genotype_df = pr.load_genotypes() variant_df = pr.bim.set_index('snp')[['chrom', 'pos']] # - # ### *cis*-QTL: nominal p-values for all variant-phenotype pairs # + # map all cis-associations (results for each chromosome are written to file) # all genes # cis.map_nominal(genotype_df, variant_df, phenotype_df, phenotype_pos_df, covariates_df, prefix) # genes on chr18 cis.map_nominal(genotype_df, variant_df, phenotype_df.loc[phenotype_pos_df['chr']=='chr18'], phenotype_pos_df.loc[phenotype_pos_df['chr']=='chr18'], prefix, covariates_df=covariates_df) # - # load results pairs_df = pd.read_parquet('{}.cis_qtl_pairs.chr18.parquet'.format(prefix)) pairs_df.head() # ### *cis*-QTL: empirical p-values for phenotypes # + # 
all genes # cis_df = cis.map_cis(genotype_df, variant_df, phenotype_df, phenotype_pos_df, covariates_df) # genes on chr18 cis_df = cis.map_cis(genotype_df, variant_df, phenotype_df.loc[phenotype_pos_df['chr']=='chr18'], phenotype_pos_df.loc[phenotype_pos_df['chr']=='chr18'], covariates_df=covariates_df, seed=123456) # - cis_df.head() # ### *trans*-QTL mapping # run mapping # to limit output size, only associations with p-value <= 1e-5 are returned trans_df = trans.map_trans(genotype_df, phenotype_df, covariates_df, batch_size=10000, return_sparse=True, pval_threshold=1e-5, maf_threshold=0.05) # remove cis-associations trans_df = trans.filter_cis(trans_df, phenotype_pos_df.T.to_dict(), variant_df, window=5000000) trans_df.head()
example/tensorqtl_examples.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # - research.baidu.com/Blog/index-view?id=89 # - www.reddit.com/r/MachineLearning/comments/7i8yhq/r_deep_learning_scaling_is_predictable/ # - supercomputersfordl2017.github.io/Presentations/scaling-is-predictable.pdf # - www.youtube.com/watch?v=XHRm3Ot5y2s # - pdfs.semanticscholar.org/e7a7/06562f313664441c54e379b04f898ee18c0e.pdf # - arthurdouillard.com/post/deep-learning-scaling/<br> # "Given the power law, researchers can train their new architecture on a small dataset, and have a good estimation of how it would scale on a bigger dataset. It may also give a reasonable estimation of the hardware and time requirements to reach a chosen generalization error." # - www.kdnuggets.com/2018/05/deep-learning-scaling-predictable-empirically.html<br> # "Finding better model architectures often depends on ‘unreliable epiphany,’ and as the results show, has limited impact compared to increasing the amount of data available. We’ve known this for some time of course, including from the 2009 Google paper, ‘The unreasonable effectiveness of data.’ The results from today’s paper help us to quantify the data advantage across a range of deep learning applications. 
The key to understanding is captured in the following equation:" # - **Power laws, Pareto distributions and Zipf’s law**<br> # arxiv.org/pdf/cond-mat/0412004.pdf # + # %load_ext autoreload # %autoreload 2 # %matplotlib inline import warnings warnings.filterwarnings('ignore') import os import sys from pathlib import Path from glob import glob import sklearn import numpy as np import pandas as pd from glob import glob from keras.models import load_model import matplotlib import matplotlib.pyplot as plt # - file_path = Path(os.getcwd()) file_path utils_path = file_path / '../../utils' sys.path.append(str(utils_path)) # import utils # from lrn_crv import * import lrn_crv # %matplotlib inline # Results dir maindir = file_path / '../../out/lrn_crv' # + from scipy import optimize def power_law_func_2prm(x, alpha, beta): return alpha * np.power(x, beta) def fit_power_law_2prm(x, y, p0: list=[30, -0.3]): """ Fit learning curve data (train set size vs ) to power-law. """ # def power_law_func(x, alpha, beta): return alpha * np.power(x, beta) prms, prms_cov = optimize.curve_fit(power_law_func_2prm, x, y, p0=p0) prms_dct = {} prms_dct['alpha'], prms_dct['beta'] = prms[0], prms[1], prms[2] return prms_dct def power_law_func_3prm(x, alpha, beta, gamma): """ docs.scipy.org/doc/numpy-1.15.0/reference/generated/numpy.power.html """ return alpha * np.power(x, beta) + gamma def fit_power_law_3prm(x, y, p0: list=[30, -0.3, 0.06]): """ Fit learning curve data (train set size vs ) to power-law. 
""" # def power_law_func(x, alpha, beta, gamma): return alpha * np.power(x, beta) + gamma prms, prms_cov = optimize.curve_fit(power_law_func_3prm, x, y, p0=p0) prms_dct = {} prms_dct['alpha'], prms_dct['beta'], prms_dct['gamma'] = prms[0], prms[1], prms[2] return prms_dct def power_law_func_4prm(x, alpha, beta, gamma1, gamma2): return alpha * np.power(x, beta) + gamma1 + gamma2 def fit_power_law_4prm(x, y, p0:list=[30, -0.3, 0.06, 0.12]): # def power_law_func(x, alpha, beta, gamma1, gamma2): return alpha * np.power(x, beta) + gamma1 + gamma2 prms, prms_cov = optimize.curve_fit(power_law_func_4prm, x, y, p0=p0) prms_dct = {} prms_dct['alpha'], prms_dct['beta'], prms_dct['gamma1'], prms_dct['gamma2'] = prms[0], prms[1], prms[2], prms[3] return prms_dct # + def scale_ticks_params(tick_scale='linear'): """ Helper function for learning cureve plots. Args: tick_scale : available values are [linear, log2, log10] """ if tick_scale == 'linear': base = None label_scale = 'Linear scale' else: if tick_scale == 'log2': base = 2 label_scale = 'Log2 scale' elif tick_scale == 'log10': base = 10 label_scale = 'Log10 scale' else: raise ValueError('The specified tick scale is not supported.') return base, label_scale def plot_lrn_crv_power_law(x, y, plot_fit:bool=True, metric_name:str='score', xtick_scale:str='log2', ytick_scale:str='log2', label=None, xlim:list=None, ylim:list=None, title:str=None, figsize=(7,5), ax=None): x = x.ravel() y = y.ravel() fontsize = 13 if ax is None: fig, ax = plt.subplots(figsize=figsize) # Plot raw data if label is None: label='data' ax.plot(x, y, '.-', color=None, label=label); # # Fit power-law (2 params) # power_law_params = fit_power_law_2prm(x, y) # yfit = power_law_func_2prm(x, **power_law_params) # Fit power-law (3 params) power_law_params = fit_power_law_3prm(x, y) yfit = power_law_func_3prm(x, **power_law_params) # # Fit power-law (4 params) # power_law_params = fit_power_law_4prm(x, y) # yfit = power_law_func_4prm(x, **power_law_params) # 
Plot fit if plot_fit: ax.plot(x, yfit, '--', color=None, label=f'{label} Trend'); basex, xlabel_scale = scale_ticks_params(tick_scale=xtick_scale) basey, ylabel_scale = scale_ticks_params(tick_scale=ytick_scale) ax.set_xlabel(f'Training Dataset Size ({xlabel_scale})', fontsize=fontsize) if 'log' in xlabel_scale.lower(): ax.set_xscale('log', basex=basex) ylabel = ' '.join(s.capitalize() for s in metric_name.split('_')) ax.set_ylabel(f'{ylabel} ({ylabel_scale})', fontsize=fontsize) if 'log' in ylabel_scale.lower(): ax.set_yscale('log', basey=basey) # ax.get_xaxis().set_major_formatter(matplotlib.ticker.ScalarFormatter()) # ax.get_yaxis().set_major_formatter(matplotlib.ticker.ScalarFormatter()) # Add equation (text) on the plot # matplotlib.org/3.1.1/gallery/text_labels_and_annotations/usetex_demo.html#sphx-glr-gallery-text-labels-and-annotations-usetex-demo-py # eq = r"$\varepsilon_{mae}(m) = \alpha m^{\beta} + \gamma$" + rf"; $\alpha$={power_law_params['alpha']:.2f}, $\beta$={power_law_params['beta']:.2f}, $\gamma$={power_law_params['gamma']:.2f}" # eq = rf"$\varepsilon(m) = {power_law_params['alpha']:.2f} m^{power_law_params['beta']:.2f} + {power_law_params['gamma']:.2f}$" # TODO: make this work eq = r"$\varepsilon(m) = \alpha m^{\beta}$" + rf"; $\alpha$={power_law_params['alpha']:.2f}, $\beta$={power_law_params['beta']:.2f}" # xloc = 2.0 * x.min() xloc = x.min() + 0.01*(x.max() - x.min()) yloc = y.min() + 0.9*(y.max() - y.min()) ax.text(xloc, yloc, eq, {'color': 'black', 'fontsize': fontsize, 'ha': 'left', 'va': 'center', 'bbox': {'boxstyle':'round', 'fc':'white', 'ec':'black', 'pad':0.2}}) # matplotlib.org/users/mathtext.html # ax.set_title(r"$\varepsilon_{mae}(m) = \alpha m^{\beta} + \gamma$" + rf"; $\alpha$={power_law_params['alpha']:.2f}, $\beta$={power_law_params['beta']:.2f}, $\gamma$={power_law_params['gamma']:.2f}"); if ylim is not None: ax.set_ylim(ylim) if xlim is not None: ax.set_ylim(xlim) if title is None: title='Learning curve (power-law)' 
ax.set_title(title) ax.legend(loc='best', frameon=True, fontsize=fontsize) ax.grid(True) # return fig, ax, power_law_params return ax, power_law_params # - def load_scores(run_dir, metric_name='mean_absolute_error', cv_folds=5): """ ... """ scores = pd.read_csv(run_dir/'lrn_crv_scores.csv') df = scores.loc[scores['tr_set']==False] data_sizes = sorted(df['tr_size'].unique()) aa = df[df['metric']==metric_name].reset_index(drop=True) aa.sort_values('tr_size', inplace=True) tr = aa[aa['tr_set']==True] vl = aa[aa['tr_set']==False] tr = tr.iloc[:, -cv_folds:] vl = vl.iloc[:, -cv_folds:] rslt = [] rslt.append(data_sizes) rslt.append(tr.values if tr.values.shape[0]>0 else None) rslt.append(vl.values if vl.values.shape[0]>0 else None) return rslt, scores def get_xy(scores, metric_name='mean_absolute_error'): """ Get x and y from scores (for specific run). """ dd = scores dd = dd[dd['metric']==metric_name] dd = dd[dd['tr_set']==False] dd.reset_index(drop=True, inplace=True) dd.drop(columns=['metric', 'tr_set'], inplace=True) x = dd.tr_size.values y = dd.iloc[:,1:].mean(axis=1).values return x, y # + # rslt_dir = maindir / 'ccle.lgb_reg.cvf5.rna.dsc.AUC' # rslt_dir = maindir / 'gdsc.lgb_reg.cvf5.rna.dsc.AUC' # rslt_dir = maindir / 'gdsc.lgb_reg.cvf1.rna.dsc.AUC' # rslt_dir = maindir / 'topNcode.lgb_reg.cvf5.rna.dsc.AUC_2019-7-2_h15-m29' rslt_dir = maindir / 'topNcode.lgb_reg.cvf10.rna.dsc.AUC_2019-8-13_h9-m15' metric_name = 'mean_absolute_error' rslt, scores = load_scores(rslt_dir, metric_name=metric_name, cv_folds=1) id0 = 0 rslt[0], rslt[2] = rslt[0][id0:], rslt[2][id0:] x, y = get_xy(scores, metric_name=metric_name) x, y = x[id0:], y[id0:] # + # x = np.concatenate((x[:4], x[5:])) # y = np.concatenate((y[:4], y[5:])) # - # xtick_scale='linear' xtick_scale='log2' i0 = 7 # 0 i1 = None # 17 fig, ax = lrn_crv.plot_lrn_crv( rslt, metric_name=metric_name, ylim=None, xtick_scale=xtick_scale, ytick_scale='linear'); # ax.plot(x[i0:i1], y[i0:i1], 'o', color='r'); 
ax.set_title(f'LGBM_Reg; index=[{i0}, {i1}]') # + ax, power_law_params = plot_lrn_crv_power_law( x[i0:i1], y[i0:i1], plot_fit=True, metric_name=metric_name, title=f'LGBM_Reg; index=[{i0}, {i1}]', xtick_scale=xtick_scale, ytick_scale='linear', figsize=(8,5)); ax.plot(x[i0:i1], y[i0:i1], 'o', color='r'); ax.set_title(f'LGBM_Reg; index=[{i0}, {i1}]') # fig, ax, power_law_params = lrn_crv.plot_lrn_crv_power_law( # x[i0:i1], y[i0:i1], plot_fit=True, metric_name=metric_name, title=f'LGBM_Reg; index=[{i0}, {i1}]', # xtick_scale=xtick_scale, ytick_scale='linear', figsize=(8,5)); print(power_law_params) # + # Fit data to the power-law region. Plot the fit for the entire recorded range. Plot the original data. yfit = power_law_func_3prm(x[i0:i1], **power_law_params) fig, ax = lrn_crv.plot_lrn_crv( rslt, metric_name=metric_name, ylim=None, xtick_scale=xtick_scale, ytick_scale='linear'); ax.plot(x[i0:i1], yfit, '--', color='r'); ax.set_title(f'LGBM_Reg; index=[{i0}, {i1}]') # - # # Generate learning curves from trained models # + dirpath = Path('/vol/ml/apartin/projects/pilot1/data/processed/data_splits/topNcode_cv_simple') xdata = pd.read_parquet(dirpath/'xdata.parquet') meta = pd.read_parquet(dirpath/'meta.parquet') target_name = 'AUC' ydata = meta[[target_name]] cv_folds = 1 tr_id = pd.read_csv( dirpath/f'{cv_folds}fold_tr_id.csv' ) vl_id = pd.read_csv( dirpath/f'{cv_folds}fold_vl_id.csv' ) src = dirpath.name.split('_')[0] # - from sklearn.preprocessing import StandardScaler, MinMaxScaler, RobustScaler scaler = StandardScaler() cols = xdata.columns xdata = pd.DataFrame(scaler.fit_transform(xdata), columns=cols, dtype=np.float32) # + # X = pd.DataFrame(xdata).values # Y = pd.DataFrame(ydata).values # del xdata, ydata # - lc = lrn_crv.LearningCurve( X=xdata, Y=ydata, cv=None, cv_lists=(tr_id, vl_id), n_shards=10, shard_step_scale='log10', args=None, logger=None, outdir='nowhere' ) lc.mltype = 'reg' del xdata, ydata, meta # modelsdir = 
# Directory holding the per-shard trained models (one 'cv1_*sz<N>' subdir per train size).
# modelsdir = Path('/vol/ml/apartin/projects/pilot1/out/lrn_crv/topNcode.nn_reg_2layer.sgd.cvf1.rna.dsc.AUC_2019-8-13_h11-m4')
modelsdir = Path('/vol/ml/apartin/projects/pilot1/out/lrn_crv/topNcode.nn_reg_4layer.sgd.cvf1.rna.dsc.AUC_2019-8-13_h11-m4')
lc.outdir = modelsdir

modelspath = glob(str(modelsdir/'cv1_*'))
print(len(modelspath))
# [m.split('/')[-1].split('sz')[-1] for m in modelspath]
# mpath = [m for m in modelspath if m.split('/')[-1].split('sz')[-1]==str(128)][0]
# print(mpath)


# +
# Helper functions. Defined BEFORE the evaluation loop below so the notebook
# also runs correctly top-to-bottom (the loop calls calc_preds/calc_scores/scores_to_df).
def calc_preds(model, x, y, mltype):
    """ Calc predictions for a fitted model.

    Args:
        model : fitted estimator exposing predict / predict_proba
        x : feature matrix
        y : ground-truth labels/targets (one-hot or 1-d for 'cls'; 1-d for 'reg')
        mltype : 'cls' or 'reg'
    Returns:
        (y_pred, y_true) as 1-d arrays.
    """
    if mltype == 'cls':
        if y.ndim > 1 and y.shape[1] > 1:
            # One-hot labels: collapse both predictions and truth to class ids.
            y_pred = model.predict_proba(x)
            y_pred = np.argmax(y_pred, axis=1)
            y_true = np.argmax(y, axis=1)  # FIX: was np.argmax(ydata, ...) -- referenced a deleted global instead of the `y` argument
        else:
            y_pred = model.predict_proba(x)
            y_pred = np.argmax(y_pred, axis=1)
            y_true = y
    elif mltype == 'reg':
        y_pred = model.predict(x)
        y_true = y
    return y_pred, y_true


def calc_scores(y_true, y_pred, mltype, metrics=None):
    """ Create dict of scores.
    Args:
        metrics : TODO allow to pass a string of metrics
    NOTE(review): relies on the name `sklearn` being bound at call time
    (the notebook header imports submodules) -- confirm `import sklearn`
    happens upstream.
    """
    scores = {}

    if mltype == 'cls':
        scores['auroc'] = sklearn.metrics.roc_auc_score(y_true, y_pred)
        scores['f1_score'] = sklearn.metrics.f1_score(y_true, y_pred, average='micro')
        scores['acc_blnc'] = sklearn.metrics.balanced_accuracy_score(y_true, y_pred)

    elif mltype == 'reg':
        scores['r2'] = sklearn.metrics.r2_score(y_true=y_true, y_pred=y_pred)
        scores['mean_absolute_error'] = sklearn.metrics.mean_absolute_error(y_true=y_true, y_pred=y_pred)
        scores['median_absolute_error'] = sklearn.metrics.median_absolute_error(y_true=y_true, y_pred=y_pred)
        scores['mean_squared_error'] = sklearn.metrics.mean_squared_error(y_true=y_true, y_pred=y_pred)
        # scores['auroc_reg'] = reg_auroc(y_true=y_true, y_pred=y_pred)

    # # https://scikit-learn.org/stable/modules/model_evaluation.html
    # for metric_name, metric in metrics.items():
    #     if isinstance(metric, str):
    #         scorer = sklearn.metrics.get_scorer(metric_name) # get a scorer from string
    #         scores[metric_name] = scorer(ydata, pred)
    #     else:
    #         scores[metric_name] = scorer(ydata, pred)
    return scores


def scores_to_df(scores_all):
    """ Convert a list of per-(fold, size, set) score dicts into a wide dataframe:
    one row per (metric, tr_size, tr_set), one column per fold. (tricky commands) """
    df = pd.DataFrame(scores_all)
    df = df.melt(id_vars=['fold', 'tr_size', 'tr_set'])
    df = df.rename(columns={'variable': 'metric'})
    df = df.pivot_table(index=['metric', 'tr_size', 'tr_set'], columns=['fold'], values='value')
    df = df.reset_index(drop=False)
    df.columns.name = None
    return df
# -


# +
# Start nested loop of train size and cv folds.
tr_scores_all = [] # list of dicts
vl_scores_all = [] # list of dicts

# CV loop
# for fold, (tr_k, vl_k) in enumerate(zip( self.tr_dct.keys(), self.vl_dct.keys() )):
for fold, (tr_k, vl_k) in enumerate(zip( lc.tr_dct.keys(), lc.vl_dct.keys() )):
    if lc.logger is not None:
        lc.logger.info(f'Fold {fold+1}/{lc.cv_folds}')
    print(f'Fold {fold+1}/{lc.cv_folds}')

    tr_id = lc.tr_dct[tr_k]
    vl_id = lc.vl_dct[vl_k]

    # Samples from this dataset are randomly sampled for training
    xtr = lc.X[tr_id, :]
    ytr = lc.Y[tr_id, :]

    # A fixed set of validation samples for the current CV split
    xvl = lc.X[vl_id, :]
    yvl = np.squeeze(lc.Y[vl_id, :])

    # Shards loop (iterate across the dataset sizes and evaluate)
    # np.random.seed(random_state)
    # idx = np.random.permutation(len(xtr))
    idx = np.arange(len(xtr))
    for i, tr_sz in enumerate(lc.tr_shards):
        # For each shard: load the saved best model, calc tr_scores, calc vl_scores
        if lc.logger:
            lc.logger.info(f'\tTrain size: {tr_sz} ({i+1}/{len(lc.tr_shards)})')
        print(f'\tTrain size: {tr_sz} ({i+1}/{len(lc.tr_shards)})')

        # Sequentially get a subset of samples (the input dataset X must be shuffled)
        xtr_sub = xtr[idx[:tr_sz], :]
        ytr_sub = np.squeeze(ytr[idx[:tr_sz], :])

        # Get the estimator
        # estimator = ml_models.get_model(self.model_name, init_kwargs=self.init_kwargs)
        # model = estimator.model

        # Load the model that was trained on exactly this shard size
        # (dir name encodes the size as '...sz<tr_sz>').
        # ------------------------------------------------------------------------------------------------------------
        mpath = [m for m in modelspath if m.split('/')[-1].split('sz')[-1]==str(tr_sz)][0]
        model = load_model(str(Path(mpath)/'model_best.h5'))
        # ------------------------------------------------------------------------------------------------------------

        # # Train
        # lc.eval_frac = 0.1 # 0.1 # used for early stopping
        # eval_samples = int(lc.eval_frac*xvl.shape[0])
        # eval_set = (xvl[:eval_samples, :], yvl[:eval_samples])
        # if lc.framework=='lightgbm':
        #     model = lc.trn_lgbm_model(model=model, xtr_sub=xtr_sub, ytr_sub=ytr_sub, fold=fold, tr_sz=tr_sz, eval_set=eval_set)
        # elif lc.framework=='keras':
        #     model = lc.trn_keras_model(model=model, xtr_sub=xtr_sub, ytr_sub=ytr_sub, fold=fold, tr_sz=tr_sz, eval_set=eval_set)
        # elif lc.framework=='pytorch':
        #     pass
        # else:
        #     raise ValueError(f'framework {self.framework} is not supported.')

        # Calc preds and scores TODO: dump preds
        # ... training set
        y_pred, y_true = calc_preds(model, x=xtr_sub, y=ytr_sub, mltype=lc.mltype)
        tr_scores = calc_scores(y_true=y_true, y_pred=y_pred, mltype=lc.mltype, metrics=None)
        # ... val set
        y_pred, y_true = calc_preds(model, x=xvl, y=yvl, mltype=lc.mltype)
        vl_scores = calc_scores(y_true=y_true, y_pred=y_pred, mltype=lc.mltype, metrics=None)

        # Add metadata
        tr_scores['tr_set'] = True
        tr_scores['fold'] = 'fold'+str(fold)
        tr_scores['tr_size'] = tr_sz
        vl_scores['tr_set'] = False
        vl_scores['fold'] = 'fold'+str(fold)
        vl_scores['tr_size'] = tr_sz

        # Append scores (dicts)
        tr_scores_all.append(tr_scores)
        vl_scores_all.append(vl_scores)

        # Delete the estimator/model
        del model

        # Dump intermediate results (this is useful if the run terminates before run ends)
        # tr_df_tmp = scores_to_df(tr_scores_all)
        # vl_df_tmp = scores_to_df(vl_scores_all)
        scores_all_df_tmp = pd.concat([scores_to_df(tr_scores_all), scores_to_df(vl_scores_all)], axis=0)
        scores_all_df_tmp.to_csv( lc.outdir / ('lrn_crv_scores_cv' + str(fold+1) + '.csv'), index=False )

# Scores to df
tr_scores_df = scores_to_df( tr_scores_all )
vl_scores_df = scores_to_df( vl_scores_all )
scores_df = pd.concat([tr_scores_df, vl_scores_df], axis=0)

# Dump final results
tr_scores_df.to_csv( lc.outdir/'tr_lrn_crv_scores.csv', index=False)
vl_scores_df.to_csv( lc.outdir/'vl_lrn_crv_scores.csv', index=False)
scores_df.to_csv( lc.outdir/'lrn_crv_scores.csv', index=False)

# Plot learning curves
# if plot:
#     plot_lrn_crv_all_metrics( scores_df, outdir=self.outdir )
# -

del xtr, ytr, xvl, yvl, xtr_sub, ytr_sub

# # Latest runs

# +
# Result directories for the three model families being compared.
lgb_dir = maindir / 'topNcode.lgb_reg.cvf10.rna.dsc.AUC_2019-8-13_h9-m15'
nn_2layer_dir = maindir / 'topNcode.nn_reg_2layer.sgd.cvf1.rna.dsc.AUC_2019-8-13_h11-m4'
nn_4layer_dir = maindir / 'topNcode.nn_reg_4layer.sgd.cvf1.rna.dsc.AUC_2019-8-13_h11-m4'

# lgb_dir = maindir / 'top6.lgb_reg.cvf5.rna.dsc.AUC1_2019-6-20_h15-m29'
# nn_2layer_dir = maindir / 'top6.nn_model0.sgd.cvf5.rna.dsc.AUC1_2019-6-21_h23-m20'
# nn_4layer_dir = maindir / 'top6.nn_model1.sgd.cvf5.rna.dsc.AUC1_2019-6-22_h22-m44'
# -

# +
# xtick_scale='linear'
xtick_scale='log2'

# Shard-index window used to restrict the power-law fit.
i0 = 0 # 0
i1 = None # 17
metric_name = 'mean_absolute_error'

# Load the per-run learning-curve results and scores for each model family.
rslt_lgb, scores_lgb = load_scores(lgb_dir, metric_name=metric_name, cv_folds=1)
rslt_2l, scores_2l = load_scores(nn_2layer_dir, metric_name=metric_name, cv_folds=1)
rslt_4l, scores_4l = load_scores(nn_4layer_dir, metric_name=metric_name, cv_folds=1)

def fnc(rslt, scores, id0, metric_name, i0, i1):
    """Trim the first i0 shards from the rslt arrays (in place) and extract the
    (train-size, score) curve for metric_name.

    NOTE(review): the `id0` and `i1` parameters are unused inside the function;
    also the call sites below pass a global `id0` that is only defined in
    commented-out code here -- confirm it is defined in an earlier cell.
    """
    rslt[0], rslt[2] = rslt[0][i0:], rslt[2][i0:]
    x, y = get_xy(scores, metric_name=metric_name)
    # x, y = x[i0:i1], y[i0:i1]
    return rslt, x, y

# Drop the smallest 12 shards before fitting (power law holds only in the large-data region).
i0 = 12
i1 = None
rslt_lgb, x_lgb, y_lgb = fnc(rslt_lgb, scores_lgb, id0, metric_name, i0, i1)
rslt_2l, x_2l, y_2l = fnc(rslt_2l, scores_2l, id0, metric_name, i0, i1)
rslt_4l, x_4l, y_4l = fnc(rslt_4l, scores_4l, id0, metric_name, i0, i1)

# rslt_2l[0], rslt_2l[2] = rslt_2l[0][id0:], rslt_2l[2][id0:]

# id0 = 0
# rslt[0], rslt[2] = rslt[0][id0:], rslt[2][id0:]
# x, y = get_xy(scores, metric_name=metric_name)
# x, y = x[id0:], y[id0:]

# +
# ax = lrn_crv.plot_lrn_crv(
#     rslt_2l, metric_name=metric_name, ylim=None,
#     xtick_scale=xtick_scale, ytick_scale='linear');
# ax = lrn_crv.plot_lrn_crv(
#     rslt_4l, metric_name=metric_name, ylim=None,
#     xtick_scale=xtick_scale, ytick_scale='linear', ax=ax);
# ax = lrn_crv.plot_lrn_crv(
#     rslt_lgb, metric_name=metric_name, ylim=None,
#     xtick_scale=xtick_scale, ytick_scale='linear', ax=ax);
# # ax.plot(x[i0:i1], y[i0:i1], 'o', color='r'); ax.set_title(f'LGBM_Reg; index=[{i0}, {i1}]')

# +
# Power-law fits for the two NN families, drawn on a shared axis.
ax, power_law_params = plot_lrn_crv_power_law(
    x_2l[i0:i1], y_2l[i0:i1], plot_fit=True, metric_name=metric_name,
    xtick_scale=xtick_scale, ytick_scale='linear', label='2-layer NN', figsize=(8,5));
print(power_law_params)

ax, power_law_params = plot_lrn_crv_power_law(
    x_4l[i0:i1], y_4l[i0:i1], plot_fit=True, metric_name=metric_name,
    xtick_scale=xtick_scale, ytick_scale='linear', label='4-layer NN', figsize=(8,5), ax=ax);
print(power_law_params)

# ax, power_law_params = plot_lrn_crv_power_law(
#     x_lgb[i0:i1], y_lgb[i0:i1], plot_fit=True,
#     label='LightGBM', metric_name=metric_name,
#     xtick_scale=xtick_scale, ytick_scale='linear', figsize=(8,5), ax=ax);
# print(power_law_params)

# ax.plot(x_2l[i0:i1], y_2l[i0:i1], 'o', color='r'); ax.set_title(f'Two-layer NN; index=[{i0}, {i1}]')
# -

# Add the 4-layer NN learning curve to the existing axis.
ax = lrn_crv.plot_lrn_crv(
    rslt_4l, metric_name=metric_name, ylim=None,
    xtick_scale=xtick_scale, ytick_scale='linear', ax=ax);
# ax.plot(x[i0:i1], y[i0:i1], 'o', color='r'); ax.set_title(f'LGBM_Reg; index=[{i0}, {i1}]')

# # Old runs (lgb, nn1, nn2)

# +
# Result directories from the earlier (top6) experiments.
# run_dir = maindir / 'gdsc.lgb_reg.cvf5.rna.dsc.AUC_2019-7-2_h17-m37'
lgb_dir = maindir / 'top6.lgb_reg.cvf5.rna.dsc.AUC1_2019-6-20_h15-m29'
nn_reg0_dir = maindir / 'top6.nn_model0.sgd.cvf5.rna.dsc.AUC1_2019-6-21_h23-m20'
nn_reg1_dir = maindir / 'top6.nn_model1.sgd.cvf5.rna.dsc.AUC1_2019-6-22_h22-m44'

# nn_reg0_dir = maindir / 'top6.nn_model0.clr.cvf5.rna.dsc.AUC1_2019-6-20_h16-m49'
# nn_reg1_dir = maindir / 'top6.nn_model1.clr.cvf5.rna.dsc.AUC1_2019-6-20_h16-m50'

# +
metric_name='mean_absolute_error'
lgb_rslt, lgb_scores = load_scores(lgb_dir, metric_name=metric_name)
nn_reg0_rslt, nn_reg0_scores = load_scores(nn_reg0_dir, metric_name=metric_name)
nn_reg1_rslt, nn_reg1_scores = load_scores(nn_reg1_dir, metric_name=metric_name)

# xtick_scale='linear'
xtick_scale='log2'
ylim=None
# ylim=[0.065, 0.110]

# +
# Power-law fit for each of the three old runs.
x, y = get_xy(lgb_scores, metric_name=metric_name)
# fig, ax, power_law_params = plot_lrn_crv_power_law(
#     x, y, plot_fit=True, metric_name=metric_name, xtick_scale='log2', ytick_scale='linear', ylim=[0.065, 0.110], figsize=(8,5));
fig, ax, power_law_params = lrn_crv.plot_lrn_crv_power_law(
    x, y, plot_fit=True, metric_name=metric_name, title='lgb_reg', ylim=ylim,
    xtick_scale=xtick_scale, ytick_scale='linear', figsize=(8,5));

# +
x, y = get_xy(nn_reg0_scores, metric_name=metric_name)
# fig, ax, power_law_params = plot_lrn_crv_power_law(
#     x, y, plot_fit=True, metric_name=metric_name, xtick_scale='log2', ytick_scale='linear', ylim=[0.065, 0.110], figsize=(8,5));
fig, ax, power_law_params = lrn_crv.plot_lrn_crv_power_law(
    x, y, plot_fit=True, metric_name=metric_name, title='nn_model0', ylim=ylim,
    xtick_scale=xtick_scale, ytick_scale='linear', figsize=(8,5));

# +
x, y = get_xy(nn_reg1_scores, metric_name=metric_name)
# fig, ax, power_law_params = plot_lrn_crv_power_law(
#     x, y, plot_fit=True, metric_name=metric_name, xtick_scale='log2', ytick_scale='linear', ylim=[0.065, 0.110], figsize=(8,5));
fig, ax, power_law_params = lrn_crv.plot_lrn_crv_power_law(
    x, y, plot_fit=True, metric_name=metric_name, title='nn_model1', ylim=ylim,
    xtick_scale=xtick_scale, ytick_scale='linear', figsize=(8,5));
# -

# +
# Scratch experiment: construct shard-size fractions on a log scale for a
# hypothetical dataset of x samples.
x = 200000
base = 2
n_shards = 5
# shard_frac_small = list(np.logspace(start=0.0, stop=1.0, num=n_shards, endpoint=True, base=base)/(x/10))
shard_frac_small = list(np.logspace(start=0.0, stop=1.0, num=n_shards, endpoint=True, base=base)/base/10)
# shard_frac_small = list(np.linspace(start=10, stop=int(0.1*x), num=n_shards, endpoint=False)/x)
print(shard_frac_small)

shard_frac = list(np.logspace(start=0.0, stop=1.0, num=n_shards, endpoint=True, base=base)/base)
print(shard_frac)

shard_frac.extend(shard_frac_small)
shard_frac = np.array( sorted(list(set(shard_frac))) )
t = x * shard_frac
print([v for v in t])
# -

t

np.array(np.arange(10))

# Scratch experiment: powers of 2 capped at the dataset size x.
v = 2**np.array(np.arange(20))
print(v)
idx = np.argmin( np.abs( v - x ) )
print(idx)
if v[idx] > x:
    idx -= 1
v = list(v[:idx+1])
# FIX: removed a stray `v.a` expression here -- `v` is a list, so the attribute
# access raised AttributeError and aborted the cell.
v.append(x)
print(v)

np.min([v[idx], x])

# NOTE(review): the closing paren below applies np.abs BEFORE subtracting x
# (unlike the corrected version above); left as-is since it is scratch output.
np.argmin(np.abs(2**np.array(np.arange(20))) - x)

2**np.array(np.arange(20))

np.exp(np.arange(5))

[10, 50, 100, 150, 200, 500, 1000, 2000, 3000, 4000, 5000]

0.1/100*x

2**10

# # Test power plots

# +
# Pull the validation-set MAE curve (fold column 'f0') out of the scores table.
dd = scores
dd = dd[dd['metric']=='mean_absolute_error']
dd = dd[dd['tr_set']==False]
dd.reset_index(drop=True, inplace=True)
dd.drop(columns=['metric', 'tr_set'], inplace=True)
x = dd.tr_size
y = dd.f0
# -

power_law_params = fit_power_law(x, y)
yfit = power_law_func(dd['tr_size'], **power_law_params)
print(power_law_params)

print(np.log2(x).values)
print(np.log2(y).values)

fig, ax, power_law_params = plot_learning_curve_power_law(x, y, plot_fit=True, xtick_scale='log2', ytick_scale='log2', figsize=(8,5));

fig, ax, power_law_params = plot_learning_curve_power_law(x, y, plot_fit=True, xtick_scale='log2', ytick_scale='linear', figsize=(8,5));

# +
# fig, ax = plt.subplots()
# ax.loglog(x, y, '.-', color='b', basex=2, basey=2);
# ax.loglog(x, yfit, '--', color='r', basex=2, basey=2);
# ax.set_xlim([2**int(np.floor(np.log2(x.values[0]))), 2**int(np.ceil(np.log2(x.values[-1])))])
# ax.set_title(r"$\varepsilon_{mae}(m) = \alpha m^{\beta} + \gamma$" + rf"; $\alpha$={power_law_params['alpha']:.2f}, $\beta$={power_law_params['beta']:.2f}, $\gamma$={power_law_params['gamma']:.2f}");
# ax.grid(True)

# +
# plt.figure()
# plt.plot(np.log2(x), y, '.-', color='b');
# plt.plot(np.log2(x), yfit, '--', color='r');
# ax.set_xlim([2**int(np.floor(np.log2(x.values[0]))), 2**int(np.ceil(np.log2(x.values[-1])))])
# plt.title(r"$\varepsilon_{mae}(m) = \alpha m^{\beta} + \gamma$" + rf"; $\alpha$={power_law_params['alpha']:.2f}, $\beta$={power_law_params['beta']:.2f}, $\gamma$={power_law_params['gamma']:.2f}");
# plt.grid(True)
# -

print(x.values)
print(np.log(x.values))

np.exp(12.33059293)

2**int(np.floor(np.log2(x.values[0])))

2**int(np.ceil(np.log2(x.values[-1])))

np.log2(y)

# # Plotting learning curves on log scale
#
# - https://jakevdp.github.io/PythonDataScienceHandbook/04.10-customizing-ticks.html
# - https://stackoverflow.com/questions/14530113/set-ticks-with-logarithmic-scale
# - https://stackoverflow.com/questions/21920233/matplotlib-log-scale-tick-label-number-formatting/33213196

fig, ax = plt.subplots()
ax.plot([10, 100, 1000], [1,2,3], 'o-')
ax.set_xscale('linear')
# ax.set_xticks([10, 100, 1000])
ax.get_xaxis().set_major_formatter(matplotlib.ticker.ScalarFormatter())
ax.grid(True)

fig, ax = plt.subplots()
ax.plot([10, 100, 1000], [1,2,3], 'o-')
ax.set_xscale('log', basex=10)
# ax.set_xticks([10, 100, 1000])
ax.get_xaxis().set_major_formatter(matplotlib.ticker.ScalarFormatter())
ax.grid(True)

fig, ax = plt.subplots()
ax.plot([10, 100, 1000], [1,2,3], 'o-')
ax.set_xscale('log', basex=2)
# ax.set_xticks([10, 100, 1000])
ax.get_xaxis().set_major_formatter(matplotlib.ticker.ScalarFormatter())
ax.grid(True)

fig, ax = plt.subplots()
ax.plot([10, 100, 1000], [1,2,3], 'o-')
ax.set_xscale('log', basex=2)
# ax.set_xticks([20, 200, 500])
ax.set_xlim([0, 10000])
ax.get_xaxis().set_major_formatter(matplotlib.ticker.ScalarFormatter())
ax.grid(True)

np.array([2**x for x in range(11)])

fig1, ax1 = plt.subplots()
ax1.plot([10, 100, 1000], [1,2,3])
ax1.set_xscale('log')
ax1.set_xticks([20, 200, 500])
ax1.get_xaxis().set_major_formatter(matplotlib.ticker.ScalarFormatter())
ax1.grid(True)
LearningCurves/power_law_lrn_crv.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     name: python3
# ---

# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/peeyushsinghal/EVA/blob/main/S5-Assignment%20Solution/EVA_S5_Exp4.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>

# + [markdown] id="P9zJUsfthCOd"
# ### Importing Libraries

# + colab={"base_uri": "https://localhost:8080/"} id="kI-gD7_Xg3qk" outputId="f255439a-43a9-4c70-b9cc-0ca1ba3f1e9e"
import torch
from torchvision import datasets,transforms # importing datasets
# %matplotlib inline
# for not pop out rendering of images
import matplotlib.pyplot as plt # for visualizing images
import random # for random image index

import torch.nn as nn # for network
import torch.nn.functional as F # for forward method
import torch.optim as optim # for optimizer

# !pip install torchsummary
from torchsummary import summary # for model summary and params
from tqdm import tqdm # for beautiful model training updates

# + [markdown] id="0INCUzGmhGNa"
# Seed and Cuda

# + colab={"base_uri": "https://localhost:8080/"} id="7J3pr8HzhAWm" outputId="a59c08c7-5162-4c7a-b6a4-62d23fd06c5c"
# check for cuda
cuda = torch.cuda.is_available()
print (f' Cuda Status : {cuda}')

# setting seed
SEED = 42 # arbit seed, why 42 - because in hitch hikers guide to galaxy it is answer to everything
# torch.cuda.seed(SEED)
torch.cuda.manual_seed_all(SEED) if cuda else torch.manual_seed(SEED)

# + [markdown] id="44OF4A2Vim_K"
# ### Downloading dataset, splitting datasets
# loading dataset

# + colab={"base_uri": "https://localhost:8080/"} id="SzR4-_gpimYO" outputId="78eadc7a-3836-4544-c1cd-2c4e9713970b"
train = datasets.MNIST(
    root = './',# directory where data needs to be stored
    train = True, # get the training portion of the dataset
    download = True, # downloads
    transform = transforms.Compose([
        transforms.RandomRotation((-7.0, 7.0), fill=(1,)), #random rotation (train-time augmentation only)
        transforms.ToTensor(),# converts to tensor
        transforms.Normalize((0.1307,), (0.3081,))# Normalize
    ])
)

test = datasets.MNIST(
    root = './',# directory where data needs to be stored
    train = False, # get the test portion of the dataset
    download = True, # downloads
    transform = transforms.Compose([
        transforms.ToTensor(),# converts to tensor
        transforms.Normalize((0.1307,), (0.3081,))# Normalize
    ])
)

# + [markdown] id="xLuDGGQymAJI"
# Train and Test Dataloader

# + colab={"base_uri": "https://localhost:8080/"} id="NuFFPtkZl6J0" outputId="3b30ddf8-829f-4dcf-e2b7-5a31efadcb04"
# NOTE(review): the same args (shuffle=True) are reused for the test loader;
# harmless for evaluation here, but confirm shuffling the test set is intended.
dataloader_args = dict(shuffle=True, batch_size=128, num_workers=4, pin_memory = True) if cuda else dict(shuffle=True, batch_size=64)

train_loader = torch.utils.data.DataLoader(
    dataset=train,# train dataset
    **dataloader_args # the dataloader arguments change dependent on cuda is available or not
)

test_loader = torch.utils.data.DataLoader(
    dataset = test,# test dataset
    **dataloader_args # the dataloader arguments change dependent on cuda is available or not
)

# + [markdown] id="O2MxbyBRouUE"
# Checking Dataloaders
# - sample data

# + colab={"base_uri": "https://localhost:8080/", "height": 371} id="ILUePRUDoz8i" outputId="0873365b-85af-451b-d1c8-8600eaf007d0"
images, labels = next(iter(train_loader))
print(images.shape)
print(labels.shape)

# printing random image and seeing
# FIX: random.randint is inclusive of both endpoints, so the upper bound must be
# len(images)-1 -- previously this could raise IndexError.
plt.imshow(images[random.randint(0, len(images)-1)].numpy().squeeze(), cmap='gray_r')

# + colab={"base_uri": "https://localhost:8080/", "height": 243} id="-1Qj8PftiVDT" outputId="c45b377a-f44f-4612-abde-9b28b05e787a"
# Looking at more images
figure = plt.figure()
for index in range(1, len(images) + 1): # assumption: batch size would be atleast 8
    plt.subplot(8, int(len(images)/8), index)
    plt.axis('off')
    plt.imshow(images[index-1].numpy().squeeze(), cmap='gray_r')

# + [markdown] id="2Eh0KmZyoJy7"
# ### Network
id="-mCXT71boJY5" class Network(nn.Module): def __init__(self): super(Network,self).__init__() # extending super class method drop_out_value = 0.1 # Input Block self.convblock1 = nn.Sequential( nn.Conv2d(1,10,3), # In- 1x28x28, Out- 10x26x26, RF- 3x3, Jump_in -1, Jump_out -1 nn.ReLU(), nn.BatchNorm2d(10), nn.Dropout(drop_out_value) ) # Conv Block 2 self.convblock2 = nn.Sequential( nn.Conv2d(10,10,3), # In- 10x26x26, Out- 10x24x24, RF- 5x5, Jump_in -1, Jump_out -1 nn.ReLU(), nn.BatchNorm2d(10), nn.Dropout(drop_out_value) ) # Conv Block 3 self.convblock3 = nn.Sequential( nn.Conv2d(10,10,3), # In- 10x24x24, Out- 10x22x22, RF- 7x7, Jump_in -1, Jump_out -1 nn.ReLU(), nn.BatchNorm2d(10), nn.Dropout(drop_out_value) ) self.convblock3_1 = nn.Sequential( nn.Conv2d(10,10,3), # In- 10x22x22, Out- 10x20x20, RF- 9x9, Jump_in -1, Jump_out -1 nn.ReLU(), nn.BatchNorm2d(10), nn.Dropout(drop_out_value) ) self.convblock3_2 = nn.Sequential( nn.Conv2d(10,10,3), # In- 10x20x20, Out- 10x18x18, RF- 11x11, Jump_in -1, Jump_out -1 nn.ReLU(), nn.BatchNorm2d(10), nn.Dropout(drop_out_value) ) #### Transition Block 1 self.pool1 = nn.MaxPool2d(kernel_size=2, stride=2) # In- 10x18x18 Out- 10x9x9 RF- 12x12, Jump_in - 1, Jump_out -2 # self.convblock4 = nn.Sequential( # nn.Conv2d(10,10,1), # nn.ReLU(), # nn.BatchNorm2d(10), # nn.Dropout(drop_out_value) # ) # Conv Block 5 self.convblock5 = nn.Sequential( nn.Conv2d(10,20,3), # In- 10x9x9 Out- 20x7x7 RF- 16x16, Jump_in -2, Jump_out -2 nn.ReLU(), nn.BatchNorm2d(20), nn.Dropout(drop_out_value) ) # Conv Block 6 self.convblock6 = nn.Sequential( nn.Conv2d(20,20,3), # In- 20x7x7 Out- 20x5x5 RF- 20x20, Jump_in -2, Jump_out -2 nn.ReLU(), nn.BatchNorm2d(20), nn.Dropout(drop_out_value) ) # Output Block self.convblock7 = nn.Sequential( nn.Conv2d(20,10,1), # In- 20x5x5, Out- 10x5x5, RF- 20x20, Jump_in -2, Jump_out -2 nn.ReLU(), nn.BatchNorm2d(10), nn.Dropout(drop_out_value) ) self.gap = nn.AvgPool2d(5) # In- 10x5x5, Out- 10x1x1, RF- 28x28, Jump_in -2, Jump_out -2 
def forward(self,x): x = self.convblock1(x) x = self.convblock2(x) x = self.convblock3(x) x= self.convblock3_1(x) x = self.convblock3_2(x) x = self.pool1(x) # x = self.convblock4(x) x = self.convblock5(x) x = self.convblock6(x) x = self.convblock7(x) x = self.gap(x) # Flattening x = x.view(-1,10) return F.log_softmax(x,dim=-1) # model = Network() # print(model) # + [markdown] id="4jXc4bV5zuSP" # ### Model Params # - Checking the model summary and number of parameters # + colab={"base_uri": "https://localhost:8080/"} id="Z7ozo1mBzstB" outputId="ed255ff6-6787-44c5-d1e9-1cd3a3d39c87" device = torch.device("cuda" if cuda else "cpu") print(device) model = Network().to(device) # print(model) summary(model, input_size=(1, 28, 28)) # + [markdown] id="YfLaULFf2TzM" # ### Training and Testing # - includes test and train functions # - includes loop function, where test can happen after each epoch is trained # # + id="cJDX1iZS2TCl" # Training Function train_losses = [] # to capture train losses over training epochs train_accuracy = [] # to capture train accuracy over training epochs def train(model,device, train_loader,optimizer,epoch): model.train() # setting the model in training mode pbar = tqdm(train_loader) # putting the iterator in pbar correct = 0 # for accuracy numerator processed =0 # for accuracy denominator for batch_idx, (images,labels) in enumerate(pbar): images, labels = images.to(device),labels.to(device)#sending data to CPU or GPU as per device optimizer.zero_grad() # setting gradients to zero to avoid accumulation y_preds = model(images) # forward pass, result captured in y_preds (plural as there are many images in a batch) # the predictions are in one hot vector loss = F.nll_loss(y_preds,labels) # capturing loss train_losses.append(loss) # to capture loss over many epochs loss.backward() # backpropagation optimizer.step() # updating the params preds = y_preds.argmax(dim=1, keepdim=True) # get the index of the max log-probability correct += 
preds.eq(labels.view_as(preds)).sum().item() processed += len(images) pbar.set_description(desc= f'Loss={loss.item()} Batch_id={batch_idx} Accuracy={100*correct/processed:0.2f}') train_accuracy.append(100*correct/processed) # + id="_N0GNPIABiUq" # Test Function test_losses = [] # to capture test losses test_accuracy = [] # to capture test accuracy def test(model,device, test_loader): model.eval() # setting the model in evaluation mode test_loss = 0 correct = 0 # for accuracy numerator with torch.no_grad(): for (images,labels) in test_loader: images, labels = images.to(device),labels.to(device)#sending data to CPU or GPU as per device outputs = model(images) # forward pass, result captured in outputs (plural as there are many images in a batch) # the outputs are in batch size x one hot vector test_loss = F.nll_loss(outputs,labels, reduction='sum').item() # sum up batch loss preds = outputs.argmax(dim=1, keepdim=True) # get the index of the max log-probability correct += preds.eq(labels.view_as(preds)).sum().item() test_loss /= len(test_loader.dataset) # average test loss test_losses.append(test_loss) # to capture loss over many batches print('\nTest set: Average loss: {:.4f}, Accuracy: {}/{} ({:.2f}%)\n'.format( test_loss, correct, len(test_loader.dataset), 100. 
* correct / len(test_loader.dataset))) test_accuracy.append(100*correct/len(test_loader.dataset)) # + colab={"base_uri": "https://localhost:8080/"} id="nzgGQO3lAY2t" outputId="6df8f6b7-760d-4f29-d7d5-929d2ac15054" model = Network().to(device) optimizer = optim.SGD(model.parameters(), lr=0.01, momentum=0.9) from torch.optim.lr_scheduler import StepLR scheduler = StepLR(optimizer, step_size=7, gamma=0.1) # EPOCHS = 1 EPOCHS = 15 for epoch in range(EPOCHS): print("EPOCH:", epoch) train(model, device, train_loader, optimizer, epoch) test(model, device, test_loader) # + colab={"base_uri": "https://localhost:8080/", "height": 624} id="_2ntHiknO_nE" outputId="0ccacfea-9143-479d-b087-2ef171baf845" # Graphs fig, axs = plt.subplots(2,2,figsize=(15,10)) axs[0, 0].plot(train_losses) axs[0, 0].set_title("Training Loss") axs[1, 0].plot(train_accuracy) axs[1, 0].set_title("Training Accuracy") axs[0, 1].plot(test_losses) axs[0, 1].set_title("Test Loss") axs[1, 1].plot(test_accuracy) axs[1, 1].set_title("Test Accuracy") # + [markdown] id="1GvTA_0VJH6e" # ### Conclusion # Experiment Number : 4 # # Objective / Target # 1. increase accuracy by increasing number of params # 2. include augmentation # 3. step learning rate # # # Results # - Parameters: 9,590 # - Best Train Accuracy: 98.72% # - Best Test Accuracy: 99.29% # # Analysis # 1. Not hitting the accuracy mark # 2. Overfitting (train - test accuracy) < 0 is largely containted # 3. Number of params < 10K # # Next Steps # - Look to increase accuracy
S5-Assignment Solution/EVA_S5_Exp4.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Lesson 6: Data Wrangling # # *Learn to prepare data for visualization and analytics.* # # ## Instructions # This tutorial provides step-by-step training divided into numbered sections. The sections often contain embeded exectable code for demonstration. This tutorial is accompanied by a practice notebook: [L06-Data_Wrangling-Practice.ipynb](./L06-Data_Wrangling-Practice.ipynb). # # Throughout this tutorial sections labeled as "Tasks" are interspersed and indicated with the icon: ![Task](http://icons.iconarchive.com/icons/sbstnblnd/plateau/16/Apps-gnome-info-icon.png). You should follow the instructions provided in these sections by performing them in the practice notebook. When the tutorial is completed you can turn in the final practice notebook. # ## Introduction # The purpose of this assignment is to build on Tidy data cleaning by using Python tools to "massage" or "wrangle" data into formats that are most useful for visualization and analytics. # # **What is data wrangling?** # # > Data wrangling, sometimes referred to as data munging, is the process of transforming and mapping data from one "raw" data form into another format with the intent of making it more appropriate and valuable for a variety of downstream purposes such as analytics. # # - [Data Wangling](https://en.wikipedia.org/wiki/Data_wrangling) *Wikipedia* # # Previously, we learned about Tidy rules for reformatting data. Transforming data into a Tidy dataset is data wrangling. We have also learned to how to correct data types, remove missing values and duplicates. This lessons is, therefore, an opportunity to bring everything together. Some of the material will be a review, but should help reinforce the concepts. # --- # ## 1. 
Getting Started # As before, we import any needed packages at the top of our notebook. Let's import Numpy and Pandas: import numpy as np import pandas as pd # --- # ## 2. Data Exploration # The first step in any data analytics task is import and exploration of data. At this point, we have learned all of the steps we need to identify the data columns, their data types, recognize where we have missing values and recognize categorical and numeric variables in the data. # # For this tutorial we will use a dataset named "Abolone" from the [University of California Irvine Machine Learning Repository](https://archive.ics.uci.edu/ml/datasets/Abalone). The datafile is named `abalone.data` and is available in the data directory that accompanies this notebook. The data has 10 "attributes" or variables. The following table describes these 10 variables, their types, and additional details. # # <table> # <tr><th>Name</th><th>Data Type</th><th>Metric</th><th>Description</th></tr> # <tr><td>Sample ID</td><td>integer</td><td></td><td>A unique number for each sample taken</td></tr> # <tr><td>Sex</td><td>nominal</td><td></td><td>M = 0, F = 1, and I = 2 (infant)</td></tr> # <tr><td>Length</td><td>continuous</td><td>mm</td><td>Longest shell measurement</td></tr> # <tr><td>Diameter</td><td>continuous</td><td>mm</td><td>perpendicular to length</td></tr> # <tr><td>Height</td><td>continuous</td><td>mm</td><td>with meat in shell</td></tr> # <tr><td>Whole weight</td><td>continuous</td><td>grams</td><td>whole abalone</td></tr> # <tr><td>Shucked weight</td><td>continuous</td><td>grams</td><td>weight of meat</td></tr> # <tr><td>Viscera weight</td><td>continuous</td><td>grams</td><td>gut weight (after bleeding)</td></tr> # <tr><td>Shell weight</td><td>continuous</td><td>grams</td><td>after being dried</td></tr> # <tr><td>Rings</td><td>integer</td><td></td><td>+1.5 gives the age in years</td></tr> # </table> # # ***Note:*** To demonstrate specific techniques of data wrangling, the dataset 
provided to you was altered: a sample ID column was added, the Sex column contains numeric IDs, and missing values were added as were duplicates. # # This data has no header information, so, we'll provide it when we import the data: abalone = pd.read_csv('data/abalone.data', header = None) abalone.columns = ['Sample_ID','Sex', 'Length', 'Diameter', 'Height', 'Whole_weight', 'Shucked_weight', 'Viscera_weight', 'Shell_weight', 'Rings'] abalone.head() # ### 2.1 Exploring Data Types # First, let's explore how Pandas imported the data types: abalone.dtypes # Other than the first, second and last columns, all others were imported as `float64` which is a decimal value. The others were imported as an `integer`. This looks correct for the data. # Let's get a sense of how big the data is: abalone.shape # Next, we can explore the distribution of numerical data using the `describe` function: abalone.describe() # Observe that even though the 'Sex' column was provided as a numeric value, it is actually meant to be categorical, with each sex represented as a unique number. We can explore the categorical data using the `groupby` function, followed by the `size` function. abalone.groupby(by=['Sex']).size() # ### 2.2 Finding Missing Values # Before proceeding with any analysis you should know the state of missing values in the dataset. For most analytics missing values are not supported. Some tools will automatically ignore them but it may be easier, in some cases, to remove them. # # First, let's quantify how many missing values we have. The `isna` function will convert the data into `True` or `False` values: `True` if the value is missing: abalone.isna().head() # We can use the `sum` function to then identify how many missing values we have per column: abalone.isna().sum() # ### 2.3 Inspecting Duplicates # Sometimes we may or may not want duplicates in the data. This depends on the expectations of the experiments and the measurements taken. 
Sometimes duplicates may represent human error in data entry. So, let's look for duplicated data. We have 4,184 rows, let's see how many unique values per column that we have: abalone.nunique() # For all of the columns we have fewer that 4,184 values. For columns like 'Sex' we have 3 unique values, but these repeated values are expected. The decimal values also have duplicates. The likelihood of seeing the exact same decimal values varies based on the distribution for the variable and the number of decimal values in the measurement. The number of duplicated values does not seem unordinary. However, the sample ID should be unique, yet we have 4,177 of them instead of 4,184. This implies we have duplicated samples in the data. # # We can identify then umber of duplicated 'Sample_ID' values are in the data by using the `duplicated` function. abalone.duplicated(subset='Sample_ID').sum() # We have 7 duplicated rows. Now let's see which rows have duplicated samples: abalone[abalone.duplicated(subset='Sample_ID', keep= False)] # It looks like the rows are exact duplicates, so this was probably human entry error. We need to remove the copies rows. We will do so in the **3.1 Filtering** section below. # --- # ## 3. Cleanup # ### 3.1 Correcting Data Types # During the data exploration phase above, we noticed that the Sex column was provided as a number to represent the Sex category, and therefore, Pandas imported that column as a numeric value. We need to convert that to a categorical value, because the meaning of the column is not ordinal or numeric. We should covert it to a string object. # # We can do that with two functions that work on Series: # - `astype` converts the type of data in the series. # - `replace` replaces values in the series. # # We'll use `astype` to convert the column to a string and `replace` to convert the numbers to more easily recognizable 'Male', 'Female' and 'Infant' strings. # + # First convert the column from an integer to a string. 
sex = abalone['Sex'].astype(str) # Second convert 0 to Male, 1 to Female, and 2 to Infant. sex = sex.replace('0', 'Male') sex = sex.replace('1', 'Female') sex = sex.replace('2', 'Infant') # Now replace the 'Sex' column of the dataframe with the new Series. abalone['Sex'] = sex abalone.head() # - # In addition, the Sample ID column, despite that it is numeric should not be treated as a numeric column, so let's convert that too: # + # Convert Sample_ID to a string abalone['Sample_ID'] = abalone['Sample_ID'].astype(str) # Let's check out the datatypes to make sure they match our expectations: abalone.dtypes # - # ### 3.2 Handling Missing Values # As observed in section 2.2, we do indeed have missing values! Let's remove rows with missing values. We can do so with the `dropna` function: abalone = abalone.dropna(axis=0) abalone.shape # Observe that the `axis` argument is set to 0 indicating we will remove rows with missing values. If we compare the `shape` of the dataframe now, with the shape when we first loaded it we will see that we have lost 2 rows with missing values. # # In addition to `dropna` you can also use the `fillna` and `replace` functions to rewrite the missing values to something else. # ### 3.3 Removing Duplicates # To remove duplicates we can use the [drop_duplicates](http://pandas.pydata.org/pandas-docs/version/0.17/generated/pandas.DataFrame.drop_duplicates.html) function of Pandas. If we explore the duplicated columns of section 2.3 above we'll see that the rows are the same for all columns. In this case we can call `drop_duplicates` with no arguments. However, let's assume we can't guarantee that each column is the same, but we do want to remove duplicated samples. We can do this by using the `subset` argument of the `drop_duplicates` function. We don't want to drop all duplicates, we need to keep one set. Therefore, we'll use the `keep` argument to do this. 
abalone = abalone.drop_duplicates(['Sample_ID'], keep='first') abalone.shape # In practice, the `keep` argument will default to `first` so we don't need to provide it, but including it makes the code more clear. We have now dropped all duplicated rows and we have 4,177 valid rows # --- # ## 4. Reshaping Data # Data reshaping is about altering the way data is housed in the data frames of Pandas. It includes filtering of rows, merging data frames, concatenating data frames, grouping, melting and pivoting. We have learned about all of these functions already. As a reminder, the following is a summary of what we've learned: # # **Subsetting by Column**: # - *Indexing with column names* # - Purpose: Allows you to slice the dataframe using column index names. # - Introduced: Pandas Part 1 Notebook # - Example: # ```python # # Get the columns: Sample_ID, Sex, Height and Rings # subset = abalone[['Sample_ID', 'Sex', 'Height', 'Rings']] # ``` # - *Indexing with the `loc` function* # - Purpose: Allows you to slice the dataframe using row and column index names. # - Introduced: Pandas Part 1 Notebook # - Example: # ```python # # Get the columns: Sample_ID, Sex, Height and Rings # subset = abalone.loc[:,['Sample_ID', 'Sex', 'Height', 'Rings']] # ``` # # **Filtering Rows**: # - *Boolean Indexing* # - Purpose: to filter rows that match desired criteria # - Introduced: Pandas Part 1 Notebook # - Example: # ```python # # Finds all rows with sex of "Male" and the number of rings > 3. # matches = (abalone['Sex'] == 'Male') & (abalone['Rings'] > 3) # male = abalone[matches] # # # Or more succinctly # male = abalone[(abalone['Sex'] == 'Male') & (abalone['Rings'] > 3)] # ``` # # **Grouping Data**: # - *`groupby` function* # - Purpose: To group rows together by "classes" or values of data. Allows you to perform aggregate functions, such as calculating means, summations, sizes, etc. You can create new data frames with aggregated values. # - Introduced: Pandas Part 2 Notebook. 
# - Example: # ```python # # Calculate the mean column value by each sex: # abalone.groupby(by="Sex").mean() # ``` # # **Merging DataFrames**: # - *`concat` function* # - Purpose: To combine two dataframes. Depending if the columns and row indexes are the same determines how the data frames are combined. # - Introduced: Pandas Part 2 Notebook. # # **Melting**: # - *`melt` function* # - Purpose: Handles the case where categorical observations are stored in the header labels (i.e. violates Tidy rules). It moves the header names into a new column and matches the corresponding values. # - Introduced: Tidy Part 1 Notebook. # # **Pivoting**: # - *`pivot` and `pivot_table` functions* # - Purpose: The opposite of `melt`. Uses unique values from one more columns to create new columns. # - Intorduced: Tidy Part 1 Notebook. # # You can use any of these functions/techniques to reshape the data to meet Tidy standards and appropriate for the analytic or visualization you want to perform.
L06-Data_Wrangling-Lesson.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# EEG session classification: load a pickled OpenBCI recording, slice it into
# contiguous same-condition blocks, and train a masked LSTM classifier on the
# resulting variable-length sequences.

# +
import os
# os.environ["CUDA_VISIBLE_DEVICES"]="0"
from math import sqrt
from numpy import concatenate
from matplotlib import pyplot
from pandas import read_csv
from pandas import DataFrame
from pandas import concat
from sklearn.preprocessing import MinMaxScaler
from sklearn.preprocessing import LabelEncoder
from sklearn.metrics import mean_squared_error
from keras.models import Sequential
from keras.layers import Dense
from keras.layers import LSTM
from keras.layers import Conv2D
from keras.layers import Conv1D
from keras.layers import Masking
from sklearn.preprocessing import normalize
from keras.models import load_model
from keras.utils import multi_gpu_model
import keras
import pickle
import numpy as np
from tensorflow.python.client import device_lib
from keras import backend as K
from keras.utils import to_categorical

print(device_lib.list_local_devices())  # list of DeviceAttributes

# # %gui qt
import numpy as np
import mne
import pickle
import sys
import os
import matplotlib
import matplotlib.pyplot as plt

# %matplotlib inline
mne.utils.set_config('MNE_USE_CUDA', 'true')
mne.cuda.init_cuda(verbose=True)
# -

# +
# Every non-hidden file in the folder is a pickled session; only one session
# is actually loaded below.
baseFolder = './pickled-avg'
files = [f for f in os.listdir(baseFolder) if not f.startswith('.')]
data = pickle.load(open('pickled-avg/OpenBCISession_2020-03-06_10-28-17-LUKE', 'rb'))
# -

data[1]

# +
# Class id for each experimental condition label (column 20 of a sample row).
hot = {
    'paced': 0,
    'slowBreath': 1,
    'rest': 2,
    'baseline': 3,
    'sync': 4,
    'survey': 5,
    'stressor': 6
}
# -

# Conditions to keep for training; the rest are discarded.
good = {
    'paced': True,
    'slowBreath': True,
    'rest': False,
    'baseline': True,
    'sync': False,
    'survey': False,
    'stressor': True
}

# +
# Walk the recording and cut it into contiguous blocks sharing the same
# condition label.  Each block becomes one variable-length sequence of the
# 16 channel values (columns 1..16); its label is the condition of the block
# that just ended (data[i-1][20]).
features = []
labels = []
add = []
prev = 'sync'
for i in range(len(data)):
    if good[data[i][20]]:
        # print(data[i][20])
        if prev != data[i][20]:
            print(prev, data[i][20], len(add))
            features.append(add)
            labels.append(hot[data[i - 1][20]])
            add = []
        add.append(data[i][1:17])
        prev = data[i][20]
# The first collected block is an artifact of the 'sync' seed value; drop it.
features = features[1:]
labels = labels[1:]
# -

features[0][0]

labels

# +
# NOTE(review): train and test are currently the SAME data, so val_loss only
# measures training fit -- confirm whether a real hold-out split is wanted.
train_X = np.array(features)
train_y = np.array(labels)
test_X = np.array(features)
test_y = np.array(labels)
# -

# One-hot encode the integer class labels for the softmax output layer.
train_y = to_categorical(train_y)
test_y = to_categorical(test_y)

print("train_X shape is", train_X.shape)
print("train_y shape is", train_y.shape)
print("test_X shape is", test_X.shape)
print("test_y shape is", test_y.shape)

# +
# Pad the variable-length sequences with a sentinel the Masking layer skips.
train_X = keras.preprocessing.sequence.pad_sequences(
    train_X, maxlen=None, dtype='float32',
    padding='pre', truncating='pre', value=-9999)
test_X = keras.preprocessing.sequence.pad_sequences(
    test_X, maxlen=None, dtype='float32',
    padding='pre', truncating='pre', value=-9999)
# -

# design network

# +
model = Sequential()
model.add(Masking(mask_value=-9999, input_shape=(None, test_X.shape[-1])))
model.add(LSTM(220))
model.add(Dense(50, activation='sigmoid'))
model.add(Dense(20, activation='relu'))
model.add(Dense(train_y.shape[1], activation='softmax'))
# model = multi_gpu_model(model)
# BUGFIX: 'mae' is a regression loss; with one-hot labels and a softmax
# output the appropriate loss is categorical cross-entropy.
model.compile(loss='categorical_crossentropy', optimizer='adam')
# fit network
history = model.fit(train_X, train_y, epochs=100, batch_size=125,
                    validation_data=(test_X, test_y), verbose=2, shuffle=False)
# plot history
pyplot.plot(history.history['loss'], label='train')
pyplot.plot(history.history['val_loss'], label='test')
pyplot.legend()
pyplot.show()
# -

model.save('./models/eeg-block-2.h5')

test_X.shape

# make a prediction
model = load_model('./models/eeg-block.h5')
yhat = model.predict(test_X)
print(yhat)
# BUGFIX: the previous version called scaler.inverse_transform() on a
# `scaler` that was never defined (leftover from a regression tutorial),
# which raised NameError.  No MinMaxScaler was ever fitted here, so compare
# the predicted class probabilities with the one-hot targets directly.
# calculate RMSE
rmse = sqrt(mean_squared_error(test_y, yhat))
print('Test RMSE: %.3f' % rmse)
playground-Copy1.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 2
#     language: python
#     name: python2
# ---

# Well-log analysis: cross-plot sonic travel time (DT) against the other log
# curves, draw stacked per-well "time series" traces, and compare a synthetic
# prediction against the observed DT for one well.

# +
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns

# +
COLUMNS = ["RHOB", "NPHI", "GR", "DT", "PHI"]
filenames = ["1_Well18.xlsx", "2_Well21.xlsx", "3_Well25.xlsx", "4_Well36.xlsx",
             "5_Well124.xlsx", "6_Well130.xlsx", "7_Well239.xlsx", "8_Well425.xlsx",
             "9_Well445.xlsx", "10_Well451.xlsx", "11_Well496.xlsx", "12_Well510.xlsx"]
color = ['b', 'g', 'r', 'c', 'm', 'y']
# -

# ## DT vs Features

for filename in filenames:
    df = pd.read_excel('MTP/' + filename, 'Sheet1', index_col=None)
    for num, column in enumerate(COLUMNS):
        # BUGFIX: comparing strings with `is not` relies on CPython interning
        # and is not guaranteed; use value inequality.
        if column != "DT":
            plt.close()
            # -999.25 is the null sentinel in these well logs.
            # NOTE(review): this keeps rows where EITHER value is valid;
            # `&` (both valid) may be the real intent -- confirm.
            df = df[(df[column] != -999.25) | (df['DT'] != -999.25)]
            x = list(df[column])
            y = list(df["DT"])
            plt.plot(x, y, color[num] + 'o', markersize=3.0)
            plt.xlabel(column)
            plt.ylabel('DT')
            plt.savefig("dt-features/" + filename + column + '.png')

# ## DT timeseries plots

plt.close()
last = 0
y_ticks = []
y_labels = []
for num, dtfile in enumerate(filenames):
    df = pd.read_excel('MTP/' + dtfile, 'Sheet1', index_col=None)
    y_actual = df["DT"]
    # Skip wells that contain any null-sentinel DT readings.
    is_good = True
    for item in y_actual:
        if item < -900:
            is_good = False
            break
    if not is_good:
        print(dtfile)
        continue
    # everything's good, start plotting
    n = y_actual.shape[0]
    x = np.linspace(0, 10, n)
    # Re-base each trace so the wells stack vertically 80 units apart.
    start = y_actual[0]
    y_actual = y_actual - start
    y_actual = y_actual + last
    y_labels.append(str(num + 1) + 'a')
    y_ticks.append(y_actual[0])
    last = y_actual[0] + 80
    # Synthetic "prediction": the actual trace plus Gaussian noise, offset by 80.
    y_pre = []
    for item in y_actual:
        rand = np.random.normal(0, 2, 1)
        y_pre.append(item + rand + 80)
    y_labels.append(str(num + 1) + 'p')
    y_ticks.append(y_pre[0])
    last = y_pre[0] + 80
    y_pre = np.array(y_pre)
    plt.plot(x, y_actual, markersize=0.5, linewidth=1.0)
    plt.plot(x, y_pre, linewidth=1.0)
plt.yticks(y_ticks, y_labels)
plt.xticks([])
# plt.show()
plt.savefig('timeseries.png')

# ## Predicted Vs Observed

plt.close()
cur_file = "1_Well18.xlsx"
df = pd.read_excel('MTP/' + cur_file, 'Sheet1', index_col=None)
y_actual = df["DT"]
# Build the synthetic prediction for this single well.
y_pre = []
for item in y_actual:
    rand = np.random.normal(0, 4.5, 1)
    y_pre.append(item + rand)

from sklearn.metrics import r2_score
r2_score(list(y_actual), y_pre)

n = 2330

# +
from sklearn import linear_model
x = np.array(y_actual)
y = np.array(y_pre)
# BUGFIX: the regression previously fitted y_actual against itself because
# `y` was immediately overwritten with `y_actual.reshape(n, 1)`; reshape the
# predicted values instead.
y = y.reshape(n, 1)
x = x.reshape(n, 1)
clf = linear_model.LinearRegression(normalize=True)
clf.fit(x, y)
slope = clf.coef_[0]
intercept = clf.intercept_[0]
# -

x.shape

intercept

# Draw the fitted line over a scatter of predicted vs actual DT.
start = min(y_actual)
end = max(y_actual)
x = np.linspace(start, end, 1000)
y = []
for item in x:
    y.append(slope * item + intercept)
plt.plot(x, y, 'ro', markersize=2.0)
plt.plot(y_actual, y_pre, 'b<', markersize=4.0)
plt.xlabel('actual')
plt.ylabel('predicted')
plt.savefig('predVSactual' + cur_file + '.png')
da2.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

from gurobipy import *  # import the optimize solver Gurobi

# Plan teacher hiring and firing over a four-year horizon at minimum cost.
number_of_year = 4  # planning horizon: years 1..4

m = Model()  # the optimisation model

# Input parameters.
cost_hire = 40000                  # cost to hire one teacher
cost_fire = 20000                  # cost to fire one teacher
salary = 100000                    # annual salary per teacher
demand = [6000, 7000, 5000, 6500]  # teachers required in each year
initial_teachers = 5000            # staff on hand at the beginning of year 1

# +
# Decision variables: nh[i] teachers hired and nf[i] teachers fired in year
# i+1, both integer.
nh = [m.addVar(vtype=GRB.INTEGER, name='nh{}'.format(i + 1))
      for i in range(number_of_year)]
nf = [m.addVar(vtype=GRB.INTEGER, name='nf{}'.format(i + 1))
      for i in range(number_of_year)]

# Derived expressions: nw[i] is the headcount at the end of year i+1 --
# the previous year's staff (or the initial staff in year 1) plus hires
# minus fires.
nw = []
for i in range(len(nh)):
    carried = initial_teachers if i == 0 else nw[i - 1]
    nw.append(nh[i] + carried - nf[i])
# -

# Objective: minimise total hiring, firing and salary cost over the horizon.
m.setObjective(
    quicksum([cost_hire * nh[i] for i in range(len(nh))])
    + quicksum([cost_fire * nf[i] for i in range(len(nh))])
    + quicksum([salary * nw[i] for i in range(len(nh))]),
    GRB.MINIMIZE)

# +
# Hiring and firing counts cannot be negative.
c1 = [m.addConstr(nh[i] >= 0) for i in range(len(nh))]
c2 = [m.addConstr(nf[i] >= 0) for i in range(len(nf))]

# Headcount must cover demand in every year.
c3 = [m.addConstr(nw[i] >= demand[i]) for i in range(len(nh))]
# -

# Run the optimize solver
m.optimize()

# Report the optimal decision variables.
m.printAttr('X')

# The optimal objective value.
m.ObjVal
assets/python/Ex13[Bhutan]_s.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 2 # language: python # name: python2 # --- # + import numpy as np import matplotlib #matplotlib.use('WebAgg') #matplotlib.use('Qt4Cairo') #matplotlib.use('Qt5Cairo') matplotlib.use('nbAgg') import matplotlib.pyplot as plt plt.rcParams['font.family']='serif' plt.rcParams['font.size']=10 plt.rcParams['mathtext.fontset']='stixsans' # + #in modules/FT/FT.py there is a module that does the same. In modules/FT, there is an example on how to use it. def format_ticks(sub,_M_xticks,_M_yticks,_M_xticks_exception,_M_yticks_exception,_m_xticks,_m_yticks, xmin='',xmax='',ymin='',ymax=''): ''' Logarithmic tick format for matplotlib. Change M_x,M_y to change the format fo the ticks. sub: the subplot (assumed something like sub = fig.add_subplot...) _M_xticks,_M_yticks: lists of major ticks _M_xticks_exception,_M_yticks_exception: lists of major ticks that will not be labeled _m_xticks,_m_yticks:lists of minor ticks NOTE: first set the scale to 'log' and then run format_ticks!! ''' #these have to be here! #if you don't change all limits, I will do it automatically (min and max of _M_xticks and _M_yticks) if not '' in [xmin,xmax,ymin,ymax]: sub.set_xlim(xmin,xmax) sub.set_ylim(ymin,ymax) else: print('Empty axis limits. 
Using automatic ones.') sub.set_xlim(min(_M_xticks),max(_M_xticks)) sub.set_ylim(min(_M_yticks),max(_M_yticks)) #set functions that format the ticks #value is the number on the axis, tick_number the number of the corresponding tick def M_x(value, tick_number): if value in _M_xticks_exception: return '' if value==1: return '$1$' if value==0: return '$0$' if value!=0: exp=int(np.log10(value)) _v=value/10**exp if round(_v,2)==1.: return '$10^{0}{1}{2}$'.format('{', exp ,'}') else: return '${0}{1}10^{2}{3}{4}$'.format(_v ,r'\times', '{', exp ,'}') def M_y(value, tick_number): if value in _M_yticks_exception: return '' if value==1: return '$1$' if value==0: return '$0$' if value!=0: exp=int(np.log10(value)) _v=value/10**exp if round(_v,2)==1.: return '$10^{0}{1}{2}$'.format('{', exp ,'}') else: return '${0} {1} 10^{2}{3}{4}$'.format(_v ,r'\times', '{', exp ,'}') #set the major ticks sub.xaxis.set_major_locator(plt.FixedLocator(_M_xticks)) sub.xaxis.set_major_formatter(plt.FuncFormatter(M_x)) sub.yaxis.set_major_locator(plt.FixedLocator(_M_yticks)) sub.yaxis.set_major_formatter(plt.FuncFormatter(M_y)) #set the major ticks sub.xaxis.set_minor_locator(plt.FixedLocator(_m_xticks)) sub.yaxis.set_minor_locator(plt.FixedLocator(_m_yticks)) # - # + #==========================--Example: how to format the ticks--===========================================# fig=plt.figure(figsize=(8,4.5)) fig.subplots_adjust(bottom=0.15, left=0.1, top = 0.9, right=0.9,wspace=0.0,hspace=0.5) fig.suptitle('') #=============================================================================# sub = fig.add_subplot(1,1, 1) X=np.logspace(-4,4,500) for i in [-1,0,1]: Y=10**(2*i)*np.exp(-(X-10**i)**2/10**(i/2.)) sub.plot(X,Y) #set major ticks _M_xticks=[10**i for i in range(-4,3)] _M_yticks=[10**i for i in range(-30,4)] #set major ticks that will not have a label _M_xticks_exception=[] _M_yticks_exception=[10**i for i in range(-30,4,2)] #_M_yticks_exception.remove(1e-30) #set minor ticks minor ticks don't 
have labels _m_xticks=[j*10**i for i in range(-4,2) for j in range(2,10) ] _m_yticks=[j*10**i for i in range(-30,4) for j in [5] ] #set scales sub.set_xscale('log') sub.set_yscale('log') format_ticks(sub, _M_xticks,_M_yticks, _M_xticks_exception,_M_yticks_exception, _m_xticks,_m_yticks, xmin=1e-4,xmax=1e2,ymin=1e-29,ymax=1e3) plt.show() # + #==========================--Example: useful plots--===========================================# fig=plt.figure(figsize=(8.5,4)) fig.subplots_adjust(bottom=0.1, left=0.1, top = 0.9, right=0.9,wspace=0.0,hspace=0.5) fig.suptitle('Example!') #=============================================================================# sub = fig.add_subplot(111) X=np.linspace(-2*np.pi,2*np.pi,500) #=======================plot=======================# #we will cycle through linestyles and colors, so import cycle from itertools import cycle #define linestyles _LS=['-','--','-.',':'] _linestyles=cycle(_LS) #define colors (also in Hexadecimal!) _C=['k','#A0200F','grey','b','c','m','r','g','y'] _colors=cycle(_C) #initialize it, because we'll only change color when we ru out of linestyles _c=_colors.next() #i will use this counter to cycle through all possible combinations of _colors and _linestyles _count=0 _reset_count=len(_LS)-1 ymin=np.inf ymax=-np.inf _r1=[0,1,1/2.,1/4.] _r2=['0','\pi','\pi/2','\pi/4'] for i,_l in zip(_r1,_r2): _count+=1 k=10**(np.pi*i) Y=np.sin(X-np.log10(k))*np.exp(-(X-0.1*k)**2./(k**-0.5)) _ls=_linestyles.next() if _count>= _reset_count: _c=_colors.next() sub.plot(X,Y,c=_c,linestyle=_ls,alpha=0.5,linewidth=2,label='$k=10^{0}{1}{2}$ [cm]'.format('{', _l ,'}')) #this finds automatically the maximum of all Y's (you can change it!) _tmp=min(Y) if _tmp<ymin: ymin=_tmp _tmp=max(Y) if _tmp>ymax: ymax=_tmp #=======================scatter plot=======================# #define markers (can anything inside $$!) _MK=['+','o','$:)$'] _markers=cycle(_MK) #define colors (again!) 
_C=['k','','#A0200F','grey','b','c','m','r','g','y'] _colors=cycle(_C) #initialize it, because we'll only change color when we ru out of markers _c=_colors.next() #i will use this counter to cycle through all possible combinations of _colors and _linestyles _count=0 _reset_count=len(_MK)-1 _r1=[-1,-1/2.,-1/4.] _r2=['0','\pi','\pi/2','\pi/4'] for i,_l in zip(_r1,_r2): _count+=1 k=10**(np.pi*i) Y=np.sin(X-np.log10(k))*np.exp(-(X-0.1*k)**2./(k**-0.5)) _mk=_markers.next() if _count>= _reset_count: _c=_colors.next() sub.scatter(X,Y,c=_c,marker=_mk,edgecolors='#DDDDDD',alpha=0.4,s=50,linewidths=1, label='$k=10^{0}{1}{2}$ [cm]'.format('{', _l ,'}')) #this finds automatically the maximum of all Y's (you can change it!) _tmp=min(Y) if _tmp<ymin: ymin=_tmp _tmp=max(Y) if _tmp>ymax: ymax=_tmp #=======================vertical line=======================# sub.axvline(x=-3.5,c='r') #=======================fill=======================# #horixontal #fill between -0.1 and 0.1 sub.fill_between(X,-0.1,0.1, facecolor='c',alpha=0.4) #fill between sin(10x) and 0.4*exp((_x+2)^2) for x:[-3,-1] _x=np.linspace(-3,-1,50) sub.fill_between(_x,np.sin(_x*10),0.4*np.exp(-(_x+2)**2), facecolor='b',alpha=0.4) #vertical sub.axvspan(2,4, alpha=0.5, color='red') #=======================annotate=======================# sub.annotate('Annotation!', xy=(-1,0), xytext=(4,-0.8), arrowprops=dict(facecolor='black', shrink=0.,headwidth=5,width=0.1) ) #=======================text=======================# sub.text(3,0.5, r'$Text!$', fontsize=11, rotation=90, rotation_mode='anchor') #labels, limits, etc. 
sub.legend(bbox_to_anchor=(0.02, 0.98),borderaxespad=0., borderpad=0,ncol=1,loc=2,fontsize='small',framealpha=0) sub.set_xlabel(r'$\theta$') sub.xaxis.set_label_coords(0.5, -0.065) sub.set_ylabel(r'$\sin{\left( \theta - \log_{10}k \right)} \, e^{-\dfrac{(\theta-k/10)^2}{k^{-1/2}} } $ ', labelpad=-2) #labelpad and set_label_coords do more or less the same thing xmin=min(X) xmax=max(X) sub.set_xscale('linear') sub.set_yscale('linear') sub.set_xlim(xmin,xmax) sub.set_ylim(ymin,ymax) plt.show() # -
useful_scripts/python_templates/plot_examples.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # PMV Storm Surge Alert Feed Generator # # Notebook to generate `pmv.xml` feed while `nowcast.workers.make_feeds` # worker is in development. # + import datetime import os from pprint import pprint import arrow import docutils.core from feedgen.feed import FeedGenerator import mako.template import netCDF4 as nc import numpy as np from salishsea_tools import ( nc_tools, stormtools, unit_conversions, wind_tools, ) from salishsea_tools.places import PLACES from nowcast import figures # - # !sshfs skookum:/results results today = arrow.now('Canada/Pacific').floor('day') forecast = 'forecast2' run_date = today if forecast == 'forecast' else today.replace(days=-1) os.path.join(forecast, run_date.format('DDMMMYY').lower()) # #### Calculating the Values to use in the Template # # The `nowcast.figures.plot_threshold_website()` has code # (reproduced below) that calculates: # * The maximum sea surface height in a `grid_T` run results file # * The time at which it occurs from importlib import reload reload(figures) reload(nc_tools) # + tide_gauge_stn = 'Point Atkinson' results_file = ('results/SalishSea/{forecast}/{dmy}/{}.nc' .format(tide_gauge_stn.replace(' ', ''), forecast=forecast, dmy=run_date.format('DDMMMYY').lower())) print(results_file) grid_T_15m = nc.Dataset(results_file) tidal_predictions = 'results/nowcast-sys/tools/SalishSeaNowcast/nowcast/tidal_predictions/' ssh_model, t_model = nc_tools.ssh_timeseries_at_point(grid_T_15m, 0, 0, datetimes=True) ttide = figures.get_tides(tide_gauge_stn, tidal_predictions) ssh_corr = figures.correct_model_ssh(ssh_model, t_model, ttide) max_ssh = np.max(ssh_corr) + PLACES[tide_gauge_stn]['mean sea lvl'] max_ssh_time = t_model[np.argmax(ssh_corr)] # - # From those results we can calculate: # * Maximum water level 
above chart datum in metres # * Date and time of the maximum water level with the appropriate timezone indicated max_ssh, arrow.get(max_ssh_time).to('local') # Formating the date/time would be easy if it weren't for adding the timezone name: a = arrow.get(max_ssh_time).to('local') '{datetime} [{tzname}]'.format(datetime=a.format('ddd MMM DD, YYYY HH:mm'), tzname=a.tzinfo.tzname(a.datetime)) unit_conversions.humanize_time_of_day(a) risk_level = stormtools.storm_surge_risk_level('Point Atkinson', max_ssh, ttide) print(risk_level) # + weather_path = 'results/forcing/atmospheric/GEM2.5/operational/fcst' weather = nc.Dataset(os.path.join(weather_path, '{:ops_y%Ym%md%d.nc}'.format(max_ssh_time))) print(os.path.join(weather_path, '{:ops_y%Ym%md%d.nc}'.format(max_ssh_time))) wind = nc_tools.uv_wind_timeseries_at_point(weather, *PLACES[tide_gauge_stn]['wind grid ji']) i_max_ssh_wind = np.asscalar( np.where( wind.time == arrow.get( max_ssh_time.year, max_ssh_time.month, max_ssh_time.day, max_ssh_time.hour))[0]) print(i_max_ssh_wind, wind.time[i_max_ssh_wind].to('local')) print(wind.u[i_max_ssh_wind], wind.v[i_max_ssh_wind]) # - wind_tools.wind_speed_dir(wind.u[i_max_ssh_wind], wind.v[i_max_ssh_wind]) # + u_wind_4h_avg = np.mean(wind.u[max(i_max_ssh_wind-4, 0):i_max_ssh_wind]) v_wind_4h_avg = np.mean(wind.v[max(i_max_ssh_wind-4, 0):i_max_ssh_wind]) u_wind_4h_avg, v_wind_4h_avg # + wind_speed_4h_avg, wind_dir_4h_avg = wind_tools.wind_speed_dir(u_wind_4h_avg, v_wind_4h_avg) wind_speed_4h_avg, wind_dir_4h_avg # - unit_conversions.mps_kph(wind_speed_4h_avg), unit_conversions.mps_knots(wind_speed_4h_avg) unit_conversions.wind_to_from(wind_dir_4h_avg) unit_conversions.bearing_heading(unit_conversions.wind_to_from(wind_dir_4h_avg)) # #### Rendering the Template for the `summary` Element # # We'll start with a reStructuredText template based on `SalishSeaNowcast/nowcast/www/templates/surgetext.mako`: max_ssh_time_local = arrow.get(max_ssh_time).to('local') values = { 'city': 
'Vancouver', 'tide_gauge_stn': 'Point Atkinson', 'conditions': { 'Point Atkinson': { 'risk_level': risk_level, 'max_ssh_msl': max_ssh, 'wind_speed_4h_avg_kph': unit_conversions.mps_kph(wind_speed_4h_avg), 'wind_speed_4h_avg_knots': unit_conversions.mps_knots(wind_speed_4h_avg), 'wind_dir_4h_avg_heading':unit_conversions.bearing_heading( unit_conversions.wind_to_from(wind_dir_4h_avg)), 'wind_dir_4h_avg_bearing': unit_conversions.wind_to_from(wind_dir_4h_avg), 'max_ssh_time': max_ssh_time_local, 'max_ssh_time_tzname': max_ssh_time_local.tzinfo.tzname(max_ssh_time_local.datetime), 'humanized_max_ssh_time': unit_conversions.humanize_time_of_day(max_ssh_time_local), }, }, } # + fg = FeedGenerator() utcnow = arrow.utcnow() fg.title('Salish Sea NEMO Model Storm Surge Alerts for Port Metro Vancouver') fg.id( 'tag:salishsea.eos.ubc.ca,2015-12-12:/storm-surge/atom/pmv/{utcnow}' .format(utcnow=utcnow.format('YYYYMMDDHHmmss'))) fg.language('en-ca') fg.author(name='Salish Sea MEOPAR Project', uri='http://salishsea.eos.ubc.ca/') fg.rights( 'Copyright {this_year}, Salish Sea MEOPAR Project Contributors and The University of British Columbia' .format(this_year=utcnow.year)) fg.link(href='http://salishsea.eos.ubc.ca/storm-surge/atom/pmv.xml', rel='self', type='application/atom+xml') fg.link(href='http://salishsea.eos.ubc.ca/storm-surge/forecast.html', rel='related', type='text/html') if risk_level is not None: rendered_rst = mako.template.Template( filename='../../tools/SalishSeaNowcast/nowcast/www/templates/storm_surge_advisory.mako').render(**values) html = docutils.core.publish_parts(rendered_rst, writer_name='html') now = arrow.now() fe = fg.add_entry() fe.title('Storm Surge Alert for Point Atkinson') fe.id( 'tag:salishsea.eos.ubc.ca,{today}:/storm-surge/atom/pmv/{now}' .format( today=now.format('YYYY-MM-DD'), now=now.format('YYYYMMDDHHmmss'))) fe.author(name='Salish Sea MEOPAR Project', uri='http://salishsea.eos.ubc.ca/') fe.content(html['body'], type='html') fe.link( 
rel='alternate', type='text/html', href='http://salishsea.eos.ubc.ca/nemo/results/{forecast}/publish_{day}.html' .format(forecast=forecast, day=today.replace(days=+1).format('DDMMMYY').lower()), ) pprint(fg.atom_str(pretty=True).decode('utf8')) # - fg.atom_file('pmv.xml', pretty=True) # !scp pmv.xml skookum:public_html/MEOPAR/nowcast/www/salishsea-site/site/storm-surge/atom/ # On `skookum`: # ``` # scp /home/dlatorne/public_html/MEOPAR/nowcast/www/salishsea-site/site/storm-surge/atom/pmv.xml shelob:/www/salishsea/data/storm-surge/atom/ # ```
Doug/StormSurgeAlertsFeedDaily.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- import os os.chdir('../') import DeepPurpose.models as models from DeepPurpose.utils import * from DeepPurpose.dataset import * # + from time import time t1 = time() X_drug, X_target, y = load_process_KIBA('./data/', binary=False) drug_encoding = 'CNN' target_encoding = 'Transformer' train, val, test = data_process(X_drug, X_target, y, drug_encoding, target_encoding, split_method='random',frac=[0.7,0.1,0.2]) # use the parameters setting provided in the paper: https://arxiv.org/abs/1801.10193 config = generate_config(drug_encoding = drug_encoding, target_encoding = target_encoding, cls_hidden_dims = [1024,1024,512], train_epoch = 100, test_every_X_epoch = 10, LR = 0.001, batch_size = 128, hidden_dim_drug = 128, mpnn_hidden_size = 128, mpnn_depth = 3, cnn_target_filters = [32,64,96], cnn_target_kernels = [4,8,12] ) model = models.model_initialize(**config) t2 = time() print("cost about " + str(int(t2-t1)) + " seconds") # - model.train(train, val, test) model.save_model('./model_CNN_Transformer_Kiba')
DEMO/CNN_Transformer_Kiba.ipynb
# -*- coding: utf-8 -*-
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .jl
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Julia 1.0.3
#     language: julia
#     name: julia-1.0
# ---

# Deterministic SIR epidemic model solved as a system of ODEs.
using DifferentialEquations

# In-place SIR right-hand side: state u = (S, I, R), parameters p = (β, γ).
function sir_ode(du, u, p, t)
    # β: per-capita infection (transmission) rate
    β = p[1]
    # γ: per-capita recovery rate
    γ = p[2]
    # S: susceptible fraction of the population
    S = u[1]
    # I: infected fraction of the population
    I = u[2]
    du[1] = -β * S * I          # susceptibles becoming infected
    du[2] = β * S * I - γ * I   # new infections minus recoveries
    du[3] = γ * I               # recovered individuals accumulate
end

# Parameters: (infection rate β, recovery rate γ)
pram = [0.1,0.05]
# Initial state: (S, I, R) fractions, summing to 1
init = [0.99,0.01,0.0]
tspan = (0.0,200.0)
sir_prob = ODEProblem(sir_ode, init, tspan, pram)
sir_sol = solve(sir_prob, saveat = 0.1);

# Visualization
using Plots
plot(sir_sol,xlabel="Time",ylabel="Number")

# The same model written more compactly using tuple destructuring.
function sir_ode2(du,u,p,t)
    S,I,R = u
    b,g = p
    du[1] = -b*S*I
    du[2] = b*S*I-g*I
    du[3] = g*I
end
parms = [0.1,0.05]
init = [0.99,0.01,0.0]
tspan = (0.0,200.0)
sir_prob2 = ODEProblem(sir_ode2,init,tspan,parms)
sir_sol = solve(sir_prob2,saveat = 0.1)
examples/epicookbook/notebooks/SimpleDeterministicModels/SIRModel.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# +
# Importing Libraries.
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
import math
import psycopg2
import os
import pandas.io.sql as psql
import sqlalchemy
from sqlalchemy import create_engine
from sklearn import preprocessing
from sklearn.preprocessing import LabelEncoder
from scipy import stats
from pylab import *
from matplotlib.ticker import LogLocator

# %matplotlib inline
# %config InlineBackend.figure_format = 'retina'
# -

# *-----*
#
# Second, we establish the connection to the AWS PostgreSQL Relational Database System

# +
# Connecting to AWS PostgreSQL RDS.
# NOTE(review): credentials are hard-coded placeholders; load them from
# environment variables (e.g. os.environ["PG_PASSWORD"]) instead of
# committing real secrets to the notebook.
engine_hmda_2010 = psycopg2.connect(
    database="postgres",
    user="reporting_user",
    password="<PASSWORD>",
    host="database-1.cogr19hev0gd.us-east-2.rds.amazonaws.com",
    port='5432'
)
# -

# *Note: A better way to do this is to define variables and put it into a function so you can easily call a specified engine, as seen below:*

# +
# Postgres username, password, and database name.
postgres_host = 'database-1.cogr19hev0gd.us-east-2.rds.amazonaws.com'
postgres_port = '5432'
postgres_username = 'reporting_user'
postgres_password = '<PASSWORD>'
postgres_dbname = "postgres"

# Build the SQLAlchemy connection URL.
# Fix: the password was pasted as a bare `<PASSWORD>` token (a SyntaxError);
# pass the `postgres_password` variable defined above instead.
postgres_str = ('postgresql://{username}:{password}@{host}:{port}/{dbname}'
                .format(username=postgres_username,
                        password=postgres_password,
                        host=postgres_host,
                        port=postgres_port,
                        dbname=postgres_dbname)
                )

# Creating the connection.
cnx = create_engine(postgres_str)

# +
# Reading the HMDA 2010 dataset; join population and education datasets
# appropriately for 2010 for the first 25,000 rows -- as a dataframe
# using pandas: df.
df = pd.read_sql_query('''SELECT * FROM public."hmda_lar._2010_allrecord csv"''', cnx)
# -

# Using pandas to view the first 5 rows.
# (It starts at 0 because pandas uses 0-based integer indexing.)
df.head(5)
.ipynb_checkpoints/Connecting to the AWS PostgreSQL Relational Database System_Blake's Demo-checkpoint.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + [markdown] id="view-in-github" colab_type="text" # <a href="https://colab.research.google.com/github/nishipy/CloudFunctions/blob/master/light_weight_roberta_base_scheduler.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a> # + [markdown] id="M5tA-mkxXV5b" # # Overview # This is kernel is almost the same as [Lightweight Roberta solution in PyTorch](https://www.kaggle.com/andretugan/lightweight-roberta-solution-in-pytorch), but instead of "roberta-base", it starts from [Maunish's pre-trained model](https://www.kaggle.com/maunish/clrp-roberta-base). # # Acknowledgments: some ideas were taken from kernels by [Torch](https://www.kaggle.com/rhtsingh) and [Maunish](https://www.kaggle.com/maunish). # # In addition, we use the [stratified_kfold train dataset](https://www.kaggle.com/takeshikobayashi/commonlit-train-datasetfor) training the model. 
# + [markdown] id="4H_YG8xbXZGN" # ## Original notebook # - Lightweight Roberta solution # - https://www.kaggle.com/andretugan/pre-trained-roberta-solution-in-pytorch # - pretraied with MLM # - https://www.kaggle.com/maunish/clrp-pytorch-roberta-pretrain # + [markdown] id="FBSvZK-MXsRq" # # Prepare # + [markdown] id="UtTPu3rhX22a" # ## Checking GPU status # + colab={"base_uri": "https://localhost:8080/"} id="m4-zjkgTXyB7" outputId="af512bf4-2293-4180-905c-cbde91e5ec06" # gpu_info = !nvidia-smi gpu_info = '\n'.join(gpu_info) if gpu_info.find('failed') >= 0: print('Select the Runtime > "Change runtime type" menu to enable a GPU accelerator, ') print('and then re-execute this cell.') else: print(gpu_info) # + [markdown] id="9niGsYeuYEgf" # ## Download dataset from kaggle # + colab={"base_uri": "https://localhost:8080/"} id="1JKMEPT1YBzO" outputId="d700701b-4d5a-4180-f1a2-dccbeb86e296" from google.colab import drive drive.mount('/content/drive') # + [markdown] id="12zbNycNavJc" # ### kaggle.json # + id="VaMQCChEYIiJ" # !mkdir -p /root/.kaggle/ # !cp ./drive/MyDrive/kaggle/commonlit/kaggle.json ~/.kaggle/kaggle.json # !chmod 600 ~/.kaggle/kaggle.json # + [markdown] id="LampuH0tawt4" # ### Competition dataset # + colab={"base_uri": "https://localhost:8080/"} id="2Pn015V0Yamu" outputId="9c053034-36cc-438d-898b-3b6bfe05b2db" # !mkdir -p ../input/commonlitreadabilityprize/ # !kaggle competitions download -c commonlitreadabilityprize -p ../input/commonlitreadabilityprize/ # !cp -f ./drive/MyDrive/kaggle/commonlit/train_stratiKfold.csv.zip ../input/commonlitreadabilityprize/ # + colab={"base_uri": "https://localhost:8080/"} id="_SouT_7pYuvb" outputId="5e8ca633-bc6d-43bc-e7c5-06e1f66abf26" # !unzip -o ../input/commonlitreadabilityprize/train.csv.zip -d ../input/commonlitreadabilityprize/ # !unzip -o ../input/commonlitreadabilityprize/train_stratiKfold.csv.zip -d ../input/commonlitreadabilityprize/ # + colab={"base_uri": "https://localhost:8080/"} id="gjihJSBzZVt8" 
outputId="2fe5bbeb-a00d-417d-e6a4-5982a891ed45" # !ls ../input/commonlitreadabilityprize/ # + [markdown] id="wujOl7iGa2-C" # ### Model pre-trained with MLM # - Notebook # - https://www.kaggle.com/maunish/clrp-pytorch-roberta-pretrain # - Model data # - https://www.kaggle.com/maunish/clrp-roberta-base # + colab={"base_uri": "https://localhost:8080/"} id="s9EgsJFaa7UW" outputId="bc81596c-c189-4713-83a0-da22edecfa5c" # !mkdir -p ../input/commonlitreadabilityprize/pretrained-model/ # !kaggle datasets download maunish/clrp-roberta-base -p ../input/commonlitreadabilityprize/pretrained-model/ # + colab={"base_uri": "https://localhost:8080/"} id="9KA2Tzd5bfnJ" outputId="6f83606c-b4e4-436f-cd8f-8a646398feba" # !unzip -o ../input/commonlitreadabilityprize/pretrained-model/clrp-roberta-base.zip -d ../input/commonlitreadabilityprize/pretrained-model/ # + [markdown] id="oeCpZ6DEYH90" # # Install dependencies # + colab={"base_uri": "https://localhost:8080/"} id="BOrw8yaddJV0" outputId="f3992910-c5bd-4605-b9f0-c04f67f89862" # !pip install transformers accelerate datasets # + id="wZYxy_hAXV5e" import os import math import random import time import numpy as np import pandas as pd import torch import torch.nn as nn from torch.utils.data import Dataset from torch.utils.data import DataLoader from transformers import AdamW from transformers import AutoTokenizer from transformers import AutoModel from transformers import AutoConfig from transformers import get_cosine_schedule_with_warmup from transformers import get_linear_schedule_with_warmup from sklearn.model_selection import KFold import gc gc.enable() # + [markdown] id="W6-8wniOeBzQ" # # Set constant # + id="DQjr2-NPXV5f" NUM_FOLDS = 5 NUM_EPOCHS = 3 BATCH_SIZE = 16 MAX_LEN = 248 #(eval_rmse, step_size) EVAL_SCHEDULE = [(0.50, 16), (0.49, 8), (0.48, 4), (0.47, 2), (-1., 1)] ROBERTA_PATH = "../input/commonlitreadabilityprize/pretrained-model/clrp_roberta_base/" TOKENIZER_PATH = 
"../input/commonlitreadabilityprize/pretrained-model/clrp_roberta_base/" #ROBERTA_PATH = "../input/clrp-roberta-base/clrp_roberta_base" #TOKENIZER_PATH = "../input/clrp-roberta-base/clrp_roberta_base" DEVICE = "cuda" if torch.cuda.is_available() else "cpu" # + colab={"base_uri": "https://localhost:8080/"} id="tBgC9X-JOotk" outputId="40ea0886-f2ae-4092-8138-e7ab12739f7a" EVAL_SCHEDULE[0][1] # + [markdown] id="xnzprhriHc7B" # # Define utility functions # + id="ZNCjd5KEXV5f" def set_random_seed(random_seed): random.seed(random_seed) np.random.seed(random_seed) os.environ["PYTHONHASHSEED"] = str(random_seed) torch.manual_seed(random_seed) torch.cuda.manual_seed(random_seed) torch.cuda.manual_seed_all(random_seed) torch.backends.cudnn.deterministic = True # + [markdown] id="A95IJgC8WPLz" # train_dfには、Stratified kfold済みのデータセットを利用する。 # + id="T-5y27aJXV5f" #Use stratified k-fold train dataset #train_df = pd.read_csv("/kaggle/input/commonlitreadabilityprize/train.csv") train_df = pd.read_csv("../input/commonlitreadabilityprize/train_stratiKfold.csv") # Remove incomplete entries if any. 
train_df.drop(train_df[(train_df.target == 0) & (train_df.standard_error == 0)].index,
              inplace=True)
train_df.reset_index(drop=True, inplace=True)

test_df = pd.read_csv("../input/commonlitreadabilityprize/test.csv")
submission_df = pd.read_csv("../input/commonlitreadabilityprize/sample_submission.csv")

# + id="bR2QTnuIXV5g"
# The tokenizer is the same one as for roberta-base.
tokenizer = AutoTokenizer.from_pretrained(TOKENIZER_PATH)

# + colab={"base_uri": "https://localhost:8080/", "height": 419} id="uq6O3LeXZ3Q9" outputId="82c36bd5-593e-4458-bcca-6b8730e9b8aa"
train_df[train_df['kfold']!=1]

# + id="b5miW_QBacqW"


# + [markdown] id="luovjtC0XV5g"
# # Dataset

# + id="LHVj3D05XV5g"
class LitDataset(Dataset):
    """Tokenized CommonLit excerpts; yields (input_ids, attention_mask[, target])."""

    def __init__(self, df, inference_only=False):
        super().__init__()

        self.df = df
        # inference_only=True skips targets (used for the test set).
        self.inference_only = inference_only
        self.text = df.excerpt.tolist()
        # Try removing newlines. In the original notebook this line is commented out.
        #self.text = [text.replace("\n", " ") for text in self.text]

        if not self.inference_only:
            self.target = torch.tensor(df.target.values, dtype=torch.float32)

        # Tokenize the whole dataframe up-front, padded/truncated to MAX_LEN.
        self.encoded = tokenizer.batch_encode_plus(
            self.text,
            padding = 'max_length',
            max_length = MAX_LEN,
            truncation = True,
            return_attention_mask=True
        )

    def __len__(self):
        return len(self.df)

    def __getitem__(self, index):
        input_ids = torch.tensor(self.encoded['input_ids'][index])
        attention_mask = torch.tensor(self.encoded['attention_mask'][index])

        if self.inference_only:
            return (input_ids, attention_mask)
        else:
            target = self.target[index]
            return (input_ids, attention_mask, target)

# + [markdown] id="Smttt02WXV5h"
# # Model
# The model is inspired by the one from [Maunish](https://www.kaggle.com/maunish/clrp-roberta-svm).

# + id="RL6qw55sXV5h"
class LitModel(nn.Module):
    """Roberta encoder + attention pooling + linear regression head."""

    def __init__(self):
        super().__init__()

        config = AutoConfig.from_pretrained(ROBERTA_PATH)
        # Update the settings defined in config.json.
        config.update({"output_hidden_states":True,
                       "hidden_dropout_prob": 0.0,
                       "layer_norm_eps": 1e-7})

        self.roberta = AutoModel.from_pretrained(ROBERTA_PATH, config=config)

        # Attention network that scores each token position.
        self.attention = nn.Sequential(
            nn.Linear(768, 512),
            nn.Tanh(),
            nn.Linear(512, 1),
            nn.Softmax(dim=1)
        )

        self.regressor = nn.Sequential(
            nn.Linear(768, 1)
        )

    def forward(self, input_ids, attention_mask):
        roberta_output = self.roberta(input_ids=input_ids,
                                      attention_mask=attention_mask)

        # There are a total of 13 layers of hidden states.
        # 1 for the embedding layer, and 12 for the 12 Roberta layers.
        # We take the hidden states from the last Roberta layer.
        last_layer_hidden_states = roberta_output.hidden_states[-1]

        # The number of cells is MAX_LEN.
        # The size of the hidden state of each cell is 768 (for roberta-base).
        # In order to condense hidden states of all cells to a context vector,
        # we compute a weighted average of the hidden states of all cells.
        # We compute the weight of each cell, using the attention neural network.
        weights = self.attention(last_layer_hidden_states)

        # weights.shape is BATCH_SIZE x MAX_LEN x 1
        # last_layer_hidden_states.shape is BATCH_SIZE x MAX_LEN x 768
        # Now we compute context_vector as the weighted average.
        # context_vector.shape is BATCH_SIZE x 768
        context_vector = torch.sum(weights * last_layer_hidden_states, dim=1)

        # Now we reduce the context vector to the prediction score.
        return self.regressor(context_vector)

# + [markdown] id="t_BKdywXVDzk"
# ## Define eval

# + id="FrZm0bLLXV5h"
# Evaluate with MSE.
def eval_mse(model, data_loader):
    """Evaluates the mean squared error of the |model| on |data_loader|"""
    model.eval()
    mse_sum = 0

    with torch.no_grad():
        for batch_num, (input_ids, attention_mask, target) in enumerate(data_loader):
            input_ids = input_ids.to(DEVICE)
            attention_mask = attention_mask.to(DEVICE)
            target = target.to(DEVICE)

            pred = model(input_ids, attention_mask)
            # Sum (not mean) per batch, then divide by dataset size below.
            mse_sum += nn.MSELoss(reduction="sum")(pred.flatten(), target).item()

    return mse_sum / len(data_loader.dataset)

# + [markdown] id="yJsYZflrVBMr"
# ## Define predict

# + id="JxqWSjMHXV5i"
def predict(model, data_loader):
    """Returns an np.array with predictions of the |model| on |data_loader|"""
    model.eval()

    result = np.zeros(len(data_loader.dataset))
    index = 0

    with torch.no_grad():
        for batch_num, (input_ids, attention_mask) in enumerate(data_loader):
            input_ids = input_ids.to(DEVICE)
            attention_mask = attention_mask.to(DEVICE)

            pred = model(input_ids, attention_mask)
            result[index : index + pred.shape[0]] = pred.flatten().to("cpu")
            index += pred.shape[0]

    return result

# + [markdown] id="vdOQdEY7U-gl"
# ### Define Train

# + id="G7fx532GXV5i"
def train(model, model_path, train_loader, val_loader,
          optimizer, scheduler=None, num_epochs=NUM_EPOCHS):
    """Train |model|, periodically evaluating on |val_loader| and saving the
    best checkpoint to |model_path|. Returns the best validation RMSE."""
    best_val_rmse = None
    best_epoch = 0
    step = 0
    last_eval_step = 0
    #EVAL_SCHEDULE = [(0.50, 16), (0.49, 8), (0.48, 4), (0.47, 2), (-1., 1)]
    #-> EVAL_SCHEDULE[0][1] = 16
    eval_period = EVAL_SCHEDULE[0][1]

    start = time.time()

    # Repeat for the number of epochs.
    for epoch in range(num_epochs):
        val_rmse = None

        for batch_num, (input_ids, attention_mask, target) in enumerate(train_loader):
            input_ids = input_ids.to(DEVICE)
            attention_mask = attention_mask.to(DEVICE)
            target = target.to(DEVICE)

            optimizer.zero_grad()
            model.train()

            pred = model(input_ids, attention_mask)
            mse = nn.MSELoss(reduction="mean")(pred.flatten(), target)
            mse.backward()

            #https://stackoverflow.com/questions/60120043/optimizer-and-scheduler-for-bert-fine-tuning
            # Right after `optimizer.step()`, call `scheduler.step()` on every
            # batch to update the learning rate.
            optimizer.step()
            if scheduler:
                scheduler.step()

            # Evaluate the RMSE every eval_period (initially 16) steps.
            if step >= last_eval_step + eval_period:
                # Evaluate the model on val_loader.
                elapsed_seconds = time.time() - start
                num_steps = step - last_eval_step
                print(f"\n{num_steps} steps took {elapsed_seconds:0.3} seconds")
                last_eval_step = step

                val_rmse = math.sqrt(eval_mse(model, val_loader))

                print(f"Epoch: {epoch} batch_num: {batch_num}",
                      f"val_rmse: {val_rmse:0.4}")

                # Adjust eval_period according to the RMSE thresholds
                # defined in EVAL_SCHEDULE.
                for rmse, period in EVAL_SCHEDULE:
                    if val_rmse >= rmse:
                        eval_period = period
                        break

                # Record the best score (and checkpoint the model).
                if not best_val_rmse or val_rmse < best_val_rmse:
                    best_val_rmse = val_rmse
                    best_epoch = epoch
                    torch.save(model.state_dict(), model_path)
                    print(f"New best_val_rmse: {best_val_rmse:0.4}")
                else:
                    print(f"Still best_val_rmse: {best_val_rmse:0.4}",
                          f"(from epoch {best_epoch})")

                start = time.time()

            # Increment the global step counter.
            step += 1

    return best_val_rmse

# + [markdown] id="U1bmfWQiU4yT"
# ## Create Optimizer

# + id="Q8nGcWc7XV5j"
def create_optimizer(model):
    """AdamW with layer-wise learning rates: deeper Roberta layers get a
    higher lr; biases get no weight decay.

    NOTE(review): the slices [:197]/[199:203]/[203:] are hard-coded for the
    parameter ordering of this exact roberta-base architecture — verify if
    the backbone changes.
    """
    named_parameters = list(model.named_parameters())

    roberta_parameters = named_parameters[:197]
    attention_parameters = named_parameters[199:203]
    regressor_parameters = named_parameters[203:]

    attention_group = [params for (name, params) in attention_parameters]
    regressor_group = [params for (name, params) in regressor_parameters]

    parameters = []
    parameters.append({"params": attention_group})
    parameters.append({"params": regressor_group})

    for layer_num, (name, params) in enumerate(roberta_parameters):
        weight_decay = 0.0 if "bias" in name else 0.01

        lr = 2e-5
        if layer_num >= 69:
            lr = 5e-5
        if layer_num >= 133:
            lr = 1e-4

        parameters.append({"params": params,
                           "weight_decay": weight_decay,
                           "lr": lr})

    return AdamW(parameters)

# + [markdown] id="QW8_ta8Shvbp"
# ## Run

# + colab={"base_uri": "https://localhost:8080/"} id="0ASAFbdqXV5k" outputId="75674432-f0ae-4a21-d111-a5b2584d5526"
gc.collect()

SEED = 1000
list_val_rmse = []

for fold in range(NUM_FOLDS):
    print(f"\nFold {fold + 1}/{NUM_FOLDS}")
    model_path = f"model_{fold + 1}.pth"

    set_random_seed(SEED + fold)

    # Adapted for the stratified k-fold train dataset: the 'kfold' column
    # selects the validation fold.
    train_dataset = LitDataset(train_df[train_df['kfold']!=fold])
    val_dataset = LitDataset(train_df[train_df['kfold']==fold])

    #https://pytorch.org/docs/stable/data.html#torch.utils.data.DataLoader
    train_loader = DataLoader(train_dataset, batch_size=BATCH_SIZE,
                              drop_last=True, shuffle=True, num_workers=2)
    val_loader = DataLoader(val_dataset, batch_size=BATCH_SIZE,
                            drop_last=False, shuffle=False, num_workers=2)

    # The random seed changes per fold.
    set_random_seed(SEED + fold)

    model = LitModel().to(DEVICE)

    optimizer = create_optimizer(model)

    # get_cosine_schedule_with_warmup was used as the scheduler before.
    # Other options: https://huggingface.co/transformers/main_classes/optimizer_schedules.html#schedules
    # scheduler = get_cosine_schedule_with_warmup(
    #     optimizer,
    #     num_training_steps=NUM_EPOCHS * len(train_loader),
    #     num_warmup_steps=50)

    # num_warmup_steps: try 6% of num_training_steps
    # -> ceil(3 * 2266 * 0.06) = 408
    # https://www.kaggle.com/c/commonlitreadabilityprize/discussion/241029
    # -> ceil(2266 * 0.06) = 136
    scheduler = get_linear_schedule_with_warmup(
        optimizer,
        num_training_steps=NUM_EPOCHS * len(train_loader),
        num_warmup_steps=25)

    list_val_rmse.append(train(model, model_path, train_loader,
                               val_loader, optimizer, scheduler=scheduler))

    del model
    gc.collect()

print("\nPerformance estimates:")
print(list_val_rmse)
print("Mean:", np.array(list_val_rmse).mean())

# + [markdown] id="c9PqamPNXV5k"
# # Inference

# + id="7bAMT9-NXV5k"
test_dataset = LitDataset(test_df, inference_only=True)

# + colab={"base_uri": "https://localhost:8080/"} id="Z5Y_L0yfXV5k" outputId="e6d1db11-f528-442b-a535-4d382afd5a66"
# Average the per-fold predictions over all trained models.
all_predictions = np.zeros((len(list_val_rmse), len(test_df)))

test_dataset = LitDataset(test_df,
inference_only=True)
test_loader = DataLoader(test_dataset, batch_size=BATCH_SIZE,
                         drop_last=False, shuffle=False, num_workers=2)

# Load each fold's best checkpoint and collect its test predictions.
for index in range(len(list_val_rmse)):
    model_path = f"model_{index + 1}.pth"
    print(f"\nUsing {model_path}")

    model = LitModel()
    model.load_state_dict(torch.load(model_path))
    model.to(DEVICE)

    all_predictions[index] = predict(model, test_loader)

    del model
    gc.collect()

# + colab={"base_uri": "https://localhost:8080/"} id="oUDbpVq8XV5l" outputId="6a3aca43-3958-47a8-edb1-1d7330ebd699"
# Ensemble by simple mean over folds, then write the submission file.
predictions = all_predictions.mean(axis=0)
submission_df.target = predictions
print(submission_df)
submission_df.to_csv("submission.csv", index=False)

# + [markdown] id="uXCmIyftjlEY"
# # Upload data

# + colab={"base_uri": "https://localhost:8080/"} id="3scDIDWzJu_X" outputId="66f2ef86-4f66-4c03-9f30-7a8e98d011f9"
# !date +"%Y%m%d%I%M%S"

# + id="TkwxJP50Jln7"
# Stage the fold checkpoints plus the dataset metadata, stamping a unique
# dataset slug with the current timestamp.
# !mkdir -p ./output/
# !cp -f ./model* ./output/
# !cp -f ./drive/MyDrive/kaggle/commonlit/Lightweight-Roberta-base/dataset-metadata.json ./output/
# !sed -i -e "s/lightweight-roberta-base/lightweight-roberta-base-`date +"%Y%m%d%I%M%S"`/" ./output/dataset-metadata.json

# + colab={"base_uri": "https://localhost:8080/"} id="uWMDce0OKDuQ" outputId="552a5375-c340-4b64-8cc0-8523f43f6f9a"
# !cat ./output/dataset-metadata.json

# + colab={"base_uri": "https://localhost:8080/"} id="X8IUhtyYjmso" outputId="e6ed2ff5-237a-4c57-a5a5-a710447b29af"
# Same staging for the scheduler variant, then publish it as a new
# Kaggle dataset (JST timestamps).
# !mkdir -p ./output/
# !cp -f ./model* ./output/
#CHANGEME
# !cp -f ./drive/MyDrive/kaggle/commonlit/Lightweight-Roberta-base/dataset-metadata-scheduler.json ./output/dataset-metadata.json
# !sed -i -e "s/lightweight-roberta-base/lightweight-roberta-base-`TZ=JST-9 date +"%Y%m%d%H%M%S"`/" ./output/dataset-metadata.json
# !sed -i -e "s/Lightweight-Roberta-base/Roberta-base-`TZ=JST-9 date +"%m%d%H%M%S"`/" ./output/dataset-metadata.json
# !kaggle datasets create -p ./output/

# + colab={"base_uri": "https://localhost:8080/"} id="TwDO7YhghI8z" outputId="33e9a4f2-28e9-408d-ce59-ec0e686afd17"
# !cat ./output/dataset-metadata.json

# + id="IMPh6tXbhW01"
light_weight_roberta_base_scheduler.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # %matplotlib inline # # # The :class:`~mne.io.Raw` data structure: continuous data # ======================================================== # # Continuous data is stored in objects of type :class:`~mne.io.Raw`. # The core data structure is simply a 2D numpy array (channels × samples) # (in memory or loaded on demand) combined with an # :class:`~mne.Info` object (`.info` attribute) # (see `tut-info-class`). # # The most common way to load continuous data is from a .fif file. For more # information on `loading data from other formats <ch_convert>`, or # creating it `from scratch <tut_creating_data_structures>`. # import mne import os.path as op from matplotlib import pyplot as plt # Loading continuous data # ----------------------- # # Load an example dataset, the preload flag loads the data into memory now: # # # + data_path = op.join(mne.datasets.sample.data_path(), 'MEG', 'sample', 'sample_audvis_raw.fif') raw = mne.io.read_raw_fif(data_path, preload=True) raw.set_eeg_reference('average', projection=True) # set EEG average reference # Give the sample rate print('sample rate:', raw.info['sfreq'], 'Hz') # Give the size of the data matrix print('%s channels x %s samples' % (raw.info['nchan'], len(raw.times))) # - # <div class="alert alert-info"><h4>Note</h4><p>This size can also be obtained by examining `raw._data.shape`. # However this is a private attribute as its name starts # with an `_`. This suggests that you should **not** access this # variable directly but rely on indexing syntax detailed just below.</p></div> # # # Information about the channels contained in the :class:`~mne.io.Raw` # object is contained in the :class:`~mne.Info` attribute. # This is essentially a dictionary with a number of relevant fields (see # `tut-info-class`). 
# # # Indexing data # ------------- # # To access the data stored within :class:`~mne.io.Raw` objects, # it is possible to index the :class:`~mne.io.Raw` object. # # Indexing a :class:`~mne.io.Raw` object will return two arrays: an array # of times, as well as the data representing those timepoints. This works # even if the data is not preloaded, in which case the data will be read from # disk when indexing. The syntax is as follows: # # # Extract data from the first 5 channels, from 1 s to 3 s. sfreq = raw.info['sfreq'] data, times = raw[:5, int(sfreq * 1):int(sfreq * 3)] _ = plt.plot(times, data.T) _ = plt.title('Sample channels') # ----------------------------------------- # Selecting subsets of channels and samples # ----------------------------------------- # # It is possible to use more intelligent indexing to extract data, using # channel names, types or time ranges. # # # + # Pull all MEG gradiometer channels: # Make sure to use .copy() or it will overwrite the data meg_only = raw.copy().pick_types(meg=True) eeg_only = raw.copy().pick_types(meg=False, eeg=True) # The MEG flag in particular lets you specify a string for more specificity grad_only = raw.copy().pick_types(meg='grad') # Or you can use custom channel names pick_chans = ['MEG 0112', 'MEG 0111', 'MEG 0122', 'MEG 0123'] specific_chans = raw.copy().pick_channels(pick_chans) print(meg_only) print(eeg_only) print(grad_only) print(specific_chans) # - # Notice the different scalings of these types # # f, (a1, a2) = plt.subplots(2, 1) eeg, times = eeg_only[0, :int(sfreq * 2)] meg, times = meg_only[0, :int(sfreq * 2)] a1.plot(times, meg[0]) a2.plot(times, eeg[0]) del eeg, meg, meg_only, grad_only, eeg_only, data, specific_chans # You can restrict the data to a specific time range # # raw = raw.crop(0, 50) # in seconds print('New time range from', raw.times.min(), 's to', raw.times.max(), 's') # And drop channels by name # # nchan = raw.info['nchan'] raw = raw.drop_channels(['MEG 0241', 'EEG 001']) 
print('Number of channels reduced from', nchan, 'to', raw.info['nchan']) # ------------------------------------------ # Concatenating :class:`~mne.io.Raw` objects # ------------------------------------------ # # :class:`~mne.io.Raw` objects can be concatenated in time by using the # :func:`~mne.io.Raw.append` function. For this to work, they must # have the same number of channels and their :class:`~mne.Info` # structures should be compatible. # # # + # Create multiple :class:`~mne.io.Raw` objects raw1 = raw.copy().crop(0, 10) raw2 = raw.copy().crop(10, 20) raw3 = raw.copy().crop(20, 40) # Concatenate in time (also works without preloading) raw1.append([raw2, raw3]) print('Time extends from', raw1.times.min(), 's to', raw1.times.max(), 's')
stable/_downloads/b7f6f07283b8cae86edea831b037ebca/plot_object_raw.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3.9.5 64-bit
#     name: python3
# ---

# Scratchpad of basic Python method/function exercises.
# Fixed in review: several cells contained errors that stopped the notebook
# from running top-to-bottom (`===` operator, a positional argument after
# keyword arguments, a 3-argument call to a 2-parameter function, and a
# call made before the function was defined).

l = [1,2,3]
l

l.append(4)
l

def name_function():
    '''
    DOCSTRING: Information about the function
    INPUT: no input...
    OUTPUT: Hello
    '''
    print('Hello')

name_function()

help(name_function)

def say_hello(name='nobody'):
    # Default argument lets the caller omit the name.
    print('hello {}'.format(name))

say_hello()
say_hello('David')

def add(n1, n2):
    # Return the sum of the two arguments.
    return n1 + n2

r = add(1,1)
r

def dog_check(text):
    # Case-insensitive substring test.
    # Parameter renamed from `str` to avoid shadowing the builtin.
    return 'dog' in text.lower()

b = dog_check('hello my Dog')
b

# +
def pig_latin(word):
    # Vowel-initial words just get 'ay' appended; otherwise the first
    # letter moves to the end before appending 'ay'.
    if word[0].lower() in 'aeiou':
        word += 'ay'
    else:
        word = word[1:] + word[0] + 'ay'
    return word
# -

pig_latin('apple')

l = [1,2,1,3,4,5]
l.count(1)

135//4

def myfunc(a, b):
    # return 5% of sum of a and b
    return sum((a, b)) * 0.05

# Fix: the original called myfunc(40, 60, 5), a TypeError because myfunc
# accepts exactly two arguments.
x = myfunc(40, 60)
x

def mufunc(*x):
    # *x packs all positional arguments into a tuple.
    print(type(x))
    print(x)
    return sum(x) * 0.05

x = mufunc(40, 60, 100)
x

def myfunc(**kwargs):
    # **kwargs packs keyword arguments into a dict.
    if 'fruit' in kwargs:
        print(kwargs)
        print('tu fruta favorita es {}'.format(kwargs['fruit']))
    else:
        print('no fruit specified')

# +
myfunc(fruit='apple', veggie='lettuce')
# -

def myfunc(*args, **kwargs):
    print(args)
    print(kwargs)
    print('i would like {} {}'.format(args[0], kwargs['food']))

# Fix: positional arguments must come before keyword arguments, so the
# trailing `100` was moved ahead of the keywords.
myfunc(10, 20, 30, 100, fruit='apples', food='eggs', animal='dog')

def myfunc(*args):
    # Keep only the even numbers from the arguments.
    return [even for even in args if even % 2 == 0]

def myfunc(text):
    # Upper-case characters at even positions, lower-case the rest.
    result = ''
    for index, letter in enumerate(text):
        if index % 2 == 0:
            result += letter.upper()
        else:
            result += letter.lower()
    return result

x = myfunc('david')
print(x)

string = '''anadroidan i's an"awesonaan'''
# strip('an') removes any run of 'a'/'n' characters from both ends only.
print(string.strip('an'))  # droid is aweso
print(string)

num = [1,2,3]
num.append(5)
print(num)

# +
num = 'holla'

def saludo():
    # Reads the module-level `num` defined just above.
    print(num)

saludo()

# +
num = 5
# Fix: `===` is not a Python operator; equality is `==`.
if num == 5:
    print('hey')

# +
# Fix: the original called comer() before defining it (NameError);
# the definition now comes first.
def comer():
    print('hey')

comer()

# +
string = "martin"
first_str = string[:3]
last_str = string[3:]
print(f'{first_str} - {last_str}')

# +
palindomo = "anna"

def checkPalindrom(word):
    # A palindrome reads the same forwards and backwards
    # (whitespace is ignored on both sides of the comparison).
    return ''.join(word.split()) == ''.join(word[::-1].split())

checkPalindrom(palindomo)

# +
import random

mylist = list(range(0, 20))
pupulation = random.choices(mylist, k=10)
# e.g. [11, 18, 18, 16, 2, 19, 15, 11, 10, 4]
pupulation

# +
from datetime import datetime

now = datetime.now()  # current date and time
print(now.strftime("%d/%B/%Y, %H:%M:%S"))
print(now.strftime("%d/%m/%Y, %H:%M:%S"))
Code/1.Basics/3.Methods&functions/1.methods.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # name: python3 # --- # + [markdown] id="view-in-github" colab_type="text" # <a href="https://colab.research.google.com/github/Sitraka17/Learning-Data-Science/blob/main/NLP_Plagiarism.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a> # + [markdown] id="Uu73i3CYN6V4" # Similitude d'un texte à un autre via La Distance de Levenshtein # + [markdown] id="TtSNWpBmN5Fz" # Distance de Levenshtein # La distance de Levenshtein est une distance, au sens mathématique du terme, donnant une mesure de la différence entre deux chaînes de caractères. Elle est égale au nombre minimal de caractères qu'il faut supprimer, insérer ou remplacer pour passer d’une chaîne à l’autre. # # Elle a été proposée par <NAME> en 1965. Elle est également connue sous les noms de distance d'édition ou de déformation dynamique temporelle, notamment en reconnaissance de formes et particulièrement en reconnaissance vocale1,2. # # Cette distance est d'autant plus grande que le nombre de différences entre les deux chaînes est grand. La distance de Levenshtein peut être considérée comme une généralisation de la distance de Hamming. On peut montrer en particulier que la distance de Hamming est un majorant de la distance de Levenshtein. # https://en.wikipedia.org/wiki/Levenshtein_distance # + colab={"base_uri": "https://localhost:8080/"} id="XY54MXIjN4Mj" outputId="821dd98f-6457-4949-889d-bfb6ed98fae4" pip install python-Levenshtein # + colab={"base_uri": "https://localhost:8080/"} id="4Sfqwf3eOQ_2" outputId="b9956b37-2d51-467d-b9f3-264c705b8623" import Levenshtein Levenshtein.distance('<NAME>',"<NAME>")
NLP_Plagiarism.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# +
import numpy as np
import pandas as pd

# machine learning models
from sklearn.linear_model import LogisticRegression, Perceptron, SGDClassifier
from sklearn.svm import SVC, LinearSVC
from sklearn.ensemble import RandomForestClassifier, ExtraTreesClassifier, GradientBoostingClassifier, VotingClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.naive_bayes import GaussianNB, MultinomialNB
from sklearn.tree import DecisionTreeClassifier
from sklearn.neural_network import MLPClassifier

#feature scaling
from sklearn.preprocessing import StandardScaler, RobustScaler

#Pipeline
from sklearn.pipeline import Pipeline, FeatureUnion

#Cross validation
from sklearn.model_selection import cross_val_score, train_test_split

#Model persistence
from sklearn.externals import joblib

# Metrics
from sklearn.metrics import roc_curve, auc
# -

train = pd.read_csv('train.gz', index_col=0)
test = pd.read_csv('test.gz', index_col=0)

train.describe()

print(train.columns.values)

train.info()

# Let's drop all calculated features
col = [c for c in train.columns if not c.startswith('ps_calc_')]
train = train[col]
col = [c for c in test.columns if not c.startswith('ps_calc_')]
test = test[col]

corr_matrix = train.corr()
corr_matrix["target"].sort_values(ascending=False)

# +
def change_datatype(df):
    """Downcast each integer column in-place to the smallest int dtype
    that can hold its value range, to reduce memory usage.

    Fix: the original wrote ``np.min(df[col] >= -128)`` — the minimum of a
    boolean mask — instead of ``np.min(df[col]) >= -128``; the misplaced
    parenthesis made every lower-bound check meaningless.
    """
    int_cols = list(df.select_dtypes(include=['int']).columns)
    for col in int_cols:
        if (np.max(df[col]) <= 127) and (np.min(df[col]) >= -128):
            df[col] = df[col].astype(np.int8)
        elif (np.max(df[col]) <= 32767) and (np.min(df[col]) >= -32768):
            df[col] = df[col].astype(np.int16)
        elif (np.max(df[col]) <= 2147483647) and (np.min(df[col]) >= -2147483648):
            df[col] = df[col].astype(np.int32)
        else:
            df[col] = df[col].astype(np.int64)

change_datatype(train)
change_datatype(test)

# +
def change_datatype_float(df):
    """Downcast float columns to float32 in-place to reduce memory usage."""
    float_cols = list(df.select_dtypes(include=['float']).columns)
    for col in float_cols:
        df[col] = df[col].astype(np.float32)

change_datatype_float(train)
change_datatype_float(test)
# -

def gini(y, pred):
    """Normalized Gini coefficient: 2 * AUC - 1."""
    fpr, tpr, thr = roc_curve(y, pred, pos_label=1)
    g = 2 * auc(fpr, tpr) - 1
    return g

# Hold out 10% of the training rows for validation.
train, validation, target, target_val = train_test_split(train, train['target'], test_size=0.10, random_state=42)

# Drop id and target from train and validation sets.
train_tr = train.drop("target", axis=1).drop("id", axis=1)
validation_tr = validation.drop("target", axis=1).drop("id", axis=1)
test_tr = test.drop("id", axis=1)

# +
# Normalize the data.
# Fix: fit the scaler on the training split only, then apply the SAME
# transform to validation and test. Re-fitting on every split (as the
# original `fit_transform` calls did) leaks split statistics and puts
# each split in a different feature space.
scaler = StandardScaler()
train_tr = scaler.fit_transform(train_tr)
validation_tr = scaler.transform(validation_tr)
test_tr = scaler.transform(test_tr)
# -

train_tr.shape, validation_tr.shape, test_tr.shape

# +
# Baseline logistic regression
logreg = LogisticRegression()
# %time logreg.fit(train_tr, target)
# %time prediction = logreg.predict_proba(test_tr)
prediction_lg = prediction[:,1]
joblib.dump(logreg, 'logreg.pkl', compress=True)
# %time score = logreg.score(validation_tr, target_val)
print("Rendimiento en el dataset de validación: %.4f" % score)
# -

# Gini score on the validation split (uses class-1 probabilities).
# %time gini_pred = logreg.predict_proba(validation_tr)
gini_pred = gini_pred[:,1]
#gini_pred = (np.exp(gini_pred) - 1.0).clip(0,1)
gini_score = gini(target_val, gini_pred)
print("Gini en el dataset de validación: %.4f" % gini_score)

np.savez('prediction_lg.npz', prediction_lg)

# +
# Submission
# -

submission = pd.DataFrame({
    "id": test["id"],
    "target": prediction_lg
})
submission.to_csv('submission_lg.csv', index=False)
Porto Seguro/Logistic Regression.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- import pandas as pd import numpy as np import matplotlib.pyplot as plt from tensorflow.keras.optimizers import Adam from tensorflow.keras.losses import categorical_crossentropy, mae, mse import sys import time import tensorflow as tf import os from tqdm.notebook import tqdm file_pathes = [x for x in os.listdir() if x.endswith(".csv")] df = pd.DataFrame() for path in file_pathes: data = pd.read_csv(path, names = ["date", 'time','open','high', 'low','close','volume']) df = pd.concat([df, data]) df # + # window_size = 60*4 # label = [] # for i, price in tqdm(enumerate(df['close']), total = len(df)): # if i > (len(df['close']) - window_size): # target = 0 # else: # chunk = list(df['close'][i:(i + window_size)]) # open_price = chunk[0] # target = 0 # for j,p in enumerate(chunk): # target += (window_size - j)**2*(open_price - p) # label.append(target) # + # label # - window_size = 10 label = [] cof = np.array([(window_size - j)**1 for j in range(window_size)]) cof = cof/cof.sum()*10000 close = df['close'].values for i, price in tqdm(enumerate(df['close']), total = len(df)): if i > (len(df['close']) - window_size): target = 0 else: chunk = close[i:(i + window_size)] open_price = chunk[0] close_price = chunk[-1] dif = open_price - chunk # target = (dif*cof).sum() target = (close_price - open_price)*10000 # for j,p in enumerate(chunk): # target += (window_size - j)**2*(open_price - p) label.append(target) # print(target) # break # + # label # - label = label[1:(len(label) - window_size)] plt.plot(label); print(np.mean(label), len(label)) x = (np.diff(df['close'])*100000)[0:(len(df['close']) - window_size - 1)] len(x) split_point = int(len(x)*0.9) x_t = x[0:split_point] y_t = label[0:split_point] x_v = x[split_point:] y_v = label[split_point:] 
print(type(x_t), type(y_t))

from tensorflow.keras.preprocessing.sequence import TimeseriesGenerator

# Sliding-window generators: each sample is the previous 24h (24*60 minute
# bars) of price differences, sampled every 30 bars, predicting the
# corresponding pip-change label.
t_gen = TimeseriesGenerator(data=x_t,targets=y_t, length=24*60, stride = 30, batch_size=2**6)
v_gen = TimeseriesGenerator(data=x_v,targets=y_v, length=24*60, stride = 30, batch_size=32)

t_gen.__len__()

from tensorflow.keras.layers import Dense, Input, Conv1D, Reshape, Flatten, LSTM, concatenate
from tensorflow.keras.models import Model

# +
# Two-headed network:
#   - a Conv1D stack downsamples the 1440-step window,
#   - an LSTM pair summarises it into a single feature,
#   - head "value"  (linear) predicts the pip change,
#   - head "select" (sigmoid) predicts a 0..1 confidence used as a sample
#     weight inside the custom loss `mse_rev` below.
# Model output is the 2-column concatenation [value, select].
inp = Input(shape=24*60, dtype = tf.float32)
x = Reshape((24*60,1), dtype = tf.float32)(inp)
x = Conv1D(8*2, kernel_size = 10, strides = 3, dtype = tf.float32, activation = 'relu')(x)
x = Conv1D(16*2, kernel_size = 10, strides = 3, dtype = tf.float32, activation = 'relu')(x)
x = Conv1D(32*2, kernel_size = 10, strides = 3, dtype = tf.float32, activation = 'relu')(x)
x = Conv1D(64*2, kernel_size = 10, strides = 3, dtype = tf.float32, activation = 'relu')(x)
# x = Conv1D(128*2, kernel_size = 10, strides = 3, dtype = tf.float32)(x)
# x = Conv1D(64, kernel_size = 10, strides = 3, dtype = tf.float32)(x)
x = LSTM(128, activation=None, return_sequences=True)(x)
x = LSTM(1, activation=None, return_sequences=False)(x)
z = Flatten(dtype = tf.float32)(x)

# "value" head: unbounded regression output.
x = Dense(128, dtype = tf.float32)(z)
x = Dense(64, dtype = tf.float32)(x)
x = Dense(32, dtype = tf.float32)(x)
x = Dense(8, dtype = tf.float32)(x)
x = Dense(1, dtype = tf.float32, name = 'value')(x)

# "select" head: confidence in [0, 1].
y = Dense(128, dtype = tf.float32)(z)
y = Dense(64, dtype = tf.float32)(y)
y = Dense(32, dtype = tf.float32)(y)
y = Dense(8, dtype = tf.float32)(y)
y = Dense(1, dtype = tf.float32, activation='sigmoid', name='select')(y)

v = concatenate([x,y])
model = Model(inputs = inp, outputs = v)

# +
# Earlier draft of the confidence-weighted loss, kept for reference.
# def mse_rev(y_true, y_pred):
#     value = y_pred[0]
#     select = y_pred[1]
#     return tf.reduce_sum((y_true - value)**2*select/tf.reduce_mean(y_pred[1]))
# -

def mse_rev(y_true, y_pred):
    """Confidence-weighted MSE for the two-column [value, select] output.

    Squared errors of the "value" column are weighted by the "select"
    column (plus a small floor so zero-confidence samples still contribute
    to the normaliser) and normalised by the total weight.
    """
    w = y_pred[:,1] + 0.001
    l2 = tf.reduce_sum(tf.square(y_true - y_pred[:,0])*w)/tf.reduce_sum(w)
    return l2
# Standard fit with the custom confidence-weighted loss.
model.compile(optimizer='adam', loss=mse_rev)
model.fit(t_gen, validation_data=v_gen, epochs=50)

# Rebuild the training generator (identical settings to the one above).
t_gen = TimeseriesGenerator(data=x_t,targets=y_t, length=24*60, stride = 30, batch_size=2**6)

# Per-batch diagnostic: compare the custom loss against plain MSE of the
# "value" column only.
for i in range(t_gen.__len__()):
    x = t_gen.__getitem__(i)
    pred = model(x[0])
    # pred
    print("---------------------------------")
    print(mse_rev(x[1], pred))
    print(mse(x[1], pred[:,0]))

from tensorflow.keras.utils import plot_model
plot_model(model)
print(model.summary())

# +
# Baseline check: MSE of predicting the constant train-set mean, on both
# splits — the model should beat this.
y_valid = np.array([])
for i in range(v_gen.__len__()):
    chunk = v_gen.__getitem__(i)
    # print(chunk[1])
    y_valid = np.concatenate([y_valid, chunk[1]])

y_train = np.array([])
for i in range(t_gen.__len__()):
    chunk = t_gen.__getitem__(i)
    y_train = np.concatenate([y_train, chunk[1]])

print(mse(y_valid, np.mean(y_train)))
print(mse(y_train, np.mean(y_train)))
# -

def step(X, y):
    """Run one forward/backward pass and return the gradients (no update)."""
    with tf.GradientTape() as tape:
        pred = model(X)
        # NOTE(review): `pred` has two columns [value, select] while `y` is a
        # 1-D target; plain `mse(y, pred)` broadcasts across both columns.
        # Presumably this should be `mse_rev(y, pred)` or `mse(y, pred[:,0])`
        # — confirm intent.
        loss = mse(y, pred)
    grads = tape.gradient(loss, model.trainable_variables)
    return grads

INIT_LR = 1e-3
EPOCHS = 5
# NOTE(review): `lr=` and `decay=` are deprecated Adam arguments in modern
# Keras (`learning_rate=` / LR schedules); also EPOCHS is immediately
# overwritten below, so the decay is computed from 5, not 20.
opt = Adam(lr=INIT_LR, decay=INIT_LR / EPOCHS)
EPOCHS = 20

# Manual large-batch training: accumulate a running average of gradients over
# `steps_for_update` mini-batches, then apply one optimizer update.
total_steps = EPOCHS*t_gen.__len__()
steps_for_update = int(1/2*t_gen.__len__())
total_updates = total_steps//steps_for_update
counter = 0
cs = 0
for u in range(total_updates):
    for _ in tqdm(range(steps_for_update)):
        # Cycle through batches; `cs` is the global step counter.
        i = cs%t_gen.__len__()
        trainX, trainY = t_gen.__getitem__(i)
        if counter == 0:
            grads = step(trainX, trainY)
        else:
            # Incremental mean of per-batch gradients.
            new_grads = step(trainX, trainY)
            grads = [(grads[i]*counter + new_grads[i])/(counter+1) for i in range(len(grads))]
        counter += 1
        if counter == steps_for_update:
            opt.apply_gradients(zip(grads, model.trainable_variables))
            counter = 0
            print('Weights updated at step {} and epoch {}'.format(u, u//t_gen.__len__()));
            print('Model evaluated on valid data:', model.evaluate(v_gen));
            print('Model evaluated on train data:', model.evaluate(t_gen));
        cs += 1
using multi_steps.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # --- # + [markdown] application/vnd.databricks.v1+cell={"inputWidgets": {}, "nuid": "3da6999a-399e-415a-89e2-7fc157f5357e", "showTitle": false, "title": ""} # ![JohnSnowLabs](https://nlp.johnsnowlabs.com/assets/images/logo.png) # + [markdown] application/vnd.databricks.v1+cell={"inputWidgets": {}, "nuid": "291b4d75-cd31-44ea-96e2-6e2ced4360eb", "showTitle": false, "title": ""} # # 3. Spark NLP Pretrained Models v2.6.3 # + [markdown] application/vnd.databricks.v1+cell={"inputWidgets": {}, "nuid": "5e26f011-a3c1-4b49-bb5f-09d633d16741", "showTitle": false, "title": ""} # Spark NLP offers the following pre-trained models in 26 languages and all you need to do is to load the pre-trained model into your disk by specifying the model name and then configuring the model parameters as per your use case and dataset. Then you will not need to worry about training a new model from scratch and will be able to enjoy the pre-trained SOTA algorithms directly applied to your own data with transform(). # # In the official documentation, you can find detailed information regarding how these models are trained by using which algorithms and datasets. 
# # https://github.com/JohnSnowLabs/spark-nlp-models

# +
# Spark NLP session sanity check (runs on a Databricks cluster where `spark`
# is predefined).
import sparknlp
from sparknlp.base import *
from sparknlp.annotator import *

print("Spark NLP version", sparknlp.version())
print("Apache Spark version:", spark.version)
spark

# + [markdown]
# ## LemmatizerModel

# +
# Download the AG-news style test set used throughout this notebook.
# !wget -q -O news_category_test.csv https://s3.amazonaws.com/auxdata.johnsnowlabs.com/public/resources/en/classifier-dl/news_Category/news_category_test.csv

# +
# %fs ls "file:/databricks/driver"

# +
#dbutils.fs.cp("file:/databricks/driver/news_category_test.csv", "dbfs:/")

# +
import pyspark.sql.functions as F

# Load the news CSV; the pipeline expects the text column to be named "text".
news_df = spark.read\
        .option("header", "true")\
        .csv("news_category_test.csv")\
        .withColumnRenamed("description", "text")

news_df.show(truncate=50)

# +
# Pretrained English lemmatizer (takes "token", emits "lemma").
# FIX: the original chain ended with a trailing "\" line continuation, which
# glued the assignment onto the triple-quoted string below and made the cell
# a SyntaxError; the stray backslash is removed here.
lemmatizer = LemmatizerModel.pretrained('lemma_antbnc', 'en') \
      .setInputCols(["token"]) \
      .setOutputCol("lemma")

# Alternative: train a Lemmatizer from a local dictionary file (kept as a
# reference snippet, deliberately inert).
'''
lemmatizer = Lemmatizer() \
     .setInputCols(["token"]) \
     .setOutputCol("lemma") \
     .setDictionary("file:/databricks/driver/AntBNC_lemmas_ver_001.txt", value_delimiter ="\t", key_delimiter = "->")
'''

# +
# !cd ~/cache_pretrained && ls -l

# +
from pyspark.ml import Pipeline

# Minimal pipeline: raw text -> document -> tokens -> stems + lemmas.
documentAssembler = DocumentAssembler()\
    .setInputCol("text")\
    .setOutputCol("document")

tokenizer = Tokenizer() \
    .setInputCols(["document"]) \
    .setOutputCol("token")

stemmer = Stemmer() \
    .setInputCols(["token"]) \
    .setOutputCol("stem")

nlpPipeline = Pipeline(stages=[
    documentAssembler,
    tokenizer,
    stemmer,
    lemmatizer
])

# Fitting on an empty frame just materialises the (already-trained) stages.
empty_df = spark.createDataFrame([['']]).toDF("text")
pipelineModel = nlpPipeline.fit(empty_df)

# +
result = pipelineModel.transform(news_df)
result.show(5)

# +
result.select('token.result','lemma.result').show(5, truncate=100)

# + [markdown]
# ## PerceptronModel (POS - Part of speech tags)

# +
# Pretrained averaged-perceptron POS tagger.
pos = PerceptronModel.pretrained("pos_anc", 'en')\
        .setInputCols("document", "token")\
        .setOutputCol("pos")

# +
# Same pipeline as above with the POS tagger appended.
nlpPipeline = Pipeline(stages=[
    documentAssembler,
    tokenizer,
    stemmer,
    lemmatizer,
    pos
])

empty_df = spark.createDataFrame([['']]).toDF("text")
pipelineModel = nlpPipeline.fit(empty_df)

# +
application/vnd.databricks.v1+cell={"inputWidgets": {}, "nuid": "0b712ac5-dfb1-4075-84d6-5d96309a422b", "showTitle": false, "title": ""}
result = pipelineModel.transform(news_df)
result.show(5)

# +
result.select('token.result','pos.result').show(5, truncate=100)

# +
# applying this pipeline to top 100 rows and then converting to Pandas
result = pipelineModel.transform(news_df.limit(100))

# Explode the per-token annotation arrays into one row per token.
result_df = result.select(F.explode(F.arrays_zip('token.result',
                                                 'token.begin',
                                                 'token.end',
                                                 'stem.result',
                                                 'lemma.result',
                                                 'pos.result')).alias("cols")) \
                  .select(F.expr("cols['0']").alias("token"),
                          F.expr("cols['1']").alias("begin"),
                          F.expr("cols['2']").alias("end"),
                          F.expr("cols['3']").alias("stem"),
                          F.expr("cols['4']").alias("lemma"),
                          F.expr("cols['5']").alias("pos")).toPandas()

result_df.head(10)

# +
# same in LightPipeline
light_model = LightPipeline(pipelineModel)

light_result = light_model.annotate('Unions representing workers at Turner   Newall say they are disappointed after talks with stricken parent firm Federal Mogul.')

list(zip(light_result['token'], light_result['stem'], light_result['lemma'], light_result['pos']))

# + [markdown]
# ## Chunker
#
# Meaningful phrase matching
#
# This annotator matches a pattern of part-of-speech tags in order to return meaningful phrases from document
#
# Output type: Chunk
#
# Input types: Document, POS
#
# Functions:
#
# `setRegexParsers(patterns)`: A list of regex patterns to match chunks, for example: Array(“‹DT›?‹JJ›*‹NN›
#
# `addRegexParser(patterns)`: adds a pattern to the current list of chunk patterns, for example: “‹DT›?‹JJ›*‹NN›”

# +
# applying POS chunker to find a custom pattern
chunker = Chunker()\
    .setInputCols(["document", "pos"])\
    .setOutputCol("chunk")\
    .setRegexParsers(["<NNP>+", "<DT>?<JJ>*<NN>"])

# NNP: Proper Noun
# NN: COmmon Noun
# DT: Determinator (e.g. the)
# JJ: Adjective

nlpPipeline = Pipeline(stages=[
    documentAssembler,
    tokenizer,
    stemmer,
    lemmatizer,
    pos,
    chunker
])

empty_df = spark.createDataFrame([['']]).toDF("text")
pipelineModel = nlpPipeline.fit(empty_df)

# +
result = pipelineModel.transform(news_df.limit(100))
result.show(5)

# +
# One row per matched chunk with its character span.
result_df = result.select(F.explode(F.arrays_zip('chunk.result',
                                                 'chunk.begin',
                                                 'chunk.end')).alias("cols")) \
                  .select(F.expr("cols['0']").alias("chunk"),
                          F.expr("cols['1']").alias("begin"),
                          F.expr("cols['2']").alias("end")).toPandas()

result_df.head(10)

# + [markdown]
# ## Dependency Parser

# +
# Unlabeled dependency parser (head attachments only).
dep_parser = DependencyParserModel.pretrained('dependency_conllu')\
        .setInputCols(["document", "pos", "token"])\
        .setOutputCol("dependency")

# +
# Labeled (typed) dependency parser; consumes the unlabeled arcs above.
typed_dep_parser = TypedDependencyParserModel.pretrained('dependency_typed_conllu')\
        .setInputCols(["token", "pos", "dependency"])\
        .setOutputCol("dependency_type")

# +
nlpPipeline = Pipeline(stages=[
    documentAssembler,
    tokenizer,
    stemmer,
    lemmatizer,
    pos,
    dep_parser,
    typed_dep_parser
])

empty_df = spark.createDataFrame([['']]).toDF("text")
pipelineModel = nlpPipeline.fit(empty_df)

# +
result = pipelineModel.transform(news_df.limit(100))

result_df = result.select(F.explode(F.arrays_zip('token.result',
                                                 'token.begin',
                                                 'token.end',
                                                 'dependency.result',
                                                 'dependency_type.result')).alias("cols")) \
                  .select(F.expr("cols['0']").alias("chunk"),
                          F.expr("cols['1']").alias("begin"),
                          F.expr("cols['2']").alias("end"),
                          F.expr("cols['3']").alias("dependency"),
                          F.expr("cols['4']").alias("dependency_type")).toPandas()

result_df.head(10)

# + [markdown]
# ## StopWordsCleaner
#
# `stopwords_fr`, `stopwords_de`, `stopwords_en`, `stopwords_it`, `stopwords_af` .... over 40 languages

# +
stopwords_cleaner = StopWordsCleaner.pretrained('stopwords_en','en')\
    .setInputCols("token")\
    .setOutputCol("cleanTokens")\
    .setCaseSensitive(False)

# +
# we can also get the list of stopwords
stopwords_cleaner_es = StopWordsCleaner.pretrained('stopwords_es','es')\
    .setInputCols("token")\
    .setOutputCol("cleanTokens")\
    .setCaseSensitive(False)

stopwords_cleaner_es.getStopWords()[:10]

# +
# Reassemble the surviving tokens back into a cleaned text string.
token_assembler = TokenAssembler() \
    .setInputCols(["document", "cleanTokens"]) \
    .setOutputCol("clean_text")

nlpPipeline = Pipeline(stages=[
    documentAssembler,
    tokenizer,
    stopwords_cleaner,
    token_assembler
])

empty_df = spark.createDataFrame([['']]).toDF("text")
pipelineModel = nlpPipeline.fit(empty_df)

# same in LightPipeline
light_model = LightPipeline(pipelineModel)
light_result = light_model.annotate('<NAME> is a nice person and a friend of mine.')
light_result['clean_text']

# + [markdown]
# ## SpellChecker

# + [markdown]
# ### Norvig Spell Checker
#
# This annotator retrieves tokens and makes corrections automatically if not found in an English dictionary

# +
spell_checker_norvig = NorvigSweetingModel.pretrained('spellcheck_norvig')\
    .setInputCols("token")\
    .setOutputCol("corrected")

# +
from pyspark.sql.types import StringType

text_list = ['<NAME> is a nice persn and lives in New York.',
             '<NAME> is also a nice guy and lives in Gotham City.']

spark_df = spark.createDataFrame(text_list, StringType()).toDF("text")

spark_df.show(truncate=80)

# +
nlpPipeline = Pipeline(stages=[
    documentAssembler,
    tokenizer,
    stemmer,
    lemmatizer,
    pos,
    spell_checker_norvig
])

empty_df = spark.createDataFrame([['']]).toDF("text")
pipelineModel = nlpPipeline.fit(empty_df)

# +
result = pipelineModel.transform(spark_df)

# +
from pyspark.sql import functions as F

result_df = result.select(F.explode(F.arrays_zip('token.result',
                                                 'corrected.result',
                                                 'stem.result',
                                                 'lemma.result',
                                                 'pos.result')).alias("cols")) \
                  .select(F.expr("cols['0']").alias("token"),
                          F.expr("cols['1']").alias("corrected"),
                          F.expr("cols['2']").alias("stem"),
                          F.expr("cols['3']").alias("lemma"),
                          F.expr("cols['4']").alias("pos")).toPandas()

result_df.head(10)

# +
# same in LightPipeline
light_model = LightPipeline(pipelineModel)

light_result = light_model.annotate('The patint has pain and headace')

list(zip(light_result['token'], light_result['corrected']))

# + [markdown] application/vnd.databricks.v1+cell={"inputWidgets": {}, "nuid": "3c8902a2-aa26-4357-8ae3-09c3b1c525e2", "showTitle":
false, "title": ""} # ### Context SpellChecker # + [markdown] application/vnd.databricks.v1+cell={"inputWidgets": {}, "nuid": "42bad3f1-2fa4-4291-bc7f-10e416947ade", "showTitle": false, "title": ""} # The idea for this annotator is to have a flexible, configurable and "re-usable by parts" model. # # Flexibility is the ability to accommodate different use cases for spell checking like OCR text, keyboard-input text, ASR text, and general spelling problems due to orthographic errors. # # We say this is a configurable annotator, as you can adapt it yourself to different use cases avoiding re-training as much as possible. # # Spell Checking at three levels: The final ranking of a correction sequence is affected by three things, # # Different correction candidates for each word - **word level**. # # The surrounding text of each word, i.e. its context - **sentence level**. # # The relative cost of different correction candidates according to the edit operations at the character level it requires - **subword level**. 
# +
# Context-aware (sentence-level) spell checker; Finisher flattens the
# annotation structs into plain strings.
spellModel = ContextSpellCheckerModel\
    .pretrained('spellcheck_dl')\
    .setInputCols("token")\
    .setOutputCol("checked")

finisher = Finisher()\
    .setInputCols("checked")

pipeline = Pipeline(
    stages = [
    documentAssembler,
    tokenizer,
    spellModel,
    finisher
  ])

empty_ds = spark.createDataFrame([[""]]).toDF("text")
sc_model = pipeline.fit(empty_ds)
lp = LightPipeline(sc_model)

# +
lp.annotate("Plaese alliow me tao introdduce myhelf, I am a man of waelth und tiaste")

# +
# Same misspelling ("ueather") resolved differently by context:
# weather / leather / Heather.
examples = ['We will go to swimming if the ueather is nice.',\
            "I have a black ueather jacket, so nice.",\
            "I introduce you to my sister, she is called ueather."]

spark_df = spark.createDataFrame(examples, StringType()).toDF("text")

results = sc_model.transform(spark_df)
results.show(truncate=False)

# + [markdown]
# ## Language Detector

# +
documentAssembler = DocumentAssembler()\
    .setInputCol("text")\
    .setOutputCol("document")

# Multilingual ("xx") 20-language Wikipedia detector; one prediction per
# document (sentences coalesced), minimum confidence 0.5.
languageDetector = LanguageDetectorDL.pretrained('ld_wiki_20', 'xx')\
    .setInputCols("document")\
    .setOutputCol("language")\
    .setThreshold(0.5)\
    .setCoalesceSentences(True)

nlpPipeline = Pipeline(stages=[
    documentAssembler,
    languageDetector
])

empty_df = spark.createDataFrame([['']]).toDF("text")
pipelineModel = nlpPipeline.fit(empty_df)

# +
light_model = LightPipeline(pipelineModel)

# +
# Sample paragraphs in four languages for the detector demo.
text_en = "<NAME> III (born October 28, 1955) is an American business magnate, software developer, investor, and philanthropist. He is best known as the co-founder of Microsoft Corporation. During his career at Microsoft, Gates held the positions of chairman, chief executive officer (CEO), president and chief software architect, while also being the largest individual shareholder until May 2014."

text_de = 'Als <NAME> 2007 bei Google anfing, an selbstfahrenden Autos zu arbeiten, nahmen ihn nur wenige Leute außerhalb des Unternehmens ernst.'

text_es = "La historia del procesamiento del lenguaje natural generalmente comenzó en la década de 1950, aunque se puede encontrar trabajo de períodos anteriores. En 1950, <NAME> publicó un artículo titulado 'Maquinaria de computación e inteligencia' que proponía lo que ahora se llama la prueba de Turing como criterio de inteligencia"

text_it = "<NAME> è uno psicologo cognitivo e uno scienziato informatico canadese inglese, noto soprattutto per il suo lavoro sulle reti neurali artificiali. Dal 2013 divide il suo tempo lavorando per Google e l'Università di Toronto. Nel 2017 è stato cofondatore ed è diventato Chief Scientific Advisor del Vector Institute di Toronto."

# +
light_model.annotate(text_de)['language']

# +
light_model.fullAnnotate(text_es)[0]['language']

# + [markdown]
# ## Embeddings

# + [markdown]
# ### Word Embeddings (Glove)

# +
glove_embeddings = WordEmbeddingsModel.pretrained('glove_100d')\
    .setInputCols(["document", "token"])\
    .setOutputCol("embeddings")

# +
nlpPipeline = Pipeline(stages=[
    documentAssembler,
    tokenizer,
    glove_embeddings
])

empty_df = spark.createDataFrame([['']]).toDF("text")
pipelineModel = nlpPipeline.fit(empty_df)

# +
result = pipelineModel.transform(news_df.limit(3))

# +
result.select('embeddings.embeddings').take(1)

# +
result = pipelineModel.transform(news_df.limit(3))

# One row per token with its 100-d GloVe vector.
result_df = result.select(F.explode(F.arrays_zip('token.result',
                                                 'embeddings.embeddings')).alias("cols")) \
                  .select(F.expr("cols['0']").alias("token"),
                          F.expr("cols['1']").alias("embeddings"))

result_df.show(10, truncate=100)

# + [markdown]
# ### Elmo Embeddings

# + [markdown]
# Computes contextualized word representations using character-based word representations and bidirectional LSTMs.
#
# It can work with 4 different pooling layer options: `word_emb`,
# `lstm_outputs1`, `lstm_outputs2`, or `elmo`

# +
elmo_embeddings = ElmoEmbeddings.pretrained('elmo')\
    .setInputCols(["document", "token"])\
    .setOutputCol("embeddings")\
    .setPoolingLayer('elmo')# default --> elmo

# +
nlpPipeline = Pipeline(stages=[
    documentAssembler,
    tokenizer,
    elmo_embeddings
])

empty_df = spark.createDataFrame([['']]).toDF("text")
pipelineModel = nlpPipeline.fit(empty_df)

result = pipelineModel.transform(news_df.limit(10))

result_df = result.select(F.explode(F.arrays_zip('token.result',
                                                 'embeddings.embeddings')).alias("cols")) \
                  .select(F.expr("cols['0']").alias("token"),
                          F.expr("cols['1']").alias("elmo_embeddings"))

result_df.show(truncate=100)

# + [markdown]
# ### Bert Embeddings
#
# BERT (Bidirectional Encoder Representations from Transformers) provides dense vector representations for natural language by using a deep, pre-trained neural network with the Transformer architecture
#
# It can work with 3 different pooling layer options: `0`,
# `-1`, or `-2`

# +
bert_embeddings = BertEmbeddings.pretrained('bert_base_cased')\
    .setInputCols(["document", "token"])\
    .setOutputCol("embeddings")

# +
nlpPipeline = Pipeline(stages=[
    documentAssembler,
    tokenizer,
    bert_embeddings
])

empty_df = spark.createDataFrame([['']]).toDF("text")
pipelineModel = nlpPipeline.fit(empty_df)

result = pipelineModel.transform(news_df.limit(10))

result_df = result.select(F.explode(F.arrays_zip('token.result',
                                                 'embeddings.embeddings')).alias("cols")) \
                  .select(F.expr("cols['0']").alias("token"),
                          F.expr("cols['1']").alias("bert_embeddings"))

result_df.show(truncate=100)

# + [markdown]
# ### XlnetEmbeddings
#
# Computes contextualized word representations using combination of Autoregressive Language Model and Permutation Language Model

# +
xlnet_embeddings = XlnetEmbeddings.pretrained('xlnet_base_cased')\
    .setInputCols(["document", "token"])\
    .setOutputCol("embeddings")

nlpPipeline = Pipeline(stages=[
    documentAssembler,
    tokenizer,
    xlnet_embeddings
])

empty_df = spark.createDataFrame([['']]).toDF("text")
pipelineModel = nlpPipeline.fit(empty_df)

result = pipelineModel.transform(news_df.limit(10))

# NOTE(review): the alias "bert_embeddings" here is a copy-paste leftover —
# these are XLNet vectors.
result_df = result.select(F.explode(F.arrays_zip('token.result',
                                                 'embeddings.embeddings')).alias("cols")) \
                  .select(F.expr("cols['0']").alias("token"),
                          F.expr("cols['1']").alias("bert_embeddings"))

result_df.show(truncate=100)

# + [markdown]
application/vnd.databricks.v1+cell={"inputWidgets": {}, "nuid": "e86ba427-644d-48e4-8876-bc1bddbf3814", "showTitle": false, "title": ""} # ### Chunk Embeddings # # This annotator utilizes `WordEmbeddings` or `BertEmbeddings` to generate chunk embeddings from either `TextMatcher`, `RegexMatcher`, `Chunker`, `NGramGenerator`, or `NerConverter` outputs. # # `setPoolingStrategy`: Choose how you would like to aggregate Word Embeddings to Sentence Embeddings: `AVERAGE` or `SUM` # + application/vnd.databricks.v1+cell={"inputWidgets": {}, "nuid": "032cab2c-6b71-443b-ada6-249f7d24b1c0", "showTitle": false, "title": ""} news_df.take(3) # + application/vnd.databricks.v1+cell={"inputWidgets": {}, "nuid": "116729c1-326c-47b2-8fb7-17ca61c850f9", "showTitle": false, "title": ""} entities = ['parent firm', 'economy', 'amino acids'] with open ('entities.txt', 'w') as f: for i in entities: f.write(i+'\n') dbutils.fs.cp("file:/databricks/driver/entities.txt", "dbfs:/") entity_extractor = TextMatcher() \ .setInputCols(["document",'token'])\ .setOutputCol("entities")\ .setEntities("file:/databricks/driver/entities.txt")\ .setCaseSensitive(False)\ .setEntityValue('entities') nlpPipeline = Pipeline(stages=[ documentAssembler, tokenizer, entity_extractor ]) empty_df = spark.createDataFrame([['']]).toDF("text") pipelineModel = nlpPipeline.fit(empty_df) # + application/vnd.databricks.v1+cell={"inputWidgets": {}, "nuid": "0aa786d6-6cfc-4000-9678-80192506fa9c", "showTitle": false, "title": ""} result = pipelineModel.transform(news_df.limit(10)) result.select('entities.result').take(3) # + application/vnd.databricks.v1+cell={"inputWidgets": {}, "nuid": "fd14a964-05ac-44a4-961e-ed6c30f9d176", "showTitle": false, "title": ""} chunk_embeddings = ChunkEmbeddings() \ .setInputCols(["entities", "embeddings"]) \ .setOutputCol("chunk_embeddings") \ .setPoolingStrategy("AVERAGE") nlpPipeline = Pipeline(stages=[ documentAssembler, tokenizer, entity_extractor, glove_embeddings, chunk_embeddings ]) 
empty_df = spark.createDataFrame([['']]).toDF("text") pipelineModel = nlpPipeline.fit(empty_df) # + application/vnd.databricks.v1+cell={"inputWidgets": {}, "nuid": "53c9d5d4-9578-4d50-becf-8bcbed7c87f2", "showTitle": false, "title": ""} result = pipelineModel.transform(news_df.limit(10)) result_df = result.select(F.explode(F.arrays_zip('entities.result', 'chunk_embeddings.embeddings')).alias("cols")) \ .select(F.expr("cols['0']").alias("entities"), F.expr("cols['1']").alias("glove_embeddings")) result_df.show(truncate=100) # + application/vnd.databricks.v1+cell={"inputWidgets": {}, "nuid": "4c70dc6c-e1ec-4418-a0e5-41ce44f138d3", "showTitle": false, "title": ""} news_df.show() # + [markdown] application/vnd.databricks.v1+cell={"inputWidgets": {}, "nuid": "f86a6609-1b3a-438e-9333-807d0e1d0ae0", "showTitle": false, "title": ""} # ### UniversalSentenceEncoder # + [markdown] application/vnd.databricks.v1+cell={"inputWidgets": {}, "nuid": "402ddcc0-7679-4fd7-8022-c5f59a42a773", "showTitle": false, "title": ""} # The Universal Sentence Encoder encodes text into high dimensional vectors that can be used for text classification, semantic similarity, clustering and other natural language tasks. 
# +
# No need for token columns: USE consumes the document annotation directly.
use_embeddings = UniversalSentenceEncoder.pretrained('tfhub_use')\
    .setInputCols(["document"])\
    .setOutputCol("sentence_embeddings")

# +
nlpPipeline = Pipeline(stages=[
    documentAssembler,
    use_embeddings
])

empty_df = spark.createDataFrame([['']]).toDF("text")
pipelineModel = nlpPipeline.fit(empty_df)

result = pipelineModel.transform(news_df.limit(10))

result_df = result.select(F.explode(F.arrays_zip('document.result', 'sentence_embeddings.embeddings')).alias("cols")) \
    .select(F.expr("cols['0']").alias("document"),
            F.expr("cols['1']").alias("USE_embeddings"))

result_df.show(truncate=100)

# + [markdown]
# ## Loading Models from local

# +
glove_embeddings = WordEmbeddingsModel.load('/databricks/driver/glove_100d_en')\
    .setInputCols(["document", 'token'])\
    .setOutputCol("glove_embeddings")

# + [markdown]
# ## Getting Sentence Embeddings from word embeddings

# +
glove_embeddings = WordEmbeddingsModel.pretrained('glove_100d')\
    .setInputCols(["document", "token"])\
    .setOutputCol("embeddings")

# Pool the token-level GloVe vectors into one vector per document.
embeddingsSentence = SentenceEmbeddings() \
    .setInputCols(["document", "embeddings"]) \
    .setOutputCol("sentence_embeddings") \
    .setPoolingStrategy("AVERAGE")  # or SUM

nlpPipeline = Pipeline(stages=[
    documentAssembler,
    tokenizer,
    glove_embeddings,
    embeddingsSentence
])

empty_df = spark.createDataFrame([['']]).toDF("text")
pipelineModel = nlpPipeline.fit(empty_df)

result = pipelineModel.transform(news_df.limit(10))

result_df = result.select(F.explode(F.arrays_zip('document.result', 'sentence_embeddings.embeddings')).alias("cols")) \
    .select(F.expr("cols['0']").alias("document"),
            F.expr("cols['1']").alias("sentence_embeddings"))

result_df.show(truncate=100)

# + [markdown]
# ### Cosine similarity between two embeddings (sentence similarity)

# +
from scipy.spatial import distance
import numpy as np

# Self-similarity sanity check: a vector compared with itself must give 1.0.
v1 = result_df.select('sentence_embeddings').take(2)[0][0]
v2 = result_df.select('sentence_embeddings').take(2)[0][0]

1 - distance.cosine(np.array(v1), np.array(v2))

# +
# BUG FIX: this cell is meant to compare two *different* documents, but it
# previously re-read row [0] (always yielding similarity 1.0 against v1).
# take(2) fetches two rows precisely so the second one can be used here.
v2 = result_df.select('sentence_embeddings').take(2)[1][0]

1 - distance.cosine(np.array(v1), np.array(v2))

# + [markdown]
# ## NERDL Model

# + [markdown]
# ### Public NER (CoNLL 2003)
#
# **Named-Entity recognition** is a well-known technique in information
# extraction; it is also known as **entity identification**, **entity chunking**
# and **entity extraction**. Knowing the relevant tags for each article helps in
# automatically categorizing the articles in defined hierarchies and enables
# smooth content discovery.

# + [markdown]
# Entities
#
# ``` PERSON, LOCATION, ORGANIZATION, MISC ```

# +
public_ner = NerDLModel.pretrained("ner_dl_bert", 'en') \
    .setInputCols(["document", "token", "embeddings"]) \
    .setOutputCol("ner")

# +
public_ner.getClasses()

# +
documentAssembler = DocumentAssembler()\
    .setInputCol("text")\
    .setOutputCol("document")

tokenizer = Tokenizer() \
    .setInputCols(["document"]) \
    .setOutputCol("token")

# ner_dl_bert model is trained with bert_embeddings, so we use the same
# embeddings in the pipeline.
public_ner = NerDLModel.pretrained("ner_dl_bert", 'en') \
    .setInputCols(["document", "token", "embeddings"]) \
    .setOutputCol("ner")

nlpPipeline = Pipeline(stages=[
    documentAssembler,
    tokenizer,
    bert_embeddings,
    public_ner
])

empty_df = spark.createDataFrame([['']]).toDF("text")
pipelineModel = nlpPipeline.fit(empty_df)

# +
result = pipelineModel.transform(news_df.limit(10))

result_df = result.select(F.explode(F.arrays_zip('token.result', 'ner.result')).alias("cols")) \
    .select(F.expr("cols['0']").alias("token"),
            F.expr("cols['1']").alias("ner_label"))

result_df.show(50, truncate=100)

# + [markdown]
# ### NerDL OntoNotes 100D

# + [markdown]
# This pipeline is based on the **NerDLApproach** annotator with
# **Char CNN - BiLSTM** and **GloVe Embeddings** on the **OntoNotes** corpus and
# supports the identification of 18 entities:
#
# | Type | Description |
# |------|-------------|
# | `PERSON` | People, including fictional. |
# | `NORP` | Nationalities or religious or political groups. |
# | `FAC` | Buildings, airports, highways, bridges, etc. |
# | `ORG` | Companies, agencies, institutions, etc. |
# | `GPE` | Countries, cities, states. |
# | `LOC` | Non-GPE locations, mountain ranges, bodies of water. |
# | `PRODUCT` | Objects, vehicles, foods, etc. (Not services.) |
# | `EVENT` | Named hurricanes, battles, wars, sports events, etc. |
# | `WORK_OF_ART` | Titles of books, songs, etc. |
# | `LAW` | Named documents made into laws. |
# | `LANGUAGE` | Any named language. |
# | `DATE` | Absolute or relative dates or periods. |
# | `TIME` | Times smaller than a day. |
# | `PERCENT` | Percentage, including "%". |
# | `MONEY` | Monetary values, including unit. |
# | `QUANTITY` | Measurements, as of weight or distance. |
# | `ORDINAL` | "first", "second", etc. |
# | `CARDINAL` | Numerals that do not fall under another type. |

# + [markdown]
# Entities
#
# ``` 'CARDINAL', 'DATE', 'EVENT', 'FAC', 'GPE', 'LANGUAGE', 'LAW', 'LOC', 'MONEY', 'NORP', 'ORDINAL', 'ORG', 'PERCENT', 'PERSON', 'PRODUCT', 'QUANTITY', 'TIME', 'WORK_OF_ART' ```

# +
onto_ner = NerDLModel.pretrained("onto_100", 'en') \
    .setInputCols(["document", "token", "embeddings"]) \
    .setOutputCol("ner")

nlpPipeline = Pipeline(stages=[
    documentAssembler,
    tokenizer,
    glove_embeddings,
    onto_ner
])

empty_df = spark.createDataFrame([['']]).toDF("text")
pipelineModel = nlpPipeline.fit(empty_df)

light_model = LightPipeline(pipelineModel)

# +
result = pipelineModel.transform(news_df.limit(10))

result_df = result.select(F.explode(F.arrays_zip('token.result', 'ner.result')).alias("cols")) \
    .select(F.expr("cols['0']").alias("token"),
            F.expr("cols['1']").alias("ner_label"))

result_df.show(50, truncate=100)

# + [markdown]
# ## Highlight the entities

# +
# !wget -q https://raw.githubusercontent.com/JohnSnowLabs/spark-nlp-workshop/master/tutorials/Certification_Trainings/Public/utils/ner_highlighter.py

# Add the path to system, local or mounted S3 bucket, e.g. /dbfs/mnt/<path_to_bucket>
sys.path.append('/databricks/driver/')
sys.path.append('/databricks/driver/ner_highlighter.py')
#dbutils.fs.cp("file:/databricks/driver/ner_highlighter.py", "dbfs:/")

# +
light_data = light_model.annotate('Unions representing workers at Turner Newall say they are disappointed after talks with stricken parent firm Federal Mogul in California.')

light_data

# +
displayHTML(ner_highlighter.token_highlighter(light_data))

# + [markdown]
# ### NER with Bert (CoNLL 2003)

# +
documentAssembler = DocumentAssembler()\
    .setInputCol("text")\
    .setOutputCol("document")

tokenizer = Tokenizer() \
    .setInputCols(["document"]) \
    .setOutputCol("token")

bert_embeddings = BertEmbeddings.pretrained('bert_base_cased')\
    .setInputCols(["document", "token"])\
    .setOutputCol("embeddings")

onto_ner_bert = NerDLModel.pretrained("ner_dl_bert", 'en') \
    .setInputCols(["document", "token", "embeddings"]) \
    .setOutputCol("ner")

onto_ner_bert.getStorageRef()

nlpPipeline = Pipeline(stages=[
    documentAssembler,
    tokenizer,
    bert_embeddings,
    onto_ner_bert
])

empty_df = spark.createDataFrame([['']]).toDF("text")
pipelineModel = nlpPipeline.fit(empty_df)

# +
# fullAnnotate in LightPipeline
light_model = LightPipeline(pipelineModel)
light_result = light_model.annotate('<NAME> is a nice persn and lives in New York. <NAME> is also a nice guy and lives in Gotham City.')

list(zip(light_result['token'], light_result['ner']))

# + [markdown]
# ### Getting the NER chunks with NER Converter

# +
# NerConverter merges IOB-tagged tokens into whole entity chunks.
ner_converter = NerConverter() \
    .setInputCols(["document", "token", "ner"]) \
    .setOutputCol("ner_chunk")

nlpPipeline = Pipeline(stages=[
    documentAssembler,
    tokenizer,
    bert_embeddings,
    onto_ner_bert,
    ner_converter
])

empty_df = spark.createDataFrame([['']]).toDF("text")
pipelineModel = nlpPipeline.fit(empty_df)

result = pipelineModel.transform(news_df.limit(10))

# +
result.select(F.explode(F.arrays_zip('ner_chunk.result', 'ner_chunk.metadata')).alias("cols")) \
    .select(F.expr("cols['0']").alias("chunk"),
            F.expr("cols['1']['entity']").alias("ner_label")).show(truncate=False)

# +
# fullAnnotate in LightPipeline
light_model = LightPipeline(pipelineModel)
light_result = light_model.fullAnnotate('<NAME> is a nice persn and lives in New York. <NAME> is also a nice guy and lives in Gotham City center.')

chunks = []
entities = []
for n in light_result[0]['ner_chunk']:
    chunks.append(n.result)
    entities.append(n.metadata['entity'])

import pandas as pd
df = pd.DataFrame({'chunks': chunks, 'entities': entities})
df

# + [markdown]
# End of Notebook
tutorials/Certification_Trainings/Public/databricks_notebooks/2.6/3.SparkNLP_Pretrained_Models_v2.6.3.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- from __future__ import print_function,division,absolute_import import numpy as np np.random.seed(1337) #Now Let's define the model from keras.models import Sequential from keras.layers import Dense,Conv2D,Dropout,MaxPool2D,Activation,Flatten,BatchNormalization from keras.optimizers import Adam,Adadelta,RMSprop from keras.losses import categorical_crossentropy from keras import utils # + # define two groups of layers: feature (convolutions) and classification (dense) feature_layers = [ Conv2D(64,3, padding='same', input_shape=(1,100,100)), Activation('elu'), Dropout(0.25), Conv2D(64,3), Activation('elu'), MaxPool2D(pool_size=2), BatchNormalization(), Conv2D(128,3), Activation('elu'), Dropout(0.25), Conv2D(128,3), Activation('elu'), MaxPool2D(pool_size=2), BatchNormalization(), Conv2D(256,3), Activation('elu'), Dropout(0.25), Conv2D(256,3), Activation('elu'), MaxPool2D(pool_size=2), BatchNormalization(), Conv2D(512,3), Activation('elu'), Dropout(0.25), Conv2D(512,3), Activation('elu'), MaxPool2D(pool_size=2), BatchNormalization(), Flatten(), ] classification_layers = [ Dense(512), Activation('elu'), Dropout(0.5), Dense(256), Activation('elu'), Dropout(0.5), Dense(10), Activation('softmax') ] #model building model = Sequential(feature_layers + classification_layers) model.summary() model.compile(loss=categorical_crossentropy, optimizer=Adadelta(), metrics=['accuracy']) # - dat = np.load('ASL_Train.npz') trainX,TrainY = dat['arr_0'],dat['arr_1'] trainY = utils.np_utils.to_categorical(TrainY,10) trainX = trainX/255 trainX = trainX.astype('float32') trainX = trainX.reshape((trainX.shape[0],1,100,100)).astype('float32') print(trainX.shape,trainY.shape) dat = np.load('ASL_Test.npz') testX,TestY = dat['arr_0'],dat['arr_1'] testY = 
utils.np_utils.to_categorical(TestY,10) testX = testX/255 testX = testX.astype('float32') testX = testX.reshape((testX.shape[0],1,100,100)).astype('float32') print(testX.shape,testY.shape) # + #now let's make the data Augmentation from keras.callbacks import TensorBoard,ModelCheckpoint from os.path import isfile data_aug_weight_file = 'ASL-new-normal-weights.h5' if (isfile(data_aug_weight_file)): model.load_weights(data_aug_weight_file) checkpoint = ModelCheckpoint(data_aug_weight_file, monitor='acc', verbose=1, save_best_only=True, mode='max') tensorboard = TensorBoard(log_dir='./logs-ASL-normal', histogram_freq=0,write_graph=True, write_images=True) callbacks_list=[checkpoint,tensorboard] model.fit(trainX, trainY, batch_size=24,epochs=200,verbose=1, validation_data=(testX, testY),callbacks=callbacks_list) # + #now let's make the data Augmentation from keras.preprocessing.image import ImageDataGenerator train_datagen = ImageDataGenerator( rotation_range=20, width_shift_range=0.4, height_shift_range=0.4, zoom_range=.4, vertical_flip=True, ) from keras.callbacks import TensorBoard,ModelCheckpoint from os.path import isfile data_aug_weight_file = 'ASL-weights-data_aug-25jul.h5' if (isfile(data_aug_weight_file)): model.load_weights(data_aug_weight_file) checkpoint = ModelCheckpoint(data_aug_weight_file_new, monitor='val_acc', verbose=1, save_best_only=True, mode='max') tensorboard = TensorBoard(log_dir='./logs-dataAug-asl', histogram_freq=0, write_graph=True, write_images=True) model.fit_generator(train_datagen.flow(trainX, trainY, batch_size=10),validation_data=(testX,testY), steps_per_epoch=len(trainX) /10, epochs=300,callbacks=[checkpoint,tensorboard]) # Trying to change batch size # + import pandas as pd model.load_weights('ASL-new-normal-weights.h5') y_test = np.load('ASL_Test.npz')['arr_1'] y_test = y_test.astype('int8') y_hat = model.predict_classes(testX ) pd.crosstab(y_test,y_hat) # + from matplotlib import pyplot as plt # %matplotlib inline test_wrong = [im 
for im in zip(testX,y_hat,y_test) if im[1] != im[2]] print("Wrong Test Cases:",len(test_wrong)) plt.figure(figsize=(10, 10)) for ind, val in enumerate(test_wrong[:100]): plt.subplots_adjust(left=0, right=1, bottom=0, top=1) plt.subplot(10, 10, ind + 1) im = 1 - val[0].reshape((100,100)) plt.axis("off") plt.title(str('Pred: '+str(val[1]))+'\n'+str('True: '+str(val[2])), fontsize=14, color='black') plt.imshow(im, cmap='gray') # - # # The result # # Result slows that after training with data augmentation the DNN has a 100% validation accuracy and a 99.9% train accuracy, (which is without augmentation). # # # # # # # # # # + model.load_weights('ASL-new-normal-weights.h5') y_test = np.load('ASL_Train.npz')['arr_1'] y_test = y_test.astype('int8') y_hat = model.predict_classes(trainX ) pd.crosstab(y_test,y_hat) test_wrong = [im for im in zip(trainX,y_hat,y_test) if im[1] != im[2]] print("Wrong Test Cases:",len(test_wrong)) plt.figure(figsize=(10, 10)) for ind, val in enumerate(test_wrong[:100]): plt.subplots_adjust(left=0, right=1, bottom=0, top=1) plt.subplot(10, 10, ind + 1) im = 1 - val[0].reshape((100,100)) plt.axis("off") plt.title(str('Pred: '+str(val[1]))+'\n'+str('True: '+str(val[2])), fontsize=14, color='black') plt.imshow(im, cmap='gray') # -
ASL_DNN.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # Dimensionality reduction
#
# So far we have seen how important features are for an algorithm to learn its
# task from data. The problem is that we may unfortunately have too many
# features and too few samples (too many columns, too few rows), or we may be
# told to use at most a given number of features for our predictor. In such
# cases we can use **algorithms that reduce the size of our dataset by keeping
# only the relevant components, even without knowing which model we will use;
# such datasets can come from many contexts, e.g. text, images and much more.**
#
# ## Singular value decomposition (SVD)
#
# The __[singular value decomposition](https://it.wikipedia.org/wiki/Decomposizione_ai_valori_singolari)__
# relies on geometric notions to factorize the starting matrix into simpler
# matrices that give information about the properties of each component we are
# considering. Mathematically:
#
# \begin{equation}
# \Large M_{n \times m} = U_{n \times n} D_{n \times m} V^{\dagger}_{m \times m}
# \end{equation}
#
# where $M_{n \times m}$ is our starting matrix with n rows and m columns,
# $U_{n \times n}$ is an orthogonal unitary matrix, $D_{n \times m}$ is an
# $n \times m$ diagonal matrix of singular values, and $V^{\dagger}_{m \times m}$
# is the conjugate transpose of an orthogonal unitary matrix.<br>
# In practice, the matrix we care about is $D$, since its diagonal values
# represent the variance of each single component. Why is that useful?
# Let's look at an example.

# +
import numpy as np

# our starting matrix
M = np.matrix([[1, 5, 6],
               [3, 4, 19],
               [2, 7, 24]])
U, D, V = np.linalg.svd(M)
# Only D needs reshaping: since it is diagonal, numpy returns a 1D array to
# save memory (for most operations the behaviour is the same).
print(f'Matrix U:\n {U}\n Matrix D :\n {np.diag(D)}\n Matrix V :\n {V}')
# -

# Now suppose we want to keep only the two most important components to
# reconstruct the dataset. Let's see what happens when we drop one value from
# the diagonal; remember we want $M \approx UDV^{\dagger}$. To keep the dot
# product dimensionally consistent, when a diagonal value is removed the
# corresponding *column* of $U$ and the corresponding *row* of $V^{\dagger}$
# are removed as well.

# +
# Drop one singular value at a time; U loses the matching column, V the matching row.
# @ means dot product in numpy.
# Using all components reproduces the original M.
print(f'original matrix obtained with all features:\n {U @ np.diag(D) @ V}')
# BUG FIX: dropping the *first* singular value must drop the first column of U
# and the *first* row of V. The original used V[:2, :] (the first two rows),
# which pairs the wrong singular vectors with D[1:].
print(f'matrix obtained eliminating the first element in diagonal {D[0]}:\n'
      f'{U[:, 1:] @ np.diag(D[1:]) @ V[1:, :]}')
# remove the middle column of U and the middle row of V
print(f'matrix obtained eliminating the second element in diagonal {D[1]}:\n'
      f'{U[:, [0, 2]] @ np.diag(D[[0, 2]]) @ V[[0, 2], :]}')
# remove the last column of U and the last row of V
print(f'matrix obtained eliminating the second element in diagonal {D[2]}:\n'
      f'{U[:, [0, 1]] @ np.diag(D[:2]) @ V[[0, 1], :]}')
# -

# As we can see, if we remove the smallest value from the diagonal matrix the
# reconstructed matrix remains very close to the original; in that case the
# decomposition is called
# __[TruncatedSVD](https://langvillea.people.cofc.edu/DISSECTION-LAB/Emmie%27sLSI-SVDModule/p5module.html)__.<br>
# From this we understand that, if we want the most relevant components, it is
# enough to select the largest diagonal values until we have the desired number.<br>
#
# ## PCA
#
# PCA (Principal Component Analysis) exploits exactly this SVD algorithm: it
# maps our problem onto a smaller space under the condition of preserving as
# much as possible of the norm of our vectors, using the variance of each
# feature obtained through the diagonal matrix. Note that the new features are
# now called **principal components**; if in doubt see
# __[here](https://medium.com/analytics-vidhya/what-is-principal-component-analysis-cf880cf95a0c)__.<br>
# Since __[scikit](https://scikit-learn.org/stable/modules/generated/sklearn.decomposition.PCA.html)__
# already implements it, we will use that.

# +
import time
import pandas as pd
import matplotlib.pyplot as plt
from sklearn.decomposition import PCA
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.model_selection import train_test_split
# NOTE(review): plot_confusion_matrix was removed in scikit-learn 1.2
# (ConfusionMatrixDisplay.from_estimator replaces it) — this notebook needs an
# older sklearn; confirm the pinned version.
from sklearn.metrics import plot_confusion_matrix, classification_report

# classification data
diabetes = pd.read_csv('../data/diabetes2.csv')
X_diabetes, y_diabetes = diabetes.drop('Outcome', axis = 1).values, diabetes.Outcome.values
target_names = ["Not Diabetes", "Diabetes"]
print(f'Original data: {X_diabetes.shape[0]} dati, {X_diabetes.shape[1]} feature')

# let's find the 5 most valuable features
diabetes_pca = PCA(n_components=5)
# fit the data
diabetes_pca.fit(X_diabetes)
# transform the data
X_pca = diabetes_pca.transform(X_diabetes)
print(f'Reduced data: {X_pca.shape[0]} dati, {X_pca.shape[1]} feature')
print(f'Reduced data PCA output: \n {X_pca}')
print("PCA : ")
print(f'- components: \n{diabetes_pca.components_} \n'
      f'- explained variance: \n {diabetes_pca.explained_variance_} \n'
      f'- explained variance ratio: \n {diabetes_pca.explained_variance_ratio_} \n'
      f'- singular values: \n {diabetes_pca.singular_values_}\n'
      f'- noise variance values: {diabetes_pca.noise_variance_}'
      )
print('-'*80)

# prepare the data
print("The reduced data ha been divided to train and test in 80% traing, 20% testing")
X_pca_train, X_pca_test, y_diabetes_train, y_diabetes_test = train_test_split(
    X_pca, y_diabetes, random_state=0, test_size = 0.2)

print('Allenaimo un Gradient Boosting Classifier sui dati ridotti:')
tree = GradientBoostingClassifier()
start = time.time()
tree.fit(X_pca_train, y_diabetes_train)
end = time.time()
print(f"Time taken to train Gradient Boosting Classifier on reduced data: {end - start}s ")

plot_confusion_matrix(tree, X_pca_test, y_diabetes_test, display_labels=target_names)
plt.title("Confusion matrix of classification")
plt.show()
print(classification_report(y_diabetes_test, tree.predict(X_pca_test), target_names= target_names))
# -

# # Non-negative matrix factorization (NMF or NNMF)
#
# PCA is one way to reduce dataset dimensionality, but it has a problem: the
# reconstruction of the matrix can yield negative values, and negative values
# are generally hard to interpret and analyze. The goal of
# __[NMF](https://scikit-learn.org/stable/modules/decomposition.html#non-negative-matrix-factorization-nmf-or-nnmf)__
# is therefore to factorize the matrix while forcing the values of the factor
# matrices to be all positive. Since this admits a large number of possible
# factorizations, the usual condition is that the matrix distance between the
# decomposition and the original be as small as possible **according to the
# Frobenius distance, also defined as a**
# __[matrix norm](https://it.wikipedia.org/wiki/Norma_matriciale)__;
# **regularization terms or other metrics are also introduced to ensure a
# flexible, non-divergent result — to learn more see**
# __[here](https://scikit-learn.org/stable/modules/decomposition.html#nmf-with-a-beta-divergence)__.
# Let's now use the model on the diabetes dataset.

# +
from sklearn.decomposition import NMF

nmf = NMF(n_components=5, verbose = 0, max_iter=500, init= 'nndsvda' )
nmf.fit(X_diabetes)
# transform the data
X_NMF = nmf.transform(X_diabetes)
print(f'Reduced data: {X_NMF.shape[0]} dati, {X_NMF.shape[1]} feature')
print("NMF : ")
print(f'- components: \n{nmf.components_} \n'
      f'- reguralization: {nmf.regularization} \n'
      f'- reconstruction error: {nmf.reconstruction_err_}\n'
      f'- iterations: {nmf.n_iter_}')
print('-'*80)

# prepare the data
print("The reduced data ha been divided to train and test in 80% traing, 20% testing")
X_NMF_train, X_NMF_test, y_diabetes_train, y_diabetes_test = train_test_split(
    X_NMF, y_diabetes, random_state=0, test_size = 0.2)

print('Allenaimo un Gradient Boosting Classifier sui dati ridotti:')
tree = GradientBoostingClassifier()
start = time.time()
tree.fit(X_NMF_train, y_diabetes_train)
end = time.time()
print(f"Time taken to train Gradient Boosting Classifier on reduced data: {end - start}s ")

plot_confusion_matrix(tree, X_NMF_test, y_diabetes_test, display_labels=target_names)
plt.title("Confusion matrix of classification")
plt.show()
print(classification_report(y_diabetes_test, tree.predict(X_NMF_test), target_names= target_names))
# -

# ## Latent Dirichlet Allocation (LDA)
#
# __[LDA](https://scikit-learn.org/stable/modules/decomposition.html#latent-dirichlet-allocation-lda)__
# is a dimensionality-reduction algorithm that is
# __[generative probabilistic](https://ichi.pro/it/modelli-grafici-probabilistici-generativi-vs-discriminativi-40857457895478)__;
# the difference from discriminative models is that here we try to determine a
# probability distribution from which we can derive the probability associated
# with each event. In mathematical terms, discriminative models determine
# $P(Y|X)$, while generative ones determine $P(Y,X)$; this also makes it
# possible to generate values with an associated probability, and generative
# models are generally not limited to mere classification — for details watch
# this __[video on GANs](https://www.youtube.com/watch?v=8L11aMN5KY8)__, which
# are generative models.<br>
# ***Beware though: these models are less precise, since they assume the data
# are i.i.d. — a condition that discriminative models do not require!***<br>
# Back to LDA: this algorithm tries to understand the underlying structure of
# the data by reading only part of it; it divides that structure into
# categories and keeps only the most relevant ones in order to recreate the
# full structure. **This algorithm supports "online" learning — every new
# sample can be used to train the model and adapt it instantly to possible
# changes; if instead you want to retrain only once a certain amount of data
# has been collected, use "batch".**

# +
from sklearn.decomposition import LatentDirichletAllocation

lda = LatentDirichletAllocation(n_components=5, n_jobs=-1)
lda.fit(X_diabetes)
# transform the data
X_lda = lda.transform(X_diabetes)
print(f'Reduced data: {X_lda.shape[0]} dati, {X_lda.shape[1]} feature')
print("LDA : ")
print(f'- components: \n{lda.components_} \n'
      f'- bound_: {lda.bound_} \n'
      f'- exp dirichlet components:\n {lda.exp_dirichlet_component_}\n'
      f'- iterations: {lda.n_iter_}')
print('-'*80)

# prepare the data
print("The reduced data ha been divided to train and test in 80% traing, 20% testing")
X_lda_train, X_lda_test, y_diabetes_train, y_diabetes_test = train_test_split(
    X_lda, y_diabetes, random_state=0, test_size = 0.2)

print('Allenaimo un Gradient Boosting Classifier sui dati ridotti:')
tree = GradientBoostingClassifier()
start = time.time()
tree.fit(X_lda_train, y_diabetes_train)
end = time.time()
print(f"Time taken to train Gradient Boosting Classifier on reduced data: {end - start}s ")

plot_confusion_matrix(tree, X_lda_test, y_diabetes_test, display_labels=target_names)
plt.title("Confusion matrix of classification")
plt.show()
print(classification_report(y_diabetes_test, tree.predict(X_lda_test), target_names= target_names))
# -

# In this notebook we saw some techniques to reduce the size of our dataset
# under the condition of still being able to use models that perform as well as
# possible. Many other techniques exist — to learn more see the
# __[scikit guide on dimensionality reduction](https://scikit-learn.org/stable/modules/decomposition.html#decompositions)__.
#
# ***
#
# CONGRATULATIONS, YOU FINISHED THE LESSON ON PCA, LDA AND NMF — SEE YOU SOON!
3.machine learning/7-PCA_LDA_NMF.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# Merge US cancer-mortality and state-temperature data (2000-2016) and plot
# nationwide annual temperature (bars) against annual cancer deaths (line).

# +
# Dependencies and Setup
import pandas as pd
import matplotlib.pyplot as plt
# -

mortality_rate = pd.read_csv("../data/mortality_rate_by_US_state.csv")
temperature_rate = pd.read_csv("../data/model_state.csv")

# Keep only cancer deaths and align the year column name with the climate data.
# (The original notebook ran this cell twice verbatim; the duplicate is removed.)
cancer_rate = mortality_rate.loc[mortality_rate["Cause Name"] == "Cancer"]
cancer_rate = cancer_rate.rename(columns={"Year": "year"})
cancer_rate

cancer_df = cancer_rate.loc[(cancer_rate["year"] > 1999)]
cancer_df

climate_change = pd.read_csv("../data/climdiv_state_year.csv")
climate_df = climate_change.loc[(climate_change["year"] > 1999) & (climate_change["year"] < 2017)]
climate_df

# NOTE(review): merging on "year" alone pairs every mortality row with every
# climate row of the same year (a per-year cross product across states). The
# grouped sum/mean below therefore aggregates over that product — confirm this
# matches the intended nationwide statistics (a merge on ["year", state] may
# have been intended).
whole_data = cancer_df.merge(climate_df, how='outer', on="year")

# Aggregate per year: total deaths and mean temperature. Selecting the column
# *before* aggregating avoids passing a list to GroupBy.sum()/mean(), whose
# first positional parameter is `numeric_only`, not a column list.
yearly_cancer = whole_data.groupby("year")[["Deaths"]].sum()
temperature_yearly = whole_data.groupby("year")[["tempc"]].mean()
yearly_cancer_deaths = yearly_cancer[["Deaths"]]
temperature_yearly_change = temperature_yearly[["tempc"]]
annual_change = temperature_yearly_change.merge(yearly_cancer_deaths, on="year")
annual_change

# +
# Dual-axis chart: temperature as red bars (left axis), deaths as a line
# (right axis).
fig, ax = plt.subplots()
fig.subplots_adjust(right=0.75)
fig.set_figheight(10)
fig.set_figwidth(20)

twin1 = ax.twinx()

ax.bar(annual_change.index, annual_change["tempc"], color="r", label="Temperature",
       alpha=0.75, tick_label=annual_change.index)
twin1.plot(annual_change.index, annual_change["Deaths"], label="Deaths")

ax.set_xlabel("Years")
ax.set_ylabel("National Temperature")
twin1.set_ylabel("National Deaths")

tkw = dict(size=4, width=1.5)
ax.tick_params(axis='x', **tkw)

plt.savefig("../images/nationwide.png")
plt.show()
# -
notebooks/nationwide.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 2
#     language: python
#     name: python2
# ---

# Read a GIF, convert it to grayscale, apply a 3x3 convolution, and save the
# result as a PNG.  Uses the TensorFlow 1.x queue-runner input pipeline.

# +
import tensorflow as tf

# Generate the filename queue, and read the gif file's contents
filename_queue = tf.train.string_input_producer(tf.train.match_filenames_once("data/test.gif"))
reader = tf.WholeFileReader()
key, value = reader.read(filename_queue)
image = tf.image.decode_gif(value)

# Define the kernel parameters.
# NOTE(review): this is the classic 3x3 Laplacian *edge-detection* kernel
# (center 8, neighbours -1), despite the output being named "blur2.png" —
# confirm the intended filter.
kernel = tf.constant(
    [
        [[[-1.]], [[-1.]], [[-1.]]],
        [[[-1.]], [[8.]], [[-1.]]],
        [[[-1.]], [[-1.]], [[-1.]]],
    ]
)

# Define the train coordinator (manages the queue-runner threads)
coord = tf.train.Coordinator()

with tf.Session() as sess:
    tf.initialize_all_variables().run()
    threads = tf.train.start_queue_runners(coord=coord)

    # Get first image and drop the color channels
    image_tensor = tf.image.rgb_to_grayscale(sess.run([image])[0])

    # apply convolution, preserving the image size ("SAME" padding)
    imagen_convoluted_tensor = tf.nn.conv2d(tf.cast(image_tensor, tf.float32), kernel, [1, 1, 1, 1], "SAME")

    # Rescale to 0..255 and cast to uint8, because the convolution can alter
    # the value range of the final image, then encode as PNG.
    out = tf.image.encode_png(
        tf.reshape(
            tf.cast(imagen_convoluted_tensor / tf.reduce_max(imagen_convoluted_tensor) * 255., tf.uint8),
            tf.shape(imagen_convoluted_tensor.eval()[0]).eval()))

    # Save the convolution output.  A `with` block replaces the original
    # open/close pair (whose handle was named `file`, shadowing the Python 2
    # builtin) and guarantees the handle is closed even on error.
    with open("blur2.png", "wb+") as out_file:
        out_file.write(out.eval())

    coord.request_stop()
    coord.join(threads)
# -
6/convolution.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# NYC rolling-sales EDA: load the five borough spreadsheets, clean and merge
# them, explore sale prices, and draw a zip-code choropleth of mean prices.

# +
# import libraries
# numerical
import pandas as pd
import numpy as np
from empiricaldist import Cdf, Pmf

# plotting
import matplotlib.pyplot as plt
import seaborn as sns

# For mapping
import contextily as ctx
import geopandas as gpd
import geoplot
import shapely
from shapely.geometry import MultiPoint
from geopandas.tools import overlay

pd.set_option('display.max_columns', 23000)
# -

# !ls

# Raw rolling-sales spreadsheets, one per borough.
df_manhattan = pd.read_excel('rollingsales_manhattan.xls')
df_bronx = pd.read_excel('rollingsales_bronx.xls')
df_brooklyn = pd.read_excel('rollingsales_brooklyn.xls')
df_queens = pd.read_excel('rollingsales_queens.xls')
df_staten = pd.read_excel('rollingsales_statenisland.xls')

# # Check all borough dataframes

df_manhattan.head()

df_bronx.head()

df_brooklyn.head()

df_queens.head()

df_staten.head()

frames = [df_manhattan, df_bronx, df_brooklyn, df_queens, df_staten]

# The real column names live in spreadsheet row index 3; promote them to
# headers and drop the four banner rows above the data.
for frame in frames:
    frame.columns = list(frame.iloc[3])
    frame.drop([0, 1, 2, 3], axis=0, inplace=True)

# to confirm that rows have been deleted and columns renamed
for frame in frames:
    print(frame.head())

# +
# Join all dataframes together
df = pd.concat(frames)
# -

len(df)

# to confirm that all the records are in the new dataframe
length = sum(len(frame) for frame in frames)
print(length)

# All files added can be output to a csv file
# df.to_csv('nyc-rolling-data.csv', index=False)

df = pd.read_csv('nyc-rolling-data.csv')

df.head()

df.columns

df.describe()

# # Some properties have a land square feet and or a gross square feet of 0
# # This should be wrong

df[df['LAND SQUARE FEET'] == 0]

df[df['GROSS SQUARE FEET'] == 0]

# There does not seem to be a common feature among the properties with reported sizes of 0
# for the model I will do one with them included and then with them dropped

df.info()

# Percentage of null values in each columns
100 * df.isnull().sum() / len(df)

# A $0 sale indicates that there was a transfer of ownership without a cash consideration.
100 * len(df[df['SALE PRICE'] == 0]) / len(df)

# # ~30.37% of properties transfered without a cash consideration
# ### Since the purpose is to predict the sale price these can be taken out it might be interesting to see if there are any similarities or differences between properties that are transfered without a cash consideration

# Since the first two characters of the building class are digits it is possible to parse them and change the
# type to a number type.
df['BUILDING CLASS CATEGORY'] = df['BUILDING CLASS CATEGORY'].str[0:2]
# BUG FIX: astype() returns a new Series — the original called it without
# assigning, so the column silently stayed as strings.
df['BUILDING CLASS CATEGORY'] = df['BUILDING CLASS CATEGORY'].astype(int)

# EASE-MENT has 100% NaN values APARTMENT has >70% NaN so they will be dropped
df.drop(columns=['EASE-MENT', 'APARTMENT NUMBER'], inplace=True)

# +
plt.hist(df.BOROUGH, bins=4, histtype='step')
plt.title('Histogram of number of properties transfered in each borough')
plt.show()
# -

# Split no-cash transfers from real sales before modelling the price.
df_nosale = df[df['SALE PRICE'] == 0]
df = df[df['SALE PRICE'] != 0]

df['AGE OF BUILDING'] = 2020 - df['YEAR BUILT']

plt.hist(df['BLOCK'], bins=40, histtype='step')
plt.show()

df['AGE OF BUILDING'].hist(bins=100, histtype='step')

# +
sns.set()
plt.figure(figsize=(12, 6))

nosale_class_cdf = Cdf.from_seq(df_nosale['BUILDING CLASS CATEGORY'])
class_cdf = Cdf.from_seq(df['BUILDING CLASS CATEGORY'])

nosale_class_cdf.plot(label='Without')
class_cdf.plot(label='With')

plt.xticks(rotation=90)
plt.legend()
plt.xlabel('BUILDING CATEGORY')
plt.ylabel('CDF')
plt.title('Properties Transfered with and without cash consideration')
plt.show()
# -

df.groupby('BOROUGH')['SALE PRICE'].describe()

# Sale-price summary by building class, rescaled to millions of USD.
df_class_category = df.groupby('BUILDING CLASS CATEGORY')['SALE PRICE'].describe()
df_class_category['mean'] = df_class_category['mean'] / 1000000
df_class_category['std'] = df_class_category['std'] / 1000000

plt.figure(figsize=(12, 6))
sns.barplot(y='mean', x=df_class_category.index, data=df_class_category)
plt.ylabel(' Sale Price (Million USD)')
plt.title('Mean Price of Properties Sold')
plt.show()

plt.figure(figsize=(12, 6))
sns.barplot(y='std', x=df_class_category.index, data=df_class_category)
plt.ylabel('Std (Million USD)')
plt.title('Standard deviation of Properties sold')
plt.show()

# # BUILDING CLASS 21 , 43 and 39 have the top mean price and the top 3 standard deviation

df_class_category = df.groupby('TAX CLASS AT TIME OF SALE')['SALE PRICE'].describe()
df_class_category['mean'] = df_class_category['mean'] / 1000000
df_class_category['std'] = df_class_category['std'] / 1000000

plt.figure(figsize=(12, 6))
sns.barplot(y='mean', x=df_class_category.index, data=df_class_category)
plt.ylabel(' Sale Price (Million USD)')
plt.title('Mean Price of Properties Sold')
plt.show()

# # No surprise here

df['BOROUGH'].dtype

df['AGE OF BUILDING'].dtype

df['SALE PRICE'].dtype

df['TAX CLASS AT PRESENT'].unique()

df['TAX CLASS AT TIME OF SALE'].unique()

df['SALE PRICE'].describe()

# Cast the columns used for correlation to float.
df.BOROUGH = df['BOROUGH'].astype(float)
df['AGE OF BUILDING'] = df['AGE OF BUILDING'].astype(float)
df['SALE PRICE'] = df['SALE PRICE'].astype(float)
df['TAX CLASS AT TIME OF SALE'] = df['TAX CLASS AT TIME OF SALE'].astype(float)
df.BLOCK = df.BLOCK.astype(float)
# df['ZIP CODE'] = df['ZIP CODE'].astype(object)
df['TOTAL UNITS'] = df['TOTAL UNITS'].astype(float)

corr_cols = ['BOROUGH', 'SALE PRICE', 'AGE OF BUILDING', 'TAX CLASS AT TIME OF SALE', 'ZIP CODE', 'TOTAL UNITS']
df[corr_cols].corr(method='spearman')

cmap = sns.diverging_palette(220, 10, as_cmap=True)
sns.heatmap(df[corr_cols].corr(method='spearman'), cmap=cmap, vmax=.3, center=0,
            square=True, linewidths=.5, cbar_kws={"shrink": .5})

df.groupby('TAX CLASS AT PRESENT')['BOROUGH'].count()

df_nosale.groupby('TAX CLASS AT PRESENT')['BOROUGH'].count()

# # Zip code cloropleth

# Mean and standard deviation of sale price per zip code.
zip_code_sale_price = df.groupby('ZIP CODE')[['SALE PRICE']].mean()
# BUG FIX: use the Series std() (the original assigned a one-column DataFrame
# into a column) and size the new index from the data instead of a
# hard-coded np.arange(0, 192).  set_index() is not in-place, so keep its result.
zip_code_sale_price['STD'] = df.groupby('ZIP CODE')['SALE PRICE'].std()
zip_code_sale_price['ZIPCODE'] = zip_code_sale_price.index
zip_code_sale_price['index'] = np.arange(len(zip_code_sale_price))
zip_code_sale_price = zip_code_sale_price.set_index('index')
zip_code_sale_price.head()

zip_codes = gpd.read_file('ZIP_CODE_040114/ZIP_CODE_040114.shp')
zip_codes.head()

# to avoid value error when merging
zip_code_sale_price['ZIPCODE'] = zip_code_sale_price['ZIPCODE'].astype(float)
zip_codes['ZIPCODE'] = zip_codes['ZIPCODE'].astype(float)

zip_codes = pd.merge(zip_codes, zip_code_sale_price, on='ZIPCODE')

zip_code_sale_price.info()

zip_codes.head()

zip_codes.COUNTY.unique()

# +
# Use each polygon's centroid as a representative point, then group the points
# by county so each borough can get one map label.  (The original notebook
# computed the center/copy/set_geometry triple twice; the duplicate is removed.)
zip_codes["center"] = zip_codes["geometry"].centroid
zip_codes_points = zip_codes.copy()
zip_codes_points.set_geometry("center", inplace=True)

manhattan_points = zip_codes_points['center'][zip_codes_points['COUNTY'] == 'New York']
queens_points = zip_codes_points['center'][zip_codes_points['COUNTY'] == 'Queens']
bronxs_points = zip_codes_points['center'][zip_codes_points['COUNTY'] == 'Bronx']
brooklyn_points = zip_codes_points['center'][zip_codes_points['COUNTY'] == 'Kings']
staten_points = zip_codes_points['center'][zip_codes_points['COUNTY'] == 'Richmond']
staten_points.head()

# +
def points_to_multipoint(x):
    """Take an iterable of shapely points and turn it into a MultiPoint object."""
    return MultiPoint(list(x))


# get centroid of multipoints — one label anchor per borough
manhattan = points_to_multipoint(manhattan_points).centroid
queens = points_to_multipoint(queens_points).centroid
bronxs = points_to_multipoint(bronxs_points).centroid
brooklyn = points_to_multipoint(brooklyn_points).centroid
staten = points_to_multipoint(staten_points).centroid

centroids = [manhattan, queens, bronxs, brooklyn, staten]
labels = ['Manhattan', 'Queens', 'Bronx', 'Brooklyn', 'Staten Island']
# -

# change sale price to multiples of a million dollars
zip_codes['SALE PRICE'] = zip_codes['SALE PRICE'] / 1000000

# +
gdf = gpd.GeoDataFrame(zip_codes, geometry=zip_codes.geometry)

ax = gdf.plot(column='SALE PRICE', scheme='Percentiles', cmap='cool', figsize=(20, 20), legend=True)
ax.set_title('Mean Property Price by Zipcode (Million USD)')
ax.set_axis_off()
# gdf.to_file('New York zipcodes base.shp')

# BUG FIX: the original appended to an undefined `texts` list (NameError) and
# zipped `centroids` against itself; annotate each borough centroid directly.
for point, label in zip(centroids, labels):
    plt.text(point.x, point.y, label, fontsize=20)

plt.savefig('NYC.png')
# -
.ipynb_checkpoints/NYC_EDA-checkpoint.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: 'Python 3.7.9 64-bit (''algotrading'': conda)'
#     name: python379jvsc74a57bd01d1365bc7bea9ca53bbaaff2a498c5cc15b63a6fcc47bade387e2edfe7e7b068
# ---

# Pull the US unemployment rate from FRED and the S&P 500 from Yahoo Finance,
# then plot both series against each other on twin axes.

import pandas as pd
import yfinance as yf
import os
from dotenv import load_dotenv
import fredapi
from fredapi import Fred
import matplotlib
import matplotlib.pyplot as plt
# %matplotlib inline

# BUG FIX: load_dotenv was imported but never called, so FRED_API_KEY was only
# found if it was already exported in the shell.  Populate os.environ from a
# local .env file before reading the key.
load_dotenv()
api_key = os.getenv('FRED_API_KEY')
fred = Fred(api_key=api_key)

# Pull unemployment data (UNRATE: civilian unemployment rate, monthly)
unemployment_rate = fred.get_series('UNRATE', observation_start='1990-01-01')
unemployment_rate = unemployment_rate.dropna()
unemployment_rate

# Plot unemployment data
unemployment_rate.plot(figsize=(12, 8), title='US Unemployment Rate', grid=True,
                       xlabel='Date', ylabel='unemployment (%)')

# +
# Pull S&P 500 historical data; set freq to monthly (forward-fill gaps)
ticker = "^GSPC"
start = "1990-01-01"
end = "2021-05-25"

sp_history = yf.download(ticker, start=start, end=end, progress=False)
sp_history = sp_history.asfreq(freq='M').ffill().dropna()
sp_history.head()

# +
# Plot US unemployment against S&P 500 index on twin y-axes
fig, ax = plt.subplots(figsize=(12, 8))

plt.plot(sp_history['Adj Close'], label='S&P 500', color='r')
ax.set_ylabel('S&P 500')
plt.grid()
ax.legend(loc='upper left')

ax2 = ax.twinx()
plt.plot(unemployment_rate, label='US Unemployment %', color='b')
ax.set_title('US Unemployment vs S&P 500')
ax2.set_ylabel('unemployment %')
# horizontal reference line at 2.5% — presumably a "full employment"
# benchmark; confirm the intended level
ax2.axhline(y=2.5)
ax2.legend(loc='upper right')
# -
unemployment.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# Demo notebook for the `localgraphclustering` (lgc) package: graph loading,
# spectral and flow-based local clustering, network community profiles (NCP),
# and per-vertex neighborhood metrics.

# +
import sys,os

data_path = os.getcwd()

try:
    import localgraphclustering as lgc
except:
    # when the package is not installed, import the local version instead.
    # the notebook must be placed in the original "notebooks/" folder
    sys.path.append("../")
    import localgraphclustering as lgc

# +
import time
import numpy as np
import matplotlib.pyplot as plt
# -

# # Load graph

# Read graph. This also supports gml format.
#g = lgc.GraphLocal(os.path.join(data_path,'datasets/JohnsHopkins.graphml'),'graphml')
g = lgc.GraphLocal(os.path.join(data_path,'datasets/JohnsHopkins.graphml'),'graphml')

# To get a quick look at the list of methods and attributes for the graph object 'g' you can type 'g.' + tab
# and scroll up or down.

# # Nodes embedding via fiedler vector

# Call the global spectral partitioning algorithm (normalized, then unnormalized).
output_sp = lgc.fiedler(g)
print(output_sp)

output_sp = lgc.fiedler(g,normalize=False)
print(output_sp)

# # Nodes embedding via PageRank vector

# Seed node for all of the personalized-PageRank diffusions below.
ref_node = [3215]

# L1-regularized PageRank solver.
# Each variant is run twice — default (C++) backend and cpp = False (pure
# Python) — to compare the two implementations' outputs.
output_acl = lgc.approximate_PageRank(g,ref_node)
print(output_acl)

output_acl = lgc.approximate_PageRank(g,ref_node,cpp = False)
print(output_acl)

output_l1reg = lgc.approximate_PageRank(g,ref_node,method = "l1reg")
print(output_l1reg)

output_l1reg = lgc.approximate_PageRank(g,ref_node,method = "l1reg",cpp = False)
print(output_l1reg)

output_l1reg_rand = lgc.approximate_PageRank(g,ref_node,method = "l1reg-rand")
print(output_l1reg_rand)

output_weighted = lgc.approximate_PageRank_weighted(g,ref_node)
print(output_weighted)

output_nibble = lgc.PageRank_nibble(g,ref_node)
print(output_nibble)

output_nibble = lgc.PageRank_nibble(g,ref_node,cpp=False)
print(output_nibble)

# # Rounding algorithms

# Round each diffusion vector into a cluster with a sweep cut.
output_sc = lgc.sweep_cut(g,output_acl)
print(output_sc)

output_sc = lgc.sweep_cut(g,output_l1reg,cpp=False)
print(output_sc)

output_sc = lgc.sweep_cut(g,output_l1reg)
print(output_sc)

output_sc = lgc.sweep_cut(g,output_l1reg_rand)
print(output_sc)

# # Spectral local graph partitioning methods

# +
ref_node = [3215]

# Find a cluster using approximate PageRank; for each method the cluster's
# conductance is printed after the clustering output.
output_pr_clustering = lgc.spectral_clustering(g,ref_node,method="acl",iterations=100000)
print(output_pr_clustering)
print(g.compute_conductance(output_pr_clustering[0],cpp=False))

output_pr_clustering = lgc.spectral_clustering(g,ref_node,method="acl_weighted",iterations=100000)
print(output_pr_clustering)
print(g.compute_conductance(output_pr_clustering[0],cpp=False))

output_pr_clustering = lgc.spectral_clustering(g,ref_node,method="l1reg")
print(output_pr_clustering)
print(g.compute_conductance(output_pr_clustering[0],cpp=False))

output_pr_clustering_rand = lgc.spectral_clustering(g,ref_node,method="l1reg-rand")
print(output_pr_clustering_rand)
print(g.compute_conductance(output_pr_clustering_rand[0]))
# -

# # Flow-based local graph partitioning methods

ref_node = [3215]
output_crd = lgc.flow_clustering(g,ref_node,method="crd")
print(output_crd)

# MQI on the Minnesota edgelist, seeded with a large explicit node set.
G = lgc.GraphLocal(os.path.join(data_path,'datasets/minnesota.edgelist'),'edgelist',remove_whitespace=True)
ref_node = [0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,43,44,45,46,47,48,49,50,51,52,53,54,55,56,57,58,59,60,61,62,63,64,65,66,67,68,69,70,71,73,74,75,76,77,78,79,80,81,82,83,84,85,87,88,89,90,91,92,93,94,95,97,98,99,100,102,103,104,105,106,108,112,114,118,119,120,121,122,123,124,125,126,127,128,129,130,131,132,133,134,135,136,137,139,140,141,142,143,144,145,147,148,149,150,151,152,155,157,158,159,160,161,162,164,165,166,168,169,171,172,173,176,177,178,179,180,185,187,188,191,192,195,196,197,201,208,209,210,211,212,215,217,218,219,221,223,225,226,227,228,231,232,244,245,246,247,248,249,253,254,257,261,262,265,269,270,271,272,273,275,276,277,278,279,285,286,287,290,291,299,303,323,327]
output_mqi = lgc.flow_clustering(G,ref_node,method="mqi")
print(output_mqi)
print(G.compute_conductance(output_mqi[0],cpp=False))

# SimpleLocal ("sl") on the same graph and the same seed set.
G = lgc.GraphLocal(os.path.join(data_path,'datasets/minnesota.edgelist'),'edgelist',remove_whitespace=True)
ref_node = [0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,43,44,45,46,47,48,49,50,51,52,53,54,55,56,57,58,59,60,61,62,63,64,65,66,67,68,69,70,71,73,74,75,76,77,78,79,80,81,82,83,84,85,87,88,89,90,91,92,93,94,95,97,98,99,100,102,103,104,105,106,108,112,114,118,119,120,121,122,123,124,125,126,127,128,129,130,131,132,133,134,135,136,137,139,140,141,142,143,144,145,147,148,149,150,151,152,155,157,158,159,160,161,162,164,165,166,168,169,171,172,173,176,177,178,179,180,185,187,188,191,192,195,196,197,201,208,209,210,211,212,215,217,218,219,221,223,225,226,227,228,231,232,244,245,246,247,248,249,253,254,257,261,262,265,269,270,271,272,273,275,276,277,278,279,285,286,287,290,291,299,303,323,327]
output_sl = lgc.flow_clustering(G,ref_node,method="sl")
print(output_sl)
print(G.compute_conductance(output_sl[0],cpp=False))

# # Example for multiclass label prediction

# +
g = lgc.GraphLocal(os.path.join(data_path,'datasets/JohnsHopkins.graphml'),'graphml')

# List of nodes around which we want to find labels
labels = [[1,10,300],[3215],[1002,2500,540]]

output_mc=lgc.multiclass_label_prediction(g,labels=labels)
# -

print(output_mc)

# # Densest subgraph

#How to use densest subgraph method.
g = lgc.GraphLocal(os.path.join(data_path,'datasets/Erdos02-cc.edgelist'),'edgelist',' ')
lgc.densest_subgraph(g)

# # Network Community Profile

# Build an NCP by running each local method over many seeds in parallel
# (ratio = fraction of nodes sampled as seeds, nthreads = worker threads).
G = lgc.GraphLocal(os.path.join(data_path,"datasets/Erdos02-cc.edgelist"),file_type = "edgelist", separator = " ", header = False)
ncp_instance = lgc.NCPData(G)
ncp_instance.approxPageRank(ratio=0.1,nthreads=4)
ncp_instance.crd(ratio=0.01,w=10,U=10,h=1000,nthreads=4)
ncp_instance.mqi(ratio=0.1,nthreads=4)
ncp_instance.l1reg(ratio=0.1,nthreads=4)
ncp_instance.l1reg_rand(ratio=0.1,nthreads=4)

# ## Plot NCP with CRD

ncp_plots = lgc.NCPPlots(ncp_instance,method_name = "crd")
#plot conductance vs size
fig, ax, _ = ncp_plots.cond_by_size()
plt.show()
#plot conductance vs volume
fig, ax, _ = ncp_plots.cond_by_vol()
plt.show()
#plot isoperimetry vs size
fig, ax, _ = ncp_plots.isop_by_size()
plt.show()

# ## Plot NCP with MQI

ncp_plots = lgc.NCPPlots(ncp_instance,method_name = "mqi")
#plot conductance vs size
fig, ax, _ = ncp_plots.cond_by_size()
plt.show()
#plot conductance vs volume
fig, ax, _ = ncp_plots.cond_by_vol()
plt.show()
#plot isoperimetry vs size
fig, ax, _ = ncp_plots.isop_by_size()
plt.show()

# ## Plot NCP with l1reg

ncp_plots = lgc.NCPPlots(ncp_instance,method_name = "l1reg")
#plot conductance vs size
fig, ax, _ = ncp_plots.cond_by_size()
plt.show()
#plot conductance vs volume
fig, ax, _ = ncp_plots.cond_by_vol()
plt.show()
#plot isoperimetry vs size
fig, ax, _ = ncp_plots.isop_by_size()
plt.show()

# ## Plot NCP with acl

ncp_plots = lgc.NCPPlots(ncp_instance,method_name = "acl")
#plot conductance vs size
fig, ax, _ = ncp_plots.cond_by_size()
plt.show()
#plot conductance vs volume
fig, ax, _ = ncp_plots.cond_by_vol()
plt.show()
#plot isoperimetry vs size
fig, ax, _ = ncp_plots.isop_by_size()
plt.show()

# ## Plot NCP with selected rows

ncp_plots = lgc.NCPPlots(ncp_instance,selected_rows = range(500))
#plot conductance vs size
fig, ax, _ = ncp_plots.cond_by_size()
plt.show()
#plot conductance vs volume
fig, ax, _ = ncp_plots.cond_by_vol()
plt.show()
#plot isoperimetry vs size
fig, ax, _ = ncp_plots.isop_by_size()
plt.show()

# ## Check ncp results as a data frame

ncp_instance.as_data_frame()

help(ncp_instance)

# ## Extract a specific NCP result

# +
#Get the ncp result corresponding to the first row (index 0) of the data frame above
print(ncp_instance.output_set(0))

#Get the ncp result corresponding to row index 9 of the data frame above
print(ncp_instance.output_set(9))
# -

# ## Clustering metrics for vertex neighborhood

g = lgc.GraphLocal(os.path.join(data_path,'datasets/dolphins.smat'),separator = ' ')

help(lgc.triangleclusters)

# Per-vertex neighborhood statistics: conductance, cut, volume, clustering
# coefficient, triangle counts (see help output above for exact semantics).
cond,cut,vol,cc,t = lgc.triangleclusters(g)

# ## Find extrema in a graph based on neighborhoods.

help(lgc.neighborhoodmin)

minverts, minvals = lgc.neighborhoodmin(g,cond,True)
notebooks/examples.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .r
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: R
#     language: R
#     name: ir
# ---

# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/Jlokkerbol/practice_datasets/blob/main/Heart_disease.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>

# + id="X31jTdka5PBI" outputId="5c825d69-efe5-444a-e174-662e5b07c6fa" colab={"base_uri": "https://localhost:8080/"}
install.packages('psych')
install.packages('tidyverse')
install.packages('corrplot')
install.packages('ggplot2')
install.packages('caret')
install.packages('Metrics')
install.packages('e1071')
install.packages('glmnet')

library(psych) # alternative package to describe your data
library(tidyverse) # easy way to subset your data
library(corrplot) # to draw correlation plots
library(ggplot2) # to plot graphs
library(caret) # to run machine learning models
library(Metrics) # to calculate RMSE
library(e1071) # for statistical analyses
library(glmnet) # for statistical analyses

options(scipen=999) # turn off scientific notation

# + id="ZDIro93s5PBP"
# Load the UCI heart-disease data directly from GitHub.
df = read.csv('https://raw.githubusercontent.com/jads-nl/discover-projects/main/uci-heart-disease/heart.csv')

# + [markdown] id="XlsIhsV55PBU"
# This data set dates from 1988 and contains data on four locations:
# Cleveland, Hungary, Switzerland, and Long Beach V. It originally contained 76 attributes, but all published experiments refer to using a subset of 14 attributes.
# The "target" variable refers to the presence of heart disease in the patient (0 = no heart disease and 1 = heart disease).
# Link to dataset: https://www.kaggle.com/johnsmith88/heart-disease-dataset

# + [markdown] id="1u_ckcUo5PBV"
# ## Exercise 1: Familiarize yourself with the data.
# Provide a table with descriptive statistics for all included variables and check:
#
# -Classes of each of the variables (e.g. factors or continuous variables).
#
# -Change the class of the "target" variable such that it is a binary factor
#
# -Descriptive/summary statistics for all continuous variables (e.g. mean, SD, range) and factor variables (e.g. frequencies).
#
# -Explore missing values: sapply(df, function(x) sum(is.na(x)))
#
# HINT: Use the base-R function "str" (no package needed)
# Use the "describe" function (from the "psych"-package) for continuous variables and the "table" function (base-R) for factor variables.

# + id="HTB_oNWY5PBX" outputId="cc74fb51-717f-4e07-dc23-437c9a4b36ec" colab={"base_uri": "https://localhost:8080/"}
# To check the structure of the data, you can use the "str"-command:
str(df)

# + id="1vb-reu35PBc" outputId="708ae7f2-8614-4895-f692-0afa950eeba4" colab={"base_uri": "https://localhost:8080/", "height": 68}
#To create a binary factor (as target has only two levels)
df$target <- as.factor(df$target) ## We need to tell R that our outcome is a binary factor
table(df$target)

# + id="i6-WoYj45PBf" outputId="8506c8ca-c467-4750-c451-ef67855c8715" colab={"base_uri": "https://localhost:8080/", "height": 34}
#Explore missing values:
sapply(df, function(x) sum(is.na(x)))

# + id="2ZJcBke45PBi" outputId="8357dc35-3c21-4b27-cb0f-8d107d9bc004" colab={"base_uri": "https://localhost:8080/", "height": 351}
# To describe numeric and integer variables
df %>% keep(is.numeric) %>% describe

# + [markdown] id="tA4XFm9f5PBm"
# ## Exercise 2:
# The variable "target" refers to the presence of heart disease and hence is the variable of interest for our prediction model ("Y" or dependent variable).
#
# The frequency of the outcomes (heart disease yes/no) was already determined in the previous code blocks. Please further explore Y in terms of:
#
# -Describe X-variables separately for both outcome categories (using describeBy(df, group = df$target)
#
# -Draw a correlation plot to see all correlations between Y and the independent (numeric) variables (see HINT 2 below)
#
# -Visualize the relation between Y and a few correlated X-variables (i.e. create boxplot or scatterplot using the "ggplot2"-package)
#
#
# ### HINT 1: For visualisation, ggplot is frequently used as it provides a flexible way to draw a lot of different graphs.
# Ggplot contains two basic elements:
#
# 1.The initiation command: ggplot(DATASET, aes(x=XVAR, y=YVAR, group=XVAR)). This draws a blank ggplot. Even though the x and y are specified, there are no points or lines in it.
#
# 2.Add the respective geom of interest (for this exercise you'll need "+geom_boxplot()")
#
# The full code to write a boxplot would then be: ggplot(DATASET, aes(x=XVAR, y=YVAR, group=YVAR)) + geom_boxplot()
#
# ### HINT 2:
# To draw a correlation plot. Please use the "corrplot"-package. Using this package, one can construct a correlation plot in two steps:
#
# 1.Use "cor" to calculate correlation between all combinations of numeric variables (note: there are only numeric variables in the dataset)
#
# 2.Plot the calculated correlation by using the "corrplot"-function
# Note that target must be transformed back into a numeric variable for this exercise

# + id="OsW5j9Pa5PBm" outputId="314ac10f-7719-4c11-c0af-a95c13ae7fdc" colab={"base_uri": "https://localhost:8080/", "height": 1000}
# Describe X-variables separately for both outcome categories:
describeBy(df, group=df$target)

# + id="ASo8YBOl5PBo" outputId="4400f63d-44a0-432f-d35f-e9c9dbf03455" colab={"base_uri": "https://localhost:8080/", "height": 437}
# Correlation plot
# target is temporarily converted to numeric so cor() can include it,
# then converted back to a factor afterwards.
df$target = as.numeric(df$target)
corr_df <- df %>% keep(is.numeric) %>% cor
corrplot(corr_df, number.font=12, tl.cex = 1.00, title="Correlation between all numeric variables in the dataset", mar=c(0,0,1,0))
df$target = as.factor(df$target)

# + id="ws4hyk_l5PBq" outputId="30af2cd8-2b53-41b4-f4ed-2cc92753e805" colab={"base_uri": "https://localhost:8080/", "height": 857}
# Construct boxplot
ggplot(df, aes(x=thalach, y=target, group=target)) +
  geom_boxplot(aes(fill=target)) +
  labs(title="Boxplot max heart rate by heart disease status")+
  ylab(label="Heart disease status") +
  xlab("maximum heart rate achieved")+
  theme_bw()+
  theme(axis.text.x = element_text(angle = 90, vjust = 0.5, hjust=1))

ggplot(df, aes(x=exang, y=target, group=target)) +
  geom_boxplot(aes(fill=target)) +
  labs(title="Boxplot exercise induced angina by heart disease status")+
  ylab(label="Heart disease status") +
  xlab("exercise induced angina")+
  theme_bw()+
  theme(axis.text.x = element_text(angle = 90, vjust = 0.5, hjust=1))

# + [markdown] id="hJaOlAlT5PBs"
# ## Exercise 3
# Now that we have a feeling of the information in the dataset and we know that there are no missing values, we can start by running some simple machine learning models.
# We will use the "caret"-package for this exercise.
# For this we'll need to split the data in a train and a test set.
# Use the following code to create a train and test dataset:

# + id="9po7YS9Q5PBs"
set.seed (123456789)
dt = sort(sample(nrow(df), nrow(df)*.7)) ## 70% in train set
train<-df[dt,]
test<-df[-dt,]

# + [markdown] id="ea_dTvgT5PBt"
# Do you understand why we are splitting the data?
# Do you understand what has been done in the code above?

# + [markdown] id="QMUePA375PBu"
# Next we need to specify how we want to perform the cross-validation (i.e. the optimization of the model on the train set). To this extend we need to set the method of CV, the number of folds and the numer of times we want to repeat the process.
# This can be done using the following command:

# + id="Fw0foqd45PBu"
# Cross-validation strategy
ctrl <- trainControl(method = "repeatedcv",
                     number = 5, # five folds (the previous comment said "ten folds", contradicting number = 5)
                     repeats = 3) # repeated three times

# + [markdown] id="zoGjjtz95PBv"
# ## Exercise 3.1
# Once this has been set, we are ready to run the models on the train set.
# Use the syntax below to estimate a linear model, LASSO model and a kNN model on the train set:
# Please inspect the outcomes of the model. Which model performs best?

# + id="wJ7B0w5m5PBv" outputId="4f487ed4-6cba-4ec2-fe8d-65e62f05ccb2" colab={"base_uri": "https://localhost:8080/", "height": 510}
## Run LM
lm <- train(target ~ .,
            method = "glm",
            data = train,
            trControl = ctrl,
            preProcess = c("center"))
lm # to obtain summary of the model
varImp(lm) # to see most important parameters

# + id="05q1YqvY5PBx" outputId="1d995a38-a76f-4048-ea79-e009e381dd13" colab={"base_uri": "https://localhost:8080/", "height": 1000}
## Run kNN
knnFit <- train(target ~ .,
                data = train,
                method = "knn",
                trControl = ctrl,
                preProcess = c("center","scale"))
knnFit # to obtain summary of the model
plot(knnFit)
varImp(knnFit) # to see most important parameters
plot(varImp(knnFit)) # to plot most important parameters

# + id="pjy0x7HA5PBy" outputId="d6cab3ba-c363-4052-fec3-8a9da9836cdc" colab={"base_uri": "https://localhost:8080/", "height": 1000}
## Run LASSO
# lambda grid: 100 values, log-spaced from 1e-3 to 1e3
lambda <- 10^seq(-3,3,length=100)
lassoFit <- train(target ~ .,
                  data = train,
                  method = "glmnet",
                  trControl = ctrl,
                  family = "binomial",
                  preProcess = c("center","scale"),
                  tuneGrid = expand.grid(alpha = 1, lambda = lambda))
lassoFit # to obtain summary of the model
varImp(lassoFit) # to see most important parameters
plot(varImp(lassoFit)) # to plot most important parameters

# + [markdown] id="Y_W4YS_15PBz"
# ## Exercise 3.4
# Now all we have to do is to check the performance of our best performing model on the test dataset.
# Please use the code below to check this performance.
# Which model is best?
# Why should you never test all your models straight away on the test dataset, but instead use your training data set first?

# + id="PIXdduNh5PB0" outputId="3c541f63-5d18-455e-b087-6dd16f448112" colab={"base_uri": "https://localhost:8080/", "height": 1000}
## Check performance on test set
# For LM:
print("LM performance")
pred_lm <- predict(lm, newdata = test)
confusionMatrix(pred_lm, test$target)

# For KNN
print("KNN performance")
pred_knn <- predict(knnFit, newdata = test)
confusionMatrix(pred_knn, test$target)

# For LASSO
print("LASSO performance")
pred_lasso <- predict(lassoFit, newdata = test)
confusionMatrix(pred_lasso, test$target)
uci-heart-disease/r-example-heart-disease.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# ## Dependencies and starter code

# +
# Dependencies and Setup
import matplotlib.pyplot as plt
import pandas as pd
import scipy.stats as st
import numpy as np

# Study data files (paths kept separate from the DataFrames so the raw
# locations remain available after loading).
mouse_metadata_path = "data/Mouse_metadata.csv"
study_results_path = "data/Study_results.csv"

# Read the mouse data and the study results.
mouse_metadata = pd.read_csv(mouse_metadata_path)
study_results = pd.read_csv(study_results_path)
mouse_metadata.head()

# +
# Combine the data into a single dataset.
# FIX: merge on the single shared key -- the original passed
# on=["Mouse ID", "Mouse ID"], naming the same column twice.
Study_results_complete = pd.merge(study_results, mouse_metadata,
                                  how="left", on="Mouse ID")

# Display the data table for preview.
Study_results_complete.head()
# -

# ## Summary statistics

# +
# Summary statistics (mean, median, variance, std dev, SEM) of the tumor
# volume for each regimen. One groupby object replaces the original five
# separate groupby passes over the same data.
grouped_tumor = Study_results_complete.groupby("Drug Regimen")["Tumor Volume (mm3)"]

summary_statistics = pd.DataFrame({
    "Mean": grouped_tumor.mean(),
    "Median": grouped_tumor.median(),
    "Variance": grouped_tumor.var(),
    "Std Dev": grouped_tumor.std(),
    "SEM": grouped_tumor.sem(),
})
summary_statistics
# -

Study_results_complete.describe()

# Group by drug regimen and count Mouse ID entries to determine the number
# of recorded data points per regimen.
regimen_data_points = Study_results_complete.groupby("Drug Regimen").count()["Mouse ID"]
regimen_data_points

# ## Bar plots

# +
# Bar plot of the number of data points for each treatment regimen (pandas).
regimen_data_points.plot(kind="bar", figsize=(10, 5))

# Set chart title and axis labels.
plt.title("Data Points Visual")
plt.xlabel("Drug Regimen")
plt.ylabel("Data Points")

# Show chart and set layout.
plt.tight_layout()
plt.show()

# +
# Same bar plot built with pyplot.
# FIX: plot the computed counts and labels instead of hard-coded lists, so
# the chart cannot drift out of sync with the data.
x_axis = np.arange(len(regimen_data_points))

plt.bar(x_axis, regimen_data_points.values, color='b', alpha=0.75, align='center')
plt.xticks(x_axis, regimen_data_points.index, rotation='vertical')

plt.xlim(-0.75, len(x_axis) - 0.25)
plt.ylim(0, regimen_data_points.max() + 10)

plt.title("Data Points Visual")
plt.xlabel("Drug Regimen")
plt.ylabel("Data Points")
# -

# ## Pie plots

# +
# Group by "Mouse ID" and "Sex" so each unique mouse contributes one row.
groupby_gender = Study_results_complete.groupby(["Mouse ID", "Sex"])
mouse_gender_df = pd.DataFrame(groupby_gender.size())

# Total count of female vs male mice.
mouse_gender = pd.DataFrame(mouse_gender_df.groupby(["Sex"]).count())
mouse_gender.columns = ["Total Count"]

# Percentage of female vs male mice.
mouse_gender["Percentage of Sex"] = (
    100 * mouse_gender["Total Count"] / mouse_gender["Total Count"].sum())
mouse_gender

# +
# Pie plot of the distribution of female versus male mice (pandas).
colors = ['blue', 'green']
explode = (0.1, 0)
plot = mouse_gender.plot.pie(y='Total Count', figsize=(5, 5), colors=colors,
                             startangle=140, explode=explode, shadow=False,
                             autopct="%1.1f%%")

# +
# Same pie plot built with pyplot.
# FIX: derive labels and sizes from the computed table instead of the
# hard-coded values 49.799197 / 50.200803.
labels = mouse_gender.index
sizes = mouse_gender["Percentage of Sex"]

plt.pie(sizes, explode=explode, labels=labels, colors=colors,
        autopct="%1.1f%%", shadow=False, startangle=140)

# Equal aspect ratio so the pie is drawn as a circle.
plt.axis("equal")
# -

# ## Quartiles, outliers and boxplots

# +
# Last (final) tumor measurement for each mouse within each regimen.
Study_results_complete_sort = Study_results_complete.groupby(
    ['Drug Regimen', 'Mouse ID']).last()['Tumor Volume (mm3)']

# FIX: the original computed the per-mouse final volumes above and then
# box-plotted *all* timepoints; use the final volumes as intended.
final_df = Study_results_complete_sort.reset_index()

# The four regimens of interest (also used as boxplot labels).
top_4 = ['Capomulin', 'Ramicane', 'Infubinol', 'Ceftamin']

# Collect the final tumor volumes per regimen, restricted to the top-4 order.
tumor_lists = final_df.groupby('Drug Regimen')['Tumor Volume (mm3)'].apply(list)
tumor_list_df = pd.DataFrame(tumor_lists).reindex(top_4)
tumor_vols = [vol for vol in tumor_list_df['Tumor Volume (mm3)']]

plt.boxplot(tumor_vols, labels=top_4)
plt.ylim(10, 80)
plt.show()
# -

# ## Line and scatter plots

# +
# Line plot of timepoint versus tumor volume for one Capomulin-treated mouse.
# FIX: the original built the j119 mask but never applied it, so every mouse
# was plotted; filter first, then plot volume against timepoint.
time_vs_tumor = Study_results_complete["Mouse ID"].isin(["j119"])
line_plot_final = Study_results_complete.loc[
    time_vs_tumor, ["Mouse ID", "Timepoint", "Tumor Volume (mm3)"]]
lines = line_plot_final.plot.line(x="Timepoint", y="Tumor Volume (mm3)")

# +
# Scatter plot of mouse weight versus average tumor volume for the
# Capomulin regimen (previously an empty TODO cell).
capomulin_df = Study_results_complete[
    Study_results_complete["Drug Regimen"] == "Capomulin"]
avg_by_mouse = capomulin_df.groupby("Mouse ID").mean()

plt.scatter(avg_by_mouse["Weight (g)"], avg_by_mouse["Tumor Volume (mm3)"])
plt.xlabel("Weight (g)")
plt.ylabel("Average Tumor Volume (mm3)")
plt.show()

# +
# Correlation coefficient and linear regression model for mouse weight vs
# average tumor volume under Capomulin (previously an empty TODO cell).
weight = avg_by_mouse["Weight (g)"]
avg_volume = avg_by_mouse["Tumor Volume (mm3)"]

correlation = st.pearsonr(weight, avg_volume)[0]
print(f"The correlation between mouse weight and average tumor volume is {correlation:.2f}")

slope, intercept, rvalue, pvalue, stderr = st.linregress(weight, avg_volume)
regress_values = weight * slope + intercept

plt.scatter(weight, avg_volume)
plt.plot(weight, regress_values, "r-")
plt.xlabel("Weight (g)")
plt.ylabel("Average Tumor Volume (mm3)")
plt.show()
# -
Pymaceuticals/pymaceuticals_starter.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# #### Parameter File Overview:
# Parameter files can be TSVs or CSVs. The first line of the file should contain column headings. The following headings are required (in any order): mechanism_id, part_id, param_name, param_val (spaces can be substituted for underscores and headings are not case sensitive). mechanism_id is the name of the Mechanism or the kind of mechanism that will use this parameter, for example "transcription" or "transcription_mm" for Michaelis-Menten transcription would go in this column. part_id refers to the name of the Component that will use this mechanism, for example "ptet" for a tet repressed promoter. param_name refers to the name of the model parameter, for example "ktx", "kb", or "ku". The value of these columns is case sensitive and underscores are different from spaces.
#
# #### Parameter Value Defaulting:
# Not all parameters need to have the required headings. The only two required columns are "param_val" and "param_name". BioCRNpyler uses a form of parameter name defaulting discussed below to find default parameters if no exact match is in the config file. This makes it easy to set default parameters for things like "ku" and "ktx" to quickly build models.
#
# #### Parameters inside BioCRNpyler:
# Inside of bioCRNpyler, parameters are stored as a dictionary key value pair: (mechanism_name, part_id, param_name) --> param_val. If that particular parameter key cannot be found, the software will default to the following keys: (mechanism_type, part_id, param_name) >> (part_id, param_name) >> (mechanism_name, param_name) >> (mechanism_type, param_name) >> (param_name) and give a warning. As a note, mechanism_name refers to the .name variable of a Mechanism. mechanism_type refers to the .type variable of a Mechanism. Either of these can be used as a mechanism_id. This allows for models to be constructed easily using default parameter values and for parameters to be shared between different Mechanisms and/or Components.
#
# #### Multiple Parameter Files:
# Components and Mixtures can both have one or multiple parameter files by passing in a list of filenames instead of a single filename to the parameter_file keyword. Components use parameters loaded from their file(s) before defaulting to the file(s) supplied to a Mixture. The last file in any list takes precedence and overwrites parameters loaded from files earlier in the list.
#
# #### Suppressing warnings
# To suppress parameter warnings, use the keyword parameter_warnings = False inside a Mixture or Component constructor.
#
# Below is an example TSV with all the parameters for a tetR promoter undergoing Michaelis-Menten transcription and translation.

# ## 1. The Parameter File
# In the following cell we look at an example parameter file that will run with no parameter defaulting.

# +
from biocrnpyler import *

perfect_param_file_name = "Perfect Param File Example.tsv"

# Open and print the parameter file so the raw contents can be compared with
# the parameter dictionary that gets built from it below.
param_file = open(perfect_param_file_name)
print("****Parameter File****")
print(param_file.read())
param_file.close()

# Create a Regulated Promoter and a reporter DNA assembly that uses it.
Ptet = RegulatedPromoter("ptet", regulators=["tetR"], leak=True)
reg_rep_assembly = DNAassembly(name="reporter", promoter=Ptet, rbs="BCD")
tet = Protein("tetR")
components = [reg_rep_assembly, tet]

# parameter_warnings=True: report whenever a parameter had to be found via
# the defaulting hierarchy (no warnings are expected for this file).
myMixture = TxTlExtract(name="txtl", parameter_file = perfect_param_file_name, components=components, parameter_warnings=True)

# Print the parameter dictionary created from the file.
print("\n****Loaded Parameters****")
for k in myMixture.parameters:
    print("param_dict["+repr(k)+"] = ", myMixture.parameters[k])

print("\n****Resulting CRN****")
print(myMixture.compile_crn())
# -

# We will now look at an example of a parameter file that uses defaulting. If you were to fill in this file with full parameter signatures (mechanism_id, part_id, param_name, value), the errors at the bottom of the readout would slowly diminish. However, even without full values the file loads and runs. Although this example uses only the key "param_name" for default values, there exists a hierarchy of keys to allow for shared parameters between different Components and Mechanisms.
#
# The parameter key hierarchy (top takes priority):
# 1) (mechanism_name, part_id, param_name)
# 2) (mechanism_type, part_id, param_name)
# 3) (part_id, param_name)
# 4) (mechanism_name, param_name)
# 5) (mechanism_type, param_name)
# 6) (param_name)
#
# here the column "mechanism_type" can either be a Mechanism's type string Mechanism.type (eg "transcription") or its name string Mechanism.name (eg "transcription_mm").

# +
from biocrnpyler import *

default_param_file_name = "Default Param File Example.tsv"

# Open and print the parameter file.
param_file = open(default_param_file_name)
print("****Parameter File****")
print(param_file.read())
param_file.close()

# Create a Regulated Promoter and reporter assembly, as in the first example.
Ptet = RegulatedPromoter("ptet", regulators=["tetR"], leak=True)
reg_rep_assembly = DNAassembly(name="reporter", promoter=Ptet, rbs="BCD")
tet = Protein("tetR")
components = [reg_rep_assembly, tet]

# parameter_warnings=False keeps the output readable: every parameter in this
# file is resolved via defaulting, which would otherwise emit a warning each.
myMixture = TxTlExtract(name="txtl", parameter_file = default_param_file_name, components=components, parameter_warnings=False)
#To Run With Parameter Warnings, change the keyword parameter_warnings = True
#myMixture = TxTlExtract(name="txtl", parameter_file = default_param_file_name, components=components, parameter_warnings=True)

# Print the parameter dictionary created from the file.
print("\n****Loaded Parameters****")
for k in myMixture.parameters:
    print("param_dict["+repr(k)+"] = ", myMixture.parameters[k])

print("\n****Resulting CRN****")
print(myMixture.compile_crn())
# -

# ### Setting Parameters at the component level
# Components can have their own parameter files instead of relying on the parameter files passed into a mixture. Components can also have the mixture's default parameters overwritten with a parameter dictionary. Below, we will create a DNAassembly which has custom parameters loaded in as a dictionary (this works the same as loading them with a file). We will put this in a Mixture with the default parameters from the above example. There are now many fewer parameter warnings as well. This example also helps illustrate how the parameter loading hierarchy works if you examine the final CRNs.

# +
# Create a custom parameter dictionary for the ptet promoter. In this case we
# add Michaelis-Menten transcription rates plus specific leak parameters.
ra_param_dict = {
    ("transcription_mm", "ku"):33.33, #These parameters will take priority over single key parameters
    ("transcription_mm", "kb"):.3333,
    ("transcription_mm", "ktx"):3.333,
    ("transcription_mm", "ptet_leak", "ku"):111.1, #these parameters will take priority over the ones above
    ("transcription_mm", "ptet_leak", "kb"):.1111,
}

#Use the parameter_file keyword to update the parameters with a file.
#Use the parameters keyword to update the parameters with a dictionary.
#If you use both: the dictionary takes precedence over the file if there are conflicts.
Ptet = RegulatedPromoter("ptet", regulators=["tetR"], leak=True, parameters = ra_param_dict)
reg_rep_assembly = DNAassembly(name="reporter", promoter=Ptet, rbs="BCD")
tet = Protein("tetR")
components = [reg_rep_assembly, tet]
myMixture = TxTlExtract(name="txtl", parameter_file = default_param_file_name, components=components, parameter_warnings = False)
myCRN = myMixture.compile_crn()
print(myCRN)
# -

# ### Suppressing warnings:
# We can see when default parameters are loaded by toggling the 'parameter_warnings' keyword for a Mixture or a Component. By default this is set to None for Mixtures, which means warnings can be toggled at the Component level. If set to True/False for a Mixture, this will supersede the Component level toggling. The default setting for Component is parameter_warnings = True. Below, if you change the parameter warnings for various Components or the Mixture, the warning messages printed will change/disappear.
#

# +
# leak=False here, so no ptet_leak parameters are required at all.
Ptet = RegulatedPromoter("ptet", regulators=["tetR"], leak=False)

# parameter_warnings is set per-Component here; the Mixture below passes
# parameter_warnings=None, leaving the Component-level setting in control.
reg_rep_assembly = DNAassembly(name="reporter", promoter=Ptet, rbs="BCD", parameter_warnings = True)
tet = Protein("tetR")
components = [reg_rep_assembly, tet]
myMixture = TxTlExtract(name="txtl", parameter_warnings = None, parameter_file = default_param_file_name, components=components)
myCRN = myMixture.compile_crn()
print(myCRN)
# -
examples/Parameters loading.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# +
# Monkey-patch warnings.warn to a no-op so third-party libraries do not spam
# the notebook output.
def warn(*args, **kwargs):
    pass
import warnings
warnings.warn = warn

from collections import defaultdict
import os
import sys
# Make the repository root importable when running from the notebooks folder.
sys.path.append(os.path.abspath(os.pardir))

import pandas as pd

from bella.helper import read_config, full_path
from bella.parsers import semeval_14, dong, election
from bella.data_types import TargetCollection
from bella.tokenisers import ark_twokenize

# +
# Load all of the datasets. Paths come from the project config file; each
# parser returns a TargetCollection of (sentence, target, sentiment) entries.
semeval_14_rest_train = semeval_14(full_path(read_config('semeval_2014_rest_train')))
semeval_14_lap_train = semeval_14(full_path(read_config('semeval_2014_lap_train')))
semeval_14_rest_test = semeval_14(full_path(read_config('semeval_2014_rest_test')))
semeval_14_lap_test = semeval_14(full_path(read_config('semeval_2014_lap_test')))
dong_train = dong(full_path(read_config('dong_twit_train_data')))
dong_test = dong(full_path(read_config('dong_twit_test_data')))
election_train, election_test = election(full_path(read_config('election_folder_dir')))
mitchel_train = semeval_14(full_path(read_config('mitchel_train')))
mitchel_test = semeval_14(full_path(read_config('mitchel_test')))
youtubean = semeval_14(full_path(read_config('youtubean')))

# Merge each train/test split into one collection per dataset.
# NOTE(review): these assignments rebind the imported `dong` and `election`
# parser functions to dataset objects; the parsers are not needed again below.
semeval_14_rest = TargetCollection.combine_collections(semeval_14_rest_train, semeval_14_rest_test)
semeval_14_laptop = TargetCollection.combine_collections(semeval_14_lap_train, semeval_14_lap_test)
dong = TargetCollection.combine_collections(dong_train, dong_test)
election = TargetCollection.combine_collections(election_train, election_test)
mitchel = TargetCollection.combine_collections(mitchel_train, mitchel_test)

# Combine all of the product reviews
datasets = {'SemEval 14 Laptop' : semeval_14_laptop, 'SemEval 14 Resturant' : semeval_14_rest, 'Mitchel' : mitchel, 'Dong Twitter' : dong, 'Election Twitter' : election, 'YouTuBean' : youtubean}
# -

# # Datasets
# This notebook will describe the different datasets that have been used as well as the statistics of these datasets. The datasets used are the following:
# 1. [Dong et al.](https://aclanthology.coli.uni-saarland.de/papers/P14-2009/p14-2009) [Twitter dataset](https://github.com/bluemonk482/tdparse/tree/master/data/lidong) NOTE that the dataset does not link to the paper as the dataset released from the paper has already been pre-processed where as this dataset has not.
# 2. [SemEval 2014 Restaurant dataset](http://alt.qcri.org/semeval2014/task4/index.php?id=data-and-tools). We used Train dataset version 2 and the test dataset. This dataset contains 4 sentiment values; 1. Positive, 2. Neutral, 3. Negative, and 4. Conflict but we are only going to use the first 3 to make it comparable to the other datasets and the fact that the conflict label only has 91 instances in the training set and 14 in the test set.
# 3. [SemEval 2014 Laptop dataset](http://alt.qcri.org/semeval2014/task4/index.php?id=data-and-tools). We used Train dataset version 2 and the test dataset. This dataset contains 4 sentiment values; 1. Positive, 2. Neutral, 3. Negative, and 4. Conflict but we are only going to use the first 3 to make it comparable to the other datasets and the fact that the conflict label only has 45 instances in the training set and 16 in the test set.
# 4. [Election dataset](https://figshare.com/articles/EACL_2017_-_Multi-target_UK_election_Twitter_sentiment_corpus/4479563/1)
# 5. [Youtubean dataset](https://github.com/epochx/opinatt/blob/master/samsung_galaxy_s5.xml) [by Marrese-Taylor et al.](https://www.aclweb.org/anthology/W17-5213) - Dataset of 7 youtube reviews of the Samsung Galaxy S5. The text are the closed captions of the videos where the captions were provided by the authors and not automatically generated. The dataset does contain 7 conflict labels which in the original paper were matched to neutral labels however in our experiments we remove these labels thus the statistics we present here are slightly different to those in the original paper when describing the dataset. However if you parse the dataset and include the conflicts then the statistics will match the original paper. (To parse the dataset with conflicts add conflict=True parameter to *semeval_14* function)
# 6. [Mitchel dataset](http://www.m-mitchell.com/code/MitchellEtAl-13-OpenSentiment.tgz) which was released with this [paper](https://www.aclweb.org/anthology/D13-1171). The dataset is of Tweets where the targets are named entities specifically either organisations or persons.

# +
# Build one summary row per dataset: domain/type/medium metadata plus counts
# and percentage breakdowns of targets by number of distinct sentiments.
dataset_dict = defaultdict(list)
index = []
columns = ['Domain', 'Type', 'Medium', 'No. Targets (Dataset Size)', 'No. Senti Labels', 'Mean Targets per Sent', 'No Unique Targets', '% Targets with 1 Sentiment per Sentence', '% Targets with 2 Sentiment per Sentence', '% Targets with 3 Sentiment per Sentence', 'Avg sentence length per target']

# Hand-curated metadata keyed by the display names used in `datasets`.
name_domain = {'SemEval 14 Laptop' : 'Laptop', 'SemEval 14 Resturant' : 'Restaurant', 'Mitchel' : 'Unknown', 'Dong Twitter' : 'General', 'Election Twitter' : 'Politics', 'YouTuBean' : 'Mobile Phones'}
name_type = {'SemEval 14 Laptop' : 'Review', 'SemEval 14 Resturant' : 'Review', 'Mitchel' : 'Social Media', 'Dong Twitter' : 'Social Media', 'Election Twitter' : 'Social Media', 'YouTuBean' : 'Review'}
name_medium = {'SemEval 14 Laptop' : 'Written', 'SemEval 14 Resturant' : 'Written', 'Mitchel' : 'Written', 'Dong Twitter' : 'Written', 'Election Twitter' : 'Written', 'YouTuBean' : 'Spoken'}

for name, dataset in datasets.items():
    index.append(name)
    targets_i_senti = []
    num_targets = len(dataset)
    num_sentiment_labels = len(dataset.stored_sentiments())
    avg_sent_length = dataset.avg_sentence_length_per_target()
    # Percentage of targets whose sentence carries i distinct sentiments
    # (i = 1..3); 0 when the dataset has fewer than i sentiment labels.
    for i in range(1, 4):
        if i > num_sentiment_labels:
            targets_i_senti.append(0)
        else:
            i_senti_targets = len(dataset.subset_by_sentiment(i))
            targets_i_senti\
            .append((i_senti_targets / num_targets) * 100)
    dataset_dict['Domain'].append(name_domain[name])
    dataset_dict['Type'].append(name_type[name])
    dataset_dict['Medium'].append(name_medium[name])
    dataset_dict['No. Targets (Dataset Size)'].append(num_targets)
    dataset_dict['No. Senti Labels'].append(num_sentiment_labels)
    dataset_dict['Mean Targets per Sent'].append(dataset\
                                                 .avg_targets_per_sentence())
    dataset_dict['No Unique Targets'].append(dataset.number_unique_targets())
    dataset_dict['% Targets with 1 Sentiment per Sentence'].append(targets_i_senti[0])
    dataset_dict['% Targets with 2 Sentiment per Sentence'].append(targets_i_senti[1])
    dataset_dict['% Targets with 3 Sentiment per Sentence'].append(targets_i_senti[2])
    dataset_dict['Avg sentence length per target'].append(avg_sent_length)

dataset_stats = pd.DataFrame(dataset_dict, index=index, columns=columns)
dataset_stats.round(2)
# -

# The high level statistics are presented above. At first it is surprising that the social media data has such a high average sentence length but the sentence in the Twitter cases is actually a Tweet compared to the SemEval and YouTuBean data which has been sentence split. However the YouTuBean data even when sentence split is still the longest; this could be due to the data being speech text rather than written.
#
# Again the datasets vary with the number of targets with distinct sentiments per sentence but most have only one distinct sentiment per sentence apart from the Election dataset which has quite an even split between 1 and 2 distinct sentiments.
#
# Lastly the Election dataset has the highest number of targets per sentence by a long way and this is not proportional to the average sentence length either.
#
# ## Syntactic Complexity of the dataset
# The above statistics are all based on quite high level summary statistics and do not contain any linguistic-specific statistic apart from perhaps the average sentence length. Therefore below is the table of average constituency tree depth for the datasets, which can be viewed as showing the sentence syntax complexity. This was also shown in the [Marrese-Taylor et al.](https://www.aclweb.org/anthology/W17-5213) paper on the datasets they used and here we present the same statistic on all of the datasets above.

# Average constituency parse tree depth per dataset (one row per dataset).
dataset_ling_dict = defaultdict(list)
index = []
columns = ['Average constituency tree depth']
for name, dataset in datasets.items():
    index.append(name)
    dataset_ling_dict['Average constituency tree depth'].append(dataset.avg_constituency_depth())
dataset_ling_stats = pd.DataFrame(dataset_ling_dict, index=index, columns=columns)
dataset_ling_stats.round(2)

# As you can see above, interestingly the Election dataset that had the 2nd largest average sentence length and by far the largest number of targets per sentence does not have the largest average tree depth. The YouTuBean dataset does, which may suggest that spoken text is syntactically more complex than written text.
notebooks/datasets.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # name: python3 # --- # + [markdown] id="mz0_QVkxCrX3" # # **Homework 1: COVID-19 Cases Prediction (Regression)** # + [markdown] id="ZeZnPAiwDRWG" # Author: <NAME> # # Slides: https://github.com/ga642381/ML2021-Spring/blob/main/HW01/HW01.pdf # Video: TBA # # Objectives: # * Solve a regression problem with deep neural networks (DNN). # * Understand basic DNN training tips. # * Get familiar with PyTorch. # # If any questions, please contact the TAs via TA hours, NTU COOL, or email. # # + [markdown] id="Jx3x1nDkG-Uy" # # **Download Data** # # # If the Google drive links are dead, you can download data from [kaggle](https://www.kaggle.com/c/ml2021spring-hw1/data), and upload data manually to the workspace. # + colab={"base_uri": "https://localhost:8080/"} id="tMj55YDKG6ch" outputId="b2fc349c-4dbb-42df-a70d-476648c58a60" tr_path = 'covid.train.csv' # path to training data tt_path = 'covid.test.csv' # path to testing data # !gdown --id '19CCyCgJrUxtvgZF53vnctJiOJ23T5mqF' --output covid.train.csv # !gdown --id '1CE240jLm2npU-tdz81-oVKEF3T2yfT1O' --output covid.test.csv # + [markdown] id="wS_4-77xHk44" # # **Import Some Packages** # + id="k-onQd4JNA5H" # PyTorch import torch import torch.nn as nn from torch.utils.data import Dataset, DataLoader # For data preprocess import numpy as np import csv import os import math # For plotting import matplotlib.pyplot as plt from matplotlib.pyplot import figure myseed = 42069 # set a random seed for reproducibility torch.backends.cudnn.deterministic = True torch.backends.cudnn.benchmark = False np.random.seed(myseed) torch.manual_seed(myseed) if torch.cuda.is_available(): torch.cuda.manual_seed_all(myseed) # + [markdown] id="BtE3b6JEH7rw" # # **Some Utilities** # # You do not need to modify this part. 
# + id="FWMT3uf1NGQp" def get_device(): ''' Get device (if GPU is available, use GPU) ''' return 'cuda' if torch.cuda.is_available() else 'cpu' def plot_learning_curve(loss_record, title=''): ''' Plot learning curve of your DNN (train & dev loss) ''' total_steps = len(loss_record['train']) x_1 = range(total_steps) x_2 = x_1[::len(loss_record['train']) // len(loss_record['dev'])] figure(figsize=(6, 4)) plt.plot(x_1, loss_record['train'], c='tab:red', label='train') plt.plot(x_2, loss_record['dev'], c='tab:cyan', label='dev') plt.ylim(0.0, 5.) plt.xlabel('Training steps') plt.ylabel('MSE loss') plt.title('Learning curve of {}'.format(title)) plt.legend() plt.show() def plot_pred(dv_set, model, device, lim=35., preds=None, targets=None): ''' Plot prediction of your DNN ''' if preds is None or targets is None: model.eval() preds, targets = [], [] for x, y in dv_set: x, y = x.to(device), y.to(device) with torch.no_grad(): pred = model(x) preds.append(pred.detach().cpu()) targets.append(y.detach().cpu()) preds = torch.cat(preds, dim=0).numpy() targets = torch.cat(targets, dim=0).numpy() figure(figsize=(5, 5)) plt.scatter(targets, preds, c='r', alpha=0.5) plt.plot([-0.2, lim], [-0.2, lim], c='b') plt.xlim(-0.2, lim) plt.ylim(-0.2, lim) plt.xlabel('ground truth value') plt.ylabel('predicted value') plt.title('Ground Truth v.s. Prediction') plt.show() # + [markdown] id="39U_XFX6KOoj" # # **Preprocess** # # We have three kinds of datasets: # * `train`: for training # * `dev`: for validation # * `test`: for testing (w/o target value) # + [markdown] id="TQ-MdwpLL7Dt" # ## **Dataset** # # The `COVID19Dataset` below does: # * read `.csv` files # * extract features # * split `covid.train.csv` into train/dev sets # * normalize features # # Finishing `TODO` below might make you pass medium baseline. 
# + id="0zlpIp9ANJRU" class COVID19Dataset(Dataset): ''' Dataset for loading and preprocessing the COVID19 dataset ''' def __init__(self, path, mode='train', target_only=False): self.mode = mode # Read data into numpy arrays with open(path, 'r') as fp: data = list(csv.reader(fp)) data = np.array(data[1:])[:, 1:].astype(float) if not target_only: feats = list(range(93)) else: # TODO: Using 40 states & 2 tested_positive features (indices = 57 & 75) feats = list(range(41)) + [41, 42, 43, 57, 58, 59, 60, 61, 75, 76, 77, 78, 79] pass if mode == 'test': # Testing data # data: 893 x 93 (40 states + day 1 (18) + day 2 (18) + day 3 (17)) data = data[:, feats] self.data = torch.FloatTensor(data) else: # Training data (train/dev sets) # data: 2700 x 94 (40 states + day 1 (18) + day 2 (18) + day 3 (18)) target = data[:, -1] data = data[:, feats] # Splitting training data into train & dev sets if mode == 'train': indices = [i for i in range(len(data)) if i % 10 != 0] elif mode == 'dev': indices = [i for i in range(len(data)) if i % 10 == 0] # Convert data into PyTorch tensors self.data = torch.FloatTensor(data[indices]) self.target = torch.FloatTensor(target[indices]) # Normalize features (you may remove this part to see what will happen) # self.data[:, 40:] = \ # (self.data[:, 40:] - self.data[:, 40:].mean(dim=0, keepdim=True)) \ #  / self.data[:, 40:].std(dim=0, keepdim=True) self.dim = self.data.shape[1] print('Finished reading the {} set of COVID19 Dataset ({} samples found, each dim = {})' .format(mode, len(self.data), self.dim)) def __getitem__(self, index): # Returns one sample at a time if self.mode in ['train', 'dev']: # For training return self.data[index], self.target[index] else: # For testing (no target) return self.data[index] def __len__(self): # Returns the size of the dataset return len(self.data) # + [markdown] id="AlhTlkE7MDo3" # ## **DataLoader** # # A `DataLoader` loads data from a given `Dataset` into batches. 
#

# + id="hlhLk5t6MBX3"
def prep_dataloader(path, mode, batch_size, n_jobs=0, target_only=False):
    ''' Generates a dataset, then is put into a dataloader.

    Shuffling is enabled only for the training split so dev/test evaluation
    is deterministic.
    '''
    dataset = COVID19Dataset(path, mode=mode, target_only=target_only)  # Construct dataset
    dataloader = DataLoader(
        dataset, batch_size,
        shuffle=(mode == 'train'), drop_last=False,
        num_workers=n_jobs, pin_memory=True)                            # Construct dataloader
    return dataloader

# + [markdown] id="SGuycwR0MeQB"
# # **Deep Neural Network**
#
# `NeuralNet` is an `nn.Module` designed for regression.
# The DNN consists of fully-connected layers with ReLU activation.
# This module also includes a function `cal_loss` for calculating loss.
#

# + id="49-uXYovOAI0"
class NeuralNet(nn.Module):
    ''' A simple fully-connected deep neural network for regression. '''
    def __init__(self, input_dim):
        super(NeuralNet, self).__init__()

        # Define your neural network here
        # TODO: How to modify this model to achieve better performance?
        self.net = nn.Sequential(
            nn.Linear(input_dim, 64),
            nn.ReLU(),
            nn.Linear(64, 32),
            nn.ReLU(),
            nn.Linear(32, 16),
            nn.ReLU(),
            nn.Linear(16, 8),
            nn.ReLU(),
            nn.Linear(8, 1)
        )

        # Mean squared error loss
        self.criterion = nn.MSELoss(reduction='mean')

    def forward(self, x):
        ''' Given input of size (batch_size x input_dim), compute output of the network '''
        # squeeze(1) turns the (batch, 1) output into a (batch,) vector so it
        # matches the shape of the target for MSE.
        return self.net(x).squeeze(1)

    def cal_loss(self, pred, target):
        ''' Calculate loss '''
        # TODO: you may implement L2 regularization here
        return self.criterion(pred, target)

# + [markdown] id="DvFWVjZ5Nvga"
# # **Train/Dev/Test**

# + [markdown] id="MAM8QecJOyqn"
# ## **Training**

# + id="lOqcmYzMO7jB"
def train(tr_set, dv_set, model, config, device):
    ''' DNN training with per-epoch validation, checkpointing of the best
    model (lowest dev MSE), and early stopping. '''

    n_epochs = config['n_epochs']  # Maximum number of epochs

    # Setup optimizer by name from torch.optim (e.g. 'Adam', 'SGD').
    optimizer = getattr(torch.optim, config['optimizer'])(
        model.parameters(), **config['optim_hparas'])

    min_mse = 1000.
    loss_record = {'train': [], 'dev': []}      # for recording training loss
    early_stop_cnt = 0
    epoch = 0
    while epoch < n_epochs:
        model.train()                           # set model to training mode
        for x, y in tr_set:                     # iterate through the dataloader
            optimizer.zero_grad()               # set gradient to zero
            x, y = x.to(device), y.to(device)   # move data to device (cpu/cuda)
            pred = model(x)                     # forward pass (compute output)
            mse_loss = model.cal_loss(pred, y)  # compute loss
            mse_loss.backward()                 # compute gradient (backpropagation)
            optimizer.step()                    # update model with optimizer
            loss_record['train'].append(mse_loss.detach().cpu().item())

        # After each epoch, test your model on the validation (development) set.
        dev_mse = dev(dv_set, model, device)
        if dev_mse < min_mse:
            # Save model if your model improved
            min_mse = dev_mse
            print('Saving model (epoch = {:4d}, loss = {:.4f})'
                .format(epoch + 1, min_mse))
            torch.save(model.state_dict(), config['save_path'])  # Save model to specified path
            early_stop_cnt = 0
        else:
            early_stop_cnt += 1

        epoch += 1
        loss_record['dev'].append(dev_mse)
        if early_stop_cnt > config['early_stop']:
            # Stop training if your model stops improving for "config['early_stop']" epochs.
            break

    print('Finished training after {} epochs'.format(epoch))
    return min_mse, loss_record

# + [markdown] id="0hSd4Bn3O2PL"
# ## **Validation**

# + id="yrxrD3YsN3U2"
def dev(dv_set, model, device):
    ''' Compute the average MSE loss over the whole validation set. '''
    model.eval()                                # set model to evalutation mode
    total_loss = 0
    for x, y in dv_set:                         # iterate through the dataloader
        x, y = x.to(device), y.to(device)       # move data to device (cpu/cuda)
        with torch.no_grad():                   # disable gradient calculation
            pred = model(x)                     # forward pass (compute output)
            mse_loss = model.cal_loss(pred, y)  # compute loss
        # Weight each batch loss by its batch size so the mean is exact even
        # when the last batch is smaller.
        total_loss += mse_loss.detach().cpu().item() * len(x)  # accumulate loss
    total_loss = total_loss / len(dv_set.dataset)              # compute averaged loss

    return total_loss

# + [markdown] id="g0pdrhQAO41L"
# ## **Testing**

# + id="aSBMRFlYN5tB"
def test(tt_set, model, device):
    ''' Run the model over the test set and return all predictions as a
    single numpy array (no targets exist for the test split). '''
    model.eval()                                # set model to evalutation mode
    preds = []
    for x in tt_set:                            # iterate through the dataloader
        x = x.to(device)                        # move data to device (cpu/cuda)
        with torch.no_grad():                   # disable gradient calculation
            pred = model(x)                     # forward pass (compute output)
            preds.append(pred.detach().cpu())   # collect prediction
    preds = torch.cat(preds, dim=0).numpy()     # concatenate all predictions and convert to a numpy array
    return preds

# + [markdown] id="SvckkF5dvf0j"
# # **Setup Hyper-parameters**
#
# `config` contains hyper-parameters for training and the path to save your model.

# + id="NPXpdumwPjE7"
device = get_device()                 # get the current available device ('cpu' or 'cuda')
os.makedirs('models', exist_ok=True)  # The trained model will be saved to ./models/
target_only = True                    # TODO: Using 40 states & 2 tested_positive features

# TODO: How to tune these hyper-parameters to improve your model's performance?
config = {
    'n_epochs': 3000,                # maximum number of epochs
    'batch_size': 10,                # mini-batch size for dataloader
    'optimizer': 'Adam',             # optimization algorithm (optimizer in torch.optim)
    'optim_hparas': {                # hyper-parameters for the optimizer (depends on which optimizer you are using)
        'lr': 0.0005,                # learning rate of Adam
        'weight_decay': 0.001        # weight_decay for Adam (L2 penalty)
    },
    'early_stop': 200,               # early stopping: number of epochs since the model's last improvement
    'save_path': 'models/model.pth'  # your model will be saved here
}

# + [markdown] id="6j1eOV3TOH-j"
# # **Load data and model**

# + colab={"base_uri": "https://localhost:8080/"} id="eNrYBMmePLKm" outputId="03a22335-1796-4675-e4eb-12603af53110"
tr_set = prep_dataloader(tr_path, 'train', config['batch_size'], target_only=target_only)
dv_set = prep_dataloader(tr_path, 'dev', config['batch_size'], target_only=target_only)
tt_set = prep_dataloader(tt_path, 'test', config['batch_size'], target_only=target_only)

# + id="FHylSirLP9oh"
model = NeuralNet(tr_set.dataset.dim).to(device)  # Construct model and move to device

# + [markdown] id="sX2B_zgSOPTJ"
# # **Start Training!**

# + id="GrEbUxazQAAZ" colab={"base_uri": "https://localhost:8080/"} outputId="40c0e632-0969-4fbb-eb81-896872c6eb40"
model_loss, model_loss_record = train(tr_set, dv_set, model, config, device)
print(model_loss)

# + id="hsNO9nnXQBvP" colab={"base_uri": "https://localhost:8080/", "height": 295} outputId="81f745cf-bfcb-4a8e-8eff-50ca69087e94"
plot_learning_curve(model_loss_record, title='deep model')

# + id="3iZTVn5WQFpX" colab={"base_uri": "https://localhost:8080/", "height": 350} outputId="d76e66f4-665f-4a33-bbaf-2f3ba91410ef"
del model
model = NeuralNet(tr_set.dataset.dim).to(device)
ckpt = torch.load(config['save_path'], map_location='cpu')  # Load your best model
model.load_state_dict(ckpt)
plot_pred(dv_set, model, device)  # Show prediction on the validation set

# + [markdown] id="aQikz3IPiyPf"
# # **Testing**
# The predictions of your model on testing set will be stored at `pred.csv`.

# + id="O8cTuQjQQOon" colab={"base_uri": "https://localhost:8080/"} outputId="7be4780e-8476-4cce-f177-5beb105266f0"
def save_pred(preds, file):
    ''' Save predictions to the specified csv file (one row per id). '''
    print('Saving results to {}'.format(file))
    # newline='' is required when handing a file object to csv.writer;
    # without it the csv module emits an extra blank row between records on Windows.
    with open(file, 'w', newline='') as fp:
        writer = csv.writer(fp)
        writer.writerow(['id', 'tested_positive'])
        for i, p in enumerate(preds):
            writer.writerow([i, p])

preds = test(tt_set, model, device)  # predict COVID-19 cases with your model
save_pred(preds, 'pred.csv')         # save prediction file to pred.csv

# + [markdown] id="nfrVxqJanGpE"
# # **Hints**
#
# ## **Simple Baseline**
# * Run sample code
#
# ## **Medium Baseline**
# * Feature selection: 40 states + 2 `tested_positive` (`TODO` in dataset)
#
# ## **Strong Baseline**
# * Feature selection (what other features are useful?)
# * DNN architecture (layers? dimension? activation function?)
# * Training (mini-batch? optimizer? learning rate?)
# * L2 regularization
# * There are some mistakes in the sample code, can you find them?

# + [markdown] id="9tmCwXgpot3t"
# # **Reference**
# This code is completely written by <NAME> @ NTUEE.
# Copying or reusing this code is required to specify the original author.
#
# E.g.
# Source: Heng-Jui Chang @ NTUEE (https://github.com/ga642381/ML2021-Spring/blob/main/HW01/HW01.ipynb)
#
HW1/0_87857.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# + [markdown] papermill={} tags=[]
# <img width="10%" alt="Naas" src="https://landen.imgix.net/jtci2pxwjczr/assets/5ice39g4.png?w=160"/>

# + [markdown] papermill={} tags=[]
# # YahooFinance - Get USDEUR data and chart
# <a href="https://app.naas.ai/user-redirect/naas/downloader?url=https://raw.githubusercontent.com/jupyter-naas/awesome-notebooks/master/YahooFinance/YahooFinance_Get_USDEUR_data_and_chart.ipynb" target="_parent"><img src="https://naasai-public.s3.eu-west-3.amazonaws.com/open_in_naas.svg"/></a>

# + [markdown] papermill={} tags=[]
# **Tags:** #yahoofinance #trading #plotly #naas_drivers

# + [markdown] papermill={} tags=[]
# **Author:** [<NAME>](https://www.linkedin.com/in/carloocchiena/)

# + [markdown] papermill={} tags=[]
# With this template, you can get data from USD EUR ticker available in [Yahoo finance](https://finance.yahoo.com/quote/USDEUR=x/).<br>

# + [markdown] papermill={} tags=[]
# ## Input

# + [markdown] papermill={} tags=[]
# ### Import libraries

# + papermill={} tags=[]
from naas_drivers import yahoofinance, plotly

# + [markdown] papermill={} tags=[]
# ### Input parameters
# 👉 Here you can change the ticker, timeframe and add moving averages analysis

# + papermill={} tags=[]
ticker = "EURUSD=X"          # Yahoo Finance ticker symbol
date_from = -365             # start of the window: 365 days back from today
date_to = "today"            # end of the window
interval = '1d'              # daily candles
moving_averages = [20, 50]   # window sizes (days); yahoofinance.get adds MA20/MA50 columns

# + [markdown] papermill={} tags=[]
# ## Model

# + [markdown] papermill={} tags=[]
# ### Get dataset from Yahoo Finance

# + papermill={} tags=[]
df_yahoo = yahoofinance.get(ticker,
                            date_from=date_from,
                            date_to=date_to,
                            interval=interval,
                            moving_averages=moving_averages)
df_yahoo

# + [markdown] papermill={} tags=[]
# ## Output

# + [markdown] papermill={} tags=[]
# ### Display chart

# + papermill={} tags=[]
# Get last value (most recent row of the dataframe)
last_date = df_yahoo.loc[df_yahoo.index[-1], "Date"].strftime("%Y-%m-%d")
last_value = df_yahoo.loc[df_yahoo.index[-1], "Close"]

# Create chart: close price plus the two moving-average columns
chart = plotly.linechart(df_yahoo,
                         x="Date",
                         y=["Close", "MA20", "MA50"],
                         showlegend=True,
                         title=f"<b>{ticker} rate as of {last_date}</b><br><span style='font-size: 13px;'>Last value: {last_value}</span>")
chart.update_layout(
    title_font=dict(family="Arial", size=18, color="black"),
    legend_font=dict(family="Arial", size=11, color="black"),
    margin_pad=10,
)
YahooFinance/YahooFinance_Get_USDEUR_data_and_chart.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# +
# default_exp text.utils
# default_cls_lvl 3
# -

#hide
# %reload_ext autoreload
# %autoreload 2
# %matplotlib inline

# # text.utils
#
# > Various text specific utility classes/functions

# +
# export
import os, importlib, inspect, random, sys, torch

from typing import List, Optional, Union, Tuple, Type

import numpy as np
import pandas as pd

from enum import Enum
from fastcore.foundation import L
from transformers import AutoConfig, AutoTokenizer, PretrainedConfig, PreTrainedTokenizerBase, PreTrainedModel, logging

from blurr.utils import Singleton

logging.set_verbosity_error()

# +
# hide_input
import pdb

from IPython.display import display
from fastcore.test import *
from nbdev.showdoc import show_doc

from blurr.utils import print_versions

print("What we're running with at the time this documentation was generated:")
print_versions("torch fastai transformers")
# -

# hide
# cuda
torch.cuda.set_device(1)
print(f"Using GPU #{torch.cuda.current_device()}: {torch.cuda.get_device_name()}")


#export
def get_hf_objects(
    pretrained_model_name_or_path: Optional[Union[str, os.PathLike]],
    model_cls: PreTrainedModel,
    config: Union[PretrainedConfig, str, os.PathLike] = None,
    tokenizer_cls: PreTrainedTokenizerBase = None,
    config_kwargs: dict = {},
    tokenizer_kwargs: dict = {},
    model_kwargs: dict = {},
    cache_dir: Union[str, os.PathLike] = None
) -> Tuple[str, PretrainedConfig, PreTrainedTokenizerBase, PreTrainedModel]:
    """
    Given at minimum a `pretrained_model_name_or_path` and `model_cls` (such as
    `AutoModelForSequenceClassification`), this method returns all the Hugging Face
    objects (architecture name, config, tokenizer, model) you need to train a model using Blurr
    """
    # config: build from the hub defaults unless the caller passed one in
    if config is None:
        config = AutoConfig.from_pretrained(pretrained_model_name_or_path, cache_dir=cache_dir, **config_kwargs)

    # tokenizer (gpt2, roberta, bart (and maybe others) tokenizers require a prefix space)
    if any(s in pretrained_model_name_or_path for s in ["gpt2", "roberta", "bart", "longformer"]):
        tokenizer_kwargs = {**{"add_prefix_space": True}, **tokenizer_kwargs}

    if tokenizer_cls is None:
        tokenizer = AutoTokenizer.from_pretrained(pretrained_model_name_or_path, cache_dir=cache_dir, **tokenizer_kwargs)
    else:
        tokenizer = tokenizer_cls.from_pretrained(pretrained_model_name_or_path, cache_dir=cache_dir, **tokenizer_kwargs)

    # model
    model = model_cls.from_pretrained(pretrained_model_name_or_path, config=config, cache_dir=cache_dir, **model_kwargs)

    # arch: third segment of the model's module path
    # (presumably e.g. "transformers.models.bert.modeling_bert" -> "bert" — TODO confirm)
    try:
        arch = model.__module__.split(".")[2]
    except:
        # NOTE(review): bare except — deliberately swallows any failure and falls back to "unknown"
        arch = "unknown"

    return (arch, config, tokenizer, model)


show_doc(get_hf_objects, title_level=2)


# ## BlurrText -

# export
@Singleton
class BlurrText:
    """A general utility class for getting your Hugging Face objects"""

    def __init__(self):
        # get hf classes (tokenizers, configs, models, etc...)
        transformer_classes = inspect.getmembers(importlib.import_module("transformers"))

        # build a df that we can query against to get various transformers objects/info
        self._df = pd.DataFrame(transformer_classes, columns=["class_name", "class_location"])
        self._df = self._df[self._df.class_location.apply(lambda v: isinstance(v, type))]

        # add the module each class is included in
        self._df["module"] = self._df.class_location.apply(lambda v: v.__module__)

        # remove class_location (don't need it anymore)
        self._df.drop(labels=["class_location"], axis=1, inplace=True)

        # break up the module into separate cols
        module_parts_df = self._df.module.str.split(".", n=-1, expand=True)
        for i in range(len(module_parts_df.columns)):
            self._df[f"module_part_{i}"] = module_parts_df[i]

        # using module part 3, break up the functional area and arch into separate cols
        module_part_3_df = self._df.module_part_3.str.split("_", n=1, expand=True)
        self._df[["functional_area", "arch"]] = module_part_3_df

        # fast tokenizers live in "..._fast" modules; strip the suffix so they share the arch name
        self._df["arch"] = self._df["arch"].str.replace("_fast", "")

        # transformers >=4.5.x does "auto" differently; so remove it and "utils" from "arch" column
        self._df = self._df[~self._df["arch"].isin(["auto", "utils"])]

        # if functional area = modeling, pull out the task it is built for
        # (class names follow the "<Arch>For<Task>" pattern)
        model_type_df = self._df[(self._df.functional_area == "modeling")].class_name.str.rsplit("For", n=1, expand=True)
        model_type_df[1] = np.where(model_type_df[1].notnull(), "For" + model_type_df[1].astype(str), model_type_df[1])
        self._df["model_task"] = model_type_df[1]
        self._df["model_task"] = self._df["model_task"].str.replace("For", "", n=1, case=True, regex=False)

        # repeat for the "<Arch>With<Task>" naming pattern, keeping any task found above
        model_type_df = self._df[(self._df.functional_area == "modeling")].class_name.str.rsplit("With", n=1, expand=True)
        model_type_df[1] = np.where(
            model_type_df[1].notnull(), "With" + model_type_df[1].astype(str), self._df[(self._df.functional_area == "modeling")].model_task
        )
        self._df["model_task"] = model_type_df[1]
        self._df["model_task"] = self._df["model_task"].str.replace("With", "", n=1, case=True, regex=False)

        # look at what we're going to remove (use to verify we're just getting rid of stuff we want too)
        # df[~df['hf_class_type'].isin(['modeling', 'configuration', 'tokenization'])]

        # only need these 3 functional areas for our querying purposes
        self._df = self._df[self._df["functional_area"].isin(["modeling", "configuration", "tokenization"])]

    def get_tasks(self, arch: str = None):
        """This method can be used to get a list of all tasks supported by your
        transformers install, or just those available to a specific architecture
        """
        query = ["model_task.notna()"]
        if arch:
            query.append(f'arch == "{arch}"')

        return sorted(self._df.query(" & ".join(query), engine="python").model_task.unique().tolist())

    def get_architectures(self):
        """Return the sorted list of architecture names known to this transformers install."""
        return sorted(self._df[(self._df.arch.notna()) & (self._df.arch != None)].arch.unique().tolist())

    def get_models(
        self,
        arch: str = None,
        task: str = None
    ):
        """The transformer models available for use (optional: by architecture | task)"""
        query = ['functional_area == "modeling"']

        if arch:
            query.append(f'arch == "{arch}"')
        if task:
            query.append(f'model_task == "{task}"')

        models = sorted(self._df.query(" & ".join(query)).class_name.tolist())
        return models

    def get_model_architecture(self, model_name_or_enum):
        """Get the architecture for a given model name / enum"""
        model_name = model_name_or_enum if isinstance(model_name_or_enum, str) else model_name_or_enum.name
        return self._df[self._df.class_name == model_name].arch.values[0]

    def get_hf_objects(
        self,
        pretrained_model_name_or_path: Optional[Union[str, os.PathLike]],
        model_cls: PreTrainedModel,
        config: Union[PretrainedConfig, str, os.PathLike] = None,
        tokenizer_cls: PreTrainedTokenizerBase = None,
        config_kwargs: dict = {},
        tokenizer_kwargs: dict = {},
        model_kwargs: dict = {},
        cache_dir: Union[str, os.PathLike] = None
    ) -> Tuple[str, PretrainedConfig, PreTrainedTokenizerBase, PreTrainedModel]:
        """Delegates to the module-level `get_hf_objects`, but resolves an "unknown"
        arch via this class's lookup table."""
        arch, config, tokenizer, model = get_hf_objects(
            pretrained_model_name_or_path, model_cls, config, tokenizer_cls, config_kwargs, tokenizer_kwargs, model_kwargs, cache_dir
        )
        if arch == "unknown":
            arch = self.get_model_architecture(type(model).__name__)

        return (arch, config, tokenizer, model)


show_doc(BlurrText, title_level=2)

# `BlurrText` is a `Singleton` (there exists only one instance, and the same instance is returned upon subsequent instantiation requests). You can get at it via the `NLP` constant below.

NLP = BlurrText()

NLP2 = BlurrText()
test_eq(NLP, NLP2)

# +
# hide
display(NLP._df.head())

print(list(NLP._df.model_task.unique()))
print("")
print(list(NLP._df.functional_area.unique()))
print("")
print(list(NLP._df.arch.unique()))
print("")
print(list(NLP._df.module_part_3.unique()))
# -

# ... the ***task***

# show_doc(BlurrText(BlurrText).get_tasks)

print(NLP.get_tasks())
print("")
print(NLP.get_tasks("bart"))

# ... the ***architecture***

# show_doc(BlurrText(BlurrText).get_architectures)

print(NLP.get_architectures())

# show_doc(BlurrText(BlurrText).get_model_architecture)

print(NLP.get_model_architecture("RobertaForSequenceClassification"))

# ... and lastly the ***models*** (optionally for a given task and/or architecture)

# show_doc(BlurrText(BlurrText).get_models)

print(L(NLP.get_models())[:5])

print(NLP.get_models(arch="bert")[:5])

print(NLP.get_models(task="TokenClassification")[:5])

print(NLP.get_models(arch="bert", task="TokenClassification"))

# ## To get all your Hugging Face objects (arch, config, tokenizer, and model)

# How to use:

logging.set_verbosity_error()

# +
from transformers import AutoModelForMaskedLM

arch, config, tokenizer, model = get_hf_objects("bert-base-cased-finetuned-mrpc", model_cls=AutoModelForMaskedLM)

print(arch)
print(type(config))
print(type(tokenizer))
print(type(model))

# +
from transformers import AutoModelForQuestionAnswering

arch, config, tokenizer, model = get_hf_objects("fmikaelian/flaubert-base-uncased-squad", model_cls=AutoModelForQuestionAnswering)

print(arch)
print(type(config))
print(type(tokenizer))
print(type(model))

# +
from transformers import BertTokenizer, BertForNextSentencePrediction

arch, config, tokenizer, model = get_hf_objects(
    "bert-base-cased-finetuned-mrpc", config=None, tokenizer_cls=BertTokenizer, model_cls=BertForNextSentencePrediction
)

print(arch)
print(type(config))
print(type(tokenizer))
print(type(model))
# -

# ## Export -

# +
# hide
from nbdev.export import notebook2script

notebook2script()
# -
nbs/01_text-utils.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # Copyright (c) 2017-2019 [Serpent-Tools developer team](https://github.com/CORE-GATECH-GROUP/serpent-tools/graphs/contributors), GTRC # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. # Data files are not included with the python package, but can be downloaded from the [GitHub repository](https://github.com/CORE-GATECH-GROUP/serpent-tools). For this tutorial, the files are placed in the directory identified with the ``SERPENT_TOOLS_DATA`` environment variable. import os pinFile = os.path.join( os.environ["SERPENT_TOOLS_DATA"], "fuelPin_det0.m", ) bwrFile = os.path.join( os.environ["SERPENT_TOOLS_DATA"], "bwr_det0.m", ) # # DetectorReader # ## Basic Operation # This notebook details how to utilize the [`serpentTools`](https://github.com/CORE-GATECH-GROUP/serpent-tools) package for reading detector files, `[input]_det[N].m` produced by SERPENT [1]. # Detectors can be defined with many binning parameters, listed [on the SERPENT Wiki](http://serpent.vtt.fi/mediawiki/index.php/Input_syntax_manual#det_.28detector_definition.29). # One could define a detector that has a spatial mesh, `dx/dy/dz/`, but also includes reaction and material bins, `dr, dm`. # Detectors are stored on the reader object in the ``detectors`` dictionary as custom ``Detector`` objects. 
Here, all energy and spatial grid data are stored, including other binning information such as reaction, universe, and lattice bins. # %matplotlib inline from matplotlib import pyplot import serpentTools pin = serpentTools.read(pinFile) bwr = serpentTools.read(bwrFile) print(pin.detectors) print(bwr.detectors) # These detectors were defined for a single fuel pin with 16 axial layers and a BWR assembly, with a description of the detectors provided in the output: # # |Name| Description| # |----|------------| # |`nodeFlx`| One-group flux tallied in each axial layer | # |`spectrum`|CSEWG 239 group stucture for flux and U-235 fission cross section| # |`xymesh`|Two-group flux for a 20x20 xy grid| # For each `Detector` object, the full tally matrix from the file is stored in the `bins` array. nodeFlx = pin.detectors['nodeFlx'] print(nodeFlx.bins.shape) nodeFlx.bins[:3,:].T # Here, only three columns, shown as rows for readability, are changing: # # * column 0: universe column # * column 10: tally column # * column 11: errors # Detectors can also be obtained by indexing into the reader object, as nf = pin['nodeFlx'] assert nf is nodeFlx # Tally data is reshaped corresponding to the bin information provided by Serpent. The tally and error columns are recast into multi-dimensional arrays where each dimension is some unique bin type like energy or spatial bin index. For this case, since the only variable bin quantity is that of the changing universe, the ``tallies`` and ``errors`` attributes will be 1D arrays. assert nodeFlx.tallies.shape == (16, ) assert nodeFlx.errors.shape == (16, ) nodeFlx.tallies nodeFlx.errors # Note: Python and numpy arrays are zero-indexed, meaning the first item is accessed with `array[0]`, rather than `array[1]`. # # Bin information is retained through the ``indexes`` attribute. Each entry indicates what bin type is changing along that dimension of ``tallies`` and ``errors``. 
Here, ``universe`` is the first item and indicates that the first dimension of ``tallies`` corresponds to a changing universe bin. nodeFlx.indexes # For detectors that include some grid matrices, such as spatial or energy meshes `DET<name>E`, these arrays are stored in the `grids` dictionary spectrum = bwr.detectors['spectrum'] print(spectrum.grids['E'][:5, :]) # ## Multi-dimensional Detectors # The `Detector` objects are capable of reshaping the detector data into an array where each axis corresponds to a varying bin. In the above examples, the reshaped data was one-dimensional, because the detectors only tallied data against one bin, universe and energy. In the following example, the detector has been configured to tally the fission and capture rates (two `dr` arguments) in an XY mesh. xy = bwr.detectors['xymesh'] xy.indexes # Traversing the first axis in the `tallies` array corresponds to changing the value of the `energy`. The second axis corresponds to changing `ymesh` values, and the final axis reflects changes in `xmesh`. print(xy.bins.shape) print(xy.tallies.shape) print(xy.bins[:5, 10]) print(xy.tallies[0, 0, :5]) # ### Slicing # As the detectors produced by SERPENT can contain multiple bin types, obtaining data from the tally data can become complicated. This retrieval can be simplified using the `slice` method. This method takes an argument indicating what bins (keys in `indexes`) to fix at what position. # # If we want to retrive the tally data for the capture reaction in the `spectrum` detector, you would instruct the `slice` method to use column 1 along the axis that corresponds to the reaction bin, as the fission reaction corresponded to reaction tally 2 in the original matrix. Since python and numpy arrays are zero indexed, the second reaction tally is stored in column 1. 
spectrum.slice({'reaction': 1})[:20] # This method also works for slicing the error, or score, matrix spectrum.slice({'reaction': 1}, 'errors')[:20] # ## Plotting Routines # Each `Detector` object is capable of simple 1D and 2D plotting routines. # The simplest 1D plot method is simply `plot`, however a wide range of plot options are present. # # |Option|Description| # |-|-| # |`what`|What data to plot| # |`ax`|Preprepared figure on which to add this plot| # |`xdim`|Quantity from `indexes` to use as x-axis| # |`sigma`|Confidence interval to place on errors - 1D| # |`steps`|Draw tally values as constant inside bin - 1D| # |`xlabel`|Label to apply to x-axis| # |`ylabel`|Label to apply to y-axis| # |`loglog`|Use a log scalling on both of the axes| # |`logx`|Use a log scaling on the x-axis| # |`logy`|Use a log scaling on the y-axis| # |`legend`|Place a legend on the figure| # |`ncol`|Number of columns to apply to the legend| # # The plot routine also accepts various options, which can be found in the [matplotlib.pyplot.plot documentation](https://matplotlib.org/api/_as_gen/matplotlib.pyplot.plot.html) nodeFlx.plot(); ax = nodeFlx.plot(steps=True, label='steps') ax = nodeFlx.plot(sigma=100, ax=ax, c='k', alpha=0.6, marker='x', label='sigma') # Passing `what='errors'` to the plot method plots the associated relative errors, rather than the tally data on the y-axis. # Similarly, passing a key from `indexes` sets the x-axis to be that specific index. nodeFlx.plot(xdim='universe', what='errors', ylabel='Relative tally error [%]'); # ### Mesh Plots # For data with dimensionality greater than one, the `meshPlot` method can be used to plot some 2D slice of the data on a Cartesian grid. Passing a dictionary as the `fixed` argument restricts the tally data down to two dimensions. # The X and Y axis can be quantities from `grids` or `indexes`. 
If the quantity to be used for an axis is in the `grids` dictionary, then the appropriate spatial or energetic grid from the detector file will be used. Otherwise, the axis will reflect changes in a specific bin type. The following keyword arguments can be used in conjunction with the above options to format the mesh plots. # # |Option|Action| # |------|------| # |`cmap`|Colormap to apply to the figure| # |`cbarLabel`|Label to apply to the colorbar| # |`logScale`|If true, use a logarithmic scale for the colormap| # |`normalizer`|Apply a custom non-linear normalizer to the colormap| # # The `cmap` argument must be something that `matplotlib` can understand as a valid colormap [2]. This can be a string of any of the colormaps supported by matplotlib. # # Since the `xymesh` detector is three dimensions, (energy, x, and y), we must pick an energy group to plot. xy.meshPlot('x', 'y', fixed={'energy': 0}, cbarLabel='Mesh-integrated flux $[n/cm^2/s]$', title="Fast spectrum flux $[>0.625 eV]$"); # The `meshPlot` also supports a range of labeling and plot options. # Here, we attempt to plot the flux and U-235 fission reaction rate errors as a function of energy, with # the two reaction rates separated on the y-axis. Passing `logColor=True` applies a logarithmic color scale to all the positive data. Data that is zero is not shown, and errors will be raised if the data contain negative quantities. # # Here we also apply custom y-tick labels to reflect the reaction that is being plotted. 
ax = spectrum.meshPlot('e', 'reaction', what='errors', ylabel='Reaction type', cmap='PuBu_r', cbarLabel="Relative error $[\%]$", xlabel='Energy [MeV]', logColor=True, logx=True); ax.set_yticks([0.5, 1.5]); ax.set_yticklabels([r'$\psi$', r'$U-235 \sigma_f$'], rotation=90, verticalalignment='center'); # Using the `slicing` arguments allows access to the 1D plot methods from before xy.plot(fixed={'energy': 1, 'xmesh': 1}, xlabel='Y position', ylabel='Thermal flux along x={}' .format(xy.grids['X'][1, 0])); # ### Spectrum Plots # The `Detector` objects are also capable of energy spectrum plots, if an associated energy grid is given. The `normalize` option will normalize the data per unit lethargy. This plot takes some additional assumptions with the scaling and labeling, but all the same controls as the above line plots. # The `spectrum` plot method is designed to prepare plots of energy spectra. Supported arguments for the `spectrumPlot` method include # # |Option|Default|Description| # |-|-|-| # |`normalize`|`True`|Normalize tallies per unit lethargy| # |`fixed`| `None`|Dictionary that controls matrix reduction| # |`sigma`|3|Level of confidence for statistical errors| # |`xscale`|`'log'`|Set the x scale to be log or linear| # |`yscale`|`'linear'`|Set the y scale to be log or linear| # The figure below demonstrates the default options and control in this `spectrumPlot` routine by # # 1. Using the less than helpful plot routine with no formatting # 2. Using `spectrumPlot` without normalization to show default labels and scaling # 3. Using `spectrumPlot` with normalization # # Since our detector has energy bins and reaction bins, we need to reduce down to one-dimension with the `fixed` command. 
fig, axes = pyplot.subplots(1, 3, figsize=(16, 4)) fix = {'reaction': 0} spectrum.plot(fixed=fix, ax=axes[0]); spectrum.spectrumPlot(fixed=fix, ax=axes[1], normalize=False); spectrum.spectrumPlot(fixed=fix, ax=axes[2]); # ### Multiple line plots # Plots can be made against multiple bins, such as spectrum in different materials or reactions, with the ``plot`` and ``spectrumPlot`` # methods. Below is the flux spectrum and spectrum of the U-235 fission reaction rate from the same detector. The ``labels`` argument is what is used to label each individual plot in the order of the bin index. labels = ( 'flux', r'$\sigma_f^{U-235}\psi$') # render as mathtype spectrum.plot(labels=labels, loglog=True); spectrum.spectrumPlot(labels=labels, legend='above', ncol=2); # ## Hexagonal Detectors # SERPENT allows the creation of hexagonal detectors with the `dh` card, like # ``` # det hex2 2 0.0 0.0 1 5 5 0.0 0.0 1 # det hex3 3 0.0 0.0 1 5 5 0.0 0.0 1 # ``` # which would create two hexagonal detectors with different orientations. Type 2 detectors have two faces perpendicular to the x-axis, while type 3 detectors have faces perpendicular to the y-axis. # For more information, see the [dh card from SERPENT wiki](http://serpent.vtt.fi/mediawiki/index.php/Input_syntax_manual#det_dh). # # `serpentTools` is capable of storing data tallies and grid structures from hexagonal detectors in [`HexagonalDetector`](http://serpent-tools.readthedocs.io/en/latest/api/detectors.html#serpentTools.objects.detectors.HexagonalDetector) objects. hexFile = os.path.join( os.environ["SERPENT_TOOLS_DATA"], 'hexplot_det0.m', ) hexR = serpentTools.read(hexFile) hexR.detectors # Here, two `HexagonalDetector` objects are produced, with similar `tallies` and slicing methods as demonstrated above. hex2 = hexR.detectors['hex2'] hex2.tallies hex2.indexes # Creating hexagonal mesh plots with these objects requires setting the `pitch` and `hexType` attributes. 
hex2.pitch = 1 hex2.hexType = 2 hex2.hexPlot(); hex3 = hexR.detectors['hex3'] hex3.pitch = 1 hex3.hexType = 3 hex3.hexPlot(); # ## Limitations # # `serpentTools` does support reading detector files with hexagonal, cylindrical, and spherical mesh structures. However, creating 2D mesh plots with these detectors, and utilizing their mesh structure, is not fully supported. # [Issue #169](https://github.com/CORE-GATECH-GROUP/serpent-tools/issues/169) is currently tracking progress for cylindrical plotting. # ## Conclusion # The `DetectorReader` is capable of reading and storing detector data from SERPENT detector files. The data is stored on custom `Detector` objects, capable of reshaping tally and error matrices into arrays with dimensionality reflecting the detector binning. These `Detector` objects have simple methods for retrieving and plotting detector data. # ## References # 1. <NAME>, <NAME>, <NAME>, <NAME>, and <NAME>. "The Serpent Monte Carlo code: Status, development and applications in 2013." Ann. Nucl. Energy, [82 (2015) 142-150](https://www.sciencedirect.com/science/article/pii/S0306454914004095) # 2. [Matplotlib 2.2.0 Colormaps](https://matplotlib.org/examples/color/colormaps_reference.html)
examples/Detector.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + [markdown] _uuid="748e245881e9a26d54da4c9431786b3f874734de" # # Introduction: Feature Selection # # In this notebook we will apply feature engineering to the manual engineered features built in two previous kernels. We will reduce the number of features using several methods and then we will test the performance of the features using a fairly basic gradient boosting machine model. # # The main takeaways from this notebook are: # # * Going from 1465 total features to 536 and an AUC ROC of 0.783 on the public leaderboard # * A further optional step to go to 342 features and an AUC ROC of 0.782 # # The full set of features was built in [Part One](https://www.kaggle.com/willkoehrsen/introduction-to-manual-feature-engineering) and [Part Two](https://www.kaggle.com/willkoehrsen/introduction-to-manual-feature-engineering-p2) of Manual Feature Engineering # # We will use three methods for feature selection: # # 1. Remove collinear features # 2. Remove features with greater than a threshold percentage of missing values # 3. Keep only the most relevant features using feature importances from a model # # We will also take a look at an example of applying PCA although we will not use this method for feature reduction. # + [markdown] _uuid="97b0d94920dc9e25a5f1dbfd56702124ebc07769" # Standard imports for data science work. The LightGBM library is used for the gradient boosting machine. 
# + _cell_guid="b1076dfc-b9ad-4769-8c92-a6c4dae69d19" _uuid="8f2839f25d086af736a60e9eeb907d3b93b6e0e5" # pandas and numpy for data manipulation import pandas as pd import numpy as np # featuretools for automated feature engineering import featuretools as ft # matplotlit and seaborn for visualizations import matplotlib.pyplot as plt plt.rcParams['font.size'] = 22 import seaborn as sns # Suppress warnings from pandas import warnings warnings.filterwarnings('ignore') # modeling import lightgbm as lgb # utilities from sklearn.model_selection import train_test_split from sklearn.model_selection import KFold from sklearn.metrics import roc_auc_score from sklearn.preprocessing import LabelEncoder # memory management import gc # + [markdown] _uuid="06695931219858e57330b8574af5d9028c9e50c2" # * `train_bureau` is the training features built manually using the `bureau` and `bureau_balance` data # * `train_previous` is the training features built manually using the `previous`, `cash`, `credit`, and `installments` data # # We first will see how many features we built over the manual engineering process. Here we use a couple of set operations to find the columns that are only in the `bureau`, only in the `previous`, and in both dataframes, indicating that there are `original` features from the `application` dataframe. Here we are working with a small subset of the data in order to not overwhelm the kernel. This code has also been run on the full dataset (we will take a look at some of the results). 
# + _cell_guid="79c7e3d0-c299-4dcb-8224-4455121ee9b0" _uuid="d629ff2d2480ee46fbb7e2d37f6b5fab8052498a" # Read in data train_bureau = pd.read_csv('../input/home-credit-manual-engineered-features/train_bureau_raw.csv', nrows = 1000) test_bureau = pd.read_csv('../input/home-credit-manual-engineered-features/test_bureau_raw.csv', nrows = 1000) train_previous = pd.read_csv('../input/home-credit-manual-engineered-features/train_previous_raw.csv', nrows = 1000) test_previous = pd.read_csv('../input/home-credit-manual-engineered-features/test_previous_raw.csv', nrows = 1000) # All columns in dataframes bureau_columns = list(train_bureau.columns) previous_columns = list(train_previous.columns) # + _uuid="af78939941b510855d5a972ca24e33037c003977" # Bureau only features bureau_features = list(set(bureau_columns) - set(previous_columns)) # Previous only features previous_features = list(set(previous_columns) - set(bureau_columns)) # Original features will be in both datasets original_features = list(set(previous_columns) & set(bureau_columns)) print('There are %d original features.' % len(original_features)) print('There are %d bureau and bureau balance features.' % len(bureau_features)) print('There are %d previous Home Credit loan features.' % len(previous_features)) # + [markdown] _uuid="951caa5d7f4a0033ff4148039d73ba153034cb61" # That gives us the number of features in each dataframe. Now we want to combine the data without creating any duplicate rows. 
# + _uuid="b0da702050879ddfb8aecc4c23186e7ee97923ba"
# Hold on to the labels and the client ids before merging/encoding
train_labels = train_bureau['TARGET']
train_ids = train_bureau['SK_ID_CURR']
test_ids = test_bureau['SK_ID_CURR']

# The join key must be part of the previous-loan feature subset
previous_features.append('SK_ID_CURR')

# Merge the dataframes; subsetting train_previous to its unique columns
# avoids duplicating the shared (original) columns
train = train_bureau.merge(train_previous[previous_features], on = 'SK_ID_CURR')
test = test_bureau.merge(test_previous[previous_features], on = 'SK_ID_CURR')

print('Training shape: ', train.shape)
print('Testing shape: ', test.shape)

# + [markdown] _uuid="f20574a7e1edf8fc4398c79fcec85a261edc1c44"
# Next we want to one-hot encode the dataframes. This doesn't give the full features since we are only working with a sample of the data and this will not create as many columns as one-hot encoding the entire dataset would. Doing this to the full dataset results in 1465 features.
#
# An important note in the code cell is where we __align the dataframes by the columns.__ This ensures we have the same columns in the training and testing datasets.

# + _uuid="356f67ea6d19d408bc19e1a58ee5f23693a3944a"
# One hot encoding of the categorical columns
train = pd.get_dummies(train)
test = pd.get_dummies(test)

# Match the columns in the dataframes: an inner align keeps only
# the columns present in both train and test
train, test = train.align(test, join = 'inner', axis = 1)

print('Training shape: ', train.shape)
print('Testing shape: ', test.shape)

# + [markdown] _uuid="c2031dd11cb58242acb31db9b3557603e1de5dce"
# When we do this to the full dataset, we get __1465__ features.

# + [markdown] _uuid="ecd8a30162d43fe05e14ef93cb88de29f933f8e9"
# ### Admit and Correct Mistakes!
#
# When doing manual feature engineering, I accidentally created some columns derived from the client id, `SK_ID_CURR`. As this is a unique identifier for each client, it should not have any predictive power, and we would not want to build a model trained on this "feature". Let's remove any columns built on the `SK_ID_CURR`.
# + _uuid="330d43c9cf3e7236fbd4a6852a10efd014b433ce"
# Find every column whose name was derived from one of the id fields
cols_with_id = [col for col in train.columns if 'SK_ID_CURR' in col]
cols_with_bureau_id = [col for col in train.columns if 'SK_ID_BUREAU' in col]
cols_with_previous_id = [col for col in train.columns if 'SK_ID_PREV' in col]

print('There are %d columns that contain SK_ID_CURR' % len(cols_with_id))
print('There are %d columns that contain SK_ID_BUREAU' % len(cols_with_bureau_id))
print('There are %d columns that contain SK_ID_PREV' % len(cols_with_previous_id))

# Only the SK_ID_CURR-derived columns are dropped here
train = train.drop(columns = cols_with_id)
test = test.drop(columns = cols_with_id)

print('Training shape: ', train.shape)
print('Testing shape: ', test.shape)

# + [markdown] _uuid="d6f0e15b600f90d4062faee276640c371cb655eb"
# After applying this to the full dataset, we end up with __1416__ features. More features might seem like a good thing, and they can be if they help our model learn. However, irrelevant features, highly correlated features, and missing values can prevent the model from learning and decrease generalization performance on the testing data. Therefore, we perform feature selection to keep only the most useful variables.
#
# We will start feature selection by focusing on collinear variables.

# + [markdown] _uuid="c402926c8d265ad46e8f9bc5b1683563352cb496"
# # Remove Collinear Variables
#
# Collinear variables are those which are highly correlated with one another. These can decrease the model's ability to learn, decrease model interpretability, and decrease generalization performance on the test set. Clearly, these are three things we want to increase, so removing collinear variables is a useful step. We will establish an admittedly arbitrary threshold for removing collinear variables, and then remove one out of any pair of variables that is above that threshold.
#
# The code below identifies the highly correlated variables based on the absolute magnitude of the Pearson correlation coefficient being greater than 0.9.
# Again, this is not entirely accurate since we are dealing with such a limited section of the data. This code is for illustration purposes, but if we read in the entire dataset, it would work (if the kernels allowed it)!
#
# This code is adapted from [work by Chris Albon](https://chrisalbon.com/machine_learning/feature_selection/drop_highly_correlated_features/).

# + [markdown] _uuid="5100a4ad6c6586b2637139db8ef4e67df5ad49a8"
# ### Identify Correlated Variables

# + _uuid="59259cf2be69f0c5e699109f9177287599836354"
# Threshold for removing correlated variables (absolute Pearson correlation)
threshold = 0.9

# Absolute value correlation matrix
corr_matrix = train.corr().abs()
corr_matrix.head()

# + _uuid="7c33392948540b626cf213fa6dea316766627eb9"
# Upper triangle of correlations: mask the lower triangle and diagonal so
# each correlated pair is considered exactly once.
# NOTE: use the builtin `bool` here -- the deprecated `np.bool` alias was
# removed in NumPy 1.24 and raises an AttributeError.
upper = corr_matrix.where(np.triu(np.ones(corr_matrix.shape), k=1).astype(bool))
upper.head()

# + _uuid="1517d554f80a8eed2c0e03303fe7d1fb4a469131"
# Select columns with any correlation above the threshold; dropping the
# column keeps the other member of each correlated pair.
to_drop = [column for column in upper.columns if any(upper[column] > threshold)]

print('There are %d columns to remove.' % (len(to_drop)))

# + [markdown] _uuid="fc6bf1f1a4c98749727522bb5a3ae89adf145f8d"
# #### Drop Correlated Variables

# + _uuid="2c18834968dac305317a5a881308a0c9a3cce353"
train = train.drop(columns = to_drop)
test = test.drop(columns = to_drop)

print('Training shape: ', train.shape)
print('Testing shape: ', test.shape)

# + [markdown] _uuid="52eada9c579b09cc97c338ea372554aa8d52f9df"
# Applying this on the entire dataset __results in 538 collinear features__ removed.
#
# This has reduced the number of features significantly, but it is likely still too many. At this point, we'll read in the full dataset after removing correlated variables for further feature selection.
#
# The full datasets (after removing correlated variables) are available in `m_train_combined.csv` and `m_test_combined.csv`.
# + [markdown] _uuid="15973fdfb3169e1d816136c68764c80d1368000e" # ### Read in Full Dataset # # Now we are ready to move on to the full set of features. These were built by applying the above steps to the entire `train_bureau` and `train_previous` files (you can do the same if you want and have the computational resources)! # + _uuid="e8384e3033ee466ab3c426d8330822e974ad3c1b" train = pd.read_csv('../input/home-credit-manual-engineered-features/m_train_combined.csv') test = pd.read_csv('../input/home-credit-manual-engineered-features/m_test_combined.csv') # + _uuid="18d3be4bd305a82d6bba1c2a6b7d2876abb098d9" print('Training set full shape: ', train.shape) print('Testing set full shape: ' , test.shape) # + [markdown] _uuid="e9c652ca3eb3a8770195469abf77ad71d15b3fe0" # # Remove Missing Values # # A relatively simple choice of feature selection is removing missing values. Well, it seems simple, at least until we have to decide what percentage of missing values is the minimum threshold for removing a column. Like many choices in machine learning, there is no right answer, and not even a general rule of thumb for making this choice. In this implementation, if any columns have greater than 75% missing values, they will be removed. # # Most models (including those in Sk-Learn) cannot handle missing values, so we will have to fill these in before machine learning. The Gradient Boosting Machine ([at least in LightGBM](https://github.com/Microsoft/LightGBM/blob/master/docs/Advanced-Topics.rst)) can handle missing values. Imputing missing values always makes me a little uncomfortable because we are adding information that actually isn't in the dataset. Since we are going to be evaluating several models (in a later notebook), we will have to use some form of imputation. For now, we will focus on removing columns above the threshold. 
# + _uuid="7849b6e3f53cd08850e9e5797e7256b08a823acb"
# Train missing values (in percent); mean of the isnull mask equals sum/len
train_missing = train.isnull().mean().sort_values(ascending = False)
train_missing.head()

# + _uuid="f7b485689bedd322ea3eb9083822f814719add0b"
# Test missing values (in percent)
test_missing = test.isnull().mean().sort_values(ascending = False)
test_missing.head()

# + _uuid="9ab5d9001ab54692f0e2bc4e1d976d734771891c"
# Identify columns whose missing fraction exceeds the 75% threshold
train_missing = train_missing.index[train_missing > 0.75]
test_missing = test_missing.index[test_missing > 0.75]

# Union of the columns flagged in either dataset
all_missing = list(set(train_missing) | set(test_missing))
print('There are %d columns with more than 75%% missing values' % len(all_missing))

# + [markdown] _uuid="fc8e7bdb14e7e65a257c93d5c45a882098971360"
# Let's drop the columns, one-hot encode the dataframes, and then align the columns of the dataframes.

# + _uuid="9478bfc3069bf9b4e192c45cf323c1ea84ca7b2c"
# Need to save the labels because aligning will remove this column
train_labels = train["TARGET"]
train_ids = train['SK_ID_CURR']
test_ids = test['SK_ID_CURR']

# Drop the heavily-missing columns, then one-hot encode and align
train = pd.get_dummies(train.drop(columns = all_missing))
test = pd.get_dummies(test.drop(columns = all_missing))

train, test = train.align(test, join = 'inner', axis = 1)

print('Training set full shape: ', train.shape)
print('Testing set full shape: ' , test.shape)

# + _uuid="06a7110748fe39b0ae8248ed62dfeb844a124ca1"
# The client id carries no predictive information, so remove it
train, test = train.drop(columns = ['SK_ID_CURR']), test.drop(columns = ['SK_ID_CURR'])

# + [markdown] _uuid="d4d38ab010cddd88e714894e09d8733cc60daafd"
# # Feature Selection through Feature Importances
#
# The next method we can employ for feature selection is to use the feature importances of a model. Tree-based models (and consequently ensembles of trees) can determine an "importance" for each feature by measuring the reduction in impurity for including the feature in the model.
# I'm not really sure what that means (any explanations would be welcome) and the absolute value of the importance can be difficult to interpret. However, the relative value of the importances can be used as an approximation of the "relevance" of different features in a model. Moreover, we can use the feature importances to remove features that the model does not consider important.
#
# One method for doing this automatically is the [Recursive Feature Elimination method](http://scikit-learn.org/stable/modules/generated/sklearn.feature_selection.RFE.html) in Scikit-Learn. This accepts an estimator (one that either returns feature weights such as a linear regression, or feature importances such as a random forest) and a desired number of features. It then fits the model repeatedly on the data and iteratively removes the lowest importance features until the desired number of features is left. This means we have another arbitrary hyperparameter to use in our pipeline: the number of features to keep!
#
# Instead of doing this automatically, we can perform our own feature removal by first removing all zero-importance features from the model. If this leaves too many features, then we can consider removing the features with the lowest importance. We will use a Gradient Boosted Model from the LightGBM library to assess feature importances. If you're used to the Scikit-Learn library, the LightGBM library has an API that makes deploying the model very similar to using a Scikit-Learn model.

# + [markdown] _uuid="6525f690933a50ab1eb7f360629b98d9a2f6df37"
# Since the LightGBM model does not need missing values to be imputed, we can directly `fit` on the training data. We will use Early Stopping to determine the optimal number of iterations and run the model twice, averaging the feature importances to try and avoid overfitting to a certain set of features.
# + _uuid="4741055961c566f3a2e9d60893950cade5a57330" # Initialize an empty array to hold feature importances feature_importances = np.zeros(train.shape[1]) # Create the model with several hyperparameters model = lgb.LGBMClassifier(objective='binary', boosting_type = 'goss', n_estimators = 10000, class_weight = 'balanced') # + _uuid="4535a84ea5b2c55f7b443832c7a5b2b77894e511" # Fit the model twice to avoid overfitting for i in range(2): # Split into training and validation set train_features, valid_features, train_y, valid_y = train_test_split(train, train_labels, test_size = 0.25, random_state = i) # Train using early stopping model.fit(train_features, train_y, early_stopping_rounds=100, eval_set = [(valid_features, valid_y)], eval_metric = 'auc', verbose = 200) # Record the feature importances feature_importances += model.feature_importances_ # + _uuid="68a0ce264d8182bffa27fca668d81040fdc3b9ba" # Make sure to average feature importances! feature_importances = feature_importances / 2 feature_importances = pd.DataFrame({'feature': list(train.columns), 'importance': feature_importances}).sort_values('importance', ascending = False) feature_importances.head() # + _uuid="54f05dafeccd5f3800967fa304cbcf3b3076a5c2" # Find the features with zero importance zero_features = list(feature_importances[feature_importances['importance'] == 0.0]['feature']) print('There are %d features with 0.0 importance' % len(zero_features)) feature_importances.tail() # + [markdown] _uuid="68f738649fcc774a3eeceb2c602d00d9f4ff841d" # We see that one of our features made it into the top 5 most important! That's a good sign for all of our hard work making the features. It also looks like many of the features we made have literally 0 importance. For the gradient boosting machine, features with 0 importance are not used at all to make any splits. Therefore, we can remove these features from the model with no effect on performance (except for faster training). 
# + _uuid="144732dd9d88d13b06983590b9d34970ff6c9a69" def plot_feature_importances(df, threshold = 0.9): """ Plots 15 most important features and the cumulative importance of features. Prints the number of features needed to reach threshold cumulative importance. Parameters -------- df : dataframe Dataframe of feature importances. Columns must be feature and importance threshold : float, default = 0.9 Threshold for prining information about cumulative importances Return -------- df : dataframe Dataframe ordered by feature importances with a normalized column (sums to 1) and a cumulative importance column """ plt.rcParams['font.size'] = 18 # Sort features according to importance df = df.sort_values('importance', ascending = False).reset_index() # Normalize the feature importances to add up to one df['importance_normalized'] = df['importance'] / df['importance'].sum() df['cumulative_importance'] = np.cumsum(df['importance_normalized']) # Make a horizontal bar chart of feature importances plt.figure(figsize = (10, 6)) ax = plt.subplot() # Need to reverse the index to plot most important on top ax.barh(list(reversed(list(df.index[:15]))), df['importance_normalized'].head(15), align = 'center', edgecolor = 'k') # Set the yticks and labels ax.set_yticks(list(reversed(list(df.index[:15])))) ax.set_yticklabels(df['feature'].head(15)) # Plot labeling plt.xlabel('Normalized Importance'); plt.title('Feature Importances') plt.show() # Cumulative importance plot plt.figure(figsize = (8, 6)) plt.plot(list(range(len(df))), df['cumulative_importance'], 'r-') plt.xlabel('Number of Features'); plt.ylabel('Cumulative Importance'); plt.title('Cumulative Feature Importance'); plt.show(); importance_index = np.min(np.where(df['cumulative_importance'] > threshold)) print('%d features required for %0.2f of cumulative importance' % (importance_index + 1, threshold)) return df # + _uuid="ea0dcbff339b6ffdfd654d165cdb96268311ba95" norm_feature_importances = 
plot_feature_importances(feature_importances) # + [markdown] _uuid="e8df33430fe5bd31d83e5d2ca5050c0864b34488" # Let's remove the features that have zero importance. # + _uuid="e3849b51615d52eee53e3c919c17c6066ae29397" train = train.drop(columns = zero_features) test = test.drop(columns = zero_features) print('Training shape: ', train.shape) print('Testing shape: ', test.shape) # + [markdown] _uuid="c67926fdcb81d3f438510f06cadbba1edbc70879" # At this point, we can re-run the model to see if it identifies any more features with zero importance. In a way, we are implementing our own form of recursive feature elimination. Since we are repeating work, we should probably put the zero feature importance identification code in a function. # + _uuid="d09ec2ad863688243bc95c51aa43a559f56ad4fb" def identify_zero_importance_features(train, train_labels, iterations = 2): """ Identify zero importance features in a training dataset based on the feature importances from a gradient boosting model. Parameters -------- train : dataframe Training features train_labels : np.array Labels for training data iterations : integer, default = 2 Number of cross validation splits to use for determining feature importances """ # Initialize an empty array to hold feature importances feature_importances = np.zeros(train.shape[1]) # Create the model with several hyperparameters model = lgb.LGBMClassifier(objective='binary', boosting_type = 'goss', n_estimators = 10000, class_weight = 'balanced') # Fit the model multiple times to avoid overfitting for i in range(iterations): # Split into training and validation set train_features, valid_features, train_y, valid_y = train_test_split(train, train_labels, test_size = 0.25, random_state = i) # Train using early stopping model.fit(train_features, train_y, early_stopping_rounds=100, eval_set = [(valid_features, valid_y)], eval_metric = 'auc', verbose = 200) # Record the feature importances feature_importances += model.feature_importances_ / iterations 
feature_importances = pd.DataFrame({'feature': list(train.columns), 'importance': feature_importances}).sort_values('importance', ascending = False) # Find the features with zero importance zero_features = list(feature_importances[feature_importances['importance'] == 0.0]['feature']) print('\nThere are %d features with 0.0 importance' % len(zero_features)) return zero_features, feature_importances # + _uuid="f69eede0e8e04f1f4c850e5c102dcdebcdb107e7" second_round_zero_features, feature_importances = identify_zero_importance_features(train, train_labels) # + [markdown] _uuid="c877ed509374d9494c508a8b920e92c926453f84" # There are now no 0 importance features left (I guess we should have expected this). If we want to remove more features, we will have to start with features that have a non-zero importance. One way we could do this is by retaining enough features to account for a threshold percentage of importance, such as 95%. At this point, let's keep enough features to account for 95% of the importance. Again, this is an arbitrary decision! # + _uuid="bb6dd17a7417f3b67017e7e8e18c09725df6e434" norm_feature_importances = plot_feature_importances(feature_importances, threshold = 0.95) # + [markdown] _uuid="1cfc6829b62a44efcc7aeba227db4b293b76bfd5" # We can keep only the features needed for 95% importance. This step seems to me to have the greatest chance of harming the model's learning ability, so rather than changing the original dataset, we will make smaller copies. Then, we can test both versions of the data to see if the extra feature removal step is worthwhile. 
# + _uuid="55a534e48b21d4038413a11ff3aad5e025bba5bd"
# Threshold for cumulative importance
threshold = 0.95

# Extract the features to keep (those below the cumulative importance threshold)
features_to_keep = list(norm_feature_importances[norm_feature_importances['cumulative_importance'] < threshold]['feature'])

# Create new datasets with smaller features
train_small = train[features_to_keep]
test_small = test[features_to_keep]

# + _uuid="b004fb636fee6ad313296c185273a5c0058afc78"
# Add the labels and client ids back in so the saved files are self-contained
train_small['TARGET'] = train_labels
train_small['SK_ID_CURR'] = train_ids
test_small['SK_ID_CURR'] = test_ids

train_small.to_csv('m_train_small.csv', index = False)
test_small.to_csv('m_test_small.csv', index = False)

# + [markdown] _uuid="b4f2c70170ca1ec372e9646edecac79bba561cd6"
# # Test New Featuresets
#
# The last step of feature removal we did seems like it may have the potential to hurt the model the most. Therefore we want to test the effect of this removal. To do that, we can use a standard model and change the features.
#
# We will use a fairly standard LightGBM model, similar to the one we used for feature selection. The main difference is this model uses five-fold cross validation for training and we use it to make predictions. There's a lot of code here, but that's because I included documentation and a few extras (such as feature importances) that aren't strictly necessary. For now, understanding the entire model isn't critical, just know that we are using the same model with two different datasets to see which one performs the best.

# + _uuid="cf8d66553230c1a535438f3a453ca3a05da7bc29"
def model(features, test_features, encoding = 'ohe', n_folds = 5):
    """Train and test a light gradient boosting model using cross validation.

    Parameters
    --------
        features (pd.DataFrame):
            dataframe of training features to use
            for training a model. Must include the TARGET column.
        test_features (pd.DataFrame):
            dataframe of testing features to use
            for making predictions with the model.
        encoding (str, default = 'ohe'):
            method for encoding categorical variables. Either 'ohe' for
            one-hot encoding or 'le' for integer label encoding
        n_folds (int, default = 5): number of folds to use for cross validation

    Return
    --------
        submission (pd.DataFrame):
            dataframe with `SK_ID_CURR` and `TARGET` probabilities
            predicted by the model.
        feature_importances (pd.DataFrame):
            dataframe with the feature importances from the model.
        valid_metrics (pd.DataFrame):
            dataframe with training and validation metrics (ROC AUC)
            for each fold and overall.

    Raises
    --------
        ValueError: if `encoding` is neither 'ohe' nor 'le'.
    """

    # Extract the ids
    train_ids = features['SK_ID_CURR']
    test_ids = test_features['SK_ID_CURR']

    # Extract the labels for training
    labels = features['TARGET']

    # Remove the ids and target
    features = features.drop(columns = ['SK_ID_CURR', 'TARGET'])
    test_features = test_features.drop(columns = ['SK_ID_CURR'])

    # One Hot Encoding
    if encoding == 'ohe':
        features = pd.get_dummies(features)
        test_features = pd.get_dummies(test_features)

        # Align the dataframes by the columns
        features, test_features = features.align(test_features, join = 'inner', axis = 1)

        # No categorical indices to record
        cat_indices = 'auto'

    # Integer label encoding
    elif encoding == 'le':

        # Create a label encoder
        label_encoder = LabelEncoder()

        # List for storing categorical indices
        cat_indices = []

        # Iterate through each column
        for i, col in enumerate(features):
            if features[col].dtype == 'object':
                # Map the categorical features to integers
                features[col] = label_encoder.fit_transform(np.array(features[col].astype(str)).reshape((-1,)))
                test_features[col] = label_encoder.transform(np.array(test_features[col].astype(str)).reshape((-1,)))

                # Record the categorical indices
                cat_indices.append(i)

    # Catch error if label encoding scheme is not valid
    else:
        raise ValueError("Encoding must be either 'ohe' or 'le'")

    print('Training Data Shape: ', features.shape)
    print('Testing Data Shape: ', test_features.shape)

    # Extract feature names
    feature_names = list(features.columns)

    # Convert to np arrays
    features = np.array(features)
    test_features = np.array(test_features)

    # Create the kfold object.
    # NOTE: `random_state` must be omitted when shuffle=False -- recent
    # scikit-learn versions raise a ValueError for that combination, and it
    # had no effect anyway since the folds are taken in order.
    k_fold = KFold(n_splits = n_folds, shuffle = False)

    # Empty array for feature importances
    feature_importance_values = np.zeros(len(feature_names))

    # Empty array for test predictions
    test_predictions = np.zeros(test_features.shape[0])

    # Empty array for out of fold validation predictions
    out_of_fold = np.zeros(features.shape[0])

    # Lists for recording validation and training scores
    valid_scores = []
    train_scores = []

    # Iterate through each fold
    for train_indices, valid_indices in k_fold.split(features):

        # Training data for the fold
        train_features, train_labels = features[train_indices], labels[train_indices]
        # Validation data for the fold
        valid_features, valid_labels = features[valid_indices], labels[valid_indices]

        # Create the model
        model = lgb.LGBMClassifier(n_estimators=10000, objective = 'binary',
                                   boosting_type = 'goss',
                                   class_weight = 'balanced', learning_rate = 0.05,
                                   reg_alpha = 0.1, reg_lambda = 0.1,
                                   n_jobs = -1, random_state = 50)

        # Train the model with early stopping on the validation AUC
        model.fit(train_features, train_labels, eval_metric = 'auc',
                  eval_set = [(valid_features, valid_labels), (train_features, train_labels)],
                  eval_names = ['valid', 'train'], categorical_feature = cat_indices,
                  early_stopping_rounds = 100, verbose = 200)

        # Record the best iteration
        best_iteration = model.best_iteration_

        # Record the feature importances (averaged over folds)
        feature_importance_values += model.feature_importances_ / k_fold.n_splits

        # Make predictions (averaged over folds)
        test_predictions += model.predict_proba(test_features, num_iteration = best_iteration)[:, 1] / k_fold.n_splits

        # Record the out of fold predictions
        out_of_fold[valid_indices] = model.predict_proba(valid_features, num_iteration = best_iteration)[:, 1]

        # Record the best score
        valid_score = model.best_score_['valid']['auc']
        train_score = model.best_score_['train']['auc']

        valid_scores.append(valid_score)
        train_scores.append(train_score)

        # Clean up memory before the next fold
        gc.enable()
        del model, train_features, valid_features
        gc.collect()

    # Make the submission dataframe
    submission = pd.DataFrame({'SK_ID_CURR': test_ids, 'TARGET': test_predictions})

    # Make the feature importance dataframe
    feature_importances = pd.DataFrame({'feature': feature_names, 'importance': feature_importance_values})

    # Overall validation score (out-of-fold predictions against true labels)
    valid_auc = roc_auc_score(labels, out_of_fold)

    # Add the overall scores to the metrics
    valid_scores.append(valid_auc)
    train_scores.append(np.mean(train_scores))

    # Needed for creating dataframe of validation scores
    fold_names = list(range(n_folds))
    fold_names.append('overall')

    # Dataframe of validation scores
    metrics = pd.DataFrame({'fold': fold_names,
                            'train': train_scores,
                            'valid': valid_scores})

    return submission, feature_importances, metrics

# + [markdown] _uuid="e116f579927c9dbb930ca2cf313e1a42fe0e61c5"
# ### Test "Full" Dataset
#
# This is the expanded dataset. To recap the process to make this dataset we:
#
# * Removed collinear features as measured by the correlation coefficient greater than 0.9
# * Removed any columns with greater than 75% missing values in the train or test set
# * Removed all features with zero feature importances

# + _uuid="3560addd3c31f5c21235062257bf022e31a872d3"
train['TARGET'] = train_labels
train['SK_ID_CURR'] = train_ids
test['SK_ID_CURR'] = test_ids

submission, feature_importances, metrics = model(train, test)

# + _uuid="400cf0845130330e64dd59705a16488d077f4d27"
metrics

# + _uuid="bd41dd2fc42440ce495fd42e4819b00e5658b656"
submission.to_csv('selected_features_submission.csv', index = False)

# + [markdown] _uuid="fe3efd3457d02bf99dfa033d01f870487f3314c1"
# The full features after feature selection score __0.783__ when submitted to the public leaderboard.
# + [markdown] _uuid="b5e46e877e2be014cb9a821de7c93efb6d90f036" # ### Test "Small" Dataset # # The small dataset requires one additional step over the ful l dataset: # # * Keep only features needed to reach 95% cumulative importance in the gradient boosting machine # + _uuid="a3a34bde5e4be879d334fff74a246c6c11b679e6" submission_small, feature_importances_small, metrics_small = model(train_small, test_small) # + _uuid="beed5d5aed49e7a61db907778b8b717729697d76" metrics_small # + _uuid="edb183a37eaeff22deb3f5afb04b6da480997e63" submission_small.to_csv('selected_features_small_submission.csv', index = False) # + [markdown] _uuid="510e06476d8106c2fda7eb857ff55e5caf57ccfe" # The smaller featureset scores __0.782__ when submitted to the public leaderboard. # + [markdown] _uuid="5055adbaffab2f8ce2ee2e062995f19a0e15ceb7" # # Other Options for Dimensionality Reduction # # We only covered a small portion of the techniques used for feature selection/dimensionality reduction. There are many other methods such as: # # * PCA: Principle Components Analysis (PCA) # * ICA: Independent Components Analysis (ICA) # * Manifold learning: [also called non-linear dimensionality reduction](https://stats.stackexchange.com/questions/247907/what-is-the-difference-between-manifold-learning-and-non-linear-dimensionality-r) # # PCA is a great method for reducing the number of features provided that you do not care about model interpretability. It projects the original set of features onto a lower dimension, in the process, eliminating any physical representation behind the features. Here's a pretty thorough introduction to the math for anyone interested. PCA also assumes that the data is Gaussian distributed, which may not be the case, especially when dealing with real-world human generated data. # # ICA representations also obscure any physical meaning behind the variables and presevere the most "independent" dimensions of the data (which is different than the dimensions with the most variance). 
#
# Manifold learning is more often used for low-dimensional visualizations (such as with T-SNE or LLE) rather than for dimensionality reduction for a classifier. These methods are heavily dependent on several hyperparameters and are not deterministic, which means that there is no way to apply them to new data (in other words you cannot `fit` them to the training data and then separately `transform` the testing data). The learned representation of a dataset will change every time you apply manifold learning, so it is not generally a stable method for feature selection.

# + [markdown] _uuid="be760a6dd27ed9a4bcc3040953ffd5b5a4b87877"
# ## PCA Example
#
# We can go through a quick example to show how PCA is implemented. Without going through too many details, PCA finds a new set of axes (the principal components) that maximize the amount of variance captured in the data. The original data is then projected down onto these principal components. The idea is that we can use fewer principal components than the original number of features while still capturing most of the variance. PCA is implemented in Scikit-Learn in the same way as preprocessing methods. We can either select the number of new components, or the fraction of variance we want explained in the data. If we pass in no argument, the number of principal components will be the same as the number of original features. We can then use the `explained_variance_ratio_` to determine the number of components needed for different thresholds of variance retained.
# + _uuid="abd2c85ee9efd0c28afe9a27fb4cc283ce987f18"
from sklearn.decomposition import PCA
# `sklearn.preprocessing.Imputer` was deprecated in scikit-learn 0.20 and
# removed in 0.22; `SimpleImputer` is the drop-in replacement with the
# same `strategy` semantics.
from sklearn.impute import SimpleImputer
from sklearn.pipeline import Pipeline

# Make sure to drop the ids and target
train = train.drop(columns = ['SK_ID_CURR', 'TARGET'])
test = test.drop(columns = ['SK_ID_CURR'])

# Make a pipeline with imputation and pca (PCA cannot handle missing values,
# so impute with the column median first)
pipeline = Pipeline(steps = [('imputer', SimpleImputer(strategy = 'median')),
                             ('pca', PCA())])

# Fit and transform on the training data
train_pca = pipeline.fit_transform(train)

# transform the testing data (using the statistics learned on train)
test_pca = pipeline.transform(test)

# + _uuid="5fb2b6315382ab104c8d9dcef031931fd6cbdb9d"
# Extract the pca object
pca = pipeline.named_steps['pca']

# Plot the cumulative variance explained
plt.figure(figsize = (10, 8))
plt.plot(list(range(train.shape[1])), np.cumsum(pca.explained_variance_ratio_), 'r-')
plt.xlabel('Number of PC'); plt.ylabel('Cumulative Variance Explained');
plt.title('Cumulative Variance Explained with PCA');

# + [markdown] _uuid="1c0152d9589f7ba1717b49ef320eee64f50a0025"
# We only need a few principal components to account for the majority of variance in the data. We can use the first two principal components to visualize the entire dataset. We will color the datapoints by the value of the target to see if using two principal components clearly separates the classes.
# + _uuid="505d054dcfe9fbb92bc9765838722936cb46b30a"
# Dataframe of pca results
pca_df = pd.DataFrame({'pc_1': train_pca[:, 0], 'pc_2': train_pca[:, 1], 'target': train_labels})

# Plot pc2 vs pc1 colored by target.
# Modern seaborn requires `x`/`y` as keyword arguments and renamed the
# figure-size kwarg from `size` to `height` (seaborn >= 0.9).
sns.lmplot(x = 'pc_1', y = 'pc_2', data = pca_df, hue = 'target', fit_reg = False, height = 10)
plt.title('PC2 vs PC1 by Target');

# + _uuid="38cfcec89fe2df6bb38d2b5b9727682da7bfdcc8"
print('2 principal components account for {:.4f}% of the variance.'.format(100 * np.sum(pca.explained_variance_ratio_[:2])))

# + [markdown] _uuid="0f661dd92a55198ce0016f16a7eacf6ac2003b8d"
# Even though we have accounted for most of the variance, that does not mean the pca decomposition makes the problem of identifying loans repaid vs not repaid any easier. PCA does not consider the value of the label when projecting the features to a lower dimension. Feel free to try a classifier on top of this data, but when I have done so, I noticed that it was not very accurate.

# + [markdown] _uuid="054116a7cd95f8baeecf4ada05a21eddaaaad89e"
# # Conclusions
#
# In this notebook we employed a number of feature selection methods. These methods are necessary to reduce the number of features to increase model interpretability, decrease model runtime, and increase generalization performance on the test set. The methods of feature selection we used are:
#
# 1. Remove highly collinear variables as measured by a correlation coefficient greater than 0.9
# 2. Remove any columns with more than 75% missing values.
# 3. Remove any features with a zero importance as determined by a gradient boosting machine.
# 4. (Optional) keep only enough features to account for 95% of the importance in the gradient boosting machine.
#
# Using the first three methods, we reduced the number of features from __1465__ to __536__ with a 5-fold cv AUC ROC score of 0.7838 and a public leaderboard score of 0.783.
# # After applying the fourth method, we end up with 342 features with a 5-fold cv AUC SCORE of 0.7482 and a public leaderboard score of 0.782. # # Going forward, we might actually want to add _more_ features except this time, instead of naively applying aggregations, think about what features are actually important from a domain point of view. There are a number of kernels that have created useful features that we can add to our set here to improve performance. The process of feature engineering - feature selection is iterative, and it may require several more passes before we get it completely right! # + _uuid="07501aa5454fd60c183efb98c733b953ba6ae10d"
automaticFeatureEngineering/Feature Selection.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # name: python3 # --- # + [markdown] id="view-in-github" colab_type="text" # <a href="https://colab.research.google.com/github/sirb2/Data-Visualization-Projects-with-Python-Libraries/blob/main/Dataset_Analysis_with_pandas_library.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a> # + [markdown] id="azReToS7wWX3" # #### Goal of the Project # # This project is designed for you to practice and solve the activities that are based on the concepts covered in the following lessons: # # 1. Simple linear regression III - Model Evaluation # # + [markdown] id="fR_SN7K6475D" # ### Problem Statement # # The most important factor for an Insurance Company is to determine what premium charges must be paid by an individual. The charges depend on various factors like age, gender, income, etc. # # Build a model that is capable of predicting the insurance charges a person has to pay depending on his/her age using simple linear regression. Also, evaluate the accuracy of your model by calculating the value of error metrics such as R-squared, MSE, RMSE, and MAE. # # # # + [markdown] id="lZt4yKiJwrUs" # # #### Activity 1: Analysing the Dataset # # - Create a Pandas DataFrame for **Insurance** dataset using the below link. 
This dataset consists of following columns: # # |Field|Description| # |---:|:---| # |age|Age of primary beneficiary| # |sex|Insurance contractor gender, female or male| # |bmi|Body mass index| # |children|Number of children covered by health insurance/number of dependents| # |region|Beneficiary's residential area in the US, northeast, southeast, southwest, northwest| # |charges|Individual medical costs billed by health insurance| # # # **Dataset Link:** https://s3-student-datasets-bucket.whjr.online/whitehat-ds-datasets/insurance_dataset.csv # # - Print the first five rows of the dataset. Check for null values and treat them accordingly. # # - Create a regression plot with `age` on X-axis and `charges` on Y-axis to identify the relationship between these two attributes. # # # # + id="6U6NaAy4WQgs" colab={"base_uri": "https://localhost:8080/", "height": 204} outputId="f3998c7d-424a-4e13-e2b3-27c8de02d422" # Import modules import pandas as pd import numpy as np import matplotlib.pyplot as plt import seaborn as sns # Load the dataset # Dataset Link: 'https://s3-student-datasets-bucket.whjr.online/whitehat-ds-datasets/insurance_dataset.csv' df = pd.read_csv('https://s3-student-datasets-bucket.whjr.online/whitehat-ds-datasets/insurance_dataset.csv') # Print first five rows using head() function df.head() # + id="jg7hAMJ4jKC5" colab={"base_uri": "https://localhost:8080/"} outputId="74373fd9-949f-4251-e838-671f5c839dc4" # Check if there are any null values. 
# If any column has null values, treat them accordingly
df.isnull().sum()

# + id="A8RW5WbUuR88" colab={"base_uri": "https://localhost:8080/", "height": 496} outputId="9c124bfe-fc10-481e-f419-6e8cf7ac118e"
# Create a regression plot between 'age' and 'charges'.
# Pass the data vectors with the `x=`/`y=` keywords: positional x/y were
# deprecated and later removed in seaborn.
plt.figure(figsize = (21, 7))
sns.regplot(x = df['age'], y = df['charges'], color = 'red')
plt.show()

# + [markdown] id="uG9YxYbpjgVG"
# ---

# + [markdown] id="uDTmlU-Mz0fI"
# #### Activity 2: Train-Test Split
#
# We have to determine the effect of `age` on insurance charges. Thus, `age` is the feature variable and `charges` is the target variable.
#
# Split the dataset into training set and test set such that the training set contains 67% of the instances and the remaining instances will become the test set.

# + id="Ku_loAWZ0LXr"
# Split the DataFrame into the training and test sets (67% / 33%).
from sklearn.model_selection import train_test_split

x_train, x_test, y_train, y_test = train_test_split(df['age'], df['charges'], test_size = 0.33, random_state = 41)

# + [markdown] id="hCPg7ClP0Om1"
# ---

# + [markdown] id="ud8dLfCGjh0E"
# #### Activity 3: Model Training
#
# Implement simple linear regression using `sklearn` module in the following way:
#
# 1. Reshape the feature and the target variable arrays into two-dimensional arrays by using `reshape(-1, 1)` function of numpy module.
# 2. Deploy the model by importing the `LinearRegression` class and create an object of this class.
# 3. Call the `fit()` function on the LinearRegression object and print the slope and intercept values of the best fit line.
#

# + id="Xost35Q1XreI" colab={"base_uri": "https://localhost:8080/"} outputId="ae893bfa-c308-465b-bd49-589772fe41f7"
# 1. Create two-dimensional NumPy arrays for the feature and target variables.
# Print the shape or dimensions of these reshaped arrays
def reshape_(frame):
    """Return the Series/DataFrame values as an (n, 1) column vector for sklearn."""
    return frame.values.reshape(-1, 1)

x_train_reshaped = reshape_(x_train)
x_test_reshaped = reshape_(x_test)
y_train_reshaped = reshape_(y_train)
y_test_reshaped = reshape_(y_test)

print(x_train_reshaped.shape)
print(x_test_reshaped.shape)
print(y_train_reshaped.shape)
print(y_test_reshaped.shape)

# + id="U9iIV06LXuQP" colab={"base_uri": "https://localhost:8080/"} outputId="d4c34423-c903-40c0-c8dd-bad05e77d7ba"
# 2. Deploy linear regression model using the 'sklearn.linear_model' module.
from sklearn.linear_model import LinearRegression

# Create an object of the 'LinearRegression' class.
# (The name is arbitrary, as the exercise suggests; the evaluation cell
# below refers to this same object.)
Anythingyoulike = LinearRegression()

# 3. Call the 'fit()' function to learn the best-fit line on the training set.
Anythingyoulike.fit(x_train_reshaped, y_train_reshaped)

# Print the slope and intercept values.
# coef_ is (1, 1) and intercept_ is (1,) because y was reshaped to 2-D.
print(f"Slope: {Anythingyoulike.coef_[0][0]}\nIntercept: {Anythingyoulike.intercept_[0]}")

# + [markdown] id="cAPgWR45mrCo"
# ---
# + [markdown] id="CvcLZdremtHY"
# #### Activity 4: Model Prediction and Evaluation
#
# Predict the values for both training and test sets by calling the `predict()` function on the LinearRegression object. Also, calculate the $R^2$, MSE, RMSE and MAE values to evaluate the accuracy of your model.

# + id="hc3RPNgsX5-0" colab={"base_uri": "https://localhost:8080/"} outputId="bd08793d-2146-4805-adc8-deaead5fe3d8"
# Predict the target variable values for both training set and test set
# Call 'r2_score', 'mean_squared_error' & 'mean_absolute_error' functions of the 'sklearn' module. Calculate RMSE value by taking the square root of MSE.
from sklearn.metrics import r2_score, mean_squared_error, mean_absolute_error

y_train_pred = Anythingyoulike.predict(x_train_reshaped)
y_test_pred = Anythingyoulike.predict(x_test_reshaped)

# Print these values for both training set and test set.
# RMSE has no dedicated sklearn helper here, so it is sqrt(MSE).
print(f"Train Set\n{'-' * 50}")
print(f"R-squared: {r2_score(y_train_reshaped, y_train_pred):.3f}")
print(f"Mean Squared Error: {mean_squared_error(y_train_reshaped, y_train_pred):.3f}")
print(f"Root Mean Squared Error: {np.sqrt(mean_squared_error(y_train_reshaped, y_train_pred)):.3f}")
print(f"Mean Absolute Error: {mean_absolute_error(y_train_reshaped, y_train_pred):.3f}")

print(f"\n\nTest Set\n{'-' * 50}")
print(f"R-squared: {r2_score(y_test_reshaped, y_test_pred):.3f}")
print(f"Mean Squared Error: {mean_squared_error(y_test_reshaped, y_test_pred):.3f}")
print(f"Root Mean Squared Error: {np.sqrt(mean_squared_error(y_test_reshaped, y_test_pred)):.3f}")
print(f"Mean Absolute Error: {mean_absolute_error(y_test_reshaped, y_test_pred):.3f}")

# + [markdown] id="Bp0p4IT-Dn_w"
# ---
Dataset_Analysis_with_pandas_library.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# + _uuid="d629ff2d2480ee46fbb7e2d37f6b5fab8052498a" _cell_guid="79c7e3d0-c299-4dcb-8224-4455121ee9b0"
import pandas as pd
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
import tensorflow as tf
# `keras.utils.np_utils` no longer exists in TF2-era Keras; the supported
# import path is `tensorflow.keras.utils`.
from tensorflow.keras.utils import to_categorical
# %matplotlib inline
# -

train_dir=pd.read_csv("../input/digit-recognizer/train.csv")
test_dir=pd.read_csv("../input/digit-recognizer/test.csv")

train_dir.head()

X_train=train_dir.drop(labels = ["label"],axis = 1)
X_train.head()

y_train=train_dir['label']
y_train.head()

# Class distribution of the digit labels. Modern seaborn rejects a
# positional data vector, so pass it with the `x=` keyword.
sns.countplot(x = y_train)

# One-hot encode the 10 digit classes.
y_train= to_categorical(y_train, num_classes = 10)

# # NORMALIZATION
X_train = X_train / 255.0
test_dir = test_dir / 255.0

#RESHAPING (flat 784-pixel rows -> 28x28 single-channel images)
X_train = X_train.values.reshape(-1, 28 , 28, 1)
test_dir = test_dir.values.reshape(-1, 28 , 28, 1)

# +
from sklearn.model_selection import train_test_split

X, X_val, y, y_val = train_test_split(X_train, y_train, test_size=0.2, random_state=7)

# +
from tensorflow.keras import regularizers

# Four Conv+MaxPool stages followed by a dense classification head.
model=tf.keras.models.Sequential([tf.keras.layers.Conv2D(32,(5,5),activation='relu',input_shape=(28,28,1),padding='Same'),
                                  tf.keras.layers.MaxPooling2D(2,2),
                                  tf.keras.layers.Dropout(0.2),
                                  tf.keras.layers.Conv2D(32,(3,3),activation='relu',padding='Same'),
                                  tf.keras.layers.MaxPooling2D(2,2),
                                  tf.keras.layers.Conv2D(64,(3,3),activation='relu',padding='Same'),
                                  tf.keras.layers.MaxPooling2D(2,2),
                                  tf.keras.layers.Conv2D(64,(3,3),activation='relu',padding='Same'),
                                  tf.keras.layers.MaxPooling2D(2,2),
                                  tf.keras.layers.Flatten(),
                                  tf.keras.layers.Dense(512,activation='relu'),
                                  tf.keras.layers.Dropout(0.3),
                                  tf.keras.layers.Dense(10,activation='softmax')])

model.summary()
# -

# Use the TF2 import path for plot_model as well.
from tensorflow.keras.utils import plot_model
plot_model(model, show_shapes=True)
# `lr` was renamed to `learning_rate` in tf.keras optimizers; `lr` now
# raises on current TensorFlow releases.
model.compile(optimizer=tf.keras.optimizers.RMSprop(learning_rate=0.001),
              loss='categorical_crossentropy',
              metrics=['accuracy'])


# +
class mycallbacks(tf.keras.callbacks.Callback):
    """Stop training as soon as the training accuracy exceeds 99%."""

    def on_epoch_end(self, epochs, logs=None):
        # `logs` can be None (and the metric missing) on the first epochs of
        # some backends; guard instead of comparing None > 0.99.
        logs = logs or {}
        if logs.get('accuracy', 0) > 0.99:
            self.model.stop_training = True


callbacks=mycallbacks()

history=model.fit(X,y,validation_data=(X_val,y_val),epochs=30,verbose=1,callbacks=[callbacks])

# +
acc = history.history['accuracy']
val_acc = history.history['val_accuracy']
loss = history.history['loss']
val_loss = history.history['val_loss']

epochs = range(len(acc))

plt.plot(epochs, acc, 'r', label='Training accuracy')
plt.plot(epochs, val_acc, 'b', label='Validation accuracy')
plt.title('Training and validation accuracy')
plt.figure()

plt.plot(epochs, loss, 'r', label='Training Loss')
plt.plot(epochs, val_loss, 'b', label='Validation Loss')
plt.title('Training and validation loss')
plt.legend()

plt.show()
# -

# Predict all 28,000 test digits in a single batched call instead of
# 28,000 separate one-image `model.predict` calls (orders of magnitude
# faster, identical labels).
results = list(np.argmax(model.predict(test_dir), axis=1))

# +
submission = pd.DataFrame()
submission['ImageId'] = [i for i in range(1, 28001)]
submission['Label'] = results
submission.to_csv('./DR.csv', index=False)
# -
digit-recognizer-easy-99-acc.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- import numpy as np import scipy.signal as signal # %matplotlib notebook import matplotlib.pyplot as plt R_l = 510000 R_l1 = 1000 C_l = 100e-9 # + # get output voltage from diode current a1_l = R_l * C_l a0_l = 1 b1_l = 0 b0_l = R_l w, H_l = signal.freqs ([b1_l, b0_l], [a1_l, a0_l]) plt.figure() plt.semilogx (w / (2 * np.pi), 20 * np.log10 (np.abs (H_l))) # + # get current from input voltage a1_lI = R_l1 * R_l * C_l a0_lI = R_l + 1 b1_lI = R_l * C_l b0_lI = 1 w, H_lI = signal.freqs ([b1_lI, b0_lI], [a1_lI, a0_lI]) plt.figure() plt.semilogx (w / (2 * np.pi), 20 * np.log10 (np.abs (H_lI))) # -
Simulations/Old_Stuff/LimiterDriver.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # Kneip 2008
#
# * <NAME>., & <NAME>. (2008). Combining Registration and Fitting for Functional Models. Journal of the American Statistical Association, 103(483), 1155–1165. https://doi.org/10.1198/016214508000000517
# * <NAME>., <NAME>., <NAME>., <NAME>., & <NAME>. (2011). Registration of Functional Data Using Fisher-Rao Metric. Retrieved from http://arxiv.org/abs/1103.3817

# +
# loading modules
# %matplotlib inline
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
from time import time
import seaborn as sns
# -

# ## Dev specific functions

# We generated 21 curves over the interval [−3, 3] of the form (eq 17):
#
# $y_{i}^{*}(t)=z_{i 1} \exp \left[(t-1.5)^{2} / 2\right]+z_{i 2} \exp \left[(t+1.5)^{2} / 2\right]$
#
# and $z_{i1}$ and $z_{i2}$ from $N(1,.25^2)$.

def fun_y_star(t, z1, z2):
    """
    Two-bump test curve, Eq. 17 in Kneip & Ramsay (2008).

    The signs inside the exponentials are corrected relative to the paper
    (double-checked against Srivastava et al. 2011): each term is a
    Gaussian bump centered at +1.5 and -1.5, scaled by z1 and z2.
    """
    right_bump = np.exp(-((t - 1.5) ** 2) / 2)
    left_bump = np.exp(-((t + 1.5) ** 2) / 2)
    return z1 * right_bump + z2 * left_bump

# The associated warping functions $h_i$ were (eq 18):
#
# $h_{i}(t)=\left\{\begin{array}{ll}6 \frac{\exp \left[a_{i}(t+3) / 6\right]-1}{\exp \left[a_{i}\right]-1}-3, & a_{i} \neq 0 \\ t & \text { otherwise }\end{array}\right.$
#
# The coefficients $a_i$ were equally spaced between −1 and 1.
def fun_h(t, a_i):
    """
    Warping function, Eq. 18 in Kneip and Ramsay (2008).

    a_i == 0 yields the identity warp; otherwise the interval [-3, 3] is
    distorted exponentially. (An unreachable bare `return` after the
    if/else was removed.)
    """
    if a_i == 0:
        return t
    return 6 * (np.exp(a_i * (t + 3) / 6) - 1) / (np.exp(a_i) - 1) - 3


# ## Generating data

# +
np.random.seed(42)

N_cvs = 21                    # number of curves
t0, tf, N_t = -3, 3, 100      # time interval and number of samples
z12 = np.random.normal(1, .25**2, (N_cvs, 2))

t_range = np.linspace(-3, 3, N_t)
a_range = np.linspace(-1, 1, N_cvs)

# +
# One warp column per curve, plus the common time axis.
df_hi_t = pd.DataFrame(np.array([fun_h(t_range, a_i) for a_i in a_range]).T,
                       columns=["h_%i" % i for i in range(N_cvs)])
df_hi_t["t"] = t_range
display(df_hi_t.head())
df_hi_t.plot(x="t", legend=False, title="warping functions");

# +
# Ground-truth (unwarped) curves y*_i. (A leftover unused `i = 0`
# assignment was removed.)
y_star = np.array([fun_y_star(t_range, z12[i, 0], z12[i, 1]) for i in range(N_cvs)]).T
df_y_star = pd.DataFrame(y_star)
df_y_star["t"] = t_range
df_y_star.plot(x="t", legend=False, title="y");
# -

# $x_i (t) = y_i [h_i (t)]$

# +
# Observed (warped) curves: evaluate each y*_i on its own warped time axis.
x = np.array([fun_y_star(df_hi_t.iloc[:, i].values, z12[i, 0], z12[i, 1]) for i in range(N_cvs)]).T
df_x = pd.DataFrame(x)
df_x["t"] = t_range
display(df_x.head())
df_x.plot(x="t", legend=False, title="y");
# -

# ## Registration

import fdasrsf as fs

# +
t_vec = np.copy(df_x["t"].values)
f = np.copy(df_x.iloc[:, :-1])
print(f.shape, t_vec.shape)
# -

obj = fs.fdawarp(f, t_vec)

# +
# This function aligns a collection of functions using the elastic square-root slope (srsf) framework.
tic = time()
obj.srsf_align(parallel=True, smoothdata=False)
elapsed = time()-tic
print("- Elapsed time:")
print("%.4f (s)"%(elapsed))
print("%.4f (min)"%(elapsed/60))
print("%.4f (h)"%(elapsed/(60*60)))

# +
# extracting warp functions vectors
gamma = np.copy(obj.gam)
M = gamma.shape[0]
print(M)
# -

t0, tf = np.copy(t_vec[0]), np.copy(t_vec[-1])
# Rescale each warp from the unit interval back to the original time range.
t_vec2 = np.array([(tf - t0) * gamma[:, k]+ t0 for k in range(f.shape[1])]).T

# +
fig, axes = plt.subplots(1,4,figsize=(12,3), constrained_layout=True)
axes[0].plot(np.arange(0, M) / float(M - 1), gamma);
axes[1].plot(gamma);
axes[2].plot(M*gamma);
axes[3].plot(t_vec, t_vec2);
plt.suptitle("Warping Functions");
# -

f_w = np.copy(obj.fn)  # Registered (warped) data

#np.save("f-2021-11-10", f)
#np.save("f_w-2021-11-10", f_w)

(obj.time == t_vec).all()

# +
# plotting
fig, axes = plt.subplots(2,1, sharex=True)
axes = axes.ravel()
i=0
axes[i].set_title("Original data")
axes[i].plot(f);
i+=1
axes[i].set_title("Registered (warped) data")
axes[i].plot(f_w);

# +
# plotting
fig, axes = plt.subplots(3,1, figsize=(6,6),sharex=True)
axes = axes.ravel()
i=0
axes[i].set_title("Original data")
axes[i].plot(t_vec,f);
i+=1
axes[i].set_title("Registered (warped) data")
axes[i].plot(t_vec,f_w)
axes[i].set_xlabel("time index");
i+=1
df_y_star.plot(x="t", legend=False, title="ground truth y", ax=axes[i]);
# -

# ## analysing one curve

# +
y1 = np.copy(df_y_star.iloc[:,0].values)  # ground truth
f1 = np.copy(f[:,0])                      # "measured" data
gamma1 = np.copy(gamma[:,0])              # warping function
t_warped = t_vec2[:,0]                    # warping function with the same range as the original time

# inverse warping function
from scipy.interpolate import CubicSpline
gamma_inv = CubicSpline(t_warped, t_vec)
x = np.linspace(np.min(t_warped),np.max(t_warped),len(t_warped))
t_warped_inv = gamma_inv(x)

# +
plt.plot(t_vec, y1, "k", label="ground truth")
plt.plot(t_vec, f1, "b", label="measured f1")
plt.plot(t_warped,y1, "g--", lw=2, label="warping ground truth")
plt.plot(t_warped_inv,f1, "--r", lw=2, label="unwarping f1")
plt.legend()
# -

plt.plot(t_vec,y1)
plt.plot(t_vec,f1)

# +
# `t_inv` was plotted below but never defined, and the cell also contained a
# malformed statement `np.interp(x=, xp=t_vec, fp=t_warped)` (syntax error).
# Define the numerically-inverted warp (swap xp/fp relative to the warp) here
# instead; t_warped is monotone increasing, as np.interp requires.
t_inv = np.interp(t_vec, t_warped, t_vec)

fig, axes = plt.subplots(1,4,figsize=(12,3), constrained_layout=True)
axes[0].set_title("warping function")
axes[0].set_ylabel("New time")
axes[0].plot(np.arange(0, M) / float(M - 1), gamma1);
axes[1].set_title("t_warped")
axes[1].plot(t_vec, t_warped);
axes[2].set_title("t_inv")
axes[2].plot(t_inv);
axes[3].set_title("t_inv")
axes[3].plot(t_vec, t_warped);
axes[3].plot(t_vec, t_inv);
# -

# +
from scipy.interpolate import CubicSpline
gamma_inv = CubicSpline(t_warped, t_vec)
t_w0, t_wf = np.min(t_warped), np.max(t_warped)
x_temp = np.linspace(t_w0, t_wf,len(t_warped))
t_warped_inv = gamma_inv(x_temp)

# +
from scipy.optimize import minimize
from scipy.interpolate import CubicSpline

function_cs = CubicSpline(t_vec, t_warped)

def diff(x,a):
    """Squared distance between the spline-warped x and the target value a."""
    yt = function_cs(x)
    return (yt - a )**2

def fun_num_inverse(x):
    """numerically inverse of a function"""
    y = np.zeros(x.shape)
    for idx, x_value in enumerate(x):
        res = minimize(diff, 1.0, args=(x_value), method='Nelder-Mead', tol=1e-6)
        y[idx] = res.x[0]
    return y

# +
x = np.linspace(np.min(t_warped),np.max(t_warped),len(t_warped))
print(x.shape)
#y = np.zeros(x.shape)
y = fun_num_inverse(x)
# -

plt.plot(t_vec, t_warped)
plt.plot(x,y)
plt.plot(x, t_warped_inv)

# +
plt.plot(x,y)
plt.title(r'$f^{-1}(x)$')
plt.xlabel('x')
plt.ylabel('y');
# -

# +
from scipy.optimize import minimize

# ref: https://moonbooks.org/Articles/How-to-numerically-compute-the-inverse-function-in-python-using-scipy-/

def function(x):
    y = 1.0 * x**5.0
    return y

def diff(x,a):
    yt = function(x)
    return (yt - a )**2

def fun_num_inverse(x):
    """numerically inverse of a function"""
    y = np.zeros(x.shape)
    for idx, x_value in enumerate(x):
        res = minimize(diff, 1.0, args=(x_value), method='Nelder-Mead', tol=1e-6)
        y[idx] = res.x[0]
    return y

# +
x = np.arange(0.0, 3.0, 0.1)
y = function(x)

plt.plot(x,y)
plt.title('f(x)')
plt.xlabel('x')
plt.ylabel('y'); # + x = np.arange(np.min(y),np.max(y),0.1) #y = np.zeros(x.shape) y = fun_num_inverse(x) plt.plot(x,y) plt.title(r'$f^{-1}(x)$') plt.xlabel('x') plt.ylabel('y'); # -
notebooks/Kneip_2008.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

import os
import sys
from pathlib import Path

sys.getrecursionlimit()

sys.setrecursionlimit(10000)

sys.getrecursionlimit()


def factorial(n):
    """Return n! recursively, printing the call/return order for illustration.

    Raises ValueError for negative n (previously a negative argument
    recursed until RecursionError).
    """
    if n < 0:
        raise ValueError("factorial() is not defined for negative values")
    print(f'Evaluating factorial({n})')
    if n == 0:
        return 1
    res = n * factorial(n - 1)
    print(f'Done with factorial({n})')
    return res


factorial(4)

factorial(3)


# Recursive binary search
def binary_search(data, target, low, high):
    """Return True if target occurs in the sorted slice data[low:high+1]."""
    if low > high:
        return False
    else:
        mid = (low + high) // 2
        if target == data[mid]:
            return True
        elif target > data[mid]:
            return binary_search(data, target, mid + 1, high)
        else:
            return binary_search(data, target, low, mid - 1)


binary_search([1], 1, 0, 0)

binary_search([1], 0, 0, 0)

# %timeit binary_search(range(1_000_000_000), -1, 0, 1_000_000)


def binary_search_iter(data, target):
    """Iterative binary search over the whole sorted sequence."""
    low = 0
    high = len(data) - 1
    while low <= high:
        mid = (low + high) // 2
        if target == data[mid]:
            return True
        elif target > data[mid]:
            low = mid + 1
        else:
            high = mid - 1
    return False


# %timeit binary_search_iter(range(1_000_000_000), -1)


def disk_usage(path):
    """Recursively print and return the cumulative size in bytes of path."""
    path = Path(path)
    total = os.path.getsize(path)
    if path.is_dir():
        for filename in list(path.iterdir()):
            total += disk_usage(filename)
    print(f'{total:<10,} {path}')
    return total


# Guard the demo call: the hard-coded personal path does not exist on other
# machines and previously raised FileNotFoundError on import.
if Path("/Users/imad/dotfiles/").exists():
    disk_usage("/Users/imad/dotfiles/")


def reverse(data, start, stop):
    """Reverse data[start:stop] in place by recursive pairwise swaps."""
    if start < stop - 1:
        data[start], data[stop - 1] = data[stop - 1], data[start]
        reverse(data, start + 1, stop - 1)


l = list(range(10))
l

reverse(l, 0, len(l))

l

l = list(range(10))
l


def reverse_iterative(data):
    """Reverse data in place by swapping symmetric pairs."""
    for i in range(len(data) // 2):
        data[i], data[- (i + 1)] = data[- (i + 1)], data[i]


reverse_iterative(l)

l


def sum_recursive(data):
    """Return the sum of a sequence recursively (0 for an empty sequence,
    which previously raised IndexError)."""
    if not data:
        return 0
    if len(data) == 1:
        return data[0]
    else:
        return data[0] + sum_recursive(data[1:])


sum_recursive(l)

# $$F_0 = 0$$
# $$F_1 = 1$$
# $$F_n = F_{n - 1} + F_{n - 2}; n >= 2$$


def fib_iter(n):
    """Iterative Fibonacci: O(n) time, O(1) space."""
    if n <= 1:
        return n
    f1 = 0
    f2 = 1
    for i in range(2, n + 1):
        f = f1 + f2
        f1 = f2
        f2 = f
    return f


# %timeit fib_iter(40)


def fib(n):
    """Naive recursive Fibonacci: exponential time."""
    if n <= 1:
        return n
    return fib(n - 1) + fib(n - 2)


# %timeit fib(40)

# The recursive version of the fibonacci is much slower because it calculates the same thing multiple times. So the recursion tree becomes very deep (see below):

# <img src="fib-recursion.png" style="height=200px; width=100px">

# The space complexity of recursion can be huge and may lead to stack overflow. See below:

# <img src="complexity-recursion.png" style="height=200px; width=100px">

# The current recursive implementation of fibonacci is $O(2^n)$, while the iterative version is $O(n)$. Note that the lower bound for the recursive version is $\Omega(2^{n/2})$.

# We can use a technique called __memoization__ to reduce the running time of the recursive version. __Memoization__ is an optimization technique used primarily to speed up computer programs by storing the results of expensive function calls and returning the cached result when the same inputs occur again.

fib_dict = {0: 0, 1: 1}


def fib_rec(n):
    """Memoized recursive Fibonacci using the module-level fib_dict cache."""
    if fib_dict.get(n) is not None:
        return fib_dict[n]
    fib_dict[n] = fib_rec(n - 1) + fib_rec(n - 2)
    return fib_dict[n]


# %timeit fib_rec(40)

# The space taken by the recursive function is almost always more than the iterative version due to the recursion tree (saving stack frames for all the function calls). __The space complexity of a recursion function is the max depth of the recursion tree__; which is the number of nodes along the longest path from the root node down to the farthest leaf node.

# <img src="space-complexity.png">

# Writing a recursive function that calculates the power of `x`; e.g. $power(x, n) = x^n$.

# Method 1, Use the following recurrence:
# $$x^n = x * x^{n - 1} if n > 0; else 1$$


def power(x, n):
    """Compute x**n from the linear recurrence x^n = x * x^(n-1): O(n)."""
    if n == 0:
        return 1
    return x * power(x, n - 1)


power(10, 8)

# This method is $O(n)$.
# This is $O(n)$

# Method 2, Use the following recurrence:
# $$
# x^n = \begin{Bmatrix}
# x^{n/2} * x^{n/2} & if\ n\ is\ even \\
# x * x^{n -1} & if\ n\ is\ odd \\
# 1 & if n = 0
# \end{Bmatrix}
# $$


def power(x, n):
    """Exponentiation by squaring: halve the exponent when it is even."""
    if n == 0:
        return 1
    if n % 2 == 0:
        half = power(x, n / 2)
        return half * half
    return x * power(x, n - 1)


power(10, 8)

# This is $O(logn)$


def power(x, n):
    """Exponentiation by squaring using floor division of the exponent."""
    if n == 0:
        return 1
    half = power(x, n // 2)
    squared = half * half
    if n % 2 == 1:
        squared = squared * x
    return squared


power(10, 8)

# computing $x^n mod M$


def mod(x, n, m):
    """Compute x**n (mod m) by squaring, reducing modulo m at every step."""
    if n == 0:
        return 1
    if n % 2 == 0:
        half = mod(x, n / 2, m)
        return (half * half) % m
    return ((x % m) * mod(x, n - 1, m)) % m


mod(10, 10, 7)
introduction/Recursion.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + deletable=false editable=false nbgrader={"grade": false, "grade_id": "cell-12bea12324c032d8", "locked": true, "schema_version": 1, "solution": false} import hashlib # for grading # Standard imports import numpy as np import pandas as pd from collections import Counter, OrderedDict import re import string import math import warnings; warnings.simplefilter('ignore') # NLTK imports import nltk nltk.download('stopwords') from nltk.tokenize import WordPunctTokenizer from nltk.stem.snowball import SnowballStemmer from nltk.corpus import stopwords # SKLearn related imports import sklearn from sklearn.feature_extraction.text import CountVectorizer from sklearn.feature_extraction.text import TfidfTransformer from sklearn.pipeline import Pipeline from sklearn.base import TransformerMixin from sklearn import preprocessing from sklearn.naive_bayes import MultinomialNB from sklearn.model_selection import train_test_split # + [markdown] deletable=false editable=false nbgrader={"grade": false, "grade_id": "cell-f07b8631beb0508c", "locked": true, "schema_version": 1, "solution": false} # ## Q1. Country names # # For the first question you will be making use of regex. In particular you have a list of countries and you'll have to answer some very specific questions about that list. 
# # Start by defining the path to this list
#
#

# + deletable=false editable=false nbgrader={"grade": false, "grade_id": "cell-fe61fa6cbbdef77d", "locked": true, "schema_version": 1, "solution": false}
path = "data/countries.txt"

# + [markdown] deletable=false editable=false nbgrader={"grade": false, "grade_id": "cell-52faf4f77a990570", "locked": true, "schema_version": 1, "solution": false}
# The first thing you will build is a wrapper that will apply a regex pattern into a given file, and return a list of results found matching that pattern. Implement it in the function below:

# + deletable=false nbgrader={"grade": false, "grade_id": "cell-a88528290dd35f7d", "locked": false, "schema_version": 1, "solution": true}
def find_all_in_file(pattern, path):
    """
    Function that returns all matches of a certain pattern in a certain text.

    Args:
        pattern - regex pattern (anchors ^/$ match per line via MULTILINE,
                  since the file holds one country per line)
        path - path to the file
    """
    with open(path, 'r', encoding='utf8') as file_handle:
        text = file_handle.read()
    return re.findall(pattern, text, flags=re.MULTILINE)


# + [markdown] deletable=false editable=false nbgrader={"grade": false, "grade_id": "cell-ef7991b98e752180", "locked": true, "schema_version": 1, "solution": false}
# Make sure this function is working with the following tests:

# + deletable=false editable=false nbgrader={"grade": false, "grade_id": "cell-4a056266aa5ada57", "locked": true, "schema_version": 1, "solution": false}
assert find_all_in_file(pattern="^P.+?$", path=path)[8] == "Portugal"
assert find_all_in_file(pattern="^.+?a$", path=path)[18] == "Croatia"
assert len(find_all_in_file(pattern="^.+?ca$", path=path)) == 4

# + [markdown] deletable=false editable=false nbgrader={"grade": false, "grade_id": "cell-df341d1f1838c29e", "locked": true, "schema_version": 1, "solution": false}
# #### Q1.a)
#
# Now that you prepared your wrapper, let's move on to the actual expressions. The first thing we are looking for is for countries with loooong names. In particular we want you to find all the countries with more than 15 letters.
Use the wrapper you defined above and assign its return to a variable `ret`. # + deletable=false nbgrader={"grade": false, "grade_id": "cell-7cddd8e2e48afb31", "locked": false, "schema_version": 1, "solution": true} # ret_long = find_all_in_file(...) # YOUR CODE HERE raise NotImplementedError() # + deletable=false editable=false nbgrader={"grade": true, "grade_id": "cell-5a358ed83c473214", "locked": true, "points": 1, "schema_version": 1, "solution": false} print("Number of countries with more than 15 or more letters: ", len(ret_long)) assert len(ret_long) == 16 # + [markdown] deletable=false editable=false nbgrader={"grade": false, "grade_id": "cell-6727f8213243afc0", "locked": true, "schema_version": 1, "solution": false} # #### Q1.b) # # Now, find out how many countries: # * Start with a vowel # * Start with a consonant # + deletable=false nbgrader={"grade": false, "grade_id": "cell-447eb675481e47fd", "locked": false, "schema_version": 1, "solution": true} # ret_vowel = find_all_in_file(...) # YOUR CODE HERE raise NotImplementedError() # + deletable=false editable=false nbgrader={"grade": true, "grade_id": "cell-535072609d84d577", "locked": true, "points": 1, "schema_version": 1, "solution": false} print("Number of countries that start with vowels: " , len(ret_vowel)) assert len(ret_vowel) == 36 # + deletable=false nbgrader={"grade": false, "grade_id": "cell-66af8912995d91a2", "locked": false, "schema_version": 1, "solution": true} # ret_consonant = find_all_in_file(...) 
# YOUR CODE HERE raise NotImplementedError() # + deletable=false editable=false nbgrader={"grade": true, "grade_id": "cell-1c17b943b63b52ae", "locked": true, "points": 1, "schema_version": 1, "solution": false} print("Number of countries that start with consonants: " , len(ret_consonant)) assert len(ret_consonant) == 160 # + [markdown] deletable=false editable=false nbgrader={"grade": false, "grade_id": "cell-4e3cc736b933d71e", "locked": true, "schema_version": 1, "solution": false} # #### Q1.c) # # Next, find how many countries are composed by only one word and end in `ia`. You'll want to have a list with countries such as `Croatia`, `Serbia`, etc. # + deletable=false nbgrader={"grade": false, "grade_id": "cell-2bf14a2df3dd3e4e", "locked": false, "schema_version": 1, "solution": true} # ret_ia = find_all_in_file(...) # YOUR CODE HERE raise NotImplementedError() # + deletable=false editable=false nbgrader={"grade": true, "grade_id": "cell-19b61469674be299", "locked": true, "points": 1, "schema_version": 1, "solution": false} print("Number of variants of countries ending in \"ia\": " , len(ret_ia)) assert "Serbia" in ret_ia assert "Croatia" in ret_ia assert len(ret_ia) == 35 # + [markdown] deletable=false editable=false nbgrader={"grade": false, "grade_id": "cell-f6bb573c7736db8e", "locked": true, "schema_version": 1, "solution": false} # #### Q1.d) # # Finally, find the countries which have at least four consecutive consonants, without taking into account the first letter (Hint: you can assume the first letter is capitalized). So, it should match things like `Abcdf`. # + deletable=false nbgrader={"grade": false, "grade_id": "cell-e92132a5b63400f3", "locked": false, "schema_version": 1, "solution": true} # ret_bcdf = find_all_in_file(...) 
# YOUR CODE HERE raise NotImplementedError() # + deletable=false editable=false nbgrader={"grade": true, "grade_id": "cell-5150c254b592778e", "locked": true, "points": 1, "schema_version": 1, "solution": false} print("Number of countries matched: " , len(ret_bcdf)) assert len(ret_bcdf) == 3 assert hashlib.sha256(' '.join(ret_bcdf).encode()).hexdigest() == '7da1a15074b9245ae3b88fb92fc5c484243003a084d03280bf11d9346d768869' # + [markdown] deletable=false editable=false nbgrader={"grade": false, "grade_id": "cell-0409818769bbf3d0", "locked": true, "schema_version": 1, "solution": false} # ## Q2. A Study in Scarlet # # For this following questions we will be looking at Sir <NAME>'s ["A Study in Scarlet"](https://en.wikipedia.org/wiki/A_Study_in_Scarlet) (which you might have seen adapted to tv in ["A Study in Pink"](https://en.wikipedia.org/wiki/A_Study_in_Pink)). We will be performing common preprocessing operations on this text, as it is a common task in Natural Language Processing. Start by downloading the data and loading it into a list of sentences: # + deletable=false editable=false nbgrader={"grade": false, "grade_id": "cell-abce95c72c255d16", "locked": true, "schema_version": 1, "solution": false} path = "data/sherlock.txt" data = [line.strip('\n') for line in open(path, 'r', encoding='utf8') if len(line)>1] # + [markdown] deletable=false editable=false nbgrader={"grade": false, "grade_id": "cell-addc0c904c359402", "locked": true, "schema_version": 1, "solution": false} # #### Q2.a) # # First tokenize the data. Implement the function to receive an NLTK-style tokenizer and return the token list for each sentence: # + deletable=false nbgrader={"grade": false, "grade_id": "cell-a8b7930a61806e32", "locked": false, "schema_version": 1, "solution": true} def apply_tokenizer(data, tokenizer): """ Returns a list of lists, with the tokens of given text. 
I.e for an input ['Abc def', 'Ghi jkl mn'] it returns [['Abc', 'def'], ['Ghi', 'jkl', 'mn']] Args: data - list with the data tokenizer - nltk tokenizer """ # YOUR CODE HERE raise NotImplementedError() # + deletable=false editable=false nbgrader={"grade": true, "grade_id": "cell-f829c5a222c54690", "locked": true, "points": 1, "schema_version": 1, "solution": false} tokenizer = WordPunctTokenizer() data_tok = apply_tokenizer(data=data, tokenizer=tokenizer) assert len(data_tok) == 3770 assert len([w for s in data_tok for w in s]) == 51648 assert data_tok[8] == ['I','could','join','it',',','the','second','Afghan','war','had','broken','out','.','On','landing','at'] # + [markdown] deletable=false editable=false nbgrader={"grade": false, "grade_id": "cell-b76d100b971a777f", "locked": true, "schema_version": 1, "solution": false} # #### Q2.b) # # The second step you will implement is lowercasing the data. # + deletable=false nbgrader={"grade": false, "grade_id": "cell-ee47fb5a45fbd622", "locked": false, "schema_version": 1, "solution": true} def apply_lowercase(data): """ Returns a list of lists, with all the tokens lowecased. Args: data - list with tokenized data """ # YOUR CODE HERE raise NotImplementedError() # + deletable=false editable=false nbgrader={"grade": true, "grade_id": "cell-7979e12840663ea2", "locked": true, "points": 1, "schema_version": 1, "solution": false} data_lc = apply_lowercase(data=apply_tokenizer(data=data, tokenizer=tokenizer)) assert len(data_lc) == 3770 assert len([w for s in data_lc for w in s]) == 51648 assert data_lc[8] == ['i','could','join','it',',','the','second','afghan','war','had','broken','out','.','on','landing','at'] # + [markdown] deletable=false editable=false nbgrader={"grade": false, "grade_id": "cell-c8044c14c20583cf", "locked": true, "schema_version": 1, "solution": false} # #### Q2.c) # # Now implement a function that filters the stopwords. # # NOTE: Stopwords adapted from [here](https://gist.github.com/sebleier/554280). 
(Notice what we added some specific things, like ?" and ." to the stopwords. This was shown to be a limitation of the nltk tokenizer so it will be removed that way, instead of the more conventional way. This goes to show that there are more powerful tokenizers that you should use in the case you have to perform tokenization in the future.) # + deletable=false nbgrader={"grade": false, "grade_id": "cell-42ecb29c8fe117f1", "locked": false, "schema_version": 1, "solution": true} def apply_filter_stopwords(data, stopwords_fp): """ Returns a list of lists, with no stopwords. Args: data - list with the tokenized data stopwords_fp - path to the stopwords file """ # Create the list of stopwords from the file # stopwords = ... # YOUR CODE HERE raise NotImplementedError() # Filter the stopwords from the text # data_filt = ... # YOUR CODE HERE raise NotImplementedError() return data_filt # + deletable=false editable=false nbgrader={"grade": true, "grade_id": "cell-f70da0255ea6e291", "locked": true, "points": 1, "schema_version": 1, "solution": false} stopwords_fp = "data/english_stopwords.txt" data_filt_sw = apply_filter_stopwords(data=apply_lowercase(apply_tokenizer(data, tokenizer)), stopwords_fp=stopwords_fp) assert len(data_filt_sw) == 3770 assert len([w for s in data_filt_sw for w in s]) == 27733 assert data_filt_sw[8] == ['could', 'join', ',', 'second', 'afghan', 'war', 'broken', '.', 'landing'] # + [markdown] deletable=false editable=false nbgrader={"grade": false, "grade_id": "cell-8d0cb317596faa2f", "locked": true, "schema_version": 1, "solution": false} # #### Q2.d) # # After filtering stopwords, we want to remove punctuation from the text as well. Make use of `string.punctuation` to do so. # + deletable=false nbgrader={"grade": false, "grade_id": "cell-a0cd3cf5cc97a8f2", "locked": false, "schema_version": 1, "solution": true} def apply_filter_punkt(data): """ Returns a list of lists, with no punctuation. 
Args: data - list with the tokenized data """ # YOUR CODE HERE raise NotImplementedError() # + deletable=false editable=false nbgrader={"grade": true, "grade_id": "cell-650a677a3a01d1bf", "locked": true, "points": 1, "schema_version": 1, "solution": false} data_filt_punkt = apply_filter_punkt(data=apply_tokenizer(data, tokenizer)) assert len(data_filt_punkt) == 3770 assert len([w for s in data_filt_punkt for w in s]) == 46362 assert data_filt_punkt[8] == ['I','could','join','it','the','second','Afghan','war','had','broken','out','On','landing','at'] # + [markdown] deletable=false editable=false nbgrader={"grade": false, "grade_id": "cell-38d66dd89731e114", "locked": true, "schema_version": 1, "solution": false} # #### Q2.e) # # The last preprocessing step you are going to implement is stemming. # + deletable=false nbgrader={"grade": false, "grade_id": "cell-a831ba989f3e50e6", "locked": false, "schema_version": 1, "solution": true} def apply_stemmer(data, stemmer): """ Returns a list of lists, with stemmed data. 
Args: data - list with the tokenized data stemmer - instance of stemmer to use """ # YOUR CODE HERE raise NotImplementedError() # + deletable=false editable=false nbgrader={"grade": true, "grade_id": "cell-3596a6510ebbda3d", "locked": true, "points": 1, "schema_version": 1, "solution": false} stemmer = SnowballStemmer("english") data_stems = apply_stemmer(data=apply_lowercase(apply_tokenizer(data, tokenizer)), stemmer=stemmer) assert len(data_stems) == 3770 assert len([w for s in data_stems for w in s]) == 51648 assert data_stems[8][-2] == 'land' # + [markdown] deletable=false editable=false nbgrader={"grade": false, "grade_id": "cell-2bfe5aed6ebdbb26", "locked": true, "schema_version": 1, "solution": false} # #### Q2.f) # # Finally, join everything in a function, that applies the steps in the following order, in : # * Tokenization # * Lowercasing # * Filtering stopwords # * Filtering punctuation # * Stemming # + deletable=false nbgrader={"grade": false, "grade_id": "cell-ea5b2305431c20dd", "locked": false, "schema_version": 1, "solution": true} # Custom transformer to implement sentence cleaning class TextCleanerTransformer(TransformerMixin): def __init__(self, tokenizer, stemmer, regex_list, lower=True, remove_punct=True, stopwords=[]): self.tokenizer = tokenizer self.stemmer = stemmer self.regex_list = regex_list self.lower = lower self.remove_punct = remove_punct self.stopwords = stopwords def clean_sentences(self, sentences): # Split sentence into list of words # sentences_tokens = ... # YOUR CODE HERE raise NotImplementedError() # Lowercase if self.lower: # sentences_tokens = ... # YOUR CODE HERE raise NotImplementedError() # Remove punctuation if self.remove_punct: # sentences_tokens = ... # YOUR CODE HERE raise NotImplementedError() if self.stopwords: # sentences_tokens = ... # YOUR CODE HERE raise NotImplementedError() # Stem words if self.stemmer: # sentences_tokens = ... 
# YOUR CODE HERE raise NotImplementedError() # Join list elements into string sentences_prep = [" ".join(tokens).strip() for tokens in sentences_tokens] return sentences_prep # + deletable=false editable=false nbgrader={"grade": true, "grade_id": "cell-4a87d0c9b1f20f7e", "locked": true, "points": 1, "schema_version": 1, "solution": false} text_cleaner = TextCleanerTransformer( regex_list=[], tokenizer=tokenizer, stemmer=stemmer, lower=True, remove_punct=True, stopwords=stopwords_fp ) data_preprocessed = text_cleaner.clean_sentences(data) assert len(data_preprocessed) == 3770 assert len([w for s in data_preprocessed for w in s.split()]) == 22447 assert data_preprocessed[8] == 'could join second afghan war broken land' assert data_preprocessed[15] == 'noth misfortun disast remov brigad' # + [markdown] deletable=false editable=false nbgrader={"grade": false, "grade_id": "cell-f48abbdab8acc3d1", "locked": true, "schema_version": 1, "solution": false} # ## Q3. Movie reviews # # We will now use what we've learned to explore movie reviews. We will start by analysing the dataset, then we will apply the preprocessing you implemented above, and finally we will see how it affects a classification task. # + [markdown] deletable=false editable=false nbgrader={"grade": false, "grade_id": "cell-d6a39b05ecd310c0", "locked": true, "schema_version": 1, "solution": false} # #### Q3.a) # # To get some stats on the dataset, we will start by implementing your own function to get the list of n-grams from a list of tokens. 
Complete the function below: # + deletable=false nbgrader={"grade": false, "grade_id": "cell-43601c5f562f5867", "locked": false, "schema_version": 1, "solution": true} def ngrams(data, n): """ Returns list of tuples for all the n-grams Args: data - list of tokenized data (flattened) n - the n in n-grams """ # YOUR CODE HERE raise NotImplementedError() # + deletable=false editable=false nbgrader={"grade": true, "grade_id": "cell-f601c9c7d4b35092", "locked": true, "points": 2, "schema_version": 1, "solution": false} assert ngrams("The actress won the oscar".split(), 2) == [('The', 'actress'), ('actress', 'won'), ('won', 'the'), ('the', 'oscar')] assert ngrams("The actress won the oscar".split(), 3) == [('The', 'actress', 'won'), ('actress', 'won', 'the'), ('won', 'the', 'oscar')] assert ngrams("The actress won the oscar".split(), 4) == [('The', 'actress', 'won', 'the'), ('actress', 'won', 'the', 'oscar')] # + [markdown] deletable=false editable=false nbgrader={"grade": false, "grade_id": "cell-8cdc8657fcd5f3c4", "locked": true, "schema_version": 1, "solution": false} # #### Q3.b) # # We will now see in our dataset what are the most common n-grams. Load the data and find how many unique bi-grams, tri-grams and four-grams we have. Also, take advantage of `Counter` and `most_common()` to find the most common tri-gram. Merge together the words of the most common trigram to get one single string. 
(Hint: look at python's `join` function, exemplefied below when joining the full text) # + deletable=false editable=false nbgrader={"grade": false, "grade_id": "cell-21e130445a725501", "locked": true, "schema_version": 1, "solution": false} # Load the dataset df = pd.read_csv('data/imdb_sentiment.csv') # Get the text and split into full list of words docs = df['text'] full_text = ' '.join([d.strip() for d in docs]) words = full_text.split(' ') # + [markdown] deletable=false editable=false nbgrader={"grade": false, "grade_id": "cell-caa9453210806809", "locked": true, "schema_version": 1, "solution": false} # Implement below the code to get the sets of unigrams, bigrams, trigrams and fourgrams, and to # + deletable=false nbgrader={"grade": false, "grade_id": "cell-539dba0b0aabb366", "locked": false, "schema_version": 1, "solution": true} # unigrams = ... # bigrams = ... # trigrams = ... # fourgrams = ... # most_common_trigram = ... # # YOUR CODE HERE raise NotImplementedError() # + deletable=false editable=false nbgrader={"grade": true, "grade_id": "cell-91094315aa4e5abf", "locked": true, "points": 1, "schema_version": 1, "solution": false} n_unigrams = str(len(unigrams)) n_bigrams = str(len(bigrams)) n_trigrams = str(len(trigrams)) n_fourgrams = str(len(fourgrams)) print('Found {} unigrams'.format(n_unigrams)) assert hashlib.sha256(n_unigrams.encode()).hexdigest() == '1ae2d8247d3ad491c79aed034828ba78b21e25438a6e9a61f252eb566e39e877' print('Found {} bigrams'.format(n_bigrams)) assert hashlib.sha256(n_bigrams.encode()).hexdigest() == '7d2d487bcdf890f05578da49f574e3e8f22f7420f752071a24eb49759de5adf8' print('Found {} trigrams'.format(n_trigrams)) assert hashlib.sha256(n_trigrams.encode()).hexdigest() == '8c54e3c7087ab053a77d56c60408fd47837081fdea817b7cc9e68f134cef969d' print('Found {} fourgrams'.format(n_fourgrams)) assert hashlib.sha256(n_fourgrams.encode()).hexdigest() == '8df23d7f0d27298e7a7f77bdce4d15bb401098175c36514ba94b5350177b1593' print('Most common trigram is 
"{}"'.format(most_common_trigram)) assert hashlib.sha256(most_common_trigram.encode()).hexdigest() == '28b6f04107ef3f1120975abf58ca8d08d20243beea929999b203f0add941fe16' # + [markdown] deletable=false editable=false nbgrader={"grade": false, "grade_id": "cell-d2477efb5c417c3d", "locked": true, "schema_version": 1, "solution": false} # #### Q3.c) # # Let's now process a sample of our dataset with the previous Q2 preprocessing, and get a Bag of Words representation. Start by using your text cleaner to get a preprocessed version of this dataset. # # Note: if you didn't finish the text cleaner above, jump to the TF-IDF implementation directly, where you can load the BoW from a file. # + deletable=false editable=false nbgrader={"grade": false, "grade_id": "cell-306c1207ab2a8c1d", "locked": true, "schema_version": 1, "solution": false} text_cleaner = TextCleanerTransformer( regex_list=[], tokenizer=tokenizer, stemmer=None, lower=True, remove_punct=True, stopwords=stopwords_fp ) docs_preprocessed = text_cleaner.clean_sentences(docs[:200]) # + [markdown] deletable=false editable=false nbgrader={"grade": false, "grade_id": "cell-11aa60fa130d8eeb", "locked": true, "schema_version": 1, "solution": false} # We can get a vocabulary, vectorize our dataset and convert it into a BoW # + deletable=false editable=false nbgrader={"grade": false, "grade_id": "cell-23a2f9da897ab002", "locked": true, "schema_version": 1, "solution": false} def build_vocabulary(docs): vocabulary = Counter() for doc in docs: words = doc.split() vocabulary.update(words) return OrderedDict(vocabulary.most_common()) def vectorize(docs): vocabulary = build_vocabulary(docs) vectors = [] for doc in docs: words = doc.split() vector = np.array([doc.count(word) for word in vocabulary]) vectors.append(vector) return (vocabulary, vectors) def build_df(docs): vocab, vectors = vectorize(docs) return pd.DataFrame(vectors, columns=vocab) BoW = build_df(docs_preprocessed) BoW.head() # + [markdown] deletable=false 
editable=false nbgrader={"grade": false, "grade_id": "cell-c9951e7e69d2e1c9", "locked": true, "schema_version": 1, "solution": false} # You will now implement one of TF-IDFs variation to compute from the bag of words the more relevant words. The formulation you should use is one you've learned before: # # $$ tfidf _{t, d} =(tf_{t,d})*(log_2{(1 + \frac{N}{df_{t}})}) $$ # # Implement the TF-IDF below: # + deletable=false nbgrader={"grade": false, "grade_id": "cell-eb6996cb41762895", "locked": false, "schema_version": 1, "solution": true} def tfidf(BoW_df): """ Returns pandas dataframe of a tfidf representation from a BoW representation dataframe. Args: BoW_df - dataframe with document word counts (Bag of Words) """ # tf = (...) # def _idf(column): # return (...) # tf_idf = (...) # return tf_idf # YOUR CODE HERE raise NotImplementedError() # + [markdown] deletable=false editable=false nbgrader={"grade": false, "grade_id": "cell-3ab99eef3c657a91", "locked": true, "schema_version": 1, "solution": false} # Let's now apply it to our previous BoW (note: load the BoW first if you could not use your text cleaner) # + deletable=false editable=false nbgrader={"grade": false, "grade_id": "cell-ffdac98448888edf", "locked": true, "schema_version": 1, "solution": false} BoW = pd.read_csv('data/imdb_sentiment_bow_sample.csv') BoW.head() # + deletable=false editable=false nbgrader={"grade": true, "grade_id": "cell-5b8f988a69678772", "locked": true, "points": 2, "schema_version": 1, "solution": false} relevance = tfidf(BoW) assert(math.isclose(relevance['movie'][0], 0.009717385023827248), math.isclose(relevance['film'][10], 0.019778475747522496), math.isclose(relevance['nice'][16], 0.010851136310680626), math.isclose(relevance['good'][128], 0.00989061193998239)) # + [markdown] deletable=false editable=false nbgrader={"grade": false, "grade_id": "cell-779badbcfbbc934e", "locked": true, "schema_version": 1, "solution": false} # #### Q3.d) # # Now, let's use scikit-learn to get to a 
similar matrix and relevance numbers. Load the full processed dataset: # + deletable=false editable=false nbgrader={"grade": false, "grade_id": "cell-40fe043ee73e9a82", "locked": true, "schema_version": 1, "solution": false} # Load the dataset df_preprocessed = pd.read_csv('data/imdb_sentiment_processed.csv') # Get the processed text docs = df_preprocessed['text'] # + [markdown] deletable=false editable=false nbgrader={"grade": false, "grade_id": "cell-61ec1d1af055ee98", "locked": true, "schema_version": 1, "solution": false} # Start by transforming your documents into a matrix of tf-idf scores using sklearn. Make use of the `CountVectorizer` and the `TfidfTransformer` provided by scikitlearn. Implement a function that provided with a list of documents returns the word term frequency matrix and the corresponding vocabulary: # + deletable=false nbgrader={"grade": false, "grade_id": "cell-8300a0a7bebf4efd", "locked": false, "schema_version": 1, "solution": true} def build_word_term_frequency_matrix(docs): """ Returns the matrix of word and tf-idf scores Args: docs - list of documents in dataset """ # vectorizer = ... # word_count_matrix = ... # vocabulary = ... # YOUR CODE HERE raise NotImplementedError() # tfidf = ... # word_term_frequency_matrix = ... # YOUR CODE HERE raise NotImplementedError() return (word_term_frequency_matrix, vocabulary) # + [markdown] deletable=false editable=false nbgrader={"grade": false, "grade_id": "cell-2b548aa7912824b9", "locked": true, "schema_version": 1, "solution": false} # Now get the corresponding string of the most important word of this document (with index `321`) according to TF-IDF. 
# + deletable=false editable=false nbgrader={"grade": true, "grade_id": "cell-d00746278a646fe0", "locked": true, "points": 2, "schema_version": 1, "solution": false} index = 321 word_term_frequency_matrix, vocabulary = build_word_term_frequency_matrix(docs) max_word_idx = word_term_frequency_matrix[index].argmax() inv_vocab = {v: k for k, v in vocabulary.items()} most_relevant_word = inv_vocab[max_word_idx] assert(most_relevant_word == 'dull') # + [markdown] deletable=false editable=false nbgrader={"grade": false, "grade_id": "cell-ab54cc6297c36ee2", "locked": true, "schema_version": 1, "solution": false} # #### Q3.e) # # Finally, let's try to classify the sentiment of these movie reviews. # # Build a Pipeline to classify a review as positive or negative. Use `MultinomialNB` as your final classifier, train it and get an accuracy score above 86% on the imdb validation dataset, by choosing the best set of parameters of `CountVectorizer()` and `TfidfTransformer()`, according to what we learned in Part III. # # Hint: Try to use more than unigrams! Also, remember what we said about stopwords and feature space size in Part III of the Learning Notebooks? # + deletable=false editable=false nbgrader={"grade": false, "grade_id": "cell-ca9cc0a788f3d721", "locked": true, "schema_version": 1, "solution": false} # Split in train and validation train_df, validation_df = train_test_split(df_preprocessed, test_size=0.3, random_state=42) # Encode the labels le = preprocessing.LabelEncoder() le.fit(train_df['sentiment'].values) train_df['sentiment'] = le.transform(train_df['sentiment'].values) validation_df['sentiment'] = le.transform(validation_df['sentiment'].values) # + deletable=false nbgrader={"grade": false, "grade_id": "cell-e0712ceb628e6ed6", "locked": false, "schema_version": 1, "solution": true} def train_and_validate(train_df, validation_df): """ Train a model using sklearn's Pipeline and return it along with its current accuracy in the validation set. 
Assume the documents are already preprocessed Args: train_df - dataframe with training docs validation_df - dataframe with validation docs """ # Build the pipeline # text_clf = Pipeline(...) # Train the classifier # (...) # predicted = (...) # acc = (...) # return text_clf, acc # YOUR CODE HERE raise NotImplementedError() # + deletable=false editable=false nbgrader={"grade": true, "grade_id": "cell-07b6f3694941ac81", "locked": true, "points": 2, "schema_version": 1, "solution": false} _, acc = train_and_validate(train_df, validation_df) print("Accuracy: {}".format(acc)) assert(acc >= 0.86)
S04 - Text Classification/BLU07 - Feature Extraction/Exercise notebook.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# + [markdown] deletable=true editable=true
# ### Playing with arrays

# + deletable=true editable=true
import numpy as np

# Scalar multiplication is elementwise and commutative for both 1-D and 2-D arrays.
vec = np.array([1, 2, 3])
mat = np.array([[1, 2, 3], [4, 5, 6]])
print(vec, '\n', vec*2, '\n', 2*vec)
print(mat, '\n', mat*2, '\n', 2*mat)

# Matrix-vector product: (2, 3) . (3,) -> (2,)
prod = mat.dot(vec)
print(prod)
print(vec.shape, mat.shape, prod.shape)
# -

# ### Magic equal operator

# + deletable=true editable=true
# Comparing a (3,) array against a (3, 1) column broadcasts to a (3, 3)
# boolean table of pairwise equality.
np.array([1,2,3]) == np.array([1,2,3])[:,None]
# -
notebooks/playground.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# +
import numpy as np
import matplotlib.pyplot as plt
import sklearn
import sklearn.datasets
from init_utils import sigmoid, relu, compute_loss, forward_propagation, backward_propagation
from init_utils import update_parameters, predict, load_dataset, plot_decision_boundary, predict_dec

# %matplotlib inline
plt.rcParams['figure.figsize'] = (7.0, 4.0)
plt.rcParams['image.interpolation'] = 'nearest'
plt.rcParams['image.cmap'] = 'gray'

train_X, train_Y, test_X, test_Y = load_dataset()
# -

def model(X, Y, learning_rate = 0.01, num_iterations = 15000, print_cost = True, initialization = "he"):
    """
    Implements a three-layer neural network: LINEAR->RELU->LINEAR->RELU->LINEAR->SIGMOID.

    Arguments:
    X -- input data, of shape (2, number of examples)
    Y -- true "label" vector (containing 0 for red dots; 1 for blue dots), of shape (1, number of examples)
    learning_rate -- learning rate for gradient descent
    num_iterations -- number of iterations to run gradient descent
    print_cost -- if True, print the cost every 1000 iterations
    initialization -- flag to choose which initialization to use ("zeros","random" or "he")

    Returns:
    parameters -- parameters learnt by the model

    Raises:
    ValueError -- if `initialization` is not one of the three supported flags
                  (previously this fell through and crashed later with NameError).
    """
    costs = []          # loss recorded once every 1000 iterations
    layers_dims = [X.shape[0], 10, 5, 1]

    # Initialize parameters dictionary.
    if initialization == "zeros":
        parameters = initialize_parameters_zeros(layers_dims)
    elif initialization == "random":
        parameters = initialize_parameters_random(layers_dims)
    elif initialization == "he":
        parameters = initialize_parameters_he(layers_dims)
    else:
        # BUG FIX: an unknown flag used to leave `parameters` undefined.
        raise ValueError('initialization must be "zeros", "random" or "he", got: ' + str(initialization))

    # Loop (gradient descent)
    for i in range(0, num_iterations):
        # Forward propagation: LINEAR -> RELU -> LINEAR -> RELU -> LINEAR -> SIGMOID.
        a3, cache = forward_propagation(X, parameters)

        # Loss
        cost = compute_loss(a3, Y)

        # Backward propagation.
        grads = backward_propagation(X, Y, cache)

        # Update parameters.
        parameters = update_parameters(parameters, grads, learning_rate)

        # Print/record the loss every 1000 iterations
        if print_cost and i % 1000 == 0:
            print("Cost after iteration {}: {}".format(i, cost))
            costs.append(cost)

    # plot the loss
    plt.plot(costs)
    plt.ylabel('cost')
    # BUG FIX: costs are sampled every 1000 iterations (see the loop above),
    # so the x axis counts thousands, not hundreds.
    plt.xlabel('iterations (per thousands)')
    plt.title("Learning rate =" + str(learning_rate))
    plt.show()

    return parameters


def initialize_parameters_zeros(layers_dims):
    """
    Initialize every weight matrix and bias vector to zeros.

    Arguments:
    layers_dims -- list of layer sizes, input layer included

    Returns:
    parameters -- dict with keys "W1", "b1", ..., "W{L-1}", "b{L-1}"

    Zero weights fail to break symmetry: every unit in a layer computes the
    same function, so the network cannot learn anything useful -- this
    initializer exists to demonstrate that failure mode.
    """
    parameters = {}
    L = len(layers_dims)
    for l in range(1, L):
        parameters['W' + str(l)] = np.zeros((layers_dims[l], layers_dims[l - 1]))
        parameters['b' + str(l)] = np.zeros((layers_dims[l], 1))
    return parameters


parameters = initialize_parameters_zeros([3,2,1])
print("W1 = " + str(parameters["W1"]))
print("b1 = " + str(parameters["b1"]))
print("W2 = " + str(parameters["W2"]))
print("b2 = " + str(parameters["b2"]))

parameters = model(train_X, train_Y, initialization = "zeros")
print ("On the train set:")
predictions_train = predict(train_X, train_Y, parameters)
print ("On the test set:")
predictions_test = predict(test_X, test_Y, parameters)

print ("predictions_train = " + str(predictions_train))
print ("predictions_test = " + str(predictions_test))

plt.title("Model with Zeros initialization")
axes = plt.gca()
axes.set_xlim([-1.5,1.5])
axes.set_ylim([-1.5,1.5])
plot_decision_boundary(lambda x: predict_dec(parameters, x.T), train_X, train_Y)


def initialize_parameters_random(layers_dims):
    """
    Initialize weights from N(0, 1) scaled by 10; biases to zeros.

    Arguments:
    layers_dims -- list of layer sizes, input layer included

    Returns:
    parameters -- dict with keys "W1", "b1", ..., "W{L-1}", "b{L-1}"

    The *10 factor is deliberately too large: it demonstrates how oversized
    random weights saturate the sigmoid and slow/ruin training.
    """
    np.random.seed(3)               # fixed seed so the demo is reproducible
    parameters = {}
    L = len(layers_dims)
    for l in range(1, L):
        parameters['W' + str(l)] = np.random.randn(layers_dims[l], layers_dims[l - 1]) * 10
        parameters['b' + str(l)] = np.zeros((layers_dims[l], 1))
    return parameters


parameters = initialize_parameters_random([3, 2, 1])
print("W1 = " + str(parameters["W1"]))
print("b1 = " + str(parameters["b1"]))
print("W2 = " + str(parameters["W2"]))
print("b2 = " + str(parameters["b2"]))

parameters = model(train_X, train_Y, initialization = "random")
print ("On the train set:")
predictions_train = predict(train_X, train_Y, parameters)
print ("On the test set:")
predictions_test = predict(test_X, test_Y, parameters)

print (predictions_train)
print (predictions_test)

plt.title("Model with large random initialization")
axes = plt.gca()
axes.set_xlim([-1.5,1.5])
axes.set_ylim([-1.5,1.5])
plot_decision_boundary(lambda x: predict_dec(parameters, x.T), train_X, train_Y)


def initialize_parameters_he(layers_dims):
    """
    He initialization: weights from N(0, 1) scaled by sqrt(2 / fan_in); biases zeros.

    Arguments:
    layers_dims -- list of layer sizes, input layer included

    Returns:
    parameters -- dict with keys "W1", "b1", ..., "W{L-1}", "b{L-1}"

    The sqrt(2 / fan_in) scaling keeps activation variance roughly constant
    across ReLU layers, which is why this is the recommended initializer here.
    """
    np.random.seed(3)               # fixed seed so the demo is reproducible
    parameters = {}
    L = len(layers_dims)
    for l in range(1, L):
        parameters['W' + str(l)] = np.random.randn(layers_dims[l], layers_dims[l - 1]) * np.sqrt(2./layers_dims[l-1])
        parameters['b' + str(l)] = np.zeros((layers_dims[l], 1))
    return parameters


parameters = initialize_parameters_he([2, 4, 1])
print("W1 = " + str(parameters["W1"]))
print("b1 = " + str(parameters["b1"]))
print("W2 = " + str(parameters["W2"]))
print("b2 = " + str(parameters["b2"]))

parameters = model(train_X, train_Y, initialization = "he")
print ("On the train set:")
predictions_train = predict(train_X, train_Y, parameters)
print ("On the test set:")
predictions_test = predict(test_X, test_Y, parameters)

plt.title("Model with He initialization")
axes = plt.gca()
axes.set_xlim([-1.5,1.5])
axes.set_ylim([-1.5,1.5])
plot_decision_boundary(lambda x: predict_dec(parameters, x.T), train_X, train_Y)
2course week1/.ipynb_checkpoints/main-checkpoint.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3 (ipykernel)
#     language: python
#     name: python3
# ---

import pandas as pd
from sklearn.svm import SVC
from sklearn.model_selection import GridSearchCV
from sklearn.metrics import classification_report, confusion_matrix
from sklearn.model_selection import train_test_split
import time
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
from sklearn.ensemble import RandomForestClassifier

# Load the preprocessed IoT-23 dataset; the CSV carries a stale positional
# index column ('Unnamed: 0') that is dropped right away.
filepath = "../Data Preprocessing/iot23_combined.csv"
df = pd.read_csv(filepath)
del df['Unnamed: 0']
df

# Class distribution of the target label.
df['label'].value_counts()

# FIX: seed the forest so results are reproducible run-to-run, consistent with
# the seeded train/test split below (previously unseeded -> different scores
# on every execution).
RF = RandomForestClassifier(random_state=10)

# Features: flow statistics plus one-hot encoded protocol and connection state
# columns produced in the preprocessing step.
X = df[['duration', 'orig_bytes', 'resp_bytes', 'missed_bytes', 'orig_pkts',
        'orig_ip_bytes', 'resp_pkts', 'resp_ip_bytes', 'proto_icmp', 'proto_tcp',
        'proto_udp', 'conn_state_OTH', 'conn_state_REJ', 'conn_state_RSTO',
        'conn_state_RSTOS0', 'conn_state_RSTR', 'conn_state_RSTRH', 'conn_state_S0',
        'conn_state_S1', 'conn_state_S2', 'conn_state_S3', 'conn_state_SF',
        'conn_state_SH', 'conn_state_SHR']]
Y = df['label']

# Hold out 20% for evaluation; the fixed seed keeps the split stable.
X_train, X_test, Y_train, Y_test = train_test_split(X, Y, random_state=10, test_size=0.2)

# +
start = time.time()
print('program start...')
print()

# Fit on the training split.
RF.fit(X_train, Y_train)

print()
print('prediction:')
y_pred = RF.predict(X_test)
print(y_pred)
print()
print('Score:')
score = RF.score(X_test, Y_test)  # mean accuracy on the held-out split
print(score)

end = time.time()
print('program end...')
print()
print('time cost: ')
print(end - start, 'seconds')
# -

# Per-class precision/recall/F1 on the held-out split.
# FIX: corrected misspelled output string ("Classifiction" -> "Classification").
print("Classification Report :")
print(classification_report(Y_test, y_pred))
Models/Random Forest.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Guided Project 1 # **Learning Objectives:** # # * Learn how to generate a standard TFX template pipeline using `tfx template` # * Learn how to modify and run a templated TFX pipeline # **Note:** This guided project is adapted from [Create a TFX pipeline using templates](https://www.tensorflow.org/tfx/tutorials/tfx/template)). import os # ## Step 1. Environment setup # ### `skaffold` tool setup # PATH=%env PATH # %env PATH={PATH}:/home/jupyter/.local/bin # + language="bash" # # LOCAL_BIN="/home/jupyter/.local/bin" # SKAFFOLD_URI="https://storage.googleapis.com/skaffold/releases/latest/skaffold-linux-amd64" # # test -d $LOCAL_BIN || mkdir -p $LOCAL_BIN # # which skaffold || ( # curl -Lo skaffold $SKAFFOLD_URI && # chmod +x skaffold && # mv skaffold $LOCAL_BIN # ) # - # Modify the `PATH` environment variable so that `skaffold` is available: # At this point, you shoud see the `skaffold` tool with the command `which`: # !which skaffold # ### Environment variable setup # In AI Platform Pipelines, TFX is running in a hosted Kubernetes environment using [Kubeflow Pipelines](https://www.kubeflow.org/docs/pipelines/overview/pipelines-overview/). # # Let's set some environment variables to use Kubeflow Pipelines. # # First, get your GCP project ID. # + # shell_output=!gcloud config list --format 'value(core.project)' 2>/dev/null GOOGLE_CLOUD_PROJECT=shell_output[0] # %env GOOGLE_CLOUD_PROJECT={GOOGLE_CLOUD_PROJECT} # - # We also need to access your KFP cluster. You can access it in your Google Cloud Console under "AI Platform > Pipeline" menu. # # The "endpoint" of the KFP cluster can be found from the URL of the Pipelines dashboard, # or you can get it from the URL of the Getting Started page where you launched this notebook. 
# # Let's create an ENDPOINT environment variable and set it to the KFP cluster endpoint. # # ENDPOINT should contain only the hostname part of the URL. # For example, if the URL of the KFP dashboard is # # <a href="https://1e9deb537390ca22-dot-asia-east1.pipelines.googleusercontent.com/#/start">https://1e9deb537390ca22-dot-asia-east1.pipelines.googleusercontent.com/#/start</a>, # # ENDPOINT value becomes 1e9deb537390ca22-dot-asia-east1.pipelines.googleusercontent.com. ENDPOINT = # Enter your ENDPOINT here. # Set the image name as tfx-pipeline under the current GCP project: # Docker image name for the pipeline image. CUSTOM_TFX_IMAGE = 'gcr.io/' + GOOGLE_CLOUD_PROJECT + '/tfx-pipeline' CUSTOM_TFX_IMAGE # ## Step 2. Copy the predefined template to your project directory. # In this step, we will create a working pipeline project directory and # files by copying additional files from a predefined template. # # You may give your pipeline a different name by changing the PIPELINE_NAME below. # # This will also become the name of the project directory where your files will be put. PIPELINE_NAME = "guided_project_1" PROJECT_DIR = os.path.join(os.path.expanduser("."), PIPELINE_NAME) PROJECT_DIR # TFX includes the taxi template with the TFX python package. # # If you are planning to solve a point-wise prediction problem, # including classification and regresssion, this template could be used as a starting point. # # The `tfx template copy` CLI command copies predefined template files into your project directory. # !tfx template copy \ # --pipeline-name={PIPELINE_NAME} \ # --destination-path={PROJECT_DIR} \ # --model=taxi # %cd {PROJECT_DIR} # ### Step 3. Browse your copied source files # The TFX template provides basic scaffold files to build a pipeline, including Python source code, # sample data, and Jupyter Notebooks to analyse the output of the pipeline. # # The `taxi` template uses the Chicago Taxi dataset. 
# # Here is brief introduction to each of the Python files: # # `pipeline` - This directory contains the definition of the pipeline # * `configs.py` — defines common constants for pipeline runners # * `pipeline.py` — defines TFX components and a pipeline # # `models` - This directory contains ML model definitions. # * `features.py`, `features_test.py` — defines features for the model # * `preprocessing.py`, `preprocessing_test.py` — defines preprocessing jobs using tf::Transform # # `models/estimator` - This directory contains an Estimator based model. # * `constants.py` — defines constants of the model # * `model.py`, `model_test.py` — defines DNN model using TF estimator # # `models/keras` - This directory contains a Keras based model. # * `constants.py` — defines constants of the model # * `model.py`, `model_test.py` — defines DNN model using Keras # # `beam_dag_runner.py`, `kubeflow_dag_runner.py` — define runners for each orchestration engine # # # **Running the tests:** # You might notice that there are some files with `_test.py` in their name. # These are unit tests of the pipeline and it is recommended to add more unit # tests as you implement your own pipelines. # You can run unit tests by supplying the module name of test files with `-m` flag. # You can usually get a module name by deleting `.py` extension and replacing `/` with `..` # # For example: # !python -m models.features_test # !python -m models.keras.model_test # Let's quickly go over the structure of a test file to test Tensorflow code: # !tail -26 models/features_test.py # First of all, notice that you start by importing the code you want to test by importing the corresponding module. 
Here we want to test the code in `features.py` so we import the module `features`:
# ```python
# from models import features
# ```
# To implement test cases start by defining your own test class inheriting from `tf.test.TestCase`:
# ```python
# class FeaturesTest(tf.test.TestCase):
# ```
# When you execute the test file with
# ```bash
# python -m models.features_test
# ```
# the main method
# ```python
# tf.test.main()
# ```
# will parse your test class (here: `FeaturesTest`) and execute every method whose name starts by `test`. Here we have two such methods for instance:
# ```python
# def testNumberOfBucketFeatureBucketCount(self):
# def testTransformedNames(self):
# ```
# So when you want to add a test case, just add a method to that test class whose name starts by `test`. Now inside the body of these test methods is where the actual testing takes place. In this case for instance, `testTransformedNames` tests the function `features.transformed_name` and makes sure it outputs what is expected.
# Since your test class inherits from `tf.test.TestCase` it has a number of helper methods you can use to help you create tests, as for instance
# ```python
# self.assertEqual(expected_outputs, obtained_outputs)
# ```
# that will fail the test case if `obtained_outputs` do not match the `expected_outputs`.
#
#
# Typical examples of test case you may want to implement for machine learning code would comprise tests ensuring that your model builds correctly, your preprocessing function preprocesses raw data as expected, or that your model can train successfully on a few mock examples. When writing tests make sure that their execution is fast (we just want to check that the code works not actually train a performant model when testing). For that you may have to create synthetic data in your test files.
For more information, read the [tf.test.TestCase documentation](https://www.tensorflow.org/api_docs/python/tf/test/TestCase) and the [Tensorflow testing best practices](https://www.tensorflow.org/community/contribute/tests). # # ## Step 4. Run your first TFX pipeline # Components in the TFX pipeline will generate outputs for each run as # [ML Metadata Artifacts](https://www.tensorflow.org/tfx/guide/mlmd), and they need to be stored somewhere. # You can use any storage which the KFP cluster can access, and for this example we # will use Google Cloud Storage (GCS). # # Let us create this bucket. Its name will be `<YOUR_PROJECT>-kubeflowpipelines-default`. GCS_BUCKET_NAME = GOOGLE_CLOUD_PROJECT + '-kubeflowpipelines-default' GCS_BUCKET_NAME # !gsutil mb gs://{GCS_BUCKET_NAME} # Let's upload our sample data to GCS bucket so that we can use it in our pipeline later. # !gsutil cp data/data.csv gs://{GCS_BUCKET_NAME}/tfx-template/data/data.csv # Let's create a TFX pipeline using the `tfx pipeline create` command. # # **Note:** When creating a pipeline for KFP, we need a container image which will # be used to run our pipeline. And skaffold will build the image for us. Because `skaffold` # pulls base images from the docker hub, it will take 5~10 minutes when we build # the image for the first time, but it will take much less time from the second build. # !tfx pipeline create \ # --pipeline-path=kubeflow_dag_runner.py \ # --endpoint={ENDPOINT} \ # --build-target-image={CUSTOM_TFX_IMAGE} # While creating a pipeline, `Dockerfile` and `build.yaml` will be generated to build a Docker image. # # Don't forget to add these files to the source control system (for example, git) along with other source files. # # A pipeline definition file for [argo](https://argoproj.github.io/argo/) will be generated, too. # The name of this file is `${PIPELINE_NAME}.tar.gz.` # For example, it will be `guided_project_1.tar.gz` if the name of your pipeline is `guided_project_1`. 
# It is recommended NOT to include this pipeline definition file into source control, because it will be generated from other Python files and will be updated whenever you update the pipeline. For your convenience, this file is already listed in `.gitignore` which is generated automatically. # Now start an execution run with the newly created pipeline using the `tfx run create` command. # # **Note:** You may see the following error `Error importing tfx_bsl_extension.coders.` Please ignore it. # **Debugging tip:** If your pipeline run fails, you can see detailed logs for each TFX component in the Experiments tab in the KFP Dashboard. One of the major sources of failure is **permission related problems**. # Please make sure your KFP cluster has permissions to access Google Cloud APIs. # This can be configured [when you create a KFP cluster in GCP](https://cloud.google.com/ai-platform/pipelines/docs/setting-up), # or see [Troubleshooting document in GCP](https://cloud.google.com/ai-platform/pipelines/docs/troubleshooting). # !tfx run create --pipeline-name={PIPELINE_NAME} --endpoint={ENDPOINT} # Or, you can also run the pipeline in the KFP Dashboard. The new execution run will be listed # under Experiments in the KFP Dashboard. # Clicking into the experiment will allow you to monitor progress and visualize # the artifacts created during the execution run. # # However, we recommend visiting the KFP Dashboard. You can access the KFP Dashboard from # the Cloud AI Platform Pipelines menu in Google Cloud Console. Once you visit the dashboard, # you will be able to find the pipeline, and access a wealth of information about the pipeline. # For example, you can find your runs under the Experiments menu, and when you open your # execution run under Experiments you can find all your artifacts from the pipeline under Artifacts menu. # ## Step 5. Add components for data validation. 
# In this step, you will add components for data validation including `StatisticsGen`, `SchemaGen`, and `ExampleValidator`. # If you are interested in data validation, please see # [Get started with Tensorflow Data Validation](https://www.tensorflow.org/tfx/data_validation/get_started). # **Double-click to change directory to pipeline and double-click again to open** `pipeline.py`. # Find and uncomment the 3 lines which add `StatisticsGen`, `SchemaGen`, and `ExampleValidator` to the pipeline. # (Tip: search for comments containing TODO(step 5):). Make sure to save `pipeline.py` after you edit it. # You now need to update the existing pipeline with modified pipeline definition. Use the `tfx pipeline update` command to update your pipeline, followed by the `tfx run create` command to create a new execution run of your updated pipeline. # Update the pipeline # !tfx pipeline update \ # --pipeline-path=kubeflow_dag_runner.py \ # --endpoint={ENDPOINT} # You can run the pipeline the same way. # !tfx run create --pipeline-name {PIPELINE_NAME} --endpoint={ENDPOINT} # ### Check pipeline outputs # # Visit the KFP dashboard to find pipeline outputs in the page for your pipeline run. Click the Experiments tab on the left, and All runs in the Experiments page. You should be able to find the latest run under the name of your pipeline. # # See link below to access the dashboard: print('https://' + ENDPOINT) # ## Step 6. Add components for training # In this step, you will add components for training and model validation including `Transform`, `Trainer`, `ResolverNode`, `Evaluator`, and `Pusher`. # **Double-click to open** `pipeline.py`. Find and uncomment the 5 lines which add `Transform`, `Trainer`, `ResolverNode`, `Evaluator` and `Pusher` to the pipeline. (Tip: search for TODO(step 6):) # As you did before, you now need to update the existing pipeline with the modified pipeline definition. The instructions are the same as Step 5. 
Update the pipeline using `tfx pipeline update`, and create an execution run using `tfx run create`. # Verify that the pipeline DAG has changed accordingly in the Kubeflow UI: # !tfx pipeline update \ # --pipeline-path=kubeflow_dag_runner.py \ # --endpoint={ENDPOINT} print("https://" + ENDPOINT) # !tfx run create --pipeline-name {PIPELINE_NAME} --endpoint={ENDPOINT} # When this execution run finishes successfully, you have now created and run your first TFX pipeline in AI Platform Pipelines! # ## Step 7. Try BigQueryExampleGen # [BigQuery](https://cloud.google.com/bigquery) is a serverless, highly scalable, and cost-effective cloud data warehouse. # `BigQuery` can be used as a source for training examples in TFX. In this step, we will add `BigQueryExampleGen` to the pipeline. # **Double-click to open** `pipeline.py`. Comment out `CsvExampleGen` and uncomment the line which creates an instance of `BigQueryExampleGen`. You also need to uncomment the query argument of the `create_pipeline` function. # We need to specify which GCP project to use for `BigQuery`, and this is done by setting `--project` in `beam_pipeline_args` when creating a pipeline. # **Double-click to open** `configs.py`. Uncomment the definition of `GOOGLE_CLOUD_REGION`, `BIG_QUERY_WITH_DIRECT_RUNNER_BEAM_PIPELINE_ARGS` and `BIG_QUERY_QUERY`. You should replace the region value in this file with the correct values for your GCP project. # **Note:** You MUST set your GCP region in the `configs.py` file before proceeding # **Change directory one level up.** Click the name of the directory above the file list. The name of the directory is the name of the pipeline which is `guided_project_1` if you didn't change. # **Double-click to open** `kubeflow_dag_runner.py`. Uncomment two arguments, `query` and `beam_pipeline_args`, for the `create_pipeline` function. # Now the pipeline is ready to use `BigQuery` as an example source. 
Update the pipeline as before and create a new execution run as we did in step 5 and 6. # !tfx pipeline update \ # --pipeline-path=kubeflow_dag_runner.py \ # --endpoint={ENDPOINT} # !tfx run create --pipeline-name {PIPELINE_NAME} --endpoint={ENDPOINT} # ## Step 8. Try Dataflow with KFP # Several [TFX Components uses Apache Beam](https://www.tensorflow.org/tfx/guide/beam) to implement data-parallel pipelines, and it means that you can distribute data processing workloads using [Google Cloud Dataflow](https://cloud.google.com/dataflow/). In this step, we will set the Kubeflow orchestrator to use dataflow as the data processing back-end for Apache Beam. # **Double-click pipeline to change directory, and double-click to open** `configs.py`. Uncomment the definition of `GOOGLE_CLOUD_REGION`, and `DATAFLOW_BEAM_PIPELINE_ARGS`. # **Double-click to open** `pipeline.py`. Change the value of enable_cache to False. # **Change directory one level up.** Click the name of the directory above the file list. The name of the directory is the name of the pipeline which is `guided_project_1` if you didn't change. # **Double-click to open** `kubeflow_dag_runner.py`. Uncomment `beam_pipeline_args`. (Also make sure to comment out current `beam_pipeline_arg`s that you added in Step 7.) # Note that we deliberately disabled caching. Because we have already run the pipeline successfully, we will get cached execution result for all components if cache is enabled. # Now the pipeline is ready to use Dataflow. Update the pipeline and create an execution run as we did in step 5 and 6. # # # !tfx pipeline update \ # --pipeline-path=kubeflow_dag_runner.py \ # --endpoint={ENDPOINT} # !tfx run create --pipeline-name {PIPELINE_NAME} --endpoint={ENDPOINT} # You can find your Dataflow jobs in [Dataflow in Cloud Console](http://console.cloud.google.com/dataflow). # Please reset `enable_cache` to `True` to benefit from caching execution results. # # # **Double-click to open** `pipeline.py`. 
Reset the value of enable_cache to True. # # # ## Step 9. Try Cloud AI Platform Training and Prediction with KFP # TFX interoperates with several managed GCP services, such as [Cloud AI Platform for Training and Prediction](https://cloud.google.com/ai-platform/). You can set your `Trainer` component to use Cloud AI Platform Training, a managed service for training ML models. Moreover, when your model is built and ready to be served, you can push your model to Cloud AI Platform Prediction for serving. In this step, we will set our `Trainer` and `Pusher` component to use Cloud AI Platform services. # Before editing files, you might first have to enable AI Platform Training & Prediction API. # # **Double-click pipeline to change directory, and double-click to open** `configs.py`. Uncomment the definition of `GOOGLE_CLOUD_REGION`, `GCP_AI_PLATFORM_TRAINING_ARGS` and `GCP_AI_PLATFORM_SERVING_ARGS`. We will use our custom built container image to train a model in Cloud AI Platform Training, so we should set `masterConfig.imageUri` in `GCP_AI_PLATFORM_TRAINING_ARGS` to the same value as `CUSTOM_TFX_IMAGE` above. # **Change directory one level up, and double-click to open** `kubeflow_dag_runner.py`. Uncomment `ai_platform_training_args` and `ai_platform_serving_args`. # Update the pipeline and create an execution run as we did in step 5 and 6. # !tfx pipeline update \ # --pipeline-path=kubeflow_dag_runner.py \ # --endpoint={ENDPOINT} # !tfx run create --pipeline-name {PIPELINE_NAME} --endpoint={ENDPOINT} # You can find your training jobs in [Cloud AI Platform Jobs](https://console.cloud.google.com/ai-platform/jobs). If your pipeline completed successfully, you can find your model in [Cloud AI Platform Models](https://console.cloud.google.com/ai-platform/models). # ## License # # <font size=-1>Licensed under the Apache License, Version 2.0 (the \"License\"); # you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at [https://www.apache.org/licenses/LICENSE-2.0](https://www.apache.org/licenses/LICENSE-2.0) # # Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.</font>
immersion/guided_projects/guided_project_1.ipynb
# --- # jupyter: # jupytext: # cell_metadata_filter: -all # formats: ipynb # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Introduction # # # In this lesson we're going to see how we can build neural networks capable of learning the complex kinds of relationships deep neural nets are famous for. # # The key idea here is *modularity*, building up a complex network from simpler functional units. We've seen how a linear unit computes a linear function -- now we'll see how to combine and modify these single units to model more complex relationships. # # # Layers # # # Neural networks typically organize their neurons into **layers**. When we collect together linear units having a common set of inputs we get a **dense** layer. # # <figure style="padding: 1em;"> # <img src="https://i.imgur.com/2MA4iMV.png" width="300" alt="A stack of three circles in an input layer connected to two circles in a dense layer."> # <figcaption style="textalign: center; font-style: italic"><center>A dense layer of two linear units receiving two inputs and a bias. # </center></figcaption> # </figure> # # You could think of each layer in a neural network as performing some kind of relatively simple transformation. Through a deep stack of layers, a neural network can transform its inputs in more and more complex ways. In a well-trained neural network, each layer is a transformation getting us a little bit closer to a solution. # # <blockquote style="margin-right:auto; margin-left:auto; background-color: #ebf9ff; padding: 1em; margin:24px;"> # <strong>Many Kinds of Layers</strong><br> # A "layer" in Keras is a very general kind of thing. A layer can be, essentially, any kind of <em>data transformation</em>. 
Many layers, like the <a href="https://www.tensorflow.org/api_docs/python/tf/keras/layers/Conv2D">convolutional</a> and <a href="https://www.tensorflow.org/api_docs/python/tf/keras/layers/RNN">recurrent</a> layers, transform data through use of neurons and differ primarily in the pattern of connections they form. Others though are used for <a href="https://www.tensorflow.org/api_docs/python/tf/keras/layers/Embedding">feature engineering</a> or just <a href="https://www.tensorflow.org/api_docs/python/tf/keras/layers/Add">simple arithmetic</a>. There's a whole world of layers to discover -- <a href="https://www.tensorflow.org/api_docs/python/tf/keras/layers">check them out</a>! # </blockquote> # # # The Activation Function # # # It turns out, however, that two dense layers with nothing in between are no better than a single dense layer by itself. Dense layers by themselves can never move us out of the world of lines and planes. What we need is something *nonlinear*. What we need are activation functions. # # <figure style="padding: 1em;"> # <img src="https://i.imgur.com/OLSUEYT.png" width="400" alt=" "> # <figcaption style="textalign: center; font-style: italic"><center>Without activation functions, neural networks can only learn linear relationships. In order to fit curves, we'll need to use activation functions. # </center></figcaption> # </figure> # # An **activation function** is simply some function we apply to each of a layer's outputs (its *activations*). The most common is the *rectifier* function $max(0, x)$. # # <figure style="padding: 1em;"> # <img src="https://i.imgur.com/aeIyAlF.png" width="400" alt="A graph of the rectifier function. The line y=x when x>0 and y=0 when x<0, making a 'hinge' shape like '_/'."> # <figcaption style="textalign: center; font-style: italic"><center> # </center></figcaption> # </figure> # # The rectifier function has a graph that's a line with the negative part "rectified" to zero. 
Applying the function to the outputs of a neuron will put a *bend* in the data, moving us away from simple lines. # # When we attach the rectifier to a linear unit, we get a **rectified linear unit** or **ReLU**. (For this reason, it's common to call the rectifier function the "ReLU function".) Applying a ReLU activation to a linear unit means the output becomes `max(0, w * x + b)`, which we might draw in a diagram like: # # <figure style="padding: 1em;"> # <img src="https://i.imgur.com/eFry7Yu.png" width="250" alt="Diagram of a single ReLU. Like a linear unit, but instead of a '+' symbol we now have a hinge '_/'. "> # <figcaption style="textalign: center; font-style: italic"><center>A rectified linear unit. # </center></figcaption> # </figure> # # Stacking Dense Layers # # # Now that we have some nonlinearity, let's see how we can stack layers to get complex data transformations. # # <figure style="padding: 1em;"> # <img src="https://i.imgur.com/Y5iwFQZ.png" width="450" alt="An input layer, two hidden layers, and a final linear layer."> # <figcaption style="textalign: center; font-style: italic"><center>A stack of dense layers makes a "fully-connected" network. # </center></figcaption> # </figure> # # The layers before the output layer are sometimes called **hidden** since we never see their outputs directly. # # Now, notice that the final (output) layer is a linear unit (meaning, no activation function). That makes this network appropriate to a regression task, where we are trying to predict some arbitrary numeric value. Other tasks (like classification) might require an activation function on the output. # # ## Building Sequential Models ## # # The `Sequential` model we've been using will connect together a list of layers in order from first to last: the first layer gets the input, the last layer produces the output. 
This creates the model in the figure above: # + from tensorflow import keras from tensorflow.keras import layers model = keras.Sequential([ # the hidden ReLU layers layers.Dense(units=4, activation='relu', input_shape=[2]), layers.Dense(units=3, activation='relu'), # the linear output layer layers.Dense(units=1), ]) # - # Be sure to pass all the layers together in a list, like `[layer, layer, layer, ...]`, instead of as separate arguments. To add an activation function to a layer, just give its name in the `activation` argument. # # # Your Turn # # # Now, [**create a deep neural network**](#$NEXT_NOTEBOOK_URL$) for the *Concrete* dataset.
notebooks/deep_learning_intro/raw/tut2.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3 (ipykernel)
#     language: python
#     name: python3
# ---

# +
# %matplotlib inline
import numpy as np
import scipy.linalg
import scipy.sparse.linalg
import matplotlib.pyplot as plt
import time
from numba import jit
# -


# Build the combinatorial lookup table shared by all basis <-> index
# converters below.
@jit(nopython=True)
def build_pascal(lc,nc):
    """Return (cnkc, jmax).

    cnkc is an (lc, nc) int64 table built by Pascal-triangle-style partial
    sums over the previous row; cnkc[lc-1,nc-1] is used throughout this
    notebook as jmax, the total number of many-body basis states.
    lc: number of sites + 1; nc: number of atoms + 1.
    """
    cnkc = np.zeros((lc,nc),dtype=np.int64)
    # first column: a single way to place zero atoms on any number of sites
    for i in range(1,lc+1):
        cnkc[i-1,0] = 1
    # remaining columns start from zero (explicit for clarity under numba)
    for i in range(1,lc+1):
        for j in range(2,nc+1):
            cnkc[i-1,j-1] = 0
    # each entry is the partial sum of the previous row (Pascal recurrence)
    for in1 in range(2,lc+1):
        cnkc[in1-1,1] = np.sum(cnkc[in1-2,0:2])
        if nc-1 > 1:
            for in2 in range(1,nc+1):
                cnkc[in1-1,in2-1] = np.sum(cnkc[in1-2,0:in2])
    return cnkc, cnkc[lc-1,nc-1]

lc = 7 # number of sites + 1
nc = 7 # number of atoms + 1
cnkc, jmax = build_pascal(lc,nc)
print(cnkc)
print(jmax)


# Returns the position ind of the many body state bi
## original code, redundant if clauses
@jit(nopython=True)
def b2in_orig_redundant(bi,ind,cnkc,lc,nc): # basis to index
    """Write into ind[0] the 1-based index of occupation-basis state bi.

    Reference implementation kept for comparison; the inner guards are
    redundant (see b2in_orig / b2in for the streamlined versions).
    bi: per-site occupation numbers (length lc-1); ind: length-1 int64 array
    used as an output parameter (numba-friendly); cnkc: table from build_pascal.
    Always returns 0; the result is delivered through ind[0].
    """
    ind[0] = 1 ## start from 1
    # ind[0] = 0 ## start from 0
    for ind_i in range(1,lc-1):
        for ind_N in range(0,bi[ind_i-1]+1):
            if bi[ind_i-1] - ind_N > 0:
                # suma = number of atoms already placed on sites left of ind_i
                suma = 0
                for k in range(1,ind_i):
                    suma += bi[k-1]
                if lc - ind_i > 0 and nc - ind_N - suma > 0:
                    ind[0] += cnkc[lc-ind_i-1,nc-ind_N-suma-1]
    return 0


# Returns the position ind of the many body state bi
@jit(nopython=True)
def b2in_orig(bi,ind,cnkc,lc,nc): # basis to index
    """Write into ind[0] the 1-based index of occupation-basis state bi.

    Same result as b2in_orig_redundant with the redundant guards removed:
    the ind_N loop only runs while atoms remain on site ind_i.
    """
    ind[0] = 1 ## start from 1
    # ind[0] = 0 ## start from 0
    for ind_i in range(1,lc-1): # site loop
        for ind_N in range(0,bi[ind_i-1]): # atom loop
            # suma = atoms already placed on sites left of ind_i
            suma = 0
            for k in range(1,ind_i):
                suma += bi[k-1]
            if nc - ind_N - suma > 0:
                ind[0] += cnkc[lc-ind_i-1,nc-ind_N-suma-1]
    return 0


# Returns the position ind of the many body state bi
@jit(nopython=True)
def b2in(bi,ind,cnkc,lc,nc): # basis to index
    """Write into ind[0] the 1-based index of occupation-basis state bi.

    Vectorized variant of b2in_orig: the per-atom inner loop is replaced by
    one np.sum over a slice of the cnkc table; num tracks the number of
    atoms not yet accounted for (the test cells below check agreement with
    b2in_orig).
    """
    ind[0] = 1 ## start from 1
    num = nc
    for ind_site in range(1,lc-1): # site loop
        numb = bi[ind_site-1]
        ind[0] += np.sum(cnkc[lc-1-ind_site,num-numb:num])
        #print(ind_site,num-1,numb,cnkc[lc-1-ind_site,num-numb:num])
        num -= numb
    return 0

# +
lc = 7 # number of sites + 1
nc = 7 # number of atoms + 1
cnkc, jmax = build_pascal(lc,nc)
#bi = np.zeros(lc-1,dtype=np.int64)
ind = np.zeros(1,dtype=np.int64)
ind_orig = np.zeros(1,dtype=np.int64)
# cross-check: b2in must agree with the reference b2in_orig
bi = np.array([2,1,1,0,2,0])
b2in_orig(bi,ind_orig,cnkc,lc,nc)
b2in(bi,ind,cnkc,lc,nc)
print(bi,ind_orig[0],ind[0])
bi = np.array([1,0,3,0,2,0])
b2in_orig(bi,ind_orig,cnkc,lc,nc)
b2in(bi,ind,cnkc,lc,nc)
print(bi,ind_orig[0],ind[0])

# +
lc = 7 # number of sites + 1
nc = 7 # number of atoms + 1
cnkc, jmax = build_pascal(lc,nc)
#bi = np.zeros(lc-1,dtype=np.int64)
ind = np.zeros(1,dtype=np.int64)
# spot checks: extreme occupation patterns and two generic states
bi = np.array([0,0,0,0,0,6])
b2in(bi,ind,cnkc,lc,nc)
print(bi,ind[0])
bi = np.array([1,1,1,1,1,1])
b2in(bi,ind,cnkc,lc,nc)
print(bi,ind[0])
bi = np.array([6,0,0,0,0,0])
b2in(bi,ind,cnkc,lc,nc)
print(bi,ind[0])
bi = np.array([1,0,3,0,2,0])
#bi = np.array([0,2,0,3,0,1])
b2in(bi,ind,cnkc,lc,nc)
print(bi,ind[0])
bi = np.array([2,1,1,0,2,0])
#bi = np.array([0,2,0,1,1,2])
b2in(bi,ind,cnkc,lc,nc)
print(bi,ind[0])
# -


# Returns the position ind of the many body state bi_short
@jit(nopython=True)
def bshort2in(bis,ind,cnkc,lc,nc): # basis to index
    """Write into ind[0] the 1-based index of a state in the "short" encoding.

    bis (length nc-1) lists, per atom, the site it occupies — apparently in
    non-decreasing order (see the test cell below) — instead of per-site
    occupation numbers as in b2in. TODO(review): confirm the sortedness
    requirement on bis.
    """
    ind[0] = 1 ## start from 1
    num = 2
    for ind_atom in range(1,nc): # atom loop
        ind_site = bis[ind_atom-1]
        ind[0] += cnkc[ind_site-1,num-1]
        #print(ind_atom,ind_site,num,cnkc[ind_site-1,num-1],ind[0])
        num += 1
    return 0

# +
lc = 7 # number of sites + 1
nc = 7 # number of atoms + 1
cnkc, jmax = build_pascal(lc,nc)
#bis = np.zeros(nc-1,dtype=np.int64)
ind = np.zeros(1,dtype=np.int64)
bis = np.array([2,2,4,5,6,6])
bshort2in(bis,ind,cnkc,lc,nc)
print(bis,ind[0])
# -


# Returns the many body state bi at position ind
@jit(nopython=True)
def in2b(bi,ind,cnkc,lc,nc): # index to basis
    """Inverse of b2in: fill bi with the occupation numbers of the basis
    state at 1-based index ind[0], by greedily descending the cnkc table.
    """
    ind_i = ind[0] - 1 ## ind[0] runs from 1 to jmax=cnkc[ind_lc-1,ind_nc-1]
    bi[:] = 0
    ind_L = lc - 1
    ind_N = nc
    while ind_N > 1: # atom loop
        if ind_i >= cnkc[ind_L-1,ind_N-1]: # condition for site
            # place one atom on the current site and move to the next atom
            ind_i -= cnkc[ind_L-1,ind_N-1]
            bi[lc-ind_L-1] += 1
            ind_N -= 1
        else:
            ind_L -= 1
    return 0

# +
lc = 7 # number of sites + 1
nc = 7 # number of atoms + 1
cnkc, jmax = build_pascal(lc,nc)
bi = np.zeros(lc-1,dtype=np.int64)
ind = np.zeros(1,dtype=np.int64)
ind[0] = 384
in2b(bi,ind,cnkc,lc,nc)
print(bi,ind[0])
# -


# Returns the many body state bi_short at position ind
@jit(nopython=True)
def in2bshort(bis,ind,cnkc,lc,nc): # index to basis short
    """Inverse of bshort2in: fill bis (per-atom site labels) for the state
    at 1-based index ind[0]; same descent as in2b but recording, for each
    atom, the site it lands on.
    """
    ind_i = ind[0] - 1 ## ind[0] runs from 1 to jmax=cnkc[ind_lc-1,ind_nc-1]
    bis[:] = 0
    ind_L = lc - 1
    ind_N = nc
    while ind_N > 1: # atom loop
        if ind_i >= cnkc[ind_L-1,ind_N-1]: # condition for site
            ind_i -= cnkc[ind_L-1,ind_N-1]
            bis[ind_N-2] = ind_L
            ind_N -= 1
        else:
            ind_L -= 1
    return 0

# +
lc = 7 # number of sites + 1
nc = 7 # number of atoms + 1
cnkc, jmax = build_pascal(lc,nc)
bis = np.zeros(nc-1,dtype=np.int64)
ind = np.zeros(1,dtype=np.int64)
ind[0] = 384
in2bshort(bis,ind,cnkc,lc,nc)
print(bis,ind[0])
# -

# +
## np.searchsorted is better?
@jit(nopython=True)
def binary_search_orig(s,list_s,ls,le):
    # Hand-rolled binary search over list_s[ls-1:le] (1-based bounds).
    # NOTE(review): per the comment further below, this version misbehaves
    # when list_s contains duplicates; binary_search (np.searchsorted) below
    # is the replacement. Kept for the comparison cells.
    bmin = ls; bmax = le
    while True:
        b = bmin + (bmax-bmin)//2
        if s < list_s[b-1]:
            bmax = b - 1
        elif list_s[b-1] < s:
            bmin = b + 1
        else:
            bmin = b
            return b, bmin
        if bmin > bmax:
            b = -1
            return b, bmin
    return b, bmin

# searchsorted-based search; returns a 1-based insertion point into
# list_s[ls-1:le]. side follows np.searchsorted semantics.
@jit(nopython=True)
def binary_search(s,list_s,ls,le,side="left"):
    return np.searchsorted(list_s[ls-1:le],s,side=side)+1

# +
# Compare both searches at/around each table value (exact hits, misses below/above).
lc = 7 # number of sites + 1
nc = 7 # number of atoms + 1
cnkc, jmax = build_pascal(lc,nc)
print(cnkc[:,nc-1])
print()
for i in cnkc[:,nc-1]-2:
    b, bmin = binary_search_orig(i,cnkc[:,nc-1],1,lc)
    bmin2 = binary_search(i,cnkc[:,nc-1],1,lc)
    print(i,b,bmin,bmin2)
print()
for i in cnkc[:,nc-1]-1:
    b, bmin = binary_search_orig(i,cnkc[:,nc-1],1,lc)
    bmin2 = binary_search(i,cnkc[:,nc-1],1,lc)
    print(i,b,bmin,bmin2)
print()
for i in cnkc[:,nc-1]:
    b, bmin = binary_search_orig(i,cnkc[:,nc-1],1,lc)
    bmin2 = binary_search(i,cnkc[:,nc-1],1,lc)
    print(i,b,bmin,bmin2)
print()
for i in cnkc[:,nc-1]+1:
    b, bmin = binary_search_orig(i,cnkc[:,nc-1],1,lc)
    bmin2 = binary_search(i,cnkc[:,nc-1],1,lc)
    print(i,b,bmin,bmin2)
print()
for i in cnkc[:,nc-1]+2:
    b, bmin = binary_search_orig(i,cnkc[:,nc-1],1,lc)
    bmin2 = binary_search(i,cnkc[:,nc-1],1,lc)
    print(i,b,bmin,bmin2)
print()
# -

# Returns the many body state bi_short at position ind
# using binary search
# Replaces the linear walk of in2bshort with a per-atom binary search
# over one Pascal-table column.
@jit(nopython=True)
def in2bshort_bs(bis,ind,cnkc,lc,nc): # index to basis short
    ind_i = ind[0] ## ind[0] runs from 1 to jmax=cnkc[ind_lc-1,ind_nc-1]
    bis[:] = 0
    ind_site = lc
    for ind_atom in range(nc,1,-1): # atom loop
#        icnkc, icnkcmin = binary_search_orig(ind_i,cnkc[:ind_site,ind_atom-1],1,ind_site)
        icnkcmin = binary_search(ind_i,cnkc[:ind_site,ind_atom-1],1,ind_site)
        #print(ind_atom,ind_i,icnkc,icnkcmin,cnkc[:ind_site,ind_atom-1],cnkc[icnkcmin-2,ind_atom-1])
        ind_i -= cnkc[icnkcmin-2,ind_atom-1]
        bis[ind_atom-2] = icnkcmin-1
        ind_site = icnkcmin
    return 0

# +
# Smoke test: first/last/representative indices through the binary-search decoder.
lc = 7 # number of sites + 1
nc = 7 # number of atoms + 1
cnkc, jmax = build_pascal(lc,nc)
bis = np.zeros(nc-1,dtype=np.int64)
ind = np.zeros(1,dtype=np.int64)
ind[0] = 384
in2bshort_bs(bis,ind,cnkc,lc,nc)
print(bis,ind[0])
print()
ind[0] = 259
in2bshort_bs(bis,ind,cnkc,lc,nc)
print(bis,ind[0])
print()
ind[0] = 1
in2bshort_bs(bis,ind,cnkc,lc,nc)
print(bis,ind[0])
print()
ind[0] = jmax
in2bshort_bs(bis,ind,cnkc,lc,nc)
print(bis,ind[0])
print()

# +
# Round-trip consistency check: index -> (bi, bis, bis_bs) -> index, all three paths.
lc = 7 # number of sites + 1
nc = 7 # number of atoms + 1
cnkc, jmax = build_pascal(lc,nc)
bi = np.zeros(lc-1,dtype=np.int64)
bis = np.zeros(nc-1,dtype=np.int64)
bis_bs = np.zeros(nc-1,dtype=np.int64)
ind = np.zeros(1,dtype=np.int64)
ind_bi = np.zeros(1,dtype=np.int64)
ind_bis = np.zeros(1,dtype=np.int64)
ind_bis_bs = np.zeros(1,dtype=np.int64)
#for i in range(1,jmax+1):
for i in list(range(1,9))+[259]+[384]+list(range(jmax+1-8,jmax+1)):
    ind[0] = i
    in2b(bi,ind,cnkc,lc,nc)
    in2bshort(bis,ind,cnkc,lc,nc)
    in2bshort_bs(bis_bs,ind,cnkc,lc,nc)
    b2in(bi,ind_bi,cnkc,lc,nc)
    bshort2in(bis,ind_bis,cnkc,lc,nc)
    bshort2in(bis_bs,ind_bis_bs,cnkc,lc,nc)
    print(ind[0],bi,ind_bi[0]," ",ind[0],bis,ind_bis[0]," ",ind[0],bis_bs,ind_bis_bs[0])

# +
# Same round-trip check on a larger lattice with fewer atoms.
#lc = 33 # number of sites + 1
lc = 25 # number of sites + 1
nc = 4 # number of atoms + 1
cnkc, jmax = build_pascal(lc,nc)
bi = np.zeros(lc-1,dtype=np.int64)
bis = np.zeros(nc-1,dtype=np.int64)
bis_bs = np.zeros(nc-1,dtype=np.int64)
ind = np.zeros(1,dtype=np.int64)
ind_bi = np.zeros(1,dtype=np.int64)
ind_bis = np.zeros(1,dtype=np.int64)
ind_bis_bs = np.zeros(1,dtype=np.int64)
#for i in range(1,jmax+1):
for i in list(range(1,9))+list(range(jmax+1-8,jmax+1)):
    ind[0] = i
    in2b(bi,ind,cnkc,lc,nc)
    in2bshort(bis,ind,cnkc,lc,nc)
    in2bshort_bs(bis_bs,ind,cnkc,lc,nc)
    b2in(bi,ind_bi,cnkc,lc,nc)
    bshort2in(bis,ind_bis,cnkc,lc,nc)
    bshort2in(bis_bs,ind_bis_bs,cnkc,lc,nc)
    print(ind[0],bi,ind_bi[0]," ",ind[0],bis,ind_bis[0]," ",ind[0],bis_bs,ind_bis_bs[0])

# +
# Per-site occupation n_i (and n_i^2) from a short-form state.
@jit(nopython=True)
def calc_ni(bis,nc):
    return [np.sum(bis==i) for i in range(nc)]
@jit(nopython=True)
def calc_ni2(bis,nc):
    return [np.sum(bis==i)**2 for i in range(nc)]

# +
# Smoke test: occupations for a few indices.
lc = 7 # number of sites + 1
nc = 7 # number of atoms + 1
cnkc, jmax = build_pascal(lc,nc)
bis = np.zeros(nc-1,dtype=np.int64)
ind = np.zeros(1,dtype=np.int64)
ind[0] = 384
in2bshort_bs(bis,ind,cnkc,lc,nc)
print(bis,ind[0],calc_ni(bis,nc),calc_ni2(bis,nc))
print()
ind[0] = 259
in2bshort_bs(bis,ind,cnkc,lc,nc)
print(bis,ind[0],calc_ni(bis,nc),calc_ni2(bis,nc))
print()
ind[0] = 1
in2bshort_bs(bis,ind,cnkc,lc,nc)
print(bis,ind[0],calc_ni(bis,nc),calc_ni2(bis,nc))
print()
ind[0] = jmax
in2bshort_bs(bis,ind,cnkc,lc,nc)
print(bis,ind[0],calc_ni(bis,nc),calc_ni2(bis,nc))
print()

# +
## binary search not efficient for small nmax?
## this binary search code "binary_search_orig" has bug, no good with duplication
#@jit(nopython=True)
#def calc_aiadj(bis2,bis,i,j,nmax): # assume i<j
#    _, x = binary_search_orig(i,bis,1,nmax)
#    x += np.sum(bis==i) - 1
#    _, y = binary_search_orig(j,bis,1,nmax)
#    y += np.sum(bis==j) - 1
#    bis2[0:x-1] = bis[0:x-1]
#    bis2[x-1:y-1] = bis[x:y]
#    bis2[y-1] = j
#    bis2[y:nmax] = bis[y:nmax]
#    return 0
#@jit(nopython=True)
#def calc_aiadj(bis2,bis,i,j,nmax): # assume i<j
#    _, x = binary_search_orig(i,bis,1,nmax)
#    x += np.sum(bis==i) - 1
#    _, y = binary_search_orig(j,bis,1,nmax)
#    y += np.sum(bis==j) - 1
#    print()
#    print("#",i,binary_search_orig(i,bis,1,nmax),x,y)
#    x2 = binary_search(i,bis,1,nmax,side="left")
#    y2 = binary_search(j,bis,1,nmax,side="left")
#    x3 = binary_search(i,bis,1,nmax,side="right")
#    y3 = binary_search(j,bis,1,nmax,side="right")
#    print("#",i,x2,y2)
#    print("#",i,x3,y3)
#    print("#",i,x2 + np.sum(bis==i) - 1,y2 + np.sum(bis==j) - 1)
#    bis2[0:x-1] = bis[0:x-1]
#    bis2[x-1:y-1] = bis[x:y]
#    bis2[y-1] = j
#    bis2[y:nmax] = bis[y:nmax]
#    return 0
# Hopping helper: writes into bis2 the sorted short-form state obtained by
# moving one atom from site i to site j (i<j). The amplitude
# sqrt(n_i (n_j + 1)) is computed by the caller.
@jit(nopython=True)
def calc_aiadj(bis2,bis,i,j,nmax): # assume i<j
    x = binary_search(i,bis,1,nmax) + np.sum(bis==i) - 1
    y = binary_search(j,bis,1,nmax) + np.sum(bis==j) - 1
    bis2[0:x-1] = bis[0:x-1]
    bis2[x-1:y-1] = bis[x:y]
    bis2[y-1] = j
    bis2[y:nmax] = bis[y:nmax]
    return 0

# +
# Smoke test: apply the i->j move to one state for all site pairs.
lc = 7 # number of sites + 1
nc = 7 # number of atoms + 1
cnkc, jmax = build_pascal(lc,nc)
bis = np.zeros(nc-1,dtype=np.int64)
bis2 = np.zeros(nc-1,dtype=np.int64)
ind = np.zeros(1,dtype=np.int64)
ind2 = np.zeros(1,dtype=np.int64)
ind[0] = 384
#ind[0] = 259
#ind[0] = 1
#ind[0] = 2
#ind[0] = 3
#ind[0] = jmax
in2bshort_bs(bis,ind,cnkc,lc,nc)
print(bis,ind[0])
for i in range(1,lc):
    for j in range(i+1,lc):
        if i in bis:
            calc_aiadj(bis2,bis,i,j,nc-1)
            bshort2in(bis2,ind2,cnkc,lc,nc)
            coeff = np.sqrt(np.sum(bis==i)*(np.sum(bis==j)+1)) # \sqrt(n_i(nj+1))
            print(i,j,bis2,ind2[0],coeff)
print()

# +
# Exhaustive check on a tiny system: every state, every pair.
lc = 4 # number of sites + 1
nc = 3 # number of atoms + 1
cnkc, jmax = build_pascal(lc,nc)
bis = np.zeros(nc-1,dtype=np.int64)
bis2 = np.zeros(nc-1,dtype=np.int64)
ind = np.zeros(1,dtype=np.int64)
ind2 = np.zeros(1,dtype=np.int64)
for id in range(1,jmax+1):
    ind[0] = id
    in2bshort_bs(bis,ind,cnkc,lc,nc)
    print(bis,ind[0])
    for i in range(1,lc):
        for j in range(i+1,lc):
            if i in bis:
                calc_aiadj(bis2,bis,i,j,nc-1)
                bshort2in(bis2,ind2,cnkc,lc,nc)
                coeff = np.sqrt(np.sum(bis==i)*(np.sum(bis==j)+1)) # \sqrt(n_i(nj+1))
                print(i,j,bis2,ind2[0],coeff)
    print()
print()

# +
## binary search not efficient for small nmax?
## this binary search code "binary_search_orig" has bug, no good with duplication
#@jit(nopython=True)
#def calc_adiaj(bis2,bis,i,j,nmax): # assume i<j
#    _, x = binary_search_orig(i,bis,1,nmax)
#    x += np.sum(bis==i)
#    _, y = binary_search_orig(j,bis,1,nmax)
#    bis2[0:x-1] = bis[0:x-1]
#    bis2[x-1] = i
#    bis2[x:y] = bis[x-1:y-1]
#    bis2[y:nmax] = bis[y:nmax]
#    return 0
# Reverse hopping helper: writes into bis2 the sorted short-form state obtained
# by moving one atom from site j to site i (i<j). The amplitude
# sqrt((n_i + 1) n_j) is computed by the caller.
@jit(nopython=True)
def calc_adiaj(bis2,bis,i,j,nmax): # assume i<j
    x = binary_search(i,bis,1,nmax) + np.sum(bis==i)
    y = binary_search(j,bis,1,nmax)
    bis2[0:x-1] = bis[0:x-1]
    bis2[x-1] = i
    bis2[x:y] = bis[x-1:y-1]
    bis2[y:nmax] = bis[y:nmax]
    return 0

# +
# Smoke test: apply the j->i move to one state for all site pairs.
lc = 7 # number of sites + 1
nc = 7 # number of atoms + 1
cnkc, jmax = build_pascal(lc,nc)
bis = np.zeros(nc-1,dtype=np.int64)
bis2 = np.zeros(nc-1,dtype=np.int64)
ind = np.zeros(1,dtype=np.int64)
ind2 = np.zeros(1,dtype=np.int64)
ind[0] = 384
#ind[0] = 259
#ind[0] = 1
#ind[0] = 2
#ind[0] = 3
#ind[0] = jmax
in2bshort_bs(bis,ind,cnkc,lc,nc)
print(bis,ind[0])
for i in range(1,lc):
    for j in range(i+1,lc):
        if j in bis:
            calc_adiaj(bis2,bis,i,j,nc-1)
            bshort2in(bis2,ind2,cnkc,lc,nc)
            coeff = np.sqrt((np.sum(bis==i)+1)*np.sum(bis==j)) # \sqrt((n_i+1)nj)
            print(i,j,bis2,ind2[0],coeff)
print()

# +
# Exhaustive check on a tiny system: every state, every pair.
lc = 4 # number of sites + 1
nc = 3 # number of atoms + 1
cnkc, jmax = build_pascal(lc,nc)
bis = np.zeros(nc-1,dtype=np.int64)
bis2 = np.zeros(nc-1,dtype=np.int64)
ind = np.zeros(1,dtype=np.int64)
ind2 = np.zeros(1,dtype=np.int64)
for id in range(1,jmax+1):
    ind[0] = id
    in2bshort_bs(bis,ind,cnkc,lc,nc)
    print(bis,ind[0])
    for i in range(1,lc):
        for j in range(i+1,lc):
            if j in bis:
                calc_adiaj(bis2,bis,i,j,nc-1)
                bshort2in(bis2,ind2,cnkc,lc,nc)
                coeff = np.sqrt((np.sum(bis==i)+1)*np.sum(bis==j)) # \sqrt((n_i+1)nj)
                print(i,j,bis2,ind2[0],coeff)
    print()
print()
# -

# Fill the dense Bose-Hubbard Hamiltonian Ham (lv x lv, preallocated, zeroed):
# diagonal U/2 * n_j(n_j-1) interaction terms plus -J hopping terms for
# each bond in pair_J. States are enumerated via the Ponomarev index.
@jit(nopython=True)
def make_full_hamiltonian(lv,Ham,cnkc,lc,nc,no_U,val_U,no_J,pair_J,val_J):
    bis = np.zeros(nc-1,dtype=np.int64)
    bis2 = np.zeros(nc-1,dtype=np.int64)
    ind = np.zeros(1,dtype=np.int64)
    ind2 = np.zeros(1,dtype=np.int64)
    for i in range(1,lv+1): # state loop
        ind[0] = i
        in2bshort_bs(bis,ind,cnkc,lc,nc)
        for j in range(1,no_U+1): # site loop
            nj = np.sum(bis==j)
            Ham[i-1,i-1] += 0.5 * val_U[j-1] * nj * (nj-1.0)
        for j in range(1,no_J+1): # bond loop
            f1 = pair_J[0,j-1] in bis
            f2 = pair_J[1,j-1] in bis
            if f1:
                calc_aiadj(bis2,bis,pair_J[0,j-1],pair_J[1,j-1],nc-1)
                bshort2in(bis2,ind2,cnkc,lc,nc)
                Ham[i-1,ind2[0]-1] -= val_J[j-1] * \
                    np.sqrt(np.sum(bis==pair_J[0,j-1]) * (np.sum(bis==pair_J[1,j-1])+1))
#                print("### 1 aiadj",bis2,bis,pair_J[0,j-1],pair_J[1,j-1])
            if f2:
                calc_adiaj(bis2,bis,pair_J[0,j-1],pair_J[1,j-1],nc-1)
                bshort2in(bis2,ind2,cnkc,lc,nc)
                Ham[i-1,ind2[0]-1] -= val_J[j-1] * \
                    np.sqrt((np.sum(bis==pair_J[0,j-1])+1) * np.sum(bis==pair_J[1,j-1]))
#                print("### 2 adiaj",bis2,bis,pair_J[0,j-1],pair_J[1,j-1])
    return 0

# Uniform 1D chain parameters: per-site U, per-bond J on bonds (i,i+1)
# with wrap-around (periodic boundary); pairs are stored with i<j.
@jit(nopython=True)
def make_parameters_1d(lc,U,J):
    no_U = lc - 1
    val_U = U * np.ones(no_U,dtype=np.float64)
    no_J = lc - 1
    pair_J = np.zeros((2,no_J),dtype=np.int64)
    val_J = J * np.ones(no_J,dtype=np.float64)
    for i in range(no_J):
        pair_J[0,i] = i%no_J+1
        pair_J[1,i] = (i+1)%no_J+1
        if pair_J[0,i] > pair_J[1,i]: # assume i<j for pair (i,j)
            tmp = pair_J[0,i]
            pair_J[0,i] = pair_J[1,i]
            pair_J[1,i] = tmp
    return no_U, val_U, no_J, pair_J, val_J

# Build the dense Hamiltonian and fully diagonalize it; prints timings
# and the lowest eigenvalues.
#@jit(nopython=True)
def calculate_1d_full_diag(lc,nc,U,J):
    start = time.time()
#    lc = 21 # number of sites + 1
#    nc = 4 # number of atoms + 1
    cnkc, jmax = build_pascal(lc,nc)
    print("# cnkc",cnkc)
    print("# total Hilbert space size",jmax)
    end = time.time()
    print("## time build_pascal",end-start)
    print()
    start = time.time()
#    U = 10.0
#    J = 1.0
    no_U, val_U, no_J, pair_J, val_J = make_parameters_1d(lc,U,J)
    print("# no_U",no_U)
    print("# val_U",val_U)
    print("# no_J",no_J)
    print("# pair_J",pair_J)
    print("# val_J",val_J)
    end = time.time()
    print("## time make_parameters_1d",end-start)
    print()
    start = time.time()
    Ham = np.zeros((jmax,jmax),dtype=np.float64)
    make_full_hamiltonian(jmax,Ham,cnkc,lc,nc,no_U,val_U,no_J,pair_J,val_J)
#    print(Ham)
#    print(Ham.T-Ham)
#    print("# \sum |Ham-Ham.T|",np.sum(np.abs(Ham.T-Ham)))
#    for i in range(jmax):
#        for j in range(jmax):
#            if np.abs(Ham[i,j]) > 1e-6:
#                print(i,j,Ham[i,j])
    end = time.time()
    print("## time make Hamiltonian",end-start)
    print()
    start = time.time()
    ene, vec = scipy.linalg.eigh(Ham)
    idx = np.argsort(ene)
    ene = ene[idx]
    vec = vec[:,idx]
    print("# ene",*ene[0:np.min([jmax,5])].flatten())
    #print("# vec",vec[:,0:np.min([jmax,5])])
    end = time.time()
    print("## time diagonalization",end-start)
    print()

lc = 31 # number of sites + 1
nc = 4 # number of atoms + 1
U = 10.0
J = 1.0
calculate_1d_full_diag(lc,nc,U,J)

# Factory returning a jitted matvec closure (H @ v1 -> v0) for a matrix-free
# Hamiltonian; used with scipy's LinearOperator below.
def ham_to_vec_wave_vector(lv,cnkc,lc,nc,no_U,val_U,no_J,pair_J,val_J):
    @jit(nopython=True)
    def get_vec(v1,v0,bis,bis2,ind,ind2): ## v0: new output, v1: old input
        for i in range(1,lv+1): # state loop
#            v0[i-1] = 0.0 + 0.0j
            v0[i-1] = 0.0
            ind[0] = i
            in2bshort_bs(bis,ind,cnkc,lc,nc)
            for j in range(1,no_U+1): # site loop
                nj = np.sum(bis==j)
                v0[i-1] += 0.5 * val_U[j-1] * nj * (nj-1.0) * v1[i-1]
            for j in range(1,no_J+1): # bond loop
                f1 = pair_J[0,j-1] in bis
                f2 = pair_J[1,j-1] in bis
                if f1:
                    calc_aiadj(bis2,bis,pair_J[0,j-1],pair_J[1,j-1],nc-1)
                    bshort2in(bis2,ind2,cnkc,lc,nc)
                    v0[i-1] -= val_J[j-1] * \
                        np.sqrt(np.sum(bis==pair_J[0,j-1]) * (np.sum(bis==pair_J[1,j-1])+1)) * v1[ind2[0]-1]
                if f2:
                    calc_adiaj(bis2,bis,pair_J[0,j-1],pair_J[1,j-1],nc-1)
                    bshort2in(bis2,ind2,cnkc,lc,nc)
                    v0[i-1] -= val_J[j-1] * \
                        np.sqrt((np.sum(bis==pair_J[0,j-1])+1) * np.sum(bis==pair_J[1,j-1])) * v1[ind2[0]-1]
        return v0
    return get_vec

# Matrix-free variant: diagonalize via LinearOperator + Lanczos (eigsh).
#@jit(nopython=True)
def calculate_1d_linearoperator(lc,nc,U,J):
    start = time.time()
#    lc = 21 # number of sites + 1
#    nc = 4 # number of atoms + 1
    cnkc, jmax = build_pascal(lc,nc)
    print("# cnkc",cnkc)
    print("# total Hilbert space size",jmax)
    end = time.time()
    print("## time build_pascal",end-start)
    print()
    start = time.time()
#    U = 10.0
#    J = 1.0
    no_U, val_U, no_J, pair_J, val_J = make_parameters_1d(lc,U,J)
    print("# no_U",no_U)
    print("# val_U",val_U)
    print("# no_J",no_J)
    print("# pair_J",pair_J)
    print("# val_J",val_J)
    end = time.time()
    print("## time make_parameters_1d",end-start)
    print()
    start = time.time()
    get_vec = ham_to_vec_wave_vector(jmax,cnkc,lc,nc,no_U,val_U,no_J,pair_J,val_J)
    # scratch buffers shared by every matvec call (get_vec overwrites them)
    bis = np.zeros(nc-1,dtype=np.int64)
    bis2 = np.zeros(nc-1,dtype=np.int64)
    ind = np.zeros(1,dtype=np.int64)
    ind2 = np.zeros(1,dtype=np.int64)
    #v0 = np.zeros(jmax,dtype=np.complex128)
    v0 = np.zeros(jmax,dtype=np.float64)
    Ham = scipy.sparse.linalg.LinearOperator((jmax,jmax),matvec=lambda v1: get_vec(v1,v0,bis,bis2,ind,ind2))
    end = time.time()
    print("## time make Hamiltonian",end-start)
    print()
    start = time.time()
    ene, vec = scipy.sparse.linalg.eigsh(Ham,which="SA",k=np.min([5,jmax-1]))
    idx = ene.argsort()
    ene = ene[idx]
    vec = vec[:,idx]
    print("# ene",*ene[0:np.min([jmax,5])].flatten())
    #print("# vec",vec[:,0:np.min([jmax,5])])
    end = time.time()
    print("## time diagonalization",end-start)
    print()

lc = 31 # number of sites + 1
nc = 4 # number of atoms + 1
U = 10.0
J = 1.0
calculate_1d_linearoperator(lc,nc,U,J)

# Build COO-style triplets (values, row indices, column locations) for the
# sparse Hamiltonian: 2*no_J off-diagonal slots + 1 diagonal slot per state.
@jit(nopython=True)
def make_sparse_hamiltonian_child(lv,cnkc,lc,nc,no_U,val_U,no_J,pair_J,val_J):
    bis = np.zeros(nc-1,dtype=np.int64)
    bis2 = np.zeros(nc-1,dtype=np.int64)
    ind = np.zeros(1,dtype=np.int64)
    ind2 = np.zeros(1,dtype=np.int64)
    lstki = np.array([i for k in range(2*no_J+1) for i in range(lv)],dtype=np.int64)
    lstloc = np.zeros((2*no_J+1)*lv,dtype=np.int64)
#    lstele = np.zeros((2*no_J+1)*lv,dtype=np.complex128)
    lstele = np.zeros((2*no_J+1)*lv,dtype=np.float64)
    for i in range(1,lv+1): # state loop
        ind[0] = i
        in2bshort_bs(bis,ind,cnkc,lc,nc)
        lstloc[2*no_J*lv+(i-1)] = i-1 # diagonal location
        for j in range(1,no_U+1): # site loop
            nj = np.sum(bis==j)
            lstele[2*no_J*lv+(i-1)] += 0.5 * val_U[j-1] * nj * (nj-1.0) # diagonal element
#            Ham[i-1,i-1] += 0.5 * val_U[j-1] * nj * (nj-1.0)
        for j in range(1,no_J+1): # bond loop
            f1 = pair_J[0,j-1] in bis
            f2 = pair_J[1,j-1] in bis
            if f1:
                calc_aiadj(bis2,bis,pair_J[0,j-1],pair_J[1,j-1],nc-1)
                bshort2in(bis2,ind2,cnkc,lc,nc)
                lstele[(j-1)*lv+(i-1)] -= val_J[j-1] * \
                    np.sqrt(np.sum(bis==pair_J[0,j-1]) * (np.sum(bis==pair_J[1,j-1])+1)) # offdiag element
                lstloc[(j-1)*lv+(i-1)] = ind2[0]-1 # offdiag location
#                Ham[i-1,ind2[0]-1] -= val_J[j-1] * \
#                    np.sqrt(np.sum(bis==pair_J[0,j-1]) * (np.sum(bis==pair_J[1,j-1])+1))
#                print("### 1 aiadj",bis2,bis,pair_J[0,j-1],pair_J[1,j-1])
            if f2:
                calc_adiaj(bis2,bis,pair_J[0,j-1],pair_J[1,j-1],nc-1)
                bshort2in(bis2,ind2,cnkc,lc,nc)
                lstele[((j-1)+no_J)*lv+(i-1)] -= val_J[j-1] * \
                    np.sqrt((np.sum(bis==pair_J[0,j-1])+1) * np.sum(bis==pair_J[1,j-1])) # offdiag element
                lstloc[((j-1)+no_J)*lv+(i-1)] = ind2[0]-1 # offdiag location
#                Ham[i-1,ind2[0]-1] -= val_J[j-1] * \
#                    np.sqrt((np.sum(bis==pair_J[0,j-1])+1) * np.sum(bis==pair_J[1,j-1]))
#                print("### 2 adiaj",bis2,bis,pair_J[0,j-1],pair_J[1,j-1])
    return lstele, lstki, lstloc

# Assemble the CSR matrix from the triplets (duplicate entries are summed).
def make_sparse_hamiltonian(lv,lstele,lstki,lstloc):
#    return scipy.sparse.csr_matrix((lstele,(lstki,lstloc)),shape=(lv,lv),dtype=np.complex128)
    return scipy.sparse.csr_matrix((lstele,(lstki,lstloc)),shape=(lv,lv),dtype=np.float64)

# Sparse-matrix variant: assemble CSR Hamiltonian, then Lanczos (eigsh).
#@jit(nopython=True)
def calculate_1d_sparse(lc,nc,U,J):
    start = time.time()
#    lc = 21 # number of sites + 1
#    nc = 4 # number of atoms + 1
    cnkc, jmax = build_pascal(lc,nc)
    print("# cnkc",cnkc)
    print("# total Hilbert space size",jmax)
    end = time.time()
    print("## time build_pascal",end-start)
    print()
    start = time.time()
#    U = 10.0
#    J = 1.0
    no_U, val_U, no_J, pair_J, val_J = make_parameters_1d(lc,U,J)
    print("# no_U",no_U)
    print("# val_U",val_U)
    print("# no_J",no_J)
    print("# pair_J",pair_J)
    print("# val_J",val_J)
    end = time.time()
    print("## time make_parameters_1d",end-start)
    print()
    start = time.time()
    lstele, lstki, lstloc = make_sparse_hamiltonian_child(jmax,cnkc,lc,nc,no_U,val_U,no_J,pair_J,val_J)
    Ham = make_sparse_hamiltonian(jmax,lstele,lstki,lstloc)
#    print(Ham)
    end = time.time()
    print("## time make Hamiltonian",end-start)
    print()
    start = time.time()
    ene, vec = scipy.sparse.linalg.eigsh(Ham,which="SA",k=np.min([5,jmax-1]))
    idx = ene.argsort()
    ene = ene[idx]
    vec = vec[:,idx]
    print("# ene",*ene[0:np.min([jmax,5])].flatten())
    #print("# vec",vec[:,0:np.min([jmax,5])])
    end = time.time()
    print("## time diagonalization",end-start)
    print()

lc = 31 # number of sites + 1
nc = 4 # number of atoms + 1
U = 10.0
J = 1.0
calculate_1d_sparse(lc,nc,U,J)
bose_hubbard_basis_ponomarev_no_truncation/testing/test_python_bose_basis_ponomarev_20211215.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# +
import matplotlib.pyplot as plt
from matplotlib.lines import Line2D
import pandas as pd
# %matplotlib inline

exp_print_count = 0

#plt.style.use("seaborn-whitegrid")
plt.rcParams["font.family"] = "serif"
plt.rcParams["font.serif"] = "Times"
plt.rcParams['lines.linewidth'] = 3
plt.rc('mathtext', fontset='stix')
#rc('font',**{'family':'serif','serif':['Times']})
plt.rc('text', usetex=True)

from aux_plt import start_plotting
import numpy as np

# Experiment configurations (p_a, p_b, p_c, p_d) and their plot labels/colors.
CFGS = [ (10, 1, 10, 1), (10,2,10,2), (0.001, 0.01, 0.01, 0.1),(0.1, 1, 0.1, 1),
         (0.1, 0.1, 0.1, 0.1), (1, 1, 0.1, 0.1), (1000, 1000, 1000, 1000),]
LABELS = ["A", "B", "C", "D", "E", "F", "G", "H"]
COLORS1 = ["salmon", "maroon", "pink", "dodgerblue", "yellowgreen", "orange", "gray", ]
ALL_SHADE = "lightgray"
ALL_COLOR = "k"
LABEL2IX = dict((label, i) for i, label in enumerate(LABELS))
CFGS2IX = dict((label, i) for i, label in enumerate(CFGS))
# Select rows matching one (p_a, p_b, p_c, p_d) configuration.
df_sel = lambda df, a0, b0, c0, d0: df[(df.p_a==a0)&(df.p_b==b0)&(df.p_c==c0)&(df.p_d==d0)]

# +
def plot_errs(name, df, color="dodgerblue", alpha=0.2, lw=2.5):
    """Plot median estimated K vs true K with a 5-95% quantile band.

    Returns the errorbar handle (for legends).
    """
    print("[plot_errs] plotting %s rows with name=%s" % (len(df), name))
    df2 = df[ ['t_latent_dim','e_latent_dim' ] ]
    mean = df2.groupby('t_latent_dim')['e_latent_dim'].median()
    std = df2.groupby('t_latent_dim')['e_latent_dim'].std()
    p025 = df.groupby('t_latent_dim')['e_latent_dim'].quantile(0.05)
    p975 = df.groupby('t_latent_dim')['e_latent_dim'].quantile(0.95)
    hand = plt.errorbar(mean.index, mean, xerr=0, yerr=[mean - p025, p975 - mean], color=color, lw=lw)
    plt.plot(df['t_latent_dim'], df['t_latent_dim'])  # identity (perfect-recovery) reference
    plt.fill_between(mean.index, p025, p975, interpolate=True, alpha=alpha, color=color, lw=0)
    plt.xlabel(r'$K$')
    plt.ylabel(r'$\hat{K}$')
    return hand

def plot_errs_rel(name, df, color="dodgerblue", alpha=0.2, lw=2.5):
    """Plot the median relative error of K-hat (in %) vs true K, with quantile band.

    Returns the errorbar handle (for legends).
    """
    print("[plot_errs] plotting %s rows with name=%s" % (len(df), name))
    df2 = df[ ['t_latent_dim','e_latent_dim' ] ]
    mean = df2.groupby('t_latent_dim')['e_latent_dim'].median()
    centralized = np.array(mean.index)  # the true K values
    mean = ((mean-centralized)/centralized)*100
    std = df2.groupby('t_latent_dim')['e_latent_dim'].std()
    p025 = 100*(df.groupby('t_latent_dim')['e_latent_dim'].quantile(0.05)-centralized)/centralized
    p975 = 100*(df.groupby('t_latent_dim')['e_latent_dim'].quantile(0.95)-centralized)/centralized
    hand = plt.errorbar(mean.index, mean, xerr=0, yerr=[mean - p025, p975 - mean], color=color, lw=lw)
    #plt.plot(df['t_latent_dim'], df['t_latent_dim'])
    plt.fill_between(mean.index, p025, p975, interpolate=True, alpha=alpha, color=color, lw=0)
    plt.xlabel(r'$K$')
    plt.ylabel(r'$\frac{\hat{K}-K}{K}$ (%)')
    return hand

def plot_errs_all(df, color="darkgray", xshift=0, lw=2.5, alpha=0.1, zorder=None):
    """Plot absolute K-hat quantile band for pooled data; returns the dashed
    reference-line handle (or None when lw<=0)."""
    df = df[ ['t_latent_dim','e_latent_dim' ] ]
    mean = df.groupby('t_latent_dim')['e_latent_dim'].median()
    #std = df2.groupby('t_latent_dim')['e_latent_dim'].std()
    p025 = df.groupby('t_latent_dim')['e_latent_dim'].quantile(0.05)
    p975 = df.groupby('t_latent_dim')['e_latent_dim'].quantile(0.95)
    plt.errorbar(mean.index+xshift, mean, xerr=0, yerr=[mean - p025, p975 - mean],
                 color=color, elinewidth=lw, ecolor=color, lw=0, ls=None)
    plt.fill_between(mean.index+xshift, p025,p975, interpolate=True,alpha=alpha,color=color,lw=0)
    if lw>0:
        line = plt.plot(mean.index+xshift, mean.index, color=color, ls="--", lw=lw, zorder=zorder)
    else:
        line = None
    plt.xlabel(r'$K$')
    plt.ylabel(r'$\hat{K}$')
    return line

def plot_errs_fixed_k(df, color="darkgray", xshift=0, lw=2.5, alpha=0.1, zorder=None):
    """Plot relative error of K-hat (in %) vs observation probability p_obs.

    NOTE(review): `centralized` is taken from the p_obs axis, not from the
    fixed true K -- the cell below inlines a corrected version with
    `centralized = p`; confirm which is intended.
    Returns the errorbar handle (for legends).
    """
    df2 = df[ ['p_obs','e_latent_dim' ] ]
    mean = df2.groupby('p_obs')['e_latent_dim'].median()
    centralized = np.array(mean.index)
    mean = ((mean-centralized)/centralized)*100
    std = df2.groupby('p_obs')['e_latent_dim'].std()
    p025 = 100*(df.groupby('p_obs')['e_latent_dim'].quantile(0.05)-centralized)/centralized
    p975 = 100*(df.groupby('p_obs')['e_latent_dim'].quantile(0.95)-centralized)/centralized
    hand = plt.errorbar(mean.index, mean, xerr=0, yerr=[mean - p025, p975 - mean], color=color, lw=lw)
    #plt.plot(df['t_latent_dim'], df['t_latent_dim'])
    plt.fill_between(mean.index, p025, p975, interpolate=True, alpha=alpha, color=color, lw=0)
    plt.xlabel(r'$p_obs$')
    plt.ylabel(r'$\frac{\hat{K}-K}{K}$ (%)')
    # BUG FIX: the original ended with `return line`, a name never defined in
    # this function (copy-paste from plot_errs_all) -> NameError on every call.
    # Return the errorbar handle, consistent with plot_errs / plot_errs_rel.
    return hand

# +
# Load experiment e71 and plot the relative error per observation probability.
data_e71=pd.concat([pd.read_csv(f"sensitivity_results/exp_poisson_gamma_bin_e71_20_30_cols_1000_rows_1000_c_{i}.csv",index_col=0) for i in range(0,1)], ignore_index=True)
groups=data_e71.groupby(['t_latent_dim','p_obs'])
for p in np.unique(data_e71.p_obs):
    plt.figure(f"p_obs={p}")
    plot_errs_rel(f"p_obs={p}",data_e71[data_e71.p_obs==p])

# +
# One figure per true K: relative error vs p_obs, centered on the fixed K.
groups=data_e71.groupby(['t_latent_dim','p_obs'])
for p in np.unique(data_e71.t_latent_dim):
    plt.figure(f"K={p}")
    #plot_errs_fixed_k(f"K={p}",data_e71[data_e71.t_latent_dim==p])
    df = data_e71[data_e71.t_latent_dim==p]
    df2 = df[ ['p_obs','t_latent_dim','e_latent_dim' ] ]
    mean = df2.groupby('p_obs')['e_latent_dim'].median()
    centralized = p  # center on the fixed true K (cf. plot_errs_fixed_k note)
    mean = ((mean-centralized)/centralized)*100
    std = df2.groupby('p_obs')['e_latent_dim'].std()
    p025 = 100*(df.groupby('p_obs')['e_latent_dim'].quantile(0.025)-centralized)/centralized
    p975 = 100*(df.groupby('p_obs')['e_latent_dim'].quantile(0.975)-centralized)/centralized
    color="darkgray"
    xshift=0
    lw=2.5
    alpha=0.1
    hand = plt.errorbar(mean.index, mean, xerr=0, yerr=[mean - p025, p975 - mean], color=color, lw=lw)
    #plt.plot(df['t_latent_dim'], df['t_latent_dim'])
    plt.fill_between(mean.index, p025, p975, interpolate=True, alpha=alpha, color=color, lw=0)
    plt.xlabel(r'$p_{obs}$')
    plt.ylabel(r'$\frac{\hat{K}-K}{K}$ (%)')

# +
# Load the negative-binomial experiments (two runs, concatenated below).
start_plotting()
dat_n = pd.read_csv("sensitivity_results/final_exp_poisson_gamma_negbin_e933_20_30_cols_1000_rows_1000_c.csv", index_col=0)
dat_n2 = pd.read_csv("sensitivity_results/final_exp_poisson_gamma_negbin_e78_20_20_cols_500_rows_500_c.csv", index_col=0)
dat_n=pd.concat([dat_n,dat_n2], ignore_index=True)
handles = []
legend_labels = []
# One curve per parameter configuration (skipping the first via [1:]):
# median relative error of K-hat vs p_obs, on a log x-axis.
for x in range(dat_n[["p_a","p_b","p_c", "p_d"]].drop_duplicates().values.shape[0])[1:]:
    p_vals = dat_n[["p_a","p_b","p_c", "p_d"]].drop_duplicates().values[x]
    # NOTE(review): `mask * dat_n['p_obs'] > 1` relies on * binding tighter
    # than >, i.e. config-match AND p_obs > 1 — confirm this is intended.
    new_dat = dat_n[(dat_n[["p_a","p_b","p_c", "p_d"]]==p_vals).all(1) * dat_n['p_obs']>1]
    df2 = pd.DataFrame(new_dat[ ['p_obs','e_latent_dim', 't_latent_dim' ] ])
    df2['rel_error'] = (df2.e_latent_dim-df2.t_latent_dim)/df2.t_latent_dim
    mean = df2.groupby('p_obs')['rel_error'].median()
    mean = mean*100
    std = df2.groupby('p_obs')['rel_error'].std()
    p025 = 100*df2.groupby('p_obs')['rel_error'].quantile(0.025)
    p975 = 100*df2.groupby('p_obs')['rel_error'].quantile(0.975)
    plt.xscale("log", basex=10)
    color="darkgray"
    xshift=0
    lw=2.5
    alpha=0.1
    # NOTE(review): the generator variable `x` here shadows the loop index `x`.
    idx_cfg = CFGS2IX[tuple(int(x) if x==1.0 else x for x in p_vals)]
    hand = plt.errorbar(mean.index, mean, xerr=0, yerr=[mean - p025, p975 - mean], color=COLORS1[idx_cfg], lw=lw)
    #plt.plot(df['t_latent_dim'], df['t_latent_dim'])
    legend_labels.append(LABELS[idx_cfg])
    handles.append(hand)
    plt.fill_between(mean.index, p025, p975, interpolate=True, alpha=alpha, color=COLORS1[idx_cfg], lw=0)
plt.xlabel(r'$r$')
plt.ylabel(r'$\frac{\hat{K}-K}{K}$ (\%)')
plt.legend(handles,legend_labels)
plt.savefig("final_negbin_all.pdf", bbox_inches="tight")
print(new_dat[['p_obs']].drop_duplicates())
print(new_dat[['t_latent_dim']].drop_duplicates())
from IPython.display import FileLink
FileLink("final_negbin_all.pdf")

# +
# Same figure for the Bernoulli-zero (bin_v2) experiment, linear x-axis.
dat_n = pd.read_csv("sensitivity_results/final_exp_poisson_gamma_bin_v2_e762_20_30_cols_1000_rows_1000_c_.csv", index_col=0)
handles = []
legend_labels = []
for x in range(dat_n[["p_a","p_b","p_c", "p_d"]].drop_duplicates().values.shape[0])[1:]:
    p_vals = dat_n[["p_a","p_b","p_c", "p_d"]].drop_duplicates().values[x]
    new_dat = dat_n[(dat_n[["p_a","p_b","p_c", "p_d"]]==p_vals).all(1)]
    df2 = pd.DataFrame(new_dat[ ['p_obs','e_latent_dim', 't_latent_dim' ] ])
    df2['rel_error'] = (df2.e_latent_dim-df2.t_latent_dim)/df2.t_latent_dim
    mean = df2.groupby('p_obs')['rel_error'].median()
    mean = mean*100
    std = df2.groupby('p_obs')['rel_error'].std()
    p025 = 100*df2.groupby('p_obs')['rel_error'].quantile(0.025)
    p975 = 100*df2.groupby('p_obs')['rel_error'].quantile(0.975)
    #plt.xscale("log")
    color="darkgray"
    xshift=0
    lw=2.5
    alpha=0.1
    idx_cfg = CFGS2IX[tuple(int(x) if x==1.0 else x for x in p_vals)]
    hand = plt.errorbar(mean.index, mean, xerr=0, yerr=[mean - p025, p975 - mean], color=COLORS1[idx_cfg], lw=lw)
    #plt.plot(df['t_latent_dim'], df['t_latent_dim'])
    legend_labels.append(LABELS[idx_cfg])
    handles.append(hand)
    plt.fill_between(mean.index, p025, p975, interpolate=True, alpha=alpha, color=COLORS1[idx_cfg], lw=0)
plt.xlabel(r'$p_{obs}$')
plt.ylabel(r'$\frac{\hat{K}-K}{K}$ (\%)')
plt.legend(handles,legend_labels)
plt.savefig("final_bernzero_all.pdf", bbox_inches="tight")
new_dat[['t_latent_dim']].drop_duplicates()
from IPython.display import FileLink
FileLink("final_bernzero_all.pdf")
sensitivity analysis/sensitivity_analysis.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + ### Required #### 1. **Use Etherscan Python SDK** to get on-chain data # - # ### - [ ] play and combine scripts in examples/accounts # + from etherscan.accounts import Account import json #address=0xddbd2b932c763ba5b1b7ae3b362eac3e8d40121a #0x9dd134d14d1e65f84b706d6f205cd5b1cd03a46b # import api key with open('api_key.json', mode='r') as key_file: key = json.loads(key_file.read())['key'] # Setup api for single address address = '0x2a65aca4d5fc5b5c859090a6c34d164135398226' api = Account(address=address, api_key=key) # - ## get balance api.get_balance() trans_page = api.get_transaction_page(page=1, offset=2) trans_page import pandas as pd df_trans_page = pd.DataFrame(trans_page) df_trans_page blocks_minded = api.get_blocks_mined_page(page=1, offset=10, blocktype='blocks') pd.DataFrame(blocks_minded) # + #help('etherscan') #PACKAGE CONTENTS # accounts # client # contracts # errors # etherscan # proxies # stats # tokens # - # ### - [ ] play and combine scripts in examples/blocks # + ### No module named 'etherscan.blocks'... 
#from etherscan.blocks import Blocks #block = 9551218 #api_block = Blocks(api_key=key) # Get the block reward #reward = api_block.get_block_reward(block) #reward # - # ### - [ ] play and combine scripts in examples/contracts # + from etherscan.contracts import Contract address = '0xfb6916095ca1df60bb79ce92ce3ea74c37c5d359' api_contract = Contract(address=address, api_key=key) abi = api_contract.get_abi() print(abi) # - source_code = Contract(address=address, api_key=key) print(source_code) # ### - [ ] play and combine scripts in examples/proxies # + from etherscan.proxies import Proxies api_proxies = Proxies(api_key=key) block = api_proxies.get_block_by_number(9551218) print(block['number']) # - tx_count = api_proxies.get_block_transaction_count_by_number(block_number='0x91bd72') print(int(tx_count, 16)) # + TX_HASH = '0x1e2910a262b1008d0616a0beb24c1a491d78771baa54a33e66065e03b1f46bc1' transaction = api_proxies.get_transaction_by_hash(tx_hash=TX_HASH) pd.DataFrame(transaction,index=[0]) # - # ### - [ ] play and combine scripts in examples/stats # + from etherscan.stats import Stats api_stats = Stats(api_key=key) last_price = api_stats.get_ether_last_price() #print(last_price) pd.DataFrame(last_price, index=[0]) # - ether_supply = api_stats.get_total_ether_supply() print(ether_supply) # ### - [ ] play and combine scripts in examples/tokens # + from etherscan.tokens import Tokens contract_address = '0x57d90b64a1a57749b0f932f1a3395792e12e7055' address = "0xe04f27eb70e025b78871a2ad7eabe85e61212761" api_token = Tokens(contract_address=contract_address, api_key=key) # Get token balance of an address api_token.get_token_balance(address) # - # Get total supply of tokens api_token.get_total_supply() # ### - [ ] play and combine scripts in examples/transactions # + ### No module named 'etherscan.transactions'... 
#from etherscan.transactions import Transactions #api_trans = Transactions(api_key=key) #status = api_trans.get_status(tx_hash=TX_HASH) #print(status) #receipt_status = api_trans.get_tx_receipt_status(tx_hash=TX_HASH) #print(receipt_status)
section1/task3/task3_required.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 (ipykernel) # language: python # name: python3 # --- # + [markdown] slideshow={"slide_type": "notes"} # # Wrapping up QBI # + [markdown] slideshow={"slide_type": "slide"} # <h1>Wrapping up QBI</h1> # # <p> # <b>Quantitative Big Imaging - ETHZ: 227-0966-00L</b> # <br /> # </p> # <br /> # <p style="font-size:1em;">May 12, 2022</p> # <br /><br /> # <p style="font-size:1.5em;padding-bottom: 0.25em;"><NAME></p> # <p style="font-size:1em;">Laboratory for Neutron Scattering and Imaging<br />Paul Scherrer Institut</p> # + [markdown] slideshow={"slide_type": "slide"} # ## Why QBI? # # # __We have experimental data__ # - Great amounts # - Limited time # - Limited manpower # # __We want to publish it__ # - Quantitative analysis # - Reliable # - Repeatable # - Efficent automated workflows # + [markdown] slideshow={"slide_type": "slide"} # ## Objectives # # We need to understand the components of QBI # - The data creation # - Image processing and analysis methods # - Working with data # - Some computer science/engineering # - How to present the results # # + [markdown] slideshow={"slide_type": "slide"} # ## The work flow # # <img src="figures/all_icons.svg" style="height:200px"/> # + [markdown] slideshow={"slide_type": "slide"} # ## Lecture 1 - Image basics # # - What is an image? 
# - Pixelwise operations
# - The histogram

# + [markdown] slideshow={"slide_type": "slide"}
# ## Lecture 2 - Data sets and validation
#
# - Using prepared data
# - What is a good data set
# - Analysis validation workflow
# - Ground truth

# + [markdown] slideshow={"slide_type": "slide"}
# ## Lecture 3 - Image enhancement
#
# - Noise
# - Reducing noise
# - Enhancing image features
# - Filter performance analysis

# + [markdown] slideshow={"slide_type": "slide"}
# ## Lecture 4 - Basic segmentation
# - How are images formed
# - What is a change in contrast
# - How to apply thresholds

# + [markdown] slideshow={"slide_type": "slide"}
# ## Lecture 5 - Advanced segmentation
#
# - Methods to automatically find thresholds
# - Unsupervised methods
# - Supervised methods

# + [markdown] slideshow={"slide_type": "slide"}
# ## Lecture 6 - Shape analysis
# - Component labelling
# - Shape properties
# - Textures
#

# + [markdown] slideshow={"slide_type": "slide"}
# ## Lecture 7 - Complex shape
# - Segmentation of touching objects
# - Skeletons

# + [markdown] slideshow={"slide_type": "slide"}
# ## Lecture 8 - Statistics
# - Probability
# - Implications of statistical analysis
#
#
# - Presenting the results in graphs

# + [markdown] slideshow={"slide_type": "slide"}
# ## Lecture 9 - Dynamic experiments
# - Different types of dynamic experiments
# - Time series analysis
# - Tracking
# - Registration

# + [markdown] slideshow={"slide_type": "slide"}
# ## Lecture 10 - Multiple modalities
# ### Multi modality imaging
# Can we gain more information with additional image sources
# - Modality combinations
# - Registration
# - Data fusion
#
# ### Software engineering
# - Unit testing
# - Working with repositories
# - Continuous integration

# + [markdown] slideshow={"slide_type": "slide"}
# ## Lecture 11 - Scaling up
# - Parallel processing
# - Distributed computing
# - DAGs

# + [markdown] slideshow={"slide_type": "slide"}
# ## Projects
# -
Lectures/Lecture-11/11-WrapUpQBI.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# Linear regression on the Boston housing dataset: load the data, split it
# into train/test sets, fit an ordinary least-squares model, and report the
# test-set mean squared error.

# +
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from sklearn import linear_model
# sklearn.cross_validation was removed in scikit-learn 0.20;
# train_test_split now lives in sklearn.model_selection.
from sklearn.model_selection import train_test_split
import seaborn as sns
from sklearn import metrics
# %matplotlib inline
# -

# NOTE(review): load_boston was removed in scikit-learn 1.2; on recent
# versions switch to another regression dataset
# (e.g. sklearn.datasets.fetch_california_housing).
from sklearn.datasets import load_boston

boston = load_boston()

boston

# +
# About the dataset bunch:
#   data          = feature matrix (the actual data)
#   feature_names = column names
#   target        = ground-truth house prices (what we predict)
# -

# Creating DataFrames: features in df1, target in df2
df1 = pd.DataFrame(boston.data, columns=boston.feature_names)
df1.head()

df2 = pd.DataFrame(boston.target)
df2.head()

df1.describe()

reg = linear_model.LinearRegression()

# training size = 80%, testing size = 20%;
# random_state fixes the shuffle so the split is reproducible
x_train, x_test, y_train, y_test = train_test_split(df1, df2, test_size=0.2, random_state=4)

# Fit on the TRAINING split only -- the original fit(df1, df2) trained on
# the full dataset and leaked the held-out test rows into the model.
reg.fit(x_train, y_train)

reg.coef_
# weights (coefficients) of the linear regression

a = reg.predict(x_test)
a

a[0]

y_test

print(a[1])
print(a[2])
print(a[3])

# Mean squared error on the held-out test set (scalar, unlike the previous
# np.mean((a - y_test)**2), which returned a one-element Series)
metrics.mean_squared_error(y_test, a)
sklearn linear regression.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# +
# %load_ext autoreload
# %autoreload 2
import numpy as np
import scipy.stats as stats
import scipy.special

#graphing
import matplotlib.pyplot as plt

#stats
import statsmodels.api as sm
from statsmodels.base.model import GenericLikelihoodModel

#import testing
import sys
sys.path.append("../")
import vuong_plots


# +
class JointNormal1(GenericLikelihoodModel):
    """Candidate model 1: (endog, exog) ~ N([theta, 0], I_2)."""

    def loglikeobs(self, params):
        # Per-observation log-likelihood of the stacked (y, x) pairs.
        data = np.concatenate([[self.endog], self.exog.transpose()], axis=0)
        mult_rv = stats.multivariate_normal([params[0], 0.0], [[1, 0], [0, 1]])
        return mult_rv.logpdf(data.transpose())


class JointNormal2(GenericLikelihoodModel):
    """Candidate model 2: (endog, exog) ~ N([0, theta], I_2)."""

    def loglikeobs(self, params):
        data = np.concatenate([[self.endog], self.exog.transpose()], axis=0)
        mult_rv = stats.multivariate_normal([0.0, params[0]], [[1, 0], [0, 1]])
        return mult_rv.logpdf(data.transpose())


def setup_shi(yn, xn):
    """Fit both candidate models at their MLEs (the sample means) and
    return the ingredients of the Shi/Vuong model-selection statistics.

    Returns (ll1, grad1, hess1, params1, ll2, grad2, hess2, params2):
    per-observation log-likelihoods, (nobs, 1) score vectors, Hessians
    and parameter estimates for each model.
    """
    # model 1 grad, etc.
    nobs = yn.shape[0]
    model1_param = np.array([yn.mean()])
    model2_param = np.array([xn.mean()])

    model1_deriv = JointNormal1(yn, xn)
    ll1 = model1_deriv.loglikeobs(model1_param)
    grad1 = model1_deriv.score_obs(model1_param).reshape((nobs, 1))
    hess1 = model1_deriv.hessian(model1_param)

    model2_deriv = JointNormal2(yn, xn)
    ll2 = model2_deriv.loglikeobs(model2_param)
    grad2 = model2_deriv.score_obs(model2_param).reshape((nobs, 1))
    hess2 = model2_deriv.hessian(model2_param)

    return ll1, grad1, hess1, model1_param, ll2, grad2, hess2, model2_param


def gen_data(beta=1.5, nobs=1000):
    """Draw nobs samples from N([beta, beta], diag(25, 1)) and return
    the two coordinates and the sample size."""
    cov = [[25, 0], [0, 1]]
    # Use the declared covariance instead of repeating the literal
    # (previously `cov` was defined but unused).
    data = np.random.multivariate_normal([beta, beta], cov, nobs)
    return data[:, 0], data[:, 1], nobs


yn, xn, nobs = gen_data()
ll1, grad1, hess1, params1, ll2, grad2, hess2, params2 = setup_shi(yn, xn)
print(grad1.shape, hess1.shape)

# +
gen_data_ex = lambda: gen_data(nobs=1000, beta=0)
vuong_plots.plot_kstats_table(gen_data_ex, setup_shi, figtitle='../figs/ex2beta0')

# +
# Fixed copy-paste bug: this cell writes the 'ex2beta5' figure but
# previously regenerated beta=0; the file-name pattern used elsewhere
# (beta10 -> 1., beta15 -> 1.5, beta20 -> 2) implies beta=0.5 here.
gen_data_ex = lambda: gen_data(nobs=1000, beta=0.5)
vuong_plots.plot_kstats_table(gen_data_ex, setup_shi, figtitle='../figs/ex2beta5')

# +
gen_data_ex = lambda: gen_data(nobs=1000, beta=1.)
vuong_plots.plot_kstats_table(gen_data_ex, setup_shi, figtitle='../figs/ex2beta10')

# +
gen_data_ex = lambda: gen_data(nobs=1000, beta=1.5)
vuong_plots.plot_kstats_table(gen_data_ex, setup_shi, figtitle='../figs/ex2beta15')

# +
gen_data_ex = lambda: gen_data(nobs=1000, beta=2)
vuong_plots.plot_kstats_table(gen_data_ex, setup_shi, figtitle='../figs/ex2beta20')
# -
shi_ex2/kstats_table.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # How to constrain stellar parameters... # # Things we usually have available: # * multi-band photometry (e.g. APASS, 2MASS, WISE, etc). # * first-order stellar parameter estimates in input catalogue # * Gaia parallax # * Gaia teff and logg for spectral fit. # * Nearby 2MASS Gaia sources (e.g. sum gives contamination) import isochrones # + import pandas as pd def KeplerDat(kic): return pd.read_csv('http://exoplanetarchive.ipac.caltech.edu/cgi-bin/nstedAPI/nph-nstedAPI?&table=keplerstellar&where=KepID='+str(kic)+'&format=csv&order=st_vet_date_str%20desc')
StellarParams TBD.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Policy-Gradients with the REINFORCE algorithm # # **Background**: # In this practical we will train an agent using the REINFORCE algorithm to learn to balance a pole in the OpenAI gym [Cartpole environment](https://gym.openai.com/envs/CartPole-v1). # # **Learning objectives**: # * Understand the policy-gradient approach to directly training a parameterised policy to maximise expected future rewards. # * Understand how the policy-gradient theorem allows us to improve the policy using on-policy state = env.reset(). # # **What is expected of you**: # * Go through the explanation, keeping the above learning objectives in mind. # * Fill in the missing code ("#IMPLEMENT-ME") and train a model to solve the Cartpole-v1 environment in OpenAI gym (you solve it when reward=500). # # A Simple Policy-Gradient Cartpole Example # # ## Introduction # # We have seen in your course that there are many different approaches to training RL agents. In this practical we will take a look at REINFORCE - a simple policy-based method. REINFORCE (and policy-based methods in general) directly optimise a parametrised policy in order to maximise future rewards. # # We will try to learn a policy $\pi_\theta(a | s)$ which outputs a distribution over the possible actions $a$, given the current state $s$ of the environment. The goal is find a set of parameters $\theta$ to maximise the expected discounted return: # \begin{align} # J(\theta) = \mathbb{E}_{\tau \sim p_\theta} \left[\sum_{t=0}^T \gamma^t r(s_t, a_t)\right], # \end{align} # where $\tau$ is a trajectory sampled from $p_\theta$. 
The **policy-gradient** theorem gives us the derivative of this objective function: # \begin{align} # \nabla_\theta J(\theta) = \mathbb{E}_{\tau \sim p_\theta} \left[\left(\sum_{t=0}^{T} \nabla_\theta \log \pi_\theta(a_t|s_t)\right) \left(\sum_{t=0}^T \gamma^t r(s_t, a_t) \right) \right]. # \end{align} # # **NOTE**: # * We have a policy $\pi_\theta(a|s)$ which tells the agent which action $a$ to take, given the state $s$, and it is parameterised in terms of parameters $\theta$. # * Our goal is to maximise $J(\theta)$ by **choosing actions from this policy** that lead to high future rewards. # * We'll use gradient-based optimisation to update the policy parameters $\theta$. We therefore want the gradient of our objective wrt the policy parameters. # * We use the policy-gradient theorem to find a expression for the gradient. This is an expectation over trajectories from our policy and the environment. # * Since we can now sample trajectories $(s_0, a_0, r_1, s_1, a_1, r_2, \ldots)$ using our policy $\pi_\theta$, we can approximate this gradient using **[Monte-Carlo](https://en.wikipedia.org/wiki/Monte_Carlo_integration)** methods. # # This algorithm is called **Monte-Carlo REINFORCE**, and is one type of policy-gradient algorithm. Let's use this to solve the Cartpole environment! # # **Monte-Carlo REINFORCE**: # # for each episode: # 1. sample a trajectory $\tau$ using the policy $\pi_\theta$. # 2. compute $\nabla_\theta J(\theta) \approx \left(\sum_{t=0}^{T} \nabla_\theta \log \pi_\theta(a_t|s_t)\right) \left(\sum_{t=0}^T \gamma^t r(s_t, a_t) \right)$. # 3. 
update policy parameters $\theta \leftarrow \theta + \alpha \nabla_\theta J(\theta)$ # + # import various packages from collections import deque import numpy as np import matplotlib.pyplot as plt import gym import torch import torch.nn as nn import torch.nn.functional as F import torch.optim as optim from torch.distributions import Categorical # + # use gpu if available device = torch.device("cuda" if torch.cuda.is_available() else "cpu") # configure matplotlib # %matplotlib inline plt.rcParams['figure.figsize'] = (15.0, 10.0) # set default size of plots plt.rcParams['image.interpolation'] = 'nearest' plt.rcParams['image.cmap'] = 'gray' # - # ## The Environment # # Cartpole is a standard benchmark in reinforcement learning and is a good sandbox for trying things out. The goal is to balance a pendulum on top of a moving cart. We have 2 actions - either push the cart to the left or push to the right. The state space consists of the cart's position and velocity and the pendulum's angle and angular velocity. Let's create the environment and take a look at the state and action spaces: env = gym.make('CartPole-v1') print('action space:', env.action_space) print('observation space:', env.observation_space) # Here we can see that there are 2 discrete actions and a continuous state space. # ### Taking a few steps # # To get a better feel for the environment, we will use a random policy to genrate a short trajectory. 
# + SUB = str.maketrans("t0123456789+", "ₜ₀₁₂₃₄₅₆₇₈₉₊") print('-'*115) state = env.reset() for i in range (5): # sample random action action = env.action_space.sample() # take action in the environment new_state, reward, done, _ = env.step(action) print('Step t=', i+1, ': (', 'st, at , rt, st+1'.translate(SUB),')') print('(', state, ',', action, ',', reward, ',', new_state, ')') print('-'*115) state = new_state # - # ### Watching a random policy agent play # # Let's also see how a random policy performs in this enviroment: env_1 = gym.make('CartPole-v1') state = env_1.reset() for t in range(200): # sample a random action action = env_1.action_space.sample() env_1.render() state, reward, done, _ = env_1.step(action) env_1.close() # Not very good! The pole only stayed up for a few time steps... Now let's improve things using REINFORCE. # # ## The Policy # # We begin by parameterising the policy $\pi_\theta(a | s)$ as a simple neural network which takes the state (a vector of 4 elements provided by `gym`) as input, and produces a Categorical distribution over the possible actions as output. Simple enough. Refer to [torch.nn](https://pytorch.org/docs/stable/nn.html) class Policy(nn.Module): def __init__(self): super(Policy, self).__init__() # IMPLEMENT-ME # Define neural network layers. Refer to nn.Linear (https://pytorch.org/docs/stable/nn.html#torch.nn.Linear) # We are going to use a neural network with one hidden layer of size 16. # The first layer should have an input size of env.observation_space.shape and an output size of 16 self.fc1 = ... # The second layer should have an input size of 16 and an output size of env.action_space.n self.fc2 = ... def forward(self, x): # IMPLEMENT-ME # Implement the forward pass # apply a ReLU activation after the first linear layer x = ... # apply the second linear layer (without an activation). # the outputs of the second layer will act as the log probabilities for the Categorial distribution. x = ... 
return Categorical(logits=x) # ### Selecting actions with our policy # # For a given state our policy returns a pytorch `Categorial` object. We can sample from this distribution by calling it's `sample` method and we can find the log probability of an action using `log_prob`: # + policy = Policy().to(device) state = env.reset() # convert state (a numpy array) to a torch tensor state = torch.from_numpy(state).float().to(device) dist = policy(state) action = dist.sample() print("Sampled action: ", action.item()) print("Log probability of action: ", dist.log_prob(action).item()) # - # ### Computing the return # # Given a sequence of rewards $(r(s_0, a_0), \ldots, r(s_T, a_T))$ we want to calculate the return $\sum_{t=0}^T \gamma^t r(s_t, a_t)$. def compute_returns(rewards, gamma): # IMPLEMENT-ME # compute the return using the above equation return returns # ## REINFORCE # # Now its time to implement the algorithm # # **Monte-Carlo REINFORCE**: # # for each episode: # 1. sample a trajectory $\tau$ using the policy $\pi_\theta$. # 2. compute $\nabla_\theta J(\theta) \approx \left(\sum_{t=0}^{T} \nabla_\theta \log \pi_\theta(a_t|s_t)\right) \left(\sum_{t=0}^T \gamma^t r(s_t, a_t) \right)$. # 3. update policy parameters $\theta \leftarrow \theta + \alpha \nabla_\theta J(\theta)$ # ### Hyperameters learning_rate = 1e-2 number_episodes = 1500 max_episode_length = 1000 gamma = 1.0 def reinforce(seed, verbose=True): # set random seeds (for reproducibility) torch.manual_seed(seed) torch.cuda.manual_seed_all(seed) env.seed(seed) # instantiate the policy and optimizer policy = Policy().to(device) optimizer = optim.Adam(policy.parameters(), lr=learning_rate) scores = [] scores_deque = deque(maxlen=100) for episode in range(1, number_episodes+1): ################################################################# # 1. Collect trajectories using our policy and save the rewards # # and the log probability of each action taken. 
# ################################################################# log_probs = [] rewards = [] state = env.reset() for t in range(max_episode_length): # IMPLEMENT-ME: get the distribution over actions for state dist = ... # IMPLEMENT-ME: sample an action from the distribution action = ... # IMPLEMENT-ME: compute the log probability log_prob = ... # IMPLEMENT-ME: take a step in the environment state, reward, done, _ = ... # save the reward and log probability rewards.append(reward) log_probs.append(log_prob.unsqueeze(0)) if done: break # for reporting save the score scores.append(sum(rewards)) scores_deque.append(sum(rewards)) ################################################################# # 2. evaluate the policy gradient # ################################################################# # IMPLEMENT-ME: calculate the discounted return of the trajectory returns = ... log_probs = torch.cat(log_probs) # IMPLEMENT-ME: multiply the log probabilities by the returns and sum (see the policy-gradient theorem) # Remember to multiply the result by -1 because we want to maximise the returns policy_loss = ... ################################################################# # 3. update the policy parameters (gradient descent) # ################################################################# optimizer.zero_grad() policy_loss.backward() optimizer.step() # report the score to check that we're making progress if episode % 50 == 0 and verbose: print('Episode {}\tAverage Score: {:.2f}'.format(episode, np.mean(scores_deque))) if np.mean(scores_deque) >= 495.0 and verbose: print('Environment solved in {:d} episodes!\tAverage Score: {:.2f}'.format(episode, np.mean(scores_deque))) break return policy, scores policy, scores = reinforce(42) # ## Seeing our learned policy in action # # Let's watch our learned policy balance the pole! 
env = gym.make('CartPole-v1') state = env.reset() for t in range(2000): dist = policy(torch.from_numpy(state).float().to(device)) action = dist.sample() env.render() state, reward, done, _ = env.step(action.item()) if done: break env.close() # ### Plotting the results # # Finally, let's plot the learning curve. def moving_average(a, n) : ret = np.cumsum(a, dtype=float) ret[n:] = ret[n:] - ret[:-n] return ret / n fig = plt.figure() ax = fig.add_subplot(111) x = np.arange(1, len(scores)+1) ax.plot(x, scores, label='Score') m_average = moving_average(scores, 50) ax.plot(x, m_average, label='Moving Average (w=100)', linestyle='--') plt.legend() plt.ylabel('Score') plt.xlabel('Episode #') plt.title('REINFORCE learning curve - CartPole-v1') plt.show() # Here is what your graph should look like. # ![REINFORCE learning curve](https://raw.githubusercontent.com/andrecianflone/rl_at_ammi/master/images/reinforce.png "REINFORCE learning curve") # We can see that at the end of training our policy consistantly (more or less) recieves returns of 500. # ## Investigating the variance of REINFORCE # # We noted in class that REINFORCE is a high variance algorithm. We can investigate the variance by running multiple trials and averaging the results. 
np.random.seed(53) seeds = np.random.randint(1000, size=5) all_scores = [] for seed in seeds: print("started training with seed: ", seed) _, scores = reinforce(int(seed), verbose=False) print("completed training with seed: ", seed) all_scores.append(scores) smoothed_scores = [moving_average(s, 50) for s in all_scores] smoothed_scores = np.array(smoothed_scores) mean = smoothed_scores.mean(axis=0) std = smoothed_scores.std(axis=0) fig = plt.figure() ax = fig.add_subplot(111) x = np.arange(1, len(mean)+1) ax.plot(x, mean, '-', color='blue') ax.fill_between(x, mean - std, mean + std, color='blue', alpha=0.2) plt.ylabel('Score') plt.xlabel('Episode #') plt.title('REINFORCE averaged over 5 seeds') plt.show() # Here is what your graph should look like. # ![REINFORCE averaged over 5 seeds](https://raw.githubusercontent.com/andrecianflone/rl_at_ammi/master/images/reinforce_averaged.png "REINFORCE averaged over 5 seeds") # ## Reducing the variance of REINFORCE # # In class we saw a couple of tricks to reduce the variance of REINFORCE and improve its performance. Firstly, future actions should not change past decision. Present actions only impact the future. Therefore, we can change our objective function to reflect this: # \begin{align} # \nabla_\theta J(\theta) = \mathbb{E}_{\tau \sim p_\theta} \left[\sum_{t=0}^{T} \nabla_\theta \log \pi_\theta(a_t|s_t) \sum_{t'= t}^T \gamma^{t'- t} r(s_{t'}, a_{t'})\right]. # \end{align} # # We can also reduce variance by subtracing a state dependent baseline to get: # \begin{align} # \nabla_\theta J(\theta) = \mathbb{E}_{\tau \sim p_\theta} \left[\sum_{t=0}^{T} \nabla_\theta \log \pi_\theta(a_t|s_t) \sum_{t'= t}^T \left( \gamma^{t'- t} r(s_{t'}, a_{t'}) - b(s_{t'}) \right)\right]. # \end{align} # # For our baseline we'll use the average of the returns over the trajectory. As a final trick we normalise the returns by dividing by the standard deviation. 
def compute_returns_baseline(rewards, gamma): r = 0 returns = [] for step in reversed(range(len(rewards))): r = rewards[step] + gamma * r returns.insert(0, r) returns = np.array(returns) # IMPLEMENT-ME: normalize the returns by subtracting the mean and dividing by the standard deviation returns = ... return returns def reinforce_baseline(seed, verbose=True): # set random seeds (for reproducibility) torch.manual_seed(seed) torch.cuda.manual_seed_all(seed) env.seed(seed) # instantiate the policy and optimizer policy = Policy().to(device) optimizer = optim.Adam(policy.parameters(), lr=learning_rate) scores = [] scores_deque = deque(maxlen=100) for episode in range(1, number_episodes+1): ################################################################# # 1. Collect trajectories using our policy and save the rewards # # and the log probability of each action taken. # ################################################################# log_probs = [] rewards = [] state = env.reset() for t in range(max_episode_length): # IMPLEMENT-ME: get the distribution over actions for state dist = ... # IMPLEMENT-ME: sample an action from the distribution action = ... # IMPLEMENT-ME: compute the log probability log_prob = ... # IMPLEMENT-ME: take a step in the environment state, reward, done, _ = ... # save the reward and log probability rewards.append(reward) log_probs.append(log_prob.unsqueeze(0)) if done: break # for reporting save the score scores.append(sum(rewards)) scores_deque.append(sum(rewards)) ################################################################# # 2. 
evaluate the policy gradient (with variance reduction) # ################################################################# # calculate the discounted return of the trajectory returns = compute_returns_baseline(rewards, gamma) returns = torch.from_numpy(returns).float().to(device) log_probs = torch.cat(log_probs) # IMPLEMENT-ME: multiply the log probabilities by the returns and sum (see the policy-gradient theorem) # Remember to multiply the result by -1 because we want to maximise the returns policy_loss = ... ################################################################# # 3. update the policy parameters (gradient descent) # ################################################################# optimizer.zero_grad() policy_loss.backward() optimizer.step() # report the score to check that we're making progress if episode % 50 == 0 and verbose: print('Episode {}\tAverage Score: {:.2f}'.format(episode, np.mean(scores_deque))) if np.mean(scores_deque) >= 495.0 and verbose: print('Environment solved in {:d} episodes!\tAverage Score: {:.2f}'.format(episode, np.mean(scores_deque))) break return policy, scores # Let's see if these changes give us any improvement. np.random.seed(53) seeds = np.random.randint(1000, size=5) all_scores_baseline = [] for seed in seeds: print("started training with seed: ", seed) _, scores = reinforce_baseline(int(seed), verbose=False) print("completed training with seed: ", seed) all_scores_baseline.append(scores) # ## Comparing the methods # # Finally we'll compare the performance of the two methods. 
smoothed_scores_baseline = [moving_average(s, 50) for s in all_scores_baseline] smoothed_scores_baseline = np.array(smoothed_scores_baseline) mean_baseline = smoothed_scores_baseline.mean(axis=0) std_baseline = smoothed_scores_baseline.std(axis=0) fig = plt.figure() ax = fig.add_subplot(111) x = np.arange(1, len(mean_baseline)+1) ax.plot(x, mean_baseline, '-', color='green', label='Variance reduced REINFORCE') ax.plot(x, mean, '-', color='blue', label='REINFORCE') ax.fill_between(x, mean_baseline - std_baseline, mean_baseline + std_baseline, color='green', alpha=0.2) ax.fill_between(x, mean - std, mean + std, color='blue', alpha=0.2) plt.ylabel('Score') plt.xlabel('Episode #') plt.legend() plt.title('Comparison of REINFORCE and Variance reduced REINFORCE (averaged over 5 seeds)') plt.show() # Here is what your graph should look like. # ![Comparison of REINFORCE and Variance reduced REINFORCE (averaged over 5 seeds)](https://raw.githubusercontent.com/andrecianflone/rl_at_ammi/master/images/reinforce_vs_with_baseline.png "Comparison of REINFORCE and Variance reduced REINFORCE (averaged over 5 seeds)") #
Reinforcement-Learning/RL-codes/REINFORCE_exercise.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Modelo del rendimiento de una cuenta de ahorro # # <img style="center" src="https://static.pexels.com/photos/9660/business-money-pink-coins.jpg" width="500px" height="200px" alt="atom"/> # # > **¿Tiene el dinero el mismo valor a lo largo del tiempo?** La respuesta es *no*. Todos lo hemos vivido. # # > Dos situaciones básicas: # 1. <font color=blue>Inflación</font>: ¿Cuánto dinero necesitabas para comprar unas papas y un refresco hace 10 años? ¿Cuánto necesitas hoy? # 2. <font color=blue>Interés</font>: no es lo mismo tener $\$10000$ MXN disponibles hoy a recibir $\$10000$ MXN en un año, pues los primeros pueden ser invertidos en un negocio o una cuenta bancaria para generar *interés*. Por lo tanto los $\$10000$ MXN disponibles hoy valen más que los $\$10000$ MXN que se recibirán en un año. # # Referencia: # - <NAME>, <NAME>. *Ingeniería económica básica*, ISBN: 978-607-519-017-4. (Disponible en biblioteca) # Referencias: # - http://www.sympy.org # - http://matplotlib.org # - http://www.numpy.org # - http://ipywidgets.readthedocs.io/en/latest/index.html # ___ # ## Interés # Nos centraremos en como cambia el valor del dinero en el tiempo debido al **interés**. Existen dos tipos: # ### Capitalización por interés simple # Este tipo de interés se calcula <font color=red>única y exclusivamente sobre la cantidad original que se invirtió</font>. Como consecuencia, el interés generado no forma parte del dinero que se invierte, es decir, los <font color=blue>intereses no ganan intereses</font>. # # Suponga que se tiene un capital inicial $C_0$ y se invierte a un plazo de $k$ periodos (pueden ser meses, trimestres, semestres, años...) a una tasa de **interés simple** por periodo $i$. 
Al final del primer periodo, el capital $C_1$ que se obtiene es: # # $$C_1=C_0+iC_0=C_0(1+i).$$ # # De la misma manera, como el interés solo se calcula sobre el capital inicial, al final del segundo periodo, el capital $C_2$ que se obtiene es: # # $$C_2=C_1+iC_0=C_0+iC_0+iC_0=C_0(1+2i).$$ # # Así, al final del $k-$ésimo periodo, el capital $C_k$ que se obtiene es: # # $$C_k=C_{k-1}+iC_0=C_0+kiC_0=C_0(1+ki).$$ # > **Ejemplo.** Suponga que se tiene un capital de \$10000 MXN, el cual se pone en un fondo de inversión que paga una tasa de interés simple del 0.8% mensual. # # > Si se tiene una meta de ahorro de \$11000 MXN sin inversiones adicionales, ¿cuántos meses se debería dejar invertido el dinero? # Librería para cálculo numérico import numpy as np # Valores dados en el enunciado C0 = 10000 i = 0.8 / 100 Meta = 11000 # Despejamos k tal que C_k=meta k = np.ceil((Meta / C0 - 1) / i).astype(int) k (1 + k * i) * C0 # Imprimimos respuesta en pantalla print(f"Para llegar a nuestra meta de $11000, necesitamos {k} meses, con lo que obtenemos" f" al final de este periodo ${round((1 + k * i) * C0)}") # > <font color=blue>**Actividad.**</font> # > - ¿Qué pasa si el interés no es del 0.8% mensual sino del 1% mensual? # > - ¿Qué pasa si la meta no son $\$11000$ MXN si no $\$12000$ MXN? # Solución # > Una gráfica que nos permite ilustrar la situación anterior se puede realizar de la siguiente manera. 
# Librerías para gráficos from matplotlib import pyplot as plt # Para que se muestren las gráficas en la misma ventana # %matplotlib inline # Librería para widgets de python interactivo from ipywidgets import interact, interact_manual, fixed, widgets # Explicar la siguiente función # Función de gráficos de interés simple def interes_simple(C_0, meta, i): # Despejamos k k = np.ceil((meta / C_0 - 1) / i).astype(int) # Notar el uso de la función ceil C_k = C_0 * (1 + k * i) # Cálculo del capital al final del periodo k C_k = round(C_k, 2) # Redondeo con dos cifras decimales # Vector de periodos kk = np.linspace(0, k, k + 1) # Vector de capitales por periodo CC = C_0 * (1 + kk * i) # Gráfico plt.figure(num=1) # Figura 1 plt.clf() # Borrar lo que contenga la figura plt.plot(kk, CC,'*',linewidth=3) # Se grafica la evolución de los capitales plt.plot(kk, meta * np.ones(k + 1), '--k') # Se grafica la meta plt.xlabel('k') # Etiqueta eje x plt.ylabel('C_k') # Etiqueta eje y plt.grid() # Malla en la gráfica plt.show() # Mostrar la figura print("El número de periodos que se debe dejar invertido el dinero para llegar a la meta de ", meta," es ", k, ". Al final del periodo ", k,", el capital es ", C_k, ".", sep="") interact_manual(interes_simple, C_0=fixed(10000), meta=(10000, 20000, 100), i=widgets.FloatSlider(value=0.008,min=0.005,max=0.015,step=0.001,readout_format='.3f')) # Como se esperaba, el capital en el $k-$ésimo periodo $C_k=C_0(1+ki)$ crece linealmente con $k$. # ### Capitalización por interés compuesto # El capital que genera el interés simple permanece constante todo el tiempo de duración de la inversión. En cambio, el que produce el interés compuesto en un periodo se <font color=red>convierte en capital en el siguiente periodo</font>. Esto es, el interés generado al final de un periodo <font color=blue>se reinvierte para el siguiente periodo para también producir interés</font>. 
# # Suponga que se tiene un capital inicial $C_0$, y se va a ceder el uso de este capital por un periodo de tiempo determinado a una tasa de interés $i$. El capital que se obtiene al final del primer periodo $C_1$ se puede calcular por # # $$C_1=C_0(1+i).$$ # # Si la anterior suma se vuelve a ceder a la misma tasa de interés, al final del periodo dos el capital $C_2$ es # # $$C_2=C_1(1+i)=C_0(1+i)^2.$$ # # Si se repite el anterior proceso $k$ veces, el capital al final del $k-$ésimo periodo $C_k$ es # # $$C_k=C_{k-1}(1+i)=C_0(1+i)^k.$$ # # **Referencia**: # - https://es.wikipedia.org/wiki/Inter%C3%A9s_compuesto. # > **Ejemplo.** Suponga que se tiene un capital de \$10000 MXN, el cual se pone en un fondo de inversión que paga una tasa de interés del 0.8% mensual. # # > Si se tiene una meta de ahorro de \$11000 MXN sin inversiones adicionales, ¿cuántos meses se debería dejar invertido el dinero? # # > Muestre una gráfica que ilustre la situación. def interes_compuesto(C_0, meta, i): # Despejamos k k = np.ceil(np.log(meta / C_0) / np.log(1 + i)).astype(int) C_k = C_0 * (1 + i)**k # Cálculo del capital al final del periodo k C_k = round(C_k, 2) # Redondeo con dos cifras decimales # Vector de periodos kk = np.linspace(0, k, k + 1) # Vector de capitales por periodo CC = C_0 * (1 + i)**kk # Gráfico plt.figure(num=1) # Figura 1 plt.clf() # Borrar lo que contenga la figura plt.plot(kk, CC, '*', linewidth=3) # Se grafica la evolución de los capitales plt.plot(kk, meta * np.ones(k + 1), '--k') # Se grafica la meta plt.xlabel('k') # Etiqueta eje x plt.ylabel('C_k') # Etiqueta eje y plt.grid() # Malla en la gráfica plt.show() # Mostrar la figura print("El número de periodos que se debe dejar invertido el dinero para llegar a la meta de ", meta," es ", k, ". 
Al final del periodo ", k,", el capital es ", C_k, ".", sep="") interact_manual(interes_compuesto, C_0=fixed(10000), meta=(10000, 100000, 100), i=fixed(0.008)) # El capital en el $k-$ésimo periodo $C_k=C_0(1+i)^k$ crece de manera exponencial con $k$. # > <font color=blue>**Actividad.**</font> # > - Modificar el código anterior para dejar fija la meta de ahorro y variar la tasa de interés compuesta. # ### Capitalización continua de intereses # La capitalización continua se considera un tipo de capitalización compuesta, en la que a cada instante de tiempo $t$ se se capitalizan los intereses. Es decir, la frecuencia de capitalización es infinita (o, equivalentemente, el periodo de capitalización tiende a cero). # # Suponga que se tiene un capital inicial $C_0$, y que el capital acumulado en el tiempo $t$ es $C(t)$. Queremos saber cuanto será el capital pasado un periodo de tiempo $\Delta t$, dado que la tasa de interés efectiva para este periodo de tiempo es $i$. De acuerdo a lo anterior tenemos # # $$C(t+\Delta t)=C(t)(1+i)=C(t)(1+r\Delta t),$$ # # donde $r=\frac{i}{\Delta t}$ es la tasa de interés instantánea. Manipulando la anterior expresión, obtenemos # # $$\frac{C(t+\Delta t)-C(t)}{\Delta t}=r\; C(t).$$ # # Haciendo $\Delta t\to 0$, obtenemos la siguiente ecuación diferencial # # $$\frac{d C(t)}{dt}=r\; C(t),$$ # # sujeta a la condición inicial (monto o capital inicial) $C(0)=C_0$. # # La anterior, es una ecuación diferencial lineal de primer orden, para la cual se puede calcular la *solución analítica*. # + # Librería de cálculo simbólico import sympy as sym # Para imprimir en formato TeX sym.init_printing(use_latex="mathjax") # + # Símbolos t(para el tiempo) y r(para el interés instantáneo) sym.var("t r") # Función de capital C = sym.Function("C") # - # Ecuación diferencial ode = sym.Eq(sym.Derivative(C(t), t) - r * C(t), 0) # Mostrar ecuación display(ode) # Resolver sym.dsolve(ode, C(t)) # con $C_1=C_0$. 
# # La equivalencia entre la tasa de interés compuesta $i$ y la tasa de interés instantánea $r$ viene dada por # # $$e^r=1+i.$$ # ___ # ¿Cómo podemos calcular la *solución numérica*? # > **Ejemplo.** Suponga que se tiene un capital de \$10000 MXN, el cual se pone en un fondo de inversión que paga una tasa de interés del 0.8% mensual. # # > Si se tiene una meta de ahorro de \$11000 MXN sin inversiones adicionales, ¿cuánto tiempo se debe dejar invertido el dinero? # # > Muestre una gráfica que ilustre la situación. # Librerías para integración numérica from scipy.integrate import odeint help(odeint) # Modelo de capitalización continua def cap_continua(C, t, r): return r * C def interes_continuo(C_0, meta, r): # Despejamos t t = np.log(meta / C_0) / r # Vector de periodos tt = np.linspace(0, t, 100) # Vector de capitales por periodo CC = odeint(cap_continua, C_0, tt, args = (r,)) # Gráfico plt.figure(num=1) # Figura 1 plt.clf() # Borrar lo que contenga plt.plot(tt, CC, '-', linewidth=3) # Se grafica la evolución de los capitales plt.plot(tt,meta*np.ones(len(tt)),'--k') # Se grafica la meta plt.xlabel('t') # Etiqueta eje x plt.ylabel('C(t)') # Etiqueta eje y plt.grid() # Malla en la gráfica plt.show() # Mostrar la figura print("El tiempo que se debe dejar invertido el dinero para llegar a la meta de ", meta," es ", t, " meses.", sep="") interact_manual(interes_continuo, C_0=fixed(10000), meta=(10000,100000,100), r=fixed(np.log(1+0.008))); # ### Actividad. # 1. Resolver simbólicamente y numéricamente la siguiente ecuación diferencial: # $$\frac{dx}{dt}=\frac{x}{t+1}; \quad x(0) = 1$$ # + # Símbolos t(para el tiempo) y r(para el interés instantáneo) # Función de capital # Ecuacion diferencial # - # Resolver # Despejamos la constante usando la condición inicial: # # $$ # 1=x(0)=C_1(0 + 1)=C_1 # $$ # Finalmente, la solución es: # # $$ # x(t) = t + 1 # $$ # 2. Obtener la solución numérica. 
# Funcion a integrar # + # Condicion inicial # Vector de tiempo # - # Solucion numerica # Grafica x vs.t # 3. Comparar. # Grafica x vs.t # ___ # ## Tabla de abonos # Como aplicación importante del concepto de interés compuesto se encuentra la creación de un modelo de cuenta de ahorro. # # Referencia: # - <NAME>, <NAME>. *Ingeniería económica básica*, ISBN: 978-607-519-017-4. (Disponible en biblioteca) # - http://pbpython.com/amortization-model.html # Librería de análisis de datos import pandas as pd # Librería para manipulación de fechas from datetime import date # + # Datos para la cuenta de ahorro Tasa_interes = 0.08 # 8% anual Anos = 30 Abonos_ano = 12 Inicial = 50000 Meta = 100000 fecha_inicio = date(2021, 7, 1) # Tasa interés por periodo i = Tasa_interes / Abonos_ano # Tasa de interés mensual # Total de periodos T = Anos * Abonos_ano # # Cálculo de abonos mensuales iguales para llegar a la meta de ahorro en el tiempo deseado Abono = 1000# i * (Meta - Inicial * (1 + i)**T) / ((1 + i)**T - 1) # Rango de fechas en la tabla rng = pd.date_range(fecha_inicio, periods=T, freq='MS') rng.name = "Fecha del abono" # Columnas de abonos y extracto de cuenta en las fechas respectivas df = pd.DataFrame(index=rng, columns=['Abono', 'Balance'], dtype='float') # Para poner el indice con los periodos y no con las fechas df.reset_index(inplace=True) df.index += 1 df.index.name = "Periodo" # Columna correspondiente al periodo # Los abonos son iguales df["Abono"] = Abono # Vector indizador de los periodos de los abonos index_vector = np.arange(1,len(rng)+1) # Extracto de la cuenta mes a mes df["Balance"] = Inicial * (1 + i)**index_vector + Abono * (((1 + i)**index_vector - 1) / i) # Redondear con dos cifras decimales df = df.round(2) df # La siguiente solución es también válida. 
Sin embargo, evitar el uso de 'for' #df.loc[1, "Balance"] = Inicial*(1+Tasa_interes/Abonos_ano)**1 + df.loc[1, "Abono"] #for i in range(2, len(df)+1): # # Get the previous balance as well as current payments # prev_balance = df.loc[i-1, "Balance"] # df.loc[i, "Balance"] = prev_balance*(1+Tasa_interes/Abonos_ano)**1 + df.loc[i, "Abono"] # - df['Abono'].sum() + Inicial df['Balance'].plot(); # > <font color=blue>**Tarea.**</font> # > - Averiguar tasas de interés reales en algún banco y proyectar un ahorro mensual para que al terminar su carrera tengan $50000 MXN en su cuenta. # > - Hacer esto en un nuevo archivo, llamarlo Tarea6_ApellidoNombre.ipynb y subirlo a moodle. # > - Plazo: Lunes 5 de Julio. # <script> # $(document).ready(function(){ # $('div.prompt').hide(); # $('div.back-to-top').hide(); # $('nav#menubar').hide(); # $('.breadcrumb').hide(); # $('.hidden-print').hide(); # }); # </script> # # <footer id="attribution" style="float:right; color:#808080; background:#fff;"> # Created with Jupyter by <NAME>. # </footer>
Modulo3/Clase13_ModeloAhorro.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # <a href="https://colab.research.google.com/github/Tessellate-Imaging/Monk_Object_Detection/blob/master/application_model_zoo/Example%20-%20Nexet%20Dataset%20Vehicle%20Detection.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a> # # Table of contents # # # ## 1. Installation Instructions # # # # ## 2. Use trained model to detect vehicles on road # # # # ## 3. How to train using Tensorflow object detection API wrapper and nexet dataset # ## More examples on # - Tensorflow object detection API 1.0 - https://github.com/Tessellate-Imaging/Monk_Object_Detection/tree/master/example_notebooks/12_tf_obj_1 # - Tensorflow object detection API 2.0 - https://github.com/Tessellate-Imaging/Monk_Object_Detection/tree/master/example_notebooks/13_tf_obj_2 # # Installation # # - Run these commands # # - git clone https://github.com/Tessellate-Imaging/Monk_Object_Detection.git # # - cd Monk_Object_Detection/12_tf_obj_1/installation # # - Select the right file and run # # - chmod +x install_cuda10.sh && ./install_cuda10.sh # ! git clone https://github.com/Tessellate-Imaging/Monk_Object_Detection.git # + # Only for COLAB # Switch to TF 1.0 version (Uncomment the following line) # #%tensorflow_version 1.x # Now reset the runetime if prompted by colab # - # Check TF version import tensorflow as tf print(tf.__version__) # + # For colab use the command below # ! cd Monk_Object_Detection/12_tf_obj_1/installation && chmod +x install_colab.sh && ./install_colab.sh # Restart colab runtime now # For Local systems and cloud select the right CUDA version # # ! 
cd Monk_Object_Detection/12_tf_obj_1/installation && chmod +x install_cuda10.sh && ./install_cuda10.sh # - # # Use already trained model for demo import os import sys sys.path.append("Monk_Object_Detection/12_tf_obj_1/lib/") from infer_detector import Infer gtf = Infer(); # + # Download trained model # - # ! wget --load-cookies /tmp/cookies.txt "https://docs.google.com/uc?export=download&confirm=$(wget --save-cookies /tmp/cookies.txt --keep-session-cookies --no-check-certificate 'https://docs.google.com/uc?export=download&id=1ohF_FKhPdJlQJsnLPzkSy_j5qxoQgPNs' -O- | sed -rn 's/.*confirm=([0-9A-Za-z_]+).*/\1\n/p')&id=1ohF_FKhPdJlQJsnLPzkSy_j5qxoQgPNs" -O obj_nexet_trained.zip && rm -rf /tmp/cookies.txt # ! unzip -qq obj_nexet_trained.zip gtf.set_model_params('export_dir/frozen_inference_graph.pb', "export_dir/classes.txt") scores, bboxes, labels = gtf.infer_on_image("export_dir/test/1.jpg", thresh=0.1, img_size=640, bbox_thickness=3, text_size=2, text_thickness=4); from IPython.display import Image Image(filename='output.png') scores, bboxes, labels = gtf.infer_on_image("export_dir/test/2.jpg", thresh=0.3, img_size=640, bbox_thickness=3, text_size=2, text_thickness=4); from IPython.display import Image Image(filename='output.png') scores, bboxes, labels = gtf.infer_on_image("export_dir/test/3.jpg", thresh=0.1, img_size=640, bbox_thickness=3, text_size=2, text_thickness=4); from IPython.display import Image Image(filename='output.png') scores, bboxes, labels = gtf.infer_on_image("export_dir/test/4.jpg", thresh=0.1, img_size=640, bbox_thickness=3, text_size=2, text_thickness=4); from IPython.display import Image Image(filename='output.png') scores, bboxes, labels = gtf.infer_on_image("export_dir/test/5.jpg", thresh=0.1, img_size=640, bbox_thickness=3, text_size=2, text_thickness=4); from IPython.display import Image Image(filename='output.png') # # Training on the dataset # ## Dataset # - Credits: https://www.kaggle.com/solesensei/nexet-original # ! 
pip install kaggle # ! kaggle datasets download solesensei/nexet-original # ! unzip -qq nexet-original.zip # ! mkdir annos import os import sys import numpy as np import cv2 import pandas as pd from pascal_voc_writer import Writer df = pd.read_csv("train_boxes.csv"); df.columns # + from tqdm import tqdm classes = []; for i in tqdm(range(len(df))): img_name = df["image_filename"][i]; if(not os.path.isfile("nexet/nexet/nexet_2017_1/" + img_name)): pass x1 = int(df['x0'][i]); y1 = int(df['y0'][i]); x2 = int(df['x1'][i]); y2 = int(df['y1'][i]); label = df["label"][i]; if(label not in classes): classes.append(label) if(i == 0): current_img = img_name; img = cv2.imread("nexet/nexet/nexet_2017_1/" + img_name); h, w, c = img.shape; writer = Writer(img_name, w, h); writer.addObject(label, x1, y1, x2, y2); elif(img_name == current_img): writer.addObject(label, x1, y1, x2, y2); else: writer.save('annos/' + img_name.split(".")[0] + ".xml"); current_img = img_name; img = cv2.imread("nexet/nexet/nexet_2017_1/" + img_name); h, w, c = img.shape; writer = Writer(img_name, w, h); writer.addObject(label, x1, y1, x2, y2); #break; f = open("classes.txt", 'w') for i in range(len(classes)): f.write(classes[i] + "/n"); f.close(); # - # # Training import os import sys sys.path.append("Monk_Object_Detection/12_tf_obj_1/lib/") from train_detector import Detector gtf = Detector(); gtf.list_models(); # + train_img_dir = "nexet/nexet/nexet_2017_1/"; train_anno_dir = "annos"; class_list_file = "classes.txt"; gtf.set_train_dataset(train_img_dir, train_anno_dir, class_list_file, batch_size=12, trainval_split = 0.8) # - # + # Create tf record # - gtf.create_tfrecord(data_output_dir="data_tfrecord") # + # Model and hyper params # - gtf.set_model_params(model_name="ssd_resnet50_v1_fpn") gtf.set_hyper_params(num_train_steps=1000000, lr=0.03) gtf.export_params(output_directory="export_dir"); # + # training # tf.app.run() executes sys.exit() function hence cannot run in a jupyter notebook directory # 
Run in a terminal - python Monk_Object_Detection/12_tf_obj_1/lib/train.py # or # Run the following command from notebook # - # %run Monk_Object_Detection/12_tf_obj_1/lib/train.py # + # exportaing trained model # tf.app.run() executes sys.exit() function hence cannot run in a jupyter notebook directory # Run in a terminal - python Monk_Object_Detection/12_tf_obj_1/lib/export.py # or # Run the following command from notebook # - # %run Monk_Object_Detection/12_tf_obj_1/lib/export.py # # Inference import os import sys sys.path.append("Monk_Object_Detection/12_tf_obj_1/lib/") from infer_detector import Infer gtf = Infer(); gtf.set_model_params('export_dir/frozen_inference_graph.pb', "classes.txt") import os img_list = os.listdir("nexet/nexet/nexet_2017_1/"); scores, bboxes, labels = gtf.infer_on_image("nexet/nexet/nexet_2017_1/"+img_list[0], thresh=0.1, img_size=640, bbox_thickness=3, text_size=2, text_thickness=4); from IPython.display import Image Image(filename='output.png') scores, bboxes, labels = gtf.infer_on_image("nexet/nexet/nexet_2017_1/"+img_list[10], thresh=0.4, img_size=640, bbox_thickness=3, text_size=2, text_thickness=4); from IPython.display import Image Image(filename='output.png')
application_model_zoo/Example - Nexet Dataset Vehicle Detection.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Simulación de una avenida con paso peatonal # En esta simulación se representa una avenida con dos carriles y un paso peatonal. El tráfico se controla con dos semáforos que detienen a los autos en el paso peatonal. # ## Librerías para la simulación import agentpy as ap import numpy as np import matplotlib.pyplot as plt import math import IPython import json from pprint import pprint from collections import defaultdict # ## Definición del agente semáforo class Semaphore(ap.Agent): """ Esta clase define a un semáforo. """ def setup(self): """ Este método se utiliza para inicializar al semáforo. """ self.step_time = 0.3 # Tiempo que dura cada paso de la simulación self.direction = [0, 1] # Dirección a la que apunta el semáforo self.state = 0 # Estado del semáforo 0 = verde, 1 = amarillo, 2 = rojo self.state_time = 0 # Tiempo que ha durado el semáforo en el estado actual self.green_duration = 50 # Tiempo que dura el semáforo en verde self.yellow_duration = 5 # Tiempo que dura el semáforo en amarillo self.red_duration = 45 # Tiempo que dura el semáforo en rojo def update(self): """ Este método actualiza el estado del semáforo. """ self.state_time += self.step_time if self.state == 0: # Caso en el que el semáforo está en verde if self.state_time >= self.green_duration: self.state = 1 self.state_time = 0 elif self.state == 1: # Caso en el que el semáforo está en amarillo if self.state_time >= self.yellow_duration: self.state = 2 self.state_time = 0 elif self.state == 2: # Caso en el que el semáforo está en rojo if self.state_time >= self.red_duration: self.state = 0 self.state_time = 0 def set_green(self): """ Este método forza el semáforo a estar en verde. 
""" self.state = 0 self.state_time = 0 def set_yellow(self): """ Este método forza el semáforo a estar en amarillo. """ self.state = 1 self.state_time = 0 def set_red(self): """ Este método forza el semáforo a estar en rojo. """ self.state = 2 self.state_time = 0 # ## Definición del agente auto # + class Car(ap.Agent): """ Esta clase define a un auto. """ def setup(self): """ Este método se utiliza para inicializar un robot limpiador. """ self.step_time = 0.1 # Tiempo que dura cada paso de la simulación self.current_frame = 0 self.direction = [1, 0] # Dirección a la que viaja el auto self.speed = 0.0 # Velocidad en metros por segundo self.max_speed = 20 # Máxima velocidad en metros por segundo self.state = 1 # Car state: 1 = ok, 0 = dead def update_position(self): """ Este método se utiliza para inicializar la posición del auto. """ # Verifica si el auto no ha chocado if self.state == 0: return # Actualiza la posición según la velocidad actual self.model.avenue.move_by(self, [self.speed*self.direction[0], self.speed*self.direction[1]]) def update_speed(self): """ Este método se utiliza para inicializar la velocidad del auto. 
""" # Verifica si el auto no ha chocado if self.state == 0: return # Obten la distancia más pequeña a uno de los autos que vaya en la misma dirección p = self.model.avenue.positions[self] min_car_distance = 1000000 for car in self.model.cars: if car != self: # Verifica si el carro va en la misma dirección dot_p1 = self.direction[0]*car.direction[0] + self.direction[1]*car.direction[1] # Verifica si el carro está atrás o adelante p2 = self.model.avenue.positions[car] dot_p2 = (p2[0]-p[0])*self.direction[0] + (p2[1]-p[1])*self.direction[1] if dot_p1 > 0 and dot_p2 > 0: d = math.sqrt((p[0]-p2[0])**2 + (p[1]-p2[1])**2) if min_car_distance > d: min_car_distance = d # Obten la distancia al próximo semáforo min_semaphore_distance = 1000000 semaphore_state = 0 for semaphore in self.model.semaphores: # Verifica si el semáforo apunta hacia el vehículo dot_p1 = semaphore.direction[0]*self.direction[0] + semaphore.direction[1]*self.direction[1] # Verifica si el semáforo está adelante o atrás del vehículo p2 = self.model.avenue.positions[semaphore] dot_p2 = (p2[0]-p[0])*self.direction[0] + (p2[1]-p[1])*self.direction[1] if dot_p1 < 0 and dot_p2 > 0: d = math.sqrt((p[0]-p2[0])**2 + (p[1]-p2[1])**2) if min_semaphore_distance > d: min_semaphore_distance = d semaphore_state = semaphore.state # Actualiza la velocidad del auto if min_car_distance < 2: self.speed = 0 self.state = 1 elif min_car_distance < 20: self.speed = np.maximum(self.speed - 250*self.step_time, 0) elif min_car_distance < 50: self.speed = np.maximum(self.speed - 120*self.step_time, 0) elif min_semaphore_distance < 40 and semaphore_state == 1: self.speed = np.minimum(self.speed + 5*self.step_time, self.max_speed) elif min_semaphore_distance < 50 and semaphore_state == 1: self.speed = np.maximum(self.speed - 25*self.step_time, 0) elif min_semaphore_distance < 120 and semaphore_state == 2: self.speed = np.maximum(self.speed - 90*self.step_time, 0) else: self.speed = np.minimum(self.speed + 5*self.step_time, 
self.max_speed) # - # ## Definición del modelo de la avenida # + class AvenueModel(ap.Model): """ Esta clase define un modelo para una avenida simple con semáforo peatonal. """ def setup(self): """ Este método se utiliza para inicializar la avenida con varios autos y semáforos. """ # Inicializa los agentes los autos y los semáforos self.cars = ap.AgentList(self, self.p.cars, Car) self.cars.step_time = self.p.step_time self.frame = 0 self.count = 0 c_north = int(self.p.cars/2) c_south = self.p.cars - c_north for k in range(c_north): self.cars[k].direction = [0,1] for k in range(c_south): self.cars[k+c_north].direction = [0,-1] self.semaphores = ap.AgentList(self,2, Semaphore) self.semaphores.step_time = self.p.step_time self.semaphores.green_duration = self.p.green self.semaphores.yellow_duration = self.p.yellow self.semaphores.red_duration = self.p.red self.semaphores[0].direction = [0, 1] self.semaphores[1].direction = [0, -1] # Inicializa el entorno self.avenue = ap.Space(self, shape=[60, self.p.size], torus = True) # Agrega los semáforos al entorno self.avenue.add_agents(self.semaphores, random=True) self.avenue.move_to(self.semaphores[0], [10, self.p.size*0.5 + 5]) self.avenue.move_to(self.semaphores[1], [50, self.p.size*0.5 - 5]) # Agrega los autos al entorno self.avenue.add_agents(self.cars, random=True) for k in range(c_north): self.avenue.move_to(self.cars[k], [10, 10*(k+1)]) for k in range(c_south): self.avenue.move_to(self.cars[k+c_north], [0, self.p.size - (k+1)*10]) #CREACION DEL JSON self.data = {} self.data['cars'] = [] self.data['semaphores'] = [] self.data['frames'] = [] with open('json_output', 'w') as file: for semaphore in self.semaphores: self.data['semaphores'].append({'id' : semaphore.id, 'dir' : semaphore.direction[1]}) for car in self.cars: car_pos = self.avenue.positions[car] self.data['cars'].append({'id': car.id, 'dir': car.direction[1], 'x' : car_pos[0], 'z' :car_pos[1]}) #cars, semaphores, frames #inicializa ambos semaforos #inicializa 
coches json.dump(self.data, file, indent = 4) def step(self): """ Este método se invoca para actualizar el estado de la avenida. """ self.semaphores.update() self.cars.update_position() self.cars.update_speed() #cars, sempaphores list cars = [] semaphores = [] #SEMAPHORES semaphores.append({'id':self.semaphores[0].id, 'state': self.semaphores[0].state}) semaphores.append({'id':self.semaphores[1].id, 'state': self.semaphores[1].state}) #CAR #for every car append {id, pos, pos} for car in self.cars: position = self.avenue.positions[car] cars.append({'id':car.id, 'x': position[0], 'z': position[1], 'direction': car.direction[1]}) self.data['frames'].append({'frame' : self.count, 'cars' : cars, 'semaphores' : semaphores, }) with open('json_output', 'w') as file: json.dump(self.data, file, indent = 4) self.count += 1 # - # ## Funciones para visualización # + def animation_plot_single(m, ax): ax.set_title(f"Avenida t={m.t*m.p.step_time:.2f}") colors = ["green", "yellow", "red"] pos_s1 = m.avenue.positions[m.semaphores[0]] ax.scatter(*pos_s1, s=20, c=colors[m.semaphores[0].state]) pos_s2 = m.avenue.positions[m.semaphores[1]] ax.scatter(*pos_s2, s=20, c=colors[m.semaphores[1].state]) ax.set_xlim(0, m.avenue.shape[0]) ax.set_ylim(0, m.avenue.shape[1]) for car in m.cars: pos_c = m.avenue.positions[car] ax.scatter(*pos_c, s=20, c="black") ax.set_axis_off() ax.set_aspect('equal', 'box') def animation_plot(m, p): fig = plt.figure(figsize=(10, 10)) ax = fig.add_subplot(111) animation = ap.animate(m(p), fig, ax, animation_plot_single) return IPython.display.HTML(animation.to_jshtml(fps=20)) # - # ## Parámetros de la simulación parameters = { 'step_time': 0.1, # Procentaje de área cubierta por árboles 'size': 1000, # Tamaño en metros de la avenida 'green': 5, # Duración de la luz verde 'yellow': 0, # Duración de la luz amarilla 'red': 5, # Duración de la luz roja 'cars': 10, # Número de autos en la simulación 'steps': 1000 # Numero de pasos de la simulación } # ## Simulación de 
una corrida

# Instantiate the avenue model with the parameter dict defined above and
# execute all simulation steps.
model = AvenueModel(parameters)
results = model.run()

# ## Visualización de una corrida

# Renders a fresh run of the model as an inline JS/HTML animation
# (animation_plot re-instantiates AvenueModel from `parameters`).
animation_plot(AvenueModel, parameters)
simple_avenue_model.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # ## MNIST Classifier for all digits #hide from utils import * from fastai2.vision.widgets import * # ## MNIST NN For all Digits # # As part of this exercise We will try to create a neural network that can identify the MNIST character dataset from several digits using Stochastic Gradient Descent using Jeremy's approach # Data set download path = untar_data(URLs.MNIST) Path.BASE_PATH = path # Check data set (path/'training').ls() # Create a new path for the training and the validation datsets training = (path/'training').ls().sorted() validation = (path/'testing').ls().sorted() training # + # Load all the image paths according to the data in the training set training_paths = [o.ls().sorted() for o in training] # Do the same for the validation data set validation_paths = [o.ls().sorted() for o in validation] # + # Load all the training data paths into 1 list for transformations training_data = [] for image_path in training_paths: training_data += image_path.sorted() # Do the same for validation dataset validation_data = [] for image_path in validation_paths: validation_data += image_path.sorted() # + # Stack all images and convert them into a tensor of images as well as # creating float values for the images train_tensors = [tensor(Image.open(o)) for o in training_data] train_x_stacked = torch.stack(train_tensors).float()/255 # Same operation for validation dataset valid_tensors = [tensor(Image.open(o)) for o in validation_data] valid_x_stacked = torch.stack(valid_tensors).float()/255 # - ### Image represented inside our stacked tensors df = pd.DataFrame(train_x_stacked[57000,:,2:24]) df.style.set_properties(**{'font-size':'6pt'}).background_gradient('Greys') # + # This function will concatenate all images into 1 tensor and will make it a rank 2 # 
tensor 1st is is the image number 2nd will be the image decomposed into a vector train_x = train_x_stacked.view(-1, 28*28) train_x.shape # Perform same preparation for validation data valid_x = valid_x_stacked.view(-1, 28*28) valid_x.shape # + # Define the labels of the training set train_y = tensor([]) for image_paths in training_paths: label = re.findall("\d", str(image_paths[0].parent)) label_tensor = tensor([float(label[0])]*len(image_paths)) train_y = torch.cat((train_y, label_tensor),0) # Define the labels of the validation dataset valid_y = tensor([]) for image_paths in validation_paths: label = re.findall("\d", str(image_paths[0].parent)) label_tensor = tensor([float(label[0])]*len(image_paths)) valid_y = torch.cat((valid_y, label_tensor),0) # - train_y = train_y.type(torch.LongTensor) valid_y = valid_y.type(torch.LongTensor) # + # Creating a tuple which will contain the image vector # and the label assiged and will be matched based on the index of the # original 2 tensors train_dset = list(zip(train_x,train_y)) # Finalize validation dataset valid_dset = list(zip(valid_x,valid_y)) len(train_dset) # - train_dset[1000] # + ## This function will return the mean of the different labels passed # We've incorprated the softmax process because our loss function # expects values between 0 and 1 and all the different predictions should be between 0 and 1 # and the softmax will ensure all values are squeezed into 0 and 1 and all predictions also are # in that range def mnist_loss(predictions, targets): predictions = torch.log_softmax(predictions,1) return F.nll_loss(predictions, targets) # + ## This accuracy function will calculate the accuracy comparing the max values of the # inputs using argmax to get the max values of the passed inputs and then checking it agaist the # targets and finally calculate the mean def batch_accuracy(xb, yb): xb = np.argmax(xb, 1) correct = (xb==yb) return correct.float().mean() # + # In order to perform training more efficiently we will 
# be using a data loader which is a way for us manage the data in the # training and validation set as well as creation of shuffled minibatches # for training in SGD dl = DataLoader(train_dset, batch_size=256, shuffle=True) valid_dl = DataLoader(valid_dset, batch_size=256, shuffle=True) # Next we will crate a learner which needs a DataLoaders class that # holds our training and validation dataset used for our model dls = DataLoaders(dl, valid_dl) # - # This is a very basic representation of a neural network # we have 2 lenar layers and a non linearity or actiation function # this can be used to train our model # nn.Linear is represented as: # ``` # res = xb@w1 + b1 # res = res.max(tensor(0.0)) # res = res@w2 + b2 # ``` # And similarly we can use an function in pytorch that represents this neural network simple_net = nn.Sequential( nn.Linear(28*28,30), nn.ReLU(), nn.Linear(30,10) ) learn = Learner(dls, simple_net, opt_func=SGD, loss_func=mnist_loss, metrics=batch_accuracy) learn.fit(40, 0.1)
nbs/MNIST_Classifier.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # Image Cropping
#
# This script will load the sample image files found in the `photo_files`
# sub-directory and crop them to a more uniform size centered on the sample.
# These values were chosen to maximize the 'fill' of the photo by the sample
# color.

import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import os
import matplotlib.image as mpimg

PHOTO_DIR = '../data/photos/photo_files/'

# loading the raw sample images, not the 'cropped' sample images
# that may have been processed by previous runs of this script
image_files = [PHOTO_DIR + x
               for x in os.listdir(PHOTO_DIR)
               if x[:3] == 'CMW' and 'cropped' not in x]

# +
# Crop boxes chosen manually by the authors, one per sample id:
# sample id -> (row_start, row_stop, col_start, col_stop).
CROP_BOXES = {
    1: (80, 215, 95, 230),     2: (100, 350, 150, 400),
    3: (100, 375, 165, 440),   4: (175, 400, 165, 390),
    5: (50, 200, 135, 285),    6: (120, 400, 200, 480),
    7: (150, 300, 200, 350),   8: (275, 500, 250, 475),
    9: (110, 245, 120, 255),   10: (165, 385, 315, 535),
    11: (200, 475, 250, 525),  12: (110, 315, 225, 430),
    13: (225, 455, 225, 455),  14: (150, 425, 250, 525),
    15: (100, 275, 115, 290),  16: (60, 190, 110, 240),
    17: (75, 200, 125, 250),   18: (235, 460, 250, 475),
    19: (95, 230, 140, 275),   20: (225, 475, 300, 550),
    21: (100, 250, 100, 250),  22: (225, 465, 310, 550),
    23: (130, 295, 250, 415),  24: (175, 400, 325, 550),
    25: (210, 410, 300, 500),  26: (180, 445, 280, 545),
    27: (215, 440, 275, 500),  28: (225, 450, 275, 500),
    29: (90, 220, 155, 285),   30: (200, 450, 225, 475),
    31: (110, 315, 120, 325),  32: (80, 215, 125, 260),
    33: (160, 385, 175, 400),  34: (90, 265, 150, 325),
    35: (150, 400, 150, 400),  40: (150, 250, 135, 235),
    41: (225, 500, 260, 535),  42: (150, 350, 250, 450),
    43: (250, 475, 265, 490),
    101: (900, 1700, 1600, 2400),  102: (1050, 1750, 1500, 2200),
    103: (1200, 1800, 1875, 2475), 104: (800, 1600, 1800, 2600),
    105: (1125, 1825, 1650, 2350), 106: (1000, 1700, 1700, 2400),
    107: (850, 1550, 2050, 2750),  108: (1175, 1825, 1700, 2350),
    109: (1050, 1750, 1800, 2500), 110: (900, 1600, 1650, 2350),
    111: (1150, 1850, 1875, 2575), 112: (900, 1700, 1800, 2600),
    113: (1000, 1800, 1600, 2400), 114: (1000, 1700, 1600, 2300),
    201: (950, 1650, 1700, 2400),  202: (900, 1600, 1700, 2400),
    203: (1100, 1800, 1700, 2400), 205: (800, 1500, 1600, 2400),
    206: (900, 1700, 1600, 2400),
    301: (1100, 1800, 2100, 2800), 302: (1100, 1900, 1400, 2200),
    303: (800, 1600, 1450, 2250),  304: (1000, 1800, 1600, 2400),
    305: (1200, 2000, 1900, 2700), 306: (950, 1650, 1600, 2300),
    307: (1000, 1700, 1650, 2350), 308: (1100, 1900, 1800, 2600),
    309: (700, 1400, 1400, 2100),  310: (900, 1600, 1600, 2300),
    401: (700, 1600, 1300, 2200),  402: (1000, 1700, 1900, 2600),
    403: (1000, 1800, 1800, 2600), 404: (1000, 1800, 2100, 2900),
    405: (1050, 1750, 1750, 2450), 406: (950, 1550, 1800, 2400),
    407: (900, 1600, 1900, 2600),  408: (1050, 1850, 1800, 2600),
    409: (1000, 1700, 1700, 2400),
    501: (950, 1750, 1300, 2100),  502: (1050, 1750, 1700, 2400),
    503: (950, 1650, 1400, 2100),  504: (900, 1600, 1300, 2100),
    505: (800, 1600, 1800, 2600),  506: (900, 1600, 1800, 2500),
}

imgs = {}  # dictionary to store all sample images, keyed by sample id
for file in image_files:
    img = mpimg.imread(file)
    # BUG FIX: the original parsed the id as int(file[3:-4]), which slices the
    # full path ('../data/photos/...') rather than the 'CMW<id>.<ext>' basename
    # and therefore raises ValueError.  Parse the id from the basename instead.
    stem = os.path.splitext(os.path.basename(file))[0]
    key = int(stem[3:])  # drop the 'CMW' prefix
    box = CROP_BOXES.get(key)
    # As in the original, images without a hand-chosen crop box are kept as-is.
    imgs[key] = img[box[0]:box[1], box[2]:box[3]] if box else img
# -

# saving the cropped images to the `photo_files` sub-directory
for key, img in imgs.items():
    mpimg.imsave(PHOTO_DIR + 'CMW' + str(key) + '_cropped.png', img)
photos/Image_Cropping.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# +
# run ipython with this command:
# jupyter notebook --NotebookApp.iopub_data_rate_limit=10000000000
import matplotlib
import brewer2mpl
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import keras
from keras.preprocessing.image import ImageDataGenerator
from keras import layers
from keras.layers import Convolution2D, MaxPooling2D
from keras.layers import Activation, Dropout, Flatten, Dense
from keras.layers import Conv2D, MaxPool2D
# consolidated: Sequential was imported twice in the original
from keras.models import Sequential, model_from_json, load_model
from keras.optimizers import SGD
from keras.activations import relu, tanh, elu
from keras.backend import clear_session

set3 = brewer2mpl.get_map('Set3', 'qualitative', 7).mpl_colors

# FIX: the magic must be comment-escaped in script form
# ('% matplotlib inline' is a SyntaxError outside IPython).
# %matplotlib inline
# -

# Load the pre-normalized FER2013 pixel data and labels.
train_data_x = pd.read_pickle('normalized_fer2013.pkl')
train_data_y = pd.read_pickle('normalized_fer2013_labels.pkl').astype(int)
test_data_x = pd.read_pickle('normalized_test_fer2013.pkl')
test_data_y = pd.read_pickle('normalized_test_fer2013_labels.pkl').astype(int)

# .as_matrix() was removed in pandas 1.0; .to_numpy() is the replacement.
# Images are reshaped to (N, 48, 48, 1) single-channel tensors.
train_data_x = train_data_x.to_numpy().reshape((-1, 48, 48, 1))
test_data_x = test_data_x.to_numpy().reshape((-1, 48, 48, 1))
train_data_y = train_data_y.to_numpy()
test_data_y = test_data_y.to_numpy()

# One-hot encode the 7 emotion classes.
train_data_y = keras.utils.to_categorical(train_data_y, num_classes=7)
test_data_y = keras.utils.to_categorical(test_data_y, num_classes=7)

# <font size = 20pt>__Training Process__</font>

# +
import random

# Split the training indices into `partition` disjoint folds for a manual
# cross-validation-style rotation during training.
partition = 5
train_sets = [[] for _ in range(partition)]
pop = range(len(train_data_y))
for i in range(partition - 1):
    train_sets[i] = random.sample(pop, len(train_data_y) // partition)
    # O(n) removal via a set; the original tested membership against a list,
    # which is O(n^2) overall.  The resulting folds are identical.
    chosen = set(train_sets[i])
    pop = [j for j in pop if j not in chosen]
train_sets[partition - 1] = pop
print([len(train_sets[i]) for i in range(partition)])

# +
clear_session()

model = keras.models.Sequential()
# Layer plan, identical to the original hand-written stack:
# ('same', f)  -> 2x2 stride-1 'same' conv with f filters + Dropout(0.2)
# ('pool', f)  -> 2x2 stride-2 'valid' conv with f filters (no dropout)
conv_plan = [('same', 8), ('same', 8), ('same', 8), ('same', 8), ('pool', 8),
             ('same', 16), ('same', 16), ('same', 8), ('same', 8), ('pool', 16),
             ('same', 8), ('same', 8), ('same', 8), ('same', 8), ('same', 8)]
for idx, (kind, filters) in enumerate(conv_plan):
    kwargs = {'input_shape': (48, 48, 1)} if idx == 0 else {}
    if kind == 'same':
        model.add(Conv2D(filters, (2, 2), strides=(1, 1), activation='elu',
                         padding='same', **kwargs))
        model.add(Dropout(0.2))
    else:  # downsampling conv
        model.add(Conv2D(filters, (2, 2), strides=(2, 2), activation='elu',
                         padding='valid', **kwargs))
model.add(Flatten())
model.add(Dense(32, activation='elu'))
model.add(Dense(7, activation='softmax'))

model.compile(loss='categorical_crossentropy',
              optimizer='adam',
              metrics=['accuracy'])

# returns a compiled model
# identical to the previous one
# model = load_model('my_model.h5')
# -

# Train on (partition-1) folds, validate on the held-out fold, rotating the
# held-out fold every `period` epochs.
period = 50 // partition  # total iterations to cycle through all partitions is at most 50 iterations
for i in range(1000):
    print("EPOCH " + str(i * period + 1))
    train = []
    for j in range(partition):
        if j != i % partition:
            train += train_sets[j]
    model.fit(train_data_x[train, :, :, :], train_data_y[train],
              validation_data=(train_data_x[train_sets[i % partition], :, :, :],
                               train_data_y[train_sets[i % partition]]),
              epochs=period, batch_size=32)

# +
# Persist both the full model (weights + architecture) and the architecture
# alone as JSON.
addr = "ADAM8_8_8_8_8_16_16_16_16_16_8_8_8_8_8F32"
model.save(addr + ".h5")
model_json = model.to_json()
with open(addr + ".json", "w") as json_file:
    json_file.write(model_json)
# -

# ## Computer Vision

# +
# load in the whole architecture, training methods, and weights with h5 file
addr = "ADAM_16_8_8_4F64_32_16"
model = load_model(addr + '.h5')

# # load the whole architecture stored in json and create model
# json_file = open(addr + '.json','r')
# loaded_model_json = json_file.read()
# json_file.close()
# model = model_from_json(loaded_model_json)
# # load weights into model from h5 file
# model.load_weights(addr + '.h5')

# evaluate model on test set
model.compile(loss='categorical_crossentropy',
              optimizer='adam',
              metrics=['accuracy'])
score = model.evaluate(test_data_x, test_data_y, verbose=0)
print("model %s: %.2f%%" % (model.metrics_names[1], score[1] * 100))
# -

# ## Prediction Result

# prediction and true labels
y_prob = model.predict(test_data_x, batch_size=32, verbose=0)
y_pred = [np.argmax(prob) for prob in y_prob]
y_true = [np.argmax(true) for true in test_data_y]
counts = np.bincount(y_pred)
labels = ['angry', 'disgust', 'fear', 'happy', 'sad', 'surprise', 'neutral']
print([[x, y] for x, y in zip(labels, counts)])

# +
from sklearn.metrics import confusion_matrix


def plot_confusion_matrix(y_true, y_pred, cmap=plt.cm.Blues):
    """Plot a row-normalized confusion matrix for the 7 emotion classes."""
    cm = confusion_matrix(y_true, y_pred)
    # normalize each row (true class) to proportions, rounded for display
    cm = (cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]).round(2)
    fig = plt.figure(figsize=(7, 7))
    matplotlib.rcParams.update({'font.size': 16})
    ax = fig.add_subplot(111)
    matrix = ax.imshow(cm, interpolation='nearest', cmap=cmap, vmin=0, vmax=1)
    fig.colorbar(matrix)
    thresh = 0.5  # switch text color for readability on dark cells
    for i in range(7):
        for j in range(7):
            ax.text(j, i, str(int(cm[i, j] * 100)) + "%",
                    va='center', ha='center',
                    color="white" if cm[i, j] > thresh else "black")
    ax.set_title('Confusion Matrix')
    ticks = np.arange(len(labels))
    ax.set_xticks(ticks)
    ax.set_xticklabels(labels, rotation=45)
    ax.set_yticks(ticks)
    ax.set_yticklabels(labels)
    plt.tight_layout()
    plt.ylabel('True label')
    plt.xlabel('Predicted label')


plot_confusion_matrix(y_true, y_pred, cmap=plt.cm.YlGnBu)
# -

# ## Classification Metrics

# +
def class_precision(y_true, y_pred, emotion):
    """Precision for one class: TP / (TP + FP) from the confusion matrix column."""
    cm = confusion_matrix(y_true, y_pred)
    i = labels.index(emotion)
    col = [cm[j, i] for j in range(7)]
    return float(col[i]) / sum(col)


def class_recall(y_true, y_pred, emotion):
    """Recall for one class: TP / (TP + FN) from the confusion matrix row."""
    cm = confusion_matrix(y_true, y_pred)
    i = labels.index(emotion)
    row = [cm[i, j] for j in range(7)]
    return float(row[i]) / sum(row)


def class_accuracy(y_true, y_pred, emotion):
    """One-vs-rest accuracy for one class: (TP + TN) / total."""
    cm = confusion_matrix(y_true, y_pred)
    i = labels.index(emotion)
    tp = cm[i, i]
    fn = sum(cm[i, j] for j in range(7) if j != i)
    fp = sum(cm[j, i] for j in range(7) if j != i)
    # BUG FIX: the original computed the grand total with
    # sum([cm[i,j] for j in range(7) for i in range(0,6)]), which both shadows
    # the outer class index `i` and drops row 6 entirely, yielding a wrong TN.
    tn = cm.sum() - (tp + fn + fp)
    return float(tp + tn) / (tp + fn + fp + tn)
# -

for emotion in labels:
    print(emotion)
    print('   acc = {}'.format(class_accuracy(y_true, y_pred, emotion)))
    print('  prec = {}'.format(class_precision(y_true, y_pred, emotion)))
    print('recall = {}\n'.format(class_recall(y_true, y_pred, emotion)))

from sklearn.metrics import classification_report
print(classification_report(y_true, y_pred, target_names=labels))
keras-CNN/Keras_CNN_7_Emotions.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

import tweepy
import pandas as pd
import time
import json

# +
# credentials
# SECURITY NOTE(review): credentials are hardcoded in the notebook; they should
# be loaded from environment variables or a git-ignored config file instead.
consumer_key = "jgRrFk3glCFOS0eIVOWGFhHEa"
consumer_secret = "<KEY>"
access_token = "<KEY>"
access_token_secret = "<KEY>"

# +
# authentication
auth = tweepy.OAuthHandler(consumer_key, consumer_secret)
auth.set_access_token(access_token, access_token_secret)
api = tweepy.API(auth, wait_on_rate_limit=True)


# +
# (removed an unused module-level `tweets = []`; the function below always
# rebinds its own local `tweets`)
def text_query_to_csv(text_query, count):
    """Search Twitter for `text_query`, pull up to `count` tweets, and write
    them to '<query>-<count>-tweets.csv' with Datetime/Tweet ID/Text columns.

    Failures are printed and swallowed (best-effort crawl), followed by a
    short back-off sleep.
    """
    try:
        # Creation of query method using parameters
        tweets = tweepy.Cursor(api.search, q=text_query).items(count)

        # Pulling information from the tweets iterable object
        tweets_list = [[tweet.created_at, tweet.id, tweet.text]
                       for tweet in tweets]

        # Creation of dataframe from tweets list
        tweets_df = pd.DataFrame(tweets_list,
                                 columns=["Datetime", "Tweet ID", "Text"])

        # Conversion of dataframe into csv
        tweets_df.to_csv("{}-{}-tweets.csv".format(text_query, count),
                         sep=",", index=False)
    # BUG FIX: was `except BaseException`, which also swallows
    # KeyboardInterrupt and SystemExit; Exception is the right breadth here.
    except Exception as e:
        print("Failed:", str(e))
        time.sleep(5)


# +
text_query = "<NAME>"
count = 1000
text_query_to_csv(text_query, count)

# -
df = pd.read_csv("Greysia Apriyani-1000-tweets.csv")
df.head(20)
df["Text"][0]
crawler.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # TF-IDF # # The TF-IDF formula makes it possible to determine in what proportions certain words in a text document, a document body or a website can be evaluated in relation to the rest of the text. # ## TF # TF is the abbreviation for **term frenquency**. It determines the relative frequency of a word or combination of words in a document. This term frequency will be compared to the occurrence of all other remaining words in the text, document or website being analyzed. # # This formula uses a logarithm that reads as follows: # $$ tf(i,j) = \dfrac {n_{(i,j)}}{\sum_k n_{(i,j)}} $$ # # The number of times a word appears in a document divded by the total number of words in the document. ***Every document has its own term frequency.*** # This formula attests that a visible increase of the keyword in the text does not lead to an improvement of its value in the calculation. While the keyword density mainly calculates the percentage distribution of a single word in the text (in relation to the total number of words remaining), the term frequency also takes into account the proportion of all words used in a text. # ## IDF # The IDF calculates the **inverse document frequency** and completes the word evaluation analysis. It acts as a correction of the TF. The IDF includes in the calculation the document frequency for a specific word, i.e. the IDF compares the figure corresponding to all known documents with the number of texts containing the word in question. # # The following logarithm is used to "condense" the results: # $${\displaystyle \mathrm {idf} (w)=\log ({\frac {N}{df_{(i)}}}})$$ # # The log of the number of documents divided by the number of documents that contain the word w. 
Inverse document frequency determines the weight of rare words across all documents in the corpus.
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.feature_extraction.text import TfidfTransformer
from sklearn.pipeline import Pipeline
import numpy as np

# Toy corpus used to demonstrate the TF-IDF weighting described above.
corpus = [
    'This is the first document.',
    'This document is the second document.',
    'And this is the third one.',
    'Is this the first document?',
]

# Fit a TF-IDF model and inspect the learned vocabulary and weight matrix.
vectorizer = TfidfVectorizer()
X = vectorizer.fit_transform(corpus)
# FIX: get_feature_names() was removed in scikit-learn 1.2;
# get_feature_names_out() is the supported replacement.
vocabulary = list(vectorizer.get_feature_names_out())
print(vocabulary)
X.toarray()

# Equivalent two-step pipeline: raw counts, then TF-IDF re-weighting.
# The intermediate 'count' step exposes the raw term-frequency matrix.
pipe = Pipeline([('count', CountVectorizer(vocabulary=vocabulary)),
                 ('tfid', TfidfTransformer())]).fit(corpus)
pipe['count'].transform(corpus).toarray()
Content/5.TFIDF/1.tfidf.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# +
# %matplotlib inline
import numpy as np
from scipy.special import sici
import matplotlib.pylab as plt
from scipy.integrate import quad, trapz

# Uniform figure typography.
SMALL_SIZE = 20
MEDIUM_SIZE = 20
BIGGER_SIZE = 20
plt.rc('font', size=SMALL_SIZE)          # controls default text sizes
plt.rc('axes', titlesize=SMALL_SIZE)     # fontsize of the axes title
plt.rc('axes', labelsize=MEDIUM_SIZE)    # fontsize of the x and y labels
plt.rc('xtick', labelsize=SMALL_SIZE)    # fontsize of the tick labels
plt.rc('ytick', labelsize=SMALL_SIZE)    # fontsize of the tick labels
plt.rc('legend', fontsize=SMALL_SIZE)    # legend fontsize
plt.rc('figure', titlesize=BIGGER_SIZE)  # fontsize of the figure title

print(plt.style.available)
plt.style.use('seaborn-white')
# -

# k-grid for the characteristic function.  scipy.special.sici returns
# (Si(k), Ci(k)), so e[0] is the sine integral and e[1] the cosine integral.
k_limit = 200
steps = 16386
k = np.linspace(0, k_limit, steps)
d = k_limit / (steps - 1)
e = sici(k)
f_real = e[1]  # Ci(k)
f_imag = e[0]  # Si(k)
# Ci(k) minus its small-k expansion log(k) + gamma.
f_real2 = f_real - np.log(k) - np.euler_gamma

plt.plot(k, f_real2, "b")
plt.plot(k, f_imag, "r")

# Modulus of the characteristic function for unit prefactor.
prefactor = 1
er = np.exp(-prefactor * np.euler_gamma) * np.exp(prefactor * f_real) * k**(-prefactor)
ei1 = np.cos(prefactor * f_imag)
ei2 = np.sin(prefactor * f_imag)
er[0] = 1  # remove the k=0 singularity of k**(-prefactor)
plt.loglog(k, er)

# Power-law fit of the large-k tail (log-log linear fit).
logk = np.log(k)
loger = np.log(er)
# renamed from the original's reuse of `k_limit`, which silently shadowed the
# physical cutoff defined above with an array index
fit_start = 5000
p = np.polyfit(logk[fit_start:], loger[fit_start:], 1)
print(p)
erfit = np.exp(p[1]) * k**p[0]
plt.loglog(k, er)
plt.loglog(k[fit_start:], erfit[fit_start:])

plt.loglog(k, ei1, "b")
plt.loglog(k, ei2, "r")


# +
def characteristic_fft(prefactor):
    """Build the characteristic function for coupling `prefactor` and return
    (freq, cf_fft): the intensity grid and the FFT-reconstructed p(I).

    This factors out the block of code that the original notebook repeated
    verbatim for each prefactor value; the prints are kept so the cell output
    matches the original runs.
    """
    er = np.exp(-prefactor * np.euler_gamma) * np.exp(prefactor * f_real) * k**(-prefactor)
    ei1 = np.cos(prefactor * f_imag)
    ei2 = np.sin(prefactor * f_imag)
    er[0] = 1  # remove the k=0 singularity

    # need to find the proper way of doing an fft
    freq = np.fft.fftfreq(32768, d) * 2 * np.pi
    fc1 = er * ei1 + 1j * er * ei2
    fc2 = np.conj(fc1)
    fc = (fc1[:-1] + fc2[1:]) / 2
    # make sure that p(x) is normalized and that the highest frequency
    # has no imaginary part
    fc[0] = 1.0
    fc[-1] = np.real(fc[-1])
    # extend with the conjugate-reversed half to get a real transform
    fc_c = np.conj(fc[::-1])
    fc_t = np.concatenate((fc, fc_c[1:-1]))
    print(fc_t[2], fc_t[-2])
    print(fc_t.shape)
    # take fourier transform and normalize to freq
    cf_fft = 2 * np.fft.fft(fc_t) / 32768 / freq[1]
    print(cf_fft)
    return freq, cf_fft


def plot_p_of_I(prefactor, label, xlim, filename, disp=700, ylim=None):
    """Plot Re[p(I)] for one prefactor and save the figure.

    NOTE(review): `backgroundcolor` is not a documented savefig kwarg — kept
    from the original call; confirm the installed matplotlib accepts it.
    """
    freq, cf_fft = characteristic_fft(prefactor)
    plt.plot(freq[:disp], np.real(cf_fft[:disp]), "k", label=label)
    plt.xlim(xlim)
    if ylim is not None:
        plt.ylim(ylim)
    plt.xlabel("Intensity I")
    plt.ylabel("p(I)")
    plt.legend()
    plt.savefig(filename, format='png', dpi=300, bbox_inches='tight',
                facecolor="white", backgroundcolor="white")
    return freq, cf_fft


# -

# Same four parameter sets (and output files) as the original cells.
freq, cf_fft = plot_p_of_I(np.pi / 2 * 0.2, "c = 0.2", (0, 2),
                           "pIanalytic02.png", ylim=(0, 1))

# normalization
print(trapz(np.real(cf_fft[:steps]), dx=freq[1]))

plot_p_of_I(1, r"$c = 2/\pi$", (0, 3), "pIanalytic2overpi.png")

plot_p_of_I(np.pi / 2 * 1, r"$c = 1$", (0, 5), "pIanalytic1.png")

plot_p_of_I(np.pi / 2 * 5, r"$c = 5$", (0, 15), "pIanalytic5.png", disp=2000)

# Now lets compare two concentrations with two different brightness so that
# the total intensity is the same

# NOTE(review): I1/pI1/I2/pI2 are never defined anywhere in this notebook, so
# this cell raises NameError as written — the cell that computed them appears
# to have been deleted.  Left unchanged pending the authors' intent.
plt.plot(I1, pI1)
plt.plot(I2 * 0.5, 2 * pI2)
plt.xlim((0, 3))
plt.ylim((0, 0.015))
plt.xlabel("Intensity I")
plt.ylabel("p(I)")
plt.savefig("pIanalyticTwo1and2.png", format='png', dpi=300,
            bbox_inches='tight', facecolor="white", backgroundcolor="white")
twod_distributions.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # name: python3 # --- # + [markdown] id="pUWcg1yf5SS7" colab_type="text" # # Colab utils # # This repo, [JohnTigue/colab-utils](https://github.com/JohnTigue/colab-utils), is a collection of Jupyter notebooks that were built out for the sole goal of being useful on Colab. # # There are two directories of interest: # * [`probings/`](https://github.com/JohnTigue/colab-utils/tree/master/probings): one-off learning tests to probe how Colab works under the hood # * [`tools/`](https://github.com/JohnTigue/colab-utils/tree/master/tools): Generally useful stuff designed to be reusable # # ## Tools # The `tools/` directory contains the most useful stuff in this repository. In this document "tools" means Jupyter notebooks and snippets that `!pip install` as needed and then perform some useful task on Colab. # # * [`colab_vm_config_info.ipynb`](https://colab.research.google.com/github/JohnTigue/colab-ing/blob/master/tools/colab_vm_config_info.ipynb): various snippets to understand how the VM is configured. # * [`allensdk_on_colab.ipynb`](https://colab.research.google.com/github/JohnTigue/colab-ing/blob/master/tools/allensdk_on_colab.ipynb): snippets for working with [AIBS'](https://alleninstitute.org) data # # ## Probings # # The `tests` directory contains less refined one-off and scraps, but are still useful bits of information about what's going on under the hood in Colab. # # * absolute_minimal.ipynb: very, very minimal `.ipynb` file # * authored_in_colab.ipynb: shows what Colab generates for new notebooks # * scratch_snippets.ipynb: code that ended up on the cutting room floor # * unit_testing.ipynb: A repo? With no tests? Tsk-tsk. # # ## Colab juxtaposed with Binder # # Colab and Binder are similar but there are subtle differences, beyond the open (Binder) versus closed (Colab) source issue. 
Of course, Google is giving away an addictively useful service with Colab and much of the closed source is for dialing Jupyter to their service. So, if you need more freedom, go get a free temporary VM at https://mybinder.org, where you bring your own code and as such can do anything. Of course, there are no free GPUs and TPUs on mybinder.org. # # ### Terminology # Herein, the follow terms are used: # * "Colab" means colab.research.google.com # * "Binder" means the free Binder service hosted at mybinder.org # # ### Similarities # * both give a user a temporary virtual machine (VM) with a Jupyter UI. # # ### Differences # * Colab only models independent notebook files. Has TPUs! # * Binder models a repo full of notebooks in a single Docker image. # # This means that cross-notebook connections are less powerful on Colab than on Binder, but that can be worked around with drugery.
index.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- from core import * from core_mps import * from quantum_plots import * from mps.state import MPS, CanonicalMPS from mps.mpo import MPO, MPOList from mps.qft import qft_mpo, qft_flip, qft, iqft # ## Quantum Fourier Transform # We want to analyze the performance of the QFT on the states that we have been studying. def twoscomplement(L, **kwdargs): A0 = np.zeros((1,2,2,2)) A0[0,0,0,0] = 1. A0[0,1,1,1] = 1. A = np.zeros((2,2,2,2)) A[0,0,0,0] = 1. A[0,1,1,0] = 1. A[1,1,0,1] = 1. A[1,0,1,1] = 1. Aend = A[:,:,:,[0]] + A[:,:,:,[1]] return MPO([A0] + [A]*(L-2) + [Aend], **kwdargs) # + def study_qft_entropies(σ=1.0, M=15): ψ, x = Gaussian_GR(M, σ=σ, μ=0, a=-7, b=7) ψmps = MPS.fromvector(ψ, [2]*M) F = qft_mpo(M, simplify=True, normalize=True) S = [all_entropies(ψmps)] for mpo in F.mpos: ψmps = mpo.apply(ψmps) S.append(all_entropies(ψmps)) Fψmps = qft_flip(ψmps) F2ψmps = twoscomplement(M, simplify=True, normalize=True).apply(Fψmps) S.append(all_entropies(F2ψmps)) fig = plot_setup(aspect_ratio=1/1.62, wide=True) ax = fig.add_subplot(2,2,4) Smax = [max(s) for s in S] ax.plot(0, Smax[0], 'o', mfc='w', label='$\\psi$') ax.plot(np.arange(1,M+1), Smax[1:-1], '-', mfc='w', label='$\\mathcal{F}_j\\cdots\\mathcal{F}_1\\psi$') ax.plot(M+2, Smax[-1], 's', mfc='w', label='$U_{2c}\\mathcal{F}\\psi$') ax.set_ylabel('$\\max\\,S[\\rho^{(k,m-k)}]$') ax.set_xlabel('$j$ (step)') ax.set_ylim([-0.1,1.1]) ax.legend(frameon=True,loc='center') ax.text(16,0.95,'d)', fontweight='bold') ax = fig.add_subplot(2,2,1) ax.plot(x, np.abs(ψ)**2) ax.set_xlabel('$x_s$') ax.set_ylabel('$p(x_s)$') ax.text(-5.5,0.000155,'a)',fontweight='bold') ax = fig.add_subplot(2,2,3) ax.plot(np.arange(2**M)/2**M, np.abs(Fψmps.tovector())**2, color=color2) ax.set_xlabel('$s/2^m$') 
ax.set_ylabel('$[\\mathcal{F}p](s)$') ax.text(0.1,0.33,'b)',fontweight='bold') ax = fig.add_subplot(2,2,2) ax.plot(S[0], '-o', mfc='w', label='$\\psi$') ax.plot(S[-2], '-', label='$\\mathcal{F}\\psi$') ax.plot(S[-1], '-s', mfc='w', label='$U_{2c}\\mathcal{F}\\psi$') ax.set_ylim([-0.1,1.1]) ax.set_xlabel('$k$') ax.set_ylabel('$S[\\rho^{(k,m-k)}]$') ax.text(13.5,0.95,'c)', fontweight='bold') ax.legend(loc='center') plt.tight_layout() plt.savefig('../fig-QFT-entropy.pdf') study_qft_entropies() # - # ## Quantum Fourier interpolation # + def qft_interpolation(σ=1.0, M0=5, Mf=10, a=None, b=None): if b is None: b=-7*σ if a is None: a=-b x0 = np.linspace(a, b, 2**M0+1)[:-1] xf = np.linspace(a, b, 2**Mf+1)[:-1] ψ0 = np.exp(-x0**2/(2*σ**2)); ψ0 /= np.linalg.norm(ψ0) ψf = np.exp(-xf**2/(2*σ**2)); ψf /= np.linalg.norm(ψf) ψ0mps = MPS.fromvector(ψ0, [2]*M0) U2c = twoscomplement(M0, simplify=True, normalize=True) Fψ0mps = U2c.apply(qft_flip(qft(ψ0mps))) # # Extend the state with zero qubits χ = Fψ0mps[0].shape[-1] A = np.zeros((χ,2,χ)) for i in range(χ): A[i,0,i] = 1.0 tensors = [Fψ0mps[0]]+[A]*(Mf-M0)+Fψ0mps[1:] Fψfmps = MPS(tensors) # # Undo Fourier transform U2c = twoscomplement(Mf, simplify=True, normalize=True) ψfmps = qft_flip(iqft(U2c.apply(Fψfmps))) ψfint = ψfmps.tovector() ψfint /= np.linalg.norm(ψfint) xint = xf; fig = plot_setup(aspect_ratio=1/1.62, width_ratio=0.8, wide=True) ax = fig.add_subplot(2,2,1) ax.plot(x0, np.abs(ψ0)**2/np.sum(np.abs(ψ0)**2), '-o') ax.set_xlabel('$x_s$') ax.set_ylabel('$p^{(5)}(x_s)$') ax.text(-6,0.2,'a)',fontweight='bold') ax = fig.add_subplot(2,2,2) ax.plot(xint, np.abs(ψfint**2)) ax.plot(xf, np.abs(ψf**2)) ax.set_xlabel('$x_s$') ax.set_ylabel('$p^{(10)}(x_s)$') ax.text(-6,0.006,'b)',fontweight='bold') ax = fig.add_subplot(2,2,3) ξ0 = np.abs(Fψ0mps.tovector())**2 k0 = np.arange(-2**(M0-1),2**(M0-1))*2*np.pi/(14*σ) ξ0 = np.concatenate((ξ0[2**M0-1:2**(M0-1)-1:-1],ξ0[0:2**(M0-1)])) ax.plot(k0, ξ0, color=color1) ax.set_xlabel('$k_{\\bar{s}}$') 
ax.set_ylabel('$[\\mathcal{F}p](s)$') ax.text(-6,0.2,'c)',fontweight='bold') ax = fig.add_subplot(2,2,4) ξf = np.abs(Fψfmps.tovector())**2 kf = np.arange(-2**(Mf-1),2**(Mf-1))*2*np.pi/(14*σ) ξf = np.concatenate((ξf[2**Mf-1:2**(Mf-1)-1:-1],ξf[0:2**(Mf-1)])) ax.plot(kf, ξf, color=color2) ax.set_xlabel('$k_{\\bar{s}}$') ax.set_ylabel('$[\\mathcal{F}p](s)$') ax.text(-195,0.2,'d)',fontweight='bold') plt.tight_layout() plt.savefig('../fig-QFT-interpolation.pdf') plt.savefig('../fig-QFT-interpolation.svg') qft_interpolation() # - # ## Fokker-Planck spectral method # We can solve the Fokker-Planck equation in Fourier space. We need to realize that the differential operators are much easier in momentum space # $$\frac{\partial}{\partial x} e^{i k x} \sim k e^{i k x}.$$ # Thanks to this, any differential equation with a generator $G(\partial_x)$ which is a function of the derivatives # $$\partial_t|\psi\rangle = G(\partial_x)|\psi\rangle$$ # can be solved in Fourier space as # $$|\tilde\psi(t)\rangle = e^{G(i k)} |\tilde\psi(0)\rangle,$$ # where $|\tilde\psi\rangle = \mathcal{F}|\psi\rangle$ is the Fourier transform of the state. # Since we are interested in the "laboratory" representation, not in the transformed state, we can write # $$|\psi(t)\rangle = \mathcal{F^{-1}} e^{G(ik)} \mathcal{F} |\psi(0)\rangle.$$ # In practice things are a bit more complicated. We are working with finite discretized intervals $x_s\in[a,b],$ and the derivative $\partial_x$ does not have a trivial representation: we must approximate it. # First we have to remember that the discretization reads # $$x = a + s\,\delta{x},\; s=0,1,\ldots,2^{m}-1,$$ # with discretization step # $$\delta{x} = \frac{b-a}{2^m} = \frac{L}{N}.$$ # Associated to this discrete coordinate, we have the quasimomentum variable # $$q = \frac{2\pi}{L} s \in \frac{2\pi}{L}\times\{0,1,2\ldots, N-1\},$$ # where once more $s$ denotes the states of the quantum register. # Note that this quasimomentum is not quite our 'k' coordinate. 
The quasimomentum $2\pi(N-1)/L$ effectively represents the negative momentum $k=-2\pi/L.$ More generally, we can associate to the binary numbers $s$ a real momentum # $$k = \frac{2\pi}{L} \times \bar{s}$$ # where $\bar{s}$ is a positive or negative number that uses the [two's complement representation](https://en.wikipedia.org/wiki/Two%27s_complement) with $m$ bits of precision # $$\bar{s} = \left\{ # \begin{array}{l} # \sum_{n=2}^m 2^{m-n}s_n, & s_1 = 0\\ # -\sum_{n=2}^m 2^{m-n}(1-s_n) - 1, & s_1=1. # \end{array}\right.$$ # We can write # $$\bar{s}=-s_1 + \sum_{n=2}^{m}2^{m-n} \left[ (1-s_1) s_n - s_1 (1-s_n)\right] # =-s_1 + \sum_{n=2}^{m}2^{m-n} \left[ s_n-s_1\right] $$ def c2(m, debug=True): alls = [] for z in range(0,2**m): b = [(z >> i) & 1 for i in reversed(range(0,m))] s = sum(2**(m-i)*(b[i-1]-b[0]) for i in range(2,m+1)) - b[0] if debug: print(f'b={b}, s={s}') alls.append(s) return np.array(alls) c2(4) # Using this, we can implement a unitary MPO that performs the exponentiation $\exp(c k)$ with any coefficient $c.$ This is an almost local operator # $$\exp(c k) = \exp(-c s_1) \prod_{n=2}^{m} \exp\left[c (1-s_1) 2^{m-n} s_n - c s_1 2^{m-n} (1-s_n)\right].$$ def mpo_expk(c, m, L, **kwdargs): """Implement exp(ck) for a register with 'm' qubits. Parameters ---------- c -- coefficient in the exponent m -- size of quantum register **kwdargs-- extra arguments for MPO """ c *= 2*np.pi/L A0 = np.zeros((1,2,2,2), dtype=type(c)) A0[0,1,1,1] = np.exp(-c) A0[0,0,0,0] = 1.0 out = [A0] for n in range(2,m+1): cn = c * 2**(m-n) A = np.zeros((2,2,2,2), dtype=type(c)) A[0,1,1,0] = np.exp(cn) A[0,0,0,0] = 1. A[1,1,1,1] = 1. A[1,0,0,1] = np.exp(-cn) if n==m: A = A[:,:,:,[0]]+A[:,:,:,[1]] out.append(A) return MPO(out, **kwdargs) # We can test this with a simple Gaussian function and the displacement operator. 
We have the equation # $$\partial_t p(x,t) = -\mu \partial_x p(x,t).$$ # This has a solution of the form # $$p(x,t) = p(x-\mu t,0),$$ # and it can be solved in our representation using # $$p(x,t) = \mathcal{F}^{-1} e^{-i \mu k} \mathcal{F} p(x,t).$$ # + from mps.qft import qft, iqft, qft_flip import matplotlib.pyplot as plt def test_drift(T=1.0, μ=1, m=8, σ=1.0, a=None, b=None): if a is None: a = -7*σ if b is None: b = -a L = abs(b - a) x = np.linspace(a, b, 2**m) ψmps = GaussianMPS(m, σ, a=a, b=b, GR=False, simplify=True, normalize=True) U = mpo_expk(-1j * T * μ, m, L, simplify=True) ξmps = qft_flip(qft(ψmps, simplify=True)) ξmps = U.apply(ξmps) plt.plot(np.abs(ξmps.tovector()), label='$\\mathcal{F}ψ$') plt.plot(np.abs(ψmps.tovector()), label='$ψ$') ξmps = qft_flip(iqft(ξmps, simplify=True)) plt.plot(np.abs(ξmps.tovector()), label='$\\mathcal{F}e^{-\\mu\\hat{k}}\\mathcal{F}ψ$') xm0 = np.sum(x * np.abs(ψmps.tovector()))/np.sum(np.abs(ψmps.tovector())) xmT = np.sum(x * np.abs(ξmps.tovector()))/np.sum(np.abs(ξmps.tovector())) print(f'<x(0)> = {xm0:5f}, <x(T)> = {xmT:5f}') plt.plot() test_drift(m=8) # - # We can also implement $\exp(D k^2).$ For that, let us first study $k^2.$ Recall # $$k =\frac{2\pi}{L}\bar{s},$$ # and # $$\bar{s}=-s_1\left[1 + \sum_{n=2}^{m}2^{m-n} (1-s_n)\right] # + (1-s_1) \sum_{n=2}^{m}2^{m-n} s_n$$ # We realize that $s_1$ and $(1-s_1)$ are projectors and hence $s_1(1-s_1)=0.$ We can therefore write # $$\bar{s}^2=s_1\left[1 + \sum_{n=2}^{m}2^{m-n} (1-s_n)\right]^2 # + (1-s_1) \left[\sum_{n=2}^{m}2^{m-n} s_n\right]^2.$$ # This can be written in QUBO form # $$\bar{s}^2=\sum_i Q_{ij}s_i s_j,$$ # but the coefficients in $Q$ are negative and positive, and pretty large, which causes problems. 
# We realize that there is a unitary operation that implements two's complement:
# $$U|s_1,s_n\rangle = |s_1,s_1\oplus s_n\rangle =|s_1,s_n'\rangle,$$
# with which the expression for $\bar{s}^2$ radically changes:
# $$\bar{s}=-s_1\left[1 + \sum_{n=2}^{m}2^{m-n} s_n \right] + (1-s_1) \sum_{n=2}^{m}2^{m-n} s_n$$
# $$\bar{s}^2 = s_1 \left[1 + \sum_{n=2}^m 2^{m-n}s_n'\right]^2 + (1-s_1)\left[\sum_{n=2}^m 2^{m-n}s_n'\right]^2.$$
# This new formulation now has $Q_{ij}'\geq 0.$
# It is convenient to rewrite
# $$\bar{s}^2 = s_1 + 2s_1 S + S^2$$
# with
# $$S = \sum_{n=2}^{m} 2^{m-n}s_n'.$$
# Then we have
# $$Q_{1,1} = 1,\; Q_{1,n\geq 2} = 2^{m-n} = Q_{n\geq 2,1},\; Q_{n\geq 2,n'\geq 2} = 2^{2m-n-n'}.$$

def QUBOs2(m):
    r"""Return the QUBO matrix Q such that \bar{s}^2 = \sum_{ij} Q_ij s_i s_j
    for an m-qubit register (after the two's complement transformation)."""
    if False:
        # Alternative block construction, kept for reference (equivalent to
        # the rank-one form below).
        powers = 2**(m-np.arange(2,m+1))
        Q = np.block([[np.ones((1,1)), powers.reshape(1,m-1)],
                      [powers.reshape(m-1,1), np.outer(powers,powers)]])
    else:
        # Rank-one form Q = h h^T with h = (1, 2^{m-2}, ..., 2, 1).
        h = [1] + [2**(m-n) for n in range(2,m+1)]
        Q = np.outer(h, h)
    return Q

# We can implement the exponential of the QUBO operator
# $$e^{\beta H} = U_{2comp} e^{\beta \sum_{ij} h_i h_j s_i s_j} U_{2comp},$$
# where $U_{2comp}$ is the two's complement operator.

def QUBOMPO(Q, β=1, **kwdargs):
    """Build an MPOList implementing U_2comp * exp(β Σ_ij Q_ij s_i s_j) * U_2comp.

    Parameters
    ----------
    Q        -- symmetric QUBO matrix, one row/column per qubit
    β        -- prefactor in the exponent
    **kwdargs-- forwarded to MPO / MPOList / twoscomplement
    """
    L = len(Q)
    # Absorb the prefactor into the matrix once, instead of at every site.
    Q = β*Q
    def tensor(u,v):
        # MPO tensor for site v of the factor associated with row u of Q.
        # The bond index carries whether s_u = 1 along the chain.
        A = np.zeros((2,2,2,2), dtype=np.float64)
        if v == u:
            A[1,1,1,1] = np.exp(Q[u,u])
            A[0,0,0,0] = 1.
        else:
            A[1,1,1,1] = np.exp(Q[u,v])
            A[1,0,0,1] = 1.
            A[0,1,1,0] = 1.
            A[0,0,0,0] = 1.
        # Close the open bond indices at the chain boundaries.
        if v==L-1:
            A = np.einsum('aijb->aij', A).reshape((A.shape[0],2,2,1))
        if v==0:
            A = np.einsum('aijb->ijb', A).reshape((1,2,2,A.shape[-1]))
        return A
    U2comp = twoscomplement(L, **kwdargs)
    return MPOList([U2comp] +
                   [MPO([tensor(u,v) for v in range(L)], **kwdargs)
                    for u in range(L)] +
                   [U2comp])

# And use this to implement the diffusion operator $\exp(Dk^2).$

# +
from mps.qft import qft, iqft, qft_flip
import matplotlib.pyplot as plt

def QFTFokkerPlanck(T=1.0, steps=10, D=0.2, μ=0.1, m=10, σ=1.0, a=None, b=None, filename=None):
    """Evolve a Gaussian under the Fokker-Planck equation with QFT-based split steps.

    Parameters
    ----------
    T        -- total evolution time
    steps    -- number of time points (δt = T/(steps-1))
    D        -- diffusion coefficient
    μ        -- drift coefficient
    m        -- number of qubits (grid of 2**m points)
    σ        -- width of the initial Gaussian
    a, b     -- grid interval; defaults to [-7σ, 7σ]
    filename -- if given, pickle (ψ, x, times, error, D, μ, b) to this path

    Returns
    -------
    (ψ, x, times, error) — states at every time step, grid, times and MPS errors.

    NOTE(review): relies on `os` and `pickle` being imported earlier in the
    notebook — confirm before extracting this cell.
    """
    if b is None:
        b = 7*σ
    if a is None:
        a = -b
    L = abs(b - a)
    x = np.linspace(a, b, 2**m)
    times = np.linspace(0, T, steps)
    δt = times[1]
    ψmps = GaussianMPS(m, σ, a=a, b=b, GR=False, simplify=True, normalize=True)
    # Drift propagator exp(-i μ δt k) and diffusion propagator exp(-δt D k^2),
    # both acting in momentum space.
    Uμ = mpo_expk(-1j * δt * μ, m, L, simplify=True)
    UD = QUBOMPO(QUBOs2(m), β=-δt*D*(2*np.pi/L)**2, simplify=True)
    ψ = [ψmps.tovector()]
    error = [0.]
    for t in times[1:]:
        # Forward QFT -> apply both propagators -> inverse QFT.
        ψmps = qft_flip(qft(ψmps, simplify=True));
        ψmps = UD.apply(Uμ.apply(ψmps))
        ψmps = qft_flip(iqft(ψmps, simplify=True))
        error.append(ψmps.error())
        ψ.append(ψmps.tovector())
        # Diagnostics: mean and variance of the (normalized) distribution.
        theψ = np.abs(ψ[-1])
        theψ /= np.sum(theψ)
        xm = np.sum(x * theψ)
        x2m = np.sum(x*x*theψ)
        σ2m = x2m - xm**2
        print(f't={t:4}, <x>={xm:5}, <σ2>={σ2m:5}, err={error[-1]}')
    plt.plot(np.abs(ψ[-1]))
    ψ = np.array(ψ)
    if filename is not None:
        with open(filename,'wb') as f:
            pickle.dump((ψ, x, times, error, D, μ, b), f)
    return ψ, x, times, error
# -

# Run the two benchmark simulations only once; results are cached on disk.
if not os.path.exists('data/fokker-planck-2d-b.pkl'):
    QFTFokkerPlanck(m=10, σ=1.0, T=30., steps=100, μ=0.5, D=0.1, b=10,
                    filename='data/fokker-planck-2d-b.pkl');
if not os.path.exists('data/fokker-planck-2d-c.pkl'):
    QFTFokkerPlanck(m=14, σ=1.0, T=30., steps=100, μ=0.5, D=0.1, b=10,
                    filename='data/fokker-planck-2d-c.pkl');
04 MPS Fourier methods.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # #### Exceptions try: file = open('nonExistDir/test.txt', 'rb') except IOError as e: print('An IOError occurred. {}'.format(e.args[-1])) # + # Handling multiple exceptions: try: file = open('nonExistDir/test.txt', 'rb') except (IOError, EOFError) as e: print("An error occurred. {}".format(e.args[-1])) # + def openWithException(): try: file = open('nonExistDir/test.txt', 'rb') except EOFError as e: print("An EOF error occurred.") raise e except IOError as e: print("An error occurred.") raise e return file try: file = openWithException() except Exception: print("Caught an exception") # + # finally # finally clause is executed whether or not an exception occurred. try: file = open('nonExistDir/test.txt', 'rb') except IOError as e: print('An IOError occurred. {}'.format(e.args[-1])) finally: print("This would be printed whether or not an exception occurred!") # + # try/else clause # The else clause would only run if no exception occurs and it would run before the finally clause. try: print('I am sure no exception is going to occur!') except Exception: print('exception') else: # any code that should only run if no exception occurs in the try, # but for which exceptions should NOT be caught print('This would only run if no exception occurs. And an error here ' 'would NOT be caught.') finally: print('This would be printed in every case.') # -
py_Exceptions.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

import pandas as pd
from datetime import datetime

# +
df_lists = pd.DataFrame()
start_year= 2017
end_year = 2020

# scrapes data from hockeyreference.com
for year in range (start_year,end_year+1):
    k=1
    # 2005 was the lockout so there is no data to be scraped
    if year == 2005:
        print("2005 was the lockout")
    else:
        url = r'https://www.hockey-reference.com/leagues/NHL_' + str(year) + r'_games.html'
        df_temp_reg = pd.DataFrame(pd.read_html(url)[0])
        df_temp_reg['season'] = year
        # playoff games live in a second table that may not exist yet
        try:
            df_temp_post = pd.DataFrame(pd.read_html(url)[1])
            df_temp_post['season'] = year
        except IndexError as e:
            k = 0
            print('no playoffs available yet')
        print (str(year) + " scraped")
        df_lists = df_lists.append(df_temp_reg)
        if k == 1:
            # BUG FIX: DataFrame.append returns a new frame; the original code
            # discarded the result, so playoff games were never actually kept.
            df_lists = df_lists.append(df_temp_post)
# -

# rename columns to better names
df_lists.rename(columns={'G':'VisitingGoals',
                         'G.1':'HomeGoals',
                         'Unnamed: 5':'OTSO',
                         }, inplace = True)

# drop not needed information
df_lists.drop(['Att.','LOG','Notes'], axis = 1, inplace=True)

# change date column to date type
df_lists.loc[:,'Date'] = pd.to_datetime(df_lists['Date'])

# +
# replace names of teams that have changed location/name
# NOTE(review): the '<NAME>' keys look like redacted team names (likely
# 'Atlanta Thrashers' for the Winnipeg Jets mapping) — restore before running.
replace_dict= {'Home':{'<NAME>': 'Winnipeg Jets',
                       'Mighty Ducks of Anaheim': 'Anaheim Ducks',
                       'Phoenix Coyotes': 'Arizona Coyotes'},
               'Visitor':{'<NAME>': 'Winnipeg Jets',
                          'Mighty Ducks of Anaheim': 'Anaheim Ducks',
                          'Phoenix Coyotes': 'Arizona Coyotes'}}
df_lists.replace(replace_dict,inplace=True)
# -

# change empty values in OTSO to display 'reg'
ind = df_lists['OTSO'].isna()
df_lists.loc[ind,'OTSO'] = 'REG'
# list of all the teams sorted (ndarray.sort() works in place and returns
# None, so the sort must be a separate statement)
teams = df_lists['Home'].unique()
teams.sort()

# create initial elos dictionary of 1500 rating for all the teams
team_elos = {}
for team in teams:
    team_elos[team] = 1500

# +
# functions to update the elos and return likelihood of winning
k_factor = 10

def update_elo(w_elo, l_elo):
    """Return (winner, loser) ratings after transferring k_factor*(1-expected) points."""
    exp_win = expected_result(w_elo, l_elo)
    change = k_factor * (1-exp_win)
    w_elo = w_elo + change
    l_elo = l_elo - change
    return w_elo, l_elo

def expected_result(elo_a, elo_b):
    """Return the probability that a player rated elo_a beats one rated elo_b
    (standard Elo logistic curve with scale 400)."""
    expect = 1.0/(1+10**((elo_b - elo_a)/400))
    return expect
# -

# reset the index of the scraped data
games = df_lists.reset_index()

# drop the index column
games.drop('index', axis=1, inplace=True)

# rename the season column to capitalize it
games.rename(columns={'season':'Season'}, inplace = True)

# for all the team elos store it in a class which inherits from dictionary
class TeamElos(dict):
    """Dictionary of team name -> Elo rating, initialised at 1500 for every team."""
    def __init__(self):
        super().__init__(self)
        for team in teams:
            self[team] = 1500

    def update(self, game_tuple):
        """Update both teams' ratings from one finished game (a games.itertuples() row).

        NOTE(review): assumes no ties (VisitingGoals != HomeGoals); an equal
        score would be counted as a home win.
        """
        if game_tuple.VisitingGoals>game_tuple.HomeGoals:
            winning_team = game_tuple.Visitor
            losing_team = game_tuple.Home
        else:
            winning_team = game_tuple.Home
            losing_team = game_tuple.Visitor

        self[winning_team], self[losing_team] = update_elo(self[winning_team],self[losing_team])

# create object
elos = TeamElos()

# iterate through the game data and update the teamelos object each time
for row in games.itertuples():
    if row.Date.date()< pd.Timestamp.today().date():
        elos.update(row)

# change the elos object to a dataframe and then sort the values
elos=pd.DataFrame(elos, index = ['Elo Rating']).T.sort_values(by='Elo Rating', ascending = False)

# +
# select the games that have not happened yet
ind = games['Date']> pd.Timestamp('today')
games_future = games.loc[ind,:].reset_index()

# drop all the games that happened in the past
games_future.drop('index', axis=1, inplace=True)
# -

# select the first upcoming games (NOTE: .loc label slicing is end-inclusive,
# so 0:10 keeps 11 rows, not 10)
games_prediction = games_future.loc[0:10,['Date', 'Visitor', 'Home']]

# +
# merge the upcoming games with the elo ratings of the teams
games_prediction = games_prediction.merge(elos,left_on= 'Visitor', right_index = True)

# change the name of the column
games_prediction.rename(columns={'Elo Rating':'Visitor Elo Rating' }, inplace = True)

# do the same thing for home teams
games_prediction = games_prediction.merge(elos,left_on= 'Home', right_index = True)
games_prediction.rename(columns={'Elo Rating':'Home Elo Rating' }, inplace = True)
# -

# use the expected result function to calculate the chance of winning
games_prediction['Home Win%']= expected_result(games_prediction['Home Elo Rating'],games_prediction['Visitor Elo Rating'])
games_prediction['Visitor Win%'] = 1-games_prediction['Home Win%']

# drop the elo ratings
games_prediction.drop(['Visitor Elo Rating','Home Elo Rating'], axis = 1, inplace=True)

# change order of the columns
games_prediction = games_prediction[['Date', 'Visitor', 'Visitor Win%','Home','Home Win%']]

dict(games_prediction)

# +
from peewee import *

db = SqliteDatabase('elos.db')

class Prediction(Model):
    # One row per upcoming game with both teams' win probabilities.
    date = DateField()
    visitor = CharField(max_length=50)
    visitor_win = FloatField(default=0)
    home = CharField(max_length=50)
    home_win = FloatField(default=0)

    class Meta:
        database = db

class Elos(Model):
    # Current Elo rating for each team.
    team = CharField(max_length=50)
    elo = FloatField(default=1500)

    class Meta:
        database = db

def add_information_prediction(row):
    """Persist one games_prediction itertuples() row as a Prediction record."""
    Prediction.create(date=row.Date,
                      visitor=row.Visitor,
                      visitor_win=row[3],
                      home=row.Home,
                      home_win=row[5]
                      )

def add_information_elos(tup):
    """Persist one (team, rating) tuple as an Elos record."""
    # BUG FIX: the model field is named 'elo'; the original passed 'elos=',
    # so the rating was never stored and every row kept the default 1500.
    Elos.create(team=tup[0],
                elo=tup[1])

if __name__ == '__main__':
    db.connect()
    db.create_tables([Elos, Prediction], safe=True)
# -

for row in games_prediction.itertuples():
    add_information_prediction(row)

# NOTE(review): elos.values is an (n, 1) array, so float() here relies on
# size-1 array conversion (deprecated in recent NumPy) — confirm.
for row in zip(elos.index, map(float,elos.values)):
    add_information_elos(row)

elos.index

for row in games_prediction.itertuples():
    print(row[3])

games_prediction

# +
class TeamElos(dict):
    """Redefinition of TeamElos above that also records each team's rating history."""
    def __init__(self,teams):
        super().__init__(self)
        for team in teams:
            self[team] = 1500
        self.history = HistoryList(teams)

    def update(self, game_tuple):
        """Update ratings from one game and append both new ratings to the history."""
        if game_tuple.VisitingGoals > game_tuple.HomeGoals:
            winning_team = game_tuple.Visitor
            losing_team = game_tuple.Home
        else:
            winning_team = game_tuple.Home
            losing_team = game_tuple.Visitor

        self[winning_team], self[losing_team] = update_elo(
            self[winning_team], self[losing_team])

        self.history.update(
            winning_team = winning_team,
            losing_team = losing_team,
            win_elo = self[winning_team],
            lose_elo = self[losing_team],
            date = game_tuple.Date
            )

class HistoryList(dict):
    """Dictionary of team name -> DataFrame of (Date, Elo Rating) after each game."""
    def __init__(self, teams):
        super().__init__(self)
        for team in teams:
            self[team] = pd.DataFrame(columns = ['Date', 'Elo Rating'])

    def update(self, winning_team, losing_team, win_elo, lose_elo, date):
        # DataFrame.append is deprecated in pandas >= 1.4; kept here for
        # consistency with the rest of this notebook.
        self[winning_team] = self[winning_team].append({'Date': date,'Elo Rating': win_elo}, ignore_index = True)
        self[losing_team]= self[losing_team].append({'Date': date,'Elo Rating': lose_elo}, ignore_index = True)

elos = TeamElos(teams)

for row in games.itertuples():
    if row.Date.date() < pd.Timestamp.today().date():
        elos.update(row)

print(elos.history['Toronto Maple Leafs'])
# -

import numpy as np
import matplotlib.pyplot as plt

# +
def plot_history(*args):
    """Plot the Elo-rating history of every team name passed as an argument."""
    fig = plt.figure()
    ax = fig.add_subplot(111)
    ax.set_ylim([1350,1700])
    plt.xticks(rotation=45)
    for team in args:
        cax = plt.plot(elos.history[team]['Date'], elos.history[team]['Elo Rating'], label = team)
    plt.title(f'{args} Elo History')
    plt.legend()

plot_history('Toronto Maple Leafs','Washington Capitals', 'Boston Bruins')
# -

elos.history['Toronto Maple Leafs']
.ipynb_checkpoints/NHL Elo-checkpoint.ipynb
# +
# Gaussian discriminant analysis in 2d
# Author: <NAME>, heavily modified by <NAME>
# Based on matlab code by Kevin Murphy
# https://github.com/probml/pmtk3/blob/master/demos/discrimAnalysisDboundariesDemo.m

import numpy as np
import matplotlib.pyplot as plt
import os

figdir = "figures"


def savefig(fname):
    """Write the current matplotlib figure to <figdir>/<fname>."""
    target = os.path.join(figdir, fname)
    plt.savefig(target)


try:
    from sklearn.discriminant_analysis import LinearDiscriminantAnalysis as LDA
except ModuleNotFoundError:
    # %pip install scikit-learn
    from sklearn.discriminant_analysis import LinearDiscriminantAnalysis as LDA
from sklearn.discriminant_analysis import QuadraticDiscriminantAnalysis as QDA
from sklearn.preprocessing import OneHotEncoder


def mvn2d(x, y, u, sigma):
    """Evaluate the bivariate normal density N(u, sigma) on the grid spanned by x and y.

    Returns an array of shape (len(y), len(x)) with the pdf values.
    """
    gx, gy = np.meshgrid(x, y)
    pts = np.c_[gx.ravel(), gy.ravel()]
    centered = pts - u
    # Squared Mahalanobis distance of every grid point from the mean.
    mahal = np.sum((centered @ np.linalg.inv(sigma)) * centered, axis=1)
    norm_const = 2 * np.pi * np.linalg.det(sigma) ** 0.5
    density = np.exp(-0.5 * mahal) / norm_const
    return density.reshape(gx.shape)


# Each model specifies the means and covariances.
# If the covariances are equal across classes, dboundaries
# will be linear even if we use QDA


def is_pos_def(x):
    """Return True if x is positive definite (all eigenvalues > 0)."""
    return np.all(np.linalg.eigvals(x) > 0)


# Each model is a (means, covariances) pair; one mean/covariance per class.
model1 = ([[1.5, 1.5], [-1.5, -1.5]], [np.eye(2)] * 2)
model2 = ([[1.5, 1.5], [-1.5, -1.5]], [[[1.5, 0], [0, 1]], np.eye(2) * 0.7])
model3 = ([[0, 0], [0, 5], [5, 5]], [np.eye(2)] * 3)
Sigma1 = np.array([[4, 1], [1, 2]])
Sigma2 = np.array([[2, 0], [0, 1]])
Sigma3 = np.eye(2)
model4 = ([[0, 0], [0, 4], [4, 4]], [Sigma1, Sigma2, Sigma3])
models = [model1, model2, model3, model4]
models = [model4]  # only run the 3-class, unequal-covariance demo

ngrid = 200
n_samples = 30  # number of each class samples
model_names = ("LDA", "QDA")
np.random.seed(0)


def make_data(u, sigma):
    """Draw n_samples points per class from N(u[i], sigma[i]); return (points, labels)."""
    # generate random points
    x = []  # store sample points
    labels = []  # store class labels
    nclasses = len(u)  # means
    for i in range(nclasses):
        x.append(np.random.multivariate_normal(u[i], sigma[i], n_samples))
        labels.append([i] * n_samples)
    return x, labels


def make_grid(x):
    """Build an ngrid x ngrid evaluation grid covering all points with a 1-unit margin."""
    points = np.vstack(x)
    x_min, y_min = np.min(points, axis=0)
    x_max, y_max = np.max(points, axis=0)
    x_range = np.linspace(x_min - 1, x_max + 1, ngrid)
    y_range = np.linspace(y_min - 1, y_max + 1, ngrid)
    xx, yy = np.meshgrid(x_range, y_range)
    return xx, yy, x_range, y_range


def plot_dboundaries(xx, yy, z, z_p):
    """Shade the predicted class regions and draw the 0.5-probability boundaries."""
    plt.pcolormesh(xx, yy, z, alpha=0.1)
    plt.jet()
    nclasses = z_p.shape[1]
    for j in range(nclasses):
        # BUG FIX: plt.contour takes 'linewidths'; the Line2D alias 'lw' is
        # not a valid contour keyword.
        plt.contour(xx, yy, z_p[:, j].reshape(ngrid, ngrid), [0.5], linewidths=3, colors="k")


def plot_points(x):
    """Scatter-plot each class with its own color/marker (blue x, green o, red s)."""
    c = "bgr"
    m = "xos"
    for i, point in enumerate(x):
        plt.plot(point[:, 0], point[:, 1], c[i] + m[i])


def plot_contours(xx, yy, x_range, y_range, u, sigma):
    """Draw the density contours of each class-conditional Gaussian."""
    nclasses = len(u)
    c = "bgr"
    for i in range(nclasses):
        prob = mvn2d(x_range, y_range, u[i], sigma[i])
        plt.contour(xx, yy, prob, colors=c[i])


def make_one_hot(yhat):
    """Return the one-hot encoding of a 1-d label vector as a dense array."""
    yy = yhat.reshape(-1, 1)  # make 2d
    enc = OneHotEncoder(sparse=False)
    Y = enc.fit_transform(yy)
    return Y


for u, sigma in models:
    x, labels = make_data(u, sigma)
    xx, yy, x_range, y_range = make_grid(x)
    X = np.vstack(x)
    Y = np.hstack(labels)

    plt.figure()
    plot_points(x)
    plt.axis("square")
    plt.tight_layout()
    savefig("gda_2d_data.pdf")
    plt.show()

    plt.figure()
    plot_points(x)
    plot_contours(xx, yy, x_range, y_range, u, sigma)
    plt.axis("square")
    plt.tight_layout()
    savefig("gda_2d_contours.pdf")
    plt.show()

    # Fit both discriminant analyses and plot their decision boundaries.
    for k, clf in enumerate((LDA(), QDA())):
        clf.fit(X, Y)
        z = clf.predict(np.c_[xx.ravel(), yy.ravel()])
        z = z.reshape(ngrid, ngrid)
        z_p = clf.predict_proba(np.c_[xx.ravel(), yy.ravel()])
        yhat = clf.predict(np.c_[xx.ravel(), yy.ravel()])
        Yhat = make_one_hot(yhat)

        plt.figure()
        # plot_dboundaries(xx, yy, z, z_p)
        plot_dboundaries(xx, yy, z, Yhat)
        plot_points(x)
        plot_contours(xx, yy, x_range, y_range, u, sigma)
        plt.title(model_names[k])
        plt.axis("square")
        plt.tight_layout()
        savefig("gda_2d_{}.pdf".format(model_names[k]))
        plt.show()
notebooks/book1/09/discrim_analysis_dboundaries_plot2.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # **Note**: There are multiple ways to solve these problems in SQL. Your solution may be quite different from mine and still be correct. # **1**. Connect to the SQLite3 database at `data/faculty.db` in the `notebooks` folder using the `sqlite` package or `ipython-sql` magic functions. Inspect the `sql` creation statement for each tables so you know their structure. # %load_ext sql # %sql sqlite:///../notebooks/data/faculty.db # + language="sql" # # SELECT sql FROM sqlite_master WHERE type='table'; # - # 2. Find the youngest and oldest faculty member(s) of each gender. # + language="sql" # # SELECT min(age), max(age) FROM person # + language="sql" # # SELECT first, last, age, gender # FROM person # INNER JOIN gender # ON person.gender_id = gender.gender_id # WHERE age IN (SELECT min(age) FROM person) AND gender = 'Male' # UNION # SELECT first, last, age, gender # FROM person # INNER JOIN gender # ON person.gender_id = gender.gender_id # WHERE age IN (SELECT min(age) FROM person) AND gender = 'Female' # UNION # SELECT first, last, age, gender # FROM person # INNER JOIN gender # ON person.gender_id = gender.gender_id # WHERE age IN (SELECT max(age) FROM person) AND gender = 'Male' # UNION # SELECT first, last, age, gender # FROM person # INNER JOIN gender # ON person.gender_id = gender.gender_id # WHERE age IN (SELECT max(age) FROM person) AND gender = 'Female' # LIMIT 10 # - # 3. Find the median age of the faculty members who know Python. # # As SQLite3 does not provide a median function, you can create a User Defined Function (UDF) to do this. See [documentation](https://docs.python.org/2/library/sqlite3.html#sqlite3.Connection.create_function). 
import statistics

class Median:
    """Aggregate class for SQLite's create_aggregate: collects values and returns their median."""
    def __init__(self):
        self.acc = []

    def step(self, value):
        # Called by SQLite once per row with the aggregated column value.
        self.acc.append(value)

    def finalize(self):
        # Called once after the last row; the return value is the aggregate result.
        return statistics.median(self.acc)

import sqlite3
con = sqlite3.connect('../notebooks/data/faculty.db')
con.create_aggregate("Median", 1, Median)
cr = con.cursor()
cr.execute('SELECT median(age) FROM person')
cr.fetchall()

# 4. Arrange countries by the average age of faculty in descending order. Countries are only included in the table if there are at least 3 faculty members from that country.

# NOTE(review): the query below uses `HAVING count(*) > 3`, i.e. strictly more
# than 3 members — confirm against the "at least 3" wording above.

# + language="sql"
#
# SELECT country, count(country), avg(age)
# FROM person
# INNER JOIN country
# ON person.country_id = country.country_id
# GROUP BY country
# HAVING count(*) > 3
# ORDER BY age DESC
# LIMIT 3
# -

# 5. Which country has the highest average body mass index (BMI) among the faculty? Recall that BMI is weight (kg) / (height (m))^2.

# + language="sql"
#
# SELECT country, avg(weight / (height*height)) as avg_bmi
# FROM person
# INNER JOIN country
# ON person.country_id = country.country_id
# GROUP BY country
# ORDER BY avg_bmi DESC
# LIMIT 3
# -

# 6. Do obese faculty (BMI > 30) know more languages on average than non-obese faculty?

# + language="sql"
#
# SELECT is_obese, avg(language)
# FROM (
# SELECT
# weight / (height*height) > 30 AS is_obese,
# count(language_name) AS language
# FROM person
# INNER JOIN person_language
# ON person.person_id = person_language.person_id
# INNER JOIN language
# ON person_language.language_id = language.language_id
# GROUP BY person.person_id
# )
# GROUP BY is_obese
# -
quiz/Quiz0_Solutions.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # Estimate car price - Apply saved model
# #### <NAME>

# This notebook is part of a Machine Learning project that is described and available to download on
# <BR>https://blogs.sap.com/2019/11/05/hands-on-tutoria…hana-with-python/ (link truncated)
# <BR><BR>The purpose of this notebook is to apply a trained Machine Learning model to predict a car's price.

# ### Steps in this notebook
# - Connect to SAP HANA
# - Load the trained model from SAP HANA
# - Create a SAP HANA DataFrame which points to the data that is to be used for a prediction
# - Apply the model that has been loaded on the data and predict the prices
#
# ### Documentation
# - SAP HANA Python Client API for Machine Learning Algorithms:
# https://help.sap.com/doc/0172e3957b5946da85d3fde85ee8f33d/latest/en-US/html/hana_ml.html
# - SAP HANA Predictive Analysis Library (PAL):
# https://help.sap.com/viewer/2cfbc5cf2bc14f028cfbe2a2bba60a50/latest/en-US/f652a8186a144e929a1ade7a3cb7abe8.html
# - Dataset: https://www.kaggle.com/bozungu/ebay-used-car-sales-data

# ### Create a SAP HANA DataFrame, which points to the training data
# Instantiate a connecton object to SAP HANA.
# - For simplicity, to help you get started, these values are hardcoded here.
# - We recommend keeping these credentials in the Secure User Store of the SAP HANA Client. Retrieving the credentials from the Secure User Store prevents having to specify these credentials in clear text. See the blog on the SAP Community to which these notebooks belong, for steps on how to use that Secure User Store.

import hana_ml.dataframe as dataframe
# '<KEY>' is a placeholder for the Secure User Store key name — replace it
# before running.
conn = dataframe.ConnectionContext(key = '<KEY>')

# ### Load model
# Load the trained model from SAP HANA.

import hana_ml as hana_ml
from hana_ml.algorithms.pal import trees

# Re-create the regression-tree wrapper and point its model_ attribute at the
# model table persisted by the training notebook.
df_model_saved = hana_ml.dataframe.DataFrame(connection_context = conn,
                                             select_statement = 'select * from ML.USEDCARPRICES_MODEL_REGTREE')
tree_reg_saved = trees.DecisionTreeRegressor(conn_context = conn,
                                             algorithm = 'cart')
tree_reg_saved.model_ = df_model_saved.select('ROW_INDEX', 'MODEL_CONTENT')

# ### Predict price of "new" cars
# Create a SAP HANA DataFrame, which points to the table that contains the cars for which the prices are to be predicted. No data is extracted.

df_pushdown_new = conn.table(table = 'USEDCARPRICES_TOPREDICT', schema = 'ML')

# Peek at the cars that are to be predicted.

df_pushdown_new.head(3).collect()

# Apply the trained model on the cars to estimate the price.

features = ['VEHICLETYPE', 'YEAR', 'MODEL', 'HP', 'FUELTYPE', 'KILOMETER', 'GEARBOX']
df_pushdown_predict = tree_reg_saved.predict(data = df_pushdown_new,
                                             features = features,
                                             key = 'CAR_ID').select('CAR_ID', 'SCORE')
df_predict = df_pushdown_predict.collect()
df_predict

# Compare the two prices in a bar chart.

# %matplotlib inline
import pandas as pd
# SCORE comes back as string; convert for plotting, and use CAR_ID as category.
df_predict['SCORE'] = pd.to_numeric(df_predict['SCORE'])
df_predict['CAR_ID'] = df_predict['CAR_ID'].astype(str)
df_predict.plot.bar(x = 'CAR_ID', y = 'SCORE', title = 'Estimated Price');

# Remember that both cars are identical apart from their mileage. The car that has driven an additional 100.000 kilometers is worth over 4.000 Euros less. We are satisfied with the predictions and persist them in a SAP HANA table.

df_pushdown_result = df_pushdown_predict.save(where = ('ML', 'USEDCARPRICES_PREDICTED'),
                                              table_type = 'COLUMN',
                                              force = True)

# ### Close connection

conn.close()

# ### Summary
# By going through the notebooks of this project you now have an understanding of how a Data Scientist can leverage SAP HANA directly out of Python. Without having to extract data from SAP HANA, the data can be explored and imputed.
# Machine Learning models can be trained, tuned and analysed. Predictions can be carried out, visualised and persisted as a SAP HANA table.
# <BR><BR>Consider using SAP Data Intelligence to deploy the Python code into an ongoing productive process, which can retrain the model and provide predictions in real-time. As the calculations were pushed down to SAP HANA, the code in the Notebooks is also documentation of the different Data Science steps. As all steps are transparent, it becomes easier for IT to deploy the code into an ongoing process under corporate governance.
Python-API/usecase-examples/estimate-car-price/30 Apply saved model.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: rtc_analysis
#     language: python
#     name: rtc_analysis
# ---

# # What if we just restart considering a nonblended

import sys
sys.path.insert(0,"/home/nico/Documents/TEAR/Codes_TEAR/PythonCodes/LibFolder")
from Lib_GeneralFunctions import *
from Lib_GeneralSignalProcNAnalysis import *
from Lib_SigmoidProcessing import *

import pandas as pd
from matplotlib.gridspec import GridSpec

# +
# Reference slip / slip-rate time series, one object per receiver.
class SSCreference:
    """Reference slip and slip-rate time series for one receiver, loaded from csv.

    Parameters
    ----------
    filename    -- csv path template with one '{}' placeholder; it is formatted
                   with "slip" and "sr" to load the two series
    coordinates -- receiver location label (only used for labels and printing)
    RefSource   -- code that generated the reference (default "SEM2DPACK")
    """
    def __init__(self, filename, coordinates, RefSource="SEM2DPACK"):
        line = pd.read_csv(filename.format("slip"), header=None)
        self.Time = line[0]
        self.Slip = line[1]

        line = pd.read_csv(filename.format("sr"), header=None)
        self.SlipRate = line[1]

        self.Coord = coordinates #Only used for labels and printing
        self.RefSource = RefSource
    #end __init__

    # Default object printing information
    # BUG FIX: the message said "TPV3reference" although the class is SSCreference.
    def __repr__(self):
        return "The SSCreference object was generated from: {} and the receiver is located at {}".format(self.RefSource, self.Coord)
    #end __repr__

    # __str__ intentionally shows the same information as __repr__.
    __str__ = __repr__

    def PlotReference(self, ax, SlipSlipRate, filtering=True, **kwargs):
        """Plot this receiver's "Slip" or "SlipRate" series on ax as a dashed black line.

        If filtering is True the series is low-pass filtered with Butterworth
        (kwargs are forwarded to it). Unknown SlipSlipRate values plot nothing.
        """
        if SlipSlipRate=="Slip":
            data = self.Slip
        elif SlipSlipRate=="SlipRate":
            data = self.SlipRate
        else:
            return ax
        if(filtering):
            data = Butterworth(data, **kwargs)
        ax.plot(self.Time, data, label = "", c = "k", ls = "--", zorder=1)
        return ax

def GenericFigAxis():
    """Create a 1x2 figure and return (fig, [ax_slip, ax_sliprate])."""
    fig = plt.figure(figsize=[15,5])
    gs = GridSpec(1, 2)
    ax1 = fig.add_subplot(gs[0, 0])
    ax2 = fig.add_subplot(gs[0, 1])
    return fig, [ax1, ax2]

def format_axes(fig):
    """
    Format a figure and 4 equidistant receivers' lines from a single file.
    Receiver distance defines the color.
    """
    for i, ax in enumerate(fig.axes):
        ax.set_xlim(-0.5,4)
        ax.set_ylim(-0.5,8)
        ax.set_xlabel("time(s)")

    Lines = fig.axes[-1].get_lines()
    legend2 = fig.axes[-1].legend(Lines, ['2km','4km', '6km', '8km'], loc=1)
    fig.axes[-1].add_artist(legend2)

    fig.axes[-1].set_ylabel("Slip Rate (m/s)")
    fig.axes[0].set_ylabel("Slip (m)")

def Multi_format_axes(fig, cmap, LabelsPerColor):
    """
    Format a figure that contains different files with information from several
    receivers for simulations under sets of blending parameters.
    """
    ColorDict = dict(enumerate(LabelsPerColor))
    for i, ax in enumerate(fig.axes):
        ax.set_xlim(-0.5,4)
        ax.set_ylim(-0.5,8)
        ax.set_xlabel("time(s)")

    # One proxy line per colormap color; colors beyond len(LabelsPerColor)
    # get label None.
    Lines = []
    for idx,colcol in enumerate(cmap.colors):
        Lines.append(mlines.Line2D([], [], color = colcol, linewidth = 3, label = ColorDict.get(idx)))

    legend2 = fig.axes[-1].legend(Lines, LabelsPerColor, loc = 2)
    fig.axes[-1].add_artist(legend2)

    fig.axes[-1].set_ylabel("Slip Rate (m/s)")
    fig.axes[0].set_ylabel("Slip (m)")
# -

path = "/home/nico/Documents/TEAR/Codes_TEAR/ProfilePicking/Output/"

# Reference saved into a list of objects
RefList = [SSCreference(path + "Reference/sem2dpack/sem2d-{}-1.txt", "2km"),
           SSCreference(path + "Reference/sem2dpack/sem2d-{}-2.txt", "4km"),
           SSCreference(path + "Reference/sem2dpack/sem2d-{}-3.txt", "6km"),
           SSCreference(path + "Reference/sem2dpack/sem2d-{}-4.txt", "8km"),
          ]

# +
from matplotlib.colors import ListedColormap
import matplotlib.lines as mlines

from palettable.cartocolors.qualitative import Safe_6
cmap = ListedColormap(Safe_6.mpl_colors[:])
# -

# +
# All the comparison figures below follow the same recipe, so it is factored
# out here instead of being copy-pasted per case.
def PlotTiltedGroups(groups, LabelsPerColor, title):
    """Plot DispX/VelX of every receiver in each group (one color per group),
    overlay the SEM2DPACK references and format the axes.

    groups -- list of receiver lists as returned by LoadPickleFile; the last
              entry of each list is skipped (matches the original [:-1] slicing).
    Returns (fig, axis).
    """
    fig, axis = GenericFigAxis()
    for iidx, group in enumerate(groups):
        for Test1 in group[:-1]:
            axis[0].plot(Test1.Time, Test1.DispX, color= cmap.colors[iidx],linewidth=2,zorder=iidx)
            axis[1].plot(Test1.Time, Test1.VelX, color= cmap.colors[iidx],linewidth=2,zorder=iidx)

    Multi_format_axes(fig, cmap, LabelsPerColor)
    fig.suptitle(title)

    [item.PlotReference(axis[0], "Slip", filtering=False) for item in RefList]
    [item.PlotReference(axis[1], "SlipRate", filtering=False) for item in RefList]
    return fig, axis
# -

FolderTiltedPath = "/home/nico/Documents/TEAR/Codes_TEAR/PythonCodes/[SSC]Sigmoid/ProcessedData/20210317-Tilting/"
TiltedFile = LoadPickleFile(Filename = "P1-Tilt20.0-TPList_t2045_d125.1.pickle", FolderPath = FolderTiltedPath)
TiltedFile1 = LoadPickleFile(Filename = "P1.1-Tilt20.0-TPList_t2045_d125.1.pickle", FolderPath = FolderTiltedPath)

PlotTiltedGroups([TiltedFile, TiltedFile1],
                 ["dy:100m, $\delta_f$:1.251"],
                 "Tilted 20deg - dx:125m - P1 - $\delta=\delta_f \cdot dy\sim$125m")

FolderTiltedPath = "/home/nico/Documents/TEAR/Codes_TEAR/PythonCodes/[SSC]Sigmoid/ProcessedData/20210317-TiltingNew/"
TiltedFile = LoadPickleFile(Filename = "P1.1-Tilt20.0-TPList_t2045_d125.1.pickle", FolderPath = FolderTiltedPath)

PlotTiltedGroups([TiltedFile],
                 ["dy:100m, $\delta_f$:1.251"],
                 "Tilted 20deg - dx:125m - P1 - $\delta=\delta_f \cdot dy\sim$125m")

FolderTiltedPath = "/home/nico/Documents/TEAR/Codes_TEAR/PythonCodes/[SSC]Sigmoid/ProcessedData/20210317-TiltingNew/"
TiltedFile = LoadPickleFile(Filename = "P5-Tilt20.0-TPList_t566_d100.0.pickle", FolderPath = FolderTiltedPath)
TiltedFile1 = LoadPickleFile(Filename = "P6-Tilt20.0-TPList_t707_d100.0.pickle", FolderPath = FolderTiltedPath)
TiltedFile2 = LoadPickleFile(Filename = "P7-Tilt20.0-TPList_t1131_d100.0.pickle", FolderPath = FolderTiltedPath)

PlotTiltedGroups([TiltedFile, TiltedFile1, TiltedFile2],
                 ["dx,dy:100m","dx,dy:80m","dx,dy:50m"],
                 "Tilted 20deg - dx:125m - P1 - $\delta=100m$")

FolderTiltedPath = "/home/nico/Documents/TEAR/Codes_TEAR/PythonCodes/[SSC]Sigmoid/ProcessedData/20210317-TiltingNew/"
TiltedFile = LoadPickleFile(Filename = "PP1-Tilt20.0-TPList_t2045_d100.1.pickle", FolderPath = FolderTiltedPath)

PlotTiltedGroups([TiltedFile],
                 ["dx,dy:100m","dx,dy:80m","dx,dy:50m"],
                 "Tilted 20deg - dx:100m - P3 - $\delta=100.1m$")

FolderTiltedPath = "/home/nico/Documents/TEAR/Codes_TEAR/PythonCodes/[SSC]Sigmoid/ProcessedData/20210317-TiltingNew/"
TiltedFile = LoadPickleFile(Filename = "PP2-Tilt20.0-TPList_t2557_d80.07999999999998.pickle", FolderPath = FolderTiltedPath)

PlotTiltedGroups([TiltedFile],
                 ["dx,dy:80m"],
                 "Tilted 20deg - P3 - $\delta=80*1.001m$")

FolderTiltedPath = "/home/nico/Documents/TEAR/Codes_TEAR/PythonCodes/[SSC]Sigmoid/ProcessedData/20210318-TiltingNew/"
TiltedFile = LoadPickleFile(Filename = "P5-Tilt20.0-TPList_t3274_d50.05.pickle", FolderPath = FolderTiltedPath)
TiltedFile1 = LoadPickleFile(Filename = "P6-Tilt20.0-TPList_t4092_d50.05.pickle", FolderPath = FolderTiltedPath)

PlotTiltedGroups([TiltedFile, TiltedFile1],
                 ["dx,dy:100m","dx,dy:80m"],
                 "Tilted 20deg - P4 - $\delta=dy*1.001m$")
PythonCodes/[SSC]Sigmoid/20210317-Tilting_Nonblended.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: rtc_analysis # language: python # name: rtc_analysis # --- # <img src="NotebookAddons/DEVELOP_logo1.jpg" width="250" align="right"/> # <font face="Century Gothic"> # <font size="6"> <b> # Inundation Mapping from Time Series of Dual-Pol SAR Data <br></font><br> # <font size="4"> <b><br> # NASA Jet Propulsion Laboratory <br><br> DEVELOP Program Fall 2018 & Spring 2019 - Alaska Wetland Mapping</b></font><br> # This Jupyter Notebook implements the SAR Wetland Extent Exploration Tool (SWEET), a thresholding-based approach to perform semi-automated mapping of wetland inundation. Required inputs are intensity images of VV+VH polarization bands from Sentinel-1 C-SAR imagery. Loaded images should already be co-registered, subset, radiometrically corrected, and in a projected coordinate system. The notebook outputs classifications of typical, minimum, and maximum inundation states along with multi-looked classifications for individual dates. The inundation classes are: open water, inundated, and not inundated. <br><br> # Additional information on the software requirements, Python dependencies, usage instructions, and project directory structure can be found in the README file. # # **Purpose:** <br> # Process Sentinel-1 VH/VV SAR data and produce maps showing wetland inundation extent in Alaska. Alaska is used as an example for this exercise. Application to other areas is possible. 
Note, however, that due to the limited penetration of Sentinel-1 C-band SAR data, application in densely vegetated areas may lead to an underestimation of inundation extent.<br> # <img style="padding: 7px" src="NotebookAddons/website_Image2.png" width="400" align="right"/> # # **Process:** # - Read in data (assumes data is already subset and coregistered) # - Select dates to include in analysis based on VV, VH, and VV/VH plots for each date and average backscatter timeseries plots (can exclude images that appear to have calibration errors) # - Calculate multi-temporal averages for VV, VH, and VV/VH (Ia) # - Classify Ia with thresholds to find typical inundation state # - Perform multi-looking on individual dates and classify them to obtain classifications for each # date # - Determine maximum and minimum inundation state from classified individual dates # - Export classified products as GeoTIFFs for validation # <br><br> # # **To do:** # - Implement calibration correction using selected calibration sites # - (Optional) Visual calibration checks:<br> # <t><t> a) Individual scene/multi-temporal average ratio plots <br> # <t><t> b) Average brightness in circle of variable radius around selected pixel over time # </font> # # <font face="Century Gothic"> # <b>Background:</b><br> # Synthetic Aperture Radar (SAR) polarimetric backscatter is a powerful datasource, with its ability to penetrate cloud cover and acquire imagery in low-light conditions. Sentinel-1's C-band wavelength is particularly useful for wetland mapping, as the longer C-band wavelength can penetrate vegetation canopies to detect areas of inundated vegetation.<br> # <br> # Sentinel-1 has dual polarization, where it can transmit a signal in either horizontal (H) or vertical (V) polarizations and receive signals back both vertically and horizontally. 
This notebook uses both vertically transmitted vertically received (VV) and vertically transmitted horizontally received (VH) intensity images and backscatter values.<br> # <br> # With SAR, pulses of microwave energy are emitted and received by the radar instrument's antenna. When the pulses hit smooth surfaces such as open water, the microwave energy scatters and reflects away, resulting in a low return of backscatter to the radar. Thus, flat surfaces like water appear as dark targets in the image. In cases of inundated vegetation, the microwaves scatter off both the flat open water and the vertically emerging vegetation, resulting in a "double-bounce" reflection. In these double-bounce reflections, a large portion of the transmitted waves is reflected back to the radar, resulting in brighter areas in the image.<br> # <br> # VV signals are of interest as they capture the increased brightness from the "double bounce" effect that commonly occurs with areas of inundated vegetation. This effect is even stronger in HH and at L-band wavelengths. Hence, future systems such as NISAR will improve the inundation mapping performance further by providing HH polarization data at L-band.<br> # <br> # To create an algorithm to identify areas of inundation, we determined brightness threshold characteristics of inundated areas. To identify shifts in inundation, we also looked at the changes over time in image brightness.
# </font> # # + pycharm={"name": "#%%\n"} # %%javascript var kernel = Jupyter.notebook.kernel; var command = ["notebookUrl = ", "'", window.location, "'" ].join('') // alert(command) kernel.execute(command) # + from IPython.display import Markdown from IPython.display import display # env = !echo $CONDA_PREFIX if env[0] == '': env[0] = 'Python 3 (base)' if env[0] != '/home/jovyan/.local/envs/rtc_analysis': display(Markdown(f'<text style=color:red><strong>WARNING:</strong></text>')) display(Markdown(f'<text style=color:red>This notebook should be run using the "rtc_analysis" conda environment.</text>')) display(Markdown(f'<text style=color:red>It is currently using the "{env[0].split("/")[-1]}" environment.</text>')) display(Markdown(f'<text style=color:red>Select the "rtc_analysis" from the "Change Kernel" submenu of the "Kernel" menu.</text>')) display(Markdown(f'<text style=color:red>If the "rtc_analysis" environment is not present, use <a href="{notebookUrl.split("/user")[0]}/user/{user[0]}/notebooks/conda_environments/Create_OSL_Conda_Environments.ipynb"> Create_OSL_Conda_Environments.ipynb </a> to create it.</text>')) display(Markdown(f'<text style=color:red>Note that you must restart your server after creating a new environment before it is usable by notebooks.</text>')) # - # Check Python version: import sys pn = sys.version_info[0] # + # %%capture # Import packages: # Packages for analysis: import rasterio #pip install rasterio (version >=1.0.8, requires GDAL >=2.3.1) # %matplotlib inline import matplotlib #pip install matplotlib import matplotlib.pyplot as plt import numpy as np #pip install numpy import copy from datetime import datetime from glob import glob import os, subprocess, sys # Interactive widgets: import ipywidgets as widgets #pip install ipywidgets (included with Jupyter) from ipywidgets import Layout, VBox, Label, Checkbox, GridBox if pn == 2: import cStringIO #needed for the image checkboxes elif pn == 3: import io import base64 # 
Interactive time slider plots: import plotly #pip install plotly (version >= 3.0) import plotly.graph_objs as go from ipywidgets import interactive, HBox, VBox # Interactive time slider: import pandas as pd #pip install pandas ## Interactive plots w/ pixel selection from mpldatacursor import datacursor # For exporting: from PIL import Image import asf_notebook as asfn asfn.jupytertheme_matplotlib_format() # + ############################################################################### # All functions and classes and defined here in advance of their usage # below. The original idea was to have this cell either hidden or # immutable for user's of this Notebook. # #### Define functions and classes: class DataFile: ''' Data structure for extracting and storing relevant metadata about each data file. Also contains methods for reading and closing data files. For now, assume all dates are in format YYYYMMDD and all file names follow the same naming convention. Parameters: - filepath: Path to the SAR image file being read - convention: Tells the algorithm which naming convention to expect, and consequently, how to extract metadata from the filename Options: "new" - expects filename of the form: S1B_IW_RT10_20170423T040959_G_gpn_VV_subset.tif "old" - expects filename of the form: s1a-iw-rtch-vv-20170612T162820_coreg_subset.tif Attributes: - path: Path to the SAR image - dir: Directory containing the SAR image - name: Filename of SAR image - ext: File extension of the SAR image (e.g .tif, .img) - vtype: Polarization type: vv or vh - date: Date string as extracted from filename (in form of yyyymmdd) - datep: Date parsed into datetime format - datef: Date formatted into a presentable string for labeling - mean: Mean value of pixels in image - min: Minimum value of pixels in image - max: Maximum value of pixels in image Methods: - read_file: Reads in SAR image file - read_data: Reads in 2d array of raster values - extract_metadata: Extracts date and polarization type from 
filename - calculate_mean: Calculates mean pixel value - calculate_min: Calculates minimum pixel value - calculate_max: Calculates maximum pixel value - close: Close SAR image file to reduce memory usage - plot: Plots the image ''' def __init__(self, filepath, convention="new"): self.path = filepath self.dir = os.path.dirname(os.path.abspath(filepath)) self.name = filepath.split('/')[-1] self.ext = filepath.split('.')[-1] [self.vtype, self.date] = self.extract_metadata(self.name, convention) self.datep = datetime.strptime(self.date, '%Y%m%d') self.datef = self.datep.strftime("%B %d, %Y") self.mean = self.calculate_mean() self.min = self.calculate_min() self.max = self.calculate_max() def read_file(self): '''Read in geospatial raster using rasterio''' self.raw = rasterio.open(self.path) return(self.raw) def read_data(self): '''Read in 2d array of raster values''' raw = self.read_file() data = raw.read(1, out_shape=(1, int(raw.height), int(raw.width))) return(data) def extract_metadata(self, filename, convention): '''Extract metadata (vtype and date) from filename, assumes common naming convention ''' if convention == "new": #used for if "VV" in filename: vtype = "vv" elif "VH" in filename: vtype = "vh" else: vtype = "Null" print("Could not detect vtype") date = filename.split('_')[3].split('T')[0] elif convention == "old": if "-vv-" in filename: vtype = "vv" elif "-vh-" in filename: vtype = "vh" else: vtype = "Null" print("Could not detect vtype") date = filename.split('-')[-1].split('T')[0] return([vtype, date]) def calculate_mean(self): '''Calculate mean pixel value''' mean = np.mean(self.read_data()) self.close() return(mean) def calculate_min(self): '''Calculate minimum pixel value''' minval = np.min(self.read_data()) self.close() return(minval) def calculate_max(self): '''Calculate maximum pixel value''' maxval = np.max(self.read_data()) self.close() return(maxval) def close(self): '''Close SAR image file to reduce memory usage''' self.raw.close() def 
plot(self): '''Plot image data contained in object''' image_axis = plt.imshow(self.read_data(), cmap = plt.cm.gist_gray) self.close() return(image_axis) ############################################################################### class DateSet: '''Data structure to hold original, multi-looked, and classified VV and VH images and calculated VV/VH for each date. Parameters: - date: Date string in form of yyyymmdd - vv_df: VV DataFile corresponding to date - vh_df: VH DataFile corresponding to date Attributes: - vv_df: VV DataFile corresponding to date - vh_df: VH DataFile corresponding to date - date: Date string in form of yyyymmdd - datep: Date parsed into datetime format - datef: Date formatted into a presentable string for labeling - r_max: Maximum pixel value in VV/VH ratio - r_min: Minimum pixel value in VV/VH ratio - r_mean: Mean value of pixels in VV/VH ratio - class_3x3: Classification product generated from multi-looked VV, VH, and VV/VH values (appended to DateSet object after it is calculated) Methods: - close: Closes VV and VH image files ''' def __init__(self, date, vv_df, vh_df): if vv_df.date == vh_df.date: self.vv_df = vv_df self.vh_df = vh_df self.date = date self.datep = vv_df.datep self.datef = vh_df.datef r = self.vv_df.read_data()/self.vh_df.read_data() self.r_min = np.min(r) self.r_max = np.max(r) self.r_mean = np.mean(r) self.close() self.class_3x3 = None else: print("Could not create DateSet object. VV and VH dates do not match") def close(self): '''Close VV and VH image files by calling close method in VV and VH DataFile objects ''' self.vv_df.close() self.vh_df.close() ############################################################################### def gray_plot(image, vmin=0, vmax=2, return_ax=False): '''Plots an image in grayscale. 
Parameters: - image: 2D array of raster values - vmin: Minimum value for colormap - vmax: Maximum value for colormap - return_ax: Option to return plot axis ''' ax = plt.imshow(image, cmap = plt.cm.gist_gray) plt.clim(vmin,vmax) if return_ax: return(ax) ############################################################################### def pixel2pixel_plot(image, vmin=0, vmax=.2, dpi=192, cursor=True, full=True): '''Plots a full resolution image of a map where each pixel in the map corresponds to a pixel in the user's monitor. Parameters: - image: 2d array of raster values - vmin: Minimum value for colormap - vmax: Maximum value for colormap - dpi: User monitor DPI - cursor: Option to include an interactive datacursor - full: Option to increase the width of the Jupyter Notebook to the full size of the user's monitor. ''' if full: from IPython.core.display import display, HTML display(HTML("<style>.container { width:100% !important; }</style>")) xsize = np.size(image,1) ysize = np.size(image,0) plt.figure(figsize=(xsize/dpi, ysize/dpi),dpi=dpi) ax = gray_plot(image, vmin, vmax, True) if cursor: dc = datacursor(ax) plt.show() ############################################################################### def big_fig(x=20,y=10): '''Initializes a large figure. Parameters: - x, y: X and Y figure dimensions ''' return(plt.figure(figsize=(x,y))) ############################################################################### def get_files(data_directory, ext="tif", search="", convention="new"): '''Returns a list of the files in the data directory that have the desired extension. 
Parameters: - data_directory: Directory containing data files - ext: Desired file extension (default is .tif) - search: String that the function searchs for in filenames (default is "") - convention: Naming convention type expected by DataFile ("old" or "new") ''' file_list = [] ext_len = len(ext) for filename in os.listdir(data_directory): if filename[-ext_len::] == ext: if search not in filename: continue filepath = data_directory + filename data_file = DataFile(filepath, convention) file_list.append(data_file) return(file_list) ############################################################################### def find_minmax(date_sets): '''Returns a list of nested lists containing minimum and maximum VV, VH, and VV/VH values found from a list of DateSets to standardize color limits when plotting. Parameters: - date_sets: List of DateSets to compare ''' vv_running_min = 10 vv_running_max = 0 vh_running_min = 10 vh_running_max = 0 r_running_min = 10 r_running_max = 0 for ds in date_sets: # Check VV values if ds.vv_df.min < vv_running_min: vv_running_min = ds.vv_df.min if ds.vv_df.max > vv_running_max: vv_running_max = ds.vv_df.max # Check VH values if ds.vh_df.min < vh_running_min: vh_running_min = ds.vh_df.min if ds.vh_df.max > vh_running_max: vh_running_max = ds.vh_df.max # Check VV/VH values if ds.r_min < r_running_min: r_running_min = ds.r_min if ds.r_max > r_running_max: r_running_max = ds.r_max print("VV value range:") print(vv_running_min, vv_running_max) print("VH value range:") print(vh_running_min, vh_running_max) print("VV/VH value range:") print(r_running_min, r_running_max) return([[vv_running_min, vv_running_max], [vh_running_min, vh_running_max], [r_running_min, r_running_max] ] ) ############################################################################### def find_dates(data_files): '''Returns a list of unique dates in a list of VV and VH DataFiles. 
Parameters: - data_files: List of DataFiles to compare ''' date_set = set() for df in data_files: date_set.add(df.date) date_list = np.sort(list(date_set)) #convert set to list to access by index return(date_list) ############################################################################### def make_datesets(data_files, date_list): '''Returns a list of DateSet objects from list of DataFile objects and corresponding dates. Parameters: - data_files: List of DataFiles to combine into DateSets - date_list: List of corresponding dates ''' date_dic = {} dateset_list = [] for date in date_list: vtype_dic = {} date_dic[date] = vtype_dic for df in data_files: date = df.date vtype = df.vtype date_dic[date][vtype] = df for date in date_list: vtype_dic = date_dic[date] dateset = DateSet(date, vtype_dic['vv'], vtype_dic['vh']) dateset_list.append(dateset) return(dateset_list) ############################################################################### def vv_vh_r_plot(vv, vh, r, vv_min, vv_max, vh_min, vh_max, r_min=0, r_max=6, axstr="", xsize=9.5, ysize=3 ): ''' Plots a 3 panel figure of VV, VH, and VV/VH for a given date. 
Parameters: - vv, vh, r: 2D arrays of VV, VH, and R values - vv_min, vv_max: Minimum and maximum VV values for colormap limits - vh_min, vh_max: Minimum and maximum VH values for colormap limits - r_min, r_max: Minimum and maximum VV/VH values for colormap limits - axstr: String to plot along right side of figure (default is "") - xsize, ysize: X and Y dimensions for figure ''' fig = big_fig(xsize, ysize) # VV plot ax = plt.subplot(1, 3, 1) gray_plot(vv, vv_min, vv_max) plt.title("VV") # VH plot ax = plt.subplot(1, 3, 2) gray_plot(vh, vh_min, vh_max) plt.title("VH") # VV/VH plot ax = plt.subplot(1, 3, 3) gray_plot(r, r_min, r_max) plt.title("VV/VH") if axstr: # Set axis string ax.text(1.05, 0.5, axstr, rotation=90, ha='left', va='center', transform=ax.transAxes ) return(fig) ############################################################################### def vv_vh_cov_plot(vv_cov, vh_cov, vv_min, vv_max, vh_min, vh_max, axstr="", xsize=9.5, ysize=3 ): ''' Plots a 3 panel figure of VV, VH, and VV/VH for a given date. 
Parameters: - vv, vh, r: 2D arrays of VV, VH, and R values - vv_min, vv_max: Minimum and maximum VV values for colormap limits - vh_min, vh_max: Minimum and maximum VH values for colormap limits - r_min, r_max: Minimum and maximum VV/VH values for colormap limits - axstr: String to plot along right side of figure (default is "") - xsize, ysize: X and Y dimensions for figure ''' fig = big_fig(xsize, ysize) # VV cov plot ax = plt.subplot(1, 2, 1) gray_plot(vv_cov, vv_min, vv_max) plt.title("VV Coefficient of Variation") # VH cov plot ax = plt.subplot(1, 2, 2) gray_plot(vh_cov, vh_min, vh_max) plt.title("VH Coefficient of Variation") if axstr: # Set axis string ax.text(1.05, 0.5, axstr, rotation=90, ha='left', va='center', transform=ax.transAxes ) return(fig) ############################################################################### def save_plot3(date_set, vv_min, vv_max, vh_min, vh_max, r_min=0, r_max=6, pn=pn ): '''Returns a 3 panel (VV, VH, and VV/VH) plot saved using cStringIO so it can be used in multi_checkbox_widget_dateimages. 
Parameters: - date_set: DateSet object - vv_min, vv_max: Minimum and maximum VV values for colormap limits - vh_min, vh_max: Minimum and maximum VH values for colormap limits - r_min, r_max: Minimum and maximum VV/VH values for colormap limits - pn: Python version number (2 or 3) ''' plt.ioff() vv = date_set.vv_df.read_data() vh = date_set.vh_df.read_data() r = vv/vh axstr = date_set.datef fig = vv_vh_r_plot(vv, vh, r, vv_min, vv_max, vh_min, vh_max, r_min, r_max, axstr ) date_set.close() if pn == 2: figdata = cStringIO.StringIO() fig.savefig(figdata, format='png') plt.close(fig) plt.ion() return(figdata) elif pn == 3: figdata = io.BytesIO() fig.savefig(figdata, format='png') plt.close(fig) plt.ion() return(base64.b64encode(figdata.getvalue()).decode()) ############################################################################### def multi_checkbox_widget_dateimages(date_sets, vv_min, vv_max, vh_min, vh_max, r_min=0, r_max=6, pn=pn ): '''Returns a table of 3-paneled (VV, VH, and VV/VH) plots for each date with checkboxes next to each row so user can select dates to exclude based on visual inspection. 
Parameters: - date_sets: List of DateSet objects - vv_min, vv_max: Minimum and maximum VV values for colormap limits - vh_min, vh_max: Minimum and maximum VH values for colormap limits - r_min, r_max: Minimum and maximum VV/VH values for colormap limits - pn: Python version number (2 or 3) ''' nds = len(date_sets) descriptions = [save_plot3(ds, vv_min, vv_max, vh_min, vh_max, r_min, r_max) for ds in date_sets ] item_layout = Layout(height='auto', min_width='1000px') if pn == 2: items = [Checkbox(layout=item_layout, description='<img src=\"data:/png;base64,%s\"/>' % desc .getvalue() .encode("base64") .strip(), value=False ) for desc in descriptions ] elif pn == 3: items = [Checkbox(layout=item_layout, description='<img src=\"data:/png;base64,%s\"/>' % desc .strip(), value=False ) for desc in descriptions ] box_layout = Layout(border='3px solid black', width='auto', height='', grid_template_columns='1', grid_template_rows='auto' ) grid = GridBox(children=items, layout=box_layout) ws = VBox([Label('Select images to exclude:'), grid]) return(ws) ############################################################################### def get_excluded_dates3(ws, date_sets): '''Returns a list of dates that were selected by the user for exclusion in the image checkbox table. Parameters: - ws: Handle of image checkbox with selections - date_sets: List of DateSets used as input to the image checkbox ''' excluded_idx = [idx for idx, ws in enumerate(ws.children[1].children) if ws.value ] excluded_dates = [date_sets[idx].datef for idx in excluded_idx] return(excluded_dates) ############################################################################### def interactive_backscatter_plot(date_sets): '''Creates an interactive timeseries plot of backscatter values for a site. 
Modified code from https://plot.ly/python/range-slider/ Parameters: - date_sets: List of DateSets ''' dateps = [] vv_means = [] vh_means = [] for ds in date_sets: dateps.append(ds.datep) vv_means.append(10*np.log10(ds.vv_df.mean)) vh_means.append(10*np.log10(ds.vh_df.mean)) vv_trace = go.Scatter( x = dateps, y = vv_means, name = "vv", text = [str(vvm) for vvm in vv_means], yaxis = "y" ) vh_trace = go.Scatter( x = dateps, y = vh_means, name = "vh", text = [str(vhm) for vhm in vh_means], yaxis = "y" ) data = [vv_trace, vh_trace] # style all the traces for k in range(len(data)): data[k].update( { "hoverinfo": "name+x+text", "line": {"width": 0.5}, "marker": {"size": 8}, "mode": "lines+markers", "showlegend": False } ) layout = { "dragmode": "zoom", "hovermode": "x", "legend": {"traceorder": "reversed"}, "margin": { "t": 100, "b": 100 }, "xaxis": { "autorange": True, "range": ["2017-10-31 18:36:37.3129", "2018-05-10 05:23:22.6871"], "rangeslider": { "autorange": True, "range": ["2017-10-31 18:36:37.3129", "2018-05-10 05:23:22.6871"] }, "type": "date" }, "yaxis": { "anchor": "x", "autorange": True, "domain": [0, 1], "linecolor": "#673ab7", "mirror": True, "range": [-20,20], "showline": True, "side": "right", "tickfont": {"color": "#673ab7"}, "tickmode": "auto", "ticks": "", "title": "Power (dB)", "titlefont": {"color": "#673ab7"}, "type": "linear", "zeroline": False } } plotly.offline.init_notebook_mode(connected=True) fig = go.Figure(data=data, layout=layout) plotly.offline.iplot(fig) ############################################################################### def make_timeslider(date_sets): '''Creates and returns the handle to an interactive time slider that the user can use to select a time range of values to include. 
Parameters: - date_sets: List of DateSets ''' start_date = date_sets[0].datef end_date = date_sets[-1].datef date_range = pd.date_range(start_date, end_date, freq='D') options = [(date.strftime(' %b %d, %Y '), date) for date in date_range] index = (0, len(options)-1) selection_range_slider = widgets.SelectionRangeSlider( options=options, index=index, description='Dates', orientation='horizontal', layout={'width': '500px'}) return(selection_range_slider) ############################################################################### def get_slider_vals(selection_range_slider): '''Returns the minimum and maximum dates retrieved from the interactive time slider. Parameters: - selection_range_slider: Handle of the interactive time slider ''' [a,b] = list(selection_range_slider.value) slider_min = a.to_pydatetime() slider_max = b.to_pydatetime() return(slider_min, slider_max) ############################################################################### def filter_date_sets(date_sets, excluded_dates, slider_min, slider_max): '''Returns a list of DateSets that are filtered according to the excluded dates from the checkbox table and start/end dates retrieved from the interactive time slider. Parameters: - date_sets: List of DateSets to be filtered - excluded_dates: Dates chosen for exclusion from checkbox table - slider_min, slider_max: Minimum and maximum date values from time slider ''' filtered_date_sets = [] for ds in date_sets: in_time_range = ds.datep >= slider_min and ds.datep <= slider_max if ds.datef not in excluded_dates and in_time_range: filtered_date_sets.append(ds) return(filtered_date_sets) ############################################################################### def plot_all_dates(date_sets, vv_min, vv_max, vh_min, vh_max, r_min=0, r_max=6, xsize=10, ysize=20): '''Plots 3-panel images (VV, VH, and VV/VH) for each date. 
Parameters: - date_sets: List of DateSets - vv_min, vv_max: Minimum and maximum VV values for colormap limits - vh_min, vh_max: Minimum and maximum VH values for colormap limits - r_min, vr_max: Minimum and maximum VV/VH values for colormap limits - xsize, ysize: X and Y figure dimensions ''' big_fig(xsize,ysize) # initialize a large figure nds = len(date_sets) for ii in range(0, nds): pos = 3*ii + 1 # VV plot ax = plt.subplot(nds, 3, pos) gray_plot(date_sets[ii].vv_df.read_data(), vv_min, vv_max) if ii == 0: plt.title("VV") # VH plot ax = plt.subplot(nds, 3, pos+1) gray_plot(date_sets[ii].vh_df.read_data(), vh_min, vh_max) if ii == 0: plt.title("VH") # VV/VH plot ax = plt.subplot(nds, 3, pos+2) gray_plot( date_sets[ii].vv_df.read_data()/date_sets[ii].vh_df.read_data(), r_min, r_max ) ax.text(1.05, 0.5, date_sets[ii].datef, rotation=90, ha='left', va='center', transform=ax.transAxes ) if ii == 0: plt.title("VV/VH") date_sets[ii].close() ############################################################################### def calculate_temporal_avg(data_file_list, vtype="vh", start_date=0, end_date=9*10**13, verbose=False ): '''Calculates and returns the multi-temporal average of VV or VH data using a running sum. Parameters: - data_file_list: List of DataFiles. 
- vtype: SAR polarization type (vv or vh) - start_date, end_date: Start and end dates as numeric values - verbose: Option to print a message each time a DataFile is not of the correct vtype, or its date does not fall in the time range between start_date and end_date ''' files_in_range = [] count = 0 for data_file in data_file_list: check = True while check: if data_file.vtype is not vtype: if verbose: print( "Data is not of the correct polarization(VV or VH)" ) break file_date = int(data_file.date) if file_date < start_date or file_date > end_date: if verbose: print( "Data is not within the specified time frame" ) break data = data_file.read_data() if count == 0: running_sum = data running_sum2 = data**2 else: if data.shape != data.shape: if verbose: print( "Data are not the same dimensions (row x colum)" ) break running_sum += data running_sum2 += data**2 count += 1 data_file.close() files_in_range.append(data_file) check = False temporal_mean = running_sum/count st_dev = np.sqrt((running_sum2 - (running_sum**2)/count)/(count-1)) coef_of_variation = st_dev/temporal_mean return([temporal_mean, files_in_range, coef_of_variation]) ############################################################################### def calculate_r_cov(date_sets): '''Calculate the coefficient of variation for the VV/VH ratio. Parameters: - date_sets: List of DateSets ''' count = 0 for date in date_sets: vv = date.vv_df.read_data() vh = date.vh_df.read_data() r = vv/vh date.close() if count == 0: running_sum = r running_sum2 = r**2 else: running_sum += r running_sum2 += r**2 count += 1 temporal_mean = running_sum/count st_dev = np.sqrt((running_sum2 - (running_sum**2)/count)/(count-1)) r_cov = st_dev/temporal_mean return(r_cov) ############################################################################### def calculate_multitemporal_avg(date_sets): '''Calculates and returns the multi-temporal VV, VH, and VV/VH averages from a list of DateSets. 
Parameters: - date_sets: List of DateSets ''' vv_data_files = [ds.vv_df for ds in date_sets] vh_data_files = [ds.vh_df for ds in date_sets] [vv_avg, vv_files, vv_cov] = calculate_temporal_avg(vv_data_files, "vv") [vh_avg, vh_files, vh_cov] = calculate_temporal_avg(vh_data_files, "vh") r_avg = vv_avg/vh_avg return(vv_avg, vh_avg, r_avg, vv_cov, vh_cov) ############################################################################### class ClassThresholds: '''Class that contains upper and lower bounds for VV, VH, and VV/VH for a class as well as a method for checking membership based on VV, VH, and VV/VH values. Parameters: - vv_low, vv_hi: Lower and upper bounds for VV values in the class - vh_low, vh_hi: Lower and upper bounds for VH values in the class - r_low, r_hi: Lower and upper bounds for VV/VH values in the class Methods: - is_member: Check a set of VV, VH, and VV/VH values from a pixel to see if that pixel should be included in this class ''' def __init__(self, vv_low=-9999, vv_hi=9999, vh_low=-9999, vh_hi=9999, r_low=-9999, r_hi=9999): self.vv_low = vv_low self.vv_hi = vv_hi self.vh_low = vh_low self.vh_hi = vh_hi self.r_low = r_low self.r_hi = r_hi def is_member(self, vv_val, vh_val, r_val): '''Checks if the input VV, VH, and VV/VH values are sufficient to classify the pixel in this class. ''' vv_cond = vv_val > self.vv_low and vv_val < self.vv_hi vh_cond = vh_val > self.vh_low and vh_val < self.vh_hi r_cond = r_val > self.r_low and r_val < self.r_hi result = vv_cond and vh_cond and r_cond return(result) ############################################################################### def pixel_radius_average(image, mid_row, mid_col, radius): '''Calculates and returns the average pixel value for an image in a variable radius around a selected pixel. 
Parameters: - image: 2D array of raster values - mid_row, mid_col: Row and column coordinates of the selected pixel - radius: Radius size of the moving window (in pixels) ''' # Find indices of pixels within the radius row_rng = range(mid_row-radius,mid_row+radius+1) col_rng = range(mid_col-radius,mid_col+radius+1) good_inds = [] for row in row_rng: row_ind = row y_val = row - mid_row for col in col_rng: col_ind = col x_val = col - mid_col dist = x_val^2 + y_val^2 if dist <= radius^2: good_inds.append([row, col]) # Get values of pixels in radius vals_in_radius = [] for pix in good_inds: row = pix[0] col = pix[1] try: val = image[row][col] vals_in_radius.append(val) except Exception as e: print("Could not add pixel to list; check edge effects") print(e) # Calculate average mean = np.mean(vals_in_radius) return(mean) ############################################################################### def moving_window(data, radius=3, verbose=False): '''Applies a moving window average to a 2D array and returns the result. Parameters: - data: 2D array of raster values - radius: Radius size in pixels - verbose: Option to print a message whenever a pixel cannot be added to the window ''' [nrow, ncol] = data.shape windowed = copy.copy(data) for row in range(nrow): for col in range(ncol): vals_in_window = [] row_rng = range(row-radius,row+radius+1) col_rng = range(col-radius,col+radius+1) for y in row_rng: for x in col_rng: try: val = data[y][x] vals_in_window.append(val) except Exception as e: if verbose: print("Could not add pixel to list, " + "may be due to edge effects") print(e) windowed[row][col] = np.mean(vals_in_window) return(windowed) ############################################################################### def triple_classify(vv_data, vh_data, r_data, water_thresh, flooded_thresh ): '''Makes and returns a classification product based on VV, VH, and VV/VH values. 
Parameters: - vv_data, vh_data, r_data: VV, VH, and VV/VH arrays - water_thresh: ClassThreshold for the open water class - flooded_thresh: ClassThreshold for the inundated class ''' classified_image = copy.copy(r_data) for row in range(classified_image.shape[0]): for col in range(classified_image.shape[1]): vv_val = float(vv_data[row][col]) vh_val = float(vh_data[row][col]) r_val = float(r_data[row][col]) water = water_thresh.is_member(vv_val, vh_val, r_val) flooded = flooded_thresh.is_member(vv_val, vh_val, r_val) if water: classified_image[row][col] = 1 elif flooded: classified_image[row][col] = 3 else: classified_image[row][col] = 2 return(classified_image) ############################################################################### def plot_class_cov(typical_inundation, vv_cov, vh_cov, xsize=10,ysize=5): '''Makes and returns a classification product based on VV, VH, and VV/VH values. Parameters: - typical_inundation: - vv_cov, vh_cov: VV and VH coefficient of variance (cov) distributions\ - xsize, ysize: size parameters for the figure ''' kwargs = dict(histtype='stepfilled', alpha=.8, bins=1000) nrows = 2 ncols = 3 npanels = nrows*ncols big_fig(xsize, ysize) for x in range(1,npanels+1): if x < 4: vtype = "VV" vals = vv_cov class_num = x else: vtype = "VH" vals = vh_cov class_num = x - 3 indx = np.where(typical_inundation == class_num) ax = plt.subplot(nrows,ncols,x) plt.hist(vals[indx], **kwargs) ax.get_yaxis().set_visible(False) if x < 4: plt.title("Class: " + str(x)) if x == 3: ax.text(1.05, 0.5, "VV Coefficient of Variation", rotation=90, ha='left', va='center', transform=ax.transAxes ) if x == 6: ax.text(1.05, 0.5, "VH Coefficient of Variation", rotation=90, ha='left', va='center', transform=ax.transAxes ) ############################################################################### def refined_classify(vv_data, vh_data, r_data, vv_cov, vh_cov, water_thresh, flooded_thresh, not_flooded_thresh, nf_cov_thresh, f_cov_thresh, w_cov_thresh ): '''Makes and 
returns a refined classification product based on the typical inundation state and VV coefficient of variation. Parameters: - vv_data, vh_data, r_data: VV, VH, and VV/VH arrays - vv_cov, vh_cov: VV and VH coefficient of variance (cov) distributions - water_thresh: ClassThreshold for the open water class - flooded_thresh: ClassThreshold for the inundated class - not_flooded_thresh: ClassThreshold for the not inundated class - nf_cov_thresh: cov threshold for temporal change in not inundated areas - f_cov_thresh: cov threshold for temporal change in inundated areas - w_cov_thresh: cov threshold for temporal change in open water areas ''' classified_image = copy.copy(r_data) for row in range(classified_image.shape[0]): for col in range(classified_image.shape[1]): vv_val = float(vv_data[row][col]) vh_val = float(vh_data[row][col]) r_val = float(r_data[row][col]) water = water_thresh.is_member(vv_val, vh_val, r_val) flooded = flooded_thresh.is_member(vv_val, vh_val, r_val) not_flooded = not_flooded_thresh.is_member(vv_val, vh_val, r_val) if water: if vv_cov[row][col] > w_cov_thresh: classified_image[row][col] = 1 else: classified_image[row][col] = 0 elif flooded: if vv_cov[row][col] > f_cov_thresh: classified_image[row][col] = 4 else: classified_image[row][col] = 3 else: if vv_cov[row][col] > nf_cov_thresh: classified_image[row][col] = 4 else: classified_image[row][col] = 2 return(classified_image) ############################################################################### def refined_classify2(vv_data, vh_data, r_data, vv_cov, vh_cov, water_thresh, flooded_thresh, not_flooded_thresh, nf_vv_cov_thresh, f_vv_cov_thresh, w_vv_cov_thresh, nf_vh_cov_thresh, f_vh_cov_thresh, w_vh_cov_thresh ): '''Makes and returns a refined classification product based on the typical inundation state and VV & VH coefficients of variation. 
Parameters: - vv_data, vh_data, r_data: VV, VH, and VV/VH arrays - vv_cov, vh_cov: VV and VH coefficient of variance (cov) distributions - water_thresh: ClassThreshold for the open water class - flooded_thresh: ClassThreshold for the inundated class - not_flooded_thresh: ClassThreshold for the not inundated class - nf_cov_thresh: cov threshold for temporal change in not inundated areas - f_cov_thresh: cov threshold for temporal change in inundated areas - w_cov_thresh: cov threshold for temporal change in open water areas ''' classified_image = copy.copy(r_data) for row in range(classified_image.shape[0]): for col in range(classified_image.shape[1]): vv_val = float(vv_data[row][col]) vh_val = float(vh_data[row][col]) r_val = float(r_data[row][col]) water = water_thresh.is_member(vv_val, vh_val, r_val) flooded = flooded_thresh.is_member(vv_val, vh_val, r_val) not_flooded = not_flooded_thresh.is_member(vv_val, vh_val, r_val) if water: if vv_cov[row][col] > w_vv_cov_thresh: classified_image[row][col] = 1 else: classified_image[row][col] = 0 elif flooded: if vv_cov[row][col] > f_vv_cov_thresh or vh_cov[row][col] > f_vh_cov_thresh: classified_image[row][col] = 4 else: classified_image[row][col] = 3 else: if vv_cov[row][col] > nf_vv_cov_thresh or vh_cov[row][col] > nf_vh_cov_thresh: classified_image[row][col] = 4 else: classified_image[row][col] = 2 return(classified_image) ############################################################################### def refined_multilook_classify(filtered_date_sets, min_inundation, ow_min_count=False, ni_min_count=False, iv_min_count=False, return_counts=False ): ''' Function to perform a refined classification integrating the results of the multilooked classifications. The final refined classification is based off the count of occurences of each class in each pixel across the multilooked classifications. 
Parameters: - filtered_date_sets: List of DateSets that have multilooked classifications - min_inundation: Minimum inundation state classification - ow_min_count: Minimum count threshold for permanent open water - ni_min_count: Minimum count threshold for permanent not inundated - iv_min_count: Minimum count threshold for permanent inundated vegetation - return_counts: Option to return the occurence counts for each class ''' # If minimum count thresholds have not been provided, set them to # values scaled off the number of dates used ndates = len(filtered_date_sets) if not ow_min_count: ow_min_count = ndates/2 if not ni_min_count: ni_min_count = ndates if not iv_min_count: iv_min_count = ndates/2 # Count number of occurences of each class in each pixel across # classified multi-looked dates nrows = np.size(min_inundation, 0) ncols = np.size(min_inundation, 1) ow_count = np.zeros((nrows, ncols)) iv_count = np.zeros((nrows, ncols)) ni_count = np.zeros((nrows, ncols)) for ds in filtered_date_sets: for row in range(nrows): for col in range(ncols): val = ds.class_3x3[row][col] if val == 1: ow_count[row][col] += 1 elif val == 2: ni_count[row][col] += 1 elif val == 3: iv_count[row][col] += 1 # Make refined classification based off class occurence counts refined_inundation = copy.copy(min_inundation) for ds in filtered_date_sets: for row in range(nrows): for col in range(ncols): if ow_count[row][col] >= ow_min_count: refined_inundation[row][col] = 1 elif ni_count[row][col] >= ni_min_count: refined_inundation[row][col] = 2 elif iv_count[row][col] >= iv_min_count: refined_inundation[row][col] = 3 else: refined_inundation[row][col] = 4 # Return the class occurrence counts, if desired if return_counts: counts = [ow_count, iv_count, ni_count] return([refined_inundation, counts]) else: return(refined_inundation) ############################################################################### def prompt_already_multilooked(pn=pn): '''Asks the user if they have already classified 
and exported the multi-looked images and loads them if so. If not, starts workflow to create them. Parameters: pn: Python version number (2 or 3) ''' question = ("Have you already classified and exported the " + "multi-looked images? (y/n)") while True: try: if pn == 2: user_in = raw_input(question) elif pn == 3: user_in = input(question) except: print("Invalid response, try again") continue else: affirmative_answers = ["y", "Y","yes", "Yes"] negative_answers = ["n", "N", "No", "no"] if user_in in affirmative_answers: load_classified_3x3s(filtered_date_sets, out_directory) load_multilooked(filtered_date_sets, multi_directory) print("Loaded previously classified multi-looked images.") break elif user_in in negative_answers: multilook_all_dates(filtered_date_sets, water_thresh, flooded_thresh, multi_directory, ulx, uly, lrx, lry, proj ) print("Multi-looking and classification complete.") break else: print("Invalid response, try again") continue ############################################################################### def multilook_classify(date_set, water_thresh, flooded_thresh ): '''Classifies a multi-looked image and returns the result. Parameters: - date_set: A DateSet object from date to be classified - water_thresh: ClassThreshold object for open water class - flooded_thresh: ClassThreshold object for inundated class ''' vv_3x3 = moving_window(date_set.vv_df.read_data()) vh_3x3 = moving_window(date_set.vh_df.read_data()) r_3x3 = vv_3x3/vh_3x3 class_3x3 = triple_classify(vv_3x3, vh_3x3, r_3x3, water_thresh, flooded_thresh ) datef = date_set.datef date_set.close() return(class_3x3, datef) ############################################################################### def multilook_all_dates(filtered_date_sets, water_thresh, flooded_thresh, multi_directory, ulx, uly, lrx, lry, proj, classify=True ): '''Performs multi-looking on all input dates and stores result as an attribute (class_3x3) in the input DateSets. 
Parameters: - filtered_date_sets: List of filtered DateSets - water_thresh: ClassThreshold object for open water class - flooded_thresh: ClassThreshold object for inundated class - multi_directory: Directory where multilooked images are stored - ulx, uly, lrx, lry: Corner coordinates for exporting - proj: Projection for exporting ''' nds = len(filtered_date_sets) for idx, ds in enumerate(filtered_date_sets): print( str(datetime.now().time().strftime("%H:%M:%S")) + ") Processing data from " + ds.datef + "... [" + str(idx+1) + "/" + str(nds) + "]" ) vv_3x3 = moving_window(ds.vv_df.read_data()) vv_out = multi_directory + ds.vv_df.name.split('.')[0] + '3x3.tif' proc_export(vv_3x3, ulx, uly, lrx, lry, vv_out, proj, True) vh_3x3 = moving_window(ds.vh_df.read_data()) vh_out = multi_directory + ds.vh_df.name.split('.')[0] + '3x3.tif' proc_export(vh_3x3, ulx, uly, lrx, lry, vh_out, proj, True) r_3x3 = vv_3x3/vh_3x3 if classify: ds.class_3x3 = triple_classify( vv_3x3, vh_3x3, r_3x3, water_thresh, flooded_thresh ) ds.close() ############################################################################### def interactive_area_plot(classified_date_sets): '''Creates an interactive time series plot showing the total area and percentage of total area inundated. 
Parameters: - classified_date_sets: List of DateSets with classified results ''' # Modified code from https://plot.ly/python/range-slider/ dateps = [] areas= [] aps = [] for ds in classified_date_sets: dateps.append(ds.datep) [ds.area, ds.percent, total] = calculate_area_inundated(ds.class_3x3) areas.append(0.0001*ds.area) aps.append([0.0001*ds.area, ds.percent]) area_trace = go.Scatter( x = dateps, y = areas, name = "Inundated area", text = [str(ap[0]) + "ha (%.2f%%)" % ap[1] for ap in aps], yaxis = "y" ) data = [area_trace] # style all the traces for k in range(len(data)): data[k].update( { "hoverinfo": "name+x+text", "line": {"width": 0.5}, "marker": {"size": 8}, "mode": "lines+markers", "showlegend": False } ) layout = { "dragmode": "zoom", "hovermode": "x", "legend": {"traceorder": "reversed"}, "margin": { "t": 100, "b": 100 }, "xaxis": { "autorange": True, "range": ["2017-10-31 18:36:37.3129", "2018-05-10 05:23:22.6871"], "rangeslider": { "autorange": True, "range": ["2017-10-31 18:36:37.3129", "2018-05-10 05:23:22.6871"] }, "type": "date" }, "yaxis": { "anchor": "x", "autorange": True, "domain": [0, 1], "linecolor": "#673ab7", "mirror": True, "range": [0,10000], "showline": True, "side": "right", "tickfont": {"color": "#673ab7"}, "tickmode": "auto", "ticks": "", "title": "Inundated area (ha)", "titlefont": {"color": "#673ab7"}, "type": "linear", "zeroline": False } } plotly.offline.init_notebook_mode(connected=True) fig = go.Figure(data=data, layout=layout) plotly.offline.iplot(fig) ############################################################################### def min_max_inundation(typical_inundation, classified_date_sets): '''Returns the minimum and maximum inundation state by comparing all classified dates. 
Parameters: - typical_inundation: Classification product from the multi-temporal averages - classified_date_sets: DateSets that contain classifications for the multi-looked products ''' max_inundation = copy.copy(typical_inundation) min_inundation = copy.copy(typical_inundation) for row in range(typical_inundation.shape[0]): for col in range(typical_inundation.shape[1]): pixel_class_values = [ int(ds.class_3x3[row][col]) for ds in classified_date_sets ] default_class = typical_inundation[row][col] if 3 in pixel_class_values: max_inundation[row][col] = 3 if 2 in pixel_class_values: min_inundation[row][col] = 2 elif 1 in pixel_class_values: min_inundation[row][col] = 1 else: min_inundation[row][col] = 3 return(min_inundation, max_inundation) ############################################################################### def classified_plot(classified_image, datef=False): '''Plots a classified product using 5 classes. Parameters: - classified_image: 2D array of classified values - datef (optional): Formatted date string to be plotted along y-axis ''' fig, ax = plt.subplots(figsize=(9.5,9.5)) colors = ['b', 'c', 'g', 'y', 'm'] cmap = matplotlib.colors.ListedColormap(colors) cax = ax.imshow(classified_image, cmap=cmap, vmin = -.2, vmax = 4.2) cbar = fig.colorbar(cax, ticks=[0,1,2,3,4]) cbar.ax.set_yticklabels( ['Permanent Open Water', 'Seasonal Open Water', 'Permanent Not Inundated', 'Permanent Inundated Vegetation', 'Seasonal Inundation'] ) title_str = "Classified using VV, VH, and VV/VH brightness values: " if datef: title_str += datef else: title_str += "Multi-temporal average" ax.set_title(title_str) ############################################################################### def classified_plot2(classified_image, datef=False, colors=['b','g','y']): '''Plots classified product with 3 classes (Open water, not inundated, and inundated). 
Parameters: - classified_image: 2D array of classified values - datef (optional): Formatted date string to be plotted along y-axis - colors: List containing the names of the colors for the 3 classes ''' fig, ax = plt.subplots(figsize=(9.5,9.5)) cax = ax.imshow( classified_image, cmap=matplotlib.colors.ListedColormap(colors), vmin = 0, vmax = 4 ) cbar = fig.colorbar(cax, ticks=[.67,2,3.33]) cbar.ax.set_yticklabels(['Open Water', 'Not Inundated', 'Inundated']) title_str = "Classified using VV, VH, and VV/VH brightness values: " if datef: title_str += datef else: title_str += "Multi-temporal average" ax.set_title(title_str) ############################################################################### def refined_plot(classified_image, datef=False, colors=['b', 'w', 'y', 'g']): '''Plots the results of the refined classification (4 classes). Parameters: - classified_image: 2D array of refined classification values - datef (optional): Formatted date string to be plotted along y-axis ''' fig, ax = plt.subplots(figsize=(9.5,9.5)) cax = ax.imshow( classified_image, cmap=matplotlib.colors.ListedColormap(colors), vmin = .5, vmax = 4.5 ) cbar = fig.colorbar(cax, ticks=[1,2,3,4]) cbar.ax.set_yticklabels( ['Permanent open water', 'Not inundated', 'Permanent inundated vegetation', 'Seasonal inundation' ] ) title_str = "Refined classification using VV, VH, and VV/VH values: " if datef: title_str += datef else: title_str += "Multi-temporal average" ax.set_title(title_str) ############################################################################### def extract_spatial_metadata(data_file): '''Extracts and returns the projection, upper left, and lower right corners' projected coordinates to use when exporting. 
Parameters: - data_file: DataFile ''' data_file.read_data() prof = data_file.raw.profile affine = prof['transform'] ulx, uly = affine[2], affine[5] height = prof['height'] width = prof['width'] dx = affine[0] dy = affine[4] lrx = ulx + width*dx lry = uly + height*dy proj = "EPSG:" + str(prof['crs']).split('EPSG:')[-1] data_file.close() return(ulx, uly, lrx, lry, proj) ############################################################################### def proc_export(array, ulx, uly, lrx, lry, outpath, proj="EPSG:32605", clean_temp=True ): '''Exports array as a raster file (.tif). Parameters: - array: 2D image array - ulx, uly: Projected x and y coordinates of upper left corner - lrx, lry: Projected x and y coordinates of lower right corner - outpath: Path for output file - proj: Projection in GDAL-readable format ''' if not outpath.endswith(".tif"): print("Output filename must end with .tif") return(False) else: # Create temporary TIF file temp_path = outpath.split(".tif")[0] + "_temp.tif" temp_image = Image.fromarray(array) temp_image.save(temp_path) # Add spatial information with GDAL translate_command = "gdal_translate" translate_command += " -a_srs " + proj translate_command += " -a_ullr " + str(ulx) + " " + str(uly) translate_command += " " + str(lrx) + " " + str(lry) translate_command += " " + temp_path + " " + outpath subprocess.call(translate_command, shell=True) # Clean temporary file if clean_temp: clean_command = "rm " + temp_path subprocess.call(clean_command, shell=True) ############################################################################### def export_classified_3x3(classified_date_sets, out_dir): '''Exports the results of the classification of the multi-looked images as .tif files. 
Parameters: - classified_date_sets: List of DateSets with multi-look classifications - out_dir: Output directory ''' if out_dir[-1] != '/': out_dir += '/' for ds in classified_date_sets: c3x3 = ds.class_3x3 outpath = out_dir + ds.date + '_inundation.tif' proc_export(c3x3, ulx, uly, lrx, lry, outpath, proj, True) ############################################################################### def load_multilooked(filtered_dates_sets, multi_directory): for ds in filtered_date_sets: vv_out = multi_directory + ds.vv_df.name.split('.')[0] + '3x3.tif' vv_raw = rasterio.open(vv_out) ds.vv_3x3 = vv_raw.read( 1, out_shape=(1, int(vv_raw.height), int(vv_raw.width)) ) vh_out = multi_directory + ds.vh_df.name.split('.')[0] + '3x3.tif' vh_raw = rasterio.open(vh_out) ds.vh_3x3 = vh_raw.read( 1, out_shape=(1, int(vh_raw.height), int(vh_raw.width)) ) ds.r_3x3 = ds.vv_3x3/ds.vh_3x3 ############################################################################### def load_classified_3x3s(filtered_dates_set, out_dir): '''Loads the previously exported multi-looked classifications and appends them to existing DateSets. Parameters: - filtered_date_sets: DateSets that have been filtered for inclusion - out_dir: Path to directory where previously exported results are stored ''' for ds in filtered_date_sets: data_dir_search = out_dir + "*" + ds.date + "_inundation.tif" search = glob(data_dir_search) if len(search) == 1: raw = rasterio.open(search[0]) ds.class_3x3 = raw.read( 1, out_shape=(1, int(raw.height), int(raw.width)) ) ############################################################################### def calculate_area_inundated(classified, res=10): '''Calculates and returns area inundated (and percent area inundated). Parameters: - classified: Classified 2D array - res: Pixel size (in projected units, e.g. 
meters) ''' nrow, ncol = classified.shape xlength = ncol*res ylength = nrow*res total_area = xlength * ylength count_inundated = 0 res2 = res**2 for row in range(nrow): for col in range(ncol): if classified[row][col] == 3: count_inundated += 1 area_inundated = count_inundated*res2 percent_inundated = float(area_inundated)*100/total_area return([area_inundated, percent_inundated, total_area]) ############################################################################### def proc_resample(path, overwrite=False): '''Resamples a file using the GDAL shell library and creates a new file. Parameters: - path: Path to the file to be resampled - overwrite: Option to overwrite existing resampled result ''' output = path.split('.tif')[0] + '_resampled.tif' pieces = ["gdalwarp -tr 100 100", path, output] if overwrite: pieces.append("-overwrite") gdalwarp_command = " ".join(pieces) print(gdalwarp_command) subprocess.call(gdalwarp_command, shell=True) def resample_all(directory, overwrite=False): '''Resamples all files in a specified directory. Parameters: - directory: Path to directory - overwrite: Option to overwrite existing resampled output ''' figs = os.listdir(directory) paths = [os.path.join(directory, fig) for fig in figs] for path in paths: if 'resampled' not in path and path.endswith('.tif'): proc_resample(path, overwrite) ############################################################################### def proc_gdalwarp(infile, proj="EPSG:32605", outfile="null"): '''Projects a raster into specified projection. Parameters: - infile: Path to file to be projected - proj: String in GDAL-readable format specifying projection (default is WGS 84 - UTM Zone 5N). - outfile: Path to output file (filename will be automatically generated if none is specified) ''' if outfile == "null": ext = infile.split(".")[-1] outfile = infile.split(".")[0] + "_proj." 
+ ext warp_command = "gdalwarp -t_srs " + proj + " " + infile + " " + outfile subprocess.call(warp_command, shell=True) ############################################################################### def proc_translate(infile, outfile="null"): '''Converts a raster file into ENVI format and produces an ENVI header file while doing so. Parameters: - infile: Path to file to be converted - outfile: Path to output file ''' if outfile == "null": outfile = infile.split(".")[0] + ".img" translate_command = "gdal_translate -of ENVI " + infile + " " + outfile subprocess.call(translate_command, shell=True) ############################################################################### # - # <br> # <hr> # <br> # <font face="Century Gothic"> # <font size="3"><b>Load Data</b><br></font> # For this exercise, we will be using a VV/VH Sentinel-1 data stack over Selawik, Alaska. The town of Selawik is located in the northwest of Alaska on the coast to the Chukchi and Bering sea. It is prone to heavy rains and extensive inundation during the breakup and summer seasons. We will use Sentinel-1 data to map inundated vegetation and understand the persistence of inundation in the area. 
# # Before we get started, let's first <b>create a working directory for this analysis and download the relevant data into the notebook"</b> # </font> project_dir = "/home/jovyan/notebooks/SAR_Training/English/Ecosystems/S1-InundationMapping/" time_series_path = 's3://asf-jupyter-data/S1-InundationMapping.zip' time_series = os.path.basename(time_series_path) # !aws --region=us-east-1 --no-sign-request s3 cp $time_series_path $time_series if asfn.path_exists(time_series): asfn.asf_unzip(os.getcwd(), time_series) os.remove(time_series) os.chdir(project_dir) site_name = "SelawikZoom" data_directory = "./data/" + site_name + "/" out_directory = "./out/" + site_name + "/" multi_directory = data_directory + "multi/" # <font face="Century Gothic"> # <font size="3"><b>Selection Process</b><br></font> # After selecting the desired time range, this section outputs a list of the files, their VV, VH, and VV/VH ratio value <br>ranges, image thumbnails, and plots of image brightness. High increases in image brightness may be due to <br>different reasons such as the presence of snow or ice cover, wind and weather changes, or calibration errors, but <br>with the selection options below, users can visually evaluate the image collection and select which dates to <br>include or exclude from analysis. 
# </font> # + # Get list of files in data_directory with the correct file extension # and contain the search string in their filename ext = ".tif" search = "subset" data_files = get_files(data_directory, ext, search, "new") # Find dates dates = find_dates(data_files) # Make DateSets (store VV and VH DataFiles into a single object) date_sets = make_datesets(data_files, dates) datesf = [ds.datef for ds in date_sets] # Find minimum and maximum VV and VH values of the DataFiles (use these to # standardize color limits) [[vv_min, vv_max], [vh_min, vh_max], [r_min, r_max]] = find_minmax(date_sets) # Print dates print("\nDates:") datesf # - #### Make adjustments to plotting color limits vv_min = 0 #minimum VV value vv_max = .15 #maximum VV value vh_min = 0#minimum VH value vh_max = .03 #maximum VH value r_min = 0 #minimum VV/VH value r_max = 15 #maximum VV/VH value ############################################################################### # Plot all images with checkboxes to select images for exclusion: ws3 = multi_checkbox_widget_dateimages( date_sets, vv_min, vv_max, vh_min, vh_max, r_min, r_max ) ws3 # Get indices of checked options #(Can only be run once per creation of the box above, need to re-run code above) excluded_dates3 = get_excluded_dates3(ws3, date_sets) print("Dates to be excluded:") excluded_dates3 ############################################################################### # Interactive plot time series of average brightness per image interactive_backscatter_plot(date_sets) # Make time slider to select start and end dates selection_range_slider = make_timeslider(date_sets) selection_range_slider # + # Get start and end dates from time slider (re-run this to get updated times) slider_min, slider_max = get_slider_vals(selection_range_slider) # Filter dates according to checkbox and time slider filtered_date_sets = filter_date_sets(date_sets, excluded_dates3, slider_min, slider_max) # Extract spatial metdata from filtered images to export classified 
# Typical inundation is defined as the classification of the multi-temporal average images (Ia).<br>
# </font> # Calculate multi-temporal average for VV, VH, and VV/VH over selected date range (vv_avg, vh_avg, r_avg, vv_cov, vh_cov) = calculate_multitemporal_avg(filtered_date_sets) # Plot them: fig_xsize = 10 fig_ysize = 7 f = vv_vh_r_plot(vv_avg, vh_avg, r_avg, vv_min, vv_max, vh_min, vh_max, r_min, r_max, "Multi-temporal averages", fig_xsize, fig_ysize ) # Large plot of multi-temporal average of VV values to inspect pixel values fig_xsize = 8 fig_ysize = 8 big_fig(fig_xsize, fig_ysize) gray_plot(vv_avg, vv_min, vv_max) # Full-resolution plot of multi-temporal average of VV dpi = 192 #change this to match your monitor's DPI cursor = True #option to enable data cursor in resulting image full = True #option to increase Jupyter Notebook width to monitor size #pixel2pixel_plot(vv_avg, vv_min, vv_max, dpi, cursor, full) # Comment/uncomment this line to run # Large plot of multi-temporal average of VH values to inspect pixel values big_fig(fig_xsize, fig_ysize) gray_plot(vh_avg, vh_min, vh_max) # Full-resolution plot of multi-temporal average of VH dpi = 192 #change this to match your monitor's DPI cursor = True #option to enable data cursor in resulting image full = True #option to increase Jupyter Notebook width to monitor size # pixel2pixel_plot(vh_avg, vh_min, vh_max, dpi, cursor, full) # Comment/uncomment this line to run # Large plot of multi-temporal average of VV/VH values to inspect pixel values big_fig(fig_xsize, fig_ysize) gray_plot(r_avg, r_min, r_max) # Full-resolution plot of multi-temporal average of VV/VH dpi = 192 #change this to match your monitor's DPI cursor = True #option to enable data cursor in resulting image full = True #option to increase Jupyter Notebook width to monitor size # pixel2pixel_plot(r_avg, r_min, r_max, dpi, cursor, full) # uncomment this line to run # Plot coefficient of variation for VV and VH f = vv_vh_cov_plot(vv_cov, vh_cov, 0, 1, 0, 1, "Coefficient of Variation", 10, 7 ) # Large plot of VV coefficient of variation 
big_fig(fig_xsize, fig_ysize) gray_plot(vv_cov, 0, 1) # Full resolution plot of VV/VH coefficient of variation dpi = 192 #change this to match your monitor's DPI cursor = True #option to enable data cursor in resulting image full = True #option to increase Jupyter Notebook width to monitor size # pixel2pixel_plot(r_cov, 0, 1, dpi, cursor, full) # Comment/uncomment this line to run # Calculate VV/VH coefficient of variation r_cov = calculate_r_cov(filtered_date_sets) # Large plot of VV/VH coefficient of variation big_fig(fig_xsize, fig_ysize) gray_plot(r_cov, 0, 1) # Full resolution plot of VV/VH coefficient of variation dpi = 192 #change this to match your monitor's DPI cursor = True #option to enable data cursor in resulting image full = True #option to increase Jupyter Notebook width to monitor size # pixel2pixel_plot(r_cov, 0, 1, dpi, cursor, full) # Comment/uncomment this line to run # Export multi-temporal averages and coefficient of variation # VV vv_avg_out = out_directory + 'vv_avg.tif' proc_export(vv_avg, ulx, uly, lrx, lry, vv_avg_out, proj, True) # VH vh_avg_out = out_directory + 'vh_avg.tif' proc_export(vh_avg, ulx, uly, lrx, lry, vh_avg_out, proj, True) # VV/VH r_avg_out = out_directory + 'r_avg.tif' proc_export(r_avg, ulx, uly, lrx, lry, r_avg_out, proj, True) # VV CoV vv_cov_out = out_directory + 'vv_cov.tif' proc_export(vv_cov, ulx, uly, lrx, lry, vv_cov_out, proj, True) # VH CoV vh_cov_out = out_directory + 'vh_cov.tif' proc_export(vh_cov, ulx, uly, lrx, lry, vh_cov_out, proj, False) # + ############################################################################### # Derive typical inundation from multi-temporal averages with rules-based # classification. These thresholds are determined through pixel inspection # and analysis from temporal averages, and can be adjusted by the user. 
# Set threshold values for the different classes '''Example: class_thresh = ClassThresholds(class_vv_min, class_vv_max, class_vh_min, class_vh_max, class_r_min, class_r_max ) ''' water_thresh = ClassThresholds(0, 0.07, 0, 0.0045, 0, 1000 ) flooded_thresh = ClassThresholds(0.12, 1, .0045, 1.5, 4, 50 ) # Typical inundation, or the average inundation over a time period, is # found using a triple classification function. In this triple classification, # the function runs through and uses each of the different class thresholds # from VV, VH,and VV/VH to create a single classification output. # Find typical inundation from multi-temporal averages of VV, VH, and VV/VH typical_inundation = triple_classify( vv_avg, vh_avg, r_avg, water_thresh, flooded_thresh) # - # Plot typical inundation state classified_plot2(typical_inundation) #plt.savefig('typical_inundation.pdf') #Save figure as pdf # Export typical inundation state as GeoTIFF typical_out = out_directory + 'typical_inundation.tif' proc_export(typical_inundation, ulx, uly, lrx, lry, typical_out, proj, True) # <font face="Century Gothic"> # <font size="3"><b> # Multi-looked Classification # </font></b><br> # Inundation extent can fluctuate greatly. Individual images or dates can also be classified to analyze temporal variability and make specific date comparisons. Multi-looked views are applied to each date's VV, VH, and VV/VH images to help smooth and reduce speckle. The multi-look view uses a 3x3 moving window to average neighboring pixels. <br> # <br><b>For each date: </b><br> # <ul> # <li> Calculate multi-looked view for VV, VH, and VV/VH</li> # <li> Perform rules-based classification</li> # </ul> # # Note: I precalculated all necessary multilooked images. <font color='rgba(200,0,0,0.2)'><b>So please answer the question in the next code cell with YES!</b></font> # </font> # + # Ask user if they have previously classified and exported the multi-looked images. 
# If the user has not classified and exported the multi-looked images,
# the script will multi-look all filtered dates using a 3x3 moving window,
# perform rules-based classification,
# and save the classification to the DateSet objects in filtered_date_sets.
# WARNING: This takes several minutes per date!

# If the user has already classified and exported the multi-looked images,
# the script will load the exported .tif files located in ./out/
# and append them to the DateSet objects in filtered_date_sets.

# NOTE: This step takes very long!! Therefore, I have preprocessed all multi-looked data for you.
# PLEASE ANSWER THE PROMPTED QUESTION WITH YES for this data set!!
prompt_already_multilooked()
# -

# Export all classified multi-looked dates
export_classified_3x3(filtered_date_sets, out_directory)

# Calculate and plot change in inundated area over time
interactive_area_plot(filtered_date_sets)

# Find minimum and maximum inundation across all classified dates,
# relative to the typical inundation state computed above.
min_inundation, max_inundation = min_max_inundation(
    typical_inundation, filtered_date_sets
)

# Plot minimum inundation
classified_plot2(min_inundation, "Minimum inundation")
#plt.savefig('minimum_inundation.pdf') #Save figure as pdf

# Plot maximum inundation
classified_plot2(max_inundation, "Maximum inundation")
#plt.savefig('maximum_inundation.pdf') #Save figure as pdf

# Export minimum and maximum inundation maps as GeoTIFF
min_out = out_directory + 'minimum_inundation.tif'
max_out = out_directory + 'maximum_inundation.tif'
proc_export(min_inundation, ulx, uly, lrx, lry, min_out, proj, True)
proc_export(max_inundation, ulx, uly, lrx, lry, max_out, proj, True)

# <font face="Century Gothic">
# <font size="3"><b>
# Multi-looked-based refined classification
# </font></b><br>
# Detect areas of seasonal inundation using change detected from multi-looked dates. <br>
#
# </font>

# +
# Perform refined classification based on number of class occurrences per pixel
# across all multi-looked dates. User can elect to alter the default minimum
# counts below by changing the min_count values to a number less than
# or equal to the number of multi-looked dates.

# Class occurrence count thresholds
# (Leave as False to use default values, or change to numeric values)
ow_min_count=False #Open water minimum count
ni_min_count=10 #Not inundated minimum count
iv_min_count=10 #Inundated vegetation minimum count

refined_inundation = refined_multilook_classify(filtered_date_sets,
                                                min_inundation,
                                                ow_min_count,
                                                ni_min_count,
                                                iv_min_count
                                                )
# -

# Plot refined classification
refined_plot(refined_inundation, "Refined classification")

# Export refined classification as GeoTIFF
refined_out = out_directory + "refined_classification2_dswe.tif"
proc_export(refined_inundation, ulx, uly, lrx, lry, refined_out, proj, True)

# <hr>
# <br>
# <font face="Century Gothic" size="5"> <b> 8. Conclusion</b> </font>
#
# <font face="Century Gothic" size="3">Multi-temporal SAR data from Sentinel-1 are a good basis for identifying inundated areas and distinguishing seasonal inundation from short-term inundation. Note, however, that due to limited penetration into dense vegetation, the performance of C-band Sentinel-1 data for inundation mapping in Colombia will be limited. A better choice will be L-band SAR data from future missions such as NISAR. The higher penetration of L-band will improve inundation mapping performance. The same workflow can be used for these future L-band data.
#
# For a bit more information on change detection and SAR in general, please look at the recently published <a href="https://gis1.servirglobal.net/TrainingMaterials/SAR/SARHB_FullRes.pdf" target="_blank">SAR Handbook: Comprehensive Methodologies for Forest Monitoring and Biomass Estimation</a>.
# </font> # <hr> # <br> # <font face="Calibri" size="2"> <i>Exercise7-InundationMappingfromSARTimeSeries-Example.ipynb - Version 1.3.0 - April 2021 # <br> # <b>Version Changes:</b> # <ul> # <li>namespace asf_notebook</li> # </ul> # </i> # </font>
SAR_Training/English/Ecosystems/Exercise7-InundationMappingfromSARTimeSeries-Example.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- import numpy as np from pint import UnitRegistry ureg = UnitRegistry() Q_ = ureg.Quantity angle = 45 np.deg2rad(angle) angle = 45 * ureg.degree np.deg2rad(angle) angle = 0.785 * ureg.radian np.deg2rad(angle) angle = 45 * ureg.dimensionless np.deg2rad(angle)
code/soln/test_deg2rad.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# +
import requests
import json
from pyspark import SparkContext, SparkConf
from pyspark.sql import SparkSession
from pyspark.sql.functions import from_utc_timestamp, to_timestamp, to_date, col
import nltk
import pandas as pd
import numpy as np
import scipy
from matplotlib import pyplot as plt
import datetime
import time
import pickle
import matplotlib.dates as mdates
import matplotlib.pyplot as plt

# +
PUSHSHIFT_REDDIT_URL = "http://api.pushshift.io/reddit"

def fetchObjects(**kwargs):
    """Fetch one page of Reddit objects from the Pushshift search API.

    Keyword arguments are passed straight through as query parameters
    (e.g. subreddit=..., before=..., after=...). The special key 'type'
    selects the endpoint: "submission", or "comment" (default).

    Returns the page's records sorted by the numeric value of their
    base-36 id, or None if the request failed.
    """
    # Default params values
    params = {"sort_type":"created_utc","sort":"asc","size":1000}
    for key,value in kwargs.items():
        params[key] = value
    print(params)
    # FIX: local was previously named `type`, shadowing the builtin.
    endpoint = "comment"
    if 'type' in kwargs and kwargs['type'].lower() == "submission":
        endpoint = "submission"
    r = requests.get(PUSHSHIFT_REDDIT_URL + "/" + endpoint + "/search/",params=params)
    if r.status_code == 200:
        response = json.loads(r.text)
        data = response['data']
        # Stable ordering by id so id-based pagination below is monotonic.
        sorted_data_by__id = sorted(data, key=lambda x: int(x['id'],36))
        return sorted_data_by__id
    # FIX: make the failure path explicit (was an implicit None fall-through).
    return None

def process(**kwargs):
    """Download all matching comments page by page and return a DataFrame.

    Pages through Pushshift (via fetchObjects) starting from the hard-coded
    2020-01-01 timestamp, keeping only records with ids not seen before.
    When a page yields no new records, the accumulated data is written to a
    parquet file and returned.

    FIXES relative to the original:
    - the loop now terminates and the function returns the DataFrame; it
      previously looped forever and returned None, so the downstream
      pd.concat(map(process, ...)) could never succeed,
    - per-page frames are collected in a list and concatenated once instead
      of the deprecated, quadratic DataFrame.append inside the loop,
    - `objects is None` identity test instead of `== None`,
    - a failed request backs off for a second instead of retrying in a
      tight loop,
    - locals no longer shadow the builtins `object` and `id`.
    """
    max_created_utc = 1577750400  # 2020-01-01 00:00:00 UTC
    max_id = 0
    chunks = []  # one DataFrame per fetched page; concatenated at the end
    #file = open("data.json","w")
    while 1:
        nothing_processed = True
        d = {'created_utc': [], 'body': [], 'subreddit': [], 'score': [],'author':[]}
        objects = fetchObjects(**kwargs,after=max_created_utc)
        if objects is None:
            print("pushshift failed")
            time.sleep(1)  # back off before retrying the same page
            continue
        for record in objects:
            record_id = int(record['id'],36)
            if record_id > max_id:
                nothing_processed = False
                created_utc = record['created_utc']
                max_id = record_id
                if created_utc > max_created_utc:
                    max_created_utc = created_utc
                d['body'].append(record['body'])
                d['created_utc'].append(created_utc)
                d['author'].append(record['author'])
                d['subreddit'].append(record['subreddit'])
                d['score'].append(record['score'])
        chunks.append(pd.DataFrame(d))
        if nothing_processed:
            # No new ids on this page: we have everything. Persist and return.
            data = pd.concat(chunks, ignore_index=True)
            data.to_parquet("dataframes/2020_comments/{subreddit}-2020_comments.parquet".format(**kwargs))
            return data
        max_created_utc -= 1  # re-fetch from just before the newest timestamp
        time.sleep(1)         # be polite to the API

subreddits = ["Pete_Buttigieg"]
# NOTE: map() is lazy -- the downloads actually run when pd.concat consumes it.
frames = map(lambda sub : process(subreddit=sub,type="comment",before=1588291200),subreddits)
data = pd.concat(frames)
data
# -
Create_Graphs.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     name: python3
# ---

# + [markdown] id="HFQsXwLUNHLc" colab_type="text"
# UNIVARIATE DATASET:
#
#
# x y
#
# 0 0.2
#
# 0.5 0.78
#
# 1 0.89
#
# 1.5 1.2
#
# 2 2.2
#
# 2.5 2.5
#
# 3 2.789
#
# 3.5 4.3
#
# Since this is a univariate dataset we will not apply feature scaling

# + colab_type="code" id="C4HZx7Gndbrh" colab={"base_uri": "https://localhost:8080/", "height": 415} outputId="90770a35-9298-440e-c36f-a9c1c24d11b6"
#import libraries
import numpy as np
from matplotlib import pyplot as plt

# Module-level histories populated by gradient() and consumed by the
# plotting cells further below.
ml=[]          # slope (m) after each iteration
cl=[]          # intercept (c) after each iteration
rl=[]          # summed residual after each iteration
alpha_value=0  # learning rate of the last run (set by gradient())
iterate=0      # iteration count of the last run (set by gradient())
#init_R=0 ---will be used for automatic optimization

def gradient(m=0,c=7,alpha=0.1,iter=50000):
    """Fit y = m*x + c to the hard-coded dataset by batch gradient descent.

    m, c      -- initial slope and intercept (seed values)
    alpha     -- learning rate
    iter      -- number of gradient-descent iterations

    Side effects: appends per-iteration histories to the module-level
    lists ml, cl and rl, and records alpha/iter in the module-level
    globals alpha_value and iterate (used by the plotting cells below).

    Returns [m, c, mse] for the final parameters.
    """
    global iterate
    iterate= iter
    global alpha_value
    alpha_value=alpha
    #get the dataset as a numpy array
    s=np.array([0.2,0.78,.89,1.2,2.2,2.5,2.789,4.3])
    x=np.array([0,0.5,1,1.5,2,2.5,3,3.5])
    yp=m*x+c
    n=len(s)
    i=0
    while(i<iter):
        i+=1
        R=yp-s  # residuals of the current fit
        global rl
        rl.append(R.sum())
        # Batch update of both parameters from the same residual vector
        m=m-(alpha*sum(R*x)*(1/n))
        c=c-(alpha*sum(R)*(1/n))
        global ml
        global cl
        cl.append(c)
        ml.append(m)
        yp=m*x+c
    return [m,c,(((yp-s)**2)/n).sum()]

s=np.array([.2,.78,.89,1.20,2.20,2.5,2.789,4.300])
n=len(s)

#Run Gradient Descent (seed_value,seed_value,alpha,iterations)
m,c,res= gradient(1,0.5,0.03,300)
xi=np.arange(iterate)
x=np.array([0,.5,1,1.5,2,2.5,3,3.5])

#Evaluate the model
yp = m*x + c
print("MODEL parameters:")
print("iterations: ",iterate)
print("alpha: ",alpha_value)
if(c>=0):
    print("Hypothesis Line: "+str(m)+'*x + '+str(c))
else:
    print("Hypothesis Line: "+str(m)+'*x '+str(c))
print('MSE: %.4f '%res)
print('\n\n\n')
plt.scatter(x,s,color='red')
plt.plot(x,yp)
plt.title("Regression Line")
plt.xlabel("Feature")
plt.ylabel("Response")
plt.show()

# + [markdown] id="z5adSQUEM9WI" colab_type="text"
# # gradient descent is given by
# Theta0=Theta0-(1/2m*d/dTheta0(sum(yp-yi))**2
#
# Theta1=Theta1-(1/2m*d/dTheta1(sum(yp-yi))**2
#
# yp= Theta0 + Theta1*x
#
# so
#
# Theta0=Theta0-(1/m*sum(R))
#
# Theta1=Theta1-(1/m*sum(R)xi)
#
# iterate until convergence

# + id="tOOU4Gpp95jm" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 295} outputId="800de439-e139-4566-a9a8-cd4b106fe827"
# Plot the squared residual history recorded by gradient() above.
rm=np.array(rl)
plt.plot(xi,rm**2,color='orange')
plt.plot(xi,np.zeros(iterate),color='gray')
plt.title("Gradient Descent")
plt.xlabel("Iterations")
plt.ylabel("Residual Error")
plt.show()

# + [markdown] id="bS-QfFSKJvgQ" colab_type="text"
# USE THE MODEL TO PREDICT VALUES
#

# + id="quQYmZBkJrw8" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 35} outputId="8a016d4b-9658-4e41-842d-4af8369daa70"
def predict(feature):
    """Predict the response for `feature` using the fitted globals m and c."""
    return m*feature+c

response=predict(2)
print("Predicted Response: %.4f"%response)
Gradient_Descent_(Univariate).ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# + 
# default_exp labrie2008
# -

# # LaBrie et al 2008
#
# > Full replication

# This notebook reproduces every table in LaBrie et al's 2008 paper on casino gambling behaviour. To get started, download the raw data from the link below from the transparency project's website. The data we need is **Raw Dataset 2** (**text version**) under the title '*Virtual Casino Gambling: February 2005 through February 2007*' towards the bottom of the page.
#
# Once you've downloaded and extracted it, you should see a file called **RawDataSet2_DailyAggregCasinoTXT.txt** - copy this into the same directory as this notebook to begin.

# - [Data Download (thetransparencyproject.org)](http://www.thetransparencyproject.org/download_index.php)
# - [Original data description](http://www.thetransparencyproject.org/codebooks/Codebook_for_Virtual_Casino_Gambling.pdf)
# - [Original paper link](https://academic.oup.com/eurpub/article/18/4/410/477060)
#
# The first step is to import the [*gamba*](https://github.com/gamba-dev/gamba) framework, run the cell below to do so. If this cell throws an error, [see the install documentation page](https://gamba.dev) to make sure you have [*gamba*](https://github.com/gamba-dev/gamba) installed.

import gamba as gb

# With [*gamba*](https://github.com/gamba-dev/gamba) loaded, the next step is to get the data into a usable format. To do this, we call the `prepare_labrie_data` method from the data module. This does two things, first it renames the columns to values compatible with the [*gamba*](https://github.com/gamba-dev/gamba) framework, then it saves this newly compatible dataframe as a new csv file (in case it's needed elsewhere).

all_player_bets = gb.data.prepare_labrie_data('RawDataSet2_DailyAggregCasinoTXT.txt')

# In two lines of code we're ready to start the analysis, and have each player's transactions individually saved in case anything goes wrong or we want to take a sample. The next step is to load in the data we just prepared, this uses some magic from the [glob](https://docs.python.org/3/library/glob.html#module-glob) library to load every CSV file in the `labrie_individuals/` folder into the variable `all_player_bets`.

# If we want to do any other analysis on all of the players this is where we would add new methods, but let's crack on with calculating each of the measures described in the paper - which includes things like **frequency**, **duration**, **total amount wagered**, etc. Heads up: this calculation can take up to 10 minutes on a normal computer, so now is a great time to share this page with a colleague, or [tweet us your feedback](https://twitter.com/gamba_dev)!

# calculate measures and save them (written to gamba_labrie_measures.csv)
measures_table = gb.measures.calculate_labrie_measures(all_player_bets, loud=True)

# The cell above took a while to finish, to make sure we don't have to do that computation again the output has been saved as `gamba_labrie_measures.csv` next to this notebook. We'll come back to this file later to make sure this recreation matches the original, but lets keep going! Time for the first meaningful output, the first table in the original paper - which describes the measures we just calculated using basic statistics;

# load measures back in and plot descriptive table
measures_table = gb.read_csv('gamba_labrie_measures.csv')
labrie_table = gb.statistics.descriptive_table(measures_table)
display(labrie_table)

# Nice! Looks like the original! Next up is the Spearman's R coefficient matrix, which tells us how the measures relate to one-another. Run the next cell;

spearman_coefficient_table = gb.statistics.spearmans_r(measures_table)
display(spearman_coefficient_table)

# Nice x2! Now that the first two tables from the paper have been reproduced, the measures need splitting into the top 5% and remaining 95% of players by their total amount wagered. The `split_labrie_measures` method from the `gamba.studies` module does this, returning the two splits as dataframes.

labelled_measures = gb.labelling.top_split(measures_table, 'total_wagered', loud=True)

# With the two cohorts separated, the last part of the paper uses the same descriptive table to present their differences. To reproduce that using [*gamba*](https://github.com/gamba-dev/gamba), we simply call the same method as the first table on each of the cohorts;

# plot descriptive table(s) on the 95-5 split
# (index 1 = top 5% by total_wagered, index 0 = remaining 95%)
labelled_groups = gb.labelling.get_labelled_groups(labelled_measures, 'top_total_wagered')
top5_table = gb.statistics.descriptive_table(labelled_groups[1])
other95_table = gb.statistics.descriptive_table(labelled_groups[0])
display(top5_table, other95_table)

# That's it! In around 10 lines of code the [*gamba*](https://github.com/gamba-dev/gamba) framework can fully replicate the findings of LaBrie et al's 2008 paper. The most interesting question now is how to expand this analysis to uncover more details from the data, or to calculate new behavioural measures and see if they are useful in any way.
32_labrie2008.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
# ---

# # Parameter Management
#
# The ultimate goal of training deep networks is to find good parameter values for a given architecture. When everything is standard, the `nn.Sequential` class is a perfectly good tool for it. However, very few models are entirely standard and most scientists want to build things that are novel. This section shows how to manipulate parameters. In particular we will cover the following aspects:
#
# * Accessing parameters for debugging, diagnostics, to visualize them or to save them is the first step to understanding how to work with custom models.
# * Secondly, we want to set them in specific ways, e.g. for initialization purposes. We discuss the structure of parameter initializers.
# * Lastly, we show how this knowledge can be put to good use by building networks that share some parameters.
#
# As always, we start from our trusty Multilayer Perceptron with a hidden layer. This will serve as our choice for demonstrating the various features.

# + attributes={"classes": [], "id": "", "n": "1"}
from mxnet import init, np, npx
from mxnet.gluon import nn
npx.set_np()

net = nn.Sequential()
net.add(nn.Dense(256, activation='relu'))
net.add(nn.Dense(10))
net.initialize()  # Use the default initialization method

x = np.random.uniform(size=(2, 20))
net(x)  # Forward computation
# -

# ## Parameter Access
#
# In the case of a Sequential class we can access the parameters with ease, simply by indexing each of the layers in the network. The params variable then contains the required data. Let's try this out in practice by inspecting the parameters of the first layer.

# + attributes={"classes": [], "id": "", "n": "2"}
print(net[0].params)
print(net[1].params)
# -

# The output tells us a number of things. Firstly, the layer consists of two sets of parameters: `dense0_weight` and `dense0_bias`, as we would expect. They are both single precision and they have the necessary shapes that we would expect from the first layer, given that the input dimension is 20 and the output dimension 256. In particular the names of the parameters are very useful since they allow us to identify parameters *uniquely* even in a network of hundreds of layers and with nontrivial structure. The second layer is structured accordingly.
#
# ### Targeted Parameters
#
# In order to do something useful with the parameters we need to access them, though. There are several ways to do this, ranging from simple to general. Let's look at some of them.

# + attributes={"classes": [], "id": "", "n": "3"}
print(net[1].bias)
print(net[1].bias.data())
# -

# The first returns the bias of the second layer. Since this is an object containing data, gradients, and additional information, we need to request the data explicitly. Note that the bias is all 0 since we initialized the bias to contain all zeros. Note that we can also access the parameters by name, such as `dense0_weight`. This is possible since each layer comes with its own parameter dictionary that can be accessed directly. Both methods are entirely equivalent but the first method leads to much more readable code.

# + attributes={"classes": [], "id": "", "n": "4"}
print(net[0].params['dense0_weight'])
print(net[0].params['dense0_weight'].data())
# -

# Note that the weights are nonzero. This is by design since they were randomly initialized when we constructed the network. `data` is not the only function that we can invoke. For instance, we can compute the gradient with respect to the parameters. It has the same shape as the weight. However, since we did not invoke backpropagation yet, the values are all 0.
# + attributes={"classes": [], "id": "", "n": "5"} net[0].weight.grad() # - # ### All Parameters at Once # # Accessing parameters as described above can be a bit tedious, in particular if we have more complex blocks, or blocks of blocks (or even blocks of blocks of blocks), since we need to walk through the entire tree in reverse order to how the blocks were constructed. To avoid this, blocks come with a method `collect_params` which grabs all parameters of a network in one dictionary such that we can traverse it with ease. It does so by iterating over all constituents of a block and calls `collect_params` on subblocks as needed. To see the difference consider the following: # + attributes={"classes": [], "id": "", "n": "6"} # parameters only for the first layer print(net[0].collect_params()) # parameters of the entire network print(net.collect_params()) # - # This provides us with a third way of accessing the parameters of the network. If we wanted to get the value of the bias term of the second layer we could simply use this: # + attributes={"classes": [], "id": "", "n": "7"} net.collect_params()['dense1_bias'].data() # - # Throughout the book we'll see how various blocks name their subblocks (Sequential simply numbers them). This makes it very convenient to use regular expressions to filter out the required parameters. # + attributes={"classes": [], "id": "", "n": "8"} print(net.collect_params('.*weight')) print(net.collect_params('dense0.*')) # - # ### <NAME> strikes again # # Let's see how the parameter naming conventions work if we nest multiple blocks inside each other. For that we first define a function that produces blocks (a block factory, so to speak) and then we combine these inside yet larger blocks. 
# + attributes={"classes": [], "id": "", "n": "20"} def block1(): net = nn.Sequential() net.add(nn.Dense(32, activation='relu')) net.add(nn.Dense(16, activation='relu')) return net def block2(): net = nn.Sequential() for i in range(4): net.add(block1()) return net rgnet = nn.Sequential() rgnet.add(block2()) rgnet.add(nn.Dense(10)) rgnet.initialize() rgnet(x) # - # Now that we are done designing the network, let's see how it is organized. `collect_params` provides us with this information, both in terms of naming and in terms of logical structure. print(rgnet.collect_params) print(rgnet.collect_params()) # Since the layers are hierarchically generated, we can also access them accordingly. For instance, to access the first major block, within it the second subblock and then within it, in turn the bias of the first layer, we perform the following. rgnet[0][1][0].bias.data() # ## Parameter Initialization # # Now that we know how to access the parameters, let's look at how to initialize # them properly. We discussed the need for # initialization in :numref:`chapter_numerical_stability`. By default, MXNet initializes the weight matrices # uniformly by drawing from $U[-0.07, 0.07]$ and the bias parameters are all set # to $0$. However, we often need to use other methods to initialize the # weights. MXNet's `init` module provides a variety of preset initialization # methods, but if we want something out of the ordinary, we need a bit of extra # work. # # ### Built-in Initialization # # Let's begin with the built-in initializers. The code below initializes all parameters with Gaussian random variables. 
# + attributes={"classes": [], "id": "", "n": "9"}
# force_reinit ensures that the variables are initialized again, regardless of
# whether they were already initialized previously
net.initialize(init=init.Normal(sigma=0.01), force_reinit=True)
net[0].weight.data()[0]
# -

# If we wanted to initialize all parameters to 1, we could do this simply by changing the initializer to `Constant(1)`.

# + attributes={"classes": [], "id": "", "n": "10"}
net.initialize(init=init.Constant(1), force_reinit=True)
net[0].weight.data()[0]
# -

# If we want to initialize only a specific parameter in a different manner, we can simply set the initializer only for the appropriate subblock (or parameter) for that matter. For instance, below we initialize the second layer to a constant value of 42 and we use the `Xavier` initializer for the weights of the first layer.

# + attributes={"classes": [], "id": "", "n": "11"}
net[1].initialize(init=init.Constant(42), force_reinit=True)
net[0].weight.initialize(init=init.Xavier(), force_reinit=True)
print(net[1].weight.data()[0,0])
print(net[0].weight.data()[0])
# -

# ### Custom Initialization
#
# Sometimes, the initialization methods we need are not provided in the `init` module. At this point, we can implement a subclass of the `Initializer` class so that we can use it like any other initialization method. Usually, we only need to implement the `_init_weight` function and modify the incoming `ndarray` according to the initial result. In the example below, we pick a decidedly bizarre and nontrivial distribution, just to prove the point. We draw the coefficients from the following distribution:
#
# $$
# \begin{aligned}
#     w \sim \begin{cases}
#         U[5, 10] & \text{ with probability } \frac{1}{4} \\
#             0    & \text{ with probability } \frac{1}{2} \\
#         U[-10, -5] & \text{ with probability } \frac{1}{4}
#     \end{cases}
# \end{aligned}
# $$

# + attributes={"classes": [], "id": "", "n": "12"}
class MyInit(init.Initializer):
    # Draw uniformly from [-10, 10], then zero out everything with
    # magnitude below 5, yielding the three-case distribution above.
    def _init_weight(self, name, data):
        print('Init', name, data.shape)
        data[:] = np.random.uniform(-10, 10, data.shape)
        data *= np.abs(data) >= 5

net.initialize(MyInit(), force_reinit=True)
net[0].weight.data()[0]
# -

# If even this functionality is insufficient, we can set parameters directly. Since `data()` returns an `ndarray` we can access it just like any other matrix. A note for advanced users - if you want to adjust parameters within an `autograd` scope you need to use `set_data` to avoid confusing the automatic differentiation mechanics.

# + attributes={"classes": [], "id": "", "n": "13"}
net[0].weight.data()[:] += 1
net[0].weight.data()[0,0] = 42
net[0].weight.data()[0]
# -

# ## Tied Parameters
#
# In some cases, we want to share model parameters across multiple layers. For instance when we want to find good word embeddings we may decide to use the same parameters both for encoding and decoding of words. We discussed one such case when we introduced :numref:`chapter_model_construction`. Let's see how to do this a bit more elegantly. In the following we allocate a dense layer and then use its parameters specifically to set those of another layer.
# + attributes={"classes": [], "id": "", "n": "14"} net = nn.Sequential() # We need to give the shared layer a name such that we can reference its # parameters shared = nn.Dense(8, activation='relu') net.add(nn.Dense(8, activation='relu'), shared, nn.Dense(8, activation='relu', params=shared.params), nn.Dense(10)) net.initialize() x = np.random.uniform(size=(2, 20)) net(x) # Check whether the parameters are the same print(net[1].weight.data()[0] == net[2].weight.data()[0]) net[1].weight.data()[0,0] = 100 # Make sure that they're actually the same object rather than just having the # same value print(net[1].weight.data()[0] == net[2].weight.data()[0]) # - # The above example shows that the parameters of the second and third layer are tied. They are identical rather than just being equal. That is, by changing one of the parameters the other one changes, too. What happens to the gradients is quite ingenious. Since the model parameters contain gradients, the gradients of the second hidden layer and the third hidden layer are accumulated in the `shared.params.grad( )` during backpropagation. # # ## Summary # # * We have several ways to access, initialize, and tie model parameters. # * We can use custom initialization. # * Gluon has a sophisticated mechanism for accessing parameters in a unique and hierarchical manner. # # # ## Exercises # # 1. Use the FancyMLP defined in :numref:`chapter_model_construction` and access the parameters of the various layers. # 1. Look at the [MXNet documentation](http://beta.mxnet.io/api/gluon-related/mxnet.initializer.html) and explore different initializers. # 1. Try accessing the model parameters after `net.initialize()` and before `net(x)` to observe the shape of the model parameters. What changes? Why? # 1. Construct a multilayer perceptron containing a shared parameter layer and train it. During the training process, observe the model parameters and gradients of each layer. # 1. Why is sharing parameters a good idea? 
# # ## Scan the QR Code to [Discuss](https://discuss.mxnet.io/t/2326) # # ![](../img/qr_parameters.svg)
5 deep-learning-computation/parameters.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # %matplotlib inline # # What is PyTorch? # ================ # # It’s a Python-based scientific computing package targeted at two sets of # audiences: # # - A replacement for NumPy to use the power of GPUs # - a deep learning research platform that provides maximum flexibility # and speed # # Getting Started # --------------- # # Tensors # ^^^^^^^ # # Tensors are similar to NumPy’s ndarrays, with the addition being that # Tensors can also be used on a GPU to accelerate computing. # # from __future__ import print_function import torch # Construct a 5x3 matrix, uninitialized: # # x = torch.empty(5, 3) print(x) # Construct a randomly initialized matrix: # # x = torch.rand(5, 3) print(x) # Construct a matrix filled zeros and of dtype long: # # x = torch.zeros(5, 3, dtype=torch.long) print(x) # Construct a tensor directly from data: # # x = torch.tensor([5.5, 3]) print(x) # or create a tensor based on an existing tensor. These methods # will reuse properties of the input tensor, e.g. dtype, unless # new values are provided by user # # # + x = x.new_ones(5, 3, dtype=torch.double) # new_* methods take in sizes print(x) x = torch.randn_like(x, dtype=torch.float) # override dtype! print(x) # result has the same size # - # Get its size: # # print(x.size()) # <div class="alert alert-info"><h4>Note</h4><p>``torch.Size`` is in fact a tuple, so it supports all tuple operations.</p></div> # # Operations # ^^^^^^^^^^ # There are multiple syntaxes for operations. In the following # example, we will take a look at the addition operation. 
# # Addition: syntax 1 # # y = torch.rand(5, 3) print(x + y) # Addition: syntax 2 # # print(torch.add(x, y)) # Addition: providing an output tensor as argument # # result = torch.empty(5, 3) torch.add(x, y, out=result) print(result) # Addition: in-place # # # adds x to y y.add_(x) print(y) # <div class="alert alert-info"><h4>Note</h4><p>Any operation that mutates a tensor in-place is post-fixed with an ``_``. # For example: ``x.copy_(y)``, ``x.t_()``, will change ``x``.</p></div> # # You can use standard NumPy-like indexing with all bells and whistles! # # print(x[:, 1]) # Resizing: If you want to resize/reshape tensor, you can use ``torch.view``: # # x = torch.randn(4, 4) y = x.view(16) z = x.view(-1, 8) # the size -1 is inferred from other dimensions print(x.size(), y.size(), z.size()) # If you have a one element tensor, use ``.item()`` to get the value as a # Python number # # x = torch.randn(1) print(x) print(x.item()) # **Read later:** # # # 100+ Tensor operations, including transposing, indexing, slicing, # mathematical operations, linear algebra, random numbers, etc., # are described # `here <https://pytorch.org/docs/torch>`_. # # NumPy Bridge # ------------ # # Converting a Torch Tensor to a NumPy array and vice versa is a breeze. # # The Torch Tensor and NumPy array will share their underlying memory # locations, and changing one will change the other. # # Converting a Torch Tensor to a NumPy Array # ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ # # a = torch.ones(5) print(a) b = a.numpy() print(b) # See how the numpy array changed in value. # # a.add_(1) print(a) print(b) # Converting NumPy Array to Torch Tensor # ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ # See how changing the np array changed the Torch Tensor automatically # # import numpy as np a = np.ones(5) b = torch.from_numpy(a) np.add(a, 1, out=a) print(a) print(b) # All the Tensors on the CPU except a CharTensor support converting to # NumPy and back. 
# # CUDA Tensors # ------------ # # Tensors can be moved onto any device using the ``.to`` method. # # # let us run this cell only if CUDA is available # We will use ``torch.device`` objects to move tensors in and out of GPU if torch.cuda.is_available(): device = torch.device("cuda") # a CUDA device object y = torch.ones_like(x, device=device) # directly create a tensor on GPU x = x.to(device) # or just use strings ``.to("cuda")`` z = x + y print(z) print(z.to("cpu", torch.double)) # ``.to`` can also change dtype together!
Deep-learning-framework/pytorch/pytorch-handbook/chapter1/tensor_tutorial.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 2 # language: python # name: python2 # --- # # PyLadies and local Python User Groups # # _Last updated: August 4, 2015_ # # I am not a statistician by trade; far from it. I did take a few stats & econometrics courses in college, but I won't even consider myself an armchair statistician here. # # I am not making any suggestions about causation, just merely exploring what the [Meetup API][0] has to offer. # # This also isn't how I code in general; but I love ~~IPython~~ [Jupyter Notebooks][1], and I wanted an excuse to use it with Pandas (first time I'm using [Pandas][2] too!). # # --- # # This data was used in my EuroPython 2015 talk, [Diversity: We're not done yet][3]. ([Slides][4], video soon) # # [0]: http://www.meetup.com/meetup_api/ # [1]: https://jupyter.org/ # [2]: http://pandas.pydata.org/ # [3]: http://www.roguelynn.com/words/were-not-done-yet/ # [4]: https://speakerdeck.com/roguelynn/diversity-were-not-done-yet # + from __future__ import print_function from collections import defaultdict import json import os import time import requests # - # ### Part 1: Grabbing all Python-centric meetup groups # # #### NOTE # This repository includes all the data files that I used (latest update: Aug 4, 2015). You may skip this part if you don't want to call the Meetup API to get new/fresh data. # # --- # # #### TIP # Take a look at Meetup's [API Console][0]; I used it when forming API requests as well as getting an idea of pagination for some requests. # # --- # # #### What we're doing # We'll call a few different endpoints from the Meetup API and save the data locally in a `json` file for us to use later. # # To get your own Meetup API key, you'll need a regular Meetup user account. Once you're logged in, you can navigate to the [API Key][1] portion of the API docs to reveal your API key. 
# API Endpoint docs:
#
# * [Groups][2]
#
# [0]: https://secure.meetup.com/meetup_api/console/
# [1]: https://secure.meetup.com/meetup_api/key/
# [2]: http://www.meetup.com/meetup_api/docs/2/groups/


def save_output(data, output_file):
    """Serialize `data` as JSON into `output_file`."""
    with open(output_file, "w") as f:
        json.dump(data, f)


# Set some global variables
MEETUP_API_KEY = "yeah right"
MEETUP_GROUPS_URL = "https://api.meetup.com/2/groups"
PARAMS = {
    "signed": True,
    "key": MEETUP_API_KEY,
    "topic": "python",
    "category_id": 34,      # 34 = Tech, there are only ~35 categories
    "order": "members",
    "page": 200,            # max allowed
    "omit": "group_photo",  # no need for photos in response
}
# looked on the API console, 1117 meetup groups as of 7/17,
# 200 groups per page = 6 pages
TOTAL_PAGES = 6

# The Meetup API limits requests (30 requests per 10 seconds per their
# response headers), so sleep 1 second between each request to be safe.


def get_meetup_groups():
    """Page through the Meetup /2/groups endpoint and return all groups.

    Mutates the module-level PARAMS dict (sets "offset") and sleeps one
    second between requests to stay under the API rate limit.
    """
    meetup_groups = []
    for i in xrange(TOTAL_PAGES):
        PARAMS["offset"] = i
        # BUGFIX: previously formatted with TOTAL_PAGES+1, which reported
        # "page 1 of 7" when there are only 6 pages.
        print("GROUPS: Getting page {0} of {1}".format(i + 1, TOTAL_PAGES))
        response = requests.get(MEETUP_GROUPS_URL, params=PARAMS)
        if response.ok:
            meetup_groups.extend(response.json().get("results"))
        time.sleep(1)  # don't bombard the Meetup API
    print("GROUPS: Collected {0} Meetup groups".format(len(meetup_groups)))
    return meetup_groups


meetup_groups = get_meetup_groups()

# Create a directory to save everything
data_dir = "meetup_data"
if not os.path.exists(data_dir):
    os.makedirs(data_dir)

# Save meetup groups data
output = os.path.join(data_dir, "meetup_groups.json")
save_output(meetup_groups, output)

# inspect one for funsies
meetup_groups[0]

# ## Part 2: Narrow down & sort the meetup groups
#
# We got a lot returned from searching the `/groups` endpoint with just the
# "python" topic. So we should narrow it down a bit, as well as sort out
# PyLadies groups.
# My process is to just narrow down by actual name of the group
# (e.g. `python`, `py`, `django`, etc).
#
# Spot checking the results will definitely be needed, but will come a bit
# later.

search = ["python", "pydata", "pyramid", "py", "django", "flask", "plone"]
omit = ["happy"]  # I realize that a group could be called "happy python user group" or something...


def is_pug(group):
    """Return True if the group name matches a `search` keyword and no
    `omit` keyword.

    FIX: the original fell through to an implicit `None` when nothing
    matched; this always returns a real bool.
    """
    group_name = group.get("name").lower()
    if any(word in group_name for word in omit):
        return False
    return any(word in group_name for word in search)


def sort_groups(groups):
    """Sort groups by 'pyladies' and 'python user groups'."""
    pyladies = []
    user_groups = []
    for g in groups:
        if "pyladies" in g.get("name").lower():
            pyladies.append(g)
        elif is_pug(g):
            user_groups.append(g)
    return user_groups, pyladies


user_groups, pyladies = sort_groups(meetup_groups)

# Let's spot check the UGs to see if what we're left with makes sense
# Note: I took a peek at a few (not shown here) and for the most part,
# all seems okay
for g in user_groups:
    print(g.get("name"))

# ## Part 3: Find all Python meetup groups with a PyLadies within 50 miles
#
# I've adapted this from a Java implementation to find if a point is within
# a radius of another point. Geo-math is hard.
# [0]: http://stackoverflow.com/questions/120283/how-can-i-measure-distance-and-create-a-bounding-box-based-on-two-latitudelongi/123305#123305

from math import sin, cos, asin, degrees, radians, atan2, sqrt

RADIUS = 3958.75  # Earth's radius in miles


def is_within_50_miles(pyladies_coords, python_coords):
    """Return True if two (lat, lon) points are at most 50 miles apart.

    Uses the haversine great-circle distance formula.
    """
    pyladies_lat, pyladies_lon = pyladies_coords[0], pyladies_coords[1]
    python_lat, python_lon = python_coords[0], python_coords[1]
    d_lat = radians(pyladies_lat - python_lat)
    d_lon = radians(pyladies_lon - python_lon)
    sin_d_lat = sin(d_lat / 2)
    sin_d_lon = sin(d_lon / 2)
    # BUGFIX: haversine scales only the longitude term by the cosines of the
    # two latitudes: a = sin^2(dlat/2) + cos(lat1)*cos(lat2)*sin^2(dlon/2).
    # The original multiplied the whole sum by the cosines, which shrank
    # north-south distances and over-matched groups at high latitudes.
    a = (sin_d_lat ** 2
         + sin_d_lon ** 2 * cos(radians(pyladies_lat)) * cos(radians(python_lat)))
    c = 2 * atan2(sqrt(a), sqrt(1 - a))
    dist = RADIUS * c
    return dist <= 50


def get_coords(group):
    """Return the (lat, lon) pair stored on a Meetup group record."""
    return group.get("lat"), group.get("lon")


def get_nearby_python_groups(pyl, collect):
    """Map this PyLadies group's name to every user group within 50 miles."""
    pyl_coords = get_coords(pyl)
    nearby = []
    for group in user_groups:
        pyt_coords = get_coords(group)
        if is_within_50_miles(pyl_coords, pyt_coords):
            nearby.append(group)
    collect[pyl.get("name")] = nearby
    return collect


collect = {}
for pylady in pyladies:
    collect = get_nearby_python_groups(pylady, collect)

for item in collect.items():
    print(item[0], len(item[1]))


# Save data into pyladies-specific directories

def pylady_dir(pyl):
    """Create (if needed) and return a whitespace-free dir name for `pyl`."""
    _dir = "".join(pyl.split())
    outdir = os.path.join(data_dir, _dir)
    if not os.path.exists(outdir):
        os.makedirs(outdir)
    return _dir


def save_pyladies():
    """Write each PyLadies group and its nearby user groups to JSON files."""
    for pylady in pyladies:
        name = pylady.get("name")
        subdir = pylady_dir(name)
        outputdir = os.path.join(data_dir, subdir)
        output = os.path.join(outputdir, subdir + ".json")
        save_output(pylady, output)
        groups = collect.get(name)
        for g in groups:
            group_link = g.get("link")
            # e.g. "http://www.meetup.com/NAME/" -> "NAME"
            group_name = group_link.split(".com/")[1][:-1]
            group_name = "".join(group_name)
            outfile = group_name + ".json"
            ug_output = os.path.join(outputdir, outfile)
            save_output(g, ug_output)


save_pyladies()

# Sanity check (I have a `tree` command installed via `brew install tree`):
# !tree

# ## Part 4: Membership join history
#
# #### Note
# If getting members from an endpoint returns 0, despite the member count in
# the group data being a positive number, then the group is set to private &
# accessible only to members.
#
# #### Note
# There's a "pseudo" race condition where the group member count may differ
# slightly (+/- ~3) from what the members API actually returns, due to people
# joining/leaving between the two API calls.
#
# API endpoint docs:
#
# * [Members][0]
#
# [0]: http://www.meetup.com/meetup_api/docs/2/members/

MEETUP_MEMBER_URL = "https://api.meetup.com/2/members"
PARAMS = {
    "signed": True,
    "key": MEETUP_API_KEY,
}


def get_members(group):
    """Fetch every member record for `group` from the Meetup /2/members API.

    Pages through the endpoint 200 members at a time, sleeping 1s between
    requests to stay under the rate limit. Mutates the module-level PARAMS.
    """
    PARAMS["group_id"] = group.get("id")
    members_count = group.get("members")
    print(u"MEMBERS: Getting {0} members for group {1}".format(
        members_count, group.get("name")))
    # ceil(members_count / 200) pages; `//` is explicit floor division so the
    # math also works if this ever runs under Python 3.
    pages = members_count // 200
    if members_count % 200 > 0:
        pages += 1
    members = []
    for i in xrange(pages):
        # BUGFIX: previously formatted with pages+1, over-reporting the total.
        print("MEMBERS: Iteration {0} out of {1}".format(i + 1, pages))
        PARAMS["offset"] = i
        resp = requests.get(MEETUP_MEMBER_URL, PARAMS)
        if resp.ok:
            results = resp.json().get("results")
            members.extend(results)
        time.sleep(1)
    print("MEMBERS: Got {0} members".format(len(members)))
    return members


def get_members_collection(pylady, groups):
    """Return (pyladies members, {group name: [member lists]}) for one city."""
    pylady_members = get_members(pylady)
    pug_members = defaultdict(list)
    for g in groups:
        pg_mbrs = get_members(g)
        pug_members[g.get("name")].append(pg_mbrs)
    return pylady_members, pug_members

# NOTE: this takes *FOREVER*.
# Walk every PyLadies chapter, fetch its members and the members of each
# nearby Python user group, and save both to the chapter's data directory.
start = time.time()
for i, item in enumerate(collect.items()):
    # item is (pyladies chapter name, [nearby user group records])
    # NOTE(review): len(collect)+1 over-reports the total by one; left as-is
    # in this documentation-only pass.
    print("COLLECTING: {0} out of {1}".format(i+1, len(collect)+1))
    # recover the full PyLadies group record from its name
    pylady = [p for p in pyladies if p.get("name") == item[0]][0]
    pylady_members, pug_members = get_members_collection(pylady, item[1])
    print("COLLECTING: Saving all the data!")
    pylady_name = pylady.get("name")
    outdir = pylady_dir(pylady_name)
    outdir = os.path.join(data_dir, outdir)
    outfile = os.path.join(outdir, "pyladies_members.json")
    save_output(pylady_members, outfile)
    outfile = os.path.join(outdir, "pug_members.json")
    save_output(pug_members, outfile)
end = time.time()

# report total wall-clock time in minutes
delta_s = end - start
delta_m = delta_s / 60
print("**DONE**")
print("Completed in {:.0f} minutes".format(delta_m))

# ## Part 5: Graphing
#
# Take a look at `Creating Graphs with Pandas and matplotlib.ipynb` for how
# to visualize this data with Pandas (not sure why I broke it up into two
# notebooks).
Meetup Stats.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3 (ipykernel)
#     language: python
#     name: python3
# ---

# rot_fit_BB is the rotation-curve fit function used to evaluate V(R) below.
from astropy.table import Table
import numpy as np
import sys
sys.path.insert(1, '/Users/kellydouglass/Documents/Research/Rotation_curves/RotationCurves/spirals/')
from dark_matter_mass_v1 import rot_fit_BB
import matplotlib.pyplot as plt
# %matplotlib notebook

# # Data

data_filename = '../spirals/DRP-master_file_vflag_BB_smooth1p85_mapFit_N2O2_HIdr2_morph_v6.txt'
data = Table.read(data_filename, format='ascii.commented_header')

################################################################################
# Calculate the velocity at R90, Rmax
#-------------------------------------------------------------------------------
# Convert r from arcsec to kpc
#-------------------------------------------------------------------------------
H_0 = 100        # Hubble's Constant in units of h km/s/Mpc
c = 299792.458   # Speed of light in units of km/s

# Hubble-flow distance to each galaxy
dist_to_galaxy_Mpc = c*data['NSA_redshift']/H_0
dist_to_galaxy_kpc = dist_to_galaxy_Mpc*1000

# arcsec -> radians (1/3600 deg per arcsec), then small-angle projection to kpc
data['R90_kpc'] = dist_to_galaxy_kpc*np.tan(data['NSA_elpetro_th90']*(1./60)*(1./60)*(np.pi/180))
#-------------------------------------------------------------------------------

# Rotation velocity evaluated at R90 from the fitted curve parameters
data['V90_kms'] = rot_fit_BB(data['R90_kpc'], [data['Vmax_map'], data['Rturn_map'], data['alpha_map']])
'''
data['VRmax_kms'] = rot_fit_BB(data['Rmax_map'], [data['Vmax_map'], data['Rturn_map'], data['alpha_map']])
''';
################################################################################

# Quality cuts: drop galaxies with NaN masses, bad fits, near edge-on maps,
# rising curves (V90/Vmax < 0.9), likely mergers, or poor map coverage.
bad_boolean = np.logical_or.reduce([np.isnan(data['M90_map']),
                                    np.isnan(data['M90_disk_map']),
                                    data['alpha_map'] > 99,
                                    data['ba_map'] > 0.998,
                                    data['V90_kms']/data['Vmax_map'] < 0.9,
                                    (data['Tidal'] & (data['DL_merge'] > 0.97)),
                                    data['map_frac_unmasked'] < 0.05,
                                    #(data['map_frac_unmasked'] > 0.13) & (data['DRP_map_smoothness'] > 1.96),
                                    #(data['map_frac_unmasked'] > 0.07) & (data['DRP_map_smoothness'] > 2.9),
                                    #(data['map_frac_unmasked'] > -0.0638*data['DRP_map_smoothness'] + 0.255) & (data['DRP_map_smoothness'] > 1.96)
                                    ])

good_galaxies = data[~bad_boolean]
len(good_galaxies)

# ## Calculate mass ratios
# The mass columns are log10 values, so the linear ratio is 10**(difference).

good_galaxies['M90_Mdisk_ratio'] = 10**(good_galaxies['M90_map'] - good_galaxies['M90_disk_map'])
#goodHI_galaxies['M90_Mdisk_ratio'] = 10**(goodHI_galaxies['M90_map'] - goodHI_galaxies['M90_disk_map'])
#goodHI_galaxies['M90_MdiskHI_ratio'] = 10**goodHI_galaxies['M90_map']/(10**goodHI_galaxies['M90_disk_map'] + 10**goodHI_galaxies['logHI'])
#goodHI_galaxies['M90_vis'] = np.log10(10**goodHI_galaxies['M90_disk_map'] + 10**goodHI_galaxies['logHI'])

# # Distribution in $M_{90}/M_{90,disk}$

plt.figure()
plt.hist(good_galaxies['M90_Mdisk_ratio'], bins=np.arange(0,7000,50))
plt.yscale('log')
plt.xlabel('$M_{90}/M_{90,disk}$')
plt.tight_layout();

# Inspect the extreme-ratio tail
good_galaxies[(good_galaxies['M90_Mdisk_ratio'] > 1000) & (good_galaxies['M90_Mdisk_ratio'] < 7000)]
notebooks/High_Mratios_map.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 2 # language: python # name: python2 # --- import pandas as pd import numpy as np pd.__version__ names= pd.Series(["Pratik","Sneha","Vaishnavi","Bhumika"]) pointers= pd.Series([9.9,10,7.5,8.8]) dataframe = pd.DataFrame({"Name":names, "Pointer":pointers}) dataframe dataframe.describe() sample_csv=pd.read_csv("https://storage.googleapis.com/mledu-datasets/california_housing_train.csv", sep=",") sample_csv.head() sample_csv.describe() sample= sample_csv.describe() sample["total_rooms"] # sample_csv.hist("total_rooms") # %matplotlib inline import matplotlib.pyplot as plt pm= sample_csv.hist("median_house_value") # + some= sample_csv["median_house_value"] # - some.head() skks=some/1000 skks.head() import numpy as np np.log(skks).head() skks.apply(lambda val: val>70).head() dataframe["Name"].append(pd.Series(['Mahajan']), ignore_index=True) dataframe['Surname']=pd.Series(['Mahajan','Mahajan','Hire','Ekbote']) dataframe dataframe['is Surname Mahajan']= dataframe['Surname'].apply(lambda val : True if val=="Mahajan" else False) dataframe dataframe.index dataframe dataframe.reindex([3,1,2,0]) dataframe.reindex(np.random.permutation(dataframe.index)) np.random.permutation(3) dataframe.reindex(np.random.permutation(5))
A01. Pandas/Learning_Pandas.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 2
#     language: python
#     name: python2
# ---

# # Print Formatting
# In this lecture we will briefly cover the various ways to format your print
# statements. As you code more and more, you will probably want to have print
# statements that can take in a variable into a printed string statement.
#
# The most basic example of a print statement is:

print 'This is a string'

# ## Strings
# You can use the %s to format strings into your print statements.

s = 'STRING'
print 'Place another string with a mod and s: %s' %(s)

# ## Floating Point Numbers
# Floating point numbers use the format %n1.n2f where n1 is the total minimum
# number of digits the string should contain (padded with whitespace if the
# number is shorter) and n2 is how many digits to show past the decimal point.

print 'Floating point numbers: %1.2f' %(13.144)

print 'Floating point numbers: %1.0f' %(13.144)

print 'Floating point numbers: %1.5f' %(13.144)

print 'Floating point numbers: %10.2f' %(13.144)

print 'Floating point numbers: %25.2f' %(13.144)

# ## Conversion Format methods.
# %s and %r convert any python object to a string using two separate methods:
# str() and repr(). You can pass almost any Python object with these two
# methods and it will work:

print 'Here is a number: %s. Here is a string: %s' %(123.1,'hi')

print 'Here is a number: %r. Here is a string: %r' %(123.1,'hi')

# ## Multiple Formatting
# Pass a tuple to the modulo symbol to place multiple formats in your print
# statements:

print 'First: %s, Second: %1.2f, Third: %r' %('hi!',3.14,22)

# # Using the string .format() method
# The best way to format objects into your strings for print statements is
# using the format method. The syntax is:
#
#     'String here {var1} then also {var2}'.format(var1='something1',var2='something2')

print 'This is a string with an {p}'.format(p='insert')

# Multiple times:
print 'One: {p}, Two: {p}, Three: {p}'.format(p='Hi!')

# Several Objects:
print 'Object 1: {a}, Object 2: {b}, Object 3: {c}'.format(a=1,b='two',c=12.3)

# That is the basics of string formatting! Remember that Python 3 uses a
# print() function, not the print statement!
notebooks/Complete-Python-Bootcamp-master/Print Formatting.ipynb
# DeepGBM embedding-distillation notebook (converted from jupytext cells).

import wandb

import argparse, os, logging, random, time
import numpy as np
import math
import time
import scipy.sparse
import lightgbm as lgb
import data_helpers as dh

import torch
import torch.nn as nn
import torchvision
import torchvision.transforms as transforms
from sklearn.utils.extmath import softmax
from torch.autograd import Variable
from torch.nn.parameter import Parameter
from torch.optim import Optimizer, AdamW, SGD
import gc

torch.__version__

torchvision.__version__

import pdb

# Pick GPU when available. type_prefix lets later code construct tensors of
# the matching (cuda vs cpu) family without re-checking availability.
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
if torch.cuda.is_available():
    torch.set_default_tensor_type(torch.cuda.FloatTensor)
    type_prefix = torch.cuda
else:
    type_prefix = torch


def one_hot(y, numslot, mask=None):
    """One-hot encode the integer entries of `y` into `numslot` slots.

    The result is reshaped back so each row of `y` becomes a concatenation of
    its entries' one-hot vectors; an optional `mask` is applied elementwise
    before reshaping.
    """
    y_tensor = y.type(type_prefix.LongTensor).reshape(-1, 1)
    # scatter_ writes a 1 at each row's class index
    y_one_hot = torch.zeros(y_tensor.size()[0], numslot, device=device,
                            dtype=torch.float32,
                            requires_grad=False).scatter_(1, y_tensor, 1)
    if mask is not None:
        y_one_hot = y_one_hot * mask
    y_one_hot = y_one_hot.reshape(y.shape[0], -1)
    return y_one_hot


class BatchDense(nn.Module):
    """A bank of `batch` independent dense layers applied in one baddbmm call.

    Input of shape (N, batch*in_features) is split into `batch` groups and
    each group gets its own (in_features x out_features) weight and bias.
    """

    def __init__(self, batch, in_features, out_features, bias_init=None):
        super(BatchDense, self).__init__()
        self.batch = batch
        self.in_features = in_features
        self.out_features = out_features
        self.weight = Parameter(torch.Tensor(batch, in_features, out_features))
        self.bias = Parameter(torch.Tensor(batch, 1, out_features))
        self.reset_parameters(bias_init)

    def reset_parameters(self, bias_init=None):
        # Glorot/Xavier-style uniform range for the weights
        stdv = math.sqrt(6.0 /(self.in_features + self.out_features))
        self.weight.data.uniform_(-stdv, stdv)
        if bias_init is not None:
            # pdb.set_trace()
            # NOTE(review): assumes bias_init is float64/float32-compatible
            # and shaped (batch, 1, out_features) — confirm against callers.
            self.bias.data = torch.from_numpy(np.array(bias_init))
        else:
            self.bias.data.fill_(0)

    def forward(self, x):
        size = x.size()  # NOTE(review): unused; kept for byte-identity
        # Todo: avoid the swap axis
        x = x.view(x.size(0), self.batch, -1)
        out = x.transpose(0, 1).contiguous()
        # per-group affine transform: bias + x @ weight, batched over groups
        out = torch.baddbmm(self.bias, out, self.weight)
        out = out.transpose(0, 1).contiguous()
        out = out.view(x.size(0), -1)
        return out
class EmbeddingModel(nn.Module):
    """Distills grouped GBDT leaf outputs into dense per-group embeddings.

    Each of the `n_models` tree groups maps its one-hot leaf indices (up to
    `max_ntree_per_split` trees x `maxleaf` leaves) through its own embedding
    matrix; a BatchDense head then produces one scalar per group, and the
    scalars are summed into the final prediction.
    """

    def __init__(self, n_models, max_ntree_per_split, embsize, maxleaf, n_output, out_bias=None, task='regression'):
        super(EmbeddingModel, self).__init__()
        self.task = task
        self.n_models = n_models
        self.maxleaf = maxleaf
        self.fcs = nn.ModuleList()
        self.max_ntree_per_split = max_ntree_per_split
        # one embedding table per tree group
        self.embed_w = Parameter(torch.Tensor(n_models, max_ntree_per_split*maxleaf, embsize))
        # torch.nn.init.xavier_normal(self.embed_w)
        stdv = math.sqrt(1.0 /(max_ntree_per_split))
        self.embed_w.data.normal_(0,stdv) # .uniform_(-stdv, stdv)
        # per-group scalar head, optionally biased by the groups' mean outputs
        self.bout = BatchDense(n_models, embsize, 1, out_bias)
        self.bn = nn.BatchNorm1d(embsize * n_models)
        self.tanh = nn.Tanh()
        self.sigmoid = nn.Sigmoid()
        # self.output_fc = Dense(n_models * embsize, n_output)
        self.dropout = torch.nn.Dropout()
        if task == 'regression':
            self.criterion = nn.MSELoss()
        else:
            self.criterion = nn.BCELoss()

    def batchmul(self, x, models, embed_w, length):
        # one-hot the leaf indices, then project each group through its
        # embedding matrix with a single batched matmul
        out = one_hot(x, length)
        out = out.view(x.size(0), models, -1)
        out = out.transpose(0, 1).contiguous()
        out = torch.bmm(out, embed_w)
        out = out.transpose(0, 1).contiguous()
        out = out.view(x.size(0), -1)
        return out

    def lastlayer(self, x):
        # embedding + batch-norm; this is the representation distilled from
        # the trees (also used as the target for the inner loss)
        out = self.batchmul(x, self.n_models, self.embed_w, self.maxleaf)
        out = self.bn(out)
        # out = self.tanh(out)
        # out = out.view(x.size(0), self.n_models, -1)
        return out

    def forward(self, x):
        out = self.lastlayer(x)
        out = self.dropout(out)
        out = out.view(x.size(0), self.n_models, -1)
        out = self.bout(out)
        # out = self.output_fc(out)
        # sum the per-group scalars into the final prediction
        sum_out = torch.sum(out,-1,True)
        if self.task != 'regression':
            return self.sigmoid(sum_out), out
        return sum_out, out

    def joint_loss(self, out, target, out_inner, target_inner, *args):
        # distillation loss: match the per-group outputs to the tree outputs
        # (extra args such as the loss ratio are accepted but unused here)
        return nn.MSELoss()(out_inner, target_inner)

    def true_loss(self, out, target):
        return self.criterion(out, target)
def eval_metrics(task, true, pred):
    """Return (logloss, auc) for binary tasks, else the MSE.

    BUGFIX: the notebook only ran `from sklearn.utils.extmath import softmax`,
    which does NOT bind the name `sklearn`, so `sklearn.metrics.*` raised a
    NameError. Import the metrics module explicitly here.
    """
    import sklearn.metrics
    if task == 'binary':
        logloss = sklearn.metrics.log_loss(true.astype(np.float64), pred.astype(np.float64))
        auc = sklearn.metrics.roc_auc_score(true, pred)
        # error = 1-sklearn.metrics.accuracy_score(true,(pred+0.5).astype(np.int32))
        return (logloss, auc)  # , error)
    else:
        mseloss = sklearn.metrics.mean_squared_error(true, pred)
        return mseloss


def EvalTestset(test_x, test_y, model, test_batch_size, test_x_opt=None):
    """Evaluate `model` over the test set in batches.

    Returns (mean loss weighted by batch size, concatenated predictions).
    """
    test_len = test_x.shape[0]
    test_num_batch = math.ceil(test_len / test_batch_size)
    sum_loss = 0.0
    y_preds = []
    model.eval()
    with torch.no_grad():
        for jdx in range(test_num_batch):
            tst_st = jdx * test_batch_size
            tst_ed = min(test_len, tst_st + test_batch_size)
            inputs = torch.from_numpy(test_x[tst_st:tst_ed].astype(np.float32)).to(device)
            if test_x_opt is not None:
                inputs_opt = torch.from_numpy(test_x_opt[tst_st:tst_ed].astype(np.float32)).to(device)
                outputs = model(inputs, inputs_opt)
            else:
                outputs = model(inputs)
            targets = torch.from_numpy(test_y[tst_st:tst_ed]).to(device)
            # models may return (prediction, per-group outputs); keep the first
            if isinstance(outputs, tuple):
                outputs = outputs[0]
            y_preds.append(outputs)
            loss_tst = model.true_loss(outputs, targets).item()
            sum_loss += (tst_ed - tst_st) * loss_tst
    # NOTE(review): y_preds holds torch tensors; np.concatenate relies on their
    # implicit array conversion (CPU tensors only) — confirm on GPU runs.
    return sum_loss / test_len, np.concatenate(y_preds, 0)


def TrainWithLog(loss_dr, loss_init, loss_de, log_freq, test_freq, task,
                 test_batch_size, train_x, train_y, train_y_opt, test_x, test_y,
                 model, opt, epoch, batch_size, n_output, key="",
                 train_x_opt=None, test_x_opt=None):
    """Train `model` for `epoch` epochs with periodic logging and evaluation.

    When `train_y_opt` is given, each step optimizes the model's joint
    (distillation) loss whose weight decays as
    loss_init * max(0.3, loss_dr ** (epoch // loss_de)); the reported loss is
    always the true loss. Metrics are streamed to wandb. Returns the minimum
    test loss observed.
    """
    if isinstance(test_x, scipy.sparse.csr_matrix):
        test_x = test_x.todense()
    train_len = train_x.shape[0]
    global_iter = 0
    trn_batch_size = batch_size
    train_num_batch = math.ceil(train_len / trn_batch_size)
    total_iterations = epoch * train_num_batch
    start_time = time.time()
    total_time = 0.0
    min_loss = float("Inf")
    # min_error = float("Inf")
    max_auc = 0.0
    for epoch in range(epoch):
        # fresh shuffle each epoch
        shuffled_indices = np.random.permutation(np.arange(train_x.shape[0]))
        Loss_trn_epoch = 0.0
        Loss_trn_log = 0.0
        log_st = 0
        for local_iter in range(train_num_batch):
            trn_st = local_iter * trn_batch_size
            trn_ed = min(train_len, trn_st + trn_batch_size)
            batch_trn_x = train_x[shuffled_indices[trn_st:trn_ed]]
            if isinstance(batch_trn_x, scipy.sparse.csr_matrix):
                batch_trn_x = batch_trn_x.todense()
            inputs = torch.from_numpy(batch_trn_x.astype(np.float32)).to(device)
            targets = torch.from_numpy(train_y[shuffled_indices[trn_st:trn_ed],:]).to(device)
            model.train()
            if train_x_opt is not None:
                inputs_opt = torch.from_numpy(train_x_opt[shuffled_indices[trn_st:trn_ed]].astype(np.float32)).to(device)
                outputs = model(inputs, inputs_opt)
            else:
                outputs = model(inputs)
            opt.zero_grad()
            if isinstance(outputs, tuple) and train_y_opt is not None:
                # distillation targets (per-group tree outputs) for this batch
                targets_inner = torch.from_numpy(train_y_opt[shuffled_indices[trn_st:trn_ed],:]).to(device)
                # decaying weight for the distillation term
                loss_ratio = loss_init * max(0.3, loss_dr ** (epoch // loss_de))
                if len(outputs) == 3:
                    loss_val = model.joint_loss(outputs[0], targets, outputs[1], targets_inner, loss_ratio, outputs[2])
                else:
                    loss_val = model.joint_loss(outputs[0], targets, outputs[1], targets_inner, loss_ratio)
                loss_val.backward()
                # report the true loss, not the joint loss
                loss_val = model.true_loss(outputs[0], targets)
            elif isinstance(outputs, tuple):
                loss_val = model.true_loss(outputs[0], targets)
                loss_val.backward()
            else:
                loss_val = model.true_loss(outputs, targets)
                loss_val.backward()
            opt.step()
            loss_val = loss_val.item()
            wandb.log({"batch loss": loss_val})
            global_iter += 1
            Loss_trn_epoch += (trn_ed - trn_st) * loss_val
            Loss_trn_log += (trn_ed - trn_st) * loss_val
            if global_iter % log_freq == 0:
                print(key+"Epoch-{:0>3d} {:>5d} Batches, Step {:>6d}, Training Loss: {:>9.6f} (AllAvg {:>9.6f})"
                      .format(epoch, local_iter + 1, global_iter,
                              Loss_trn_log/(trn_ed-log_st), Loss_trn_epoch/trn_ed))
                log_st = trn_ed
                Loss_trn_log = 0.0
            if global_iter % test_freq == 0 or local_iter == train_num_batch - 1:
                # NOTE(review): this compares the model OBJECT to strings, so
                # it is always False; it presumably meant a model-name flag.
                # Kept as-is to preserve behavior.
                if model == 'deepgbm' or model == 'd1':
                    try:
                        print('Alpha: '+str(model.alpha))
                        print('Beta: '+str(model.beta))
                    except:
                        pass
                torch.cuda.empty_cache()
                test_loss, pred_y = EvalTestset(test_x, test_y, model, test_batch_size, test_x_opt)
                wandb.log({"loss": test_loss})
                current_used_time = time.time() - start_time
                start_time = time.time()
                wandb.log({"createdAt": start_time})
                total_time += current_used_time
                # linear extrapolation of remaining wall-clock time
                remaining_time = (total_iterations - (global_iter)) * (total_time / (global_iter))
                if task == 'binary':
                    metrics = eval_metrics(task, test_y, pred_y)
                    _, test_auc = metrics
                    wandb.log({"test batch auc": test_auc})
                    # min_error = min(min_error, test_error)
                    max_auc = max(max_auc, test_auc)
                    wandb.log({"test max auc": max_auc})
                    print(key+"Evaluate Result:\nEpoch-{:0>3d} {:>5d} Batches, Step {:>6d}, Testing Loss: {:>9.6f}, Testing AUC: {:8.6f}, Used Time: {:>5.1f}m, Remaining Time: {:5.1f}m"
                          .format(epoch, local_iter + 1, global_iter, test_loss, test_auc, total_time/60.0, remaining_time/60.0))
                else:
                    print(key+"Evaluate Result:\nEpoch-{:0>3d} {:>5d} Batches, Step {:>6d}, Testing Loss: {:>9.6f}, Used Time: {:>5.1f}m, Remaining Time: {:5.1f}m"
                          .format(epoch, local_iter + 1, global_iter, test_loss, total_time/60.0, remaining_time/60.0))
                min_loss = min(min_loss, test_loss)
                wandb.log({"test min loss": min_loss})
                print("-------------------------------------------------------------------------------")
        print("Best Metric: %s"%(str(max_auc) if task=='binary' else str(min_loss)))
    print("####################################################################################")
    print("Final Best Metric: %s"%(str(max_auc) if task=='binary' else str(min_loss)))
    return min_loss


def GetEmbPred(model, fun, X, test_batch_size):
    """Run `fun` (a sub-network of `model`, e.g. lastlayer) over X in batches
    and return the stacked numpy predictions."""
    model.eval()
    tst_len = X.shape[0]
    test_num_batch = math.ceil(tst_len / test_batch_size)
    y_preds = []
    with torch.no_grad():
        for jdx in range(test_num_batch):
            tst_st = jdx * test_batch_size
            tst_ed = min(tst_len, tst_st + test_batch_size)
            inputs = torch.from_numpy(X[tst_st:tst_ed]).to(device)
            t_preds = fun(inputs).data.cpu().numpy()
            y_preds.append(t_preds)
    y_preds = np.concatenate(y_preds, 0)
    return y_preds


HOME_DIR = os.getcwd()
DATA_DIR = os.path.join(HOME_DIR, 'data')

# pre-split numeric dataset: (train_x, train_y, test_x, test_y)
num_data = dh.load_data('/work/neurotrees/articles code reproduction/DeepGBM/data/data_offline_num')

train_x, train_y, test_x, test_y = num_data
# (cell metadata continuation)
PATH_TO_PICKLE = '/work/neurotrees/experiments/DeepGBM-decomposition/wine-dataset'

# + (notebook cell)
import pickle

# + (notebook cell)
# Weights & Biases sweep: random search minimising the reported 'loss'
sweep_config = {
    'method': 'random', #grid, random
    'metric': {
        'name': 'loss',
        'goal': 'minimize'
    },
    'parameters': {
        'emb_epoch': {
            'values': [2, 5, 10]
        },
        'batch_size': {
            'values': [256, 128, 64, 32]
        },
        'emb_lr': {
            'values': [1e-2, 1e-3, 1e-4, 3e-4, 3e-5, 1e-5]
        },
        'optimizer': {
            'values': ['adamW', 'sgd']
        },
    }
}

# + (notebook cell)
sweep_id = wandb.sweep(sweep_config, project="deepgbm-wandb")

# + (notebook cell)
# (formerly a cell of commented-out hard-coded hyper-parameters; the same
#  values now live in config_defaults inside train())

def train():
    """Run one sweep trial: load the pre-computed GBDT artefacts from
    PATH_TO_PICKLE, build the EmbeddingModel, fit it with TrainWithLog and
    extract the output-layer weights plus the train-set embeddings.

    Relies on notebook globals: PATH_TO_PICKLE, train_y, test_y, device,
    wandb, EmbeddingModel, TrainWithLog, GetEmbPred, SGD, AdamW.
    """
    # Default values for hyper-parameters we're going to sweep over
    with open(os.path.join(PATH_TO_PICKLE,'n_models_wine_100.pickle'), 'rb') as f:
        n_models = pickle.load(f)
    with open(os.path.join(PATH_TO_PICKLE,'max_ntree_per_split_wine_100.pickle'), 'rb') as f:
        max_ntree_per_split = pickle.load(f)
    with open(os.path.join(PATH_TO_PICKLE,'group_average_wine_100.pickle'), 'rb') as f:
        group_average = pickle.load(f)
    with open(os.path.join(PATH_TO_PICKLE,'leaf_preds_wine_100.pickle'), 'rb') as f:
        leaf_preds = pickle.load(f)
    with open(os.path.join(PATH_TO_PICKLE,'test_leaf_preds_wine_100.pickle'), 'rb') as f:
        test_leaf_preds = pickle.load(f)
    with open(os.path.join(PATH_TO_PICKLE,'tree_outputs_wine_100.pickle'), 'rb') as f:
        tree_outputs = pickle.load(f)
    config_defaults = dict(
        n_models = n_models,
        max_ntree_per_split = max_ntree_per_split,
        group_average = group_average,
        embsize = 20,
        maxleaf = 64,
        task = "regression",
        l2_reg = 1e-6,
        emb_lr = 1e-3,
        emb_epoch = 2,
        batch_size = 512,
        test_batch_size = 100,
        loss_init = 1.0,
        loss_dr = 0.7,
        loss_de = 2,
        log_freq = 500,
        test_freq = 300,
        key = "",
        n_output = train_y.shape[1]
    )
    # Initialize a new wandb run; sweep-chosen values override these defaults
    wandb.init(config=config_defaults)
    # Config is a variable that holds and saves hyperparameters and inputs
    config = wandb.config
    emb_model = EmbeddingModel(config.n_models, config.max_ntree_per_split,
                               config.embsize, config.maxleaf+1, config.n_output,
                               config.group_average, task=config.task).float().to(device)
    if config.optimizer=='sgd':
        opt = SGD(emb_model.parameters(),lr=config.emb_lr, momentum=0.9)
    elif config.optimizer=='adamW':
        opt = AdamW(emb_model.parameters(),lr=config.emb_lr, weight_decay=config.l2_reg)
    # (n_models, n_samples) -> (n_samples, n_models): one target column per tree group
    tree_outputs = np.asarray(tree_outputs).reshape((config.n_models, leaf_preds.shape[0])).transpose((1,0))
    TrainWithLog(config.loss_dr, config.loss_init, config.loss_de, config.log_freq,
                 config.test_freq, config.task, config.test_batch_size,
                 leaf_preds, train_y, tree_outputs, test_leaf_preds, test_y,
                 emb_model, opt, config.emb_epoch, config.batch_size,
                 config.n_output, config.key+"emb-")
    output_w = emb_model.bout.weight.data.cpu().numpy().reshape(config.n_models*config.embsize, config.n_output)
    output_b = np.array(emb_model.bout.bias.data.cpu().numpy().sum())
    train_embs = GetEmbPred(emb_model, emb_model.lastlayer, leaf_preds, config.test_batch_size)
    # free the large leaf-prediction arrays before the next trial
    del tree_outputs, leaf_preds, test_leaf_preds
    gc.collect();

# + (notebook cell)  single local trial
train()

# + (notebook cell)  launch the full sweep
wandb.agent(sweep_id, train)

# + [markdown] (Deepnote attribution footer, HTML comment elided)
experiments/DeepGBM-wandb/embedding-wandb.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # name: python3 # --- # + [markdown] id="tT6lcgsxsYzT" # # Autoencoder RecSys Models on ML-1m # + [markdown] id="MugCaZzrsrM9" # ## Setup # + id="FwYoeMVyJvzL" import numpy as np import pandas as pd import seaborn as sns import matplotlib.pyplot as plt import os,sys,inspect import gc from tqdm.notebook import tqdm import random import heapq from sklearn.preprocessing import LabelEncoder from scipy.sparse import csr_matrix from tensorflow import keras import tensorflow as tf from tensorflow.keras import optimizers, callbacks, layers, losses from tensorflow.keras.layers import Dense, Concatenate, Activation, Add, BatchNormalization, Dropout, Input, Embedding, Flatten, Multiply from tensorflow.keras.models import Model, Sequential, load_model # + id="sB_OJdPrYZSK" SEED = 42 np.random.seed(SEED) tf.random.set_seed(SEED) os.environ['PYTHONHASHSEED']=str(SEED) random.seed(SEED) gpus = tf.config.experimental.list_physical_devices('GPU') # + id="0fxfIpXLTe-B" if gpus: try: tf.config.experimental.set_memory_growth(gpus[0], True) except RuntimeError as e: print(e) # + colab={"base_uri": "https://localhost:8080/"} id="EpIyG5zgtaai" executionInfo={"status": "ok", "timestamp": 1639716148938, "user_tz": -330, "elapsed": 28, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a/default-user=s64", "userId": "13037694610922482904"}} outputId="9d5c6806-8dde-45d4-a715-1639547315ab" print("Num GPUs Available: ", len(tf.config.list_physical_devices('GPU'))) # + colab={"base_uri": "https://localhost:8080/"} id="UlyLzNq1sooD" executionInfo={"status": "ok", "timestamp": 1639716176022, "user_tz": -330, "elapsed": 1132, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a/default-user=s64", "userId": "13037694610922482904"}} 
# (cell output metadata)
# Download MovieLens-1M (notebook shell commands):
# !wget -q --show-progress https://files.grouplens.org/datasets/movielens/ml-1m.zip
# !unzip ml-1m.zip

# + (notebook cell)
def mish(x):
    """Mish activation: x * tanh(softplus(x))."""
    return x*tf.math.tanh(tf.math.softplus(x))

def leakyrelu(x, factor=0.2):
    """Leaky ReLU with negative slope `factor`."""
    return tf.maximum(x, factor*x)

# + (notebook cell)
def load_data(filepath, threshold=0):
    """Read an ML-1m style ratings file (userId::movieId::rating::time).

    Drops the timestamp, binarises ratings (rating > threshold -> 1 else 0;
    all 1.0 when threshold == 0) and re-codes user/movie ids to dense
    0-based category codes.  Returns columns [userId, movieId, rating].
    """
    df = pd.read_csv(filepath, sep="::", header=None, engine='python',
                     names=['userId', 'movieId', 'rating', 'time'])
    df = df.drop('time', axis=1)
    df['userId'] = df['userId'].astype(int)
    df['movieId'] = df['movieId'].astype(int)
    df['rating'] = df['rating'].astype(float)
    df = df[['userId', 'movieId', 'rating']]
    if threshold > 0:
        df['rating'] = np.where(df['rating']>threshold, 1, 0)
    else:
        df['rating'] = 1.
    m_codes = df['movieId'].astype('category').cat.codes
    u_codes = df['userId'].astype('category').cat.codes
    df['movieId'] = m_codes
    df['userId'] = u_codes
    return df

def add_negative(df, uiid, times=4):
    """Append `times` sampled negatives (rating 0) per positive, per user.

    `uiid` is the full item-id universe to sample unseen items from.
    """
    df_ = df.copy()
    user_id = df_['userId'].unique()
    item_id = df_['movieId'].unique()
    for i in tqdm(user_id):
        n = len(df_[df_['userId']==i])
        n_negative = min(n*times, len(item_id)-n-1)
        available_negative = list(set(uiid) - set(df[df['userId']==i]['movieId'].values))
        new = np.random.choice(available_negative, n_negative, replace=False)
        new = [[i, j, 0] for j in new]
        # DataFrame.append was removed in pandas 2.0 -- concat is the
        # behaviour-identical replacement.
        df_ = pd.concat([df_, pd.DataFrame(new, columns=df.columns)], ignore_index=True)
    return df_

def extract_from_df(df, n_positive, n_negative):
    """Sample `n_positive` positive and `n_negative` negative row indices
    per user (used to hold out a test set).  Returns a flat index list."""
    rtd = []
    user_id = df['userId'].unique()
    for i in tqdm(user_id):
        # Combined boolean mask.  The previous chained-mask form
        # (df[df['userId']==i][df['rating']==1]) relied on pandas'
        # implicit mask reindexing and emitted a UserWarning; this form
        # selects the same rows explicitly.
        rtd += list(np.random.choice(df[(df['userId']==i) & (df['rating']==1)]['movieId'].index,
                                     n_positive, replace=False))
        rtd += list(np.random.choice(df[(df['userId']==i) & (df['rating']==0)]['movieId'].index,
                                     n_negative, replace=False))
    return rtd

# + (notebook cell)
def eval_NDCG(true, pred):
    """NDCG with a single relevant item: 1/log2(rank+1) if `true` appears in
    the ranked list `pred` (1-based rank), else 0."""
    for i, item in enumerate(pred, 1):
        if item == true:
            return 1 / np.log2(i+1)
    return 0

# + [markdown] ## CDAE  /  ### Load data (metadata elided)
id="1sAQYk5eYv1j" # ### Load data # + colab={"base_uri": "https://localhost:8080/", "height": 215} id="rCg36MGFTue5" executionInfo={"status": "ok", "timestamp": 1639716202713, "user_tz": -330, "elapsed": 5930, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a/default-user=s64", "userId": "13037694610922482904"}} outputId="d87757a9-77d6-4c1f-e057-5adab8f37e60" df = load_data('./ml-1m/ratings.dat', threshold=3) df.head() # + [markdown] id="uo-H-H5yZjSI" # ### Preprocessing # + id="4_0IEsjWM4nI" df = df[df['rating']==1].reset_index(drop=True) tdf = pd.pivot_table(df, index='userId', values='rating', columns='movieId').fillna(0) cnt = tdf.sum(1) df = df[df['userId'].isin(np.where(cnt >= 10)[0])].reset_index(drop=True) tdf = pd.pivot_table(df, index='userId', values='rating', columns='movieId').fillna(0) tdf.iloc[:,:] = 0 test_idx = [] for i in tdf.index: test_idx += list(np.random.choice(df[df['userId']==i].index, 1)) train = df.loc[list(set(df.index)-set(test_idx)),:] test = df.loc[test_idx, :] # + colab={"base_uri": "https://localhost:8080/", "height": 424} id="QCEA3JWYNgHT" executionInfo={"status": "ok", "timestamp": 1639716243239, "user_tz": -330, "elapsed": 49, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a/default-user=s64", "userId": "13037694610922482904"}} outputId="3077ce6b-b907-4e0d-b2b2-94c6e019dc42" df # + colab={"base_uri": "https://localhost:8080/"} id="Ov0ZmaZabiVe" executionInfo={"status": "ok", "timestamp": 1639716249445, "user_tz": -330, "elapsed": 425, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a/default-user=s64", "userId": "13037694610922482904"}} outputId="f4c9d415-cd86-4274-8d0e-dca78697f41d" df.shape, train.shape, test.shape # + colab={"base_uri": "https://localhost:8080/", "height": 470} id="AyFVjZiUNtBV" executionInfo={"status": "ok", "timestamp": 1630835048249, "user_tz": -330, "elapsed": 51728, "user": {"displayName": "<NAME>", 
# (cell metadata continuation)
# Fill the user x item matrix with the TRAIN interactions only.
for uid, iid in zip(train['userId'].values, train['movieId'].values):
    tdf.loc[uid, iid] = 1
train = tdf.copy()
train

# + [markdown] ### Model architecture

# + (notebook cell)
class CDAE(tf.keras.models.Model):
    """Collaborative Denoising Auto-Encoder: an item-vector auto-encoder
    whose hidden layer is offset by a per-user embedding."""

    def __init__(self, input_dim, latent_dim, n_user, lamda=1e-4):
        super().__init__()
        self.input_dim = input_dim
        self.latent_dim = latent_dim
        self.lamda = lamda  # NOTE(review): stored but never used (no regulariser applied)
        self.n_user = n_user
        self.embedding = Embedding(n_user, latent_dim, )
        self.model = self.build()

    def compile(self, optimizer, loss_fn=None):
        # loss_fn kept for interface symmetry; train_step computes BCE itself
        super().compile()
        self.optimizer = optimizer
        self.loss_fn = loss_fn

    def build(self):
        """Wire encoder + user embedding + decoder into one Model."""
        self.encoder = self.build_encoder()
        self.decoder = self.build_decoder()
        rating = Input(shape=(self.input_dim, ), name='rating_input')
        user_id = Input(shape=(1, ), name='user_input')
        emb = self.embedding(user_id)
        emb = tf.squeeze(emb, 1)
        enc = self.encoder(rating) + emb  # user-specific hidden offset
        enc = tf.nn.tanh(enc)
        outputs = self.decoder(enc)
        return Model([rating, user_id], outputs)

    def build_encoder(self):
        inputs = Input(shape = (self.input_dim, ))
        encoder = Sequential()
        encoder.add(Dropout(0.2))  # input corruption ("denoising")
        encoder.add(Dense(self.latent_dim, activation='tanh'))
        outputs = encoder(inputs)
        return Model(inputs, outputs)

    def build_decoder(self):
        inputs = Input(shape = (self.latent_dim, ))
        encoder = Sequential()
        encoder.add(Dense(self.input_dim, activation='sigmoid'))
        outputs = encoder(inputs)
        return Model(inputs, outputs)

    def train_step(self, data):
        """One optimisation step on a {'rating': ..., 'id': ...} batch."""
        x = data['rating']
        user_ids = data['id']
        with tf.GradientTape() as tape:
            pred = self.model([x, user_ids])
            rec_loss = tf.losses.binary_crossentropy(x, pred)
            loss = rec_loss
        grads = tape.gradient(loss, self.model.trainable_weights)
        self.optimizer.apply_gradients(zip(grads, self.model.trainable_weights))
        return {'loss': loss}

# + [markdown] ### Training
# + (notebook cell, metadata elided)
# (cell metadata continuation)
loader = tf.data.Dataset.from_tensor_slices({'rating': train.values, 'id': np.arange(len(train))})
loader = loader.batch(32, drop_remainder=True).shuffle(len(train))

model = CDAE(train.shape[1], 200, len(train))
model.compile(optimizer=tf.optimizers.Adam())
model.fit(loader, epochs=25)

# + [markdown] ### Evaluation
# Leave-one-out NDCG@10 over 100 sampled users: rank each user's held-out
# item against 100 sampled unseen negatives.
top_k = 10
np.random.seed(42)
scores = []
for idx, i in tqdm(enumerate(np.random.choice(train.index, 100))):
    item_to_pred = {item: pred for item, pred in
                    zip(train.columns, model.model.predict([train.values, np.arange(len(train))])[idx])}
    test_ = test[(test['userId']==i) & (test['rating']==1)]['movieId'].values
    items = list(np.random.choice(list(filter(lambda x: x not in np.argwhere(train.values[idx]).flatten(),
                                              item_to_pred.keys())), 100)) + list(test_)
    top_k_items = heapq.nlargest(top_k, items, key=item_to_pred.get)
    score = eval_NDCG(test_, top_k_items)
    scores.append(score)
np.mean(scores)

# + [markdown] ## EASE  /  ### Load data
# + (notebook cell, metadata elided)
# (cell metadata continuation)
df = load_data('./ml-1m/ratings.dat', threshold=3)
df.head()

# + [markdown] ### Preprocessing
# Hold out one random interaction per user as the test set.
test_idx = []
user_id = df  # NOTE(review): looks like a leftover assignment; never used below
for i in df['userId'].unique():
    test_idx += list(np.random.choice(df[df['userId']==i].index, 1))
train = df.iloc[list(set(df.index)-set(test_idx)),:]
test = df.iloc[test_idx, :]

# + (notebook cell)
df.shape, train.shape, test.shape

# + [markdown] ### Model architecture

# + (notebook cell)
class EASE:
    """Embarrassingly Shallow Auto-Encoder: closed-form item-item weight
    matrix B computed from the Gram matrix of the user-item matrix."""

    def __init__(self):
        self.user_enc = LabelEncoder()
        self.item_enc = LabelEncoder()

    def _get_users_and_items(self, df):
        # Fit dense integer codes for users and items.
        users = self.user_enc.fit_transform(df.loc[:, 'userId'])
        items = self.item_enc.fit_transform(df.loc[:, 'movieId'])
        return users, items

    def fit(self, df, lambda_: float = 0.5, implicit=True):
        """
        df: pandas.DataFrame with columns user_id, item_id and (rating)
        lambda_: l2-regularization term
        implicit: if True, ratings are ignored and taken as 1, else normalized ratings are used
        """
        users, items = self._get_users_and_items(df)
        values = np.ones(df.shape[0]) if implicit else df['rating'].to_numpy() / df['rating'].max()
        X = csr_matrix((values, (users, items)))
        self.X = X
        G = X.T.dot(X).toarray()        # item-item Gram matrix
        diagIndices = np.diag_indices(G.shape[0])
        G[diagIndices] += lambda_       # ridge regularisation on the diagonal
        P = np.linalg.inv(G)
        B = P / (-np.diag(P))           # closed-form solution
        B[diagIndices] = 0              # enforce zero self-similarity
        self.B = B
        self.pred = X.dot(B)            # dense score matrix, users x items

    def predict(self, train, users, items, k):
        """Top-k unseen-item recommendations per user as a long DataFrame
        (userId, movieId, score)."""
        df = pd.DataFrame()
        items = self.item_enc.transform(items)
        dd = train.loc[train['userId'].isin(users)]
        # NOTE(review): assignments on a .loc slice trigger SettingWithCopyWarning
        dd['ci'] = self.item_enc.transform(dd['movieId'])
        dd['cu'] = self.user_enc.transform(dd['userId'])
        g = dd.groupby('userId')
        for user, group in tqdm(g):
            watched = set(group['ci'])
            candidates = [item for item in items if item not in watched]
            u = group['cu'].iloc[0]
            pred = np.take(self.pred[u, :], candidates)
            res = np.argpartition(pred, -k)[-k:]  # unordered top-k, sorted below
            r = pd.DataFrame({
                "userId": [user] * len(res),
                "movieId": np.take(candidates, res),
                "score": np.take(pred, res)
            }).sort_values('score', ascending=False)
            # NOTE(review): DataFrame.append was removed in pandas 2.0
            df = df.append(r, ignore_index=True)
        df['movieId'] = self.item_enc.inverse_transform(df['movieId'])
        return df

# + [markdown] ### Training
ease = EASE()
ease.fit(train)

# + (notebook cell)  sanity checks on the fitted model
uid = 0
ease.user_enc.inverse_transform([0])[0]

# + (notebook cell)
ease.item_enc.inverse_transform(np.argsort(ease.pred[0]))

# + (notebook cell)
np.argsort(-ease.pred[0])

# + (notebook cell)
ease.pred[0][np.argsort(-ease.pred[0])]

# + (notebook cell)
np.unique(train[train['userId']==0]['movieId'])

# + [markdown] ### Evaluation
pred = ease.predict(train, train['userId'].unique(), train['movieId'].unique(), 100)
pred

# + (notebook cell)  overlap of recommendations with user 1's interactions
uid = 1
df[(df['userId']==uid) & (df['movieId'].isin(pred[pred['userId']==uid]['movieId']))]

# + (notebook cell, metadata elided)
# (cell metadata continuation)
train[(train['userId']==uid) & (train['movieId'].isin(pred[pred['userId']==uid]['movieId']))]

# + (notebook cell)
for uid in range(942):
    pdf = df[(df['userId']==uid) & (df['movieId'].isin(pred[pred['userId']==uid]['movieId']))]

# + (notebook cell)
ease.pred.shape

# + (notebook cell)
train['userId'].unique().shape, train['movieId'].unique().shape,

# + [markdown] ## MultiVAE  /  ### Load data
df = load_data('./ml-1m/ratings.dat', threshold=3)
df.head()

# + [markdown] ### Preprocessing
# Positives only, users with >= 10 interactions, one held-out item per user;
# then binarise into a user x item matrix (same recipe as the CDAE section).
df = df[df['rating']==1].reset_index(drop=True)
tdf = pd.pivot_table(df, index='userId', values='rating', columns='movieId').fillna(0)
cnt = tdf.sum(1)
df = df[df['userId'].isin(np.where(cnt >= 10)[0])].reset_index(drop=True)
tdf = pd.pivot_table(df, index='userId', values='rating', columns='movieId').fillna(0)
tdf.iloc[:,:] = 0
test_idx = []
for i in tdf.index:
    test_idx += list(np.random.choice(df[df['userId']==i].index, 1))
train = df.iloc[list(set(df.index)-set(test_idx)),:]
test = df.iloc[test_idx, :]
for uid, iid in zip(train['userId'].values, train['movieId'].values):
    tdf.loc[uid, iid] = 1
train = tdf.copy()

def sampling(args):
    """Reparameterisation trick: z = mu + exp(logvar/2) * eps, eps ~ N(0, 0.01)."""
    z_mean, z_log_var = args
    batch = tf.shape(z_mean)[0]
    dim = tf.shape(z_mean)[1]
    epsilon = tf.random.normal(shape=(batch, dim), stddev=0.01)
    return z_mean + tf.exp(0.5 * z_log_var) * epsilon

# + (notebook cell)
df.shape, train.shape, test.shape

# + [markdown] ### Model architecture

# + (notebook cell)
class MultVAE(tf.keras.models.Model):
    """Multinomial VAE for implicit feedback, with an annealed KL weight
    (`self.anneal`, raised per-batch by AnnealCallback below)."""

    def __init__(self, input_dim, latent_dim, lamda=1e-4):
        super().__init__()
        self.input_dim = input_dim
        self.latent_dim = latent_dim
        self.anneal = 0.  # current KL weight
        self.model = self.build()

    def compile(self, optimizer, loss_fn=None):
        super().compile()
        self.optimizer = optimizer
        self.loss_fn = loss_fn

    def build(self):
        self.encoder = self.build_encoder()
        self.decoder = self.build_decoder()
        inputs = self.encoder.input
        mu, log_var = self.encoder(inputs)
        h = sampling([mu, log_var])
        outputs = self.decoder(h)
        return Model(inputs, outputs)

    def build_encoder(self):
        inputs = Input(shape = (self.input_dim, ))
        h = Dropout(0.2)(inputs)
        mu = Dense(self.latent_dim)(h)
        log_var = Dense(self.latent_dim)(h)
        return Model(inputs, [mu, log_var])

    def build_decoder(self):
        inputs = Input(shape = (self.latent_dim, ))
        outputs = Dense(self.input_dim, activation='sigmoid')(inputs)
        return Model(inputs, outputs)

    def train_step(self, data):
        x = data
        with tf.GradientTape() as tape:
            mu, log_var = self.encoder(x)
            pred = self.model(x)
            # closed-form KL(q(z|x) || N(0, I))
            kl_loss = tf.reduce_mean(tf.reduce_sum(0.5*(log_var + tf.exp(log_var) + tf.pow(mu, 2)-1), 1, keepdims=True))
            # multinomial log-likelihood term
            ce_loss = -tf.reduce_mean(tf.reduce_sum(tf.nn.log_softmax(pred) * x, -1))
            loss = ce_loss + kl_loss*self.anneal
        grads = tape.gradient(loss, self.model.trainable_weights)
        self.optimizer.apply_gradients(zip(grads, self.model.trainable_weights))
        return {'loss': loss}

    def predict(self, data):
        # deterministic prediction: decode the posterior mean (no sampling)
        mu, log_var = self.encoder(data)
        return self.decoder(mu)

# + [markdown] ### Training
loader = tf.data.Dataset.from_tensor_slices(train.values.astype(np.float32))
loader = loader.batch(8, drop_remainder=True).shuffle(len(train))
model = MultVAE(train.shape[1], 200)
model.compile(optimizer=tf.optimizers.Adam())

# + (notebook cell)
class AnnealCallback(callbacks.Callback):
    """Linearly increase the KL weight each batch, capped at 0.3."""
    def __init__(self):
        super().__init__()
        self.anneal_cap = 0.3
    def on_train_batch_end(self, batch, logs=None):
        self.model.anneal = min(self.anneal_cap, self.model.anneal+1e-4)

# + (notebook cell)
model.fit(loader, epochs=25, callbacks=[AnnealCallback()])

# + [markdown] ### Evaluation
# + (notebook cell, metadata elided)
# (cell metadata continuation)
# Leave-one-out NDCG@10 over 100 sampled users (same protocol as CDAE).
top_k = 10
np.random.seed(42)
scores = []
for idx, i in tqdm(enumerate(np.random.choice(train.index, 100))):
    item_to_pred = {item: pred for item, pred in
                    zip(train.columns, model.model.predict(train.values)[idx])}
    test_ = test[(test['userId']==i) & (test['rating']==1)]['movieId'].values
    items = list(np.random.choice(list(filter(lambda x: x not in np.argwhere(train.values[idx]).flatten(),
                                              item_to_pred.keys())), 100)) + list(test_)
    top_k_items = heapq.nlargest(top_k, items, key=item_to_pred.get)
    score = eval_NDCG(test_, top_k_items)
    scores.append(score)
np.mean(scores)

# + [markdown] ## DAE  /  ### Load data
df = load_data('./ml-1m/ratings.dat', threshold=3)
df.head()

# + [markdown] ### Preprocessing
# Same recipe again: positives only, >= 10 interactions, one held-out
# interaction per user.
df = df[df['rating']==1].reset_index(drop=True)
tdf = pd.pivot_table(df, index='userId', values='rating', columns='movieId').fillna(0)
cnt = tdf.sum(1)
df = df[df['userId'].isin(np.where(cnt >= 10)[0])].reset_index(drop=True)
tdf = pd.pivot_table(df, index='userId', values='rating', columns='movieId').fillna(0)
tdf.iloc[:,:] = 0
test_idx = []
for i in tdf.index:
    test_idx += list(np.random.choice(df[df['userId']==i].index, 1))
train = df.loc[list(set(df.index)-set(test_idx)),:]
test = df.loc[test_idx, :]

# + (notebook cell)
df

# + (notebook cell, metadata elided)
# (cell metadata continuation)
df.shape, train.shape, test.shape

# + (notebook cell)
# Fill the user x item matrix with the TRAIN interactions only.
for uid, iid in zip(train['userId'].values, train['movieId'].values):
    tdf.loc[uid, iid] = 1
train = tdf.copy()
train

# + [markdown] ### Model architecture

# + (notebook cell)
class DAE(tf.keras.models.Model):
    """Denoising Auto-Encoder over binary user-item vectors (CDAE without
    the per-user embedding)."""

    def __init__(self, input_dim, latent_dim, lamda=1e-4):
        super().__init__()
        self.input_dim = input_dim
        self.latent_dim = latent_dim
        self.lamda = lamda  # NOTE(review): stored but never used
        self.model = self.build()

    def compile(self, optimizer, loss_fn=None):
        super().compile()
        self.optimizer = optimizer
        self.loss_fn = loss_fn

    def build(self):
        self.encoder = self.build_encoder()
        self.decoder = self.build_decoder()
        inputs = self.encoder.input
        outputs = self.decoder(self.encoder(inputs))
        return Model(inputs, outputs)

    def build_encoder(self):
        inputs = Input(shape = (self.input_dim, ))
        encoder = Sequential()
        encoder.add(Dropout(0.2))  # input corruption ("denoising")
        encoder.add(Dense(self.latent_dim, activation='tanh'))
        outputs = encoder(inputs)
        return Model(inputs, outputs)

    def build_decoder(self):
        inputs = Input(shape = (self.latent_dim, ))
        encoder = Sequential()
        encoder.add(Dense(self.input_dim, activation='sigmoid'))
        outputs = encoder(inputs)
        return Model(inputs, outputs)

    def train_step(self, x):
        with tf.GradientTape() as tape:
            pred = self.model(x)
            rec_loss = tf.losses.binary_crossentropy(x, pred)
            loss = rec_loss
        grads = tape.gradient(loss, self.model.trainable_weights)
        self.optimizer.apply_gradients(zip(grads, self.model.trainable_weights))
        return {'loss': loss}

# + [markdown] ### Training
# + (notebook cell, metadata elided)
# (cell metadata continuation)
loader = tf.data.Dataset.from_tensor_slices(train.values)
# NOTE(review): shuffle buffer is len(df), not len(train) as elsewhere -- confirm intent
loader = loader.batch(32, drop_remainder=True).shuffle(len(df))
model = DAE(train.shape[1], 200)
model.compile(optimizer=tf.optimizers.Adam())
model.fit(loader, epochs = 25)

# + [markdown] ### Evaluation
# Leave-one-out NDCG@10 over 100 sampled users (same protocol as CDAE).
top_k = 10
np.random.seed(42)
scores = []
for idx, i in tqdm(enumerate(np.random.choice(train.index, 100))):
    item_to_pred = {item: pred for item, pred in
                    zip(train.columns, model.model.predict(train.values)[idx])}
    test_ = test[(test['userId']==i) & (test['rating']==1)]['movieId'].values
    items = list(np.random.choice(list(filter(lambda x: x not in np.argwhere(train.values[idx]).flatten(),
                                              item_to_pred.keys())), 100)) + list(test_)
    top_k_items = heapq.nlargest(top_k, items, key=item_to_pred.get)
    score = eval_NDCG(test_, top_k_items)
    scores.append(score)
np.mean(scores)

# + [markdown] ## RecVAE  /  ### Load data
# + (notebook cell, metadata elided)
# (cell metadata continuation)
df = load_data('./ml-1m/ratings.dat', threshold=3)
df.head()

# + [markdown] ### Preprocessing
# Same recipe: positives only, >= 10 interactions per user, one held-out
# interaction per user, then a float32 user x item matrix and a tf Dataset.
df = df[df['rating']==1].reset_index(drop=True)
tdf = pd.pivot_table(df, index='userId', values='rating', columns='movieId').fillna(0)
cnt = tdf.sum(1)
df = df[df['userId'].isin(np.where(cnt >= 10)[0])].reset_index(drop=True)
tdf = pd.pivot_table(df, index='userId', values='rating', columns='movieId').fillna(0)
tdf.iloc[:,:] = 0
test_idx = []
for i in tdf.index:
    test_idx += list(np.random.choice(df[df['userId']==i].index, 1))
train = df.iloc[list(set(df.index)-set(test_idx)),:]
test = df.iloc[test_idx, :]
for uid, iid in zip(train['userId'].values, train['movieId'].values):
    tdf.loc[uid, iid] = 1
train = tdf.copy().astype(np.float32)
loader = tf.data.Dataset.from_tensor_slices(train.values.astype(np.float32))
loader = loader.batch(8, drop_remainder=True).shuffle(len(train))

# + [markdown] ### Model architecture

# + (notebook cell)
def log_norm_pdf(x, mu, logvar):
    """Elementwise log-density of N(mu, exp(logvar)) evaluated at x."""
    return -0.5*(logvar + tf.math.log(2 * np.pi) + tf.pow((x - mu), 2) / tf.exp(logvar))

def sampling(args):
    """Reparameterisation trick (NOTE: redefines the earlier `sampling`)."""
    z_mean, z_log_var = args
    batch = tf.shape(z_mean)[0]
    dim = tf.shape(z_mean)[1]
    epsilon = tf.random.normal(shape=(batch, dim), stddev=0.01)
    return z_mean + tf.exp(0.5 * z_log_var) * epsilon

# + (notebook cell)
class CompositePrior(tf.keras.models.Model):
    """RecVAE composite prior: log-density of a mixture of a standard
    normal, the previous-epoch posterior (frozen encoder copy), and a wide
    'uniform-like' normal, with the given mixture weights."""

    def __init__(self, x_dim, latent_dim, mixture_weights = [3/20, 15/20, 2/20]):
        super().__init__()
        self.encoder_old = Encoder(x_dim, latent_dim, dropout_rate=0)  # snapshot target
        self.latent_dim = latent_dim
        self.mixture_weights = mixture_weights
        self.mu_prior = self.add_weight(shape=(self.latent_dim, ), initializer = tf.zeros_initializer(), trainable=False)
        self.logvar_prior = self.add_weight(shape=(self.latent_dim, ), initializer = tf.zeros_initializer(), trainable=False)
        # large variance -> nearly flat ("uniform") component
        self.logvar_unif_prior = self.add_weight(shape=(self.latent_dim, ), initializer = tf.constant_initializer(10), trainable=False)

    def call(self, x, z):
        post_mu, post_logvar = self.encoder_old(x)
        stnd_prior = log_norm_pdf(z, self.mu_prior, self.logvar_prior)
        post_prior = log_norm_pdf(z, post_mu, post_logvar)
        unif_prior = log_norm_pdf(z, self.mu_prior, self.logvar_unif_prior)
        gaussians = [stnd_prior, post_prior, unif_prior]
        gaussians = [g+tf.math.log(w) for g, w in zip(gaussians, self.mixture_weights)]
        density = tf.stack(gaussians, -1)
        return tf.math.log(tf.reduce_sum(tf.exp(density), -1)) # logsumexp

# + (notebook cell)
class Encoder(tf.keras.models.Model):
    """L2-normalising encoder producing (mu, logvar)."""

    def __init__(self, x_dim, latent_dim, dropout_rate = 0.1):
        super().__init__()
        self.latent_dim = latent_dim
        self.x_dim = x_dim
        self.dropout_rate = dropout_rate
        self.model = self.build_model()

    def build_model(self):
        # now just shallow net
        x_in = Input(shape=(self.x_dim, ))
        h = Dense(1024, activation='relu')(x_in)
        mu = Dense(self.latent_dim)(h)
        logvar = Dense(self.latent_dim)(h)
        return Model(x_in, [mu, logvar])

    def call(self, x):
        # L2-normalise each user vector before encoding
        norm = tf.sqrt(tf.reduce_sum(tf.pow(x, 2), -1, keepdims=True))
        x = x/norm
        if self.dropout_rate>0:
            x = Dropout(self.dropout_rate)(x)
        return self.model(x)

class RecVAE(tf.keras.models.Model):
    """RecVAE: encoder and decoder trained alternately, with a composite
    prior refreshed from the encoder between phases (update_prior)."""

    def __init__(self, x_dim, latent_dim):
        super().__init__()
        self.encoder = Encoder(x_dim, latent_dim)
        self.decoder = Dense(x_dim)
        self.prior = CompositePrior(x_dim, latent_dim)

    def call(self, data):
        mu, logvar = self.encoder(data)
        z = sampling([mu, logvar])
        recon = self.decoder(z)
        return mu, logvar, z, recon

    def predict(self, data):
        mu, logvar = self.encoder(data)
        z = sampling([mu, logvar])
        recon = self.decoder(z)
        return recon

    def update_prior(self):
        # snapshot the current encoder as the prior's "old posterior"
        self.prior.encoder_old.set_weights(self.encoder.get_weights())

# + [markdown] ### Training
def tf_train(model, loader, optimizer, target, gamma=1.):
    """One pass over `loader`, updating only the `target` ('encoder' or
    'decoder') weights.  (Body continues on the next source line.)"""
    total_loss = 0.
for x in loader: norm = tf.reduce_sum(x, -1, keepdims=True) kl_weight = gamma*norm with tf.GradientTape() as tape: mu, logvar, z, pred = model(x) # kl_loss = tf.reduce_mean(tf.reduce_sum(0.5*(logvar + tf.exp(logvar) + tf.pow(mu, 2)-1), 1, keepdims=True)) kl_loss = tf.reduce_mean(log_norm_pdf(z, mu, logvar) - tf.multiply(model.prior(x, z), kl_weight)) ce_loss = -tf.reduce_mean(tf.reduce_sum(tf.nn.log_softmax(pred) * x, -1)) loss = ce_loss + kl_loss*kl_weight if target == 'encoder': grads = tape.gradient(loss, model.encoder.trainable_weights) optimizer.apply_gradients(zip(grads, model.encoder.trainable_weights)) else: grads = tape.gradient(loss, model.decoder.trainable_weights) optimizer.apply_gradients(zip(grads, model.decoder.trainable_weights)) total_loss += tf.reduce_sum(loss) return total_loss # + id="FzTZPD9CYfpj" epochs = 25 model = RecVAE(train.shape[1], 200) enc_opt = optimizers.Adam() dec_opt = optimizers.Adam() for e in range(epochs): # alternating ## train step tf_train(model, loader, enc_opt, 'encoder') model.update_prior() tf_train(model, loader, dec_opt, 'decoder') ## eval step # + [markdown] id="bhZuJedDX-8X" # ### Evaluation # + colab={"base_uri": "https://localhost:8080/", "height": 66, "referenced_widgets": ["87c9bc69718d40c0acd55be8b3d028c3", "592a360bb8f74fd690223f2e4bb14f0e", "291c4790efeb43b68b78eaef6a99ced7", "8d0ed0b8a3e94733aad8d23ae0265d3c", "<KEY>", "32727136c19e46558e6016eea4fa6fec", "fa3e40aea0f14e4db3be250b51c8ede0", "7a486090a89343168b6c82943865733c", "57b0ddec4be6490b8da427622de4ebac", "2d1191aacad24eba93d354e26e6ee37b", "9940ceebfb384b6da8e6d5deffcce3a4"]} id="KgYQGSPBYUCb" executionInfo={"status": "ok", "timestamp": 1630838390198, "user_tz": -330, "elapsed": 184666, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "13037694610922482904"}} outputId="b760dcd0-9831-49d3-c3ee-3ab95dc24eda" top_k = 10 np.random.seed(42) scores = [] for idx, i in tqdm(enumerate(np.random.choice(train.index, 100))): item_to_pred = {item: 
pred.numpy() for item, pred in zip(train.columns, model.predict(train.values)[idx])} test_ = test[(test['userId']==i) & (test['rating']==1)]['movieId'].values items = list(np.random.choice(list(filter(lambda x: x not in np.argwhere(train.values[idx]).flatten(), item_to_pred.keys())), 100)) + list(test_) top_k_items = heapq.nlargest(top_k, items, key=item_to_pred.get) score = eval_NDCG(test_, top_k_items) scores.append(score) # break np.mean(scores) # + [markdown] id="RAaqLy1UtcIC" # --- # + colab={"base_uri": "https://localhost:8080/"} id="Jit1oP3jtd7k" executionInfo={"status": "ok", "timestamp": 1639716362410, "user_tz": -330, "elapsed": 4112, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a/default-user=s64", "userId": "13037694610922482904"}} outputId="8470ae76-a435-4a4f-f909-351ed5e37fed" # !pip install -q watermark # %reload_ext watermark # %watermark -a "Sparsh A." -m -iv -u -t -d # + [markdown] id="qrYL9Jx-tcIF" # --- # + [markdown] id="pZR6MBOZtcIG" # **END**
_notebooks/2022-01-13-ae-ml.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# +
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
from pathlib import Path

import am_sim as ams

# +
from utilities.analyze_inference import best_par_in_df

# load inferred parameter set
inference_path = Path('inference_results/t_final_search_history.csv')
search_df = pd.read_csv(inference_path, index_col=0)
par = best_par_in_df(search_df)

# path to save results
save_path = Path('figures/fig_2')

# +
from utilities.simulate_stoch_GC_evo import initialize_GC_with_stoch_pop, evolve_GC_till_extinction

# set random seed for reproducibility
np.random.seed(21)

# high and low Ag dosages
D_low = 1.
D_high = 10.

# extract initial population, the same for the two simulations
init_st_pop = ams.stoch_pop(par)

# Create two GCs with the same initial population
GC_low = initialize_GC_with_stoch_pop(D_inj=D_low, par=par, st_pop=init_st_pop)
GC_high = initialize_GC_with_stoch_pop(D_inj=D_high, par=par,st_pop=init_st_pop)

# evolve the GCs, returns dictionary of results.
# The dictionary contains info on the final MC/PC population,
# population evolution and Ag evolution.
# See the function definition for more info on the form of the dictionary.
res_low = evolve_GC_till_extinction(par=par, GC=GC_low)
res_high = evolve_GC_till_extinction(par=par, GC=GC_high)
# -

# ### Panel B

# +
# define global parameters of the plots

# weeks at which to plot the population distribution
plot_weeks = [1,2,4,8,12]
# plot x-limits, will be the same for panel B and D
xlims = [-24,-10]
# binning of the histograms, will be the same for panel B and D
bins = np.linspace(xlims[0]-20, xlims[1]+20,150)
# colors for the two distributions (low and high dosages respectively)
colors = ['C0', 'C1']

# +
# function to extract the population corresponding to a specific day
# from the dictionary of results; returns None if the GC is already extinct
def select_pop_from_day(res, day):
    pop_en = res['GC_pop_en']
    time = res['GC_t']
    time_idxs = np.argwhere(time == day).flatten()
    if time_idxs.size > 0:
        time_idx = time_idxs[0]
        return pop_en[time_idx]
    else:
        return None


# initialize figure: one stacked histogram panel per week
fig, ax = plt.subplots(len(plot_weeks),1, sharex=True, sharey=True, figsize=(5,6))

# for the high and low dosage cases
for n_res, res in enumerate([res_low, res_high]):
    # and for every week
    for nw, w in enumerate(plot_weeks):
        # extract list of binding energy of the population at the specified time
        pop_en = select_pop_from_day(res, day=w*7.)
        if pop_en is not None:
            # if the population is alive at this time then plot the histogram
            ax[nw].hist(pop_en, bins=bins, color=colors[n_res], alpha=0.5)

# for every plot subplot specify the time:
for nw, w in enumerate(plot_weeks):
    ax[nw].text(0.95, 0.75,f't = {w} week' + 's'*(w>1), transform=ax[nw].transAxes, color='gray', horizontalalignment='right')

# add injected dosages for the two cases
ax[0].text(0.05, 0.75,f'Ag. injected D = {int(D_low)} ' + r'$\mu g$', transform=ax[0].transAxes, color=colors[0])
ax[0].text(0.05, 0.55,f'Ag. injected D = {int(D_high)} ' + r'$\mu g$', transform=ax[0].transAxes, color=colors[1])

# figure and axes adjustments
ax[-1].set_xlabel(r'$\epsilon$')
plt.xlim(xlims)
plt.subplots_adjust(hspace=0)

# save and show figure
plt.savefig(save_path / 'pop_hist.pdf')
plt.savefig(save_path / 'pop_hist.svg')
plt.show()
# -

# ### Panel C

# +
# setup figure: Ag concentration / population size / average energy vs. time
fig, ax = plt.subplots(3,1, sharex=True, figsize=(5,5))

# mark with vertical gray lines the time-points at which the histogram is plotted in panel B
for nw, w in enumerate(plot_weeks):
    for a in ax:
        a.axvline(w, c='grey', ls='-', linewidth=5, alpha=0.2)

D_list = [D_low, D_high]

# for both the high and low dosage cases
for nr, res in enumerate([res_low, res_high]):
    # extract quantities to plot from the result dictionary
    t, N, avg_eps = [res[lab] for lab in ['GC_t', 'GC_N', 'GC_avg_eps']]
    ag_t, ag_C = [res[lab] for lab in ['ag_t', 'ag_C']]
    # plot Ag evolution (time converted from days to weeks)
    ax[0].plot(ag_t / 7., ag_C, '.',label=f'D = {int(D_list[nr])} ' + r'$\mu g$', c=colors[nr])
    # plot number of cells
    ax[1].plot(t / 7, N, '.', c=colors[nr])
    # plot average energy
    ax[2].plot(t/7, avg_eps, '.', c=colors[nr])

# set labels, limits and scale axis 0
ax[0].set_ylabel('Ag. concentration')
ax[0].set_yscale('log')
ax[0].legend()
ax[0].set_xlim(left=0.9)

# set labels, limits and scale axis 1
ax[1].set_yscale('log')
ax[1].set_ylabel('N. B-cells')
ax[1].set_yticks([1,10,100,1000])

# set labels, limits and scale axis 2
ax[2].set_ylabel(r'avg. binding energy')
ax[2].set_xlabel('t (weeks)')
ax[2].set_yticks([-22, -20,-18,-16, -14])
ax[2].set_xticks(np.append([1], np.arange(2,18,2)))

# save and show figure
plt.savefig(save_path / 'pop_evo.pdf')
plt.savefig(save_path / 'pop_evo.svg')
plt.show()
# -

# ### Panel D

# +
# setup figure: final memory-cell (MC) and plasma-cell (PC) energy distributions
fig, ax = plt.subplots(2,1, sharex=True, sharey=False, figsize=(5,3))

# for the high and low dosage cases
for n_res, res in enumerate([res_low, res_high]):
    # plot MC binding energy histogram
    MC_en = res['MC_en']
    ax[0].hist(MC_en, color=colors[n_res], alpha=0.5, bins=bins)
    # plot PC binding energy histogram
    PC_en = res['PC_en']
    ax[1].hist(PC_en, color=colors[n_res], alpha=0.5, bins=bins)

# figure and axes adjustments
ax[0].set_ylabel('final MC pop.')
ax[1].set_ylabel('final PC pop.')
ax[-1].set_xlabel(r'$\epsilon$')
plt.xlim(xlims)
plt.ylim(bottom=0)

# save and show figure
plt.tight_layout()
plt.savefig(save_path / 'MC_PC_hist.pdf')
plt.savefig(save_path / 'MC_PC_hist.svg')
plt.show()
figure_2.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + # -*- coding: utf-8 -*- from __future__ import unicode_literals # text in Western (Windows 1252) import pickle import numpy as np # import StringIO import math from keras import optimizers, metrics from keras.models import Model from keras.layers import Dense, Dropout, Input from keras.layers.merge import concatenate from keras import regularizers from keras.layers.convolutional import Conv1D from keras.layers.convolutional import MaxPooling1D from keras.constraints import maxnorm from keras.layers import Flatten from keras.optimizers import SGD from keras.models import load_model # from keras import backend as Input np.random.seed(7) # + # # %run ../../../prepare_data.py import sys sys.path.insert(0, '../../../') from prepare_data import * # + # # %run ../../../prepare_data.py # X_train, X_other_features_train, y_train, X_test, X_other_features_test, y_test, X_validate, X_other_features_validate, y_validate = generate_syllable_inputs('../../internal_representations/inputs/content_shuffle_vector.h5', '../../internal_representations/inputs/shuffle_vector') # save_inputs('../../internal_representations/inputs/shuffeled_syllable_train_inputs.h5', X_train, y_train, other_features = X_other_features_train) # save_inputs('../../internal_representations/inputs/shuffeled_syllable_test_inputs.h5', X_test, y_test, other_features = X_other_features_test) # save_inputs('../../internal_representations/inputs/shuffeled_syllable_validate_inputs.h5', X_validate, y_validate, other_features = X_other_features_validate) # X_train, X_other_features_train, y_train = load_inputs('../../internal_representations/inputs/shuffeled_syllable_train_inputs.h5', other_features=True) # X_test, X_other_features_test, y_test = 
load_inputs('../../internal_representations/inputs/shuffeled_syllable_test_inputs.h5', other_features=True) # X_validate, X_other_features_validate, y_validate = load_inputs('../../internal_representations/inputs/shuffeled_syllable_validate_inputs.h5', other_features=True) # syllable_letters_translator = create_syllable_letters_translator(max_syllable, syllable_dictionary, dictionary, vowels) data = Data('s', accent_classification=True) data.generate_data('syllabled_letters_accent_classification_train', 'syllabled_letters_accent_classification_test', 'syllabled_letters_accent_classification_validate', force_override=False) # - # concatenate test and train data data.x_train = np.concatenate((data.x_train, data.x_test), axis=0) data.x_other_features_train = np.concatenate((data.x_other_features_train, data.x_other_features_test), axis=0) data.y_train = np.concatenate((data.y_train, data.y_test), axis=0) # %run ../../../prepare_data.py gen = data.generator('train', 16) test = next(gen) # + # prints test word from generator # print(X_train[0]) # print(syllable_dictionary[test[0][0][0][0].tolist().index(1)]) print(np.array(test[0][0]).shape) for el in test[0][0][0]: syllable = '' for pos in range(0, len(el), 36): syllable += data.decode_x([el[pos:pos+36]], dictionary) print(syllable) # print(el) # print(syllable_dictionary[el.tolist().index(1)]) # + num_examples = len(data.x_train) nn_output_dim = 13 nn_hdim = 516 batch_size = 16 actual_epoch = 20 num_fake_epoch = 20 #conv_input_shape=(10, 252) conv_input_shape=(10, 5168) othr_input = (150, ) conv_input = Input(shape=conv_input_shape, name='conv_input') # x_conv = Conv1D(43, (3), padding='same', activation='relu')(conv_input) x_conv = Conv1D(200, (2), padding='same', activation='relu')(conv_input) x_conv = MaxPooling1D(pool_size=2)(x_conv) x_conv = Flatten()(x_conv) # x_conv = Dense(516, activation='relu', kernel_constraint=maxnorm(3))(x_conv) othr_input = Input(shape=othr_input, name='othr_input') # x_othr = Dense(256, 
input_dim=167, activation='relu')(othr_input) # x_othr = Dropout(0.3)(x_othr) # x_othr = Dense(512, activation='relu')(othr_input) # x_othr = Dropout(0.3)(x_othr) # x_othr = Dense(256, activation='relu')(othr_input) x = concatenate([x_conv, othr_input]) # x = Dense(1024, input_dim=(516 + 256), activation='relu')(x) x = Dense(256, activation='relu')(x) x = Dropout(0.3)(x) x = Dense(256, activation='relu')(x) x = Dropout(0.3)(x) x = Dense(256, activation='relu')(x) x = Dropout(0.3)(x) x = Dense(nn_output_dim, activation='sigmoid')(x) model = Model(inputs=[conv_input, othr_input], outputs=x) opt = optimizers.Adam(lr=1E-4, beta_1=0.9, beta_2=0.999, epsilon=1e-08) model.compile(loss='binary_crossentropy', optimizer=opt, metrics=[actual_accuracy,]) # - y_array_train = np.asarray(data.y_train) accentuation_length_train = (y_array_train > 0).sum() y_array_test = np.asarray(data.y_test) accentuation_length_test = (y_array_test > 0).sum() history = model.fit_generator(data.generator('train', batch_size), accentuation_length_train/(batch_size * num_fake_epoch), epochs=actual_epoch*num_fake_epoch) name = '20_test_epoch' model.save(name + '.h5') output = open(name + '_history.pkl', 'wb') pickle.dump(history.history, output) output.close() # + # dictionary, max_word, max_num_vowels, content, vowels, accetuated_vowels = create_dict() # train_content, test_content, validate_content = split_content(content, 0.2, '../../internal_representations/inputs/content_shuffle_vector.h5') # feature_dictionary = create_feature_dictionary() # syllable_dictionary = create_syllables_dictionary(content, vowels) # max_syllable = get_max_syllable(syllable_dictionary) content = data._read_content('../../../data/SlovarIJS_BESEDE_utf8.lex') dictionary, max_word, max_num_vowels, vowels, accented_vowels = data._create_dict(content) feature_dictionary = data._create_feature_dictionary() syllable_dictionary = data._create_syllables_dictionary(content, vowels) # - # %run ../../../prepare_data.py # 
generate_X_and_y(dictionary, max_word, max_num_vowels, content, vowels, accetuated_vowels, feature_dictionary)
cnn/accent_classification/syllables/cnn.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# Problem Statement - Food Nutrition

# # Intro to Python

# ## Data Dictionary
# 1. NDB_No - Nutrition database number
# 2. Shrt_Desc - Short description
# 3. Water_(g) - water in grams per 100 grams
# 4. Energ_Kcal - Energy in Kcal
# 5. Protein_(g) - Protein
# 6. LipidTot(g) - Total Lipid
# 7. Ash_(g) - Ash
# 8. Carbohydrt_(g) - Carbohydrate, by difference
# 9. FiberTD(g) - Fiber, total dietary
# 10. SugarTot(g) - Total Sugars
# 11. Calcium_(mg) - Calcium
# 12. Iron_(mg) - Iron
# 13. Magnesium_(mg) - Magnesium
# 14. Phosphorus_(mg) - Phosphorus
# 15. Potassium_(mg) - Potassium
# 16. Zinc_(mg) - Zinc
# 17. Copper_(mg) - Copper
# 18. Manganese_(mg) - Manganese
# 19. Selenium_(æg) - Selenium
# 20. Sodium_(mg) - Sodium

#Import all the necessary modules
import pandas as pd
import numpy as np
import os

# # Set the working directory

# +
#os.chdir('C:\\Users\\jayveer\\Desktop\\DataCamp_exercises\\GL_Mentoring\\Python')
# -

# # Import Excel file
# Load the Data file into Python DataFrame using pandas read_excel method
# The csv that we are going to load contains the unicode format data. We will use an additional encoding parameter which will allow pandas to load the csv.

data_df = pd.read_excel("Food Nutrition.xlsx")

# # View Top 10 rows
# Head function is used to view the top records. The number of records to be viewed, needs to be given in the parenthesis.

data_df.head(10)

# # View Last 20 records
# Tail function is used to view the last records. The number of records to be viewed, needs to be given in the parenthesis.

data_df.tail(20)

# Displaying the shape of the Data Frame in which first value is giving number of Rows and second value is giving number of columns.

data_df.shape

print("The number of rows are ",data_df.shape[0],"\n","The number of columns are",data_df.shape[1])

# # Check for missing values

data_df.isnull().sum()

# There are two missing values in the data set one for each variable lipid_Tot_(g) and Ash_(g). We need to drop these missing values before proceeding. We will use the dropna method to drop the missing values.

data_df=data_df.dropna()

data_df.isnull().sum()

# We can now see that there are no missing values in our data set.

# # Summary of the data
#
# To see the summary of any dataframe we will use describe function.

data_df.describe()

# # Create a vector “test” using the top 10 values of variable Protein_(g)
#
# we will pass the variable name inside the square brackets of data_df and then use head function to retrieve the top 10 records for that variable.

test=data_df['Protein_(g)'].head(10)
test

# # Select the top 5 rows of initial 5 variables in a matrix format
# To fetch the top 5 rows and initial variables we will use iloc and will pass the index both for rows and columns.

data_df.iloc[0:5, 0:5]

# # What is the data_type of the Sodium_(mg) variable
# To check the class of any variable we will use dtype property.

data_df['Sodium_(mg)'].dtype

# # Create a new variable “EPW” by dividing Energ_Kcal with the Water; what is the dimension of the new dataset?

data_df['EPW']=data_df['Energ_Kcal']/data_df['Water_(g)']

#dimension will be checked using the shape
data_df.shape

data_df.head(5)

# # Create a subset of the dataset, where the Energ_Kcal is less than 500, what is the dimension of this new dataset?
#
# We will create a new data frame by using the subset conditions from the original data_df and will use shape property for checking the dimension.

data_df_new=data_df[data_df['Energ_Kcal']<500]
data_df_new.shape

# Here what we got reduced rows as there were 36 rows where Energ_Kcal > 500.
# (598-36=562)

# # Find the top 10 products based on following
#
# 1. Higher the Energy_Kcal, higher the ranking
#
# Here we will use sorting to find out based on higher energy values. We will use sort_values method to order the data. Since we are saying higher the value the rank needs to be higher, we will sort the data in the descending order so that the top most value gets the higher rank.

data_df.sort_values(by='Energ_Kcal',ascending = False).head(10)# for descending order we chose ascending = False here

# 2. Lower the water content, higher the ranking
#
# Since we are saying lower the value the rank needs to be higher, we will sort the data in the ascending order so that the lower most value gets the higher rank.

data_df.sort_values(by='Water_(g)',ascending = True).head(10)# for ascending order we chose ascending = True here

# # Create a subset of the data where product_desc contains “CHEESE” and list down the summary statistics of the subset
#
# To find a specific value we will use the contains method. We will pass the string in the parentheses which needs to be located in the dataframe variable.

data_df_cheese= data_df[data_df['Shrt_Desc'].str.contains("CHEESE")]
data_df_cheese.shape

data_df_cheese.describe()

# # Using the cut function on water variable divide the whole data into 6 bins, list down the summary statistics of all the 6 bins.

# FIX: use .copy() — plain assignment only creates an alias, so adding the
# 'bins' column below would silently mutate data_df as well.
data_bins=data_df.copy()
data_bins['bins']=pd.cut(data_bins['Water_(g)'],6, labels =["A", "B", "C","E","F","G"])
data_bins.head(5)

data_bins["bins"].value_counts()

# we can either do this in one go, like shown below
data_bins.groupby("bins").describe().T

# or we can create subsets of the data on the basis of bins
A=data_bins[data_bins["bins"]=="A"]
B=data_bins[data_bins["bins"]=="B"]
C=data_bins[data_bins["bins"]=="C"]
E=data_bins[data_bins["bins"]=="E"]
F=data_bins[data_bins["bins"]=="F"]
G=data_bins[data_bins["bins"]=="G"]

A.describe()# You can do for others

# OR make loop for it.
X=[A,B,C,E,F,G]
for i in X:
    print(i.describe())
M1 Python For Data Science/Week_2_Python_For_Data_Science_NumPy_Pandas/Food_Nutrition_Solution.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Assignment 4 # # Before working on this assignment please read these instructions fully. In the submission area, you will notice that you can click the link to **Preview the Grading** for each step of the assignment. This is the criteria that will be used for peer grading. Please familiarize yourself with the criteria before beginning the assignment. # # This assignment requires that you to find **at least** two datasets on the web which are related, and that you visualize these datasets to answer a question with the broad topic of **religious events or traditions** (see below) for the region of **Ann Arbor, Michigan, United States**, or **United States** more broadly. # # You can merge these datasets with data from different regions if you like! For instance, you might want to compare **Ann Arbor, Michigan, United States** to Ann Arbor, USA. In that case at least one source file must be about **Ann Arbor, Michigan, United States**. # # You are welcome to choose datasets at your discretion, but keep in mind **they will be shared with your peers**, so choose appropriate datasets. Sensitive, confidential, illicit, and proprietary materials are not good choices for datasets for this assignment. You are welcome to upload datasets of your own as well, and link to them using a third party repository such as github, bitbucket, pastebin, etc. Please be aware of the Coursera terms of service with respect to intellectual property. # # Also, you are welcome to preserve data in its original language, but for the purposes of grading you should provide english translations. You are welcome to provide multiple visuals in different languages if you would like! 
# # As this assignment is for the whole course, you must incorporate principles discussed in the first week, such as having as high data-ink ratio (Tufte) and aligning with Cairo’s principles of truth, beauty, function, and insight. # # Here are the assignment instructions: # # * State the region and the domain category that your data sets are about (e.g., **Ann Arbor, Michigan, United States** and **religious events or traditions**). # * You must state a question about the domain category and region that you identified as being interesting. # * You must provide at least two links to available datasets. These could be links to files such as CSV or Excel files, or links to websites which might have data in tabular form, such as Wikipedia pages. # * You must upload an image which addresses the research question you stated. In addition to addressing the question, this visual should follow Cairo's principles of truthfulness, functionality, beauty, and insightfulness. # * You must contribute a short (1-2 paragraph) written justification of how your visualization addresses your stated research question. # # What do we mean by **religious events or traditions**? For this category you might consider calendar events, demographic data about religion in the region and neighboring regions, participation in religious events, or how religious events relate to political events, social movements, or historical events. # # ## Tips # * Wikipedia is an excellent source of data, and I strongly encourage you to explore it for new data sources. # * Many governments run open data initiatives at the city, region, and country levels, and these are wonderful resources for localized data sources. # * Several international agencies, such as the [United Nations](http://data.un.org/), the [World Bank](http://data.worldbank.org/), the [Global Open Data Index](http://index.okfn.org/place/) are other great places to look for data. # * This assignment requires you to convert and clean datafiles. 
Check out the discussion forums for tips on how to do this from various sources, and share your successes with your fellow students! # # ## Example # Looking for an example? Here's what our course assistant put together for the **Ann Arbor, MI, USA** area using **sports and athletics** as the topic. [Example Solution File](./readonly/Assignment4_example.pdf) # # National Park Visits in the American Southwest # **Data Source** # Tidy Tuesday - https://github.com/rfordatascience/tidytuesday/tree/master/data/2019/2019-09-17 # fivethirtyeight article - https://fivethirtyeight.com/features/the-national-parks-have-never-been-more-popular/ # national park data - https://data.world/inform8n/us-national-parks-visitation-1904-2016-with-boundaries # population data - https://en.wikipedia.org/wiki/List_of_U.S._states_and_territories_by_historical_population # gas price data - https://www.energy.gov/eere/vehicles/fact-915-march-7-2016-average-historical-annual-gasoline-pump-price-1929-2015 # **Question** # How do gas prices affect visitation per capita of continental US? 
# + # load libraries # %matplotlib notebook import numpy as np import pandas as pd import matplotlib as mpl import matplotlib.pyplot as plt import seaborn as sns # - #import data park_visits = pd.read_csv("https://raw.githubusercontent.com/rfordatascience/tidytuesday/master/data/2019/2019-09-17/national_parks.csv", dtype = {'year': 'str'}) state_pop = pd.read_csv("https://raw.githubusercontent.com/rfordatascience/tidytuesday/master/data/2019/2019-09-17/state_pop.csv", dtype = {'year': 'str'}) gas_price = pd.read_csv("https://raw.githubusercontent.com/rfordatascience/tidytuesday/master/data/2019/2019-09-17/gas_price.csv", dtype = {'year': 'str'}) park_visits.head() state_pop.head() gas_price.head() #merge state_pop and gas_price data state_gas = state_pop.merge(gas_price, how = 'inner', on = 'year') #1929 onwards sg = state_gas.set_index(['year', 'state']) sg.head() #tidy park_visits parks = park_visits.sort_values(by = ['year']) columns_to_keep = ['parkname', 'region', 'unit_type', 'visitors'] p1 = parks[(parks['state'] != 'HI') & (parks['state'] != 'AK')].set_index(['year', 'state']) p1 = p1[columns_to_keep] p1.head() # merge sg and p1 sg1 = sg.reset_index() df = p1.merge(sg1, how = 'inner', left_index = True, right_on = ['year', 'state']) cols = ['parkname', 'state', 'visitors', 'pop', 'gas_constant'] df = df.reset_index() df = df.set_index('year') df = df[df['unit_type'] == 'National Park'] df = df[cols] df['visits_per_capita'] = df['visitors'] / df['pop'] df.head() # + sns.set_style('white') # relationship between gas price and utah national park visits. not the right graph for the job. 
sns.jointplot(df['gas_constant'], df['visits_per_capita'], kind = 'kde', space = 0); # - cols1 = ['gas_constant', 'visits_per_capita'] df1 = df[cols1].rename(columns = {'gas_constant': 'Gas Price', 'visits_per_capita': 'Visits Per Capita'}) df2 = df1.reset_index().groupby('year').mean() ax = df2.plot(x = df2.index, y = 'Visits Per Capita', c = 'g') plt.ylabel('Visits Per Capita') df2.plot(x = df2.index, y = 'Gas Price', secondary_y = True, c = 'y', ax = ax, title = 'Continental US National Parks:\nGas price has a small effect on visitation per capita') plt.ylabel('Gas Price (Constant 2015 USD/Gallon)');
Assignment4.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Beautiful Charts # **Inhalt:** Etwas Chart-Formatierung # # **Nötige Skills:** Erste Schritte mit Pandas # # **Lernziele:** # - Basic Parameter in der Plot-Funktion kennenlernen # - Charts formatieren mit weiteren Befehlen # - Intro für Ready-Made Styles und Custom Styles # - Charts exportieren # # # **Weitere Ressourcen:** # - Alle Ressourcen: siehe https://github.com/MAZ-CAS-DDJ/kurs_19_20/tree/master/08%20Pandas%20Teil%201/material # - Simons Cheat Sheet: https://github.com/MAZ-CAS-DDJ/kurs_19_20/blob/master/08%20Pandas%20Teil%201/material/plotting.md # ## Charts in Pandas # # Eine Reihe von Basic Chart-Funktionalitäten haben wir bereits kennengelernt: # - Line Plots # - Bar Charts # - Histogramme # - etc. # # Wenn wir darüber hinausgehen wollen, kann es sehr schnell kompliziert werden. Es gibt zig verschiedene Arten, wie man auf die Funktionen zugreifen kann und Charts formatieren kann. # - Die Funktion, die wir bereits kennen, heisst `plot()`. Wir können sie ausgehend von einem Dataframe oder einer Serie verwenden. Hier die offizielle Referenz dazu: https://pandas.pydata.org/pandas-docs/stable/generated/pandas.DataFrame.plot.html # # - Im Hintergrund der `plot()`-Funktion steht die Matplotlib-Bibliothek: https://matplotlib.org/index.html. Bei manchen Formatierungs-Optionen müssen wir Befehle direkt von dort verwenden. 
# ## Setup # Wir importieren dieses Mal diverse Libraries: # # - Pandas import pandas as pd # - und Matplotlib, um auf einige Spezialfunktionen zugreifen zu können import matplotlib import matplotlib.pyplot as plt import matplotlib.patches as mpatches import matplotlib.image as mpimg import matplotlib.ticker as ticker # Wie immer geben wir den Befehl, den Output der plot-Funktion direkt als Bild anzuzeigen # %matplotlib inline # ## das Beispiel # # Eine Liste von Ländern mit ihrer Grösse, BIP pro Kopf und Lebenserwartung path = "dataprojects/countries/countries.csv" df = pd.read_csv(path) df.head(3) # ## Elemente eines Charts # Ein Chart besteht aus überraschend vielen Elementen. # # Die meisten Programmiersprachen verwenden ähnliche Namen dafür. # # Hier die Bezeichnungen bei Pandas / Matplotlib: # # (Quelle: https://matplotlib.org/tutorials/introductory/usage.html#sphx-glr-tutorials-introductory-usage-py) from IPython.display import display, Image img = Image(filename='BeautifulCharts/anatomy.png') display(img) # ## Ein simpler Scatterplot # Das hier kennen wir bereits: df.plot(kind='scatter', x='gdp_per_capita', y='life_expectancy', figsize=(10,7)) # ## Den Chart verschönern # # (oder verschlimmern, je nach dem, wie man es nimmt...) 
# ### Variante 1: nur plot()-Parameter # In der Plot-Funktion selbst hat es bereits einige Parameter, mit denen wir etwas spielen können: df.plot(kind='scatter', x='gdp_per_capita', y='life_expectancy', alpha=0.5, #Transparenz der Füllfarbe s=40, #Grösse der PUnkte color='purple', #Farbe der Punkte linewidth=2, #Dicke der Rahmenlinie xlim=(-2000,52000), #Min und Max für die X-Achse ylim=(38, 82), #Min und Max für die Y-Achse xticks=[0,10000,20000,30000,40000,50000], #Die X-Ticks einzeln spezifizieren yticks=[0,40,50,60,70,80], #Die Y-Ticks einzeln spezifizieren figsize=(11,8), #Grösse der Abbildung grid=True, #Gitternetzlinien ja/nein fontsize=14, #Schriftrösse der Tick Labels title='Ab einem BIP pro Kopf von 20000 steigt die Lebenserwartug nicht mehr') # ### Variante 2: plot()-Parameter und matplotlib-Funktionen # Dazu gibt es noch zig weitere Einstellungen, die man im Nachhinein definieren oder verändern kann. # # Wir müssen dazu den Output der `plot()`-Funktion in einer eigenen Variable speichern. 
Typischerweise: `ax` # + #Was man mit der Pandas-Funktion alles machen kann ax = df.plot(kind='scatter', x='gdp_per_capita', y='life_expectancy', alpha=0.5, s=40, color='darkblue', linewidth=2, xlim=(-2000,62000), ylim=(38, 82), xticks=[0,20000,40000,60000], yticks=[40,60,80], figsize=(11,8), grid=True, fontsize=14) #Was man separat einstellen kann: # - Titel title_font = {'fontsize': 20, 'fontweight': 'bold', 'fontname': 'Comic Sans MS'} ax.set_title('Reiche Leute leben länger, aber nicht ewig', fontdict=title_font, loc='left') # - Achsenbeschriftungen label_font = {'fontsize': 14, 'fontweight': 'bold', 'fontname': 'Comic Sans MS'} ax.set_ylabel("Lebenserwartung", fontdict=label_font) ax.set_xlabel("BIP pro Kopf", fontdict=label_font) ax.yaxis.set_label_position('left') # - Ticks ax.xaxis.set_ticks_position('none') ax.yaxis.set_ticks_position('none') ax.xaxis.set_major_formatter(ticker.StrMethodFormatter('${x:,}')) # Gitternetz formatieren ax.grid(which='major', linestyle='-.', linewidth='0.5', color='black', ) ax.minorticks_on() ax.grid(which='minor', linestyle='-.', linewidth='0.2', color='blue', ) # - Rahmenlinien ausschalten ax.spines['right'].set_visible(False) ax.spines['left'].set_visible(False) ax.spines['top'].set_visible(False) ax.spines['bottom'].set_visible(False) # - Hintergrundfarbe ax.set_facecolor('#EEEEEE') # - # ### Variante 3: nur matplotlib-Funktion # Manchmal erstellen wir einen Plot gar nicht via plot(), sondern über matplotlib. ZB bei Small Multiples. # # In dem Fall müssen wir praktisch alle Parameter via Matplotlib setzen, leider. # + # Zuerst ein Figure- und ein Ax-Objekt erstellen fig, ax = plt.subplots() # Dann aus dem Ax-Objekt heraus einen Plot erstellen. 
# Die scatter()-Funktion ist ähnlich, aber nicht identisch wie plot(kind='scatter') ax.scatter(x=df['gdp_per_capita'], y=df['life_expectancy'], alpha=0.5, s=40, color='darkgreen', linewidth=2) # Eine ganze Reihe von Chart-Formatierungsparametern konnten wir hier nicht ansprechen # Wir müssen sie extra nochmals neu setzen # - Die Grösse der Abbildung fig.set_size_inches(11, 8) #NEU # - Den Titel ax.set_title('Reiche Leute leben länger, aber nicht ewig', fontsize=20, fontname='Impact', loc='left') # - Die Achsen ax.set_xlim([0, 50000]) #NEU ax.set_ylim([40, 80]) #NEU ax.set_ylabel("Lebenserwartung", fontsize=14, fontname='Impact', fontweight='bold') ax.set_xlabel("BIP pro Kopf", fontsize=14, fontname='Impact', fontweight='bold') # - Die Ticks ax.xaxis.set_ticks([0, 10000, 20000, 30000, 40000, 50000]) #NEU ax.yaxis.set_ticks([40, 50, 60, 70, 80]) #NEU ax.xaxis.set_ticks_position('none') ax.yaxis.set_ticks_position('none') ax.xaxis.set_major_formatter(ticker.StrMethodFormatter('${x:,}')) # - Das Gitter ax.grid(which='major', linestyle='-.', linewidth='0.5', color='grey', ) # - Etc. etc: Alles, was wir in Variante 2 verwendet haben, können wir auch hier verwenden. # - # Klingt kompliziert...? **Ja, ist es!** Darum: Mit den Formatierungen am besten erst ganz am Schluss herumspielen, wenn es wirklich darum geht, einen Chart irgendwo zu präsentieren. Für schnelles Abchecken und Austesten von Charts lohnt sich das einfach nicht. # Eine andere Variante, wie wir etwas rascher mit Styles spielen können, finden wir noch weiter unten. # # Zuerst aber noch was anderes. # ## Legende und Farben # # Wie vorgehen, wenn wir die einzelnen Punkte entsprechend einer Kategorie einfärben wollen, zB nach dem Kontinent? Hier eine Lösung. # ### Für die Farben df['continent'].unique() colors = { 'Asia': 'green', 'Europe': 'blue', 'Africa': 'brown', 'N. America': 'yellow', 'S. 
America': 'red', 'Oceania': 'purple' } colorlist = df['continent'].apply(lambda continent: colors[continent]) # ### Für die Legende patches = [] for continent, color in colors.items(): this_patch = mpatches.Patch(color=color, label=continent, alpha=0.5) patches.append(this_patch) # ### Für die Punktegrösse area = df['population'] / 400000 # ### Plotten # + #Was man mit der Pandas-Funktion alles machen kann ax = df.plot(kind='scatter', x='gdp_per_capita', y='life_expectancy', alpha=0.5, s=area, color=colorlist, linewidth=2, xlim=(-2000,52000), ylim=(38, 82), xticks=[0,10000,20000,30000,40000,50000], yticks=[0,40,50,60,70,80], figsize=(11,8), grid=True, fontsize=14) #Was man separat einstellen kann: - Titel ax.set_title('Reiche Länder werden irgendwann nicht mehr älter', fontsize=16, fontweight='bold') # - Achsenbeschriftungen ax.set_ylabel("Lebenserwartung", fontsize=14, fontweight='bold') ax.set_xlabel("BIP pro Kopf", fontsize=14, fontweight='bold') # - Ticks ausschalten ax.xaxis.set_ticks_position('none') ax.yaxis.set_ticks_position('none') # - Legende (this is really an ugly way to do this) ax.legend(handles=patches, frameon=False, fontsize=14) # - # <NAME> would be so proud!! https://www.ted.com/playlists/474/the_best_hans_rosling_talks_yo # ### Wichtig # # Nochmals: Eine genaue und vollständige Liste der Parameter zu kriegen, ist so gut wie unmöglich (tell me if you find one!). # # Daher, und nicht nur daher, lohnt es sich im allgemeinen nicht, allzu viel Zeit für die Formatierung von Charts aufzuwenden. Besser: Daten oder pdf evportieren und anderswo weiterbearbeiten. # # Eine andere Option ist, mit einem prädefinierten Stil zu arbeiten # ## Exportieren # Wir können einzelne Plots als Dateien exportieren. Dazu 1x diese Einstellung ausführen: matplotlib.rcParams['pdf.fonttype'] = 42 #important for the fonts # Und dann exportieren. 
# - als pdf df.plot(kind='scatter', x='gdp_per_capita', y='life_expectancy', title='Lebenserwartung und Wohlstand') plt.savefig("BeautifulCharts/Lebenserwartung-Wohlstand.pdf") # - als svg-Vektorgrafik df.plot(kind='scatter', x='gdp_per_capita', y='life_expectancy', title='Lebenserwartung und Wohlstand') plt.savefig("BeautifulCharts/Lebenserwartung-Wohlstand.svg") # ## Prädefinierte Stile # Diese Stile sind ziemlich praktisch. Man kann sich eine Liste davon anzeigen lassen: print(plt.style.available) # Um einen bestimmten Stil zu verwenden: plt.style.use('seaborn') # Umgesetzt sieht das dann so aus: df.plot(kind='scatter', x='gdp_per_capita', y='life_expectancy', title='Lebenserwartung und Wohlstand') # Der neue Style bleibt so lange gespeichert, bis wir ihn wieder zurücksetzen. plt.style.use('default') # ## Custom Style Sheets # Wer es mit den Matplotlib wirklich wissen will, kann sich auch sein eigenes Stylesheet erstellen. # Schritt1: Erstelle eine Datei mit diesem Namen (oder irgendeinem anderen Namen): # # `my_style.mplstyle` # # In die Datei, schreibe die eigenen Default-Werte für bestimmte Stil-Elemente rein: # # `axes.titlesize : 20 # axes.labelsize : 16 # lines.linewidth : 3 # lines.markersize : 10 # xtick.labelsize : 14 # ytick.labelsize : 14 # axes.grid : True # grid.color : red` # # etc. # # Die Dokumentation über alle möglichen Parameter gibt es hier: https://matplotlib.org/tutorials/introductory/customizing.html#sphx-glr-tutorials-introductory-customizing-py # Style laden: plt.style.use('BeautifulCharts/my_style.mplstyle') # Test: df.plot(kind='scatter', x='gdp_per_capita', y='life_expectancy', title='Lebenserwartung und Wohlstand')
12 Pandas Teil 4/Beautiful Charts L.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# ## Explore Random Graphs Using NetworkX

# In this example, we build a simple UI for exploring random graphs with
# [NetworkX](http://networkx.github.io/).

# NOTE: `IPython.html.widgets` was deprecated in IPython 4 and later removed;
# the widgets now live in the standalone `ipywidgets` package.
from ipywidgets import interact

# %matplotlib inline
import matplotlib.pyplot as plt
import networkx as nx

# +
# Wrap a few graph generation functions so they all share the same
# (n, m, k, p) signature and can be swapped via the `generator` dropdown.
# Parameters not used by a particular generator are simply ignored.

def random_lobster(n, m, k, p):
    """Random lobster graph; `k` is unused but kept for a uniform signature."""
    return nx.random_lobster(n, p, p / m)

def powerlaw_cluster(n, m, k, p):
    """Powerlaw cluster graph; `k` is unused."""
    return nx.powerlaw_cluster_graph(n, m, p)

def erdos_renyi(n, m, k, p):
    """Erdős-Rényi G(n, p) graph; `m` and `k` are unused."""
    return nx.erdos_renyi_graph(n, p)

def newman_watts_strogatz(n, m, k, p):
    """Newman-Watts-Strogatz small-world graph; `m` is unused."""
    return nx.newman_watts_strogatz_graph(n, k, p)

def plot_random_graph(n, m, k, p, generator):
    """Build a graph with `generator(n, m, k, p)` and draw it inline."""
    g = generator(n, m, k, p)
    nx.draw(g)
    plt.show()
# -

interact(plot_random_graph,
         n=(2, 30),
         m=(1, 10),
         k=(1, 10),
         p=(0.0, 1.0, 0.001),
         generator={'lobster': random_lobster,
                    'power law': powerlaw_cluster,
                    'Newman-Watts-Strogatz': newman_watts_strogatz,
                    u'Erdős-Rényi': erdos_renyi,
                    })
notebooks/1 - IPython Notebook Examples/IPython Project Examples/Interactive Widgets/Exploring Graphs.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # %matplotlib inline # # Creating Extensions Using numpy and scipy # ========================================= # **Author**: `<NAME> <https://github.com/apaszke>`_ # # **Updated by**: `<NAME> <https://github.com/adam-dziedzic>`_ # # In this tutorial, we shall go through two tasks: # # 1. Create a neural network layer with no parameters. # # - This calls into **numpy** as part of its implementation # # 2. Create a neural network layer that has learnable weights # # - This calls into **SciPy** as part of its implementation # # import torch from torch.autograd import Function # Parameter-less example # ---------------------- # # This layer doesn’t particularly do anything useful or mathematically # correct. # # It is aptly named BadFFTFunction # # **Layer Implementation** # # # + from numpy.fft import rfft2, irfft2 class BadFFTFunction(Function): @staticmethod def forward(ctx, input): numpy_input = input.detach().numpy() result = abs(rfft2(numpy_input)) return input.new(result) @staticmethod def backward(ctx, grad_output): numpy_go = grad_output.numpy() result = irfft2(numpy_go) return grad_output.new(result) # since this layer does not have any parameters, we can # simply declare this as a function, rather than as an nn.Module class def incorrect_fft(input): return BadFFTFunction.apply(input) # - # **Example usage of the created layer:** # # input = torch.randn(8, 8, requires_grad=True) result = incorrect_fft(input) print(result) result.backward(torch.randn(result.size())) print(input) # Parametrized example # -------------------- # # In deep learning literature, this layer is confusingly referred # to as convolution while the actual operation is cross-correlation # (the only difference is that filter is flipped for convolution, # which is not the 
case for cross-correlation). # # Implementation of a layer with learnable weights, where cross-correlation # has a filter (kernel) that represents weights. # # The backward pass computes the gradient wrt the input and the gradient wrt the filter. # # # + from numpy import flip import numpy as np from scipy.signal import convolve2d, correlate2d from torch.nn.modules.module import Module from torch.nn.parameter import Parameter class ScipyConv2dFunction(Function): @staticmethod def forward(ctx, input, filter, bias): # detach so we can cast to NumPy input, filter, bias = input.detach(), filter.detach(), bias.detach() result = correlate2d(input.numpy(), filter.numpy(), mode='valid') result += bias.numpy() ctx.save_for_backward(input, filter, bias) return torch.as_tensor(result, dtype=input.dtype) @staticmethod def backward(ctx, grad_output): grad_output = grad_output.detach() input, filter, bias = ctx.saved_tensors grad_output = grad_output.numpy() grad_bias = np.sum(grad_output, keepdims=True) grad_input = convolve2d(grad_output, filter.numpy(), mode='full') # the previous line can be expressed equivalently as: # grad_input = correlate2d(grad_output, flip(flip(filter.numpy(), axis=0), axis=1), mode='full') grad_filter = correlate2d(input.numpy(), grad_output, mode='valid') return torch.from_numpy(grad_input), torch.from_numpy(grad_filter).to(torch.float), torch.from_numpy(grad_bias).to(torch.float) class ScipyConv2d(Module): def __init__(self, filter_width, filter_height): super(ScipyConv2d, self).__init__() self.filter = Parameter(torch.randn(filter_width, filter_height)) self.bias = Parameter(torch.randn(1, 1)) def forward(self, input): return ScipyConv2dFunction.apply(input, self.filter, self.bias) # - # **Example usage:** # # module = ScipyConv2d(3, 3) print("Filter and bias: ", list(module.parameters())) input = torch.randn(10, 10, requires_grad=True) output = module(input) print("Output from the convolution: ", output) output.backward(torch.randn(8, 8)) 
print("Gradient for the input map: ", input.grad)

# **Check the gradients:**
#
#

# +
from torch.autograd.gradcheck import gradcheck

# Numerically verify the hand-written backward pass of ScipyConv2d:
# gradcheck perturbs each input element and compares the analytic gradient
# against a finite-difference estimate (double precision required).
conv_module = ScipyConv2d(3, 3)
check_input = [torch.randn(20, 20, dtype=torch.double, requires_grad=True)]
gradients_ok = gradcheck(conv_module, check_input, eps=1e-6, atol=1e-4)
print("Are the gradients correct: ", gradients_ok)
docs/_downloads/52d4aaa33601a2b3990ace6aa45546ce/numpy_extensions_tutorial.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python [default] # language: python # name: python3 # --- import pandas as pd import os import numpy as np import matplotlib.pyplot as plt import seaborn as sns from datetime import datetime # %matplotlib inline pd.set_option('display.max_columns', None) # %config IPCompleter.greedy=True raw_veh_stop_sept = pd.read_csv('trimet_congestion/init_veh_stoph 1-30SEP2017.csv') tripsh_sept = pd.read_csv('trimet_congestion/init_tripsh 1-30SEP2017.csv') tripsh_sept.info(verbose=True, null_counts=True) raw_veh_stop_sept.info(verbose=True, null_counts=True) stop_type = tripsh_sept.merge(raw_veh_stop_sept, left_on='EVENT_NO', right_on='EVENT_NO_TRIP', how='inner') stop_type.info(verbose=True, null_counts=True) stop_type['TIME_DIFF'] = stop_type['ACT_DEP_TIME_y'] - stop_type['ACT_ARR_TIME'] stop_type.head() stop_type = stop_type[stop_type.TIME_DIFF != 0] stop_type.info(verbose=True, null_counts=True) stop_type['TIME_DIFF_MIN'] = stop_type['TIME_DIFF'] / 60 stop_type.head() stop_type[(stop_type['LINE_ID'] == 72)].groupby(['STOP_TYPE'])['TIME_DIFF'].sum().plot(title = 'Line 72 Stop Type in Seconds', kind='bar', y= 'seconds') stop_type[(stop_type['LINE_ID'] == 33)].groupby(['STOP_TYPE'])['TIME_DIFF'].sum().plot(title = 'Line 33 Stop Type in Seconds', kind='bar', y= 'seconds') stop_type[(stop_type['LINE_ID'] == 4)].groupby(['STOP_TYPE'])['TIME_DIFF'].sum().plot(title = 'Line 4 Stop Type in Seconds', kind='bar', y= 'seconds') stop_type['LINE_ID'].value_counts() stop_type[(stop_type['LINE_ID'] == 75)].groupby(['STOP_TYPE'])['TIME_DIFF'].sum().plot(title = 'Line 75 Stop Type in Seconds', kind='bar', y= 'seconds') # + fig, axes = plt.subplots(nrows=2, ncols=2,figsize=(12,12)) #from matplotlib import rcParams #rcParams.update({'figure.autolayout': True}) stop_type[(stop_type['LINE_ID'] == 
75)].groupby(['STOP_TYPE'])['TIME_DIFF_MIN'].sum().plot(ax=axes[0,0], kind='bar', y= 'seconds'); axes[0,0].set_title('Line 75 Stop Type in Seconds') stop_type[(stop_type['LINE_ID'] == 4)].groupby(['STOP_TYPE'])['TIME_DIFF_MIN'].sum().plot(ax=axes[0,1], kind='bar', y= 'seconds'); axes[0,1].set_title('Line 4 Stop Type in Seconds') stop_type[(stop_type['LINE_ID'] == 72)].groupby(['STOP_TYPE'])['TIME_DIFF_MIN'].sum().plot(ax=axes[1,0], kind='bar', y= 'seconds'); axes[1,0].set_title('Line 72 Stop Type in Seconds') stop_type[(stop_type['LINE_ID'] == 20)].groupby(['STOP_TYPE'])['TIME_DIFF_MIN'].sum().plot(ax=axes[1,1], kind='bar', y= 'seconds'); axes[1,1].set_title('Line 20 Stop Type in Seconds') #plt.tight_layout() # - line4_df = stop_type[(stop_type.LINE_ID == 4) & (stop_type.STOP_TYPE == 3)] line4_df.info(verbose=True, null_counts=True) line4_df.head(100) # From here there are a few options: # 1. Do this same process for October and November and then concatenate the 3 dataframes showing disturbance stops for a line for all three months # 2. Keep the months separate and concatanete the line numbers together by month # 3. Do option one for all three line numbers and then concatanate for having all months and all line numbers together in one dataframe # # Below is option #2 line14_df = stop_type[(stop_type.LINE_ID == 14) & (stop_type.STOP_TYPE == 3)] line14_df.info(verbose=True, null_counts=True) line73_df = stop_type[(stop_type.LINE_ID == 73) & (stop_type.STOP_TYPE == 3)] line73_df.info(verbose=True, null_counts=True) all_lines_disturbance_df = pd.concat([line4_df,line14_df,line73_df],ignore_index=True) all_lines_disturbance_df.info(verbose=True, null_counts=True) all_lines_disturbance_df.to_csv('Lines4_14_73_Disturbance_Stops.csv')
congestion_analysis/stop_type_sept.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# +
print("Hello and welcome to Awesome Jobs.")
print('We will together find a better job for you.\n')

# Collect basic applicant details interactively.
name = input('Q1. Tell me, what is your full name?\n')
city = input('Q2. Which city are you from?\n')
gend = input("Q3. What's your gender?\n")
age = input("Q4. What's your age?\n")
print("\n")

print("This is exciting.")
print("I already have found more than 300 jobs that you might like.")
job = input("Q5. Which kind of job do you like?\n")
salary = input("Q6. What is your monthly salary expectations?\n")
# -

# In-memory job database: job id -> {Job title, Company, Salary}.
# Fixed: the job title was misspelled "Software Enginner" in every record.
database = {
    1: {'Job': "Software Engineer", "Company": "Google",    "Salary": 18},
    2: {'Job': "Software Engineer", "Company": "ETG",       "Salary": 10},
    3: {'Job': "Software Engineer", "Company": "Apple",     "Salary": 15},
    4: {'Job': "Software Engineer", "Company": "Microsoft", "Salary": 7},
}
Recruiting Chatbot - Overview/Recruiting Chatbot - Overview.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 2 # language: python # name: python2 # --- # # ConvNet in Keras # #### for MNIST Digit Classification # #### based on https://github.com/fchollet/keras/blob/master/examples/mnist_cnn.py from __future__ import print_function import numpy as np from keras.datasets import mnist from keras.models import Sequential from keras.layers import Dense, Dropout, Activation, Flatten from keras.layers import Convolution2D, MaxPooling2D from keras.utils import np_utils from keras import backend as K # #### Configure Model batch_size = 128 # number of samples to include in each mini-batch nb_classes = 10 # there are ten digit classes in the MNIST data set nb_epoch = 10 # number of epochs to train for img_rows, img_cols = 28, 28 # input image dimensions nb_filters = 32 # number of convolutional filters to use pool_size = (2, 2) # size of pooling area for max pooling kernel_size = (3, 3) # convolution kernel size # #### Load data, shuffle it, and split between test and training sets (X_train, y_train), (X_test, y_test) = mnist.load_data() if K.image_dim_ordering() == 'th': X_train = X_train.reshape(X_train.shape[0], 1, img_rows, img_cols) X_test = X_test.reshape(X_test.shape[0], 1, img_rows, img_cols) input_shape = (1, img_rows, img_cols) else: X_train = X_train.reshape(X_train.shape[0], img_rows, img_cols, 1) X_test = X_test.reshape(X_test.shape[0], img_rows, img_cols, 1) input_shape = (img_rows, img_cols, 1) X_train = X_train.astype('float32') X_test = X_test.astype('float32') X_train /= 255 X_test /= 255 print('X_train shape:', X_train.shape) print(X_train.shape[0], 'train samples') print(X_test.shape[0], 'test samples') # #### Convert class vectors to binary class matrices Y_train = np_utils.to_categorical(y_train, nb_classes) Y_test = np_utils.to_categorical(y_test, nb_classes) y_train Y_train # #### 
Build Model model = Sequential() model.add(Convolution2D(nb_filters, kernel_size[0], kernel_size[1], border_mode='valid', input_shape=input_shape)) model.add(Activation('relu')) model.add(Convolution2D(nb_filters, kernel_size[0], kernel_size[1])) model.add(Activation('relu')) model.add(MaxPooling2D(pool_size=pool_size)) model.add(Dropout(0.25)) model.add(Flatten()) model.add(Dense(128)) model.add(Activation('relu')) model.add(Dropout(0.5)) model.add(Dense(nb_classes)) model.add(Activation('softmax')) model.compile(loss='categorical_crossentropy', optimizer='adadelta', metrics=['accuracy']) model.fit(X_train, Y_train, batch_size=batch_size, nb_epoch=nb_epoch, verbose=1, validation_data=(X_test, Y_test)) score = model.evaluate(X_test, Y_test, verbose=0) print('Test score:', score[0]) print('Test accuracy:', score[1])
demos-for-talks/Keras_MNIST_ConvNet.ipynb
# -*- coding: utf-8 -*- # --- # jupyter: # jupytext: # text_representation: # extension: .r # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: R # language: R # name: ir # --- # + [markdown] tags=[] # # Process Mining Module - PDEng program Data Science # # This notebook is part of the Process Mining module of the JADS PDEng program on Data Science. It accompanies Lecture 1 on *Event Logs and Process Discovery*. # The collection of notebooks is a *living document* and subject to change. # # # Lecture 2 - 'Process Discovery with the Heuristics Miner' (R / bupaR) # # * **Responsible Lecturer**: Dr. <NAME>, [@fmannhardt](https://twitter.com/fmannhardt) # * **Last Update**: 28th April 2021 # - # ## Setup # # <img src="http://bupar.net/images/logo_text.PNG" alt="bupaR" style="width: 200px;"/> # # In this notebook, we are going to need the `tidyverse` and the `bupaR` packages. # + ## Perform the commented out commands below in a separate R session # install.packages("tidyverse") # install.packages("bupaR") # - # for larger and readable plots options(jupyter.plot_scale=1.25) # the initial execution of these may give you warnings that you can safely ignore library(tidyverse) library(bupaR) library(processanimateR) library(heuristicsmineR) # ## Process Discovery # ### Process Models 🚧 # # bupaR does not provide an option to load BPMN models yet. Please have a look at the PM4Py instructions. # ### Quality Dimensions # # The quality dimensions `fitness`, `precision`, `simplicity`, and `generalisation` are best illustrated by using a small example event log. 
# We are using an example event log in XES format that is used in the book `Process Mining - Data Science in Action` by <NAME>, which is downloaded and stored in the `../data` directory with the code below: # ignore the warnings, the package needs to be updated and no 'activity instance identifier' is required in this example example_log <- xesreadR::read_xes("../data/Lfull.xes") # Let us have a look at the event log in tabular form. The mapping of the activity labels to actual activities is: # # * a = register request, # * b = examine thoroughly, # * c = examine casually, # * d = check ticket, # * e = decide, # * f = reinitiate request, # * g = pay compensation, and # * h = reject request. example_log %>% head() # Now let us discover a process map as we have seen in Lecture 1: example_log %>% process_map() # Not really very insightful the directly-follows based process map visualization. # #### Four Process Models - One Event Log 🚧 # This part is under construction. # ### Heuristics Miner # #### L_heur_1 Event Log # We are using an example event log that is suited to introduce the Heuristics Miner algorithm. This event log is already included with the `heuristicsmineR` package in bupaR. L_heur_1 # The naive process map drawing reveals some weird behaviour between the activities `b` and `c`. There seems to be a loop between both activities even though they never occur more than once in each trace. 
L_heur_1 %>% as_tibble() %>% mutate(activity_id = as.character(activity_id)) %>% mutate(activity_id = if_else(activity_id == "b" | activity_id == "c", "cb", activity_id)) %>% simple_eventlog(case_id = "CASE_concept_name", activity_id = "activity_id", timestamp = "timestamp") %>% process_map() L_heur_1 %>% process_map() # #### Dependency Graphs L_heur_1 %>% precedence_matrix(type = "absolute") %>% plot() # Based on the precedence matrix, we can follows the formula for the dependency relation: mat_pre <- L_heur_1 %>% precedence_matrix(type = "absolute") %>% as.matrix() mat_pre # Since, we want to compute how often activities follow each other in either direction, we need the transposed matrix: t_mat_pre <- t(mat_pre) t_mat_pre # And, then it is basic math: (mat_pre - t_mat_pre) / (mat_pre + t_mat_pre + 1) # Of course, this has already been implemented in the `heuristicsmineR` package. There are also some more details of the algorithm that deal with the detection of loops as well as making sure that all activities are connected to each other. Please consult the original [Heuristics Miner paper](https://is.ieis.tue.nl/staff/aweijters/WP334_FHMv3.pdf) and the documentation of `heuristicsmineR` for more details. L_heur_1 %>% dependency_matrix(threshold = 0) %>% plot() # A dependency graph can be L_heur_1 %>% dependency_matrix(threshold = 0.8) %>% render_dependency_matrix() # Have a look at the parameters (via `?dependency_matrix`) and try to change some of them to see what happens. 
L_heur_1 %>% dependency_matrix(threshold = 0.9) %>% render_dependency_matrix() sepsis %>% precedence_matrix() %>% plot sepsis %>% dependency_matrix(threshold = 0.7) %>% render_dependency_matrix() sepsis %>% dependency_matrix(threshold = 0.9) %>% render_dependency_matrix() # ### Causal nets L_heur_1 %>% causal_net(threshold = 0.8) %>% render_causal_net() sepsis %>% act_unite(Release = c("Release A", "Release B", "Release C", "Release D", "Release E")) %>% causal_net(all_connected = TRUE) %>% render_causal_net() example_log %>% causal_net() %>% render_causal_net() # #### Visualise / Convert as BPMN 🚧 # In bupaR there is currently no support for BPMN visualizations. However, it is possible to convert the Causal net into a Petri net. For simple process models, the mapping between BPMN and Petri nets is easy to understand. Thus, we are using Petri nets here. L_heur_1 %>% causal_net() %>% as.petrinet() %>% petrinetR::render_PN() example_log %>% causal_net() %>% as.petrinet() %>% petrinetR::render_PN() # **TODO** we could use the discovered Petri net with PM4Py to do further processing 🚧
r/lecture2-discovery.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Arrays # Dask array provides a parallel, larger-than-memory, n-dimensional array using blocked algorithms. Simply put: distributed Numpy. # # * **Parallel**: Uses all of the cores on your computer # * **Larger-than-memory**: Lets you work on datasets that are larger than your available memory by breaking up your array into many small pieces, operating on those pieces in an order that minimizes the memory footprint of your computation, and effectively streaming data from disk. # * **Blocked Algorithms**: Perform large computations by performing many smaller computations # # **Related Documentation** # # * [Documentation](http://dask.readthedocs.io/en/latest/array.html) # * [API reference](http://dask.readthedocs.io/en/latest/array-api.html) # ## Blocked Algorithms # A *blocked algorithm* executes on a large dataset by breaking it up into many small blocks. # # For example, consider taking the sum of a billion numbers. We might instead break up the array into 1,000 chunks, each of size 1,000,000, take the sum of each chunk, and then take the sum of the intermediate sums. # # We achieve the intended result (one sum on one billion numbers) by performing many smaller results (one thousand sums on one million numbers each, followed by another sum of a thousand numbers.) 
# # We do exactly this with Python and NumPy in the following example: # **Create random dataset** # + # create data if it doesn't already exist from prep import random_array random_array() # Load data with h5py # this creates a pointer to the data, but does not actually load import h5py import os f = h5py.File(os.path.join('data', 'random.hdf5'), mode='r') dset = f['/x'] # - # **Compute sum using blocked algorithm** # Before using dask, lets consider the concept of blocked algorithsm. We can compute the sum of a large number of elements by loading them chunk-by-chunk, and keeping a running total. # # Here we compute the sum of this large array on disk by # # 1. Computing the sum of each 1,000,000 sized chunk of the array # 2. Computing the sum of the 1,000 intermediate sums # # Note that this is a sequential process in the notebook kernel, both the loading and summing. # + # Compute sum of large array, one million numbers at a time sums = [] for i in range(0, 1000000000, 1000000): chunk = dset[i: i + 1000000] # pull out numpy array sums.append(chunk.sum()) total = sum(sums) print(total) # - # ### Exercise: Compute the mean using a blocked algorithm # Now that we've seen the simple example above try doing a slightly more complicated problem, compute the mean of the array, assuming for a moment that we don't happen to already know how many elements are in the data. You can do this by changing the code above with the following alterations: # # 1. Compute the sum of each block # 2. Compute the length of each block # 3. Compute the sum of the 1,000 intermediate sums and the sum of the 1,000 intermediate lengths and divide one by the other # # This approach is overkill for our case but does nicely generalize if we don't know the size of the array or individual blocks beforehand. 
# + # Compute the mean of the array # - # %load solutions/Array-01.py # `dask.array` contains these algorithms # -------------------------------------------- # # Dask.array is a NumPy-like library that does these kinds of tricks to operate on large datasets that don't fit into memory. It extends beyond the linear problems discussed above to full N-Dimensional algorithms and a decent subset of the NumPy interface. # **Create `dask.array` object** # You can create a `dask.array` `Array` object with the `da.from_array` function. This function accepts # # 1. `data`: Any object that supports NumPy slicing, like `dset` # 2. `chunks`: A chunk size to tell us how to block up our array, like `(1000000,)` import dask.array as da x = da.from_array(dset, chunks=(1000000,)) # ** Manipulate `dask.array` object as you would a numpy array** # Now that we have an `Array` we perform standard numpy-style computations like arithmetic, mathematics, slicing, reductions, etc.. # # The interface is familiar, but the actual work is different. dask_array.sum() does not do the same thing as numpy_array.sum(). # **What's the difference?** # `dask_array.sum()` builds an expression of the computation. It does not do the computation yet. `numpy_array.sum()` computes the sum immediately. # *Why the difference?* # Dask arrays are split into chunks. Each chunk must have computations run on that chunk explicitly. If the desired answer comes from a small slice of the entire dataset, running the computation over all data would be wasteful of CPU and memory. result = x.sum() result # **Compute result** # Dask.array objects are lazily evaluated. Operations like `.sum` build up a graph of blocked tasks to execute. # # We ask for the final result with a call to `.compute()`. This triggers the actual computation. result.compute() # ### Exercise: Compute the mean # And the variance, std, etc.. This should be a trivial change to the example above. 
# # Look at what other operations you can do with the Jupyter notebook's tab-completion. # Does this match your result from before? # Performance and Parallelism # ------------------------------- # # <img src="images/fail-case.gif" width="40%" align="right"> # # In our first examples we used `for` loops to walk through the array one block at a time. For simple operations like `sum` this is optimal. However for complex operations we may want to traverse through the array differently. In particular we may want the following: # # 1. Use multiple cores in parallel # 2. Chain operations on a single blocks before moving on to the next one # # Dask.array translates your array operations into a graph of inter-related tasks with data dependencies between them. Dask then executes this graph in parallel with multiple threads. We'll discuss more about this in the next section. # # # ### Example # 1. Construct a 20000x20000 array of normally distributed random values broken up into 1000x1000 sized chunks # 2. Take the mean along one axis # 3. Take every 100th element # + import numpy as np import dask.array as da x = da.random.normal(10, 0.1, size=(20000, 20000), # 400 million element array chunks=(1000, 1000)) # Cut into 1000x1000 sized chunks y = x.mean(axis=0)[::100] # Perform NumPy-style operations # - x.nbytes / 1e9 # Gigabytes of the input processed lazily # %%time y.compute() # Time to compute the result # Performance comparision # --------------------------- # # The following experiment was performed on a heavy personal laptop. Your performance may vary. If you attempt the NumPy version then please ensure that you have more than 4GB of main memory. 
# **NumPy: 19s, Needs gigabytes of memory**

# ```python
# import numpy as np
#
# # %%time
# x = np.random.normal(10, 0.1, size=(20000, 20000))
# y = x.mean(axis=0)[::100]
# y
#
# CPU times: user 19.6 s, sys: 160 ms, total: 19.8 s
# Wall time: 19.7 s
# ```

# **Dask Array: 4s, Needs megabytes of memory**

# ```python
# import dask.array as da
#
# # %%time
# x = da.random.normal(10, 0.1, size=(20000, 20000), chunks=(1000, 1000))
# y = x.mean(axis=0)[::100]
# y.compute()
#
# CPU times: user 29.4 s, sys: 1.07 s, total: 30.5 s
# Wall time: 4.01 s
# ```

# **Discussion**

# Notice that the Dask array computation ran in 4 seconds, but used 29.4
# seconds of user CPU time. The numpy computation ran in 19.7 seconds and
# used 19.6 seconds of user CPU time.
#
# Dask finished faster, but used more total CPU time because Dask was able to
# transparently parallelize the computation because of the chunk size.

# *Questions*

# * What happens if the dask chunks=(20000,20000)?
#   * Will the computation run in 4 seconds?
#   * How much memory will be used?
# * What happens if the dask chunks=(25,25)?
#   * What happens to CPU and memory?

# ### Exercise: Meteorological data

# There is 2GB of somewhat artificial weather data in HDF5 files in
# `data/weather-big/*.hdf5`. We'll use the `h5py` library to interact with
# this data and `dask.array` to compute on it.
#
# Our goal is to visualize the average temperature on the surface of the
# Earth for this month. This will require a mean over all of this data. We'll
# do this in the following steps
#
# 1. Create `h5py.Dataset` objects for each of the days of data on disk
#    (`dsets`)
# 2. Wrap these with `da.from_array` calls
# 3. Stack these datasets along time with a call to `da.stack`
# 4. Compute the mean along the newly stacked time axis with the `.mean()`
#    method
# 5. Visualize the result with `matplotlib.pyplot.imshow`

from prep import create_weather  # Prep data if it doesn't exist
create_weather()

# +
import h5py
from glob import glob
import os

# One dataset handle per day of data; nothing is loaded into memory yet.
filenames = sorted(glob(os.path.join('data', 'weather-big', '*.hdf5')))
dsets = [h5py.File(filename, mode='r')['/t2m'] for filename in filenames]
dsets[0]
# -

dsets[0][:5, :5]  # Slicing into h5py.Dataset object gives a numpy array

# +
# %matplotlib inline
import matplotlib.pyplot as plt

fig = plt.figure(figsize=(16, 8))
plt.imshow(dsets[0][::4, ::4], cmap='RdBu_r')
# -

# **Integrate with `dask.array`**

# Make a list of `dask.array` objects out of your list of `h5py.Dataset`
# objects using the `da.from_array` function with a chunk size of
# `(500, 500)`.

# %load solutions/02-dask-arrays-make-arrays.py

# **Stack this list of `dask.array` objects into a single `dask.array`
# object with `da.stack`**

# Stack these along the first axis so that the shape of the resulting array
# is `(31, 5760, 11520)`.

# %load solutions/02-dask-arrays-stacked.py

# **Plot the mean of this array along the time (`0th`) axis**

# complete the following (the `...` is an exercise placeholder: replace it
# with the time-mean array before running)
fig = plt.figure(figsize=(16, 8))
plt.imshow(..., cmap='RdBu_r')

# %load solutions/02-dask-arrays-weather-mean.py

# **Plot the difference of the first day from the mean**

# %load solutions/02-dask-arrays-weather-difference.py

# ### Exercise: Subsample and store

# In the above exercise the result of our computation is small, so we can
# call `compute` safely. Sometimes our result is still too large to fit into
# memory and we want to save it to disk. In these cases you can use one of
# the following two functions
#
# 1. `da.store`: Store dask.array into any object that supports numpy setitem
#    syntax, e.g.
#
#        f = h5py.File('myfile.hdf5')
#        output = f.create_dataset(shape=..., dtype=...)
#
#        da.store(my_dask_array, output)
#
# 2. `da.to_hdf5`: A specialized function that creates and stores a
#    `dask.array` object into an `HDF5` file.
# # da.to_hdf5('data/myfile.hdf5', '/output', my_dask_array) # # The task in this exercise is to **use numpy step slicing to subsample the full dataset by a factor of two in both the latitude and longitude direction and then store this result to disk** using one of the functions listed above. # # As a reminder, Python slicing takes three elements # # start:stop:step # # >>> L = [1, 2, 3, 4, 5, 6, 7] # >>> L[::3] # [1, 4, 7] # %load solutions/Array-03.py # ## Example: Lennard-Jones potential # The [Lennard-Jones](https://en.wikipedia.org/wiki/Lennard-Jones_potential) is used in partical simuluations in physics, chemistry and engineering. It is highly parallelizable. # # First, we'll run and profile the Numpy version on 7,000 particles. # + import numpy as np # make a random collection of particles def make_cluster(natoms, radius=40, seed=1981): np.random.seed(seed) cluster = np.random.normal(0, radius, (natoms,3))-0.5 return cluster def lj(r2): sr6 = (1./r2)**3 pot = 4.*(sr6*sr6 - sr6) return pot # build the matrix of distances def distances(cluster): diff = cluster[:, np.newaxis, :] - cluster[np.newaxis, :, :] mat = (diff*diff).sum(-1) return mat # the lj function is evaluated over the upper traingle # after removing distances near zero def potential(cluster): d2 = distances(cluster) dtri = np.triu(d2) energy = lj(dtri[dtri > 1e-6]).sum() return energy # - cluster = make_cluster(int(7e3), radius=500) # %time potential(cluster) # Notice that the most time consuming function is `distances`. # + # this would open in another browser tab # # %load_ext snakeviz # # %snakeviz potential(cluster) # alternative simple version given text results in this tab # %prun -s cumulative potential(cluster) # - # ### Dask version # Here's the Dask version. Only the `potential` function needs to be rewritten to best utilize Dask. # # Note that `da.nansum` has been used over the full $NxN$ distance matrix to improve parallel efficiency. 
# # + import dask.array as da # compute the potential on the entire # matrix of distances and ignore division by zero def potential_dask(cluster): d2 = distances(cluster) energy = da.nansum(lj(d2))/2. return energy # - # Let's convert the NumPy array to a Dask array. Since the entire NumPy array fits in memory it is more computationally efficient to chunk the array by number of CPU cores. # + from os import cpu_count dcluster = da.from_array(cluster, chunks=cluster.shape[0]//cpu_count()) # - # This step should scale quite well with number of cores. The warnings are complaining about dividing by zero, which is why we used `da.nansum` in `potential_dask`. e = potential_dask(dcluster) # %time e.compute() # Limitations # ----------- # # Dask.array does not implement the entire numpy interface. Users expecting this # will be disappointed. Notably dask.array has the following failings: # # 1. Dask does not implement all of ``np.linalg``. This has been done by a # number of excellent BLAS/LAPACK implementations and is the focus of # numerous ongoing academic research projects. # 2. Dask.array does not support any operation where the resulting shape # depends on the values of the array. In order to form the Dask graph we # must be able to infer the shape of the array before actually executing the # operation. This precludes operations like indexing one Dask array with # another or operations like ``np.where``. # 3. Dask.array does not attempt operations like ``sort`` which are notoriously # difficult to do in parallel and are of somewhat diminished value on very # large data (you rarely actually need a full sort). # Often we include parallel-friendly alternatives like ``topk``. # 4. Dask development is driven by immediate need, and so many lesser used # functions, like ``np.full_like`` have not been implemented purely out of # laziness. These would make excellent community contributions.
02_high-perf/99_array.ipynb