code
stringlengths
38
801k
repo_path
stringlengths
6
263
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Recommendations with IBM # # In this notebook, a number of different methods for making recommendations that can be used for different situations are built out on on real-world data from the IBM Watson Studio platform. # # # ## Table of Contents # # I. [Exploratory Data Analysis](#Exploratory-Data-Analysis)<br> # II. [Rank Based Recommendations](#Rank)<br> # III. [User-User Based Collaborative Filtering](#User-User)<br> # IV. [Content Based Recommendations (EXTRA - NOT REQUIRED)](#Content-Recs)<br> # V. [Matrix Factorization](#Matrix-Fact)<br> # VI. [Extras & Concluding](#conclusions) # + import pandas as pd import numpy as np import seaborn as sns import matplotlib.pyplot as plt import project_tests as t from sklearn.metrics import precision_score, recall_score # %matplotlib inline df = pd.read_csv('data/user-item-interactions.csv') df_content = pd.read_csv('data/articles_community.csv') del df['Unnamed: 0'] del df_content['Unnamed: 0'] # Show df to get an idea of the data df.head() # - # Show df_content to get an idea of the data df_content.head() # ### <a class="anchor" id="Exploratory-Data-Analysis">Part I : Exploratory Data Analysis</a> # # Use the dictionary and cells below to provide some insight into the descriptive statistics of the data. # # `1.` What is the distribution of how many articles a user interacts with in the dataset? Provide a visual and descriptive statistics to assist with giving a look at the number of times each user interacts with an article. 
# calculate how many articles each user interacts in the dataset num_atcs_per_user = df.groupby('email')['article_id'].count() # histogram plot to see the price distribution sns.distplot(num_atcs_per_user, color='forestgreen', kde_kws={'color': 'indianred', 'lw': 2, 'label': 'KDE'}) plt.title('User interactions Distribution', fontsize=14) plt.ylabel('Probablity', fontsize=12) plt.xlabel('Number of articles', fontsize=12) plt.xticks(fontsize=12) plt.yticks(fontsize=12) plt.legend(['KDE'], prop={"size":12}) plt.show() # data description print('Descriptive analysis of user interactions with articles') num_atcs_per_user.describe() # calculate how many users interacts with each article in the dataset num_users_per_atc = df.groupby('article_id')['email'].count() # histogram plot to see the price distribution sns.distplot(num_users_per_atc, color='forestgreen', kde_kws={'color': 'indianred', 'lw': 2, 'label': 'KDE'}) plt.title('Article interactions Distribution', fontsize=14) plt.ylabel('Probablity', fontsize=12) plt.xlabel('Number of users', fontsize=12) plt.xticks(fontsize=12) plt.yticks(fontsize=12) plt.legend(['KDE'], prop={"size":12}) plt.show() # data description print('Descriptive analysis of user interactions with articles') num_users_per_atc.describe() # + # Fill in the median and maximum number of user_article interactios below median_val = df.groupby('email')['article_id'].count().median()# 50% of individuals interact with ____ number of articles or fewer. max_views_by_user = df.groupby('email')['article_id'].count().max() # The maximum number of user-article interactions by any 1 user is ______. print('50% of individuals interact with {} articles or fewer'.format(median_val)) print('The maximum number of user-article interactions by any user is {}'.format(max_views_by_user)) # - # ### Observation # In the descriptive analysis results, on one hand, the distribution of the user interactions with articles are positively skewed. 
# Most of the users have interactions with less than or equal to 9 articles and half of the users read less
# than or equal to 3 articles. On the other hand, the distribution of the article interactions by users is
# positively skewed as well. Most of the articles have been read by less than or equal to 69 users, and half
# of the articles in this platform have less than or equal to 25 users.

# `2.` Explore and remove duplicate articles from the **df_content** dataframe.

# Find and explore duplicate articles
# A duplicate is any row whose article_id appeared earlier in df_content.
n_duplicate_articles = df_content.shape[0] - df_content.nunique()['article_id']
n_duplicate_articles

df_content.shape[0]

# Remove any rows that have the same article_id - only keep the first
df_content.drop_duplicates(subset='article_id', inplace=True)

df_content.shape[0]

# `3.` Use the cells below to find:
#
# **a.** The number of unique articles that have an interaction with a user.
# **b.** The number of unique articles in the dataset (whether they have any interactions or not).<br>
# **c.** The number of unique users in the dataset. (excluding null values)<br>
# **d.** The number of user-article interactions in the dataset.

df.nunique()

df.shape[0]

unique_articles = 714  # The number of unique articles that have at least one interaction
total_articles = 1051  # The number of unique articles on the IBM platform
unique_users = 5148  # The number of unique users
user_article_interactions = 45993  # The number of user-article interactions

# `4.` Use the cells below to find the most viewed **article_id**, as well as how often it was viewed.  After talking to the company leaders, the `email_mapper` function was deemed a reasonable way to map users to ids.  There were a small number of null values, and it was found using other information that all of these null values likely belonged to a single user.
# Articles sorted by number of interactions; the first row is the most viewed.
df.groupby('article_id').count().sort_values('email', ascending=False).head(1)

most_viewed_article_id = '1429.0'  # The most viewed article in the dataset as a string with one value following the decimal
max_views = 937  # The most viewed article in the dataset was viewed how many times?

# +
## No need to change the code here - this will be helpful for later parts of the notebook

# Run this cell to map the user email to a user_id column and remove the email column
def email_mapper():
    '''Encode each email in the global df as a sequential integer user id.

    Returns a list aligned with df's rows, assigning ids in order of first
    appearance.  NOTE(review): the null emails are presumed to collapse into
    a single id (pandas reusing one NaN object) - confirm against the data.
    '''
    coded_dict = dict()
    cter = 1
    email_encoded = []

    for val in df['email']:
        if val not in coded_dict:
            coded_dict[val] = cter
            cter += 1

        email_encoded.append(coded_dict[val])
    return email_encoded

email_encoded = email_mapper()
del df['email']
df['user_id'] = email_encoded

# show header
df.head()

# +
## If you stored all your results in the variable names above,
## you shouldn't need to change anything in this cell

sol_1_dict = {
    '`50% of individuals have _____ or fewer interactions.`': median_val,
    '`The total number of user-article interactions in the dataset is ______.`': user_article_interactions,
    '`The maximum number of user-article interactions by any 1 user is ______.`': max_views_by_user,
    '`The most viewed article in the dataset was viewed _____ times.`': max_views,
    '`The article_id of the most viewed article is ______.`': most_viewed_article_id,
    '`The number of unique articles that have at least 1 rating ______.`': unique_articles,
    '`The number of unique users in the dataset is ______`': unique_users,
    '`The number of unique articles on the IBM platform`': total_articles
}

# Test your dictionary against the solution
t.sol_1_test(sol_1_dict)
# -

# ### <a class="anchor" id="Rank">Part II: Rank-Based Recommendations</a>
#
# Unlike in the earlier lessons, we don't actually have ratings for whether a user liked an article or not.  We only know that a user has interacted with an article.
# In these cases, the popularity of an article can really only be based on how often an article was interacted with.
#
# `1.` Fill in the function below to return the **n** top articles ordered with most interactions as the top. Test your function using the tests below.

# +
def get_top_articles(n, df=df):
    '''
    INPUT:
    n - (int) the number of top articles to return
    df - (pandas dataframe) df as defined at the top of the notebook

    OUTPUT:
    top_articles - (list) A list of the top 'n' article titles
    '''
    # Rank titles by how many interaction rows carry them, most first.
    counts_by_title = df.groupby('title').count()
    ranked_titles = counts_by_title.sort_values('user_id', ascending=False)
    top_articles = list(ranked_titles.head(n).index)

    return top_articles  # Return the top article titles from df (not df_content)

def get_top_article_ids(n, df=df):
    '''
    INPUT:
    n - (int) the number of top articles to return
    df - (pandas dataframe) df as defined at the top of the notebook

    OUTPUT:
    top_articles - (list) A list of the top 'n' article ids, as strings
    '''
    # Same ranking as above but keyed on article_id, with ids cast to str.
    counts_by_id = df.groupby('article_id').count()
    ranked_ids = counts_by_id.sort_values('user_id', ascending=False)
    top_articles = list(ranked_ids.head(n).index.astype(str))

    return top_articles  # Return the top article ids
# -

print(get_top_articles(10))
print(get_top_article_ids(10))

# +
# Test your function by returning the top 5, 10, and 20 articles
top_5 = get_top_articles(5)
top_10 = get_top_articles(10)
top_20 = get_top_articles(20)

# Test each of your three lists from above
t.sol_2_test(get_top_articles)
# -

# ### <a class="anchor" id="User-User">Part III: User-User Based Collaborative Filtering</a>
#
#
# `1.` Use the function below to reformat the **df** dataframe to be shaped with users as the rows and articles as the columns.
#
# * Each **user** should only appear in each **row** once.
#
#
# * Each **article** should only show up in one **column**.
#
#
# * **If a user has interacted with an article, then place a 1 where the user-row meets for that article-column**.
# It does not matter how many times a user has interacted with the article, all entries where a user has interacted with an article should be a 1.
#
#
# * **If a user has not interacted with an item, then place a zero where the user-row meets for that article-column**.
#
# Use the tests to make sure the basic structure of your matrix matches what is expected by the solution.

# +
# create the user-article matrix with 1's and 0's

def create_user_item_matrix(df):
    '''
    INPUT:
    df - pandas dataframe with article_id, title, user_id columns

    OUTPUT:
    user_item - user item matrix

    Description:
    Return a matrix with user ids as rows and article ids on the columns
    with 1 values where a user interacted with an article and a 0 otherwise
    '''
    # One row per (user, article) pair; the remaining column holds the
    # number of repeat interactions for that pair.
    pair_counts = df.groupby(['user_id', 'article_id']).count()
    # Collapse any number of repeat interactions down to a single 1 flag.
    pair_counts[pair_counts >= 1] = 1
    # Pivot article ids out to columns; pairs never seen become 0.
    pivoted = pair_counts.unstack().fillna(0)
    # unstack leaves a two-level column index - keep only the article ids.
    user_item = pivoted.droplevel(level=0, axis=1)

    return user_item  # return the user_item matrix

user_item = create_user_item_matrix(df)
# -

## Tests: You should just need to run this cell.  Don't change the code.
assert user_item.shape[0] == 5149, "Oops!  The number of users in the user-article matrix doesn't look right."
assert user_item.shape[1] == 714, "Oops!  The number of articles in the user-article matrix doesn't look right."
assert user_item.sum(axis=1)[1] == 36, "Oops!  The number of articles seen by user 1 doesn't look right."
print("You have passed our quick tests!  Please proceed!")

# `2.` Complete the function below which should take a user_id and provide an ordered list of the most similar users to that user (from most similar to least similar).  The returned result should not contain the provided user_id, as we know that each user is similar to him/herself. Because the results for each user here are binary, it (perhaps) makes sense to compute similarity as the dot product of two users.
#
# Use the tests to test your function.
def find_similar_users(user_id, user_item=user_item):
    '''
    INPUT:
    user_id - (int) a user_id
    user_item - (pandas dataframe) matrix of users by articles:
                1's when a user has interacted with an article, 0 otherwise

    OUTPUT:
    most_similar_users - (list) an ordered list where the closest users
                         (largest dot product users) are listed first

    Description:
    Computes the similarity of every pair of users based on the dot product
    and returns every other user id ordered from most to least similar.
    '''
    # Pairwise similarity matrix: entry (i, j) is the dot product of the
    # two users' binary article vectors.
    similarity = user_item.dot(np.transpose(user_item))

    # Locate the row belonging to the requested user and sort it.
    row_pos = np.where(user_item.index == user_id)[0][0]
    ranked = similarity.iloc[row_pos].sort_values(ascending=False)

    # Keep just the ids and drop the user themselves (trivially most similar).
    most_similar_users = list(ranked.index)
    most_similar_users.remove(user_id)

    return most_similar_users  # return a list of the users in order from most to least similar

# Do a spot check of your function
print("The 10 most similar users to user 1 are: {}".format(find_similar_users(1)[:10]))
print("The 5 most similar users to user 3933 are: {}".format(find_similar_users(3933)[:5]))
print("The 3 most similar users to user 46 are: {}".format(find_similar_users(46)[:3]))

# `3.` Now that you have a function that provides the most similar users to each user, you will want to use these users to find articles you can recommend.  Complete the functions below to return the articles you would recommend to each user.
# +
def get_article_names(article_ids, df=df):
    '''
    INPUT:
    article_ids - (list) a list of article ids
    df - (pandas dataframe) df as defined at the top of the notebook

    OUTPUT:
    article_names - (list) a list of article names associated with the list of article ids
                    (this is identified by the title column)
    '''
    # Ids arrive as strings ('1429.0') while df stores floats, hence float().
    # The first matching interaction row supplies the title.
    article_names = []
    for article_id in article_ids:
        article_names.append(df[df['article_id'] == float(article_id)]['title'].values[0])

    return article_names  # Return the article names associated with list of article ids

def get_user_articles(user_id, user_item=user_item):
    '''
    INPUT:
    user_id - (int) a user id
    user_item - (pandas dataframe) matrix of users by articles:
                1's when a user has interacted with an article, 0 otherwise

    OUTPUT:
    article_ids - (list) a list of the article ids seen by the user
    article_names - (list) a list of article names associated with the list of article ids

    Description:
    Provides a list of the article_ids and article titles that have been seen by a user
    '''
    # Columns of the user's row holding a 1 are the articles they viewed.
    article_ids = user_item.loc[user_id][user_item.loc[user_id] == 1].index.astype(str).tolist()
    article_names = get_article_names(article_ids)

    return article_ids, article_names  # return the ids and names

def user_user_recs(user_id, m=10):
    '''
    INPUT:
    user_id - (int) a user id
    m - (int) the number of recommendations you want for the user

    OUTPUT:
    recs - (list) a list of recommendations for the user

    Description:
    Loops through the users based on closeness to the input user_id
    For each user - finds articles the user hasn't seen before and provides them as recs
    Does this until m recommendations are found

    Notes:
    Users who are the same closeness are chosen arbitrarily as the 'next' user
    For the user where the number of recommended articles starts below m
    and ends exceeding m, the last items are chosen arbitrarily
    '''
    similar_user_ids = find_similar_users(user_id)
    user_article_ids, user_article_names = get_user_articles(user_id)

    recs = []
    # BUGFIX: the exclusion list must keep BOTH the user's own articles and
    # everything recommended so far.  The previous version overwrote it with
    # recs alone, so from the second neighbor onward already-seen articles
    # could be recommended back to the user.
    excluded = list(user_article_ids)
    for similar_user_id in similar_user_ids:
        similar_article_ids, similar_article_names = get_user_articles(similar_user_id)
        new_recs = np.setdiff1d(similar_article_ids, excluded, assume_unique=True)
        recs.extend(new_recs)
        excluded.extend(new_recs)  # avoid repeat articles in later iterations
        if len(recs) >= m:
            break

    recs = recs[:m]
    return recs  # return your recommendations for this user_id
# -

# Check Results
get_article_names(user_user_recs(1, 10))  # Return 10 recommendations for user 1

# double check to avoid repeated articles
len(np.unique(get_article_names(user_user_recs(1, 200))))

# Test your functions here - No need to change this code - just run this cell
assert set(get_article_names(['1024.0', '1176.0', '1305.0', '1314.0', '1422.0', '1427.0'])) == set(['using deep learning to reconstruct high-resolution audio', 'build a python app on the streaming analytics service', 'gosales transactions for naive bayes model', 'healthcare python streaming application demo', 'use r dataframes & ibm watson natural language understanding', 'use xgboost, scikit-learn & ibm watson machine learning apis']), "Oops! Your the get_article_names function doesn't work quite how we expect."
assert set(get_article_names(['1320.0', '232.0', '844.0'])) == set(['housing (2015): united states demographic measures','self-service data preparation with ibm data refinery','use the cloudant-spark connector in python notebook']), "Oops! Your the get_article_names function doesn't work quite how we expect."
# Sanity checks: get_user_articles must return matching ids and titles.
assert set(get_user_articles(20)[0]) == set(['1320.0', '232.0', '844.0'])
assert set(get_user_articles(20)[1]) == set(['housing (2015): united states demographic measures', 'self-service data preparation with ibm data refinery','use the cloudant-spark connector in python notebook'])
assert set(get_user_articles(2)[0]) == set(['1024.0', '1176.0', '1305.0', '1314.0', '1422.0', '1427.0'])
assert set(get_user_articles(2)[1]) == set(['using deep learning to reconstruct high-resolution audio', 'build a python app on the streaming analytics service', 'gosales transactions for naive bayes model', 'healthcare python streaming application demo', 'use r dataframes & ibm watson natural language understanding', 'use xgboost, scikit-learn & ibm watson machine learning apis'])
print("If this is all you see, you passed all of our tests!  Nice job!")

# `4.` Now we are going to improve the consistency of the **user_user_recs** function from above.
#
# * Instead of arbitrarily choosing when we obtain users who are all the same closeness to a given user - choose the users that have the most total article interactions before choosing those with fewer article interactions.
#
#
# * Instead of arbitrarily choosing articles from the user where the number of recommended articles starts below m and ends exceeding m, choose articles with the articles with the most total interactions before choosing those with fewer total interactions. This ranking should be what would be obtained from the **top_articles** function you wrote earlier.
# +
def find_similar_users_similarity(user_id, user_item=user_item):
    '''
    INPUT:
    user_id - (int) a user_id
    user_item - (pandas dataframe) matrix of users by articles:
                1's when a user has interacted with an article, 0 otherwise

    OUTPUT:
    similar_user_ids - (pandas Index) user ids ordered from most to least similar
    similar_user_similarity - (numpy array) the matching dot-product similarities

    Description:
    Computes the similarity of every pair of users based on the dot product
    '''
    # Pairwise similarity: entry (i, j) counts the articles users i and j share.
    dot_prod_users = user_item.dot(np.transpose(user_item))

    user_id_index = np.where(user_item.index == user_id)[0][0]
    ranked = dot_prod_users.iloc[user_id_index].sort_values(ascending=False)

    # BUGFIX: drop the user by label instead of slicing off position [1:].
    # On a similarity tie the user's own row is not guaranteed to be first,
    # so the positional slice could remove the wrong user.
    ranked = ranked.drop(user_id)

    similar_user_ids = ranked.index
    similar_user_similarity = ranked.values

    return similar_user_ids, similar_user_similarity  # users ordered most to least similar

def get_ranked_articles(df=df):
    '''
    INPUT:
    df - (pandas dataframe) df as defined at the top of the notebook

    OUTPUT:
    ranked_articles - (pandas dataframe) with article_id and number of interactions,
                      sorted by the number of interactions in a descending order

    Description:
    Computes the number of interactions each article has and returns an ordered
    df based on the number of interactions
    '''
    ranked_articles = df.groupby('article_id').count().sort_values(
        'user_id', ascending=False).drop('title', axis=1).reset_index()
    ranked_articles.columns = ['article_id', 'interactions']

    return ranked_articles

def get_top_sorted_users(user_id, df=df, user_item=user_item):
    '''
    INPUT:
    user_id - (int)
    df - (pandas dataframe) df as defined at the top of the notebook
    user_item - (pandas dataframe) matrix of users by articles:
                1's when a user has interacted with an article, 0 otherwise

    OUTPUT:
    neighbors_df - (pandas dataframe) a dataframe with:
                    neighbor_id - is a neighbor user_id
                    similarity - measure of the similarity of each user to the provided user_id
                    num_interactions - the number of articles viewed by the user

    Other Details - sort the neighbors_df by the similarity and then by number of interactions
                    where highest of each is higher in the dataframe
    '''
    similar_user_ids, similar_user_similarity = find_similar_users_similarity(user_id)

    # Total interaction rows per neighbor (counts repeats, unlike user_item).
    interactions_df = df[df['user_id'].isin(similar_user_ids)].groupby(
        'user_id').count()['article_id'].reset_index()

    neighbors_df = pd.DataFrame(columns=['user_id', 'similarity'])
    neighbors_df['user_id'] = similar_user_ids
    neighbors_df['similarity'] = similar_user_similarity
    neighbors_df = pd.merge(neighbors_df, interactions_df, on='user_id', how='left')
    neighbors_df.columns = ['neighbor_id', 'similarity', 'num_interactions']

    # Ties in similarity are broken by total interactions, most active first.
    neighbors_df = neighbors_df.sort_values(['similarity', 'num_interactions'], ascending=False)

    return neighbors_df  # Return the dataframe specified in the doc_string

def user_user_recs_part2(user_id, m=10, df=df):
    '''
    INPUT:
    user_id - (int) a user id
    m - (int) the number of recommendations you want for the user

    OUTPUT:
    recs - (list) a list of recommendations for the user by article id
    rec_names - (list) a list of recommendations for the user by article title

    Description:
    Loops through the users based on closeness to the input user_id
    For each user - finds articles the user hasn't seen before and provides them as recs
    Does this until m recommendations are found

    Notes:
    * Choose the users that have the most total article interactions
      before choosing those with fewer article interactions.
    * Choose articles with the articles with the most total interactions
      before choosing those with fewer total interactions.
    '''
    recs = []
    ranked_articles = get_ranked_articles(df=df)
    neighbors_df = get_top_sorted_users(user_id)
    user_article_ids, user_article_names = get_user_articles(user_id)

    # BUGFIX: keep one exclusion set covering the user's own articles AND
    # everything recommended so far (the old code overwrote it with recs only).
    excluded = set(user_article_ids)
    for neighbor_id in neighbors_df['neighbor_id']:
        similar_article_ids, similar_article_names = get_user_articles(neighbor_id)

        # Rank this neighbor's articles by total interactions, most first.
        article_interactions = [
            ranked_articles[ranked_articles['article_id'] == float(article_id)]['interactions'].max()
            for article_id in similar_article_ids
        ]
        new_recs_df = pd.DataFrame({'article_id': similar_article_ids,
                                    'interactions': article_interactions})
        new_recs_df = new_recs_df.sort_values('interactions', ascending=False)

        # BUGFIX: np.setdiff1d returns its result sorted by article id, which
        # destroyed the interactions-based ordering established above.  This
        # order-preserving filter keeps "most interacted first".
        new_recs_ids = [a for a in new_recs_df['article_id'] if a not in excluded]

        recs.extend(new_recs_ids)
        excluded.update(new_recs_ids)
        if len(recs) >= m:
            break

    recs = recs[:m]
    rec_names = get_article_names(recs)

    return recs, rec_names
# -

# double check to avoid repeated articles
rec_ids, rec_names = user_user_recs_part2(20, 200)

len(np.unique(rec_ids))

len(np.unique(rec_names))

# Quick spot check - don't change this code - just use it to test your functions
rec_ids, rec_names = user_user_recs_part2(20, 10)
print("The top 10 recommendations for user 20 are the following article ids:")
print(rec_ids)
print()
print("The top 10 recommendations for user 20 are the following article names:")
print(rec_names)

# `5.` Use your functions from above to correctly fill in the solutions to the dictionary below.  Then test your dictionary against the solution.  Provide the code you need to answer each following the comments below.
# + ### Tests with a dictionary of results user1_most_sim = find_similar_users(1)[0]# Find the user that is most similar to user 1 user131_10th_sim = find_similar_users(131)[9]# Find the 10th most similar user to user 131 # + ## Dictionary Test Here sol_5_dict = { 'The user that is most similar to user 1.': user1_most_sim, 'The user that is the 10th most similar to user 131': user131_10th_sim, } t.sol_5_test(sol_5_dict) # - # `6.` If we were given a new user, which of the above functions would you be able to use to make recommendations? Explain. Can you think of a better way we might make recommendations? Use the cell below to explain a better method for new users. # **Provide your response here.** # `7.` Using your existing functions, provide the top 10 recommended articles you would provide for the a new user below. You can test your function against our thoughts to make sure we are all on the same page with how we might make a recommendation. # + new_user = '0.0' # What would your recommendations be for this new user '0.0'? As a new user, they have no observed articles. # Provide a list of the top 10 article ids you would give to new_user_recs = get_top_article_ids(10)# Your recommendations here # + assert set(new_user_recs) == set(['1314.0','1429.0','1293.0','1427.0','1162.0','1364.0','1304.0','1170.0','1431.0','1330.0']), "Oops! It makes sense that in this case we would want to recommend the most popular articles, because we don't know anything about these users." print("That's right! Nice job!") # - # ### Dissusion # For a new user, we have no knowledge about them and we don't know their preference yet. Thus it makes sense we recommend them the most popular articles in the platform with the most user interactions using the Rank Based Recommendation in Part II. 
# ### <a class="anchor" id="Content-Recs">Part IV: Content Based Recommendations (EXTRA - NOT REQUIRED)</a> # # Another method we might use to make recommendations is to perform a ranking of the highest ranked articles associated with some term. You might consider content to be the **doc_body**, **doc_description**, or **doc_full_name**. There isn't one way to create a content based recommendation, especially considering that each of these columns hold content related information. # # `1.` Use the function body below to create a content based recommender. Since there isn't one right answer for this recommendation tactic, no test functions are provided. Feel free to change the function inputs if you decide you want to try a method that requires more input values. The input values are currently set with one idea in mind that you may use to make content based recommendations. One additional idea is that you might want to choose the most popular recommendations that meet your 'content criteria', but again, there is a lot of flexibility in how you might make these recommendations. # # ### This part is NOT REQUIRED to pass this project. However, you may choose to take this on as an extra way to show off your skills. def make_content_recs(): ''' INPUT: OUTPUT: ''' # `2.` Now that you have put together your content-based recommendation system, use the cell below to write a summary explaining how your content based recommender works. Do you see any possible improvements that could be made to your function? Is there anything novel about your content based recommender? # # ### This part is NOT REQUIRED to pass this project. However, you may choose to take this on as an extra way to show off your skills. # **Write an explanation of your content based recommendation system here.** # `3.` Use your content-recommendation system to make recommendations for the below scenarios based on the comments. 
# Again no tests are provided here, because there isn't one right answer that could be used to find these content based recommendations.
#
# ### This part is NOT REQUIRED to pass this project.  However, you may choose to take this on as an extra way to show off your skills.

# +
# make recommendations for a brand new user

# make a recommendation for a user who only has interacted with article id '1427.0'
# -

# ### <a class="anchor" id="Matrix-Fact">Part V: Matrix Factorization</a>
#
# In this part of the notebook, you will use matrix factorization to make article recommendations to the users on the IBM Watson Studio platform.
#
# `1.` You should have already created a **user_item** matrix above in **question 1** of **Part III** above.  This first question here will just require that you run the cells to get things set up for the rest of **Part V** of the notebook.

# Load the matrix here
user_item_matrix = pd.read_pickle('user_item_matrix.p')

# quick look at the matrix
user_item_matrix.head()

# `2.` In this situation, you can use Singular Value Decomposition from [numpy](https://docs.scipy.org/doc/numpy-1.14.0/reference/generated/numpy.linalg.svd.html) on the user-item matrix.  Use the cell to perform SVD, and explain why this is different than in the lesson.

# +
# Perform SVD on the User-Item Matrix Here

u, s, vt = np.linalg.svd(user_item_matrix)  # use the built in to get the three matrices
# -

# **Observation**
#
# This dataset is a good candidate for using SVD, since there is no missing value in the dataset. While in the lesson dataset, there are missing values which makes SVD not suitable anymore; instead we used FunkSVD to tackle the problem.

# `3.` Now for the tricky part, how do we choose the number of latent features to use?  Running the below cell, you can see that as the number of latent features increases, we obtain a lower error rate on making predictions for the 1 and 0 values in the user-item matrix.
# Run the cell below to get an idea of how the accuracy improves as we increase the number of latent features.

# +
num_latent_feats = np.arange(10,700+10,20)
sum_errs = []

for k in num_latent_feats:
    # restructure with k latent features
    s_new, u_new, vt_new = np.diag(s[:k]), u[:, :k], vt[:k, :]

    # take dot product and round back to 0/1 predictions
    user_item_preds = np.around(np.dot(np.dot(u_new, s_new), vt_new))

    # compute error for each prediction to actual value
    diffs = np.subtract(user_item_matrix, user_item_preds)

    # total errors and keep track of them
    err = np.sum(np.sum(np.abs(diffs)))
    sum_errs.append(err)

# NOTE(review): the denominator is the number of interaction rows (df.shape[0]),
# not the number of matrix cells - this matches the course template, but the
# absolute "accuracy" scale should be taken with a grain of salt; verify.
plt.plot(num_latent_feats, 1 - np.array(sum_errs)/df.shape[0]);
plt.title('Accuracy vs. Number of Latent Features', fontsize=14)
plt.xlabel('Number of Latent Features', fontsize=14)
plt.ylabel('Accuracy', fontsize=14)
plt.xticks(fontsize=12)
plt.yticks(fontsize=12)
plt.legend(['All data'], prop={"size":12})
plt.show()
# -

# `4.` From the above, we can't really be sure how many features to use, because simply having a better way to predict the 1's and 0's of the matrix doesn't exactly give us an indication of if we are able to make good recommendations.  Instead, we might split our dataset into a training and test set of data, as shown in the cell below.
#
# Use the code from question 3 to understand the impact on accuracy of the training and test sets of data with different numbers of latent features. Using the split below:
#
# * How many users can we make predictions for in the test set?
# * How many users are we not able to make predictions for because of the cold start problem?
# * How many articles can we make predictions for in the test set?
# * How many articles are we not able to make predictions for because of the cold start problem?
# Chronological-style split: first 40000 interactions train, last 5993 test.
df_train = df.head(40000)
df_test = df.tail(5993)

user_item_train = create_user_item_matrix(df_train)
user_item_test = create_user_item_matrix(df_test)

# +
def create_test_and_train_user_item(df_train, df_test):
    '''
    INPUT:
    df_train - training dataframe
    df_test - test dataframe

    OUTPUT:
    user_item_train - a user-item matrix of the training dataframe
                      (unique users for each row and unique articles for each column)
    user_item_test - a user-item matrix of the testing dataframe
                     (unique users for each row and unique articles for each column)
    test_idx - all of the test user ids
    test_arts - all of the test article ids
    '''
    user_item_train = create_user_item_matrix(df_train)
    user_item_test = create_user_item_matrix(df_test)
    test_idx = user_item_test.index
    test_arts = user_item_test.columns

    return user_item_train, user_item_test, test_idx, test_arts

user_item_train, user_item_test, test_idx, test_arts = create_test_and_train_user_item(df_train, df_test)
# -

# Test users also present in training - the ones we can predict for.
num_common_idx = test_idx.isin(user_item_train.index).sum()
num_common_idx

# Test users unseen in training (cold start).
num_unpredicted_idx = len(test_idx) - num_common_idx
num_unpredicted_idx

# Test articles also present in training.
num_common_arts = test_arts.isin(user_item_train.columns).sum()
num_common_arts

# Test articles unseen in training (cold start).
num_unpredicted_arts = len(test_arts) - num_common_arts
num_unpredicted_arts

# +
# Replace the values in the dictionary below
a = 662
b = 574
c = 20
d = 0

sol_4_dict = {
    'How many users can we make predictions for in the test set?': c,
    'How many users in the test set are we not able to make predictions for because of the cold start problem?': a,
    'How many articles can we make predictions for in the test set?': b,
    'How many articles in the test set are we not able to make predictions for because of the cold start problem?': d
}

t.sol_4_test(sol_4_dict)
# -

# `5.` Now use the **user_item_train** dataset from above to find **U**, **S**, and **V** transpose using SVD.
Then find the subset of rows in the **user_item_test** dataset that you can predict using this matrix decomposition with different numbers of latent features to see how many features makes sense to keep based on the accuracy on the test data. This will require combining what was done in questions `2` - `4`.
#
# Use the cells below to explore how well SVD works towards making predictions for recommendations on the test data.

# fit SVD on the user_item_train matrix
# (the matrix is dense with no missing entries, so plain numpy SVD applies)
u_train, s_train, vt_train = np.linalg.svd(user_item_train) # fit svd similar to above then use the cells below

# +
# Use these cells to see how well you can use the training
# decomposition to predict on test data

# Boolean masks over the TRAINING matrix selecting the users/articles that
# also appear in the test set; used to slice the matching rows of U and
# columns of V^T from the training decomposition.
row_idxs = user_item_train.index.isin(test_idx)
col_idxs = user_item_train.columns.isin(test_arts)
u_test = u_train[row_idxs, :]
vt_test = vt_train[:, col_idxs]

# Keep only the test users that also exist in the training data -- SVD
# cannot predict for cold-start users.
# NOTE(review): this assumes the filtered rows of user_item_test and the
# rows masked out of u_train line up in the same (sorted user-id) order --
# confirm create_user_item_matrix sorts its index.
user_idxs = user_item_test.index.isin(user_item_train.index)
user_item_test_20 = user_item_test.loc[user_idxs,:] # users can be predicted in the test data

# NOTE(review): the range starts at k=0, which produces an all-zero
# prediction matrix for the first point of every curve.
num_latent_feats = np.arange(0,700+10,20)
sum_errs_train = []
sum_errs_test = []
test_precision = []
test_recall = []

# flatten the actual test data once, for precision and recall calculation
test_actual = np.array(user_item_test_20).flatten()

for k in num_latent_feats:
    # restructure with k latent features
    s_train_k, u_train_k, vt_train_k = np.diag(s_train[:k]), u_train[:, :k], vt_train[:k, :]
    u_test_k, vt_test_k = u_test[:, :k], vt_test[:k, :]

    # take dot product to reconstruct the train and test matrices
    user_item_train_preds = np.around(np.dot(np.dot(u_train_k, s_train_k), vt_train_k))
    user_item_test_preds = np.around(np.dot(np.dot(u_test_k, s_train_k), vt_test_k))

    # compute error for each prediction to actual value
    diffs_train = np.subtract(user_item_train, user_item_train_preds)
    diffs_test = np.subtract(user_item_test_20, user_item_test_preds)

    # total errors and keep track of them
    err_train = np.sum(np.sum(np.abs(diffs_train)))
    err_test = np.sum(np.sum(np.abs(diffs_test)))
    sum_errs_train.append(err_train)
sum_errs_test.append(err_test) # flatten the predicted matrix train_preds = np.array(user_item_train_preds).flatten() test_preds = np.array(user_item_test_preds).flatten() test_precision.append(precision_score(test_actual, test_preds, zero_division=0)) test_recall.append(recall_score(test_actual, test_preds, zero_division=0)) # - plt.plot(num_latent_feats, 1 - np.array(sum_errs_train)/df.shape[0] , label='Train') plt.title('Accuracy vs. Number of Latent Features', fontsize=14) plt.xlabel('Number of Latent Features', fontsize=14) plt.ylabel('Accuracy', fontsize=14) plt.xticks(fontsize=12) plt.yticks(fontsize=12) plt.legend(['Train'], prop={"size":12}) plt.show() plt.plot(num_latent_feats, 1 - np.array(sum_errs_test)/df.shape[0], label='Test') plt.title('Accuracy vs. Number of Latent Features', fontsize=14) plt.xlabel('Number of Latent Features', fontsize=14) plt.ylabel('Accuracy', fontsize=14) plt.xticks(fontsize=12) plt.yticks(fontsize=12) plt.legend(['Test'], prop={"size":12}) plt.show() plt.plot(num_latent_feats, test_precision , label='Test Precision') plt.plot(num_latent_feats, test_recall , label='Test Recall') plt.title('Precision and Recall vs. Number of Latent Features', fontsize=14) plt.xlabel('Number of Latent Features', fontsize=14) plt.ylabel('Precision', fontsize=14) plt.xticks(fontsize=12) plt.yticks(fontsize=12) plt.legend(prop={"size":12}) plt.show() # `6.` Use the cell below to comment on the results you found in the previous question. Given the circumstances of your results, discuss what you might do to determine if the recommendations you make with any of the above recommendation systems are an improvement to how users currently find articles? # **Observation** # # The increasing of number of the latent features in the SVD process results in overfitting of the model. We can see that the training data accuracy approches 1 while the testing data accuracy decreases when the number of latent features increase. 
#
# In the test data, only 20 of the test users also appear in the training data, so SVD alone cannot make predictions for the whole test dataset. The knowledge-based (rank-based) and collaborative-filtering-based (user-user) recommendations developed in Part II and Part III, or content-based and other recommendation methods, may be used to improve our recommendations for this cold-start problem.
#
# In addition, the metric we used for calculating the accuracy simply counts the number of user-article interactions in the actual matrix and the estimated one. The actual positions are not taken into consideration. By using precision and recall, we can see that the recommendation model still needs a lot of improvement. One possible reason could be the lack of data in our test dataset, which contains only 20 users.
#
# To test whether the recommendation engines built in this notebook work for the IBM Watson Studio platform, an A/B test can be conducted, with one group seeing no change while the other receives article recommendations from the engines. The number of articles each user interacts with can be used as a metric to measure the significance of the recommendation engine.

# <a id='conclusions'></a>
# ### Future exploration
# Using this workbook, you could now save your recommendations for each user, develop a class to make new predictions and update your results, and make a flask app to deploy your results.  These tasks are beyond what is required for this project.  However, from what you learned in the lessons, you are certainly capable of taking these tasks on to improve upon your work here!

from subprocess import call
call(['python', '-m', 'nbconvert', 'Recommendations_with_IBM.ipynb'])
Recommendations_with_IBM.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + [markdown] colab_type="text" id="view-in-github" # <a href="https://colab.research.google.com/github/NeuromatchAcademy/course-content/blob/master/tutorials/W3D4_DeepLearning1/W3D4_Tutorial1.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a> # + [markdown] colab_type="text" # # Neuromatch Academy: Week 3, Day 4, Tutorial 1 # # Deep Learning: Decoding Neural Responses # # **Content creators**: <NAME>, <NAME> # # **Content reviewers**: <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, <NAME> # # + [markdown] colab_type="text" # --- # # Tutorial Objectives # In this tutorial, we'll use deep learning to decode stimulus information from the responses of sensory neurons. Specifically, we'll look at the activity of ~20,000 neurons in mouse primary visual cortex responding to oriented gratings recorded in [this study](https://www.biorxiv.org/content/10.1101/679324v2.abstract). Our task will be to decode the orientation of the presented stimulus from the responses of the whole population of neurons. We could do this in a number of ways, but here we'll use deep learning. Deep learning is particularly well-suited to this problem for a number of reasons: # * The data are very high-dimensional: the neural response to a stimulus is a ~20,000 dimensional vector. Many machine learning techniques fail in such high dimensions, but deep learning actually thrives in this regime, as long as you have enough data (which we do here!). # * As you'll be able to see below, different neurons can respond quite differently to stimuli. This complex pattern of responses will, therefore, require non-linear methods to be decoded, which we can easily do with non-linear activation functions in deep networks. 
# * Deep learning architectures are highly flexible, meaning we can easily adapt the architecture of our decoding model to optimize decoding. Here, we'll focus on a single architecture, but you'll see that it can easily be modified with few changes to the code. # # More concretely, our goal will be learn how to: # * Build a deep feed-forward network using PyTorch # * Evaluate the network's outputs using PyTorch built-in loss functions # * Compute gradients of the loss with respect to each parameter of the network using automatic differentiation # * Implement gradient descent to optimize the network's parameters # # This tutorial will take up the first full session (equivalent to two tutorials on other days). # + cellView="form" colab={"base_uri": "https://localhost:8080/", "height": 517} colab_type="code" outputId="9269a531-a230-486d-adcd-330d5120b2ce" #@title Video 1: Decoding from neural data using feed-forward networks in pytorch from IPython.display import YouTubeVideo video = YouTubeVideo(id="SlrbMvvBOzM", width=854, height=480, fs=1) print("Video available at https://youtu.be/" + video.id) video # + [markdown] colab_type="text" # --- # # Setup # # + cellView="both" colab={} colab_type="code" import os import numpy as np import torch from torch import nn from torch import optim import matplotlib as mpl from matplotlib import pyplot as plt # + cellView="form" colab={} colab_type="code" #@title Data retrieval and loading import hashlib import requests fname = "W3D4_stringer_oribinned1.npz" url = "https://osf.io/683xc/download" expected_md5 = "436599dfd8ebe6019f066c38aed20580" if not os.path.isfile(fname): try: r = requests.get(url) except requests.ConnectionError: print("!!! Failed to download data !!!") else: if r.status_code != requests.codes.ok: print("!!! Failed to download data !!!") elif hashlib.md5(r.content).hexdigest() != expected_md5: print("!!! 
Data download appears corrupted !!!") else: with open(fname, "wb") as fid: fid.write(r.content) # + cellView="form" colab={} colab_type="code" #@title Figure Settings # %config InlineBackend.figure_format = 'retina' plt.style.use("https://raw.githubusercontent.com/NeuromatchAcademy/course-content/master/nma.mplstyle") # + cellView="form" colab={} colab_type="code" #@title Helper Functions def load_data(data_name=fname, bin_width=1): """Load mouse V1 data from Stringer et al. (2019) Data from study reported in this preprint: https://www.biorxiv.org/content/10.1101/679324v2.abstract These data comprise time-averaged responses of ~20,000 neurons to ~4,000 stimulus gratings of different orientations, recorded through Calcium imaginge. The responses have been normalized by spontanous levels of activity and then z-scored over stimuli, so expect negative numbers. They have also been binned and averaged to each degree of orientation. This function returns the relevant data (neural responses and stimulus orientations) in a torch.Tensor of data type torch.float32 in order to match the default data type for nn.Parameters in Google Colab. This function will actually average responses to stimuli with orientations falling within bins specified by the bin_width argument. This helps produce individual neural "responses" with smoother and more interpretable tuning curves. Args: bin_width (float): size of stimulus bins over which to average neural responses Returns: resp (torch.Tensor): n_stimuli x n_neurons matrix of neural responses, each row contains the responses of each neuron to a given stimulus. As mentioned above, neural "response" is actually an average over responses to stimuli with similar angles falling within specified bins. stimuli: (torch.Tensor): n_stimuli x 1 column vector with orientation of each stimulus, in degrees. This is actually the mean orientation of all stimuli in each bin. 
""" with np.load(data_name) as dobj: data = dict(**dobj) resp = data['resp'] stimuli = data['stimuli'] if bin_width > 1: # Bin neural responses and stimuli bins = np.digitize(stimuli, np.arange(0, 360 + bin_width, bin_width)) stimuli_binned = np.array([stimuli[bins == i].mean() for i in np.unique(bins)]) resp_binned = np.array([resp[bins == i, :].mean(0) for i in np.unique(bins)]) else: resp_binned = resp stimuli_binned = stimuli # Return as torch.Tensor resp_tensor = torch.tensor(resp_binned, dtype=torch.float32) stimuli_tensor = torch.tensor(stimuli_binned, dtype=torch.float32).unsqueeze(1) # add singleton dimension to make a column vector return resp_tensor, stimuli_tensor def plot_data_matrix(X, ax): """Visualize data matrix of neural responses using a heatmap Args: X (torch.Tensor or np.ndarray): matrix of neural responses to visualize with a heatmap ax (matplotlib axes): where to plot """ cax = ax.imshow(X, cmap=mpl.cm.pink, vmin=np.percentile(X, 1), vmax=np.percentile(X, 99)) cbar = plt.colorbar(cax, ax=ax, label='normalized neural response') ax.set_aspect('auto') ax.set_xticks([]) ax.set_yticks([]) def identityLine(): """ Plot the identity line y=x """ ax = plt.gca() lims = np.array([ax.get_xlim(), ax.get_ylim()]) minval = lims[:, 0].min() maxval = lims[:, 1].max() equal_lims = [minval, maxval] ax.set_xlim(equal_lims) ax.set_ylim(equal_lims) line = ax.plot([minval, maxval], [minval, maxval], color="0.7") line[0].set_zorder(-1) def get_data(n_stim, train_data, train_labels): """ Return n_stim randomly drawn stimuli/resp pairs Args: n_stim (scalar): number of stimuli to draw resp (torch.Tensor): train_data (torch.Tensor): n_train x n_neurons tensor with neural responses to train on train_labels (torch.Tensor): n_train x 1 tensor with orientations of the stimuli corresponding to each row of train_data, in radians Returns: (torch.Tensor, torch.Tensor): n_stim x n_neurons tensor of neural responses and n_stim x 1 of orientations respectively """ n_stimuli = 
train_labels.shape[0] istim = np.random.choice(n_stimuli, n_stim) r = train_data[istim] # neural responses to this stimulus ori = train_labels[istim] # true stimulus orientation return r, ori def stimulus_class(ori, n_classes): """Get stimulus class from stimulus orientation Args: ori (torch.Tensor): orientations of stimuli to return classes for n_classes (int): total number of classes Returns: torch.Tensor: 1D tensor with the classes for each stimulus """ bins = np.linspace(0, 360, n_classes + 1) return torch.tensor(np.digitize(ori.squeeze(), bins)) - 1 # minus 1 to accomodate Python indexing def plot_decoded_results(train_loss, test_labels, predicted_test_labels): """ Plot decoding results in the form of network training loss and test predictions Args: train_loss (list): training error over iterations test_labels (torch.Tensor): n_test x 1 tensor with orientations of the stimuli corresponding to each row of train_data, in radians predicted_test_labels (torch.Tensor): n_test x 1 tensor with predicted orientations of the stimuli from decoding neural network """ # Plot results fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(16, 6)) # Plot the training loss over iterations of GD ax1.plot(train_loss) # Plot true stimulus orientation vs. 
predicted class ax2.plot(stimuli_test.squeeze(), predicted_test_labels, '.') ax1.set_xlim([0, None]) ax1.set_ylim([0, None]) ax1.set_xlabel('iterations of gradient descent') ax1.set_ylabel('negative log likelihood') ax2.set_xlabel('true stimulus orientation ($^o$)') ax2.set_ylabel('decoded orientation bin') ax2.set_xticks(np.linspace(0, 360, n_classes + 1)) ax2.set_yticks(np.arange(n_classes)) class_bins = [f'{i * 360 / n_classes: .0f}$^o$ - {(i + 1) * 360 / n_classes: .0f}$^o$' for i in range(n_classes)] ax2.set_yticklabels(class_bins); # Draw bin edges as vertical lines ax2.set_ylim(ax2.get_ylim()) # fix y-axis limits for i in range(n_classes): lower = i * 360 / n_classes upper = (i + 1) * 360 / n_classes ax2.plot([lower, lower], ax2.get_ylim(), '-', color="0.7", linewidth=1, zorder=-1) ax2.plot([upper, upper], ax2.get_ylim(), '-', color="0.7", linewidth=1, zorder=-1) plt.tight_layout() # + [markdown] colab_type="text" # --- # # Section 1: Load and visualize data # # In the next cell, we have provided code to load the data and plot the matrix of neural responses. # # Next to it, we plot the tuning curves of three randomly selected neurons. 
# + cellView="form" colab={"base_uri": "https://localhost:8080/", "height": 375} colab_type="code" outputId="77f0552f-00a1-4c7a-a9a4-f9a4666449d0" #@title #@markdown Execute this cell to load and visualize data # Load data resp_all, stimuli_all = load_data() # argument to this function specifies bin width n_stimuli, n_neurons = resp_all.shape print(f'{n_neurons} neurons in response to {n_stimuli} stimuli') fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(2 * 6, 5)) # Visualize data matrix plot_data_matrix(resp_all[:100, :].T, ax1) # plot responses of first 100 neurons ax1.set_xlabel('stimulus') ax1.set_ylabel('neuron') # Plot tuning curves of three random neurons ineurons = np.random.choice(n_neurons, 3, replace=False) # pick three random neurons ax2.plot(stimuli_all, resp_all[:, ineurons]) ax2.set_xlabel('stimulus orientation ($^o$)') ax2.set_ylabel('neural response') ax2.set_xticks(np.linspace(0, 360, 5)) plt.tight_layout() # + [markdown] colab_type="text" # We will split our data into a training set and test set. In particular, we will have a training set of orientations (`stimuli_train`) and the corresponding responses (`resp_train`). Our testing set will have held-out orientations (`stimuli_test`) and the corresponding responses (`resp_test`). 
# + cellView="form" colab={} colab_type="code" #@title #@markdown Execute this cell to split into training and test sets # Set random seeds for reproducibility np.random.seed(4) torch.manual_seed(4) # Split data into training set and testing set n_train = int(0.6 * n_stimuli) # use 60% of all data for training set ishuffle = torch.randperm(n_stimuli) itrain = ishuffle[:n_train] # indices of data samples to include in training set itest = ishuffle[n_train:] # indices of data samples to include in testing set stimuli_test = stimuli_all[itest] resp_test = resp_all[itest] stimuli_train = stimuli_all[itrain] resp_train = resp_all[itrain] # + [markdown] colab_type="text" # --- # # Section 2: Deep feed-forward networks in *pytorch* # # We'll now build a simple deep neural network that takes as input a vector of neural responses and outputs a single number representing the decoded stimulus orientation. # # To keep things simple, we'll build a deep network with **one** hidden layer. See the appendix for a deeper discussion of what this choice entails, and when one might want to use deeper/shallower and wider/narrower architectures. # # Let $\mathbf{r}^{(n)} = \begin{bmatrix} r_1^{(n)} & r_2^{(n)} & \ldots & r_N^{(n)} \end{bmatrix}^T$ denote the vector of neural responses (of neurons $1, \ldots, N$) to the $n$th stimulus. The network we will use is described by the following set of equations: # \begin{align} # \mathbf{h}^{(n)} &= \mathbf{W}^{in} \mathbf{r}^{(n)} + \mathbf{b}^{in}, && [\mathbf{W}^{in}: M \times N], \\ # y^{(n)} &= \mathbf{W}^{out} \mathbf{h}^{(n)} + \mathbf{b}^{out}, && [\mathbf{W}^{out}: 1 \times M], # \end{align} # where $y^{(n)}$ denotes the scalar output of the network: the decoded orientation of the $n$th stimulus. # # The $M$-dimensional vector $\mathbf{h}^{(n)}$ denotes the activations of the **hidden layer** of the network. 
# # <p align="center"> # <img src="https://github.com/NeuromatchAcademy/course-content/blob/master/tutorials/static/one-layer-network.png?raw=true" width="450" /> # </p> # # The blue components of this diagram denote the **parameters** of the network, which we will later optimize with gradient descent. These include all the weights and biases $\mathbf{W}^{in}, \mathbf{b}^{in}, \mathbf{W}^{out}, \mathbf{b}^{out}$. # # # + [markdown] colab_type="text" # ### Section 2.1: Introduction to PyTorch # # Here, we'll use the **PyTorch** package to build, run, and train deep networks of this form in Python. There are two core components to the PyTorch package: # # 1. The first is the `torch.Tensor` data type used in PyTorch. `torch.Tensor`'s are effectively just like a `numpy` arrays, except that they have some important attributes and methods needed for automatic differentiation (to be discussed below). They also come along with infrastructure for easily storing and computing with them on GPU's, a capability we won't touch on here but which can be really useful in practice. # # 2. The second core ingredient is the PyTorch `nn.Module` class. This is the class we'll use for constructing deep networks, so that we can then easily train them using built-in PyTorch functions. Keep in my mind that `nn.Module` classes can actually be used to build, run, and train any model -- not just deep networks! # # The next cell contains code for building the deep network we defined above using the `nn.Module` class. It contains three key ingredients: # # * `__init__()` method to initialize its parameters, like in any other Python class. In this case, it takes two arguments: # * `n_inputs`: the number of input units. This should always be set to the number of neurons whose activities are being decoded (i.e. the dimensionality of the input to the network). # * `n_hidden`: the number of hidden units. This is a parameter that we are free to vary in deciding how to build our network. 
See the appendix for a discussion of how this architectural choice affects the computations the network can perform. # # * `nn.Linear` modules, which are built-in PyTorch classes containing all the weights and biases for a given network layer (documentation [here](https://pytorch.org/docs/master/generated/torch.nn.Linear.html)). This class takes two arguments to initialize: # * \# of inputs to that layer # * \# of outputs from that layer # # For the input layer, for example, we have: # * \# of inputs = \# of neurons whose responses are to be decoded ($N$, specified by `n_inputs`) # * \# of outputs = \# of hidden layer units ($M$, specified by `n_hidden`) # # PyTorch will initialize all weights and biases randomly. # # * `forward()` method, which takes as argument an input to the network and returns the network output. In our case, this comprises computing the output $y$ from a given input $\mathbf{r}$ using the above two equations. See the next cell for code implementing this computation using the built-in PyTorch `nn.Linear` classes. # + cellView="both" colab={} colab_type="code" class DeepNet(nn.Module): """Deep Network with one hidden layer Args: n_inputs (int): number of input units n_hidden (int): number of units in hidden layer Attributes: in_layer (nn.Linear): weights and biases of input layer out_layer (nn.Linear): weights and biases of output layer """ def __init__(self, n_inputs, n_hidden): super().__init__() # needed to invoke the properties of the parent class nn.Module self.in_layer = nn.Linear(n_inputs, n_hidden) # neural activity --> hidden units self.out_layer = nn.Linear(n_hidden, 1) # hidden units --> output def forward(self, r): """Decode stimulus orientation from neural responses Args: r (torch.Tensor): vector of neural responses to decode, must be of length n_inputs. Can also be a tensor of shape n_stimuli x n_inputs, containing n_stimuli vectors of neural responses Returns: torch.Tensor: network outputs for each input provided in r. 
If r is a vector, then y is a 1D tensor of length 1. If r is a 2D tensor then y is a 2D tensor of shape n_stimuli x 1. """ h = self.in_layer(r) # hidden representation y = self.out_layer(h) return y # + [markdown] colab_type="text" # The next cell contains code for initializing and running this network. We use it to decode stimulus orientation from a vector of neural responses to the very first stimulus. Note that when the initialized network class is called as a function on an input (e.g. `net(r)`), its `.forward()` method is called. This is a special property of the `nn.Module` class. # # Note that the decoded orientations at this point will be nonsense, since the network has been initialized with random weights. Below, we'll learn how to optimize these weights for good stimulus decoding. # + colab={"base_uri": "https://localhost:8080/", "height": 52} colab_type="code" outputId="f7fa3e0c-0653-4b5a-eb50-f73bb9207fa0" # Set random seeds for reproducibility np.random.seed(1) torch.manual_seed(1) # Initialize a deep network with M=200 hidden units net = DeepNet(n_neurons, 200) # Get neural responses (r) to and orientation (ori) to one stimulus in dataset r, ori = get_data(1, resp_train, stimuli_train) # using helper function get_data # Decode orientation from these neural responses using initialized network out = net(r) # compute output from network, equivalent to net.forward(r) print('decoded orientation: %.2f degrees' % out) print('true orientation: %.2f degrees' % ori) # + [markdown] colab_type="text" # --- # ### Section 2.2: Activation functions # + cellView="form" colab={"base_uri": "https://localhost:8080/", "height": 517} colab_type="code" outputId="615a20d3-4ac2-4a13-8ea4-4dedd75ceaf5" #@title Video 2: Nonlinear activation functions from IPython.display import YouTubeVideo video = YouTubeVideo(id="JAdukDCQALA", width=854, height=480, fs=1) print("Video available at https://youtu.be/" + video.id) video # + [markdown] colab_type="text" # Note that the deep 
network we constructed above comprises solely **linear** operations on each layer: each layer is just a weighted sum of the elements in the previous layer. It turns out that linear hidden layers like this aren't particularly useful, since a sequence of linear transformations is actually essentially the same as a single linear transformation. We can see this from the above equations by plugging in the first one into the second one to obtain # \begin{equation} # y^{(n)} = \mathbf{W}^{out} \left( \mathbf{W}^{in} \mathbf{r}^{(n)} + \mathbf{b}^{in} \right) + \mathbf{b}^{out} = \mathbf{W}^{out}\mathbf{W}^{in} \mathbf{r}^{(n)} + \left( \mathbf{W}^{out}\mathbf{b}^{in} + \mathbf{b}^{out} \right) # \end{equation} # In other words, the output is still just a weighted sum of elements in the input -- the hidden layer has done nothing to change this. # # To extend the set of computable input/output transformations to more than just weighted sums, we'll incorporate a **non-linear activation function** in the hidden units. This is done by simply modifying the equation for the hidden layer activations to be # \begin{equation} # \mathbf{h}^{(n)} = \phi(\mathbf{W}^{in} \mathbf{r}^{(n)} + \mathbf{b}^{in}) # \end{equation} # where $\phi$ is referred to as the activation function. Using a non-linear activation function will ensure that the hidden layer performs a non-linear transformation of the input, which will make our network much more powerful (or *expressive*, cf. appendix). In practice, deep networks *always* use non-linear activation functions. # # # + [markdown] colab_type="text" # #### Exercise 1: Nonlinear Activations # # Create a new class `DeepNetReLU` by modifying our above deep network model to use a non-linear activation function. We'll use the linear rectification function: # \begin{equation} # \phi(x) = # \begin{cases} # x & \text{if } x > 0 \\ # 0 & \text{else} # \end{cases} # \end{equation} # which can be implemented in PyTorch using `torch.relu()`. 
Hidden layers with this activation function are typically referred to as "**Re**ctified **L**inear **U**nits", or **ReLU**'s. # # Initialize this network with 20 hidden units and run on an example stimulus. # # **Hint**: you only need to modify the `forward()` method of the above `DeepNet()` class. # # + colab={} colab_type="code" class DeepNetReLU(nn.Module): def __init__(self, n_inputs, n_hidden): super().__init__() # needed to invoke the properties of the parent class nn.Module self.in_layer = nn.Linear(n_inputs, n_hidden) # neural activity --> hidden units self.out_layer = nn.Linear(n_hidden, 1) # hidden units --> output def forward(self, r): ############################################################################ ## TO DO for students: write code for computing network output using a ## rectified linear activation function for the hidden units # Fill out function and remove raise NotImplementedError("Student exercise: complete DeepNetReLU forward") ############################################################################ h = ... y = ... return y # Set random seeds for reproducibility np.random.seed(1) torch.manual_seed(1) # Get neural responses (r) to and orientation (ori) to one stimulus in dataset r, ori = get_data(1, resp_train, stimuli_train) # Uncomment to test your class # Initialize deep network with M=20 hidden units and uncomment lines below # net = DeepNetReLU(...) 
# Decode orientation from these neural responses using initialized network # net(r) is equivalent to net.forward(r) # out = net(r) # print('decoded orientation: %.2f degrees' % out) # print('true orientation: %.2f degrees' % ori) # + colab={"base_uri": "https://localhost:8080/", "height": 52} colab_type="code" outputId="64207e85-886a-42e7-fbbb-6fb70a52652a" # to_remove solution class DeepNetReLU(nn.Module): def __init__(self, n_inputs, n_hidden): super().__init__() # needed to invoke the properties of the parent class nn.Module self.in_layer = nn.Linear(n_inputs, n_hidden) # neural activity --> hidden units self.out_layer = nn.Linear(n_hidden, 1) # hidden units --> output def forward(self, r): h = torch.relu(self.in_layer(r)) y = self.out_layer(h) return y # Set random seeds for reproducibility np.random.seed(1) torch.manual_seed(1) # Get neural responses (r) to and orientation (ori) to one stimulus in dataset r, ori = get_data(1, resp_train, stimuli_train) # Initialize deep network with M=20 hidden units and uncomment lines below net = DeepNetReLU(n_neurons, 20) # Decode orientation from these neural responses using initialized network # net(r) is equivalent to net.forward(r) out = net(r) print('decoded orientation: %.2f degrees' % out) print('true orientation: %.2f degrees' % ori) # + [markdown] colab_type="text" # You should see that the decoded orientation is 0.13 $^{\circ}$ while the true orientation is 139.00 $^{\circ}$. 
# + [markdown] colab_type="text" # --- # # Section 3: Loss functions and gradient descent # # + cellView="form" colab={"base_uri": "https://localhost:8080/", "height": 517} colab_type="code" outputId="dbdf76e2-4c23-4d8e-ac59-57ab7535adaf" #@title Video 3: Loss functions & gradient descent from IPython.display import YouTubeVideo video = YouTubeVideo(id="aEtKpzEuviw", width=854, height=480, fs=1) print("Video available at https://youtu.be/" + video.id) video # + [markdown] colab_type="text" # ### Section 3.1: Loss functions # # Because the weights of the network are currently randomly chosen, the outputs of the network are nonsense: the decoded stimulus orientation is nowhere close to the true stimulus orientation. We'll shortly write some code to change these weights so that the network does a better job of decoding. # # But to do so, we first need to define what we mean by "better". One simple way of defining this is to use the squared error # \begin{equation} # L = (y - \tilde{y})^2 # \end{equation} # where $y$ is the network output and $\tilde{y}$ is the true stimulus orientation. When the decoded stimulus orientation is far from the true stimulus orientation, $L$ will be large. We thus refer to $L$ as the **loss function**, as it quantifies how *bad* the network is at decoding stimulus orientation. # # PyTorch actually carries with it a number of built-in loss functions. The one corresponding to the squared error is called `nn.MSELoss()`. 
This will take as arguments a **batch** of network outputs $y_1, y_2, \ldots, y_P$ and corresponding target outputs $\tilde{y}_1, \tilde{y}_2, \ldots, \tilde{y}_P$, and compute the **mean squared error (MSE)** # \begin{equation} # L = \frac{1}{P}\sum_{n=1}^P \left(y^{(n)} - \tilde{y}^{(n)}\right)^2 # \end{equation} # # # + [markdown] colab_type="text" # #### Exercise 2: Computing MSE # # # Evaluate the mean squared error for a deep network with $M=20$ rectified linear units, on the decoded orientations from neural responses to 20 random stimuli. # + colab={} colab_type="code" # Set random seeds for reproducibility np.random.seed(1) torch.manual_seed(1) # Initialize a deep network with M=20 hidden units net = DeepNetReLU(n_neurons, 20) # Get neural responses to first 20 stimuli in the data set r, ori = get_data(20, resp_train, stimuli_train) # Decode orientation from these neural responses out = net(r) ################################################### ## TO DO for students: evaluate mean squared error ################################################### # Initialize PyTorch mean squared error loss function (Hint: look at nn.MSELoss) loss_fn = ... # Evaluate mean squared error loss = ... 
# Uncomment once above is filled in # print('mean squared error: %.2f' % loss) # + colab={"base_uri": "https://localhost:8080/", "height": 34} colab_type="code" outputId="e3f1c24d-1bc8-4569-8ac7-f06ea244af19" # to_remove solution # Set random seeds for reproducibility np.random.seed(1) torch.manual_seed(1) # Initialize a deep network with M=20 hidden units net = DeepNetReLU(n_neurons, 20) # Get neural responses to first 20 stimuli in the data set r, ori = get_data(20, resp_train, stimuli_train) # Decode orientation from these neural responses out = net(r) # Initialize PyTorch mean squared error loss function (Hint: look at nn.MSELoss) loss_fn = nn.MSELoss() # Evaluate mean squared error loss = loss_fn(out, ori) print('mean squared error: %.2f' % loss) # + [markdown] colab_type="text" # You should see a mean squared error of 42943.75. # + [markdown] colab_type="text" # --- # ### Section 3.2: Optimization with gradient descent # # Our goal is now to modify the weights to make the mean squared error loss $L$ as small as possible over the whole data set. To do this, we'll use the **gradient descent (GD)** algorithm, which consists of iterating three steps: # 1. **Evaluate the loss** on the training data, # ``` # out = net(train_data) # loss = loss_fn(out, train_labels) # ``` # where `train_data` are the network inputs in the training data (in our case, neural responses), and `train_labels` are the target outputs for each input (in our case, true stimulus orientations). # 2. **Compute the gradient of the loss** with respect to each of the network weights. In PyTorch, we can do this with one line of code: # ``` # loss.backward() # ``` # This command tells PyTorch to compute the gradients of the quantity stored in the variable `loss` with respect to each network parameter using [automatic differentiation](https://en.wikipedia.org/wiki/Automatic_differentiation). These gradients are then stored behind the scenes (see appendix for more details). # 3. 
**Update the network weights** by descending the gradient. In Pytorch, we can do this using built-in optimizers. We'll use the `optim.SGD` optimizer (documentation [here](https://pytorch.org/docs/stable/optim.html#torch.optim.SGD)) which updates parameters along the negative gradient, scaled by a learning rate (see appendix for details). To initialize this optimizer, we have to tell it # * which parameters to update, and # * what learning rate to use # # For example, to optimize *all* the parameters of a network `net` using a learning rate of .001, the optimizer would be initialized as follows # ``` # optimizer = optim.SGD(net.parameters(), lr=.001) # ``` # where `.parameters()` is a method of the `nn.Module` class that returns a [Python generator object](https://wiki.python.org/moin/Generators) over all the parameters of that `nn.Module` class (in our case, $\mathbf{W}^{in}, \mathbf{b}^{in}, \mathbf{W}^{out}, \mathbf{b}^{out}$). # # After computing all the parameter gradients in step 2, we can then update each of these parameters using the `.step()` method of this optimizer, # ``` # optimizer.step() # ``` # This single line of code will extract all the gradients computed with `.backward()` and execute the SGD updates for each parameter given to the optimizer. Note that this is true no matter how big/small the network is, allowing us to use the same two lines of code to perform the gradient descent updates for any deep network model built using PyTorch. # # Finally, an important detail to remember is that the gradients of each parameter need to be cleared before calling `.backward()`, or else PyTorch will try to accumulate gradients across iterations. 
# This can again be done using built-in optimizers via the method `zero_grad()`, as follows:
# ```
# optimizer.zero_grad()
# ```
#
# Putting all this together, each iteration of the GD algorithm will contain a block of code that looks something like this:
# ```
# Get outputs from network
# Evaluate loss
#
# # Compute gradients
# optimizer.zero_grad()  # clear gradients
# loss.backward()
#
# # Update weights
# optimizer.step()
# ```
#
# In the next exercise, we'll give you a code skeleton for implementing the GD algorithm. Your job will be to fill in the blanks.
#
# For the mathematical details of the GD algorithm, see the appendix. Note, in particular, that here we are using the gradient descent algorithm, rather than the more commonly used *stochastic* gradient descent algorithm. See the appendix for a more detailed discussion of how these differ and when one might need to use the stochastic variant.

# + [markdown] colab_type="text"
# #### Exercise 3: Gradient descent in PyTorch
#
# Complete the function `train()` that uses the gradient descent algorithm to optimize the weights of a given network. This function takes as input arguments
# * `net`: the PyTorch network whose weights to optimize
# * `loss_fn`: the PyTorch loss function to use to evaluate the loss
# * `train_data`: the training data to evaluate the loss on (i.e. neural responses to decode)
# * `train_labels`: the target outputs for each data point in `train_data` (i.e. true stimulus orientations)
#
# We will then train a neural network on our data and plot the loss (mean squared error) over time. When we run this function, behind the scenes PyTorch is actually changing the parameters inside this network to make the network better at decoding, so its weights will now be different than they were at initialization.
#
#
# **Hint:** all the code you need for doing this is provided in the above description of the GD algorithm.
# + colab={"base_uri": "https://localhost:8080/", "height": 430} colab_type="code" outputId="5698e3fa-947e-4fb2-fcf0-650a8bb43858"
def train(net, loss_fn, train_data, train_labels, n_iter=50, learning_rate=1e-4):
  """Run gradient descent to optimize parameters of a given network

  Args:
    net (nn.Module): PyTorch network whose parameters to optimize
    loss_fn: built-in PyTorch loss function to minimize
    train_data (torch.Tensor): n_train x n_neurons tensor with neural
      responses to train on
    train_labels (torch.Tensor): n_train x 1 tensor with orientations of the
      stimuli corresponding to each row of train_data, in radians
    n_iter (int): number of iterations of gradient descent to run
    learning_rate (float): learning rate to use for gradient descent

  Returns:
    (list): training loss over iterations

  """

  # Initialize PyTorch SGD optimizer
  optimizer = optim.SGD(net.parameters(), lr=learning_rate)

  # Placeholder to save the loss at each iteration
  track_loss = []

  # Loop over epochs (cf. appendix)
  for i in range(n_iter):

    ######################################################################
    ## TO DO for students: fill in missing code for GD iteration
    raise NotImplementedError("Student exercise: write code for GD iterations")
    ######################################################################

    # Evaluate loss using loss_fn
    out = ...  # compute network output from inputs in train_data
    loss = ...  # evaluate loss function

    # Compute gradients
    ...

    # Update weights
    ...

    # Store current value of loss
    track_loss.append(loss.item())  # .item() needed to transform the tensor output of loss_fn to a scalar

    # Track progress
    if (i + 1) % (n_iter // 5) == 0:
      print(f'iteration {i + 1}/{n_iter} | loss: {loss.item():.3f}')

  return track_loss


# Set random seeds for reproducibility
np.random.seed(1)
torch.manual_seed(1)

# Initialize network
net = DeepNetReLU(n_neurons, 20)

# Initialize built-in PyTorch MSE loss function
loss_fn = nn.MSELoss()

# Run GD on data (uncomment after completing the exercise above)
#train_loss = train(net, loss_fn, resp_train, stimuli_train)

# Plot the training loss over iterations of GD
#plt.plot(train_loss)
plt.xlim([0, None])
plt.ylim([0, None])
plt.xlabel('iterations of gradient descent')
plt.ylabel('mean squared error')
plt.show()

# + colab={"base_uri": "https://localhost:8080/", "height": 522} colab_type="code" outputId="42336dd2-aa2e-41f6-bf31-b47db374855b"
# to_remove solution
def train(net, loss_fn, train_data, train_labels, n_iter=50, learning_rate=1e-4):
  """Run gradient descent to optimize parameters of a given network

  Args:
    net (nn.Module): PyTorch network whose parameters to optimize
    loss_fn: built-in PyTorch loss function to minimize
    train_data (torch.Tensor): n_train x n_neurons tensor with neural
      responses to train on
    train_labels (torch.Tensor): n_train x 1 tensor with orientations of the
      stimuli corresponding to each row of train_data, in radians
    n_iter (int): number of iterations of gradient descent to run
    learning_rate (float): learning rate to use for gradient descent

  Returns:
    (list): training loss over iterations

  """

  # Initialize PyTorch SGD optimizer
  optimizer = optim.SGD(net.parameters(), lr=learning_rate)

  # Placeholder to save the loss at each iteration
  track_loss = []

  # Loop over epochs (cf. appendix)
  for i in range(n_iter):

    # Evaluate loss using loss_fn
    out = net(train_data)  # compute network output from inputs in train_data
    loss = loss_fn(out, train_labels)  # evaluate loss function

    # Compute gradients
    optimizer.zero_grad()  # clear gradients left over from the previous iteration
    loss.backward()

    # Update weights
    optimizer.step()

    # Store current value of loss
    track_loss.append(loss.item())  # .item() needed to transform the tensor output of loss_fn to a scalar

    # Track progress
    if (i + 1) % (n_iter // 5) == 0:
      print(f'iteration {i + 1}/{n_iter} | loss: {loss.item():.3f}')

  return track_loss


# Set random seeds for reproducibility
np.random.seed(1)
torch.manual_seed(1)

# Initialize network
net = DeepNetReLU(n_neurons, 20)

# Initialize built-in PyTorch MSE loss function
loss_fn = nn.MSELoss()

# Run GD on data
train_loss = train(net, loss_fn, resp_train, stimuli_train)

# Plot the training loss over iterations of GD
with plt.xkcd():
  plt.plot(train_loss)
  plt.xlim([0, None])
  plt.ylim([0, None])
  plt.xlabel('iterations of gradient descent')
  plt.ylabel('mean squared error')
  plt.show()

# + [markdown] colab_type="text"
# ---
# # Section 4: Evaluating model performance
#
#

# + [markdown] colab_type="text"
# ## Section 4.1: Generalization performance with test data
#
# Note that gradient descent is essentially an algorithm for fitting the network's parameters to a given set of training data. Selecting this training data is thus crucial for ensuring that the optimized parameters **generalize** to unseen data they weren't trained on. In our case, for example, we want to make sure that our trained network is good at decoding stimulus orientations from neural responses to any orientation, not just those in our data set.
#
# To ensure this, we have split up the full data set into a **training set** and a **testing set**. In Exercise 3, we trained a deep network by optimizing the parameters on a training set.
# We will now evaluate how good the optimized parameters are by using the trained network to decode stimulus orientations from neural responses in the testing set. Good decoding performance on this testing set should then be indicative of good decoding performance on the neurons' responses to any other stimulus orientation. This procedure is commonly used in machine learning (not just in deep learning) and is typically referred to as **cross-validation**.
#
# We will compute the MSE on the test data and plot the decoded stimulus orientations as a function of the true stimulus.
#

# + cellView="form" colab={"base_uri": "https://localhost:8080/", "height": 430} colab_type="code" outputId="5582205f-a22c-4b39-ecf7-5e4c61c6aaab"
#@title
#@markdown Execute this cell to evaluate and plot test error

out = net(resp_test)  # decode stimulus orientation for neural responses in testing set
ori = stimuli_test  # true stimulus orientations
test_loss = loss_fn(out, ori)  # MSE on testing set (Hint: use loss_fn initialized in previous exercise)

plt.plot(ori, out.detach(), '.')  # N.B. need to use .detach() to pass network output into plt.plot()
identityLine()  # draw the identity line y=x; deviations from this indicate bad decoding!
plt.title('MSE on testing set: %.2f' % test_loss.item())  # N.B. need to use .item() to turn test_loss into a scalar
plt.xlabel('true stimulus orientation ($^o$)')
plt.ylabel('decoded stimulus orientation ($^o$)')
axticks = np.linspace(0, 360, 5)
plt.xticks(axticks)
plt.yticks(axticks)
plt.show()

# + [markdown] colab_type="text"
# **PyTorch Note**:
#
# An important thing to note in the code snippet for plotting the decoded orientations is the `.detach()` method. The PyTorch `nn.Module` class is special in that, behind the scenes, each of the variables inside it are linked to each other in a computational graph, for the purposes of automatic differentiation (the algorithm used in `.backward()` to compute gradients).
# As a result, if you want to do anything that is not a `torch` operation to the parameters or outputs of an `nn.Module` class, you'll need to first "detach" it from its computational graph. This is what the `.detach()` method does. In this hidden code above, we need to call it on the outputs of the network so that we can plot them with the `plt.plot()` function.

# + [markdown] colab_type="text"
# ---
# ## (Bonus) Section 4.2: Model criticism
#
# Please move to the Summary and visit this section only if you have time after completing all non-bonus material!
#
# Let's now take a step back and think about how our model is succeeding/failing and how to improve it.

# + cellView="form" colab={"base_uri": "https://localhost:8080/", "height": 430} colab_type="code" outputId="f24d982c-e9b3-4d6e-8eb5-92121202b5ec"
#@title
#@markdown Execute this cell to plot decoding error

out = net(resp_test)  # decode stimulus orientation for neural responses in testing set
ori = stimuli_test  # true stimulus orientations
error = out - ori  # decoding error

plt.plot(ori, error.detach(), '.')  # plot decoding error as a function of true orientation (make sure all arguments to plt.plot() have been detached from PyTorch network!)

# Plotting
plt.xlabel('true stimulus orientation ($^o$)')
plt.ylabel('decoding error ($^o$)')
plt.xticks(np.linspace(0, 360, 5))
plt.yticks(np.linspace(-360, 360, 9))
plt.show()

# + [markdown] colab_type="text"
# ### Think
#
# In the cell above, we plot the *decoding error* for each neural response in the testing set. The decoding error is defined as the decoded stimulus orientation minus true stimulus orientation
# \begin{equation}
# \text{decoding error} = y^{(n)} - \tilde{y}^{(n)}
# \end{equation}
#
# In particular, we plot decoding error as a function of the true stimulus orientation.
#
#
# * Are some stimulus orientations harder to decode than others?
# * If so, in what sense? Are the decoded orientations for these stimuli more variable and/or are they biased?
# * Can you explain this variability/bias? What makes these stimulus orientations different from the others? # * (Will be addressed in next exercise) Can you think of a way to modify the deep network in order to avoid this? # + colab={"base_uri": "https://localhost:8080/", "height": 158} colab_type="code" outputId="018c16f3-c958-49d2-f554-156da902e5c0" # to_remove explanation """ It appears that the errors are larger at 0 and 360 degrees. The errors are biased in the positive direction at 0 degrees and in the negative direction at 360 degrees. This is because the 0 degree stimulus and the 360 degree stimulus are in fact the same because orientation is a circular variable. The network therefore has trouble determining whether the stimulus is 0 or 360 degrees. We can modify the deep network to avoid this problem in a few different ways. One approach would be to predict a sine and a cosine of the angle and then taking the predicted angle as the angle of the complex number $sin(\theta) + i cos(\theta)$. An alternative approach is to bin the stimulus responses and predict the bin of the stimulus. This turns the problem into a classification problem rather than a regression problem, and in this case you will need to use a new loss function (see below). """ # + [markdown] colab_type="text" # ### (Advanced Bonus) Exercise 4: Improving the loss function # As illustrated in the previous exercise, the squared error is not a good loss function for circular quantities like angles, since two angles that are very close (e.g. $1^o$ and $359^o$) might actually have a very large squared error. # # Here, we'll avoid this problem by changing our loss function to treat our decoding problem as a **classification problem**. Rather than estimating the *exact* angle of the stimulus, we'll now aim to construct a decoder that classifies the stimulus into one of $C$ classes, corresponding to different bins of angles of width $b = \frac{360}{C}$. 
The true class $\tilde{y}^{(n)}$ of stimulus $i$ is now given by # \begin{equation} # \tilde{y}^{(n)} = # \begin{cases} # 1 &\text{if angle of stimulus $n$ is in the range } [0, b] \\ # 2 &\text{if angle of stimulus $n$ is in the range } [b, 2b] \\ # 3 &\text{if angle of stimulus $n$ is in the range } [2b, 3b] \\ # \vdots \\ # C &\text{if angle of stimulus $n$ is in the range } [(C-1)b, 360] # \end{cases} # \end{equation} # # We have a helper function `stimulus_class` that will extract `n_classes` stimulus classes for us from the stimulus orientations. # + [markdown] colab_type="text" # To decode the stimulus class from neural responses, we'll use a deep network that outputs a $C$-dimensional vector of probabilities $\mathbf{p} = \begin{bmatrix} p_1, p_2, \ldots, p_C \end{bmatrix}^T$, corresponding to the estimated probabilities of the stimulus belonging to each class $1, 2, \ldots, C$. # # To ensure the network's outputs are indeed probabilities (i.e. they are positive numbers between 0 and 1, and sum to 1), we'll use a [softmax function](https://en.wikipedia.org/wiki/Softmax_function) to transform the real-valued outputs from the hidden layer into probabilities. Letting $\sigma(\cdot)$ denote this softmax function, the equations describing our network are # \begin{align} # \mathbf{h}^{(n)} &= \phi(\mathbf{W}^{in} \mathbf{r}^{(n)} + \mathbf{b}^{in}), && [\mathbf{W}^{in}: M \times N], \\ # \mathbf{p}^{(n)} &= \sigma(\mathbf{W}^{out} \mathbf{h}^{(n)} + \mathbf{b}^{out}), && [\mathbf{W}^{out}: C \times M], # \end{align} # The decoded stimulus class is then given by that assigned the highest probability by the network: # \begin{equation} # y^{(n)} = \underset{i}{\arg\max} \,\, p_i # \end{equation} # The softmax function can be implemented in PyTorch simply using `torch.softmax()`. # # Often *log* probabilities are easier to work with than actual probabilities, because probabilities tend to be very small numbers that computers have trouble representing. 
# We'll therefore actually use the logarithm of the softmax as the output of our network,
# \begin{equation}
# \mathbf{l}^{(n)} = \log \left( \mathbf{p}^{(n)} \right)
# \end{equation}
# which can be implemented in PyTorch together with the softmax via an `nn.LogSoftmax` layer. The nice thing about the logarithmic function is that it's *monotonic*, so if one probability is larger/smaller than another, then its logarithm is also larger/smaller than the other's. We therefore have that
# \begin{equation}
# y^{(n)} = \underset{i}{\arg\max} \,\, p_i^{(n)} = \underset{i}{\arg\max} \, \log p_i^{(n)} = \underset{i}{\arg\max} \,\, l_i^{(n)}
# \end{equation}
#
# See the next cell for code for constructing a deep network with one hidden layer of ReLU's that outputs a vector of log probabilities.

# + colab={} colab_type="code"
# Deep network for classification
class DeepNetSoftmax(nn.Module):
  """Deep Network with one hidden layer, for classification

  Args:
    n_inputs (int): number of input units
    n_hidden (int): number of units in hidden layer
    n_classes (int): number of outputs, i.e. number of classes to output
      probabilities for

  Attributes:
    in_layer (nn.Linear): weights and biases of input layer
    out_layer (nn.Linear): weights and biases of output layer
    logprob (nn.LogSoftmax): maps output-layer activations to log class
      probabilities (normalized within each row)

  """

  def __init__(self, n_inputs, n_hidden, n_classes):
    super().__init__()  # needed to invoke the properties of the parent class nn.Module
    self.in_layer = nn.Linear(n_inputs, n_hidden)  # neural activity --> hidden units
    self.out_layer = nn.Linear(n_hidden, n_classes)  # hidden units --> outputs
    self.logprob = nn.LogSoftmax(dim=1)  # probabilities across columns should sum to 1 (each output row corresponds to a different input)

  def forward(self, r):
    """Predict stimulus orientation bin from neural responses

    Args:
      r (torch.Tensor): n_stimuli x n_inputs tensor with neural responses to n_stimuli

    Returns:
      torch.Tensor: n_stimuli x n_classes tensor with predicted *log*
        probabilities of each class (output of nn.LogSoftmax)

    """
    h = torch.relu(self.in_layer(r))
    logp = self.logprob(self.out_layer(h))
    return logp

# + [markdown] colab_type="text"
# What should our loss function now be? Ideally, we want the probabilities outputted by our network to be such that the probability of the true stimulus class is high. One way to formalize this is to say that we want to maximize the *log* probability of the true stimulus class $\tilde{y}^{(n)}$ under the class probabilities predicted by the network,
# \begin{equation}
# \log \left( \text{predicted probability of stimulus } n \text{ being of class } \tilde{y}^{(n)} \right) = \log p^{(n)}_{\tilde{y}^{(n)}} = l^{(n)}_{\tilde{y}^{(n)}}
# \end{equation}
# To turn this into a loss function to be *minimized*, we can then simply multiply it by -1: maximizing the log probability is the same as minimizing the *negative* log probability.
# Summing over a batch of $P$ inputs, our loss function is then given by
# \begin{equation}
# L = -\sum_{n=1}^P \log p^{(n)}_{\tilde{y}^{(n)}} = -\sum_{n=1}^P l^{(n)}_{\tilde{y}^{(n)}}
# \end{equation}
# In the deep learning community, this loss function is typically referred to as the **cross-entropy**, or **negative log likelihood**. The corresponding built-in loss function in PyTorch is `nn.NLLLoss()` (documentation [here](https://pytorch.org/docs/master/generated/torch.nn.CrossEntropyLoss.html)).
#
# In the next cell, we've provided most of the code to train and test a network to decode stimulus orientations via classification, by minimizing the negative log likelihood. Fill in the missing pieces.
#
# Once you've done this, have a look at the plotted results. Does changing the loss function from mean squared error to a classification loss solve our problems? Note that errors may still occur -- but are these errors as bad as the ones that our network above was making?

# + colab={} colab_type="code"
def decode_orientation(n_classes, train_data, train_labels, test_data, test_labels):
  """Initialize, train, and test deep network to decode binned orientation from neural responses

  Args:
    n_classes (scalar): number of classes in which to bin orientation
    train_data (torch.Tensor): n_train x n_neurons tensor with neural
      responses to train on
    train_labels (torch.Tensor): n_train x 1 tensor with orientations of the
      stimuli corresponding to each row of train_data, in radians
    test_data (torch.Tensor): n_test x n_neurons tensor with neural
      responses to test on
    test_labels (torch.Tensor): n_test x 1 tensor with orientations of the
      stimuli corresponding to each row of test_data, in radians; not used
      inside this function (the caller plots against the raw test labels),
      kept in the signature for symmetry with the training arguments

  Returns:
    (list, torch.Tensor): training loss over iterations, n_test x 1 tensor
      with predicted orientations of the stimuli from decoding neural network

  """

  # Bin stimulus orientations in training set
  train_binned_labels = stimulus_class(train_labels, n_classes)

  ##############################################################################
  ## TODO for students: fill out missing pieces below to initialize, train, and
  # test network
  # Fill out function and remove
  raise NotImplementedError("Student exercise: complete decode_orientation function")
  ##############################################################################

  # Initialize network
  net = ...  # use M=20 hidden units

  # Initialize built-in PyTorch negative log likelihood loss function
  loss_fn = nn.NLLLoss()

  # Run GD on training set data, using learning rate of 0.1
  train_loss = ...

  # Decode neural responses in testing set data
  out = ...
  out_labels = np.argmax(out.detach(), axis=1)  # predicted classes

  return train_loss, out_labels


# Set random seeds for reproducibility
np.random.seed(1)
torch.manual_seed(1)

n_classes = 12  # start with 12, then (bonus) try making this as big as possible! does decoding get worse?

# Uncomment below to test your function

# Initialize, train, and test network
#train_loss, predicted_test_labels = decode_orientation(n_classes, resp_train, stimuli_train, resp_test, stimuli_test)

# Plot results
#plot_decoded_results(train_loss, stimuli_test, predicted_test_labels)

# + colab={"base_uri": "https://localhost:8080/", "height": 518} colab_type="code" outputId="d2f41dd3-0f4f-4a33-8389-2370a9fdf187"
# to_remove solution
def decode_orientation(n_classes, train_data, train_labels, test_data, test_labels):
  """Initialize, train, and test deep network to decode binned orientation from neural responses

  Args:
    n_classes (scalar): number of classes in which to bin orientation
    train_data (torch.Tensor): n_train x n_neurons tensor with neural
      responses to train on
    train_labels (torch.Tensor): n_train x 1 tensor with orientations of the
      stimuli corresponding to each row of train_data, in radians
    test_data (torch.Tensor): n_test x n_neurons tensor with neural
      responses to test on
    test_labels (torch.Tensor): n_test x 1 tensor with orientations of the
      stimuli corresponding to each row of test_data, in radians; not used
      inside this function (the caller plots against the raw test labels),
      kept in the signature for symmetry with the training arguments

  Returns:
    (list, torch.Tensor): training loss over iterations, n_test x 1 tensor
      with predicted orientations of the stimuli from decoding neural network

  """

  # Bin stimulus orientations in training set
  train_binned_labels = stimulus_class(train_labels, n_classes)

  # Initialize network
  net = DeepNetSoftmax(n_neurons, 20, n_classes)  # use M=20 hidden units

  # Initialize built-in PyTorch negative log likelihood loss function
  loss_fn = nn.NLLLoss()

  # Run GD on training set data, using learning rate of 0.1
  train_loss = train(net, loss_fn, train_data, train_binned_labels, learning_rate=0.1)

  # Decode neural responses in testing set data
  # (bug fix: use the test_data argument rather than the global resp_test, so
  # the function works for any data passed to it; the call below passes
  # resp_test, so the results here are unchanged)
  out = net(test_data)
  out_labels = np.argmax(out.detach(), axis=1)  # predicted classes

  return train_loss, out_labels


# Set random seeds for reproducibility
np.random.seed(1)
torch.manual_seed(1)

n_classes = 12  # start with 12, then (bonus) try making this as big as possible! does decoding get worse?

# Initialize, train, and test network
train_loss, predicted_test_labels = decode_orientation(n_classes, resp_train, stimuli_train, resp_test, stimuli_test)

# Plot results
with plt.xkcd():
  plot_decoded_results(train_loss, stimuli_test, predicted_test_labels)

# + [markdown] colab_type="text"
# ---
# # Summary
#
# We have now covered a number of common and powerful techniques for applying deep learning to decoding from neural data, some of which are common to almost any machine learning problem:
# * Building and training deep networks using the **PyTorch** `nn.Module` class and built-in **optimizers**
# * Choosing and evaluating **loss functions**
# * Testing a trained model on unseen data via **cross-validation**, by splitting the data into a **training set and testing set**
#
# An important aspect of this tutorial was the `train()` function we wrote in Exercise 3. Note that it can be used to train *any* network to minimize *any* loss function (cf. advanced exercise) on *any* training data.
This is the power of using PyTorch to train neural networks and, for that matter, **any other model**! There is nothing in the `nn.Module` class that forces us to use `nn.Linear` layers that implement neural network operations. You can actually put anything you want inside the `.__init__()` and `.forward()` methods of this class. As long as its parameters and computations involve only `torch.Tensor`'s, and the model is differentiable, you'll then be able to optimize the parameters of this model in exactly the same way we optimized the deep networks here. # # What kinds of conclusions can we draw from these sorts of analyses? If we can decode the stimulus well from visual cortex activity, that means that there is information about this stimulus available in visual cortex. Whether or not the animal uses that information to make decisions is not determined from an analysis like this. In fact mice perform poorly in orientation discrimination tasks compared to monkeys and humans, even though they have information about these stimuli in their visual cortex. Why do you think they perform poorly in orientation discrimination tasks? # # See this paper for some potential hypotheses (https://www.biorxiv.org/content/10.1101/679324v2), but this is totally an open question! # + [markdown] colab_type="text" # --- # # Appendix # + [markdown] colab_type="text" # ## Neural network *depth*, *width* and *expressivity* # # Two important architectural choices that always have to be made when constructing deep feed-forward networks like those used here are # * the number of hidden layers, or the network's *depth* # * the number of units in each layer, or the layer *widths* # # Here, we restricted ourselves to networks with a single hidden layer with a width of $M$ units, but it is easy to see how this code could be adapted to arbitrary depths. 
# Adding another hidden layer simply requires adding another `nn.Linear` module to the `__init__()` method and incorporating it into the `.forward()` method.
#
# The depth and width of a network determine the set of input/output transformations that it can perform, often referred to as its *expressivity*. The deeper and wider the network, the more *expressive* it is; that is, the larger the class of input/output transformations it can compute. In fact, it turns out that an infinitely wide *or* infinitely deep network can in principle [compute (almost) *any* input/output transformation](https://en.wikipedia.org/wiki/Universal_approximation_theorem).
#
# A classic mathematical demonstration of the power of depth is given by the so-called [XOR problem](https://medium.com/@jayeshbahire/the-xor-problem-in-neural-networks-50006411840b#:~:text=The%20XOr%2C%20or%20%E2%80%9Cexclusive%20or,value%20if%20they%20are%20equal.). This toy problem demonstrates how even a single hidden layer can drastically expand the set of input/output transformations a network can perform, relative to a shallow network with no hidden layers. The key intuition is that the hidden layer allows you to represent the input in a new format, which can then allow you to do almost anything you want with it. The *wider* this hidden layer, the more flexibility you have in this representation. In particular, if you have more hidden units than input units, then the hidden layer representation of the input is higher-dimensional than the raw data representation. This higher dimensionality effectively gives you more "room" to perform arbitrary computations in. It turns out that even with just this one hidden layer, if you make it wide enough you can actually approximate any input/output transformation you want. See [here](http://neuralnetworksanddeeplearning.com/chap4.html) for a neat visual demonstration of this.
# # In practice, however, it turns out that increasing depth seems to grant more expressivity with fewer units than increasing width does (for reasons that are not well understood). It is for this reason that truly *deep* networks are almost always used in machine learning, which is why this set of techniques is often referred to as *deep* learning. # # That said, there is a cost to making networks deeper and wider. The bigger your network, the more parameters (i.e. weights and biases) it has, which need to be optimized! The extra expressivity afforded by higher width and/or depth thus carries with it (at least) two problems: # * optimizing more parameters usually requires more data # * a more highly parameterized network is more prone to overfit to the training data, so requires more sophisticated optimization algorithms to ensure generalization # + [markdown] colab_type="text" # ## Gradient descent equations # # Here we provide the equations for the three steps of the gradient descent algorithm, as applied to our decoding problem: # # 1. **Evaluate the loss** on the training data. For a mean squared error loss, this is given by # \begin{equation} # L = \frac{1}{P}\sum_{n=1}^P (y^{(n)} - \tilde{y}^{(n)})^2 # \end{equation} # where $y^{(n)}$ denotes the stimulus orientation decoded from the population response $\mathbf{r}^{(n)}$ to the $n$th stimulus in the training data, and $\tilde{y}^{(n)}$ is the true orientation of that stimulus. $P$ denotes the total number of data samples in the training set. In the syntax of our `train()` function above, $\mathbf{r}^{(n)}$ is given by `train_data[n, :]` and $\tilde{y}^{(n)}$ by `train_labels[n]`. # # 2. **Compute the gradient of the loss** with respect to each of the network weights. 
In our case, this entails computing the quantities # \begin{equation} # \frac{\partial L}{\partial \mathbf{W}^{in}}, \frac{\partial L}{\partial \mathbf{b}^{in}}, \frac{\partial L}{\partial \mathbf{W}^{out}}, \frac{\partial L}{\partial \mathbf{b}^{out}} # \end{equation} # Usually, we would require lots of math in order to derive each of these gradients, and lots of code to compute them. But this is where PyTorch comes to the rescue! Using a cool technique called [automatic differentiation](https://en.wikipedia.org/wiki/Automatic_differentiation), PyTorch automatically calculates these gradients when the `.backward()` function is called. # # More specifically, when this function is called on a particular variable (e.g. `loss`, as above), PyTorch will compute the gradients with respect to each network parameter. These are computed and stored behind the scenes, and can be accessed through the `.grad` attribute of each of the network's parameters. As we saw above, however, we actually never need to look at or call these gradients when implementing gradient descent, as this can be taken care of by PyTorch's built-in optimizers, like `optim.SGD`. # # 3. **Update the network weights** by descending the gradient: # \begin{align} # \mathbf{W}^{in} &\leftarrow \mathbf{W}^{in} - \alpha \frac{\partial L}{\partial \mathbf{W}^{in}} \\ # \mathbf{b}^{in} &\leftarrow \mathbf{b}^{in} - \alpha \frac{\partial L}{\partial \mathbf{b}^{in}} \\ # \mathbf{W}^{out} &\leftarrow \mathbf{W}^{out} - \alpha \frac{\partial L}{\partial \mathbf{W}^{out}} \\ # \mathbf{b}^{out} &\leftarrow \mathbf{b}^{out} - \alpha \frac{\partial L}{\partial \mathbf{b}^{out}} # \end{align} # where $\alpha$ is called the **learning rate**. This **hyperparameter** of the SGD algorithm controls how far we descend the gradient on each iteration. It should be as large as possible so that fewer iterations are needed, but not too large so as to avoid parameter updates from skipping over minima in the loss landscape. 
# # While the equations written down here are specific to the network and loss function considered in this tutorial, the code provided above for implementing these three steps is completely general: no matter what loss function or network you are using, exactly the same commands can be used to implement these three steps. # + [markdown] colab_type="text" # ## *Stochastic* gradient descent (SGD) vs. gradient descent (GD) # # In this tutorial, we used the gradient descent algorithm, which differs in a subtle yet very important way from the more commonly used **stochastic gradient descent (SGD)** algorithm. The key difference is in the very first step of each iteration, where in the GD algorithm we evaluate the loss *at every data sample in the training set*. In SGD, on the other hand, we evaluate the loss only at a random subset of data samples from the full training set, called a **mini-batch**. At each iteration, we randomly sample a mini-batch to perform steps 1-3 on. All the above equations still hold, but now the $P$ data samples $\mathbf{r}^{(n)}, \tilde{y}^{(n)}$ denote a mini-batch of $P$ random samples from the training set, rather than the whole training set. # # There are several reasons why one might want to use SGD instead of GD. The first is that the training set might be too big, so that we can't actually evaluate the loss on every single data sample in it. In this case, GD is simply infeasible, so we have no choice but to turn to SGD, which bypasses the restrictive memory demands of GD by sub-sampling the training set into smaller mini-batches. # # But, even when GD is feasible, SGD turns out to be generally better. The stochasticity induced by the extra random sampling step in SGD effectively adds some noise in the search for local minima of the loss function. This can be really useful for avoiding potential local minima, and helps ensure that whatever minimum is converged to is a good one. 
This is particularly important when networks are wider and/or deeper, in which case the large number of parameters can lead to overfitting. # # Here, we used only GD because (1) it is simpler, and (2) it suffices for the problem being considered here. Because we have so many neurons in our data set, decoding is not too challenging and doesn't require a particularly deep or wide network. The small number of parameters in our deep networks therefore can be optimized without a problem using GD.
tutorials/W3D4_DeepLearning1/W3D4_Tutorial1.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Calculate correlation functions with CCL # In this example, we will calculate clustering and lensing correlation functions for an example cosmology. import numpy as np import pylab as plt import pyccl as ccl # %matplotlib inline # ### Define a cosmology and source number density # First, we define a set of cosmological parameters. cosmo = ccl.Cosmology(Omega_c=0.27, Omega_b=0.045, h=0.67, sigma8=0.83, n_s=0.96) # The angular power spectrum is weighted by the source number density as a function of redshift, dN/dz. We define an example here. # + z = np.linspace(0., 3., 200) i_lim = 26. # Limiting i-band magnitude z0 = 0.0417*i_lim - 0.744 Ngal = 46. * 100.31 * (i_lim - 25.) # Normalisation, galaxies/arcmin^2 pz = 1./(2.*z0) * (z / z0)**2. * np.exp(-z/z0) # Redshift distribution, p(z) dNdz = Ngal * pz # Number density distribution b = 1.5*np.ones(200) # - plt.plot(z, dNdz) plt.show() # ### Create Tracer objects # CCL manages auto- and cross-spectrum calculations through `Tracer` objects. For the sake of this example we will define two tracers: one for lensing and one for clustering. lens1 = ccl.WeakLensingTracer(cosmo, dndz=(z, dNdz)) clu1 = ccl.NumberCountsTracer(cosmo, has_rsd=False, dndz=(z,dNdz), bias=(z,b)) # The argument set to `False` in the first statement specifies that we are ignoring intrinsic alignments. For the clustering tracer, we are excluding both RSD and magnification bias with the two `False` statements. 
# If we wanted to include intrinsic alignments, we could have created the `ClTracer` object in the following way: bias_ia = -0.01* np.ones(z.size) # Intrinsic alignment bias factor f_red = 0.2 * np.ones(z.size) # Fraction of red galaxies lens1_ia = ccl.WeakLensingTracer(cosmo, dndz=(z, dNdz), ia_bias=(z, bias_ia), red_frac=(z, f_red)) # ### Obtain angular power spectra # Before computing the correlation functions, we need to obtain the angular power spectra of the tracers. ell = np.arange(2, 100) cls = ccl.angular_cl(cosmo, lens1, lens1, ell) cls_ia = ccl.angular_cl(cosmo, lens1_ia, lens1_ia, ell) cls_clu = ccl.angular_cl(cosmo, clu1, clu1, ell) plt.plot(ell, cls, 'k-') plt.plot(ell, cls_ia, 'r-') plt.show() # ### Calculate the correlation functions # We can now calculate the correlation functions for the tracers. # + theta_deg = np.logspace(-1, np.log10(5.), 20) # Theta is in degrees xi_plus = ccl.correlation(cosmo, ell, cls, theta_deg, corr_type='L+', method='FFTLog') xi_plus_ia = ccl.correlation(cosmo, ell, cls_ia, theta_deg, corr_type='L+', method='FFTLog') xi_minus = ccl.correlation(cosmo, ell, cls, theta_deg, corr_type='L-', method='FFTLog') xi_clu = ccl.correlation(cosmo, ell, cls_clu, theta_deg, corr_type='GG', method='FFTLog') # - # We can then plot the correlations, first for lensing: # + plt.plot(theta_deg, xi_plus, label='Lensing +, no IA') plt.plot(theta_deg, xi_minus, label='Lensing -, no IA') plt.plot(theta_deg, xi_plus_ia, label='Lensing +, w/IA') plt.xscale('log') plt.legend(loc='upper right') plt.xlabel(r'$\theta$ (deg)') plt.ylabel(r'$\xi (r)$') plt.show() # - # And then for galaxy-galaxy clustering: # + plt.plot(theta_deg, xi_clu, label='Clustering') plt.xscale('log') plt.legend(loc='upper right') plt.xlabel(r'$\theta$ (deg)') plt.ylabel(r'$\xi (r)$') plt.show() # -
examples/Correlation.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# Multi-output decision tree regression demo: fit trees of increasing depth
# to a noisy 2-D target and compare their predictions in target space.

import numpy as np
import matplotlib.pyplot as plt
from sklearn.tree import DecisionTreeRegressor

# Create a random dataset: 100 sorted inputs in [-100, 100); the 2-D target
# traces a circle of radius pi, with noise added to every 5th sample.
rng = np.random.RandomState(1)
X = np.sort(200 * rng.rand(100, 1) - 100, axis=0)
y = np.array([np.pi * np.sin(X).ravel(), np.pi * np.cos(X).ravel()]).T
y[::5, :] += (0.5 - rng.rand(20, 2))

# Fit regression models of increasing depth (from under- to over-fitting)
regr_1 = DecisionTreeRegressor(max_depth=2)
regr_2 = DecisionTreeRegressor(max_depth=5)
regr_3 = DecisionTreeRegressor(max_depth=8)
regr_1.fit(X, y)
regr_2.fit(X, y)
regr_3.fit(X, y)

# Predict on a dense input grid
X_test = np.arange(-100.0, 100.0, 0.01)[:, np.newaxis]
y_1 = regr_1.predict(X_test)
y_2 = regr_2.predict(X_test)
y_3 = regr_3.predict(X_test)

# Plot the results (target 1 vs. target 2)
plt.figure()
s = 25  # marker size (removed the dead `s = 50` that was immediately overwritten)
plt.scatter(y[:, 0], y[:, 1], c="navy", s=s, edgecolor="black", label="data")
plt.scatter(y_1[:, 0], y_1[:, 1], c="cornflowerblue", s=s, edgecolor="black", label="max_depth=2")
plt.scatter(y_2[:, 0], y_2[:, 1], c="red", s=s, edgecolor="black", label="max_depth=5")
plt.scatter(y_3[:, 0], y_3[:, 1], c="orange", s=s, edgecolor="black", label="max_depth=8")
plt.xlim([-6, 6])
plt.ylim([-6, 6])
plt.xlabel("target 1")
plt.ylabel("target 2")
plt.title("Multi-output Decision Tree Regression")
plt.legend(loc="best")
plt.show()
lab05/tree/plot_tree_regression_multioutput.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Exercise 5 - Handling Slow Tasks # # **GOAL:** The goal of this exercise is to show how to use `ray.wait` to avoid waiting for slow tasks. # # See the documentation for ray.wait at http://ray.readthedocs.io/en/latest/api.html#waiting-for-a-subset-of-tasks-to-finish. # # This script starts 6 tasks, each of which takes a random amount of time to complete. We'd like to process the results in two batches (each of size 3). Change the code so that instead of waiting for a fixed set of 3 tasks to finish, we make the first batch consist of the first 3 tasks that complete. The second batch should consist of the 3 remaining tasks. Do this exercise by using `ray.wait`. # # ### Concepts for this Exercise - ray.wait # # After launching a number of tasks, you may want to know which ones have finished executing. This can be done with `ray.wait`. The function works as follows. # # ```python # ready_ids, remaining_ids = ray.wait(object_ids, num_returns=1, timeout_ms=None) # ``` # # **Arguments:** # - `object_ids`: This is a list of object IDs. # - `num_returns`: This is maximum number of object IDs to wait for. The default value is `1`. # - `timeout_ms`: This is the maximum amount of time in milliseconds to wait for. So `ray.wait` will block until either `num_returns` objects are ready or until `timeout_ms` milliseconds have passed. # # **Return values:** # - `ready_ids`: This is a list of object IDs that are available in the object store. # - `remaining_ids`: This is a list of the IDs that were in `object_ids` but are not in `ready_ids`, so the IDs in `ready_ids` and `remaining_ids` together make up all the IDs in `object_ids`. 
# + from __future__ import absolute_import from __future__ import division from __future__ import print_function import numpy as np import ray import time # - ray.init(num_cpus=6, redirect_output=True) # Define a remote function that takes a variable amount of time to run. @ray.remote def f(i): np.random.seed(5 + i) x = np.random.uniform(0, 4) time.sleep(x) return i, time.time() # **EXERCISE:** Using `ray.wait`, change the code below so that `initial_results` consists of the outputs of the first three tasks to complete instead of the first three tasks that were submitted. # + # Sleep a little to improve the accuracy of the timing measurements below. time.sleep(2.0) start_time = time.time() # This launches 6 tasks, each of which takes a random amount of time to # complete. result_ids = [f.remote(i) for i in range(6)] # Get one batch of tasks. Instead of waiting for a fixed subset of tasks, we # should instead use the first 3 tasks that finish. #initial_results = ray.get(result_ids[:3]) initial_results, remaining_results = ray.wait(result_ids, num_returns=3) end_time = time.time() duration = end_time - start_time # - # **EXERCISE:** Change the code below so that `remaining_results` consists of the outputs of the last three tasks to complete. # Wait for the remaining tasks to complete. #remaining_results = ray.get(result_ids[3:]) initial_results = ray.get(initial_results) remaining_results = ray.get(remaining_results) # **VERIFY:** Run some checks to verify that the changes you made to the code were correct. Some of the checks should fail when you initially run the cells. After completing the exercises, the checks should pass. 
# +
# Verification cell: both batches must hold 3 results, together covering all
# 6 task indices, and every task in the first batch must have finished before
# any task in the second batch.
assert len(initial_results) == 3
assert len(remaining_results) == 3

initial_indices = [result[0] for result in initial_results]
initial_times = [result[1] for result in initial_results]
remaining_indices = [result[0] for result in remaining_results]
remaining_times = [result[1] for result in remaining_results]

assert set(initial_indices + remaining_indices) == set(range(6))

# The first batch should be available well before all 6 tasks finish...
# (message fixed: the batch holds three tasks, not ten)
assert duration < 1.5, ('The initial batch of three tasks was retrieved in '
                        '{} seconds. This is too slow.'.format(duration))

# ...but not before the 3 fastest tasks have actually run.
# (message fixed: failing this check means retrieval was suspiciously fast)
assert duration > 0.8, ('The initial batch of three tasks was retrieved in '
                        '{} seconds. This is too fast.'.format(duration))

# Make sure the initial results actually completed first.
assert max(initial_times) < min(remaining_times)

print('Success! The example took {} seconds.'.format(duration))
# -
exercise05.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: 'Python 3.7.6 64-bit (''vela'': pipenv)' # language: python # name: python37664bitvelapipenvde09592071074af6a70ce3b1ce38af95 # --- # # 20-XX-XX: Daily Data Practice # # --- # # ### Daily Practices # # * Meta Data: Review and write # * Focus on a topic, review notes and resources, write a blog post about it # * HackerRank SQL or Packt SQL Data Analytics # * Practice with the common DS/ML tools and processes # * Try to hit benchmark accuracies with [UCI ML datasets](https://archive.ics.uci.edu/ml/index.php) # * Hands-on ML with sklearn, Keras, and TensorFlow # * Read, code along, take notes # * _test yourself on the concepts_ — i.e. do all the chapter exercises # * [fast.ai course](https://course.fast.ai/) # * Kaggle # * Interviewing # * "Tell me a bit about yourself" # * "Tell me about a project you've worked on and are proud of" # * Business case walk-throughs # * Hot-seat DS-related topics for recall practice (under pressure) # * Job sourcing # * LinkedIn # --- # # ### Writing # # > Focus on a topic or project, learn/review the concepts, and write a blog post about it # # # ### The Data # # As seems to be the case with most, if not all, machine learning projects, we spent the # vast majority of the time gathering and labeling our dataset. # # In an ideal world, our model would be able to recognize any object that anyone would # ever want to throw away. But the reality is that this is practically impossible, # particularly within the 8 weeks we had to work on Trash Panda. # # We were granted an API key from Earth911 to utilize their recycling center search # database. When we were working with it, the database held information on around 300 # items—how they should be recycled based on location, and facilities that accept them if # they are not curbside recyclable. 
# # We had our starting point for the list of items our system should be able to # recognize. However, the documentation for the neural network architecture we'd decided # to use suggested that to create a robust model, it should be trained with at least # 1,000 instancesi (in this case, images) of each of the classes we wanted it to detect. # # Gathering 300,000 images was also quite a bit out of the scope of the project at that # point. So the DS team spent many hours reducing the size of that list to something a # little more manageable and realistic. # # The main method of doing so was to group the items based primarily on visual # similarity. We knew it was also out of the scope of our time with the project to train # a model that could tell the difference between #2 plastic bottles and #3 plastic # bottles, or motor oil bottles and brake fluid bottles. # # We also considered the items that 1) users would be throwing away on a somewhat # regular basis, and 2) users would usually either be unsure of how to dispose of properly # or would dispose of properly. # --- # # ## Statistics and Probability # # * Training kit # * Lecture and assignment notebooks # * Books # * Practical Statistics for Data Scientists # * Video # * [StatQuest Statistics Fundamentals](https://www.youtube.com/playlist?list=PLblh5JKOoLUK0FLuzwntyYI10UQFUhsY9) # #### Sets # # A set is a collection of unique entities. A set is said to be a subset of another set, if all of the first set's members are also members of the second set. # # An empty set is a set without any members. It can be defined as the set that is the subset of every set, and every set (universal set) is a subset of itself. # ### Random sampling and sample bias # # A _sample_ is a subset of data from a larger dataset, the _population_. 
# # * `N(n)` : The size of the population (sample) # * Random sampling : Drawing elements into a sample at random # * Each available member of the population has an equal chance of being chosen for the sample at each draw # * Stratified sampling : Dividing the population into strata and randomly sampling from each strata # * The intuition here is that a stratified sample can help a sample to follow the distribution of the population, particularly in the case of a biased distribution # * Simple random sample : random sample without stratifying the population # * Sample bias : a sample that misrepresents the population # * Samples will always be somewhat non-representative of the population # * Sampling bias occurs when that difference is meaningful # * An unbiased process will produce error, but it is random and does not tend strongly in any direction # * with replacement : observations are put back in the population after each draw # * without replacement : once selected, observations can't be drawn again # # Data quality is often more important than data quantity. Random sampling can reduce bias and facilitate quality improvement that would be prohibitively expensive. # # To minimize bias, specify a hypothesis first, then collect data using randomization and random sampling. 
# # * Regression to the mean : when taking successive measurements on a given variable, extreme observations tend to be followed by more central ones # # #### Sampling distribution of a statistic # # * Sample statistic : a metric calculated for a sample of data drawn from a larger population # * Data distribution : the frequency distribution of individual values in a dataset # * Sampling distribution : the frequency distribution of a sample statistic over many samples or resamples # * Central Limit Theorem : the tendency of the sampling distribution to take on a normal shape as sample size increases # * Standard error : the variability (stdev) of a sample statistic over many samples # * Standard deviation : variability of individual data values # # #### The bootstrap # # * Bootstrap sample : a sample taken with replacement from an observed dataset # * Resampling : the process of taking repeated samples from observed data # * Includes bootstrap and permutation (shuffling) # # # # --- # # ### Interviewing # # > Practice answering the most common interview questions # # * "Tell me a bit about yourself" # * "Tell me about a project you've worked on and are proud of" # * "What is your greatest strength / weakness?" # * "Tell me about a time when you had conflict with someone and how you handled it" # * "Tell me about a mistake you made and how you handled it" # * Business case walk-throughs # * Hot-seat DS-related topics for recall practice (under pressure) # > "Where do you see yourself in 3-5 years?" # # Ideally working on the cutting edge of deep learning, whether it is doing research or # developing applications and products for users. As company X does X, I can see myself # working deeply on the research or machine learning engineering team here. # > "What brought you to data science? What interested you about data science?" # # My background in Economics gave me my first real taste of programmatically gathering # and utilizing data. 
Data science is mostly a continuation of that into the modern age # of big data. I know that data can make big improvements in people's lives, and data # science ... # > "What sort of compensation are you looking for for this position?" # # I have an idea of a salary range based on the position, the work, and my experience. # It's flexible and depends heavily on the whole package, such as PTO and other benefits. # First, I wanted to hear what general range you would offer for this position. # > "Tell me a bit about yourself" # # * Homeschooled until seventh grade # * Learning is a life-long endeavor # * Jack of many trades, master of some # * I have a wide range of interests, skills and experiences # * Go deep on things I'm fascinated with (maybe better as strength/weakness) # * I like the free flow of creativity # * also need something technical to dig my teeth into # * Always been fascinated by technology # * Econ undergrad - first taste of harnessing the power of data # * After college # * On-site implementation consultant for an ERP software company # * Trained in manipulating Oracle RDBMS # * Wrote Crystal Reports using SQL (lots and lots of joins) # * Worked as a professional DJ for a couple of years # * Wasn't scratching my technical itch # * Found my way back to data # > "Tell me about a mistake you made and how you handled it" # # * First solo implementation project # * Tried to do everything myself # * Instantiating the system # * Migrating the data # * Teaching the users # * Writing reports # * Resolution # * Started building my delegation muscle # * Decided to backtrack to be sure all of the bases were hit # * Delegated work to those who are specialized for it (teachers to teach) # # --- # # ### SQL # # > Work through practice problems on HackerRank or Packt # # # # # # --- # # ### DS + ML Practice # # * Pick a dataset and try to do X with it # * Try to hit benchmark accuracies with [UCI ML datasets](https://archive.ics.uci.edu/ml/index.php) # * Kaggle 
# * Practice with the common DS/ML tools and processes # * Hands-on ML with sklearn, Keras, and TensorFlow # * Machine learning flashcards # # #### _The goal is to be comfortable explaining the entire process._ # # * Data access / sourcing, cleaning # * SQL # * Pandas # * Exploratory data analysis # * Data wrangling techniques and processes # * Inference # * Statistics # * Probability # * Visualization # * Modeling # * Implement + justify choice of model / algorithm # * Track performance + justify choice of metrics # * Communicate results as relevant to the goal # # # --- # # ### Job sourcing # # > Browse LinkedIn, Indeed, and connections for promising leads # # # # # # # # # # # # # #
ds/practice/daily_practice/20-05/20-05-05-ds_practice.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Neural Network # # In this tutorial, we'll create a simple neural network classifier in TensorFlow. The key advantage of this model over the [Linear Classifier](https://github.com/easy-tensorflow/easy-tensorflow/blob/master/3_Neural_Network/Tutorials/1_Neural_Network.ipynb) trained in the previous tutorial is that it can separate data which is __NOT__ linearly separable. We will implement this model for classifying images of hand-written digits from the so-called MNIST data-set. # # We assume that you have the basic knowledge over the concept and you are just interested in the __Tensorflow__ implementation of the Neural Nets. If you want to know more about the Neural Nets we suggest you to take [this](https://www.coursera.org/learn/machine-learning) amazing course on machine learning or check out the following tutorials: # # [Neural Networks Part 1: Setting up the Architecture](https://cs231n.github.io/neural-networks-1/) # # [Neural Networks Part 2: Setting up the Data and the Loss](https://cs231n.github.io/neural-networks-2/) # # [Neural Networks Part 3: Learning and Evaluation](https://cs231n.github.io/neural-networks-3/) # # The structure of the neural network that we're going to implement is as follows. Like before, we're using images of handw-ritten digits of the MNIST data which has 10 classes (i.e. digits from 0 to 9). The implemented network has 2 hidden layers: the first one with 200 hidden units (neurons) and the second one (also known as classifier layer) with 10 (number of classes) neurons. # # <img src="files/files/nn.png"> # # ___Fig. 1-___ Sample Neural Network architecture with two layers implemented for classifying MNIST digits # # # # # ## 0. 
Import the required libraries: # We will start with importing the required Python libraries. # imports import tensorflow as tf import numpy as np import matplotlib.pyplot as plt # ## 1. Load the MNIST data # # For this tutorial we use the MNIST dataset. MNIST is a dataset of handwritten digits. If you are into machine learning, you might have heard of this dataset by now. MNIST is kind of benchmark of datasets for deep learning and is easily accesible through Tensorflow # # The dataset contains $55,000$ examples for training, $5,000$ examples for validation and $10,000$ examples for testing. The digits have been size-normalized and centered in a fixed-size image ($28\times28$ pixels) with values from $0$ to $1$. For simplicity, each image has been flattened and converted to a 1-D numpy array of $784$ features ($28\times28$). # # <img src="files/files/mnist.png"> # # # If you want to know more about the MNIST dataset you can check __Yann Lecun__'s [website](http://yann.lecun.com/exdb/mnist/). # # ### 1.1. Data dimension # Here, we specify the dimensions of the images which will be used in several places in the code below. Defining these variables makes it easier (compared with using hard-coded number all throughout the code) to modify them later. Ideally these would be inferred from the data that has been read, but here we will just write the numbers. # # It's important to note that in a linear model, we have to flatten the input images into a vector. Here, each of the $28\times28$ images are flattened into a $1\times784$ vector. img_h = img_w = 28 # MNIST images are 28x28 img_size_flat = img_h * img_w # 28x28=784, the total number of pixels n_classes = 10 # Number of classes, one class per digit # ### 1.2. Helper functions to load the MNIST data # # In this section, we'll write the function which automatically loads the MNIST data and returns it in our desired shape and format. 
If you wanna learn more about loading your data, you may read our __How to Load Your Data in TensorFlow __ tutorial which explains all the available methods to load your own data; no matter how big it is. # # Here, we'll simply write a function (__`load_data`__) which has two modes: train (which loads the training and validation images and their corresponding labels) and test (which loads the test images and their corresponding labels). You can replace this function to use your own dataset. # # Other than a function for loading the images and corresponding labels, we define two more functions: # # 1. __randomize__: which randomizes the order of images and their labels. This is important to make sure that the input images are sorted in a completely random order. Moreover, at the beginning of each __epoch__, we will re-randomize the order of data samples to make sure that the trained model is not sensitive to the order of data. # # 2. __get_next_batch__: which only selects a few number of images determined by the batch_size variable (if you don't know why, read about Stochastic Gradient Method) # # + def load_data(mode='train'): """ Function to (download and) load the MNIST data :param mode: train or test :return: images and the corresponding labels """ from tensorflow.examples.tutorials.mnist import input_data mnist = input_data.read_data_sets("MNIST_data/", one_hot=True) if mode == 'train': x_train, y_train, x_valid, y_valid = mnist.train.images, mnist.train.labels, \ mnist.validation.images, mnist.validation.labels return x_train, y_train, x_valid, y_valid elif mode == 'test': x_test, y_test = mnist.test.images, mnist.test.labels return x_test, y_test def randomize(x, y): """ Randomizes the order of data samples and their corresponding labels""" permutation = np.random.permutation(y.shape[0]) shuffled_x = x[permutation, :] shuffled_y = y[permutation] return shuffled_x, shuffled_y def get_next_batch(x, y, start, end): x_batch = x[start:end] y_batch = y[start:end] 
return x_batch, y_batch # - # ### 1.3. Load the data and display the sizes # Now we can use the defined helper function in __train__ mode which loads the train and validation images and their corresponding labels. We'll also display their sizes: # Load MNIST data x_train, y_train, x_valid, y_valid = load_data(mode='train') print("Size of:") print("- Training-set:\t\t{}".format(len(y_train))) print("- Validation-set:\t{}".format(len(y_valid))) # To get a better sense of the data, let's checkout the shapes of the loaded arrays. print('x_train:\t{}'.format(x_train.shape)) print('y_train:\t{}'.format(y_train.shape)) print('x_train:\t{}'.format(x_valid.shape)) print('y_valid:\t{}'.format(y_valid.shape)) # As you can see, __`x_train`__ and __`x_valid`__ arrays contain $55000$ and $5000$ flattened images ( of size $28\times28=784$ values). __`y_train`__ and __`y_valid`__ contain the corresponding labels of the images in the training and validation set respectively. # # Based on the dimesnion of the arrays, for each image, we have 10 values as its label. Why? This technique is called __One-Hot Encoding__. This means the labels have been converted from a single number to a vector whose length equals the number of possible classes. All elements of the vector are zero except for the $i^{th}$ element which is one and means the class is $i$. For example, the One-Hot encoded labels for the first 5 images in the validation set are: y_valid[:5, :] # where the $10$ values in each row represents the label assigned to that partiular image. # ## 2. Hyperparameters # # Here, we have about $55,000$ images in our training set. It takes a long time to calculate the gradient of the model using all these images. We therefore use __Stochastic Gradient Descent__ which only uses a small batch of images in each iteration of the optimizer. Let's define some of the terms usually used in this context: # # - __epoch__: one forward pass and one backward pass of __all__ the training examples. 
# - __batch size__: the number of training examples in one forward/backward pass. The higher the batch size, the more memory space you'll need. # - __iteration__: one forward pass and one backward pass of __one batch of images__ the training examples. # + # Hyper-parameters epochs = 10 # Total number of training epochs batch_size = 100 # Training batch size display_freq = 100 # Frequency of displaying the training results learning_rate = 0.001 # The optimization initial learning rate h1 = 200 # number of nodes in the 1st hidden layer # - # Given the above definitions, each epoch consists of $55,000/100=550$ iterations. # ## 3. Helper functions for creating the network # # ### 3.1. Helper functions for creating new variables # # As explained (and also illustrated in Fig. 1), we need to define two variables $\mathbf{W}$ and $\mathbf{b}$ to construt our linear model. These are generally called model parameters and as explained in our [Tensor Types](https://github.com/easy-tensorflow/easy-tensorflow/blob/master/1_TensorFlow_Basics/Tutorials/2_Tensor_Types.ipynb) tutorial, we use __Tensorflow Variables__ of proper size and initialization to define them.The following functions are written to be later used for generating the weight and bias variables of the desired shape: # + # weight and bais wrappers def weight_variable(name, shape): """ Create a weight variable with appropriate initialization :param name: weight name :param shape: weight shape :return: initialized weight variable """ initer = tf.truncated_normal_initializer(stddev=0.01) return tf.get_variable('W_' + name, dtype=tf.float32, shape=shape, initializer=initer) def bias_variable(name, shape): """ Create a bias variable with appropriate initialization :param name: bias variable name :param shape: bias variable shape :return: initialized bias variable """ initial = tf.constant(0., shape=shape, dtype=tf.float32) return tf.get_variable('b_' + name, dtype=tf.float32, initializer=initial) # - # ### 3.2. 
# Helper-function for creating a fully-connected layer
#
# Neural network consists of stacks of fully-connected (dense) layers. Having the weight ($\mathbf{W}$) and bias ($\mathbf{b}$) variables, a fully-connected layer is defined as $activation(\mathbf{W}\times \mathbf{x} + \mathbf{b})$. We define __`fc_layer`__ function as follows:

def fc_layer(x, num_units, name, use_relu=True):
    """
    Create a fully-connected layer.

    :param x: input from previous layer
    :param num_units: number of hidden units in the fully-connected layer
    :param name: layer name (shared by the layer's weight and bias variables)
    :param use_relu: boolean to add ReLU non-linearity (or not)
    :return: the output tensor of the layer
    """
    in_dim = x.get_shape()[1]
    weights = weight_variable(name, shape=[in_dim, num_units])
    biases = bias_variable(name, [num_units])
    # Affine transform: x @ W + b
    out = tf.matmul(x, weights) + biases
    # Non-linearity is skipped for the logits (output) layer.
    return tf.nn.relu(out) if use_relu else out


# ## 4. Create the network graph
#
# Now that we have defined all the helper functions to create our model, we can create our network.
#
# ### 4.1. Placeholders for the inputs (x) and corresponding labels (y)
#
# First we need to define the proper tensors to feed in the input values to our model. As explained in the [Tensor Types](https://github.com/easy-tensorflow/easy-tensorflow/blob/master/1_TensorFlow_Basics/Tutorials/2_Tensor_Types.ipynb) tutorial, placeholder variable is the suitable choice for the input images and corresponding labels. This allows us to change the inputs (images and labels) to the TensorFlow graph.

# Create the graph for the linear model
# Placeholders for inputs (x) and outputs(y)
x = tf.placeholder(tf.float32, shape=[None, img_size_flat], name='X')
y = tf.placeholder(tf.float32, shape=[None, n_classes], name='Y')

# Placeholder __`x`__ is defined for the images; its data-type is set to __`float32`__ and the shape is set to __[None, img_size_flat]__, where __`None`__ means that the tensor may hold an arbitrary number of images with each image being a vector of length __`img_size_flat`__.
# #
# # Next we have __`y`__ which is the placeholder variable for the true labels associated with the images that were input in the placeholder variable __`x`__. The shape of this placeholder variable is __[None, num_classes]__ which means it may hold an arbitrary number of labels and each label is a vector of length __`num_classes`__ which is $10$ in this case.

# ### 4.2. Create the network layers
#
# After creating the proper input, we have to pass it to our model. Since we have a neural network, we can stack multiple fully-connected layers using __`fc_layer`__ method. Note that we will not use any activation function (`use_relu=False`) in the last layer. The reason is that we can use `tf.nn.softmax_cross_entropy_with_logits` to calculate the `loss`.

# Create a fully-connected layer with h1 nodes as hidden layer
fc1 = fc_layer(x, h1, 'FC1', use_relu=True)
# Create a fully-connected layer with n_classes nodes as output layer
output_logits = fc_layer(fc1, n_classes, 'OUT', use_relu=False)

# ### 4.3. Define the loss function, optimizer, accuracy, and predicted class
#
# After creating the network, we have to calculate the loss and optimize it. Also, to evaluate our model, we have to calculate the `correct_prediction` and `accuracy`. We will also define `cls_prediction` to visualize our results.

# +
# Define the loss function, optimizer, and accuracy
# softmax_cross_entropy_with_logits applies softmax internally, which is why
# the last layer deliberately outputs raw logits (use_relu=False above).
loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=y, logits=output_logits), name='loss')
optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate, name='Adam-op').minimize(loss)
# A prediction is correct when the argmax of the logits matches the one-hot label.
correct_prediction = tf.equal(tf.argmax(output_logits, 1), tf.argmax(y, 1), name='correct_pred')
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32), name='accuracy')

# Network predictions
cls_prediction = tf.argmax(output_logits, axis=1, name='predictions')
# -

# ### 4.4. Initialize all variables
#
# As explained in the [Tensor Types](https://github.com/easy-tensorflow/easy-tensorflow/blob/master/1_TensorFlow_Basics/Tutorials/2_Tensor_Types.ipynb) tutorial, we have to invoke a variable initializer operation to initialize all variables.

# Create the op for initializing all variables
init = tf.global_variables_initializer()

# ## 5. Train
#
# After creating the graph, it is time to train our model. To train the model, as explained in the [Graph_and_Session](https://github.com/easy-tensorflow/easy-tensorflow/blob/master/1_TensorFlow_Basics/Tutorials/1_Graph_and_Session.ipynb) tutorial, we have to create a session and run the graph in our session.

# Create an interactive session (to keep the session in the other cells)
sess = tf.InteractiveSession()
# Initialize all variables
sess.run(init)
# Number of training iterations in each epoch
num_tr_iter = int(len(y_train) / batch_size)
for epoch in range(epochs):
    print('Training epoch: {}'.format(epoch + 1))
    # Randomly shuffle the training data at the beginning of each epoch
    x_train, y_train = randomize(x_train, y_train)
    for iteration in range(num_tr_iter):
        # Slice out the current mini-batch.
        start = iteration * batch_size
        end = (iteration + 1) * batch_size
        x_batch, y_batch = get_next_batch(x_train, y_train, start, end)

        # Run optimization op (backprop)
        feed_dict_batch = {x: x_batch, y: y_batch}
        sess.run(optimizer, feed_dict=feed_dict_batch)

        if iteration % display_freq == 0:
            # Calculate and display the batch loss and accuracy
            loss_batch, acc_batch = sess.run([loss, accuracy],
                                             feed_dict=feed_dict_batch)
            print("iter {0:3d}:\t Loss={1:.2f},\tTraining Accuracy={2:.01%}".
                  format(iteration, loss_batch, acc_batch))

    # Run validation after every epoch
    # NOTE(review): only the first 1000 validation samples are evaluated here,
    # presumably to keep per-epoch evaluation fast — confirm this is intended.
    feed_dict_valid = {x: x_valid[:1000], y: y_valid[:1000]}
    loss_valid, acc_valid = sess.run([loss, accuracy], feed_dict=feed_dict_valid)
    print('---------------------------------------------------------')
    print("Epoch: {0}, validation loss: {1:.2f}, validation accuracy: {2:.01%}".
          format(epoch + 1, loss_valid, acc_valid))
    print('---------------------------------------------------------')
# -

# ## 6. Test
#
# After the training is done, we have to test our model to see how good it performs on a new dataset. There are multiple approaches for this purpose. We will use two different methods.
#
# ## 6.1. Accuracy
# One way that we can evaluate our model is reporting the accuracy on the test set.

# Test the network after training
# Accuracy
x_test, y_test = load_data(mode='test')
# NOTE(review): only the first 1000 test samples are evaluated — confirm intended.
feed_dict_test = {x: x_test[:1000], y: y_test[:1000]}
loss_test, acc_test = sess.run([loss, accuracy], feed_dict=feed_dict_test)
print('---------------------------------------------------------')
print("Test loss: {0:.2f}, test accuracy: {1:.01%}".format(loss_test, acc_test))
print('---------------------------------------------------------')

# ## 6.2. plot some results
# Another way to evaluate the model is to visualize the input and the model results and compare them with the true label of the input. This is advantageous in numerous ways. For example, even if you get a decent accuracy, when you plot the results, you might see all the samples have been classified in one class. Another example is when you plot, you can have a rough idea on which examples your model failed. Let's define the helper functions to plot some correct and misclassified examples.
#
# ### 6.2.1 Helper functions for plotting the results

# +
def plot_images(images, cls_true, cls_pred=None, title=None):
    """
    Create figure with 3x3 sub-plots.
    :param images: array of images to be plotted, (9, img_h*img_w)
    :param cls_true: corresponding true labels (9,)
    :param cls_pred: corresponding predicted labels (9,); when None only true labels are shown
    :param title: optional figure suptitle
    """
    fig, axes = plt.subplots(3, 3, figsize=(9, 9))
    fig.subplots_adjust(hspace=0.3, wspace=0.3)
    for i, ax in enumerate(axes.flat):
        # Plot image.
        ax.imshow(images[i].reshape(28, 28), cmap='binary')
        # Show true and predicted classes.
        if cls_pred is None:
            ax_title = "True: {0}".format(cls_true[i])
        else:
            ax_title = "True: {0}, Pred: {1}".format(cls_true[i], cls_pred[i])
        ax.set_title(ax_title)
        # Remove ticks from the plot.
        ax.set_xticks([])
        ax.set_yticks([])
    if title:
        plt.suptitle(title, size=20)
    plt.show(block=False)


def plot_example_errors(images, cls_true, cls_pred, title=None):
    """
    Function for plotting examples of images that have been mis-classified.
    :param images: array of all images, (#imgs, img_h*img_w)
    :param cls_true: corresponding true labels, (#imgs,)
    :param cls_pred: corresponding predicted labels, (#imgs,)
    :param title: optional figure suptitle
    """
    # Negate the boolean array.
    incorrect = np.logical_not(np.equal(cls_pred, cls_true))

    # Get the images from the test-set that have been
    # incorrectly classified.
    incorrect_images = images[incorrect]

    # Get the true and predicted classes for those images.
    cls_pred = cls_pred[incorrect]
    cls_true = cls_true[incorrect]

    # Plot the first 9 images.
    plot_images(images=incorrect_images[0:9],
                cls_true=cls_true[0:9],
                cls_pred=cls_pred[0:9],
                title=title)
# -

# ### 6.2.2 Visualize correct and misclassified examples

# Plot some of the correct and misclassified examples
cls_pred = sess.run(cls_prediction, feed_dict=feed_dict_test)
cls_true = np.argmax(y_test[:1000], axis=1)
plot_images(x_test, cls_true, cls_pred, title='Correct Examples')
plot_example_errors(x_test[:1000], cls_true, cls_pred, title='Misclassified Examples')
plt.show()

# After we finished, we have to close the __`session`__ to free the memory. We could have also used:
# ```python
# with tf.Session as sess:
#     ...
# ```
#
# Please check our [Graph_and_Session](https://github.com/easy-tensorflow/easy-tensorflow/blob/master/1_TensorFlow_Basics/Tutorials/1_Graph_and_Session.ipynb) tutorial if you do not know the differences between these two implementations.

sess.close()

# Thanks for reading! If you have any question or doubt, feel free to leave a comment in our [website](http://easy-tensorflow.com/).
3_Neural_Network/Tutorials/1_Neural_Network.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python [conda env:metis] * # language: python # name: conda-env-metis-py # --- # + from sqlalchemy import create_engine from sklearn.model_selection import train_test_split from sklearn.metrics import confusion_matrix, f1_score, roc_auc_score, roc_curve from sklearn.ensemble import RandomForestClassifier from sklearn.linear_model import LogisticRegression import pandas as pd import numpy as np import matplotlib.pyplot as plt import seaborn as sns sns.set() # - cnx = create_engine('postgresql://ubuntu@172.16.17.32:5432/ponv') # + # Query to get surgery procedures translated and a count query = ''' select pt.procedure_en, count(1) from ponvfull pf INNER JOIN proc_trans pt ON pt.surgical_procedure = pf.surgical_procedure group by pt.surgical_procedure, pf.surgical_procedure order by count(1) DESC ''' proc_table_en = pd.read_sql_query(query, cnx) proc_table_en.head(10) # - # Script to create statements to dummify surgeries for proc in proc_table_en.iloc[:,0]: def_string = 'count(CASE WHEN procedure_en = ' def_string2 = 'THEN 1 END) AS ' print(def_string+"'"+proc+"'"+def_string2+proc.replace(' ','_')+',') proc_dummy_query = ''' SELECT trtbl.patientid, trtbl.procedure_en, count(CASE WHEN procedure_en = 'Gastrointestinal'THEN 1 END) AS Gastrointestinal, count(CASE WHEN procedure_en = 'Mastectomy'THEN 1 END) AS Mastectomy, count(CASE WHEN procedure_en = 'Thoracic'THEN 1 END) AS Thoracic, count(CASE WHEN procedure_en = 'Nephrectomy'THEN 1 END) AS Nephrectomy, count(CASE WHEN procedure_en = 'Hysterectomy'THEN 1 END) AS Hysterectomy, count(CASE WHEN procedure_en = 'Exploratory Laparotomy'THEN 1 END) AS Exploratory_Laparotomy, count(CASE WHEN procedure_en = 'Other'THEN 1 END) AS Other, count(CASE WHEN procedure_en = 'Spine Surgery'THEN 1 END) AS Spine_Surgery, count(CASE WHEN procedure_en = 'Cystectomy'THEN 1 
END) AS Cystectomy, count(CASE WHEN procedure_en = 'Prostatectomy'THEN 1 END) AS Prostatectomy, count(CASE WHEN procedure_en = 'Hepatectomy'THEN 1 END) AS Hepatectomy, count(CASE WHEN procedure_en = 'Plastic'THEN 1 END) AS Plastic, count(CASE WHEN procedure_en = 'Cytoreduction'THEN 1 END) AS Cytoreduction, count(CASE WHEN procedure_en = 'Anexectomy/Ovariectomy/'THEN 1 END) AS Anexectomy_Ovariectomy, count(CASE WHEN procedure_en = 'Head/Neck'THEN 1 END) AS Head_Neck, count(CASE WHEN procedure_en = 'Orthopedic'THEN 1 END) AS Orthopedic, count(CASE WHEN procedure_en = 'Hysterectomy VLP'THEN 1 END) AS Hysterectomy_VLP, count(CASE WHEN procedure_en = 'Extensive Lymphadenectomy'THEN 1 END) AS Extensive_Lymphadenectomy, count(CASE WHEN procedure_en = 'Esophagectomy'THEN 1 END) AS Esophagectomy, count(CASE WHEN procedure_en = 'Breast Lumpectomy'THEN 1 END) AS Breast_Lumpectomy, count(CASE WHEN procedure_en = 'Pancreatectomy'THEN 1 END) AS Pancreatectomy, count(CASE WHEN procedure_en = 'Hip Arthoplasty'THEN 1 END) AS Hip_Arthoplasty, count(CASE WHEN procedure_en = 'Soft Tissue Resection'THEN 1 END) AS Soft_Tissue_Resection, count(CASE WHEN procedure_en = 'Limb Amputation'THEN 1 END) AS Limb_Amputation, count(CASE WHEN procedure_en = 'Gallbladder'THEN 1 END) AS Gallbladder FROM (SELECT patientid, pt.surgical_procedure, pt.procedure_en FROM ponvfull pf INNER JOIN proc_trans pt ON pf.surgical_procedure = pt.surgical_procedure) AS trtbl GROUP BY trtbl.patientid, trtbl.procedure_en; ''' # Select primary data set with populated PONV data sample_select_query = ''' SELECT * FROM analysis_set ''' raw_df = pd.read_sql_query(sample_select_query, cnx) # # Start analysis here after pulling in raw data from SQL # + #raw_df.to_pickle('../pkl_files/raw_df.pkl') # - raw_df = pd.read_pickle('../pkl_files/raw_df.pkl') '''(no_dropnull['anesthesia_technique'] == 4)& no_dropnull = raw_df[raw_df['neuraxial_opioid'].notnull()] no_dropnull['neuraxial_opioid'].map({True:1, False:0}) 
no_dropnull[(no_dropnull['neuraxial_opioid'] == ' ')].iloc[:,15:17] no_dropnull[(no_dropnull['neuraxial_opioid'] == None)] '''''' # ## Drop reasons: # 1) nausea_24h: somewhat a symptom of outcome and therefore too correlated with ponv\ # 2) sex: gender_code replaces\ # 3) chemotherapy_emetogenicity: duplicative given other features and too many NaN\ # 4) neuraxial_opioid: unable to identify NaN values; will readdress if needed\ # 5) Ever smoked is duplicate column raw_df['when_stopped_smoking'].value_counts() raw_df = raw_df[raw_df['fentanil_mcg'] < 3000] # dropping duplicate rows or features that cauase leakage analysis_df = raw_df.drop(['nausea_24h', 'ever_smoked', 'sex', 'chemotherapy_emetogenicity', 'neuraxial_opioid', 'nausea24h_intensity', 'vomiting24h_count'], axis=1).copy() #set nan ketamine dose to median analysis_df.loc[308,'ketamine_dose'] = analysis_df[analysis_df['ketamine_dose'] > 0]['ketamine_dose'].median() analysis_df.loc[308,'ketamine_dose'] #set nan morphine does to False analysis_df['intraoperative_morphine'].fillna(False, inplace=True) analysis_df[analysis_df['intraoperative_morphine_dose'].isna()].iloc[:,22:24] # populating null values for morphine data analysis_df[analysis_df['intraoperative_morphine_dose'].isna()].iloc[:,22:24] median_morph = analysis_df[analysis_df['intraoperative_morphine_dose']>0]['intraoperative_morphine_dose'].median() analysis_df['intraoperative_morphine_dose']=analysis_df.apply(lambda row: 0 if row['intraoperative_morphine'] == False else row['intraoperative_morphine_dose'], axis=1) analysis_df['intraoperative_morphine_dose'].fillna(median_morph, inplace=True) # antiemetic null solutions reg_antiem_df = analysis_df[analysis_df['regular_antiemetic'].isna()].iloc[:,42:55] reg_antiem_df # + # Look at antiemetic drug type administered data to fill in NA for antiemetic boolean def df_block_true(row, col_start, col_end): check = row[col_start:col_end].sum() if check > 0: return True else: return False 
analysis_df['reg_ae_fillna'] = analysis_df.apply(lambda x: df_block_true(x,49,55), axis=1) # - # There were some inconsistencies between the 'regular_antiemetic' vs. info for antiemetics administered for patients analysis_df[analysis_df['regular_antiemetic'] != analysis_df['reg_ae_fillna']].iloc[:,42:55] analysis_df.drop('regular_antiemetic', axis=1, inplace=True) #Same inconsistences may exist in the rescue_antiemetic columns # analysis_x['received_rescue_antiemetic'] analysis_df[analysis_df['rescue_antiemetic'].isna()].iloc[:,42:55] # Dropping received_rescue_antiemetic due to the number of null values and inconsistences with # other data columns that indicate patient received rescue antiemetic # Lots of inconsistencies in general with the rescue ae data; analysis_df['resc_ae_fillna'] = analysis_df.apply(lambda row: df_block_true(row,44,48) if row['rescue_antiemetic']==None else row['rescue_antiemetic'], axis=1) analysis_df.drop('rescue_antiemetic', axis=1, inplace=True) analysis_df.drop('received_rescue_antiemetic', axis=1, inplace=True) analysis_df.iloc[:,42:46].sum() #[analysis_df.iloc[:,40:50].isna()].iloc[:,43:56] analysis_df[analysis_df.iloc[:,42].isna()].iloc[:,42:46] analysis_df.iloc[:,42:46] = analysis_df.iloc[:,42:46].fillna(False) analysis_df.iloc[:,42:52] = analysis_df.iloc[:,42:52].fillna(False) analysis_df.describe(include='all').iloc[:,42:52] analysis_df.describe(include='all').iloc[:,53] analysis_df.describe(include='all').iloc[:,53] analysis_df.iloc[:,53].fillna(analysis_df.iloc[:,53].mean(),inplace=True) # dropping 1 of the dummy vars for surgical procedure analysis_df.drop('other', axis=1, inplace=True) # + #analysis_df.to_pickle('../pkl_files/analysis_df.pkl') # - analysis_df = pd.read_pickle('../pkl_files/analysis_df.pkl') analysis_x = analysis_df.drop(['ponv', 'vomiting24h'], axis=1) analysis_y = analysis_df[['ponv', 'vomiting24h']] # Extract Test Set tv_x, test_x, tv_y, test_y = train_test_split(analysis_x, analysis_y, test_size=.2, 
random_state=10) train_x, val_x, train_y, val_y = train_test_split(tv_x, tv_y, test_size=.25, random_state=444) # # Clean Data Here apfel_feat = ['gender_code', 'non_smoker', 'previous_ponv', 'postoperative_opioids'] pp_data = train_x[apfel_feat] pp_data['ponv'] = train_y['ponv'] # + fig = plt.figure(figsize=(16,8)) ax1 = fig.add_subplot(221) ax2 = fig.add_subplot(222) ax3 = fig.add_subplot(223) ax4 = fig.add_subplot(224) sns.barplot(pp_data['gender_code'], pp_data['ponv'], ax=ax1) sns.barplot(pp_data['non_smoker'], pp_data['ponv'], ax=ax2) sns.barplot(pp_data['previous_ponv'], pp_data['ponv'], ax=ax3) sns.barplot(pp_data['postoperative_opioids'], pp_data['ponv'], ax=ax4) # - # ## Anesthesia technique # tv_x['anesthesia_technique'].value_counts() fig = plt.figure(figsize=(16,6)) ax1 = fig.add_subplot(121) ax2 = fig.add_subplot(122) sns.barplot(tv_x['anesthesia_technique'], tv_y['ponv'], ax=ax1) ax2=plt.hist(tv_x['anesthesia_technique']) # Although a key wasn't available to explain what each of the techniques were, I don't believe further analysis is required given the ponv rate for all the techniques seem pretty similar. *I may need to do a hypothesis test to justify?* # ## Baseline Random Forest Regressor vs. 
Apfel baseline_val_x = val_x[apfel_feat] baseline_val_x.head() # Baseline RFC model baseline_x = pp_data.drop('ponv', axis=1) baseline_y = pp_data['ponv'] baseline_rfc = RandomForestClassifier() baseline_rfc.fit(baseline_x, baseline_y) rfc_pred = baseline_rfc.predict(baseline_val_x) # ## Baseline PONV positive set in train/val set sum(tv_y['ponv'])/len(tv_y['ponv']) baseline_comp = val_x[['patientid', 'apfel']] baseline_comp baseline_comp['apfel_prob'] = baseline_comp['apfel'].map({0:.1, 1:.2, 2:.4, 3:.6, 4:.8}) baseline_comp['baseline_rfc'] = baseline_rfc.predict_proba(baseline_val_x)[:,0] baseline_comp['apfel_pred'] = baseline_comp['apfel_prob'].apply(lambda x: True if x > 0.5 else False) baseline_comp['rfc_pred'] = baseline_rfc.predict(baseline_val_x) baseline_comp['actual'] = val_y['ponv'] baseline_comp = pd.concat([baseline_comp, baseline_val_x], axis=1) fig = plt.figure() ax = sns.heatmap(confusion_matrix(baseline_comp['actual'], baseline_comp['apfel_pred']), cmap='Greens', annot=True, fmt='0.2f', robust = True, yticklabels=['False', 'True'], xticklabels=['False', 'True']) plt.title('Apfel Scale Confusion Matrix', fontsize=20) plt.xlabel('Predicted', fontsize=14) plt.ylabel('Actual', fontsize=14) #plt.savefig('../ppt/Apfel_cm.png') sns.heatmap(confusion_matrix(baseline_comp['actual'], baseline_comp['rfc_pred']), cmap='Blues', annot=True, fmt='0.2f', linecolor='black', robust=True, yticklabels=['False', 'True'], xticklabels=['False', 'True']) plt.title('Random Forest Confusion Matrix', fontsize=20) plt.xlabel('Predicted', fontsize=14) plt.ylabel('Actual', fontsize=14) #plt.savefig('../ppt/RFC_cm.png') # # Without Resampling apfel_tv_x = tv_x[apfel_feat] # + #tv_x.to_pickle('../pkl_files/tv_x.pkl') #tv_y['ponv'].to_pickle('../pkl_files/tv_y.pkl') # - # # EDA of features by category # ## PONV on different surgeries proc_feat = analysis_df.iloc[:,np.r_[55,58:83]].groupby('ponv').sum().T proc_feat proc_feat['subtotal_surg'] = proc_feat[True]+proc_feat[False] 
proc_feat['ponv_rate'] = proc_feat[True]/proc_feat['subtotal_surg'] surg_ponv_summary = proc_feat[proc_feat['subtotal_surg']>20] surg_ponv_summary = surg_ponv_summary.sort_values(by='ponv_rate', ascending=False).reset_index() list(surg_ponv_summary['index'][:19]) plt.figure(figsize=(16,10)) plt.bar(surg_ponv_summary['index'][:18], surg_ponv_summary['ponv_rate'][:18]) surg_filter = list(surg_ponv_summary['index'][:18]) surg_x = tv_x[surg_filter] surg_y = tv_y agg_df_x = pd.concat([apfel_tv_x, surg_x], axis=1) import PONV_classifier_models as pcm pcm.evaluate_models([1,2,3], agg_df_x, tv_y['ponv']) # ## Anesthetic effect on PONV tv_x.iloc[:,14:24] dosage = ['fentanil_mcg', 'sufentanil_mcg', 'tramadol_dose_pacu', 'ketamine_dose', 'intraoperative_morphine_dose'] opioid_feat = tv_x[dosage] opioid_feat['ponv'] = tv_y['ponv'] opioid_feat[['fentanil_mcg', 'sufentanil_mcg']] = opioid_feat[['fentanil_mcg', 'sufentanil_mcg']]/1000 anes_smry = opioid_feat.groupby('ponv', as_index=False).mean() anes_smry # ### Notable variance in average fentanil and tramadol dosage # + fig = plt.figure(figsize=(16,8)) ax1 = fig.add_subplot(121) ax2 = fig.add_subplot(122) sns.barplot(anes_smry['ponv'], anes_smry['fentanil_mcg'], ax=ax1) sns.barplot(anes_smry['ponv'], anes_smry['tramadol_dose_pacu'], ax=ax2) # - drug_x = opioid_feat.drop('ponv', axis=1) pcm.evaluate_models([1,2,3], drug_x, tv_y['ponv']) agg_df_x = pd.concat([apfel_tv_x, surg_x, drug_x], axis=1) agg_df_x.columns pcm.evaluate_models([1,2,3], agg_df_x, tv_y['ponv']) # ## Quick check on age #Age appears to have no discernable effect age_df = pd.concat([tv_x['age'], tv_y['ponv']], axis=1) age_df agg_df_x = pd.concat([apfel_tv_x, surg_x, drug_x, age_df['age']], axis=1) sns.kdeplot(age_df[age_df['ponv'] == True]['age']) sns.kdeplot(age_df[age_df['ponv'] == False]['age']) plt.legend(['pos', 'neg']) # # Effect of Chemo chemo_df = tv_x.iloc[:,8:12] chemo_df pcm.evaluate_models([1,2,3], chemo_df, tv_y['ponv']) agg_df_x = 
pd.concat([apfel_tv_x, surg_x, drug_x, chemo_df], axis=1) pcm.evaluate_models([1,2,3], agg_df_x, tv_y['ponv']) # ## Additional Data surgery_times_df = pd.read_csv('../raw_data/surgery_time.csv') surgery_times_df surgery_times_df = pd.concat([surgery_times_df['Specialties'],surgery_times_df['Duration of the anesthesia'].str.split('±', expand=True)], axis=1) surgery_times_df.rename({'Specialties':'surg_time_key', 0:'avg_time',1:'std_time'}, axis=1, inplace=True) surgery_times_df[['avg_time', 'std_time']] = surgery_times_df[['avg_time','std_time']].astype(float) analysis_df['surgery_group'].unique() analysis_df.columns # ## Not seeing a significant connection between surgery time; problem is also in data. Standard deviations are huge. # # Emetogenics # Last minute I discovered that these features may potentially be causing data leakage as the coefficients indicated the emetogenic medicine was ***increasing*** the probability of PONV, which didn't make intuitive sense. I figured the doctors were giving patients higher doses based on their Apfel risk score so I ended up omitting the features individually. I did, however, aggregate the total amounts of IV drugs provided (both opioid and emetogenics) as an engineered feature to account for headache side effects commonly associated with emetogenics. 
tv_x.iloc[:,46:52] emetogenic_df = pd.concat([tv_x.iloc[:,25:35],tv_y],axis=1) emetogenic_df.describe(include='all') emet_dose_list = ['intraoperative_ondansetron_dose', 'dexamethasone_dose', 'intraoperative_dimenidrate_dose', 'metoclopramide_dose', 'droperidol_dose'] emet_dose_df = tv_x[emet_dose_list] emet_dose_df.describe() pcm.evaluate_models([1,2,3], emet_dose_df, tv_y['ponv']) agg_df_x = pd.concat([apfel_tv_x, surg_x, drug_x, chemo_df, age_df['age'], emet_dose_df], axis=1) agg_df_x # ## Smokers quitters = pd.concat([tv_x['when_stopped_smoking'], tv_y['ponv']], axis=1) quitters tv_x['when_stopped_smoking'].value_counts() tv_x['packets_years'].value_counts().head() # # Feature Engineering agg_df_x.info() agg_df_x.iloc[:,22:24].describe() agg_df_x['tot_drugs']= agg_df_x[['tramadol_dose_pacu','ketamine_dose', 'intraoperative_morphine_dose']].sum(axis=1) +\ emet_dose_df.sum(axis=1)+\ agg_df_x[['fentanil_mcg', 'sufentanil_mcg']].sum(axis=1)*1000 agg_df_x['tot_drugs'] # + #agg_df_x.to_pickle('../pkl_files/agg_df_x.pkl') #tv_y.to_pickle('../pkl_files/tv_y.pkl') # - agg_df_x = pd.read_pickle('../pkl_files/agg_df_x.pkl') tv_y=pd.read_pickle('../pkl_files/tv_y.pkl') agg_df_x['tot_drugs'].value_counts() import log_reg_tuning as lrt agg_df_x.columns lrt_phaseI = lrt.ponv_log_reg(agg_df_x, tv_y) dict(zip(list(agg_df_x),lrt_phaseI.coef_[0])) # # Testing Model in STLit # + import pandas as pd import pickle def load_model(input_dict): input_x = pd.DataFrame(input_dict, index=[0]) my_model = pickle.load(open("lr_pickled_model.p","rb")) scaler = pickle.load(open("ponv_scaler.p","rb")) sc_x = scaler.transform(input_x) prediction = my_model.predict_proba(sc_x) return prediction # - input_dict = {"Gender":1, "Non_Smoker":0, "previous_ponv":0, "postoperative_opioids":1, 'pancreatectomy':0, 'hysterectomy_vlp':0, 'anexectomy_ovariectomy':0, 'cystectomy':0, 'extensive_lymphadenectomy':0, 'plastic':0, 'cytoreduction':0, 'hysterectomy':0, 'nephrectomy':0, 'mastectomy':0, 'thoracic':0, 
'exploratory_laparotomy':0, 'hepatectomy':0, 'prostatectomy':1, 'breast_lumpectomy':0, 'spine_surgery':0, 'head_neck':0, 'orthopedic':0, 'fentanil_mcg':50, 'sufentanil_mcg':0, 'tramadol_dose_pacu':0, 'ketamine_dose':0, 'intraoperative_morphine_dose':0, 'previous_chemotherapy':0, 'how_many_months_ago_chemotherapy':0, 'post_chemotherapy_nausea':0, 'post_chemotherapy_vomiting':0, }
py_scripts/P3_EDA.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# +
# %load_ext autoreload
# %autoreload 2

# fundamentals
import os, sys
import numpy as np
import pandas as pd
from calendar import monthrange, month_name
import scipy.stats as stats
import datetime
import imp
import scipy.io as sio
import pickle as pkl

# plotting libraries and setup
from matplotlib.colors import BoundaryNorm
import matplotlib.pyplot as plt
plt.style.use('nrelplot')
from windrose import WindroseAxes

# met mast functions and utilities
sys.path.append('../')
import met_funcs as MET
import vis as vis
import utils as utils


# +
###########################################
def rose_fig(metdat, varcol, dircol, bins=6, nsector=36, ylim=None, noleg=False):
###########################################
    """
    Make a wind rose from wind-direction data colored by some other variable of the same size.

    Parameters:
        metdat: pandas.DataFrame containing met mast data
        varcol: str, column name of the variable used to shade the rose sectors
        dircol: str, column name of the wind direction data (degrees)
        bins: int specifying number of equally spaced bins to divide var,
            OR list/array of bin division limits (eg [0,4,8,12,16]).
            Negative limits are flipped positive and the legend is relabeled.
        nsector: number of direction sectors to divide rose
        ylim: optional float with maximum value for frequency of observations,
            use to plot different roses with uniform limits
        noleg: bool switch to turn legend off
    Returns:
        (fig, ax, leg) — leg is the placeholder list ['blank'] when noleg is True.
    """
    # set up data
    winddir = metdat[dircol]
    var = metdat[varcol]

    # negative bin limits (used for e.g. negative shear) are flipped positive;
    # remember to relabel the legend with the original signs afterwards.
    relabel = False
    if not isinstance(bins, int):
        if (bins < 0).any():
            bins *= -1
            relabel = True

    # number of var divisions
    if isinstance(bins, int):
        nbins = bins
    else:
        nbins = len(bins)

    # set up plotting colors
    colors = utils.get_colors(nbins - 1, basecolor='span')
    colors += ['#3A4246']  # add something dark to the end.
    colors = tuple(colors[0:nbins])

    # build figure
    fig = plt.figure()
    ax = WindroseAxes.from_ax(fig=fig)
    ax.bar(winddir, var, normed=True, opening=0.95, edgecolor='white',
           bins=bins, nsector=nsector, colors=colors, linewidth=0.35)

    # legend
    leg = ['blank']
    if noleg is not True:
        leg = ax.set_legend(loc=6, bbox_to_anchor=(1.25, 0.5), fontsize=10, frameon=False)
        if relabel:
            # restore the original (negative) bin limits in the legend text
            for ii in range(nbins - 1):
                leg.get_texts()[ii].set_text('[{}: {})'.format(-bins[ii], -bins[ii + 1]))
            leg.get_texts()[-1].set_text('[{}: {})'.format(-bins[-1], '-inf'))

    # adjust plot for specified max frequency
    if ylim is None:
        ylim = ax.get_ylim()[-1]

    # frequency axis limits and labels
    ax.set_ylim(0, ylim)
    ax.set_yticks(np.linspace(0, ylim, 4))
    ax.set_yticklabels([str(round(x, 1)) for x in np.linspace(0, ylim, 4)])

    return fig, ax, leg


###########################################
def extreme_event_hist(eventdf, varcols, labels, bins=25, colors=None, alpha=1):
    """
    Overlaid histograms (in percent) of each column in varcols from eventdf.

    Returns (fig, ax, leg).
    """
    fig, ax = plt.subplots(figsize=(5, 3))
    if colors is None:
        colors = plt.rcParams['axes.prop_cycle'].by_key()['color']
        colors = [colors[x] for x in [2, 1, 3]]
    for ii, var in enumerate(varcols):
        data = eventdf[var].dropna(how='any')
        # weights convert raw counts to percent of observations
        n, histbins, patches = ax.hist(data, bins=bins, facecolor=colors[ii], edgecolor='k',
                                       weights=100 * np.ones(data.shape) / len(data),
                                       density=False, label=labels[ii], alpha=alpha)
    leg = ax.legend(frameon=False)
    ax.set_ylabel('Frequency [\%]')

    return fig, ax, leg


def extreme_event_wind_direction_bar(eventdf, varcols, labels, colors=None):
    """
    Bar chart of event frequency (percent) by wind-direction bin ('dirbin' column).

    Returns (fig, ax, leg).
    """
    if colors is None:
        colors = plt.rcParams['axes.prop_cycle'].by_key()['color']
        colors = [colors[x] for x in [2, 1, 3]]

    event_bydir = eventdf.groupby('dirbin').count()
    tmp = event_bydir[varcols].copy()
    tmp = 100 * tmp.div(tmp.sum())  # counts -> percent per column

    fig, ax = plt.subplots(figsize=(8, 3))
    tmp.plot.bar(ax=ax, color=colors[:len(varcols)], width=0.9, edgecolor='k')
    leg = ax.legend(labels)
    # thin out tick labels to every other direction bin
    xlabs = ax.get_xticklabels()[::2]
    ax.set_xticks(ax.get_xticks()[::2])
    ax.set_xticklabels(xlabs)
    ax.set_xlabel('Wind Direction [$^\circ$]')
    ax.set_ylabel('Frequency [\%]')

    return fig, ax, leg


def extreme_event_monthly_bar(eventdf, varcols, labels, colors=None):
    """
    Bar chart of average events per month (normalized by the number of years on record).

    Returns (fig, ax, leg).
    """
    if colors is None:
        colors = plt.rcParams['axes.prop_cycle'].by_key()['color']
        colors = [colors[x] for x in [2, 1, 3]]

    # FIX: use the eventdf argument rather than the global eventDF the
    # original referenced — the function now works on whatever frame it is given.
    numyears = eventdf.index.year.max() - eventdf.index.year.min()
    monthly = eventdf[varcols].groupby(eventdf.index.month).count() / numyears

    fig, ax = plt.subplots(figsize=(5, 3))
    monthly.plot.bar(ax=ax, label=labels, color=colors[:len(varcols)])
    ax.set_xlabel('Month')
    ax.set_ylabel('Events Per Month')
    leg = ax.legend(labels, frameon=False)

    return fig, ax, leg


def extreme_event_velocity_scatter(eventdf, varcols, labels, colors=None):
    """
    Scatter each column in varcols against hub-height mean wind speed ('WS_mean').

    Returns (fig, ax, leg).
    """
    if colors is None:
        colors = plt.rcParams['axes.prop_cycle'].by_key()['color']
        colors = [colors[x] for x in [2, 1, 3]]

    fig, ax = plt.subplots(figsize=(5, 3))
    for ii, var in enumerate(varcols):
        eventdf.plot.scatter('WS_mean', var, ax=ax, edgecolor=colors[ii], color='w', label=labels[ii])
    ax.set_xlabel('Hub-Height Velocity [m/s]')
    # FIX: capture the legend handle; the original returned an undefined name `leg`.
    leg = ax.legend(frameon=False)

    return fig, ax, leg
# -

# ## Data directory and list of files containing extreme events

datapath = '/Users/nhamilto/Documents/Wake_Dynamics/SiteChar/data/IEC_tmp/'
monthly_events_files = os.listdir(datapath)

today = datetime.date.today()
figpath = '../figs_{}{}{}'.format(str(today.year), str(today.month).zfill(2), str(today.day).zfill(2))

params = MET.setup_IEC_params()

# FIX: replaced bare `try/except: pass` with exist_ok, which only suppresses
# the "already exists" case instead of swallowing every error.
os.makedirs(figpath, exist_ok=True)

# ### Extreme Shear Events

# +
event_type = 'EWS'
events_files = [x for x in monthly_events_files if event_type in x]

# concatenate all monthly extreme-wind-shear event files into one frame
eventDF = pd.DataFrame()
for file in events_files:
    tmp = pd.read_csv(os.path.join(datapath, file))
    eventDF = pd.concat([eventDF, tmp])
eventDF.set_index('datetime', inplace=True)
eventDF.index = pd.DatetimeIndex(eventDF.index)

# FIX: use .loc instead of chained indexing (eventDF['col'][mask] = ...),
# which pandas flags as SettingWithCopy and may silently fail to write back.
# Mask out shear exponents inside the IEC limits or with implausible magnitude (>10).
eventDF.loc[eventDF['alpha_min'] > eventDF['alpha_neg_limit'], 'alpha_min'] = np.nan
eventDF.loc[np.abs(eventDF['alpha_min']) > 10.0, 'alpha_min'] = np.nan
eventDF.loc[eventDF['alpha_max'] < eventDF['alpha_pos_limit'], 'alpha_max'] = np.nan
eventDF.loc[np.abs(eventDF['alpha_max']) > 10.0, 'alpha_max'] = np.nan

# bin events by wind direction for the rose/bar figures
binwidth = 10
eventDF['dirbin'] = pd.cut(eventDF['WD_mean'],
                           bins=np.arange(0, 360.1, binwidth),
                           labels=np.arange(binwidth / 2, 360.1, binwidth))

varcols = ['alpha_max', 'alpha_min']
labels = [r'$\alpha_+$', r'$\alpha_-$']

# ## Rose figures
# fig, ax, leg = rose_fig(eventDF, 'alpha_max', 'dirbin', bins=np.array([0,0.5,1,1.5,2,2.5]))
# leg.set_title(r'$\alpha_+$')
# fig.savefig('{}/{}_pos_rose.pdf'.format(figpath, event_type))

# tmp = eventDF[['dirbin','alpha_min']].copy()
# tmp = tmp.replace([np.inf, -np.inf], np.nan).dropna(how='any')
# tmp['alpha_min'] *= -1
# fig, ax, leg = rose_fig(tmp, 'alpha_min', 'dirbin', bins=-np.array([0,0.5,1,1.5,2,2.5]))
# leg.set_title(r'$\alpha_-$')
# fig.savefig('{}/{}_neg_rose.pdf'.format(figpath, event_type))

## Wind dir bar plot
fig, ax, leg = extreme_event_wind_direction_bar(eventDF, varcols, labels)
fig.savefig('{}/{}_bar.pdf'.format(figpath, event_type))

# ## monthly frequency bar plot
# fig, ax, leg = extreme_event_monthly_bar(eventDF, varcols, labels)
# fig.savefig('{}/{}_monthly.pdf'.format(figpath, event_type))

# ## Velocity scatter plot
# # adding some limiting lines here
#
params = MET.setup_IEC_params() # alpha_pos = np.load('/Users/nhamilto/Documents/Wake_Dynamics/SiteChar/data/pos_alpha_limit.npy') # alpha_neg = np.load('/Users/nhamilto/Documents/Wake_Dynamics/SiteChar/data/neg_alpha_limit.npy') # alpha_reference_velocity = np.load('/Users/nhamilto/Documents/Wake_Dynamics/SiteChar/data/alpha_reference_velocity.npy') # fig, ax, leg = extreme_event_velocity_scatter(eventDF, varcols, labels) # ax.plot(alpha_reference_velocity, alpha_pos, 'k') # ax.plot(alpha_reference_velocity, alpha_neg, 'k') # ax.set_ylabel('Shear Exponent [-]') # fig.savefig('{}/{}_v_limits.pdf'.format(figpath, event_type)) # ## Histogram # fig, ax, leg = extreme_event_hist(eventDF, varcols, labels) # ax.set_xlabel('Shear Exponent [-]') # fig.savefig('{}/{}_hist.pdf'.format(figpath, event_type)) # - # ### Extreme Operating Gust Events eventDF.head(1) eventDF.WS_max.plot() figpath # + event_type = 'EOG' events_files = [x for x in monthly_events_files if event_type in x] eventDF = pd.DataFrame() for file in events_files: tmp = pd.read_csv(os.path.join(datapath, file)) eventDF = pd.concat([eventDF, tmp]) eventDF.rename(index=str, columns={'Unnamed: 0': 'datetime'}, inplace=True) eventDF.set_index('datetime', inplace=True) eventDF.index = pd.DatetimeIndex(eventDF.index) binwidth = 10 eventDF['dirbin'] = pd.cut(eventDF['WD_mean'], bins=np.arange(0,360.1, binwidth), labels=np.arange(binwidth/2,360.1, binwidth)) varcols = ['WS_max', 'WS_min'] labels = [r'$V_{max}$', r'$V_{min}$'] # ## Rose figures fig, ax, leg = rose_fig(eventDF, varcols[0], 'dirbin', bins=6) leg.set_title(labels[0]) fig.savefig('{}/{}_{}_rose.pdf'.format(figpath, event_type, varcols[0])) # fig, ax, leg = rose_fig(eventDF, varcols[1], 'dirbin', bins=6) # leg.set_title(labels[1]) # fig.savefig('{}/{}_{}_rose.pdf'.format(figpath, event_type, varcols[1])) # Wind dir bar plot fig, ax, leg = extreme_event_wind_direction_bar(eventDF, varcols[0], labels, colors='C1') fig.savefig('{}/{}_bar.pdf'.format(figpath, 
event_type)) # ## monthly frequency bar plot fig, ax, leg = extreme_event_monthly_bar(eventDF, varcols[0],'tmp', colors='C1') leg = ax.legend(labels, frameon=False) # ax.set_ylabel('Average ') fig.savefig('{}/{}_monthly.pdf'.format(figpath, event_type)) # velocity scatter fig,ax,leg = extreme_event_velocity_scatter(eventDF, varcols, labels) ## Histogram fig, ax, leg = extreme_event_hist(eventDF, varcols, labels, alpha=0.75) ax.set_xlabel('Velocity [m/s]') # ax.set_xlim() fig.savefig('{}/{}_hist.pdf'.format(figpath, event_type)) # - tmp = eventDF.groupby(eventDF.index.year).count() tmp.mean() eventDF['Vamp'] = eventDF['WS_max'] - eventDF['WS_mean'] eventDF[eventDF['Vamp'] == eventDF['Vamp'].max()] # + fig, ax = plt.subplots(figsize=(5,3)) tmp = eventDF['WS_max'] - eventDF['WS_mean'] data = tmp.values ax.hist(data, bins=35, weights=100*np.ones(len(data))/len(data), facecolor='C2', edgecolor='k', alpha=0.75, label=r'$V_{max}- V_{Ave}$') tmp = eventDF['WS_max'] - eventDF['WS_min'] data = tmp.values ax.hist(data, bins=35, weights=100*np.ones(len(data))/len(data), facecolor='C1', edgecolor='k', alpha=0.75, label=r'$V_{max}- V_{min}$') ax.set_xlabel(r'Velocity [m/s]') ax.set_ylabel('Frequency [\%]') ax.legend() fig.savefig('{}/EOG_vgust_hist_alt.pdf'.format(figpath)) # + # velocity scatter fig,ax,leg = extreme_event_velocity_scatter(eventDF, varcols[0:1], labels[0:1], colors=['C1']) ax.set_ylabel('Gust Velocity [m/s]') leg = ax.get_legend() leg.set_visible(False) fig.savefig('{}/EOG_vgust_scatter.pdf'.format(figpath)) # + vtmp = 8 sigma_1 = params['Iref'] * (0.75 * vtmp + 5.6) test1 = 1.35 * (params['Ve01'] - vtmp) test2 = 3.3 * (sigma_1 / (1 + 0.1 * params['D'] / params['Lambda_1'])) # IEC gust velocity magnitude threshold Vgust = np.min(np.vstack([test1, test2]), axis=0) T = 10.5 #s t = np.linspace(0,T,101) mod = 0.37 * Vgust * np.sin(3 * np.pi * t / T) * (1 - np.cos(2 * np.pi * t / T)) WS_pos_gustlim = vtmp - mod.min() WS_neg_gustlim = vtmp - mod.max() veog = vtmp-mod 
fig, ax = plt.subplots(figsize=(4,2)) ax.plot(t, veog, color='C1', label=r'$V_{EOG}$') ax.axhline(8.0, ls='--', color='k') ax.axhline(y=veog.max(), xmin=0.35, xmax=0.65, ls='--', color='k') ax.axhline(y=veog.min(), xmin=0.15, xmax=0.4, ls='--', color='k') # ax.arrow(x=2.2, y=veog.min(), dx=0, dy=(8-veog.min()), color='k', width=0.025, length_includes_head=True) # ax.arrow(x=2.2, y=8, dx=0, dy=-(8-veog.min()), color='k', width=0.025, length_includes_head=True) # ax.annotate(s=r'$V_a$', xy=(0.5,0.5), xytext=(0,0), arrowprops=dict(arrowstyle='<|-|>'), xycoords='axes fraction', color='k', textcoords=(0.25,0.25)) ax.set_xlabel('Time [s]') ax.set_ylabel(r'Velocity [m/s]') # ax.legend() # fig.savefig('{}/EOG_hat.pdf'.format(figpath)) # - dict(arrowstyle='<->',width=0.05) # + event_type = 'ETM' events_files = [x for x in monthly_events_files if event_type in x] eventDF = pd.DataFrame() for file in events_files: tmp = pd.read_csv(os.path.join(datapath, file)) eventDF = pd.concat([eventDF, tmp]) eventDF.set_index('datetime', inplace=True) eventDF.index = pd.DatetimeIndex(eventDF.index) binwidth = 10 eventDF['dirbin'] = pd.cut(eventDF['WD_mean'], bins=np.arange(0,360.1, binwidth), labels=np.arange(binwidth/2,360.1, binwidth)) varcols = ['sigma_1'] labels = [r'$\sigma_1$'] # ## Rose figures # fig, ax, leg = rose_fig(eventDF, varcols[0], 'dirbin', bins=6) # leg.set_title(labels[0]) # fig.savefig('{}/{}_{}_rose.pdf'.format(figpath, event_type, varcols[0])) # Wind dir bar plot fig, ax, leg = extreme_event_wind_direction_bar(eventDF, varcols[0], labels, colors='C1') fig.savefig('{}/{}_bar.pdf'.format(figpath, event_type)) # ## Velocity scatter plot # # adding some limitting lines here # fig, ax, leg = extreme_event_velocity_scatter(eventDF, varcols, labels) # ax.plot(eventDF['WS_mean'],eventDF['sigmatest'], label='ETM limit') # ax.set_ylabel('Turbulence Standard Deviation [m/s]'.format(labels[0])) # leg = ax.legend() # fig.savefig('{}/{}_v_limits.pdf'.format(figpath, event_type)) 
# ## monthly frequency bar plot # fig, ax, leg = extreme_event_monthly_bar(eventDF, varcols[0],'tmp', colors='C1') # leg = ax.legend(labels, frameon=False) # # ax.set_ylabel('Frequency [\%]') # fig.savefig('{}/{}_monthly.pdf'.format(figpath, event_type)) # ## Histogram # fig, ax, leg = extreme_event_hist(eventDF, varcols, labels, alpha=0.75) # ax.set_xlabel('Turbulence Standard Deviation [m/s]'.format(labels[0])) # fig.savefig('{}/{}_hist.pdf'.format(figpath, event_type)) # + event_type = 'EDC' events_files = [x for x in monthly_events_files if event_type in x] eventDF = pd.DataFrame() for file in events_files: tmp = pd.read_csv(os.path.join(datapath, file)) eventDF = pd.concat([eventDF, tmp]) eventDF.rename(index=str, columns={'Unnamed: 0': 'datetime'}, inplace=True) eventDF.set_index('datetime', inplace=True) eventDF.index = pd.DatetimeIndex(eventDF.index) # extra filtering... eventDF[eventDF.WS_mean > 40] = np.nan eventDF[eventDF.WS_mean < 0.1] = np.nan eventDF.dropna(how='any', inplace=True) # test = eventDF.copy() eventDF = eventDF.resample('60T').last() params = MET.setup_IEC_params() binwidth = 10 eventDF['dirbin'] = pd.cut(eventDF['WD_mean'], bins=np.arange(0,360.1, binwidth), labels=np.arange(binwidth/2,360.1, binwidth)) varcols = ['deltaWD'] labels = [r'$\Delta \theta$'] # ## Rose figures # fig, ax, leg = rose_fig(eventDF, varcols[0], 'dirbin', bins=6) # leg.set_title(labels[0]) # fig.savefig('{}/{}_{}_rose.pdf'.format(figpath, event_type, varcols[0])) # Wind dir bar plot # fig, ax, leg = extreme_event_wind_direction_bar(eventDF, varcols[0], labels, colors='C1') # fig.savefig('{}/{}_bar.pdf'.format(figpath, event_type)) # ## Velocity scatter plot # # adding some limitting lines here # fig, ax, leg = extreme_event_velocity_scatter(eventDF, varcols, labels) # tmp = eventDF.copy() # binwidth = 1.0 # # Turbulence standard deviation depends on mean wind speed # vdummy = np.linspace(0,35) # sigma_1_e = params['Iref'] * (0.75 * vdummy + 5.6) # # Direction change 
threshold depends on wind speed # theta_e = np.degrees(4 * np.arctan( sigma_1_e / (vdummy * (1 + 0.1 * params['D'] / params['Lambda_1'])))) # # tmp['vbin'] = pd.cut(eventDF['WS_mean'], bins=np.arange(0,eventDF['WS_mean'].max(), binwidth), labels=np.arange(binwidth/2, eventDF['WS_mean'].max()-1, binwidth)) # # maxlim = tmp['delta_WD_thresh'].groupby(tmp['vbin']).min() # ax.plot(vdummy, theta_e, color='k') # ax.plot(vdummy, -theta_e, color='k') # ax.set_ylim(-200,200) # ax.set_ylabel(r'Wind Direciton Change [$^\circ$]'.format(labels[0])) # leg = ax.legend() # fig.savefig('{}/{}_v_limits.pdf'.format(figpath, event_type)) # ## monthly frequency bar plot # fig, ax, leg = extreme_event_monthly_bar(eventDF, varcols[0],'tmp', colors='C1') # leg = ax.legend(labels, frameon=False) # fig.savefig('{}/{}_monthly.pdf'.format(figpath, event_type)) # ## Histogram # fig, ax, leg = extreme_event_hist(eventDF, varcols, labels, alpha=0.75) # ax.set_xlabel(r'Wind Direciton Change [$^\circ$]'.format(labels[0])) # fig.savefig('{}/{}_hist.pdf'.format(figpath, event_type)) # - # ### ECD events # # + event_type = 'ECD' events_files = [x for x in monthly_events_files if event_type in x] eventDF = pd.DataFrame() for file in events_files: tmp = pd.read_csv(os.path.join(datapath, file)) eventDF = pd.concat([eventDF, tmp]) eventDF.drop(columns=['Unnamed: 0'], inplace=True) eventDF.set_index('datetime', inplace=True) eventDF.index = pd.DatetimeIndex(eventDF.index ) eventDF['deltaWD'] = np.abs(eventDF['WD_min'] - eventDF['WD_max']) eventDF = eventDF[eventDF['deltaWD']<180] eventDF.dropna(how='any', inplace=True) # - eventDF.head(1) # + event_type = 'ECD' events_files = [x for x in monthly_events_files if event_type in x] eventDF = pd.DataFrame() for file in events_files: tmp = pd.read_csv(os.path.join(datapath, file)) eventDF = pd.concat([eventDF, tmp]) eventDF.drop(columns=['Unnamed: 0'], inplace=True) eventDF.set_index('datetime', inplace=True) eventDF.index = pd.DatetimeIndex(eventDF.index ) 
eventDF['delta_WD'] = np.abs(eventDF['WD_min'] - eventDF['WD_max']) eventDF['delta_WS'] =eventDF['WS_max'] - eventDF['WS_min'] eventDF = eventDF[eventDF['delta_WD']<180] eventDF.dropna(how='any', inplace=True) binwidth = 10 eventDF['dirbin'] = pd.cut(eventDF['WD_mean'], bins=np.arange(0,360.1, binwidth), labels=np.arange(binwidth/2,360.1, binwidth)) varcols = ['delta_WS', 'delta_WD'] labels = [r'$\Delta V_{hub}$', r'$\Delta \theta$'] # ## Rose figures fig, ax, leg = rose_fig(eventDF, varcols[0], 'dirbin', bins=6) leg.set_title(labels[0]) # fig.savefig('{}/{}_{}_rose.pdf'.format(figpath, event_type, varcols[0])) # Wind dir bar plot fig, ax, leg = extreme_event_wind_direction_bar(eventDF, varcols[0], labels, colors=['C1']) fig.savefig('{}/{}_bar.pdf'.format(figpath, event_type)) vdummy = np.linspace(8,50) thetacg = 180 * np.ones(len(vdummy)) thetacg[vdummy > 4] = 720/vdummy ## Velocity scatter plot # adding some limitting lines here fig, ax, leg = extreme_event_velocity_scatter(eventDF, ['delta_WD'], [r'$\Delta \theta$']) ax.plot(vdummy, thetacg, label='ECD Limit') ax.set_ylabel(r'{} [$^\circ$]'.format(labels[1])) # leg = ax.legend() # fig.savefig('{}/{}_v_limits.pdf'.format(figpath, event_type)) # adding some limitting lines here fig, ax, leg = extreme_event_velocity_scatter(eventDF, ['delta_WS'], labels) ax.plot(eventDF['WS_mean'],eventDF['WD_mean'], label='ETM limit') ax.set_ylabel(r'{} [$^\circ$]'.format(labels[0])) leg = ax.legend() # fig.savefig('{}/{}_v_limits.pdf'.format(figpath, event_type)) ## monthly frequency bar plot fig, ax, leg = extreme_event_monthly_bar(eventDF, varcols, labels, colors=['C1']) leg = ax.legend(labels, frameon=False) ax.set_ylabel('Frequency [\%]') # fig.savefig('{}/{}_monthly.pdf'.format(figpath, event_type)) # ## Histogram fig, ax, leg = extreme_event_hist(eventDF, varcols, labels, alpha=0.75) ax.set_xlabel('Turbulence Standard Deviation, {} [m/s]'.format(labels[0])) # fig.savefig('{}/{}_hist.pdf'.format(figpath, event_type)) # + 
event_type = 'ECD' events_files = [x for x in monthly_events_files if event_type in x] eventDF = pd.DataFrame() for file in events_files: tmp = pd.read_csv(os.path.join(datapath, file)) eventDF = pd.concat([eventDF, tmp]) eventDF.set_index('datetime', inplace=True) eventDF.index = pd.DatetimeIndex(eventDF.index) eventDF = eventDF[eventDF['delta_WD']<180] varcols = ['delta_WS', 'delta_WD'] labels = [r'$\Delta V_{hub}$', r'$\Delta \theta$'] data = eventDF[varcols] # - eventDF.head(1) # + fig, ax = plt.subplots(figsize=(5,3)) eventDF.plot.scatter('delta_WS', 'delta_WD', edgecolor='C1', color='w', ax=ax) ax.set_xlabel(r'Hub-Height Velocity Change [m/s]') ax.set_ylabel(r'Wind Direction Change [$^\circ$]') fig.savefig('{}/{}_scatter.pdf'.format(figpath, event_type)) # - eventDF.shape # + eventDF.dropna(how='any', inplace=True) varcols = ['delta_WS', 'delta_WD'] labels = [r'$\Delta V_{hub}$', r'$|\Delta \theta|$'] fig, ax = plt.subplots(figsize=(5,3)) bins=25 alpha=0.75 data = eventDF[varcols[0]] n,histbins,patches = ax.hist(data, bins = bins, facecolor='C1', edgecolor='k', weights=100*np.ones(data.shape) / len(data), density=False, label=labels[0], alpha=alpha) ax.set_xlabel(r'Hub-Height Velocity Change [m/s]') ax2 = ax.twiny() data = eventDF[varcols[1]] n,histbins,patches = ax2.hist(data, bins = bins, facecolor='C2', edgecolor='k', weights=100*np.ones(data.shape) / len(data), density=False, label=labels[1], alpha=alpha) ax2.set_xlabel(r'Wind Direction Change, [$^\circ$]', labelpad=15) ax.set_ylabel('Frequency [\%]') fig.legend(loc=6, bbox_to_anchor=(0.65, 0.8)) # fig.tight_layout() fig.savefig('{}/{}_hist.pdf'.format(figpath, event_type)) # + IEC_events = {} event_types = ['EWS', 'EOG', 'ETM', 'EDC', 'ECD'] for event in event_types: events_files = [x for x in monthly_events_files if event in x] eventDF = pd.DataFrame() for file in events_files: tmp = pd.read_csv(os.path.join(datapath, file)) eventDF = pd.concat([eventDF, tmp]) if event is 'EDC': 
eventDF.rename(index=str, columns={'Unnamed: 0': 'datetime'}, inplace=True) eventDF.set_index('datetime', inplace=True) eventDF.index = pd.DatetimeIndex(eventDF.index) IEC_events[event] = eventDF.copy() # + IEC_events['EWS']['alpha_min'][IEC_events['EWS']['alpha_min'] > IEC_events['EWS']['alpha_neg_limit']] = np.nan IEC_events['EWS']['alpha_min'][np.abs(IEC_events['EWS']['alpha_min']) > 10.0] = np.nan IEC_events['EWS']['alpha_max'][IEC_events['EWS']['alpha_max'] < IEC_events['EWS']['alpha_pos_limit']] = np.nan IEC_events['EWS']['alpha_max'][np.abs(IEC_events['EWS']['alpha_max']) > 10.0] = np.nan IEC_events['EWS'] = IEC_events['EWS'].resample('10T').last() # extra filtering... IEC_events['EDC'][IEC_events['EDC'].WS_mean > 40] = np.nan IEC_events['EDC'][IEC_events['EDC'].WS_mean < 0.1] = np.nan IEC_events['EDC'].dropna(how='any', inplace=True) IEC_events['EDC'] = IEC_events['EDC'].resample('1H').last() IEC_events['EOG'].rename(index=str, columns={'Unnamed: 0': 'datetime'}, inplace=True) # IEC_events['EOG'].set_index('datetime', inplace=True) IEC_events['EOG'].index = pd.DatetimeIndex(IEC_events['EOG']['datetime']) IEC_events['EOG'].dropna(how='any', inplace=True) # IEC_events['EOG'] = IEC_events['EOG'].resample('10T').last() # + monthly = {event: IEC_events[event].resample('1M').count() for event in event_types} monthly = {event: monthly[event].WS_mean.groupby(monthly[event].index.month).mean() for event in event_types} monthly_DF = pd.DataFrame.from_dict(monthly)#, names=event_types) monthly_DF.sum().sum() # + nrelcolors = utils.get_nrelcolors() colors = utils.get_colors(5,basecolor='span') colors = ['#0079c2', '#00A4E4', '#5E6A71', '#D9531E', '#933c06'] # + fig, ax = plt.subplots(figsize=(10,2.5)) monthly_DF.plot.bar(ax=ax,width=0.9, edgecolor='k', color=colors) leg = ax.legend(loc=6, bbox_to_anchor=(1,0.5)) ax.set_xlabel('Month') ax.set_ylabel('Average Events per Month') fig.savefig('{}/monthly_event_bar.pdf'.format(figpath)) # - figpath
notebooks/Extreme_events_figs_20190109.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3 (ipykernel)
#     language: python
#     name: python3
# ---

import torch
import matplotlib.pyplot as plt
import pandas as pd
# %matplotlib inline

# Monte-Carlo estimation of pi: sample points uniformly in the unit square
# and count the fraction that land inside the quarter unit circle.

x = 0.5
y = 0.3
t = torch.tensor(x**2 + y**2 < 1)
t.int()


def f(x, y):
    """Indicator (0/1 int tensor) of point(s) (x, y) lying inside the unit circle."""
    return (x**2 + y**2 < 1).int()


uniform = torch.distributions.Uniform(low= 0., high=1.)


def estimate(N, plot=False):
    """Estimate pi from N uniform samples in the unit square.

    The fraction of samples inside the quarter circle approximates pi/4,
    so the count is scaled by 4. If plot is True, draw the samples over
    the quarter circle with the estimate in the title.
    """
    XY = uniform.sample([N, 2])
    X = XY[:, 0]
    Y = XY[:, 1]
    # N == len(X) by construction; use it directly instead of re-measuring
    pi_hat = (4 * f(X, Y).sum() / N).item()
    if plot:
        plt.scatter(X, Y, c='k', alpha=0.5, s=20)
        plt.xlim((0, 1))
        plt.ylim(0, 1)
        ax = plt.gca()
        ax.set_aspect("equal")
        c = plt.Circle((0, 0), 1, alpha=0.5, zorder=-10)
        ax.add_artist(c)
        plt.title(rf"$\hat{{\pi}}$ = {pi_hat:0.4f}")
    return pi_hat


estimate(100, True)

# +
# Repeat the experiment at sample sizes 10^0 .. 10^6 to observe the
# estimator's convergence and spread.
num_experiments = 1000
out = {}
for N in torch.pow(10, torch.arange(0, 7, 1)):
    N_val = N.item()
    out[N_val] = {}
    for experiment in range(num_experiments):
        out[N_val][experiment] = estimate(N_val, False)
# -

df = pd.DataFrame(out)
df

# Mean estimate vs sample size, with a +/- one-standard-deviation band
mean_df = df.mean()
mean_df.plot(logx=True, lw=5)
std_df = df.std()
plt.fill_between(mean_df.index, mean_df-std_df, mean_df+std_df, alpha=0.2)
plt.axhline(torch.pi, color='k', linestyle='--')

# Spread decays like 1/sqrt(N): a straight line on log-log axes
std_df.plot(logx=True, logy=True)
notebooks/sampling/monte_carlo.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3.6 # language: python # name: python36 # --- # ![Impressions](https://PixelServer20190423114238.azurewebsites.net/api/impressions/MachineLearningNotebooks/how-to-use-azureml/work-with-data/dataprep/how-to-guides/summarize.png) # # Summarize # Copyright (c) Microsoft Corporation. All rights reserved.<br> # Licensed under the MIT License.<br> # # Azure ML Data Prep can help summarize your data by providing you a synopsis based on aggregates over specific columns. # # ## Table of Contents # [Overview](#overview)<br> # [Summmary Functions](#summary)<br> # * [SummaryFunction.MIN](#min)<br> # * [SummaryFunction.MAX](#max)<br> # * [SummaryFunction.MEAN](#mean)<br> # * [SummaryFunction.MEDIAN](#median)<br> # * [SummaryFunction.VAR](#var)<br> # * [SummaryFunction.SD](#sd)<br> # * [SummaryFunction.COUNT](#count)<br> # * [SummaryFunction.SUM](#sum)<br> # * [SummaryFunction.SKEWNESS](#skewness)<br> # * [SummaryFunction.KURTOSIS](#kurtosis) # <a id="overview"></a> # ## Overview # Before we drill down into each aggregate function, let us observe `summarize` end to end. # # We will start by reading some data. import azureml.dataprep as dprep dflow = dprep.auto_read_file(path='../data/crime-dirty.csv') dflow.head(10) # Next we count (`SummaryFunction.COUNT`) the number of rows with column ID with non-null values grouped by Primary Type. dflow_summarize = dflow.summarize( summary_columns=[ dprep.SummaryColumnsValue( column_id='ID', summary_column_name='Primary Type ID Counts', summary_function=dprep.SummaryFunction.COUNT)], group_by_columns=['Primary Type']) dflow_summarize.head(10) # If we choose to not group by anything, we will instead get a single record over the entire dataset. Here we will get the number of rows that have the column ID with non-null values. 
dflow_summarize_nogroup = dflow.summarize( summary_columns=[ dprep.SummaryColumnsValue( column_id='ID', summary_column_name='ID Count', summary_function=dprep.SummaryFunction.COUNT)]) dflow_summarize_nogroup.head(1) # Conversely, we can group by multiple columns. dflow_summarize_2group = dflow.summarize( summary_columns=[ dprep.SummaryColumnsValue( column_id='ID', summary_column_name='Primary Type & Location Description ID Counts', summary_function=dprep.SummaryFunction.COUNT)], group_by_columns=['Primary Type', 'Location Description']) dflow_summarize_2group.head(10) # In a similar vein, we can compute multiple aggregates in a single summary. Each aggregate function is independent and it is possible to aggregate the same column multiple times. dflow_summarize_multi_agg = dflow.summarize( summary_columns=[ dprep.SummaryColumnsValue( column_id='ID', summary_column_name='Primary Type ID Counts', summary_function=dprep.SummaryFunction.COUNT), dprep.SummaryColumnsValue( column_id='ID', summary_column_name='Primary Type Min ID', summary_function=dprep.SummaryFunction.MIN), dprep.SummaryColumnsValue( column_id='Date', summary_column_name='Primary Type Max Date', summary_function=dprep.SummaryFunction.MAX)], group_by_columns=['Primary Type']) dflow_summarize_multi_agg.head(10) # If we wanted this summary data back into our original data set, we can make use of `join_back` and optionally `join_back_columns_prefix` for easy naming distinctions. Summary columns will be added to the end. `group_by_columns` is not necessary for using `join_back`, however the behavior will be more like an append instead of a join. 
dflow_summarize_join = dflow.summarize(
        summary_columns=[
            dprep.SummaryColumnsValue(
                column_id='ID',
                summary_column_name='Primary Type ID Counts',
                summary_function=dprep.SummaryFunction.COUNT)],
        group_by_columns=['Primary Type'],
        join_back=True,
        join_back_columns_prefix='New_')
dflow_summarize_join.head(10)

# <a id="summary"></a>
# ## Summary Functions
# Here we will go over all the possible aggregates in Data Prep.
# The most up to date set of functions can be found by enumerating the `SummaryFunction` enum.

import azureml.dataprep as dprep
[x.name for x in dprep.SummaryFunction]

# <a id="min"></a>
# ### SummaryFunction.MIN
# Data Prep can aggregate and find the minimum value of a column.

import azureml.dataprep as dprep
dflow = dprep.auto_read_file(path='../data/crime-dirty.csv')
dflow_min = dflow.summarize(
        summary_columns=[
            dprep.SummaryColumnsValue(
                column_id='Date',
                summary_column_name='Primary Type Min Date',
                summary_function=dprep.SummaryFunction.MIN)],
        group_by_columns=['Primary Type'])
dflow_min.head(10)

# <a id="max"></a>
# ### SummaryFunction.MAX
# Data Prep can find the maximum value of a column.

# NOTE(review): the result variables below were all named `dflow_min`
# regardless of the aggregate; they are renamed per aggregate for clarity.
import azureml.dataprep as dprep
dflow = dprep.auto_read_file(path='../data/crime-dirty.csv')
dflow_max = dflow.summarize(
        summary_columns=[
            dprep.SummaryColumnsValue(
                column_id='Date',
                summary_column_name='Primary Type Max Date',
                summary_function=dprep.SummaryFunction.MAX)],
        group_by_columns=['Primary Type'])
dflow_max.head(10)

# <a id="mean"></a>
# ### SummaryFunction.MEAN
# Data Prep can find the statistical mean of a column.

import azureml.dataprep as dprep
dflow = dprep.auto_read_file(path='../data/crime-dirty.csv')
dflow_mean = dflow.summarize(
        summary_columns=[
            dprep.SummaryColumnsValue(
                column_id='Latitude',
                summary_column_name='Primary Type Latitude Mean',
                summary_function=dprep.SummaryFunction.MEAN)],
        group_by_columns=['Primary Type'])
dflow_mean.head(10)

# <a id="median"></a>
# ### SummaryFunction.MEDIAN
# Data Prep can find the median value of a column.

import azureml.dataprep as dprep
dflow = dprep.auto_read_file(path='../data/crime-dirty.csv')
dflow_median = dflow.summarize(
        summary_columns=[
            dprep.SummaryColumnsValue(
                column_id='Latitude',
                summary_column_name='Primary Type Latitude Median',
                summary_function=dprep.SummaryFunction.MEDIAN)],
        group_by_columns=['Primary Type'])
dflow_median.head(10)

# <a id="var"></a>
# ### SummaryFunction.VAR
# Data Prep can find the statistical variance of a column. We will need more than one data point to calculate this, otherwise we will be unable to give results.

import azureml.dataprep as dprep
dflow = dprep.auto_read_file(path='../data/crime-dirty.csv')
dflow_var = dflow.summarize(
        summary_columns=[
            dprep.SummaryColumnsValue(
                column_id='Latitude',
                summary_column_name='Primary Type Latitude Variance',
                summary_function=dprep.SummaryFunction.VAR)],
        group_by_columns=['Primary Type'])
dflow_var.head(10)

# Note that despite there being two cases of BATTERY, one of them is missing geographical location, thus only CRIMINAL DAMAGE can yield variance information.

# <a id="sd"></a>
# ### SummaryFunction.SD
# Data Prep can find the standard deviation of a column. We will need more than one data point to calculate this, otherwise we will be unable to give results.

import azureml.dataprep as dprep
dflow = dprep.auto_read_file(path='../data/crime-dirty.csv')
dflow_sd = dflow.summarize(
        summary_columns=[
            dprep.SummaryColumnsValue(
                column_id='Latitude',
                summary_column_name='Primary Type Latitude Standard Deviation',
                summary_function=dprep.SummaryFunction.SD)],
        group_by_columns=['Primary Type'])
dflow_sd.head(10)

# Similar to when we calculate variance, despite there being two cases of BATTERY, one of them is missing geographical location, thus only CRIMINAL DAMAGE can yield variance information.

# <a id="count"></a>
# ### SummaryFunction.COUNT
# Data Prep can count the number of rows that have a column with non-null values.

import azureml.dataprep as dprep
dflow = dprep.auto_read_file(path='../data/crime-dirty.csv')
dflow_count = dflow.summarize(
        summary_columns=[
            dprep.SummaryColumnsValue(
                column_id='Latitude',
                summary_column_name='Primary Type Latitude Count',
                summary_function=dprep.SummaryFunction.COUNT)],
        group_by_columns=['Primary Type'])
dflow_count.head(10)

# Note that despite there being two cases of BATTERY, one of them is missing geographical location, thus when we group by Primary Type, we only get a count of one for Latitude.

# <a id="sum"></a>
# ### SummaryFunction.SUM
# Data Prep can aggregate and sum the values of a column. Our dataset does not have many numerical facts, but here we sum IDs grouped by Primary Type.

import azureml.dataprep as dprep
dflow = dprep.auto_read_file(path='../data/crime-dirty.csv')
dflow_sum = dflow.summarize(
        summary_columns=[
            dprep.SummaryColumnsValue(
                column_id='ID',
                summary_column_name='Primary Type ID Sum',
                summary_function=dprep.SummaryFunction.SUM)],
        group_by_columns=['Primary Type'])
dflow_sum.head(10)

# <a id="skewness"></a>
# ### SummaryFunction.SKEWNESS
# Data Prep can calculate the skewness of data in a column. We will need more than one data point to calculate this, otherwise we will be unable to give results.

import azureml.dataprep as dprep
dflow = dprep.auto_read_file(path='../data/crime-dirty.csv')
dflow_skew = dflow.summarize(
        summary_columns=[
            dprep.SummaryColumnsValue(
                column_id='Latitude',
                summary_column_name='Primary Type Latitude Skewness',
                summary_function=dprep.SummaryFunction.SKEWNESS)],
        group_by_columns=['Primary Type'])
dflow_skew.head(10)

# <a id="kurtosis"></a>
# ### SummaryFunction.KURTOSIS
# Data Prep can calculate the kurtosis of data in a column. We will need more than one data point to calculate this, otherwise we will be unable to give results.

import azureml.dataprep as dprep
dflow = dprep.auto_read_file(path='../data/crime-dirty.csv')
dflow_kurt = dflow.summarize(
        summary_columns=[
            dprep.SummaryColumnsValue(
                column_id='Latitude',
                summary_column_name='Primary Type Latitude Kurtosis',
                summary_function=dprep.SummaryFunction.KURTOSIS)],
        group_by_columns=['Primary Type'])
dflow_kurt.head(10)
how-to-use-azureml/work-with-data/dataprep/how-to-guides/summarize.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Validation Basics # > This chapter focuses on the basics of model validation. From splitting data into training, validation, and testing datasets, to creating an understanding of the bias-variance tradeoff, we build the foundation for the techniques of K-Fold and Leave-One-Out validation practiced in chapter three. This is the Summary of lecture "Model Validation in Python", via datacamp. # # - toc: true # - badges: true # - comments: true # - author: <NAME> # - categories: [Python, Datacamp, Machine_Learning] # - image: images/train_test_score.png # + import pandas as pd import numpy as np import matplotlib.pyplot as plt plt.rcParams['figure.figsize'] = (8, 8) # - # ## Creating train,test, and validation datasets # - Traditional train/test split # - Seen data (used for training) # - Unseen data (unavailable for training) # ![holdout](image/holdout.png) # ### Create one holdout set # Your boss has asked you to create a simple random forest model on the `tic_tac_toe` dataset. She doesn't want you to spend much time selecting parameters; rather she wants to know how well the model will perform on future data. For future Tic-Tac-Toe games, it would be nice to know if your model can predict which player will win. 
# Load the Tic-Tac-Toe data: nine categorical board-square columns plus the
# game outcome in column 10.
tic_tac_toe = pd.read_csv('./dataset/tic-tac-toe.csv')
tic_tac_toe.head()

# +
from sklearn.model_selection import train_test_split

# Create dummy variables using pandas (one-hot encode the nine board squares).
X = pd.get_dummies(tic_tac_toe.iloc[:, 0:9])
y = tic_tac_toe.iloc[:, 9]

# Create training and testing datasets, Use 10% for the test set
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.1, random_state=1111)
# -

# ### Create two holdout sets
# You recently created a simple random forest model to predict Tic-Tac-Toe game wins for your boss, and at her request, you did not do any parameter tuning. Unfortunately, the overall model accuracy was too low for her standards. This time around, she has asked you to focus on model performance.
#
# Before you start testing different models and parameter sets, you will need to split the data into training, validation, and testing datasets. Remember that after splitting the data into training and testing datasets, the validation dataset is created by splitting the training dataset.

# +
# Create temporary training and final testing datasets (80/20 split).
X_temp, X_test, y_temp, y_test = train_test_split(X, y, test_size=0.2, random_state=1111)

# Create the final training and validation datasets (60/20/20 overall).
X_train, X_val, y_train, y_val = train_test_split(X_temp, y_temp, test_size=0.25, random_state=1111)
# -

# You now have training, validation, and testing datasets, but do you know when you need both validation and testing datasets?

# ## Accuracy metrics: regression models
# - Mean absolute error (MAE)
# $$ \text{MAE} = \frac{\sum_{i=1}^{n} \vert y_i - \hat{y_i} \vert}{n} $$
#   - Simplest and most intuitive metric
#   - Treats all points equally
#   - Not sensitive to outliers
# - Mean squared error (MSE)
# $$ \text{MSE} = \frac{\sum_{i=1}^{n}(y_i - \hat{y_i})^2}{n} $$
#   - Most widely used regression metric
#   - Allows outlier errors to contribute more to the overall error
#   - Random family road trips could lead to large errors in predictions
# - MAE vs. MSE
#   - Accuracy metrics are always application specific
#   - MAE and MSE error terms are in different units and should not be compared

# ### Mean absolute error
# Communicating modeling results can be difficult. However, most clients understand that on average, a predictive model was off by some number. This makes explaining the mean absolute error easy. For example, when predicting the number of wins for a basketball team, if you predict 42, and they end up with 40, you can easily explain that the error was two wins.
#
# In this exercise, you are interviewing for a new position and are provided with two arrays. `y_test`, the true number of wins for all 30 NBA teams in 2017 and `predictions`, which contains a prediction for each team. To test your understanding, you are asked to both manually calculate the MAE and use `sklearn`.

# +
y_test = np.array([53, 51, 51, 49, 43, 42, 42, 41, 41, 37, 36, 31, 29, 28, 20,
                   67, 61, 55, 51, 51, 47, 43, 41, 40, 34, 33, 32, 31, 26, 24])
predictions = np.array([60, 62, 42, 42, 30, 50, 52, 42, 44, 35, 30, 30, 35, 40, 15,
                        72, 58, 60, 40, 42, 45, 46, 40, 35, 25, 40, 20, 34, 25, 24])

# +
from sklearn.metrics import mean_absolute_error

# Manually calculate the MAE: mean of the absolute residuals.
# (Vectorized numpy ops replace the original builtin sum(abs(...)), which
# iterates the array element-by-element in Python; the value is identical.)
n = len(predictions)
mae_one = np.abs(y_test - predictions).sum() / n
print('With a manual calculation, the error is {}'.format(mae_one))

# Use scikit-learn to calculate the MAE as a cross-check.
mae_two = mean_absolute_error(y_test, predictions)
print('Using scikit-learn, the error is {}'.format(mae_two))
# -

# These predictions were about six wins off on average. This isn't too bad considering NBA teams play 82 games a year. Let's see how these errors would look if you used the mean squared error instead.

# ### Mean squared error
# Let's focus on the 2017 NBA predictions again. Every year, there are at least a couple of NBA teams that win way more games than expected. If you use the MAE, this accuracy metric does not reflect the bad predictions as much as if you use the MSE. Squaring the large errors from bad predictions will make the accuracy look worse.
#
# In this example, NBA executives want to better predict team wins. You will use the mean squared error to calculate the prediction error. The actual wins are loaded as `y_test` and the `predictions` as predictions.

# +
from sklearn.metrics import mean_squared_error

n = len(predictions)
# Finish the manual calculation of the MSE: mean of the squared residuals.
mse_one = ((y_test - predictions) ** 2).sum() / n
print('With a manual calculation, the error is {}'.format(mse_one))

# Use the scikit-learn function to calculate MSE
mse_two = mean_squared_error(y_test, predictions)
print('Using scikit-learn, the error is {}'.format(mse_two))
# -

# If you run any additional models, you will try to beat an MSE of 49.1, which is the average squared error of using your model. Although the MSE is not as interpretable as the MAE, it will help us select a model that has fewer 'large' errors.

# ### Performance on data subsets
# In professional basketball, there are two conferences, the East and the West. Coaches and fans often only care about how teams in their own conference will do this year.
#
# You have been working on an NBA prediction model and would like to determine if the predictions were better for the East or West conference. You added a third array to your data called `labels`, which contains an "E" for the East teams, and a "W" for the West. `y_test` and `predictions` have again been loaded for your use.
# Conference label for each of the 30 teams: first 15 East, last 15 West.
labels = np.array(['E', 'E', 'E', 'E', 'E', 'E', 'E', 'E', 'E', 'E', 'E', 'E', 'E', 'E', 'E',
                   'W', 'W', 'W', 'W', 'W', 'W', 'W', 'W', 'W', 'W', 'W', 'W', 'W', 'W', 'W'])

# +
from sklearn.metrics import mean_absolute_error as mae

# Boolean masks selecting each conference's rows.
east_teams = labels == 'E'
west_teams = labels == 'W'

# Create arrays for the true and predicted values per conference.
true_east, preds_east = y_test[east_teams], predictions[east_teams]
true_west, preds_west = y_test[west_teams], predictions[west_teams]

# Print the accuracy metrics
print('The MAE for East teams is {}'.format(mae(true_east, preds_east)))
# Print the west accuracy
print('The MAE for West teams is {}'.format(mae(true_west, preds_west)))
# -

# It looks like the Western conference predictions were about two games better on average. Over the past few seasons, the Western teams have generally won the same number of games as the experts have predicted. Teams in the East are just not as predictable as those in the West.

# ## Classification metrics
# - Types:
#   - Precision
#   - Recall (also called sensitivity)
#   - Accuracy
#   - Specificity
#   - F1-score and its variations
# - Confusion Matrix
#   - True Positive: Predict/Actual are both 1
#   - True Negative: Predict/Actual are both 0
#   - False Positive: Predicted 1, actual 0
#   - False Negative: Predicted 0, actual 1

# ### Confusion matrices
# Confusion matrices are a great way to start exploring your model's accuracy. They provide the values needed to calculate a wide range of metrics, including sensitivity, specificity, and the F1-score.
#
# You have built a classification model to predict if a person has a broken arm based on an X-ray image. On the testing set, you have the following confusion matrix:
#
# |           | Prediction: 0 | Prediction: 1 |
# | ------    | ------------- | ------------- |
# | Actual: 0 | 324 (TN)      | 15 (FP)       |
# | Actual: 1 | 123 (FN)      | 491 (TP)      |

# +
# Name the confusion-matrix cells once instead of repeating magic numbers in
# every formula below; the resulting values are unchanged.
tn, fp, fn, tp = 324, 15, 123, 491
total = tn + fp + fn + tp  # 953 observations in the test set

# Calculate and print the accuracy: correct predictions over all predictions.
accuracy = (tn + tp) / total
print("The overall accuracy is {0: 0.2f}".format(accuracy))

# Calculate and print the precision: TP / (TP + FP).
precision = tp / (fp + tp)
print("The precision is {0: 0.2f}".format(precision))

# Calculate and print the recall: TP / (TP + FN).
recall = tp / (fn + tp)
print("The recall is {0: 0.2f}".format(recall))
# -

# In this case, a true positive is a picture of an actual broken arm that was also predicted to be broken. Doctors are okay with a few additional false positives (predicted broken, not actually broken), as long as you don't miss anyone who needs immediate medical attention.

# ### Confusion matrices, again
# Creating a confusion matrix in Python is simple. The biggest challenge will be making sure you understand the orientation of the matrix. This exercise makes sure you understand the `sklearn` implementation of confusion matrices. Here, you have created a random forest model using the `tic_tac_toe` dataset `rfc` to predict outcomes of 0 (loss) or 1 (a win) for Player One.
#
# Note: If you read about confusion matrices on another website or for another programming language, the values might be reversed.
# +
tic_tac_toe = pd.read_csv('./dataset/tic-tac-toe.csv')

# Create dummy variables using pandas
X = pd.get_dummies(tic_tac_toe.iloc[:, 0:9])
# Encode the outcome as 1 ('positive' = Player One wins) / 0 otherwise.
# (The original first assigned y = tic_tac_toe.iloc[:, 9] and immediately
# overwrote it with this line; that dead assignment has been removed.)
y = tic_tac_toe['Class'].apply(lambda x: 1 if x == 'positive' else 0)

# Create training and testing datasets, Use 10% for the test set
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.1, random_state=1111)

# +
from sklearn.ensemble import RandomForestClassifier

rfc = RandomForestClassifier(n_estimators=500, random_state=1111)
rfc.fit(X_train, y_train)

# +
from sklearn.metrics import confusion_matrix

# Create predictions
test_predictions = rfc.predict(X_test)

# Create and print the confusion matrix
cm = confusion_matrix(y_test, test_predictions)
print(cm)

# Print the true positives (actual 1s that were predicted 1s)
print('the number of true positives is: {}'.format(cm[1, 1]))
# -

# Row 1, column 1 represents the number of actual 1s that were predicted 1s (the true positives). Always make sure you understand the orientation of the confusion matrix before you start using it!

# ### Precision vs. recall
# The accuracy metrics you use to evaluate your model should always be based on the specific application. For this example, let's assume you are a really sore loser when it comes to playing Tic-Tac-Toe, but only when you are certain that you are going to win.
#
# Choose the most appropriate accuracy metric, either precision or recall, to complete this example. But remember, if you think you are going to win, you better win!

# +
from sklearn.metrics import precision_score, recall_score

test_predictions = rfc.predict(X_test)

# Since we only care about being right when we predict a win, precision is
# the metric of interest here; recall is printed alongside for comparison.
p_score = precision_score(y_test, test_predictions)
r_score = recall_score(y_test, test_predictions)

# Print the final result
print('The precision value is {0:.2f}, The recall value is {1:.2f}'.format(p_score, r_score))
# -
#
#

# +
candy = pd.read_csv('./dataset/candy-data.csv')

# Features are every column except the name and the target.
X = candy.drop(['competitorname', 'winpercent'], axis=1)
y = candy['winpercent']

X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=1111)

# +
from sklearn.ensemble import RandomForestRegressor


def _report_rfr_errors(max_features):
    """Fit a 25-tree random forest restricted to `max_features` features per
    split, print its train/test MAE, and return the fitted model.

    The original notebook repeated this fit-and-print cell three times with
    only `max_features` changing; the shared logic lives here instead.
    """
    rfr = RandomForestRegressor(n_estimators=25, random_state=1111, max_features=max_features)
    rfr.fit(X_train, y_train)
    # `mae` is sklearn's mean_absolute_error, aliased earlier in the notebook.
    print('The training error is {0:.2f}'.format(mae(y_train, rfr.predict(X_train))))
    print('The testing error is {0:.2f}'.format(mae(y_test, rfr.predict(X_test))))
    return rfr


# Very few features per split.
rfr = _report_rfr_errors(2)

# +
# All 11 features available at every split.
rfr = _report_rfr_errors(11)

# +
# A middle ground.
rfr = _report_rfr_errors(4)
# -

# ### Am I underfitting?
# You are creating a random forest model to predict if you will win a future game of Tic-Tac-Toe. Using the `tic_tac_toe` dataset, you have created training and testing datasets, `X_train`, `X_test`, `y_train`, and `y_test`.
#
# You have decided to create a bunch of random forest models with varying amounts of trees (1, 2, 3, 4, 5, 10, 20, and 50). The more trees you use, the longer your random forest model will take to run. However, if you don't use enough trees, you risk underfitting. You have created a for loop to test your model at the different number of trees.

# +
# Create dummy variables using pandas
X = pd.get_dummies(tic_tac_toe.iloc[:, 0:9])
# Encode the outcome as 1/0.  (A dead `y = tic_tac_toe.iloc[:, 9]` assignment
# that was immediately overwritten has been removed.)
y = tic_tac_toe['Class'].apply(lambda x: 1 if x == 'positive' else 0)

# Create training and testing datasets, Use 20% for the test set.
# (The original comment said 10%, but test_size=0.2 is 20%.)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=1111)

# +
from sklearn.metrics import accuracy_score

# Forest sizes under test; defined once and reused below for the plot's x-axis
# (the original duplicated this literal list).
n_trees = [1, 2, 3, 4, 5, 10, 20, 50]

test_scores, train_scores = [], []
for i in n_trees:
    rfc = RandomForestClassifier(n_estimators=i, random_state=1111)
    rfc.fit(X_train, y_train)

    # Create predictions for the X_train and X_test datasets
    train_predictions = rfc.predict(X_train)
    test_predictions = rfc.predict(X_test)

    # Append the accuracy score for the test and train predictions
    train_scores.append(round(accuracy_score(y_train, train_predictions), 2))
    test_scores.append(round(accuracy_score(y_test, test_predictions), 2))

# Print the train and test scores
print("The training scores were: {}".format(train_scores))
print("The testing scores were: {}".format(test_scores))
# -

# Notice that with only one tree, both the train and test scores are low. As you add more trees, both errors improve. Even at 50 trees, this still might not be enough. Every time you use more trees, you achieve higher accuracy. At some point though, more trees increase training time, but do not decrease testing error.

x = n_trees
tmp = pd.DataFrame({'x': x, 'training': train_scores, 'test': test_scores})
tmp.set_index('x', inplace=True)
tmp.plot(title='train/test score for n_estimators');
_notebooks/2020-07-13-02-Validation-Basics.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3.6
#     language: python
#     name: python36
# ---

# Copyright (c) Microsoft Corporation. All rights reserved.
#
# Licensed under the MIT License.

# # Azure ML Hardware Accelerated Object Detection
# This tutorial will show you how to deploy an object detection service based on the SSD-VGG model in just a few minutes using the Azure Machine Learning Accelerated AI service.
#
# We will use the SSD-VGG model accelerated on an FPGA. Our Accelerated Models Service handles translating deep neural networks (DNN) into an FPGA program.
#
# The steps in this notebook are:
# 1. [Setup Environment](#set-up-environment)
# * [Construct Model](#construct-model)
#     * Image Preprocessing
#     * Featurizer
#     * Save Model
#     * Save input and output tensor names
# * [Create Image](#create-image)
# * [Deploy Image](#deploy-image)
# * [Test the Service](#test-service)
#     * Create Client
#     * Serve the model
# * [Cleanup](#cleanup)

# <a id="set-up-environment"></a>
# ## 1. Set up Environment
# ### 1.a. Imports

import os
import tensorflow as tf

# ### 1.b. Retrieve Workspace
# If you haven't created a Workspace, please follow [this notebook]("../../../configuration.ipynb") to do so. If you have, run the codeblock below to retrieve it.

# +
from azureml.core import Workspace

# Reads the workspace details from the local config.json written by the
# configuration notebook.
ws = Workspace.from_config()
print(ws.name, ws.resource_group, ws.location, ws.subscription_id, sep = '\n')
# -

# <a id="construct-model"></a>
# ## 2. Construct model
# ### 2.a. Image preprocessing
# We'd like our service to accept JPEG images as input. However the input to SSD-VGG is a float tensor of shape \[1, 300, 300, 3\]. The first dimension is batch, then height, width, and channels (i.e. NHWC). To bridge this gap, we need code that decodes JPEG images and resizes them appropriately for input to SSD-VGG. The Accelerated AI service can execute TensorFlow graphs as part of the service and we'll use that ability to do the image preprocessing. This code defines a TensorFlow graph that preprocesses an array of JPEG images (as TensorFlow strings) and produces a tensor that is ready to be featurized by SSD-VGG.
#
# **Note:** Expect to see TF deprecation warnings until we port our SDK over to use Tensorflow 2.0.

# +
# Input images as a two-dimensional tensor containing an arbitrary number of images represented a strings
import azureml.accel.models.utils as utils
tf.reset_default_graph()

# NOTE(review): tf.placeholder / tf.reset_default_graph are TF 1.x graph-mode
# APIs — this notebook assumes TensorFlow 1.x (see the deprecation note above).
in_images = tf.placeholder(tf.string)
# Decode + resize to the 300x300 input SSD-VGG expects (see markdown above).
image_tensors = utils.preprocess_array(in_images, output_width=300, output_height=300, preserve_aspect_ratio=False)
print(image_tensors.shape)
# -

# ### 2.b. Featurizer
# The SSD-VGG model is different from our other models in that it generates 12 tensor outputs. These corresponds to x,y displacements of the anchor boxes and the detection confidence (for 21 classes). Because these outputs are not convenient to work with, we will later use a pre-defined post-processing utility to transform the outputs into a simplified list of bounding boxes with their respective class and confidence.
#
# For more information about the output tensors, take this example: the output tensor 'ssd_300_vgg/block4_box/Reshape_1:0' has a shape of [None, 37, 37, 4, 21]. This gives the pre-softmax confidence for 4 anchor boxes situated at each site of a 37 x 37 grid imposed on the image, one confidence score for each of the 21 classes. The first dimension is the batch dimension. Likewise, 'ssd_300_vgg/block4_box/Reshape:0' has shape [None, 37, 37, 4, 4] and encodes the (cx, cy) center shift and rescaling (sw, sh) relative to each anchor box. Refer to the [SSD-VGG paper](https://arxiv.org/abs/1512.02325) to understand how these are computed. The other 10 tensors are defined similarly.
# +
from azureml.accel.models import SsdVgg

# Download/load the frozen SSD-VGG graph into ~/models.
saved_model_dir = os.path.join(os.path.expanduser('~'), 'models')
model_graph = SsdVgg(saved_model_dir, is_frozen = True)

# List the model's input/output tensor names and dimensions for reference.
print('SSD-VGG Input Tensors:')
for idx, input_name in enumerate(model_graph.input_tensor_list):
    print('{}, {}'.format(input_name, model_graph.get_input_dims(idx)))

print('SSD-VGG Output Tensors:')
for idx, output_name in enumerate(model_graph.output_tensor_list):
    print('{}, {}'.format(output_name, model_graph.get_output_dims(idx)))

# Wire the preprocessing output into the featurizer; yields the 12 SSD outputs.
ssd_outputs = model_graph.import_graph_def(image_tensors, is_training=False)
# -

# ### 2.c. Save Model
# Now that we loaded both parts of the tensorflow graph (preprocessor and SSD-VGG featurizer), we can save the graph and associated variables to a directory which we can register as an Azure ML Model.

# +
model_name = "ssdvgg"
model_save_path = os.path.join(saved_model_dir, model_name, "saved_model")
print("Saving model in {}".format(model_save_path))

# Give each of the 12 output tensors a stable name ('out_0' .. 'out_11') in
# the SavedModel signature.
output_map = {}
for i, output in enumerate(ssd_outputs):
    output_map['out_{}'.format(i)] = output

with tf.Session() as sess:
    model_graph.restore_weights(sess)
    tf.saved_model.simple_save(sess, model_save_path,
                               inputs={'images': in_images},
                               outputs=output_map)
# -

# ### 2.d. Important! Save names of input and output tensors
#
# These input and output tensors that were created during the preprocessing and classifier steps are also going to be used when **converting the model** to an Accelerated Model that can run on FPGA's and for **making an inferencing request**. It is very important to save this information!

# + tags=["register model from file"]
input_tensors = in_images.name

# We will use the list of output tensors during inferencing
output_tensors = [output.name for output in ssd_outputs]

# However, for multiple output tensors, our AccelOnnxConverter will
# accept comma-delimited strings (lists will cause error)
output_tensors_str = ",".join(output_tensors)

print(input_tensors)
print(output_tensors)
# -

# <a id="create-image"></a>
# ## 3. Create AccelContainerImage
# Below we will execute all the same steps as in the [Quickstart](./accelerated-models-quickstart.ipynb#create-image) to package the model we have saved locally into an accelerated Docker image saved in our workspace. To complete all the steps, it may take a few minutes. For more details on each step, check out the [Quickstart section on model registration](./accelerated-models-quickstart.ipynb#register-model).

# +
from azureml.core import Workspace
from azureml.core.model import Model
from azureml.core.image import Image
from azureml.accel import AccelOnnxConverter
from azureml.accel import AccelContainerImage

# Retrieve workspace
ws = Workspace.from_config()
print("Successfully retrieved workspace:", ws.name, ws.resource_group, ws.location, ws.subscription_id, '\n')

# Register model
registered_model = Model.register(workspace = ws,
                                  model_path = model_save_path,
                                  model_name = model_name)
print("Successfully registered: ", registered_model.name, registered_model.description, registered_model.version, '\n', sep = '\t')

# Convert model (note the comma-joined tensor-name string — see section 2.d).
convert_request = AccelOnnxConverter.convert_tf_model(ws, registered_model, input_tensors, output_tensors_str)

# If it fails, you can run wait_for_completion again with show_output=True.
convert_request.wait_for_completion(show_output=False)
converted_model = convert_request.result
print("\nSuccessfully converted: ", converted_model.name, converted_model.url, converted_model.version, converted_model.id, converted_model.created_time, '\n')

# Package into AccelContainerImage
image_config = AccelContainerImage.image_configuration()
# Image name must be lowercase
image_name = "{}-image".format(model_name)
image = Image.create(name = image_name,
                     models = [converted_model],
                     image_config = image_config,
                     workspace = ws)
image.wait_for_creation()
print("Created AccelContainerImage: {} {} {}\n".format(image.name, image.creation_state, image.image_location))
# -

# <a id="deploy-image"></a>
# ## 4. Deploy image
# Once you have an Azure ML Accelerated Image in your Workspace, you can deploy it to two destinations, to a Databox Edge machine or to an AKS cluster.
#
# ### 4.a. Deploy to Databox Edge Machine using IoT Hub
# See the sample [here](https://github.com/Azure-Samples/aml-real-time-ai/) for using the Azure IoT CLI extension for deploying your Docker image to your Databox Edge Machine.
#
# ### 4.b. Deploy to AKS Cluster
# Same as in the [Quickstart section on image deployment](./accelerated-models-quickstart.ipynb#deploy-image), we are going to create an AKS cluster with FPGA-enabled machines, then deploy our service to it.
# #### Create AKS ComputeTarget # + from azureml.core.compute import AksCompute, ComputeTarget # Uses the specific FPGA enabled VM (sku: Standard_PB6s) # Standard_PB6s are available in: eastus, westus2, westeurope, southeastasia prov_config = AksCompute.provisioning_configuration(vm_size = "Standard_PB6s", agent_count = 1, location = "eastus") aks_name = 'aks-pb6-obj' # Create the cluster aks_target = ComputeTarget.create(workspace = ws, name = aks_name, provisioning_configuration = prov_config) # - # Provisioning an AKS cluster might take awhile (15 or so minutes), and we want to wait until it's successfully provisioned before we can deploy a service to it. If you interrupt this cell, provisioning of the cluster will continue. You can re-run it or check the status in your Workspace under Compute. aks_target.wait_for_completion(show_output = True) print(aks_target.provisioning_state) print(aks_target.provisioning_errors) # #### Deploy AccelContainerImage to AKS ComputeTarget # + from azureml.core.webservice import Webservice, AksWebservice # Set the web service configuration (for creating a test service, we don't want autoscale enabled) # Authentication is enabled by default, but for testing we specify False aks_config = AksWebservice.deploy_configuration(autoscale_enabled=False, num_replicas=1, auth_enabled = False) aks_service_name ='my-aks-service' aks_service = Webservice.deploy_from_image(workspace = ws, name = aks_service_name, image = image, deployment_config = aks_config, deployment_target = aks_target) aks_service.wait_for_deployment(show_output = True) # - # <a id="test-service"></a> # ## 5. Test the service # <a id="create-client"></a> # ### 5.a. Create Client # The image supports gRPC and the TensorFlow Serving "predict" API. We have a client that can call into the docker image to get predictions. 
# # **Note:** If you chose to use auth_enabled=True when creating your AksWebservice.deploy_configuration(), see documentation [here](https://docs.microsoft.com/en-us/python/api/azureml-core/azureml.core.webservice(class)?view=azure-ml-py#get-keys--) on how to retrieve your keys and use either key as an argument to PredictionClient(...,access_token=key). # + # Using the grpc client in AzureML Accelerated Models SDK from azureml.accel.client import PredictionClient address = aks_service.scoring_uri ssl_enabled = address.startswith("https") address = address[address.find('/')+2:].strip('/') port = 443 if ssl_enabled else 80 # Initialize AzureML Accelerated Models client client = PredictionClient(address=address, port=port, use_ssl=ssl_enabled, service_name=aks_service.name) # - # You can adapt the client [code](https://github.com/Azure/aml-real-time-ai/blob/master/pythonlib/amlrealtimeai/client.py) to meet your needs. There is also an example C# [client](https://github.com/Azure/aml-real-time-ai/blob/master/sample-clients/csharp). # # The service provides an API that is compatible with TensorFlow Serving. There are instructions to download a sample client [here](https://www.tensorflow.org/serving/setup). # <a id="serve-model"></a> # ### 5.b. Serve the model # The SSD-VGG model returns the confidence and bounding boxes for all possible anchor boxes. As mentioned earlier, we will use a post-processing routine to transform this into a list of bounding boxes (y1, x1, y2, x2) where x, y are fractional coordinates measured from left and top respectively. A respective list of classes and scores is also returned to tag each bounding box. Below we make use of this information to draw the bounding boxes on top the original image. Note that in the post-processing routine we select a confidence threshold of 0.5. 
# +
import cv2
from matplotlib import pyplot as plt

# One RGB colour per class id (21 entries, matching SSD-VGG's 21 classes).
colors_tableau = [(255, 255, 255), (31, 119, 180), (174, 199, 232), (255, 127, 14),
                  (255, 187, 120), (44, 160, 44), (152, 223, 138), (214, 39, 40),
                  (255, 152, 150), (148, 103, 189), (197, 176, 213), (140, 86, 75),
                  (196, 156, 148), (227, 119, 194), (247, 182, 210), (127, 127, 127),
                  (199, 199, 199), (188, 189, 34), (219, 219, 141), (23, 190, 207),
                  (158, 218, 229)]


def draw_boxes_on_img(img, classes, scores, bboxes, thickness=2):
    """Draw one labelled rectangle per detection directly onto `img`.

    `bboxes` holds fractional (y1, x1, y2, x2) coordinates (see the serve-model
    section above), so each corner is scaled by the image height/width.  OpenCV
    expects points in (x, y) order, hence the [::-1] reversals.
    """
    height, width = img.shape[0], img.shape[1]
    for idx in range(bboxes.shape[0]):
        box = bboxes[idx]
        color = colors_tableau[classes[idx]]

        # Corner points in (row, col) order, scaled to pixel coordinates.
        top_left = (int(box[0] * height), int(box[1] * width))
        bottom_right = (int(box[2] * height), int(box[3] * width))
        cv2.rectangle(img, top_left[::-1], bottom_right[::-1], color, thickness)

        # Caption "<class>/<score>", anchored slightly above the box corner.
        caption = '%s/%.3f' % (classes[idx], scores[idx])
        anchor = (top_left[0] - 5, top_left[1])
        cv2.putText(img, caption, anchor[::-1], cv2.FONT_HERSHEY_DUPLEX, 0.4, color, 1)


# +
import azureml.accel._external.ssdvgg_utils as ssdvgg_utils

# Score the sample image through the deployed service, then collapse the raw
# SSD output tensors into per-detection (class, score, bbox) triples.
result = client.score_file(path="meeting.jpg", input_name=input_tensors, outputs=output_tensors)
classes, scores, bboxes = ssdvgg_utils.postprocess(result, select_threshold=0.5)

# OpenCV loads BGR; convert to RGB so matplotlib displays the colours correctly.
img = cv2.cvtColor(cv2.imread('meeting.jpg', 1), cv2.COLOR_BGR2RGB)
draw_boxes_on_img(img, classes, scores, bboxes)
plt.imshow(img)
# -

# <a id="cleanup"></a>
# ## 6. Cleanup
# It's important to clean up your resources, so that you won't incur unnecessary costs. In the [next notebook](./accelerated-models-training.ipynb) you will learn how to train a classifier on a new dataset using transfer learning.

aks_service.delete()
aks_target.delete()
image.delete()
registered_model.delete()
converted_model.delete()
how-to-use-azureml/deployment/accelerated-models/accelerated-models-object-detection.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # Comparison operators
# To understand the conditions in the if-else structures better, we look at **comparison operations**. That will help us formulate a wide variety of conditions.

# ### Inequality operators and bools

if 6 < 5:
    print("JA")

# Why is there no output from print()? Let's look at expressions with inequality signs (<, >) in detail:

print(6 < 5)
print(5 < 6)

# **True** and **False** (note the capitalization!) are further _fixed expressions_ in Python. Alongside strings, integers and floats, they form another data type - the **bool**.

b = False
print(b)

# More precisely, an if condition is only executed when what follows the if is a bool with the value True:

result = 5 < 6
if result:
    print("5 ist kleiner als 6")

print(5 < 6)

# ### The equality operator
# Besides inequalities we can of course also test for equality, namely with **==**

print(5 == 5)
print(5 == 4)

if 5 == 5:
    print("5 ist 5")

# Using the equality operator we can also test the states _greater than or equal_ (**>=**) and _less than or equal_ (**<=**):

print(5 < 5)
print(5 <= 5)
print(5 >= 5)

# ### Comparing strings
# We can compare not only numbers with each other, but also strings:

word = "Hallo"
print(word == "Hallo")
print(word == "Welt")
print(word == "hallo")

# ### The inequality operator
# We check for inequality with the **!=** sign

word = "Hallo"
print(word != "Hallo")
print(word != "Welt")

# Numbers can also be compared with each other for inequality:

zahl = 4
print(zahl != 4)
print(zahl != 5.5)

# ### By the way: this also works on strings!

# +
sentence = "Ja, die Monika studiert hier!"

if "!" in sentence:
    print("JA")
else:
    print("NEIN")
# -

# ## Booleans (`and` and `or`)

# +
age = 35

if age >= 30 and age <= 39:
    print("Diese Person ist in ihren 30-ern")
# -

age = 45
if age < 30 or age >= 40:
    print("Diese Person ist nicht in ihren 30-ern")

age = 25
print(age < 30)

# +
above_30 = age >= 30
print(above_30)

# +
age = 25
above_20 = age >= 20
print(above_20)

if age >= 20:
    print("if-Abfrage wurde ausgeführt")
# -

# ### Logic table
# When we combine conditions, the whole thing is only true if all conditions are true.

print(True and True)
print(True and False)
print(False and True)
print(False and False)

print(True or True)
print(True or False)
print(False or True)
print(False or False)

# ## Exercise
# What does the whole thing look like for "or"?

# ## The `not` operator
# If not necessary, better avoided, since double negation gets complicated. An "if not" can also be taken apart. See below.

# +
age = 25

if not age >= 30:
    print("ausgeführt")

if age < 30:
    print("ausgeführt")

# +
names = ["Max", "Nadine", "Edzard"]

if "Moritz" not in names:
    print("Moritz ist nicht in der Liste enthalten")

if not "Moritz" in names:
    print("Moritz ist nicht in der Liste enthalten")

if "Edzard" in names:
    print("Edzard ist in der Liste enthalten")
# -

# ## Exercise
#
# Write a check that determines for persons whether they drink alcohol or not?
# + country = "US" age = 50 # schreibe hier deinen Code if country == "US" and age >= 21: print("Person darf Alkohol trinken.") elif country == "CH" and age >= 16: print("Person darf Alkohol trinken.") else: print("Person darf keinen Alkohol trinken.") # + country = "CH" age = 17 # schreibe hier deinen Code if country == "US" and age >= 21: print("Person darf Alkohol trinken.") elif country == "CH" and age >= 16 and age <18: print("Person darf nur leichte alkoholische Getränke trinken.") elif country == "CH" and age >= 18: print("Person darf alle Arten von Alkohol trinken.") else: print("Person darf keinen Alkohol trinken.") # - # ## Vergleichsoperatoren und Listen # Wir können mit **in** checken, ob ein Element in einem anderen Element enthalten ist. # ### Der in-Operator und Listen # Operatoren gibt es auch in Bezug auf Listen; wir können etwa mit dem **in**-Operator prüfen, ob ein Element in einer Liste enthalten ist. # # # Formal sieht die Syntax so aus: **Element <span style="color:green">in</span> Liste** # # + students = ["Max", "Monika", "Erik", "Franziska", "Edzard"] print("Monika" in students) print("Moritz" in students) print("Edzard" in students) # - # Das Resultat einer solchen Abfrage ist ein Bool, d. h., dass der Wert entweder True oder False ist. Somit können wir Ausdrücke mit dem in-Operator auch in if-else-Strukturen verwenden: # + if "Monika" in students: print("Ja, die Monika studiert hier!") else: print("Nein, die Monika studiert hier nicht!") if "Moritz" in students: print("Ja, der Moritz studiert hier!") if "Edzard" in students: print("Nein, Edzard studiert hier.") if "Peter" not in students: print("Nein, Peter studiert hier nicht.") else: print("Nein, der Moritz studiert hier nicht!") # - # ### Der in-Operator und Strings # Tatsächlich lässt sich der in-Operator auch auf Strings anwenden. Wir können also z. B. checken, ob ein Buchstabe bzw. ein Zeichen in einem Wort enthalten ist, oder ein Wort in einem Satz, usw. 
# + sentence = "Ja, die Monika studiert hier!" if "!" in sentence: print("JA") else: print("NEIN") # + word = "Studium" if "udi" in word: print("JA") else: print("NEIN") # - # ## Übung # # Zurück zu unserem Shop: # * Artikel, die zwischen 0 und 20 (einschließlich) CHF kosten, werden um 20 % reduziert; # * Artikel, die zwischen 20 (nicht einschließlich) und 50 CHF (einschließlich) kosten, werden um 40 % reduziert. # * Alle anderen Artikel, also solche, die mehr als 50 CHF kosten, werden um 60 % reduziert. # * **NEU** Weil crazy September ist sind Artikel deren Preis mit einer 5 anfangen oder eine 5 beinhalten sind umsonst! # # Berechne nun für jeden der alten Preise aus der Liste _prices_ die passenden reduzierten Preise und speichere sie in der neuen Liste new_prices. Gib diese Liste schließlich aus. # + prices = [2, 50, 70, 30, 555] new_prices =[] price = prices[0] if "5" in str(price): new_price = 0 print("Artikel ist gratis.") elif price <= 20: newprice = round(price*0.8,2) print("rabatt = 20%," + "Rabattpreis = " + str(newprice)) elif price >20 and price <=50: newprice = round(price*0.6,2) print("rabatt = 40%, " + "Rabattpreis = " + str(newprice)) elif price >50: newprice = round(price*0.4,2) print("rabatt = 60%, " + "Rabattpreis = " + str(newprice)) # hier kommt deine if-elif-else-Struktur hin # schreibe hier deinen Code print(new_prices) # - 10%3 price=5 #% = modolo = gibt den Rest an if price%5 == 0: print("Artikel ist gratis.")
04 Python Teil 2/02 Vergleichsoperatoren.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Importing some utilities # %cd ../.. import glob import sys from dmg.realism.mle import whichFitsBetter from scripts.modelSet import datasets_supported msetObject = datasets_supported['rds-genmymodel'] train_path = 'data/rds-genmymodel/preprocess' backend = 'python' # # Loading training set Gs = [msetObject.getGraphReal(f,backend) for f in glob.glob(train_path + "/*")] import matplotlib.pyplot as plt numberNodes = [len(G) for G in Gs] plt.hist(numberNodes, bins = 100, alpha=0.5, density = False) plt.boxplot(numberNodes, showfliers=False, labels=['Rds-GenMyModel'])
notebooks/datasetExploration/rds-genmymodel.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + #default_exp callback.data # - # # Data Callbacks # # > Callbacks which work with a learner's data #export from fastai2.basics import * from nbdev.showdoc import * from fastai2.test_utils import * #export class CollectDataCallback(Callback): "Collect all batches, along with `pred` and `loss`, into `self.data`. Mainly for testing" def begin_fit(self): self.data = L() def after_batch(self): self.data.append(to_detach((self.xb,self.yb,self.pred,self.loss))) #export @delegates() class WeightedDL(TfmdDL): def __init__(self, dataset=None, bs=None, wgts=None, **kwargs): super().__init__(dataset=dataset, bs=bs, **kwargs) wgts = array([1.]*len(dataset) if wgts is None else wgts) self.wgts = wgts/wgts.sum() def get_idxs(self): if self.n==0: return [] if not self.shuffle: return super().get_idxs() return list(np.random.choice(self.n, self.n, p=self.wgts)) #export @patch @delegates(DataSource.databunch) def weighted_databunch(self:DataSource, wgts, bs=16, **kwargs): xtra_kwargs = [{}] * (self.n_subsets-1) return self.databunch(bs=bs, dl_type=WeightedDL, dl_kwargs=({'wgts':wgts}, *xtra_kwargs), **kwargs) n = 160 dsrc = DataSource(torch.arange(n).float()) dbch = dsrc.weighted_databunch(wgts=range(n), bs=16) learn = synth_learner(data=dbch, cb_funcs=CollectDataCallback) learn.fit(1) t = concat(*learn.collect_data.data.itemgot(0,0)) plt.hist(t); # ## Export - #hide from nbdev.export import notebook2script notebook2script()
nbs/14a_callback.data.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: venv # language: python # name: venv # --- # # "Pythonic javascript" # > Comparison of python patterns and the javascript equivalent # - toc: false # - branch: master # - categories: [code snippets, python, javascript] # Content from: # * [Javascript for Python programmers](https://mike.depalatis.net/blog/javascript-for-python-programmers.html) # * [Javascript Object Literal](https://www.dyn-web.com/tutorials/object-literal/) # * [JavaScript Module Pattern: In-Depth](http://www.adequatelygood.com/JavaScript-Module-Pattern-In-Depth.html) # * [Python vs Javascript](https://realpython.com/python-vs-javascript/) # ## Exception handling # + # Python try: thing() except Exception: print("oh no!") raise ValueError("not a good value") # - # ```javascript # // Javascript # try { # thing(); # } catch (error) { # console.error("oh no!"); # } # # throw "not a good value"; # ``` # ## Iterators # + # Python arr = [1, 2, 3] obj = { "a": 1, "b": 2, "c": 3 } for val in arr: print(val) for key in obj: print(key) # - # ```javascript # // Javascript # var arr = [1, 2, 3]; # var obj = { # a: 1, # b: 2, # c: 3 # }; # # for (let val of arr) { # console.log(val); # } # # // or... 
# arr.forEach((value, index) => { # console.log(value); # }); # # for (let key in obj) { # console.log(key); # } # ``` # ## Generators # Python def gen(x): while True: yield x x = x + 1 # ```javascript # // Javascript # function* gen(x) { # while (true) { # yield x; # x++; # } # } # ``` # ## Classes # + # Python class Thing: def __init__(self, a): self.a = a def add_one(self): return self.a + 1 class OtherThing(Thing): def __init__(self, a, b): super(OtherThing, self).__init__(a) self.b = b def add_things(self): return self.a + self.b # - # ```javascript # // Javascript # class Thing { # constructor(a) { # this.a = a; # } # # addOne() { # return this.a + 1; # } # } # # class OtherThing extends Thing { # constructor(a, b) { # super(a); # this.b = b; # } # # addThings() { # return this.a + this.b; # } # } # ``` # ## Functional programming # ### Lambdas # Python expression = lambda a, b: a + b # ```javascript # // Javascript # // Arrow functions are more powerful than Python lambdas, but not in # // this example! # let expression = (a, b) => a + b; # # // or... # let sameThing = function (a, b) { # return a + b; # } # ``` # # General Javascript # ## Object Literal # A JavaScript object literal is a comma-separated list of name-value pairs wrapped in curly braces. Object literals encapsulate data, enclosing it in a tidy package. This minimizes the use of global variables which can cause problems when combining code. # # Object literals are defined using the following syntax rules: # # * A colon separates property name from value. # * A comma separates each name-value pair from the next. # * There should be no comma after the last name-value pair. # ```javascript # var myObject = { # sProp: 'some string value', # numProp: 2, # bProp: false # }; # ``` # ## Functions # Unlike constants, variables in JavaScript don’t need an initial value. 
# You can provide one later:
#
# ```javascript
# let name;
# name = 'Alice';
# ```
#
# When you leave off the initial value, you create what's called a variable declaration rather than a variable definition. Such variables automatically receive a special value of undefined, which is one of the primitive types in JavaScript. This is different in Python, where you always define variables except for variable annotations. But even then, these variables aren't technically declared.

# ## Arrow functions

# Notice that there's no function keyword anymore, and the return statement is implicit. The arrow symbol (=>) separates the function's arguments from its body.

# When you want to return an object literal from an arrow function, you need to wrap it in parentheses to avoid ambiguity with a block of code:
#
# ```javascript
# let add = (a, b) => ({
#     result: a + b
# });
# ```
#
# Otherwise, the function body would be confused for a block of code without any return statements, and the colon would create a labeled statement rather than a key-value pair.

# ```javascript
# // JavaScript function
# function funcName(param) {
#     return param + 10;
# }
#
# // JavaScript arrow function
# const funcName = (param) => param + 2
# ```

# ## Anonymous Closures

# This is the fundamental construct that makes it all possible, and really is the single best feature of JavaScript. We'll simply create an anonymous function, and execute it immediately. All of the code that runs inside the function lives in a closure, which provides privacy and state throughout the lifetime of our application.
#
# ```javascript
# (function () {
#     // ... all vars and functions are in this scope only
#     // still maintains access to all globals
# }());
# ```
#
# Notice the () around the anonymous function. This is required by the language, since statements that begin with the token function are always considered to be function declarations. Including () creates a function expression instead.
_notebooks/2020-11-04-pythonic-javascript.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: insight
#     language: python
#     name: insight
# ---

# + [markdown] toc=true
# <h1>Table of Contents<span class="tocSkip"></span></h1>
# <div class="toc"><ul class="toc-item">
# <li>Imports</li><li>Load data</li><li>Format Data &amp; add additional columns</li>
# <li>EDA - Exploratory Data Analysis</li><li>Plot Histograms</li><li>Pandas Testing</li>
# </ul></div>
# -

# # Imports

# +
import csv
import os
import pandas as pd
import copy
import matplotlib.pyplot as plt
import numpy as np
import seaborn as sns

# sklearn
from sklearn.linear_model import LinearRegression
from sklearn import preprocessing
from sklearn.model_selection import KFold
from sklearn.preprocessing import OneHotEncoder
# -

# # Load data

filename = os.path.join('data', 'employee_retention_data.csv')

# Load data to pandas
df = pd.read_csv(filename)
df.head(5)

# # Format Data & add additional columns

# Parse the date columns (read_csv loads them as strings).
df['join_date'] = pd.to_datetime(df['join_date'])
df['quit_date'] = pd.to_datetime(df['quit_date'])

# An employee with no quit date is still employed.
df['still_working'] = pd.isnull(df['quit_date'])

# +
# Tenure in days for employees who quit (NaN for current employees).
df['duration'] = df['quit_date'] - df['join_date']
df['duration'] = df['duration'].dt.days
# -

# Duration for those still working is null.  Fill it with the tenure up to the
# last day covered by the data set (2015-12-13).
# (An alternative is to replace NaN quit_dates with "today".)
today = pd.to_datetime('2015-12-13')
duration_stillemployed = today - df.loc[df.still_working == True, 'join_date']
df.loc[df.still_working == True, 'duration'] = duration_stillemployed.dt.days
df.head()

# +
# Add "min" columns: these assume every current employee quits on the last day
# of the data set (2015/12/13).  Used for later analysis.
df['quit_date_min'] = df['quit_date']
df.loc[pd.isnull(df['quit_date']), ['quit_date_min']] = pd.to_datetime('2015/12/13')

# +
# duration_min in days (a lower bound on the true tenure).
df['duration_min'] = df['quit_date_min'] - df['join_date']
df['duration_min'] = df['duration_min'].dt.days
# -

# quit_date_min was only needed to derive duration_min; drop it.
# Fix: use the explicit columns= keyword — the positional axis argument of
# DataFrame.drop is deprecated.
df = df.drop(columns='quit_date_min')

# Join year as a plain integer column.
df['join_year'] = df['join_date'].map(lambda ts: ts.year)

# One boolean flag per join year.
df['j2011'] = df['join_date'] < pd.to_datetime('2012')
df['j2012'] = (df['join_date'] >= pd.to_datetime('2012')) & (df['join_date'] < pd.to_datetime('2013'))
df['j2013'] = (df['join_date'] >= pd.to_datetime('2013')) & (df['join_date'] < pd.to_datetime('2014'))
df['j2014'] = (df['join_date'] >= pd.to_datetime('2014')) & (df['join_date'] < pd.to_datetime('2015'))
df['j2015'] = (df['join_date'] >= pd.to_datetime('2015')) & (df['join_date'] < pd.to_datetime('2016'))

# Churn status: still_working as a float.
# Fix: the original first assigned df['quit_date'].notna().round(), which is
# the *inverse* of still_working, then immediately overwrote it with the
# correct value; the wrong (dead) assignment has been removed.
df['still_working_numeric'] = df['still_working'].astype(float)

df.head()

df.describe()

# # EDA - Exploratory Data Analysis

# ## Basic data properties

# Data types
print(df.dtypes)

# Number of unique companies
print('Unique company ids')
print(df.company_id.unique())
print('Number of unique companies = {}'.format(str(len(df.company_id.unique()))))

# Number of unique depts
# Fix: the headers printed for depts and seniority wrongly said "company ids".
print('Unique depts')
print(df.dept.unique())
print('Number of unique depts = {}'.format(str(len(df.dept.unique()))))

# Number of unique seniority values
print('Unique seniorities')
print(df.seniority.unique())
print('Number of unique seniorities = {}'.format(str(len(df.seniority.unique()))))

# Sort seniorities
foo = df.seniority.unique()
foo.sort()
print("Number of years experience when hired, sorted unique values: " + str(foo))

# +
# For some reason, some people have 98 or 99 years experience? Look at these values...

# experience = 98 - one lucky engineer
df[df['seniority'] == 98]
# -

# experience = 99
df[df['seniority'] == 99]

# +
# Replace these outliers with a more reasonable number of years.
df = df.replace({'seniority': 98}, 40)
df = df.replace({'seniority': 99}, 40)

# Verify it works.
# NOTE(review): relies on the outlier rows sitting at these positional
# indices — re-check if the input file changes.
df[24700:24704]
# -

# ## Number of goods vs bads (nans vs non-nans)

# Dataframes containing only non-nans ("good") and only nans ("bad").
df_good = df.dropna()
df_bad = df[pd.isnull(df['quit_date'])]

df_good.head(5)

df_bad.head(5)

# +
# Lengths (Nbad computed two ways as a sanity check).
N = len(df)
Ngood = len(df_good)
Nbad2 = N - Ngood
Nbad = len(df_bad)
# -

print('N={}, Ngood={}, Nbad={}, Nbad2={}'.format(str(N), str(Ngood), str(Nbad), str(Nbad2)))

# # Plot Histograms

# Copy before messing
df2 = df.copy()
df2.head()

# Durations of everyone who quit (still_working == False).
# Fix: the original comment said "all still working", which is the opposite
# of what the filter selects.
foo = df.loc[df['still_working'] == False, ['duration']]
plt.figure()
foo.plot.hist()

# +
# Same histogram, split by join year.

# Joined in 2011
foo = df.loc[(df['still_working'] == False) & (df['j2011']), ['duration']]
plt.figure(); foo.plot.hist(); plt.title('Joined 2011')

# Joined in 2012
foo = df.loc[(df['still_working'] == False) & (df['j2012']), ['duration']]
plt.figure(); foo.plot.hist(); plt.title('Joined 2012')

# Joined in 2013
foo = df.loc[(df['still_working'] == False) & (df['j2013']), ['duration']]
plt.figure(); foo.plot.hist(); plt.title('Joined 2013')

# Joined in 2014
foo = df.loc[(df['still_working'] == False) & (df['j2014']), ['duration']]
plt.figure(); foo.plot.hist(); plt.title('Joined 2014')

# Joined in 2015
foo = df.loc[(df['still_working'] == False) & (df['j2015']), ['duration']]
plt.figure(); foo.plot.hist(); plt.title('Joined 2015')

# +
# Appears that one group tends to quit after around 300-400 days; some
# stragglers (long tail).  The data for employees that joined in 2015 is
# highly biased (right-censored: they have had little time in which to quit).
# -

# # Pandas Testing

# Two equivalent ways of pulling out a column
a = df['company_id']
b = df.company_id

# +
# How to smartly index rows.
inds = (df['still_working'] == False) & (df['j2011'] == True)
# Note: the round brackets are necessary — without them, & binds tighter than
# ==, so e.g.  df['still_working'] == False & df['j2011']  produces an error.
# -

foo = df.loc[inds, ['duration', 'salary']]
foo.head()

# Another way: query strings
foo = df.query('company_id == 1 and not still_working')
foo.head()

# # Seaborn testing

# ## Pairplot

# sns.pairplot(df[['salary','seniority','duration','still_working_numeric']], hue='still_working_numeric');  # hue is float

sns.pairplot(df[['salary', 'seniority', 'duration', 'still_working']], hue='still_working')  # hue is boolean

sns.pairplot(df[['salary', 'seniority', 'duration', 'still_working_numeric']], hue='seniority')

# ## Histograms

# Histogram with multiple overlays (one per join year).
plt.figure(figsize=(16, 9))
plt.hist(df.query('j2011')['duration'], bins=np.linspace(0, 2000, 50))
plt.hist(df.query('j2012')['duration'], bins=np.linspace(0, 2000, 50))
plt.hist(df.query('j2013')['duration'], bins=np.linspace(0, 2000, 50))
plt.hist(df.query('j2014')['duration'], bins=np.linspace(0, 2000, 50))
plt.hist(df.query('j2015')['duration'], bins=np.linspace(0, 2000, 50))
plt.title('Days worked vs join date')

df.keys()

# ## Violin plot

# ### Example

# +
import seaborn as sns

sns.set(style="whitegrid", palette="pastel", color_codes=True)

# Load the example tips dataset
tips = sns.load_dataset("tips")

# Draw a nested violinplot and split the violins for easier comparison
sns.violinplot(x="day", y="total_bill", hue="smoker",
               split=True, inner="quart",
               palette={"Yes": "y", "No": "b"},
               data=tips)
sns.despine(left=True)
# -

tips.head()

# ### My data

# +
# Draw a nested violinplot and split the violins for easier comparison
plt.figure(figsize=(16, 9))
sns.violinplot(x="join_year", y="duration", hue="still_working",
               split=True, inner="quart",
               # palette={"Yes": "y", "No": "b"},
               data=df)
sns.despine(left=True)
# -

# ## FacetPlot

# ### Their data

# Keep our dataframe safe: the example below rebinds `df` to toy data.
mydf = df.copy()

# +
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt

sns.set(style="white", rc={"axes.facecolor": (0, 0, 0, 0)})

# Create the data
rs = np.random.RandomState(1979)
x = rs.randn(500)
g = np.tile(list("ABCDEFGHIJ"), 50)
df = pd.DataFrame(dict(x=x, g=g))
m = df.g.map(ord)
df["x"] += m

# Initialize the FacetGrid object
pal = sns.cubehelix_palette(10, rot=-.25, light=.7)
g = sns.FacetGrid(df, row="g", hue="g", aspect=15, height=.5, palette=pal)

# Draw the densities in a few steps
g.map(sns.kdeplot, "x", clip_on=False, shade=True, alpha=1, lw=1.5, bw=.2)
g.map(sns.kdeplot, "x", clip_on=False, color="w", lw=2, bw=.2)
g.map(plt.axhline, y=0, lw=2, clip_on=False)


# Define and use a simple function to label the plot in axes coordinates
def label(x, color, label):
    ax = plt.gca()
    ax.text(0, .2, label, fontweight="bold", color=color,
            ha="left", va="center", transform=ax.transAxes)


g.map(label, "x")

# Set the subplots to overlap
g.fig.subplots_adjust(hspace=-.25)

# Remove axes details that don't play well with overlap
g.set_titles("")
g.set(yticks=[])
g.despine(bottom=True, left=True)
# -

df.head()

# Restore my df
df = mydf.copy()

# ### My data

df.head()

# pal = sns.cubehelix_palette(10, rot=-.25, light=.7)
# g = sns.FacetGrid(df, col='duration', row="join_year", hue="salary", aspect=15, height=.5, palette=pal)
# g = sns.FacetGrid(df, row="join_year", aspect=15, height=.5, palette=pal);
g = sns.FacetGrid(df, row="join_year", hue="join_year", aspect=10, height=1, palette=pal)
# g = sns.FacetGrid(df, row="join_year", aspect=10, height=1, palette=pal);
g.map(sns.kdeplot, "duration", shade=True)
# g.map(sns.kdeplot, "duration", clip_on=False, shade=True, alpha=1, lw=1.5, bw=.2)
# g.map(sns.kdeplot, "duration", clip_on=False, color="w", lw=2, bw=.2)
# pal = sns.cubehelix_palette(10, rot=-.25, light=.7)

# ## Correlation heatmap
# (Fix: this section was mislabelled "FacetPlot / Their data" in the original.)

plt.figure(figsize=(10, 10))
sns.heatmap(df.corr(), cmap=sns.diverging_palette(240, 10, n=256))

# # Modeling

# +
from sklearn.ensemble import RandomForestClassifier, GradientBoostingClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import precision_score, recall_score, accuracy_score, f1_score
from sklearn.model_selection import train_test_split
from sklearn.naive_bayes import GaussianNB
from sklearn.preprocessing import StandardScaler
from sklearn.metrics import confusion_matrix
# -

target = 'still_working_numeric'

# NOTE(review): several of these engineered features (the dept_* one-hot
# columns, the *_zscore columns, previous_days_worked, step_up_*) are not
# created anywhere in this notebook — presumably built in a companion
# notebook; confirm they exist before fitting a model on them.
features = ['salary', 'seniority', 'dept',
            'dept_customer_service', 'dept_data_science', 'dept_design',
            'dept_engineer', 'dept_marketing', 'dept_sales',
            'salary', 'seniority',
            'internal_salary_zscore', 'profession_salary_zscore',
            'dept_experience_fraction', 'previous_days_worked',
            'career_fraction_in_company', 'profession_seniority_zscore',
            'dept_experience_disparity', 'step_up_1', 'step_up_2']

df.keys()
David_Stanley_data_challenge1b-logisticregression.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/acsnuisa/LinAlg-2021/blob/main/Assignment4_NUISA.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>

# + [markdown] id="oIhcJPw4Ifq7"
# # Linear Algebra for ECE
# ## Laboratory 4 : Matrices
# ![image](https://drive.google.com/uc?export=view&id=1xJLUKtpiXA1nezyfhoSLOtdcqgZn4JnX)
#
# $$
# [1]
# $$

# + [markdown] id="5AZfT1ngIfrC"
# Now that you have a fundamental knowledge about Python, we'll try to look into greater dimensions.

# + [markdown] id="z3yQzzctIfrD"
# ### Objectives
# At the end of this activity you will be able to:
# 1. Be familiar with matrices and their relation to linear equations.
# 2. Perform basic matrix operations.
# 3. Program and translate matrix equations and operations using Python.

# + [markdown] id="Ol9aor3vdvZu"
# ### Methods
#
# ![image](https://drive.google.com/uc?export=view&id=1pjAZ9KYD4b6KjWqH_JJ1GPZPJ6WPs5Gz)
#
# $$
# Task 1 Flowchart
# $$

# + [markdown] id="I-dPkcbpozut"
# ![image](https://drive.google.com/uc?export=view&id=15feMktAJLl4DKVMo6Io9V42M_9uYUtBD)
# ![image](https://drive.google.com/uc?export=view&id=1plBqZkKflEjDwNcEhOGKOkvai2Ktw5Hn)

# + [markdown] id="OoHJrqIuIfrG"
# ## Introduction to Matrices

# + [markdown] id="rmoXv48oIfrG"
# > A matrix is a two-dimensional array of numbers arranged in rows and columns. Matrices are a data structure that can be used to organize, store, and manipulate mathematical data. In the real world, matrices have a wide range of applications and uses. Matrices are useful when working with models that are based on systems of linear equations [1].
# >> For example, matrix *A* has two rows and three columns. # # + [markdown] id="cechyR2X1uy6" # $$ # A=\begin{bmatrix} -2 & 5 & 6\\ 5 & 2 & 7\end{bmatrix} \\ # $$ # # + [markdown] id="6K4gs9ki2jlU" # $$ # B=\begin{bmatrix} -8 & -4 \\ 23 & 12\\ 18 & 10\end{bmatrix}\\ # $$ # + [markdown] id="AnIaVJPq1w7C" # > * In contrast, matrix *B* has three rows and two columns, so it is a 3 x 2 matrix. \ # Keep in mind that: Rows x Columns! # # + [markdown] id="n9jrJRqu9d8V" # ### Matrix Dimensions # # # + [markdown] id="2I3urEt5b5FI" # > Khan Academy provided a general introduction into matrices so we can further understand its concept. The dimensions of a matrix tells its size: the number of rows and columns of the matrix, in that order. Since matrix *A* has two rows and three columns, we write its dimensions as 2 x 3, pronounced "two by three" [2]. # # $$ # A=\begin{bmatrix} -2 & 5 & 6\\ 5 & 2 & 7\end{bmatrix} \\ # $$ # + [markdown] id="t4UuCvxzAEHV" # #### Representing a linear system with matrices # + [markdown] id="SOO6-oklAKvR" # > Matrices can be used to solve systems of equations. However, we must learn how to represent systems with matrices [3]. First and foremost, the equation must be arranged properly in the standard form. Then we take its constants and put it into its matrix form from left to right (including the constant on the right side) # + [markdown] id="vMNHzYdvIfrG" # Let's say for example you have $A$ and $B$ as system of equation. # + [markdown] id="sMQ94Hln3PX2" # $$ # A = \left\{ # \begin{array}\ # -2x + 5y + 6z \\ # 5x + 2y +7z # \end{array}\ # \right. \\ # $$ # + [markdown] id="v_sfUjJk3Yhx" # $$ # B = \left\{ # \begin{array}\ # -8x - 4y\\ # 23x + 12y \\ # 18x + 10y # \end{array}\ # \right. \\ # $$ # + [markdown] id="GLGaMEOG2_xo" # We could see that $A$ is a system of 2 equations with 3 parameters. While $B$ is a system of 3 equations with 2 parameters. 
We can represent them as matrices by: # + [markdown] id="kTcyG8fm3A2P" # $$ # A=\begin{bmatrix} -2 & 5 & 6\\ 5 & 2 & 7\end{bmatrix} \\ # $$ # + [markdown] id="TtUHV4oI3F4C" # $$ # B=\begin{bmatrix} -8 & -4 \\ 23 & 12\\ 18 & 10\end{bmatrix}\\ # $$ # + [markdown] id="VR0hHUBCckV_" # ### Declaring Matrices # + [markdown] id="CD85IeetCSg-" # > The entities or numbers in matrices are called the elements of a matrix. These elements are arranged and ordered in rows and columns which form the list/array-like structure of matrices. And just like arrays, these elements are indexed according to their position with respect to their rows and columns. This can be reprsented just like the equation below. Whereas A is a matrix consisting of elements denoted by ai,j . Denoted by i is the number of rows in the matrix while j stands for the number of columns. # Do note that the size of a matrix is i×j . # # $$A=\begin{bmatrix} # a_{(0,0)}&a_{(0,1)}&\dots&a_{(0,j-1)}\\ # a_{(1,0)}&a_{(1,1)}&\dots&a_{(1,j-1)}\\ # \vdots&\vdots&\ddots&\vdots&\\ # a_{(i-1,0)}&a_{(i-1,1)}&\dots&a_{(i-1,j-1)} # \end{bmatrix} # $$ # + [markdown] id="qtSnxYlbIfrN" # > There are several ways of classifying matrices. Once could be according to their <b>shape</b> and another is according to their <b>element values</b>. We'll try to go through them. # + [markdown] id="HUOT_9BWIfrg" # ## Activity # + [markdown] id="8GUiKVDeIfrh" # ### Task 1 # + [markdown] id="jj9MWQHdIfrh" # Create a function named `desc_mat()` that througouhly describes a matrix, it should: <br> # 1. Displays the shape, size, and rank of the matrix. <br> # 2. Displays whether the matrix is square or non-square. <br> # 3. Displays whether the matrix is an empty matrix. <br> # 4. Displays if the matrix is an identity, ones, or zeros matrix <br> # # Use sample matrices in which their shapes are not lower than $(3,3)$. # In your methodology, create a flowchart discuss the functions and methods you have done. 
# Present your results in the results section showing the description of each matrix you have declared.

# + id="C4kEil7TIfrh"
## Function area

# + id="ciG90VBemu6N"
import numpy as np

# Plotting / linalg imports belong to the original lab template; guarded so
# this cell still runs where matplotlib is not installed.
try:
    import matplotlib.pyplot as plt
    import scipy.linalg as la
except ImportError:
    plt = None
    la = None
# %matplotlib inline

# + id="My0fHOH6TCVp"
def desc_mat(matrix):
    """Describe a matrix and return its properties as a dict.

    Per the Task 1 spec this prints the shape, size and rank of the matrix,
    whether it is square or non-square, and whether it is an identity, ones
    or zeros matrix.  For an empty matrix it prints 'Matrix is Null' and
    returns None.

    Fixes vs. the original: the size display and the identity/ones/zeros
    checks required by Task 1 were missing; the computed properties are now
    also returned (callers that ignored the old None return are unaffected).
    """
    if matrix.size == 0:
        print('Matrix is Null')
        return None

    # Guard ndim so 1-D inputs don't raise on shape[1].
    is_square = matrix.ndim == 2 and matrix.shape[0] == matrix.shape[1]
    props = {
        'shape': matrix.shape,
        'size': matrix.size,
        # NOTE: "rank" here follows the lab's convention of ndim (number of
        # dimensions), not the linear-algebra matrix rank.
        'rank': matrix.ndim,
        'is_square': is_square,
        'is_identity': is_square and np.array_equal(matrix, np.eye(matrix.shape[0])),
        'is_ones': np.array_equal(matrix, np.ones(matrix.shape)),
        'is_zeros': np.array_equal(matrix, np.zeros(matrix.shape)),
    }
    print(f'Matrix:\n{matrix}\n')
    print(f"Shape:\t{props['shape']}\nSize:\t{props['size']}\nRank:\t{props['rank']}")
    print(f"Is Square: {props['is_square']}")
    print(f"Is Identity: {props['is_identity']}\nIs Ones: {props['is_ones']}\nIs Zeros: {props['is_zeros']}\n")
    return props

# + id="WIdSjZKcIfrh"
## Matrix declarations

# + id="wIXjQh31TKkl"
T = np.array([
    [1, 2, 3],
    [4, 5, 5],
    [8, 7, 9]
])
E = np.array([
    [8, 9, 7],
    [6, 5, 4],
    [1, 2, 3]
])
A = np.array([])       # empty matrix
S = np.eye(69)         # identity matrix
U = np.ones((6, 9))    # ones matrix
R = np.zeros((3, 3))   # zeros matrix

# + id="uwD8YeVbIfrh"
## Test Areas

# +
desc_mat(T)

# +
desc_mat(E)

# +
desc_mat(A)

# +
desc_mat(S)

# +
desc_mat(U)

# +
desc_mat(R)

# + [markdown] id="e6ZGoLgLIfri"
# ### Task 2

# + [markdown] id="Z6yZ3_7pIfri"
# Create a function named `mat_operations()` that takes in two matrices as input parameters; it should:<br>
# 1. Determines if the matrices are viable for operation and returns your own error message if they are not viable.
# 2. Returns the sum of the matrices.
# 3. Returns the difference of the matrices.
# 4. Returns the element-wise multiplication of the matrices.
# 5. Returns the element-wise division of the matrices.
#
# Use at least sample matrices in which their shapes are not lower than $(3,3)$.
# In your methodology, create a flowchart discuss the functions and methods you have done. Present your results in the results section showing the description of each matrix you have declared.

# + id="XRtDBMVNIfri"
## Function area

# + id="ShOeTNKeGeJb"
import numpy as np
import matplotlib.pyplot as plt
import scipy.linalg as la
# %matplotlib inline

# + id="clqzhLn9tNHn"
def mat_operations(operation, matA, matB):
    """Apply an element-wise operation to two matrices.

    Review fixes: the first parameter was named ``np`` (shadowing numpy),
    the body operated on the module globals ``M1``/``M2`` instead of the
    ``matA``/``matB`` arguments, the emptiness check used ``or`` where
    both matrices must be non-empty, and an unknown operation was only
    detected when the shapes mismatched.

    Parameters
    ----------
    operation : str
        One of 'Add', 'Subtract', 'Multiply' or 'Divide'.
    matA, matB : np.ndarray
        Input matrices; must share the same shape.

    Returns
    -------
    np.ndarray or None
        The element-wise result, or None when the inputs are not viable
        (an error message is printed instead).
    """
    # Viability check 1: both matrices must be non-empty.
    if matA.size == 0 or matB.size == 0:
        print('One or Both of the Matrices are NULL')
        return None
    # Viability check 2: element-wise operations need identical shapes.
    if matA.shape != matB.shape:
        print(f'Syntax Error! Invalid Matrices detected.')
        return None
    # Dispatch table: result label, display symbol, numpy ufunc.
    operations = {
        'Add': ('Sum', '+', np.add),
        'Subtract': ('Difference', '-', np.subtract),
        'Multiply': ('Product', 'x', np.multiply),
        'Divide': ('Result', '/', np.divide),
    }
    if operation not in operations:
        print(f'Operation Not Found!')
        return None
    label, symbol, func = operations[operation]
    result = func(matA, matB)
    print(f'\nThe {label} of \n{matA} \n\n{symbol}\n\n {matB} \n\n is equal to:\n\n {result}')
    return result

# + id="X5UT3MtGIfri"
## Matrix declarations

# + id="g-C91tbQaBN0"
M1= np.array([
    [1,2,3],
    [4,5,5],
    [8,7,9]
])
M2= np.array([
    [8,9,7],
    [6,5,4],
    [1,2,3]
])

# + id="Sms7j22UIfri"
## Test Areas

# + colab={"base_uri": "https://localhost:8080/"} id="5YQrkezObaQI" outputId="98109f4a-39a5-477c-c352-645c3efe9080"
mat_operations('Add',M1,M2)

# + colab={"base_uri": "https://localhost:8080/"} id="-213AmwkbcB-" outputId="ef5b76ed-3365-4de5-e284-1063fd7dd0a6"
mat_operations('Subtract',M1,M2)

# + colab={"base_uri": "https://localhost:8080/"} id="KvhSCFlkbdue" outputId="2265d9e9-2de8-4b4f-a4d3-14d8ae43f7cb"
mat_operations('Multiply',M1,M2)

# + colab={"base_uri": "https://localhost:8080/"} id="stxyeuL3beps" outputId="10ad7c5d-36ef-4452-a153-7731cd5ac866"
mat_operations('Divide',M1,M2)

# + [markdown] id="V4LRAK6vIfri"
# ## Conclusion

# + [markdown] id="qI0-MOPMIfrj"
# > When high-speed computers with hierarchical memory (based on multiple levels
# of caches) are utilized to tackle huge computational workloads, efficient matrix computation organization becomes increasingly crucial. Matrix computations are used in the treatment of nearly all large-scale models [4].These jumble of numbers in the matrix format stems from systems of equations. Matrices are commonly seen in mathematics, but we've never actually seen how it is applied. Since matrices are a type of matrix that can be used to organize, store, and manipulate mathematical data. Hence, we can utilize this depending on its real world applications. Real world applications of matrices include encryption, games, economics and business, Physics, and Geology. In encryption, Matrices are used to scramble data for security reasons. They are used to encode and decode data. There is a key that aids in the encoding and decoding of data generated by matrices. Furthermore, games utilize matrices to modify the thing in three-dimensional space. 
They convert it from a three-dimensional matrix to a two-dimensional matrix as needed. In economics and business, it is used to research a company's trends, shares, and other factors. To develop business models, for example. In Physics there are different applications for Matrices. Electrical circuits, Quantum physics, and Optics are all studied using matrices. It aids in the estimation of battery power outputs and the conversion of electrical energy into another useable form through resistors. As a result, matrices play a significant part in calculations. Especially when applying Kirchoff's voltage and current rules to solve difficulties. It aids in the study and application of quantum physics. Lastly, in Geology it is generally used for seismic surveys [5]. These are few of the applications of Matrices, and it can solve various problems on different fields. Matrices solve problems in technology through it's first application which is encryption. In which it is used to scramble data for security purposes. It is also a way of providing quick approximations of more complicated calculations. # + [markdown] id="SLhsLo4030BY" # ## References # [1]<NAME> and <NAME>(Sep 5, 2021)"Math LibreTexts", Introduction to Matrices. Available: https://math.libretexts.org/Bookshelves/Applied_Mathematics/Applied_Finite_Mathematics_(Sekhon_and_Bloom)/02%3A_Matrices/2.01%3A_Introduction_to_Matrices. [Accessed: Sept. 22, 2021]. # # [2]"Khan Academy",Intro to matrices(n.d). Avaiable: https://www.khanacademy.org/math/precalculus/x9e81a4f98389efdf:matrices/x9e81a4f98389efdf:mat-intro/a/intro-to-matrices. [Accessed: Sept. 22, 2021]. # # [3]“Representing linear systems with matrices,”(n.d). Available: https://www.khanacademy.org/math/algebra-home/alg-matrices/alg-representing-systems-with-matrices/a/representing-systems-with-matrices. Available: http://www.vmsk.org/Layman.pdf. [Accessed: Sept. 22, 2021]. # # [4]<NAME>(2006)"Studies in Computational Mathematics",Applicability to other models. 
Available: https://www.sciencedirect.com/bookseries/studies-in-computational-mathematics. [Accessed: Sept. 23, 2021].
#
# [5]<NAME>(May 20, 2021)"Embibe", Where Are Matrices Used In Daily Life? Read Here To Know. Available: https://www.embibe.com/exams/where-are-matrices-used-in-daily-life/. [Accessed: Sept. 23,2021].
Assignment4_NUISA.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Problem 003 # # Largest prime factor # # The prime factors of 13195 are 5, 7, 13 and 29. # # What is the largest prime factor of the number 600851475143 ? # # # + from scripts import myfunc import time start_time = time.time() x = 600851475143 y = myfunc.prime_factor(x) print(y) print(time.time() - start_time) # -
notebooks/problem_solved/problem_003_Largest_prime_factor.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Imports from copy import deepcopy from dask import delayed import pandas as pd import dask.dataframe as dd from distributed import Client, LocalCluster from dask.dataframe.core import aca import scipy.stats as ss import numpy as np from collections import Counter from functools import partial # ## Resources # data_path = '../../../data/flights_data/trip_logs.parquet' # data_path = '/Users/nathanieldake/development/unsupervised/DSResearchSpikes/010_Column_Correlation/eda_tools/test_table_class.parquet' data_path = '/Users/nathanieldake/development/unsupervised/data/cw_data/Item_Level_Details_Original.parquet' cluster = LocalCluster(n_workers=6) client = Client(cluster) client REPLACE = 'replace' DROP_SAMPLES = 'drop_samples' DROP_FEATURES = 'drop_features' SKIP = 'skip' DEFAULT_REPLACE_NUMERIC = 0.0 DEFAULT_REPLACE_NOMINAL = 'MISSING' # ## Correlation Func Primitives # + def remove_na_rows(x, y): df = pd.DataFrame({'x': x, 'y': y}) df = df.dropna().reset_index(drop=True) return df['x'], df['y'] def nan_strategy(func): def inner(x, y, **kwargs): if kwargs.get('nan_strategy', 'skip') == DROP_SAMPLES: x, y = remove_na_rows(x, y) return func(x, y) return inner # - def identify_nominal_columns(df, include=['object', 'category']): """Given a dataset, identify categorical columns. 
Parameters: ----------- dataset : a pandas dataframe include : which column types to filter by; default: ['object', 'category']) Returns: -------- categorical_columns : a list of categorical columns Example: -------- >> df = pd.DataFrame({'col1': ['a', 'b', 'c', 'a'], 'col2': [3, 4, 2, 1]}) >> identify_nominal_columns(df) ['col1'] """ columns = list(df.select_dtypes(include=include).columns) return columns # + def conditional_entropy(x, y): """Calculates the conditional entropy of x given y: S(x|y) Wikipedia: https://en.wikipedia.org/wiki/Conditional_entropy Parameters ---------- x : array-like A sequence of measurements. y : array-like A sequence of measurements. Returns ------- float The total entropy of x given y Examples -------- >>> np.random.seed(1) >>> x = np.random.randint(0,2, size=10) >>> y = np.random.randint(0,2, size=10) >>> conditional_entropy(x,y) 0.606842558824411 """ y_counter = Counter(y) xy_counter = Counter(list(zip(x, y))) total_occurrences = sum(y_counter.values()) p_xy = np.array([val for val in xy_counter.values()])/total_occurrences p_y = np.array([y_counter[xy[1]] for xy in xy_counter.keys()])/total_occurrences entropy = np.sum((p_xy * np.log(p_y/p_xy))) return entropy @nan_strategy def cramers_v(x, y): """Calculates Cramer's V statistic for categorical-categorical association. Uses correction from Bergsma and Wicher, Journal of the Korean Statistical Society 42 (2013): 323-328. This is a symmetric coefficient: V(x,y) = V(y,x) Original function taken from: https://stackoverflow.com/a/46498792/5863503 Wikipedia: https://en.wikipedia.org/wiki/Cram%C3%A9r%27s_V Parameters ---------- x : array-like A sequence of categorical measurements. y : array-like A sequence of categorical measurements. Returns ------- float Coefficient in the range [0, 1]. 
Examples -------- >>> np.random.seed(1) >>> x = np.random.randint(0, 2, size=100) >>> y = x >>> cramers_v(x, y) 0.9795896894087645 """ confusion_matrix = pd.crosstab(x, y) chi2 = ss.chi2_contingency(confusion_matrix)[0] n = confusion_matrix.sum().sum() phi2 = chi2/n r, k = confusion_matrix.shape phi2corr = max(0, phi2-((k-1)*(r-1))/(n-1)) rcorr = r-((r-1)**2)/(n-1) kcorr = k-((k-1)**2)/(n-1) return np.sqrt(phi2corr/min((kcorr-1), (rcorr-1))) @nan_strategy def theils_u(x, y): """Calculates Theil's U statistic (Uncertainty coefficient) for categorical-categorical association. This is the uncertainty of x given y: value is on the range of [0,1] - where 0 means y provides no information about x, and 1 means y provides full information about x. Given the value of x, how many possible states does y have, and how often do they occur. This is an asymmetric coefficient: U(x,y) != U(y,x) Wikipedia: https://en.wikipedia.org/wiki/Uncertainty_coefficient Parameters ---------- x : array-like A sequence of categorical measurements. y : array-like A sequence of categorical measurements. Returns ------- float Coefficient in the range [0, 1]. Examples -------- >>> np.random.seed(1) >>> x = np.random.randint(0, 2, size=100) >>> y = x >>> theils_u(x, y) 1.0 """ s_xy = conditional_entropy(x, y) x_counter = Counter(x) total_occurrences = sum(x_counter.values()) p_x = list(map(lambda n: n/total_occurrences, x_counter.values())) s_x = ss.entropy(p_x) if s_x == 0: return 1 else: return (s_x - s_xy) / s_x @nan_strategy def correlation_ratio(categories, measurements): """Calculates the Correlation Ratio (sometimes marked by the greek letter Eta) for categorical-continuous association. Answers the question - given a continuous value of a measurement, is it possible to know which category is it associated with? Value is in the range [0,1], where 0 means a category cannot be determined by a continuous measurement, and 1 means a category can be determined with absolute certainty. 
Wikipedia: https://en.wikipedia.org/wiki/Correlation_ratio Parameters ---------- categories : array-like A sequence of categorical measurements. measurements : array-like A sequence of continuous measurements. Returns ------- float Coefficient in the range [0, 1]. Examples -------- >>> np.random.seed(1) >>> categories = np.random.randint(0,2, size=100) >>> measurements = np.random.rand(100) >>> correlation_ratio(categories, measurements) 0.042988734885557815 """ fcat, _ = pd.factorize(categories) cat_num = np.max(fcat)+1 y_avg_array = np.zeros(cat_num) n_array = np.zeros(cat_num) for i in range(0, cat_num): cat_measures = measurements.iloc[np.argwhere(fcat == i).flatten()] n_array[i] = len(cat_measures) y_avg_array[i] = np.average(cat_measures) y_total_avg = np.sum(np.multiply(y_avg_array, n_array))/np.sum(n_array) numerator = np.sum(np.multiply(n_array, np.power(np.subtract(y_avg_array, y_total_avg), 2))) denominator = np.sum(np.power(np.subtract(measurements, y_total_avg), 2)) if numerator == 0: eta = 0.0 else: eta = numerator/denominator return eta # - # ## Make a symmetrical Theils U with nested Delayed def theils_u_symmetrical(x, y, **kwargs): val_1 = delayed(theils_u)(x, y, **kwargs) val_2 = delayed(theils_u)(y, x, **kwargs) return delayed(np.mean)([val_1, val_2]).compute() # ## Apply-Concat-Apply function for Dask Distributed def dask_correlation_aca(corr_func, *args, **kwargs): my_kwargs = deepcopy(kwargs) chunk_kwargs = {'nan_strategy': my_kwargs.pop('nan_strategy', 'skip')} my_kwargs.update({'meta': float}) return aca(args, chunk=corr_func, aggregate=np.mean, chunk_kwargs=chunk_kwargs, **my_kwargs) # + def dask_pairwise_pearson(df, x_col, y_col, **kwargs): """ Parameters ---------- df : dask.dataframe The x_col : str Name of a numeric column. y_col : str Name of a numeric column. Returns ------- float Coefficient in the range [-1, 1]. 
""" val = df[[x_col, y_col]].corr().values.min() return val def dask_pairwise_corr_func(corr_func, df, x_col, y_col, **kwargs): return dask_correlation_aca(corr_func, df[x_col], df[y_col], **kwargs) # - # ## Correlation Getter class DaskCorrelationGetter(): def __init__(self, use_theils_u=False): """Wraps correlation methods for nominal and numeric series. Parameters ---------- use_theils_u : bool, default=False Whether or not to use a symmetric Theil's U for nominal-only columns """ self.use_theils_u = use_theils_u self.corr_funcs = self._initialize_corr_methods() def _initialize_corr_methods(self): numeric = dask_pairwise_pearson catnum = partial(dask_pairwise_corr_func, correlation_ratio) if self.use_theils_u: categorical = partial(dask_pairwise_corr_func, theils_u_symmetrical) else: categorical = partial(dask_pairwise_corr_func, cramers_v) return { 'numericnumeric': numeric, 'nominalnumeric': catnum, 'nominalnominal': categorical, 'numericnominal': catnum } def get_corr_value(self, df, x_col, y_col, x_type, y_type, **kwargs): key = x_type+y_type if key == 'numericnominal': x_col, y_col = y_col, x_col return self.corr_funcs[key](df, x_col, y_col, **kwargs) # ## Base Associations function def associations_dask(dataset, nominal_columns='auto', mark_columns=False, theils_u=True, nan_strategy=REPLACE, nan_replace_numeric=DEFAULT_REPLACE_NUMERIC, nan_replace_nominal=DEFAULT_REPLACE_NOMINAL): """ Calculate the correlation/strength-of-association of features in data-set with both categorical (eda_tools) and continuous features using: * Pearson's R for continuous-continuous cases * Correlation Ratio for categorical-continuous cases * Cramer's V or Theil's U for categorical-categorical cases **Returns:** a DataFrame of the correlation/strength-of-association between all features **Example:** see `associations_example` under `dython.examples` Parameters ---------- dataset : dask.dataframe.DataFrame The data-set for which the features' correlation is computed 
nominal_columns : string / list / NumPy ndarray Names of columns of the data-set which hold categorical values. Can also be the string 'all' to state that all columns are categorical, 'auto' (default) to try to identify nominal columns, or None to state none are categorical mark_columns : Boolean, default = False if True, output's columns' names will have a suffix of '(nom)' or '(con)' based on there type (eda_tools or continuous), as provided by nominal_columns theils_u : Boolean, default = False In the case of categorical-categorical feaures, use a symmetrical Theil's U instead of Cramer's V. Computation cost is 2x Theils U but will perform better than Cramer's V for higher cardinality. nan_strategy : string, default = 'replace' How to handle missing values: can be either 'drop_samples' to remove samples with missing values, 'drop_features' to remove features (columns) with missing values, or 'replace' to replace all missing values with the nan_replace_value. Missing values are None and np.nan. nan_replace_numeric : numeric, default = 0.0 The value used to replace missing values with. Only applicable when nan_strategy is set to 'replace' nan_replace_nominal: str, default = "MISSING" The value used to replace missing values with. Only applicable when nan_strategy is set to 'replace' """ print('WARNING: High Cardinality Nominal types (e.g. 
identifiers) will increase run-time non-linearly') dataset = dataset.select_dtypes(exclude=['datetime']) columns = dataset.columns if nominal_columns is None: nominal_columns = list() elif nominal_columns == 'all': nominal_columns = columns elif nominal_columns == 'auto': nominal_columns = identify_nominal_columns(dataset) numeric_columns = list(set(columns) - set(nominal_columns)) col_types = dict([(col,'nominal') if col in nominal_columns else (col,'numeric') for col in columns]) if nan_strategy == DROP_FEATURES: dataset.dropna(axis=1, inplace=True) elif nan_strategy == REPLACE: dataset[nominal_columns] = dataset[nominal_columns].fillna(nan_replace_nominal) dataset[numeric_columns] = dataset[numeric_columns].fillna(nan_replace_numeric) corrgttr = DaskCorrelationGetter(use_theils_u=theils_u) corr_dict = {} for i in range(0, len(columns)): col_i = columns[i] col_i_type = col_types[col_i] for j in range(i, len(columns)): col_j = columns[j] col_j_type = col_types[col_j] key = str(i)+'.'+str(j) if i == j: corr_dict[key] = 1.0 else: val = corrgttr.get_corr_value(df, col_i, col_j, col_i_type, col_j_type) corr_dict[key] = val corr_dict = delayed(corr_dict).compute() corr = pd.DataFrame(index=columns, columns=columns) for key, val in corr_dict.items(): col_i = columns[int(key.split('.')[0])] col_j = columns[int(key.split('.')[-1])] corr.loc[col_i, col_j] = val corr.loc[col_j, col_i] = val corr.fillna(value=np.nan, inplace=True) if mark_columns: marked_columns = [ '{} (nom)'.format(col) if col in nominal_columns else '{} (con)'.format(col) for col in columns ] corr.columns = marked_columns corr.index = marked_columns return corr # ## Load Test Data df = dd.read_parquet(data_path, engine='pyarrow') df = df.repartition(npartitions=6) df = df.persist() #drop identifiers df = df.drop(columns=[col for col in df.columns if '_id' in col]) # Make Nominal type categoricals = ['canceled', 'diverted'] for col in categoricals: df[col] = df[col].astype(str) # %%time corr_df = 
associations_dask(df, theils_u=True) corr_df.head()
notebooks/computer_science/dask-overview/dask_associations.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

from symfit import variables, Parameter, ODEModel, D, Fit, parameters
import numpy as np
import matplotlib.pyplot as plt

# +
# Fit a simple second-order decay A -> B to measured concentrations.
tdata = np.array([10, 26, 44, 70, 120])
adata = 10e-4 * np.array([44, 34, 27, 20, 14])
a, b, t = variables('a, b, t')
k = Parameter('k', 0.1)
a0 = 54 * 10e-4

model_dict = {
    D(a, t): - k * a**2,
    D(b, t): k * a**2,
}

ode_model = ODEModel(model_dict, initial={t: 0.0, a: a0, b: 0.0})

# Only [A] is observed; [B] is left free (b=None).
fit = Fit(ode_model, t=tdata, a=adata, b=None)
fit_result = fit.execute()

tvec = np.linspace(0, 500, 1000)
A, B = ode_model(t=tvec, **fit_result.params)
plt.plot(tvec, A, label='[A]')
plt.plot(tvec, B, label='[B]')
plt.scatter(tdata, adata)
plt.legend()
plt.show()
# -

# +
# Reversible two-step association: AA + B <-> AAB, AAB + B <-> BAAB.
AA, B, AAB, BAAB, t = variables('AA, B, AAB, BAAB, t')
k, p, l, m = parameters('k, p, l, m')

AA_0 = 10  # Some made up initial amount of [AA]
B = AA_0 - BAAB + AA  # [B] is not independent.

# BUG FIX: the rate expressions below previously used `A`, which at this
# point is the *numpy array* left over from the first cell, not a symfit
# variable. The declared species (and the one used in `initial=`) is `AA`.
model_dict = {
    D(BAAB, t): l * AAB * B - m * BAAB,
    D(AAB, t): k * AA * B - p * AAB - l * AAB * B + m * BAAB,
    D(AA, t): - k * AA * B + p * AAB,
}

model = ODEModel(model_dict, initial={t: 0.0, AA: AA_0, AAB: 0.0, BAAB: 0.0})

# Generate some data
tdata = np.linspace(0, 3, 1000)
# Eval the normal way.
AA, AAB, BAAB = model(t=tdata, k=0.1, l=0.2, m=0.3, p=0.3)
plt.plot(tdata, AA, color='red', label='[AA]')
plt.plot(tdata, AAB, color='blue', label='[AAB]')
plt.plot(tdata, BAAB, color='green', label='[BAAB]')
plt.plot(tdata, B(BAAB=BAAB, AA=AA), color='pink', label='[B]')
# plt.plot(tdata, AA + AAB + BAAB, color='black', label='total')
plt.legend()
plt.show()
# -
Simulations/kinetics/.ipynb_checkpoints/kinetics_symfit-checkpoint.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- import numpy as np import numpy.matlib import math import random import os import sys import time import tensorflow as tf import pickle import numpy.matlib import matplotlib.pyplot as plt import matplotlib.cm as cm from utils import * # %matplotlib inline # declare parameters, same as the inference declaration alphabet = ' <KEY>' rnn_size = 400 tsteps = 150 batch_size = 32 placeholder_shape = [None, tsteps, 3] kmixtures = 1 nmixtures = 8 v_len = len(alphabet) + 1 #plus one for <UNK> token tsteps_per_ascii =25 text_length = tsteps // tsteps_per_ascii save_path = './saved/model.ckpt' data_dir = './data' eos_prob = 0.4 # threshold probability for ending a stroke train = True data_scale = 50 grad_clip = 10.0 dropout = 0.85 optimizer = "rmsprop" num_layers = 3 # logger = Logger(log_dir, train) dataloader = DataLoader(data_dir, alphabet, batch_size, tsteps, data_scale, tsteps_per_ascii) # + # initial weight vector proposed in Alex_Graves Paper LSTM_initializer = tf.truncated_normal_initializer(mean=0., stddev=.075, seed=None, dtype=tf.float32) window_b_initializer = tf.truncated_normal_initializer(mean=-3.0, stddev=.25, seed=None, dtype=tf.float32) cell = [None] * num_layers for i in range(num_layers) : cell[i] = tf.contrib.rnn.LSTMCell(rnn_size, state_is_tuple=True, initializer=LSTM_initializer) # + input_data = tf.placeholder(dtype=tf.float32, shape=placeholder_shape) output_data = tf.placeholder(dtype=tf.float32, shape=placeholder_shape) istate_cell = [None] *num_layers outs_cell = [None] * num_layers fstate_cell = [None] * num_layers for i in range(num_layers) : istate_cell[i] = cell[i].zero_state(batch_size=batch_size, dtype=tf.float32) inputs = [tf.squeeze(i, [1]) for i in tf.split(input_data, tsteps, 1)] outs_cell[0], fstate_cell[0] = 
tf.contrib.legacy_seq2seq.rnn_decoder(inputs, istate_cell[0], cell[0], loop_function=None, scope='cell0') # + #attention mechanism def get_phi(length, a, b, k): u = np.linspace(0, length-1 , length) e = tf.multiply(b, - tf.square(tf.subtract(k,u))) phi = tf.multiply(a, tf.exp(e)) return tf.reduce_sum(phi, 1, keep_dims=True) # get the soft window def get_window(coef): [a, b, k, c] = coef length = c.get_shape()[1].value #number of items in sequence phi = get_phi(length, a, b, k) window = tf.squeeze(tf.matmul(phi,c), [1]) return window, phi # soft window parameters def get_coef(i, out_cell, kmixtures, prev_k, char_seq, reuse=True): hidden = out_cell.get_shape()[1] n_out = 3*kmixtures with tf.variable_scope('window',reuse=reuse): window_w = tf.get_variable("window_w", [hidden, n_out], initializer=LSTM_initializer) window_b = tf.get_variable("window_b", [n_out], initializer=window_b_initializer) co = tf.nn.xw_plus_b(out_cell, window_w, window_b) abk = tf.exp(tf.reshape(co, [-1, 3*kmixtures,1])) a, b, k = tf.split(abk, 3, 1) k = k + prev_k return a, b, k, char_seq # + #initial parameters init_kappa = tf.placeholder(dtype=tf.float32, shape=[None, kmixtures, 1]) char_seq = tf.placeholder(dtype=tf.float32, shape=[None, text_length, v_len]) wavg_prev_kappa = init_kappa prev_window = char_seq[:,0,:] #add soft window to the top of the first LSTM layer reuse = False for i in range(len(outs_cell[0])): coef = get_coef(i, outs_cell[0][i], kmixtures, wavg_prev_kappa, char_seq, reuse=reuse) (_, _, next_kappa, _) = coef window, phi = get_window(coef) #combine first layer output, soft-window, and original input text outs_cell[0][i] = tf.concat((outs_cell[0][i], window, inputs[i]), 1) wavg_prev_kappa = tf.reduce_mean(next_kappa, reduction_indices=1, keep_dims=True) # mean along kmixtures dimension reuse = True (alpha, beta, next_kappa, _) = coef # + # ----- finish building second recurrent cell for i in range(1, num_layers): scope = 'cell' + str(i) outs_cell[i], fstate_cell[i]= 
tf.contrib.legacy_seq2seq.rnn_decoder(outs_cell[i-1], istate_cell[i], cell[i], \ loop_function=None, scope=scope) #use scope from training r_out = tf.reshape(tf.concat(outs_cell[num_layers - 1], 1), [-1, rnn_size]) #concat outputs for efficiency # + #put a dense cap on top of the rnn cells (to interface with the mixture density network) n_out = 1 + nmixtures * 6 # params = end_of_stroke + 6 parameters per Gaussian with tf.variable_scope('mdn_dense'): output_w = tf.get_variable("output_w", [rnn_size, n_out], initializer=LSTM_initializer) output_b = tf.get_variable("output_b", [n_out], initializer=LSTM_initializer) output = tf.nn.xw_plus_b(r_out, output_w, output_b) #data flows through dense nn # - # MDN above the last LSTM layer def gaussian2d(x1, x2, m1, m2, s1, s2, r): # define gaussian mdn (eq 24, 25 from http://arxiv.org/abs/1308.0850) (sub1, sub2) = (tf.subtract(x1, m1), tf.subtract(x2, m2)) sum_1 = tf.square(tf.div(sub1, s1)) + tf.square(tf.div(sub2, s2)) Z = sum_1 - 2*tf.div(tf.multiply(rho, tf.multiply(sub1, sub2)), tf.multiply(s1, s2)) reg = 2*np.pi*tf.multiply(tf.multiply(s1, s2), tf.sqrt(1 - tf.square(r))) gaussian = tf.div(tf.exp(tf.div(-Z,2* (1 - tf.square(r)))), reg) return gaussian #coefficient for MDN layer, detail function can be found in related papers def get_mdn_coef(Z): global pi_hat, m1_hat, m2_hat, s1_hat, s2_hat, r_hat pi_hat, m1_hat, m2_hat, s1_hat, s2_hat, r_hat = tf.split(Z[:, 1:], 6, 1) eos = tf.sigmoid(-1*Z[:, 0:1]) pi = tf.nn.softmax(pi_hat) # softmax m1 = m1_hat; m2 = m2_hat # leave mu1, mu2 as they are s1 = tf.exp(s1_hat); s2 = tf.exp(s2_hat) # exp for sigmas r = tf.tanh(r_hat) # tanh for rho (squish between -1 and 1) return [[eos, pi, m1_hat, m2_hat, tf.exp(s1_hat), tf.exp(s2_hat), tf.tanh(r_hat)], [pi_hat, m1_hat, m2_hat, s1_hat, s2_hat, r_hat]] # loss function from the paper def get_loss(pi, x1_data, x2_data, eos_data, mu1, mu2, sigma1, sigma2, rho, eos): gaussian = gaussian2d(x1_data, x2_data, mu1, mu2, sigma1, sigma2, rho) term1 
= tf.reduce_sum(tf.multiply(gaussian, pi), 1, keep_dims=True) term1 = -tf.log(tf.maximum(term1, 1e-20)) term2 = -tf.log(tf.multiply(eos, eos_data) + tf.multiply(1-eos, 1-eos_data)) return tf.reduce_sum(term1 + term2) flat_target_data = tf.reshape(output_data,[-1, 3]) [x1_data, x2_data, eos_data] = tf.split(flat_target_data, 3, 1) retval = get_mdn_coef(output) [eos, pi, mu1, mu2, sigma1, sigma2, rho] = retval[0] pi_hat, mu1_hat, mu2_hat, sigma1_hat, sigma2_hat, rho_hat = retval[1] # + loss = get_loss(pi, x1_data, x2_data, eos_data, mu1, mu2, sigma1, sigma2, rho, eos) cost = loss / (batch_size * tsteps) # initial variables for training m_learning_rate = tf.Variable(0.0, trainable=False) m_decay = tf.Variable(0.0, trainable=False) m_momentum = tf.Variable(0.0, trainable=False) tvars = tf.trainable_variables() grads, _ = tf.clip_by_global_norm(tf.gradients(cost, tvars), grad_clip) if optimizer == 'adam': m_optimizer = tf.train.AdamOptimizer(learning_rate=m_learning_rate) elif optimizer == 'rmsprop': m_optimizer = tf.train.RMSPropOptimizer(learning_rate=m_learning_rate, decay=m_decay, momentum=m_momentum) else: raise ValueError("Optimizer type not recognized") train_op = m_optimizer.apply_gradients(zip(grads, tvars)) #load data from files input, output, _ , seq = dataloader.validation_data() valid_inputs = {input_data: input, output_data: output, char_seq: seq} #initialize training sess = tf.InteractiveSession() saver = tf.train.Saver(tf.global_variables()) # + print("start training...") #misc parameters for training momentum = 0.9 decay = 0.95 remember_rate = 0.99 nepochs = 100 learning_rate = 1e-4 lr_decay = 1.0 nbatches = 500 save = 500 total_step = nepochs * nbatches #initialize the network sess.run(tf.global_variables_initializer()) sess.run(tf.assign(m_decay, decay )) sess.run(tf.assign(m_momentum, momentum )) for e in range(0, nepochs): sess.run(tf.assign(m_learning_rate, learning_rate * (lr_decay ** e))) c = [None] * num_layers h = [None] * num_layers for 
counter in range(num_layers): c[counter] = istate_cell[counter].c.eval() h[counter] = istate_cell[counter].h.eval() kappa = np.zeros((batch_size, kmixtures, 1)) for b in range(nbatches): # current step i = e * nbatches + b #save model for every given point if i % save == 0 and (i != 0): saver.save(sess, save_path, global_step = i) print("model saved at" + str(i)) #load next batch for training x, y, s, ch = dataloader.next_batch() #feed the training set into network feed = {input_data: x, output_data: y, char_seq: ch, init_kappa: kappa} for j in range(num_layers): feed[istate_cell[j].c] = c[j] feed[istate_cell[j].h] = h[j] #run the network sess.run([cost, train_op], feed) feed.update(valid_inputs) feed[init_kappa] = np.zeros((batch_size, kmixtures, 1)) sess.run([cost], feed) #print out the process if i % 10 == 0: print(str(i) + '/' + str(total_step))
final project/train.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + # default_exp utils # - # # This submodule will contain all the utility methods which are used across all submodules # > plotting methods # # > general purpuse utility #hide from nbdev.showdoc import * #export from matplotlib import pyplot as plt #export def plot_x_y(x,y): """ this method will plt x and y """ plt.plot(x,y) plot_x_y([1,2,3],[2,4,6])
00_utils.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Learning With Errors (LWE) # ## import libs # + # #!conda install pycrypto # + import numpy as np import pandas as pd import scipy.stats from scipy import signal import math import itertools import random from Crypto.Util import number import matplotlib.pyplot as plt from matplotlib import collections as matcoll # - # ## Set vector s (secret) # Choose $s \in \mathbb{Z}^m_p$ with arbitrary $m \in \mathbb{N}$ #s = np.array([2, 3]) #s = np.array([10, 13, 9]) s = np.array([10, 13, 9, 11]) #s = np.array([10, 13, 9, 11, 3]) n = len(s) # ## parameters # + # modulus p = 17 # only prime numbers (it has to be a finite field) #p = number.getPrime(n) # using pycrypto lib (p = O(n^2)) print("Prime:", p) #size parameter m = 100 #m = min(2**(3*n), 200) print('Count of equations:', m) # distribution ## gaussian in this example according to the paper https://cims.nyu.edu/~regev/papers/lwesurvey.pdf (09.11.2021, page 2) mu = 0 #alpha = 1/(math.sqrt(n)*math.log2(n)**2) # α = 1/(√n log^2(n)) alpha = 0.1 print(alpha) sigma = alpha * p print("Mu:", mu, "\t Sigma:", sigma) ## plot the dense function of the distribution x = np.arange(-10, 10, .001) y = scipy.stats.norm.pdf(x, loc=mu, scale=sigma) fig, ax = plt.subplots(figsize=(16, 5)) ax.fill_between(x, y) ax.set_xticks(range(round(min(x)), round(max(x))+1)) fig.savefig('gaussian.png') plt.show() # + x = np.arange(-10.5, 11.5, 1) y = scipy.stats.norm.cdf(x, loc=mu, scale=sigma) z = y[1:] - y[:-1] fig, ax = plt.subplots(figsize=(16, 5)) ax.stem(x[:-1]+.5, z, basefmt=" ", use_line_collection=True) ax.set_xticks(range(round(min(x)), round(max(x))+1)) #fig.savefig('roundedGaussian.png') plt.show() # - # ## Convolve gaussians # + delta = 1e-4 big_grid = np.arange(-10,10,delta) gaussian = scipy.stats.norm.pdf(big_grid, 
loc=mu, scale=sigma) print("Sum of normal pmf: "+str(sum(gaussian*delta))) conv_pdf = signal.fftconvolve(gaussian,gaussian,'same')*delta for i in range(1, n): conv_pdf = signal.fftconvolve(conv_pdf,gaussian,'same')*delta print("Sum of convoluted pmf: "+str(sum(conv_pdf*delta))) #print("Integration of convoluted pdf: " + str(np.trapz(conv_pdf, big_grid))) plt.plot(big_grid,gaussian, label='gaussian') plt.plot(big_grid,conv_pdf, label='convolve n times gaussian') plt.legend(loc='best'), plt.suptitle('PDFs') plt.show() # + x = big_grid y = conv_pdf fig, ax = plt.subplots(figsize=(16, 5)) ax.fill_between(x, y) ax.set_xticks(range(round(min(x)), round(max(x))+1)) #fig.savefig('gaussian.png') plt.show() # - # ## Construct the LWE problem # #### Construct A, b and e # + np.random.seed(42) # set seed np.random.randint(0, p) # uniform distribution (p excluded) np.random.normal(loc=mu, scale=sigma, size=n) # gaussian distribution A = np.random.randint(0, p, size=(m, n)) e = np.rint(np.random.normal(loc=mu, scale=sigma, size=m)) # rounding specified by the IEEE floating point standard IEEE 754 b = (np.matmul(A, s) + e)%p # with error #b = (np.matmul(A, s))%p # without error # + fig, ax = plt.subplots(nrows=2, figsize=(16, 5)) unique, counts = np.unique(e, return_counts=True) ax[0].stem(unique, counts, basefmt=" ", use_line_collection=True) ax[0].set_xticks(range(round(min(unique)), round(max(unique))+1)) unique, counts = np.unique(e%p, return_counts=True) ax[1].stem(unique, counts, basefmt=" ", use_line_collection=True) ax[1].set_xticks(range(round(min(unique)), round(max(unique))+1)) plt.show() # - # ## Solving LWE # ### Gaussian Elimination On Subset # + # Iterative Algorithm (xgcd) def iterative_egcd(a, b): x,y, u,v = 0,1, 1,0 while a != 0: q,r = b//a,b%a; m,n = x-u*q,y-v*q # use x//y for floor "floor division" b,a, x,y, u,v = a,r, u,v, m,n return b, x, y def modinv(a, m): g, x, y = iterative_egcd(a, m) if g != 1: return None else: return x % m def 
solve_linear_congruence(a, b, m): """ Describe all solutions to ax = b (mod m), or raise ValueError. """ g = math.gcd(a, m) if b % g: raise ValueError("No solutions") a, b, m = a//g, b//g, m//g return modinv(a, m) * b % m, m def print_solutions(a, b, m): print(f"Solving the congruence: {a}x = {b} (mod {m})") x, mx = solve_linear_congruence(a, b, m) print(f"Particular solution: x = {x}") print(f"General solution: x = {x} (mod {mx})") # for debug print_solutions(272, 256, 1009) # + def gaussianEliminationForward(A, b, modulus): (m, n) = A.shape A = np.copy(A[:n][:]) b = np.copy(b[:n]) for j in range(n): # quadratic matrix i = j while(i<n-1): rowUpper = A[i, :] rowUpperLeader = rowUpper[j] leftUpper = b[i] rowLower = A[i+1, :] rowLowerLeader = rowLower[j] leftLower = b[i+1] if rowLowerLeader==0: pass elif rowUpperLeader==0 and rowLowerLeader!=0: # swap rows A[[i, i+1]] = A[[i+1, i]] b[[i, i+1]] = b[[i+1, i]] i=j-1 # redo column elif rowUpperLeader!=0 and rowLowerLeader!=0: lcm = np.lcm(rowUpperLeader, rowLowerLeader) rowLowerNew = (lcm/rowLowerLeader)*rowLower - (lcm/rowUpperLeader)*rowUpper leftLowerNew = (lcm/rowLowerLeader)*leftLower - (lcm/rowUpperLeader)*leftUpper A[i+1, :] = rowLowerNew%modulus b[i+1] = leftLowerNew%modulus i+=1 return A, b def gaussianEliminationBackward(A, b, modulus): (m, n) = A.shape x = np.zeros(m) for i in range(n-1, -1, -1): equLeft = A[i, :] equLeftCoef = equLeft[i] equRight = b[i] equRightCoef = equRight - np.dot(x, equLeft) solution, mx = solve_linear_congruence(equLeftCoef, equRightCoef, modulus) x[i] = solution return x # for debug print(A[:n]) A_new, b_new = gaussianEliminationForward(A, b, p) print(A_new) print() print(b[:n].astype(int)) print(b_new.astype(int)) print() #print(scipy.linalg.solve(A[:m], b[:m])) #print(scipy.linalg.solve(A_new, b_new)) # - try: A_new, b_new = gaussianEliminationForward(A, b, p) x = gaussianEliminationBackward(A_new%p, b_new%p, p) print("Guess:", x.astype(int), "\t", "Right Solution:", s) except 
ValueError: # occurs by linear dependency in the matrix subsetA print("linear dependency") # ### Gaussian Elimination On A Sample OF Subsets random.seed(42) #set seed (m, n) = A.shape combinations = list(itertools.combinations(range(min([200, m])), n)) # set max of 50 to make sure the program do not crash print('Maximal SampleCount:', len(combinations)) # ## Create subsets in chunks subsetsGuess = list() # + for i in range(120): sampleCount = 30000 loop = i subsets = combinations[loop*sampleCount: (loop+1)*sampleCount] sampleCount = min(sampleCount, len(subsets)) samples = random.sample(subsets, sampleCount) print('Loop:', loop) guessCollection = list() for subset in samples: try: subsetA = A[np.array(subset)] subsetb = b[np.array(subset)] subsetA, subsetb = gaussianEliminationForward(subsetA, subsetb, p) x = gaussianEliminationBackward(subsetA%p, subsetb%p, p) guessCollection.append(x.astype(int)) except ValueError: # occurs by linear dependency in the matrix subsetA pass guessMatrix = np.array(guessCollection).T guess = list() for position in range(len(guessMatrix)): unique, counts = np.unique(guessMatrix[position], return_counts=True) significantList = [scipy.stats.binom_test(count, n=sampleCount, p=1/p, alternative='greater') for count in counts] # determine significant level #print(unique, counts) significantArgMin = np.argmin(significantList) #print("Guess:", significantArgMin) #print("Min significants:", significantList[significantArgMin], significantList[significantArgMin]<0.001) #print() indexOfMax = np.argmax(counts) guess.append(unique[indexOfMax]) #print() #print() #if (guess==s%p).all(): # print("Guess:", guess, "\t", "Right Solution:", s%p, "\t", "Solved!") #else: # print("Guess:", guess, "\t", "Right Solution:", s%p, "\t", "Failed!") subsetsGuess.append(guess) print() print() print("Right Solution:", s%p) subsetsGuess # + guessMatrix = np.array(subsetsGuess).T guess = list() for position in range(len(guessMatrix)): unique, counts = 
np.unique(guessMatrix[position], return_counts=True) print(unique, counts) print() indexOfMax = np.argmax(counts) guess.append(unique[indexOfMax]) print() print() if (guess==s%p).all(): print("Guess:", guess, "\t", "Right Solution:", s%p, "\t", "Solved!") else: print("Guess:", guess, "\t", "Right Solution:", s%p, "\t", "Failed!") # - # + guessSeries = list() countSeries = list() significantsSeries = list() position = 3 matrixRange = range(1000, len(guessMatrix[position]), 1000) for i in matrixRange: unique, counts = np.unique(guessMatrix[position][:i], return_counts=True) countSeries.append(counts) significantList = [scipy.stats.binom_test(count, n=sampleCount, p=1/p, alternative='greater') for count in counts] # determine significant level #print(unique, counts) significantArgMin = np.argmin(significantList) significantsSeries.append(significantList[significantArgMin]) #print("Guess:", significantArgMin) #print("Min significants:", significantList[significantArgMin], significantList[significantArgMin]<0.001) #print() indexOfMax = np.argmax(counts) guessSeries.append(unique[indexOfMax]) # - # + fig, ax = plt.subplots(nrows=3, figsize=(16, 30)) ax[0].plot(matrixRange, guessSeries) ax[0].set_xticks(matrixRange[::4]) ax[1].plot(matrixRange, countSeries) ax[1].set_xticks(matrixRange[::4]) ax[2].plot(matrixRange, significantsSeries) ax[2].set_xticks(matrixRange[::4]) ax[2].set_yticks([-1, 0, 1, 2]) #fig.savefig('gaussian.png') plt.show() # -
jupyter_notebooks/solving/LWE-v2.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Mask R-CNN Demo # # A quick intro to using the pre-trained model to detect and segment objects. import cv2 import numpy as np import os import sys #from samples.coco import coco from mrcnn import utils from mrcnn import model as modellib ROOT_DIR = os.getcwd() MODEL_DIR = os.path.join(ROOT_DIR, "logs") COCO_MODEL_PATH = os.path.join(ROOT_DIR, "mask_rcnn_damage_0011.h5") if not os.path.exists(COCO_MODEL_PATH): utils.download_trained_weights(COCO_MODEL_PATH) # + import custom config = custom.CustomConfig() # Override the training configurations with a few # changes for inferencing. class InferenceConfig(config.__class__): # Run detection on one image at a time GPU_COUNT = 1 IMAGES_PER_GPU = 1 config = InferenceConfig() config.display() custom_DIR = os.path.join(ROOT_DIR, "customImages") # + model = modellib.MaskRCNN(mode="inference", model_dir=MODEL_DIR, config=config) # Load weights print("Loading weights ", COCO_MODEL_PATH) model.load_weights(COCO_MODEL_PATH, by_name=True) # + class_names= ['BG','damage'] def random_colors(N): np.random.seed(1) colors = [tuple(255 * np.random.rand(3)) for _ in range(N)] return colors colors = random_colors(len(class_names)) class_dict = { name: color for name, color in zip(class_names, colors) } def apply_mask(image, mask, color, alpha=0.5): """apply mask to image""" for n, c in enumerate(color): image[:, :, n] = np.where( mask == 1, image[:, :, n] * (1 - alpha) + alpha * c, image[:, :, n] ) return image def display_instances(image, boxes, masks, ids, names, scores): """ take the image and results and apply the mask, box, and Label """ n_instances = boxes.shape[0] if not n_instances: print('NO INSTANCES TO DISPLAY') else: assert boxes.shape[0] == masks.shape[-1] == ids.shape[0] for i in range(n_instances): if 
not np.any(boxes[i]): continue print(names) y1, x1, y2, x2 = boxes[i] label = names[ids[i]] color = class_dict[label] score = scores[i] if scores is not None else None caption = '{} {:.2f}'.format(label, score) if score else label mask = masks[:, :, i] image = apply_mask(image, mask, color) image = cv2.rectangle(image, (x1, y1), (x2, y2), color, 2) image = cv2.putText( image, caption, (x1, y1), cv2.FONT_HERSHEY_COMPLEX, 0.7, color, 2 ) return image # - if __name__ == '__main__': """ test everything """ capture = cv2.VideoCapture("VID_20201017_151353.mp4") # these 2 lines can be removed if you dont have a 1080p camera. capture.set(cv2.CAP_PROP_FRAME_WIDTH, 1920) capture.set(cv2.CAP_PROP_FRAME_HEIGHT, 1080) frame_width = int(capture.get(3)) frame_height = int(capture.get(4)) out = cv2.VideoWriter('output1.avi',cv2.VideoWriter_fourcc('M','J','P','G'), 10, (frame_width,frame_height)) while True: ret, frame = capture.read() # Bail out when the video file ends if not ret: break results = model.detect([frame], verbose=0) r = results[0] frame = display_instances( frame, r['rois'], r['masks'], r['class_ids'], class_names, r['scores'] ) # frame = cv2.cvtColor(frame,cv2.COLOR_RGB2BGR) cv2.imshow('frame', frame) out.write(frame) if cv2.waitKey(1) & 0xFF == ord('q'): break capture.release() cv2.destroyAllWindows()
damage_detection_video.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # <NAME> # ## Research question/interests # # My reaserach question is what music genres produce the highest charting songs, however im also interested in seeing the link between song success and early artist success? import pandas as pd import numpy as np import numpy as np import matplotlib.pylab as plt import seaborn as sns # + pd.read_csv("../data/raw/time_series_90s.csv") pd.read_csv("../data/raw/recognition_by_generation.csv") # - # ## Milestone 3 # ## Task 1 # ## Exploratory Data Analysis: # + df=pd.read_csv("../data/raw/time_series_90s.csv") df1=pd.read_csv("../data/raw/recognition_by_generation.csv") df2 = df.merge(df1) print("number of rows,columns,etc",df2.info) # - print("Number of rows and columns respectively:", df2.shape) # + print("number of elements:",df2.size) # - print("columns in dataset:",df2.columns) print("number of columns in dataset:", len(df2.columns)) # + df2.head() df4=df2.head(20) # - df2.describe(include='float64').T df2.describe(include='object').T df1.plot.hist() # ### There seems to be an inverse relationship between millenial and gen z song recognition. The data is clearly right-tailed. # + df4['artist'].value_counts().sort_index().plot.barh() # - # ### Amy grant, Aero smith, and Ace of base have the most recognizable songs out of the first 20 rows. This suggests that pop music and rock music are genres that have high popularity amongst generations. df5=df1.head(20) df5.plot.line(title='Song Dataset') # ### There is a noticeable higher amounts of song recognitionn amongst millenials compared to gen-z. 
# ## Task 2 # ## 1.load data # + df=pd.read_csv("../data/raw/time_series_90s.csv") df1=pd.read_csv("../data/raw/recognition_by_generation.csv") df2 = df.merge(df1) # - # ## Clean Data # + df1 # - # ### df1 is used as it averages out the values of song recognition between two groups, millenials and gen-z.It is essentially a cleaned version of the data in df.It also contains new index and columns. # # # ## Process Data # ### df1 already shows total averages of song recogniton for song recogniton values from pre and post births of children born in 1990. df columns were reformatted by placing years to make data more understandable. # # df6 = df.rename(columns={'years_old_13': 'years_old_13(2003)', 'years_old_12': 'years_old_12(2002)','years_old_11': 'years_old_11(2001)','years_old_10': 'years_old_10(2000)','years_old_9': 'years_old_9(1999)','years_old_8': 'years_old_8(1998)', 'years_old_7': 'years_old_7(1997)','years_old_6': 'years_old_6(1996)', 'years_old_5': 'years_old_5(1995)','years_old_4': 'years_old_4(1994)','years_old_3': 'years_old_3(1993)','years_old_2': 'years_old_2(1992)','years_old_1': 'years_old_1(1991)','years_old_0': 'years_old_0(1990)','years_pre_birth_1':'years_pre_birth_1 (1989)','years_pre_birth_2':'years_pre_birth_2 (1988)','years_pre_birth_3':'years_pre_birth_3 (1987)','years_pre_birth_4':'years_pre_birth_4 (1986)', 'years_pre_birth_5':'years_pre_birth_5 (1985)' ,'years_pre_birth_6':'years_pre_birth_6 (1984)' ,'years_pre_birth_7':'years_pre_birth_7 (1983)','years_pre_birth_8':'years_pre_birth_8 (1982)','years_pre_birth_9':'years_pre_birth_9 (1981)','years_pre_birth_10':'years_pre_birth_10 (1980)','years_pre_birth_11':'years_pre_birth_11 (1979)','years_pre_birth_12':'years_pre_birth_12 (1978)','years_pre_birth_13':'years_pre_birth_13 (1977)'}) df6 # ## Wrangle Data # ### df7 shows the merge between df6 and df1. 
df7 = df6.merge(df1) # + df7 df7.drop(['years_pre_birth_2 (1988)', 'years_pre_birth_3 (1987)','years_pre_birth_1 (1989)','years_pre_birth_4 (1986)','years_pre_birth_5 (1985)','years_pre_birth_6 (1984)','years_pre_birth_7 (1983)','years_pre_birth_8 (1982)','years_pre_birth_9 (1981)','years_pre_birth_10 (1980)'], axis=1) # - df1s=df1.sort_values("mean_millennial_recognition", ascending=False) df1s df1S=df1.sort_values("mean_gen_z_recognition", ascending=False) df1s df7.reset_index(drop=True) # ## Task 3 # ## Step 1 # # + df8 = ( pd.read_csv("../data/raw/time_series_90s.csv") .rename(columns={'years_old_13': 'years_old_13(2003)', 'years_old_12': 'years_old_12(2002)','years_old_11': 'years_old_11(2001)','years_old_10': 'years_old_10(2000)','years_old_9': 'years_old_9(1999)','years_old_8': 'years_old_8(1998)', 'years_old_7': 'years_old_7(1997)','years_old_6': 'years_old_6(1996)', 'years_old_5': 'years_old_5(1995)','years_old_4': 'years_old_4(1994)','years_old_3': 'years_old_3(1993)','years_old_2': 'years_old_2(1992)','years_old_1': 'years_old_1(1991)','years_old_0': 'years_old_0(1990)','years_pre_birth_1':'years_pre_birth_1 (1989)','years_pre_birth_2':'years_pre_birth_2 (1988)','years_pre_birth_3':'years_pre_birth_3 (1987)','years_pre_birth_4':'years_pre_birth_4 (1986)', 'years_pre_birth_5':'years_pre_birth_5 (1985)' ,'years_pre_birth_6':'years_pre_birth_6 (1984)' ,'years_pre_birth_7':'years_pre_birth_7 (1983)','years_pre_birth_8':'years_pre_birth_8 (1982)','years_pre_birth_9':'years_pre_birth_9 (1981)','years_pre_birth_10':'years_pre_birth_10 (1980)','years_pre_birth_11':'years_pre_birth_11 (1979)','years_pre_birth_12':'years_pre_birth_12 (1978)','years_pre_birth_13':'years_pre_birth_13 (1977)'}) .merge(df1) .reset_index(drop=True) .sort_values("mean_millennial_recognition", ascending=False) .drop(['years_pre_birth_2 (1988)', 'years_pre_birth_3 (1987)','years_pre_birth_1 (1989)','years_pre_birth_4 (1986)','years_pre_birth_5 (1985)','years_pre_birth_6 
(1984)','years_pre_birth_7 (1983)','years_pre_birth_8 (1982)','years_pre_birth_9 (1981)','years_pre_birth_10 (1980)'], axis=1) ) df8 # - df9=df7.sort_values("mean_millennial_recognition", ascending=False) df9 # ## Step 2 # + def load_and_process(url_or_path_to_csv_file): df8 = ( pd.read_csv("../data/raw/time_series_90s.csv") .rename(columns={'years_old_13': 'years_old_13(2003)', 'years_old_12': 'years_old_12(2002)','years_old_11': 'years_old_11(2001)','years_old_10': 'years_old_10(2000)','years_old_9': 'years_old_9(1999)','years_old_8': 'years_old_8(1998)', 'years_old_7': 'years_old_7(1997)','years_old_6': 'years_old_6(1996)', 'years_old_5': 'years_old_5(1995)','years_old_4': 'years_old_4(1994)','years_old_3': 'years_old_3(1993)','years_old_2': 'years_old_2(1992)','years_old_1': 'years_old_1(1991)','years_old_0': 'years_old_0(1990)','years_pre_birth_1':'years_pre_birth_1 (1989)','years_pre_birth_2':'years_pre_birth_2 (1988)','years_pre_birth_3':'years_pre_birth_3 (1987)','years_pre_birth_4':'years_pre_birth_4 (1986)', 'years_pre_birth_5':'years_pre_birth_5 (1985)' ,'years_pre_birth_6':'years_pre_birth_6 (1984)' ,'years_pre_birth_7':'years_pre_birth_7 (1983)','years_pre_birth_8':'years_pre_birth_8 (1982)','years_pre_birth_9':'years_pre_birth_9 (1981)','years_pre_birth_10':'years_pre_birth_10 (1980)','years_pre_birth_11':'years_pre_birth_11 (1979)','years_pre_birth_12':'years_pre_birth_12 (1978)','years_pre_birth_13':'years_pre_birth_13 (1977)'}) .drop(['years_pre_birth_2 (1988)', 'years_pre_birth_3 (1987)','years_pre_birth_1 (1989)','years_pre_birth_4 (1986)','years_pre_birth_5 (1985)','years_pre_birth_6 (1984)','years_pre_birth_7 (1983)','years_pre_birth_8 (1982)','years_pre_birth_9 (1981)','years_pre_birth_10 (1980)'], axis=1) .merge(df1) .sort_values("mean_millennial_recognition", ascending=False) ) return df8 # - load_and_process("data/raw/time_series_90s.csv") # ## Step 3 # + import project_functions3 as p3 df = "../data/raw/time_series_90s.csv" df1 = 
"../data/raw/recognition_by_generation.csv" p3.load_and_process(df) # - # # ## Task 4 # # print("Shape of Dataset (no. of rows, no. of columns):",df8.shape);print() print("Columns of the Dataset:",df8.columns);print() df8.to_csv("../../df8.csv",index=False) df8.describe(include='object') df8.describe() df8.sort_values(by=['mean_gen_z_recognition'], ascending=False) # Analysis # # In order to narrow down the results and get more relevant data, the data frame has been configured to focus on the years starting from 1990 and moving up all the way to 2003. The description of df8 showcases that the artist with the most songs is Mariah carey. This information also helps to understand why the recognizability of Mariah Carey is so high amongst both millenials and generation z. Her high frequency of songs may have been a factor in m aking her have such a high recognizability as an artist. Since she is a pop artist, this alludes to pop music being the most popular music genre with the most charting songs. This is made more apparent when looking at what songs have the most recognizability amongst generation z and millenials. It seems that songs such as 'My heart will go on', 'Hit me baby one more time', and 'Wannabe' are the most recognized songs, the fact that these are all pop songs continues to showcase the idea that pop music is the most popular music genre with the most charting songs. boxdf = df8[['mean_millennial_recognition','mean_gen_z_recognition']] boxdf = pd.melt(boxdf) sns.boxplot(x='variable',y='value',data=boxdf) plt.show() # The box plot above showcases that more songs on average are recognized by millenials compared to generation z. This makes sense because millenials had more time to listen to these different songs. df5=df1.head(20) df5.plot.line(title='Song Dataset') # This graph clearly shows that on average more songs seem to be recognized by millenials compared to generation z. 
# However it also shows that the most popular songs amongst millenials are also the most recognized amongst generation z. sns.jointplot(x='mean_millennial_recognition',y='mean_gen_z_recognition',data=df8,kind='reg') # The graph above shows that there clearly a positive coefficient correlation and covariance between mean recognizability between both millenials and genreation-z. This suggests that early success may determine how successful a song can be. If a song is popular amongst millenials then it is highly likely that it will be the most popular amongst genereation z as well. This shows that early success may also be another reason that pop music is the most successfull music genre starting from the 90's.
notebooks/analysis3.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- import pandas as pd import numpy as np import tensorflow as tf import matplotlib.pyplot as plt # %matplotlib inline df=pd.read_csv("customer_churn.csv") df.head() #data exploration #customer id has no usage df.drop('customerID',axis='columns',inplace=True) df.dtypes #'TotalCharges' are in object(should be a float) df.TotalCharges.values #to show the blank entries in total charges column df[pd.to_numeric(df.TotalCharges,errors='coerce').isnull()] #dropping these empty data df_1=df[df.TotalCharges!=' '] df_1.shape df_1.dtypes #converting TotalCharges to float df_1.TotalCharges=pd.to_numeric(df_1.TotalCharges) df_1.TotalCharges.dtypes #visualization-1 #checking tenure tenure_churn_no=df_1[df_1.Churn=='No'].tenure tenure_churn_yes=df_1[df_1.Churn=='Yes'].tenure plt.xlabel('Tenure') plt.ylabel('Number of customer') plt.title("Customer churn prediction visualization") plt.hist([tenure_churn_yes,tenure_churn_no],color=["green",'red']) plt.legend(["churn=Yes","churn=NO"]) #Visualization-2 #based on montly charges mc_churn_no=df_1[df_1.Churn=='No'].MonthlyCharges mc_churn_yes=df_1[df_1.Churn=='Yes'].MonthlyCharges plt.xlabel('Monthly Charges') plt.ylabel('Number of customer') plt.title("Customer churn prediction visualization") plt.hist([mc_churn_yes,mc_churn_no],color=["green",'red']) plt.legend(["churn=Yes","churn=NO"]) #to check unique values in every categorical column def print_unique_col_values(df): for column in df: if df[column].dtypes=='object': print(f"{column}:{df[column].unique()}") #data cleaning print_unique_col_values(df_1) #replacing No___ service to no df_1.replace('No phone service',"No",inplace=True) df_1.replace('No internet service',"No",inplace=True) print_unique_col_values(df_1) #yes_no_columns 
yes_no_col=["Partner","Dependents","PhoneService","MultipleLines","OnlineSecurity", "OnlineBackup","DeviceProtection","TechSupport","StreamingTV", "StreamingMovies","PaperlessBilling","Churn"] #Replacing yes and no to 1 and 2 for col in yes_no_col: df_1[col].replace({"Yes":1,"No":0},inplace=True) #checking uniqueness for column in df_1: print(f"{column}:{df_1[column].unique()}") #converting Female and male to 1 and 0 df_1["gender"].replace({"Female":1,"Male":0},inplace=True) df_1["gender"].unique() #One Hot Encoding df_2=pd.get_dummies(data=df_1,columns=["InternetService","Contract","PaymentMethod"]) df_2.columns df_2.sample(5) df_2.dtypes #Scaling monthlycharges , Tenure , TotalCharges: cols_to_scale=["tenure","MonthlyCharges","TotalCharges"] from sklearn.preprocessing import MinMaxScaler scaler=MinMaxScaler() df_2[cols_to_scale]=scaler.fit_transform(df_2[cols_to_scale]) #Checking uniqueness for column in df_2: print(f"{column}:{df_2[column].unique()}") # + #STEP 2: splitting the dataset X=df_2.drop("Churn",axis='columns') Y=df_2["Churn"] from sklearn.model_selection import train_test_split x_train,x_test,y_train,y_test=train_test_split(X,Y,test_size=0.2,random_state=5) # - x_train.shape x_test.shape x_train.head() # + #Neural network model= tf.keras.Sequential([ tf.keras.layers.Dense(100,input_shape=(26,),activation='relu'), tf.keras.layers.Dense(50,activation="relu"), tf.keras.layers.Dense(25,activation="relu"), tf.keras.layers.Dense(1,activation="sigmoid") ]) model.compile(optimizer="adam", loss="binary_crossentropy", metrics=["accuracy"]) model.fit(x_train,y_train,epochs=100) # - y_p=model.predict(x_test) y_p[:5] y_test[:10] y_pred=[] for i in y_p: if i >0.5: y_pred.append(1) else: y_pred.append(0) y_pred[:10] #Classification Report from sklearn.metrics import confusion_matrix,classification_report print(classification_report(y_test,y_pred)) #confusion matrix import seaborn as sn cm=tf.math.confusion_matrix(labels=y_test,predictions=y_pred) 
plt.figure(figsize=(10,7)) sn.heatmap(cm,annot=True,fmt='d') plt.xlabel("Predicted") plt.ylabel("Truth")
customer_churn_prediction.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # --- # # XSAR simple example # # just open a dataset with [xsar.open_dataset](../basic_api.rst#xsar.open_dataset), and display denoised `sigma0` in 'VH' polarisation import xarray as xr import xsar import os import holoviews as hv hv.extension('bokeh') from holoviews.operation.datashader import datashade,rasterize import datashader as dh # get test file. You can replace with an path to other SAFE filename = xsar.get_test_file('S1A_IW_GRDH_1SDV_20170907T103020_20170907T103045_018268_01EB76_Z010.SAFE') filename # open the dataset with xarray sar_ds = xsar.open_dataset(filename) sar_ds # we use here 'rasterize' to display the image, because the full image is really big rasterize(hv.Image(sar_ds.sigma0.sel(pol='VH')).opts(cmap='gray',colorbar=True,tools=['hover'],title="xsar",width=800,height=800,clim=(0,0.02)))
docs/examples/xsar.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + [markdown] id="view-in-github" colab_type="text" # <a href="https://colab.research.google.com/github/Mael-zys/SD212/blob/main/lab1_A_sparse.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a> # + [markdown] id="83035Q0ZvvWD" # # SD212: Graph mining # # # Lab 1: Sparse matrices # + [markdown] id="8GHfcJHrvvWE" # The objective of this lab is to understand the structure and main properties of [sparse matrices](https://en.wikipedia.org/wiki/Sparse_matrix). # # You will learn to code your own sparse matrices to understand their underlying structure. <br>Note that in the other labs, we will only use sparse matrices of [SciPy](https://www.scipy.org/scipylib/index.html). # + [markdown] id="RWizh_ItvvWF" # ## Import # + id="koQ0u4xxvvWF" import numpy as np # + id="BdQ-UWtqvvWF" from scipy import sparse # + [markdown] id="d111yjU2vvWG" # ## Coordinate format # + id="kZii2puovvWG" # random matrix (dense format) A_dense = np.random.randint(2, size = (5,10)) # + colab={"base_uri": "https://localhost:8080/"} id="MOwCgiUfvvWG" outputId="9372a9dc-20f4-498c-ec3d-b8a6e8be9893" A_dense # + id="FYaQ_f0NvvWG" A_coo = sparse.coo_matrix(A_dense) # + colab={"base_uri": "https://localhost:8080/"} id="xR4Y9m7-vvWH" outputId="02df1636-0028-49f6-f182-24456b8c19f4" A_coo # + colab={"base_uri": "https://localhost:8080/"} id="kLPUfdgLvvWH" outputId="d9a28e7c-aa9a-4a9f-b201-0e451ed95fb9" A_coo.shape # + colab={"base_uri": "https://localhost:8080/"} id="AcqxOnYyvvWH" outputId="381d8779-0f6d-43d8-ca18-29a54aae230a" A_coo.nnz # + colab={"base_uri": "https://localhost:8080/"} id="YftZ9HAnvvWH" outputId="0fd70c1a-7e0d-490c-e6cc-21bf245fdaef" print(A_coo.row) print(A_coo.col) print(A_coo.data) # + id="FgNSiG3PvvWI" # 
there might be zeros in data! row = A_coo.row col = A_coo.col data = np.random.randint(5, size=len(A_coo.data)) shape = A_coo.shape # + colab={"base_uri": "https://localhost:8080/"} id="LZM9lPjNwmq7" outputId="c936ab1e-a50b-4343-d19b-e363e06d8ef5" shape # + colab={"base_uri": "https://localhost:8080/"} id="lJ7KInQhvvWI" outputId="ffe09c10-7ce9-4395-dcf0-7ead4bf9867e" data # + id="Q_gmkHN39BUT" shape = (3, 5) indices = [1, 4, 0, 1, 2] indptr = [0, 2, 2, 5] data = [1, 3, 1, 2, 1] # + id="AnWkUTgQ9FYQ" # + id="G1wo7WhlvvWI" colab={"base_uri": "https://localhost:8080/", "height": 166} outputId="3f86db19-e600-495c-e401-48ea0bb1fc8c" B_coo = sparse.coo_matrix((data, (row, col)), shape) # + colab={"base_uri": "https://localhost:8080/"} id="OQJ69Y0YvvWI" outputId="892fb422-944f-4c9a-b8c4-19db6476a5b3" B_coo # + colab={"base_uri": "https://localhost:8080/"} id="iweHrfHivvWI" outputId="296f9909-e295-4136-bba7-1ff0fec6c65e" B_coo.toarray() # + colab={"base_uri": "https://localhost:8080/"} id="kAsrljssvvWJ" outputId="aec0bf11-6d02-476d-f6fb-01c0d973a5cd" B_coo.nnz # + colab={"base_uri": "https://localhost:8080/"} id="6eXsfIHJvvWJ" outputId="903799a6-c71a-4004-db75-777f9f448df5" np.sum(B_coo.data > 0) # + id="AQN2wWQivvWJ" B_coo.eliminate_zeros() # + colab={"base_uri": "https://localhost:8080/"} id="mb9IYhV6vvWJ" outputId="eb80149c-41d5-4b90-e5ea-64c3797dd5ac" B_coo # + colab={"base_uri": "https://localhost:8080/"} id="vML_QCqvwH_c" outputId="5e16cfd7-140c-492a-86f6-6bb1e3f11558" B_coo.nnz # + [markdown] id="ud4nup5tvvWJ" # ## To do # + [markdown] id="59UFkJ_PvvWK" # Complete the function below that converts a dense matrix into a sparse matrix in COO format. # # Needless to say... 
# * don't use `scipy` # * don't use any loop # # **Hint:** Use `np.nonzero` # + colab={"base_uri": "https://localhost:8080/"} id="nM3AztlNvvWK" outputId="8eb63db3-d612-4352-f286-867e390e7678" A_dense # + colab={"base_uri": "https://localhost:8080/"} id="REPYUyKRvvWK" outputId="1c03d3c8-867c-4a6c-834e-14aefa15b77a" np.nonzero(A_dense) # + id="vKQq9Kc6vvWK" class SparseCOO(): def __init__(self, data: np.ndarray, row: np.ndarray, col: np.ndarray, shape: tuple): self.data = data self.row = row self.col = col self.shape = shape # + id="ZjgQNe9NvvWK" def dense_to_coo(A): '''Convert dense matrix to sparse in COO format. Parameters ---------- A : np.ndarray Dense matrix Returns ------- A_coo : SparseCOO Sparse matrix in COO format. ''' # to be modified data = A[np.nonzero(A)] row = np.nonzero(A)[0] col = np.nonzero(A)[1] shape = A.shape return SparseCOO(data, row, col, shape) # + id="16-U4Mg6vvWL" def test_equality(A, B, attributes): return [np.all(getattr(A, a) == getattr(B, a)) for a in attributes] # + id="UkmkQ91kvvWL" # test A_dense = np.random.randint(2, size = (5,10)) A_coo = sparse.coo_matrix(A_dense) A_coo_ = dense_to_coo(A_dense) # + colab={"base_uri": "https://localhost:8080/"} id="d02K3BQHvvWL" outputId="852465c0-54c3-4309-9eb8-d9fe96dd3c31" test_equality(A_coo, A_coo_, ["data", "row", "col"]) # + [markdown] id="I7gfT1FhvvWL" # ## CSR format # + [markdown] id="VWb6HfUAvvWL" # The CSR (Compressed Sparse Row) format is the most efficient for arithmetic operations (see below). 
# + colab={"base_uri": "https://localhost:8080/"} id="LSViIBMsvvWL" outputId="37f0fc05-9a7e-4c40-f838-bfe00021cb35" A_dense # + id="CoIPeAKXvvWL" A_csr = sparse.csr_matrix(A_dense) # + colab={"base_uri": "https://localhost:8080/"} id="cCL99OU2vvWM" outputId="0eac1aaa-cbec-460b-c607-db7a2757a404" A_csr # + colab={"base_uri": "https://localhost:8080/"} id="lpUNbqD9vvWM" outputId="a9253cd3-6fe0-4990-ef43-e87d1c085341" A_csr.shape # + colab={"base_uri": "https://localhost:8080/"} id="aHxKjgqEvvWM" outputId="c9c6850a-5df6-47ae-b5d4-215bce5addce" A_csr.nnz # + colab={"base_uri": "https://localhost:8080/"} id="mktEwN6UvvWM" outputId="f5592e04-0642-43af-909d-519bf28c793d" print(A_csr.indices) print(A_csr.indptr) print(A_csr.data) # + colab={"base_uri": "https://localhost:8080/"} id="M0VHNoEfvvWM" outputId="dd0f6c66-1798-4000-89e1-daee66e055e6" A_csr[3, 4] # + colab={"base_uri": "https://localhost:8080/"} id="JA9o25EuvvWM" outputId="9c68e36c-2e6c-46b2-a154-e0953ed093a4" A_csr[3] # + colab={"base_uri": "https://localhost:8080/"} id="C6QqkJKpvvWM" outputId="9dba53c2-87cf-4cdb-8511-1e7e2fd0c187" A_csr[3].toarray() # + id="SL0UE10GvvWN" # data might have zeros! 
indices = A_csr.indices indptr = A_csr.indptr data = np.random.randint(5, size=len(A_csr.data)) shape = A_csr.shape # + id="Ki2mWAuyvvWN" B_csr = sparse.csr_matrix((data, indices, indptr), shape) # + colab={"base_uri": "https://localhost:8080/"} id="VCq7ArWlvvWN" outputId="f7ca63b9-be74-4e30-cd18-6f647d036df6" B_csr # + colab={"base_uri": "https://localhost:8080/"} id="8puc2It09Qm9" outputId="960ab8a9-e5b3-40a9-b3a4-0fe58d0eb13c" B_csr.nnz # + colab={"base_uri": "https://localhost:8080/"} id="IHOHL8_N9Tt2" outputId="93ad46c0-a933-4ea6-f921-72fc5b835253" B_csr.toarray() # + id="lJ3JOLEEvvWN" B_csr.eliminate_zeros() # + colab={"base_uri": "https://localhost:8080/"} id="zPYvl-27vvWN" outputId="b9a9613c-b990-4503-b623-c4e3cabc0a53" B_csr # + id="bpJSDdFLvvWN" # from COO format row = [0, 0, 1, 2, 2] col = [2, 3, 0, 1, 2] data = np.ones(5) A_csr = sparse.csr_matrix((data, (row, col)), shape = (3, 4)) # + colab={"base_uri": "https://localhost:8080/"} id="3F9aDhNPvvWN" outputId="c64c57d8-4abb-41aa-bc6e-1671307d8f43" A_csr.toarray() # + id="xhdzStievvWN" # equivalently A_coo = sparse.coo_matrix((data, (row, col)), shape = (3, 4)) A_csr = sparse.csr_matrix(A_coo) # + colab={"base_uri": "https://localhost:8080/"} id="qqV9vJWpvvWO" outputId="650cba31-cad9-4b02-a08d-0bd8f2cb8fd6" A_csr.toarray() # + [markdown] id="br5zE8kwvvWO" # ## To do # + [markdown] id="bi3q-7NavvWO" # Complete the function below that converts a sparse matrix from COO format to CSR format. # # Again... # * don't use `scipy` # * don't use any loop # # **Hint:** Use ``np.unique`` and ``np.cumsum``. # + id="zFT-_M1SvvWO" class SparseCSR(): def __init__(self, data: np.ndarray, indices: np.ndarray, indptr: np.ndarray, shape: tuple): self.data = data self.indices = indices self.indptr = indptr self.shape = shape # + id="rvNycwWuvvWO" def coo_to_csr(A_coo): '''Convert a sparse matrix from COO to CSR format. Parameters ---------- A_coo : SparseCSR Sparse matrix in COO format. 
Returns ------- A_csr : SparseCSR Sparse matrix in CSR format. ''' # to be modified data = A_coo.data indices = A_coo.col shape = A_coo.shape indptr = np.zeros(shape[0] + 1, dtype = int) row_indices, counts = np.unique(A_coo.row, return_counts=True) print(row_indices) print(counts) indptr[row_indices+1] = counts indptr = np.cumsum(indptr) return SparseCSR(data, indices, indptr, shape) # + id="Np-t82hSvvWO" def dense_to_csr(A): '''Convert dense matrix to sparse in CSR format. Parameters ---------- A : np.ndarray Dense matrix Returns ------- A_csr : SparseCSR Sparse matrix in CSR format. ''' return coo_to_csr(sparse.coo_matrix(A)) # + colab={"base_uri": "https://localhost:8080/"} id="v6Ywco3ovvWP" outputId="ce7b2ab6-c630-4de6-c5c9-2f42565b5139" # test A_dense = np.random.randint(2, size = (5,10)) A_csr = sparse.csr_matrix(A_dense) A_csr_ = dense_to_csr(A_dense) print(A_csr.indptr) print(A_csr_.indptr) # + colab={"base_uri": "https://localhost:8080/"} id="LNVGJwtvvvWP" outputId="827dc760-54b4-4bb1-f892-96eb75816d2e" test_equality(A_csr, A_csr_, ["data", "indices", "indptr"]) # + [markdown] id="UYWcLrqYvvWP" # ## Diagonal format # + id="SS6L_q08vvWP" A_diag = sparse.diags(np.arange(5)) # + id="m0YiZFTvvvWP" A_diag # + id="Pl58SGWJvvWP" A_diag.toarray() # + id="XQlQ1xgPvvWP" A_diag.diagonal() # + id="0gQaHN57vvWQ" A = sparse.csr_matrix(A_diag) # + id="qSB84aZRvvWQ" A # + [markdown] id="hvKE9etSvvWQ" # ## To do # + [markdown] id="5LnduTLSvvWQ" # Complete the following function that returns a sparse CSR matrix with the pseudo-inverse vector on the diagonal. # # **Example:** pseudo inverse of (0, 1, 2) -> (0, 1, 1/2) # # **Hint:** Use the property of sparse matrices! # + id="4aP2y6TCvvWQ" def get_pseudo_inverse(vector): '''Return a sparse matrix with pseudo-inverse on the diagonal. Parameters ---------- vector : np.ndarray Input vector. Returns ------- A_csr : sparse.csr_matrix Sparse matrix in scipy CSR format. 
''' # to be modified return None # + id="rJOQJUSivvWR" # test get_pseudo_inverse(np.arange(3)) # + [markdown] id="vF5R2-TnvvWR" # ## Operations # + [markdown] id="GLgRV9G8vvWR" # Usual arithmetic operations apply to sparse matrices. The only contraint is to have a sparse matrix on the **left-hand side** of the operator. # + id="KBxfOCqzvvWR" A = sparse.csr_matrix(A_dense) # + id="SSBASwXjvvWR" n_row, n_col = A.shape # + id="wm4YdgeuvvWR" A.dot(np.ones(n_col, dtype=int)) # + id="7yjkTIdKvvWR" A.T.dot(np.ones(n_row, dtype=int)) # + id="AsJaTcCIvvWR" # observe the format of the transpose A.T # + id="QuN_cfepvvWS" A.T.dot(A) # + id="FNjeYABDvvWS" A.dot(A.T) # + id="QcjF62pbvvWS" A.data = np.random.choice((1,2,3,4), size = len(A.data)) # + id="RcQv7HGivvWS" B = A > 1 # + id="vCdhqqmavvWS" B # + id="02NjTDvkvvWS" # Explain the following warning... B = A < 1 # + id="xBX5EG97vvWS" B # + id="DFTwUeeEvvWS" B_dense = np.random.randint(2, size = (5,10)) B = sparse.csr_matrix(B_dense) # + id="uJtLfPGavvWT" 2 * A + 5 * B # + [markdown] id="pw57lhmVvvWT" # ## To do # + [markdown] id="ZU8oGOBZvvWT" # Complete the following function that normalizes a sparse CSR matrix with non-negative entries so that each row sums to 1 (or to 0 if the whole row is zero). # # **Hint:** Use the above function ``get_pseudo_inverse``. # + id="AQp31IlovvWT" def normalize_rows(A): '''Normalize the rows of a CSR matrix so that all sum to 1 (or 0). Parameters ---------- A : sparse.csr_matrix Input matrix (non-negative entries). Returns ------- A_norm : sparse.csr_matrix Normalized matrix. ''' # to be modified return None # + [markdown] id="9tbTWkJgvvWT" # ## To do # + [markdown] id="6b9emsJDvvWT" # Complete the following method that returns the dot product of a sparse CSR matrix with a vector. # # * No loop allowed! 
# + id="uBUkP_1IvvWT"
class SparseCSR():
    '''Minimal CSR container with a vectorized matrix-vector product.'''

    def __init__(self, data: np.ndarray, indices: np.ndarray, indptr: np.ndarray, shape: tuple):
        self.data = data        # stored values, row by row
        self.indices = indices  # column index of each stored value
        self.indptr = indptr    # start of each row in data/indices (length shape[0] + 1)
        self.shape = shape      # (n_rows, n_cols)

    def dot(self, x: np.ndarray) -> np.ndarray:
        '''Sparse-vector dot product (no Python loop).

        Parameters
        ----------
        x : np.ndarray
            Dense vector of length ``shape[1]``.

        Returns
        -------
        np.ndarray
            Dense vector of length ``shape[0]``.
        '''
        # Per-entry contributions, then prefix sums: the total of row i is
        # cumsum[indptr[i + 1]] - cumsum[indptr[i]] (0 for empty rows).
        contrib = self.data * x[self.indices]
        cumsum = np.concatenate(([0], np.cumsum(contrib)))
        return cumsum[self.indptr[1:]] - cumsum[self.indptr[:-1]]

# + [markdown] id="iz3_JE96vvWU"
# ## Slicing

# + [markdown] id="PshmIN_2vvWU"
# Sparse matrices can be sliced like numpy arrays. The CSR format is more efficient for row slicing (although column slicing is possible), while the CSC format is more efficient for column slicing.

# + id="XhHvIs1zvvWU"
A = sparse.csr_matrix(A_dense)

# + id="IwppxIKJvvWU"
A[:2]

# + id="p41QykqRvvWU"
A[1:4,2:]

# + id="vimx6WMyvvWU"
A[np.array([0,2,4])]

# + [markdown] id="bu35fCvZvvWU"
# ## To do
#
# Consider the following matrix:

# + id="xNuY5rDHvvWU"
A = sparse.csr_matrix(np.random.randint(2, size = (20,30)))

# + [markdown] id="Nf5ht0t3vvWV"
# Extract the 10 rows of largest sums and build the corresponding matrix.

# + [markdown] id="Sz4Ds0eIvvWV"
# ## Bonus

# + [markdown] id="q33DjpunvvWV"
# Complete all methods of the following CSR class.
# + id="zBLFvOPVvvWV"
class SparseCSR():
    '''CSR (Compressed Sparse Row) matrix with the usual operations.

    Note: annotations referencing the class are quoted -- the bare name is
    undefined while the class body executes, so the original stubs raised
    NameError at definition time.

    Attributes
    ----------
    data : np.ndarray
        Stored values, ordered row by row.
    indices : np.ndarray
        Column index of each stored value.
    indptr : np.ndarray
        Start of each row within ``data``/``indices``; length ``shape[0] + 1``.
    shape : tuple
        Matrix dimensions ``(n_rows, n_cols)``.
    '''

    def __init__(self, data: np.ndarray, indices: np.ndarray, indptr: np.ndarray, shape: tuple):
        self.data = data
        self.indices = indices
        self.indptr = indptr
        self.shape = shape

    def _filter_entries(self, keep: np.ndarray, indices: np.ndarray, shape: tuple) -> 'SparseCSR':
        '''Drop entries where ``keep`` is False, keeping the row structure valid.'''
        # Number of kept entries before each old data position = new indptr.
        kept_before = np.concatenate(([0], np.cumsum(keep)))
        return SparseCSR(self.data[keep], indices[keep],
                         kept_before[self.indptr].astype(int), shape)

    def _merge_rows(self, row_dicts, n_col) -> 'SparseCSR':
        '''Build a SparseCSR from one {column: value} dict per row.'''
        data, indices, indptr = [], [], [0]
        for acc in row_dicts:
            cols = sorted(acc)
            indices.extend(cols)
            data.extend(acc[c] for c in cols)
            indptr.append(len(indices))
        return SparseCSR(np.array(data), np.array(indices, dtype=int),
                         np.array(indptr, dtype=int), (len(row_dicts), n_col))

    def dot(self, x: np.ndarray) -> np.ndarray:
        '''Sparse-vector dot product (vectorized, no Python loop).'''
        # Prefix sums of per-entry contributions; row i's total is the
        # difference of the prefix sums at its indptr boundaries.
        contrib = self.data * x[self.indices]
        cumsum = np.concatenate(([0], np.cumsum(contrib)))
        return cumsum[self.indptr[1:]] - cumsum[self.indptr[:-1]]

    def dot_array(self, X: np.ndarray) -> np.ndarray:
        '''Sparse-array dot product; returns a dense (shape[0], X.shape[1]) array.'''
        out = np.zeros((self.shape[0], X.shape[1]))
        for i in range(self.shape[0]):
            start, end = self.indptr[i], self.indptr[i + 1]
            if end > start:
                # Row i is a sparse combination of the rows of X.
                out[i] = self.data[start:end] @ X[self.indices[start:end]]
        return out

    def dot_sparse(self, X: 'SparseCSR') -> 'SparseCSR':
        '''Sparse-sparse dot product (row-by-row accumulation).'''
        rows = []
        for i in range(self.shape[0]):
            acc = {}
            for k in range(self.indptr[i], self.indptr[i + 1]):
                value = self.data[k]
                j = self.indices[k]
                # Scatter value * (row j of X) into the accumulator.
                for l in range(X.indptr[j], X.indptr[j + 1]):
                    col = int(X.indices[l])
                    acc[col] = acc.get(col, 0) + value * X.data[l]
            rows.append(acc)
        return self._merge_rows(rows, X.shape[1])

    def add_sparse(self, X: 'SparseCSR') -> 'SparseCSR':
        '''Entry-wise sum of two matrices with the same shape.'''
        rows = []
        for i in range(self.shape[0]):
            acc = {}
            for src in (self, X):
                for k in range(src.indptr[i], src.indptr[i + 1]):
                    col = int(src.indices[k])
                    acc[col] = acc.get(col, 0) + src.data[k]
            rows.append(acc)
        return self._merge_rows(rows, self.shape[1])

    def slice_row(self, index: np.ndarray) -> 'SparseCSR':
        '''Keep the rows listed in ``index``, in that order.'''
        index = np.asarray(index, dtype=int)
        lengths = self.indptr[index + 1] - self.indptr[index]
        new_indptr = np.concatenate(([0], np.cumsum(lengths))).astype(int)
        if len(index):
            take = np.concatenate([np.arange(self.indptr[i], self.indptr[i + 1])
                                   for i in index])
        else:
            take = np.arange(0)
        return SparseCSR(self.data[take], self.indices[take], new_indptr,
                         (len(index), self.shape[1]))

    def slice_col(self, index: np.ndarray) -> 'SparseCSR':
        '''Keep the columns listed in ``index``, renumbered 0..len(index) - 1.'''
        index = np.asarray(index, dtype=int)
        col_map = np.full(self.shape[1], -1, dtype=int)  # -1 marks dropped columns
        col_map[index] = np.arange(len(index))
        keep = col_map[self.indices] >= 0
        return self._filter_entries(keep, col_map[self.indices],
                                    (self.shape[0], len(index)))

    def eliminate_zeros(self) -> 'SparseCSR':
        '''Return a copy without explicitly stored zero values.'''
        return self._filter_entries(self.data != 0, self.indices, self.shape)
# lab1_A_sparse.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Starbucks Capstone Challenge # # ### Introduction # # This data set contains simulated data that mimics customer behavior on the Starbucks rewards mobile app. Once every few days, Starbucks sends out an offer to users of the mobile app. An offer can be merely an advertisement for a drink or an actual offer such as a discount or BOGO (buy one get one free). Some users might not receive any offer during certain weeks. # # Not all users receive the same offer, and that is the challenge to solve with this data set. # # Your task is to combine transaction, demographic and offer data to determine which demographic groups respond best to which offer type. This data set is a simplified version of the real Starbucks app because the underlying simulator only has one product whereas Starbucks actually sells dozens of products. # # Every offer has a validity period before the offer expires. As an example, a BOGO offer might be valid for only 5 days. You'll see in the data set that informational offers have a validity period even though these ads are merely providing information about a product; for example, if an informational offer has 7 days of validity, you can assume the customer is feeling the influence of the offer for 7 days after receiving the advertisement. # # You'll be given transactional data showing user purchases made on the app including the timestamp of purchase and the amount of money spent on a purchase. This transactional data also has a record for each offer that a user receives as well as a record for when a user actually views the offer. There are also records for when a user completes an offer. # # Keep in mind as well that someone using the app might make a purchase through the app without having received an offer or seen an offer. 
# # ### Example # # To give an example, a user could receive a discount offer buy 10 dollars get 2 off on Monday. The offer is valid for 10 days from receipt. If the customer accumulates at least 10 dollars in purchases during the validity period, the customer completes the offer. # # However, there are a few things to watch out for in this data set. Customers do not opt into the offers that they receive; in other words, a user can receive an offer, never actually view the offer, and still complete the offer. For example, a user might receive the "buy 10 dollars get 2 dollars off offer", but the user never opens the offer during the 10 day validity period. The customer spends 15 dollars during those ten days. There will be an offer completion record in the data set; however, the customer was not influenced by the offer because the customer never viewed the offer. # # ### Cleaning # # This makes data cleaning especially important and tricky. # # You'll also want to take into account that some demographic groups will make purchases even if they don't receive an offer. From a business perspective, if a customer is going to make a 10 dollar purchase without an offer anyway, you wouldn't want to send a buy 10 dollars get 2 dollars off offer. You'll want to try to assess what a certain demographic group will buy when not receiving any offers. # # ### Final Advice # # Because this is a capstone project, you are free to analyze the data any way you see fit. For example, you could build a machine learning model that predicts how much someone will spend based on demographics and offer type. Or you could build a model that predicts whether or not someone will respond to an offer. Or, you don't need to build a machine learning model at all. 
You could develop a set of heuristics that determine what offer you should send to each customer (i.e., 75 percent of women customers who were 35 years old responded to offer A vs 40 percent from the same demographic to offer B, so send offer A). # # Data Sets # # The data is contained in three files: # # * portfolio.json - containing offer ids and meta data about each offer (duration, type, etc.) # * profile.json - demographic data for each customer # * transcript.json - records for transactions, offers received, offers viewed, and offers completed # # Here is the schema and explanation of each variable in the files: # # **portfolio.json** # * id (string) - offer id # * offer_type (string) - type of offer ie BOGO, discount, informational # * difficulty (int) - minimum required spend to complete an offer # * reward (int) - reward given for completing an offer # * duration (int) - time for offer to be open, in days # * channels (list of strings) # # **profile.json** # * age (int) - age of the customer # * became_member_on (int) - date when customer created an app account # * gender (str) - gender of the customer (note some entries contain 'O' for other rather than M or F) # * id (str) - customer id # * income (float) - customer's income # # **transcript.json** # * event (str) - record description (ie transaction, offer received, offer viewed, etc.) # * person (str) - customer id # * time (int) - time in hours since start of test. The data begins at time t=0 # * value - (dict of strings) - either an offer id or transaction amount depending on the record # # **Note:** If you are using the workspace, you will need to go to the terminal and run the command `conda update pandas` before reading in the files. This is because the version of pandas in the workspace cannot read in the transcript.json file correctly, but the newest version of pandas can. You can access the termnal from the orange icon in the top left of this notebook. 
# # You can see how to access the terminal and how the install works using the two images below. First you need to access the terminal: # # <img src="pic1.png"/> # # Then you will want to run the above command: # # <img src="pic2.png"/> # # Finally, when you enter back into the notebook (use the jupyter icon again), you should be able to run the below cell without any errors. # + import pandas as pd import numpy as np import math import json from sklearn.model_selection import train_test_split from sklearn.preprocessing import StandardScaler from sklearn.tree import DecisionTreeClassifier from sklearn.ensemble import RandomForestClassifier from sklearn.preprocessing import PolynomialFeatures from sklearn.metrics import mean_squared_error from sklearn.metrics import classification_report from time import time from sklearn.model_selection import GridSearchCV import matplotlib.pyplot as plt from sklearn.linear_model import Ridge from sklearn.tree import DecisionTreeRegressor # %matplotlib inline # read in the json files portfolio = pd.read_json('data/portfolio.json', orient='records', lines=True) profile = pd.read_json('data/profile.json', orient='records', lines=True) transcript = pd.read_json('data/transcript.json', orient='records', lines=True) # - # ## Data Exploration<br> # a. 
Offer portfolio data<br> # According to the information provided by Udacity, the schema is as follows:<br> # # portfolio.json<br> # # id (string) - offer id<br> # offer_type (string) - type of offer ie BOGO, discount, informational<br> # difficulty (int) - minimum required spend to complete an offer<br> # reward (int) - reward given for completing an offer<br> # duration (int) -<br> # channels (list of strings)<br> # Moreover, some further information given about the offers is that there are 3 different offer types: # <br> # BOGO - buy one get one free<br> # Discount - discount with purchase<br> # Informational - provides information about products<br> # <br>Thus, the schema is pretty straightforward, as it contains the attributes of 3 different offer types. While the duration was not explained I assumed from context that it is in terms of number of days. portfolio.head() # check, if there is a null item or missing values in dataframe portfolio.isnull().sum() #check number of unique offers in the dataframe portfolio.id.nunique() # Now group accroding to types of offer present and fetch the id count portfolio.groupby('offer_type')['id'].count() # ## b. Demographic data<br> # Demographic data for customers is provided in the profile dataset. 
The schema and variables are as follows: # <br> # #### profile.json<br> # # age (int) - age of the customer<br> # became_member_on (int) - date when customer created an app account<br> # gender (str) - gender of the customer (note some entries contain 'O' for other rather than M or F)<br> # id (str) - customer id<br> # income (float) - customer's income<br> # It is also relatively straightforward, as it contains the demographic profile on the customer.<br> # check the profile dataframe profile.head() # check for null values profile.isnull().sum() # We can see that there are gender and income column which has so many null values and other columns have no null values # Now, check the graph to see some visulisation for more understanding to the data profile.age.hist() # We can see that age 118 has no values #check count of age=118 value print(profile[profile['age']==118].count()) # corresponding gender and income columns profile[['gender','income','age']][profile['age']==118].head() # Now we can clearly see that for age 118 there is no value for gender and age. So there will be the possibility to drop the values # Now take a visual look on the income column profile.income.hist() # ## c. Transactional records<br> # The schema for the transactional data is as follows:<br> # # transcript.json<br> # # event (str) - record description (ie transaction, offer received, offer viewed, etc.)<br> # person (str) - customer id<br> # time (int) - time in hours. The data begins at time t=0<br> # value - (dict of strings) - either an offer id or transaction amount depending on the record # take a look on transcript table transcript.head() # Check the unique events in transcript transcript.event.unique() #check number of unique people represented transcript['person'].nunique() #check how many unique people are in dataset profile['id'].nunique() # We can see that number of person in transcript table is equal to the number of id in profile table. 
But we need to check the null values present in the transcript table

#check for null values
transcript.isnull().sum()

# It's great, we don't have any null values in the transcript table so we do not need to clean the data.

transcript=pd.concat([transcript, transcript['value'].apply(pd.Series)], axis=1)
transcript.head()

# +
#create new column to ensure only one offer_id column
transcript['offer_id_new']=np.where(transcript['offer id'].isnull() & transcript['offer_id'].notnull(),transcript['offer_id'],transcript['offer id'])

#drop unnecessary offer_id columns
transcript.drop(['offer id','offer_id'],axis=1,inplace=True)

#rename offer_id column
transcript.rename(columns={'offer_id_new':'offer_id'},inplace=True)
# -

transcript.head()

# ## Data Preprocessing Approach

# +
portfolio.rename(columns={'id':'offer_id'},inplace=True)

#join transcript with offer type
transcript=transcript.merge(portfolio,how='left',on='offer_id')
# -

transcript.groupby(['event','offer_type'])['offer_type'].count()

# <hr style='bold'>
# We know that there are 4 types of events: offer completed, offer received, offer viewed and transaction. But our data shows that we do not have any offer_id associated with transactions, because they are not recorded in the transcript event data. Thus, the first objective in data preprocessing is to define a methodology to assign offer_ids to specific transactions.
# <hr style='bold'>
#
# ## Data Preprocessing
# #### A. Assign offer ids to transactions<br>
#
# We have to explore methods to assign offer_ids to specific transactions. Among the considerations is to define the following main groups of customers:
#
# 1. People who are influenced and successfully convert - effective offers:
#
# 2. People who received and viewed an offer but did not successfully convert - ineffective offers:
#
# 3. People who purchase/complete offers regardless of awareness of any offers:
#
# 4.
People who received offers but no action taken: # #define dropcolumns function as I will be doing this many times def drop_cols(drop_cols,df,inplace=False): ''' inputs: - drop_cols: list or string of column name to be dropped - df: dataframe from which column should be dropped - inplace: specify whether columns are dropped in place or not outputs: - Returns dataframe with dropped columns. ''' df=df.drop(columns=drop_cols,axis=1,inplace=inplace) return df #drop unnecessary columns to clean dataset transcript=drop_cols(['reward_x','reward_y'],transcript) #sort events by person and time transcript=transcript.sort_values(['person','time']) #filter dataset for transactions that occur after an offer is viewed, forward fill offer ids by person offers_view_transacted=transcript[['time','offer_id','person','event']][(transcript['event']=='transaction') | (transcript['event']=='offer viewed')].groupby(['person','offer_id']).ffill() offers_view_transacted['offer_id']=offers_view_transacted['offer_id'].ffill() # Since the above temporary dataset is just a subset of the transcript dataset, I can create a new dataset with the filled in offer ids for transactions. 
transcript=transcript.merge(offers_view_transacted,how='left',on=['person','time','event']) # + #clean up dataset to unify multiple offer_id columns into one column transcript['offer_id']=np.where(transcript['offer_id_x'].isnull(),transcript['offer_id_y'],transcript['offer_id_x']) drop_cols(['offer_id_x','offer_id_y'],transcript,inplace=True); # - #merge portfolio dataset again to get offer data for the transaction events transcript=transcript.merge(portfolio,how='left',on='offer_id') transcript['duration']=np.where(transcript['duration_x'].isnull(),transcript['duration_y'],transcript['duration_x']) drop_cols(['duration_x','offer_type_x','difficulty_x','channels_x','duration_y'],transcript,inplace=True); transcript.rename(columns={'channels_y':'channels','reward_y':'reward','difficulty_y':'difficulty','offer_type_y':'offer_type'},inplace=True) #Check the transcript table transcript.head() # ## b. Flagging transactions and offers completed after offers viewed # The next important step for preparing our data for modeling and analysis is to identify a completed offer and transactions occurring after an offer is viewed. # # Once we have assigned a transaction occurring after an offer is viewed, I can use that information to subset my data according to the groups defined above, and analyse within each group. # # Using our dataset with the offer_ids populated for transaction events, we can flag the converted transactions and completed offers. We have to first ensure that the offer id of the previous event is the same one. Since we have tagged the offer id for all viewed, transactions and completed offers, we can use the offer_id field to ensure that the previous offer consists of those events. # # This means that as long as the events offer viewed,transaction, and offer completed occur in the same event space and are in the corrrect sequence of time, we can be assured that it is a transaction and/or completed offer occurring only after an offer is viewed. 
# # To do this, I created a new column to flag the previous offer id using pandas' shift function. # # #get sample space of events consisting of offer viewed, transactions and offer completed offers_viewed_transactions_completed=transcript[(transcript['event']=='offer viewed') | (transcript['event']=='transaction') | (transcript['event']=='offer completed')].copy() #add extra column to flag the previous offer id offers_viewed_transactions_completed['offer_id_previous'] = offers_viewed_transactions_completed.groupby(['person','offer_id'])['offer_id'].shift() #flag a completed transaction/offer completed as long as the previous offer id consists of events in the same sample space offers_viewed_transactions_completed['valid_completed']=np.where(offers_viewed_transactions_completed['offer_id_previous']==offers_viewed_transactions_completed['offer_id'],1,0) # + #get only offer received events offers_received=transcript[transcript['event']=='offer received'].copy() #ensure all columns are the same between datasets to be appended offers_received['offer_id_previous']=np.nan offers_received['valid_completed']=np.nan #append datasets to complete dataset of transactions transcript=offers_received.append(offers_viewed_transactions_completed) #sort values transcript=transcript.sort_values(['person','time']) # + #define function to split into 3 offer types def split(offer_type,grp_df): ''' Splits dataframe to groups of specified offer type. inputs: - offer_type: specify offer type name in string format - grp_df: original transcript dataframe to split on offer type outputs: - Returns dataframe containing data of just offer type. 
''' df=grp_df[grp_df['offer_type']==offer_type].copy() return df #split transcript into 3 different offer types transcript_bogo=split('bogo',transcript) transcript_discount=split('discount',transcript) transcript_info=split('informational',transcript) # - # Having assigned offer_ids for transactions for which an offer viewed event occurred prior, we can now revisit the four customer groups of unique person-offer_id pairs we are trying to analyse. # # Since we consider the conversion events of depending on offer type differently, we have to first separate the transcript into 3 different offer types, in order to accommodate for the different treatment in assigning the target variable.<br> # Within each offer type, we can already successfully separate every unique person-offer_id in group 1 from the others using our valid_completed column. Since we have flagged all conversion events (transaction or offer completed event depending on offer type) occurring after an offer viewed event, we can be assured that whichever conversion events are flagged with valid_completed=1 are at least within the first group (People who are influenced and successfully convert - effective offers). # # For BOGO and discount offers, we will only consider offer completed events as the conversion events, while we can consider transaction event as the conversion event for the informational offers. # + #since will do this for both BOGO and discount, define function for repeated operation def grp1(df): ''' Subsets dataframe to just group 1 members. inputs: - df: original transcript dataframe outputs: - Returns dataframe containing transcript data of just group 1 users. 
''' grp1=df[['person','offer_id']][(df['valid_completed']==1) & (df['event']=='offer completed')].groupby(['person','offer_id']).count().reset_index() return grp1 grp1_bogo=grp1(transcript_bogo) grp1_discount=grp1(transcript_discount) # + #again, we define a function as we will repeat this for 2 datasets - BOGO & discount def no_conv(df): ''' Takes in transcript dataframe of single offer type to check for people who converted vs people with just offer received events. inputs: - df: original transcript dataframe of specific offer type outputs: - Returns dataframe containing unqiue person-offer_id pairs with conversion events and offers received events, with indicator of each. Note: left_only indicator is just the offers received events, right_only is just conversion events ''' #subset offer ids that have transactions or conversions by person and offer_id conversion_ids=df[['person','offer_id']][(df['event']=='transaction') | (df['event']=='offer completed') ].groupby(['person','offer_id']).count().reset_index() #check for unique person-offer_id pairs that consist of offers received offers_received_only=df[['person','offer_id']][df['event']=='offer received'].groupby(['person','offer_id']).count().reset_index() #create merged dataset to diffrentiate groups check_merge=conversion_ids.merge(offers_received_only,how='right',on=['person','offer_id'],indicator=True) return check_merge #check how many are in either group check_merge_bogo=no_conv(transcript_bogo) print('For BOGO offers:') print(check_merge_bogo.groupby(['_merge']).count()) check_merge_discount=no_conv(transcript_discount) print('For Discount offers:') print(check_merge_discount.groupby(['_merge']).count()) # - # # We can see that there are definitely a fair number of unique person-offer_id pairs that have offer received events, but no conversion events. These would be considered offers in group 2 and 4 within each offer type, according to our definition above. 
# # People with an offer viewed event in this subset are definitely in group 2, as we can assume everyone with an offer viewed event has an offer received event prior. # + #define group 2 & 4 function as will repeat this for BOGO and discount offers def grp_2_4(df): ''' Takes in output dataframe from no_conv function to split into group 2 and 4 customers. inputs: - df: output dataframe from no_conv function outputs: - Returns 2 dataframes containing unique person-offer_id pairs with dataframe containing only group2 customers first, followed by dataframe containing only group 4 customers. ''' #subset to check group 2 and 4 grp_2_4=df[df['_merge']=='right_only'] #remerge with transcript to get events grp_2_4=grp_2_4.merge(transcript,how='left',on=['person','offer_id']) #within this subset, separate people with offer viewed event, and people with offer received but no offer viewed grp2=grp_2_4[['person','offer_id']][grp_2_4['event']=='offer viewed'].groupby(['person','offer_id']).count().reset_index() #remerge with full dataset and get remaining to get grp4 drop_cols('_merge',grp_2_4,inplace=True) grp4=grp_2_4.merge(grp2[['person','offer_id']],how='left',indicator=True) grp4=grp4[grp4['_merge']=='left_only'].copy() return grp2,grp4 grp2_bogo,grp4_bogo=grp_2_4(check_merge_bogo) grp2_discount,grp4_discount=grp_2_4(check_merge_discount) # - # # Group 3 people are everyone in the converted ids who do not have an offer viewed prior - hence, they would be people with conversion events but no offer viewed event prior. For BOGO and discount offers, they would be people with offer completed events that have valid_completed ! # + def grp3(df): ''' Takes in transcript dataframe of single offer type to check for people who converted vs people with just offer received events. 
inputs: - df: original transcript dataframe of specific offer type outputs: - Returns dataframe containing unqiue person-offer_id pairs with conversion events and offers received events, with indicator of each. ''' #check all conversion events with invalid conversions grp3=df[['person','offer_id']][(df['event']=='offer completed') & (df['valid_completed']!=1)].groupby(['person','offer_id']).count().reset_index() return grp3 grp3_bogo=grp3(transcript_bogo) grp3_discount=grp3(transcript_discount) # - # Now we have split our data into 4 different customer groups for the BOGO and discount offers. Next, we have to consider the effective and ineffective offers depending on the group type. As already elaborated above, any unique person-offer_id belonging to group 1 can be considered in our target variable effective_offer=1 group. # # Meanwhile, group 2 is in our target variable effective_offer=0 group. For customers in groups 3 and 4, I deprioritise them for model implementation, but will be doing some exploratory analysis on them later. # + def offers(grp1,grp2): ''' inputs: - grp1: dataframe containing group1 customer data - grp2: dataframe containing group2 customer data outputs: - Returns dataframe with labeled effective offer column ''' #assign effective offer flag column grp1['effective_offer']=1 grp2['effective_offer']=0 #append datasets together offers=grp1.append(grp2,sort=False) return offers offers_bogo=offers(grp1_bogo,grp2_bogo) offers_discount=offers(grp1_discount,grp2_discount) # - # ## c. Considering duration/validity of offers in converted transactions from informational offers # There is an additional rule to consider when considering an effective/converted transaction and offer. This applies for offers that are of type 'informational'. As already elaborated above, the reason why informational offers get a different treatment is because the conversion event is not an offer completed event, but a transaction. 
# # For informational offers, the duration of the offer can be considered to be the duration of the influence. Hence, we can make the assumption that an offer should only be considered effective if it is within the duration of the offer. # # Meanwhile, for BOGO and discount offers, we can assume that if there is a conversion/ offer completed event, it should be within duration as it would not make sense for an offer to be completed if an offer is past its validity period. # # As we saw in our data dictionary, the time of an event in the transcript data is in terms of hours. In order to ensure it is on the same scale as the duration of the offer, we have to convert it into days. #convert time into days transcript_info['day_offer']=transcript_info['time']/24 #drop unnecessary columns drop_cols(['time','value','offer_id_previous'],transcript_info,inplace=True) transcript_info=transcript_info.sort_values(['person','day_offer','event','offer_id']) #get difference in time for informational offers transcript_info['diff_info']=transcript_info[(transcript_info['offer_type']=='informational') & ((transcript_info['event']=='offer received') | (transcript_info['event']=='transaction'))].groupby(['person','offer_id'])['day_offer'].diff() # + #create column for flagging valid events transcript_info['valid_completed_duration']=np.nan #flag valid events if within duration transcript_info.loc[transcript_info['diff_info']<=transcript_info['duration'],'valid_completed_duration']=1 #fill any missing values with 0 flag transcript_info['valid_completed_duration']=transcript_info['valid_completed_duration'].fillna(value=0) # - # Scenarios 1 and 2 can be considered to be actions that would put the customer into our Group 3 of customers - People who purchase/complete offers regardless of awareness of any offers. 
# # For customers in Scenario 1, even though according to our valid_completed flag, they had viewed an offer prior to the transaction, but it is not within the duration, thus they are not 'influenced' by the offer. # # Meanwhile for customers in Scenario 2, they are in Group 3 as they completed transactions without viewing an offer. # # Scenario 4 can be considered in group 4, as they only consist of transactions. # # We will need to separate those users in group 2 - those who may have received and viewed an offer, but no transactions after. We need to subset those where effective_offer!=1 into groups 2,3 and 4. #flag effective_offers where valid_completed=1 and valid_completed_duration=1 transcript_info['effective_offer']=np.where(((transcript_info['valid_completed']==1) & (transcript_info['valid_completed_duration']==1)),1,0) #separate group 1 in transcript_into grp1_info=transcript_info[['person','offer_id']][transcript_info['effective_offer']==1].groupby(['person','offer_id']).sum().reset_index() #separate out group 2 of customers check_merge_info=no_conv(transcript_info) print('For informational offers:') print(check_merge_info.groupby(['_merge']).count()) grp2_info,grp4_info=grp_2_4(check_merge_info) #scenario 1 grp3_1=transcript_info[['person','offer_id']][(transcript_info['event']=='transaction')&(transcript_info['valid_completed']!=1) & (transcript_info['valid_completed_duration']==1)].groupby(['person','offer_id']).count().reset_index() #scenario 2 grp3_2=transcript_info[['person','offer_id']][(transcript_info['event']=='transaction')&(transcript_info['valid_completed']==1) & (transcript_info['valid_completed_duration']!=1)].groupby(['person','offer_id']).count().reset_index() grp3_info=grp3_1.append(grp3_2,sort=False) del grp3_1 del grp3_2 # Now we can append the datasets together to make the offers_info dataset, ready for modeling. offers_info=offers(grp1_info,grp2_info) # ## d. 
Feature engineering # Now we have to look back had to look into the features and see how to be creative in creating new features. # # 1. became_member_on column to be engineered<br> # Recalling my preliminary data exploration steps, the became_member_on column were in date format. Hence in order to extract meaningful insights from that feature, we can convert it as a feature indicating tenure of membership. There could be some influence in how long someone has been a member, with whether he takes up an offer. # + #rename column for merging profile.rename(columns={'id':'person'},inplace=True) #create function to reuse for 3 datasets def member(df): ''' inputs: - df: original dataframe to transform became_member_on column outputs: - Returns dataframe with became_member_on column transformed to be tenure in days ''' #merge to get user demographic profile df=df.merge(profile,how='left',on='person') #convert became_member_on into member tenure df['year']=pd.Series([int(str(x)[:4]) for x in df['became_member_on']]) df['month']=pd.Series([int(str(x)[-3]) for x in df['became_member_on']]) df['day']=pd.Series([int(str(x)[-2:]) for x in df['became_member_on']]) df=drop_cols('became_member_on',df) df.loc[df['year'] == 2018, 'membership_tenure_days'] = (30*df['month'])+df['day'] df.loc[df['year'] != 2018, 'membership_tenure_days'] = ((2018-df['year'])*365)+(30*df['month'])+df['day'] df=drop_cols(['year','month','day'],df) return df offers_bogo=member(offers_bogo) offers_discount=member(offers_discount) offers_info=member(offers_info) # + #group event=offer received per person in transactional records print(transcript[transcript['event']=='offer received'].groupby('person')['event'].count().head()) #visualise offers received per person transcript[transcript['event']=='offer received'].groupby('person')['event'].count().hist() # - # We can see above that the offer received per person in the transactional data could range from 1 to 6 offers received. 
I had the hypothesis that the frequency of offers received per person might result in more effective offers, so decided to engineer a feature offer_received_cnt to account for this frequency. # + #get count of offers received per person, put into separate dataset df_offer_received_cnt=transcript[transcript['event']=='offer received'].groupby(['person','offer_id','time']).count()['event'].reset_index() #rename columns df_offer_received_cnt.rename(columns={'event':'offer_received_cnt'},inplace=True) #drop unnecessary columns drop_cols('time',df_offer_received_cnt,inplace=True) #ensure only unique person-offer_id pairs df_offer_received_cnt=df_offer_received_cnt.groupby(['person','offer_id']).sum().reset_index() # - # 2 Separating user behaviours by transactions<br> # I also wondered how many transactions were considered 'invalid' by my definition. Ordinarily, these would be the sum of transactions done by people not in group 1. The objective of offers are to drive purchases, so it would already be the case that users with high spend in their transactions would be flagged as effective_offers. # # We've already defined that there are people in groups 3 and 4, where they are separate pools of users who are loyal spenders, and already tend to purchase more, isolated from the the effect of offers. # # But for users in group 1 have a high amount of 'invalid spend' outside of the effect of offers, there might be some predictive power onto the effectiveness of offers; since a loyal user might have a higher tendency of taking up an offer. # # In my datasets, I had already separated the transactions who are conversions versus transactions who are just the users' normal purchasing behaviour. This is through the valid_completed column, where I checked if a transaction had an offer viewed event prior. # # In the cases where valid_completed=1, I had already included them in my effective offers flag for BOGO and Discount offers. 
However, for those transctions where valid_completed=0, I have not considered them, and this could be a potential feature to include, as a proxy for the 'baseline' level of spending for a user. # # The logic is to wonder if there is some baseline level of spending for users who are highly influenced by certain offers (in group 1), and group 2, and if there is some predictive power in this baseline level of 'invalid transactions' that can predict the propensity of a user to take up an offer. #filter dataset by invalid transactions df_transactions_invalid=transcript[(transcript['event']=='transaction') & (transcript['valid_completed']==0)].groupby(['person','offer_id'])['amount'].sum().reset_index() df_transactions_invalid.rename(columns={'amount':'amount_invalid'},inplace=True) # 3. Time elapsed between offers received<br> # I also wanted to include time as a potential feature into my dataset, but since the transactional data starts from time=0, I suspected it would not have been of much predictive power without some feature engineering. I had the hypothesis that if there were multiple offers received per person within a certain time period, there might be some predictive power in the time elapsed between offers received. # + #convert time into days transcript['day_offer']=transcript['time']/24 #drop unnecessary columns drop_cols(['time'],transcript,inplace=True); #find time elapsed between offers received transcript['time_elapsed_offers']=transcript[transcript['event']=='offer received'].groupby(['person','offer_id'])['day_offer'].diff() #fill missing values with 0, as if someone does not receive an offer or is receiving an offer for the first time, there is no time elapsed transcript['time_elapsed_offers']=transcript['time_elapsed_offers'].fillna(value=0) #create temporary dataset df_time_elapsed=transcript.groupby(['person','offer_id'])['time_elapsed_offers'].sum().reset_index() # - # # f. 
Preparing data for implementation<br>
# Now we can finally begin with preparing the data for modeling.
#
# To do this, there are some additional preparation steps for each dataset. Recalling our initial preliminary data exploration, there are some steps to prepare the data:
#
# a. Merge with temporary datasets created above to include engineered features
#
# b. Drop missing values in gender column for demographic data; convert gender into dummy variables
#
# c. Separate the channel column into categorical variables
#
# d. Treatment of duplicate records

#merge to get offers received count and invalid amount transacted (engineered features)
offers_bogo=offers_bogo.merge(df_offer_received_cnt[['person','offer_id','offer_received_cnt']],how='left',on=['person','offer_id'])
offers_bogo=offers_bogo.merge(df_transactions_invalid[['person','offer_id','amount_invalid']],how='left',on=['person','offer_id'])

#check % of missing values in dataset
(offers_bogo.isnull().sum()/len(offers_bogo)*100).sort_values(ascending=False).head()

# +
#fill missing values for amount_invalid with 0 (NaN here means no invalid transactions for that pair)
offers_bogo['amount_invalid']=offers_bogo['amount_invalid'].fillna(value=0)

#drop income and gender null rows
offers_bogo.dropna(inplace=True);
# -

# #### Separate the channel column into categorical variables¶

# +
#foresee need to reuse function so create rename function
def rename(col_name,df):
    '''
    Convert a channel indicator column to a 1/0 flag, in place.

    After the channels list is expanded positionally, each channel column holds
    either the channel name itself or NaN; this maps name -> 1, anything else -> 0.

    inputs:
    - col_name: name of the channel column to binarise
    - df: dataframe to modify (mutated in place and also returned)
    outputs:
    - the same dataframe, with df[col_name] as 1/0 ints
    '''
    df[col_name]=np.where(df[col_name]==col_name,1,0)
    return df

#foresee need to reuse dummy variable encoding function
def dummy(df,col):
    '''
    One-hot encode a categorical column.

    inputs:
    - df: dataframe containing the column
    - col: column name to encode (dropped after encoding)
    outputs:
    - a new dataframe with col replaced by col_<value> dummy columns
    '''
    df=pd.concat([df[:],pd.get_dummies(df[col],prefix=col)],axis=1)
    df=drop_cols(col,df)
    return df

# +
#merge with portfolio to get offer details
offers_bogo=offers_bogo.merge(portfolio,how='left',on='offer_id')

#convert channels into categorical variables
# NOTE(review): this expands the channels list positionally and assumes the order
# web/email/mobile/social for BOGO offers -- verify against the portfolio data
channels = offers_bogo['channels'].apply(pd.Series)
channels = channels.rename(columns={0:'web',1:'email',2:'mobile',3:'social'})
offers_bogo=pd.concat([offers_bogo[:], channels[:]], axis=1)
rename('web',offers_bogo)
rename('email',offers_bogo)
rename('mobile',offers_bogo) rename('social',offers_bogo) offers_bogo=drop_cols('channels',offers_bogo) #convert gender into categorical variables offers_bogo=dummy(offers_bogo,'gender') # - # Since we need to repeat these steps for offers_discount, I created a function containing all the steps above. # + def prep_offers_df(df): ''' inputs: - df: original dataframe for modeling outputs: - Returns dataframe containing engineered features, filled missing values and cleaned and transformed variables (channel and gender) ''' #merge to get engineered features df=df.merge(df_offer_received_cnt[['person','offer_id','offer_received_cnt']],how='left',on=['person','offer_id']) df=df.merge(df_transactions_invalid[['person','offer_id','amount_invalid']],how='left',on=['person','offer_id']) #fill missing values for amount_invalid with 0 df['amount_invalid']=df['amount_invalid'].fillna(value=0) #drop income and gender null rows df.dropna(inplace=True); #merge with portfolio to get offer details df=df.merge(portfolio,how='left',on='offer_id') #convert channels into categorical variables channels = df['channels'].apply(pd.Series) channels = channels.rename(columns={0:'web',1:'email',2:'mobile',3:'social'}) df=pd.concat([df[:], channels[:]], axis=1) rename('web',df) rename('email',df) rename('mobile',df) rename('social',df) df=drop_cols('channels',df) #convert gender column into dummy variables df=dummy(df,'gender') return df # - #prepare data for offer_discounts offers_discount=prep_offers_df(offers_discount) # For offers_info dataset, a slightly different treatment needs to be done as the channels column contains a different order of values. 
# +
#merge with portfolio to get offer details
offers_info=offers_info.merge(portfolio,how='left',on='offer_id')

#reset index for offers_info
offers_info=drop_cols('index',offers_info.reset_index())

#expand channel column into categorical variables
def channel_col(name,df=offers_info):
    '''
    Add a 1.0/0.0 indicator column for one channel, based on whether the
    channel name appears in the row's channels value. Mutates df in place
    and returns it.

    NOTE: the default df is bound to offers_info at definition time, so calls
    without an explicit df always operate on that global dataframe.

    inputs:
    - name: name of channel column to be created (e.g. 'web')
    - df: dataframe with a 'channels' column
    outputs:
    - the same dataframe with the new indicator column added
    '''
    # row-wise membership test; float keeps the same dtype as the original
    # nan-then-fillna(0) construction it replaces
    df[name]=df['channels'].apply(lambda chans: 1.0 if name in chans else 0.0)
    return df

# +
channel_col('web')
channel_col('email')
channel_col('mobile')
channel_col('social');

drop_cols('channels',offers_info,inplace=True);

# +
#repurpose function for offers_info
def prep_offers_df(df):
    '''
    inputs:
    - df: dataframe to be transformed
    outputs:
    - Returns dataframe with engineered features and filled missing values, with transformed gender column.
    '''
    #merge to get engineered features
    df=df.merge(df_offer_received_cnt[['person','offer_id','offer_received_cnt']],how='left',on=['person','offer_id'])
    df=df.merge(df_transactions_invalid[['person','offer_id','amount_invalid']],how='left',on=['person','offer_id'])

    #fill missing values for amount_invalid and offer_received_cnt with 0
    df['amount_invalid']=df['amount_invalid'].fillna(value=0)

    #drop income and gender null rows
    df.dropna(inplace=True);

    #convert gender column into dummy variables
    df=dummy(df,'gender')

    return df
# -

offers_info=prep_offers_df(offers_info)

# check the result
offers_info.head()

# Since we have subset the data cleanly according to unique person-offer_id pairs by group, we should not have any duplicate records. But just in case, we check to make sure we have no duplicate records.
#check multiple records for each person and offer ids for the target variable print((offers_bogo.groupby(['person','offer_id','effective_offer']).size()>1).sum()) print((offers_discount.groupby(['person','offer_id','effective_offer']).size()>1).sum()) print((offers_info.groupby(['person','offer_id','effective_offer']).size()>1).sum()) # ## Implementation Part # Now that the datasets are ready, we can proceed to implementing the model. Revisiting our objective, we wanted to analyse the drivers of an effective offer, with the target variable being effective_offer. # # Since we have 3 offer types, there are thus 3 different models to be built. Since we are predicting whether an offer would be effective or not, this is effectively a binary classification supervised learning model. # # I decided to compare the performance of a simple decision tree classifier model as a baseline model, with an ensemble random forest classifier model. Reason why I selected a decision tree as the baseline model is because I wanted to prioritise the interpretability of the model. Going back to the objective, since we intend to analyse the feature importance to determine the drivers of an effective offer, a decision tree would provide good interpretability for us to analyse. # # Meanwhile, I also selected random forest as an alternate model to compare the baseline model is as an improvement over simple ensemble bagging of decision trees, in order to drive towards a high accuracy in training the model. # # Before we can proceed, we have to make sure that the classes we are predicting for are balanced in each dataset. #check for class balance in datasets print(offers_bogo[['person','effective_offer']].groupby('effective_offer').count()/len(offers_bogo)) print(offers_discount[['person','effective_offer']].groupby('effective_offer').count()/len(offers_discount)) print((offers_info[['person','effective_offer']].groupby('effective_offer').count()/len(offers_info))) # #### 1. 
Model Implementation

def data_prep(df,drop_cols_prep):
    '''
    Split a prepared offers dataframe into features and target.

    inputs:
    - df: prepared dataframe for modeling
    - drop_cols_prep: list of column names to exclude from the feature set
    outputs:
    - Returns 2 dataframes - features and target dataframes
    '''
    # Split the data into features and target label
    target = df['effective_offer']
    features = drop_cols(drop_cols_prep,df)
    return features,target

#prepare model pipeline
def model_pipeline(features,target):
    '''
    inputs:
    - features & target dataframe
    outputs:
    - Splits features and target dataframe to train and test sets, performs feature scaling on both datasets.
    - Outputs X_train, X_test, y_train and y_test dataframes
    '''
    #split into training and test sets
    X_train, X_test, y_train, y_test = train_test_split(features,target, test_size=0.20, random_state=42)
    #fit and transform scaling on training data (scaler fitted on train only to avoid leakage)
    scaler=StandardScaler()
    X_train=scaler.fit_transform(X_train)
    #scale test data
    X_test=scaler.transform(X_test)
    return X_train,X_test,y_train, y_test

# +
def train_predict(learner, X_train, y_train, X_test, y_test):
    '''
    Fit one classifier, predict on train and test sets, and print/collect
    timing and accuracy metrics.

    inputs:
       - learner: the learning algorithm to be trained and predicted on
       - X_train: features training set
       - y_train: effective_offer labels for the training set
       - X_test: features testing set
       - y_test: effective_offer labels for the testing set
    outputs:
       - dict with train_time, pred_time, training_score and testing_score
    '''
    results = {}

    #Fit the learner to the training data and get training time
    start = time()
    learner = learner.fit(X_train, y_train)
    end = time()
    results['train_time'] = end-start

    # Get predictions on the test set (X_test) and on the full training set
    start = time()
    predictions_test = learner.predict(X_test)
    predictions_train = learner.predict(X_train)
    end = time()

    # Calculate the total prediction time (covers both train and test predictions)
    results['pred_time'] = end-start

    #add training accuracy to results
    results['training_score']=learner.score(X_train,y_train)

    #add testing accuracy to results
    results['testing_score']=learner.score(X_test,y_test)

    print("{} trained on {} samples.".format(learner.__class__.__name__, len(y_train)))
    print("MSE_train: %.4f" % mean_squared_error(y_train,predictions_train))
    print("MSE_test: %.4f" % mean_squared_error(y_test,predictions_test))
    print("Training accuracy:%.4f" % results['training_score'])
    print("Test accuracy:%.4f" % results['testing_score'])
    print(classification_report(y_test, predictions_test,digits=4))

    return results
# -

def run_model(clf1,clf2,name):
    '''
    Train and evaluate two classifiers on the current train/test split.

    NOTE(review): reads X_train, X_test, y_train, y_test from the enclosing
    (notebook-global) scope, so model_pipeline must be run immediately before.

    inputs:
    - clf1: first classifier model
    - clf2: 2nd classifier model for comparison
    - name: name of models for comparison
    outputs:
    - Dataframe of results from model training and prediction
    '''
    # Collect results on the learners
    results = {}
    for clf in [clf1, clf2]:
        clf_name = clf.__class__.__name__ + '_' +name
        results[clf_name] = {}
        results[clf_name]= train_predict(clf, X_train, y_train, X_test, y_test)
    return pd.DataFrame(results)

# #### BOGO offers model
# First we try to build the BOGO offers model. I initialize the models with some randomly chosen parameters to check the initial performance. If performance needs to be improved further, I will attempt Grid Search to find the optimal parameters.

# +
drop_cols_prep=['person','offer_id','effective_offer','offer_type']
features,target=data_prep(offers_bogo,drop_cols_prep)
X_train, X_test, y_train, y_test=model_pipeline(features,target)

#Initialize the model - baseline is DT model, bogo_1 model is RF model
baseline = DecisionTreeClassifier(criterion='entropy',max_depth=5,random_state=2,min_samples_split=90,min_samples_leaf=50)
bogo_1 = RandomForestClassifier(random_state=2,max_depth= 11, max_features= 'auto',min_samples_split= 10,n_estimators=20,min_samples_leaf=20)

results=run_model(baseline,bogo_1,'bogo_1')
# -

# The accuracy for Random Forest Classifier (RF) model actually ends up outperforming the Decision Tree Classifier (DT) model slightly, but overall the performance for both models is about the same (82.14% vs 81.77% respectively in terms of accuracy).
Accuracy for a first attempt is quite good, more than 80%. I will try to tune the model further to get a better accuracy. # #### 2. Discount offers model # + drop_cols_prep=['person','offer_id','effective_offer','offer_type'] features,target=data_prep(offers_discount,drop_cols_prep) X_train, X_test, y_train, y_test=model_pipeline(features,target) #Initialize the model discount_1 = RandomForestClassifier(random_state=2,max_depth= 20, max_features= 'auto',min_samples_split= 10,n_estimators=20,min_samples_leaf=10) results=pd.concat([results[:],run_model(baseline,discount_1,'discount_1')],axis=1) # - # This time, the Random Forest Classifier model also has a better performance compared to the Decision Tree Classifier in terms of accuracy (87.23% vs 86.72%), and the F1 score is also lower (81.43% vs 82.87%). # # # #### 3. Informational offers model # + features,target=data_prep(offers_info,drop_cols_prep) X_train, X_test, y_train, y_test=model_pipeline(features,target) #Initialize the model info_1 = RandomForestClassifier(random_state=5,criterion='gini',max_depth= 20, max_features= 'auto',min_samples_split= 10,n_estimators=20,min_samples_leaf=10) results=pd.concat([results[:],run_model(baseline,info_1,'info_1')],axis=1) # - # The performance for these models are worse compared to the other 2 datasets, with accuracy below 80% for both models, but RF model still performing better. The F1 score is also worse, at 67.54% RF Classifier, worse than the DT model at 68.66%. # One potential reason for the worse performance is perhaps due to the fact that I had the key assumption to assign the conversion events to be transactions that only occur after an offer is viewed and within the specified duration; I might have missed out on some valuable information by removing those transactions that occur regardless. 
We can see this from how the overall sample dataset is smaller (about half) the datasets for the other 2 offers, with only about 5K samples compared to about 10K for both BOGO and discount respectively. # ## Refinement # In refining the model, I will first try parameter tuning for the 3 RF models, before expreimenting with removing or adding features to improve model performance. #define function to find best model results for each offer type def best_model(offer_type): ''' input: - offer_type: string of offer type name output: - dataframe containing results of best model so far ''' print('For ' + offer_type + ' RF model:') return results.transpose()[results.transpose()['testing_score']==results.transpose()[results.transpose().index.str.contains("RandomForestClassifier_"+offer_type)]['testing_score'].max()] # #### Grid Search methods # For all three offers, the Random Forest model had relatively good performance, so I used Grid Search on this to determine the best parameters. #define Grid Search function def rand_forest_param_selection(X,y): ''' input: - X,y: training datasets for X and y output: - dictionary with best parameters for random forest model ''' param_grid={'max_features': ['auto', 'sqrt'], 'max_depth' : [5,10,15,20], 'n_estimators': [10,20,25,30,40,50], 'min_samples_split': [2, 10, 20], 'min_samples_leaf': [2, 10,15, 20], } grid_search = GridSearchCV(RandomForestClassifier(random_state=2), param_grid) grid_search.fit(X, y) grid_search.best_params_ return grid_search.best_params_ #define BOGO dataset features,target=data_prep(offers_bogo,drop_cols_prep) X_train, X_test, y_train, y_test=model_pipeline(features,target) rand_forest_param_selection(X_train, y_train) # Now that we have the optimal parameters for the BOGO model, I run my model again with the new parameters, keeping the DecisionTree baseline model with the same parameters as comparison. 
# + features,target=data_prep(offers_bogo,drop_cols_prep) X_train, X_test, y_train, y_test=model_pipeline(features,target) #Initialize the model bogo_2 = RandomForestClassifier(random_state=2,max_depth= 10, max_features= 'auto',min_samples_split= 20,n_estimators=30,min_samples_leaf=2) results=pd.concat([results[:],run_model(baseline,bogo_2,'bogo_2')],axis=1) # - results[['RandomForestClassifier_bogo_1','RandomForestClassifier_bogo_2']] #find best model so far for BOGO offer type best_model('bogo') # The accuracy for the RF model increased slightly - from 82.14% to 82.51%, and the F1 score increased from 75.91% to 77.64%. This is a good performance increase but minimal, which indicates that perhaps there's not much that can be done to improve the performance of the model with parameter tuning. #define discount dataset features,target=data_prep(offers_discount,drop_cols_prep) X_train, X_test, y_train, y_test=model_pipeline(features,target) rand_forest_param_selection(X_train, y_train) # + X_train, X_test, y_train, y_test=model_pipeline(features,target) #Initialize the model discount_2 = RandomForestClassifier(random_state=2,max_depth= 10, max_features= 'auto',min_samples_split= 20,n_estimators=30,min_samples_leaf=2) results=pd.concat([results[:],run_model(baseline,discount_2,'discount_2')],axis=1) # - results[['RandomForestClassifier_discount_1','RandomForestClassifier_discount_2']] #find best model so far for discount offer type best_model('discount') # The accuracy of the model increaased slightly, from 87.23% to 87.47%, and the F1 score improved from 81.43% to 82.06%. The good thing is that now both the accuracy and the F1 score for the RF model is better than the DT model. # # But because the increase was minimal, again we can conclude that tuning the parameters won't really improve the performance of the model significantly. 
#define info dataset features,target=data_prep(offers_info,drop_cols_prep) X_train, X_test, y_train, y_test=model_pipeline(features,target) rand_forest_param_selection(X_train, y_train) # + features,target=data_prep(offers_info,drop_cols_prep) X_train, X_test, y_train, y_test=model_pipeline(features,target) #Initialize the model info_2 = RandomForestClassifier(random_state=2,max_depth= 15, max_features= 'auto',min_samples_split= 2,n_estimators=20,min_samples_leaf=15) results=pd.concat([results[:],run_model(baseline,info_2,'info_2')],axis=1) # - results[['RandomForestClassifier_info_1','RandomForestClassifier_info_2']] #find best model so far for info offer type best_model('info') # Again we see some improvement in accuracy for RF model, from 75.09% to 75.30%, and slight increase in F1 score from 67.54% to 67.78%. This improvement is minimal,so we look into improving the feature selection of the model. # #### Removing sparse features # In terms of feature selection, I wanted to try and see if removing the amount_invalid variable, which we had noted as being sparse, hence may not be useful in predicting the effectiveness of offers, would help. # # I removed the feature from my data prep and retrained the model using the same optimal parameters found via GridSearch, with the DT model as a baseline. 
# + #add amount_invalid variable to drop_cols_prep list drop_cols_prep=['person','offer_id','effective_offer','offer_type','amount_invalid'] #train BOGO model features,target=data_prep(offers_bogo,drop_cols_prep) X_train, X_test, y_train, y_test=model_pipeline(features,target) #Initialize the model bogo_3 = RandomForestClassifier(random_state=2,max_depth= 10, max_features= 'auto',min_samples_split= 20,n_estimators=30,min_samples_leaf=2) results=pd.concat([results[:],run_model(baseline,bogo_3,'bogo_3')],axis=1) # - results[['RandomForestClassifier_bogo_2','RandomForestClassifier_bogo_3']] #find best model so far for BOGO offer type best_model('bogo') # + #train discount model features,target=data_prep(offers_discount,drop_cols_prep) X_train, X_test, y_train, y_test=model_pipeline(features,target) #Initialize the model discount_3 = RandomForestClassifier(random_state=2,max_depth= 10, max_features= 'auto',min_samples_split= 20,n_estimators=30,min_samples_leaf=2) results=pd.concat([results[:],run_model(baseline,discount_3,'discount_3')],axis=1) # - results[['RandomForestClassifier_discount_2','RandomForestClassifier_discount_3']] #find best model so far for discount offer type best_model('discount') # Accuracy of the model actually increased while F1 model remained the same. In this case, I will also remove the amount_invalid feature for the discount model. 
# + #train info model features,target=data_prep(offers_info,drop_cols_prep) X_train, X_test, y_train, y_test=model_pipeline(features,target) #Initialize the model info_3 = RandomForestClassifier(random_state=2,max_depth= 15, max_features= 'auto',min_samples_split= 2,n_estimators=20,min_samples_leaf=15) results=pd.concat([results[:],run_model(baseline,info_3,'info_3')],axis=1) # - results[['RandomForestClassifier_info_2','RandomForestClassifier_info_3']] #find best model so far for info offer type best_model('info') # Accuracy and F1 score of the model actually decreased here for info model, so I will also keep the feature in. This is expected since the model had already a worse performance compared to the other 2 models, so the model is slightly underfitting compared to the others. Hence the model needs more features to learn to predict better. # #### Dropping one level of dummy variables/one-hot encoding # In scikitlearn implementations of RF and DT, one has to encode the variables. So I decided to test my model performance if I were to drop one level of my categorical variables (in my data - the channel variables and the gender variables), just to reduce the sparsity and noise in the data for my model. 
# + #add one level of dummy variables to drop column drop_cols_prep=['person','offer_id','effective_offer','offer_type','amount_invalid','social','gender_O'] features,target=data_prep(offers_bogo,drop_cols_prep) X_train, X_test, y_train, y_test=model_pipeline(features,target) #Initialize the model - reuse best performing model - bogo_4 = RandomForestClassifier(random_state=2,max_depth= 10, max_features= 'auto',min_samples_split= 20,n_estimators=30,min_samples_leaf=2) results=pd.concat([results[:],run_model(baseline,bogo_4,'bogo_4')],axis=1) # - results[['RandomForestClassifier_bogo_3','RandomForestClassifier_bogo_4']] #find best model so far for BOGO offer type best_model('bogo') # Performance of this model was not as good as previous model - hence I will keep alll levels of variables in. # + features,target=data_prep(offers_discount,drop_cols_prep) X_train, X_test, y_train, y_test=model_pipeline(features,target) #Initialize the model - reuse best performing model - discount_4 = RandomForestClassifier(random_state=2,max_depth= 10, max_features= 'auto',min_samples_split= 20,n_estimators=30,min_samples_leaf=2) results=pd.concat([results[:],run_model(baseline,discount_4,'discount_4')],axis=1) # - results[['RandomForestClassifier_discount_3','RandomForestClassifier_discount_4']] #find best model so far for discount offer type best_model('discount') # + features,target=data_prep(offers_info,drop_cols_prep) X_train, X_test, y_train, y_test=model_pipeline(features,target) #Initialize the model - reuse best performing model - info_4 = RandomForestClassifier(random_state=2,max_depth= 15, max_features= 'auto',min_samples_split= 2,n_estimators=20,min_samples_leaf=15) results=pd.concat([results[:],run_model(baseline,info_4,'info_4')],axis=1) # - results[['RandomForestClassifier_info_3','RandomForestClassifier_info_4']] #find best model so far for info offer type best_model('info') # Overall, we have seen that there is not much improvement in model performance just by reducing 
one level of categorical features. I am quite satisfied with the performance of the BOGO and discount models, but want to explore if I can improve the performance of the info model. # #### Using polynomial feature # Since a low accuracy score for the info model is likely due to the model underfitting, I decided to attempt if transforming the features further might improve model performance. # # I tweaked my model_pipeline function to include the polynomial features transformation to my features. # + #prepare model pipeline def model_pipeline_poly(features,target,poly_feat=0): ''' input: - features & target dataframes - poly_feat: number of degrees to transform polynomial features output: - X_train, X_test, y_train, y_test dataframes ''' #split into training and test sets X_train, X_test, y_train, y_test = train_test_split(features,target, test_size=0.20, random_state=42) #fit and transform training data poly = PolynomialFeatures(poly_feat) X_train_poly=poly.fit_transform(X_train) #transform test data X_test_poly=poly.transform(X_test) #fit and transform scaling on training data scaler=StandardScaler() X_train=scaler.fit_transform(X_train_poly) #scale test data X_test=scaler.transform(X_test_poly) return X_train,X_test,y_train, y_test # + #keep amount_invalid in offers_info dataset drop_cols_prep=['person','offer_id','effective_offer','offer_type'] features,target=data_prep(offers_info,drop_cols_prep) X_train, X_test, y_train, y_test=model_pipeline_poly(features,target,2) #Initialize the model info_5 = RandomForestClassifier(random_state=2,max_depth= 15, max_features= 'auto',min_samples_split= 2,n_estimators=20,min_samples_leaf=15) results=pd.concat([results[:],run_model(baseline,info_5,'info_5')],axis=1) # - results[['RandomForestClassifier_info_2','RandomForestClassifier_info_5']] #find best model so far for info offer type best_model('info') # We can see that performance actually decreased slightly for the RF model. 
Hence it would perhaps be a better idea to just keep the model as is. A maximum accuracy of 74.80% is acceptable for the info offers, even though it is not as high as the BOGO or discount offers. After all, we already included some assumptions for the 'influence' of the offer based on the duration. results.loc[['training_score','testing_score'],['RandomForestClassifier_info_1','RandomForestClassifier_info_2','RandomForestClassifier_info_3','RandomForestClassifier_info_4','RandomForestClassifier_info_5']].transpose().plot.line() plt.title('Training and Testing score for RF info models') plt.show() # A note however, we can above actually see the model is performing better in the training accuracy as we add more variables for each model via polynomial features and removing the amount_invalid feature. It is just that the testing accuracy was reducing, and we can see this is due to overfitting. # # I can improve the accuracy and performance of the info model further by using RF info model 5, but adding more data, as we already noted the dataset for the offers_info dataset is half the size of the BOGO and discount datasets. Hence, ultimately with more data and with performance tuning, removing unnecessary variables and feature transformation, with more data I could have ultimately got the performance of the model perhaps above 80% # #### Discussion on best models and feature importances: # Now that I am done with refining the 3 models, we can check the results for our best models for all 3 and check the feature importances to see the top drivers of effectiveness of offers. 
#get best model overall for bogo,discount and info offers
best_model('bogo').append([best_model('discount'),best_model('info')]).transpose()

# Overall, we can see that the top performing models are the 3rd model (with GridSearch to find optimal model parameters and removing amount_invalid column) for predicting effectiveness of BOGO and discount offers, whereas the best performing model for informational offers was just after performing GridSearch to find the optimal parameters.
#
# In order to find the most influential drivers of an effective offer, we can check the feature importances of our best models above.

# +
#show feature importance

#BOGO 3 model
#prepare data same as BOGO 3 state
drop_cols_prep=['person','offer_id','effective_offer','offer_type','amount_invalid']
features,target=data_prep(offers_bogo,drop_cols_prep)

feature_importances = pd.DataFrame(bogo_3.feature_importances_, index = features.columns, columns=['importance']).sort_values('importance',ascending=False)
feature_importances.plot.bar()
plt.title('Best BOGO model feature importance')
plt.show()

#discount 3 model
#NOTE(review): reuses the BOGO feature frame for the column labels; this is
#only valid because the discount prep drops the same columns — confirm.
feature_importances = pd.DataFrame(discount_3.feature_importances_, index = features.columns, columns=['importance']).sort_values('importance',ascending=False)
feature_importances.plot.bar()
plt.title('Best discount model feature importance')
plt.show()

#info_2 model
#prepare data similar to info_2 state
#(bug fix: this previously prepped offers_discount, so the importance bars for
#the info model were labelled with the discount dataset's columns instead of
#the dataset info_2 was actually trained on)
drop_cols_prep=['person','offer_id','effective_offer','offer_type']
features,target=data_prep(offers_info,drop_cols_prep)

#print feature importance
feature_importances = pd.DataFrame(info_2.feature_importances_, index = features.columns, columns=['importance']).sort_values('importance',ascending=False)
feature_importances.plot.bar()
plt.title('Best info model feature importance')
plt.show()
# -

# Checking on the feature importance to analyse the main drivers of an effective offer, we can see that the most important driver of effective offers across all three are the tenure of
membership. However, the 2nd most important feature is different for each of the three models. # # For a BOGO offer, the membership tenure is the most important feature, and the other variables are a lot smaller in proportions. Income, age and offer_received_cnt are the 2nd, 3rd and 4th most important features, but their proportions are very small. # # For a discount offer, after the membership tenure, age and income are the next most important variables. But it is still very small in proportions. # # The feature importances for the informational offer models are more distributed compared to the BOGO and discount models, with income being the 2nd most important feature. Age is the third and mobile channel interestingly being the 4th. # ### Exploration on users in Groups 3 and 4 - People who purchase regardless of viewing any offers # #### Data Preparation # It would be interesting to see how people in groups 3 and 4 contrast with people in groups 1 and 2, so I decided to compare between all 3. # # First, I need to append the data from all groups from the three offer types together, then compare the characteristics of each group via visualizations. 
# + #append datasets together #grp 3+4 grp3_4=grp3_bogo.append(grp3_discount,sort=False) grp3_4=grp3_4.append(grp3_info,sort=False) grp3_4=grp3_4.append(grp4_bogo,sort=False) grp3_4=grp3_4.append(grp4_discount,sort=False) grp3_4=grp3_4.append(grp4_info,sort=False) #grp1 grp1_all=grp1_bogo.append(grp1_discount,sort=False) grp1_all=grp1_all.append(grp1_info,sort=False) #grp2 grp2_all=grp2_bogo.append(grp2_discount,sort=False) grp2_all=grp2_all.append(grp2_info,sort=False) #get unique person-offer_id pairs grp3_4=grp3_4[['person','offer_id']].groupby(['person','offer_id']).count().reset_index() grp1_all=grp1_all[['person','offer_id']].groupby(['person','offer_id']).count().reset_index() grp2_all=grp2_all[['person','offer_id']].groupby(['person','offer_id']).count().reset_index() #get membership_tenure_days grp3_4=member(grp3_4) grp1_all=member(grp1_all) grp2_all=member(grp2_all) #merge with transcript to check transaction amount grp3_4=grp3_4.merge(transcript[['person','offer_id','amount']].groupby(['person','offer_id']).sum(),on=['person','offer_id'],how='left') grp1_all=grp1_all.merge(transcript[['person','offer_id','amount']].groupby(['person','offer_id']).sum(),on=['person','offer_id'],how='left') grp2_all=grp2_all.merge(transcript[['person','offer_id','amount']].groupby(['person','offer_id']).sum(),on=['person','offer_id'],how='left') # + #check null values print("For grp 3 and 4:") print((grp3_4.isnull().sum()/len(grp3_4))*100) #drop null values grp3_4=grp3_4.dropna() #check null values print("For grp 1:") print((grp1_all.isnull().sum()/len(grp1_all))*100) #drop null values grp1_all=grp1_all.dropna() #check null values print("For grp 2:") print((grp2_all.isnull().sum()/len(grp2_all))*100) #drop null values grp2_all=grp2_all.dropna() # - #check size of groups print("Size of group 1: "+ str(len(grp1_all['person']))) print("Size of group 3+4: "+ str(len(grp3_4['person']))) print("Size of group 2: "+ str(len(grp2_all['person']))) # #### Exploration of demographic 
characteristics #create function for plotting multiple histograms overlaying the 3 groups def plot_hist(variable,bins=None): plt.hist(grp1_all[variable],alpha=0.5, label='group 1',bins=bins) plt.hist(grp3_4[variable], alpha=0.5, label='group 3 and 4',bins=bins) plt.hist(grp2_all[variable], alpha=0.5, label='group 2',bins=bins) plt.legend(loc='upper right') plt.title('distribution of '+ variable + ' between group 1, group 2 and groups 3 + 4') plt.show() #plot distribution of income plot_hist('income') # Across the 3 segments, most people fall within the middle range of income (50K - 100K). The income distribution between the 3 segments are relatively similar. #plot ditribution of age plot_hist('age') # Age distribution looks relatively similar between the 3 groups as well, with most people between the age 40-80 years old. #plot tenure of membership plot_hist('membership_tenure_days') # Group 2 are people who did not spend at all as the offers were ineffective on them, hence they are not in the graph. But for groups 1 and 3+4, we can see that the amount spent is relatively similar, except that people in group 1 spent slightly more. This is to be expected as we might expect that the offers managed to incentivise them to purchase more, hence their overall spend increased. #plot distribution of amount spent given an effective offer plot_hist('amount',bins=1) # The distribution of membership tenure also looks similar between the 3 segments, with most people between 0-700 days of tenure. It appears as though there are not much demographic characteristic differences between the 3 groups, at least in the current data provided. # ### Potential all-in-one model # could we predict the effectiveness of an offer if the offer type was included as a categorical feature. Would the type of offer affect the user's responsiveness? 
# + #append datasets together offers_bogo['offer_type']='bogo' offers_info['offer_type']='informational' offers_discount['offer_type']='discount' offers=offers_discount.append(offers_bogo,sort=False) offers=offers.append(offers_info,sort=False) #create dummy variable for offer_type categorical variable offers=dummy(offers,'offer_type') # + #do grid search to find optimal parameters for RF model drop_cols_prep=['person','offer_id','effective_offer','amount_invalid'] features,target=data_prep(offers,drop_cols_prep) X_train, X_test, y_train, y_test=model_pipeline(features,target) rand_forest_param_selection(X_train, y_train) # + drop_cols_prep=['person','offer_id','effective_offer','amount_invalid'] features,target=data_prep(offers,drop_cols_prep) X_train, X_test, y_train, y_test=model_pipeline(features,target) #Initialize the model all_in_one = RandomForestClassifier(random_state=5,criterion='gini',max_depth= 20, max_features= 'auto',min_samples_split= 2,n_estimators=50,min_samples_leaf=15) results=pd.concat([results[:],run_model(baseline,all_in_one,'all_in_one')],axis=1) # - #comparing best performance of all 3 models with all_in_one model results[['RandomForestClassifier_bogo_3','RandomForestClassifier_discount_3','RandomForestClassifier_info_2','DecisionTreeClassifier_all_in_one','RandomForestClassifier_all_in_one']] results.loc[['testing_score'],['RandomForestClassifier_bogo_3','RandomForestClassifier_discount_3','RandomForestClassifier_info_2','DecisionTreeClassifier_all_in_one','RandomForestClassifier_all_in_one']].plot.bar() plt.title('Comparing testing set accuracy score for the 3 models vs all-in-one model') plt.legend(loc=3) plt.show() # Comparing the performance of the 3 best models for each offer type with the all_in_one model, we can se that having the all-in-one model is not as good as the RF bogo and discount models, and is about slightly better than the info model. 
This is probably due to the info model pulling down the performance, resulting in lower accuracy for the all in one model. I suspect that if we were to break down the all-in-one model performance to just looking at its ability to predict the effectiveness of informational offer types, it would also be worse than its performance predicting the other 2 types. # If we take a step back and look at the big picture, it is more useful to have a higher accuracy for 3 separate models, as opposed to one all-in-one model. This is because the BOGO and discount offers are actually aimed at driving sales with some promotional cost, whereas the informational offer is essentially 'free' with no cost, and if they can drive sales that would be a bonus. # # Hence, I would actually suggest that the 3 separate models are more useful. # #### Given an effective offer, can we predict how much someone would spend? # In addition to the all-in-one model, since we already kept the datasets of effective transactions, I was curious to know if I could build a regression model to predict how much someone would spend, given an effective offer. I could have built a model separately for each offer type to predict their spend, but I was curious to know if the type of offer would also determine a user's level of spend. # # To do this, we have already assigned effective offers based on group 1 customers. From there, we just need to sum up their amount of spend driven by offers to see if we can predict how much someone would spend depending on the offer type. 
# +
#append all 3 datasets together
grp1=grp1_bogo.append(grp1_discount,sort=False)
grp1=grp1.append(grp1_info,sort=False)

#drop unnecessary columns
drop_cols('effective_offer',grp1,inplace=True)

#get offer details
grp1=grp1.merge(portfolio,how='left',on='offer_id')
# -

#get sum of valid transactions per person based on unique person and offer_id pair
grp1=grp1.merge(transcript[['person','offer_id','amount']][transcript['valid_completed']==1].groupby(['person','offer_id']).sum(),on=['person','offer_id'])

# +
#get demographic data and membership_tenure details
grp1=member(grp1)

#reset index for offers_info
grp1=drop_cols('index',grp1.reset_index())

#reuse offers_info channel_col function to expand channel column into categorical variables
channel_col('web',grp1)
channel_col('email',grp1)
channel_col('mobile',grp1)
channel_col('social',grp1);
drop_cols('channels',grp1,inplace=True);

#reuse offers_info function to prep dataset
grp1=prep_offers_df(grp1)

#encode offer type as dummy variables
grp1=dummy(grp1,'offer_type')
# -

#add one level of dummy variable to drop
drop_cols_prep=['person', 'offer_id','amount','social','gender_O','offer_type_informational']
target=grp1['amount']
features=drop_cols(drop_cols_prep,grp1)


#tweak train_predict function -
def train_predict_reg(learner, X_train, y_train, X_test, y_test):
    '''
    Fit a regression learner, time the fit/predict steps and report MSE plus
    score (R^2 for sklearn regressors) on the train and test folds.

    inputs:
       - learner: the regression algorithm to be trained and predicted on
       - X_train: features training set
       - y_train: target (amount spent) training set
       - X_test: features testing set
       - y_test: target (amount spent) testing set
    output:
       - results: dict with keys train_time, pred_time, training_score
         and testing_score
    '''
    results = {}

    #Fit the learner to the training data and get training time
    start = time()
    learner = learner.fit(X_train, y_train)
    end = time()
    results['train_time'] = end-start

    # Get predictions on the test set (X_test), then on the full training set
    start = time()
    predictions_test = learner.predict(X_test)
    predictions_train = learner.predict(X_train)
    end = time()

    # Calculate the total prediction time
    results['pred_time'] = end-start

    #add training score (learner.score) to results
    results['training_score']=learner.score(X_train,y_train)

    #add testing score to results
    results['testing_score']=learner.score(X_test,y_test)

    print("{} trained on {} samples.".format(learner.__class__.__name__, len(y_train)))
    print("MSE_train: %.4f" % mean_squared_error(y_train,predictions_train))
    print("MSE_test: %.4f" % mean_squared_error(y_test,predictions_test))
    print("Training accuracy:%.4f" % results['training_score'])
    print("Test accuracy:%.4f" % results['testing_score'])

    return results


def run_model_reg(clf1,clf2,name):
    '''
    Train and evaluate two regression models side by side.

    input:
        - clf1: baseline regression model
        - clf2: 2nd regression model to compare
        - name: name to keep track of comparison
    output:
        - dataframe containing results of training and prediction of model

    NOTE(review): reads X_train/X_test/y_train/y_test from module scope
    rather than taking them as parameters — the cell below must run
    model_pipeline_poly first.
    '''
    # Collect results on the learners
    results = {}
    for clf in [clf1, clf2]:
        clf_name = clf.__class__.__name__ + '_' +name
        results[clf_name] = {}
        results[clf_name]= train_predict_reg(clf, X_train, y_train, X_test, y_test)
    return pd.DataFrame(results)


# +
X_train, X_test, y_train, y_test=model_pipeline_poly(features,target,2)

#Initialize the model
clf1 = Ridge(alpha=2,random_state=2)
clf2 = DecisionTreeRegressor(random_state=2)

results_reg=run_model_reg(clf1,clf2,'reg')
# -

results_reg

# The regression models really underperformed in terms of predicting the amount spent. It appears with the current data within our group 1 of customers, there is not enough information to predict the amount that can be driven by the offer type. We can see the Decision Tree Regressor model really overfit the data, with a very high training score but sub par testing score. Meanwhile, the linear regression model (with ridge/l2 regularization) also shows a minimal correlation between the features and the target variable. The model really underfits the data.
# # I may get better performance if I break the models up into 3 different models based on offer type again; or even try to include non-influenced/invalid transactions, but this could be an exploration for another time. # ## Conclusion # Overall, I found this project challenging, mainly due to the structure of the data in the transcript dataset. I had started out with 2 business questions: # # What are the main drivers of an effective offer on the Starbucks app? # Could the data provided, namely offer characteristics and user demographics, predict whether a user would take up an offer? # #### a. Reflection: # ##### a.i. Question 1 findings: # For Question 1, the feature importance given by all 3 models were that the tenure of a member is the biggest predictor of the effectiveness of an offer. Further study would be able to indicate what average tenure days would result in an effective BOGO offer. # <br> # For all three models, the top 3 variables were the same - membership tenure, income and age. However, income and age switched orders depending on offer type. # <br> # For BOGO and discount offers, the distribution of feature importances were relatively equal. However, for informational offers, the distribution is slightly more balanced, with income the second most important variable. # # ##### a.ii. Question 2 findings: # My decision to use 3 separate models to predict the effectiveness of each offer type ended up with good accuracy for the BOGO and discount models (82.83% for BOGO and 87.35% for discount), while slightly less accurate performance for informational offers (75.3%). However, I would regard 75% as acceptable in a business setting, as for informational offers, there is no cost involved to inform users of a product. 
# <br>
# Meanwhile, for BOGO and discount models, I am quite happy with the 80% and above accuracy, as in a business setting that would be acceptable to show offers to people; even if the model misclassifies a few, the overall revenue increase might justify the few mistakes.
#
# #### b. Main challenges and potential improvement:
# When analysing and building the machine learning models to answer the above questions, reflections on my main challenges and findings are as follows:
#
# ##### b.i. Attribution framework for assigning offer_ids for transactions:
# In order to answer Question 1, I had to first define what an 'effective offer' means using the transactional records. This proved to be the trickiest portion of the project. I had to define a funnel for what an effective conversion would look like, as we had data on both effective and ineffective conversions. Thus, I was designing an attribution model for the conversion events (offer completed and transaction events) based on the events that occurred prior for each person.
# <br>
# I ended up having to separate the users into 4 different pools, based on their actions in the transcript data:
# <br>
# Group 1: People who are influenced by offers and thus purchase/complete the offer (successful/effective conversion of offer)<br>
# Group 2: People who receive an offer but are not influenced and thus no conversion event (ineffective conversion of offer)<br>
# Group 3: People who have conversion events but were not actually influenced by an offer<br>
# Group 4: People who receive offers but no views or action taken<br>
#
# Even after separating the groups, it was challenging to assign the people in group 3 based on the transactional data. I had to define the event space where the right sequence of events would occur before I could assign an offer id to transactions (which did not have an offer_id), essentially designing an event/sequence-based attribution window.
# <br> # After attributing the conversions to specific offers, the rest of the data preparation and cleaning was relatively straightforward. I was grateful that there were not many missing values, and the preparation of categorical variables was also relatively straightforward. # # ##### b.ii. Feature engineering: # I decided to do some basic feature engineering as I found the model had slightly underfit on my first attempt in this project, so I had added the feature engineering section later. It improved the performance of the model slightly, and the membership_tenure feature I had engineered out of the became_member_on column ended up being the most important predictor variable. # <br> # However, overall I found that I could not think of additional features using the time data, even though I had the hunch that the time of receiving the offer might be quite influential in determining whether it is effective or not. # # ##### b.iii. Model implementation decisions: # I had made the decision to build 3 separate models depending on offer types based on my definition of the problem statement - as I wanted to discover what would drive an effective offer, I thought it made more sense to remove noise from the data by separating the data into the offer types. My decision ended up to be quite a good one as the single BOGO and discount models got good performance in testing scores, compared to the all-in-one model overall score. # <br> # For the info model, the accuracy was slightly worse as we had less records overall (half of the BOGO and discount models). As elaborated above, I believe that if we had more data, I could have gotten the accuracy higher, as there was a clear diverging pattern occurring between the training and testing score as I made decisions to improve the model fit like adding polynomial features and removing 'noisy' features like the amount_invalid feature. 
Due to the limited data, my decisions ended up with the model overfitting, hence I believe the model accuracy would have benefitted from more data. # <br> # An additional note on model selection - I selected tree-based models as I wanted to assess feature importance, but I could have extended this study further by testing a parametric/ regression model (e.g. logistic regression for classification tasks). The weights of the coefficients from a regression model might have been interesting to contrast with the feature importance of a tree-based model, given that both models have different ways of analysing the data. The feature membership_tenure_days might not have been the highest weighted feature, in contrast to how it was in this study. # # ##### b.iv. Exploring demographics of different customer groups: # I was curious to know what the characteristics were of groups 3 and 4, which are customers who are not influenced by an offer at all. However, after comparing their characteristics with groups 1 and 2, I could not see any significant differences in their demographics. # <br> # I would have liked to have more data to perhaps understand why this group of customers tend to not be influenced by offer, in order to make useful suggestions on how to give a good customer experience to these customers, even if we do not serve them any offers. # # ##### b.v. Model accuracy in predicting amount spent given an effective offer: # The regression model I built out of curiosity to see if we could predict the amount a user would spend, given that they are effectively influenced by an offer. The motivation was that if we can predict how much someone would spend given an offer, perhaps we can assess which offers bring in the most revenue. # <br> # However, my model found virtually no correlation between the features provided (namely, offer characteristics and demographics of app users) with the amount spent per user. 
These features aren't strong enough to predict the amount spent per user. Perhaps if we also have a value of the offer, for example, for a discount offer, the value of the discount in dollar terms, perhaps we might be able to predict better. # <br> # Perhaps I could have broken them up into 3 different models for the 3 offer types, the way I did with the binary classification models, in order to get a better result. However, given that this was just a curiosity and I wanted to explore if the offer type would be a statistically significant predictor feature, I built an all-in-one model for this instance. This would be worth exploring further, given more time and data.
Starbucks_Capstone_notebook.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 (ipykernel) # language: python # name: python3 # --- # + [markdown] papermill={"duration": 0.016307, "end_time": "2022-01-10T17:05:19.160432", "exception": false, "start_time": "2022-01-10T17:05:19.144125", "status": "completed"} tags=[] # # Input URL # + [markdown] papermill={"duration": 0.015642, "end_time": "2022-01-10T17:05:19.193671", "exception": false, "start_time": "2022-01-10T17:05:19.178029", "status": "completed"} tags=[] # This component reads a file from a HTTP(s) source via wget # + papermill={"duration": 1.74616, "end_time": "2022-01-10T17:05:20.956199", "exception": false, "start_time": "2022-01-10T17:05:19.210039", "status": "completed"} tags=[] # !pip install wget==3.2 # + papermill={"duration": 0.02608, "end_time": "2022-01-10T17:05:21.005692", "exception": false, "start_time": "2022-01-10T17:05:20.979612", "status": "completed"} tags=[] import logging import os import re import sys import wget # + papermill={"duration": 0.028704, "end_time": "2022-01-10T17:05:21.052573", "exception": false, "start_time": "2022-01-10T17:05:21.023869", "status": "completed"} tags=[] # path and file name for output output_data_csv = os.environ.get('output_data_csv', 'data.csv') # url of souce url = os.environ.get('url') # temporal data storage for local execution data_dir = os.environ.get('data_dir', '../../data/') # + papermill={"duration": 0.027696, "end_time": "2022-01-10T17:05:21.101263", "exception": false, "start_time": "2022-01-10T17:05:21.073567", "status": "completed"} tags=[] parameters = list( map(lambda s: re.sub('$', '"', s), map( lambda s: s.replace('=', '="'), filter( lambda s: s.find('=') > -1 and bool(re.match(r'[A-Za-z0-9_]*=[.\/A-Za-z0-9]*', s)), sys.argv ) ))) for parameter in parameters: logging.warning('Parameter: ' + parameter) exec(parameter) # + 
papermill={"duration": 37.803424, "end_time": "2022-01-10T17:05:58.925851", "exception": false, "start_time": "2022-01-10T17:05:21.122427", "status": "completed"} tags=[] destination = os.path.join(data_dir, output_data_csv) os.remove(destination) if os.path.exists(destination) else None wget.download(url, out=destination) # + papermill={"duration": 0.184592, "end_time": "2022-01-10T17:05:59.287980", "exception": false, "start_time": "2022-01-10T17:05:59.103388", "status": "completed"} tags=[] print('Data written successfully')
component-library/input/input-url.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # name: python3 # --- # + [markdown] id="t3Nmb5isbvMF" colab_type="text" # # Solver de Markowitz # # **Responsable:** # <NAME> # # **Infraestructura usada:** Google Colab, para pruebas # 0. Importamos librerias necesarias # # **Fuente:*** código desarrollada en etapas previas # # # Objetivo: # # Implementar un solver que permite dar solución al problema de minimización de riesgo en el áreas de finanzas del siguiente tenor: # # * 1. Para un retorno $r$ esperado por un inversionista sobre un portafolio de activos, con un vector $mu$ de rendimiento promedio de cada uno de tales activos en cierto periodo histórico # 2. Encontrar los pesos $w$ asociados a los activos del portafolio, que permitan obtener el portafolio con el retorno $r$ y a su vez con mínima varianza. # # En términos matemáticos ello equivale a resolver # # $$\min_{w} \frac{1}{2} w^t \Sigma w$$ # # tal que # # $$ w^t \mu= r$$ # # $$ w^t 1_{m}= 1$$ # # En donde $\Sigma$ es la matriz de covarianzas asociadas a los rendimientos de los activos en el periodo de interés. # # Cabe destacar que el procedimiento descrito se basa en las siguientes premisas: # # * Se hace uso de la herramienta *Cupy* de Python para aprovechar el hardware GPU de los equipos disponibles, # * Aprovechar que el problema planteado tiene una solución cerrada empleando la expresión de los puntos críticos del Lagrangiano asociado a este problema (para mayor detalle, por favor véase el *Readme.md* del proyecto # * Los datos de portafolios se tomarán del framework de Python existente para *Yahoo Finance* # # + [markdown] id="QYLbLJ-ZgAl_" colab_type="text" # # 0. 
Import the required libraries

# + id="dSAaRoPIbxIf" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 230} outputId="578f6e88-d85b-4349-81e9-b8fd17f2376b"
# Install CuPy/Chainer support into the Colab runtime
# !curl https://colab.chainer.org/install | sh -

# + id="8aKfh0BOWM5P" colab_type="code" outputId="a26d8b05-c0b4-415a-8058-120484c5ce29" colab={"base_uri": "https://localhost:8080/", "height": 49}
import cupy as cp
import numpy as np
import pandas as pd
import fix_yahoo_finance as yf
import datetime
import matplotlib.pyplot as plt
import seaborn as sns
import time

# + [markdown] id="tUxGRk7JhQU4" colab_type="text"
# # 1. Portfolios to use
#
# Next we create an array with the ticker symbols of the portfolios to test in this implementation; the abbreviations are the names under which the Python framework for Yahoo Finance identifies the assets of interest:

# + id="KaCF-TxqSMKa" colab_type="code" colab={}
stocks = ['COP','AMT','LIN','LMT','AMZN','WMT','JNJ','VTI','MSFT','GOOG','XOM','CCI','BHP.AX','UNP',
          'BABA','NSRGY','RHHBY','VOO','AAPL','FB','CVX','PLD','RIO.L','HON','HD','PG','UNH','BRK-A','V','0700.HK',
          'RDSA.AS','0688.HK','AI.PA','RTX','MC.PA','KO','PFE','JPM','005930.KS','VZ','RELIANCE.NS','DLR','2010.SR',
          'UPS','7203.T','PEP','MRK','1398.HK','MA','T']


# + id="WsKa6VmZSMMt" colab_type="code" colab={}
def extraer_datos_yahoo(stocks):
    '''
    Download closing prices for the given tickers from Yahoo Finance,
    from 2015-01-01 to 2020-04-30.

    params: stocks - list of ticker symbols
    return: base   - dataframe of closing prices, forward/backward filled
    '''
    df_c = yf.download(stocks, start='2015-01-01', end='2020-04-30').Close
    # Anchor the frame on AAPL's trading calendar, then join every column.
    # NOTE(review): range(0,50) assumes exactly 50 tickers — confirm if the
    # stocks list ever changes.
    base = df_c['AAPL'].dropna().to_frame()
    for i in range(0,50):
        base = base.join(df_c.iloc[:,i].to_frame(), lsuffix='_caller', rsuffix='_other')
    # AAPL was joined onto itself above; drop the duplicate and restore its name
    base = base.drop(columns=['AAPL_caller'])
    base = base.rename(columns={"AAPL_other": "AAPL"})
    # Fill gaps: carry the last observed price forward, then back-fill leading holes
    base = base.fillna(method='ffill')
    base = base.fillna(method='bfill')
    return base


# + id="UCbwccJfSMOw" colab_type="code" outputId="a28d0486-25e5-452d-f28d-68e7c5dd8f25" colab={"base_uri": "https://localhost:8080/",
"height": 32} datos = extraer_datos_yahoo(stocks) # + [markdown] id="OHhBF3vvin1C" colab_type="text" # Ahora examinamos los datos recién descargados: # + id="CMh8fmQaSMQ0" colab_type="code" outputId="ac2c110f-3913-48ae-e4ff-a73ebbf93428" colab={"base_uri": "https://localhost:8080/", "height": 627} datos # + [markdown] id="7CI8UYlOis9h" colab_type="text" # # 2. Funciones auxiliares # # Ahora definimos funcionas auxiliares para el cálculo de los rendimientos de los activos, así como de la matriz de covarianza de ellos: # + id="dHMeTducYURe" colab_type="code" colab={} def calcular_rendimiento_vector(x): """ Función para calcular el rendimiento esperado params: x vector de precios return: r_est rendimiento esperado diario """ # Definimos precios iniciales y finales como arreglo alojado en la gpu x_o = cp.asarray(x) x_f = x_o[1:] # Calculamos los rendimientos diarios r = cp.log(x_f/x_o[:-1]) return r # + id="TNqlvmKUYZmg" colab_type="code" colab={} def calcular_rendimiento(X): """ Función para calcular el rendimiento esperado para un conjunto de acciones params: X matriz mxn de precios, donde: m es el número de observaciones y n el número de acciones return: r_est rvector de rendimientos esperados """ m,n = X.shape r_est = cp.zeros(n) X = cp.asarray(X) for i in range(n): r_est[i] = calcular_rendimiento_vector(X[:,i]).mean() return 264*r_est # + id="gIHYPvp9SMS1" colab_type="code" colab={} def calcular_varianza(X): """ Función para calcular el la matriz de varianzas y covarianzas para un conjunto de acciones params: X matriz mxn de precios, donde: m es el número de observaciones y n el número de acciones return: S matriz de varianzas y covarianzas """ m,n=X.shape X = cp.asarray(X) X_m = cp.zeros((m-1,n)) for i in range(n): X_m[:,i] = calcular_rendimiento_vector(X[:,i]) - calcular_rendimiento_vector(X[:,i]).mean() S = (cp.transpose(X_m)@X_m)/(m-2) return S # + [markdown] id="9aJyxuSmi7pY" colab_type="text" # # 3. 
# Covariance matrix, mean-return vector and target return (based on the observed maximum)
#
# With the data and the functions above we can now build the matrices and
# vectors used by the optimization problem.
#
# **Covariance matrix**

# + id="WzL27K3cSMVR" colab_type="code" colab={}
Sigma = calcular_varianza(datos)

# + [markdown] id="TYAL3xE8jeKz" colab_type="text"
# **Vector of mean returns**

# + id="xKqYqPcVSMXf" colab_type="code" colab={}
mu = calcular_rendimiento(datos)

# + id="Ov2aS-WJtF6v" colab_type="code" colab={}
mu = cp.array(mu)

# + [markdown] id="sAuDKPSpjjzG" colab_type="text"
# **Target return**
#
# For illustration only, the target return is set to the maximum of the
# historical mean returns:

# + id="83Ev_Jb_SMZd" colab_type="code" colab={}
r=max(mu)

# + [markdown] id="tzBLHh7EjyhM" colab_type="text"
# # 4. Solving the Markowitz model
#
# Functions that solve for the portfolio of interest:

# + id="7LMw-kV3buFo" colab_type="code" colab={}
def formar_vectores(mu, Sigma):
    '''
    Computes u = Sigma^{-1} mu and v = Sigma^{-1} 1 for the Markowitz problem.

    Args:
        mu (cupy array, vector): expected mean returns of the assets (dimension n)
        Sigma (cupy array, matrix): covariance matrix of the assets (dimension n x n)
    Return:
        u (cupy array, vector): Sigma^{-1} mu (dimension n)
        v (cupy array, vector): Sigma^{-1} 1  (dimension n)
    '''
    # Auxiliary all-ones vector
    n = Sigma.shape[0]
    ones_vector = cp.ones(n)
    # Note:
    # 1) u = Sigma^{-1} mu is obtained by solving Sigma u = mu
    # 2) v = Sigma^{-1} 1  is obtained by solving Sigma v = 1
    # (solving the linear systems instead of inverting Sigma)
    u = cp.linalg.solve(Sigma, mu)
    u = u.transpose() # correction of the array expression
    v = cp.linalg.solve(Sigma, ones_vector)
    return u , v


# + id="MlP474P8buHn" colab_type="code" colab={}
def formar_abc(mu, Sigma):
    '''
    Computes the scalars A, B and C of the Markowitz problem.

    Args:
        mu (cupy array, vector): expected mean returns of the assets (dimension n)
        Sigma (cupy array, matrix): covariance matrix of the assets (dimension n x n)
    Return:
        A (cupy array, scalar): mu^t Sigma^{-1} mu
        B (cupy array, scalar): 1^t Sigma^{-1} 1
        C (cupy array, scalar): 1^t Sigma^{-1} mu
    '''
    # Auxiliary all-ones vector
    n = Sigma.shape[0]
    ones_vector = cp.ones(n)
    # u = Sigma^{-1} mu, v = Sigma^{-1} 1 (see formar_vectores)
    u, v = formar_vectores(mu, Sigma)
    # Scalars of interest
    A = mu.transpose()@u
    B = ones_vector.transpose()@v
    C = ones_vector.transpose()@u
    return A, B, C


# + id="wYI0tnCQbvas" colab_type="code" colab={}
def delta(A,B,C):
    '''
    Computes Delta = A*B - C^2 of the Markowitz problem.

    Args:
        A (cupy array, scalar): mu^t Sigma^{-1} mu
        B (cupy array, scalar): 1^t Sigma^{-1} 1
        C (cupy array, scalar): 1^t Sigma^{-1} mu
    Return:
        Delta (cupy array, scalar): A*B - C^2
        (the original docstring wrongly described this as mu^t Sigma^{-1} mu)
    '''
    Delta = A*B-C**2
    return Delta


# + id="jqJe_K5Dbvei" colab_type="code" colab={}
def formar_omegas(r, mu, Sigma):
    '''
    Computes the Lagrange-multiplier SCALARS of the Markowitz problem:
        w_0 = (r*B - C) / Delta
        w_1 = (A - C*r) / Delta
    (the original docstring wrongly described these as vectors/matrices).

    Args:
        r (scalar): target expected return
        mu (cupy array, vector): expected mean returns of the assets (dimension n)
        Sigma (cupy array, matrix): covariance matrix of the assets (dimension n x n)
    Return:
        w_0, w_1 (cupy arrays, scalars): multipliers of Sigma^{-1} mu and
        Sigma^{-1} 1 in the optimal weight vector.
    '''
    # NOTE(review): u, v are unused below; kept to preserve original behavior.
    u, v = formar_vectores(mu, Sigma)
    # Relevant scalars
    A, B, C = formar_abc(mu, Sigma)
    Delta = delta(A,B,C)
    # Form w_0 and w_1
    w_0 = (1/Delta)*(r*B-C)
    w_1 = (1/Delta)*(A-C*r)
    return w_0, w_1


# + id="_quTZIRdbviJ" colab_type="code" colab={}
def markowitz(r, mu, Sigma):
    '''
    Computes the minimum-variance portfolio weights achieving expected
    return r:  w = w_0 * Sigma^{-1} mu + w_1 * Sigma^{-1} 1.

    Args:
        r (scalar): target expected return
        mu (cupy array, vector): expected mean returns of the assets (dimension n)
        Sigma (cupy array, matrix): covariance matrix of the assets (dimension n x n)
    Return:
        (cupy array, vector): optimal weight vector w (dimension n)
    '''
    # u = Sigma^{-1} mu, v = Sigma^{-1} 1
    u, v = formar_vectores(mu, Sigma)
    # Scalar multipliers
    w_0, w_1 = formar_omegas(r, mu, Sigma)
    return w_0*u+w_1*v

# + [markdown] id="NUbpn8YdkKKq" colab_type="text"
# # 6. Solver
#
# With all of the above we can test the solver just implemented.
# The weight vector is obtained in this case as:

# + id="fjgd7gXIbvlz" colab_type="code" colab={}
w=markowitz(r,mu,Sigma)

# + id="CtpQs_lplopY" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 230} outputId="1cf9f529-cc1a-422b-ab3a-5f8590d75825"
w

# + [markdown] id="kSXzl89KlrSn" colab_type="text"
# **We verify w^t 1 = 1** (the weights sum to one)

# + id="9gVwSshTbuKF" colab_type="code" outputId="415a4da1-3fd3-4e32-b299-91333e75fc67" colab={"base_uri": "https://localhost:8080/", "height": 32}
sum(w)

# + [markdown] id="ZpOo3I3Al3EE" colab_type="text"
# **We verify that $w^t \mu = r$** (the portfolio attains the target return)

# + id="OIwFM3p_l_OK" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 32} outputId="e9b29d31-bdc1-4d19-fdcf-837863fb831a"
r

# + id="Z_cU37Oel_Us" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 32} outputId="d45d5999-7625-48c7-8db7-7647734d24dc"
w@mu

# + [markdown] id="C0gfu-UXmFr4" colab_type="text"
# **We compute the portfolio variance** w^t Sigma w

# + id="l2N-ShZMtmIs" colab_type="code" outputId="0363a398-5e65-4f7a-ef43-16d512fd58a5" colab={"base_uri": "https://localhost:8080/", "height": 32}
w.transpose()@Sigma@w
notebooks/Programacion/4_Solver.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# +
from rpy2.robjects import r, pandas2ri
import pandas as pd

def data(name):
    """Fetch the R object `name` from the embedded R session as a DataFrame."""
    return pd.DataFrame(pandas2ri.ri2py(r[name]))

# Import data
mtcars = data('mtcars')

# https://stat.ethz.ch/R-manual/R-devel/library/datasets/html/state.html
states = pd.concat([data("state.name"), data("state.abb"), data('state.x77')],axis=1)
states.columns = ["name","abb","pop","income","illiteracy","lifeExp","murder","HS Grad","frost","area"]

state_arrests = pd.concat([data("state.name"), data("state.abb"), data('USArrests')],axis=1)
state_arrests.columns = ['Name','Abb'] + state_arrests.columns.tolist()[2:]

# Make a frequency count by distinct values of
# column(s) listed in 'groupbyvars'
# Returns pandas dataframe
def tidy_count(df,groupbyvars):
    """Frequency count of distinct `groupbyvars` values, in a column 'n'."""
    return df.groupby(groupbyvars).size().reset_index().rename(columns={0: "n"})
# -

states.head(5)

state_arrests.head(5)

mtcars.head(5)

tidy_count(mtcars,["cyl","am"])

# BUG FIX: the original called pd.merge(state, ...) but no `state` exists --
# the DataFrame defined above is `states` (NameError at runtime).
pd.merge(states,state_arrests,left_on="abb",right_on="Abb").head(5)

# +
def print_values(**kwargs):
    """Print each keyword argument as 'The value of <key> is <value>'."""
    for key, value in kwargs.items():
        print("The value of {} is {}".format(key, value))

print_values(left_on="this var", your_name="hulk")
# -

# Experimental wrapper function for pandas merge
# Drops column specified in right_on
# However this isn't always desired behaviour
def tidy_merge(*args,**kwargs):
    """pd.merge wrapper that drops the `right_on` column if one was given.

    Behavior is unchanged from the original; the bare `except:` (which would
    also have swallowed KeyboardInterrupt and typo NameErrors) is replaced by
    an explicit dict.get with the same '' fallback.
    """
    right_on = kwargs.get('right_on', '')
    if right_on == '':
        return pd.merge(*args,**kwargs)
    return pd.merge(*args,**kwargs).drop(right_on,axis=1)

tidy_merge(states,state_arrests.rename(columns={'Abb':'abb'}),left_on="abb",right_on="abb",how="inner").head(5)

state_arrests.drop('Murder',axis=1).\
merge(states.drop('name',axis=1),left_on="Abb",right_on='abb').head(5)
Python/Pandas_Codeblocks.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # *Contenuti* # === # - [Liste](#Liste) # - [Dimensione e accesso indicizzato](#Dimensione-e-accesso-indicizzato) # - [Lista vuota, aggiunta, inserimento, modifica e cancellazione](#Lista-vuota,-aggiunta,-inserimento,-modifica-e-cancellazione) # - [Estrazione](#Estrazione) # - [*Esercizio 1*](#Esercizio-1) # - [Ricerca](#Ricerca) # - [Ordinamento](#Ordinamento) # - [Operatori aggregati](#Operatori-aggregati) # - [*Esercizio 2*](#Esercizio-2) # - [Liste di tipo misto](#Liste-di-tipo-misto) # - [*Slicing*](#Slicing) # - [*Esercizio 3*](#Esercizio-3) # - [Cicli *for*](#Cicli-for) # - [Enumerazione](#Enumerazione) # - [*Esercizio 4*](#Esercizio-4) # - [Comprensioni di liste](#Comprensioni-di-liste) # - [*Esercizio 5*](#Esercizio-5) # - [*Esercizio 6*](#Esercizio-6) # Liste # === # Una *lista* è una collezione ordinata di oggetti, contenuta in una variabile. shapes = ['triangle', 'square', 'pentagon'] print(type(shapes)) print(shapes) # Dimensione e accesso indicizzato # --- # La *dimensione* di una lista si ottiene attraverso la funzione *len* (da *length*, lunghezza in inglese). len(shapes) # Si può accedere ad una lista attraverso un indice intero: la prima posizione ha indice 0, l'ultima len-1. print(shapes[0]) print(shapes[1]) print(shapes[len(shapes)-1]) # Le liste Python sono *circolari*: si possono usare indici negativi per accedere comodamente agli ultimi elementi. print(shapes[-1])#ultimo print(shapes[-2])#penultimo # Lista vuota, aggiunta, inserimento, modifica e cancellazione # --- # Si può creare una lista vuota e aggiungervi elementi successivamente. # + shapes = []#lista vuota shapes # - # Quando ha a che fare con liste, Python interpreta l'operatore + come una *concatenazione*. 
# + shapes = shapes + ['triangle', 'square'] shapes # + shapes = shapes + ['pentagon'] shapes # - # **Nota**: l'aggiunta di un unico elemento ad una lista è un caso particolare della concatenazione. Servono quindi le parentesi quadre: stiamo infatti concatenando una lista ad un'altra che contiene solo un elemento. # La concatenazione (in generale l'operazione generica +) può essere eseguita in modo compatto. # + a = 0 a += 5#forma compatta per 'a = a + 5' a # + b = 'ciao' b += ' mamma!'#concatenazione compatta di stringhe b # + shapes += ['hexagon']#concatenazione compatta di liste shapes # - # Si possono inserire elementi in una posizione specifica della lista. Tutti quelli successivi saranno spostati sulla destra, e la dimensione della lista aumenterà di 1. # + shapes.insert(2, 'rectangle')#inserimento in posizione 2 shapes # - # Gli elementi di una lista possono essere modificati in qualsiasi momento. # + shapes[0] = 'circle'#modifica shapes # - # Per cancellare un elemento della lista si usa la funzione *remove*. # + shapes.remove('hexagon')#rimozione shapes # - # Estrazione # --- # La funzione *pop* estrae un elemento da una lista (di default, l'ultimo), e lo restituisce. last_element = shapes.pop() print(shapes) print('Ultimo elemento:', last_element) # ### *Esercizio 1* # # Che differenza c'è (oltre al diverso tipo su cui sono definite) tra le funzioni viste finora (come upper, lower, replace del tipo stringa) e insert, remove e pop? # Ricerca # --- # Possiamo controllare se un certo oggetto è contenuto in una lista. 'square' in shapes 'triangle' in shapes # Ordinamento # --- # Una lista può essere ordinata attraverso la funzione *sorted*. Anche in questo caso, è Python che interpreta un'operazione in modo dinamico (quale?) in base al tipo di variabili contenute nella lista. Nel caso delle stringhe, l'ordinamento sarà alfabetico. 
# + shapes = ['triangle', 'square', 'circle', 'hexagon'] sorted(shapes) # - # La funzione sorted crea (e restituisce) una nuova lista: se ne alteriamo gli elementi, non otteniamo effetti su quella di partenza. sorted_shapes = sorted(shapes)#assegno risultato ad una variabile print('lista ordinata:', sorted_shapes) sorted_shapes[0] = 'rhombus'#modifica print('lista ordinata modificata:', sorted_shapes) print('lista originale:', shapes) # Quanto detto vale anche per le liste numeriche. Per convenzione, l'ordinamento è crescente. # + numbers = [3,6,1,7,8,5] sorted(numbers) # - # La funzione sorted permette l'ordinamento inverso, attraverso l'argomento *reverse*. Si tratta di un *argomento opzionale*: se non viene valorizzato (con '='), la funzione utilizza un valore di default (in questo caso, *False*). # # Se non si specifica diversamente, l'ordinamento sarà quindi quello naturale (dalla A alla Z per le stringhe, crescente per i numeri). # # Vedremo a breve il significato di True e False. # + numbers = [3,6,1,7,8,5] sorted(numbers, reverse=True)#argomento opzionale # - # Operatori aggregati # --- # E' possibile fare operazioni che coinvolgono tutti gli oggetti contenuti in una lista. Per quanto riguarda le liste numeriche, ad esempio, possiamo calcolare minimo, massimo e somma. grades = [28, 25, 22, 30, 30, 28, 26] print('voto minimo:', min(grades)) print('voto massimo:', max(grades)) # ### *Esercizio 2* # # Stampare il voto medio col formato qui sopra, usando (anche) la funzione *sum*. #FILL ME sum([10, 5]) # Liste di tipo misto # --- # In Python, una lista può contenere qualsiasi cosa. # + stuff = [] stuff += ['apples'] stuff += ['oranges'] stuff += [32] stuff += [17] stuff # - # Anche un'altra lista. 
# + another_list = [1,2,3,4] stuff += [another_list] stuff # - # Attenzione, è diverso da: # + stuff = ['apples', 'oranges', 32, 17] another_list = [1,2,3,4] stuff += another_list stuff # - # L'ordinamento usa in modo dinamico l'operatore '<', che non è definito su tutte le coppie di tipi. print(sorted(stuff)) # *Slicing* # --- # Python permette l'estrazione di sottoliste (slicing) in modo compatto. letters = ['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h'] letters[:3]#'le prime 3' / 'fino alla terza (inclusa)' letters[3:]#'dalla terza (esclusa) in poi' letters[:3] + letters[3:] letters[3:6] # *Esercizio 3* # --- # Giocare un po' con le liste e con le operazioni che abbiamo visto: # # - creare una lista vuota e riempirla # - inserirci, modificarne e cancellarne elementi # - concatenarla con un'altra lista # - aggiungerle un'altra lista # - ordinarla in modo naturale e inverso # - estrarne delle porzioni e assegnarle a variabili differenti; ricombinarle successivamente. # - ... # + #FILL ME # - # Cicli ***for*** # === # Nella programmazione, un *ciclo* è un costrutto in cui si ripetono delle operazioni. # # In Python si può accedere in modo ordinato agli elementi di una lista attraverso un ciclo *for*. # # **Nota**: in Python l'indentazione è obbligatoria: Jupyter lo fa in automatico andando a capo dopo il for. # + shapes = ['triangle', 'square', 'circle', 'hexagon'] for shape in shapes: print(shape)#codice indentato # - # Nel codice qui sopra, *shape* contiene, ad ogni iterazione del ciclo, un oggetto della lista. E' una variabile, e può avere un nome qualsiasi. # + shapes = ['triangle', 'square', 'circle', 'hexagon'] for x in shapes: print(x)#codice indentato # - # Enumerazione # --- # Supponiamo di voler conoscere l'indice dell'iterazione corrente. In altre parole, oltre all'elemento, vogliamo conoscere anche la sua posizione nella lista. 
# + shapes = ['triangle', 'square', 'circle', 'hexagon'] counter = 0 for shape in shapes: print("L'elemento in posizione {} è {}".format(counter, shape))#uso doppi apici perché stringa contiene ' counter += 1 # - # Quanto sopra si può realizzare in modo più compatto grazie alla funzione *enumerate*: # # for index, element in enumerate(list): # ... # + shapes = ['triangle', 'square', 'circle', 'hexagon'] for counter, shape in enumerate(shapes):#prima indice, poi elemento print("L'elemento in posizione {} è {}".format(counter, shape)) # - # Al solito, *counter* e *shape* sono solo nomi (qualsiasi) di variabile. Il loro ordine è invece obbligatorio! # + shapes = ['triangle', 'square', 'circle', 'hexagon'] for i, x in enumerate(shapes):#prima indice, poi elemento print("L'elemento in posizione {} è {}".format(i, x)) # - # **Nota**: tutto quello che è indentato sotto il ciclo for viene eseguito, appunto, dentro il ciclo. # + a = 'Ciao mamma!' shapes = ['triangle', 'square', 'circle', 'hexagon'] for index, element in enumerate(shapes): print("L'elemento in posizione {} è {}".format(index, element)) print(a)#dentro il ciclo # + a = 'Ciao mamma!' shapes = ['triangle', 'square', 'circle', 'hexagon'] for index, element in enumerate(shapes):#prima indice, poi elemento print("L'elemento in posizione {} è {}".format(index, element)) print(a)#fuori dal ciclo # - # *Esercizio 4* # --- # # - creare una lista di numeri interi, float o misti # - iterare sui numeri della lista, stampandoli insieme al loro indice # - inserire in una lista di appoggio i quadrati dei numeri della lista # - stampare la nuova lista per verificarne contenuto # - come buona pratica, commentare il codice #FILL ME print(10**2)#operatore 'al quadrato' # Comprensioni di liste # --- # Come abbiamo visto nell'esercizio precedente, utilizzare un ciclo per costruire una lista a partire da un'altra è un po' macchinoso. 
# + names = ['francesco', 'elisa', 'alessandro', 'giovanni', '<NAME>'] capitalized_names = []#lista di appoggio for n in names: capitalized_names += [n.upper()]#riempo lista di appoggio print(capitalized_names) # - # Una *comprensione* esegue il ciclo in una riga, iterando sugli elementi della lista: # # [f(x) for x in list] # # In questo modo è possibile derivare nuove liste da una nota in modo comodo e compatto. E' uno dei costrutti base di Python più eleganti, ed è molto utilizzato. # + capitalized_names = [n.upper() for n in names] capitalized_names # + capitalized_initials = [n.upper()[0] for n in names] capitalized_initials # - # *Esercizio 5* # -- # Le comprensioni di liste funzionano anche con l'enumerazione, cioè con l'indicizzazione dei singoli elementi: # # [f(index, element) for index, element in enumerate(list)] # + indexed_names = [[i, n] for i, n in enumerate(names)] indexed_names # - # Ripetere l'enumerazione della cella precedente stampando i nomi in ordine alfabetico (una riga di codice!). indexed_sorted_names = #FILL ME indexed_sorted_names # *Esercizio 6* # --- # Ripetere l'esercizio 4 (senza la stampa) utilizzando una comprensione di lista. # + #FILL ME # - # <script> # $(document).ready(function(){ # $('div.back-to-top').hide(); # $('nav#menubar').hide(); # $('div.prompt').hide(); # $('.hidden-print').hide(); # }); # </script> # # <footer id="attribution" style="float:right; color:#999; background:#fff;"> # Created with Jupyter, delivered by Fastly, rendered by Rackspace. # </footer>
3. Liste, cicli e comprensioni.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: study
#     language: python
#     name: study
# ---

# +
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
from scipy.stats import norm
import GPy
from matplotlib.backends.backend_pdf import PdfPages

# %matplotlib inline
sns.set_style("white")
import matplotlib

matplotlib.rcParams["text.usetex"] = True
plt.rcParams["font.size"] = 15


def f(x):
    """Synthetic 1-D test function: linear trend + Gaussian bumps + sine."""
    res = (
        x
        + 8 * np.exp(-0.5 * np.square(x - 3) / np.square(1))
        + 5 * np.sin(2 * x)
        - 8 * np.exp(-0.5 * np.square(x - 3.8) / np.square(0.25))
        + 1 * np.exp(-0.5 * np.square(x - 0.8) / np.square(0.4))
    )
    return res


def f2(x):
    """Alternative quartic test function (negated). Currently unused."""
    res = 3 - 40 * x + 38 * np.power(x, 2) - 11 * np.power(x, 3) + np.power(x, 4)
    return -res


def straddle(mean, std, th):
    """Straddle acquisition: 1.96*sigma minus distance of the mean to the threshold."""
    return 1.96 * std - np.abs(mean - th)


# +
# Level-set estimation demo: iteratively pick the point maximizing the
# straddle score, refit a GP, and record each iteration to lse.pdf.
np.random.seed(0)
n_split = 200
x = np.linspace(-0.5, 5.5, n_split).reshape(-1, 1)
y = f(x)
obs_x = []
obs_y = []
obs_i = []
pdf = PdfPages("lse.pdf")
q = [20, 190]
# q = [10, 85]
th = 7
for item in q:
    obs_x.append(x[item])
    obs_y.append(y[item])
    obs_i.append(item)

for i in range(15):
    # Fit a zero-mean GP to the demeaned observations with fixed hyperparameters
    m = np.mean(y)
    gp = GPy.models.GPRegression(np.array(obs_x), np.array(obs_y) - m)
    gp.Gaussian_noise.constrain_fixed(1e-2)
    gp.rbf.variance.constrain_fixed(16)
    gp.rbf.lengthscale.constrain_fixed(0.5)
    # gp.optimize()
    # print(gp)
    mean, var = gp.predict(x)
    mean += m
    mean = mean.flatten()
    var = var.flatten()
    ci = 1.96 * np.sqrt(var)
    # BUG FIX: np.where returns a TUPLE of index arrays; the original kept the
    # tuple, so `np.any(upper_i == xi)` compared tuple == int (always False)
    # and the shaded super/sub-level regions never rendered. Take [0].
    upper_i = np.where((mean - ci) > th)[0]
    # print(upper_i)
    # Collapse the confidently-above indices into contiguous (start, end) runs
    upper_range = []
    upper_flag = False
    start = 0
    for xi in np.arange(n_split):
        if np.any(upper_i == xi):
            if not upper_flag:
                start = xi
                upper_flag = True
        else:
            if upper_flag:
                upper_flag = False
                upper_range.append((start, xi))
            else:
                upper_flag = False
    if upper_flag is True:
        upper_range.append((start, n_split - 1))

    # Same run-length grouping for the confidently-below region
    lower_i = np.where((mean + ci) < th)[0]
    lower_range = []
    lower_flag = False
    start = 0
    for xi in np.arange(n_split):
        if np.any(lower_i == xi):
            if not lower_flag:
                start = xi
                lower_flag = True
        else:
            if lower_flag:
                lower_flag = False
                lower_range.append((start, xi))
            else:
                lower_flag = False
    if lower_flag is True:
        lower_range.append((start, n_split - 1))

    fig, ax = plt.subplots()
    ax.plot(x.flatten(), mean, label=r"$\mu(x)$", zorder=2, lw=2)
    ax.fill_between(
        x.flatten(),
        mean - ci,
        mean + ci,
        label=r"$\mu(x)\pm 1.96\sigma(x)$",
        alpha=0.3,
        zorder=2,
    )
    ax.axhline(th, label=r"$\theta$", ls="--", c="tab:red", alpha=1, lw=2, zorder=1)
    ax.plot(
        x.flatten(), y.flatten(), c="black", ls="--", label=r"$f(x)$", zorder=1, lw=2.5
    )
    marker = "s"
    color = "white"
    ax.scatter(
        np.array(obs_x)[:-1],
        np.array(obs_y)[:-1],
        marker=marker,
        s=45,
        color=color,
        edgecolor="black",
        lw=1.5,
        zorder=3,
    )
    # Highlight the most recently acquired point in gold (except at iteration 0)
    if i == 0:
        color = "white"
    else:
        color = "gold"
    ax.scatter(
        np.array(obs_x)[-1],
        np.array(obs_y)[-1],
        marker="s",
        s=45,
        color=color,
        edgecolor="black",
        lw=1.5,
        zorder=3,
    )
    for r in upper_range:
        ax.axvspan(x.flatten()[r[0]], x.flatten()[r[1]], alpha=0.2, color="tab:red")
    for r in lower_range:
        ax.axvspan(x.flatten()[r[0]], x.flatten()[r[1]], alpha=0.2, color="tab:green")
    ax.set_xlabel("$x$", fontsize=18)
    ax.set_ylabel("$f(x)$")
    ax.set_xlim(-0.7, 5.7)
    ax.set_ylim(-6, 14)
    ax.legend(borderaxespad=0, ncol=2, framealpha=0.7, fontsize=13, loc="lower right")
    ax.set_title("iteration {}".format(i + 1))
    margin = 1
    # BUG FIX: straddle() takes a standard deviation, but the original passed
    # the variance (inconsistent with ci = 1.96*sqrt(var) above).
    ac = straddle(mean[::margin], np.sqrt(var[::margin]), th)
    # Never re-acquire an already-observed index
    ac[np.array(obs_i, dtype=int) // margin] = 0
    next_i = np.argmax(ac) * margin
    fig.tight_layout()
    pdf.savefig(fig)
    obs_x.append(x[next_i])
    obs_y.append(y[next_i])
    obs_i.append(next_i)
print(gp)
pdf.close()
# -
LSE.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # ## A visual representation of named colors # # I looked into word embeddings for my bachelor thesis and stumbled into a great GitHub Gist by [<NAME>](https://gist.github.com/aparrish/2f562e3737544cf29aaf1af30362f469) titled "Understanding word vectors". Her Gist is very educational, explaining everything from the ground up. However, the section about colors as vectors did not show the colors being discussed. Since I am a visual learner, I wanted to implement some way of plotting the colors to see them. I realized that this could be a handy tool in other circumstances, and I have extracted what I made and modified it to work without her Gists context. # ### Color basis # Let's start by loading the color information from <NAME>'s GitHub repository containing a [JSON file](https://github.com/dariusk/corpora/blob/master/data/colors/xkcd.json) with the xkcd color names and values. # + import urllib.request, json # read json data with urllib.request.urlopen("https://raw.githubusercontent.com/dariusk/corpora/master/data/colors/xkcd.json") as url: color_data = json.loads(url.read().decode()) # - # I want to make a dictionary that holds the hex, integer, and normalized integer values, so the first step is to create a function that converts hex to a tuple with RGB values. def hex_to_int(color): """ Converts hexcolor codes to tuple of integers. Args: color (str): hex color code. Returns: tuple: RGB values as integers. """ color = color.lstrip("#") return int(color[:2], 16), int(color[2:4], 16), int(color[4:6], 16) # Now, I am ready to define the new color dictionary, which holds all the mentioned values. 
# +
# Define one dictionary with name as key
colors: dict = {}

for i in color_data["colors"]:
    # Each entry is {"color": <name>, "hex": <code>} -- values() order gives
    # [name, hex] (relies on insertion order of the parsed JSON objects)
    temp = list(i.values())
    # hex color as value
    val_hex = temp[1]
    # int (RGB 0-255) color as value
    val_int = hex_to_int(temp[1])
    # normalized int (0-1) color as value
    val_norm = tuple([x / 255 for x in val_int])
    # combine to dict
    colors[temp[0]] = {"hex": val_hex, "int": val_int, "norm": val_norm}
# -

# Let's look at some results.

# +
print("Sample of 5 colors with hex values")
names = [x[0] for x in list(colors.items())[0:5]]
values = [colors[x]["hex"] for x in names]
display(list(zip(names, values)))

print("Sample of 5 colors with int values")
names = [x[0] for x in list(colors.items())[0:5]]
values = [colors[x]["int"] for x in names]
display(list(zip(names, values)))

print("Sample of 5 colors with normalized int values")
names = [x[0] for x in list(colors.items())[0:5]]
values = [colors[x]["norm"] for x in names]
display(list(zip(names, values)))
# -

# Let's test if we can give a color name as input and get the values back.
print("Test for the color 'red':")
display(colors["red"])

# ### Making it visible
# There was already an excellent function for plotting colors in the [Matplotlib documentation](https://matplotlib.org/3.1.0/gallery/color/named_colors.html), so I copied it and made some small changes to better suit my needs.

# +
import matplotlib.pyplot as plt
import matplotlib.colors as mcolors

def plot_colortable(colors, title="Colors", sort_colors=True, emptycols=0, title_size=18, text_size=14):
    """Render a swatch table of the given {name: color} mapping.

    Args:
        colors (dict): color name -> matplotlib-parsable color value.
        title (str): figure title.
        sort_colors (bool): sort by HSV before plotting when True.
        emptycols (int): number of the 4 columns to leave empty.
        title_size, text_size (int): font sizes.

    Returns:
        matplotlib Figure with one labeled swatch line per color.
    """
    # Layout constants, in pixels at dpi=72
    cell_width = 212
    cell_height = 22
    swatch_width = 48
    margin = 12
    topmargin = 40

    # Sort colors by hue, saturation, value and name.
    if sort_colors is True:
        by_hsv = sorted((tuple(mcolors.rgb_to_hsv(mcolors.to_rgb(color))), name)
                        for name, color in colors.items())
        names = [name for hsv, name in by_hsv]
    else:
        names = list(colors)

    n = len(names)
    ncols = 4 - emptycols
    # Round up so a partial final column still gets a row
    nrows = n // ncols + int(n % ncols > 0)

    width = cell_width * 4 + 2 * margin
    height = cell_height * nrows + margin + topmargin
    dpi = 72

    fig, ax = plt.subplots(figsize=(width / dpi, height / dpi), dpi=dpi)
    fig.subplots_adjust(margin/width, margin/height,
                        (width-margin)/width, (height-topmargin)/height)
    ax.set_xlim(0, cell_width * 4)
    # y axis inverted so row 0 is at the top
    ax.set_ylim(cell_height * (nrows-0.5), -cell_height/2.)
    ax.yaxis.set_visible(False)
    ax.xaxis.set_visible(False)
    ax.set_axis_off()
    ax.set_title(title, fontsize=title_size, loc="left", pad=10)

    for i, name in enumerate(names):
        # Fill column-by-column: consecutive names go down a column first
        row = i % nrows
        col = i // nrows
        y = row * cell_height

        swatch_start_x = cell_width * col
        swatch_end_x = cell_width * col + swatch_width
        text_pos_x = cell_width * col + swatch_width + 7

        ax.text(text_pos_x, y, name, fontsize=text_size,
                horizontalalignment='left',
                verticalalignment='center')

        # The swatch itself: a thick horizontal line in the color
        ax.hlines(y, swatch_start_x, swatch_end_x,
                  color=colors[name], linewidth=18)

    return fig
# -

# Since a predefined function is used for plotting, a function that generates the needed input is defined.

def make_selection_dict(names, color_index, val_type="hex"):
    """
    Makes a dictionary for the selected colors and their values.

    Args:
        names (list): color names
        color_index (dict): All avaliable colors.
        val_type (str, optional): value return type ("hex", "int" or "norm"). Defaults to "hex".

    Returns:
        [dict]: color names and values.
    """
    value_list: list = []
    # Makes a list of color values based on the input and desired return type.
    for i in names:
        value_list.append(color_index[i][val_type])
    # Combines the names and values in a dictionary.
    return {k: v for k, v in zip(names, value_list)}

# Let's make a list of colors and test that the new function returns "hex" values.
# + color_selection = ["red", "green", "blue"] display(selection := make_selection_dict(color_selection, colors, "hex")) # - # Now the time to see the actual colors is here. plot_colortable(selection, sort_colors=False, emptycols=1); # ### Finding shades of a color # Allison's Gist had some functions that enabled us to find the n closest colors to our selection based on euclidean distance. I have combined some of her functions and made alterations to them to better suit my needs. def closest(color_index, color_val, n=10): """ Defines a list of n closest colors to the input color. Args: color_index (dict): All avaliable colors. color_val (dict): Base color. n (int, optional): Number of closest colors. Defaults to 10. Returns: list: Names of closest colors. """ from scipy.spatial.distance import euclidean closest = [] if isinstance(color_val, dict): for key in sorted(color_index.keys(), key=lambda x: euclidean(color_val["int"], color_index[x]["int"]))[:n]: closest.append(key) elif isinstance(color_val, list): for key in sorted( color_index.keys(), key=lambda x: euclidean(color_val, color_index[x]["int"]))[:n]: closest.append(key) return closest # Let's find the 6 closest colors to "red". color_selection = closest(colors, colors["red"], 6) selection = make_selection_dict(color_selection, colors, "hex") # <-- using hex plot_colortable(selection, emptycols=1); # Let's find the 6 closest colors to "green". color_selection = closest(colors, colors["green"], 6) selection = make_selection_dict(color_selection, colors, "norm") # <-- using norm plot_colortable(selection, emptycols=1); # Let's find the 12 closest colors to "pure blue", by using the RGB values. 
color_selection = closest(colors, [3, 6, 223], 12)
selection = make_selection_dict(color_selection, colors, "hex")
plot_colortable(selection, emptycols=1);

# ### Playing with vectors
# The following functions are copied as they were from the previously mentioned Gist since they do the intended job, and I don't see any need to alter them.
# #### Subtract one color from another
# Let's test subtracting "magenta" from "cyan".

# +
def subtractv(coord1, coord2):
    """Element-wise difference coord1 - coord2 of two equal-length vectors."""
    return [c1 - c2 for c1, c2 in zip(coord1, coord2)]

# Have to use "int" in the subtractv function
color_selection = closest(colors, subtractv(colors['magenta']["int"], colors['cyan']["int"]), 12)
selection = make_selection_dict(color_selection, colors, "hex")
plot_colortable(selection, emptycols=1);
# -

# #### Add one color to another
# Let's test adding "royal" with "teal".

# +
def addv(coord1, coord2):
    """Element-wise sum of two equal-length vectors."""
    return [c1 + c2 for c1, c2 in zip(coord1, coord2)]

# Have to use "int" in the addv function
color_selection = closest(colors, addv(colors['royal']["int"], colors['teal']["int"]), 12)
selection = make_selection_dict(color_selection, colors, "hex")
plot_colortable(selection, emptycols=1);
# -

# #### Find the average of a list
# Let's test finding the average of black and white.
# +
def meanv(coords):
    """Element-wise mean of a list of equal-length coordinate vectors.

    Args:
        coords (list of lists): vectors to average; every item is assumed to
            have the same length as item 0.

    Returns:
        list of floats: the component-wise mean.
    """
    # assumes every item in coords has same length as item 0
    sumv = [0] * len(coords[0])
    for item in coords:
        for i in range(len(item)):
            sumv[i] += item[i]
    mean = [0] * len(sumv)
    for i in range(len(sumv)):
        mean[i] = float(sumv[i]) / len(coords)
    return mean

# Leftover sanity check from the original Gist (result intentionally unused)
meanv([[0, 1], [2, 2], [4, 3]])

# Have to use "int" in the meanv function
color_selection = closest(colors, meanv([colors['black']["int"], colors['white']["int"]]), 12)
selection = make_selection_dict(color_selection, colors, "hex")
plot_colortable(selection, emptycols=1);
# -

# #### Finding random colors

# +
import random

# BUG FIX: random.sample requires a sequence; passing the dict_keys view
# raised DeprecationWarning on 3.9/3.10 and TypeError on Python >= 3.11.
color_selection = random.sample(list(colors.keys()), 12)
selection = make_selection_dict(color_selection, colors, "hex")
plot_colortable(selection, sort_colors=False, emptycols=1);
# -

# #### Every n color in range
color_selection = [list(colors.keys())[x] for x in range(0, 37, 3)]
selection = make_selection_dict(color_selection, colors, "hex")
plot_colortable(selection, emptycols=1);
assets/notebooks/A visual representation of named colors.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

import numpy as np
import matplotlib.pyplot as plt
import matplotlib.cm as cm
import os
import astropy.coordinates as coord
import astropy.units as u
from astropy.table import Table, QTable, hstack
from myspace import MySpace
from sklearn.mixture import GaussianMixture

# +
# Record the library versions this analysis was run with (reproducibility)
import sklearn
import jax
import numpy
import scipy

print('scikit-learn', sklearn.__version__)
print('jax', jax.__version__)
print('numpy', numpy.__version__)
print('scipy', scipy.__version__)

#Output:
#scikit-learn 0.23.2
#jax 0.2.5
#numpy 1.19.1
#scipy 1.5.0
# -

# Gaia sources with radial velocities
gaia = QTable.read('../data/RV-all-result.fits', format='fits')

# +
# Parallax zero-point correction from the gaiadr3-zeropoint package
from zero_point import zpt
zpt.load_tables()

gmag = gaia['phot_g_mean_mag'].value
nueffused = gaia['nu_eff_used_in_astrometry'].value
psc = gaia['pseudocolour'].value
sinbeta = np.sin(np.deg2rad(gaia['ecl_lat'].value))
soltype = gaia['astrometric_params_solved']

zpvals = zpt.get_zpt(gmag, nueffused, psc, sinbeta, soltype)
# zero-point-corrected parallax
cparallax=gaia['parallax'].value-zpvals
# -

# Quality cut: parallax signal-to-noise > 4
qindx=(gaia['parallax_over_error']>4.)


def make_anim_xv(XX,VV,tensorsx,myspacex,tensorsxv,myspacexv,gs=150):
    # Sweep a 30-degree azimuthal wedge around the disk in 10-degree steps and,
    # for each wedge, save a 4-panel frame (positions; raw velocities; x-corrected;
    # xv-corrected), then assemble the frames into orders.gif.
    # NOTE(review): `gal` is read from module scope but is not defined in this
    # notebook chunk -- confirm it is created in another cell before calling.
    _cyl = gal.represent_as('cylindrical')
    # Annulus 200 pc < rho < 500 pc, |z| < 500 pc
    mask2_r500 = (_cyl.rho < 500*u.pc) & (np.abs(_cyl.z) < 500*u.pc) & (_cyl.rho > 200*u.pc)
    # Disk-like kinematics: |v| < 200 km/s
    disk_vmask2=(np.sqrt(VV[:,0]**2+VV[:,1]**2+VV[:,2]**2)<200.)
    rindx=(disk_vmask2)*(mask2_r500)
    for i in range(0,36):
        # Wedge [i*10deg, (i+3)*10deg) in phi (shifted by pi to start at 0)
        wedgedex=rindx*(_cyl.phi+np.pi*u.rad>(i*np.pi/18.)*u.rad)*(_cyl.phi+np.pi*u.rad<((i+3)*np.pi/18.)*u.rad)
        # Last two wedges wrap around past 360 degrees
        if i==34:
            wedgedex=rindx*(_cyl.phi+np.pi*u.rad>(i*np.pi/18.)*u.rad)*(_cyl.phi+np.pi*u.rad<((i+3)*np.pi/18.)*u.rad)+rindx*(_cyl.phi+np.pi*u.rad>0.)*(_cyl.phi+np.pi*u.rad<((1)*np.pi/18.)*u.rad)
        if i==35:
            wedgedex=rindx*(_cyl.phi+np.pi*u.rad>(i*np.pi/18.)*u.rad)*(_cyl.phi+np.pi*u.rad<((i+3)*np.pi/18.)*u.rad)+rindx*(_cyl.phi+np.pi*u.rad>0.)*(_cyl.phi+np.pi*u.rad<((2)*np.pi/18.)*u.rad)
        print(wedgedex.sum(),'stars in wedge',i)
        # Velocity corrections from the two fitted models
        fixx=myspacex.get_model_v(tensorsx,VV[wedgedex],XX[wedgedex])
        fixxv=myspacexv.get_model_v(tensorsxv,VV[wedgedex],XX[wedgedex])
        f, ((ax1,ax2),(ax3,ax4)) = plt.subplots(2, 2, figsize=(15,15))
        # NOTE(review): positions plotted over +-500 while make_anim_justx uses
        # +-0.5 with the same kpc axis label -- pc/kpc units look inconsistent; confirm.
        ax1.hist2d(XX[:,0][wedgedex],XX[:,1][wedgedex],range=[[-500,500],[-500.,500.]],bins=gs,cmin=1.0e-50,rasterized=True,density=True)
        ax1.set_xlabel(r'$X\ (\mathrm{kpc})$',fontsize=20)
        ax1.set_ylabel(r'$Y\ (\mathrm{kpc})$',fontsize=20)
        ax1.set_xlim(-500.,500.)
        ax1.set_ylim(-500.,500.)
        ax1.set_title(r'$\mathrm{Selected\ area}$',fontsize=20)
        ax2.hist2d(VV[:,0][wedgedex],VV[:,1][wedgedex],range=[[-125,125],[-125,125]],bins=gs,cmin=1.0e-50,rasterized=True,density=True)
        ax2.set_xlabel(r'$v_X\ (\mathrm{km\ s}^{-1})$',fontsize=20)
        ax2.set_ylabel(r'$v_Y\ (\mathrm{km\ s}^{-1})$',fontsize=20)
        ax2.set_xlim(-125,125)
        ax2.set_ylim(-125,125)
        ax2.set_title(r'$\mathrm{No\ correction}$',fontsize=20)
        ax3.hist2d(fixx[:,0],fixx[:,1],range=[[-125,125],[-125,125]],bins=gs,cmin=1.0e-50,rasterized=True,density=True)
        ax3.set_xlabel(r'$v_X\ (\mathrm{km\ s}^{-1})$',fontsize=20)
        ax3.set_ylabel(r'$v_Y\ (\mathrm{km\ s}^{-1})$',fontsize=20)
        ax3.set_xlim(-125,125)
        ax3.set_ylim(-125,125)
        ax3.set_title(r'$\mathrm{x\ correction}$',fontsize=20)
        ax4.hist2d(fixxv[:,0],fixxv[:,1],range=[[-125,125],[-125,125]],bins=gs,cmin=1.0e-50,rasterized=True,density=True)
        ax4.set_xlabel(r'$v_X\ (\mathrm{km\ s}^{-1})$',fontsize=20)
        ax4.set_ylabel(r'$v_Y\ (\mathrm{km\ s}^{-1})$',fontsize=20)
        ax4.set_xlim(-125,125)
        ax4.set_ylim(-125,125)
        ax4.set_title(r'$\mathrm{xv\ correction}$',fontsize=20)
        ax1.tick_params(axis='both', which='major', labelsize=15)
        ax2.tick_params(axis='both', which='major', labelsize=15)
        ax3.tick_params(axis='both', which='major', labelsize=15)
        ax4.tick_params(axis='both', which='major', labelsize=15)
        plt.savefig('gaiao'+str("{:02d}".format(i))+'.pdf',bbox_inches='tight')
        plt.close()
    # Assemble all saved frames into an animated gif (requires ImageMagick)
    os.system('convert -delay 5 -loop 0 gaiao*.pdf orders.gif')


def make_anim_justx(XX,VV,tensorsx,myspacex,gs=150):
    # Same wedge sweep as make_anim_xv, but with 3 panels and only the
    # x-corrected model; frames are assembled into justx.gif.
    # NOTE(review): also reads module-level `gal`; velocity cut here is
    # |v| < 100 km/s (vs 200 above) -- confirm intentional.
    _cyl = gal.represent_as('cylindrical')
    mask2_r500 = (_cyl.rho < 500*u.pc) & (np.abs(_cyl.z) < 500*u.pc) & (_cyl.rho > 200*u.pc)
    disk_vmask2=(np.sqrt(VV[:,0]**2+VV[:,1]**2+VV[:,2]**2)<100.)
    rindx=(disk_vmask2)*(mask2_r500)
    for i in range(0,36):
        wedgedex=rindx*(_cyl.phi+np.pi*u.rad>(i*np.pi/18.)*u.rad)*(_cyl.phi+np.pi*u.rad<((i+3)*np.pi/18.)*u.rad)
        # Wrap-around wedges at the end of the sweep
        if i==34:
            wedgedex=rindx*(_cyl.phi+np.pi*u.rad>(i*np.pi/18.)*u.rad)*(_cyl.phi+np.pi*u.rad<((i+3)*np.pi/18.)*u.rad)+rindx*(_cyl.phi+np.pi*u.rad>0.)*(_cyl.phi+np.pi*u.rad<((1)*np.pi/18.)*u.rad)
        if i==35:
            wedgedex=rindx*(_cyl.phi+np.pi*u.rad>(i*np.pi/18.)*u.rad)*(_cyl.phi+np.pi*u.rad<((i+3)*np.pi/18.)*u.rad)+rindx*(_cyl.phi+np.pi*u.rad>0.)*(_cyl.phi+np.pi*u.rad<((2)*np.pi/18.)*u.rad)
        print(wedgedex.sum(),'stars in wedge',i)
        fixx=myspacex.get_model_v(tensorsx,VV[wedgedex],XX[wedgedex])
        f, ((ax1,ax2,ax3)) = plt.subplots(1, 3, figsize=(21,7))
        ax1.hist2d(XX[:,0][wedgedex],XX[:,1][wedgedex],range=[[-0.5,0.5],[-0.5,0.5]],bins=gs,cmin=1.0e-50,rasterized=True,density=True)
        ax1.set_xlabel(r'$X\ (\mathrm{kpc})$',fontsize=20)
        ax1.set_ylabel(r'$Y\ (\mathrm{kpc})$',fontsize=20)
        ax1.set_xlim(-0.5,0.5)
        ax1.set_ylim(-0.5,0.5)
        ax1.set_title(r'$\mathrm{Selected\ area}$',fontsize=20)
        ax2.hist2d(VV[:,0][wedgedex],VV[:,1][wedgedex],range=[[-125,125],[-125,125]],bins=gs,cmin=1.0e-50,rasterized=True,density=True)
        ax2.set_xlabel(r'$v_X\ (\mathrm{km\ s}^{-1})$',fontsize=20)
        #ax2.set_ylabel(r'$v_Y\ (\mathrm{km\ s}^{-1})$',fontsize=20)
        ax2.set_xlim(-125,125)
        ax2.set_ylim(-125,125)
        ax2.set_title(r'$\mathrm{No\ correction}$',fontsize=20)
        ax3.hist2d(fixx[:,0],fixx[:,1],range=[[-125,125],[-125,125]],bins=gs,cmin=1.0e-50,rasterized=True,density=True)
        ax3.set_xlabel(r'$v_X\ (\mathrm{km\ s}^{-1})$',fontsize=20)
        #ax3.set_ylabel(r'$v_Y\ (\mathrm{km\ s}^{-1})$',fontsize=20)
        ax3.set_xlim(-125,125)
        ax3.set_ylim(-125,125)
        ax3.set_title(r'$\mathrm{x\ correction}$',fontsize=20)
        # NOTE(review): ax2 gets no tick_params here, unlike make_anim_xv -- confirm.
        ax1.tick_params(axis='both', which='major', labelsize=15)
        ax3.tick_params(axis='both', which='major', labelsize=15)
        plt.savefig('93-'+str("{:02d}".format(i))+'.pdf',bbox_inches='tight')
        plt.close()
    os.system('convert -delay 5 -loop 0 93-*.pdf justx.gif')

# +
c = 
coord.SkyCoord(ra=gaia['ra'][qindx],dec=gaia['dec'][qindx],distance=1./cparallax[qindx]*u.kpc,pm_ra_cosdec=gaia['pmra'][qindx],pm_dec=gaia['pmdec'][qindx],radial_velocity=gaia['radial_velocity'][qindx]) # - gal = c.galactic gal.set_representation_cls('cartesian') # + xyz = np.vstack((gal.u.to(u.kpc).value, gal.v.to(u.kpc).value, gal.w.to(u.kpc).value)).T UVW = np.vstack((gal.U.to(u.km/u.s).value, gal.V.to(u.km/u.s).value, gal.W.to(u.km/u.s).value)).T disk_vmask = np.linalg.norm(UVW, axis=1) < 150. # + XX=xyz VV=UVW dist2=np.sqrt(XX[:,0]**2+XX[:,1]**2) _cyl = gal.represent_as('cylindrical') mask_r100 = (_cyl.rho < 100*u.pc) & (np.abs(_cyl.z) < 150*u.pc) mask_r300 = (_cyl.rho < 300*u.pc) & (np.abs(_cyl.z) < 500*u.pc) mask_r500 = (_cyl.rho < 500*u.pc) & (np.abs(_cyl.z) < 500*u.pc) mask_r100.sum(), mask_r500.sum() #local_mask=(dist2<0.2)*(np.fabs(XX[:,2])<0.2) #train_mask=(dist2>0.2)*(dist2<0.5)*(np.fabs(XX[:,2])<0.5) local_v = UVW[disk_vmask & mask_r100] local_x = xyz[disk_vmask & mask_r100] print(local_v.shape) # - local_gmm = GaussianMixture(n_components=64) local_gmm.fit(local_v) # + # Just X # myspace = MySpace(local_gmm, terms=['x']) # - myspacexv = MySpace(local_gmm, terms=['x','xv']) # myspacexvx = MySpace(local_gmm, terms=['x','xv','xx']) # + train_v = UVW[disk_vmask & mask_r300] train_x = xyz[disk_vmask & mask_r300] test_v = UVW[disk_vmask & mask_r500] test_x = xyz[disk_vmask & mask_r500] local_v.shape, train_v.shape, test_v.shape # + # res, tensors = myspace.fit(train_x, train_v) # - resxv, tensorsxv = myspacexv.fit(train_x, train_v) # resxvx, tensorsxvx = myspacexvx.fit(train_x, train_v) print(tensors) def calculate_oort_constants(tensors): Aij=tensors['Aij'] A = 0.5 * (-1.*Aij[0, 1] + -1.*Aij[1, 0]) B = 0.5 * (-1.*Aij[1, 0] - -1.*Aij[0, 1]) K = 0.5 * (-1.*Aij[0, 0] + -1.*Aij[1, 1]) C = 0.5 * (-1.*Aij[0, 0] - -1.*Aij[1, 1]) print('A=',A,'B=',B,'C=',C,'K=',K) return(A,B,C,K) calculate_oort_constants(tensors) fixx=myspace.get_model_v(tensors,test_v,test_x) 
fixxv=myspacexv.get_model_v(tensorsxv,test_v,test_x) fixxvx=myspacexvx.get_model_v(tensorsxvx,test_v,test_x) f, ((ax1,ax2)) = plt.subplots(1, 2, figsize=(10,5)) gs=200 ax1.hexbin(test_v[:,0],test_v[:,1],extent=[-125,125,-125,125],mincnt=1,rasterized=True,gridsize=gs) ax1.set_title('Uncorrected',fontsize=20) ax1.set_xlabel('vx (km/s)',fontsize=20) ax1.set_ylabel('vy (km/s)',fontsize=20) ax1.set_xlim(-125,125) ax1.set_ylim(-125,125) ax2.hexbin(fixx[:,0],fixx[:,1],extent=[-125,125,-125,125],mincnt=1,rasterized=True,gridsize=gs) ax2.set_title('x corrected',fontsize=20) ax2.set_xlabel('vx (km/s)',fontsize=20) ax2.set_xlim(-125,125) ax2.set_ylim(-125,125) plt.show() make_anim_justx(XX,VV,tensors,myspace) make_anim_xv(XX,VV,tensors,myspace,tensorsxv,myspacexv) f, ((ax1,ax2,ax3,ax4)) = plt.subplots(1, 4, figsize=(20,5)) gs=200 ax1.hexbin(test_v[:,0],test_v[:,1],extent=[-125,125,-125,125],mincnt=1,rasterized=True,gridsize=gs) ax1.set_title('Uncorrected',fontsize=20) ax1.set_xlabel('vx (km/s)',fontsize=20) ax1.set_ylabel('vy (km/s)',fontsize=20) ax1.set_xlim(-125,125) ax1.set_ylim(-125,125) ax2.hexbin(fixx[:,0],fixx[:,1],extent=[-125,125,-125,125],mincnt=1,rasterized=True,gridsize=gs) ax2.set_title('x corrected',fontsize=20) ax2.set_xlabel('vx (km/s)',fontsize=20) ax2.set_xlim(-125,125) ax2.set_ylim(-125,125) ax3.hexbin(fixxv[:,0],fixxv[:,1],extent=[-125,125,-125,125],mincnt=1,rasterized=True,gridsize=gs) ax3.set_title('xv corrected',fontsize=20) ax3.set_xlabel('vx (km/s)',fontsize=20) ax3.set_xlim(-125,125) ax3.set_ylim(-125,125) ax4.hexbin(fixxvx[:,0],fixxvx[:,1],extent=[-125,125,-125,125],mincnt=1,rasterized=True,gridsize=gs) ax4.set_title('xvxx corrected',fontsize=20) ax4.set_xlabel('vx (km/s)',fontsize=20) ax4.set_xlim(-125,125) ax4.set_ylim(-125,125) #plt.savefig('comp64.pdf',bbox_inches='tight') plt.show()
notebooks/ForHogg.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # name: python3 # ---

# MNIST handwritten-digit classifier: a two-layer dense Keras network
# trained on the 60k/10k MNIST split.

# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/zulkernine/MachineLearning/blob/master/HandWrittenDigitClassification.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>

# + [markdown] id="QS7l0-8U89Qg" colab_type="text"
# Load the Dataset from MNIST

# + id="ww1d3laj74Dq" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 68} outputId="0ae5b1e5-3b89-4217-843b-9d62069384b5"
# %tensorflow_version 2.x
from tensorflow import keras
from keras.datasets import mnist
#import keras
keras.__version__

# + id="1RH_S7zz-uIG" colab_type="code" colab={}
# 60,000 training and 10,000 test images, each 28x28 uint8 grayscale.
(train_images,train_labels),(test_images,test_labels) = mnist.load_data()

# + [markdown] id="Qa81Kym3GzNw" colab_type="text"
# Visualise the data

# + id="IF4IyCfrG2Ql" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 265} outputId="c030c309-75c7-42ff-d0b4-01b815108cf4"
import matplotlib.pyplot as plt

# Show one sample image with its raw pixel-value colorbar.
plt.figure()
plt.imshow(train_images[16])
plt.colorbar()
plt.grid(False)
plt.show()

# + [markdown] id="1Oqxwcma_EYa" colab_type="text"
# Implement the Network

# + id="mPXIc_O8_HBb" colab_type="code" colab={}
# from keras import models
# from keras import layers

# Dense 784 -> 512 (ReLU) -> 10 (softmax) classifier.
network = keras.Sequential()
network.add(keras.layers.Dense(512,activation='relu', input_shape=(28*28,)))
network.add(keras.layers.Dense(10,activation='softmax'))

# + [markdown] id="YvMf318IABwv" colab_type="text"
# Compilation Step

# + id="zNp9cS4PAE1s" colab_type="code" colab={}
# categorical_crossentropy expects one-hot labels (prepared below).
network.compile( optimizer='rmsprop',loss='categorical_crossentropy',metrics=['accuracy'] )

# + [markdown] id="uHBg4CyaAoFF" colab_type="text"
# Preparing image data

# + id="vqbyBWW_Altk" colab_type="code" colab={}
# Flatten each 28x28 image into a 784-vector and rescale pixels to [0, 1].
train_images = train_images.reshape((60000,28*28))
train_images = train_images.astype('float32')/255

test_images = test_images.reshape((10000,28*28))
test_images = test_images.astype('float32')/255

# + [markdown] id="08W3oP7hBLTO" colab_type="text"
# Preparing the labels

# + id="0g2Np930BOAZ" colab_type="code" colab={}
# One-hot encode the integer class labels (0-9 -> length-10 vectors).
from keras.utils import to_categorical

train_labels = to_categorical(train_labels)
test_labels = to_categorical(test_labels)

# + [markdown] id="nkvnp-V-BlKo" colab_type="text"
# Fit the data

# + id="NFqldHszBmsz" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 391} outputId="0a294932-ca10-4df0-9cdc-30e4f12e02a3"
network.fit(train_images,train_labels,epochs=10,batch_size=128)

# + [markdown] id="bdQjUmzQCV0j" colab_type="text"
# Test the network on test images

# + id="5hq5t5wjCZ4w" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 51} outputId="02d0d83d-6580-4834-b787-f91589cede0f"
test_loss, test_acc = network.evaluate(test_images, test_labels)
print('test accuracy : ',test_acc)

# + [markdown] id="sJ1b2MXfGAYQ" colab_type="text"
# Visualise the data
HandWrittenDigitClassification.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # ---

# # Plots for the Poster

# Builds the two poster figures from per-model average-precision results:
# a grouped bar chart comparing models, and a line chart of AP versus the
# number of unfrozen layers.

# ## Comparing Average Precisions Across Models

# IoU threshold to select from the evaluation results, and the newline-
# delimited-JSON results file written by the evaluation step.
iou = 0.5
inFile = '../../5_evaluate/output/mAP.txt'

# +
import matplotlib.pyplot as plt
import numpy as np
import itertools
import json

# Load Data: one JSON object per line.
data = []
with open(inFile) as f:
    for line in f:
        j_content = json.loads(line)
        data.append(j_content)
# -

# extract relevant data: keep only results at the chosen IoU threshold
data_iou = [x for x in data if x['iouThresh']==iou]

# +
# For each model, collect (mAP, banana AP, coconut AP) and flatten into a
# 3-element list for the grouped bar chart.
noaug = [(x['mAP'], x['banana treeAP'], x['coconut treeAP']) for x in data_iou if x['model']=='noaugment30000']
noaug = list(itertools.chain(*noaug)) # Flattens List
print(noaug)

basic = [(x['mAP'], x['banana treeAP'], x['coconut treeAP']) for x in data_iou if x['model']=='basic30000']
basic = list(itertools.chain(*basic)) # Flattens List
print(basic)

frz22 = [(x['mAP'], x['banana treeAP'], x['coconut treeAP']) for x in data_iou if x['model']=='22froze30000']
frz22 = list(itertools.chain(*frz22)) # Flattens List
print(frz22)

frz13 = [(x['mAP'], x['banana treeAP'], x['coconut treeAP']) for x in data_iou if x['model']=='13froze30000']
frz13 = list(itertools.chain(*frz13)) # Flattens List
print(frz13)

balan = [(x['mAP'], x['banana treeAP'], x['coconut treeAP']) for x in data_iou if x['model']=='balanced30000']
balan = list(itertools.chain(*balan)) # Flattens List
print(balan)

allv2 = [(x['mAP'], x['banana treeAP'], x['coconut treeAP']) for x in data_iou if x['model']=='allv2-20000']
allv2 = list(itertools.chain(*allv2)) # Flattens List
print(allv2)

ancho = [(x['mAP'], x['banana treeAP'], x['coconut treeAP']) for x in data_iou if x['model']=='anchors30000']
ancho = list(itertools.chain(*ancho)) # Flattens List
# FIX: was print(allv2) -- a copy-paste slip; print the list just built.
print(ancho)
# NOTE(review): `ancho` (anchors30000) is loaded but never plotted below --
# confirm whether it should get a seventh bar.

# +
# Plot: six bars per metric group (mAP, banana AP, coconut AP).
fig, ax = plt.subplots(ncols=1, figsize=(30,20))
ind = np.arange(len(basic))
width = 0.09
colours = ['#4d4d4d', '#cd2323', '#4d4d4d', '#4d4d4d']

p1 = ax.bar(ind, noaug, width, color=colours[0], alpha=1)
p2 = ax.bar(ind+width+0.01, basic, width, color=colours[1], alpha=1)
p3 = ax.bar(ind+2*(width+0.01), frz13, width, color=colours[3], alpha=0.8)
p4 = ax.bar(ind+3*(width+0.01), frz22, width, color=colours[2], alpha=0.6)
p5 = ax.bar(ind+4*(width+0.01), balan, width, color=colours[3], alpha=0.4)
p6 = ax.bar(ind+5*(width+0.01), allv2, width, color=colours[3], alpha=0.2)

# Adjust spines
ax.spines['right'].set_visible(False)
ax.spines['top'].set_visible(False)
ax.spines['left'].set_visible(False)
ax.spines['bottom'].set_visible(False)

# Adjust Ticks
ax.tick_params(axis='y', direction='out', color='#4d4d4d', which='both', labelcolor='#4d4d4d', labelsize=24)
ax.tick_params(axis='x', color='white', labelcolor='#4d4d4d', labelsize=24)
plt.yticks([0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7])
plt.xticks([0.25, 1.25, 2.25], ['mAP over Bananas and Coconuts', 'Banana Trees', 'Coconut Trees'],fontsize=24)

# Adjust Grid
ax.grid(axis='y', color='white', linewidth=1)
ax.set_ylim(0, 0.8)

# Add text to plot
ax.set_ylabel('Average Precision', color='#4d4d4d', size=30, labelpad=30)

# Add Legend
# FIX: labels 3 and 4 were swapped relative to the handles -- p3 plots the
# 13-frozen-layer run (frz13) and p4 the 22-frozen-layer run (frz22).
ax.legend((p1[0], p2[0], p3[0], p4[0], p5[0], p6[0]),
          ('No Augmentation', 'No Pretraining', 'Pretrained (13 Frozen Layers)',
           'Pretrained (22 Frozen Layers)', 'Balanced Dataset', 'Detecting Seven Classes'),
          loc=9, ncol=2, frameon=False, borderpad=0, prop={'size': 25})

# Add Title
ax.set_title('Comparing Model Performance', size=36, color='#4d4d4d')

plt.savefig('../output/plot.png')
# -

# ## Plotting Increasing mAP in iterative unfreezing process

# (mAP, banana AP, coconut AP) tuples for the layer-by-layer ("lbl") runs,
# kept unflattened so each series can be indexed out below.
lbl = [(x['mAP'], x['banana treeAP'], x['coconut treeAP']) for x in data_iou if x['model'].startswith('lbl')]
#lbl = list(itertools.chain(*lbl)) # Flattens List
print(lbl)

# +
# Plot
fig, ax = plt.subplots(ncols=1, figsize=(30,20))
colours = ['#000000','#ffff00', '#996600']
ind = np.arange(len(lbl))
print(ind)

# +
p1 = ax.plot(ind, [x[0] for x in lbl], linewidth=6, color=colours[0]) #mAP
p2 = ax.plot(ind, [x[1] for x in lbl], linewidth=6, color=colours[1]) #bAP
p3 = ax.plot(ind, [x[2] for x in lbl], linewidth=6, color=colours[2]) #cAP

# Add text to plot
ax.set_ylabel('Average Precision', color=colours[0], size=30, labelpad=30)
ax.set_xlabel('Unfrozen Layers (out of 23 convolutional layers)', color=colours[0], size=30, labelpad=30)

# Adjust Ticks
ax.tick_params(axis='y',labelsize=24)
ax.tick_params(axis='x', color='black', labelcolor='#4d4d4d', labelsize=24)
# NOTE(review): these hard-coded labels assume exactly eight "lbl" runs --
# confirm len(lbl) == 8 for the current results file.
plt.xticks(ind,['1','3','4','5','6','7','8','9'])

# Add Legend
ax.legend((p3[0], p1[0], p2[0]), ('Coconut AP', 'mAP', 'Banana AP', ), loc=2, ncol=1, frameon=False, borderpad=0, prop={'size': 25})

# Add Title
ax.set_title('How Average Precision Increases as Layers are Unfrozen', size=36, color='#4d4d4d')

plt.savefig('output/plot2.png', bbox_inches='tight')
# -
InteractiveTools/Report Visualizations.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Dynamic Programming # # In this notebook, you will write your own implementations of many classical dynamic programming algorithms. # # While we have provided some starter code, you are welcome to erase these hints and write your code from scratch. # # --- # # ### Part 0: Explore FrozenLakeEnv # # We begin by importing the necessary packages. # + import numpy as np import copy import check_test from frozenlake import FrozenLakeEnv from plot_utils import plot_values # - # Use the code cell below to create an instance of the [FrozenLake](https://github.com/openai/gym/blob/master/gym/envs/toy_text/frozen_lake.py) environment. env = FrozenLakeEnv() # The agent moves through a $4 \times 4$ gridworld, with states numbered as follows: # ``` # [[ 0 1 2 3] # [ 4 5 6 7] # [ 8 9 10 11] # [12 13 14 15]] # ``` # and the agent has 4 potential actions: # ``` # LEFT = 0 # DOWN = 1 # RIGHT = 2 # UP = 3 # ``` # # Thus, $\mathcal{S}^+ = \{0, 1, \ldots, 15\}$, and $\mathcal{A} = \{0, 1, 2, 3\}$. Verify this by running the code cell below. # + # print the state space and action space print(env.observation_space) print(env.action_space) # print the total number of states and actions print(env.nS) print(env.nA) # - # Dynamic programming assumes that the agent has full knowledge of the MDP. We have already amended the `frozenlake.py` file to make the one-step dynamics accessible to the agent. # # Execute the code cell below to return the one-step dynamics corresponding to a particular state and action. In particular, `env.P[1][0]` returns the the probability of each possible reward and next state, if the agent is in state 1 of the gridworld and decides to go left. 
env.P[1][0] # Each entry takes the form # ``` # prob, next_state, reward, done # ``` # where: # - `prob` details the conditional probability of the corresponding (`next_state`, `reward`) pair, and # - `done` is `True` if the `next_state` is a terminal state, and otherwise `False`. # # Thus, we can interpret `env.P[1][0]` as follows: # $$ # \mathbb{P}(S_{t+1}=s',R_{t+1}=r|S_t=1,A_t=0) = \begin{cases} # \frac{1}{3} \text{ if } s'=1, r=0\\ # \frac{1}{3} \text{ if } s'=0, r=0\\ # \frac{1}{3} \text{ if } s'=5, r=0\\ # 0 \text{ else} # \end{cases} # $$ # # To understand the value of `env.P[1][0]`, note that when you create a FrozenLake environment, it takes as an (optional) argument `is_slippery`, which defaults to `True`. # # To see this, change the first line in the notebook from `env = FrozenLakeEnv()` to `env = FrozenLakeEnv(is_slippery=False)`. Then, when you check `env.P[1][0]`, it should look like what you expect (i.e., `env.P[1][0] = [(1.0, 0, 0.0, False)]`). # # The default value for the `is_slippery` argument is `True`, and so `env = FrozenLakeEnv()` is equivalent to `env = FrozenLakeEnv(is_slippery=True)`. In the event that `is_slippery=True`, you see that this can result in the agent moving in a direction that it did not intend (where the idea is that the ground is *slippery*, and so the agent can slide to a location other than the one it wanted). # # Feel free to change the code cell above to explore how the environment behaves in response to other (state, action) pairs. # # Before proceeding to the next part, make sure that you set `is_slippery=True`, so that your implementations below will work with the slippery environment! # ### Part 1: Iterative Policy Evaluation # # In this section, you will write your own implementation of iterative policy evaluation. # # Your algorithm should accept four arguments as **input**: # - `env`: This is an instance of an OpenAI Gym environment, where `env.P` returns the one-step dynamics. 
# - `policy`: This is a 2D numpy array with `policy.shape[0]` equal to the number of states (`env.nS`), and `policy.shape[1]` equal to the number of actions (`env.nA`). `policy[s][a]` returns the probability that the agent takes action `a` while in state `s` under the policy. # - `gamma`: This is the discount rate. It must be a value between 0 and 1, inclusive (default value: `1`). # - `theta`: This is a very small positive number that is used to decide if the estimate has sufficiently converged to the true value function (default value: `1e-8`). # # The algorithm returns as **output**: # - `V`: This is a 1D numpy array with `V.shape[0]` equal to the number of states (`env.nS`). `V[s]` contains the estimated value of state `s` under the input policy. # # Please complete the function in the code cell below. def policy_evaluation(env, policy, gamma=1, theta=1e-8): V = np.zeros(env.nS) ## TODO: complete the function return V # We will evaluate the equiprobable random policy $\pi$, where $\pi(a|s) = \frac{1}{|\mathcal{A}(s)|}$ for all $s\in\mathcal{S}$ and $a\in\mathcal{A}(s)$. # # Use the code cell below to specify this policy in the variable `random_policy`. random_policy = np.ones([env.nS, env.nA]) / env.nA # Run the next code cell to evaluate the equiprobable random policy and visualize the output. The state-value function has been reshaped to match the shape of the gridworld. # + # evaluate the policy V = policy_evaluation(env, random_policy) plot_values(V) # - # Run the code cell below to test your function. If the code cell returns **PASSED**, then you have implemented the function correctly! # # **Note:** In order to ensure accurate results, make sure that your `policy_evaluation` function satisfies the requirements outlined above (with four inputs, a single output, and with the default values of the input arguments unchanged). 
check_test.run_check('policy_evaluation_check', policy_evaluation) # ### Part 2: Obtain $q_\pi$ from $v_\pi$ # # In this section, you will write a function that takes the state-value function estimate as input, along with some state $s\in\mathcal{S}$. It returns the **row in the action-value function** corresponding to the input state $s\in\mathcal{S}$. That is, your function should accept as input both $v_\pi$ and $s$, and return $q_\pi(s,a)$ for all $a\in\mathcal{A}(s)$. # # Your algorithm should accept four arguments as **input**: # - `env`: This is an instance of an OpenAI Gym environment, where `env.P` returns the one-step dynamics. # - `V`: This is a 1D numpy array with `V.shape[0]` equal to the number of states (`env.nS`). `V[s]` contains the estimated value of state `s`. # - `s`: This is an integer corresponding to a state in the environment. It should be a value between `0` and `(env.nS)-1`, inclusive. # - `gamma`: This is the discount rate. It must be a value between 0 and 1, inclusive (default value: `1`). # # The algorithm returns as **output**: # - `q`: This is a 1D numpy array with `q.shape[0]` equal to the number of actions (`env.nA`). `q[a]` contains the (estimated) value of state `s` and action `a`. # # Please complete the function in the code cell below. def q_from_v(env, V, s, gamma=1): q = np.zeros(env.nA) ## TODO: complete the function return q # Run the code cell below to print the action-value function corresponding to the above state-value function. Q = np.zeros([env.nS, env.nA]) for s in range(env.nS): Q[s] = q_from_v(env, V, s) print("Action-Value Function:") print(Q) # Run the code cell below to test your function. If the code cell returns **PASSED**, then you have implemented the function correctly! # # **Note:** In order to ensure accurate results, make sure that the `q_from_v` function satisfies the requirements outlined above (with four inputs, a single output, and with the default values of the input arguments unchanged). 
check_test.run_check('q_from_v_check', q_from_v) # ### Part 3: Policy Improvement # # In this section, you will write your own implementation of policy improvement. # # Your algorithm should accept three arguments as **input**: # - `env`: This is an instance of an OpenAI Gym environment, where `env.P` returns the one-step dynamics. # - `V`: This is a 1D numpy array with `V.shape[0]` equal to the number of states (`env.nS`). `V[s]` contains the estimated value of state `s`. # - `gamma`: This is the discount rate. It must be a value between 0 and 1, inclusive (default value: `1`). # # The algorithm returns as **output**: # - `policy`: This is a 2D numpy array with `policy.shape[0]` equal to the number of states (`env.nS`), and `policy.shape[1]` equal to the number of actions (`env.nA`). `policy[s][a]` returns the probability that the agent takes action `a` while in state `s` under the policy. # # Please complete the function in the code cell below. You are encouraged to use the `q_from_v` function you implemented above. def policy_improvement(env, V, gamma=1): policy = np.zeros([env.nS, env.nA]) / env.nA ## TODO: complete the function return policy # Run the code cell below to test your function. If the code cell returns **PASSED**, then you have implemented the function correctly! # # **Note:** In order to ensure accurate results, make sure that the `policy_improvement` function satisfies the requirements outlined above (with three inputs, a single output, and with the default values of the input arguments unchanged). # # Before moving on to the next part of the notebook, you are strongly encouraged to check out the solution in **Dynamic_Programming_Solution.ipynb**. There are many correct ways to approach this function! check_test.run_check('policy_improvement_check', policy_improvement) # ### Part 4: Policy Iteration # # In this section, you will write your own implementation of policy iteration. 
The algorithm returns the optimal policy, along with its corresponding state-value function. # # Your algorithm should accept three arguments as **input**: # - `env`: This is an instance of an OpenAI Gym environment, where `env.P` returns the one-step dynamics. # - `gamma`: This is the discount rate. It must be a value between 0 and 1, inclusive (default value: `1`). # - `theta`: This is a very small positive number that is used to decide if the policy evaluation step has sufficiently converged to the true value function (default value: `1e-8`). # # The algorithm returns as **output**: # - `policy`: This is a 2D numpy array with `policy.shape[0]` equal to the number of states (`env.nS`), and `policy.shape[1]` equal to the number of actions (`env.nA`). `policy[s][a]` returns the probability that the agent takes action `a` while in state `s` under the policy. # - `V`: This is a 1D numpy array with `V.shape[0]` equal to the number of states (`env.nS`). `V[s]` contains the estimated value of state `s`. # # Please complete the function in the code cell below. You are strongly encouraged to use the `policy_evaluation` and `policy_improvement` functions you implemented above. def policy_iteration(env, gamma=1, theta=1e-8): policy = np.ones([env.nS, env.nA]) / env.nA ## TODO: complete the function return policy, V # Run the next code cell to solve the MDP and visualize the output. The optimal state-value function has been reshaped to match the shape of the gridworld. # # **Compare the optimal state-value function to the state-value function from Part 1 of this notebook**. 
_Is the optimal state-value function consistently greater than or equal to the state-value function for the equiprobable random policy?_ # + # obtain the optimal policy and optimal state-value function policy_pi, V_pi = policy_iteration(env) # print the optimal policy print("\nOptimal Policy (LEFT = 0, DOWN = 1, RIGHT = 2, UP = 3):") print(policy_pi,"\n") plot_values(V_pi) # - # Run the code cell below to test your function. If the code cell returns **PASSED**, then you have implemented the function correctly! # # **Note:** In order to ensure accurate results, make sure that the `policy_iteration` function satisfies the requirements outlined above (with three inputs, two outputs, and with the default values of the input arguments unchanged). check_test.run_check('policy_iteration_check', policy_iteration) # ### Part 5: Truncated Policy Iteration # # In this section, you will write your own implementation of truncated policy iteration. # # You will begin by implementing truncated policy evaluation. Your algorithm should accept five arguments as **input**: # - `env`: This is an instance of an OpenAI Gym environment, where `env.P` returns the one-step dynamics. # - `policy`: This is a 2D numpy array with `policy.shape[0]` equal to the number of states (`env.nS`), and `policy.shape[1]` equal to the number of actions (`env.nA`). `policy[s][a]` returns the probability that the agent takes action `a` while in state `s` under the policy. # - `V`: This is a 1D numpy array with `V.shape[0]` equal to the number of states (`env.nS`). `V[s]` contains the estimated value of state `s`. # - `max_it`: This is a positive integer that corresponds to the number of sweeps through the state space (default value: `1`). # - `gamma`: This is the discount rate. It must be a value between 0 and 1, inclusive (default value: `1`). # # The algorithm returns as **output**: # - `V`: This is a 1D numpy array with `V.shape[0]` equal to the number of states (`env.nS`). 
`V[s]` contains the estimated value of state `s`. # # Please complete the function in the code cell below. def truncated_policy_evaluation(env, policy, V, max_it=1, gamma=1): ## TODO: complete the function return V # Next, you will implement truncated policy iteration. Your algorithm should accept five arguments as **input**: # - `env`: This is an instance of an OpenAI Gym environment, where `env.P` returns the one-step dynamics. # - `max_it`: This is a positive integer that corresponds to the number of sweeps through the state space (default value: `1`). # - `gamma`: This is the discount rate. It must be a value between 0 and 1, inclusive (default value: `1`). # - `theta`: This is a very small positive number that is used for the stopping criterion (default value: `1e-8`). # # The algorithm returns as **output**: # - `policy`: This is a 2D numpy array with `policy.shape[0]` equal to the number of states (`env.nS`), and `policy.shape[1]` equal to the number of actions (`env.nA`). `policy[s][a]` returns the probability that the agent takes action `a` while in state `s` under the policy. # - `V`: This is a 1D numpy array with `V.shape[0]` equal to the number of states (`env.nS`). `V[s]` contains the estimated value of state `s`. # # Please complete the function in the code cell below. def truncated_policy_iteration(env, max_it=1, gamma=1, theta=1e-8): V = np.zeros(env.nS) policy = np.zeros([env.nS, env.nA]) / env.nA ## TODO: complete the function return policy, V # Run the next code cell to solve the MDP and visualize the output. The state-value function has been reshaped to match the shape of the gridworld. # # Play with the value of the `max_it` argument. Do you always end with the optimal state-value function? 
# + policy_tpi, V_tpi = truncated_policy_iteration(env, max_it=2) # print the optimal policy print("\nOptimal Policy (LEFT = 0, DOWN = 1, RIGHT = 2, UP = 3):") print(policy_tpi,"\n") # plot the optimal state-value function plot_values(V_tpi) # - # Run the code cell below to test your function. If the code cell returns **PASSED**, then you have implemented the function correctly! # # **Note:** In order to ensure accurate results, make sure that the `truncated_policy_iteration` function satisfies the requirements outlined above (with four inputs, two outputs, and with the default values of the input arguments unchanged). check_test.run_check('truncated_policy_iteration_check', truncated_policy_iteration) # ### Part 6: Value Iteration # # In this section, you will write your own implementation of value iteration. # # Your algorithm should accept three arguments as input: # - `env`: This is an instance of an OpenAI Gym environment, where `env.P` returns the one-step dynamics. # - `gamma`: This is the discount rate. It must be a value between 0 and 1, inclusive (default value: `1`). # - `theta`: This is a very small positive number that is used for the stopping criterion (default value: `1e-8`). # # The algorithm returns as **output**: # - `policy`: This is a 2D numpy array with `policy.shape[0]` equal to the number of states (`env.nS`), and `policy.shape[1]` equal to the number of actions (`env.nA`). `policy[s][a]` returns the probability that the agent takes action `a` while in state `s` under the policy. # - `V`: This is a 1D numpy array with `V.shape[0]` equal to the number of states (`env.nS`). `V[s]` contains the estimated value of state `s`. def value_iteration(env, gamma=1, theta=1e-8): V = np.zeros(env.nS) ## TODO: complete the function return policy, V # Use the next code cell to solve the MDP and visualize the output. The state-value function has been reshaped to match the shape of the gridworld. 
# + policy_vi, V_vi = value_iteration(env) # print the optimal policy print("\nOptimal Policy (LEFT = 0, DOWN = 1, RIGHT = 2, UP = 3):") print(policy_vi,"\n") # plot the optimal state-value function plot_values(V_vi) # - # Run the code cell below to test your function. If the code cell returns **PASSED**, then you have implemented the function correctly! # # **Note:** In order to ensure accurate results, make sure that the `value_iteration` function satisfies the requirements outlined above (with three inputs, two outputs, and with the default values of the input arguments unchanged). check_test.run_check('value_iteration_check', value_iteration)
dynamic-programming/Dynamic_Programming.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Integração Python e ArcGIS # # ### O que é? # # - É na verdade uma API que o ArcGIS fornece para usar o Python para acessar as funcionalidades do ArcGIS # - O ArcGIS consegue criar mapas interativos e com vários tipos de dados dinâmicos # # ### Como vamos usar? # # Consultando a API o tempo todo, afinal, tem tudo pronto na API do ArcGIS para a gente, então não precisamos decorar nada. # # Link da API: https://developers.arcgis.com/python/guide/install-and-set-up/ # # ### O que precisamos? # # Como é uma API que exige conta, deveremos criar a nossa conta e pegar as informações de login necessárias para a API. Além disso, o ArcGIS é um programa pago, mas fornece algumas funcionalidades gratuitas em teste, o que usaremos para aprender. # # ### Problema: o ArcGIS não funciona com todas as versões do Python, vamos precisar usar a 3.6 # # - Para isso, vamos criar um ambiente virtual com o Python 3.6 # - Ative o ambiente virtual # - Instale o jupyter: pip install jupyter # - Instale o ArcGIS: conda install -c esri arcgis # - Digite: jupyter notebook # 2. Conectar a API
primeiros-passos.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + [markdown] id="FYAZ2nct-C2l" # # 머신 러닝 교과서 3판 # + [markdown] id="g-4KQdk2-C2n" # # 18장 - 강화 학습으로 복잡한 환경에서 의사 결정하기 # + [markdown] id="iKN7i0HT-C2n" # **아래 링크를 통해 이 노트북을 주피터 노트북 뷰어(nbviewer.jupyter.org)로 보거나 구글 코랩(colab.research.google.com)에서 실행할 수 있습니다.** # # <table class="tfo-notebook-buttons" align="left"> # <td> # <a target="_blank" href="https://nbviewer.jupyter.org/github/rickiepark/python-machine-learning-book-3rd-edition/blob/master/ch18/ch18.ipynb"><img src="https://jupyter.org/assets/main-logo.svg" width="28" />주피터 노트북 뷰어로 보기</a> # </td> # <td> # <a target="_blank" href="https://colab.research.google.com/github/rickiepark/python-machine-learning-book-3rd-edition/blob/master/ch18/ch18.ipynb"><img src="https://www.tensorflow.org/images/colab_logo_32px.png" />구글 코랩(Colab)에서 실행하기</a> # </td> # </table> # + [markdown] id="EOCPtjVK-C2n" # ### 목차 # + [markdown] id="oVuwjmWN-C2o" # - 경험에서 배웁니다 # - 강화 학습 이해하기 # - 강화 학습 시스템의 에이전트-환경 인터페이스 정의하기 # - 강화 학습의 기초 이론 # - 마르코프 결정 과정 # - 마르코프 결정 과정의 수학 공식 # - 마르코프 과정 시각화 # - 에피소드 작업 대 연속적인 작업 # - 강화 학습 용어: 대가, 정책, 가치 함수 # - 대가 # - 정책 # - 가치 함수 # - 벨먼 방정식을 사용한 동적 계획법 # - 강화 학습 알고리즘 # - 동적 계획법 # - 정책 평가 - 동적 계획법으로 가치 함수 예측하기 # - 추정된 가치 함수로 정책 향상시키기 # - 정책 반복 # - 가치 반복 # - 몬테 카를로를 사용한 강화 학습 # - MC를 사용한 상태-가치 함수 추정 # - MC를 사용한 행동-가치 함수 추정 # - MC 제어를 사용해 최적의 정책 찾기 # - 정책 향상 - 행동-가치 함수로부터 그리디 정책 계산하기 # - 시간차 학습 # - TD 예측 # - 온-폴리시 TD 제어 (SARSA) # - 오프-폴리시 TD 제어 (Q-러닝) # - 첫 번째 강화 학습 알고리즘 구현하기 # - OpenAI 짐 툴킷 소개 # - OpenAI 짐에 포함된 환경 사용하기 # - 그리드 월드 # - OpenAI 짐에서 그리드 월드 환경 구현하기 # - Q-러닝으로 그리드 월드 문제 풀기 # - Q-러닝 알고리즘 구현하기 # - 심층 Q-러닝 # - Q-러닝 알고리즘에 따라 DQN 모델 훈련하기 # - 재생 메모리 # - 손실 계산을 위해 타깃 가치 결정하기 # - 심층 Q-러닝 알고리즘 구현 # - 전체 요약 # + id="aW0h86w9-C2o" from IPython.display import Image # + [markdown] 
id="BjkzSp0e-C2o" # # 경험에서 배웁니다 # # ## 강화 학습 이해하기 # # ## 강화 학습 시스템의 에이전트-환경 인터페이스 정의하기 # + id="he9yqXQY-C2p" outputId="6b540c74-58a8-48c0-d422-a1ab43c81865" colab={"base_uri": "https://localhost:8080/", "height": 446} Image(url='https://git.io/JtTQo', width=700) # + [markdown] id="ULH0eSrQ-C2p" # # 강화 학습의 기초 이론 # # ## 마르코프 결정 과정 # # ## 마르코프 결정 과정의 수학 공식 # + id="QXU39HEE-C2q" outputId="0e689094-398b-42ea-f35f-4cc5faa3e11e" colab={"base_uri": "https://localhost:8080/", "height": 473} Image(url='https://git.io/JtTQi', width=700) # + [markdown] id="wVy6MQKE-C2q" # ### 마르코프 과정 시각화 # + id="R4u_4d6w-C2q" outputId="fc2ff59b-e8ab-4f15-8ef9-1c0abbd8f5c4" colab={"base_uri": "https://localhost:8080/", "height": 276} Image(url='https://git.io/JtTQP', width=700) # + [markdown] id="xN6MVEs3-C2q" # ### 에피소드 작업 대 연속적인 작업 # + [markdown] id="FdueKfC5-C2r" # ## 강화 학습 용어: 대가, 정책, 가치 함수 # # ### 대가 # + id="k0i6lJUZ-C2r" outputId="28f407c1-e3e3-4692-e73b-5be7d7c17748" colab={"base_uri": "https://localhost:8080/", "height": 421} # 할인 계수에 대하여 Image(url='https://git.io/Jtkcl', width=700) # + [markdown] id="BS8vmyz7-C2r" # ### 정책 # # ### 가치 함수 # + [markdown] id="jzQGkggx-C2r" # ## 벨먼 방정식을 사용한 동적 계획법 # + [markdown] id="mOwyDK-f-C2r" # # 강화 학습 알고리즘 # + id="fXKrwBqo-C2r" outputId="fa05f895-622c-4750-f57b-bf47e2ba64b0" colab={"base_uri": "https://localhost:8080/", "height": 236} Image(url='https://git.io/Jtkc4', width=700) # + [markdown] id="diXYfr-1-C2s" # ## 동적 계획법 # # ### 정책 평가 - 동적 계획법으로 가치 함수 예측하기 # # ### 추정된 가치 함수로 정책 향상시키기 # # ### 정책 반복 # # ### 가치 반복 # + [markdown] id="f8zZTfDr-C2s" # ## 몬테 카를로를 사용한 강화 학습 # # ### MC를 사용한 상태-가치 함수 추정 # # ### MC를 사용한 행동-가치 함수 추정 # # ### MC 제어를 사용해 최적의 정책 찾기 # # ### 정책 향상 - 행동-가치 함수로부터 그리디 정책 계산하기 # + [markdown] id="MYHcYa2n-C2s" # ## 시간차 학습 # # ### TD 예측 # # ### 온-폴리시 TD 제어 (SARSA) # # ### 오프-폴리시 TD 제어 (Q-러닝) # + [markdown] id="6r2LUtd1-C2s" # # 첫 번째 강화 학습 알고리즘 구현하기 # # ## OpenAI 짐 툴킷 소개 # # ### OpenAI 짐에 포함된 환경 사용하기 # + id="_hD-JWTu-C2s" 
outputId="8a653ed4-832b-4e91-ca9a-ba8796429110" colab={"base_uri": "https://localhost:8080/", "height": 203} Image(url='https://git.io/JtkcB', width=800) # + [markdown] id="LeF3J9WX-C2s" # ### 그리드 월드 # + id="YxGS_WO4-C2s" outputId="ca2acb50-9a3e-4a2c-ea94-2b85e8957052" colab={"base_uri": "https://localhost:8080/", "height": 303} Image(url='https://git.io/Jtkc0', width=800) # + [markdown] id="SIehs9EQ-C2t" # ### OpenAI 짐에서 그리드 월드 환경 구현하기 # + [markdown] id="0YCt4M_g-C2t" # ```python # # 스크립트: gridworld_env.py # # import numpy as np # from gym.envs.toy_text import discrete # from collections import defaultdict # import time # import pickle # import os # # from gym.envs.classic_control import rendering # # CELL_SIZE = 100 # MARGIN = 10 # # # def get_coords(row, col, loc='center'): # xc = (col + 1.5) * CELL_SIZE # yc = (row + 1.5) * CELL_SIZE # if loc == 'center': # return xc, yc # elif loc == 'interior_corners': # half_size = CELL_SIZE//2 - MARGIN # xl, xr = xc - half_size, xc + half_size # yt, yb = xc - half_size, xc + half_size # return [(xl, yt), (xr, yt), (xr, yb), (xl, yb)] # elif loc == 'interior_triangle': # x1, y1 = xc, yc + CELL_SIZE//3 # x2, y2 = xc + CELL_SIZE//3, yc - CELL_SIZE//3 # x3, y3 = xc - CELL_SIZE//3, yc - CELL_SIZE//3 # return [(x1, y1), (x2, y2), (x3, y3)] # # # def draw_object(coords_list): # if len(coords_list) == 1: # -> 원 # obj = rendering.make_circle(int(0.45*CELL_SIZE)) # obj_transform = rendering.Transform() # obj.add_attr(obj_transform) # obj_transform.set_translation(*coords_list[0]) # obj.set_color(0.2, 0.2, 0.2) # -> 검정 # elif len(coords_list) == 3: # -> 삼각형 # obj = rendering.FilledPolygon(coords_list) # obj.set_color(0.9, 0.6, 0.2) # -> 노랑 # elif len(coords_list) > 3: # -> 다각형 # obj = rendering.FilledPolygon(coords_list) # obj.set_color(0.4, 0.4, 0.8) # -> 파랑 # return obj # # # class GridWorldEnv(discrete.DiscreteEnv): # def __init__(self, num_rows=4, num_cols=6, delay=0.05): # self.num_rows = num_rows # self.num_cols = num_cols # # 
self.delay = delay # # move_up = lambda row, col: (max(row - 1, 0), col) # move_down = lambda row, col: (min(row + 1, num_rows - 1), col) # move_left = lambda row, col: (row, max(col - 1, 0)) # move_right = lambda row, col: (row, min(col + 1, num_cols - 1)) # # self.action_defs = {0: move_up, 1: move_right, # 2: move_down, 3: move_left} # # # 상태와 행동 개수 # nS = num_cols * num_rows # nA = len(self.action_defs) # self.grid2state_dict = {(s // num_cols, s % num_cols): s # for s in range(nS)} # self.state2grid_dict = {s: (s // num_cols, s % num_cols) # for s in range(nS)} # # # 골드 상태 # gold_cell = (num_rows // 2, num_cols - 2) # # # 함정 상태 # trap_cells = [((gold_cell[0] + 1), gold_cell[1]), # (gold_cell[0], gold_cell[1] - 1), # ((gold_cell[0] - 1), gold_cell[1])] # # gold_state = self.grid2state_dict[gold_cell] # trap_states = [self.grid2state_dict[(r, c)] # for (r, c) in trap_cells] # self.terminal_states = [gold_state] + trap_states # print(self.terminal_states) # # # 전이 확률 만들기 # P = defaultdict(dict) # for s in range(nS): # row, col = self.state2grid_dict[s] # P[s] = defaultdict(list) # for a in range(nA): # action = self.action_defs[a] # next_s = self.grid2state_dict[action(row, col)] # # # 종료 상태 # if self.is_terminal(next_s): # r = (1.0 if next_s == self.terminal_states[0] # else -1.0) # else: # r = 0.0 # if self.is_terminal(s): # done = True # next_s = s # else: # done = False # P[s][a] = [(1.0, next_s, r, done)] # # # 초기 상태 배치 # isd = np.zeros(nS) # isd[0] = 1.0 # # super(GridWorldEnv, self).__init__(nS, nA, P, isd) # # self.viewer = None # self._build_display(gold_cell, trap_cells) # # def is_terminal(self, state): # return state in self.terminal_states # # def _build_display(self, gold_cell, trap_cells): # # screen_width = (self.num_cols + 2) * CELL_SIZE # screen_height = (self.num_rows + 2) * CELL_SIZE # self.viewer = rendering.Viewer(screen_width, # screen_height) # # all_objects = [] # # # 경계 위치 좌표 # bp_list = [ # (CELL_SIZE - MARGIN, CELL_SIZE - MARGIN), # 
(screen_width - CELL_SIZE + MARGIN, CELL_SIZE - MARGIN), # (screen_width - CELL_SIZE + MARGIN, # screen_height - CELL_SIZE + MARGIN), # (CELL_SIZE - MARGIN, screen_height - CELL_SIZE + MARGIN) # ] # border = rendering.PolyLine(bp_list, True) # border.set_linewidth(5) # all_objects.append(border) # # # 수직선 # for col in range(self.num_cols + 1): # x1, y1 = (col + 1) * CELL_SIZE, CELL_SIZE # x2, y2 = (col + 1) * CELL_SIZE, \ # (self.num_rows + 1) * CELL_SIZE # line = rendering.PolyLine([(x1, y1), (x2, y2)], False) # all_objects.append(line) # # # 수평선 # for row in range(self.num_rows + 1): # x1, y1 = CELL_SIZE, (row + 1) * CELL_SIZE # x2, y2 = (self.num_cols + 1) * CELL_SIZE, \ # (row + 1) * CELL_SIZE # line = rendering.PolyLine([(x1, y1), (x2, y2)], False) # all_objects.append(line) # # # 함정: --> 원 # for cell in trap_cells: # trap_coords = get_coords(*cell, loc='center') # all_objects.append(draw_object([trap_coords])) # # # 골드: --> 삼각형 # gold_coords = get_coords(*gold_cell, # loc='interior_triangle') # all_objects.append(draw_object(gold_coords)) # # # 에이전트 --> 사각형 또는 로봇 # if (os.path.exists('robot-coordinates.pkl') and CELL_SIZE == 100): # agent_coords = pickle.load( # open('robot-coordinates.pkl', 'rb')) # starting_coords = get_coords(0, 0, loc='center') # agent_coords += np.array(starting_coords) # else: # agent_coords = get_coords(0, 0, loc='interior_corners') # agent = draw_object(agent_coords) # self.agent_trans = rendering.Transform() # agent.add_attr(self.agent_trans) # all_objects.append(agent) # # for obj in all_objects: # self.viewer.add_geom(obj) # # def render(self, mode='human', done=False): # if done: # sleep_time = 1 # else: # sleep_time = self.delay # x_coord = self.s % self.num_cols # y_coord = self.s // self.num_cols # x_coord = (x_coord + 0) * CELL_SIZE # y_coord = (y_coord + 0) * CELL_SIZE # self.agent_trans.set_translation(x_coord, y_coord) # rend = self.viewer.render( # return_rgb_array=(mode == 'rgb_array')) # time.sleep(sleep_time) # return 
rend # # def close(self): # if self.viewer: # self.viewer.close() # self.viewer = None # # # if __name__ == '__main__': # env = GridWorldEnv(5, 6) # for i in range(1): # s = env.reset() # env.render(mode='human', done=False) # # while True: # action = np.random.choice(env.nA) # res = env.step(action) # print('Action ', env.s, action, ' -> ', res) # env.render(mode='human', done=res[2]) # if res[2]: # break # # env.close() # ``` # + id="A7V3AiWN-C2u" outputId="f78d3dd7-a684-4161-b30d-5591d07db2b0" colab={"base_uri": "https://localhost:8080/", "height": 563} Image(url='https://bit.ly/34MpM2p', width=600) # + [markdown] id="q8uvcoXy-C2v" # ## Q-러닝으로 그리드 월드 문제 풀기 # # ### Q-러닝 알고리즘 구현하기 # + [markdown] id="qGrhQ644-C2v" # ```python # # 스크립트: agent.py # # from collections import defaultdict # import numpy as np # # # class Agent(object): # def __init__( # self, env, # learning_rate=0.01, # discount_factor=0.9, # epsilon_greedy=0.9, # epsilon_min=0.1, # epsilon_decay=0.95): # self.env = env # self.lr = learning_rate # self.gamma = discount_factor # self.epsilon = epsilon_greedy # self.epsilon_min = epsilon_min # self.epsilon_decay = epsilon_decay # # # q_table 정의 # self.q_table = defaultdict(lambda: np.zeros(self.env.nA)) # # def choose_action(self, state): # if np.random.uniform() < self.epsilon: # action = np.random.choice(self.env.nA) # else: # q_vals = self.q_table[state] # perm_actions = np.random.permutation(self.env.nA) # q_vals = [q_vals[a] for a in perm_actions] # perm_q_argmax = np.argmax(q_vals) # action = perm_actions[perm_q_argmax] # return action # # def _learn(self, transition): # s, a, r, next_s, done = transition # q_val = self.q_table[s][a] # if done: # q_target = r # else: # q_target = r + self.gamma*np.max(self.q_table[next_s]) # # # q_table 업데이트 # self.q_table[s][a] += self.lr * (q_target - q_val) # # # epislon 조정 # self._adjust_epsilon() # # def _adjust_epsilon(self): # if self.epsilon > self.epsilon_min: # self.epsilon *= self.epsilon_decay # # ``` # 
+ [markdown] id="jQkxBw-Q-C2v" # ```python # # 스크립트: qlearning.py # # from gridworld_env import GridWorldEnv # from agent import Agent # from collections import namedtuple # import matplotlib.pyplot as plt # import numpy as np # # np.random.seed(1) # # Transition = namedtuple( # 'Transition', ('state', 'action', 'reward', 'next_state', 'done')) # # # def run_qlearning(agent, env, num_episodes=50): # history = [] # for episode in range(num_episodes): # state = env.reset() # env.render(mode='human') # final_reward, n_moves = 0.0, 0 # while True: # action = agent.choose_action(state) # next_s, reward, done, _ = env.step(action) # agent._learn(Transition(state, action, reward, # next_s, done)) # env.render(mode='human', done=done) # state = next_s # n_moves += 1 # if done: # break # final_reward = reward # history.append((n_moves, final_reward)) # print('에피소드 %d: 보상 %.1f #이동 %d' # % (episode, final_reward, n_moves)) # # return history # # # def plot_learning_history(history): # fig = plt.figure(1, figsize=(14, 10)) # ax = fig.add_subplot(2, 1, 1) # episodes = np.arange(len(history)) # moves = np.array([h[0] for h in history]) # plt.plot(episodes, moves, lw=4, # marker="o", markersize=10) # ax.tick_params(axis='both', which='major', labelsize=15) # plt.xlabel('Episodes', size=20) # plt.ylabel('# moves', size=20) # # ax = fig.add_subplot(2, 1, 2) # rewards = np.array([h[1] for h in history]) # plt.step(episodes, rewards, lw=4) # ax.tick_params(axis='both', which='major', labelsize=15) # plt.xlabel('Episodes', size=20) # plt.ylabel('Final rewards', size=20) # plt.savefig('q-learning-history.png', dpi=300) # plt.show() # # # if __name__ == '__main__': # env = GridWorldEnv(num_rows=5, num_cols=6) # agent = Agent(env) # history = run_qlearning(agent, env) # env.close() # # plot_learning_history(history) # ``` # + id="n9Xtr4Kj-C2v" outputId="f54cfb60-3e18-4830-c32d-a858efe872b6" colab={"base_uri": "https://localhost:8080/", "height": 593} Image(url='https://bit.ly/2TBkxR3', 
width=800) # + [markdown] id="f9v2Yd6B-C2w" # ## 심층 Q-러닝 # + id="kpGLb-g3-C2w" outputId="e7cb3dda-7218-428e-e275-04ab6ef7e397" colab={"base_uri": "https://localhost:8080/", "height": 339} Image(url='https://bit.ly/3yUaFlv', width=800) # + [markdown] id="YnUKSG3z-C2w" # ### Q-러닝 알고리즘에 따라 DQN 모델 훈련하기 # # #### 재생 메모리 # + id="FLp8-kCD-C2w" outputId="8fea290d-c841-4aa9-8df8-c62789c01de0" colab={"base_uri": "https://localhost:8080/", "height": 289} Image(url='https://bit.ly/34CELfz', width=800) # + [markdown] id="_Z8jBIMt-C2w" # #### 손실 계산을 위해 타깃 가치 결정하기 # + id="YsFDayA6-C2w" outputId="65d1cdf4-9169-4da1-c723-fe9164254f7a" colab={"base_uri": "https://localhost:8080/", "height": 538} Image(url='https://bit.ly/34Fkwhb', width=800) # + [markdown] id="KUTwBa5Q-C2x" # ## 심층 Q-러닝 알고리즘 구현 # + [markdown] id="quG_mPlr-C2x" # ```python # # 스크립트: carpole/main.py # # import gym # import numpy as np # import tensorflow as tf # import random # import matplotlib.pyplot as plt # from collections import namedtuple # from collections import deque # # np.random.seed(1) # tf.random.set_seed(1) # # Transition = namedtuple( # 'Transition', ('state', 'action', 'reward', # 'next_state', 'done')) # # # class DQNAgent: # def __init__( # self, env, discount_factor=0.95, # epsilon_greedy=1.0, epsilon_min=0.01, # epsilon_decay=0.995, learning_rate=1e-3, # max_memory_size=2000): # self.enf = env # self.state_size = env.observation_space.shape[0] # self.action_size = env.action_space.n # # self.memory = deque(maxlen=max_memory_size) # # self.gamma = discount_factor # self.epsilon = epsilon_greedy # self.epsilon_min = epsilon_min # self.epsilon_decay = epsilon_decay # self.lr = learning_rate # self._build_nn_model() # # def _build_nn_model(self, n_layers=3): # self.model = tf.keras.Sequential() # # # 은닉층 # for n in range(n_layers - 1): # self.model.add(tf.keras.layers.Dense( # units=32, activation='relu')) # self.model.add(tf.keras.layers.Dense( # units=32, activation='relu')) # # # 마지막 층 # 
self.model.add(tf.keras.layers.Dense( # units=self.action_size)) # # # 모델 빌드 & 컴파일 # self.model.build(input_shape=(None, self.state_size)) # self.model.compile( # loss='mse', # optimizer=tf.keras.optimizers.Adam(lr=self.lr)) # # def remember(self, transition): # self.memory.append(transition) # # def choose_action(self, state): # if np.random.rand() <= self.epsilon: # return random.randrange(self.action_size) # q_values = self.model.predict(state)[0] # return np.argmax(q_values) # 행동 반환 # # def _learn(self, batch_samples): # batch_states, batch_targets = [], [] # for transition in batch_samples: # s, a, r, next_s, done = transition # if done: # target = r # else: # target = (r + # self.gamma * np.amax( # self.model.predict(next_s)[0] # ) # ) # target_all = self.model.predict(s)[0] # target_all[a] = target # batch_states.append(s.flatten()) # batch_targets.append(target_all) # self._adjust_epsilon() # return self.model.fit(x=np.array(batch_states), # y=np.array(batch_targets), # epochs=1, # verbose=0) # # def _adjust_epsilon(self): # if self.epsilon > self.epsilon_min: # self.epsilon *= self.epsilon_decay # # def replay(self, batch_size): # samples = random.sample(self.memory, batch_size) # history = self._learn(samples) # return history.history['loss'][0] # # # def plot_learning_history(history): # fig = plt.figure(1, figsize=(14, 5)) # ax = fig.add_subplot(1, 1, 1) # episodes = np.arange(len(history[0])) + 1 # plt.plot(episodes, history[0], lw=4, # marker='o', markersize=10) # ax.tick_params(axis='both', which='major', labelsize=15) # plt.xlabel('Episodes', size=20) # plt.ylabel('# Total Rewards', size=20) # plt.show() # # # # 일반 설정 # EPISODES = 200 # batch_size = 32 # init_replay_memory_size = 500 # # if __name__ == '__main__': # env = gym.make('CartPole-v1') # agent = DQNAgent(env) # state = env.reset() # state = np.reshape(state, [1, agent.state_size]) # # # 재생 메모리 채우기 # for i in range(init_replay_memory_size): # action = agent.choose_action(state) # 
next_state, reward, done, _ = env.step(action) # next_state = np.reshape(next_state, [1, agent.state_size]) # agent.remember(Transition(state, action, reward, # next_state, done)) # if done: # state = env.reset() # state = np.reshape(state, [1, agent.state_size]) # else: # state = next_state # # total_rewards, losses = [], [] # for e in range(EPISODES): # state = env.reset() # if e % 10 == 0: # env.render() # state = np.reshape(state, [1, agent.state_size]) # for i in range(500): # action = agent.choose_action(state) # next_state, reward, done, _ = env.step(action) # next_state = np.reshape(next_state, # [1, agent.state_size]) # agent.remember(Transition(state, action, reward, # next_state, done)) # state = next_state # if e % 10 == 0: # env.render() # if done: # total_rewards.append(i) # print('에피소드: %d/%d, 총 보상: %d' # % (e, EPISODES, i)) # break # loss = agent.replay(batch_size) # losses.append(loss) # plot_learning_history(total_rewards) # ``` # + id="-rTBIyHU-C2x" outputId="8b6a4a86-c9ab-4401-c114-a14ab338c2fe" colab={"base_uri": "https://localhost:8080/", "height": 435} Image(url='https://bit.ly/2TDralR', width=800)
ch18/ch18.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# ## EDA on NYC Taxi data

# Import required packages
import boto3
from pyspark.sql import SparkSession
from pyspark.sql.types import IntegerType, DoubleType
import pyspark.sql.functions as F

# Create a local spark session
spark = SparkSession.builder \
    .appName('nyc-taxi-data') \
    .getOrCreate()

# Set parameters
bucket_name = "nyc-tlc"  # s3 bucket name with required nyc tlc files


# Create function to read S3 bucket
def list_bucket_contents(bucket, match=''):
    """Return the keys of all objects in `bucket` whose key contains `match`.

    An empty `match` (the default) lists every object in the bucket.
    Note this is a substring match, not a prefix match, so e.g.
    "yellow_tripdata_2019" also matches keys nested under "trip data/".
    """
    bucket_resource = boto3.resource('s3').Bucket(bucket)
    return [obj.key for obj in bucket_resource.objects.all() if match in obj.key]


# +
# Read yellow and green taxi data for respective years
colours = ["yellow", "green"]
years = ["2019", "2020"]
files = []
for year in years:
    for colour in colours:
        match = colour + "_tripdata_" + year
        files.extend(list_bucket_contents(bucket=bucket_name, match=match))
# -

files

# Read July 2020 yellow taxi cab data from S3 bucket
# (comment previously claimed January 2019 but the key below is 2020-07)
yellow_df = spark.read.csv(f"s3a://{bucket_name}/trip data/yellow_tripdata_2020-07.csv", header=True)

# Show first twenty rows of the imported file
yellow_df.show(20)

# Print schema of data frame to show field data types and nullability
yellow_df.printSchema()

# Read January 2019 green taxi cab data from S3 bucket
green_df = spark.read.csv(f"s3a://{bucket_name}/trip data/green_tripdata_2019-01.csv", header=True)

green_df.show(10)

# Print schema of data frame to show field data types and nullability
green_df.printSchema()

# ## Recommended data type changes
#
# All fields imported as a string. The following data type conversions are required:
#
# * VendorID: string -> categorical (drop - needs processing intensive one hot encoding)
# * lpep_pickup_datetime: string -> timestamp
# * lpep_dropoff_datetime: string -> timestamp
# * store_and_fwd_flag: string -> integer (drop)
# * RatecodeID: string -> integer
# * PULocationID: string -> categorical (drop - needs processing intensive one hot encoding)
# * DOLocationID: string -> categorical (drop - needs processing intensive one hot encoding)
# * passenger_count: string -> integer
# * trip_distance: string -> double
# * fare_amount: string -> double
# * extra: string -> double
# * mta_tax: string -> double
# * tip_amount: string -> double
# * tolls_amount: string -> double
# * ehail_fee: string -> double
# * improvement_surcharge: string -> double
# * total_amount: string -> double
# * payment_type: string -> integer
# * trip_type: string -> integer (drop - is not in other data set)
# * congestion_surcharge: string -> double (drop - is not in other data set and should be included in extras per dictionary)

# Determine if there are any drop offs before pickups
yellow_df.withColumn("pickup_datetime", F.unix_timestamp(F.col("tpep_pickup_datetime"), "yyyy-MM-dd' 'HH:mm:ss").cast("timestamp")).\
    withColumn("dropoff_datetime", F.unix_timestamp(F.col("tpep_dropoff_datetime"), "yyyy-MM-dd' 'HH:mm:ss").cast("timestamp")).\
    withColumn("trip_duration", (F.col("dropoff_datetime").cast("long") - F.col("pickup_datetime").cast("long"))).\
    filter(F.col("trip_duration") < 0).\
    select(["pickup_datetime","dropoff_datetime","trip_duration"]).\
    show()

# Inspect implausibly short trips (under 30 seconds)
yellow_df.withColumn("pickup_datetime", F.unix_timestamp(F.col("tpep_pickup_datetime"), "yyyy-MM-dd' 'HH:mm:ss").cast("timestamp")).\
    withColumn("dropoff_datetime", F.unix_timestamp(F.col("tpep_dropoff_datetime"), "yyyy-MM-dd' 'HH:mm:ss").cast("timestamp")).\
    withColumn("trip_duration", (F.col("dropoff_datetime").cast("long") - F.col("pickup_datetime").cast("long"))).\
    filter(F.col("trip_duration") < 30).\
    select(["tpep_pickup_datetime","pickup_datetime","tpep_dropoff_datetime","dropoff_datetime","trip_duration","fare_amount"]).\
    show()

# Look at payment types 6 = voided trips
yellow_df.filter(F.col("payment_type").astype(IntegerType()) == 6).\
    count()

# Look at payment types 4 = dispute
yellow_df.filter(F.col("payment_type").astype(IntegerType()) == 4).\
    count()

# Look at payment types misclassified above 6
yellow_df.filter(F.col("payment_type").astype(IntegerType()) > 6).\
    count()

# Look at RatecodeID misclassified above 6
yellow_df.filter(F.col("RatecodeID").astype(IntegerType()) > 6).\
    count()

# Look at mta tax consistently $0.50
yellow_df.filter(F.col("mta_tax").astype(DoubleType()) != 0.50).\
    count()

# Look for less than one passenger
yellow_df.filter(F.col("passenger_count").astype(IntegerType()) < 1).\
    count()

# Look for tip amount less than zero
yellow_df.filter(F.col("tip_amount").astype(DoubleType()) < 0).\
    count()

# Look for tolls_amount less than zero
yellow_df.filter(F.col("tolls_amount").astype(DoubleType()) < 0).\
    count()

# Look for improvement_surcharge consistently $0.30
yellow_df.filter(F.col("improvement_surcharge").astype(DoubleType()) != 0.30).\
    count()

# Look for fare_amount less than zero
yellow_df.filter(F.col("fare_amount").astype(DoubleType()) < 0).\
    count()

# Look for extra greater than $3.50 (congestion or night time fee)
# (comment previously said $1, but the filter below uses 3.5)
yellow_df.filter(F.col("extra").astype(DoubleType()) > 3.5).\
    count()

# Look for trip_distance less than zero
yellow_df.filter(F.col("trip_distance").astype(DoubleType()) < 0).\
    count()

# Count non-null ehail_fee values (>= 0)
# (comment previously repeated "trip_distance less than zero")
yellow_df.filter(F.col("ehail_fee").astype(DoubleType()) >= 0).\
    count()
notebooks/AN_EDA.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3.8.0 32-bit # metadata: # interpreter: # hash: 0f8f62efe7dfba651090e09cfe8cbc2f17947c8a082f281b41d2fe5c437199db # name: python3 # --- for i in range(5): # + id="xBGwe63iTM4-" outputId="02c05322-983e-47c8-f3c4-816ff5605fec" colab={"base_uri": "https://localhost:8080/"} from google.colab import drive ; drive.mount("/content/drive") # + id="4LGLcKDpUsrx" outputId="f547b5ff-5031-4090-a022-bf8226e9f7bd" colab={"base_uri": "https://localhost:8080/"} import numpy as np import pandas as pd import matplotlib.pyplot as plt #import tensorflow as tf import os import re import datetime print(f"Numpy Version {np.__version__}") print(f"Pandas Version {pd.__version__}") #print(f"Tensorflow Version {tf.__version__}") # - data = ["Made","dipa","madi"] plt.plot(data,[1,2,3]) # + id="izn1o6-nU7_c" outputId="a7493aa4-ca3f-41fb-a6b6-950c0fc58aff" colab={"base_uri": "https://localhost:8080/", "height": 363} path = "/content/drive/My Drive/Sales_Data" files = [file for file in os.listdir(path)if not file.startswith('.')] FullData = pd.DataFrame() for file in files: datamonth = pd.read_csv(path + "/" + file) FullData = pd.concat([FullData,datamonth],axis = 0) FullData.head(10) # + id="ZdMhsVv-Wq6R" FullData.to_csv("Fulldata.csv",index=False) # + id="YrYgGu_pW1yZ" df = FullData.copy() # + id="Q_2p4BJIW5Xr" outputId="60e9a0d6-206d-45a2-a515-a1ba8d951301" colab={"base_uri": "https://localhost:8080/"} print(f"row {df.shape[0]} , Columns {df.shape[1]}" ) print(f"Contain Nans ? {df.isna().sum()}") # + id="LoyIYVOSXgHc" outputId="d53b62a4-46eb-45e2-b352-3c909f19e16c" colab={"base_uri": "https://localhost:8080/"} df.dropna(how="all" , inplace=True) print(f"Contain Nans ? 
{df.isna().sum()}") # + id="ge18ppAKdLSU" outputId="90afd380-c67a-43c6-d9af-c181320aa43f" colab={"base_uri": "https://localhost:8080/", "height": 49} satanIndex = df[df["Purchase Address"] == "Purchase Address"].index dfCleaned = df.drop(index=[satanIndex][0]) dfCleaned[dfCleaned["Purchase Address"] == "Purcgase Address"] # + id="pI-AcC2XXWwv" outputId="7d1d6784-5d58-4d4e-c53a-1d30e60fd432" colab={"base_uri": "https://localhost:8080/"} # Adding More columns def get_city(text): return text.split(",")[1].strip(" ") def get_state(text): return text.split(",")[2].split(" ")[1].strip(" ") dfCleaned["city"] = dfCleaned["Purchase Address"].apply(lambda text: f"{get_city(text)}({get_state(text)})") # + id="zMTq1n7xcD0W" outputId="f4cfc379-caa1-4f0d-cea4-ee515210f06e" colab={"base_uri": "https://localhost:8080/", "height": 424} dfCleaned = dfCleaned.drop(["Purchase Address"],axis = 1) dfCleaned # + id="90mSTGlHiVeU" # Grouping Data dfCleaned["ordertime"] = dfCleaned["Order Date"].str[-5:] dfCleaned["orderdate"] = dfCleaned["Order Date"].str[:8] dfCleaned["month"] = dfCleaned["orderdate"].str[0:2] dfCleaned.drop(["Order Date"],axis = 1, inplace = True) # + id="5GdenUKEo0Fv" outputId="14252cc4-8d89-4940-aedb-82e791f35379" colab={"base_uri": "https://localhost:8080/", "height": 36} def ConvertToDate(dateee): dateee = str(dateee) dateee = re.sub("/","-",dateee) return dateee ConvertToDate("01/02/03") # + id="-BY2Knakl9BC" dfCleaned["day"] = dfCleaned["orderdate"].str[3:5] dfCleaned["orderdate"] = dfCleaned["orderdate"].apply(ConvertToDate) dfCleaned["orderdate"] = pd.to_datetime(dfCleaned["orderdate"]) dfCleaned["isStartMonth"] = dfCleaned["orderdate"].dt.is_month_start dfCleaned["isEndMonth"] = dfCleaned["orderdate"].dt.is_month_end # + id="3t7GjsySrf3s" outputId="ef3f5ee3-f546-42d9-b300-39f7e9ae631e" colab={"base_uri": "https://localhost:8080/"} dfCleaned["Sales"] = dfCleaned["Quantity Ordered"].astype("int") * dfCleaned["Price Each"].astype("float") dfCleaned["Sales"] # + 
# + id="IS6DScCku-g3"
# Rename all columns to short snake_case-style names for easier access.
dfCleaned.columns = ["orderid","product","quantity_ordered","price_each","city","ordertime","orderdate","month","day","isStartMonth","isEndMonth","Sales"]

# + id="4TAkxghEu096"
# Cast the quantity/price columns (read in as strings) to numeric types.
dfCleaned.quantity_ordered = dfCleaned["quantity_ordered"].astype("int")
dfCleaned.price_each = dfCleaned["price_each"].astype("float")

# + id="gQBKq2WMwHqr" outputId="14290bc1-f89d-4310-d2da-40a017ab603d" colab={"base_uri": "https://localhost:8080/", "height": 457}
# Monthly totals over all numeric columns (displayed in the notebook).
dfCleaned.groupby(["month"]).sum()

# + [markdown] id="QafW2IITbSzF"
# From there we know that for these months[04 , 10 , 12] have 20K Quantity ordered, because in dec it has a lot special events ,such as christmas, end of the year . For 04 and 10 i dont know yet what caused these .

# + id="tXWcoi6YrCHW" outputId="66eaf3ec-42f8-4f75-bbc7-497e13ce7d71" colab={"base_uri": "https://localhost:8080/", "height": 457}
# Sum/mean/median of Sales per month.
# NOTE(review): the aggregations are passed as a set literal, so the output
# column order is not guaranteed; a list would be deterministic — confirm intent.
dfCleaned.groupby(["month"]).aggregate({np.sum,np.mean,np.median})["Sales"]

# + id="xsVanA5CtovU" outputId="75149391-2fb5-4b3d-d6df-129bb8ac880c" colab={"base_uri": "https://localhost:8080/", "height": 457}
# Max/min Sales per month.
dfCleaned.groupby(["month"]).aggregate({max,min})["Sales"]

# + id="TX5bQ6A-xI08" outputId="b576a0c7-7545-4cbc-fcd2-41476016ba12" colab={"base_uri": "https://localhost:8080/"}
# Number of distinct day-of-month values present in the data.
len(dfCleaned.groupby(["day"]).head(1))

# + id="AsfKUQx9xTSC"
# Per-day sum/mean/median of quantity and sales, used for the plot below.
SkecthIt1 = dfCleaned.groupby(['day']).aggregate({np.sum,np.mean,np.median})[["quantity_ordered","Sales"]]

# + id="PR9itiD2uEvQ" outputId="ca5292ad-bd12-4d17-e4a0-db6e786d57a2" colab={"base_uri": "https://localhost:8080/", "height": 422}
# Line plot: total sales per day of month.
plt.figure(figsize=(8,6))
plt.plot(SkecthIt1[("Sales","sum")])
plt.title("Sales Per day for 1 years")
plt.xlabel ( 'Days' )
plt.ylabel ("Total in 1K")

# + id="9f2T8E9KeN3f" outputId="f96230d2-3b35-4807-c313-ce00e1c91e01" colab={"base_uri": "https://localhost:8080/", "height": 350}
# Bar chart: total sales per month.
SkecthIt2 = dfCleaned.groupby(["month"]).aggregate({np.sum,np.mean,np.median})
fig, ax = plt.subplots(1,1)
ax.bar(np.unique(dfCleaned["month"].values),SkecthIt2[("Sales","sum")])
ax.set(title = "Months Sales For 2019 ", xlabel = "Months" , ylabel = "Total Sales in Usd")

# + id="Xf-WqX-Ge2HH" outputId="304ea65a-9159-4048-e236-ca69b982cfe6" colab={"base_uri": "https://localhost:8080/", "height": 777}
# Bar chart: total sales per city, with vertical tick labels for readability.
SkecthIt3 = dfCleaned.groupby(["city"]).aggregate({np.sum,np.median,np.mean})
fig, ax = plt.subplots ( 1, 1, figsize = (6,8))
ax.bar(np.unique(dfCleaned['city'].values) , SkecthIt3[("Sales","sum")])
ax.set(title = "City Total Sales",xlabel = 'City' ,ylabel = "Total sales in usd" )
ax.set_xticklabels(labels = np.unique(dfCleaned["city"].values),rotation = "vertical")

# + id="MyRI93HakR5x" outputId="dde6f26a-d340-4bea-c701-70e1c8ae95e4" colab={"base_uri": "https://localhost:8080/", "height": 81}
# Split the "HH:MM" order time into hour and minute columns.
dfCleaned["Hour"] = dfCleaned["ordertime"].str[0:2]
dfCleaned["Minute"] = dfCleaned["ordertime"].str[3:5]
dfCleaned.head(1)[["Hour","Minute"]]

# + id="Sdqej5l_kxTj" outputId="f9014b39-f189-41e3-918a-650802dbf562" colab={"base_uri": "https://localhost:8080/", "height": 954}
# Bar chart: total sales per hour of day.
SkecthIt4 = dfCleaned.groupby(["Hour"]).aggregate({np.sum,np.median,np.mean})
fig,ax = plt.subplots(1,1, figsize = (10,8))
ax.bar(np.unique(dfCleaned["Hour"].values), SkecthIt4[("Sales","sum")])
ax.set(title = "Hour Sales",xlabel = "Hours" , ylabel = "Total sales in USD")
ax.set_xticklabels(labels = np.unique(dfCleaned["Hour"].values), rotation = "vertical" , size = 8)

# + id="9G2Xq1oHmESz" outputId="45e20ae2-33ba-4337-947f-a3f4a9d3f84c" colab={"base_uri": "https://localhost:8080/", "height": 848}
# Same hourly totals as a line plot.
fig,ax = plt.subplots(1,1, figsize = (10,6))
ax.plot(np.unique(dfCleaned["Hour"].values), SkecthIt4[("Sales","sum")])
ax.set(title = "Hours Sales" , xlabel = "Hours" , ylabel = "Total Sales in USD")
ax.set_xticklabels(labels = np.unique(dfCleaned["Hour"].values),rotation = "vertical")

# + id="d9J077szol_w" outputId="e4c1f595-5b05-4e68-e304-5b0eb49511b7" colab={"base_uri": "https://localhost:8080/", "height": 81}
dfCleaned.head(1)

# + id="n-nqIwrvoDvu" outputId="c1e67338-23da-4f31-98e3-5caece1dc02b" colab={"base_uri": "https://localhost:8080/", "height": 276}
# Keep only orders whose order id appears more than once, i.e. orders that
# contain several products. (Despite the name, this KEEPS the duplicated rows.)
NODuplicateDf = dfCleaned[dfCleaned["orderid"].duplicated(keep = False)]
NODuplicateDf.head()

# + id="blhUZw6zpVaX"
# One comma-joined product string per multi-product order.
Sketchit5 = NODuplicateDf.groupby(["orderid"])["product"].apply(lambda text : ",".join(text))

# + id="wD-O-5H1q4Hh"
def count_it_m8(data):
    """Count case-insensitive occurrences of each value in *data*.

    *data* is any mapping/Series exposing ``.items()``.  Returns a plain dict
    mapping each lower-cased value to its frequency, in first-seen order.
    """
    counts = {}
    for _, combo in data.items():
        combo = combo.lower()
        # dict.get collapses the original's redundant if/elif branches.
        counts[combo] = counts.get(combo, 0) + 1
    return counts


lol = count_it_m8(Sketchit5)

# + id="m7MjeFo_slht" outputId="d9a9f6ae-3dba-4eee-da33-be71bac04aee" colab={"base_uri": "https://localhost:8080/"}
# Top 20 product combinations by frequency, as (combination, count) pairs,
# ordered from most to least frequent.
# The original rebuilt this list with a convoluted nested loop that tracked a
# manual index; pairing the already-sorted keys with their counts directly is
# equivalent and much clearer.
Ans = [(combo, lol[combo]) for combo in sorted(lol, key = lol.get, reverse=True)[:20]]
Ans

# + id="27KFJ2-aryMh" outputId="fc45779e-87d2-4a4b-aedf-cc60fff3820f" colab={"base_uri": "https://localhost:8080/", "height": 504}
# Bar chart of units sold per product, with the mean unit price overlaid on a
# secondary axis to show the price/volume relationship.
Sketchit6 = dfCleaned.groupby(["product"]).aggregate({np.sum,np.median,np.mean})
fig,ax = plt.subplots(1, 1,figsize = (10,6))
ax.bar(np.unique(dfCleaned["product"].values),Sketchit6[("quantity_ordered","sum")])
ax.set_xticklabels(labels = np.unique(dfCleaned["product"]),rotation = "vertical",size = 8)
ax.set(ylabel = "Total Sold")
ax2= ax.twinx()
ax2.plot(np.unique(dfCleaned["product"].values), Sketchit6[("price_each","mean")],color="green")
ax2.set(ylabel = "Mean In USD")
SalesDataAnalysis.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# +
# Author: <NAME>
# github.com/ernestorodg

###############################################################################
## Analyse UNSW-NB15 dataset for intrusion detection using SVM
###############################################################################

# +
import pandas as pd
import numpy as np
import sys

###############################################################################
## Define constants
###############################################################################

# Random state for reproducibility
STATE = 0
np.random.seed(10)
# List of available attacks on the dataset
# (note: ' Fuzzers' keeps its leading space — that is how it appears in the CSV)
ATTACKS = ['Exploits',
           'Generic',
           ' Fuzzers',
           'DoS',
           'Analysis',
           'Worms',
           'Reconnaissance',
           'Backdoors',
           'Shellcode']

# Especific to the repository
UNSW_NB15_DIRECTORY = r'../datasets/unsw-nb15/UNSW-NB15 - CSV Files/'
UNSW_NB15_FIRST = 'UNSW-NB15_1.csv'
# Only UNSW_NB15_FIRST is being used on this model
# UNSW_NB15_SECOND = 'UNSW-NB15_2.csv'
# UNSW_NB15_THIRD = 'UNSW-NB15_3.csv'
# UNSW_NB15_FOURTH = 'UNSW-NB15_4.csv'

# +
###############################################################################
## Load dataset
###############################################################################
df = pd.read_csv (UNSW_NB15_DIRECTORY + UNSW_NB15_FIRST)

# Fraction dataframe for quicker testing (copying code is hard)
df = df.sample (frac = 0.1, replace = True, random_state = 0)

# The CSV files ship without a header row, so supply the 49 column names.
columns_label = np.array(['srcip','sport','dstip','dsport','proto','state',
                          'dur','sbytes','dbytes','sttl','dttl','sloss',
                          'dloss','service','Sload','Dload','Spkts','Dpkts',
                          'swin','dwin','stcpb','dtcpb','smeansz','dmeansz',
                          'trans_depth','res_bdy_len','Sjit','Djit','Stime',
                          'Ltime','Sintpkt','Dintpkt','tcprtt','synack',
                          'ackdat','is_sm_ips_ports','ct_state_ttl',
                          'ct_flw_http_mthd','is_ftp_login','ct_ftp_cmd',
                          'ct_srv_src','ct_srv_dst','ct_dst_ltm','ct_src_ltm',
                          'ct_src_dport_ltm','ct_dst_sport_ltm',
                          'ct_dst_src_ltm','attack_cat','Label'])

## Add the columns label to our dataset
df.columns = columns_label

## Counting number of null data
nanColumns = [i for i in df.columns if df [i].isnull ().any ()]

## Remove NaN and inf values
df.replace ('Infinity', np.nan, inplace = True) ## Or other text values
df.replace (np.inf, np.nan, inplace = True) ## Remove infinity
df.replace (np.nan, 0, inplace = True)

## Remove error values, especific from the dataset
df.replace ('0xc0a8', 0, inplace = True)
df.replace ('0x20205321', 0, inplace = True)

## For binary comparison: Attack or not Attack
## (non-attack rows had NaN attack_cat, already replaced with 0 above)
for attack in ATTACKS:
    df['attack_cat'] = df ['attack_cat'].replace(attack, 1)

# In this case we drop the last column. 'attack_cat' will be our target
# BUGFIX: DataFrame.drop returns a new frame and is NOT in-place by default;
# the original call discarded its result, so 'Label' silently remained the
# last column and became the target below instead of 'attack_cat'.
df = df.drop(['Label'], axis=1)

# Propose: Having the same amount of attacks and not-attacks rows
# if (df.attack_cat.value_counts()[1] < df.attack_cat.value_counts()[0]):
#     remove_n = df.attack_cat.value_counts()[0] - df.attack_cat.value_counts()[1]  # Number of rows to be removed
#     print(remove_n)
#     df_to_be_dropped = df[df.attack_cat == 0]
#     drop_indices = np.random.choice(df_to_be_dropped.index, remove_n, replace=False)
#     df = df.drop(drop_indices)
# else:
#     remove_n = df.attack_cat.value_counts()[1] - df.attack_cat.value_counts()[0]  # Number of rows to be removed
#     print(remove_n)
#     df_to_be_dropped = df[df.attack_cat == 1]
#     drop_indices = np.random.choice(df_to_be_dropped.index, remove_n, replace=False)
#     df = df.drop(drop_indices)

# +
###############################################################################
## Slice the dataframe (usually the last column is the target)
###############################################################################
# Features: first 8 columns. Target: last column, which after the drop above
# is 'attack_cat' (0 = normal traffic, 1 = any attack category).
X = pd.DataFrame(df.iloc [:, 0:8])

# Selecting other columns
# X = pd.concat([X, df.iloc[:, 2]], axis=1)

y = df.iloc [:, -1]
# Class balance of the binary target.
print('Number of not-attacks: ', y.value_counts()[0])
print('Number of attacks: ', y.value_counts()[1])

# See Output, only available on jupyter-notebooks
# X

# +
from sklearn.compose import make_column_transformer
from sklearn.impute import SimpleImputer
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import OneHotEncoder
from sklearn.preprocessing import OrdinalEncoder
from sklearn.preprocessing import StandardScaler
from sklearn.svm import SVC

####################################################################
# Treat categorical data
####################################################################
cat_cols = X.columns[X.dtypes == 'O'] # Returns array with the columns that has Object types elements

# Per-column category lists, needed by OrdinalEncoder below.
categories = [
    X[column].unique() for column in X[cat_cols]]

# Treat None as an explicit 'missing' category so the encoder can handle it.
for cat in categories:
    cat[cat == None] = 'missing'  # noqa

# Replacing missing values
categorical_imputer = SimpleImputer(missing_values=None,
                                    strategy='constant',
                                    fill_value='missing')

X[cat_cols] = categorical_imputer.fit_transform(X[cat_cols])

# Encoding the categorical data
categorical_encoder = OrdinalEncoder(categories = categories)
X[cat_cols] = categorical_encoder.fit_transform(X[cat_cols])

# Scaling new numerical values
# NOTE(review): mean-imputing and standard-scaling the ordinal codes treats
# arbitrary category indices as magnitudes; it feeds the SVM but the distances
# are not meaningful — confirm this is intentional (one-hot would avoid it).
numerical_imputer = SimpleImputer(strategy = "mean")
X[cat_cols] = numerical_imputer.fit_transform(X[cat_cols])

numerical_scaler = StandardScaler()
X[cat_cols] = numerical_scaler.fit_transform(X[cat_cols])

# +
####################################################################
# Treat numerical data
####################################################################
num_cols = X.columns[(X.dtypes == 'float64') | (X.dtypes == 'int64')] # Returns array with the columns that has float types elements

# Scaling numerical values
numerical_imputer = SimpleImputer(strategy = "mean")
X[num_cols] = numerical_imputer.fit_transform(X[num_cols])

numerical_scaler = StandardScaler()
X[num_cols] = numerical_scaler.fit_transform(X[num_cols])
# -

# Transform the data to numpy arrays
X = X.values
y = y.values

# +
###############################################################################
## Split dataset into train and test sets if not using cross validation
###############################################################################
from sklearn.model_selection import train_test_split

# 80/20 split, fixed seed for reproducibility.
X_train, X_test, y_train, y_test = train_test_split (
                                             X, y,
                                             test_size = 1/5,
                                             random_state = STATE)

# +
####################################################################
# Find best parameters and plot graphs
####################################################################
# (Hyper-parameter search kept for reference; expensive, so commented out.)
# from sklearn.model_selection import validation_curve
# from matplotlib import pyplot as plt
# import time

# # Measure time of training
# start_time = time.time()

# # For C param
# param_range = np.linspace(0.1, 1000, 10)

# # For gamma param
# # param_range = np.logspace(-6, -1, 5)

# train_scores, test_scores = validation_curve(
#     SVC(), X, y, param_name="C", param_range=param_range,
#     scoring="precision", n_jobs=1)

# print("--- %s seconds ---" % (time.time() - start_time))

# train_scores_mean = np.mean(train_scores, axis=1)
# train_scores_std = np.std(train_scores, axis=1)
# test_scores_mean = np.mean(test_scores, axis=1)
# test_scores_std = np.std(test_scores, axis=1)

# plt.title("Validation Curve with SVM")
# plt.xlabel(r"$\gamma$")
# plt.ylabel("Score")
# plt.ylim(0.0, 1.1)
# lw = 2

# plt.semilogx(param_range, train_scores_mean, label="Training score",
#              color="darkorange", lw=lw)
# plt.fill_between(param_range, train_scores_mean - train_scores_std,
#                  train_scores_mean + train_scores_std, alpha=0.2,
#                  color="darkorange", lw=lw)
# plt.semilogx(param_range, test_scores_mean, label="Cross-validation score",
#              color="navy", lw=lw)
# plt.fill_between(param_range, test_scores_mean - test_scores_std,
#                  test_scores_mean + test_scores_std, alpha=0.2,
#                  color="navy", lw=lw)
# plt.legend(loc="best")
# plt.show()

# Best gamma found for 0.1 x Dataset: 10^(-3)

# +
############################################################################### ## Train the model using learning curve, using cross-validation ############################################################################### import time from sklearn.model_selection import learning_curve # Measure time of training start_time = time.time() # Training the model with cross validation approach train_sizes, train_scores, valid_scores = learning_curve( SVC(kernel="rbf", random_state=0, gamma=1, C=1), X_train, y_train, cv=5, scoring='f1') print("--- %s seconds ---" % (time.time() - start_time)) # + ############################################################################### ## Plotting learning curve ############################################################################### from matplotlib import pyplot as plt train_scores_mean = np.mean(train_scores, axis=1) train_scores_std = np.std(train_scores, axis=1) valid_scores_mean = np.mean(valid_scores, axis=1) valid_scores_std = np.std(valid_scores, axis=1) plt.title("Learning curve with SVM") plt.xlabel("Size of training") plt.ylabel("Score") plt.ylim(0.0, 1.1) lw = 2 plt.plot(train_sizes, train_scores_mean, label="Training score", color="darkorange", lw=lw) plt.plot(train_sizes, valid_scores_mean, label="Cross-validation score", color="navy", lw=lw) plt.legend(loc="best") plt.show() # + ############################################################################### ## Training the model without cross-validation (simpler than the training above) ############################################################################### # Assign the model to be used svc = SVC(kernel="rbf", random_state=0, gamma=1, C=1) # Measure time of this training start_time = time.time() # Training the model model = svc.fit(X_train, y_train) print("--- %s seconds ---" % (time.time() - start_time)) # + ############################################################################### ## Obtain metrics from the trained model without cross-validation 
############################################################################### from sklearn.metrics import precision_score from sklearn.metrics import recall_score from sklearn.metrics import multilabel_confusion_matrix # Predicting from the test slice y_pred = model.predict(X_test) # Precision == TP / (TP + FP) print('Precision Score: ', precision_score(y_test, y_pred)) # Recall == TP / (TP + FN) print('Recall Score: ', recall_score(y_test, y_pred)) # Accuracy print('Accuracy: \n', model.score(X_test, y_test)) # Multilabel Confusion Matrix: # [tn fp] # [fn tp] print(multilabel_confusion_matrix(y_test, y_pred, labels=[0, 1])) # + ############################################################################### ## Plotting confusion matrix ############################################################################### from sklearn.metrics import plot_confusion_matrix plot_confusion_matrix(model, X_test, y_test) # doctest: +SKIP plt.show() # doctest: +SKIP # -
src/specific_models/unsw15/.ipynb_checkpoints/unsw_nb15_svm_notebook-checkpoint.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # Leaflet cluster map of talk locations
#
# set folder as the _talks directory, which contains .md files of all your talks, and Run this notebook. This scrapes the location YAML field from each .md file, geolocates it with geopy/Nominatim, and uses the getorg library to output data, HTML, and Javascript for a standalone cluster map.

# !pip install getorg --upgrade
#import glob
import os
import getorg
from geopy import Nominatim

# Collect every Markdown talk file from the _talks directory.
#g = glob.glob("*.md")
folder='./_talks/'
g = [f for f in os.listdir(folder) if os.path.isfile(os.path.join(folder, f)) and f.endswith('.md')]
g

# BUGFIX: Nominatim's usage policy requires a custom User-Agent identifying
# the application; geopy warns on the bare Nominatim() constructor and newer
# versions refuse it outright.
geocoder = Nominatim(user_agent="talkmap")
location_dict = {}
location = ""
permalink = ""
title = ""

# +
# Scrape the `location: "..."` YAML field from each talk file and geocode it.
for file in g:
    with open(folder+file, 'r',encoding='utf-8') as f:
        lines = f.read()
        if lines.find('location: "') > 1:
            loc_start = lines.find('location: "') + 11
            lines_trim = lines[loc_start:]
            loc_end = lines_trim.find('"')
            location = lines_trim[:loc_end]
            location_dict[location] = geocoder.geocode(location)
            print(location, "\n", location_dict[location])
# -

# Emit the standalone Leaflet cluster map into ./talkmap.
m = getorg.orgmap.create_map_obj()
getorg.orgmap.output_html_cluster_map(location_dict, folder_name="./talkmap", hashed_usernames=False)
talkmap.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3.9.12 ('av2') # language: python # name: python3 # --- # # 3D Object Detection Evaluation Tutorial # # Welcome to the 3D object detection evaluation tutorial! We'll walk through the steps to submit your detections to the competition server. from av2.evaluation.detection.eval import evaluate from av2.evaluation.detection.utils import DetectionCfg from pathlib import Path from av2.utils.io import read_feather, read_all_annotations # ### Constructing the evaluation configuration # # The `DetectionCfg` class stores the configuration for the 3D object detection challenge. # # - During evaluation, we remove _all_ cuboids which are not within the region-of-interest (ROI) which spatially is a 5 meter dilation of the drivable area isocontour. # # - **NOTE**: If you would like to _locally_ enable this behavior, you **must** pass in the directory to sensor dataset (to build the raster maps from the included vector maps). dataset_dir = Path.home() / "data" / "datasets" / "av2" / "sensor" # Path to your AV2 sensor dataset directory. competition_cfg = DetectionCfg(dataset_dir=dataset_dir) # Defaults to competition parameters. split = "val" gts = read_all_annotations(dataset_dir=dataset_dir, split=split) # Contains all annotations in a particular split. display(gts) # ## Preparing detections for submission. # # Our submission server expects a `.feather` file that contains a list of detections for the entire test set. # + # If you've already aggregated your detections into one file. dts_path = Path("detections.feather") dts = read_feather(dts_path) dts, gts, metrics = evaluate(dts, gts, cfg=competition_cfg) # Evaluate instances. # - display(metrics)
tutorials/object_detection_evaluation.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # Import Modules

import warnings
warnings.filterwarnings('ignore')

# +
from src import detect_faces, show_bboxes
from PIL import Image
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
import numpy as np
from PIL import Image
import torch
from torchvision import transforms, datasets
import numpy as np
import os
# -

# # Path Definitions

dataset_path = '../Dataset/emotiw/test/'
face_coordinates_directory = '../Dataset/FaceCoordinates/'
processed_face_ex = '../Dataset/CroppedFaces/test/'

# # Loading Test Dataset

# +
test_dataset = datasets.ImageFolder(dataset_path)
class_names = test_dataset.classes

device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
# -

class_names

# Image base names (without extension), sorted to align with the dataset order.
test_filelist = sorted(os.listdir(dataset_path + 'test_shared/'))
test_filelist = [x.split('.')[0] for x in test_filelist]
print(test_filelist[:10])

print(len(test_dataset))

# # Crop Faces

# For each test image: load its precomputed bounding boxes / landmarks, crop
# every face, shift the landmarks into the crop's local coordinates, and save
# the stacked results as <name>.npz under processed_face_ex.
for i in range(len(test_dataset)):
    print(test_filelist[i])
    image, label = test_dataset[i]
    face_list = []
    landmarks_new_coordinates = []
    # BUGFIX: the original tested processed_face_ex + 'test/' + name + '.npz',
    # but processed_face_ex already ends in 'test/', so the doubled
    # 'test/test/' path never existed and the skip-if-already-processed check
    # never fired. Check the same path np.savez writes below (np.savez appends
    # the '.npz' suffix itself).
    if os.path.isfile(processed_face_ex + test_filelist[i] + '.npz'):
        print(test_filelist[i] + ' Already present')
        continue
    bbox_lm = np.load(face_coordinates_directory + 'test/' + test_filelist[i] +'.npz')
    bounding_boxes = bbox_lm['a']
    if bounding_boxes.size == 0 or (bounding_boxes[0] == 0).all():
        # No faces detected: write a placeholder file so the image is not
        # re-processed on later runs.
        print("No bounding boxes for " + test_filelist[i] + ". Adding empty file for the same")
        np.savez(processed_face_ex + test_filelist[i], a = np.zeros(1), b = np.zeros(1))
        continue
    landmarks = bbox_lm['b']
    for j in range(len(bounding_boxes)):
        bbox_coordinates = bounding_boxes[j]
        landmark = landmarks[j]
        img_face = image.crop((bbox_coordinates[0], bbox_coordinates[1], bbox_coordinates[2], bbox_coordinates[3]))
        x = bbox_coordinates[0]
        y = bbox_coordinates[1]
        # Translate the 5 facial landmarks (x coords stored in [0:5], y coords
        # in [5:10]) into the cropped face's local coordinate system.
        for k in range(5):
            landmark[k] -= x
            landmark[k+5] -= y
        img_face = np.array(img_face)
        landmark = np.array(landmark)
        if len(face_list) != 0:
            # If this crop has the same height as the previous one, re-crop
            # shifted by 1px (and shift the landmarks to match) — presumably
            # to keep consecutive entries distinguishable; TODO confirm intent.
            if img_face.shape[0] == face_list[-1].shape[0]:
                img_face = image.crop((bbox_coordinates[0] - 1, bbox_coordinates[1] - 1, bbox_coordinates[2], bbox_coordinates[3]))
                img_face = np.array(img_face)
                landmark +=1
        face_list.append(img_face)
        landmarks_new_coordinates.append(landmark)
    face_list = np.asarray(face_list)
    landmarks_new_coordinates = np.asarray(landmarks_new_coordinates)
    np.savez(processed_face_ex + test_filelist[i], a = face_list, b = landmarks_new_coordinates)
    print(i)
MTCNN/.ipynb_checkpoints/Face_Cropper_TestDataset-checkpoint.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- import pandas as pd # # Predicting Median House Prices in California Census Blocks (Census 1990) # # Author: DSCI 522 Group 312 # # Date: January 25, 2020 # ## Summary # # This analysis focuses on predicting the median house prices in census blocks given independent variable about the location, home characteristics, and the demographics of the census block. This dataset was sourced from Kaggle, and many other people have completed [similar analyses](https://www.kaggle.com/camnugent/california-housing-prices/kernels). # # Our goal is to build a model that will predict median house value with a higher model validation score than the 0.60 achieved by [<NAME>](https://www.kaggle.com/ericfeng84), the author of [The California House Price](https://www.kaggle.com/ericfeng84/the-california-housing-price) Kaggle page from which the dataset was obtained. # # We aim to bring additional insight to the existing models including looking at multicollinearity and trying KNN with a variety of different values for n_neighbors. # ## Methods # # ### Data # This dataset is a modified version of [The California Housing Dataset](https://www.dcc.fc.up.pt/~ltorgo/Regression/cal_housing.html), with [additional columns added by <NAME>](https://github.com/ageron/handson-ml). This dataset contains information about median California house values per census block as sourced from the 1990 US Census. # # ### Analysis # We used Linear Regression, K-Nearest Neighbour Trees, and a Random Forest Regressor to predict the median house value given the independent variables. 
# # ### Results and Discussion # The Exploratory Data Analysis focused on identifying linear relationships between the independent variables and the dependent variable as well as looking at correlations between independent variables. Previous analyses of this dataset highlighted that linear regression was an appropriate prediction method for the median housing value, but generally, they lacked insight into multicollinearity (the correlation and linear relationships between independent variables). Of all of the variables examined, the Variance Inflation Factor (VIF) was higher than 1, which means that there is strong evidence of multicollinearity. pd.read_csv('eda_charts/vif_table.csv') # The variable with the highest VIF is total bedrooms, and this appears to be strongly linearly related to the total number of rooms, given that the room count includes the bedrooms. # # ![Image](eda_charts/total-rooms_total-bedrooms.png) # # The following heatmap represents the correlation values of the variables. # # ![Image](eda_charts/correlation_heatmap.png) # A common approach to address multicollinearity is to remove variables with high VIFs. As is common in this case, the Linear Regression model performed best when all (or all but one) of the variables were included. The following illustrates the Recursive Feature Elimination for a Linear Regression Model. The x-axis represents the number of features selected so far. # # We additionally ran Recursive Feature Elimination on a Linear Regression model, excluding Latitude and Longitude, since these features are very specific to California. The results follow. # # # # It is clear that Linear Regression performed more favourably on the training and testing data including latitude and longitude. This is somewhat to be expected, as areas with expensive median house values often border other areas with similar socioeconomic groups. 
# # To attempt to address the multicollinearity, we also ran Recursive Feature Elimination excluding longitude, latitude, and total bedrooms, which was the feature that had the highest Variance Inflation Factor.
#
# ![Image](ml_results/LR_performace_exc_feats_2.png)
#
# As expected, the results are very similar to the model that only excluded latitude and longitude because the information from the feature "total_bedrooms" is effectively redundant.

# <img src="ml_results/LR_performace.png" width="3%" align="left"/> <img src="ml_results/LR_performace_exc_feats.png" width="50%" align="left"/>

# We also attempted to fit a K-Nearest Neighbor to our data, and KNN yielded better accuracy than simple linear regression. A Standard Scaler was used to pre-process the data, which likely contributed to the success of KNN. The following demonstrates the relationship between the number of nearest neighbours and the resulting training and testing scores.
#
# ![Image](ml_results/KNN_performace.png)
#
#
# As with Linear Regression, we removed Latitude and Longitude in hopes of seeing the effect it had on KNN in terms of spatial nearest neighbours, and the results are as follows.
#
# ![Image](ml_results/KNN_performace_exc_feats.png)

# From the above, we can infer that having latitude and longitude included did improve the KNN model. With or without these features, the number of nearest neighbours that should be used is approximately 9 in order to avoid overfitting.
#
# The goal of our project is not to predict based on Census data for other states, however the results are still quite effective without latitude and longitude.
# ### Areas for Improvement # Opportunities for improvement of the predictive model include: # - Increasing the breadth of Machine Learning Models used to predict the median housing value; # - Obtaining cross-validation scores, rather than the simple score; # - Work through a feature engineering process to make features more relevant; # - Use a feature selection method for linear regression that addresses or discourages multicollinearity; # - Conducting more in-depth analysis to address multicollinearity. # # ### Conclusion # In both our linear regression model and K-Nearest Neighbors model, we achieved higher accuracy than <NAME>'s best score, which was with linear regression. Chen did not fit a KNN model, so it is unclear whether this model would have performed better for him. For the purposes of predicting median housing price in California by census block, the linear regression and KNN models are effective at estimating the response. # ## References # # <NAME>. n.d. SPLITTING Data into Training and Test Sets with R. https://www.listendata.com/2015/02/splitting-data-into-training-and-test.html. # # <NAME> 2018. docopt: Command-Line Interface Specification Language. https://CRAN.R-project.org/package=docopt. # # <NAME>. 2020. Caret: Classification and Regression Training. https://CRAN.R-project.org/package=caret. # # <NAME> 2017. checkmate: Fast Argument Checks for Defensive R Programming. https://journal.r-project.org/archive/2017/RJ-2017-028/index.html. # # R Core Team. 2019. R: A Language and Environment for Statistical Computing. Vienna, Austria: R Foundation for Statistical Computing. https://www.R-project.org/. # # <NAME>. 2011. testthat: Get Started with Testing. https://journal.r-project.org/archive/2011-1/RJournal_2011-1_Wickham.pdf. # # <NAME>. 2017. Tidyverse: Easily Install and Load the ’Tidyverse’. https://CRAN.R-project.org/package=tidyverse. # # Wickham, Hadley, and <NAME>. 2019. Tidyr: Tidy Messy Data. https://CRAN.R-project.org/package=tidyr. 
# # <NAME>, <NAME>, and <NAME>. 2018. Readr: Read Rectangular Text Data. https://CRAN.R-project.org/package=readr. # # <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, <NAME>; 12(Oct):2825−2830, 2011 https://scikit-learn.org/stable/ # # <NAME>. (2016) Python Data Analysis with pandas. In: Python Recipes Handbook. Apress, Berkeley, CA. https://pandas.pydata.org/ # # Pedregosa et al., 2011. Scikit-learn: Machine Learning in Python, JMLR 12, pp. 2825-2830. http://jmlr.csail.mit.edu/papers/v12/pedregosa11a.html # # Seabold, Skipper, and <NAME>, 2010. “statsmodels: Econometric and statistical modeling with python.” Proceedings of the 9th Python in Science Conference. http://conference.scipy.org/proceedings/scipy2010/pdfs/seabold.pdf # # VanderPlas, Jacob & Granger, Brian & Heer, Jeffrey & Moritz, Dominik & Wongsuphasawat, Kanit & Lees, Eitan & Timofeev, Ilia & Welsh, <NAME>, Scott. (2018). Altair: Interactive Statistical Visualizations for Python. Journal of Open Source Software. 3. 1057. 10.21105/joss.01057. https://altair-viz.github.io/_sources/index.rst.txt # # plightbo, simon.m.stewart, hbchai, jrhuggins, et al. © Copyright 2011. https://selenium.dev/documentation/en/front_matter/copyright_and_attributions/ # # Oliphant, <NAME>. (2006). A guide to NumPy (Vol. 1). Trelgol Publishing USA. https://web.mit.edu/dvp/Public/numpybook.pdf # # #
results/california_housing_predict_report.nbconvert.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# + [markdown] slideshow={"slide_type": "slide"}
# # Text Classification
# -

# In this chapter, we'll go over how to perform *text classification* with Python. As another form of natural language processing, text classification will allow us to automatically assign a set of pre-determined categories, or tags, to unstructured texts.

# ## Setup

# We'll get started by enabling the `matplotlib` inline backend and importing `pandas`.

# + slideshow={"slide_type": "slide"}
# %matplotlib inline
import pandas as pd
# -

# ### Data

# As an example, we'll be looking at a dataset containing [reviews](https://www.kaggle.com/zynicide/wine-reviews/data) from Wine Enthusiast magazine.

# + slideshow={"slide_type": "slide"}
#https://www.kaggle.com/zynicide/wine-reviews/data
wine_df = pd.read_csv('wine_reviews.csv')
# -

# We can quickly get a sense of the distribution of reviews by looking at the `value_counts` associated with different `point` scores.

# + slideshow={"slide_type": "fragment"}
wine_df['points'].value_counts()
# -

# It could also be helpful for us to read over a few of the `descriptions` of the wines. However, since `pandas` defaults to a maximum column width of 50 characters, it's difficult to glean much from each review without specifying a longer maximum character width for the columns in our dataframe.

# + slideshow={"slide_type": "slide"}
wine_df['description'][:5]

# + [markdown] slideshow={"slide_type": "slide"}
# ![google_search.png](images/google_search.png)
# -

# We'll use the `set_option` function in `pandas` to set our maximum column width to 120 characters.
# + slideshow={"slide_type": "slide"} pd.set_option('display.max_colwidth', 120) # - # Now we'll have more text to work with when we're looking at the first five descriptions: # + slideshow={"slide_type": "slide"} wine_df['description'][:5] # - # We can also use the `head` function to see all the keys included in the dataframe: # + slideshow={"slide_type": "slide"} wine_df.head() # - # ## Classifying Texts in Python # The following sections will walk us through a step-by-step guide of how to classify texts in Python. # + [markdown] slideshow={"slide_type": "slide"} # #### Turning words into features # # In order to classify our texts, we'll need to convert each text document in our corpus into a fixed-length *vector*. We can accomplish this using the [CountVectorizer](https://scikit-learn.org/stable/modules/generated/sklearn.feature_extraction.text.CountVectorizer.html?highlight=vectorizer#sklearn.feature_extraction.text.CountVectorizer) function available through the scikit-learn library. Using `CountVectorizer`, we'll be able to transform the texts in our wine review dataframe into a matrix of token counts. # + slideshow={"slide_type": "slide"} from sklearn.feature_extraction.text import CountVectorizer # + [markdown] slideshow={"slide_type": "slide"} # ### 1) Set up your model, fixing any parameters. # # `CountVectorizer` can take a number of parameters; while not an exhaustive list, some important parameters are described below. # - # #### `CountVectorizer` Parameters # # - **lowercase**: Converts all text to lower case. By default, `lowercase` is set to True. # # # - **ngram_range**: This allows us to restrict the range of n-values for our n-grams. Formatted as a tuple, the first sets the minimum n-value, and the second sets the maximum n-value. By default, `n-gram range` is set to (1,1). (If we leave it alone, we'll only be looking at unigrams.) 
# # # - **stop_words**: We can use this parameter to rule out words that occur 1) too frequently, 2) not frequently enough, and/or 3) fall outside of a threshold term frequency. This can be set the 'english' to use a pre-determined set of stopwords often found in texts written in the English language. We can also provide our own list of stopwords if we so choose. # # > - **max_df**: Allows us to set a maximum threshold on document frequency for our terms incorporated into our vocabulary. By default, `max_df` is set to 1.0. # > - **min_df**: Allows us to set a minimum threshold on document frequency for our terms incorporated into our vocabulary. By default, `min_df` is set to 1.0. # > - **max_features**: Allows us to build a vocabulary exclusively from high-frequency terms occuring throughout our corpus. # For this exercise, we'll convert our text to lower case, look only at unigrams, use "english" stopwords, and set a minimum document frequency for terms of .01. # + slideshow={"slide_type": "slide"} vectorizer = CountVectorizer(lowercase = True, ngram_range = (1,1), stop_words = 'english', min_df = .01, max_features = None) # + [markdown] slideshow={"slide_type": "slide"} # ### 2) Fit your model # # We'll `fit` the vectorizer we've created to the `description` key in our wine review dataframe. # + [markdown] slideshow={"slide_type": "fragment"} # `model.fit(X)` # + slideshow={"slide_type": "fragment"} vectorizer.fit(wine_df['description']) # + [markdown] slideshow={"slide_type": "slide"} # We can use the `len` function to see how many features were generated when applying the vectorizor to our corpus. # + slideshow={"slide_type": "fragment"} len(vectorizer.get_feature_names()) # + [markdown] slideshow={"slide_type": "slide"} # ### 3) Create new data based on your model # - # Now we can `transform` the text documents in the `description` key of our wine review dataframe into a document-term matrix. 
# + slideshow={"slide_type": "slide"} review_word_counts = vectorizer.transform(wine_df['description']) # - # Let's take a look at the value counts for the dummy variable `rating` in our dataframe. # # + slideshow={"slide_type": "fragment"} wine_df['rating'].value_counts() # + [markdown] slideshow={"slide_type": "slide"} # ### 4) Set up your (second) model, fixing any parameters. # # #### Multinomial NB # Also imported from the scikit-learn library, [MultinomialNB](https://scikit-learn.org/stable/modules/generated/sklearn.naive_bayes.MultinomialNB.html?highlight=multinomialnb#sklearn.naive_bayes.MultinomialNB) uses the naive Bayes algorithm to classify multinomially distributed data. This classifier is well-suited to the token counts we've previously generated with our vectorizer. # # + slideshow={"slide_type": "fragment"} from sklearn.naive_bayes import MultinomialNB # + slideshow={"slide_type": "fragment"} nb_classifier = MultinomialNB() # - # ### 5) Fit your (second) model # Before we `fit` the classifier to our data, we'll have to specify a couple of **parameters**: # # - **X**: These are the *training* vectors for our model. For our wine review data, we'll use our `review_word_counts` as our training vectors. # # # # - **Y**: These are the *target* values for our model. For our wine review data, we'll use the `high_rating` key to determine the target values for our model. 
# + [markdown] slideshow={"slide_type": "fragment"}
# `model.fit(X)`

# + [markdown] slideshow={"slide_type": "fragment"}
# `model.fit(X, Y)`

# + slideshow={"slide_type": "fragment"}
nb_classifier.fit(review_word_counts, wine_df['rating'])

# + [markdown] slideshow={"slide_type": "slide"}
# We can now view a list of coefficients associated with each of our features:

# + slideshow={"slide_type": "fragment"}
# NOTE(review): `coef_` on MultinomialNB is deprecated in scikit-learn >= 0.24 and
# removed in 1.x; `feature_log_prob_[0]` exposes the same values there — confirm
# against the installed scikit-learn version.
nb_classifier.coef_[0]

# + slideshow={"slide_type": "slide"}
# Pair each coefficient with the feature (term) it belongs to.
coefficients = pd.Series(nb_classifier.coef_[0], index = vectorizer.get_feature_names())
# -

# `sort_values` lets us view the top terms used to describe the wines reviewed in our corpus.

# + slideshow={"slide_type": "slide"}
coefficients.sort_values(ascending=True)[:20]

# + [markdown] slideshow={"slide_type": "slide"}
# ### Classification Exercise 1
#
# Construct a model of UN speeches to distinguish between those before and after the collapse of the Soviet Union.
#

# + slideshow={"slide_type": "fragment"}

# + [markdown] slideshow={"slide_type": "slide"}
# ### 5) Create new data based on your (second) model

# + [markdown] slideshow={"slide_type": "fragment"}
# Our classifier can now be used to `predict` whether a wine received a relatively `High` or `Low` rating, based on the words used to describe the wine.

# + slideshow={"slide_type": "fragment"}
nb_classifier.predict(review_word_counts)

# + slideshow={"slide_type": "fragment"}
wine_df['prediction'] = nb_classifier.predict(review_word_counts)
# -

# We can run a crosstab with `pandas` to quickly get a feel for how accurately our classifier predicts `High` and `Low` reviews:

# + slideshow={"slide_type": "slide"}
pd.crosstab(wine_df['rating'], wine_df['prediction'])
# -

# For more detailed visualizations of prediction accuracy, we can import `classification_report`, `confusion_matrix`, and `accuracy_score` from scikit-learn.
# + slideshow={"slide_type": "slide"} from sklearn.metrics import classification_report, confusion_matrix, accuracy_score # - # With [accuracy_score](https://scikit-learn.org/stable/modules/generated/sklearn.metrics.accuracy_score.html?highlight=accuracy_score#sklearn.metrics.accuracy_score), we can get a numerical indicator of prediction accuracy, rather than just eyeball using a crosstab. We'll need to specify the following: # # - **y_true**: The correct labels our model is trying to predict. For this example, we'll set `y_true` to `rating`. # # # - **y_pred**: The labels predicted by our classifier. For this example, we'll set `y_pred` to `prediction`. # + slideshow={"slide_type": "slide"} accuracy_score(wine_df['rating'], wine_df['prediction']) # + [markdown] slideshow={"slide_type": "slide"} # Running a [confusion_matrix](https://scikit-learn.org/stable/modules/generated/sklearn.metrics.confusion_matrix.html?highlight=confusion_matrix#sklearn.metrics.confusion_matrix) produces results similar to the crosstab we generated earlier. However, through the us of the [heatmap](https://seaborn.pydata.org/generated/seaborn.heatmap.html) function available through the seaborn library, we can create a visual representation of the degree of prediction accuracy. # + slideshow={"slide_type": "slide"} import seaborn as sns cm = confusion_matrix(wine_df['rating'], wine_df['prediction']) sns.heatmap(cm, annot=True, cmap="Greens", fmt='g') # + [markdown] slideshow={"slide_type": "slide"} # Finally, we can generate a [classification_report](https://scikit-learn.org/stable/modules/model_evaluation.html#classification-report) to return a text report with some helpful classification metrics. 
# # - **Precision**: % of selected items that are correct # # # # - **Recall**: % of correct items that are selected # # # + slideshow={"slide_type": "slide"} print(classification_report(wine_df['rating'], wine_df['prediction'])) # - # We can also use our classifer to generate an array of probabilities that each of our features will occur in any given wine review, depending on whether a review is associated with a `High` or `Low` score: # + slideshow={"slide_type": "slide"} nb_classifier.predict_proba(review_word_counts) # + slideshow={"slide_type": "slide"} predict_df = pd.DataFrame(nb_classifier.predict_proba(review_word_counts), columns=nb_classifier.classes_) # + slideshow={"slide_type": "slide"} predict_df.head() # + slideshow={"slide_type": "slide"} wine_df_prediction = pd.concat([wine_df, predict_df], axis = 1) # + slideshow={"slide_type": "slide"} wine_df_prediction.sort_values('High', ascending=False)[['description','points']].head(15) # + slideshow={"slide_type": "slide"} wine_df_prediction.sort_values('Low', ascending=False)[['description','points']].head(15) # + [markdown] slideshow={"slide_type": "slide"} # ### Classification Exercise 2 # # Which **post-1989** speech had the highest likelihood of being delivered during an earlier period? # # + [markdown] slideshow={"slide_type": "slide"} # ### What about overfitting? # - # Up to this point in the chapter, we've done little to account for potentially overfitting our model to the data. When we overfit a model, we run the risk of describing the random errors in our dataset, rather than the relationships we're actually interested in analyizing. # # To get around this, we'll import [train_test_split](https://scikit-learn.org/stable/modules/generated/sklearn.model_selection.train_test_split.html?highlight=train_test_split#sklearn.model_selection.train_test_split), which allows us to set aside a random subset of texts in our corpus to test the fit of our model. 
We can indicate the proportion of the total body of texts we'd like to set aside for testing the model using the `test_size` parameter. In the following example, we'll set aside 20% of the wine reviews for testing by setting `test_size` to 0.2. # # + slideshow={"slide_type": "slide"} from sklearn.model_selection import train_test_split train, test = train_test_split(wine_df, test_size=0.2) # + slideshow={"slide_type": "slide"} len(train) # + slideshow={"slide_type": "fragment"} len(test) # - # Now that we've split up our data for training and testing, we can set the parameters of our vectorizer, then `fit` the vectorizer to our training data. # + slideshow={"slide_type": "slide"} vectorizer = CountVectorizer(lowercase = True, ngram_range = (1,2), stop_words = 'english', min_df = .01, max_features = None) vectorizer.fit(train['description']) # - # We can use the `transform` method to return a document-term metrix for our training data. # + slideshow={"slide_type": "slide"} X_train = vectorizer.transform(train['description']) # - # Using the MultinomialNB model from we imported earlier in the chapter, we can `fit` the matrix we've generated for descriptions of the wine reviews in the training data to the `rating` dummy variable indicating whether a wine received a high or low rating. # + slideshow={"slide_type": "fragment"} nb_classifier.fit(X_train, train['rating']) # - # The visualizations provided through scikitlearn's `classification_report`, `confusion_matrix`, and `accuracy_score` will come in handy as we compare the performance of our model when applied to our training data versus our test data. 
# + slideshow={"slide_type": "slide"} from sklearn.metrics import classification_report, confusion_matrix, accuracy_score # + slideshow={"slide_type": "slide"} print(accuracy_score(train['rating'], nb_classifier.predict(X_train))) # - # Before we can make this comparison, though, we'll have to `transform` our the raw documents in our test data into a document-term matrix. We'll also need to define our prediction model using the testing data. # + slideshow={"slide_type": "slide"} test_wf = vectorizer.transform(test['description']) test_prediction = nb_classifier.predict(test_wf) # + slideshow={"slide_type": "slide"} print(accuracy_score(test['rating'], test_prediction)) # + slideshow={"slide_type": "slide"} vectorizer = CountVectorizer(lowercase=True, ngram_range = (1,3), stop_words = 'english', max_df = .60, min_df = 5, max_features = None) # + slideshow={"slide_type": "slide"} vectorizer.fit(train['description']) print(len(vectorizer.get_feature_names())) X_train = vectorizer.transform(train['description']) nb_classifier.fit(X_train, train['rating']) # + slideshow={"slide_type": "slide"} print(accuracy_score(train['rating'], nb_classifier.predict(X_train))) # + slideshow={"slide_type": "slide"} print(accuracy_score(test['rating'], nb_classifier.predict(vectorizer.transform(test['description'])))) # + [markdown] slideshow={"slide_type": "slide"} # ### Classification Exercise 3 # # What happens to your model if you change some of the parameters for your vectorizer? Be sure to split the data between train and test! # # + [markdown] slideshow={"slide_type": "slide"} # ### Logistic Regression # - # In addition to MultinomialNB, we can also import a [Logistic Regression](https://scikit-learn.org/stable/modules/generated/sklearn.linear_model.LogisticRegression.html?highlight=logisticregression#sklearn.linear_model.LogisticRegression) model from scikitlearn. 
# + slideshow={"slide_type": "slide"} from sklearn.linear_model import LogisticRegression # + slideshow={"slide_type": "fragment"} ln_classifier = LogisticRegression() # - # We can define the parameters of our vectorizer, and fit the vectorizer to the training data in our wine dataframe. This will produce a total of 463 features. # + slideshow={"slide_type": "slide"} vectorizer = CountVectorizer(lowercase=True, ngram_range = (1,2), stop_words = 'english', min_df = .01, max_features = None) vectorizer.fit(train['description']) print(len(vectorizer.get_feature_names())) ln_classifier.fit(vectorizer.transform(train['description']), train['rating']) # - # Let's look at the accuracy scores returned for the training and testing data when applying a logistic regression model: print(accuracy_score(train['rating'], ln_classifier.predict(vectorizer.transform(train['description'])))) print(accuracy_score(test['rating'], ln_classifier.predict(vectorizer.transform(test['description'])))) # We can also see the `heatmap` function from before to get a quick feel for how our model holds up when fit to our test data: # + test_prediction = ln_classifier.predict(vectorizer.transform(test['description'])) cm = confusion_matrix(test['rating'], test_prediction) sns.heatmap(cm, annot=True, cmap="Greens", fmt='g') # - # ### Classification Exercise 4 # # What is the out sample accuracy of a logistic regression model on your data? # # `from sklearn.linear_model import LogisticRegression` # + [markdown] slideshow={"slide_type": "slide"} # ### *K* Nearest Neighbors # + [markdown] slideshow={"slide_type": "slide"} # We can also [neighbors-based](https://scikit-learn.org/stable/modules/neighbors.html#neighbors) classifiers from scikit-learn. Here, we'll import the [KNeighborsClassifier](https://scikit-learn.org/stable/modules/generated/sklearn.neighbors.KNeighborsClassifier.html), a classifier looking at the *k* nearest neighbors to each query point. 
In this context, *k* is a user-specified integer value.

# + slideshow={"slide_type": "slide"}
from sklearn.neighbors import KNeighborsClassifier

knn_classifier = KNeighborsClassifier(n_neighbors = 3)
# -

# Rather than implement a CountVectorizer as we have for our previous models in this chapter, we'll be importing the [TfidfVectorizer](https://scikit-learn.org/stable/modules/generated/sklearn.feature_extraction.text.TfidfVectorizer.html?highlight=tfidfvectorizer#sklearn.feature_extraction.text.TfidfVectorizer) for our *k* neighbors classifier. This way, we'll be able to look at weighted word frequencies across documents. For a detailed discussion of tf-idf, see the chapter on word frequencies.

# + slideshow={"slide_type": "slide"}
from sklearn.feature_extraction.text import TfidfVectorizer
# -

# The task of setting our parameters is nearly identical to what we encountered when setting up our `CountVectorizer`.

# + slideshow={"slide_type": "slide"}
tf_vector = TfidfVectorizer(lowercase = True, ngram_range = (1,2), stop_words = 'english', max_df = .60, min_df = .05, max_features = None)
# -

# We'll also reconfigure our `train_test_split`:

train, test = train_test_split(wine_df, test_size=0.7)

# Now, 30% of our total data is allocated to training the model, and 70% to testing it.
len(train)

len(test)

# We'll `transform` and `fit` as we have in the past:

# + slideshow={"slide_type": "slide"}
tf_vector.fit(train['description'])

# + slideshow={"slide_type": "slide"}
review_tf = tf_vector.transform(train['description'])

# + slideshow={"slide_type": "slide"}
knn_classifier.fit(review_tf, train['rating'])

# + slideshow={"slide_type": "slide"}
knn_prediction = knn_classifier.predict(review_tf)

# + slideshow={"slide_type": "slide"}
print(accuracy_score(train['rating'], knn_prediction))
# -

# We can also generate classification reports and heatmaps as we have before to visualize the strength of our *k* nearest neighbors classifier:

# + slideshow={"slide_type": "slide"}
print(classification_report(train['rating'], knn_prediction))

# + slideshow={"slide_type": "slide"}
cm = confusion_matrix(train['rating'], knn_prediction)
sns.heatmap(cm, annot=True, cmap="Greens", fmt='g')

# + [markdown] slideshow={"slide_type": "slide"}
# ### Classification Exercise 5
#
# What does a k-nearest neighbor for your speech dataset look like? (Don't forget to shrink your dataframe). How does the accuracy compare?
#

# + [markdown] slideshow={"slide_type": "slide"}
# ![](images/knn2.png)

# + slideshow={"slide_type": "slide"}
knn_classifier = KNeighborsClassifier(n_neighbors = 15)

# + [markdown] slideshow={"slide_type": "slide"}
# ## But what's the best fitting model?
# -

# #### GridSearchCV

# Now that we know how to run a variety of text classification models in Python, we'd benefit from a discussion of how to determine which model best fits our data. To figure this out, we'll import [GridSearchCV](https://scikit-learn.org/stable/modules/generated/sklearn.model_selection.GridSearchCV.html?highlight=gridsearchcv#sklearn.model_selection.GridSearchCV) from scikit-learn, which allows us to perform an exhaustive grid search cross-validation of parameter values.
# + slideshow={"slide_type": "slide"} from sklearn.model_selection import GridSearchCV # - # This allows us to, for example, determine whether it makes the most sense to set `n_neighbors` in our *k* nearest neighbors classifier to 2, 3 (what it was set to in our original model), or 7. # + slideshow={"slide_type": "slide"} # old model: knn_classifier = KNeighborsClassifier(n_neighbors = 3) parameters = {'n_neighbors' : [2,3, 7], 'weights' : ['distance', 'uniform']} # - grid = GridSearchCV(KNeighborsClassifier(), parameters, cv = 5) grid # + [markdown] slideshow={"slide_type": "slide"} # #### Pipeline # - # Using [Pipeline](https://scikit-learn.org/stable/modules/generated/sklearn.pipeline.Pipeline.html) along with GridSearchCV allows us to combine a list of *transforms* (estimators using the `transform` method) into a single estimator. # + slideshow={"slide_type": "slide"} from sklearn.pipeline import Pipeline from sklearn.model_selection import GridSearchCV # - # Below, we'll use Pipline to look at two transforms simultaneously: the `vectorizer` set up through `CountVectorizer`, and the `classifier` set up through `LogisticRegression`. We'll evaluate goodness of fit for models containing 300, 500, and 700 maximum features in the vectorizer. # + slideshow={"slide_type": "skip"} pipeline = Pipeline([ ('vectorizer' , CountVectorizer()), ('classifier' , LogisticRegression()) ]) parameters = {'vectorizer__max_features' : [300, 500, 700], } # - # Before we run GridSearchCV on our pipeline, let's take some time to set up the following parameters. While we've encountered `n_jobs` and `verbose` in preceding chapters, let's take some time to consider a new parameter: # # - **cv**: Allows us to choose one of several cross-validation strategies. By default, `cv` is set to 5, so that we perform a 5-fold cross-validation. 
# # + slideshow={"slide_type": "slide"} grid_search = GridSearchCV(pipeline, parameters, n_jobs = -1, cv = 5, verbose = 1) # - # Now we can fit our combined estimator to our 3 candidate models: # + slideshow={"slide_type": "slide"} grid_search.fit(wine_df['description'], wine_df['rating']) # - # We can use `grid_search` to determine the best accuracy score, the best estimator, and the best parameters among our candidates: # + slideshow={"slide_type": "slide"} grid_search.best_score_ # + slideshow={"slide_type": "slide"} grid_search.best_estimator_ # + slideshow={"slide_type": "slide"} grid_search.best_estimator_.get_params # - # Based on our results, it looks like the best bet would be to set `max_features` to 700. # + [markdown] slideshow={"slide_type": "slide"} # ### Classification Exercise 6 # # The "data" folder contains board games descriptions scraped from BoardGameGeeks.com. Analyze the relationship between the words in the `description` and whether or not reviewers thought it was a `quality_game`. # # # # #
Notebooks/Text-Classification.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + # %matplotlib inline # %matplotlib notebook import numpy as np import pandas as pd import seaborn as sns from matplotlib import pyplot as plt from sklearn.model_selection import train_test_split sns.set({'figure.figsize': (14, 13)}) # Dataset from: https://archive.ics.uci.edu/ml/datasets/Breast+Cancer+Wisconsin+(Diagnostic) df = pd.read_csv('../../datasets/bcw/bcw.csv').dropna() del df['id'] df = df[~(df == '?').any(axis=1)] # remove '?'-d data rows y = df['Class'].values del df['Class'] X = df.values # - sns.heatmap(df.corr().round(2), annot=True) pd.plotting.scatter_matrix(df, c=y, alpha=.8, marker='o', s=35, figsize=(15, 15,), cmap='Dark2'); # + from sklearn.neural_network import MLPClassifier X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=.2) model = MLPClassifier( hidden_layer_sizes=(450,), alpha=0.0001, activation='relu', learning_rate_init=.0025, max_iter=1000, shuffle=True, random_state=1 ) model.fit(X_train, y_train) y_pred_train = model.predict(X_train) y_pred_test = model.predict(X_test) print('Accuracy on train', np.mean(y_train == y_pred_train)) print('Accuracy on test: ', np.mean(y_test == y_pred_test))
notebooks/Breast_Cancer_Wisconsin_(Diag).ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + [markdown] colab_type="text" id="yAq6aHVh5oCH" # **Copyright 2020 Google LLC.** # # Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. # - # ------------- # **Important note** # # # This notebook has been adapted to function properly with most recent versions of pandas, sklearn and seaborn on January 2021 for the ML challange. # # ------------- # <NAME>, <NAME>, <NAME>, <NAME>\ # Involved master students AI \ # Faculty of Science\ # University of Amsterdam # + colab={} colab_type="code" id="VpEpTFph2ysp" from __future__ import division import pandas as pd import numpy as np import json import os,sys import seaborn as sns import matplotlib.pyplot as plt from sklearn.model_selection import train_test_split import numpy as np # + [markdown] colab_type="text" id="F-u1ecNmMiX3" # ## Overview # # ### Pre-processes COMPAS dataset: # # Download the COMPAS dataset from: # https://github.com/propublica/compas-analysis/blob/master/compas-scores-two-years.csv # and save it in the `./data/compas` folder. 
# # Input: ./data/compas/compas-scores-two-years.csv # # Outputs: train.csv, test.csv, mean_std.json, vocabulary.json, IPS_exampleweights_with_label.json, IPS_exampleweights_without_label.json and dataset_stats.json # + colab={} colab_type="code" id="oyFyRbFk7zox" pd.options.display.float_format = '{:,.2f}'.format dataset_base_dir = '../datasets/compas/' dataset_file_name = 'compas-scores-two-years.csv' # + [markdown] colab_type="text" id="PgWxzZeyKog3" # ### Processing original dataset # + colab={} colab_type="code" id="kL3-NykBQhKz" file_path = os.path.join(dataset_base_dir,dataset_file_name) with open(file_path, "r") as file_name: temp_df = pd.read_csv(file_name) # Columns of interest columns = ['juv_fel_count', 'juv_misd_count', 'juv_other_count', 'priors_count', 'age', 'c_charge_degree', 'c_charge_desc', 'age_cat', 'sex', 'race', 'is_recid'] target_variable = 'is_recid' target_value = 'Yes' # Drop duplicates temp_df = temp_df[['id']+columns].drop_duplicates() df = temp_df[columns].copy() # Convert columns of type ``object`` to ``category`` df = pd.concat([ df.select_dtypes(include=[], exclude=['object']), df.select_dtypes(['object']).apply(pd.Series.astype, dtype='category') ], axis=1).reindex(df.columns, axis=1) # Binarize target_variable df['is_recid'] = df.apply(lambda x: 'Yes' if x['is_recid']==1.0 else 'No', axis=1).astype('category') # Process protected-column values race_dict = {'African-American':'Black','Caucasian':'White'} df['race'] = df.apply(lambda x: race_dict[x['race']] if x['race'] in race_dict.keys() else 'Other', axis=1).astype('category') # + [markdown] colab_type="text" id="zKNj_ZV2K_09" # ### Shuffle and Split into Train (70%) and Test set (30%) # + colab={} colab_type="code" id="0ZLM1kXLz3PI" train_df, test_df = train_test_split(df, test_size=0.30, random_state=42) output_file_path = os.path.join(dataset_base_dir,'train.csv') with open(output_file_path, mode="w") as output_file: 
    train_df.to_csv(output_file,index=False,columns=columns,header=False)
    output_file.close()

output_file_path = os.path.join(dataset_base_dir,'test.csv')
with open(output_file_path, mode="w") as output_file:
    test_df.to_csv(output_file,index=False,columns=columns,header=False)
    output_file.close()

# + [markdown] colab_type="text" id="1VQE85STLL46"
# ### Computing Inverse propensity weights for each subgroup, and writes to directory.
#
# IPS_example_weights_with_label.json: json dictionary of the format
# {subgroup_id : inverse_propensity_score,...}. Used by IPS_reweighting_model approach.

# + colab={"height": 34} colab_type="code" id="2fkieHul02TL" outputId="9aa901d9-b832-4b89-edab-e3521d5c7217"
# Inverse-propensity weight per (race, sex) subgroup: total size / subgroup size.
IPS_example_weights_without_label = {
  0: (len(train_df))/(len(train_df[(train_df.race != 'Black') & (train_df.sex != 'Female')])), # 00: White Male
  1: (len(train_df))/(len(train_df[(train_df.race != 'Black') & (train_df.sex == 'Female')])), # 01: White Female
  2: (len(train_df))/(len(train_df[(train_df.race == 'Black') & (train_df.sex != 'Female')])), # 10: Black Male
  3: (len(train_df))/(len(train_df[(train_df.race == 'Black') & (train_df.sex == 'Female')]))  # 11: Black Female
}

output_file_path = os.path.join(dataset_base_dir,'IPS_example_weights_without_label.json')
with open(output_file_path, mode="w") as output_file:
    output_file.write(json.dumps(IPS_example_weights_without_label))
    output_file.close()

print(IPS_example_weights_without_label)

# + colab={"height": 34} colab_type="code" id="Dm15uo-R0-LB" outputId="2619a7d0-d079-43c9-cee6-d9eeb3ad4ce4"
# Same weighting, additionally stratified by the binary target label.
IPS_example_weights_with_label = {
  0: (len(train_df))/(len(train_df[(train_df[target_variable] != target_value) & (train_df.race != 'Black') & (train_df.sex != 'Female')])), # 000: Negative White Male
  1: (len(train_df))/(len(train_df[(train_df[target_variable] != target_value) & (train_df.race != 'Black') & (train_df.sex == 'Female')])), # 001: Negative White Female
  2: (len(train_df))/(len(train_df[(train_df[target_variable] !=
target_value) & (train_df.race == 'Black') & (train_df.sex != 'Female')])), # 010: Negative Black Male 3: (len(train_df))/(len(train_df[(train_df[target_variable] != target_value) & (train_df.race == 'Black') & (train_df.sex == 'Female')])), # 011: Negative Black Female 4: (len(train_df))/(len(train_df[(train_df[target_variable] == target_value) & (train_df.race != 'Black') & (train_df.sex != 'Female')])), # 100: Positive White Male 5: (len(train_df))/(len(train_df[(train_df[target_variable] == target_value) & (train_df.race != 'Black') & (train_df.sex == 'Female')])), # 101: Positive White Female 6: (len(train_df))/(len(train_df[(train_df[target_variable] == target_value) & (train_df.race == 'Black') & (train_df.sex != 'Female')])), # 110: Positive Black Male 7: (len(train_df))/(len(train_df[(train_df[target_variable] == target_value) & (train_df.race == 'Black') & (train_df.sex == 'Female')])), # 111: Positive Black Female } output_file_path = os.path.join(dataset_base_dir,'IPS_example_weights_with_label.json') with open(output_file_path, mode="w") as output_file: output_file.write(json.dumps(IPS_example_weights_with_label)) output_file.close() print(IPS_example_weights_with_label) # + [markdown] colab_type="text" id="8SQc7h9HLcSc" # ### Construct vocabulary.json, and write to directory. # # vocabulary.json: json dictionary of the format {feature_name: [feature_vocabulary]}, containing vocabulary for categorical features. 
# + colab={"height": 54} colab_type="code" id="YIebJG2YfMpv" outputId="2a03258b-c31e-47ce-a282-4f4168f8ca65" cat_cols = train_df.select_dtypes(include='category').columns vocab_dict = {} for col in cat_cols: vocab_dict[col] = list(set(train_df[col].cat.categories)) output_file_path = os.path.join(dataset_base_dir,'vocabulary.json') with open(output_file_path, mode="w") as output_file: output_file.write(json.dumps(vocab_dict)) output_file.close() # + [markdown] colab_type="text" id="V9cxiG9SLfk6" # ### Construct mean_std.json, and write to directory # # mean_std.json: json dictionary of the format feature_name: [mean, std]}, # containing mean and std for numerical features. # + colab={"height": 54} colab_type="code" id="EjZvIZC6FMFm" outputId="d9ae1abd-d18f-486b-f067-c657b1420d97" temp_dict = train_df.describe().to_dict() mean_std_dict = {} for key, value in temp_dict.items(): mean_std_dict[key] = [value['mean'],value['std']] output_file_path = os.path.join(dataset_base_dir,'mean_std.json') with open(output_file_path, mode="w") as output_file: output_file.write(json.dumps(mean_std_dict)) output_file.close() print(mean_std_dict) # - # ### Construct datasets_stats.json # # # dataset_stats.json: json dictionary that contains whith information that is hardcoded in the original TensorFlow implementation by Lahoti et al. # + stats = {} stats["feature_names"] = list(train_df.columns) stats["mean_std"] = mean_std_dict stats["sensitive_column_names"] = ["sex", "race"] stats["sensitive_column_values"] = ["Female", "Black"] stats["target_column_name"] = "is_recid" stats["target_column_positive_value"] = "Yes" stats["vocabulary"] = vocab_dict output_file_path = os.path.join(dataset_base_dir, 'dataset_stats.json') with open(output_file_path, mode="w") as output_file: output_file.write(json.dumps(stats, indent=4, sort_keys=True)) output_file.close()
data/preprocess_data/CreateCompasDatasetFiles.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3.6 # language: python # name: python36 # --- # Copyright (c) Microsoft Corporation. All rights reserved. # # Licensed under the MIT License. # # 06. Logging APIs # This notebook showcase various ways to use the Azure Machine Learning service run logging APIs, and view the results in the Azure portal. # ## Prerequisites # Make sure you go through the [00. Installation and Configuration](../../00.configuration.ipynb) Notebook first if you haven't. Also make sure you have tqdm and matplotlib installed in the current kernel. # # ``` # (myenv) $ conda install -y tqdm matplotlib # ``` # ## Validate Azure ML SDK installation and get version number for debugging purposes # + tags=["install"] from azureml.core import Experiment, Run, Workspace import azureml.core import numpy as np # Check core SDK version number print("SDK version:", azureml.core.VERSION) # - # ## Initialize Workspace # # Initialize a workspace object from persisted configuration. # + tags=["create workspace"] ws = Workspace.from_config() print('Workspace name: ' + ws.name, 'Azure region: ' + ws.location, 'Subscription id: ' + ws.subscription_id, 'Resource group: ' + ws.resource_group, sep='\n') # - # ## Set experiment # Create a new experiment (or get the one with such name). exp = Experiment(workspace=ws, name='logging-api-test') # ## Log metrics # We will start a run, and use the various logging APIs to record different types of metrics during the run. # + from tqdm import tqdm # start logging for the run run = exp.start_logging() # log a string value run.log(name='Name', value='Logging API run') # log a numerical value run.log(name='Magic Number', value=42) # Log a list of values. Note this will generate a single-variable line chart. 
run.log_list(name='Fibonacci', value=[0, 1, 1, 2, 3, 5, 8, 13, 21, 34, 55, 89]) # create a dictionary to hold a table of values sines = {} sines['angle'] = [] sines['sine'] = [] for i in tqdm(range(-10, 10)): # log a metric value repeatedly, this will generate a single-variable line chart. run.log(name='Sigmoid', value=1 / (1 + np.exp(-i))) angle = i / 2.0 # log a 2 (or more) values as a metric repeatedly. This will generate a 2-variable line chart if you have 2 numerical columns. run.log_row(name='Cosine Wave', angle=angle, cos=np.cos(angle)) sines['angle'].append(angle) sines['sine'].append(np.sin(angle)) # log a dictionary as a table, this will generate a 2-variable chart if you have 2 numerical columns run.log_table(name='Sine Wave', value=sines) run.complete() # - # Even after the run is marked completed, you can still log things. # ## Log an image # This is how to log a _matplotlib_ pyplot object. # + # %matplotlib inline import matplotlib.pyplot as plt angle = np.linspace(-3, 3, 50) plt.plot(angle, np.tanh(angle), label='tanh') plt.legend(fontsize=12) plt.title('Hyperbolic Tangent', fontsize=16) plt.grid(True) run.log_image(name='Hyperbolic Tangent', plot=plt) # - # ## Upload a file # You can also upload an abitrary file. First, let's create a dummy file locally. # + # %%writefile myfile.txt This is a dummy file. # - # Now let's upload this file into the run record as a run artifact, and display the properties after the upload. props = run.upload_file(name='myfile_in_the_cloud.txt', path_or_stream='./myfile.txt') props.serialize() # ## Examine the run # Now let's take a look at the run detail page in Azure portal. Make sure you checkout the various charts and plots generated/uploaded. run # You can get all the metrics in that run back. run.get_metrics() # You can also see the files uploaded for this run. run.get_file_names() # You can also download all the files locally. 
# +
import os

# Fetch every artifact of the run into a local "files" folder, flattening the
# remote directory structure to bare file names.
os.makedirs('files', exist_ok=True)

for remote_name in run.get_file_names():
    local_path = os.path.join('files', remote_name.split('/')[-1])
    print('Downloading file {} to {}...'.format(remote_name, local_path))
    run.download_file(remote_name, local_path)
01.getting-started/06.logging-api/06.logging-api.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Growth and the (optimal) ribosomal content of cells # # # # #### What to improve/add # # - add more data, particularly about ribosomal contant vs growth. # - introduce simpler consideration first, with scenario where cells consist only of ribosomes and RNAP. Consider different parts of translation and transcription (RNAP making rRNA, mRNA for ribosomes and RNAP), ribosomes translating proportional to mRNA abundance. Consider costs of mRNA synthesis. How important is it in terms of costs that several ribosomes use one mRNA to translate? #start python session import numpy as np import pandas as pd import matplotlib.pyplot as plt from sympy import * import matplotlib matplotlib.rcParams['pdf.fonttype'] = 42 matplotlib.rcParams['ps.fonttype'] = 42 # # # Growth laws # # Simple consideration why an approx. linear relation between ribosome abundance and growth rate means optimal allocation of ressources to ribosomes and other proteins. Arguments do not consider how the cell manage to obtain an optimal abundance of ribosomes, which is an obivous next step to think about more carefully. # # In publications from <NAME>'s lab together with <NAME> and <NAME> similar considerations have been done, consider particularly Scott et al 2014. However I found the presentations in their manuscript not the easiest to follow since it first starts with the 'ribosome line' or other phenomenological relations as given (e.g. ribosome fraction increases proportional with growth rate). Only much later on model based rationalizations are given why these relation might be useful for the cell. I think it is much easier to understand the relations (or growth-laws) when talking immediatly about nutrient levels and the limitation of translation. 
With the relations being the consequence of a regulation scheme ensuring an optimal allocation into ribosomes. Furthermore, I think the specific illustration of optimal ribosome allocation vs alternative simpler allocation strategies presented here is helpful to illustrate how important a proper allocation strategy is to maximize growth. # # Note: To prevent the introduction of new notation I started followingg closely the notation used in Scott et al 2014. But I probably was not always consistent. # # ## Ribosomes make all proteins # # To start, let's consider the synthesis of total protein mass $M$ by the ribosomes. The synthesis of proteins depends on the number of ribosomes $N_r$ and their translation speed (how many new AA are synthesized per time). # # $$ \frac{dM}{dt}= k_R N_{r}$$ # # Here, we take 1AA as basic unit for protein mass, so there is no extra conversion factor (and the translation speed $k_R$ has the unit AA/time. # # In a steady state where the composition of the cell is not changing, we thus have for the growth rate: # # $$ \lambda=\frac{dM}{dt}/M=k_R N_{r}/M$$ # # With the total mass of ribosomes $M_R=N_R m_r$ we can rewrite: # # $$ \lambda=\gamma M_R/M=\gamma\phi_R $$ # # with $\phi_R$ being the ribosomal protein mass fraction [proportional to the easier to masure fraction total RNA to total protein]. $\gamma\equiv k_R/m_R$ denotes the translation efficiency and has the unit of a rate (1/time). $m_R$ is 7459 AA for E.coli. # # Note: It is still better to use the term efficiency instead of rate here since it might be the consequence of something more complex and not a simple chemical process described by a constant reaction rate. For example, when using chloramphenicol what seems to happen is that some ribosomes are stopping completely while others are still translating with regular translation speed. 
A chloramphenicol-dependent $\gamma$ describes the total effect of chloramphenicol on translation, and not necessarily the effect of chloramphenicol on single ribosomes.
#
# This relation should in general be true in steady state, but does not necessarily give a linear relation between ribosome fraction and growth rate: for different growth rates (in different environments), the cellular composition is changing and thus the translation efficiency $\gamma$ is also expected to change.
#
# In particular, we expect translation efficiency to go down if nutrient levels within the cells fall too much (e.g. not sufficient charged tRNAs to ensure smooth translation). Let's simplify and consider only one type of nutrient source, like a pool of amino acids (or charged tRNAs) with concentration $a$. We expect a sigmoidal dependence of translation efficiency when the availability of charged tRNA varies, for example of Michaelis-Menten type:
#
# $$\gamma=\gamma(a)=\gamma_{max}\cdot\frac{a}{a+a_0}$$
#
# Ribosomes are translating at full power for $a\gg a_0$, but translation slows down substantially when $a$ falls below $a_0$.
#
# Note: Translation is not a simple chemical reaction and other functional forms might be more realistic, e.g. a Hill curve with a steeper increase of translation around $a_0$. The advantage of using a Michaelis-Menten type is that we can handle the steady state analytically.
# Note: $a_0$ can be estimated from studies investigating how translation changes with charged tRNA levels in the cell. In particular, there are studies where single tRNA species have been depleted and the effect on translation speed has been quantified.
# # Spoiler alert: The linear relation between growth rate and ribosome fraction, a hall-mark of the growth-laws' and obsered over a wide range of growth conditions only follows when a0 is low. In the following we consider the nutrient levels in the cell in more detail and show that optimal growth rates are obtained when nutrient levels are maintained at sufficiently high levels. # # ## Nutrient levels within the cell # # The nutrient levels within the cell (think of it as charged tRNA) is givene by a balance of nutrient uptake and consumption by the translating ribosomes. # # $$\frac{d A}{dt}=J_{in}-\beta \frac{dM}{dt}$$ # # Here, $J_{in}$ describeds the nutrient influx (how nutrient uptake leads to new charged tRNA). Since we measure theinternal nutrient pool in units of charged tRNA and protein mass in units of amino acid we can set the conversion factor $\beta$ translating the consumption of internal nutrients to newly translated protein mass to 1. For the concentration of nutrients within the cell, $a=A/M$ we get: # # $$\frac{d a}{dt}=J_{in}/M-\frac{1}{M}\frac{dM}{dt}(\beta+a)=J_{in}/M-\lambda \beta-\lambda a$$ # # The last term describes dilution of the amino acid concentrations by cell growth. The influx of new nutrients depends on the abundance of proteins taking care of nutrient influx. If we more specifically describe by $M_a$ the proteins required for the uptake of nutrients and by $k_a$ the rate, then we have $J_{in}=k_a M_a$. Or $J_{in}/M=k_a\phi_N $ with $\phi_N=M_a/M$. # # In steady state, the nutrient concentration is thus given by: # # $$a=\beta\left(\frac{k_a\phi_N}{\beta\lambda}-1\right)\equiv \beta \left(\frac{\nu \phi_N}{\lambda}-1\right)$$ # # with the "nutrient efficiency" defined as $\nu=k_a/\beta$. # # Note: Nutrient turnover in the cell is typically large compared to dilution and thus this relation often simplifies to the relation $\lambda=\nu \phi_N$. 
Thus, when changing environmental conditions without changing $\nu$ (like by chloramphenicol as in Scott et al) one sees a linear relation between ribosome fraction and growth rate. However, we are interested here in all kinds of possible steady states, including those where nutrient concentrations reach very high levels. We thus include the dilution term in our further considerations.
#
# Note: In principle, nutrient levels can be controlled in addition by some negative feedback. Too high nutrient levels can, for example, lead to a decrease of protein activity (drop of $k_a$). We are not including this here as we only want to illustrate how optimal steady-state growth rates are accomplished.
#
# Note: It is important to realize that $M_a$ represents not only one type of enzyme (like a specific uptake protein) but stands for a pool of enzymes needed to charge tRNA (including metabolic enzymes and whatever is needed to synthesize the AA). Accordingly, $k_a$ is not a simple enzymatic rate.
#
# ## Steady state growth rates
#
# The steady state growth rates are a result of combining the equations for nutrient levels and growth introduced above:
# $$a=\beta \left(\frac{\nu \phi_N}{\lambda}-1\right)$$ and
# $$\lambda=\gamma_{max}\frac{a}{a+a_0}\phi_R$$
#
# For the case of translation showing a Michaelis-Menten dependence on nutrient levels (as stated above) this is just the solution of a quadratic equation.
#
# We can then consider specifically how growth is changing when allocation parameters (particularly the ribosomal content) are changing. We specifically consider the following three (simple) regulation scenarios and compare how well each matches what the cell is doing across growth conditions.
#
# ### Scenario 1: Cell is adjusting the ribosome fraction to optimize growth.
# # We can solve the equations for different ribosome abundance levels $\phi_R$ and find the abundance levels for which growth-rate becomes optimal (given the other parameters are fixed). Spoiler alert: this elegant regulation scheme matches very well with the observations.
#
# ### Scenario 2: Cell is maintaining a constant translation rate.
#
# We can solve the equations for different ribosome abundance levels and find the ribosome abundance levels such that translation rate, $\gamma=\gamma_{max}\frac{a}{a+a_0}$, is constant.
#
# ### Scenario 3: Cell is maintaining a fixed ribosomal content.
#
# Finally, we can solve the equations and consider how growth-rate is changing when ribosomal content is not adjusted but constant (not changing when other parameters are changing).
#
# In the following we outline the code to obtain these solutions, and then we analyze how growth changes for the different scenarios when the major physiological parameters (nutrient efficiency and translation speed) are varied.

# +
# Symbolic derivation of the steady-state growth rate and of the growth-optimal
# ribosome allocation. Can be skipped -- the resulting expressions are copied
# into the numerical helper functions below.
var('lam')
var('beta')
var('nu')
var('phiR')
var('gamma_max')
var('a_0')
var('phiO')
var('a_sol1')
var('a_sol2')
var('mpc')

# Steady state: amino-acid levels are only diluted by growth; slow-down of
# translation follows a simple Michaelis-Menten dependence on precursor levels.
solve((lam**2)+lam*(beta*nu*(1-phiR-phiO)+beta*phiR*gamma_max)/(a_0-beta)-gamma_max*beta*nu*phiR*(1-phiR-phiO)/(a_0-beta), lam)

# Physiological root of the quadratic above; take its derivative in phiR
# (ribosomal content) and solve for the optimum (derivative equal to zero).
grsolution1=(-beta*(gamma_max*phiR - nu*phiO - nu*phiR + nu) + sqrt(beta*(-4*a_0*gamma_max*nu*phiO*phiR - 4*a_0*gamma_max*nu*phiR**2 + 4*a_0*gamma_max*nu*phiR + beta*gamma_max**2*phiR**2 + 2*beta*gamma_max*nu*phiO*phiR + 2*beta*gamma_max*nu*phiR**2 - 2*beta*gamma_max*nu*phiR + beta*nu**2*phiO**2 + 2*beta*nu**2*phiO*phiR - 2*beta*nu**2*phiO + beta*nu**2*phiR**2 - 2*beta*nu**2*phiR + beta*nu**2)))/(2*(a_0 - beta))
dlambda_dphiR = diff(grsolution1, phiR, 1)
# this step takes a few seconds
extreme_growth_solutions = solve(dlambda_dphiR, phiR)
print("solution: growth for optimal ribosome allocation")
# -

extreme_growth_solutions[0]

# +
# Symbolic derivation of the ribosome allocation that keeps precursor levels
# fixed. Can be skipped -- the resulting expressions are used below.
var('lam')
var('beta')
var('nu')
var('phiR')
var('gamma_max')
var('a_0')
var('phiO')
var('a_sol1')
var('a_sol2')
var('mpc')

# Steady-state precursor concentration for a given allocation.
solve((mpc**2)+mpc*beta*(-1*nu*(1-phiR-phiO)/(phiR*gamma_max)+1)-nu*(1-phiR-phiO)*a_0*beta/(gamma_max*phiR), mpc)

var('avaluec')

# Subtract the target concentration `avaluec` from each root and solve for the
# allocation phiR at which the precursor concentration equals avaluec.
mpc1solminconstv=(-beta*(gamma_max*phiR + nu*phiO + nu*phiR - nu) + sqrt(beta*(-4*a_0*gamma_max*nu*phiO*phiR - 4*a_0*gamma_max*nu*phiR**2 + 4*a_0*gamma_max*nu*phiR + beta*gamma_max**2*phiR**2 + 2*beta*gamma_max*nu*phiO*phiR + 2*beta*gamma_max*nu*phiR**2 - 2*beta*gamma_max*nu*phiR + beta*nu**2*phiO**2 + 2*beta*nu**2*phiO*phiR - 2*beta*nu**2*phiO + beta*nu**2*phiR**2 - 2*beta*nu**2*phiR + beta*nu**2)))/(2*gamma_max*phiR)-avaluec
mpc2solminconstv=-(beta*(gamma_max*phiR + nu*phiO + nu*phiR - nu) + sqrt(beta*(-4*a_0*gamma_max*nu*phiO*phiR - 4*a_0*gamma_max*nu*phiR**2 + 4*a_0*gamma_max*nu*phiR + beta*gamma_max**2*phiR**2 + 2*beta*gamma_max*nu*phiO*phiR + 2*beta*gamma_max*nu*phiR**2 - 2*beta*gamma_max*nu*phiR + beta*nu**2*phiO**2 + 2*beta*nu**2*phiO*phiR - 2*beta*nu**2*phiO + beta*nu**2*phiR**2 - 2*beta*nu**2*phiR + beta*nu**2)))/(2*gamma_max*phiR)-avaluec

fixconst1 = solve(mpc1solminconstv, phiR)
fixconst2 = solve(mpc2solminconstv, phiR)
display(fixconst1)
display(fixconst2)
# -

# +
# Run this cell to have the numerical steady-state solutions ready.

# Colors used by the plotting cells below.
colorPC = 'k'       # '#4A71B7'
colorPR = 'k'       # '#70AD44'
colorGR = 'k'
colorGRopt = 'orange'
colorPCcon = 'magenta'
alphac = 0.8

# Conversion factor: ribosome elongation speed in AA/s -> translation
# efficiency in 1/h (one ribosome is m_R = 7459 AA of protein).
pf_translationrate = 3600. / 7459.


def _gamma_eff(gamma_maxin, fracactive):
    """Effective maximal translation efficiency (1/h) from elongation speed in
    AA/s and the fraction of actively translating ribosomes."""
    return gamma_maxin * fracactive * pf_translationrate


def _lambda_steady(beta, gamma_max, nu, a0, phiO, phiR):
    """Physiological (positive-nutrient) root of the steady-state quadratic

        (a0 - beta)*lam**2 + beta*(nu*phiN + gamma_max*phiR)*lam
            - gamma_max*beta*nu*phiR*phiN = 0,   phiN = 1 - phiO - phiR,

    derived symbolically above. The original code repeated the fully expanded
    expression in three functions; this helper is the single shared copy
    (algebraically identical, written via the compact discriminant).
    Works element-wise for numpy-array arguments."""
    phiN = 1. - phiO - phiR
    disc = beta * (beta * (gamma_max * phiR - nu * phiN)**2
                   + 4. * a0 * gamma_max * nu * phiR * phiN)
    return (-beta * (gamma_max * phiR + nu * phiN) + np.sqrt(disc)) / (2. * (a0 - beta))


def grsolution(beta, gamma_maxin, nu, a_0, phiO, phiR, fracactive):
    """Steady state for a given ribosome allocation phiR.

    Returns [growth rate (1/h), precursor concentration a, elongation speed (AA/s)]."""
    gamma_max = _gamma_eff(gamma_maxin, fracactive)
    grsol1s = _lambda_steady(beta, gamma_max, nu, a_0, phiO, phiR)
    a_solution1 = beta * (nu * (1 - phiO - phiR) / grsol1s - 1)
    # elongation speed is reported back in AA/s per active ribosome
    return [grsol1s, a_solution1,
            gamma_max * a_solution1 / (a_solution1 + a_0) / fracactive / pf_translationrate]


def optphiR_solution(beta, gamma_maxin, nu, a0, phiO, fracactive):
    """Growth-optimal ribosome allocation (symbolic optimum from above).

    Returns [phiR_opt, precursor concentration, growth rate (1/h), elongation speed (AA/s)]."""
    gamma_max = _gamma_eff(gamma_maxin, fracactive)
    sq = np.sqrt(a0 * beta * gamma_max * nu)  # hoisted invariant of the formula
    phiRopt = -(2*a0*gamma_max*nu*phiO - 2*a0*gamma_max*nu - beta*gamma_max*nu*phiO
                + beta*gamma_max*nu - beta*nu*nu*phiO + beta*nu**2
                - gamma_max*phiO*sq + gamma_max*sq + nu*phiO*sq - nu*sq) \
        / (4*a0*gamma_max*nu - beta*gamma_max*gamma_max - 2*beta*gamma_max*nu - beta*nu**2)
    grsol1s = _lambda_steady(beta, gamma_max, nu, a0, phiO, phiRopt)
    a_solution1 = beta * (nu * (1 - phiO - phiRopt) / grsol1s - 1)
    # grsol1s*0 + ... keeps the result array-shaped when the inputs are arrays
    translc = grsol1s * 0 + gamma_max * a_solution1 / (a_solution1 + a0) / fracactive / pf_translationrate
    return [phiRopt, a_solution1, grsol1s, translc]


def constphiR_solution(beta, gamma_maxin, nu, a0, phiO, phiRin, fracactive):
    """Steady state when the ribosome allocation is held fixed at phiRin.

    Returns [phiR, precursor concentration, growth rate (1/h), elongation speed (AA/s)]."""
    gamma_max = _gamma_eff(gamma_maxin, fracactive)
    grsol1s = _lambda_steady(beta, gamma_max, nu, a0, phiO, phiRin)
    a_solution1 = beta * (nu * (1 - phiO - phiRin) / grsol1s - 1)
    translc = grsol1s * 0 + gamma_max * a_solution1 / (a_solution1 + a0) / fracactive / pf_translationrate
    return [grsol1s * 0 + phiRin, a_solution1, grsol1s, translc]


def constPCsolution(beta, gamma_maxin, nu, a_0, avaluec, phiO, fracactive):
    """Steady state when the precursor concentration is held fixed at avaluec.

    Returns [phiR, precursor concentration, growth rate (1/h), elongation speed (AA/s)]."""
    gamma_max = _gamma_eff(gamma_maxin, fracactive)
    sol1c = beta*nu*(-a_0*phiO + a_0 - avaluec*phiO + avaluec) \
        / (a_0*beta*nu + avaluec*avaluec*gamma_max + avaluec*beta*gamma_max + avaluec*beta*nu)
    # sol1c*0. + ... keeps scalar quantities array-shaped when nu is an array
    translc = sol1c * 0. + gamma_max * avaluec / (avaluec + a_0) / fracactive / pf_translationrate
    return [sol1c, sol1c * 0. + avaluec, gamma_max * sol1c * avaluec / (avaluec + a_0), translc]
# -

# # Plot variation growth-rate
#
# The physiological solution (positive nutrient levels) states how growth rates change with different parameters and when the fraction of ribosomes is varied. Plots are generated using the following cell.
#
# As we see, growth rates vary with ribosome fraction, with an optimal ribosome fraction at intermediate levels. If ribosome fractions are too low, then growth is limited by the number of ribosomes, and the available nutrients within the cell are not used efficiently. If ribosome fractions are too high, then nutrient levels fall and translation is not efficient anymore.
#
# We see already by these plots that optimal growth rates occur at a ribosomal fraction where nutrient levels correspond roughly to $a_0$, the precursor level below which ribosomes stop elongating efficiently.
# + #standard values used to plot a0v=0.0013*20# gamma_maxv=20 betav=1 nuv=10 phiOv=0.35 fracactivev=0.65 #get solution vor varying parameters (here phiR) x=np.linspace(0.01,(1-phiOv)*0.98,100) [grc,pcc,translcc]=grsolution(betav,gamma_maxv,nuv,a0v,phiOv,x,fracactivev) #get phiR for optimal growth gropt_alphaR,gropt_pc,gropt_gr,gropt_transl=optphiR_solution(betav,gamma_maxv,nuv,a0v,phiOv,fracactivev) #get phi for pc levels constant avaluev=10.0*a0v pccon_alphaR,pccon_pc,pccon_gr,pccon_transl=constPCsolution(betav,gamma_maxv,nuv,a0v,avaluev,phiOv,fracactivev) fig, axs = plt.subplots(3,1, figsize=(2.8,6.9)) axs[0].set_xlim(0,(1-phiOv)*1.05*100) axs[1].set_xlim(0,(1-phiOv)*1.05*100) axs[1].set_yscale('log') #plot varying growth-rate axs[0].plot(100*x,grc,color='k',ls='-') rlabel='allocation to translation $\\alpha_T\, (\%)$ ' axs[0].set_xlabel(rlabel) axs[0].set_ylabel("growth rate $\lambda\, (1/h)$") #plot varying pc concentrations axs[1].plot(100*x,pcc,color='k',ls='-') axs[1].set_xlabel(rlabel) axs[1].set_ylabel("charged tRNA $m_{t}\, (per AA)$") axs[2].set_xlabel(rlabel) axs[2].set_ylabel("elongation speed $(AA/s)$") axs[2].plot(100*x,translcc,color='k',ls='-') #plot growth-optimal case axs[0].axvline(100*gropt_alphaR,ls='--',color=colorGRopt,alpha=alphac) axs[0].axhline(gropt_gr,ls='--',color=colorGRopt,alpha=alphac) axs[1].axvline(100*gropt_alphaR,ls='--',color=colorGRopt,alpha=alphac) axs[1].axhline(gropt_pc,ls='--',color=colorGRopt,alpha=alphac) axs[2].axvline(100*gropt_alphaR,ls='--',color=colorGRopt,alpha=alphac) axs[2].axhline(gropt_transl,ls='--',color=colorGRopt,alpha=alphac) #plot case with fixed pcc concentration if 3>4: axs[0].axvline(100*pccon_alphaR,ls='--',color=colorPCcon,alpha=alphac) axs[0].axhline(pccon_gr,ls='--',color=colorPCcon,alpha=alphac) axs[1].axvline(100*pccon_alphaR,ls='--',color=colorPCcon,alpha=alphac) axs[1].axhline(pccon_pc,ls='--',color=colorPCcon,alpha=alphac) 
axs[1].axhline(a0v,label='$K_{M,PC}$',ls=':',color=colorPC,alpha=alphac) #plot varying translation #plt.legend() plt.tight_layout() plt.savefig("plot_output/growthrate_vs_alpha.pdf") # - # Now we can check how growth rate is changing when changing parameters. For example, when changing translation efficiency or nutrient efficiency. F # # Note: Presentation needs to be improved. Stoped adding explaining comments from here onwards. And there are inconsistencies with the notation (e.g. used alpha for the allocation parameters etc). # + # Plot different nutrient qualifies....how growth is varying with ribosome fraction #decide which nutrient qualities should be probed a0v=0.0013*20# gamma_maxv=20 betav=1 nuv=10 phiOv=0.35 fracactivev=0.65 nul=np.linspace(0.05*nuv,nuv,6) #prepare figure fig, axs = plt.subplots(3,1, figsize=(2.8,6.9)) axs[0].set_ylabel("growth-rate $\lambda\, (1/h)$") axs[1].set_ylabel("charged tRNA $m_{t}\, (per AA)$") axs[2].set_ylabel("translation rate $(AA/s)$") rlabel='allocation to translation $\\alpha_T\, (\%)$ ' axs[1].set_xlabel(rlabel) axs[0].set_xlabel(rlabel) axs[2].set_xlabel(rlabel) axs[0].set_xlim(0,(1-phiOv)*1.05*100) axs[1].set_xlim(0,(1-phiOv)*1.05*100) axs[2].set_xlim(0,(1-phiOv)*1.05*100) axs[1].set_yscale('log') colorl=['#a6cee3','#1f78b4','#b2df8a','#33a02c','#fb9a99','#e31a1c','#fdbf6f','#ff7f00','#cab2d6','#6a3d9a','#ffff99','#b15928'] #go through different nutrient qualities x=np.linspace(0.01,(1.-phiOv)*0.99,100) alphRoptl=[] groptl=[] proptl=[] transllist=[] for il in range(0,nul.shape[0]): colorc=colorl[il] #get solution vor varying parameters (here phiR) nuv=nul[il] [grc,pcc,tcc]=grsolution(betav,gamma_maxv,nuv,a0v,phiOv,x,fracactivev) #get phiR for optimal growth gropt_alphaR,gropt_pc,gropt_gr,gropt_transl=optphiR_solution(betav,gamma_maxv,nuv,a0v,phiOv,fracactivev) alphRoptl.append(gropt_alphaR) groptl.append(gropt_gr) proptl.append(gropt_pc) transllist.append(gropt_transl) #get phi for pc levels constant #avaluev=10.0*a0v 
#pccon_alphaR,pccon_pc,pccon_gr=constPCsolution(betav,gamma_maxv,nuv,a0v,avaluev,phiOv) #plot varying growth-rate axs[0].plot(x*100,grc,color=colorc,ls='-',label=str(round(nuv,1))) #plot varying pc concentrations axs[1].plot(x*100,pcc,color=colorc,ls='-') axs[2].plot(x*100,tcc,color=colorc,ls='-') #plot growth-optimal case axs[0].axvline(100*gropt_alphaR,ls='--',color=colorc,alpha=alphac) #axs[0].axhline(gropt_gr,ls='--',color=colorc,alpha=alphac) axs[1].axvline(100*gropt_alphaR,ls='--',color=colorc,alpha=alphac) axs[2].axvline(100*gropt_alphaR,ls='--',color=colorc,alpha=alphac) #axs[1].axhline(gropt_pc,ls='--',color=colorc,alpha=alphac) #plot case with fixed pcc concentration if 3>4: axs[0].axvline(pccon_alphaR,ls='--',color=colorPCcon,alpha=alphac) axs[0].axhline(pccon_gr,ls='--',color=colorPCcon,alpha=alphac) axs[1].axvline(pccon_alphaR,ls='--',color=colorPCcon,alpha=alphac) axs[1].axhline(pccon_pc,ls='--',color=colorPCcon,alpha=alphac) axs[1].axhline(a0v,label='$K_{M,PC}$',ls=':',color=colorPC,alpha=alphac) axs[0].legend(title='$\\nu$') axs[1].legend() plt.tight_layout() plt.savefig("plot_output/growthrate_vs_alpha_varnu.pdf") #now look at growth-optimal solution fig, axs = plt.subplots(2,1, figsize=(2.8,5)) axs[0].set_xlim(0,2.2) axs[1].set_xlim(0,2.2) nuarray=np.linspace(0.01,10,100) gropt_alphaR,gropt_pc,gropt_gr,gropt_transl=optphiR_solution(betav,gamma_maxv,nuarray,a0v,phiOv,fracactivev) #ribosome fraction axs[0].plot(gropt_gr,100*gropt_alphaR,ls='-',color='k',label='opt. 
growth') #translation axs[1].plot(gropt_gr,gropt_transl,ls='-',color='k') #offset fitc=np.polyfit(gropt_gr[int(nuarray.shape[0]/2.):],100*gropt_alphaR[int(nuarray.shape[0]/2.):],1) xgrr=np.linspace(0,2,20) axs[0].plot(xgrr,xgrr*fitc[0]+fitc[1],color='k',ls='--') #plot different nutrient values as examples for il in range(0,nul.shape[0]): axs[0].plot(groptl[il],100*alphRoptl[il],color=colorl[il],marker='o') #plot transltion rate axs[1].plot(groptl[il],transllist[il],color=colorl[il],marker='o') #do the same thing, but look at constant ribosome level alphRoptc=alphRoptl[-1] pcconR_alphaR,pcconR_pc,pcconR_gr,pccon_transl=constphiR_solution(betav,gamma_maxv,nuarray,a0v,phiOv,alphRoptc,fracactivev) axs[0].plot(pcconR_gr,100*pcconR_alphaR,ls='-.',color='gray',label='constr. rib. levels') axs[1].plot(pcconR_gr,pccon_transl,ls='-.',color='gray') #alphRconstl=[] grconstl=[] #prconstl=[] for il in range(0,nul.shape[0]): pcconR_alphaR,pcconR_pc,pcconR_gr,pccon_transl=constphiR_solution(betav,gamma_maxv,nul[il],a0v,phiOv,alphRoptc,fracactivev) axs[0].plot(pcconR_gr,100*pcconR_alphaR,color=colorl[il],marker='<') axs[1].plot(pcconR_gr,pccon_transl,color=colorl[il],marker='<') grconstl.append(pcconR_gr) #do the same thing, but what happens when ribosome activity is not changing avaluec=proptl[-1] #alphRconstactl=[] grconstactl=[] #prconstactl=[] pccon_alphaR,pccon_pc,pccon_gr,pccon_transl=constPCsolution(betav,gamma_maxv,nuarray,a0v,avaluec,phiOv,fracactivev) axs[0].plot(pccon_gr,100*pccon_alphaR,ls=':',color='gray',label='constr. rib. 
activity') axs[1].plot(pccon_gr,pccon_transl,ls=':',color='gray') for il in range(0,nul.shape[0]): pccon_alphaR,pccon_pc,pccon_gr,pccon_transl=constPCsolution(betav,gamma_maxv,nul[il],a0v,avaluec,phiOv,fracactivev) axs[0].plot(pccon_gr,100*pccon_alphaR,color=colorl[il],marker='^') axs[1].plot(pccon_gr,pccon_transl,color=colorl[il],marker='^') grconstactl.append(pccon_gr) axs[0].set_ylabel(rlabel) axs[0].set_xlabel("growth-rate $\lambda\, (1/h)$") axs[1].set_ylabel('trans. elongation speed') axs[1].set_xlabel("growth-rate $\lambda\, (1/h)$") if 3>2: #fit Mori et al. grr=np.linspace(0,2,100) #axs[0].plot(grr,0.0967+0.2206*grr,color='r',label='Mori et al',alpha=0.5) #axs[1,0].plot(,color='r',label='observ transl.') #take data from RNAseq (c limitation) grdatac=np.array([0.91,0.75, 0.56, 0.51, 0.3 ]) ribosabundanc=np.array([19.18590608, 14.22365961, 12.50443378, 10.73007257, 8.74428159])/100. axs[0].plot(grdatac,100*ribosabundanc,marker='s',ls='',markeredgewidth=2,markeredgecolor='k',markerfacecolor='None',label='RNA-Seq',zorder=-1) #fitc=np.polyfit(grdatac,ribosabundanc,1) #grrc=np.linspace(0,2,100) #axs[0].plot(grrc,100*(grrc*fitc[0]+fitc[1]),color='r',label='RNA-seq',alpha=0.5) if 3>2:#TRANSLATION SPEED - DAI AT AL. 
grdatac=np.array([1.83,1.28,1.12,0.98,0.75,0.69,0.69,0.55,0.5,0.46,0.41,0.34,0.38,0.33,0.29,0.2,0.23,0.13,0.035]) elongr=np.array([16.7,16.3,16.1,15.9,14.9,15,14.7,13.7,13.1,12.6,13,12.4,12.4,12,12.1,11.6,12.3,10.7,9.4]) axs[1].plot(grdatac,elongr,marker='s',ls='',markeredgewidth=2,markeredgecolor='k',markerfacecolor='None',label='Dai et al',zorder=-1) axs[0].legend() plt.tight_layout() plt.savefig("plot_output/growthrate_vs_alpha_varnu_opt.pdf") #make a bar diagram with different growth-rates #constant ribosomes fig, axs = plt.subplots(1,1, figsize=(3.8,2.3)) barc=[1,2,3,5,6,7,9,10,11,13,14,15,17,18,19,21,22,23] axs.set_xticks(barc) axs.set_xticklabels(["",str(nul[0]),"","",str(nul[1]),"","",str(nul[2]),"","",str(round(nul[3],2)),"","",str(nul[4]),"","",str(nul[5]),""]) barcc=-1 for il in range(0,6): if il==0: labelc="optimal growth" else: labelc=None barcc=barcc+1 axs.bar(barc[barcc],groptl[il],color=colorl[il],label=labelc) barcc=barcc+1 if il==0: labelc="const. transl." else: labelc=None axs.bar(barc[barcc],grconstactl[il],hatch='\\\\',color=colorl[il],label=labelc) barcc=barcc+1 if il==0: labelc="constant rib. fract." else: labelc=None axs.bar(barc[barcc],grconstl[il],hatch='//',color=colorl[il],label=labelc) axs.set_ylabel("growth-rate") axs.set_xlabel("nutrient efficiency $\\nu\, (1/h)$") axs.legend() plt.tight_layout() plt.savefig("plot_output/bardiagram_differences.pdf") #constant ribosomes fig, axs = plt.subplots(1,1, figsize=(3.8,2.3)) barc=[1,2,4,5,7,8,10,11,13,14,16,17] axs.set_xticks(barc) axs.set_xticklabels(["",str(nul[0]),"",str(nul[1]),"",str(nul[2]),"",str(round(nul[3],2)),"",str(nul[4]),"",str(nul[5])]) barcc=-1 for il in range(0,6): barcc=barcc+1 if il==0: labelc="const. transl." else: labelc=None axs.bar(barc[barcc],100*grconstactl[il]/groptl[il],hatch='\\\\',color=colorl[il],label=labelc) barcc=barcc+1 if il==0: labelc="constant rib. fract." 
else: labelc=None axs.bar(barc[barcc],100*grconstl[il]/groptl[il],hatch='//',color=colorl[il],label=labelc) axs.set_ylabel("growth (fract. optimum, $\%$)") axs.set_xlabel("nutrient efficiency $\\nu\, (1/h)$") #axs.legend() plt.tight_layout() plt.savefig("plot_output/bardiagram_differences_fraction.pdf") # - # # Variation fraction other # + # Plot different nutrient qualifies....how growth is varying with ribosome fraction #decide which nutrient qualities should be probed a0v=0.0013*20# gamma_maxv=20 betav=1 nuv=10 phiOv=0.35 fracactivev=0.65 phiOvl=np.linspace(0.1,0.8,8) #prepare figure fig, axs = plt.subplots(3,1, figsize=(2.8,6.9)) axs[0].set_ylabel("growth-rate $\lambda\, (1/h)$") axs[1].set_ylabel("precursor $m_{PC}\, (per AA)$") axs[2].set_ylabel("translation rate $(AA/s)$") rlabel='allocation to translation $\\alpha_T\, (\%)$ ' axs[1].set_xlabel(rlabel) axs[0].set_xlabel(rlabel) axs[2].set_xlabel(rlabel) axs[0].set_xlim(0,100) axs[1].set_xlim(0,100) axs[2].set_xlim(0,100) axs[1].set_yscale('log') colorl=['#a6cee3','#1f78b4','#b2df8a','#33a02c','#fb9a99','#e31a1c','#fdbf6f','#ff7f00','#cab2d6','#6a3d9a','#ffff99','#b15928'] #go through different nutrient qualities alphRoptl=[] groptl=[] proptl=[] gropt_translist=[] for il in range(0,phiOvl.shape[0]): colorc=colorl[il] x=np.linspace(0.01,(1-phiOvl[il])*0.99,100) #get solution vor varying parameters (here phiR) phiOv=phiOvl[il] [grc,pcc,tcc]=grsolution(betav,gamma_maxv,nuv,a0v,phiOv,x,fracactivev) #get phiR for optimal growth gropt_alphaR,gropt_pc,gropt_gr,gropt_transl=optphiR_solution(betav,gamma_maxv,nuv,a0v,phiOv,fracactivev) alphRoptl.append(gropt_alphaR) groptl.append(gropt_gr) proptl.append(gropt_pc) gropt_translist.append(gropt_transl) #get phi for pc levels constant #avaluev=10.0*a0v #pccon_alphaR,pccon_pc,pccon_gr=constPCsolution(betav,gamma_maxv,nuv,a0v,avaluev,phiOv) #plot varying growth-rate axs[0].plot(100*x,grc,color=colorc,ls='-') #plot varying pc concentrations 
axs[1].plot(100*x,pcc,color=colorc,ls='-') axs[2].plot(100*x,tcc,color=colorc,ls='-') #plot growth-optimal case axs[0].axvline(100*gropt_alphaR,ls='--',color=colorc,alpha=alphac) axs[1].axvline(100*gropt_alphaR,ls='--',color=colorc,alpha=alphac) axs[2].axvline(100*gropt_alphaR,ls='--',color=colorc,alpha=alphac) #plot case with fixed pcc concentration if 3>4: axs[0].axvline(pccon_alphaR,ls='--',color=colorPCcon,alpha=alphac) axs[0].axhline(100*pccon_gr,ls='--',color=colorPCcon,alpha=alphac) axs[1].axvline(pccon_alphaR,ls='--',color=colorPCcon,alpha=alphac) axs[1].axhline(100*pccon_pc,ls='--',color=colorPCcon,alpha=alphac) axs[1].axhline(a0v,label='$K_{M,PC}$',ls=':',color=colorPC,alpha=alphac) plt.legend() plt.tight_layout() plt.savefig("plot_output/growthrate_vs_alpha_varother.pdf") #now look at growth-optimal solution fig, axs = plt.subplots(2,1, figsize=(2.8,4.6)) axs[0].set_xlim(0,2) phi0array=np.linspace(0.,0.8,100) gropt_alphaR,gropt_pc,gropt_gr,gropt_transl=optphiR_solution(betav,gamma_maxv,nuv,a0v,phi0array,fracactivev) #ribosome fraction axs[0].plot(gropt_gr,100*gropt_alphaR,ls='-',color='k') #translation axs[1].plot(gropt_gr,gropt_transl,ls='-',color='k') #offset fitc=np.polyfit(gropt_gr[int(phi0array.shape[0]/2.):],100*gropt_alphaR[int(phi0array.shape[0]/2.):],1) xgrr=np.linspace(0,2,20) axs[0].plot(xgrr,xgrr*fitc[0]+fitc[1],color='k',ls='--') #plot different nutrient values as examples for il in range(0,phiOvl.shape[0]): axs[0].plot(groptl[il],100*alphRoptl[il],color=colorl[il],marker='o') #plot transltion rate axs[1].plot(groptl[il],gropt_translist[il],color=colorl[il],marker='o') axs[0].set_ylabel('allocation to translation $\\alpha_R$') axs[0].set_xlabel("growth rate $\lambda\, (1/h)$") axs[1].set_ylabel('translation $(AA/s)$') axs[1].set_xlabel("growth rate $\lambda\, (1/h)$") if 3>2: #fit Mori at al. 
grr=np.linspace(0,2,100) #axs[0].plot(grr,0.0967+0.2206*grr,color='r',label='Mori et al',alpha=0.5) #axs[1,0].plot(,color='r',label='observ transl.') #take data from RNAseq (c limitation) grdatac=np.array([0.91,0.75, 0.56, 0.51, 0.3 ]) ribosabundanc=np.array([19.18590608, 14.22365961, 12.50443378, 10.73007257, 8.74428159])/100. axs[0].plot(grdatac,100*ribosabundanc,marker='s',ls='',markeredgewidth=2,markeredgecolor='k',markerfacecolor='None',label='RNA-Seq',zorder=-1) #fitc=np.polyfit(grdatac,ribosabundanc,1) #grrc=np.linspace(0,2,100) #axs[0].plot(grrc,100*(grrc*fitc[0]+fitc[1]),color='r',label='RNA-seq',alpha=0.5) if 3>2:#TRANSLATION SPEED - DAI AT AL. grdatac=np.array([1.83,1.28,1.12,0.98,0.75,0.69,0.69,0.55,0.5,0.46,0.41,0.34,0.38,0.33,0.29,0.2,0.23,0.13,0.035]) elongr=np.array([16.7,16.3,16.1,15.9,14.9,15,14.7,13.7,13.1,12.6,13,12.4,12.4,12,12.1,11.6,12.3,10.7,9.4]) axs[1].plot(grdatac,elongr,marker='s',ls='',markeredgewidth=2,markeredgecolor='k',markerfacecolor='None',label='Dai et al',zorder=-1) plt.tight_layout() plt.savefig("plot_output/growthrate_vs_alpha_varother_opt.pdf") # - # # Variaton of both, nutrient efficiency and other proteome fraction # + # Plot different nutrient qualifies....how growth is varying with ribosome fraction #decide which nutrient qualities should be probed a0v=0.0013*20# gamma_maxv=19 betav=1 nuv=10 phiOv=0.35 fracactivev=0.65 #phiOv=0.3 #fit transporter genes #[-14.83514886 21.28571246] #fit motility genes #[-11.6321223 15.79293438] phiOvl=np.linspace(0.35,0.7,5)[::-1] phiOvl=np.append(phiOvl,np.array([0.35]*3)) print(phiOvl) nul=np.linspace(0.05*nuv,10,8) #prepare figure fig, axs = plt.subplots(3,1, figsize=(2.8,6.9)) axs[0].set_ylabel("growth-rate $\lambda\, (1/h)$") axs[1].set_ylabel("charged tRNA $m_{PC}\, (per AA)$") axs[2].set_ylabel("translation rate $(AA/s)$") rlabel='allocation to translation $\\alpha_T\, (\%)$ ' axs[1].set_xlabel(rlabel) axs[0].set_xlabel(rlabel) axs[2].set_xlabel(rlabel) 
axs[0].set_xlim(0,100*(1-phiOvl[-1])*1.05) axs[1].set_xlim(0,100*(1-phiOvl[-1])*1.05) axs[2].set_xlim(0,100*(1-phiOvl[-1])*1.05) axs[1].set_yscale('log') colorl=['#a6cee3','#1f78b4','#b2df8a','#33a02c','#fb9a99','#e31a1c','#fdbf6f','#ff7f00','#cab2d6','#6a3d9a','#ffff99','#b15928'] #go through different nutrient qualities alphRoptl=[] groptl=[] proptl=[] gropt_translist=[] for il in range(0,phiOvl.shape[0]): colorc=colorl[il] x=np.linspace(0.01,(1-phiOvl[il])*0.99,100) #get solution vor varying parameters (here phiR) phiOv=phiOvl[il] nuv=nul[il] [grc,pcc,tcc]=grsolution(betav,gamma_maxv,nuv,a0v,phiOv,x,fracactivev) #get phiR for optimal growth gropt_alphaR,gropt_pc,gropt_gr,gropt_transl=optphiR_solution(betav,gamma_maxv,nuv,a0v,phiOv,fracactivev) alphRoptl.append(gropt_alphaR) groptl.append(gropt_gr) proptl.append(gropt_pc) gropt_translist.append(gropt_transl) #get phi for pc levels constant #avaluev=10.0*a0v #pccon_alphaR,pccon_pc,pccon_gr=constPCsolution(betav,gamma_maxv,nuv,a0v,avaluev,phiOv) #plot varying growth-rate axs[0].plot(x*100,grc,color=colorc,ls='-',label=str(round(nuv))+" "+str(int((phiOv-phiOvl[-1])*100))) axs[0].set_xlabel('allocation to translation $\\alpha_R$') axs[0].set_ylabel("growth-rate $\lambda\, (1/h)$") #plot varying pc concentrations axs[1].plot(x*100,pcc,color=colorc,ls='-') axs[1].set_xlabel('allocation to translation $\\alpha_R$') axs[1].set_ylabel("precursor $m_{PC}\, (per AA)$") #plot growth-optimal case axs[0].axvline(gropt_alphaR*100,ls='--',color=colorc,alpha=alphac) #axs[0].axhline(gropt_gr,ls='--',color=colorc,alpha=alphac) axs[1].axvline(gropt_alphaR*100,ls='--',color=colorc,alpha=alphac) axs[2].axvline(gropt_alphaR*100,ls='--',color=colorc,alpha=alphac) axs[2].plot(x*100,tcc,color=colorc,ls='-') #axs[1].axhline(gropt_pc,ls='--',color=colorc,alpha=alphac) #plot case with fixed pcc concentration if 3>4: axs[0].axvline(pccon_alphaR,ls='--',color=colorPCcon,alpha=alphac) 
#axs[0].axhline(pccon_gr,ls='--',color=colorPCcon,alpha=alphac) axs[1].axvline(pccon_alphaR,ls='--',color=colorPCcon,alpha=alphac) #axs[1].axhline(pccon_pc,ls='--',color=colorPCcon,alpha=alphac) axs[1].axhline(a0v,label='$K_{M,PC}$',ls=':',color=colorPC,alpha=alphac) axs[0].legend(title='$\\nu\, \\alpha_O$') plt.tight_layout() plt.savefig("plot_output/growthrate_vs_alpha_varboth.pdf") #now look at growth-optimal solution fig, axs = plt.subplots(2,1, figsize=(2.8,4.6)) axs[0].set_xlim(0,2) axs[1].set_xlim(0,2) #axs[1].set_ylim(0,1) #phiOvl #phi0array=np.linspace(phiOvl[-1],phiOvl[0],160)[::-1] for ill in range(0,len(phiOvl)-1): if ill==0: phi0array=np.linspace(phiOvl[ill],phiOvl[ill+1],20) else: phi0array=np.append(phi0array,np.linspace(phiOvl[ill],phiOvl[ill+1],20)) nuarray=np.linspace(nul[0],nul[-1],140) gropt_alphaR,gropt_pc,gropt_gr,gropt_transl=optphiR_solution(betav,gamma_maxv,nuarray,a0v,phi0array,fracactivev) #ribosome fraction axs[0].plot(gropt_gr,100*gropt_alphaR,ls='-',color='k') #translation axs[1].plot(gropt_gr,gropt_transl,ls='-',color='k') #offset fitc=np.polyfit(gropt_gr[int(gropt_gr.shape[0]/2.):],100*gropt_alphaR[int(gropt_gr.shape[0]/2.):],1) xgrr=np.linspace(0,2,20) axs[0].plot(xgrr,xgrr*fitc[0]+fitc[1],color='k',ls='--') #plot different nutrient values as examples for il in range(0,phiOvl.shape[0]): axs[0].plot(groptl[il],100*alphRoptl[il],color=colorl[il],marker='o') #axs[0].plot(groptl[il],100*(phiOvl[il]-phiOvl[-1]),color=colorl[il],marker='^') #plot transltion rate axs[1].plot(groptl[il],gropt_translist[il],color=colorl[il],marker='o') axs[0].set_ylabel('allocation to translation $\\alpha_R$') axs[0].set_xlabel("growth rate $\lambda\, (1/h)$") if 3>2: #fit transporter genes and motility genes (2nd part) xrgc=np.linspace(0,2) #axs[0].plot(xrgc,-14.83514886*xrgc+21.28571246-11.6321223*xrgc+15.79293438,color='m') #fit motility genes if 3>2: #fit Mori at al. 
grr=np.linspace(0,2,100) #axs[0].plot(grr,0.0967+0.2206*grr,color='r',label='Mori et al',alpha=0.5) #axs[1,0].plot(,color='r',label='observ transl.') #take data from RNAseq (c limitation) grdatac=np.array([0.91,0.75, 0.56, 0.51, 0.3 ]) ribosabundanc=np.array([19.18590608, 14.22365961, 12.50443378, 10.73007257, 8.74428159])/100. #axs[0].plot(grdatac,100*ribosabundanc,marker='s',ls='',markeredgewidth=2,markeredgecolor='k',markerfacecolor='None',label='RNA-Seq',zorder=-1) #fitc=np.polyfit(grdatac,ribosabundanc,1) #grrc=np.linspace(0,2,100) #axs[0].plot(grrc,100*(grrc*fitc[0]+fitc[1]),color='r',label='RNA-seq',alpha=0.5) if 3>2:#TRANSLATION SPEED - DAI AT AL. grdatac=np.array([1.83,1.28,1.12,0.98,0.75,0.69,0.69,0.55,0.5,0.46,0.41,0.34,0.38,0.33,0.29,0.2,0.23,0.13,0.035]) elongr=np.array([16.7,16.3,16.1,15.9,14.9,15,14.7,13.7,13.1,12.6,13,12.4,12.4,12,12.1,11.6,12.3,10.7,9.4]) axs[1].plot(grdatac,elongr,marker='s',ls='',markeredgewidth=2,markeredgecolor='k',markerfacecolor='None',label='Dai et al',zorder=-1) axs[1].set_ylabel('trans. 
elongation speed $(AA/s)$') axs[1].set_xlabel("growth rate $\lambda\, (1/h)$") plt.tight_layout() plt.savefig("plot_output/growthrate_vs_alpha_varboth_opt.pdf") # - # # Variation translation rate # + # Plot different nutrient qualifies....how growth is varying with ribosome fraction #decide which nutrient qualities should be probed a0v=0.0013*20# gamma_maxv=20 betav=1 nuv=10 phiOv=0.35 fracactivev=0.65 gamma_maxvl=np.linspace(0.1,20.,8) #prepare figure fig, axs = plt.subplots(3,1, figsize=(2.8,6.9)) axs[0].set_ylabel("growth rate $\lambda\, (1/h)$") axs[1].set_ylabel("precursor $m_{PC}\, (per AA)$") axs[2].set_ylabel("translation rate $(AA/s)$") rlabel='allocation to translation $\\alpha_T\, (\%)$ ' axs[1].set_xlabel(rlabel) axs[0].set_xlabel(rlabel) axs[2].set_xlabel(rlabel) axs[0].set_xlim(0,100*(1-phiOv)*1.05) axs[1].set_xlim(0,100*(1-phiOv)*1.05) axs[2].set_xlim(0,100*(1-phiOv)*1.05) axs[1].set_yscale('log') colorl=['#a6cee3','#1f78b4','#b2df8a','#33a02c','#fb9a99','#e31a1c','#fdbf6f','#ff7f00','#cab2d6','#6a3d9a','#ffff99','#b15928'] #go through different nutrient qualities alphRoptl=[] groptl=[] proptl=[] gropt_transllist=[] for il in range(0,gamma_maxvl.shape[0]): colorc=colorl[il] x=np.linspace(0.01,(1.-phiOv)*0.99,100) #get solution vor varying parameters (here phiR) [grc,pcc,tcc]=grsolution(betav,gamma_maxvl[il],nuv,a0v,phiOv,x,fracactivev) #get phiR for optimal growth gropt_alphaR,gropt_pc,gropt_gr,gropt_transl=optphiR_solution(betav,gamma_maxvl[il],nuv,a0v,phiOv,fracactivev) alphRoptl.append(gropt_alphaR) groptl.append(gropt_gr) proptl.append(gropt_pc) gropt_transllist.append(gropt_transl) #get phi for pc levels constant #avaluev=10.0*a0v #pccon_alphaR,pccon_pc,pccon_gr=constPCsolution(betav,gamma_maxv,nuv,a0v,avaluev,phiOv) #plot varying growth-rate axs[0].plot(x*100,grc,color=colorc,ls='-') #plot varying pc concentrations axs[1].plot(x*100,pcc,color=colorc,ls='-') axs[2].plot(x*100,tcc,color=colorc,ls='-') #plot growth-optimal case 
axs[0].axvline(gropt_alphaR*100,ls='--',color=colorc,alpha=alphac) axs[1].axvline(gropt_alphaR*100,ls='--',color=colorc,alpha=alphac) axs[2].axvline(gropt_alphaR*100,ls='--',color=colorc,alpha=alphac) #plot case with fixed pcc concentration if 3>4: axs[0].axvline(pccon_alphaR,ls='--',color=colorPCcon,alpha=alphac) axs[0].axhline(pccon_gr,ls='--',color=colorPCcon,alpha=alphac) axs[1].axvline(pccon_alphaR,ls='--',color=colorPCcon,alpha=alphac) axs[1].axhline(pccon_pc,ls='--',color=colorPCcon,alpha=alphac) axs[1].axhline(a0v,label='$K_{M,PC}$',ls=':',color=colorPC,alpha=alphac) plt.legend() plt.tight_layout() plt.savefig("plot_output/growthrate_vs_alpha_vartransl.pdf") #now look at growth-optimal solution fig, axs = plt.subplots(2,1, figsize=(2.8,4.6)) axs[0].set_xlim(0,2) axs[1].set_xlim(0,2) gammaarray=np.linspace(0.,gamma_maxv,100) gropt_alphaR,gropt_pc,gropt_gr,gropt_transl=optphiR_solution(betav,gammaarray,nuv,a0v,phiOv,fracactivev) #ribosome fraction axs[0].plot(gropt_gr,100*gropt_alphaR,ls='-',color='k') #translation axs[1].plot(gropt_gr,gropt_transl,ls='-',color='k') #linear fit #fitc=np.polyfit(gropt_gr[int(phiarray.shape[0]/2.):],gropt_alphaR[int(phiarray.shape[0]/2.):],1) #xgrr=np.linspace(0,2,20) #axs[0].plot(xgrr,xgrr*fitc[0]+fitc[1],color='k',ls='--') #plot different nutrient values as examples for il in range(0,gamma_maxvl.shape[0]): axs[0].plot(groptl[il],100*alphRoptl[il],color=colorl[il],marker='o') #plot transltion rate axs[1].plot(groptl[il],gropt_transllist[il],color=colorl[il],marker='o') #do the same thing, but look at constant ribosome level alphRoptc=alphRoptl[-1] pcconR_alphaR,pcconR_pc,pcconR_gr,pcconR_tr=constphiR_solution(betav,gammaarray,nuv,a0v,phiOv,alphRoptc,fracactivev) axs[0].plot(pcconR_gr,100*pcconR_alphaR,ls='-.',color='gray') axs[1].plot(pcconR_gr,pcconR_tr,ls='-.',color='gray') grconstactl=[] for il in range(0,gamma_maxvl.shape[0]): 
pcconR_alphaR,pcconR_pc,pcconR_gr,pcconR_transl=constphiR_solution(betav,gamma_maxvl[il],nuv,a0v,phiOv,alphRoptc,fracactivev) axs[0].plot(pcconR_gr,100*pcconR_alphaR,color=colorl[il],marker='<') axs[1].plot(pcconR_gr,pcconR_transl,color=colorl[il],marker='<') grconstactl.append(pcconR_gr) #do the same thing, but what happens when ribosome activity is not changing avaluec=proptl[-1] if 3>2: pccon_alphaR,pccon_pc,pccon_gr,pccon_transl=constPCsolution(betav,gammaarray,nuv,a0v,avaluec,phiOv,fracactivev) #axs[0].plot(pccon_gr,100*pccon_alphaR,ls=':',color='gray') #axs[1].plot(pccon_gr,pccon_transl,ls=':',color='gray') grconstl=[] for il in range(0,gamma_maxvl.shape[0]): pccon_alphaR,pccon_pc,pccon_gr,pccon_transl=constPCsolution(betav,gamma_maxvl[il],nuv,a0v,avaluec,phiOv,fracactivev) axs[0].plot(pccon_gr,100*pccon_alphaR,color=colorl[il],marker='^') axs[1].plot(pccon_gr,pccon_transl,color=colorl[il],marker='^') grconstl.append(pccon_gr) axs[0].set_ylabel('allocation to translation $\\alpha_R$') axs[0].set_xlabel("growth rate $\lambda\, (1/h)$") axs[1].set_ylabel('tran. elongation speed $(AA/s)$') axs[1].set_xlabel("growth rate $\lambda\, (1/h)$") axs[0].set_ylim(0,70) plt.tight_layout() plt.savefig("plot_output/growthrate_vs_alpha_vartrans_opt.pdf") #### #constant ribosomes fig, axs = plt.subplots(1,1, figsize=(3.8,2.3)) barc=[1,2,3,5,6,7,9,10,11,13,14,15,17,18,19,21,22,23] axs.set_xticks(barc) labelll=["",str(round(gamma_maxvl[0],1)),"","",str(round(gamma_maxvl[1],1)),"","",str(round(gamma_maxvl[2],1)),"","",str(round(gamma_maxvl[3],2)),"","",str(round(gamma_maxvl[4],1)),"","",str(round(gamma_maxvl[5],))] axs.set_xticklabels(labelll) barcc=-1 for il in range(0,6): if il==0: labelc="optimal growth" else: labelc=None barcc=barcc+1 axs.bar(barc[barcc],groptl[il],color=colorl[il],label=labelc) barcc=barcc+1 if il==0: labelc="const. transl." 
else: labelc=None axs.bar(barc[barcc],grconstactl[il],hatch='\\\\',color=colorl[il],label=labelc) barcc=barcc+1 if il==0: labelc="constant rib. fract." else: labelc=None axs.bar(barc[barcc],grconstl[il],hatch='//',color=colorl[il],label=labelc) axs.set_ylabel("growth rate") axs.set_xlabel("trans. elongation speed $(AA/s)$") axs.legend() plt.tight_layout() plt.savefig("plot_output/bardiagram_differences_trans.pdf") #constant ribosomes fig, axs = plt.subplots(1,1, figsize=(3.8,2.3)) barc=[1,2,4,5,7,8,10,11,13,14,16,17] axs.set_xticks(barc) labelll=["",str(round(gamma_maxvl[0],1)),"",str(round(gamma_maxvl[1],1)),"",str(round(gamma_maxvl[2],1)),"",str(round(gamma_maxvl[3],2)),"",str(round(gamma_maxvl[4],1)),"",str(round(gamma_maxvl[5],))] axs.set_xticklabels(labelll) barcc=-1 for il in range(0,6): barcc=barcc+1 if il==0: labelc="constant ribo. fract." else: labelc=None axs.bar(barc[barcc],100*grconstactl[il]/groptl[il],hatch='\\\\',color=colorl[il],label=labelc) barcc=barcc+1 if il==0: labelc="const. tRNA levels" else: labelc=None axs.bar(barc[barcc],100*grconstl[il]/groptl[il],hatch='//',color=colorl[il],label=labelc) axs.set_ylabel("growth (fract. optimum, $\%$)") axs.set_xlabel("trans. 
elongation speed $(AA/s)$") #axs.legend() plt.tight_layout() plt.savefig("plot_output/bardiagram_differences_fraction_trans.pdf") # + #variation translation rate for different nutrient qualities # Plot different nutrient qualifies....how growth is varying with ribosome fraction #decide which nutrient qualities should be probed a0v=0.0013*20# gamma_maxv=20 betav=1 nuv=10 phiOv=0.35 fracactivev=0.65 gamma_maxvl=np.linspace(0.1,20.,8) fig, axs = plt.subplots(2,1, figsize=(2.8,4.6)) axs[0].set_xlim(0,2.2) axs[1].set_xlim(0,2.2) for nuv in [2,4,6,8,10]: alphRoptl=[] groptl=[] proptl=[] gropt_transllist=[] for il in range(0,gamma_maxvl.shape[0]): colorc=colorl[il] x=np.linspace(0.01,(1.-phiOv)*0.99,100) #get solution vor varying parameters (here phiR) [grc,pcc,tcc]=grsolution(betav,gamma_maxvl[il],nuv,a0v,phiOv,x,fracactivev) #get phiR for optimal growth gropt_alphaR,gropt_pc,gropt_gr,gropt_transl=optphiR_solution(betav,gamma_maxvl[il],nuv,a0v,phiOv,fracactivev) alphRoptl.append(gropt_alphaR) groptl.append(gropt_gr) proptl.append(gropt_pc) gropt_transllist.append(gropt_transl) #get phi for pc levels constant #avaluev=10.0*a0v #pccon_alphaR,pccon_pc,pccon_gr=constPCsolution(betav,gamma_maxv,nuv,a0v,avaluev,phiOv) #now look at growth-optimal solution gammaarray=np.linspace(0.,gamma_maxv,100) gropt_alphaR,gropt_pc,gropt_gr,gropt_transl=optphiR_solution(betav,gammaarray,nuv,a0v,phiOv,fracactivev) #ribosome fraction axs[0].plot(gropt_gr,100*gropt_alphaR,ls='-',color='k') #translation axs[1].plot(gropt_gr,gropt_transl,ls='-',color='k') #plot different nutrient values as examples for il in range(0,gamma_maxvl.shape[0]): axs[0].plot(groptl[il],100*alphRoptl[il],color=colorl[il],marker='o') #plot transltion rate axs[1].plot(groptl[il],gropt_transllist[il],color=colorl[il],marker='o') ############################### #now do same thing for varying nutrient quality more smoothling ################################ nul=np.linspace(0.05*nuv,nuv,6) 
x=np.linspace(0.01,(1.-phiOv)*0.99,100) alphRoptl=[] groptl=[] proptl=[] transllist=[] for il in range(0,nul.shape[0]): colorc=colorl[il] #get solution vor varying parameters (here phiR) nuv=nul[il] [grc,pcc,tcc]=grsolution(betav,gamma_maxv,nuv,a0v,phiOv,x,fracactivev) #get phiR for optimal growth gropt_alphaR,gropt_pc,gropt_gr,gropt_transl=optphiR_solution(betav,gamma_maxv,nuv,a0v,phiOv,fracactivev) alphRoptl.append(gropt_alphaR) groptl.append(gropt_gr) proptl.append(gropt_pc) transllist.append(gropt_transl) #now look at growth-optimal solution nuarray=np.linspace(0.01,10,100) gropt_alphaR,gropt_pc,gropt_gr,gropt_transl=optphiR_solution(betav,gamma_maxv,nuarray,a0v,phiOv,fracactivev) #ribosome fraction axs[0].plot(gropt_gr,100*gropt_alphaR,ls='-',color='k',label='opt. growth') #translation axs[1].plot(gropt_gr,gropt_transl,ls='-',color='k') #offset fitc=np.polyfit(gropt_gr[int(nuarray.shape[0]/2.):],100*gropt_alphaR[int(nuarray.shape[0]/2.):],1) xgrr=np.linspace(0,2,20) axs[0].plot(xgrr,xgrr*fitc[0]+fitc[1],color='k',ls='--') #plot different nutrient values as examples for il in range(0,nul.shape[0]): axs[0].plot(groptl[il],100*alphRoptl[il],color='gray',marker='o') axs[1].plot(groptl[il],transllist[il],color='gray',marker='o') if 3>2: #fit Mori at al. grr=np.linspace(0,2,100) #axs[0].plot(grr,0.0967+0.2206*grr,color='r',label='Mori et al',alpha=0.5) #axs[1,0].plot(,color='r',label='observ transl.') #take data from RNAseq (c limitation) grdatac=np.array([0.91,0.75, 0.56, 0.51, 0.3 ]) ribosabundanc=np.array([19.18590608, 14.22365961, 12.50443378, 10.73007257, 8.74428159])/100. axs[0].plot(grdatac,100*ribosabundanc,marker='s',ls='',markeredgewidth=2,markeredgecolor='k',markerfacecolor='None',label='RNA-Seq',zorder=-1) #fitc=np.polyfit(grdatac,ribosabundanc,1) #grrc=np.linspace(0,2,100) #axs[0].plot(grrc,100*(grrc*fitc[0]+fitc[1]),color='r',label='RNA-seq',alpha=0.5) if 3>2:#TRANSLATION SPEED - DAI AT AL. 
grdatac=np.array([1.83,1.28,1.12,0.98,0.75,0.69,0.69,0.55,0.5,0.46,0.41,0.34,0.38,0.33,0.29,0.2,0.23,0.13,0.035]) elongr=np.array([16.7,16.3,16.1,15.9,14.9,15,14.7,13.7,13.1,12.6,13,12.4,12.4,12,12.1,11.6,12.3,10.7,9.4]) axs[1].plot(grdatac,elongr,marker='s',ls='',markeredgewidth=2,markeredgecolor='k',markerfacecolor='None',label='Dai et al',zorder=-1) ################ #finish plotting ################ axs[0].set_ylabel('allocation to translation $\\alpha_R$') axs[0].set_xlabel("growth rate $\lambda\, (1/h)$") axs[1].set_ylabel('tran. elongation speed $(AA/s)$') axs[1].set_xlabel("growth rate $\lambda\, (1/h)$") axs[0].set_ylim(0,70) plt.tight_layout() plt.savefig("plot_output/growthrate_vs_alpha_vartrans_opt_diffnu.pdf") # -
# source notebook: code/analysis/ribosomeallocation_steadystate(2).ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + [markdown] id="DpXKKojlQOn_" # ### Problem Statement # # Do you trust all the news you hear from social media? Not all news is real, right? So how will you detect the fake news? The answer is Python. By practicing this advanced python project of detecting fake news, you will easily tell the difference between real and fake news. # # A type of yellow journalism, fake news encapsulates pieces of news that may be hoaxes and is generally spread through social media and other online media. This is often done to further or impose certain ideas and is often achieved with political agendas. Such news items may contain false and/or exaggerated claims, and may end up being viralized by algorithms, and users may end up in a filter bubble. # - # ## Passive-Aggressive Classifier # # The Passive-Aggressive algorithms are a family of Machine learning algorithms that are not very well known by beginners and even intermediate Machine Learning enthusiasts. However, they can be very useful and efficient for certain applications. # # Note: This is a high-level overview of the algorithm explaining how it works and when to use it. It does not go deep into the mathematics of how it works. # Passive-Aggressive algorithms are generally used for large-scale learning. It is one of the few ‘online-learning algorithms’. In online machine learning algorithms, the input data comes in sequential order and the machine learning model is updated step-by-step, as opposed to batch learning, where the entire training dataset is used at once. This is very useful in situations where there is a huge amount of data and it is computationally infeasible to train the entire dataset because of the sheer size of the data. 
We can simply say that an online-learning algorithm will get a training example, update the classifier, and then throw away the example. # # A very good example of this would be to detect fake news on a social media website like Twitter, where new data is being added every second. To dynamically read data from Twitter continuously, the data would be huge, and using an online-learning algorithm would be ideal. # # Passive-Aggressive algorithms are somewhat similar to a Perceptron model, in the sense that they do not require a learning rate. However, they do include a regularization parameter. # ## How Passive-Aggressive Algorithms Work: # Passive-Aggressive algorithms are called so because : # # - Passive: If the prediction is correct, keep the model and do not make any changes. i.e., the data in the example is not enough to cause any changes in the model. # # - Aggressive: If the prediction is incorrect, make changes to the model. i.e., some change to the model may correct it. # Understanding the mathematics behind this algorithm is not very simple and is beyond the scope of a single article. This article provides just an overview of the algorithm and a simple implementation of it. To learn more about the mathematics behind this algorithm, I recommend watching this excellent video on the algorithm’s working by Dr <NAME>. # # Important parameters: # # - C : This is the regularization parameter, and denotes the penalization the model will make on an incorrect prediction # max_iter : The maximum number of iterations the model makes over the training data. # # - tol : The stopping criterion. If it is set to None, the model will stop when (loss > previous_loss – tol). By default, it is set to 1e-3. # ## Conclusion: # If you want to work on big data, this is a very important classifier and I encourage you to go ahead and try to build a project using this classifier and use live data from a social media website like Twitter as input. 
There will be a huge amount of data coming in every second and this classifier will be able to handle data of this size. # + [markdown] id="44w3KKJ_RMm-" # ### Import libraries # + id="JgQZgTAuQDUP" import numpy as np import pandas as pd import matplotlib.pyplot as plt import seaborn as sns # - # + [markdown] id="wes3fAv9RU12" # ### Load dataset # + [markdown] id="0xHlAhJ1TPg1" # The first column identifies the news, the second and third are the title and text, and the fourth column has labels denoting whether the news is REAL or FAKE. # + colab={"base_uri": "https://localhost:8080/", "height": 406} id="w4jqClywRY-X" outputId="79f4d68d-2733-425d-b225-267fa3f3ef21" # load datasets df = pd.read_csv('/content/drive/MyDrive/Colab Notebooks/news.csv') df # - # + [markdown] id="ulGlQzuPTo04" # ### Rename column # + id="u7wje2ivTr4p" df = df.rename(columns = {'Unnamed: 0':'news'}) # - # + [markdown] id="EN_90hskTZNr" # ### Analyse data # + colab={"base_uri": "https://localhost:8080/"} id="6QGORk88TbWp" outputId="da2640b5-6bb6-4731-9094-6e3c57272680" df.info() # - # + [markdown] id="uimjiOAXUX_N" # ### Check for missing values # + colab={"base_uri": "https://localhost:8080/"} id="AXt9Fn72Ubac" outputId="ead4f9fd-947b-42c7-d6c5-1467be6dc00a" df.isnull().sum() # - # + [markdown] id="vdea3TlSUi0D" # ### Analyse label # + colab={"base_uri": "https://localhost:8080/"} id="145tlB2SUmnz" outputId="7dbeddeb-505c-49bb-8046-e34b7f173f71" target_count = df.groupby('label').label.count() target_count # + colab={"base_uri": "https://localhost:8080/"} id="UX2czvamVfqk" outputId="40250ba9-cfbf-442f-98ca-4481d68011bf" percent_target = (target_count / len(df)) * 100 percent_target # + colab={"base_uri": "https://localhost:8080/", "height": 294} id="JaPyGOjzWHAh" outputId="f26b33f1-2b23-42f9-adc9-4fd34d6cf997" df.groupby('label').label.count().plot.bar(ylim=0) plt.show() # - # + [markdown] id="bQy5ErCbWTGO" # ### Map label # + colab={"base_uri": "https://localhost:8080/", "height": 406} 
id="S6UCzVafWVdu" outputId="65256b4a-cc20-4da5-bc9f-f866e224ae4c" dic = {'REAL':1 ,'FAKE':0} df.label = df.label.map(dic) df # - # + [markdown] id="IgsNANhBn9Jk" # ### Preprocess raw text and get ready for machine learning # + id="m5nnDaTKoA7D" # Importing HTMLParser from html.parser import HTMLParser html_parser = HTMLParser() # + id="iR6qLDAToJLD" # Created a new columns i.e. clean_tweet contains the same tweets but cleaned version df['processedtext'] = df['text'].apply(lambda x: html_parser.unescape(x)) # + colab={"base_uri": "https://localhost:8080/"} id="LQkLj-ZoobnA" outputId="9811bad6-e976-4f03-9b8b-939ccfe5325b" import nltk from nltk.stem import PorterStemmer from nltk.corpus import stopwords nltk.download('stopwords') import re import warnings warnings.filterwarnings('ignore') stemmer = PorterStemmer() words = stopwords.words("english") df['processedtext'] = df['processedtext'].apply(lambda x: " ".join([stemmer.stem(i) for i in re.sub("[^a-zA-Z]", " ", x).split() if i not in words]).lower()) # + id="AeE8YqjDqWvw" #make all words lower case df['processedtext'] = df['processedtext'].str.lower() # remove special characters, numbers, punctuations df['processedtext'] = df['processedtext'].str.replace("[^a-zA-Z#]", " ") #remove words less than 3 characters df['processedtext'] = df['processedtext'].apply(lambda x: ' '.join([w for w in x.split() if len(w)>3])) # + colab={"base_uri": "https://localhost:8080/", "height": 380} id="LBJ6es7dq-9o" outputId="6e9c221c-9c03-4657-ab47-ceabbb1bcf80" fake_words = ' '.join([text for text in df['processedtext']]) from wordcloud import WordCloud wordcloud = WordCloud(width=800, height=500, random_state=21, max_font_size=110).generate(fake_words) plt.figure(figsize=(10, 7)) plt.imshow(wordcloud, interpolation="bilinear") plt.axis('off') plt.show() # - # + [markdown] id="CvmOlSavrTd_" # ### Define X and y # + id="epmrv7nhrV3n" #define X and y y = df['label'] X = df['processedtext'] # - # + [markdown] id="v1-KP-kvr4j0" # ### 
Convert text to word frequency vectors # + colab={"base_uri": "https://localhost:8080/"} id="EmEfwSuCr9SF" outputId="ff8c267c-885c-49f5-ed94-0b399993c219" from sklearn.feature_extraction.text import TfidfVectorizer vectorizer_tfidf = TfidfVectorizer(stop_words='english', max_df=0.7) df_tfIdf = vectorizer_tfidf.fit_transform(X.values.astype('U')) print(vectorizer_tfidf.get_feature_names()[:10]) # - # + [markdown] id="fH5j3pyesWeR" # ### Training and validation Splits # + colab={"base_uri": "https://localhost:8080/"} id="sfn_vtLWsZys" outputId="6a832561-af69-48c8-8818-4bcf739b8625" from sklearn.model_selection import train_test_split X_train, X_val, y_train, y_val = train_test_split(df_tfIdf, y, test_size=0.10, random_state=1, shuffle=True) X_train.shape, X_val.shape, y_train.shape,y_val.shape # - # + [markdown] id="U690OBrcs58A" # ### model # + [markdown] id="OBS4Lvgt-tXh" # Passive Aggressive algorithms are online learning algorithms. Such an algorithm remains passive for a correct classification outcome, and turns aggressive in the event of a miscalculation, updating and adjusting. Unlike most other algorithms, it does not converge. Its purpose is to make updates that correct the loss, causing very little change in the norm of the weight vector. 
# + colab={"base_uri": "https://localhost:8080/"} id="-wES_ke-s4tx" outputId="a676a4d1-c719-48bc-f36d-6edabe3f1a18" from sklearn.linear_model import PassiveAggressiveClassifier model = PassiveAggressiveClassifier(max_iter=10000, random_state=1,tol=1e-3).fit(X_train, y_train) print(model.score(X_train, y_train)) # - # + [markdown] id="XU5p8wjn_96e" # ### Predict on validation set # + colab={"base_uri": "https://localhost:8080/"} id="IMXWbVhL__3e" outputId="c0252b06-67fd-4ce7-bf08-1dd075efb80c" y_pred = model.predict(X_val) print(model.score(X_val, y_val)) # - # + [markdown] id="Jj3HEvRkAVAs" # ### Confusion matrix # + colab={"base_uri": "https://localhost:8080/"} id="KPH7-ptGJZpB" outputId="b2b8dc0e-e88d-461b-ded3-882e450d5017" from sklearn.metrics import confusion_matrix print(confusion_matrix(y_val,y_pred)) # + colab={"base_uri": "https://localhost:8080/", "height": 406} id="QDJUwzL7KTyY" outputId="173ce82b-618a-40d8-a36e-1cf6d5a1f0b6" df_val = pd.DataFrame({'Actual': y_val, 'Predicted':y_pred}) df_val # -
FakeNewsClassifierUsingPassiveAggrasiveClassifier.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # <img src="images/dask_horizontal.svg" align="right" width="30%"> # # Table of Contents # * [Distributed](#Distributed) # * [Making a cluster](#Making-a-cluster) # * [Detailed method](#Detailed-method) # * [Simple method](#Simple-method) # * [Executing with the distributed client](#Executing-with-the-distributed-client) # # # Distributed # As we saw in Foundations, Dask allows you to simply construct graphs of tasks with dependencies. In fact, if you skip forward, you will find that graphs can also be created automatically for you using functional, Numpy or Pandas syntax on data collections. None of this would be very useful, if there weren't also a way to execute these graphs, in a parallel and memory-aware way. Dask comes with four available schedulers: # - dask.threaded.get: a scheduler backed by a thread pool # - dask.multiprocessing.get: a scheduler backed by a process pool # - dask.async.get_sync: a synchronous scheduler, good for debugging # - distributed.Client.get: a distributed scheduler for executing graphs on multiple machines. # # To select one of these for computation, you can specify at the time of asking for a result # ```python # myvalue.compute(get=dask.async.get_sync) # for debugging # ``` # or set the current default, either temporarily or globally # ```python # with dask.set_options(get=dask.multiprocessing.get): # # set temporarily fo this block only # myvalue.compute() # # dask.set_options(get=dask.multiprocessing.get) # # set until further notice # ``` # For single-machine use, the threaded and multiprocessing schedulers are fine choices. However, for scaling out work across a cluster, the distributed scheduler is required. 
Indeed, this is now generally preferred for all work, because it gives you additional monitoring information not available in the other schedulers. (Some of this monitoring is also available with an explicit progress bar and profiler, see [here](http://dask.pydata.org/en/latest/diagnostics.html).) # ## Making a cluster # ### Detailed method # The following process explains what happens under the hood when setting up a computation environment with the Dask distributed scheduler *by hand*. It is not necessary to do this for the rest of the tutorial, but understanding what is going on will help a great deal when scaling up computations across a cluster. Users may wish to skip this section for now, and continue with the Simple method, below. # # **The scheduler** # # In a terminal, type the following: # ``` # dask-scheduler # ``` # You will get text something like the following: # ``` # distributed.scheduler - INFO - ----------------------------------------------- # distributed.scheduler - INFO - Scheduler at: 192.168.0.11:8786 # distributed.scheduler - INFO - bokeh at: 192.168.0.11:8788 # distributed.scheduler - INFO - http at: 192.168.0.11:9786 # distributed.bokeh.application - INFO - Web UI: http://192.168.0.11:8787/status/ # distributed.scheduler - INFO - ----------------------------------------------- # ``` # The top line gives the address, 192.168.0.1:8786, at which the scheduler is waiting for connections - it is this address that workers and clients need to be given (your IP and/or port numbers may be different). The further addresses are for an in-process bokeh graph server for scheduler debugging, a JSON http endpoint for information about the server, and, finally, the URL of the main monitoring dashboard; you can type this into a web-browser, but it will not show much information yet. # # The scheduler cannot do much without workers. 
We can create a worker process with: # ``` # dask-worker 192.168.0.11:8786 # ``` # where the address should be the same as given by the scheduler process, above. By default, the worker will start a monitoring process (the *nanny*), and a worker process with the number of threads equal to the number of cores (all the values can be changed). The worker has its own http and bokeh server. From the text displayed in the console, we see that the worker connects to the scheduler - information is also printed by the scheduler indicating that it has received a connection from a worker. Notice that this worker process could have been on a different machine from the scheduler. # # Next, in a new Python session (perhaps in the notebook, or another console, we can do # ```python # from dask.distributed import Client # c = Client('192.168.0.11:8786') # ``` # to connect to the scheduler. Again, the address must match the scheduler, above, and that, again, the scheduler logs the connection from the client. This client is now ready to accept work, and coordinate with the scheduler such that tasks get executed by the threads of the worker process. # The three Python-running consoles might look something like the following: # ![distributed session](images/distributed_session.png) # # Note that both the scheduler and worker commands accept a number of parameters to define the ports used, the number of threads/processed, memory limits, etc. - these will become useful when customising deployments. # # A similar method can be used to set up the scheduler and workers across a number of cluster nodes, and connect to them from a client to do work. There are some automated options for achieving this, including for resource management and dynamic clustering scenarios, see [here](http://distributed.readthedocs.io/en/latest/setup.html). # ### Simple method # Throughout the rest of this tutorial, we will be using the default Dask distributed cluster. 
This gets created automatically when creating a client with no arguments, if no client has yet been defined. Creating any distributed client also sets it to be the default executor of Dask `compute` calls, unless otherwise specified. # be sure to shut down other kernels running distributed clients from dask.distributed import Client c = Client() c # The scheduler is now listening on your laptop, and has a number of worker processes connected. Furthermore, the web UI will be available on `127.0.0.1:8787/status` - you can open this in a new tab of your browser. Other monitoring output is also available, e.g., `/tasks`. # # Note that you should close any other open kernels using a distributed cluster created this way, because otherwise the new one will not be able to use port 8787, and you will not be able to access the monitoring dashboard. # # ![ui](images/ui.png) # No tasks are yet being processed, and no data is held in the memory of the workers, so the lower part of the display is empty for the moment. # ## Executing with the distributed client # Consider some trivial calculation, as in previous sections, where we have added sleep statements in order to simulate real work being done. # + from dask import delayed import time def inc(x): time.sleep(5) return x + 1 def dec(x): time.sleep(3) return x - 1 def add(x, y): time.sleep(7) return x + y # - x = delayed(inc)(1) y = delayed(dec)(2) total = delayed(add)(x, y) total.compute() # The tasks will appear in the web UI as they are processed by the cluster and, eventually, a result will be printed as output of the cell above. Note that the kernel is blocked while waiting for the result. The resulting tasks block graph might look something like below. Hovering over each block gives which function it related to, and how long it took to execute. 
![this](images/tasks.png) # If all you want to do is execute computations created using delayed, or run calculations based on the higher-level data collections (see the coming sections), then that is about all you need to know to scale your work up to cluster scale. However, there is more detail to know about the distributed scheduler that will help with efficient usage. See the chapter Distributed, Advanced.
04_distributed.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # <div style="width: 100%; overflow: hidden;"> # <div style="width: 150px; float: left;"> <img src="data/D4Sci_logo_ball.png" alt="Data For Science, Inc" align="left" border="0"> </div> # <div style="float: left; margin-left: 10px;"> <h1>Epidemics 101</h1> # <h1>Or why your exponential fits of CoVID numbers are wrong</h1> # <p><NAME><br/> # <a href="http://www.data4sci.com/">www.data4sci.com</a><br/> # @bgoncalves, @data4sci</p></div> # </div> # + from collections import Counter from pprint import pprint import pandas as pd import numpy as np import matplotlib.pyplot as plt from mpl_toolkits.axes_grid1.inset_locator import inset_axes from EpiModel import * import watermark # %load_ext watermark # %matplotlib inline # - # We start by print out the versions of the libraries we're using for future reference # # %watermark -n -v -m -g -iv # %watermark -n -v -m -iv # Load default figure style plt.style.use('./d4sci.mplstyle') colors = plt.rcParams['axes.prop_cycle'].by_key()['color'] # ## SI Model # The first model we will look at is called the SI model and it only consists of two compartments (susceptible and infectious) and one transion between them # + beta = 0.2 SI = EpiModel() SI.add_interaction('S', 'I', 'I', beta) print(SI) # - # In other words, when a susceptiblem person encounters an infectious person, s/he aquires the infection with probability 0.2. We can integrate it easily. 
If in our initial population of 100,000 individuals we have 10 who are infected at time zero # + N = 100000 I0 = 10 SI.integrate(100, S=N-I0, I=I0) # - # And to plot it: ax = (SI.I/N).plot(label='Infectious', color=colors[1]) ax.set_xlabel('Time') ax.set_ylabel('Population') #ax.set_title('SI Model') ax.legend() # This isn't very interesting: after a few steps everyone is infected! # ## SIR Model # A more interesting and realistic model is the SIR model. It allows people to recover from the infection after some time, so now we have 3 compartments and 2 transitions # + beta = 0.2 mu = 0.1 SIR = EpiModel() SIR.add_interaction('S', 'I', 'I', beta) SIR.add_spontaneous('I', 'R', mu) # - print(SIR) # And the dynamics is more interesting as well: SIR.integrate(365, S=N-I0, I=I0, R=0) # And a quick visualization ax = SIR.plot('SIR Model', normed=True) ax.legend(['Susceptible', 'Infectious', 'Recovered']) # The purple line is the number of currently infectious cases as a function of time. As we can see, not all of the population is infectious at the same time, and, in fact, only about $80\%$ of the population is ever infected, as shown by the green curve representing the fraction of recovered. # The typical bell curve you're likely to see (as asked to flatten) is simply the number of infectious individuals as a function of time: ax=(SIR.I/N).plot(label='Infectious', color=colors[1]) ax.set_xlabel('time') ax.set_ylabel('Population') ax.legend() # ## Confirmed cases # If you've been paying attention to the news, the numbers of confirmed cases you've been seeing about CoVID19 correspond to, some fraction, $\phi$ of the total number of people that got infected up to that point. 
We can calculate that easily by simply seeing how many "healthy" people we lost as a function of time: # # $$ Confirmed = \phi\left(N-S\right)$$ # If, say, $\phi=10\%$ or $\phi=20\%$ of everyone who gets infected takes the test, then: # + phi = 0.1 ax = ((phi*(N-SIR.S))/N).plot(color=colors[3], linestyle='--', label=r'$10\%$ testing') phi = 0.2 ((phi*(N-SIR.S))/N).plot(ax=ax, color=colors[3], linestyle=':', label=r'$20\%$ testing') ax.set_xlabel('Time') ax.set_ylabel('Confirmed Cases') ax.legend() # - # Naturaly, the higher the testing percentage, the higher the number of confirmed cases. Please note that here we are ignoring the number of healthy individuals that take the test and it comes back negative as we are only interested in the number of confirmed cases. # And the number of recovered cases follows a similar path, with a few days lag. Here we apply the same factor of $\phi$ to the number of recovered, since in principle, those would be the only ones we could observe recovering. phi = 0.1 ax = ((phi*(N-SIR.S))/N).plot(label='Confirmed', color=colors[3]) ((phi*SIR.R)/N).plot(ax=ax, label='Recovered', color=colors[2]) ax.set_xlabel('Time') ax.set_ylabel('Cases') ax.legend() # For the sake of clarity I'm making the simplifying assumption that test are instantanous and happen as soon as the infection happens. This is __slightly__ unrealistic :) # # And indeed the numbers do start off growing exponentially as can be easily seen by plotting them on a log-linear scale. 
# + phi = 0.1 ax = (phi*(N-SIR.S[:50])).plot(color=colors[3], label='Confirmed') t = np.arange(0, 50, 1) y = 2*np.exp((beta-mu)*t) ax.plot(t, y, lw=1, linestyle='--', label='Exponential Trend') ax.set_yscale('log') ax.set_xlabel('Time') ax.set_ylabel('Cases') ax.set_xlim(0, 50) ax.set_ylim(1, 500) ax.legend() # - # And we easily calculate the doubling time doubling_time = np.log(2)/(beta-mu) print("Doubling time: %u days" % np.round(doubling_time)) # ## Non-uniform testing # Now let's look at a more realistic case. What if, instead of testing exactly $\phi=10\%$ of the cases, starting imediatly, it takes us a while to ramp up testing? Say, we start at 0 for the first week and smootly ramp up over the course of three weeks to a steady state rate of $\phi$? phi_t = np.ones(364)*0.1 # The steady state rate phi_t[:7] = 0 # 0 for the first 7 days phi_t[7:28] = np.linspace(0, 0.1, 21) # ramp up over 3 weeks # Now our curves looks a bit more interesting. # + ax = SIR.I.iloc[:49].plot(label='Real cases', color=colors[1]) (phi*(N-SIR.S)).iloc[:49].plot(ax=ax, label='Uniform testing', color=colors[3], linestyle='--') (phi_t*(N-SIR.S)).iloc[:49].plot(ax=ax, label='Dynamic testing rate', color=colors[3]) t = np.arange(0, 49, 1) y = 2*np.exp((beta-mu)*t) ax.plot(t, y, lw=2, linestyle='--', label='Exponential Trend') ax.set_ylim(1, 1000) ax.legend() ax.set_xlabel('Time') ax.set_ylabel('Confirmed Cases') ax.set_yscale('log') # - # Two things should be noted here: # # - By the time we detect the first case, on day 11, the real number of cases is already several dozen # - The increase in testing rate gets muddled together with the increase in the number of cases to look like a much faster increase # # Naturally, the opposite is also true, if we decrease the number of tests, we see an artificial slowing down of the number of cases # + phi_t[35:42]=np.linspace(0.1, 0.05, 7) # Gradually reduce the number of tests in the 5th week phi_t[42:] *= 0.5 # stay at 0.05 for the rest of the 
time ax = SIR.I.iloc[:49].plot(label='Real cases', color=colors[1]) (phi*(N-SIR.S)).iloc[:49].plot(ax=ax, label='Uniform testing', color=colors[3], linestyle='--') (phi_t*(N-SIR.S)).iloc[:49].plot(ax=ax, label='Dynamic testing rate', color=colors[3]) t = np.arange(0, 49, 1) y = 2*np.exp((beta-mu)*t) ax.plot(t, y, lw=1, linestyle='--') ax.set_ylim(1, 1000) ax.legend() ax.set_xlabel('Time') ax.set_ylabel('Confirmed Cases') ax.set_yscale('log') # - # In practice, the testing rate is not fixed and changes over time due to supply availability, policy changes, etc. As a result, trying to fit exponential curves to the numbers you hear in the news is at best, misleading. # ## Dynamical lags # Another issue that can easily complicate things is the fact that epidemic models work a bit like conveyer belts. Susceptible get fed on one end, become infectious and eventually Recovered come out the other end. # We calculate the number of __new infections__ as a function of time by looking at the change in the number of susceptibles and compare with the total number of currently infectious people. As the two curves have a very different range of values, we we normalize each curve by dividing it by its respective maximum value. # + new_infections = (-SIR.S).diff(1) new_infections_max = new_infections.argmax() infectious_max = SIR.I.argmax() ax=(new_infections/new_infections.max()).plot(label='New Infections', color=colors[4]) (SIR.I/SIR.I.max()).plot(ax=ax, label='Currently Infectious', color=colors[1]) ax.vlines(x=[new_infections_max, infectious_max], ymin=0, ymax=1, lw=1, linestyle='--') ax.set_ylabel('Fraction of maximum') ax.set_xlabel('Time') ax.legend() # - # As we can see, there's a clear lag between the point as the rate of new infectious starts decreasing and the number of currently infectious individuals starts decreasing as well. 
# ## Lockdown # A consequence of this is that even if you completely cut the supply of Susceptibles who become Infected it takes some time for the effects to be seen. We illustrate this more clearly by implementing a simple lockdown strategy. Starting at time 75, we implement our lockdown strategy and completly stop new infections from occurring. We do this by replacing the epidemic model with one where people are only allowed to recover. # We start with the original SIR model as before and integrating for the first 75 days # + beta = 0.2 mu = 0.1 SIR2 = EpiModel() SIR2.add_interaction('S', 'I', 'I', beta) SIR2.add_spontaneous('I', 'R', mu) SIR2.integrate(75, S=N-I0, I=I0, R=0) # - # Now we create new model with just one transition and setting the initial population to be the population at the end of the previous process # + Quarantine = EpiModel('SIR') Quarantine.add_spontaneous('I', 'R', mu) population = SIR2.values_.iloc[-1] S0 = population.S I0 = population.I R0 = population.R Quarantine.integrate(365-74, S=S0, I=I0, R=R0) # - Quarantine # Now we compbine the results from the two models values = pd.concat([SIR2.values_, Quarantine.values_], axis=0, ignore_index=True) ax = (values/N).plot() ax.vlines(x=74, ymax=1, ymin=0, lw=1, linestyle='--') ax.set_ylabel('Population') ax.set_xlabel('Time') ax.legend(['Susceptible', 'Infectious', 'Recovered']) # Even in this idealized scenario it still takes ~25 days before all the infectious individual recover and life can go back to normal. # If, instead, we have the more realistic scenario where instead of completely stopping the spread we reduce the the spreading $R_0$ to, say, 0.5 (the perfect scenario was equivalent to $R_0=0$, so that the epidemic becomes non viable, we have: # + beta = 0.2/4 # Reduce R0 by 4. 
mu = 0.1 Quarantine2 = EpiModel() Quarantine2.add_interaction('S', 'I', 'I', beta) Quarantine2.add_spontaneous('I', 'R', mu) Quarantine2.integrate(365-74, S=S0, I=I0, R=R0) values2 = pd.concat([SIR2.values_, Quarantine2.values_], axis=0, ignore_index=True) # - # Now we can compare the two scenarios. Lighter dashed lines representing the perfect case described above ax = (values2/N).plot() ax.vlines(x=74, ymax=1, ymin=0, lw=1, linestyle='--') (values.S/N).plot(ax=ax, lw=2, linestyle='-', c=colors[0]) (values.I/N).plot(ax=ax, lw=2, linestyle='-', c=colors[1]) (values.R/N).plot(ax=ax, lw=2, linestyle='-', c=colors[2]) ax.set_ylabel('Population') ax.set_xlabel('Time') ax.legend(['Susceptible', 'Infectious', 'Recovered']) # Naturally, this requries the social distancing procedure to continue for longer and at the end we'll have more people who have been infected. However, if we stop too early, we simply go back to business as usual # + population = values2.iloc[100] S0 = population.S I0 = population.I R0 = population.R SIR2.integrate(365-99, S=S0, I=I0, R=R0) values3 = values2.iloc[:100].copy() values3 = pd.concat([values3, SIR2.values_], axis=0, ignore_index=True) # - # For comparison we now use the original SIR model withouth any intervention. 
# + fig, ax = plt.subplots(1) lines = (values3/N).plot(ax=ax) ax.axvspan(xmin=74, xmax=100, alpha=0.3, color=colors[3]) (SIR.S/N).plot(ax=ax, lw=2, linestyle='--', c=colors[0], legend=False) (SIR.I/N).plot(ax=ax, lw=2, linestyle='--', c=colors[1], legend=False) (SIR.R/N).plot(ax=ax, lw=2, linestyle='--', c=colors[2], legend=False) (values2.S/N).plot(ax=ax, lw=2, linestyle='-', c=colors[0]) (values2.I/N).plot(ax=ax, lw=2, linestyle='-', c=colors[1]) (values2.R/N).plot(ax=ax, lw=2, linestyle='-', c=colors[2]) ax.set_ylabel('Population') ax.set_xlabel('Time') ax.legend(['Susceptible', 'Infectious', 'Recovered']) # - # And zooming in on the number of Infectious # + fig, ax = plt.subplots(1) lines = (values3.I/N).plot(ax=ax, c=colors[1]) span = ax.axvspan(xmin=74, xmax=100, alpha=0.3, color=colors[3]) (values2.I/N).plot(ax=ax, lw=2, linestyle='-', c=colors[1]) (SIR.I/N).plot(ax=ax, lw=2, linestyle='--', c=colors[1]) ax.set_ylabel('Population') ax.set_xlabel('Time') ax.legend(['Interrupted Quarantine', 'Continuous Quarantine', 'No Quarantine', ]) # - # As you can see, even a broken social distancing procedure is better than nothing. It buys time and reduces the number of overall infected people in the population. # ## Multiple populations # So far we have considered only a single population, but a usual, reality is much more complex. The fundamental assumption of compartmental models is that the population is well mixed: in effect, everyone is in contact with everyone. # # Naturally, this is not a very realistic assumption for anything larger than a small town. Let's see what happens if instead of a town we have several towns of the same size that get infected at different points in time. 
# As we already saw, the number of infectious individuals for a single population is, simply: ax=(SIR.I/N).plot(label='Infectious', color=colors[1]) ax.set_xlabel('time') ax.set_ylabel('Population') ax.legend() # Countries, States, Regions and Cities aren't single homogenous population but rather a set of several tightly connected sub populations with the total numbers being the combination of the numbers in each area. If, after starting in 1 population, the infection spreads to a second and third ones after, say, 7 and 14 days, we have: # + infections = (SIR.I).copy() fig, ax = plt.subplots(1) ax.plot(infections/N, color=colors[1], label='1 population', linestyle=':') ax.plot((infections+infections.shift(31).fillna(0))/(2*N), color=colors[1], label='2 populations', linestyle='--') ax.plot((infections+infections.shift(31).fillna(0)+infections.shift(61).fillna(0))/(3*N), color=colors[1], label='3 populations', linestyle='-') ax.set_xlabel('time') ax.set_ylabel('Infectious cases') ax.legend() # - # Naturaly, the exact details of the connections each subpopulation will determine how impacted the shape of the curve will be, # <div style="width: 100%; overflow: hidden;"> # <img src="data/D4Sci_logo_full.png" alt="Data For Science, Inc" align="center" border="0" width=300px> # </div>
Epidemiology101.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # name: python3 # --- # + [markdown] id="view-in-github" colab_type="text" # <a href="https://colab.research.google.com/github/priyanshgupta1998/All_codes/blob/master/decision_tree2.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a> # + id="aWqbtXyz-63K" colab_type="code" colab={} import pandas as pd # + id="W0FTgBwO-67H" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 204} outputId="f136f2e7-a1ef-42c6-d0e4-b81dfbc4e89f" df = pd.read_csv("/home/titanic_data0.csv") df.head() # + [markdown] id="_BcoozDl_fcf" colab_type="text" # Now i'm gonna remove 'PassengerId', 'Name',' SibSp',' Parch',' Ticket',' Cabin',' Embarked' columns from the dataset # + id="zGwLeToZ-7By" colab_type="code" colab={} df.drop(['PassengerId','Name','SibSp','Parch','Ticket','Cabin','Embarked'],axis='columns',inplace=True) # + id="pO6mSupP-7G0" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 204} outputId="9c2316eb-8c33-4e28-ef90-77d03a513a06" df.head() # + id="2rXcEJbA-7Lg" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 51} outputId="996872b3-6c47-42b2-e105-e92fda72f5a2" print(len(df)) print(df.shape) # + [markdown] id="I4YS2Vx8__4H" colab_type="text" # Now take out first column as target dataset and let all other colmns be in the traing dataset # + id="AedP7trY-7KF" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 204} outputId="72fb6dac-2bc5-4c58-e73a-663b327e2f5c" inputs = df.drop('Survived',axis='columns') inputs.head() # + id="FNoB95F3-7AN" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 119} outputId="91bd8fcc-2781-4943-8b97-828883a461e2" target = df.Survived print(target.head()) # + id="4KQCSyBb-6-f" colab_type="code" 
colab={"base_uri": "https://localhost:8080/", "height": 51} outputId="c5f012cf-6357-4e4d-c56c-24c8f1eb7555" print(len(inputs)) print(inputs.shape) # + [markdown] id="WVOm2Hb1FM9T" colab_type="text" # Now convert the female and male text into their correspoding value 2 ,1 # + id="NNYY1aHCErTT" colab_type="code" colab={} inputs.Sex = inputs.Sex.map({'male': 1, 'female': 2}) # + id="ql2-18ZvErXz" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 204} outputId="ec0e632d-9ea3-407a-eb43-55d393d0541f" inputs.head() # + id="bM_ELxn8Erbd" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 204} outputId="998441af-fa80-42c3-bf07-27c48114a507" inputs.Age[:10] # + id="DzPnQqwoErfP" colab_type="code" colab={} inputs.Age = inputs.Age.fillna(inputs.Age.mean()) # fill the empty space by taking mean of the whole column values # + id="aFnqhoqEErjC" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 170} outputId="89941506-abac-45e6-fc88-2776abe1fa02" inputs.Age[:8] # + id="PkE1YvauErnk" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="cf31ced7-565f-442f-f11e-27c21ab88a33" sum=0.0 for i in range(len(inputs.Age)): sum += inputs.Age[i] print(sum) # + id="1dMX05gFErtk" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="6ae870f8-bcda-4fca-be85-fb24543da2d6" l= len(inputs.Age) l # + id="ZsIHTzENErmJ" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="5dc5267e-b780-41c0-c0dd-adf612e93560" sum/l # + id="2mdMTRjDErWG" colab_type="code" colab={} from sklearn.model_selection import train_test_split # + [markdown] id="f9cVPnZ-MNNA" colab_type="text" # Now split the dataset in the training and test data by using cross_validation technique # + id="x9b7xfCvMJ0h" colab_type="code" colab={} x_train, x_test, y_train, y_test = train_test_split(inputs,target,test_size=0.2) # + id="BG2zYVRLMJ5O" colab_type="code" 
colab={"base_uri": "https://localhost:8080/", "height": 51} outputId="df932bf6-35b2-44af-93db-5556fc7541f9" print(len(x_train)) print(len(x_test)) # + id="l_wtUhIhMJ_b" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 51} outputId="76eec5f1-ccaf-4262-b749-a14e67e2630b" print(len(y_train)) print(len(y_test)) # + [markdown] id="szzlABp_MwEQ" colab_type="text" # #Decision tree # # # ``` # i'll create decision tree classifier constructer/object to classify dataset . # ``` # # # + id="AoEQ8TzCMKNC" colab_type="code" colab={} from sklearn import tree model = tree.DecisionTreeClassifier() # + id="xn3QIr64MKTQ" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 119} outputId="aacdeeb8-10b7-4cce-86b2-41ff208f4457" model.fit(x_train,y_train) # train the model # + [markdown] id="GHE5gR4KOFzX" colab_type="text" # We calculate the score , to get to know that our model is trained very well . # + id="sI6clLosMKc9" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="5b911f54-1d0f-49c2-a89f-2feb3c9bdc0a" model.score(x_test,y_test) # find out the score of the model # how much our model is accurate ? # + id="jJ_CKyJWMKbE" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="a2294387-39c1-42db-e6ea-3602b92dc818" model.score(x_train,y_train) # use same dataset to check the model score
decision_tree2.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Basic Python # #### Variable asignment name_of_var = 2 #can't have spaces or start with a number or special character name_of_var #call for the variable print(name_of_var) #print values # ## Data types num = 50 type(num) #integer num1 = 4.75 type(num1) #float cn = complex(3,4) type(cn) #complex number st = 'hola!' type(st) #string boo = True type(boo) #boolean lst = [1,2,3] type(lst) #list d = {'key1': 'value1', 'key2': 'value2', 'key3': 'value3'} type(d) #dictionary t = (1,2,3) type(t) #tuple s = {1,2,3} type(s) #set fnc = lambda var: var*2 type(fnc) #function # ## Change type str(num) #number to string int(num1) #float to the nearest integer float(num) #integer to float bool(0) #False bool(1) #True list(t) #tuple to list tuple(lst) #list to tuple list(d) #dictionaty to list (only keys) # ### Numbers 1 + 1 #sum 5 - 2 #sustraction 3 * 4 #product 8 / 2 #division 9 % 2 #module 2 ** 3 #power # + x = 3 y = 6 res = (x * y) - (x + y) res # - (2 + 3) * (5 + 5) #combinated operations # ### Strings st1 = 'single quotes' st1 st2 = "double quotes" st2 st1 + ' or ' + st2 #concatenate strings # + #assign variables name = 'Paul' num = 28 #include variables in text print('My name is {one}, and my age is {two}'.format(one=name,two=num)) print('My name is {}, and my age is {}'.format(name,num)) # - # ### Lists [1,2,3] #same data type ['hi',1,[1,2]] #different type my_list = ['a','b','c'] my_list.append('d') #add value my_list # ### Dictionaties d = {'key1':'value1','key2':'value2','key3':'value3'} d # ### Booleans True & True #& means AND True & False False & False True | True #& means OR True | False False | False # ### Tuples t = (1,2,3) t[0] # + #t[0] = 4 #ERROR: tuples can't be modified # - # ### Sets {1,2,3} {1,2,3,1,2,1,2,3,3,3,3,2,2,2,1,1,2} #unique 
values # ## Indexing and Selection my_list[0] #one value using position (Iindex starts at 0) my_list[1:3] #many values using range (start is included, end is excluded.) my_list[:1] #if no value means from the beggining/ until the end my_list[0] = 'X' #modify value my_list nest = [1,2,3,[4,5,[7,8]]] #nested list nest[3][2][0] #selects the corresponded number of each nested item d['key1'] #selection in dictionaries using key # ## Comparison Operators 1 < 2 #smaller than 1 > 2 #bigger than 1 >= 1 #bigger or equal 1 <= 4 #smaller or equal 'hola' == 'hi' #equal 'hola' != 'hi' #different # ### Logical Operators (1 > 2) and (2 < 3) #AND means ALL (1 > 2) & (2 < 3) #equivalent (1 > 2) or (2 < 3) #OR means ANY (1 > 2) | (2 < 3) #equivalent 'x' in [1,2,3] #included 'x' in ['x','y','z'] # ## if, elif, else if 1 < 2: print('yes') #if true print if 1 < 2: print('first') else: print('last') #if true print, else print if 1 == 2: print('first') elif 3 == 3: print('middle') else: print('Last') #if true print, elif true print, else print # ## Loops seq = [1,2,3,4,5] for item in seq: print(item) for item in seq: print('number') #while i = 1 while i < 5: print('i is: {}'.format(i)) i = i+1 #range range(5) for i in range(5): print(i) # + #list comprehension x = [1,2,3,4] out = [] for item in x: out.append(item**2) print(out) [item**2 for item in x] #equivalent in one line # - # ## Functions # + def my_func(param1='default'): """ Description. """ print(param1) my_func() # + def square(x): return x**2 square(2) # - # ### lambda expressions # + def times2(var): return var*2 times2(2) lambda var: var*2 #equivalent # + #map seq = [1,2,3,4,5] list(map(times2,seq)) list(map(lambda var: var*2,seq)) #equivalent # - #filter list(filter(lambda item: item%2 == 0,seq)) # ## Methods and functions num = 4.24 round(num) st = 'Winter is coming!' st.lower() #lower case st.upper() #upper case st.capitalize() #capitalize first word stsp = st.split() #split using spaces stsp tweet = 'Winter is coming! 
#GOT' tweet.split('#')[1] #split using special character and selecting after it d.keys() #for dictionaries, keys d.items() #for dictionaries, items lst = [4,5,1,2,3] lst min(lst) max(lst) sorted(lst) lst.pop() #drops last element lst
projects/python/basicpython/Python_codes.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Analysis 20190419 # * Written by: <NAME> # * Running under conda env "lipidraft" # * Source Images: testdata # + # import resource import subprocess import os, sys from core.fileop import DirCheck, ListFiles, GetPendingList, GetGrpFLs #Functions --------------------------------------------------------------- def dict2arg_fiji(arg): arg_str = str() for key, val in arg.items(): str_tmp = str(key + '="' + val + '", ') arg_str = arg_str + str_tmp return arg_str def list2arg_python(arg): arg_str = str() for var in arg: arg_str = arg_str + "'" + str(var) + "' " return arg_str # ------------------------------------------------------------------------- # - # ## Specify Parameters # * `fiji`: path to the fiji application # * `codepath`: the workspace for code/script # * `datapath`: the workspace for STORM images and data # * `analysis_dir`: the folder hosting data and images generated from this pipeline fiji = '/Applications/Fiji.app/Contents/MacOS/ImageJ-macosx' codepath = '/Users/michaelshih/Documents/code/wucci/storm_image_processing' datapath = '/Volumes/LaCie_DataStorage/xiaochao_wei_STORM imaging/STORM_imaging' analysis_dir = 'analysis_20190419' # ## Script name: `imgproc.py` # * env: ImageJ/Fiji # * rewritten: 04/26/2019 # * verified # * output folder: `preprocessing` # ### Output # 1. `preproimg` (`.tif`): seperate the channels into individual folders # 2. `imgintensity` (`.csv`): return mean intensity for each time frame from the center of the image (128 pixel * 128 pixel) # 3. `imginfo`: # 1. `imgmetadata` (`.txt`): return the metadata # 2. 
`imgstat.csv`: statistics from images # * image_name: the name of the image without extension # * ip_file_name: the name of the image with extension # * xSize: frame size on the x-axis # * ySize: frame size on the y-axis # * nSlices: frame size on the z-axis # * nFrames: number of time frames # * nChannels: number of channles # * size_MB: size of the file # + script_name = 'imgproc.py' ippath = '/Volumes/LaCie_DataStorage/xiaochao_wei_STORM imaging/STORM_imaging/resource/testdata' opdir = 'preprocessing' oppath = os.path.join(datapath, analysis_dir, opdir) # create dict to store parameters arg_dict = { 'path': datapath, 'dir_output': analysis_dir, 'ippath': ippath, 'outputdir': oppath, 'batchmodeop': 'false', } arg = dict2arg_fiji(arg_dict) print(arg) # + # Run the script # - subdir = 'Fiji' scriptpath = os.path.join(codepath, subdir, script_name) print("Start running: {}".format(script_name)) subprocess.check_output([fiji, '--ij2', '--run', arg]) print("End: {}".format(script_name)) # ## Script name: `tstormanalysis.py` # * env: ImageJ/Fiji # * plugin: [ThunderSTORM](https://github.com/zitmen/thunderstorm) # * rewritten: 04/26/2019 # * verified # * output folder: `tstorm` # ### Output generated by ThunderSTORM # 1. `csvdata`: # * (`.csv`): STORM dataset # * (`.txt`): protocol # 2. `driftcorr` (`.json`): drift correction profile # 3. 
`histogram_plot` (`.tif`): histogram image with manification = 5.0 # + script_name = 'tstormanalysis.py' ippath = '/Volumes/LaCie_DataStorage/xiaochao_wei_STORM imaging/STORM_imaging/analysis_20190419/preprocessing/preproimg' opdir = 'preprocessing' opsubdir = 'preproimg' arg_dict = { 'path': datapath, 'dir_output': analysis_dir, 'ippath': ippath, 'batchmodeop': 'false', } arg = dict2arg_fiji(arg_dict) print(arg) # - subdir = 'Fiji' scriptpath = os.path.join(codepath, subdir, script_name) print("Start running: {}".format(script_name)) subprocess.check_output([fiji, '--ij2', '--run', scriptpath, arg]) print("End: {}".format(script_name)) # ## Script name: `csv_slicer_T.py` # * env: ImageJ/Fiji # * rewritten: 04/26/2019 # * verified # * output folder: `csvdata_sliced` # ### Output generated by ThunderSTORM # * `csvdata_sliced_T` (`.csv`): STORM data from T0 to T1 script_name = 'csv_slicer_T.py' nchannel = 2 T0 = 5000 T1 = 10001 arg_list = [datapath, analysis_dir,'tstorm','csvdata', nchannel, T0, T1, 'csvdata_sliced_T'] arg = list2arg_python(arg_list) print(arg) print("Start running: {}".format(script_name)) shellcmd = str('python '+ os.path.join(codepath, script_name)+ ' '+ arg) print(shellcmd) process = subprocess.run(shellcmd, shell = True, check=True) print("End: {}".format(script_name)) # ## Script name: `csv_slicer_ROI.py` # * env: ImageJ/Fiji # * rewritten: 04/29/2019 # * verified # * output folder: `csvdata_sliced` # ### Output generated by ThunderSTORM # * `csvdata_sliced_T_ROI` (`.csv`): STORM data cropped by given region of interest. 
# csv_slicer_crop.py script_name = 'csv_slicer_ROI.py' arg_list = [datapath, analysis_dir,'tstorm','csvdata_sliced_T', 2, 3, 'csvdata_sliced_T_ROI'] arg = list2arg_python(arg_list) print(arg) print("Start running: {}".format(script_name)) shellcmd = str('python '+ os.path.join(codepath, script_name)+ ' '+ arg) print(shellcmd) process = subprocess.run(shellcmd, shell = True, check=True) print("End: {}".format(script_name)) # ## Script name: `spatialanaylsis.R` # * env: ImageJ/Fiji # * rewritten: 04/29/2019 # * verified # * output folder: `spacial_test` # ### Output generated by ThunderSTORM # * `spacialdata` (`.csv`): Results of Ripley's K # * K_r: $\hat{K}(d)= \hat{\lambda}^{-1}\sum_{i}\sum_{j\neq1}\frac{\omega(l_i, l_j)I(d_{ij<t})}{N}$ # * L_r: # * H_r: # * `spacialdata_bi` (`.rda`): # spatialanaylsis.R script_name = 'spatialanaylsis.R' arg_list = [datapath, analysis_dir,'tstorm','csvdata_sliced_T_ROI', 2, 3, 'spacial_test', 'spacialdata', 'spacialdata_bi'] arg = list2arg_python(arg_list) print(arg) print("Start running: {}".format(script_name)) shellcmd = str('Rscript --vanilla '+ os.path.join(codepath, script_name)+ ' '+ arg) print(shellcmd) process = subprocess.run(shellcmd, shell = True, check=True) print("End: {}".format(script_name)) # plot_spatialdata.py script_name = 'plot_spatialdata.py' arg_list = [datapath, analysis_dir,'spacial_test','spacialdata', 2, 'plot'] arg = list2arg_python(arg_list) print(arg) # + print("Start running: {}".format(script_name)) shellcmd = str('python '+ os.path.join(codepath, script_name)+ ' '+ arg) print(shellcmd) process = subprocess.run(shellcmd, shell = True, check=True) print("End: {}".format(script_name))
.ipynb_checkpoints/pipeline_controller_04192019_test-checkpoint.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + import matplotlib import matplotlib.pyplot as plt import numpy as np import pandas as pd import root_pandas # %matplotlib inline # %config InlineBackend.figure_format = 'retina' # - def fig_ax(figsize=(10, 8)): fig = plt.figure(figsize=figsize) ax = fig.add_subplot(1, 1, 1) return fig, ax # fname = 'root://eoslhcb.cern.ch//eos/lhcb/user/a/apearce/CharmProduction/2015_MagDown_MC/27163003/1/DVntuple.root' fname = 'DVntuple.root' key = 'TupleD0ToKpi' mc = root_pandas.read_root(fname, key='MC' + key + '/MCDecayTree', columns=['D0_TRUEPT', 'D0_TRUEP_E', 'D0_TRUEP_Z', 'runNumber', 'eventNumber'], where='totCandidates == 1') reco = root_pandas.read_root(fname, key='Cheated' + key + '/DecayTree', columns=['D0_PT', 'D0_PE', 'D0_PZ', 'runNumber', 'eventNumber'], where='totCandidates == 1 && D0_BKGCAT < 20') # + # Add rapidity mc = mc.assign(D0_TRUE_Y=0.5*np.log((mc.D0_TRUEP_E + mc.D0_TRUEP_Z)/(mc.D0_TRUEP_E - mc.D0_TRUEP_Z))) reco = reco.assign(D0_Y=0.5*np.log((reco.D0_PE + reco.D0_PZ)/(reco.D0_PE - reco.D0_PZ))) del mc['D0_TRUEP_E'] del mc['D0_TRUEP_Z'] del reco['D0_PE'] del reco['D0_PZ'] # - len(mc), len(reco) merged = pd.merge(mc, reco, how='inner', on=['runNumber', 'eventNumber']) print(len(merged)) merged.head() # + difference = (merged.D0_PT - merged.D0_TRUEPT) difference_range = (-50, 50) difference_label = r'$p_{\mathrm{T}}(D^{0}) - p_{\mathrm{T}}^{\mathrm{True}}(D^{0})$ [MeV/$c^{2}$]' fig, ax = fig_ax() difference.plot.hist(bins=100, range=difference_range, histtype='step') ax.set_xlabel(difference_label) ax.set_xlim(difference_range) # + difference_normalised = 100*difference/merged.D0_TRUEPT difference_normalised_range = (-4, 4) difference_normalised_label = r'$\frac{p_{\mathrm{T}}(D^{0}) - 
p_{\mathrm{T}}^{\mathrm{True}}(D^{0})}{p_{\mathrm{T}}^{\mathrm{True}}(D^{0})}$ [$\%$]' fig, ax = fig_ax() difference_normalised.plot.hist(bins=100, range=difference_normalised_range, histtype='step') ax.set_xlim(difference_normalised_range) ax.set_xlabel(difference_normalised_label) ax.set_yscale('log') # - edges = 1e3*np.concatenate([np.arange(0, 1, 1), np.arange(1, 3, 0.5), np.arange(3, 16, 1)]) print(edges) fig, ax = fig_ax() H, xedges, yedges, C = ax.hist2d( merged.D0_PT/1e3, difference, bins=(edges/1e3, 40), range=((edges[0]/1e3, edges[-1]/1e3), difference_range), cmap='viridis' ) ax.set_xlabel('$p_{\mathrm{T}}(D^{0})$ [GeV/$c^{2}$]') ax.set_ylabel(difference_label) cb = fig.colorbar(C, ax=ax) cb.set_label('Candidates') fig, ax = fig_ax() H, xedges, yedges, C = ax.hist2d( merged.D0_PT/1e3, difference_normalised, bins=(edges/1e3, 40), normed=True, range=((edges[0]/1e3, edges[-1]/1e3), difference_normalised_range), cmap='viridis' ) ax.set_xlabel('$p_{\mathrm{T}}(D^{0})$ [GeV/$c^{2}$]') ax.set_ylabel(difference_normalised_label) cb = fig.colorbar(C, ax=ax) cb.set_label('Arbitrary units') fig.savefig('output/D0ToKpi_PT_resolution.pdf') fig, ax = fig_ax() H, xedges, yedges, C = plt.hist2d( merged.D0_TRUEPT/1e3, merged.D0_PT/1e3, bins=(edges/1e3, edges/1e3), range=((edges[0]/1e3, edges[-1]/1e3), (edges[0]/1e3, edges[-1]/1e3)), norm=matplotlib.colors.LogNorm(), cmap='viridis' ) ax.set_xlabel('$p_{\mathrm{T}}^{\mathrm{True}}(D^{0})$ [GeV/$c^{2}$]') ax.set_ylabel('$p_{\mathrm{T}}(D^{0})$ [GeV/$c^{2}$]') cb = fig.colorbar(C, ax=ax) cb.set_label('Candidates') gev = 1e-3 H, xedges, yedges = np.histogram2d( merged.D0_TRUEPT*gev, merged.D0_PT*gev, bins=(edges*gev, edges*gev) ) H /= H.diagonal() # Mask the diagonal elements H[H == 1] = 0 fig, ax = fig_ax((11, 8)) C = ax.pcolormesh( xedges, yedges, H, norm=matplotlib.colors.LogNorm(), cmap='viridis' ) ax.set_xlim((xedges[0], xedges[-1])) ax.set_ylim((yedges[0], yedges[-1])) 
ax.set_xlabel('$p_{\mathrm{T}}^{\mathrm{True}}(D^{0})$ [GeV/$c^{2}$]') ax.set_ylabel('$p_{\mathrm{T}}(D^{0})$ [GeV/$c^{2}$]') cb = fig.colorbar(C, ax=ax) cb.set_label('Candidate fraction normalised to diagonal') fig.savefig('output/D0ToKpi_PT_migration_matrix.pdf') edges_rapidity = np.arange(2, 5, 0.5) print(edges_rapidity) difference_rapidity = (merged.D0_Y - merged.D0_TRUE_Y) difference_rapidity_normalised = 100*difference_rapidity/merged.D0_TRUE_Y difference_rapidity_normalised_range = (-0.4, 0.4) difference_rapidity_normalised_label = r'$\frac{y(D^{0}) - y^{\mathrm{True}}(D^{0})}{y^{\mathrm{True}}(D^{0})}$ [$\%$]' fig, ax = fig_ax() H, xedges, yedges, C = ax.hist2d( merged.D0_Y, difference_rapidity_normalised, bins=(edges_rapidity, 40), normed=True, range=((edges_rapidity[0], edges_rapidity[-1]), difference_rapidity_normalised_range), cmap='viridis' ) ax.set_xlabel('$y(D^{0})$') ax.set_ylabel(difference_rapidity_normalised_label) cb = fig.colorbar(C, ax=ax) cb.set_label('Arbitrary units') fig.savefig('output/D0ToKpi_y_resolution.pdf') H, xedges, yedges = np.histogram2d( merged.D0_TRUE_Y, merged.D0_Y, bins=(edges_rapidity, edges_rapidity) ) H /= H.diagonal() # Mask the diagonal elements H[H == 1] = 0 fig, ax = fig_ax((11, 8)) C = ax.pcolormesh( xedges, yedges, H, norm=matplotlib.colors.LogNorm(), cmap='viridis' ) ax.set_xlim((xedges[0], xedges[-1])) ax.set_ylim((yedges[0], yedges[-1])) ax.set_xlabel('$y^{\mathrm{True}}(D^{0})$') ax.set_ylabel('$y(D^{0})$') cb = fig.colorbar(C, ax=ax) cb.set_label('Candidate fraction normalised to diagonal') fig.savefig('output/D0ToKpi_y_migration_matrix.pdf')
scripts/Bin migrations.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # %load_ext lab_black # # Model selection # This example illustrates model selection for the setting of Keane-Wolpin [(1994)](https://www.jstor.org/stable/2109768?seq=1). We show how models with different priors and models with different summary statistics can be compared. # # In this example the following moduels from `respyabc` are used: # # - Distance function for the descriptives:`distances.compute_mean_squared_distance` # - Get point estimate from inference: `evaluation.compute_point_estimate` # - Plot credibility intervals from inference:`evaluation.plot_credible_intervals` # - Plot posterior distribution from inference:`evaluation.plot_kernel_density_posterior` # - Simulation function of the model:`models.compute_model` # - Inference function:`respyabc.respyabc` # # We can import the necessary classes and packages by # + import respy as rp import numpy as np import time from pyabc.visualization import plot_model_probabilities from respyabc.distances import compute_mean_squared_distance from respyabc.models import compute_model from respyabc.respyabc import respyabc from respyabc.tools import convert_time # - # ## Load data and simulate empirical data params, options, data_stored = rp.get_example_model("kw_94_one") model_to_simulate = rp.get_simulate_func(params, options) parameter_true = {"delta_delta": 0.95} np.random.seed(123) pseudo_observed_data = compute_model( parameter_true, model_to_simulate=model_to_simulate, parameter_for_simulation=params, options_for_simulation=options, descriptives="choice_frequencies", ) # ## Model selection with uniform priors # ### Set pyABC settings population_size = 50 max_nr_populations = 10 # We need to specify a list of models we want to compare. 
In this example both models do only differ in their prior distribution. Model 1 is more centered around and we thus expect it to be more likely. We can specify a model selectio run by passing lists of models, prior and descriptives to the function. models = [compute_model, compute_model] parameters_prior = [ {"delta_delta": [[0.93, 0.04], "uniform"]}, {"delta_delta": [[0.90, 0.09], "uniform"]}, ] descriptives = ["choice_frequencies", "choice_frequencies"] # ### respyabc model selection # Running the model selection is now similar to conducting inference. We just need to set the argument `model_selection=False` and pass the respective lists. np.random.seed(1234) start_delta = time.perf_counter() history = respyabc( model=models, parameters_prior=parameters_prior, data=pseudo_observed_data, distance_abc=compute_mean_squared_distance, descriptives=descriptives, population_size_abc=population_size, max_nr_populations_abc=max_nr_populations, minimum_epsilon_abc=0.05, model_selection=True, ) end_delta = time.perf_counter() delta_time, delta_unit = convert_time(end_delta - start_delta) f"The respyabc model selection run took {delta_time:0.2f} {delta_unit}" # ### Model selection evaluation # Computing the model probabilities can be easily done by applying pyABCs routines. To get the probabilities for each run we apply the `history` object and the `get_model_probabilities()` function. model_probabilities = history.get_model_probabilities() model_probabilities # Plotting the probabilities directly is also already implemented in pyABC and can be done by applying the `plot_model_probabilities()` function to the `history` object. 
plot_model_probabilities(history) # ## Model selection with normal priors # ### Set normal priors parameters_prior = [ {"delta_delta": [[0.95, 0.02], "norm"]}, {"delta_delta": [[0.90, 0.02], "norm"]}, ] np.random.seed(1234) start_delta = time.perf_counter() history = respyabc( model=models, parameters_prior=parameters_prior, data=pseudo_observed_data, distance_abc=compute_mean_squared_distance, descriptives=descriptives, population_size_abc=population_size, max_nr_populations_abc=max_nr_populations, minimum_epsilon_abc=0.05, model_selection=True, ) end_delta = time.perf_counter() delta_time, delta_unit = convert_time(end_delta - start_delta) f"The respyabc model selection run took {delta_time:0.2f} {delta_unit}" # ### Model selection evaluation model_probabilities = history.get_model_probabilities() model_probabilities plot_model_probabilities(history)
docs/source/tutorials/model_selection.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + [markdown] colab_type="text" id="ggpPusBoxZt8" # # Computational and Numerical Methods # ## Group 16 # ### Set 14 (29-10-2018): The Trapezoidal Method to Solve Ordinary Differential Equations # #### <NAME> 201601003 # #### <NAME> 201601086 # + colab_type="text" id="a50RW7-JxysE" active="" # <script> # function code_toggle() { # if (code_shown){ # $('div.input').hide('500'); # $('#toggleButton').val('Show Code') # } else { # $('div.input').show('500'); # $('#toggleButton').val('Hide Code') # } # code_shown = !code_shown # } # # $( document ).ready(function(){ # code_shown=false; # $('div.input').hide() # }); # </script> # <form action="javascript:code_toggle()"><input type="submit" id="toggleButton" value="Show Code"></form> # + colab={} colab_type="code" id="EuL2kN0sksoq" import numpy as np import pandas as pd import matplotlib.pyplot as plt import warnings warnings.filterwarnings('ignore') pd.set_option('precision', 2) pd.set_option('display.max_rows', 10) # + [markdown] colab_type="text" id="dq72_6mMrqlW" # ## A # ## $y′(x) = −y(x)$ # ## $y(0) = 1$ # + colab={"base_uri": "https://localhost:8080/", "height": 1409} colab_type="code" id="6fgw-23vkwSc" outputId="c1e2c4d8-599c-4518-f640-9ae459c74184" h = 0.2 n = int(6/h) y = np.zeros(n+1) x = np.linspace(0,6, n+1) f = lambda x,y: -y y[0] = 1 #Euler's Method x0 = 0 y0 = 1 points = np.linspace(0,6,(6/h)+1) ye = np.zeros(len(points), dtype=np.float32) i=1 ye[i-1] = y0 for i in range(1,len(points)): ye[i] = ye[i-1]-(h*ye[i-1]) #Heun's Method for i in range(n): y[i+1] = y[i] + (h*0.5)*(f(x[i],y[i]) + f(x[i+1], y[i] + h*f(x[i],y[i]))) #Trapezoidal Method yt = np.zeros(len(points), dtype=np.float32) i=1 yt[i-1] = y0 xt = x0 for i in range(n): yt[i+1] = yt[i]*((2-h)/(2+h)) x = x + h print("h = 0.2") arr = 
[] arr.append(x) arr.append(y) arr = np.transpose(arr) df = pd.DataFrame(data=arr, columns=['x', 'y']) plt.plot(df['x'], df['y'], label="Heun's Method") plt.legend() plt.show() plt.plot(df['x'], yt, label="Trapezoidal Method") plt.legend() plt.show() plt.plot(df['x'], ye, label="Euler's Method") plt.legend() plt.show() plt.plot(df['x'], df['y']-np.exp(-df['x']), label="Error Heun's Method") plt.legend() plt.plot(df['x'], yt-np.exp(-df['x']), label="Error Trapezoidal Method") plt.legend() plt.plot(df['x'], ye-np.exp(-df['x']), label="Error Euler's Method") plt.legend() plt.show() # + colab={"base_uri": "https://localhost:8080/", "height": 1409} colab_type="code" id="l-Oib7gclaO6" outputId="3054e28f-ae81-4d55-8e4c-0c00db012a76" h = 0.1 n = int(6/h) y = np.zeros(n+1) x = np.linspace(0,6, n+1) f = lambda x,y: -y y[0] = 1 #Euler's Method x0 = 0 y0 = 1 points = np.linspace(0,6,(6/h)+1) ye = np.zeros(len(points), dtype=np.float32) i=1 ye[i-1] = y0 for i in range(1,len(points)): ye[i] = ye[i-1]-(h*ye[i-1]) #Heun's Method for i in range(n): y[i+1] = y[i] + (h*0.5)*(f(x[i],y[i]) + f(x[i+1], y[i] + h*f(x[i],y[i]))) #Trapezoidal Method yt = np.zeros(len(points), dtype=np.float32) i=1 yt[i-1] = y0 xt = x0 for i in range(n): yt[i+1] = yt[i]*((2-h)/(2+h)) x = x + h print("h = 0.1") arr = [] arr.append(x) arr.append(y) arr = np.transpose(arr) df = pd.DataFrame(data=arr, columns=['x', 'y']) plt.plot(df['x'], df['y'], label="Heun's Method") plt.legend() plt.show() plt.plot(df['x'], yt, label="Trapezoidal Method") plt.legend() plt.show() plt.plot(df['x'], ye, label="Euler's Method") plt.legend() plt.show() plt.plot(df['x'], df['y']-np.exp(-df['x']), label="Error Heun's Method") plt.legend() plt.plot(df['x'], yt-np.exp(-df['x']), label="Error Trapezoidal Method") plt.legend() plt.plot(df['x'], ye-np.exp(-df['x']), label="Error Euler's Method") plt.legend() plt.show() # + colab={"base_uri": "https://localhost:8080/", "height": 1409} colab_type="code" id="uHRmfUhMrmDr" 
outputId="d29ff481-ea23-4ba6-c028-9d38c1ed24df" h = 0.05 n = int(6/h) y = np.zeros(n+1) x = np.linspace(0,6, n+1) f = lambda x,y: -y y[0] = 1 #Euler's Method x0 = 0 y0 = 1 points = np.linspace(0,6,(6/h)+1) ye = np.zeros(len(points), dtype=np.float32) i=1 ye[i-1] = y0 for i in range(1,len(points)): ye[i] = ye[i-1]-(h*ye[i-1]) #Heun's Method for i in range(n): y[i+1] = y[i] + (h*0.5)*(f(x[i],y[i]) + f(x[i+1], y[i] + h*f(x[i],y[i]))) #Trapezoidal Method yt = np.zeros(len(points), dtype=np.float32) i=1 yt[i-1] = y0 xt = x0 for i in range(n): yt[i+1] = yt[i]*((2-h)/(2+h)) x = x + h print("h = 0.05") arr = [] arr.append(x) arr.append(y) arr = np.transpose(arr) df = pd.DataFrame(data=arr, columns=['x', 'y']) plt.plot(df['x'], df['y'], label="Heun's Method") plt.legend() plt.show() plt.plot(df['x'], yt, label="Trapezoidal Method") plt.legend() plt.show() plt.plot(df['x'], ye, label="Euler's Method") plt.legend() plt.show() plt.plot(df['x'], df['y']-np.exp(-df['x']), label="Error Heun's Method") plt.legend() plt.plot(df['x'], yt-np.exp(-df['x']), label="Error Trapezoidal Method") plt.legend() plt.plot(df['x'], ye-np.exp(-df['x']), label="Error Euler's Method") plt.legend() plt.show() # + [markdown] colab_type="text" id="1GcqiGLur_P-" # # B # ## $y′(x)=[y(x)+x^2−2]/(x+1)$ # ## $y(0) = 2$ # + colab={"base_uri": "https://localhost:8080/", "height": 1409} colab_type="code" id="_kRT3D-IsAsz" outputId="7927e642-55c5-441e-9a8c-3ba04770ba5b" h = 0.2 n = int(6/h) y = np.zeros(n+1) x = np.linspace(0,6, n+1) f = lambda x,y: ((y + (x**2) -2)/(x+1)) y[0] = 2 #Heun's Method for i in range(n): y[i+1] = y[i] + (h*0.5)*(f(x[i],y[i]) + f(x[i+1], y[i] + h*f(x[i],y[i]))) #Euler's Method x0 = 0 y0 = 2 points = np.linspace(0,6,(6/h)+1) ye = np.zeros(len(points), dtype=np.float32) i = 1 ye[i-1] = y0 for i in range(1,len(points)): ye[i] = ye[i-1]+(h*((ye[i-1] + (points[i-1]**2) -2)/(points[i-1]+1))) #Trapezoidal Method yt = np.zeros(len(points), dtype=np.float32) i=1 yt[i-1] = y0 xt = x0 for i in 
range(n): yt[i+1] = (yt[i] + (h/2)*(((yt[i]+ xt**2 -2)/(xt+1)) + (((xt+h)**2 - 2)/(xt+h+1))))/(1 - (h)/(2*(xt+1+h))) xt = xt + h print("h = 0.2") arr = [] arr.append(x) arr.append(y) arr = np.transpose(arr) df = pd.DataFrame(data=arr, columns=['x', 'y']) plt.plot(df['x'], df['y'], label="Heun's Method") plt.legend() plt.show() plt.plot(df['x'], yt, label="Trapezoidal Method") plt.legend() plt.show() plt.plot(df['x'], ye, label="Euler's Method") plt.legend() plt.show() a = df['x']**2 + 2*df['x'] + 2 - (2*(df['x']+1)*np.log(df['x']+1)) plt.plot(df['x'], df['y']-a, label="Error Heun's Method") plt.legend() plt.plot(df['x'], yt-a, label="Error Trapezoidal Method") plt.legend() plt.plot(df['x'], ye-a, label="Error Euler's Method") plt.legend() plt.show() # + colab={"base_uri": "https://localhost:8080/", "height": 1409} colab_type="code" id="Mi6iWbP4vRxs" outputId="5bd431a2-c224-479b-91f9-a7dadf36f8ae" h = 0.1 n = int(6/h) y = np.zeros(n+1) x = np.linspace(0,6, n+1) f = lambda x,y: ((y + (x**2) -2)/(x+1)) y[0] = 2 #Heun's Method for i in range(n): y[i+1] = y[i] + (h*0.5)*(f(x[i],y[i]) + f(x[i+1], y[i] + h*f(x[i],y[i]))) #Euler's Method x0 = 0 y0 = 2 points = np.linspace(0,6,(6/h)+1) ye = np.zeros(len(points), dtype=np.float32) i = 1 ye[i-1] = y0 for i in range(1,len(points)): ye[i] = ye[i-1]+(h*((ye[i-1] + (points[i-1]**2) -2)/(points[i-1]+1))) #Trapezoidal Method yt = np.zeros(len(points), dtype=np.float32) i=1 yt[i-1] = y0 xt = x0 for i in range(n): yt[i+1] = (yt[i] + (h/2)*(((yt[i]+ xt**2 -2)/(xt+1)) + (((xt+h)**2 - 2)/(xt+h+1))))/(1 - (h)/(2*(xt+1+h))) xt = xt + h print("h = 0.1") arr = [] arr.append(x) arr.append(y) arr = np.transpose(arr) df = pd.DataFrame(data=arr, columns=['x', 'y']) plt.plot(df['x'], df['y'], label="Heun's Method") plt.legend() plt.show() plt.plot(df['x'], yt, label="Trapezoidal Method") plt.legend() plt.show() plt.plot(df['x'], ye, label="Euler's Method") plt.legend() plt.show() a = df['x']**2 + 2*df['x'] + 2 - (2*(df['x']+1)*np.log(df['x']+1)) 
plt.plot(df['x'], df['y']-a, label="Error Heun's Method") plt.legend() plt.plot(df['x'], yt-a, label="Error Trapezoidal Method") plt.legend() plt.plot(df['x'], ye-a, label="Error Euler's Method") plt.legend() plt.show() # + colab={"base_uri": "https://localhost:8080/", "height": 1409} colab_type="code" id="p6iJ_NU2xZnu" outputId="ea9a1f72-1355-4b79-c51f-dee5b12314d8" h = 0.05 n = int(6/h) y = np.zeros(n+1) x = np.linspace(0,6, n+1) f = lambda x,y: ((y + (x**2) -2)/(x+1)) y[0] = 2 #Heun's Method for i in range(n): y[i+1] = y[i] + (h*0.5)*(f(x[i],y[i]) + f(x[i+1], y[i] + h*f(x[i],y[i]))) #Euler's Method x0 = 0 y0 = 2 points = np.linspace(0,6,(6/h)+1) ye = np.zeros(len(points), dtype=np.float32) i = 1 ye[i-1] = y0 for i in range(1,len(points)): ye[i] = ye[i-1]+(h*((ye[i-1] + (points[i-1]**2) -2)/(points[i-1]+1))) #Trapezoidal Method yt = np.zeros(len(points), dtype=np.float32) i=1 yt[i-1] = y0 xt = x0 for i in range(n): yt[i+1] = (yt[i] + (h/2)*(((yt[i]+ xt**2 -2)/(xt+1)) + (((xt+h)**2 - 2)/(xt+h+1))))/(1 - (h)/(2*(xt+1+h))) xt = xt + h print("h = 0.05") arr = [] arr.append(x) arr.append(y) arr = np.transpose(arr) df = pd.DataFrame(data=arr, columns=['x', 'y']) plt.plot(df['x'], df['y'], label="Heun's Method") plt.legend() plt.show() plt.plot(df['x'], yt, label="Trapezoidal Method") plt.legend() plt.show() plt.plot(df['x'], ye, label="Euler's Method") plt.legend() plt.show() a = df['x']**2 + 2*df['x'] + 2 - (2*(df['x']+1)*np.log(df['x']+1)) plt.plot(df['x'], df['y']-a, label="Error Heun's Method") plt.legend() plt.plot(df['x'], yt-a, label="Error Trapezoidal Method") plt.legend() plt.plot(df['x'], ye-a, label="Error Euler's Method") plt.legend() plt.show() # + colab={} colab_type="code" id="Q8tfmDP2xemP"
src/Set_14.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3.6 # language: python # name: python3.6 # --- # # Introduction to GenePattern Notebook # # This document should help you understand how to run an analysis in the GenePattern Notebook environment. In it you will perform a simple preprocessing step and then view the results in a heat map. # # **Instructions are given in blue boxes, such as with the one below.** # # <div class="alert alert-info">Sign in to GenePattern by clicking the login button or entering your username and password into the form below.</div> # + genepattern={"server": "https://genepattern.broadinstitute.org/gp", "type": "auth"} # Requires GenePattern Notebook: pip install genepattern-notebook import gp import genepattern # Username and password removed for security reasons. genepattern.GPAuthWidget(genepattern.register_session("https://genepattern.broadinstitute.org/gp", "", "")) # - # ## Step 1: PreprocessDataset # # Preprocess gene expression data to remove platform noise and genes that have little variation. You can test this step by starting a job using parameters entered into the form below. # # ### Considerations # # * PreprocessDataset can preprocess the data in one or more ways (in this order): # 1. Set threshold and ceiling values. Any value lower/higher than the threshold/ceiling value is reset to the threshold/ceiling value. # 2. Convert each expression value to the log base 2 of the value. # 3. Remove genes (rows) if a given number of its sample values are less than a given threshold. # 4. Remove genes (rows) that do not have a minimum fold change or expression variation. # 5. Discretize or normalize the data. # * If you did not generate the expression data, check whether preprocessing steps have already been taken before running the PreprocessDataset module. 
# * Learn more by reading about the [PreprocessDataset](https://genepattern.broadinstitute.org/gp/getTaskDoc.jsp?name=PreprocessDataset) module. # ## Example # In this example we will preprocess a dataset of 38 samples of leukemia, 27 of subtype ALL and 11 of subtype AML. The data was created on a microarray platform, but the resulting [GCT](http://software.broadinstitute.org/cancer/software/genepattern/file-formats-guide#GCT) file is compatible with RNA-Seq, as well as any other data type that can be expressed with samples as columns and features as rows. # <div class="alert alert-info"> # - Change the *min fold change* parameter to 10 # - Click **Run** to launch the analysis. # + genepattern={"type": "task"} preprocessdataset_task = gp.GPTask(genepattern.get_session(0), 'urn:lsid:broad.mit.edu:cancer.software.genepattern.module.analysis:00020') preprocessdataset_job_spec = preprocessdataset_task.make_job_spec() preprocessdataset_job_spec.set_parameter("input.filename", "https://datasets.genepattern.org/data/all_aml/all_aml_train.gct") preprocessdataset_job_spec.set_parameter("threshold.and.filter", "1") preprocessdataset_job_spec.set_parameter("floor", "100") preprocessdataset_job_spec.set_parameter("ceiling", "20000") preprocessdataset_job_spec.set_parameter("min.fold.change", "3") preprocessdataset_job_spec.set_parameter("min.delta", "100") preprocessdataset_job_spec.set_parameter("num.outliers.to.exclude", "0") preprocessdataset_job_spec.set_parameter("row.normalization", "0") preprocessdataset_job_spec.set_parameter("row.sampling.rate", "1") preprocessdataset_job_spec.set_parameter("threshold.for.removing.rows", "") preprocessdataset_job_spec.set_parameter("number.of.columns.above.threshold", "") preprocessdataset_job_spec.set_parameter("log2.transform", "0") preprocessdataset_job_spec.set_parameter("output.file.format", "3") preprocessdataset_job_spec.set_parameter("output.file", "<input.filename_basename>.preprocessed") 
genepattern.GPTaskWidget(preprocessdataset_task) # - # ## Step 2: HeatMapViewer # # Display a heat map of the preprocessed gene expression data. Since the *min fold change* was so stringent in the previous step, this will show only the genes that had significant changes in expression. # # ### Considerations # # * HeatMapViewer displays gene expression data as a heat map, which makes it easier to see patterns in the numeric data. Gene names are row labels and sample names are column labels. # * Notebooks may contain any number of visualizations. # * The features displayed here use Ensembl IDs. In the next section we will convert these IDs to gene names. # * Learn more by reading about the [HeatMapViewer](https://genepattern.broadinstitute.org/gp/getTaskDoc.jsp?name=HeatMapViewer) module. # # ### Note on instructions # - Many result files have similar names and differ only in their suffixes. When we indicate a file as `<filename>.gct` for example, we mean the result file that has the extension `.gct`. # <div class="alert alert-info"> # - When the job above shows that it is completed (status in the upper right corner of the job cell displays **Completed**): # - Click the link for the `<filename>.preprocessed.gct` result file. # - You will see a list of choices. # - Select **Send to Existing GenePattern Cell**. # - You will see a list of available cells. # - Select the **HeatMapViewer** cell. # - You will see the file populated in the *dataset* parameter of the **HeatMapViewer** cell below. 
# - Launch the **HeatMapViewer** job by clicking **Run** # + genepattern={"type": "task"} heatmapviewer_task = gp.GPTask(genepattern.get_session(0), 'urn:lsid:broad.mit.edu:cancer.software.genepattern.module.visualizer:00010') heatmapviewer_job_spec = heatmapviewer_task.make_job_spec() heatmapviewer_job_spec.set_parameter("dataset", "") genepattern.GPTaskWidget(heatmapviewer_task) # - # - In the cell below, for the *dataset file* parameter, click the dropdown arrow on the right side of the input box. # - You will see all of the available result files in this notebook that can be sent to this input. # - Select `<filename>.preprocessed.gct` # - Click **Run**. # + genepattern={"type": "task"} collapsedataset_task = gp.GPTask(genepattern.get_session(0), 'urn:lsid:broad.mit.edu:cancer.software.genepattern.module.analysis:00134') collapsedataset_job_spec = collapsedataset_task.make_job_spec() collapsedataset_job_spec.set_parameter("dataset.file", "") collapsedataset_job_spec.set_parameter("chip.platform", "ftp://ftp.broadinstitute.org/pub/gsea/annotations/HU6800.chip") collapsedataset_job_spec.set_parameter("collapse.mode", "Maximum") collapsedataset_job_spec.set_parameter("output.file.name", "<dataset.file_basename>.collapsed") genepattern.GPTaskWidget(collapsedataset_task)
2017-12-15_CCMI_workshop/notebooks/2017-12-15_01_CCMI_GenePattern+Notebook+Introduction.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # In this tutorial we will show how to access and navigate the Iteration/Expression Tree (IET) rooted in an `Operator`. # # # # Part I - Top Down # # Let's start with a fairly trivial example. First of all, we disable all performance-related optimizations, to maximize the simplicity of the created IET as well as the readability of the generated code. from devito import configuration configuration['dle'] = 'noop' configuration['openmp'] = False # Then, we create a `TimeFunction` with 3 points in each of the space `Dimension`s _x_ and _y_. # + from devito import Grid, TimeFunction grid = Grid(shape=(3, 3)) u = TimeFunction(name='u', grid=grid) # - # We now create an `Operator` that increments by 1 all points in the computational domain. # + from devito import Eq, Operator eq = Eq(u.forward, u+1) op = Operator(eq) # - # An `Operator` is an IET node that can generate, JIT-compile, and run low-level code (e.g., C). Just like all other types of IET nodes, it's got a number of metadata attached. For example, we can query an `Operator` to retrieve the input/output `Function`s. op.input op.output # If we print `op`, we can see how the generated code looks like. print(op) # An `Operator` is the root of an IET that typically consists of several nested `Iteration`s and `Expression`s – two other fundamental IET node types. The user-provided SymPy equations are wrapped within `Expressions`. Loop nest embedding such expressions are constructed by suitably nesting `Iterations`. # # The Devito compiler constructs the IET from a collection of `Cluster`s, which represent a higher-level intermediate representation (not covered in this tutorial). 
# # The Devito compiler also attaches to the IET key computational properties, such as _sequential_, _parallel_, and _affine_, which are derived through data dependence analysis. # # We can print the IET structure of an `Operator`, as well as the attached computational properties, using the utility function `pprint`. from devito.tools import pprint pprint(op) # In this example, `op` is represented as a `<Callable Kernel>`. Attached to it are metadata, such as `_headers` and `_includes`, as well as the `body`, which includes the children IET nodes. Here, the body is the concatenation of an `ArrayCast` and a `List` object. # op._headers op._includes op.body # We can explicitly traverse the `body` until we locate the user-provided `SymPy` equations. print(op.body[0]) # Printing the ArrayCast print(op.body[1]) # Printing the List # Below we access the `Iteration` representing the time loop. t_iter = op.body[1].body[0] t_iter # We can for example inspect the `Iteration` to discover what its iteration bounds are. t_iter.limits # And as we keep going down through the IET, we can eventually reach the `Expression` wrapping the user-provided SymPy equation. expr = t_iter.nodes[0].body[0].body[0].nodes[0].nodes[0].body[0] expr.view # Of course, there are mechanisms in place to, for example, find all `Expression`s in a given IET. The Devito compiler has a number of IET visitors, among which `FindNodes`, usable to retrieve all nodes of a particular type. So we easily # can get all `Expression`s within `op` as follows from devito.ir.iet import Expression, FindNodes exprs = FindNodes(Expression).visit(op) exprs[0].view
examples/compiler/03_iet-A.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # name: python3 # --- # + [markdown] id="view-in-github" colab_type="text" # <a href="https://colab.research.google.com/github/manjulamishra/DS-Unit-1-Sprint-4-Statistical-Tests-and-Experiments/blob/master/DS_141_Statistics_Probability_and_Inference.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a> # + [markdown] id="eJGtmni-DezY" colab_type="text" # # Lambda School Data Science Module 141 # ## Statistics, Probability, and Inference # + [markdown] id="FMhDKOFND0qY" colab_type="text" # ## Prepare - examine what's available in SciPy # # As we delve into statistics, we'll be using more libraries - in particular the [stats package from SciPy](https://docs.scipy.org/doc/scipy/reference/tutorial/stats.html). # + id="fQ9rkLJmEbsk" colab_type="code" outputId="b5d357e5-6514-49ed-e451-2b4b5a69e71c" colab={"base_uri": "https://localhost:8080/", "height": 4233} from scipy import stats dir(stats) # + id="bxW4SG_gJGlZ" colab_type="code" outputId="1571ce1f-d5b0-4972-a5da-03aa5d4a05f3" colab={"base_uri": "https://localhost:8080/", "height": 68} # As usual, lots of stuff here! 
There's our friend, the normal distribution norm = stats.norm() print(norm.mean()) print(norm.std()) print(norm.var()) # + id="RyNKPt_tJk86" colab_type="code" outputId="836c76a0-6e0b-415a-cc47-0225d0c038f1" colab={"base_uri": "https://localhost:8080/", "height": 68} # And a new friend - t t1 = stats.t(5) # 5 is df "shape" parameter print(t1.mean()) print(t1.std()) print(t1.var()) # + [markdown] id="SRn1zMuaKgxX" colab_type="text" # ![T distribution PDF with different shape parameters](https://upload.wikimedia.org/wikipedia/commons/4/41/Student_t_pdf.svg) # # *(Picture from [Wikipedia](https://en.wikipedia.org/wiki/Student's_t-distribution#/media/File:Student_t_pdf.svg))* # # The t-distribution is "normal-ish" - the larger the parameter (which reflects its degrees of freedom - more input data/features will increase it), the closer to true normal. # + id="seQv5unnJvpM" colab_type="code" outputId="b810aee8-3817-4da0-cace-8efb05312f7c" colab={"base_uri": "https://localhost:8080/", "height": 68} t2 = stats.t(30) # Will be closer to normal print(t2.mean()) print(t2.std()) print(t2.var()) # + [markdown] id="FOvEGMysLaE2" colab_type="text" # Why is it different from normal? To better reflect the tendencies of small data and situations with unknown population standard deviation. In other words, the normal distribution is still the nice pure ideal in the limit (thanks to the central limit theorem), but the t-distribution is much more useful in many real-world situations. # # History sidenote - this is "Student": # # ![<NAME>](https://upload.wikimedia.org/wikipedia/commons/4/42/William_Sealy_Gosset.jpg) # # *(Picture from [Wikipedia](https://en.wikipedia.org/wiki/File:William_Sealy_Gosset.jpg))* # # His real name is <NAME>, and he published under the pen name "Student" because he was not an academic. He was a brewer, working at Guinness and using trial and error to determine the best ways to yield barley. 
He's also proof that, even 100 years ago, you don't need official credentials to do real data science! # + [markdown] id="1yx_QilAEC6o" colab_type="text" # ## Live Lecture - let's perform and interpret a t-test # # We'll generate our own data, so we can know and alter the "ground truth" that the t-test should find. We will learn about p-values and how to interpret "statistical significance" based on the output of a hypothesis test. # + id="BuysRPs-Ed0v" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 297} outputId="396bcae6-1919-4e09-94d3-b7a7b68df64c" survey_data = [0, 1, 0, 1, 0, 0, 0, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 0, 1, 0, 1, 1, 0, 1, 0, 1, 1, 0, 0, 0] import numpy as np import pandas as pd df = pd.DataFrame(survey_data) df.describe() # + id="lDtSGCpLnPBX" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 347} outputId="1c567c12-ff3a-4a00-9215-b302c7573a9a" df.plot.hist(); # + id="0l0SiHdUoBl0" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="37951522-19a6-48c7-9b53-b2d0325d3b4e" #now with confidence interval! 
import scipy scipy.stats.ttest_1samp(survey_data, 0.5) # + [markdown] id="3Y9oaxiwom9u" colab_type="text" # T test basically is: # # ![t test](https://wikimedia.org/api/rest_v1/media/math/render/svg/c01b7db797450f7809d3c85afa2d569e3f3fd575) # # where: X is the sample mean, mu is the poppulation mean, sigma the pop std Dev, and n is no of sample obs # + id="NTO57nqsoLcA" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="527c9824-cf4c-45d5-c42c-8c6f2ab2c643" # the t-statistic is the ratio of the departure of the estimated value of a # parameter from its hypothesized value to its standard error ## We want to calculate (given the formula): tstat = 2.364321853156195 sample_stderr = 0.478518/np.sqrt(len(survey_data)) #in this case, sample Std dev sample_mean = 0.660000 #from the summary statistics table null_hypothesis_mean = 0.5 t_stat = (sample_mean - null_hypothesis_mean)/(sample_stderr) print(t_stat) # + id="W06STgJWqkv2" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="3179af0d-0d7b-4a02-9ea5-b0c91b6080ed" len(survey_data) # + id="5bjn6dVHqpix" colab_type="code" colab={} #Lets reproduce the results import random import numpy as np def make_soda_data(n=50): #return pd.DataFrame([random.randint(0, 1) for _ in range(n)]) #fair version #return pd.DataFrame([random.randit(0, 1) for _ in range(n)]) # Unfair version: (where the coin is barely unfair) return pd.DataFrame(np.random.binomial(n=1, p=0.5, size=n)) # + id="q92J4xjprPvK" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 297} outputId="6a3c2ca0-93d3-4418-9933-a83ee32acaa4" make_soda_data(n=500).describe() # + id="SEQb43XZsC5Q" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 297} outputId="992bab84-114e-4851-c7bf-1666f2f2161f" t_statistics = [] p_values = [] n_experiments = 10 # Number of visitors for _ in range(n_experiments): df = make_soda_data(n=500000) ttest = 
scipy.stats.ttest_1samp(df, 0.5) t_statistics.append(ttest.statistic) p_values.append(ttest.pvalue) pd.DataFrame(t_statistics).describe() # + id="OiEmaegPsWvW" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 297} outputId="27fa5231-a4e0-4961-a8b1-3af0875b3fdf" pd.DataFrame(p_values).describe() # + [markdown] id="egXb7YpqEcZF" colab_type="text" # ## Assignment - apply the t-test to real data # # Your assignment is to determine which issues have "statistically significant" differences between political parties in this [1980s congressional voting data](https://archive.ics.uci.edu/ml/datasets/Congressional+Voting+Records). The data consists of 435 instances (one for each congressperson), a class (democrat or republican), and 16 binary attributes (yes or no for voting for or against certain issues). Be aware - there are missing values! # # Your goals: # # 1. Load and clean the data (or determine the best method to drop observations when running tests) # 2. Using hypothesis testing, find an issue that democrats support more than republicans with p < 0.01 # 3. Using hypothesis testing, find an issue that republicans support more than democrats with p < 0.01 # 4. Using hypothesis testing, find an issue where the difference between republicans and democrats has p > 0.1 (i.e. there may not be much of a difference) # # Note that this data will involve *2 sample* t-tests, because you're comparing averages across two groups (republicans and democrats) rather than a single group against a null hypothesis. # # Stretch goals: # # 1. Refactor your code into functions so it's easy to rerun with arbitrary variables # 2. 
Apply hypothesis testing to your personal project data (for the purposes of this notebook you can type a summary of the hypothesis you formed and tested) # + id="nstrmCG-Ecyk" colab_type="code" colab={} #lets load the file import pandas as pd url = ('https://archive.ics.uci.edu/ml/machine-learning-databases/voting-records/house-votes-84.data') df = pd.read_csv(url, header = None) # + [markdown] id="Txvcm-bcrBKX" colab_type="text" # 7. Attribute Information: # 1. Class Name: 2 (democrat, republican) # 2. handicapped-infants: 2 (y,n) # 3. water-project-cost-sharing: 2 (y,n) # 4. adoption-of-the-budget-resolution: 2 (y,n) # 5. physician-fee-freeze: 2 (y,n) # 6. el-salvador-aid: 2 (y,n) # 7. religious-groups-in-schools: 2 (y,n) # 8. anti-satellite-test-ban: 2 (y,n) # 9. aid-to-nicaraguan-contras: 2 (y,n) # 10. mx-missile: 2 (y,n) # 11. immigration: 2 (y,n) # 12. synfuels-corporation-cutback: 2 (y,n) # 13. education-spending: 2 (y,n) # 14. superfund-right-to-sue: 2 (y,n) # 15. crime: 2 (y,n) # 16. duty-free-exports: 2 (y,n) # 17. 
export-administration-act-south-africa: 2 (y,n) # # + id="2Kr3r9DKFJyo" colab_type="code" colab={} # add the column names column_names = ['Class Name', 'handicapped', 'water-project-cost-sharing', 'adoption-of-the-budget-resolution', 'physician-fee-freeze', 'el-salvador-aid', 'religious-groups-in-schools', 'anti-satellite-test-ban', 'aid-to-nicaraguan-contras', 'mx-missile', 'immigration', 'synfuels-corporation-cutback', 'education-spending', 'superfund-right-to-sue', 'crime', 'duty-free-exports', 'export-administration-act-south-africa'] # + id="84MIaYNWGPn4" colab_type="code" colab={} #add the columns names as the header df = pd.read_csv(url, header = None, names = column_names) # + id="nLNLIpKmpxnI" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 430} outputId="45d5925b-19fb-4dda-c2a5-9491fed084fe" #check the first 10 rows df.head(10) # + id="vajzz_NxGV3g" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="8dcfbfba-2c60-4d72-d7af-3fe5236b2238" #dataset size df.shape # + id="EyZ7zwowu40q" colab_type="code" colab={} df # + id="rYVbfUqGGW_T" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 323} outputId="d733b6c6-02aa-4663-89a1-9780bcea2b38" #it doesn't show missing values because they are coded with "?" df.isnull().sum() # + id="UhPQGiIvG-M1" colab_type="code" colab={} #code all the "?" 
with NaN df = pd.read_csv(url, header = None, names = column_names, na_values=["?"], usecols=range(0,17)) # + id="wSdY02GCt-HX" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 323} outputId="91f9d9bf-16ab-48ee-e6b5-74bf7e65195f" #how many missing vlaues in each column df.isna().sum() # + id="cek80F5Cuhq_" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 153} outputId="22d9a5ec-c813-4a7d-9aa7-c9aaca309544" #checking the column names df.columns # + id="QmBOICxruAz9" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 275} outputId="a15a5792-b53c-4b76-9269-06e3a616312e" df.head() # + id="YF_XQiLuuEWp" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 244} outputId="49cd5295-3d52-4bd7-d504-534bb3a559e6" df.describe() # + id="kpo4j3xazZhA" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 151} outputId="bcbd53f8-d503-49b0-ae72-0adcaff9381c" #since it's categorical data, it will make sense to fill NaN with mode df.mode() # + id="Cew94OO7vGDd" colab_type="code" colab={} #filling NaN with mode cols = ['Class Name', 'handicapped', 'water-project-cost-sharing', 'adoption-of-the-budget-resolution', 'physician-fee-freeze', 'el-salvador-aid', 'religious-groups-in-schools', 'anti-satellite-test-ban', 'aid-to-nicaraguan-contras', 'mx-missile', 'immigration', 'synfuels-corporation-cutback', 'education-spending', 'superfund-right-to-sue', 'crime', 'duty-free-exports', 'export-administration-act-south-africa'] df[cols]=df[cols].fillna(df.mode().iloc[0]) df_cleaned = df # + id="hzXh3pGdv07A" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 275} outputId="735ca56c-f503-44ad-9245-7dc263f5f49c" #clean dataset df_cleaned.head() # + id="oNcgc9W42Mj5" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 323} outputId="c540c33a-9bc3-4d52-d24d-d0bfc8c77bc2" df_cleaned.isna().sum() # + id="wRjHSWrh2qQz" colab_type="code" 
# + colab={}
# Encode the 'y'/'n' votes as 1/0 so the columns can be averaged and t-tested.
df_cleaned.replace(('y', 'n'), (1, 0), inplace=True)

# + id="D5naeDtC7MMx" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 275} outputId="9a8d1991-dc58-493d-b15b-c8bcf7d615e4"
df_cleaned.head()

# + [markdown] id="1wKyIZ42ESXN" colab_type="text"
#

# + id="dhqG_tx07Pqn" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 368} outputId="11b11f1d-f0e2-425e-b307-da79da50d6a7"
df_cleaned.describe()

# + id="lk6RCv43FY3_" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 364} outputId="a1d5f2e8-0988-4731-edb6-124361771a77"
df_cleaned.plot.hist()

# + id="1SKmBm7VN2Yp" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 2040} outputId="2776ba26-88e4-4d48-8d1f-ec8d7e0a4a1e"
# Peek at the 16 vote columns (column 0 is the party label).
df_cleaned.iloc[:,range(1,17)]

# + id="fkoPhtIiGMQF" colab_type="code" colab={}
# Split the data set: democrat subset
df_dem = df.loc[df['Class Name'] == 'democrat']

# + id="xUa5A0fKO-L8" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 275} outputId="2d4eda95-3e94-4cfb-fefb-d860f2e38a18"
df_dem.head()

# + id="ilZOlK9FSffV" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="449fdffd-f76e-4831-a5d8-9007ef25bae5"
df_dem.shape

# + id="Dsok5sRZPjqM" colab_type="code" colab={}
# Republican subset
df_rep = df.loc[df['Class Name'] == 'republican']

# + id="UlVws1bSSh_J" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="789c5a2a-446b-4f31-c97a-9c3300791fba"
df_rep.shape

# + id="4vw2CHsnPwdb" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 275} outputId="9f7e9027-c717-4f58-9cff-5850d5044c1e"
df_rep.head()

# + id="S3vAwHkhWd8J" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 230} outputId="cda8c75a-10c8-4f25-9422-64d3bc868965"
groupby_party_dem = df_dem.groupby('Class Name')
# BUG FIX: the original called `groupby_party.mean()`, a name that is never
# defined anywhere in this notebook (NameError).  The groupby object created
# on the line above is `groupby_party_dem`.
groupby_party_dem.mean()

# + id="92CBoNlKFln7" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 156} outputId="c334dd02-8099-4ea1-f7b8-36e82ef333b7"
# Two-sample t-test across all 16 issues: democrat vs republican vote means.
import scipy
scipy.stats.ttest_ind(df_dem.iloc[:,range(1,17)],df_rep.iloc[:,range(1,17)])

# + id="gAMi19_ecqav" colab_type="code" colab={}
import scipy
scipy.stats.ttest_ind(df_dem.iloc[:,range(1,17)],df_rep.iloc[:,range(1,17)])

# + id="myGg1FFgYWKC" colab_type="code" colab={}
DS_141_Statistics_Probability_and_Inference.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 (ipykernel) # language: python # name: python3 # --- # + [markdown] colab_type="text" id="ciuZBM9czFoT" # # JP Morgan Quant Challenge : ML # ## Team Snick - <NAME> # + colab={} colab_type="code" id="XqupG0i_NXpE" # !pip install -q Cython contextlib2 pillow lxml matplotlib PyDrive # !pip install -q pycocotools # + colab={"base_uri": "https://localhost:8080/", "height": 33} colab_type="code" id="wvLrLfKiNeM2" outputId="e61867fe-d95a-4e6a-c43f-d99e2ec5696b" fileId = '1QWVD3eFkqk1x1l0zfhbVxJzBpY0cHTg' import os from zipfile import ZipFile from shutil import copy from pydrive.auth import GoogleAuth from pydrive.drive import GoogleDrive from google.colab import auth from oauth2client.client import GoogleCredentials auth.authenticate_user() gauth = GoogleAuth() gauth.credentials = GoogleCredentials.get_application_default() drive = GoogleDrive(gauth) fileName = fileId + '.zip' downloaded = drive.CreateFile({'id': fileId}) downloaded.GetContentFile(fileName) ds = ZipFile(fileName) ds.extractall() os.remove(fileName) print('Extracted zip file ' + fileName) # + colab={"base_uri": "https://localhost:8080/", "height": 33} colab_type="code" id="cxGHGBHoNquY" outputId="9cdeb706-a030-40b0-8459-1e4bb1fdd7f5" # !ls # + colab={} colab_type="code" id="60_7mAglNtDW" from sklearn.preprocessing import StandardScaler from sklearn.linear_model import LinearRegression from sklearn.decomposition import PCA from sklearn.metrics import accuracy_score from sklearn.metrics import classification_report from sklearn.metrics import confusion_matrix from sklearn.metrics import r2_score import sklearn # + colab={} colab_type="code" id="YCc09cTiN3NN" import numpy as np # linear algebra import pandas as pd # data processing, CSV file I/O (e.g. 
pd.read_csv) import matplotlib.pyplot as plt import seaborn as sns # %matplotlib inline from subprocess import check_output #print(check_output(["ls", "../input"]).decode("utf8")) # - # ## Data Import and Exploration # + colab={} colab_type="code" id="Pu40l-WbN90O" loc0 = (r'/content/housing_train.csv') loc1 = (r'/content/housing_test.csv') train = pd.read_csv(loc0, error_bad_lines=False) test = pd.read_csv(loc1, error_bad_lines=False) # + colab={} colab_type="code" id="m1p-4FaDcgkx" ww = train # + colab={} colab_type="code" id="udRpTlsxcjCF" train =ww # + colab={"base_uri": "https://localhost:8080/", "height": 33} colab_type="code" id="Vzo0LmPfOMKV" outputId="5a4d5647-b8cc-4606-ad7c-cc8b1b17c32f" train.shape # + colab={"base_uri": "https://localhost:8080/", "height": 305} colab_type="code" id="X7y_oIYkOQgV" outputId="56795321-3875-4c91-9934-24c663ecd12e" print(test.shape) train.head() # + colab={} colab_type="code" id="9QmvcKHQc9rA" train.drop('AddressLine2', axis = 1, inplace = True) test.drop('AddressLine2', axis = 1, inplace = True) # + colab={} colab_type="code" id="QCdd66CpdWOp" train.drop('Locality', axis = 1, inplace = True) test.drop('Locality', axis = 1, inplace = True) # + colab={} colab_type="code" id="NZj3W83JexoR" train.drop('TID', axis = 1, inplace = True) test.drop('TID', axis = 1, inplace = True) # + colab={} colab_type="code" id="0cNw_cgrgNo5" train.drop('AddressLine1', axis = 1, inplace = True) test.drop('AddressLine1', axis = 1, inplace = True) # + colab={} colab_type="code" id="hYrroNcWVZQE" # Retaining the year component of the. 
train['Year'] = train.apply(lambda row: row[0].split("-")[0] , axis = 1) test['Year'] = test.apply(lambda row: row[0].split("-")[0] , axis = 1) train['Month'] = train.apply(lambda row: row[0].split("-")[1] , axis = 1) test['Month'] = test.apply(lambda row: row[0].split("-")[1] , axis = 1) train['Day'] = train.apply(lambda row: row[0].split("-")[2] , axis = 1) test['Day'] = test.apply(lambda row: row[0].split("-")[2] , axis = 1) train.drop('Date', axis = 1, inplace = True) test.drop('Date', axis = 1, inplace = True) # + colab={"base_uri": "https://localhost:8080/", "height": 1000} colab_type="code" id="7zgnk4JvbpuI" outputId="a974004f-84af-4bb2-f551-39a447fcb351" train.head(100) # + colab={} colab_type="code" id="2_by4AB5eSx1" train['Postcode'].fillna(method='ffill', inplace = True) train['Street'].fillna(method='ffill', inplace = True) # + colab={} colab_type="code" id="ovz7fJWKapHf" #train['Street1'] = train.apply(lambda row: str(row[4]).split(" ")[0] , axis = 1) #test['Street1'] = test.apply(lambda row: str(row[4]).split(" ")[0] , axis = 1) #train['Street2'] = train.apply(lambda row: str(row[4]).split(" ")[1] , axis = 1) #test['Street2'] = test.apply(lambda row: str(row[4]).split(" ")[1] , axis = 1) train.drop('Street', axis = 1, inplace = True) test.drop('Street', axis = 1, inplace = True) # + colab={} colab_type="code" id="UwhSrMEreVcw" #train['Postcode1'] = train.apply(lambda row: str(row[6]).split(" ")[0] , axis = 1) #test['Postcode1'] = test.apply(lambda row: str(row[6]).split(" ")[0] , axis = 1) #train['Postcode2'] = train.apply(lambda row: str(row[6]).split(" ")[1] , axis = 1) #test['Postcode2'] = test.apply(lambda row: str(row[6]).split(" ")[1] , axis = 1) # + colab={} colab_type="code" id="WJjnjfNfiRfq" train.drop('Postcode', axis = 1, inplace = True) test.drop('Postcode', axis = 1, inplace = True) # + colab={} colab_type="code" id="rUYfd5VsmeXe" train.drop('Town', axis = 1, inplace = True) test.drop('Town', axis = 1, inplace = True) # + colab={} 
colab_type="code" id="bnilraYxOUze" w = train # + colab={"base_uri": "https://localhost:8080/", "height": 180} colab_type="code" id="yuFDpglcd53g" outputId="ed82431b-6402-430e-f89a-fd26fce1b6ae" for col in train.columns: print(col,len(train[col].unique())) # + colab={} colab_type="code" id="SomM-R_6OZo0" train['OldvNew'] = train['OldvNew'].replace(['N', 'Y'], [0, 1]) train['Duration'] = train['Duration'].replace(['L' ,'F'], [0, 1]) train['Price Category'] = train['Price Category'].replace(['B', 'A'], [0, 1]) # + colab={"base_uri": "https://localhost:8080/", "height": 191} colab_type="code" id="2AfdD6TJObsx" outputId="0ed26c6a-296d-49e4-86c0-70d23b620da4" test.head(5) # + colab={"base_uri": "https://localhost:8080/", "height": 162} colab_type="code" id="-0Xmgf5COdF5" outputId="4c6731f5-24fb-453e-bd20-34c4032596a5" train.corr() # + colab={"base_uri": "https://localhost:8080/", "height": 598} colab_type="code" id="7ay6D6akOerl" outputId="3b07b52f-0422-4ce1-a377-82beb923f3f8" correlation = train.corr() plt.figure(figsize=(10,10)) sns.heatmap(correlation, vmax=1, square=True,annot=True,cmap='viridis') plt.title('Correlation between different fearures') # + colab={} colab_type="code" id="DuoHwmRAOgE6" #train.drop(['TID','Date', 'AddressLine1', 'AddressLine2','Street','Locality','Town','Taluka','Postcode'], axis = 1, inplace = True) #test.drop(['TID','Date', 'AddressLine1', 'AddressLine2','Street','Locality','Town','Taluka','Postcode'], axis = 1, inplace = True) # + colab={} colab_type="code" id="YKl1BsIjOhuN" # Preparing data to be fed to a predictive model train_Y = train['Price'] train_X = train.drop('Price', axis = 1) test_X = test # + colab={"base_uri": "https://localhost:8080/", "height": 191} colab_type="code" id="WMmU7d_-Olzb" outputId="35965524-9526-40b3-f86d-a15cf5b17942" train_X.head() # + colab={"base_uri": "https://localhost:8080/", "height": 163} colab_type="code" id="sq96B3eEji2v" outputId="6176813d-3421-444b-f105-a26ab6efce74" for col in train_X.columns: 
# Continuation of the cell opened just above this chunk: report the
# cardinality of every feature column of the training matrix.
for col in train_X.columns:
    print(col, len(train_X[col].unique()))
# -

# ## Principal component analysis

# + colab={"base_uri": "https://localhost:8080/", "height": 830} colab_type="code" id="BzW3y64xOnOy" outputId="c3417bcb-81b1-4151-e66a-831d46588921"
# Keep enough components to explain 95% of the variance.
pca = PCA(.95)
x = pca.fit(pd.get_dummies(train_X)).transform(pd.get_dummies(train_X))
pca.explained_variance_

# + colab={} colab_type="code" id="ZxsvK7a-rKjn"
h = x

# + colab={} colab_type="code" id="k5W_K8AWxTww"
# Due to time constraint I am reducing the components to 16 as they contribute
# to the most of the variance with minimum dimensions

# + colab={"base_uri": "https://localhost:8080/", "height": 830} colab_type="code" id="F4YFWRQRq8Fr" outputId="455cc36a-9e61-41d9-a3b4-90b21455409a"
# BUG FIX: `whiten` expects a boolean; the original passed the *string*
# 'True', which only behaved as intended because any non-empty string is
# truthy.  Behaviour is unchanged, the type is now correct.
pca2 = PCA(n_components=16, whiten=True)
x = pca2.fit(pd.get_dummies(train_X)).transform(pd.get_dummies(train_X))
pca.explained_variance_

# + colab={} colab_type="code" id="PfzqJbLiOouS"
from sklearn.model_selection import train_test_split
# BUG FIX: mean_absolute_error is used in evaluate_models() below but was
# never imported anywhere in this notebook, so calling evaluate_models()
# raised a NameError.
from sklearn.metrics import r2_score, mean_absolute_error

# + colab={} colab_type="code" id="YZROyMspOqaV"
from sklearn.ensemble import RandomForestRegressor
from sklearn.ensemble import GradientBoostingRegressor
# -

# ## Model Training and Predictions

# + colab={} colab_type="code" id="3PLjaJx-Ori4"
X_train, X_test, y_train, y_test = train_test_split(x, train_Y, test_size = 0.2, random_state = 42)
#CVtrain_X, CVtest_X = pd.get_dummies(CVtrain_X), pd.get_dummies(CVtest_X)

# + colab={} colab_type="code" id="3HL9weCvptTp"
def evaluate_models(number_of_est, maximum_depth, models, train_X, test_X):
    '''Evaluate tree-based regressors (R2 score and MAE) over a grid of
    n_estimators and max_depth values, printing one line per combination.

    Parameters
    ----------
    number_of_est : iterable of int -- n_estimators grid
    maximum_depth : iterable of int -- max_depth grid
    models        : iterable of estimator classes (e.g. RandomForestRegressor)
    train_X, test_X : unused.  NOTE(review): the body reads the module-level
        X_train / X_test / y_train / y_test globals instead of these
        parameters; kept as-is to preserve the original behaviour (the call
        site below even passes y_train in the test_X slot).  Consider wiring
        the parameters through in a follow-up.
    '''
    for model_choice in models:
        for n_est in number_of_est:
            for max_d in maximum_depth:
                model = model_choice(n_estimators=n_est, max_depth=max_d, random_state = 2)
                model.fit(X_train, y_train)
                CVpred = model.predict(X_test)
                mae = mean_absolute_error(y_test, CVpred)
                r2 = r2_score(y_test, CVpred)
                print(model_choice,',Estimators:',n_est,',Max_Depth:',max_d,',R2:', r2,',MAE:',mae)

# + colab={"base_uri": "https://localhost:8080/", "height": 33} colab_type="code" id="seydNZewt8rS" outputId="028ecd93-7d7b-4eb8-c17b-0adf9218a99d"
# Linear-regression baseline on the PCA-reduced features.
model = LinearRegression()
# 2. Use fit
model.fit(X_train, y_train)
# 3. Check the score
model.score(X_test, y_test)

# + colab={} colab_type="code" id="d6jlXHU3uhNZ"
tttt = model.predict(X_test)

# + colab={"base_uri": "https://localhost:8080/", "height": 33} colab_type="code" id="082N0MD6uCwZ" outputId="3222139d-bff3-46bd-da5f-49ea1a9bb39a"
X_train.shape

# + colab={"base_uri": "https://localhost:8080/", "height": 33} colab_type="code" id="zX9bUvJXuFdQ" outputId="8f82f032-3224-43a0-95df-9d06fbf95325"
X_test.shape

# + colab={} colab_type="code" id="UXnqm25apwbT"
models = [RandomForestRegressor, GradientBoostingRegressor]
number_of_est = [20, 30, 40, 50, 60]
#number_of_est = [250, 80, 50, 60]
#maximum_depth = [8, 10, 15]
maximum_depth = [5, 10, 15, 20, 25]
evaluate_models(number_of_est, maximum_depth, models, X_train, y_train)

# + [markdown] colab_type="text" id="DxlCK9UjybJK"
# #### Code for ensembling of the above trained model

# + colab={} colab_type="code" id="LJFbDrmKrh_p"
#from sklearn. ensemble import RandomForestClassifier, BaggingClassifier, AdaBoostClassifier, VotingClassifier
#eclf1 = VotingClassifier(estimators=[('lr', ada), ('rf', rf), ('gnb', gb),('svc',svc)], voting='hard')
#eclf1 = eclf1.fit(X_train, y_train)
#eclf1.score(X_test, y_test)
main.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3.8
#     language: python
#     name: python3
# ---

# # Assignment 4
#
# Welcome to Assignment 4. This will be the most fun. Now we will prepare data for plotting.
#
# Just make sure you hit the play button on each cell from top to down. There are three functions you have to implement. Please also make sure that on each change on a function you hit the play button again on the corresponding cell to make it available to the rest of this notebook.
#
#
# This notebook is designed to run in an IBM Watson Studio default runtime (NOT the Watson Studio Apache Spark Runtime as the default runtime with 1 vCPU is free of charge). Therefore, we install Apache Spark in local mode for test purposes only. Please don't use it in production.
#
# In case you are facing issues, please read the following two documents first:
#
# https://github.com/IBM/skillsnetwork/wiki/Environment-Setup
#
# https://github.com/IBM/skillsnetwork/wiki/FAQ
#
# Then, please feel free to ask:
#
# https://coursera.org/learn/machine-learning-big-data-apache-spark/discussions/all
#
# Please make sure to follow the guidelines before asking a question:
#
# https://github.com/IBM/skillsnetwork/wiki/FAQ#im-feeling-lost-and-confused-please-help-me
#
#
# If running outside Watson Studio, this should work as well. In case you are running in an Apache Spark context outside Watson Studio, please remove the Apache Spark setup in the first notebook cells.

# +
from IPython.display import Markdown, display
def printmd(string):
    # Render the given text as a red Markdown heading in the notebook output.
    display(Markdown('# <span style="color:red">'+string+'</span>'))


# Guard: a pre-existing SparkContext means this is the (unsupported) Watson
# Studio Spark runtime rather than the plain default runtime.
if ('sc' in locals() or 'sc' in globals()):
    printmd('<<<<<!!!!! It seems that you are running in a IBM Watson Studio Apache Spark Notebook. Please run it in an IBM Watson Studio Default Runtime (without Apache Spark) !!!!!>>>>>')
# -

# NOTE(review): the second install upgrades past the 2.4.5 pin above — the two
# lines contradict each other; confirm which pyspark version the grader needs.
# !pip install pyspark==2.4.5
# !pip install --upgrade pyspark

try:
    from pyspark import SparkContext, SparkConf
    from pyspark.sql import SparkSession
except ImportError as e:
    printmd('<<<<<!!!!! Please restart your kernel after installing Apache Spark !!!!!>>>>>')

# +
# Local-mode Spark for test purposes only (see warning above).
sc = SparkContext.getOrCreate(SparkConf().setMaster("local[*]"))

spark = SparkSession \
    .builder \
    .getOrCreate()
# -

# Sampling is one of the most important things when it comes to visualization because often the data set gets so huge that you simply
#
# - can't copy all data to a local Spark driver (Watson Studio is using a "local" Spark driver)
# - can't throw all data at the plotting library
#
# Please implement a function which returns a 10% sample of a given data frame:

def getSample():
    # Return a 10% sample of the data frame:
    # sample(withReplacement=False, fraction=0.1).
    # NOTE(review): reads the module-level `df` (created further down, after
    # the parquet download cell) instead of taking a parameter, so it only
    # works when called after that cell. Confirm the grader expects this
    # zero-argument signature before changing it.
    x=df.sample(False,0.1)
    return x


# Now we want to create a histogram and boxplot. Please ignore the sampling for now and return a python list containing all temperature values from the data set

def getListForHistogramAndBoxPlot(df,spark):
    # Select non-null temperatures via SparkSQL against the 'washing' view,
    # then collect them to the driver as a plain Python list.
    result = spark.sql("select temperature from washing where temperature is not null")
    # NOTE(review): despite the instruction above to "ignore the sampling",
    # a 10% sample is taken before collecting — verify against the grader
    # (it checks the length of this list).
    result_array = result.rdd.map(lambda row : row.temperature).sample(False,0.1).collect()
    if not type(result_array)==list:
        raise Exception('return type not a list')
    return result_array


# Finally we want to create a run chart. Please return two lists (encapsulated in a python tuple object) containing temperature and timestamp (ts) ordered by timestamp. Please refer to the following link to learn more about tuples in python: https://www.tutorialspoint.com/python/python_tuples.htm

#should return a tuple containing the two lists for timestamp and temperature
#please make sure you take only 10% of the data by sampling
#please also ensure that you sample in a way that the timestamp samples and temperature samples correspond (=> call sample on an object still containing both dimensions)
def getListsForRunChart(df,spark):
    # Pull (ts, temperature) pairs together so the two output lists stay
    # aligned index-by-index.
    result = spark.sql("select temperature,ts from washing where temperature is not null and ts is not null")
    # NOTE(review): the instructions ask for ordering by ts and a 10% sample,
    # but neither an "order by ts" clause nor a .sample(...) call is applied
    # here — confirm whether that is intentional before submitting.
    result_rdd = result.rdd.map(lambda row : (row.ts,row.temperature))
    result_array_ts = result_rdd.map(lambda ts_temperature: ts_temperature[0]).collect()
    result_array_temperature = result_rdd.map(lambda ts_temperature: ts_temperature[1]).collect()
    x=result_array_ts,result_array_temperature
    return x


# Now it is time to grab a PARQUET file and create a dataframe out of it. Using SparkSQL you can handle it like a database.

# !wget https://github.com/IBM/coursera/blob/master/coursera_ds/washing.parquet?raw=true
# !mv washing.parquet?raw=true washing.parquet

# Load the parquet file and register it as the 'washing' SQL view used above.
df = spark.read.parquet('washing.parquet')
df.createOrReplaceTempView('washing')
df.show()

# Now we are going to test the functions you've completed and visualize the data.

# %matplotlib inline
import matplotlib.pyplot as plt

plt.hist(getListForHistogramAndBoxPlot(df,spark))
plt.show()

plt.boxplot(getListForHistogramAndBoxPlot(df,spark))
plt.show()

lists = getListsForRunChart(df,spark)

plt.plot(lists[0],lists[1])
plt.xlabel("time")
plt.ylabel("temperature")
plt.show()

# Congratulations, you are done! The following code submits your solution to the grader. Again, please update your token from the grader's submission page on Coursera

# !rm -f rklib.py
# !wget https://raw.githubusercontent.com/IBM/coursera/master/rklib.py

# +
from rklib import submitAll
import json

key = "S5PNoSHNEeisnA6YLL5C0g"
# NOTE(review): redacted placeholders — fill in your Coursera e-mail address
# and the token from the grader's submission page before running this cell.
email = "<EMAIL>"
token = "<PASSWORD>vNcyCCQ1Gs"

# +
# One grader part per function: type and length checks of the returned values.
parts_data = {}
parts_data["iLdHs"] = json.dumps(str(type(getListForHistogramAndBoxPlot(df,spark))))
parts_data["xucEM"] = json.dumps(len(getListForHistogramAndBoxPlot(df,spark)))
parts_data["IyH7U"] = json.dumps(str(type(getListsForRunChart(df,spark))))
parts_data["MsMHO"] = json.dumps(len(getListsForRunChart(df,spark)[0]))

submitAll(email, token, key, parts_data)
# -
Data Engineering Foundations1.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     name: python3
# ---

# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/mihhaila-ntnu-no/overboard_tracker/blob/main/cmemsapi_functions.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>

# + colab={"base_uri": "https://localhost:8080/"} id="01S66WriwXYA" outputId="ff30e023-96b7-40ab-81ea-3ba199d2a7d8"
# #! /usr/bin/env python3
# -*- coding: utf-8 -*-
"""Main module.

Helpers for downloading Copernicus Marine (CMEMS) data via the MOTU API:
credential handling, request splitting, download and disk-space utilities.
"""
import calendar
import datetime as dt
import getpass as password
import hashlib
import logging
import math
import os
import re
import shutil
import subprocess
import sys
import time
from functools import reduce
from importlib import reload
from pathlib import Path

import requests as rq
import fire
import lxml.html
import pandas as pd
import xarray as xr
from funcy import omit

# Module-level constants: working directory captured at import time, ANSI
# escape codes for emphasised console output, and a per-run timestamped
# log file under ./log/.
DEFAULT_CURRENT_PATH = os.getcwd()
BOLD = '\033[1m'
END = '\033[0m'
LOGFILE = Path(
    DEFAULT_CURRENT_PATH, 'log',
    ''.join(["CMEMS_API_",
             dt.datetime.now().strftime('%Y%m%d_%H%M'), ".log"]))

try:
    if not LOGFILE.parent.exists():
        LOGFILE.parent.mkdir(parents=True)
    # Start from a fresh log file on every import of this module.
    if os.path.exists(LOGFILE):
        os.remove(LOGFILE)
    print(f'[INFO] Logging to: {str(LOGFILE)}')
    # reload() resets any handlers a previous (notebook) run may have
    # configured, so basicConfig takes effect again.
    reload(logging)
    logging.basicConfig(filename=LOGFILE,
                        level=logging.DEBUG,
                        format='[%(asctime)s] - [%(levelname)s] - %(message)s',
                        datefmt='%Y-%m-%d %H:%M:%S')
except IOError:
    print("[ERROR] Failed to set logger.")


def set_target_directory(local_storage_directory=None):
    """
    Returns working directory where data is saved.

    Default value (None) creates a directory (``copernicus-tmp-data``)
    in the current working directory.

    Parameters
    ----------
    local_storage_directory : path or str, optional
        A path object or string. The default is None.

    Returns
    -------
    target_directory : path
        A path to the directory where data is saved.

    """
    if local_storage_directory:
        target_directory = Path(local_storage_directory)
    else:
        target_directory = Path(DEFAULT_CURRENT_PATH, 'copernicus-tmp-data')
    # Create the directory (and parents) on first use.
    if not target_directory.exists():
        target_directory.mkdir(parents=True)
        print(f'[INFO] Directory successfully created : {target_directory}.')
    return target_directory


def multireplace(tobereplaced, substitute):
    """
    Returns replaced string given string and substitute map.

    Parameters
    ----------
    tobereplaced : str
        String to execute replacements on.
    substitute : dict
        Substitute dictionary {value to find: value to replace}.

    Returns
    -------
    str
        Replaced string.

    """
    # Sort keys longest-first so that longer matches win over their own
    # prefixes, then combine them into a single alternation regex.
    substrings = sorted(substitute, key=len, reverse=True)
    regex = re.compile('|'.join(map(re.escape, substrings)))
    return regex.sub(lambda match: substitute[match.group(0)], tobereplaced)


def query(question, default="yes"):
    """
    Returns answer from a yes/no question, read from user\'s input.

    Parameters
    ----------
    question : str
        String written as a question, displayed to user.
    default : str, optional
        Answer presumed if the user just hits Enter. The default is "yes".

    Raises
    ------
    ValueError
        Raised when ``default`` is not one of None, "yes" or "no".

    Returns
    -------
    bool
        Returns ``True`` if user validates question, ``False`` otherwise.
""" valid = {"yes": True, "y": True, "ye": True, "no": False, "n": False} if default is None: prompt = " [y/n] " elif default == "yes": prompt = " [Y/n] " elif default == "no": prompt = " [y/N] " else: raise ValueError(f"[ERROR] Invalid default answer: '{default}'") while True: sys.stdout.write(question + prompt) choice = input().lower() if default is not None and choice == '': return valid[default] elif choice in valid: return valid[choice] else: sys.stdout.write("[ACTION] Please respond with 'yes' or 'no' " "(or 'y' or 'n').\n") def get_config_constraints(): """ Returns constraints configuration as ``dict`` from which data requests will be stacked. Returns ------- split_dict : TYPE DESCRIPTION. """ c_dict = { 'year': { 'depth': 6000, 'geo': 200 }, 'month': { 'depth': 6000, 'geo': 360 }, 'day': { 'depth': 6000, 'geo': 360 } } split_dict = { 'hourly_r': { 'pattern': [ '-hi', 'hourly', 'hts', 'fc-h', '1-027', '1-032', 'rean-h', '1hr', '3dinst', '_hm', 'BLENDED', '15min', 'MetO-NWS-WAV-RAN', 'skin', 'surface' ], 'year_s': c_dict['year'], 'month_s': c_dict['month'], 'day_s': c_dict['day'] }, 'day_r': { 'pattern': ['daily', 'weekly', 'an-fc-d', 'rean-d', 'day-', '-dm-'], 'year_s': c_dict['year'], 'month_s': c_dict['month'], 'day_s': c_dict['day'] }, 'month_r': { 'pattern': [ 'month', 'an-fc-m', 'rean-m', '-mm-', '-MON-', 'ran-arc-myoceanv2-be', 'CORIOLIS', 'bgc3d' ], 'year_s': c_dict['year'], 'month_s': c_dict['month'] } } return split_dict def get_credentials(file_rc=None, sep='='): """ Returns Copernicus Marine Credentials. Credentials can be specified in a file or if ommitted, manually by user's input. Parameters ---------- file_rc : str or path, optional Location of the file storing credentials. The default is None. sep : str, optional Character used to separate credential and its value. The default is `=`. Raises ------ SystemExit Raise an error to exit program at fatal error (wrong credentials etc). 
Returns ------- copernicus_username : str Copernicus Marine username. copernicus_password : str <PASSWORD>. """ lines = [] if not file_rc: file_rc = Path.cwd() / 'copernicus_credentials.txt' try: with open(file_rc, 'r') as cred: for line in cred: lines.append(line) except FileNotFoundError: print(f'[INFO] Credentials must be entered hereafter, obtained from: ' f'https://resources.marine.copernicus.eu/?option=com_sla') print( f'[INFO] If you have forgotten either your USERNAME ' f'(which {BOLD}is NOT your email address{END}) or your PASSWORD, ' f'please visit: https://marine.copernicus.eu/faq/forgotten-password/?idpage=169' ) time.sleep(2) usr = password.getpass( prompt=f"[ACTION] Please input your Copernicus {BOLD}USERNAME{END}" " (and hit `Enter` key):") time.sleep(2) pwd = password.getpass( prompt=f"[ACTION] Please input your Copernicus {BOLD}PASSWORD{END}" " (and hit `Enter` key):") lines.append(f'username{sep}{usr}') lines.append(f'password{sep}{pwd}') create_cred_file = query( f'[ACTION] For future usage, do you want to save credentials in a' ' configuration file?', 'yes') if create_cred_file: with open(file_rc, 'w') as cred: for line in lines: cred.write(''.join([line, '\n'])) if not all([sep in item for item in lines]): print('[ERROR] Sperator is not found. Must be specifed or corrected.\n' f'[WARNING] Please double check content of {file_rc}. ' f'It should match (please mind the `{sep}`):' f'\nusername{sep}<USERNAME>\npassword{sep}<PASSWORD>') raise SystemExit copernicus_username = ''.join(lines[0].strip().split(sep)[1:]) copernicus_password = ''.join(lines[1].strip().split(sep)[1:]) if not check_credentials(copernicus_username, copernicus_password): if file_rc.exists(): msg = f' from content of {file_rc}' else: msg = '' print( '[ERROR] Provided username and/or password could not be validated.\n' f'[WARNING] Please double check it{msg}. 
More help at: ' 'https://marine.copernicus.eu/faq/forgotten-password/?idpage=169') raise SystemExit print('[INFO] Credentials have been succcessfully loaded and verified.') return copernicus_username, copernicus_password def check_credentials(user, pwd): """ Check provided Copernicus Marine Credentials are correct. Parameters ---------- username : str Copernicus Marine Username, provided for free from https://marine.copernicus.eu . password : str Copernicus Marine Password, provided for free from https://marine.copernicus.eu . Returns ------- bool Returns ``True`` if credentials are correst, ``False`` otherwise. """ cmems_cas_url = 'https://cmems-cas.cls.fr/cas/login' conn_session = rq.session() login_session = conn_session.get(cmems_cas_url) login_from_html = lxml.html.fromstring(login_session.text) hidden_elements_from_html = login_from_html.xpath( '//form//input[@type="hidden"]') playload = { he.attrib['name']: he.attrib['value'] for he in hidden_elements_from_html } playload['username'] = user playload['password'] = <PASSWORD> conn_session.post(cmems_cas_url, data=playload) if 'CASTGC' not in conn_session.cookies: return False return True def get_viewscript(): """ Ask the user to input the ``VIEW_SCRIPT`` command. Returns ------- view_myscript : str String representing the ``TEMPLATE COMMAND`` generated by the webportal. Example is available at https://tiny.cc/get-viewscript-from-web """ uni_test = [ 'python -m motuclient --motu http', ' '.join([ '--out-dir <OUTPUT_DIRECTORY> --out-name <OUTPUT_FILENAME>', '--user <USERNAME> --pwd <PASSWORD>' ]) ] while True: view_myscript = input( f"[ACTION] Please paste the template command displayed on the webportal:\n" ) if not all([item in view_myscript for item in uni_test]): print( '[ERROR] Cannot parse VIEWSCRIPT. 
' 'Please paste the ``TEMPLATE COMMAND`` as shown in this article: ' 'https://marine.copernicus.eu/faq/' 'how-to-write-and-run-the-script-to-download-' 'cmems-products-through-subset-or-direct-download-mechanisms/?idpage=169' ) else: return view_myscript def viewscript_string_to_dict(viewmy_script): """ Convert the ``VIEW SCRIPT`` string displayed by the webportal to a ``dict``. Parameters ---------- viewmy_script : TYPE DESCRIPTION. Returns ------- vs_dict : TYPE DESCRIPTION. """ vs_dict = dict( [e.strip().partition(" ")[::2] for e in viewmy_script.split('--')]) vs_dict['variable'] = [value for (var, value) in [e.strip().partition(" ")[::2] for e in viewmy_script.split('--')] if var == 'variable'] # pylint: disable=line-too-long vs_dict['abs_geo'] = [ abs(float(vs_dict['longitude-min']) - float(vs_dict['longitude-max'])), abs(float(vs_dict['latitude-min']) - float(vs_dict['latitude-max'])) ] try: vs_dict['abs_depth'] = abs( float(vs_dict['depth-min']) - float(vs_dict['depth-max'])) except KeyError: print(f"[INFO] The {vs_dict['product-id']} is 3D and not 4D:" " it does not contain depth dimension.") if len(vs_dict['date-min']) == 12: dtformat = '%Y-%m-%d' elif len(vs_dict['date-min']) > 12: dtformat = '%Y-%m-%d %H:%M:%S' vs_dict['dt-date-min'] = dt.datetime.strptime(vs_dict['date-min'][1:-1], dtformat) vs_dict['dt-date-max'] = dt.datetime.strptime(vs_dict['date-max'][1:-1], dtformat) if vs_dict['dt-date-max'].day == 1: vs_dict['dt-date-max'] = vs_dict['dt-date-max'] + dt.timedelta(days=1) vs_dict['delta-days'] = vs_dict['dt-date-max'] - vs_dict['dt-date-min'] vs_dict['prefix'] = '_'.join( list((vs_dict['service-id'].split('-')[0]).split('_')[i] for i in [0, -2, -1])) vs_dict['suffix'] = '.nc' if vs_dict['abs_geo'][0] == 0 and vs_dict['abs_geo'][1] == 0: vs_dict['gridpoint'] = 'gridpoint' if '-' in vs_dict['longitude-min']: vs_dict['gridpoint'] = '_'.join([ vs_dict['gridpoint'], vs_dict['longitude-min'].replace(".", "dot").replace("-", "W") ]) else: 
vs_dict['gridpoint'] = '_'.join([ vs_dict['gridpoint'], ''.join(['E', vs_dict['longitude-min'].replace('.', 'dot')]) ]) if '-' in vs_dict['latitude-min']: vs_dict['gridpoint'] = '_'.join([ vs_dict['gridpoint'], vs_dict['latitude-min'].replace(".", "dot").replace("-", "S") ]) else: vs_dict['gridpoint'] = '_'.join([ vs_dict['gridpoint'], ''.join(['N', vs_dict['latitude-min'].replace('.', 'dot')]) ]) if len(vs_dict['variable']) > 6: vs_dict['out_var_name'] = 'several_vars' else: vs_dict['out_var_name'] = '_'.join(vs_dict['variable']) return vs_dict def get_dates_stack(vs_dict, check_stack, size=None, renew=None): """ Update a ``dict`` containing ``VIEW SCRIPT`` values with dates for sub-requests. Parameters ---------- vs_dict : TYPE DESCRIPTION. check_stack : TYPE DESCRIPTION. size : TYPE, optional DESCRIPTION. The default is None. renew : TYPE, optional DESCRIPTION. The default is None. Returns ------- vs_dict : TYPE DESCRIPTION. """ if not size: cmd = 'cmd' else: cmd = 'size' if not renew: date_in = vs_dict['dt-date-min'] else: date_in = renew if check_stack == 'day': vs_dict[f'{cmd}-date-min'] = dt.datetime(date_in.year, date_in.month, date_in.day, 0) vs_dict[f'{cmd}-date-max'] = dt.datetime(date_in.year, date_in.month, date_in.day, 23, 30) vs_dict['format'] = "%Y%m%d" elif check_stack == 'month': vs_dict[f'{cmd}-date-min'] = dt.datetime(date_in.year, date_in.month, 1, 0) vs_dict[f'{cmd}-date-max'] = dt.datetime( date_in.year, date_in.month, calendar.monthrange(date_in.year, date_in.month)[1], 23, 30) vs_dict['format'] = "%Y%m" elif check_stack == 'year': if date_in.year == vs_dict['dt-date-max'].year: vs_dict[f'{cmd}-date-max'] = dt.datetime( date_in.year, vs_dict['dt-date-max'].month, calendar.monthrange(date_in.year, vs_dict['dt-date-max'].month)[1], 23, 30) else: vs_dict[f'{cmd}-date-max'] = dt.datetime(date_in.year, 12, 31, 23, 30) vs_dict[f'{cmd}-date-min'] = dt.datetime(date_in.year, date_in.month, date_in.day, 0) vs_dict['format'] = "%Y" else: print(f'No 
matching stack queries found for: {check_stack}') return vs_dict def viewscript_dict_to_string(size=None, strict=None, cmd=None, **kwargs): """ Convert the ``dict`` containing keys and values of the ``VIEW SCRIPT``, into a string as displayed by the webportal. Parameters ---------- size : TYPE, optional DESCRIPTION. The default is None. strict : TYPE, optional DESCRIPTION. The default is None. cmd : TYPE, optional DESCRIPTION. The default is None. **kwargs : TYPE DESCRIPTION. Returns ------- command : TYPE DESCRIPTION. """ if size: feature = 'size' elif strict: feature = 'dt' elif cmd: feature = 'cmd' vs_string = [] if 'python' in kwargs: vs_string.append(f"python {kwargs['python']}") if 'motu' in kwargs: vs_string.append(f"--motu {kwargs['motu']}") if 'service-id' in kwargs: vs_string.append(f"--service-id {kwargs['service-id']}") if 'product-id' in kwargs: vs_string.append(f"--product-id {kwargs['product-id']}") if 'longitude-min' in kwargs: vs_string.append(f"--longitude-min {kwargs['longitude-min']}") if 'longitude-max' in kwargs: vs_string.append(f"--longitude-max {kwargs['longitude-max']}") if 'latitude-min' in kwargs: vs_string.append(f"--latitude-min {kwargs['latitude-min']}") if 'latitude-max' in kwargs: vs_string.append(f"--latitude-max {kwargs['latitude-max']}") if f'{feature}-date-min' in kwargs: vs_string.append(f"--date-min \"{kwargs[f'{feature}-date-min']}\"") if f'{feature}-date-max' in kwargs: vs_string.append(f"--date-max \"{kwargs[f'{feature}-date-max']}\"") if 'depth-min' in kwargs: vs_string.append(f"--depth-min {kwargs['depth-min']}") if 'depth-max' in kwargs: vs_string.append(f"--depth-max {kwargs['depth-max']}") if 'variable' in kwargs: #if type(kwargs['variable']) == list: if isinstance(kwargs['variable'], list): for var in kwargs['variable']: vs_string.append(f"--variable {var}") # re-written due to pylint #3397 #[vs_string.append(f"--variable {var}") for var in kwargs['variable']] else: vs_string.append(f"--variable {kwargs['variable']}") 
if 'outname' in kwargs: vs_string.append(f"--out-name {kwargs['outname']}") if 'target_directory' in kwargs: vs_string.append(f"--out-dir {kwargs['target_directory']}") command = ' '.join(vs_string) return command def get_data(command=None, user=None, pwd=None, size=None): """ Returns status of binary netCDF file or, if ``size`` is specified, potential result file size, whose units is `kBytes`. Parameters ---------- command : TYPE, optional DESCRIPTION. The default is None. user : TYPE, optional DESCRIPTION. The default is None. pwd : TYPE, optional DESCRIPTION. The default is None. size : TYPE, optional DESCRIPTION. The default is None. Returns ------- returncode : TYPE DESCRIPTION. message : TYPE DESCRIPTION. """ if not user and not pwd: user, pwd = get_credentials() if not command: view_myscript = get_viewscript() command = view_myscript.replace( '--out-dir <OUTPUT_DIRECTORY> --out-name <OUTPUT_FILENAME> ' '--user <USERNAME> --pwd <PASSWORD>', '') msg = '' if size: msg = '--size -o console' get_command = ' '.join([command, msg, '-q -u ', user, ' -p ', pwd]) cmd_rep = get_command.replace(get_command.split(' ')[-1], '****') logging.info("SUBMIT REQUEST: %s", cmd_rep) process = subprocess.Popen(get_command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True) message, _ = process.communicate() returncode = process.returncode return returncode, message def check_data(returncode, message, command=None, user=None, stack=None, size=None): """ Returns ``True`` if status of the submitted request is successful, ``False`` otherwise. Parameters ---------- returncode : TYPE DESCRIPTION. message : TYPE DESCRIPTION. command : TYPE, optional DESCRIPTION. The default is None. user : TYPE, optional DESCRIPTION. The default is None. stack : TYPE, optional DESCRIPTION. The default is None. size : TYPE, optional DESCRIPTION. The default is None. Raises ------ SystemExit Raise an error to exit program at fatal error due to server maintenance. 
Returns ------- valid_check : bool DESCRIPTION. """ valid_check = False if returncode == 0: if b'[ERROR]' in message: logging.error("FAILED REQUEST - raised error:\n %s", message) else: if size: if stack: if b'code="005-0"' in message: valid_check = True elif b'code="005-0"' not in message and b'code="005-7"' in message: # Handling exceptions due to changes in MOTU API from v3.10 to v3.12 try: req_size = convert_size_hr( (float(str(message).split('=')[-1].split('"')[1])) * 1000) except ValueError: req_size = convert_size_hr( (float(str(message).split('=')[4].split('"')[1])) * 1000) treshold_size = convert_size_hr(1.0E8 * 1000) if req_size > treshold_size: token = hashlib.md5( (':'.join([command.rstrip(), user])).encode('utf-8')).hexdigest() token_url = 'https://github.com/copernicusmarine/cmemsapi/blob/master/_transactions' # pylint: disable=line-too-long resp = rq.get(f'{token_url}/{token}') if resp.status_code == 200: valid_check = True else: msg = ( '[ERROR] Your datarequest exceeds max limit set to 100 GiB.\n' '[ACTION] Please contact Support Team at:\n' ' https://marine.copernicus.eu/services-portfolio/contact-us/ \n' # pylint: disable=line-too-long f'[ACTION] And submit a query attaching your logile located here:\n' f' {LOGFILE}.\n' '[INFO] Once it is done and by the next 48 hours, ' 'the Support Team will authorize your request ' 'and send an email to the inbox linked to ' f'the Copernicus Marine Account (username = {user}) ' 'for confirmation and instructions.' ) print(msg) logging.error(msg) else: valid_check = True elif b'code="005-0"' in message: valid_check = True else: logging.info('Request status is successful') print( '[INFO] Server is releasing the token to successfully grant next request. 
' 'It will resume AUTOMATICALLY.\n') time.sleep(5) valid_check = True else: logging.error("FAILED REQUEST - raised error:\n %s", message) print('[WARNING] Failed data request has been logged.\n') if b'HTTP Error 503' in message: print( 'HTTP Error 503 - Service is temporary down. Break for 5 minutes.' ) time.sleep(300) if b'HTTP Error 4' in message: logging.error('Permanent error. Exiting program.') raise SystemExit return valid_check def process_viewscript(target_directory, view_myscript=None, user=None, pwd=<PASSWORD>, forcestack=None): """ Generates as many data requests as required to match initial ``VIEW_SCRIPT``. Parameters ---------- target_directory : str or path DESCRIPTION. view_myscript : str, optional DESCRIPTION. The default is None. user : str, optional DESCRIPTION. The default is None. pwd : str, optional DESCRIPTION. The default is None. forcestack : bool, optional DESCRIPTION. The default is None. Raises ------ ValueError DESCRIPTION. Returns ------- TYPE On success, returns path of the output file matching the ``VIEW_SCRIPT`` data request, ``False`` otherwise. """ split_dict = get_config_constraints() outname = False if not user and not pwd: user, pwd = get_credentials() if not view_myscript: view_myscript = get_viewscript() else: uni_test = [ 'python -m motuclient --motu http', ' '.join([ '--out-dir <OUTPUT_DIRECTORY> --out-name <OUTPUT_FILENAME>', '--user <USERNAME> --pwd <PASSWORD>' ]) ] if not all([item in view_myscript for item in uni_test]): msg = ( '[DEBUG] Cannot parse VIEWSCRIPT. 
' 'Please paste the ``TEMPLATE COMMAND`` as shown in this article: ' 'https://marine.copernicus.eu/faq/' 'how-to-write-and-run-the-script-to-download-' 'cmems-products-through-subset-or-direct-download-mechanisms/?idpage=169' ) raise ValueError(msg) view_script_command = view_myscript.replace( '--out-dir <OUTPUT_DIRECTORY> --out-name <OUTPUT_FILENAME> ' '--user <USERNAME> --pwd <PASSWORD>', '') init_returncode, init_message = get_data(view_script_command, user, pwd, size=True) if not check_data( init_returncode, init_message, view_script_command, user, size=True): return outname vs_dict = viewscript_string_to_dict(view_script_command) vs_dict['target_directory'] = str(target_directory) if not forcestack: for key_r, val_r in split_dict.items(): if any(x in vs_dict['product-id'] for x in val_r.get('pattern', 'Not Found')): for key_s, val_s in omit(split_dict[key_r].items(), 'pattern'): try: check = all([ val_s.get('depth') >= vs_dict['abs_depth'], *([g <= val_s.get('geo') for g in vs_dict['abs_geo']]) ]) except KeyError: check = all([ *([g <= val_s.get('geo') for g in vs_dict['abs_geo']]) ]) if check: check_stack = key_s[:-2] if vs_dict['delta-days'].days < 28: check_stack = 'day' vs_dict = get_dates_stack(vs_dict, check_stack, size=True) command_size = viewscript_dict_to_string(size=True, **vs_dict) returncode, message = get_data(command_size, user, pwd, size=True) if check_data(returncode, message, stack=check_stack, size=True): stack = check_stack break else: stack = forcestack try: msg = (f'[INFO] Data requests will be submitted by ' f'{stack} stacks.') except NameError: stack = 'day' msg = ('[WARNING] No matching temporal resolution. ' f'To be coded using CSW. Stack is set to {stack}.') print(msg) print('\n+------------------------------------+\n| ! 
- CONNECTION TO CMEMS' 'HUB - OPEN |\n+------------------------------------+\n\n') for retry in range(1, 4): retry_flag = False date_start = vs_dict['dt-date-min'] date_end = vs_dict['dt-date-max'] vs_dict = get_dates_stack(vs_dict, stack) while date_start <= date_end: date_end_format = vs_dict['cmd-date-max'].strftime( vs_dict['format']) try: vs_dict['outname'] = '-'.join([ 'CMEMS', vs_dict['prefix'], vs_dict['gridpoint'], vs_dict['out_var_name'], date_end_format + vs_dict['suffix'] ]) except KeyError: vs_dict['outname'] = '-'.join([ 'CMEMS', vs_dict['prefix'], vs_dict['out_var_name'], date_end_format + vs_dict['suffix'] ]) command = viewscript_dict_to_string(cmd=True, **vs_dict) outname = vs_dict['outname'] print( '\n----------------------------------\n' '- ! - Processing dataset request : ' f"{outname}\n----------------------------------\n") if not Path(target_directory / outname).exists(): print('## MOTU API COMMAND ##') print(command.replace(user, '*****').replace(pwd, '*****')) print( '\n[INFO] New data request has been submitted to Copernicus' 'Marine Servers. ' 'If successful, it will extract the data and create your' ' dataset on the fly. Please wait. \n') returncode, message = get_data(command, user, pwd) if check_data(returncode, message): print('[INFO] The dataset for {} has been stored in {}.'. format(outname, target_directory)) else: retry_flag = True else: print(f"[INFO] The dataset for {outname} " f"has already been downloaded in {target_directory}\n") date_start = vs_dict['cmd-date-max'] + dt.timedelta(days=1) vs_dict = get_dates_stack(vs_dict, stack, renew=date_start) if not retry_flag: break print("+-------------------------------------+\n| ! - CONNECTION TO CMEMS " "HUB - CLOSE |\n+-------------------------------------+\n") with open(LOGFILE) as logfile: if retry == 3 and 'ERROR' in logfile.read(): print("## YOUR ATTENTION IS REQUIRED ##") print(f'Some download requests failed, though {retry} retries. 
' f'Please see recommendation in {LOGFILE})')
            print('TIPS: you can also apply hereafter recommendations.'
                  '\n1. Do not move netCDF files'
                  '\n2. Double check if a change must be done in the '
                  'viewscript, FTR it is currently set to:\n')
            print(view_myscript)
            print(
                '\n3. Check there is not an ongoing maintenance by looking '
                'at the User Notification Service and Systems & Products Status:\n',
                'https://marine.copernicus.eu/services-portfolio/news-flash/'
                '\n4. Then, if relevant, do relaunch manually this python '
                'script to automatically download only failed data request(s)'
                '\n5. Finally, feel free to contact our Support Team either:'
                '\n - By mail: <EMAIL> or \n - '
                'By using the webform: '
                'https://marine.copernicus.eu/services-portfolio/contact-us/'
                ' or \n - By leaving a post on the forum:'
                ' https://forum.marine.copernicus.eu\n\n')
            outname = False
        return outname


def convert_size_hr(size_in_bytes):
    """
    Convert a size in bytes to a human-readable ``(value, unit)`` pair.

    Parameters
    ----------
    size_in_bytes : int
        Size in bytes.

    Returns
    -------
    tuple of (float, str)
        Size rounded to 2 decimals and its binary unit ('B' up to 'TiB').
        Note the special case: returns the single string '0 Byte' when
        ``size_in_bytes`` is 0 (callers unpacking two values must guard).
    """
    if size_in_bytes == 0:
        return '0 Byte'
    size_standard = ('B', 'KiB', 'MiB', 'GiB', 'TiB')
    # log base 1024 selects the binary unit tier (KiB, MiB, ...).
    integer = int(math.floor(math.log(size_in_bytes, 1_024)))
    powmath = math.pow(1_024, integer)
    precision = 2
    size = round(size_in_bytes / powmath, precision)
    return size, size_standard[integer]


def get_disk_stat(drive=None):
    """
    Return disk usage statistics for ``drive``.

    Parameters
    ----------
    drive : str, optional
        Mount point to inspect. The default is the root ('/').

    Returns
    -------
    disk_stat : list of int
        ``[total, used, free]`` sizes in bytes, from ``shutil.disk_usage``.
    """
    if not drive:
        drive = '/'
    disk_stat = list(shutil.disk_usage(drive))
    return disk_stat


def get_file_size(files):
    """
    Sum the in-memory size of netCDF file(s), in bytes.

    Parameters
    ----------
    files : iterable of str or pathlib.Path
        netCDF files to measure.

    Returns
    -------
    mds_size : int
        Total size in bytes as reported by ``xarray.Dataset.nbytes``
        (decoded in-memory size, not size on disk).
    """
    mds_size = 0
    for file in files:
        # decode_cf=False: size is assessed on raw variables, no CF decoding.
        with xr.open_dataset(file, decode_cf=False) as sds:
            mds_size = mds_size + sds.nbytes
    return mds_size


def check_file_size(mds_size, default_nc_size=None):
    """
    Check whether a dataset of ``mds_size`` bytes may be written as one file.

    Displays disk statistics and, when the size exceeds ``default_nc_size``
    (16 GB by default), asks the user for confirmation.

    Parameters
    ----------
    mds_size : int
        Dataset size in bytes.
    default_nc_size : int, optional
        Maximum file size, in bytes, accepted without asking the user.
        The default is 16_000_000_000.

    Returns
    -------
    check_fs : bool
        True when it is safe (or confirmed by the user) to write a
        single output file.
    """
    if not default_nc_size:
        default_nc_size = 16_000_000_000
    check_fs = False
    size, unit = display_disk_stat(mds_size)
    if mds_size == 0:
        print(f'[ERROR-NETCDF] There is an error to assess the size of netCDF '
              'file(s). Please check if data are not corrupted.')
    elif size == 0:
        # display_disk_stat returns (0, 'B') when the write would not fit.
        print(f'[ERROR] Program exit.')
    elif mds_size > default_nc_size:
        print(f'[INFO-NETCDF] The size of the netCDF file would be higher than'
              ' 16 GiB.')
        force = query(
            f'[ACTION-NETCDF] Do you still want to create the netCDF file of '
            f'{BOLD}size {size} {unit}{END}?', 'no')
        if not force:
            print('[ERROR-NETCDF] Writing to disk action has been aborted by '
                  'user due to file size issue.')
            print('[INFO-NETCDF] The script will try to write several netCDF '
                  'files with lower file size.')
        else:
            check_fs = True
    else:
        check_fs = True
    return check_fs


def display_disk_stat(mds_size):
    """
    Display hard drive statistics to user.

    Parameters
    ----------
    mds_size : TYPE
        DESCRIPTION.

    Returns
    -------
    mds_size_hr : TYPE
        DESCRIPTION.
""" disk_stat = get_disk_stat() free_after = disk_stat[2] - mds_size disk_stat.append(free_after) disk_stat.append(mds_size) try: total_hr, used_hr, free_hr, free_after_hr, mds_size_hr = [ convert_size_hr(item) for item in disk_stat ] except ValueError as error: msg = f"[WARNING] Operation shall be aborted to avoid NO SPACE LEFT ON\ DEVICE error: {error}" mds_size_hr = (0, 'B') else: space = '-' * 37 msg = ''.join( (f"[INFO] {space}\n", f"[INFO] Total Disk Space (before operation) :" f" {total_hr[1]} {total_hr[0]} \n", f"[INFO] Used Disk Space (before operation) :" f" {used_hr[1]} {used_hr[0]} \n", f"[INFO] Free Disk Space (before operation) :" f" {free_hr[1]} {free_hr[0]} \n", f"[INFO] Operation to save dataset to Disk :" f" {mds_size_hr[1]} {mds_size_hr[0]} \n", f"[INFO] Free Disk Space (after operation) :" f" {free_after_hr[1]} {free_after_hr[0]} \n", f"[INFO] {space}")) print(''.join(("[INFO] CHECK DISK STATISTICS\n", msg))) return mds_size_hr def get_file_pattern(outname, sep='-', rem=-1, advanced=True): """ Retrieve a ``file_pattern`` from a filename and advanced regex. Parameters ---------- outname : str Filename from which a pattern must be extracted. sep : str, optional Separator. The default is '-'. rem : TYPE, optional Removal parts. The default is -1. advanced : TYPE, optional Advanced regex. The default is True. Returns ------- file_pattern : str The ``file_pattern`` extracted from ``filename``. """ if 'pathlib' in str(type(outname)): outname = outname.name if advanced: file_pattern = outname.replace(outname.split(sep)[rem], '')[:-1] else: # To be coded pass return file_pattern def get_years(ncfiles, sep='-'): """ Retrieve a list of years from a list of netCDF filenames. Parameters ---------- ncfiles : list List of filenames from which years will be extracted. sep : TYPE, optional Separator. The default is '-'. Returns ------- years : set List of years. 
""" years = set([str(f).split(sep)[-1][:4] for f in ncfiles]) return years def get_ncfiles(target_directory, file_pattern=None, year=None): """ Retrieve list of files, based on parameters. Parameters ---------- target_directory : str DESCRIPTION. file_pattern : TYPE, optional DESCRIPTION. The default is None. year : TYPE, optional DESCRIPTION. The default is None. Returns ------- ncfiles : list List of strings containing absolute path to files. """ if 'str' in str(type(target_directory)): target_directory = Path(target_directory) if file_pattern and year: ncfiles = list(target_directory.glob(f'{file_pattern}*{year}*.nc')) elif file_pattern and not year: ncfiles = list(target_directory.glob(f'*{file_pattern}*.nc')) elif year and not file_pattern: ncfiles = list(target_directory.glob(f'*{year}*.nc')) else: ncfiles = list(target_directory.glob('*.nc')) return ncfiles def set_outputfile(file_pattern, target_directory, target_out_directory=None, start_year=None, end_year=None): """ Set output filename based on variables. Parameters ---------- file_pattern : TYPE DESCRIPTION. target_directory : TYPE DESCRIPTION. target_out_directory : TYPE, optional DESCRIPTION. The default is None. start_year : TYPE, optional DESCRIPTION. The default is None. end_year : TYPE, optional DESCRIPTION. The default is None. Returns ------- outputfile : TYPE DESCRIPTION. """ if not target_out_directory: target_out_directory = Path(target_directory.parent, 'copernicus-processed-data') elif 'str' in str(type(target_out_directory)): target_out_directory = Path(target_out_directory) if not target_out_directory.exists(): target_out_directory.mkdir(parents=True) if start_year == end_year or not end_year: outputfile = target_out_directory / f'{file_pattern}-{start_year}.nc' else: outputfile = target_out_directory / \ f'{file_pattern}-{start_year}_{end_year}.nc' return outputfile def over_write(outputfile): """ Ask user if overwrite action should be performed. 
    Parameters
    ----------
    outputfile : TYPE
        DESCRIPTION.

    Returns
    -------
    ow : TYPE
        DESCRIPTION.

    """
    # Default to "yes, write" when the file does not exist yet.
    ok_overwrite = True
    if outputfile.exists():
        ok_overwrite = query(
            f'[ACTION] The file {outputfile} already exists. Do you want '
            f'{BOLD}to overwrite{END} it?', 'no')
    return ok_overwrite


def del_ncfiles(ncfiles):
    """
    Delete intermediate netCDF files from disk.

    Parameters
    ----------
    ncfiles : iterable of pathlib.Path
        Files to delete. Failures are reported but do not stop the loop.

    Returns
    -------
    bool
        Always True.
    """
    for fnc in ncfiles:
        try:
            fnc.unlink()
        except OSError as error:
            print(f'[ERROR]: {fnc} : {error.strerror}')
    print(
        '[INFO-NETCDF] All inputs netCDF files have been successfully deleted.'
    )
    return True


def to_nc4(mds, outputfile):
    """
    Convert file(s) to one single netCDF-4 file, based on computer limits.

    Parameters
    ----------
    mds : xarray.Dataset, str or list
        Dataset to write, or netCDF path(s) to open with
        ``xr.open_mfdataset`` first.
    outputfile : str or pathlib.Path
        Destination netCDF-4 file.

    Returns
    -------
    nc4 : bool
        True when the file was successfully written.
    """
    if 'xarray.core.dataset.Dataset' not in str(type(mds)):
        mds = xr.open_mfdataset(mds, combine='by_coords')
    if 'str' in str(type(outputfile)):
        outputfile = Path(outputfile)
    # Enable light zlib compression (level 1) on every data variable.
    prepare_encoding = {}
    for variable in mds.data_vars:
        prepare_encoding[variable] = mds[variable].encoding
        prepare_encoding[variable]['zlib'] = True
        prepare_encoding[variable]['complevel'] = 1
    encoding = {}
    for key_encod, var_encod in prepare_encoding.items():
        # 'coordinates' keys are filtered out of the per-variable encoding
        # before writing — presumably rejected by to_netcdf; verify.
        encoding.update({
            key_encod: {
                key: value
                for key, value in var_encod.items() if key != 'coordinates'
            }
        })
    try:
        mds.to_netcdf(path=outputfile,
                      mode='w',
                      engine='netcdf4',
                      encoding=encoding)
    except ValueError as error:
        print(
            f'[INFO-NETCDF] Convertion initialized but ended in error due to : {error}'
        )
        nc4 = False
    else:
        real_file_size = convert_size_hr(outputfile.stat().st_size)
        space = '-' * 20
        msg = ''.join((f"[INFO] {space}\n",
                       f"[INFO-NETCDF] Output file :"
                       f" {str(outputfile)}\n",
                       f"[INFO-NETCDF] File format : netCDF-4\n",
                       f"[INFO-NETCDF] File size : {real_file_size[0]}"
                       f" {real_file_size[1]}\n",
                       f"[INFO] {space}"))
        print(''.join(("[INFO] CONVERTING TO NETCDF4\n", msg)))
        nc4 = True
    return nc4


def to_csv(mds,
           outputfile):
    """
    Convert file(s) to one single csv file, based on computer limits.

    The user is asked for confirmation when the total number of rows
    exceeds the MS Excel sheet limit (1,048,576 rows).

    Parameters
    ----------
    mds : xarray.Dataset, str or list
        Dataset to write, or netCDF path(s) to open with
        ``xr.open_mfdataset`` first.
    outputfile : str or pathlib.Path
        Destination file; its suffix is forced to ``.csv``.

    Returns
    -------
    csv : bool
        True when the CSV file was successfully written.
    """
    if 'xarray.core.dataset.Dataset' not in str(type(mds)):
        mds = xr.open_mfdataset(mds, combine='by_coords')
    if 'str' in str(type(outputfile)):
        outputfile = Path(outputfile)
    msg2 = 'please contact support at: https://marine.copernicus.eu/services-portfolio/contact-us/'
    csv = False
    force = False
    ms_excel_row_limit = 1_048_576
    # Product of all coordinate lengths = number of rows in the dataframe.
    nb_grid_pts = reduce((lambda x, y: x * y),
                         list([len(mds[c]) for c in mds.coords]))
    if nb_grid_pts > ms_excel_row_limit:
        print(f'[INFO-CSV] The total number of rows exceeds MS Excel limit.'
              f' It is {BOLD}NOT recommended{END} to continue.')
        force = query(
            f'[ACTION-CSV] Do you still want to create this CSV file with'
            f' {BOLD}{nb_grid_pts} rows{END} (though most computers will run out of memory)?',
            'no')
    if nb_grid_pts < ms_excel_row_limit or force:
        try:
            dataframe = mds.to_dataframe().reset_index().dropna()
            outputfile = outputfile.with_suffix('.csv')
            dataframe.to_csv(outputfile, index=False)
        except IOError:
            print(f'[INFO-CSV] Convertion initialized but ended in error.')
        else:
            space = '-' * 18
            msg = ''.join(
                (f"[INFO] {space}\n",
                 f"[INFO-CSV] Output file :"
                 f" {str(outputfile)}\n",
                 f"[INFO-CSV] File format : Comma-Separated Values\n",
                 f"[INFO-CSV] Preview Stat:\n {dataframe.describe()}\n",
                 f"[INFO] {space}"))
            print(''.join(("[INFO] CONVERTING TO CSV\n", msg)))
            csv = True
    else:
        print('[WARNING-CSV] Writing to disk action has been aborted by user '
              f'due to very high number of rows ({nb_grid_pts}) exceeding most '
              'computers and softwares limits (such as MS Excel).')
        print(' '.join(
            ('[INFO-CSV] A new function is under beta-version to handle '
             'this use case automatically.\n'
             '[ACTION-CSV] Usage:\n'
             'cmemstb to_mfcsv PATH_TO_NETCDF_DIRECTORY PATH_TO_OUTPUT_DIRECTORY\n'
             '[INFO-CSV] To upvote this feature,', msg2)))
    try:
        mds.close()
        del mds
    except NameError:
        print(''.join(('[DEBUG] ', msg2)))
    return csv


def to_mfcsv(input_directory, output_directory, max_depth_level=None):
    """
    Convert netcdf file(s) to multiple csv files, based on MS Excel Limits.

    One CSV file is produced per time step (and per depth level when the
    dataset has a ``depth`` coordinate), skipping all-NaN slices and files
    that already exist.

    Parameters
    ----------
    input_directory : xarray.Dataset, str or pathlib.Path
        Dataset, glob string, or directory containing ``.nc`` files.
    output_directory : str or pathlib.Path
        Directory for the generated CSV files; created if missing.
    max_depth_level : int, optional
        Highest depth index to export (inclusive). The default is None,
        meaning all depth levels.

    Returns
    -------
    mfcsv : bool
        True when the conversion loop completed.
    """
    mfcsv = False
    if isinstance(input_directory, xr.Dataset):
        mds = input_directory
    else:
        try:
            # Either a string glob in the form "path/to/my/files/*.nc"
            # or an explicit list of files to open.
            mds = xr.open_mfdataset(input_directory, combine='by_coords')
        except Exception:
            input_directory = Path(input_directory)
            mds = xr.open_mfdataset(
                [str(item) for item in list(input_directory.glob('*.nc'))],
                combine='by_coords')
    if isinstance(output_directory, str):
        output_directory = Path(output_directory)
    try:
        if not output_directory.exists():
            output_directory.mkdir(parents=True)
            print(f'[INFO] Directory successfully created : {output_directory}.')
    except Exception as exception:
        print(f"[ERROR] Failed to create directory due to {str(exception)}.")
    ms_excel_row_limit = 1_048_576
    space = '-' * 17
    # Product of all coordinate lengths = number of rows for a single CSV.
    nb_grid_pts = reduce((lambda x, y: x * y),
                         list([len(mds[c]) for c in mds.coords]))
    if nb_grid_pts > ms_excel_row_limit:
        print(f"[INFO] The total number of rows for a single CSV file exceeds MS Excel limit.")
        # NOTE(review): only the first data variable is checked for NaN
        # slices below — confirm this is intended for multi-variable sets.
        variable_name = list(mds.data_vars.keys())[0]
        try:
            # Accessing mds.depth raises AttributeError for 2D datasets,
            # handled by the depth-less branch below.
            depth = len(mds.depth)
            if max_depth_level is None:
                depth = len(mds.depth)
            elif max_depth_level < 0:
                print(f"[ERROR] Maximum depth level must be a positive index"
                      f" from 0 to {len(mds.depth)}")
                return mfcsv
            elif max_depth_level >= 0:
                depth = max_depth_level
            print(f"[INFO] As a consequence, the total number of CSV files "
                  f"to be generated is: {len(mds.time) * (depth + 1)}")
            for t in range(len(mds.time)):
                for d in range(len(mds.depth)):
                    if d > depth:
                        break
                    DF = mds.isel(depth=d, time=t).to_dataframe()
                    # Skip time/depth slices that contain no valid values.
                    if not DF[variable_name].dropna().empty:
                        t_format = pd.to_datetime(
                            str(DF['time'].values[0])).strftime("%Y%m%d")
                        v_format = '_'.join([
                            DF[column].name for column in DF
                            if column not in ['lon', 'lat', 'longitude',
                                              'latitude', 'depth', 'time']
                        ])
                        try:
                            gb_format = '_'.join([
                                str(len(mds[lonlat])) for lonlat in mds.coords
                                if lonlat not in ['depth', 'time']
                            ])
                        except Exception as exception:
                            print(f"[ERROR] Failed to set boundingbox: {str(exception)}")
                            # Fall back to a filename without the gridbox part.
                            output_filename = f'CMEMS-time_{t_format}-depth_{d}-{v_format}.csv'
                        else:
                            output_filename = f'CMEMS-gridbox_{gb_format}-time_{t_format}-depth_{d}-{v_format}.csv'
                        finally:
                            output_fpath = output_directory / output_filename
                            if not output_fpath.exists():
                                try:
                                    DF.dropna().to_csv(output_fpath)
                                except Exception as exception:
                                    print(f"[ERROR] Failed to write to disk: {repr(exception)}.")
                                else:
                                    msg = ''.join(
                                        (f"[INFO] {space}\n",
                                         f"[INFO-CSV] Output file :"
                                         f" {output_fpath}\n",
                                         f"[INFO-CSV] File format : Comma-Separated Values\n",
                                         f"[INFO-CSV] Preview Stat:\n {DF.dropna().describe()}\n",
                                         f"[INFO] {space}"))
                                    print(''.join(("[INFO] CONVERTING TO CSV\n", msg)))
                            else:
                                print(f"[INFO] The CSV file {output_filename} already exists"
                                      f" in {output_directory.absolute()}.")
        except AttributeError:
            # Dataset has no depth coordinate: one CSV per time step only.
            print("[INFO] As a consequence, the total number of CSV files "
                  f"to be generated is: {len(mds.time)}")
            for t in range(len(mds.time)):
                DF = mds.isel(time=t).to_dataframe()
                if not DF[variable_name].dropna().empty:
                    t_format = pd.to_datetime(
                        str(DF['time'].values[0])).strftime("%Y%m%d")
                    v_format = '_'.join([
                        DF[column].name for column in DF
                        if column not in ['lon', 'lat', 'longitude',
                                          'latitude', 'time']
                    ])
                    try:
                        gb_format = '_'.join([
                            str(len(mds[lonlat])) for lonlat in mds.coords
                            if lonlat not in ['depth', 'time']
                        ])
                    except Exception as exception:
                        print(f"[ERROR] Failed to set boundingbox: {str(exception)}")
                        output_filename = f'CMEMS-time_{t_format}-{v_format}.csv'
                    else:
                        output_filename = f'CMEMS-gridbox_{gb_format}-time_{t_format}-{v_format}.csv'
                    finally:
                        output_fpath = output_directory / output_filename
                        if not output_fpath.exists():
                            try:
                                DF.dropna().to_csv(output_fpath)
                            except Exception as exception:
                                print(f"[ERROR] Failed to write to disk: {repr(exception)}.")
                            else:
                                msg = ''.join(
                                    (f"[INFO] {space}\n",
                                     f"[INFO-CSV] Output file :"
                                     f" {output_fpath}\n",
                                     f"[INFO-CSV] File format : Comma-Separated Values\n",
                                     f"[INFO-CSV] Preview Stat:\n {DF.dropna().describe()}\n",
                                     f"[INFO] {space}"))
                                print(''.join(("[INFO] CONVERTING TO CSV\n", msg)))
                        else:
                            print(f"[INFO] The CSV file {output_filename} already exists"
                                  f" in {output_directory.absolute()}.")
    mfcsv = True
    return mfcsv


def to_nc4_csv(ncfiles, outputfile, skip_csv=False, default_nc_size=None):
    """
    Convert file(s) to both netCDF-4 and csv files, based on computer limits.

    Parameters
    ----------
    ncfiles : list
        Input netCDF files to merge and convert.
    outputfile : pathlib.Path
        Destination file; ``.csv`` suffix is derived from it for the CSV.
    skip_csv : bool, optional
        Skip the CSV conversion. The default is False.
    default_nc_size : int, optional
        Maximum netCDF size in bytes accepted without asking the user.
        The default is 16_000_000_000.

    Returns
    -------
    nc4 : bool
        True when the netCDF-4 file was written.
    csv : bool
        True when the CSV file was written.
    check_ow : bool
        User's overwrite decision for the netCDF output.
    """
    nc4 = False
    csv = False
    if not default_nc_size:
        default_nc_size = 16_000_000_000
    mds_size = get_file_size(ncfiles)
    check_fs = check_file_size(mds_size, default_nc_size)
    check_ow = over_write(outputfile)
    check_ow_csv = over_write(outputfile.with_suffix('.csv'))
    if check_ow and check_fs:
        with xr.open_mfdataset(ncfiles, combine='by_coords') as mds:
            nc4 = to_nc4(mds, outputfile)
    elif not check_ow:
        print('[WARNING-NETCDF] Writing to disk action has been aborted by '
              'user due to already existing file.')
    elif not check_fs:
        # File too big for a single output: the CSV would be too big too.
        skip_csv = True
    if check_ow_csv and not skip_csv:
        with xr.open_mfdataset(ncfiles, combine='by_coords') as mds:
            csv = to_csv(mds, outputfile)
    return nc4, csv, check_ow


def post_processing(outname,
                    target_directory,
                    target_out_directory=None,
                    delete_files=True):
    """
    Post-process the data already located on disk.
    Concatenate a complete timerange in a single netCDF-4 file, or if not
    possible, stack periods on minimum netCDF-4 files (either by year or
    by month). There is a possibility to delete old files to save space,
    thanks to convertion from nc3 to nc4 and to convert to `CSV`,
    if technically feasible.

    Parameters
    ----------
    outname : str or pathlib.Path
        Filename from which the file pattern is derived.
    target_directory : pathlib.Path
        Directory holding the downloaded netCDF files.
    target_out_directory : str or pathlib.Path, optional
        Output directory for processed files. The default is None.
    delete_files : bool, optional
        Delete input files after a successful netCDF-4 merge.
        The default is True.

    Raises
    ------
    SystemExit
        When no file pattern can be derived from ``outname``.

    Returns
    -------
    processing : bool
        True when post-processing completed.

    See Also
    --------
    get_file_pattern : called from this method
    get_ncfiles : called from this method
    get_years : called from this method
    set_outputfile : called from this method
    to_nc4_csv : called from this method
    del_ncfiles : called from this method
    """
    processing = False
    try:
        file_pattern = get_file_pattern(outname)
    except AttributeError:
        # outname is False/None when the download step failed upstream.
        print(f'[ERROR] Program exits due to fatal error. There is no need '
              'to re-run this script if no action has been taken from user side.')
        raise SystemExit
    sel_files = get_ncfiles(target_directory, file_pattern)
    years = get_years(sel_files)
    try:
        single_outputfile = set_outputfile(file_pattern,
                                           target_directory,
                                           target_out_directory,
                                           start_year=min(years),
                                           end_year=max(years))
    except ValueError as error:
        print(
            f'[ERROR] Processing failed due to no file matching pattern : {error}'
        )
    else:
        # First try a single file covering the whole timerange; fall back
        # to one output file per year when that is refused or too big.
        nc4, csv, ow_choice = to_nc4_csv(sel_files, single_outputfile)
        if not nc4 and not csv and ow_choice:
            for year in years:
                # NOTE(review): debug print left in place.
                print(year)
                ncfiles = get_ncfiles(target_directory, file_pattern, year)
                outfilemerged = set_outputfile(file_pattern,
                                               target_directory,
                                               target_out_directory,
                                               start_year=year)
                nc4, csv, ow_choice = to_nc4_csv(ncfiles, outfilemerged)
        if all([delete_files, nc4]):
            del_ncfiles(sel_files)
        processing = True
    return processing


def get(local_storage_directory=None,
        target_out_directory=None,
        view_myscript=None,
        user=None,
        pwd=<PASSWORD>,
        forcestack=False,
        delete_files=True):
    """Download and post-process files to both compressed and tabular formats,
    if applicable.

    Download as many subsets of dataset required
    to fulfill an initial data request based on a template
    command, called ``VIEW SCRIPT`` generated by Copernicus Marine website
    (https://marine.copernicus.eu).
    Then, all files are post-processed locally.
    e.g to concatenate in a single file,
    to save space (thanks to nc3 -> nc4),
    to convert to ``CSV`` (if technically possible),
    and to delete old files.
    End-user is guided throughout the process
    if no parameter is declared.
    To get started, this function is the main entry point.

    Parameters
    ----------
    local_storage_directory : str or pathlib.Path, optional
        Root directory where downloaded data are stored. The default is None.
    target_out_directory : str or pathlib.Path, optional
        Directory for post-processed outputs. The default is None.
    view_myscript : str, optional
        ``VIEW SCRIPT`` template command. The default is None (interactive).
    user : str, optional
        Copernicus Marine username. The default is None.
    pwd : str, optional
        Copernicus Marine password. The default is None.
    forcestack : bool, optional
        Force stacked (per-period) downloads. The default is False.
    delete_files : bool, optional
        Delete intermediate files after post-processing. The default is True.

    Returns
    -------
    True.

    See Also
    --------
    process_viewscript : Method to parse `VIEW SCRIPT`
    post_processing : Method to convert downloaded data to other format

    Examples
    --------
    Ex 1. Let the user be guided by the script with interactive questions:

    >>> cmemstb get

    Ex 2. Get data matching a ``VIEW SCRIPT`` template command passed as `parameter`:

    >>> cmemstb get --view_myscript="python -m motuclient --motu https://nrt.cmems-du.eu/motu-web/Motu --service-id GLOBAL_ANALYSIS_FORECAST_PHY_001_024-TDS --product-id global-analysis-forecast-phy-001-024 --longitude-min -20 --longitude-max 45 --latitude-min 25 --latitude-max 72 --date-min \\"2019-08-18 12:00:00\\" --date-max \\"2020-08-31 12:00:00\\" --depth-min 0.493 --depth-max 0.4942 --variable thetao --out-dir <OUTPUT_DIRECTORY> --out-name <OUTPUT_FILENAME> --user <USERNAME> --pwd <PASSWORD>"

    Notes
    -----
    For Windows Operating System Users and when using the ``--view_myscript``
    as parameter, you might want to double check that ``double quote`` around
    dates are well escaped (see above example).
    """
    target_directory = set_target_directory(local_storage_directory)
    outname = process_viewscript(target_directory=target_directory,
                                 view_myscript=view_myscript,
                                 user=user,
                                 pwd=<PASSWORD>,
                                 forcestack=forcestack)
    post_processing(outname=outname,
                    target_directory=target_directory,
                    target_out_directory=target_out_directory,
                    delete_files=delete_files)
    return True


def cli():
    """
    Method to enable Command Line Interface and to expose only useful
    method for beginners.

    Returns
    -------
    None.
    """
    fire.Fire({
        'display_disk_stat': display_disk_stat,
        'get': get,
        'get_credentials': get_credentials,
        'get_data': get_data,
        'get_file_pattern': get_file_pattern,
        'get_ncfiles': get_ncfiles,
        'post_processing': post_processing,
        'process_viewscript': process_viewscript,
        'set_target_directory': set_target_directory,
        'to_nc4_csv': to_nc4_csv,
        'to_nc4': to_nc4,
        'to_csv': to_csv,
        'to_mfcsv': to_mfcsv
    })


# + id="k9RgDOBnx-Ph"
cmemsapi_functions.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# + [markdown] id="oFIozaUExVEL"
# # Mount the drive & download required package
# This notebook was made for Colab usage. If running locally, the next cell can be omitted.

# +
# !pip install dgl-cu101
# !pip install scikit-optimize
# !pip install boto3

from google.colab import drive
drive.mount('/content/drive')
import sys
sys.path.append('/content/drive/My Drive/Code/')
# %cd /content/drive/My\ Drive/Code/

from torch.multiprocessing import Pool, Process, set_start_method
try:
    # NOTE(review): 'spawn' is presumably required for CUDA tensors shared
    # across workers — confirm. RuntimeError is ignored because the start
    # method may already have been set by the runtime.
    set_start_method('spawn')
except RuntimeError:
    pass

# + [markdown] id="0vYgFk5KxHBJ"
# # Use case 1 : Hyperparametrization
# -

# !python main.py --from_beginning -v --visualization --check_embedding --remove 0.85 --num_epochs 100 --patience 5 --edge_batch_size 1024 --item_id_type 'ITEM IDENTIFIER' --duplicates 'keep_all'

# + [markdown] id="BHekS5cQxjGZ"
# # Use case 2 : Full training
# -

# !python main_train.py --fixed_params_path test/fixed_params_example.pkl --params_path test/params_example.pkl --visualization --check_embedding --remove .85 --edge_batch_size 512

# + [markdown] id="vZeLHdtTxjfT"
# # Use case 3 : Inference

# + [markdown] id="eGkv8ffZ4y26"
# ## 3.1 : Specific users, creating the graph
# -

# !python main_inference.py --params_path test/final_params_example.pkl --user_ids 123456 \
# --user_ids 654321 --user_ids 999 \
# --trained_model_path test/final_model_trained_example.pth --k 10 --remove .99

# + [markdown] id="qlZ-rbWW46Ue"
# ## 3.2 : All users, importing the graph

# + pycharm={"name": "#%%\n"}
# !python main_inference.py --params_path test/final_params_example.pkl \
# --user_ids all --use_saved_graph --graph_path test/final_graph_example.bin --ctm_id_path test/final_ctm_id_example.pkl \
# --pdt_id_path test/final_pdt_id_example.pkl --trained_model_path test/final_model_trained_example.pth \
# --k 10 --remove 0
src/recommendation/UseCases.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # name: python3 # --- # + [markdown] id="tqrD7Yzlmlsk" # ##### Copyright 2019 The TensorFlow Authors. # + cellView="form" id="2k8X1C1nmpKv" #@title Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # + [markdown] id="32xflLc4NTx-" # # Custom Federated Algorithms, Part 2: Implementing Federated Averaging # + [markdown] id="jtATV6DlqPs0" # <table class="tfo-notebook-buttons" align="left"> # <td> # <a target="_blank" href="https://www.tensorflow.org/federated/tutorials/custom_federated_algorithms_2"><img src="https://www.tensorflow.org/images/tf_logo_32px.png" />View on TensorFlow.org</a> # </td> # <td> # <a target="_blank" href="https://colab.research.google.com/github/tensorflow/federated/blob/main/docs/tutorials/custom_federated_algorithms_2.ipynb"><img src="https://www.tensorflow.org/images/colab_logo_32px.png" />Run in Google Colab</a> # </td> # <td> # <a target="_blank" href="https://github.com/tensorflow/federated/blob/main/docs/tutorials/custom_federated_algorithms_2.ipynb"><img src="https://www.tensorflow.org/images/GitHub-Mark-32px.png" />View source on GitHub</a> # </td> # <td> # <a href="https://storage.googleapis.com/tensorflow_docs/federated/docs/tutorials/custom_federated_algorithms_2.ipynb"><img src="https://www.tensorflow.org/images/download_logo_32px.png" />Download notebook</a> # </td> # 
</table> # + [markdown] id="_igJ2sfaNWS8" # This tutorial is the second part of a two-part series that demonstrates how to # implement custom types of federated algorithms in TFF using the # [Federated Core (FC)](../federated_core.md), which serves as a foundation for # the [Federated Learning (FL)](../federated_learning.md) layer (`tff.learning`). # # We encourage you to first read the # [first part of this series](custom_federated_algorithms_1.ipynb), which # introduce some of the key concepts and programming abstractions used here. # # This second part of the series uses the mechanisms introduced in the first part # to implement a simple version of federated training and evaluation algorithms. # # We encourage you to review the # [image classification](federated_learning_for_image_classification.ipynb) and # [text generation](federated_learning_for_text_generation.ipynb) tutorials for a # higher-level and more gentle introduction to TFF's Federated Learning APIs, as # they will help you put the concepts we describe here in context. # + [markdown] id="cuJuLEh2TfZG" # ## Before we start # # Before we start, try to run the following "Hello World" example to make sure # your environment is correctly setup. If it doesn't work, please refer to the # [Installation](../install.md) guide for instructions. # + id="rB1ovcX1mBxQ" #@test {"skip": true} # !pip install --quiet --upgrade tensorflow-federated # !pip install --quiet --upgrade nest-asyncio import nest_asyncio nest_asyncio.apply() # + id="-skNC6aovM46" import collections import numpy as np import tensorflow as tf import tensorflow_federated as tff # Must use the Python context because it # supports tff.sequence_* intrinsics. 

executor_factory = tff.framework.local_executor_factory(
    support_sequence_ops=True)
execution_context = tff.framework.ExecutionContext(
    executor_fn=executor_factory)
tff.framework.set_default_context(execution_context)

# + id="zzXwGnZamIMM"
@tff.federated_computation
def hello_world():
  """Smoke-test TFF computation: returns a constant greeting."""
  return 'Hello, World!'


hello_world()

# + [markdown] id="iu5Gd8D6W33s"
# ## Implementing Federated Averaging
#
# As in
# [Federated Learning for Image Classification](federated_learning_for_image_classification.ipynb),
# we are going to use the MNIST example, but since this is intended as a low-level
# tutorial, we are going to bypass the Keras API and `tff.simulation`, write raw
# model code, and construct a federated data set from scratch.
#

# + [markdown] id="b6qCjef350c_"
# ### Preparing federated data sets
#
# For the sake of a demonstration, we're going to simulate a scenario in which we
# have data from 10 users, and each of the users contributes knowledge how to
# recognize a different digit. This is about as
# non-[i.i.d.](https://en.wikipedia.org/wiki/Independent_and_identically_distributed_random_variables)
# as it gets.
#
# First, let's load the standard MNIST data:

# + id="uThZM4Ds-KDQ"
mnist_train, mnist_test = tf.keras.datasets.mnist.load_data()

# + id="PkJc5rHA2no_"
[(x.dtype, x.shape) for x in mnist_train]

# + [markdown] id="mFET4BKJFbkP"
# The data comes as Numpy arrays, one with images and another with digit labels, both
# with the first dimension going over the individual examples. Let's write a
# helper function that formats it in a way compatible with how we feed federated
# sequences into TFF computations, i.e., as a list of lists - the outer list
# ranging over the users (digits), the inner ones ranging over batches of data in
# each client's sequence. As is customary, we will structure each batch as a pair
# of tensors named `x` and `y`, each with the leading batch dimension. While at
# it, we'll also flatten each image into a 784-element vector and rescale the
# pixels in it into the `0..1` range, so that we don't have to clutter the model
# logic with data conversions.

# + id="XTaTLiq5GNqy"
NUM_EXAMPLES_PER_USER = 1000
BATCH_SIZE = 100


def get_data_for_digit(source, digit):
  """Return up to NUM_EXAMPLES_PER_USER examples of `digit` from `source`,
  as a list of {'x': float32 batch, 'y': int32 batch} dicts."""
  output_sequence = []
  # Indices of all examples labelled with the requested digit.
  all_samples = [i for i, d in enumerate(source[1]) if d == digit]
  for i in range(0, min(len(all_samples), NUM_EXAMPLES_PER_USER), BATCH_SIZE):
    batch_samples = all_samples[i:i + BATCH_SIZE]
    output_sequence.append({
        'x':
            # Flatten each 28x28 image to 784 values and rescale to 0..1.
            np.array([source[0][i].flatten() / 255.0 for i in batch_samples],
                     dtype=np.float32),
        'y':
            np.array([source[1][i] for i in batch_samples], dtype=np.int32)
    })
  return output_sequence


federated_train_data = [get_data_for_digit(mnist_train, d) for d in range(10)]
federated_test_data = [get_data_for_digit(mnist_test, d) for d in range(10)]

# + [markdown] id="xpNdBimWaMHD"
# As a quick sanity check, let's look at the `Y` tensor in the last batch of data
# contributed by the fifth client (the one corresponding to the digit `5`).

# + id="bTNuL1W4bcuc"
federated_train_data[5][-1]['y']

# + [markdown] id="Xgvcwv7Obhat"
# Just to be sure, let's also look at the image corresponding to the last element of that batch.

# + id="cI4aat1za525"
from matplotlib import pyplot as plt

plt.imshow(federated_train_data[5][-1]['x'][-1].reshape(28, 28), cmap='gray')
plt.grid(False)
plt.show()

# + [markdown] id="J-ox58PA56f8"
# ### On combining TensorFlow and TFF
#
# In this tutorial, for compactness we immediately decorate functions that
# introduce TensorFlow logic with `tff.tf_computation`. However, for more complex
# logic, this is not the pattern we recommend. Debugging TensorFlow can already be
# a challenge, and debugging TensorFlow after it has been fully serialized and
# then re-imported necessarily loses some metadata and limits interactivity,
# making debugging even more of a challenge.
# # Therefore, **we strongly recommend writing complex TF logic as stand-alone # Python functions** (that is, without `tff.tf_computation` decoration). This way # the TensorFlow logic can be developed and tested using TF best practices and # tools (like eager mode), before serializing the computation for TFF (e.g., by invoking `tff.tf_computation` with a Python function as the argument). # + [markdown] id="RSd6UatXbzw-" # ### Defining a loss function # # Now that we have the data, let's define a loss function that we can use for # training. First, let's define the type of input as a TFF named tuple. Since the # size of data batches may vary, we set the batch dimension to `None` to indicate # that the size of this dimension is unknown. # + id="653xv5NXd4fy" BATCH_SPEC = collections.OrderedDict( x=tf.TensorSpec(shape=[None, 784], dtype=tf.float32), y=tf.TensorSpec(shape=[None], dtype=tf.int32)) BATCH_TYPE = tff.to_type(BATCH_SPEC) str(BATCH_TYPE) # + [markdown] id="pb6qPUvyh5A1" # You may be wondering why we can't just define an ordinary Python type. Recall # the discussion in [part 1](custom_federated_algorithms_1.ipynb), where we # explained that while we can express the logic of TFF computations using Python, # under the hood TFF computations *are not* Python. The symbol `BATCH_TYPE` # defined above represents an abstract TFF type specification. It is important to # distinguish this *abstract* TFF type from concrete Python *representation* # types, e.g., containers such as `dict` or `collections.namedtuple` that may be # used to represent the TFF type in the body of a Python function. Unlike Python, # TFF has a single abstract type constructor `tff.StructType` for tuple-like # containers, with elements that can be individually named or left unnamed. This # type is also used to model formal parameters of computations, as TFF # computations can formally only declare one parameter and one result - you will # see examples of this shortly. 
# # Let's now define the TFF type of model parameters, again as a TFF named tuple of # *weights* and *bias*. # + id="Og7VViafh-30" MODEL_SPEC = collections.OrderedDict( weights=tf.TensorSpec(shape=[784, 10], dtype=tf.float32), bias=tf.TensorSpec(shape=[10], dtype=tf.float32)) MODEL_TYPE = tff.to_type(MODEL_SPEC) print(MODEL_TYPE) # + [markdown] id="iHhdaWSpfQxo" # With those definitions in place, now we can define the loss for a given model, over a single batch. Note the usage of `@tf.function` decorator inside the `@tff.tf_computation` decorator. This allows us to write TF using Python like semantics even though were inside a `tf.Graph` context created by the `tff.tf_computation` decorator. # + id="4EObiz_Ke0uK" # NOTE: `forward_pass` is defined separately from `batch_loss` so that it can # be later called from within another tf.function. Necessary because a # @tf.function decorated method cannot invoke a @tff.tf_computation. @tf.function def forward_pass(model, batch): predicted_y = tf.nn.softmax( tf.matmul(batch['x'], model['weights']) + model['bias']) return -tf.reduce_mean( tf.reduce_sum( tf.one_hot(batch['y'], 10) * tf.math.log(predicted_y), axis=[1])) @tff.tf_computation(MODEL_TYPE, BATCH_TYPE) def batch_loss(model, batch): return forward_pass(model, batch) # + [markdown] id="8K0UZHGnr8SB" # As expected, computation `batch_loss` returns `float32` loss given the model and # a single data batch. Note how the `MODEL_TYPE` and `BATCH_TYPE` have been lumped # together into a 2-tuple of formal parameters; you can recognize the type of # `batch_loss` as `(<MODEL_TYPE,BATCH_TYPE> -> float32)`. # + id="4WXEAY8Nr89V" str(batch_loss.type_signature) # + [markdown] id="pAnt_UcdnvGa" # As a sanity check, let's construct an initial model filled with zeros and # compute the loss over the batch of data we visualized above. 
# + id="U8Ne8igan3os" initial_model = collections.OrderedDict( weights=np.zeros([784, 10], dtype=np.float32), bias=np.zeros([10], dtype=np.float32)) sample_batch = federated_train_data[5][-1] batch_loss(initial_model, sample_batch) # + [markdown] id="ckigEAyDAWFV" # Note that we feed the TFF computation with the initial model defined as a # `dict`, even though the body of the Python function that defines it consumes # model parameters as `model['weight']` and `model['bias']`. The arguments of the call # to `batch_loss` aren't simply passed to the body of that function. # # # What happens when we invoke `batch_loss`? # The Python body of `batch_loss` has already been traced and serialized in the above cell where it was defined. TFF acts as the caller to `batch_loss` # at the computation definition time, and as the target of invocation at the time # `batch_loss` is invoked. In both roles, TFF serves as the bridge between TFF's # abstract type system and Python representation types. At the invocation time, # TFF will accept most standard Python container types (`dict`, `list`, `tuple`, # `collections.namedtuple`, etc.) as concrete representations of abstract TFF # tuples. Also, although as noted above, TFF computations formally only accept a # single parameter, you can use the familiar Python call syntax with positional # and/or keyword arguments in case where the type of the parameter is a tuple - it # works as expected. # + [markdown] id="eB510nILYbId" # ### Gradient descent on a single batch # # Now, let's define a computation that uses this loss function to perform a single # step of gradient descent. Note how in defining this function, we use # `batch_loss` as a subcomponent. 
You can invoke a computation constructed with
# `tff.tf_computation` inside the body of another computation, though typically
# this is not necessary - as noted above, because serialization loses some
# debugging information, it is often preferable for more complex computations to
# write and test all the TensorFlow without the `tff.tf_computation` decorator.

# + id="O4uaVxw3AyYS"
@tff.tf_computation(MODEL_TYPE, BATCH_TYPE, tf.float32)
def batch_train(initial_model, batch, learning_rate):
  # Define a group of model variables and set them to `initial_model`. Must
  # be defined outside the @tf.function.
  model_vars = collections.OrderedDict([
      (name, tf.Variable(name=name, initial_value=value))
      for name, value in initial_model.items()
  ])
  optimizer = tf.keras.optimizers.SGD(learning_rate)

  @tf.function
  def _train_on_batch(model_vars, batch):
    # Perform one step of gradient descent using loss from `batch_loss`.
    with tf.GradientTape() as tape:
      loss = forward_pass(model_vars, batch)
    grads = tape.gradient(loss, model_vars)
    # Flatten both structures so gradients pair up with their variables.
    optimizer.apply_gradients(
        zip(tf.nest.flatten(grads), tf.nest.flatten(model_vars)))
    return model_vars

  return _train_on_batch(model_vars, batch)


# + id="Y84gQsaohC38"
str(batch_train.type_signature)

# + [markdown] id="ID8xg9FCUL2A"
# When you invoke a Python function decorated with `tff.tf_computation` within the
# body of another such function, the logic of the inner TFF computation is
# embedded (essentially, inlined) in the logic of the outer one. As noted above,
# if you are writing both computations, it is likely preferable to make the inner
# function (`batch_loss` in this case) a regular Python or `tf.function` rather
# than a `tff.tf_computation`. However, here we illustrate that calling one
# `tff.tf_computation` inside another basically works as expected. This may be
# necessary if, for example, you do not have the Python code defining
# `batch_loss`, but only its serialized TFF representation.
# # Now, let's apply this function a few times to the initial model to see whether # the loss decreases. # + id="8edcJTlXUULm" model = initial_model losses = [] for _ in range(5): model = batch_train(model, sample_batch, 0.1) losses.append(batch_loss(model, sample_batch)) # + id="3n1onojT1zHv" losses # + [markdown] id="EQk4Ha8PU-3P" # ### Gradient descent on a sequence of local data # # Now, since `batch_train` appears to work, let's write a similar training # function `local_train` that consumes the entire sequence of all batches from one # user instead of just a single batch. The new computation will need to now # consume `tff.SequenceType(BATCH_TYPE)` instead of `BATCH_TYPE`. # + id="EfPD5a6QVNXM" LOCAL_DATA_TYPE = tff.SequenceType(BATCH_TYPE) @tff.federated_computation(MODEL_TYPE, tf.float32, LOCAL_DATA_TYPE) def local_train(initial_model, learning_rate, all_batches): @tff.tf_computation(LOCAL_DATA_TYPE, tf.float32) def _insert_learning_rate_to_sequence(dataset, learning_rate): return dataset.map(lambda x: (x, learning_rate)) batches_with_learning_rate = _insert_learning_rate_to_sequence(all_batches, learning_rate) # Mapping function to apply to each batch. @tff.federated_computation(MODEL_TYPE, batches_with_learning_rate.type_signature.element) def batch_fn(model, batch_with_lr): batch, lr = batch_with_lr return batch_train(model, batch, lr) return tff.sequence_reduce(batches_with_learning_rate, initial_model, batch_fn) # + id="sAhkS5yKUgjC" str(local_train.type_signature) # + [markdown] id="EYT-SiopYBtH" # There are quite a few details buried in this short section of code, let's go # over them one by one. # # First, while we could have implemented this logic entirely in TensorFlow, # relying on `tf.data.Dataset.reduce` to process the sequence similarly to how # we've done it earlier, we've opted this time to express the logic in the glue # language, as a `tff.federated_computation`. 
We've used the federated operator # `tff.sequence_reduce` to perform the reduction. # # The operator `tff.sequence_reduce` is used similarly to # `tf.data.Dataset.reduce`. You can think of it as essentially the same as # `tf.data.Dataset.reduce`, but for use inside federated computations, which as # you may remember, cannot contain TensorFlow code. It is a template operator with # a formal parameter 3-tuple that consists of a *sequence* of `T`-typed elements, # the initial state of the reduction (we'll refer to it abstractly as *zero*) of # some type `U`, and the *reduction operator* of type `(<U,T> -> U)` that alters the # state of the reduction by processing a single element. The result is the final # state of the reduction, after processing all elements in a sequential order. In # our example, the state of the reduction is the model trained on a prefix of the # data, and the elements are data batches. # # Second, note that we have again used one computation (`batch_train`) as a # component within another (`local_train`), but not directly. We can't use it as a # reduction operator because it takes an additional parameter - the learning rate. # To resolve this, we define an embedded federated computation `batch_fn` that # binds to the `local_train`'s parameter `learning_rate` in its body. It is # allowed for a child computation defined this way to capture a formal parameter # of its parent as long as the child computation is not invoked outside the body # of its parent. You can think of this pattern as an equivalent of # `functools.partial` in Python. # # The practical implication of capturing `learning_rate` this way is, of course, # that the same learning rate value is used across all batches. # # Now, let's try the newly defined local training function on the entire sequence # of data from the same user who contributed the sample batch (digit `5`). 
# + id="EnWFLoZGcSby" locally_trained_model = local_train(initial_model, 0.1, federated_train_data[5]) # + [markdown] id="y0UXUqGk9zoF" # Did it work? To answer this question, we need to implement evaluation. # + [markdown] id="a8WDKu6WYy__" # ### Local evaluation # # Here's one way to implement local evaluation by adding up the losses across all data # batches (we could have just as well computed the average; we'll leave it as an # exercise for the reader). # + id="0RiODuc6z7Ln" @tff.federated_computation(MODEL_TYPE, LOCAL_DATA_TYPE) def local_eval(model, all_batches): @tff.tf_computation(MODEL_TYPE, LOCAL_DATA_TYPE) def _insert_model_to_sequence(model, dataset): return dataset.map(lambda x: (model, x)) model_plus_data = _insert_model_to_sequence(model, all_batches) @tff.tf_computation(tf.float32, batch_loss.type_signature.result) def tff_add(accumulator, arg): return accumulator + arg return tff.sequence_reduce( tff.sequence_map( batch_loss, model_plus_data), 0., tff_add) # + id="pH2XPEAKa4Dg" str(local_eval.type_signature) # + [markdown] id="efX81HuE-BcO" # Again, there are a few new elements illustrated by this code, let's go over them # one by one. # # First, we have used two new federated operators for processing sequences: # `tff.sequence_map` that takes a *mapping function* `T->U` and a *sequence* of # `T`, and emits a sequence of `U` obtained by applying the mapping function # pointwise, and `tff.sequence_sum` that just adds all the elements. Here, we map # each data batch to a loss value, and then add the resulting loss values to # compute the total loss. # # Note that we could have again used `tff.sequence_reduce`, but this wouldn't be # the best choice - the reduction process is, by definition, sequential, whereas # the mapping and sum can be computed in parallel. 
When given a choice, it's best # to stick with operators that don't constrain implementation choices, so that # when our TFF computation is compiled in the future to be deployed to a specific # environment, one can take full advantage of all potential opportunities for a # faster, more scalable, more resource-efficient execution. # # Second, note that just as in `local_train`, the component function we need # (`batch_loss`) takes more parameters than what the federated operator # (`tff.sequence_map`) expects, so we again define a partial, this time inline by # directly wrapping a `lambda` as a `tff.federated_computation`. Using wrappers # inline with a function as an argument is the recommended way to use # `tff.tf_computation` to embed TensorFlow logic in TFF. # # Now, let's see whether our training worked. # + id="vPw6JSVf5q_x" print('initial_model loss =', local_eval(initial_model, federated_train_data[5])) print('locally_trained_model loss =', local_eval(locally_trained_model, federated_train_data[5])) # + [markdown] id="6Tvu70cnBsUf" # Indeed, the loss decreased. But what happens if we evaluated it on another # user's data? # + id="gjF0NYAj5wls" print('initial_model loss =', local_eval(initial_model, federated_train_data[0])) print('locally_trained_model loss =', local_eval(locally_trained_model, federated_train_data[0])) # + [markdown] id="7WPumnRTBzUs" # As expected, things got worse. The model was trained to recognize `5`, and has # never seen a `0`. This brings the question - how did the local training impact # the quality of the model from the global perspective? # + [markdown] id="QJnL2mQRZKTO" # ### Federated evaluation # # This is the point in our journey where we finally circle back to federated types # and federated computations - the topic that we started with. Here's a pair of # TFF types definitions for the model that originates at the server, and the data # that remains on the clients. 
# + id="LjGGhpoEBh_6" SERVER_MODEL_TYPE = tff.type_at_server(MODEL_TYPE) CLIENT_DATA_TYPE = tff.type_at_clients(LOCAL_DATA_TYPE) # + [markdown] id="4gTXV2-jZtE3" # With all the definitions introduced so far, expressing federated evaluation in # TFF is a one-liner - we distribute the model to clients, let each client invoke # local evaluation on its local portion of data, and then average out the loss. # Here's one way to write this. # + id="2zChEPzEBx4T" @tff.federated_computation(SERVER_MODEL_TYPE, CLIENT_DATA_TYPE) def federated_eval(model, data): return tff.federated_mean( tff.federated_map(local_eval, [tff.federated_broadcast(model), data])) # + [markdown] id="IWcNONNWaE0N" # We've already seen examples of `tff.federated_mean` and `tff.federated_map` # in simpler scenarios, and at the intuitive level, they work as expected, but # there's more in this section of code than meets the eye, so let's go over it # carefully. # # First, let's break down the *let each client invoke local evaluation on its # local portion of data* part. As you may recall from the preceding sections, # `local_eval` has a type signature of the form `(<MODEL_TYPE, LOCAL_DATA_TYPE> -> # float32)`. # # The federated operator `tff.federated_map` is a template that accepts as a # parameter a 2-tuple that consists of the *mapping function* of some type `T->U` # and a federated value of type `{T}@CLIENTS` (i.e., with member constituents of # the same type as the parameter of the mapping function), and returns a result of # type `{U}@CLIENTS`. # # Since we're feeding `local_eval` as a mapping function to apply on a per-client # basis, the second argument should be of a federated type `{<MODEL_TYPE, # LOCAL_DATA_TYPE>}@CLIENTS`, i.e., in the nomenclature of the preceding sections, # it should be a federated tuple. Each client should hold a full set of arguments # for `local_eval` as a member consituent. Instead, we're feeding it a 2-element # Python `list`. What's happening here? 
# # Indeed, this is an example of an *implicit type cast* in TFF, similar to # implicit type casts you may have encountered elsewhere, e.g., when you feed an # `int` to a function that accepts a `float`. Implicit casting is used scarcily at # this point, but we plan to make it more pervasive in TFF as a way to minimize # boilerplate. # # The implicit cast that's applied in this case is the equivalence between # federated tuples of the form `{<X,Y>}@Z`, and tuples of federated values # `<{X}@Z,{Y}@Z>`. While formally, these two are different type signatures, # looking at it from the programmers's perspective, each device in `Z` holds two # units of data `X` and `Y`. What happens here is not unlike `zip` in Python, and # indeed, we offer an operator `tff.federated_zip` that allows you to perform such # conversions explicity. When the `tff.federated_map` encounters a tuple as a # second argument, it simply invokes `tff.federated_zip` for you. # # Given the above, you should now be able to recognize the expression # `tff.federated_broadcast(model)` as representing a value of TFF type # `{MODEL_TYPE}@CLIENTS`, and `data` as a value of TFF type # `{LOCAL_DATA_TYPE}@CLIENTS` (or simply `CLIENT_DATA_TYPE`), the two getting # filtered together through an implicit `tff.federated_zip` to form the second # argument to `tff.federated_map`. # # The operator `tff.federated_broadcast`, as you'd expect, simply transfers data # from the server to the clients. # # Now, let's see how our local training affected the average loss in the system. # + id="tbmtJItcn94j" print('initial_model loss =', federated_eval(initial_model, federated_train_data)) print('locally_trained_model loss =', federated_eval(locally_trained_model, federated_train_data)) # + [markdown] id="LQi2rGX_fK7i" # Indeed, as expected, the loss has increased. In order to improve the model for # all users, we'll need to train in on everyone's data. 
# + [markdown] id="vkw9f59qfS7o"
# ### Federated training
#
# The simplest way to implement federated training is to locally train, and then
# average the models. This uses the same building blocks and patterns we've already
# discussed, as you can see below.

# + id="mBOC4uoG6dd-"
SERVER_FLOAT_TYPE = tff.type_at_server(tf.float32)


@tff.federated_computation(SERVER_MODEL_TYPE, SERVER_FLOAT_TYPE,
                           CLIENT_DATA_TYPE)
def federated_train(model, learning_rate, data):
  # Broadcast model and learning rate, train locally on each client, and
  # average the resulting client models on the server.
  return tff.federated_mean(
      tff.federated_map(local_train, [
          tff.federated_broadcast(model),
          tff.federated_broadcast(learning_rate), data
      ]))


# + [markdown] id="z2vACMsQjzO1"
# Note that in the full-featured implementation of Federated Averaging provided by
# `tff.learning`, rather than averaging the models, we prefer to average model
# deltas, for a number of reasons, e.g., the ability to clip the update norms,
# for compression, etc.
#
# Let's see whether the training works by running a few rounds of training and
# comparing the average loss before and after.

# + id="NLx-3rLs9jGY"
model = initial_model
learning_rate = 0.1
for round_num in range(5):
  model = federated_train(model, learning_rate, federated_train_data)
  learning_rate = learning_rate * 0.9
  loss = federated_eval(model, federated_train_data)
  print('round {}, loss={}'.format(round_num, loss))

# + [markdown] id="Z0VjSLQzlUIp"
# For completeness, let's now also run on the test data to confirm that our model
# generalizes well.

# + id="ZaZT45yFMOaM"
print('initial_model test loss =',
      federated_eval(initial_model, federated_test_data))
print('trained_model test loss =', federated_eval(model, federated_test_data))

# + [markdown] id="pxlHHwLGlgFB"
# This concludes our tutorial.
#
# Of course, our simplified example doesn't reflect a number of things you'd need
# to do in a more realistic scenario - for example, we haven't computed metrics
# other than loss. We encourage you to study
# [the implementation](https://github.com/tensorflow/federated/blob/main/tensorflow_federated/python/learning/federated_averaging.py)
# of federated averaging in `tff.learning` as a more complete example, and as a
# way to demonstrate some of the coding practices we'd like to encourage.
site/en-snapshot/federated/tutorials/custom_federated_algorithms_2.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# Long-term Crab light curve: fetch INTEGRAL/ISGRI light curves through the
# ODA dispatcher API (OSA 10.2 for 2003-2016, OSA 11.0 for 2016-2018), rebin
# them onto a coarse time grid with inverse-variance weighting, and compare
# against Swift/BAT and Fermi/GBM monitoring data.

from oda_api.api import DispatcherAPI
from oda_api.plot_tools import OdaImage,OdaLightCurve
from oda_api.data_products import BinaryData
import os
from astropy.io import fits
import numpy as np
from numpy import sqrt
import matplotlib.pyplot as plt
# %matplotlib inline

# + tags=["parameters"]
# Parameter cell: target position, time span, energy bands and dispatcher
# settings (can be overridden when the notebook is executed as a pipeline).
source_name='Crab'
ra=83.633080
dec=22.014500
radius=5.
Tstart='2003-03-15T23:27:40.0'
Tstop='2018-03-16T00:03:15.0'
T_format='isot'
time_bin=10000  # light-curve time bin, in `time_bin_format` units
time_bin_format='sec'
E1_keV=30.      # low band: 30-100 keV
E2_keV=100.
E1_keV_1=100.   # high band: 100-300 keV
E2_keV_1=300.
instrument='isgri'
product='isgri_lc'
osa_version='OSA10.2'
osa_version11='OSA11.0'
host='www.astro.unige.ch/cdci/astrooda/dispatch-data'
# -

# Python 2 compatibility shim so the token can be read from stdin either way.
try:
    input = raw_input
except NameError:
    pass
token=input() # token for restricted access server
cookies=dict(_oauth2_proxy=token)
disp=DispatcherAPI(host=host)

# +
# One-year image query, used only to obtain a source catalog for the field.
T1_utc='2003-03-15T00:00:00.0'
T2_utc='2004-03-15T00:00:00.0'
data=disp.get_product(instrument='isgri',
                      product='isgri_image',
                      T1=T1_utc,
                      T2=T2_utc,
                      E1_keV=E1_keV,
                      E2_keV=E2_keV,
                      osa_version='OSA10.2',
                      RA=ra,
                      DEC=dec,
                      detection_threshold=10,
                      product_type='Real')
# -

data.dispatcher_catalog_1.table

# +
# Drop newly-detected ("NEW...") sources from the catalog; FLAG records
# whether the target itself was found.
# NOTE(review): FLAG and nrows are not used later in this notebook.
FLAG=0
torm=[]
for ID,n in enumerate(data.dispatcher_catalog_1.table['src_names']):
    if(n[0:3]=='NEW'):
        torm.append(ID)
    if(n==source_name):
        FLAG=1
data.dispatcher_catalog_1.table.remove_rows(torm)
# NOTE(review): row 1 is also removed unconditionally - confirm this is the
# intended extra catalog entry to drop.
data.dispatcher_catalog_1.table.remove_rows(1)
nrows=len(data.dispatcher_catalog_1.table['src_names'])
# -

api_cat=data.dispatcher_catalog_1.get_api_dictionary()

# +
import numpy as np

# Year-by-year 30-100 keV light curves with OSA 10.2 (2003-2016),
# concatenated into single TIME/RATE/ERROR arrays.
for i in range(2003,2016):
    T1_utc=str(i)+'-03-15T00:00:00.0'
    T2_utc=str(i+1)+'-03-15T00:00:00.0'
    print(T1_utc,'-',T2_utc)
    #par_dict={'catalog_selected_objects': u'1,2,3,4',
    #          'E1_keV': E1_keV, 'E2_keV': E2_keV,
    #          'osa_version': osa_version, 'T_format': T_format, 'selected_catalog': catalog ,
    #          'T2': T2_utc, 'time_bin': time_bin, 'T1': T1_utc, 'product': product, 'instrument': instrument,
    #          'radius': radius, 'RA': ra, 'src_name': src_name, 'DEC': dec, 'time_bin_format': time_bin_format}
    #data=disp.get_product(**par_dict)
    data=disp.get_product(instrument='isgri',
                          product='isgri_lc',
                          T1=T1_utc,
                          T2=T2_utc,
                          T_format=T_format,
                          E1_keV=E1_keV,
                          E2_keV=E2_keV,
                          query_type='Real',
                          osa_version='OSA10.2',
                          RA=ra,
                          DEC=dec,
                          product_type='Real',
                          time_bin=time_bin,
                          selected_catalog=api_cat)
    crab_lc=data._p_list[0]
    lc=crab_lc.data_unit[1].data
    if i==2003:
        t_30_100=lc['TIME']
        r_30_100=lc['RATE']
        err_30_100=lc['ERROR']
    else:
        t_30_100=np.concatenate((t_30_100,lc['TIME']))
        r_30_100=np.concatenate((r_30_100,lc['RATE']))
        err_30_100=np.concatenate((err_30_100,lc['ERROR']))

# +
# Same years, 100-300 keV band, OSA 10.2.
for i in range(2003,2016):
    T1_utc=str(i)+'-03-15T00:00:00.0'
    T2_utc=str(i+1)+'-03-15T00:00:00.0'
    print(T1_utc,'-',T2_utc)
    data=disp.get_product(instrument='isgri',
                          product='isgri_lc',
                          T1=T1_utc,
                          T2=T2_utc,
                          T_format=T_format,
                          E1_keV=E1_keV_1,
                          E2_keV=E2_keV_1,
                          query_type='Real',
                          osa_version='OSA10.2',
                          RA=ra,
                          DEC=dec,
                          product_type='Real',
                          time_bin=time_bin,
                          selected_catalog=api_cat)
    crab_lc=data._p_list[0]
    lc=crab_lc.data_unit[1].data
    if i==2003:
        t_100_300=lc['TIME']
        r_100_300=lc['RATE']
        err_100_300=lc['ERROR']
    else:
        t_100_300=np.concatenate((t_100_300,lc['TIME']))
        r_100_300=np.concatenate((r_100_300,lc['RATE']))
        err_100_300=np.concatenate((err_100_300,lc['ERROR']))

# +
# Recent years (2016-2018) require OSA 11.0; 100-300 keV band.
for i in range(2016,2018):
    T1_utc=str(i)+'-03-15T00:00:00.0'
    T2_utc=str(i+1)+'-03-15T00:00:00.0'
    print(T1_utc,'-',T2_utc)
    #par_dict={'catalog_selected_objects': u'1,2,3,4', 'E1_keV': E1_keV_1, 'E2_keV': E2_keV_1, 'osa_version': osa_version11, 'T_format': T_format, 'selected_catalog': catalog , 'T2': T2_utc, 'time_bin': time_bin, 'T1': T1_utc, 'product': product, 'instrument': instrument, 'radius': radius, 'RA': ra, 'src_name': src_name, 'DEC': dec, 'time_bin_format': time_bin_format}
    #data=disp.get_product(**par_dict)
    data=disp.get_product(instrument='isgri',
                          product='isgri_lc',
                          T1=T1_utc,
                          T2=T2_utc,
                          T_format=T_format,
                          E1_keV=E1_keV_1,
                          E2_keV=E2_keV_1,
                          query_type='Real',
                          osa_version='OSA11.0',
                          RA=ra,
                          DEC=dec,
                          product_type='Real',
                          time_bin=time_bin,
                          selected_catalog=api_cat)
    crab_lc=data._p_list[0]
    lc=crab_lc.data_unit[1].data
    if i==2016:
        t_100_300_osa11=lc['TIME']
        r_100_300_osa11=lc['RATE']
        err_100_300_osa11=lc['ERROR']
    else:
        t_100_300_osa11=np.concatenate((t_100_300_osa11,lc['TIME']))
        r_100_300_osa11=np.concatenate((r_100_300_osa11,lc['RATE']))
        err_100_300_osa11=np.concatenate((err_100_300_osa11,lc['ERROR']))

# +
# OSA 11.0, 30-100 keV band (2016-2018).
for i in range(2016,2018):
    T1_utc=str(i)+'-03-15T00:00:00.0'
    T2_utc=str(i+1)+'-03-15T00:00:00.0'
    print(T1_utc,'-',T2_utc)
    data=disp.get_product(instrument='isgri',
                          product='isgri_lc',
                          T1=T1_utc,
                          T2=T2_utc,
                          T_format=T_format,
                          E1_keV=E1_keV,
                          E2_keV=E2_keV,
                          query_type='Real',
                          osa_version='OSA11.0',
                          RA=ra,
                          DEC=dec,
                          product_type='Real',
                          time_bin=time_bin,
                          selected_catalog=api_cat)
    crab_lc=data._p_list[0]
    lc=crab_lc.data_unit[1].data
    if i==2016:
        t_30_100_osa11=lc['TIME']
        r_30_100_osa11=lc['RATE']
        err_30_100_osa11=lc['ERROR']
    else:
        t_30_100_osa11=np.concatenate((t_30_100_osa11,lc['TIME']))
        r_30_100_osa11=np.concatenate((r_30_100_osa11,lc['RATE']))
        err_30_100_osa11=np.concatenate((err_30_100_osa11,lc['ERROR']))

# +
# Rebin the Swift/BAT daily monitoring onto an Nbins-wide MJD grid using
# inverse-variance weighting (each coarse bin is the weighted mean of all
# fine points falling before its right edge).
from math import sqrt,exp
Nbins=30
time=np.linspace(52800,58300,Nbins)
flux_bat=np.zeros(Nbins)
error_bat=np.zeros(Nbins)
dtime=time[1]-time[0]
time_av=time-dtime/2.
j=0
d=np.genfromtxt('Crab_SWIFT_BAT.dat')
tt=d[:,0]
r=d[:,1]
rerr=d[:,2]
k=0
while (j<len(time)):
    while ((tt[k]<time[j])):
        flux_bat[j]=flux_bat[j]+r[k]/(rerr[k])**2
        error_bat[j]=error_bat[j]+1./(rerr[k])**2
        k=k+1
        if (k==len(tt)):
            break
    if (k==len(tt)):
        break
    else:
        j=j+1
for i in range(len(flux_bat)):
    if(error_bat[i]>0.):
        flux_bat[i]=flux_bat[i]/(error_bat[i])
        error_bat[i]=1./sqrt(error_bat[i])
sc_bat=max(flux_bat)

# +
# Same rebinning for the Fermi/GBM occultation fluxes in three bands
# (columns 4-9 of the input file: 25-50, 50-100 and 100-300 keV).
time=np.linspace(52800,58300,Nbins)
flux25=np.zeros(Nbins)
error25=np.zeros(Nbins)
flux50=np.zeros(Nbins)
error50=np.zeros(Nbins)
flux100=np.zeros(Nbins)
error100=np.zeros(Nbins)
dtime=time[1]-time[0]
time_av=time-dtime/2.
j=0
d=np.genfromtxt('CRAB_GBM_occ_fluxhist_auto.txt')
t1=d[:,0]
t2=d[:,1]
f25=d[:,4]
ferr25=d[:,5]
f50=d[:,6]
ferr50=d[:,7]
f100=d[:,8]
ferr100=d[:,9]
tt=(t1+t2)/2.
k=0
while (j<len(time)):
    while ((tt[k]<time[j])):
        flux25[j]=flux25[j]+f25[k]/(ferr25[k])**2
        error25[j]=error25[j]+1./(ferr25[k])**2
        flux50[j]=flux50[j]+f50[k]/(ferr50[k])**2
        error50[j]=error50[j]+1./(ferr50[k])**2
        flux100[j]=flux100[j]+f100[k]/(ferr100[k])**2
        error100[j]=error100[j]+1./(ferr100[k])**2
        k=k+1
        if (k==len(tt)):
            break
    if (k==len(tt)):
        break
    else:
        j=j+1
for i in range(len(flux25)):
    if(error25[i]>0.):
        flux25[i]=flux25[i]/(error25[i])
        error25[i]=1./sqrt(error25[i])
    if(error50[i]>0.):
        flux50[i]=flux50[i]/(error50[i])
        error50[i]=1./sqrt(error50[i])
    if(error100[i]>0.):
        flux100[i]=flux100[i]/(error100[i])
        error100[i]=1./sqrt(error100[i])
sc_gbm25=max(flux25)
sc_gbm50=max(flux50)
sc_gbm100=max(flux100)

# +
# Final figure: two stacked panels sharing axes - 30-100 keV (top) and
# 100-300 keV (bottom) - showing raw ISGRI points (black), rebinned ISGRI
# values (blue), and GBM/BAT references, all normalized to their maxima.
t0=51544  # offset added to ISGRI times (51544 = MJD of 2000-01-01) - TODO confirm
from numpy import exp
flux=np.zeros(Nbins)
error=np.zeros(Nbins)
dtime=time[1]-time[0]
time_av=time-dtime/2.
fig = plt.figure(figsize=(10,7))
ax1 = fig.add_subplot(211)
ax2 = fig.add_subplot(212,sharex=ax1, sharey=ax1)
#ax3 = fig.add_subplot(313,sharex=ax1, sharey=ax1)
fig.subplots_adjust(hspace=0)
# Rebin 30-100 keV OSA 10.2 points onto the coarse grid.
j=0
k=0
while (j<len(time)):
    while ((t_30_100[k]+t0<time[j])):
        flux[j]=flux[j]+r_30_100[k]/(err_30_100[k])**2
        error[j]=error[j]+1./(err_30_100[k])**2
        k=k+1
        if (k==len(t_30_100)):
            break
    if (k==len(t_30_100)):
        break
    else:
        j=j+1
for i in range(len(flux)):
    if(error[i]>0.):
        flux[i]=flux[i]/(error[i])
        error[i]=1./sqrt(error[i])
sc=max(flux)
ax1.errorbar(t_30_100+t0,r_30_100/sc,yerr=err_30_100/sc,linestyle='none',color='black',alpha=0.2,linewidth=2)
ax1.errorbar(time_av[:-4],flux[:-4]/sc,yerr=error[:-4]/sc,xerr=dtime/2.,linestyle='none',color='blue',linewidth=4,alpha=0.5)
# Rebin 30-100 keV OSA 11.0 points.
flux=np.zeros(Nbins)
error=np.zeros(Nbins)
j=0
k=0
while (j<len(time)):
    while ((t_30_100_osa11[k]+t0<time[j])):
        flux[j]=flux[j]+r_30_100_osa11[k]/(err_30_100_osa11[k])**2
        error[j]=error[j]+1./(err_30_100_osa11[k])**2
        k=k+1
        if (k==len(t_30_100_osa11)):
            break
    if (k==len(t_30_100_osa11)):
        break
    else:
        j=j+1
for i in range(len(flux)):
    if(error[i]>0.):
        flux[i]=flux[i]/(error[i])
        error[i]=1./sqrt(error[i])
sc=max(flux)
ax1.errorbar(t_30_100_osa11+t0,r_30_100_osa11/sc,yerr=err_30_100_osa11/sc,linestyle='none',color='black',alpha=0.2,linewidth=2)
ax1.errorbar(time_av,flux/sc,yerr=error/sc,xerr=dtime/2.,linestyle='none',color='blue',linewidth=8,alpha=0.5)
# Rebin 100-300 keV OSA 10.2 points.
j=0
k=0
flux=np.zeros(Nbins)
error=np.zeros(Nbins)
while (j<len(time)):
    while ((t_100_300[k]+t0<time[j])):
        flux[j]=flux[j]+r_100_300[k]/(err_100_300[k])**2
        error[j]=error[j]+1./(err_100_300[k])**2
        k=k+1
        if (k==len(t_100_300)):
            break
    if (k==len(t_100_300)):
        break
    else:
        j=j+1
for i in range(len(flux)):
    if(error[i]>0.):
        flux[i]=flux[i]/(error[i])
        error[i]=1./sqrt(error[i])
sc=max(flux)
ax2.errorbar(t_100_300+t0,r_100_300/sc,yerr=err_100_300/sc,linestyle='none',color='black',alpha=0.2,linewidth=2)
ax2.errorbar(time_av[:-4],flux[:-4]/sc,yerr=error[:-4]/sc,xerr=dtime/2.,linestyle='none',color='blue',linewidth=4,alpha=0.5)
# Rebin 100-300 keV OSA 11.0 points.
j=0
k=0
flux=np.zeros(Nbins)
error=np.zeros(Nbins)
while (j<len(time)):
    while ((t_100_300_osa11[k]+t0<time[j])):
        flux[j]=flux[j]+r_100_300_osa11[k]/(err_100_300_osa11[k])**2
        error[j]=error[j]+1./(err_100_300_osa11[k])**2
        k=k+1
        if (k==len(t_100_300_osa11)):
            break
    if (k==len(t_100_300_osa11)):
        break
    else:
        j=j+1
for i in range(len(flux)):
    if(error[i]>0.):
        flux[i]=flux[i]/(error[i])
        error[i]=1./sqrt(error[i])
sc=max(flux)
ax2.errorbar(t_100_300_osa11+t0,r_100_300_osa11/sc,yerr=err_100_300_osa11/sc,linestyle='none',color='black',alpha=0.2,linewidth=2)
ax2.errorbar(time_av,flux/sc,yerr=error/sc,xerr=dtime/2.,linestyle='none',color='blue',linewidth=8,alpha=0.5)
# Overlay the rebinned Fermi/GBM and Swift/BAT references.
ax1.errorbar(time_av,1.*flux25/sc_gbm25,yerr=error25/sc_gbm25,xerr=dtime/2.,linestyle='none',color='green',alpha=0.5,linewidth=4,label='Fermi/GBM 25-50 keV')
ax1.errorbar(time_av,1.*flux_bat/sc_bat,yerr=error_bat/sc_bat,xerr=dtime/2.,linestyle='none',color='magenta',alpha=0.5,linewidth=4,label='SWIFT/BAT')
ax2.errorbar(time_av,flux100/sc_gbm100,yerr=error100/sc_gbm100,xerr=dtime/2.,linestyle='none',color='green',alpha=0.5,linewidth=4,label='Fermi/GBM 100-300 keV')
plt.ylim(0.55,1.25)
plt.xlim(52250,58500)
ax1.set_ylim(0.55,1.25)
ax1.tick_params(axis='both', which='major', labelsize=16)
ax2.tick_params(axis='both', which='major', labelsize=16)
ax1.set_yticks(np.linspace(0.5,1.2,8))
# Vertical line marks the OSA 10.2 -> OSA 11 transition epoch.
ax1.axvline(57357+31,color='black',linewidth=2,alpha=0.3)
ax2.axvline(57357+31,color='black',linewidth=2,alpha=0.3)
ax1.text(56500,1.15,'OSA 10.2',fontsize=16,alpha=0.3)
ax1.text(57500,1.15,'OSA 11',fontsize=16,alpha=0.3)
ax1.text(52700,1.15,'30-100 keV',fontsize=16)
ax2.text(52700,1.15,'100-300 keV',fontsize=16)
ax1.legend(loc='lower left')
ax2.legend(loc='lower left')
plt.xlabel('Time, MJD',fontsize=16)
plt.ylabel('Normalized flux',fontsize=16)
plt.savefig('Crab_lc_evolution.pdf',format='pdf',dpi=100)
# -
examples/Crab_lc_longterm.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# +
# Explicit imports so these helpers are usable outside the notebook; the
# shared utilities notebook additionally provides sns, plt, preprocessing,
# wilcoxon and datetimeFormat (TODO confirm all_utilities.ipynb contents).
import datetime

import pandas as pd

# % run all_utilities.ipynb
# -


def unique_counts(col_names, mobile_data):
    """Print and collect the unique values of each listed column.

    Args:
        col_names: iterable of column names to inspect.
        mobile_data: DataFrame containing those columns.

    Returns:
        List of arrays, one per column, holding that column's unique values.

    Note: the previous version accumulated into module-level globals
    (``list_names`` / ``all_uniques``), which raised NameError when they were
    undefined and leaked state between calls; the accumulator is local now.
    """
    all_uniques = []
    for name in col_names:
        uniques = mobile_data[name].unique()
        num_unique = mobile_data[name].nunique(dropna=True)
        print("\nThere are " + str(num_unique) + " unique " + str(name) + "s.")
        all_uniques.append(uniques)
    return all_uniques


def replace_copies(df, replace_this, replace_with):
    """Canonicalize duplicate names and merge their rows by summing.

    Each value of ``replace_this`` that occurs in ``df['name']`` is replaced
    by the corresponding entry of ``replace_with`` (across the whole frame,
    matching the original behavior), then rows are merged with a per-name sum.
    """
    # Build the alias -> canonical mapping once instead of calling
    # df.replace() inside a quadratic rows-by-aliases loop.
    present = set(df['name'])
    mapping = {old: new
               for old, new in zip(replace_this, replace_with)
               if old in present}
    if mapping:
        df = df.replace(mapping)
    df = df.groupby(['name']).sum()
    return df.reset_index()


def normalize_data(raw_data):
    """Min-max scale the 'values' column to [0, 1].

    Returns the scaled values as a 2-D array. ``preprocessing`` (sklearn)
    comes from the shared utilities notebook.
    """
    x = raw_data[['values']].values.astype(float)
    min_max_scaler = preprocessing.MinMaxScaler()
    return min_max_scaler.fit_transform(x)


def apply_normalizer(normalized_data, device_type, device_list):
    """Wrap normalized values in a DataFrame tagged with a device label.

    ``device_list`` is extended in place with one ``device_type`` entry per
    row (preserving the original call-site contract) and then attached as
    the 'device' column.
    """
    normalized_df = pd.DataFrame(normalized_data)
    normalized_df.columns = ['normalized']
    device_list.extend([device_type] * len(normalized_df))
    normalized_df['device'] = device_list
    return normalized_df


def get_top_users(df, NUM_TOP):
    """Return the NUM_TOP most frequent usernames with their click counts.

    The result has columns ['index', 'username', 'no. clicks'], ordered by
    descending click count.
    """
    user_counts = df['username'].value_counts().reset_index()
    user_counts.columns = ['username', 'no. clicks']
    # head() + reset_index() replaces the old column-by-column transpose,
    # which also coerced the counts to object dtype.
    return user_counts.head(NUM_TOP).reset_index()


def normalize_data_users(raw_data):
    """Min-max normalize per user rather than over the whole dataset.

    NOTE(review): the groupby().transform() result is assigned wholesale to
    the 'values' column; this assumes 'values' is the only non-grouping
    column - confirm against callers before reusing elsewhere.
    """
    x_scaled = raw_data.assign(
        values=raw_data.groupby('username').transform(
            lambda x: (x - x.min()) / (x.max() - x.min())))
    return x_scaled


def convert_time(df):
    """Parse ``df['time']`` strings into a sorted 'timestamp' column.

    Rows are parsed with the shared ``datetimeFormat`` (from the utilities
    notebook); rows whose timestamp lacks milliseconds fall back to
    '%Y-%m-%d %H:%M:%S %Z'. Prints how many rows needed the fallback and
    returns the frame sorted by timestamp.
    """
    timestamp_list = []
    no_ms_count = 0
    for num in range(len(df)):
        try:
            timestamp_list.append(
                datetime.datetime.strptime(df['time'][num], datetimeFormat))
        except ValueError:
            # Narrowed from a bare except: only a parse failure should
            # trigger the no-milliseconds fallback format.
            timestamp_list.append(
                datetime.datetime.strptime(df['time'][num],
                                           '%Y-%m-%d %H:%M:%S %Z'))
            no_ms_count += 1
    df['timestamp'] = timestamp_list
    print("No milliseconds count: " + str(no_ms_count))
    return df.sort_values(by='timestamp', ascending=True)


def create_count_df_user(df):
    """Count interactions per (username, device, week).

    Adds a 'week_year' column ("WW/YYYY") and returns a tidy frame with
    columns ['username', 'device', 'week_year', 'count'] sorted by week.
    Assumes 'timestamp' holds pandas Timestamps (so ``.week`` exists) -
    TODO confirm against convert_time callers.
    """
    df['week_year'] = df['timestamp'].apply(
        lambda x: "%02d/%d" % (x.week, x.year))
    counts = df.groupby(['username', 'device']).week_year.value_counts()
    counts = counts.to_frame()
    counts.columns = ['count']
    counts = counts.sort_values(by='week_year', ascending=True)
    return counts.reset_index()


def create_count_df_mobile(df):
    """Count interactions per (username, week) - mobile-only variant.

    Same as :func:`create_count_df_user` but grouped only by username.
    """
    df['week_year'] = df['timestamp'].apply(
        lambda x: "%02d/%d" % (x.week, x.year))
    counts = df.groupby(['username']).week_year.value_counts()
    counts = counts.to_frame()
    counts.columns = ['count']
    counts = counts.sort_values(by='week_year', ascending=True)
    return counts.reset_index()


def create_count_df_perweek(df):
    """Sum per-user counts into total clicks per (week_year, device)."""
    totals = df.groupby(['week_year', 'device'])['count'].sum().to_frame()
    totals.columns = ['clicks_per_week']
    totals = totals.sort_values(by='week_year', ascending=True)
    return totals.reset_index()


def _plot_delta_mean(df, title, xlim):
    """Render one horizontal delta-mean bar chart (shared plotting code)."""
    plt.figure(figsize=(15, 10))
    sns.catplot(x='delta_mean', y='question', hue='user_exp', data=df,
                kind="bar", legend=True, aspect=2)
    plt.title(title, fontsize=14)
    plt.xlabel('delta_mean', fontsize=13)
    plt.ylabel('question', fontsize=13)
    plt.xlim(*xlim)
    plt.xticks(rotation=45, horizontalalignment='right', fontsize='large')
    plt.show()


def plot_distinct_and_correct(df_distinct, df_correct, exp):
    """Plot per-question delta-from-mean for distinct attempts and accuracy.

    Args:
        df_distinct: frame with 'delta_mean', 'question', 'user_exp' columns
            for distinct-attempt data.
        df_correct: same schema for final-correctness data.
        exp: experience-group label appended to the titles.
    """
    print(" *delta_mean (positive = less than num_distinct average / required fewer attempts)")
    _plot_delta_mean(
        df_distinct,
        "Distance From Mean Per Question (Distinct Attempts)" + str(exp),
        (-5, 5))
    print(" *delta_mean (negative = greater than accuracy average / worse accuracy)")
    _plot_delta_mean(
        df_correct,
        "Distance From Mean Per Question (Accuracy)" + str(exp),
        (-.15, .05))


def _report_wilcoxon(label, sample_a, sample_b, alpha):
    """Run a Wilcoxon signed-rank test and print the verdict at ``alpha``."""
    t, p_value = wilcoxon(sample_a, sample_b)
    print(label)
    print(' Statistic=%.3f, p=%.8f' % (t, p_value))
    if p_value > alpha:
        print(' Samples look equal (fail to reject H0)')
    else:
        print(' Samples do not look equal (reject H0)')


def wilcoxon_test(df_dist_1, df_dist_2, df_corr_1, df_corr_2):
    """Wilcoxon tests on distinct-attempt and correctness deltas (alpha=0.05)."""
    _report_wilcoxon("Delta Mean Distinct Attempts:", df_dist_1, df_dist_2, 0.05)
    _report_wilcoxon("\nDelta Mean Correctness:", df_corr_1, df_corr_2, 0.05)


def wilcoxon_test_bon(df_dist_1, df_dist_2, df_corr_1, df_corr_2):
    """Same as :func:`wilcoxon_test` with a Bonferroni-corrected alpha."""
    BON = 3  # number of pairwise comparisons being corrected for
    _report_wilcoxon("Delta Mean Distinct Attempts:",
                     df_dist_1, df_dist_2, 0.05 / BON)
    _report_wilcoxon("\nDelta Mean Correctness:",
                     df_corr_1, df_corr_2, 0.05 / BON)
all_utilities.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # **Chapter 12 – Custom Models and Training with TensorFlow** # _This notebook contains all the sample code in chapter 12._ # <table align="left"> # <td> # <a target="_blank" href="https://colab.research.google.com/github/ageron/handson-ml2/blob/master/12_custom_models_and_training_with_tensorflow.ipynb"><img src="https://www.tensorflow.org/images/colab_logo_32px.png" />Run in Google Colab</a> # </td> # </table> # # Setup # First, let's import a few common modules, ensure MatplotLib plots figures inline and prepare a function to save the figures. We also check that Python 3.5 or later is installed (although Python 2.x may work, it is deprecated so we strongly recommend you use Python 3 instead), as well as Scikit-Learn ≥0.20 and TensorFlow ≥2.0. # + # Python ≥3.5 is required import sys assert sys.version_info >= (3, 5) # Scikit-Learn ≥0.20 is required import sklearn assert sklearn.__version__ >= "0.20" try: # # %tensorflow_version only exists in Colab. # %tensorflow_version 2.x # !pip install -U tqdm except Exception: pass # TensorFlow ≥2.0 is required import tensorflow as tf from tensorflow import keras assert tf.__version__ >= "2.0" # Common imports import numpy as np import os # to make this notebook's output stable across runs np.random.seed(42) tf.random.set_seed(42) # To plot pretty figures # %matplotlib inline import matplotlib as mpl import matplotlib.pyplot as plt mpl.rc('axes', labelsize=14) mpl.rc('xtick', labelsize=12) mpl.rc('ytick', labelsize=12) # Where to save the figures PROJECT_ROOT_DIR = "." 
CHAPTER_ID = "deep"
IMAGES_PATH = os.path.join(PROJECT_ROOT_DIR, "images", CHAPTER_ID)
os.makedirs(IMAGES_PATH, exist_ok=True)

def save_fig(fig_id, tight_layout=True, fig_extension="png", resolution=300):
    """Save the current Matplotlib figure as images/deep/<fig_id>.<fig_extension>."""
    path = os.path.join(IMAGES_PATH, fig_id + "." + fig_extension)
    print("Saving figure", fig_id)
    if tight_layout:
        plt.tight_layout()
    plt.savefig(path, format=fig_extension, dpi=resolution)
# -

# ## Tensors and operations

# ### Tensors

tf.constant([[1., 2., 3.], [4., 5., 6.]]) # matrix

tf.constant(42) # scalar

t = tf.constant([[1., 2., 3.], [4., 5., 6.]])
t

t.shape

t.dtype

# ### Indexing

t[:, 1:]

t[..., 1, tf.newaxis]

# ### Ops

t + 10

tf.square(t)

t @ tf.transpose(t)

# ### Using `keras.backend`

from tensorflow import keras
K = keras.backend
K.square(K.transpose(t)) + 10

# ### From/To NumPy

a = np.array([2., 4., 5.])
tf.constant(a)

t.numpy()

np.array(t)

tf.square(a)

np.square(t)

# ### Conflicting Types

# TF refuses implicit dtype mixing: int32 + float32 raises.
try:
    tf.constant(2.0) + tf.constant(40)
except tf.errors.InvalidArgumentError as ex:
    print(ex)

try:
    tf.constant(2.0) + tf.constant(40., dtype=tf.float64)
except tf.errors.InvalidArgumentError as ex:
    print(ex)

t2 = tf.constant(40., dtype=tf.float64)
tf.constant(2.0) + tf.cast(t2, tf.float32)

# ### Strings

tf.constant(b"hello world")

tf.constant("café")

u = tf.constant([ord(c) for c in "café"])
u

b = tf.strings.unicode_encode(u, "UTF-8")
tf.strings.length(b, unit="UTF8_CHAR")

tf.strings.unicode_decode(b, "UTF-8")

# ### String arrays

p = tf.constant(["Café", "Coffee", "caffè", "咖啡"])

tf.strings.length(p, unit="UTF8_CHAR")

r = tf.strings.unicode_decode(p, "UTF8")
r

print(r)

# ### Ragged tensors

print(r[1])

print(r[1:3])

r2 = tf.ragged.constant([[65, 66], [], [67]])
print(tf.concat([r, r2], axis=0))

r3 = tf.ragged.constant([[68, 69, 70], [71], [], [72, 73]])
print(tf.concat([r, r3], axis=1))

tf.strings.unicode_encode(r3, "UTF-8")

r.to_tensor()

# ### Sparse tensors

s = tf.SparseTensor(indices=[[0, 1], [1, 0], [2, 3]],
                    values=[1., 2., 3.],
                    dense_shape=[3, 4])
print(s)
tf.sparse.to_dense(s)

s2 = s * 2.0

# Sparse tensors support scalar multiplication but not scalar addition.
try:
    s3 = s + 1.
except TypeError as ex:
    print(ex)

s4 = tf.constant([[10., 20.], [30., 40.], [50., 60.], [70., 80.]])
tf.sparse.sparse_dense_matmul(s, s4)

# Indices here are deliberately out of lexicographic order.
s5 = tf.SparseTensor(indices=[[0, 2], [0, 1]],
                     values=[1., 2.],
                     dense_shape=[3, 4])
print(s5)

try:
    tf.sparse.to_dense(s5)
except tf.errors.InvalidArgumentError as ex:
    print(ex)

s6 = tf.sparse.reorder(s5)
tf.sparse.to_dense(s6)

# ### Sets

set1 = tf.constant([[2, 3, 5, 7], [7, 9, 0, 0]])
set2 = tf.constant([[4, 5, 6], [9, 10, 0]])
tf.sparse.to_dense(tf.sets.union(set1, set2))

tf.sparse.to_dense(tf.sets.difference(set1, set2))

tf.sparse.to_dense(tf.sets.intersection(set1, set2))

# ### Variables

v = tf.Variable([[1., 2., 3.], [4., 5., 6.]])

v.assign(2 * v)

v[0, 1].assign(42)

v[:, 2].assign([0., 1.])

# Direct item assignment is not supported; use assign()/scatter_*().
try:
    v[1] = [7., 8., 9.]
except TypeError as ex:
    print(ex)

v.scatter_nd_update(indices=[[0, 0], [1, 2]],
                    updates=[100., 200.])

sparse_delta = tf.IndexedSlices(values=[[1., 2., 3.], [4., 5., 6.]],
                                indices=[1, 0])
v.scatter_update(sparse_delta)

# ### Tensor Arrays

array = tf.TensorArray(dtype=tf.float32, size=3)
array = array.write(0, tf.constant([1., 2.]))
array = array.write(1, tf.constant([3., 10.]))
array = array.write(2, tf.constant([5., 7.]))

array.read(1)

array.stack()

mean, variance = tf.nn.moments(array.stack(), axes=0)
mean

variance

# ## Custom loss function

# Let's start by loading and preparing the California housing dataset. We first load it, then split it into a training set, a validation set and a test set, and finally we scale it:

# +
from sklearn.datasets import fetch_california_housing
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler

housing = fetch_california_housing()
X_train_full, X_test, y_train_full, y_test = train_test_split(
    housing.data, housing.target.reshape(-1, 1), random_state=42)
X_train, X_valid, y_train, y_valid = train_test_split(
    X_train_full, y_train_full, random_state=42)
scaler = StandardScaler()
X_train_scaled = scaler.fit_transform(X_train)
X_valid_scaled = scaler.transform(X_valid)
X_test_scaled = scaler.transform(X_test)
# -

def huber_fn(y_true, y_pred):
    """Huber loss with threshold 1: quadratic for small errors, linear beyond."""
    error = y_true - y_pred
    is_small_error = tf.abs(error) < 1
    squared_loss = tf.square(error) / 2
    linear_loss = tf.abs(error) - 0.5
    return tf.where(is_small_error, squared_loss, linear_loss)

plt.figure(figsize=(8, 3.5))
z = np.linspace(-4, 4, 200)
plt.plot(z, huber_fn(0, z), "b-", linewidth=2, label="huber($z$)")
plt.plot(z, z**2 / 2, "b:", linewidth=1, label=r"$\frac{1}{2}z^2$")
plt.plot([-1, -1], [0, huber_fn(0., -1.)], "r--")
plt.plot([1, 1], [0, huber_fn(0., 1.)], "r--")
plt.gca().axhline(y=0, color='k')
plt.gca().axvline(x=0, color='k')
plt.axis([-4, 4, 0, 4])
plt.grid(True)
plt.xlabel("$z$")
plt.legend(fontsize=14)
plt.title("Huber loss", fontsize=14)
plt.show()

# +
input_shape = X_train.shape[1:]

model = keras.models.Sequential([
    keras.layers.Dense(30, activation="selu", kernel_initializer="lecun_normal",
                       input_shape=input_shape),
    keras.layers.Dense(1),
])
# -

model.compile(loss=huber_fn, optimizer="nadam", metrics=["mae"])

model.fit(X_train_scaled, y_train, epochs=2,
          validation_data=(X_valid_scaled, y_valid))

# ## Saving/Loading Models with Custom Objects

model.save("my_model_with_a_custom_loss.h5")

# Custom losses must be passed back in through custom_objects when loading.
model = keras.models.load_model("my_model_with_a_custom_loss.h5",
                                custom_objects={"huber_fn": huber_fn})

model.fit(X_train_scaled, y_train, epochs=2,
          validation_data=(X_valid_scaled, y_valid))

def create_huber(threshold=1.0):
    """Factory: return a Huber loss closed over a configurable `threshold`."""
    def huber_fn(y_true, y_pred):
        error = y_true - y_pred
        is_small_error = tf.abs(error) < threshold
        squared_loss = tf.square(error) / 2
        linear_loss = threshold * tf.abs(error) - threshold**2 / 2
        return tf.where(is_small_error, squared_loss, linear_loss)
    return huber_fn

model.compile(loss=create_huber(2.0), optimizer="nadam", metrics=["mae"])

model.fit(X_train_scaled, y_train, epochs=2,
          validation_data=(X_valid_scaled, y_valid))

model.save("my_model_with_a_custom_loss_threshold_2.h5")

model = keras.models.load_model("my_model_with_a_custom_loss_threshold_2.h5",
                                custom_objects={"huber_fn": create_huber(2.0)})

model.fit(X_train_scaled, y_train, epochs=2,
          validation_data=(X_valid_scaled, y_valid))

class HuberLoss(keras.losses.Loss):
    """Serializable Huber loss: get_config() stores `threshold` so saving and
    loading the model preserves the configured value."""
    def __init__(self, threshold=1.0, **kwargs):
        self.threshold = threshold
        super().__init__(**kwargs)
    def call(self, y_true, y_pred):
        error = y_true - y_pred
        is_small_error = tf.abs(error) < self.threshold
        squared_loss = tf.square(error) / 2
        linear_loss = self.threshold * tf.abs(error) - self.threshold**2 / 2
        return tf.where(is_small_error, squared_loss, linear_loss)
    def get_config(self):
        base_config = super().get_config()
        return {**base_config, "threshold": self.threshold}

model = keras.models.Sequential([
    keras.layers.Dense(30, activation="selu", kernel_initializer="lecun_normal",
                       input_shape=input_shape),
    keras.layers.Dense(1),
])

model.compile(loss=HuberLoss(2.), optimizer="nadam", metrics=["mae"])

model.fit(X_train_scaled, y_train, epochs=2,
          validation_data=(X_valid_scaled, y_valid))

model.save("my_model_with_a_custom_loss_class.h5")

# +
#model = keras.models.load_model("my_model_with_a_custom_loss_class.h5", # TODO: check PR #25956
#                                custom_objects={"HuberLoss": HuberLoss})
# -

model.fit(X_train_scaled, y_train, epochs=2,
          validation_data=(X_valid_scaled, y_valid))

# +
#model = keras.models.load_model("my_model_with_a_custom_loss_class.h5", # TODO: check PR #25956
#                                custom_objects={"HuberLoss": HuberLoss})
# -

model.loss.threshold

# ## Other Custom Functions

keras.backend.clear_session()
np.random.seed(42)
tf.random.set_seed(42)

# +
def my_softplus(z): # return value is just tf.nn.softplus(z)
    return tf.math.log(tf.exp(z) + 1.0)

def my_glorot_initializer(shape, dtype=tf.float32):
    stddev = tf.sqrt(2. / (shape[0] + shape[1]))
    return tf.random.normal(shape, stddev=stddev, dtype=dtype)

def my_l1_regularizer(weights):
    return tf.reduce_sum(tf.abs(0.01 * weights))

def my_positive_weights(weights): # return value is just tf.nn.relu(weights)
    return tf.where(weights < 0., tf.zeros_like(weights), weights)
# -

layer = keras.layers.Dense(1, activation=my_softplus,
                           kernel_initializer=my_glorot_initializer,
                           kernel_regularizer=my_l1_regularizer,
                           kernel_constraint=my_positive_weights)

keras.backend.clear_session()
np.random.seed(42)
tf.random.set_seed(42)

model = keras.models.Sequential([
    keras.layers.Dense(30, activation="selu", kernel_initializer="lecun_normal",
                       input_shape=input_shape),
    keras.layers.Dense(1, activation=my_softplus,
                       kernel_regularizer=my_l1_regularizer,
                       kernel_constraint=my_positive_weights,
                       kernel_initializer=my_glorot_initializer),
])

model.compile(loss="mse", optimizer="nadam", metrics=["mae"])

model.fit(X_train_scaled, y_train, epochs=2,
          validation_data=(X_valid_scaled, y_valid))

model.save("my_model_with_many_custom_parts.h5")

model = keras.models.load_model(
    "my_model_with_many_custom_parts.h5",
    custom_objects={
        "my_l1_regularizer": my_l1_regularizer,
        "my_positive_weights": my_positive_weights,
        "my_glorot_initializer": my_glorot_initializer,
        "my_softplus": my_softplus,
    })

class MyL1Regularizer(keras.regularizers.Regularizer):
    """L1 regularizer as a class, so its `factor` survives save/load."""
    def __init__(self, factor):
        self.factor = factor
    def __call__(self, weights):
        return tf.reduce_sum(tf.abs(self.factor * weights))
    def get_config(self):
        return {"factor": self.factor}

keras.backend.clear_session()
np.random.seed(42)
tf.random.set_seed(42)

model = keras.models.Sequential([
    keras.layers.Dense(30, activation="selu", kernel_initializer="lecun_normal",
                       input_shape=input_shape),
    keras.layers.Dense(1, activation=my_softplus,
                       kernel_regularizer=MyL1Regularizer(0.01),
                       kernel_constraint=my_positive_weights,
                       kernel_initializer=my_glorot_initializer),
])

model.compile(loss="mse", optimizer="nadam", metrics=["mae"])

model.fit(X_train_scaled, y_train, epochs=2,
          validation_data=(X_valid_scaled, y_valid))

model.save("my_model_with_many_custom_parts.h5")

model = keras.models.load_model(
    "my_model_with_many_custom_parts.h5",
    custom_objects={
        "MyL1Regularizer": MyL1Regularizer,
        "my_positive_weights": my_positive_weights,
        "my_glorot_initializer": my_glorot_initializer,
        "my_softplus": my_softplus,
    })

# ## Custom Metrics

keras.backend.clear_session()
np.random.seed(42)
tf.random.set_seed(42)

model = keras.models.Sequential([
    keras.layers.Dense(30, activation="selu", kernel_initializer="lecun_normal",
                       input_shape=input_shape),
    keras.layers.Dense(1),
])

model.compile(loss="mse", optimizer="nadam", metrics=[create_huber(2.0)])

model.fit(X_train_scaled, y_train, epochs=2)

# **Warning**: if you use the same function as the loss and a metric, you may be surprised to see different results. This is generally just due to floating point precision errors: even though the mathematical equations are equivalent, the operations are not run in the same order, which can lead to small differences. Moreover, when using sample weights, there's more than just precision errors:
# * the loss since the start of the epoch is the mean of all batch losses seen so far. Each batch loss is the sum of the weighted instance losses divided by the _batch size_ (not the sum of weights, so the batch loss is _not_ the weighted mean of the losses).
# * the metric since the start of the epoch is equal to the sum of weighted instance losses divided by sum of all weights seen so far. In other words, it is the weighted mean of all the instance losses. Not the same thing.
#
# If you do the math, you will find that loss = metric * mean of sample weights (plus some floating point precision error).

model.compile(loss=create_huber(2.0), optimizer="nadam", metrics=[create_huber(2.0)])

sample_weight = np.random.rand(len(y_train))
history = model.fit(X_train_scaled, y_train, epochs=2, sample_weight=sample_weight)

history.history["loss"][0], history.history["huber_fn"][0] * sample_weight.mean()

# ### Streaming metrics

# Streaming (stateful) metrics accumulate across calls until reset.
precision = keras.metrics.Precision()
precision([0, 1, 1, 1, 0, 1, 0, 1], [1, 1, 0, 1, 0, 1, 0, 1])

precision([0, 1, 0, 0, 1, 0, 1, 1], [1, 0, 1, 1, 0, 0, 0, 0])

precision.result()

precision.variables

precision.reset_states()

# Creating a streaming metric:

class HuberMetric(keras.metrics.Metric):
    """Streaming Huber metric: keeps running `total` (sum of losses) and
    `count` (number of instances); result() is their ratio."""
    def __init__(self, threshold=1.0, **kwargs):
        super().__init__(**kwargs) # handles base args (e.g., dtype)
        self.threshold = threshold
        #self.huber_fn = create_huber(threshold) # TODO: investigate why this fails
        self.total = self.add_weight("total", initializer="zeros")
        self.count = self.add_weight("count", initializer="zeros")
    def huber_fn(self, y_true, y_pred): # workaround
        error = y_true - y_pred
        is_small_error = tf.abs(error) < self.threshold
        squared_loss = tf.square(error) / 2
        linear_loss = self.threshold * tf.abs(error) - self.threshold**2 / 2
        return tf.where(is_small_error, squared_loss, linear_loss)
    def update_state(self, y_true, y_pred, sample_weight=None):
        metric = self.huber_fn(y_true, y_pred)
        self.total.assign_add(tf.reduce_sum(metric))
        self.count.assign_add(tf.cast(tf.size(y_true), tf.float32))
    def result(self):
        return self.total / self.count
    def get_config(self):
        base_config = super().get_config()
        return {**base_config, "threshold": self.threshold}

# **Warning**: when running the following cell, if you get autograph warnings such as `WARNING:tensorflow:AutoGraph could not transform [...] and will run it as-is`, then please install version 0.2.2 of the gast library (e.g., by running `!pip install gast==0.2.2`), then restart the kernel and run this notebook again from the beginning (see [autograph issue #1](https://github.com/tensorflow/autograph/issues/1) for more details):

# +
m = HuberMetric(2.)

# total = 2 * |10 - 2| - 2²/2 = 14
# count = 1
# result = 14 / 1 = 14
m(tf.constant([[2.]]), tf.constant([[10.]]))

# +
# total = total + (|1 - 0|² / 2) + (2 * |9.25 - 5| - 2² / 2) = 14 + 7 = 21
# count = count + 2 = 3
# result = total / count = 21 / 3 = 7
m(tf.constant([[0.], [5.]]), tf.constant([[1.], [9.25]]))

m.result()
# -

m.variables

m.reset_states()

m.variables

# Let's check that the `HuberMetric` class works well:

keras.backend.clear_session()
np.random.seed(42)
tf.random.set_seed(42)

model = keras.models.Sequential([
    keras.layers.Dense(30, activation="selu", kernel_initializer="lecun_normal",
                       input_shape=input_shape),
    keras.layers.Dense(1),
])

model.compile(loss=create_huber(2.0), optimizer="nadam", metrics=[HuberMetric(2.0)])

model.fit(X_train_scaled.astype(np.float32), y_train.astype(np.float32), epochs=2)

model.save("my_model_with_a_custom_metric.h5")

# +
#model = keras.models.load_model("my_model_with_a_custom_metric.h5", # TODO: check PR #25956
#                                custom_objects={"huber_fn": create_huber(2.0),
#                                                "HuberMetric": HuberMetric})
# -

model.fit(X_train_scaled.astype(np.float32), y_train.astype(np.float32), epochs=2)

# **Warning**: In TF 2.2, tf.keras adds an extra first metric in `model.metrics` at position 0 (see [TF issue #38150](https://github.com/tensorflow/tensorflow/issues/38150)). This forces us to use `model.metrics[-1]` rather than `model.metrics[0]` to access the `HuberMetric`.

model.metrics[-1].threshold

# Looks like it works fine! More simply, we could have created the class like this:

class HuberMetric(keras.metrics.Mean):
    """Streaming Huber metric built on keras.metrics.Mean — inherits shape
    handling and sample-weight support from the base class."""
    def __init__(self, threshold=1.0, name='HuberMetric', dtype=None):
        self.threshold = threshold
        self.huber_fn = create_huber(threshold)
        super().__init__(name=name, dtype=dtype)
    def update_state(self, y_true, y_pred, sample_weight=None):
        metric = self.huber_fn(y_true, y_pred)
        super(HuberMetric, self).update_state(metric, sample_weight)
    def get_config(self):
        base_config = super().get_config()
        return {**base_config, "threshold": self.threshold}

# This class handles shapes better, and it also supports sample weights.

keras.backend.clear_session()
np.random.seed(42)
tf.random.set_seed(42)

model = keras.models.Sequential([
    keras.layers.Dense(30, activation="selu", kernel_initializer="lecun_normal",
                       input_shape=input_shape),
    keras.layers.Dense(1),
])

model.compile(loss=keras.losses.Huber(2.0), optimizer="nadam", weighted_metrics=[HuberMetric(2.0)])

sample_weight = np.random.rand(len(y_train))
history = model.fit(X_train_scaled.astype(np.float32), y_train.astype(np.float32),
                    epochs=2, sample_weight=sample_weight)

history.history["loss"][0], history.history["HuberMetric"][0] * sample_weight.mean()

model.save("my_model_with_a_custom_metric_v2.h5")

# +
#model = keras.models.load_model("my_model_with_a_custom_metric_v2.h5", # TODO: check PR #25956
#                                custom_objects={"HuberMetric": HuberMetric})
# -

model.fit(X_train_scaled.astype(np.float32), y_train.astype(np.float32), epochs=2)

# **Warning**: In TF 2.2, tf.keras adds an extra first metric in `model.metrics` at position 0 (see [TF issue #38150](https://github.com/tensorflow/tensorflow/issues/38150)). This forces us to use `model.metrics[-1]` rather than `model.metrics[0]` to access the `HuberMetric`.
model.metrics[-1].threshold

# ## Custom Layers

exponential_layer = keras.layers.Lambda(lambda x: tf.exp(x))

exponential_layer([-1., 0., 1.])

# Adding an exponential layer at the output of a regression model can be useful if the values to predict are positive and with very different scales (e.g., 0.001, 10., 10000):

keras.backend.clear_session()
np.random.seed(42)
tf.random.set_seed(42)

model = keras.models.Sequential([
    keras.layers.Dense(30, activation="relu", input_shape=input_shape),
    keras.layers.Dense(1),
    exponential_layer
])
model.compile(loss="mse", optimizer="nadam")
model.fit(X_train_scaled, y_train, epochs=5,
          validation_data=(X_valid_scaled, y_valid))
model.evaluate(X_test_scaled, y_test)

class MyDense(keras.layers.Layer):
    """Re-implementation of a Dense layer: kernel/bias created lazily in
    build() once the input size is known."""
    def __init__(self, units, activation=None, **kwargs):
        super().__init__(**kwargs)
        self.units = units
        self.activation = keras.activations.get(activation)

    def build(self, batch_input_shape):
        self.kernel = self.add_weight(
            name="kernel", shape=[batch_input_shape[-1], self.units],
            initializer="glorot_normal")
        self.bias = self.add_weight(
            name="bias", shape=[self.units], initializer="zeros")
        super().build(batch_input_shape) # must be at the end

    def call(self, X):
        return self.activation(X @ self.kernel + self.bias)

    def compute_output_shape(self, batch_input_shape):
        return tf.TensorShape(batch_input_shape.as_list()[:-1] + [self.units])

    def get_config(self):
        base_config = super().get_config()
        return {**base_config, "units": self.units,
                "activation": keras.activations.serialize(self.activation)}

keras.backend.clear_session()
np.random.seed(42)
tf.random.set_seed(42)

model = keras.models.Sequential([
    MyDense(30, activation="relu", input_shape=input_shape),
    MyDense(1)
])

model.compile(loss="mse", optimizer="nadam")
model.fit(X_train_scaled, y_train, epochs=2,
          validation_data=(X_valid_scaled, y_valid))
model.evaluate(X_test_scaled, y_test)

model.save("my_model_with_a_custom_layer.h5")

model = keras.models.load_model("my_model_with_a_custom_layer.h5",
                                custom_objects={"MyDense": MyDense})

class MyMultiLayer(keras.layers.Layer):
    """Layer with two inputs and two outputs (their sum and product)."""
    def call(self, X):
        X1, X2 = X
        return X1 + X2, X1 * X2

    def compute_output_shape(self, batch_input_shape):
        batch_input_shape1, batch_input_shape2 = batch_input_shape
        return [batch_input_shape1, batch_input_shape2]

keras.backend.clear_session()
np.random.seed(42)
tf.random.set_seed(42)

inputs1 = keras.layers.Input(shape=[2])
inputs2 = keras.layers.Input(shape=[2])
outputs1, outputs2 = MyMultiLayer()((inputs1, inputs2))

# Let's create a layer with a different behavior during training and testing:

class AddGaussianNoise(keras.layers.Layer):
    """Adds Gaussian noise during training only (identity at inference)."""
    def __init__(self, stddev, **kwargs):
        super().__init__(**kwargs)
        self.stddev = stddev

    def call(self, X, training=None):
        if training:
            noise = tf.random.normal(tf.shape(X), stddev=self.stddev)
            return X + noise
        else:
            return X

    def compute_output_shape(self, batch_input_shape):
        return batch_input_shape

# NOTE(review): this re-trains the previously loaded MyDense model, not a
# model containing AddGaussianNoise — looks intentional in the book's flow.
model.compile(loss="mse", optimizer="nadam")
model.fit(X_train_scaled, y_train, epochs=2,
          validation_data=(X_valid_scaled, y_valid))
model.evaluate(X_test_scaled, y_test)

# ## Custom Models

X_new_scaled = X_test_scaled

class ResidualBlock(keras.layers.Layer):
    """A stack of Dense layers with a skip connection (inputs + output)."""
    def __init__(self, n_layers, n_neurons, **kwargs):
        super().__init__(**kwargs)
        self.hidden = [keras.layers.Dense(n_neurons, activation="elu",
                                          kernel_initializer="he_normal")
                       for _ in range(n_layers)]

    def call(self, inputs):
        Z = inputs
        for layer in self.hidden:
            Z = layer(Z)
        return inputs + Z

class ResidualRegressor(keras.models.Model):
    """Subclassed model: one hidden layer, a reused residual block (applied
    4 times), a second residual block, then a linear output layer."""
    def __init__(self, output_dim, **kwargs):
        super().__init__(**kwargs)
        self.hidden1 = keras.layers.Dense(30, activation="elu",
                                          kernel_initializer="he_normal")
        self.block1 = ResidualBlock(2, 30)
        self.block2 = ResidualBlock(2, 30)
        self.out = keras.layers.Dense(output_dim)

    def call(self, inputs):
        Z = self.hidden1(inputs)
        for _ in range(1 + 3):
            Z = self.block1(Z)
        Z = self.block2(Z)
        return self.out(Z)

keras.backend.clear_session()
np.random.seed(42)
tf.random.set_seed(42)

model = ResidualRegressor(1)
model.compile(loss="mse", optimizer="nadam")
history = model.fit(X_train_scaled, y_train, epochs=5)
score = model.evaluate(X_test_scaled, y_test)
y_pred = model.predict(X_new_scaled)

model.save("my_custom_model.ckpt")

model = keras.models.load_model("my_custom_model.ckpt")

history = model.fit(X_train_scaled, y_train, epochs=5)

# We could have defined the model using the sequential API instead:

keras.backend.clear_session()
np.random.seed(42)
tf.random.set_seed(42)

block1 = ResidualBlock(2, 30)
model = keras.models.Sequential([
    keras.layers.Dense(30, activation="elu", kernel_initializer="he_normal"),
    block1, block1, block1, block1,
    ResidualBlock(2, 30),
    keras.layers.Dense(1)
])

model.compile(loss="mse", optimizer="nadam")
history = model.fit(X_train_scaled, y_train, epochs=5)
score = model.evaluate(X_test_scaled, y_test)
y_pred = model.predict(X_new_scaled)

# ## Losses and Metrics Based on Model Internals

class ReconstructingRegressor(keras.models.Model):
    """Regressor that also reconstructs its inputs and adds 5% of the
    reconstruction MSE as an auxiliary loss via add_loss()."""
    def __init__(self, output_dim, **kwargs):
        super().__init__(**kwargs)
        self.hidden = [keras.layers.Dense(30, activation="selu",
                                          kernel_initializer="lecun_normal")
                       for _ in range(5)]
        self.out = keras.layers.Dense(output_dim)
        # TODO: check https://github.com/tensorflow/tensorflow/issues/26260
        #self.reconstruction_mean = keras.metrics.Mean(name="reconstruction_error")

    def build(self, batch_input_shape):
        n_inputs = batch_input_shape[-1]
        self.reconstruct = keras.layers.Dense(n_inputs)
        super().build(batch_input_shape)

    def call(self, inputs, training=None):
        Z = inputs
        for layer in self.hidden:
            Z = layer(Z)
        reconstruction = self.reconstruct(Z)
        recon_loss = tf.reduce_mean(tf.square(reconstruction - inputs))
        self.add_loss(0.05 * recon_loss)
        #if training:
        #    result = self.reconstruction_mean(recon_loss)
        #    self.add_metric(result)
        return self.out(Z)

keras.backend.clear_session()
np.random.seed(42)
tf.random.set_seed(42)

model = ReconstructingRegressor(1)
model.compile(loss="mse", optimizer="nadam")
history = model.fit(X_train_scaled, y_train, epochs=2)
y_pred = model.predict(X_test_scaled)

# ## Computing Gradients with Autodiff

def f(w1, w2):
    """Toy function used for the autodiff demos below."""
    return 3 * w1 ** 2 + 2 * w1 * w2

# Finite-difference approximations of the partial derivatives:
w1, w2 = 5, 3
eps = 1e-6
(f(w1 + eps, w2) - f(w1, w2)) / eps

(f(w1, w2 + eps) - f(w1, w2)) / eps

# +
w1, w2 = tf.Variable(5.), tf.Variable(3.)
with tf.GradientTape() as tape:
    z = f(w1, w2)

gradients = tape.gradient(z, [w1, w2])
# -

gradients

# A non-persistent tape can only be queried once:
# +
with tf.GradientTape() as tape:
    z = f(w1, w2)

dz_dw1 = tape.gradient(z, w1)
try:
    dz_dw2 = tape.gradient(z, w2)
except RuntimeError as ex:
    print(ex)

# +
with tf.GradientTape(persistent=True) as tape:
    z = f(w1, w2)

dz_dw1 = tape.gradient(z, w1)
dz_dw2 = tape.gradient(z, w2) # works now!
del tape
# -

dz_dw1, dz_dw2

# +
c1, c2 = tf.constant(5.), tf.constant(3.)
with tf.GradientTape() as tape:
    z = f(c1, c2)

gradients = tape.gradient(z, [c1, c2])
# -

gradients

# Constants are not tracked unless explicitly watched:
# +
with tf.GradientTape() as tape:
    tape.watch(c1)
    tape.watch(c2)
    z = f(c1, c2)

gradients = tape.gradient(z, [c1, c2])
# -

gradients

# +
with tf.GradientTape() as tape:
    z1 = f(w1, w2 + 2.)
    z2 = f(w1, w2 + 5.)
    z3 = f(w1, w2 + 7.)

tape.gradient([z1, z2, z3], [w1, w2])

# +
with tf.GradientTape(persistent=True) as tape:
    z1 = f(w1, w2 + 2.)
    z2 = f(w1, w2 + 5.)
    z3 = f(w1, w2 + 7.)

tf.reduce_sum(tf.stack([tape.gradient(z, [w1, w2]) for z in (z1, z2, z3)]), axis=0)
del tape
# -

with tf.GradientTape(persistent=True) as hessian_tape:
    with tf.GradientTape() as jacobian_tape:
        z = f(w1, w2)
    jacobians = jacobian_tape.gradient(z, [w1, w2])
hessians = [hessian_tape.gradient(jacobian, [w1, w2])
            for jacobian in jacobians]
del hessian_tape

jacobians

hessians

# +
def f(w1, w2):
    # stop_gradient: the second term is used in the forward pass but ignored
    # during backpropagation.
    return 3 * w1 ** 2 + tf.stop_gradient(2 * w1 * w2)

with tf.GradientTape() as tape:
    z = f(w1, w2)

tape.gradient(z, [w1, w2])

# +
x = tf.Variable(100.)
with tf.GradientTape() as tape:
    z = my_softplus(x)

tape.gradient(z, [x])
# -

tf.math.log(tf.exp(tf.constant(30., dtype=tf.float32)) + 1.)
# +
x = tf.Variable([100.])
with tf.GradientTape() as tape:
    z = my_softplus(x)

tape.gradient(z, [x])
# -

@tf.custom_gradient
def my_better_softplus(z):
    """Softplus with a hand-written, numerically safer gradient."""
    exp = tf.exp(z)
    def my_softplus_gradients(grad):
        return grad / (1 + 1 / exp)
    return tf.math.log(exp + 1), my_softplus_gradients

# NOTE(review): this second definition immediately shadows the
# @tf.custom_gradient version above — for large z it just returns z.
def my_better_softplus(z):
    return tf.where(z > 30., z, tf.math.log(tf.exp(z) + 1.))

# +
x = tf.Variable([1000.])
with tf.GradientTape() as tape:
    z = my_better_softplus(x)

z, tape.gradient(z, [x])
# -

# # Computing Gradients Using Autodiff

keras.backend.clear_session()
np.random.seed(42)
tf.random.set_seed(42)

l2_reg = keras.regularizers.l2(0.05)
model = keras.models.Sequential([
    keras.layers.Dense(30, activation="elu", kernel_initializer="he_normal",
                       kernel_regularizer=l2_reg),
    keras.layers.Dense(1, kernel_regularizer=l2_reg)
])

def random_batch(X, y, batch_size=32):
    """Sample one random mini-batch (with replacement) from X and y."""
    idx = np.random.randint(len(X), size=batch_size)
    return X[idx], y[idx]

def print_status_bar(iteration, total, loss, metrics=None):
    """Print a single carriage-return status line with loss and metrics."""
    metrics = " - ".join(["{}: {:.4f}".format(m.name, m.result())
                          for m in [loss] + (metrics or [])])
    end = "" if iteration < total else "\n"
    print("\r{}/{} - ".format(iteration, total) + metrics,
          end=end)

# +
import time

mean_loss = keras.metrics.Mean(name="loss")
mean_square = keras.metrics.Mean(name="mean_square")
for i in range(1, 50 + 1):
    loss = 1 / i
    mean_loss(loss)
    mean_square(i ** 2)
    print_status_bar(i, 50, mean_loss, [mean_square])
    time.sleep(0.05)
# -

# A fancier version with a progress bar:

def progress_bar(iteration, total, size=30):
    """Render a text progress bar like ` 3500/10000 [=>....]`."""
    running = iteration < total
    c = ">" if running else "="
    p = (size - 1) * iteration // total
    fmt = "{{:-{}d}}/{{}} [{{}}]".format(len(str(total)))
    params = [iteration, total, "=" * p + c + "." * (size - p - 1)]
    return fmt.format(*params)

progress_bar(3500, 10000, size=6)

def print_status_bar(iteration, total, loss, metrics=None, size=30):
    """Status line variant that embeds the progress bar above."""
    metrics = " - ".join(["{}: {:.4f}".format(m.name, m.result())
                          for m in [loss] + (metrics or [])])
    end = "" if iteration < total else "\n"
    print("\r{} - {}".format(progress_bar(iteration, total), metrics), end=end)

mean_loss = keras.metrics.Mean(name="loss")
mean_square = keras.metrics.Mean(name="mean_square")
for i in range(1, 50 + 1):
    loss = 1 / i
    mean_loss(loss)
    mean_square(i ** 2)
    print_status_bar(i, 50, mean_loss, [mean_square])
    time.sleep(0.05)

keras.backend.clear_session()
np.random.seed(42)
tf.random.set_seed(42)

n_epochs = 5
batch_size = 32
n_steps = len(X_train) // batch_size
# NOTE(review): `lr` is the legacy argument name; newer TF versions expect
# `learning_rate` — confirm against the pinned TF version before changing.
optimizer = keras.optimizers.Nadam(lr=0.01)
loss_fn = keras.losses.mean_squared_error
mean_loss = keras.metrics.Mean()
metrics = [keras.metrics.MeanAbsoluteError()]

# Fully manual training loop: forward pass + regularization losses, backprop,
# constraint application, then streaming-metric updates.
for epoch in range(1, n_epochs + 1):
    print("Epoch {}/{}".format(epoch, n_epochs))
    for step in range(1, n_steps + 1):
        X_batch, y_batch = random_batch(X_train_scaled, y_train)
        with tf.GradientTape() as tape:
            y_pred = model(X_batch)
            main_loss = tf.reduce_mean(loss_fn(y_batch, y_pred))
            loss = tf.add_n([main_loss] + model.losses)
        gradients = tape.gradient(loss, model.trainable_variables)
        optimizer.apply_gradients(zip(gradients, model.trainable_variables))
        for variable in model.variables:
            if variable.constraint is not None:
                variable.assign(variable.constraint(variable))
        mean_loss(loss)
        for metric in metrics:
            metric(y_batch, y_pred)
        print_status_bar(step * batch_size, len(y_train), mean_loss, metrics)
    print_status_bar(len(y_train), len(y_train), mean_loss, metrics)
    for metric in [mean_loss] + metrics:
        metric.reset_states()

# Same loop, but with tqdm progress bars when available.
try:
    from tqdm.notebook import trange
    from collections import OrderedDict
    with trange(1, n_epochs + 1, desc="All epochs") as epochs:
        for epoch in epochs:
            with trange(1, n_steps + 1, desc="Epoch {}/{}".format(epoch, n_epochs)) as steps:
                for step in steps:
                    X_batch, y_batch = random_batch(X_train_scaled, y_train)
                    with tf.GradientTape() as tape:
                        y_pred = model(X_batch)
                        main_loss = tf.reduce_mean(loss_fn(y_batch, y_pred))
                        loss = tf.add_n([main_loss] + model.losses)
                    gradients = tape.gradient(loss, model.trainable_variables)
                    optimizer.apply_gradients(zip(gradients, model.trainable_variables))
                    for variable in model.variables:
                        if variable.constraint is not None:
                            variable.assign(variable.constraint(variable))
                    status = OrderedDict()
                    mean_loss(loss)
                    status["loss"] = mean_loss.result().numpy()
                    for metric in metrics:
                        metric(y_batch, y_pred)
                        status[metric.name] = metric.result().numpy()
                    steps.set_postfix(status)
            for metric in [mean_loss] + metrics:
                metric.reset_states()
except ImportError as ex:
    print("To run this cell, please install tqdm, ipywidgets and restart Jupyter")

# ## TensorFlow Functions

def cube(x):
    return x ** 3

cube(2)

cube(tf.constant(2.0))

tf_cube = tf.function(cube)
tf_cube

tf_cube(2)

tf_cube(tf.constant(2.0))

# ### TF Functions and Concrete Functions

concrete_function = tf_cube.get_concrete_function(tf.constant(2.0))
concrete_function.graph

concrete_function(tf.constant(2.0))

# Concrete functions are cached per input signature:
concrete_function is tf_cube.get_concrete_function(tf.constant(2.0))

# ### Exploring Function Definitions and Graphs

concrete_function.graph

ops = concrete_function.graph.get_operations()
ops

pow_op = ops[2]
list(pow_op.inputs)

pow_op.outputs

concrete_function.graph.get_operation_by_name('x')

concrete_function.graph.get_tensor_by_name('Identity:0')

concrete_function.function_def.signature

# ### How TF Functions Trace Python Functions to Extract Their Computation Graphs

@tf.function
def tf_cube(x):
    # The print() runs only while tracing, so it reveals when retracing happens.
    print("print:", x)
    return x ** 3

result = tf_cube(tf.constant(2.0))
result

result = tf_cube(2)
result = tf_cube(3)
result = tf_cube(tf.constant([[1., 2.]])) # New shape: trace!
result = tf_cube(tf.constant([[3., 4.], [5., 6.]])) # New shape: trace!
# Same rank-2 shape pattern as the previous call: no new trace.
result = tf_cube(tf.constant([[7., 8.], [9., 10.], [11., 12.]]))  # no trace

# It is also possible to specify a particular input signature:

@tf.function(input_signature=[tf.TensorSpec([None, 28, 28], tf.float32)])
def shrink(images):
    """Downsample a batch of 28x28 images by keeping every other row/column."""
    print("Tracing", images)  # runs only while tracing
    # FIX: the slice below was corrupted in the source ("fc00:db20:35b:7399::5",
    # an address-scrubbing artifact, is not valid Python). Restored to the
    # stride-2 slice described by the trailing comment and implied by the
    # [None, 28, 28] signature.
    return images[:, ::2, ::2]  # drop half the rows and columns

keras.backend.clear_session()
np.random.seed(42)
tf.random.set_seed(42)

img_batch_1 = tf.random.uniform(shape=[100, 28, 28])
img_batch_2 = tf.random.uniform(shape=[50, 28, 28])
preprocessed_images = shrink(img_batch_1)  # Traces the function.
preprocessed_images = shrink(img_batch_2)  # Reuses the same concrete function.

# Inputs that don't match the declared signature are rejected.
img_batch_3 = tf.random.uniform(shape=[2, 2, 2])
try:
    preprocessed_images = shrink(img_batch_3)  # rejects unexpected types or shapes
except ValueError as ex:
    print(ex)

# ### Using Autograph To Capture Control Flow

# A "static" `for` loop using `range()`: unrolled at trace time into 10 adds.

@tf.function
def add_10(x):
    for i in range(10):
        x += 1
    return x

add_10(tf.constant(5))

add_10.get_concrete_function(tf.constant(5)).graph.get_operations()

# A "dynamic" loop using `tf.while_loop()`: a single loop op in the graph.

@tf.function
def add_10(x):
    condition = lambda i, x: tf.less(i, 10)
    body = lambda i, x: (tf.add(i, 1), tf.add(x, 1))
    final_i, final_x = tf.while_loop(condition, body, [tf.constant(0), x])
    return final_x

add_10(tf.constant(5))

add_10.get_concrete_function(tf.constant(5)).graph.get_operations()

# A "dynamic" `for` loop using `tf.range()` (captured by autograph):

@tf.function
def add_10(x):
    for i in tf.range(10):
        x = x + 1
    return x

add_10.get_concrete_function(tf.constant(0)).graph.get_operations()

# ### Handling Variables and Other Resources in TF Functions

# Variant 1: the variable is passed in as an argument (a resource input).
# +
counter = tf.Variable(0)

@tf.function
def increment(counter, c=1):
    return counter.assign_add(c)
# -

increment(counter)
increment(counter)

function_def = increment.get_concrete_function(counter).function_def
function_def.signature.input_arg[0]

# Variant 2: the variable is captured from the enclosing scope instead.
# +
counter = tf.Variable(0)

@tf.function
def increment(c=1):
    return counter.assign_add(c)
# -

increment()
increment()

function_def = increment.get_concrete_function().function_def
function_def.signature.input_arg[0]

# Variant 3: the variable lives on an object; `self` carries the resource.
class Counter:
    def __init__(self):
        self.counter = tf.Variable(0)

    @tf.function
    def increment(self, c=1):
        return self.counter.assign_add(c)

c = Counter()
c.increment()
c.increment()

# Inspect the Python code autograph generates for the captured loop.
# +
@tf.function
def add_10(x):
    for i in tf.range(10):
        x += 1
    return x

tf.autograph.to_code(add_10.python_function)
# -

def display_tf_code(func):
    """Pretty-print the autograph-generated source of a TF Function in Jupyter."""
    from IPython.display import display, Markdown
    if hasattr(func, "python_function"):
        func = func.python_function  # unwrap a tf.function to its Python source
    code = tf.autograph.to_code(func)
    display(Markdown('```python\n{}\n```'.format(code)))

display_tf_code(add_10)

# ## Using TF Functions with tf.keras (or Not)

# By default, tf.keras will automatically convert your custom code into TF
# Functions, no need to use `tf.function()`. The print() calls below reveal
# when tracing happens.

# Custom loss function
def my_mse(y_true, y_pred):
    """Mean squared error; prints only when traced."""
    print("Tracing loss my_mse()")
    return tf.reduce_mean(tf.square(y_pred - y_true))

# Custom metric function
def my_mae(y_true, y_pred):
    """Mean absolute error; prints only when traced."""
    print("Tracing metric my_mae()")
    return tf.reduce_mean(tf.abs(y_pred - y_true))

# Custom layer
class MyDense(keras.layers.Layer):
    """Minimal reimplementation of a Dense layer (kernel @ x + bias)."""

    def __init__(self, units, activation=None, **kwargs):
        super().__init__(**kwargs)
        self.units = units
        self.activation = keras.activations.get(activation)

    def build(self, input_shape):
        # Weights are created lazily, once the input feature count is known.
        self.kernel = self.add_weight(name='kernel',
                                      shape=(input_shape[1], self.units),
                                      initializer='uniform',
                                      trainable=True)
        self.biases = self.add_weight(name='bias',
                                      shape=(self.units,),
                                      initializer='zeros',
                                      trainable=True)
        super().build(input_shape)

    def call(self, X):
        print("Tracing MyDense.call()")
        return self.activation(X @ self.kernel + self.biases)

keras.backend.clear_session()
np.random.seed(42)
tf.random.set_seed(42)

# +
# Custom model: two hidden layers plus a wide path (input concatenated with
# the second hidden layer's output) feeding the output layer.
class MyModel(keras.models.Model):
    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        self.hidden1 = MyDense(30, activation="relu")
        self.hidden2 = MyDense(30, activation="relu")
        self.output_ = MyDense(1)

    def call(self, input):
        print("Tracing MyModel.call()")
        hidden1 = self.hidden1(input)
        hidden2 = self.hidden2(hidden1)
        concat = keras.layers.concatenate([input, hidden2])
        output = self.output_(concat)
        return output

model = MyModel()
# -

model.compile(loss=my_mse, optimizer="nadam", metrics=[my_mae])

# X_train_scaled / y_train etc. come from earlier notebook cells — TODO
# confirm against the full notebook.
model.fit(X_train_scaled, y_train, epochs=2,
          validation_data=(X_valid_scaled, y_valid))
model.evaluate(X_test_scaled, y_test)

# You can turn this off by creating the model with `dynamic=True` (or calling
# `super().__init__(dynamic=True, **kwargs)` in the model's constructor):

keras.backend.clear_session()
np.random.seed(42)
tf.random.set_seed(42)

model = MyModel(dynamic=True)

model.compile(loss=my_mse, optimizer="nadam", metrics=[my_mae])

# Now the custom code will be called at each iteration. Let's fit, validate
# and evaluate with tiny datasets to avoid getting too much output:

model.fit(X_train_scaled[:64], y_train[:64], epochs=1,
          validation_data=(X_valid_scaled[:64], y_valid[:64]), verbose=0)
model.evaluate(X_test_scaled[:64], y_test[:64], verbose=0)

# Alternatively, you can compile a model with `run_eagerly=True`:

keras.backend.clear_session()
np.random.seed(42)
tf.random.set_seed(42)

model = MyModel()

model.compile(loss=my_mse, optimizer="nadam", metrics=[my_mae], run_eagerly=True)

model.fit(X_train_scaled[:64], y_train[:64], epochs=1,
          validation_data=(X_valid_scaled[:64], y_valid[:64]), verbose=0)
model.evaluate(X_test_scaled[:64], y_test[:64], verbose=0)

# ## Custom Optimizers

# Defining custom optimizers is not very common, but in case you are one of
# the happy few who gets to write one, here is an example:

class MyMomentumOptimizer(keras.optimizers.Optimizer):
    """SGD with momentum, written against the keras Optimizer slot API."""

    def __init__(self, learning_rate=0.001, momentum=0.9,
                 name="MyMomentumOptimizer", **kwargs):
        """Call super().__init__() and use _set_hyper() to store hyperparameters"""
        super().__init__(name, **kwargs)
        self._set_hyper("learning_rate", kwargs.get("lr", learning_rate))  # handle lr=learning_rate
        self._set_hyper("decay", self._initial_decay)
        self._set_hyper("momentum", momentum)

    def _create_slots(self, var_list):
        """For each model variable, create the optimizer variable associated
        with it. TensorFlow calls these optimizer variables "slots". For
        momentum optimization, we need one momentum slot per model variable.
        """
        for var in var_list:
            self.add_slot(var, "momentum")

    @tf.function
    def _resource_apply_dense(self, grad, var):
        """Update the slots and perform one optimization step for one model
        variable.
        """
        var_dtype = var.dtype.base_dtype
        lr_t = self._decayed_lr(var_dtype)  # handle learning rate decay
        momentum_var = self.get_slot(var, "momentum")
        momentum_hyper = self._get_hyper("momentum", var_dtype)
        # Exponential moving average of (negated) gradients...
        momentum_var.assign(momentum_var * momentum_hyper - (1. - momentum_hyper) * grad)
        # ...scaled by the learning rate and applied to the variable.
        var.assign_add(momentum_var * lr_t)

    def _resource_apply_sparse(self, grad, var):
        # Sparse gradients (embeddings) are not supported by this example.
        raise NotImplementedError

    def get_config(self):
        base_config = super().get_config()
        return {
            **base_config,
            "learning_rate": self._serialize_hyperparameter("learning_rate"),
            "decay": self._serialize_hyperparameter("decay"),
            "momentum": self._serialize_hyperparameter("momentum"),
        }

keras.backend.clear_session()
np.random.seed(42)
tf.random.set_seed(42)

model = keras.models.Sequential([keras.layers.Dense(1, input_shape=[8])])
model.compile(loss="mse", optimizer=MyMomentumOptimizer())
model.fit(X_train_scaled, y_train, epochs=5)

# # Exercises

# ## 1. to 11.

# See Appendix A.

# # 12. Implement a custom layer that performs _Layer Normalization_

# _We will use this type of layer in Chapter 15 when using Recurrent Neural Networks._

# ### a.
# _Exercise: The `build()` method should define two trainable weights *α* and *β*,
# both of shape `input_shape[-1:]` and data type `tf.float32`. *α* should be
# initialized with 1s, and *β* with 0s._

# Solution: see below.

# ### b.
# _Exercise: The `call()` method should compute the mean_ μ _and standard
# deviation_ σ _of each instance's features. For this, you can use
# `tf.nn.moments(inputs, axes=-1, keepdims=True)`, which returns the mean μ and
# the variance σ<sup>2</sup> of all instances (compute the square root of the
# variance to get the standard deviation). Then the function should compute and
# return *α*⊗(*X* - μ)/(σ + ε) + *β*, where ⊗ represents itemwise
# multiplication (`*`) and ε is a smoothing term (small constant to avoid
# division by zero, e.g., 0.001)._

class LayerNormalization(keras.layers.Layer):
    """Layer normalization over the last axis: alpha*(X-mean)/sqrt(var+eps)+beta."""

    def __init__(self, eps=0.001, **kwargs):
        super().__init__(**kwargs)
        self.eps = eps  # smoothing term inside the sqrt (see note below)

    def build(self, batch_input_shape):
        # One scale (alpha) and one offset (beta) per feature.
        self.alpha = self.add_weight(
            name="alpha", shape=batch_input_shape[-1:],
            initializer="ones")
        self.beta = self.add_weight(
            name="beta", shape=batch_input_shape[-1:],
            initializer="zeros")
        super().build(batch_input_shape)  # must be at the end

    def call(self, X):
        mean, variance = tf.nn.moments(X, axes=-1, keepdims=True)
        return self.alpha * (X - mean) / (tf.sqrt(variance + self.eps)) + self.beta

    def compute_output_shape(self, batch_input_shape):
        return batch_input_shape

    def get_config(self):
        base_config = super().get_config()
        return {**base_config, "eps": self.eps}

# Note that making _ε_ a hyperparameter (`eps`) was not compulsory. Also note
# that it's preferable to compute `tf.sqrt(variance + self.eps)` rather than
# `tf.sqrt(variance) + self.eps`. Indeed, the derivative of sqrt(z) is
# undefined when z=0, so training will bomb whenever the variance vector has at
# least one component equal to 0. Adding _ε_ within the square root guarantees
# that this will never happen.

# ### c.
# _Exercise: Ensure that your custom layer produces the same (or very nearly
# the same) output as the `keras.layers.LayerNormalization` layer._

# Let's create one instance of each class, apply them to some data (e.g., the
# training set), and ensure that the difference is negligeable.
# + X = X_train.astype(np.float32) custom_layer_norm = LayerNormalization() keras_layer_norm = keras.layers.LayerNormalization() tf.reduce_mean(keras.losses.mean_absolute_error( keras_layer_norm(X), custom_layer_norm(X))) # - # Yep, that's close enough. To be extra sure, let's make alpha and beta completely random and compare again: # + random_alpha = np.random.rand(X.shape[-1]) random_beta = np.random.rand(X.shape[-1]) custom_layer_norm.set_weights([random_alpha, random_beta]) keras_layer_norm.set_weights([random_alpha, random_beta]) tf.reduce_mean(keras.losses.mean_absolute_error( keras_layer_norm(X), custom_layer_norm(X))) # - # Still a negligeable difference! Our custom layer works fine. # ## 13. Train a model using a custom training loop to tackle the Fashion MNIST dataset # _The Fashion MNIST dataset was introduced in Chapter 10._ # ### a. # _Exercise: Display the epoch, iteration, mean training loss, and mean accuracy over each epoch (updated at each iteration), as well as the validation loss and accuracy at the end of each epoch._ (X_train_full, y_train_full), (X_test, y_test) = keras.datasets.fashion_mnist.load_data() X_train_full = X_train_full.astype(np.float32) / 255. X_valid, X_train = X_train_full[:5000], X_train_full[5000:] y_valid, y_train = y_train_full[:5000], y_train_full[5000:] X_test = X_test.astype(np.float32) / 255. 
keras.backend.clear_session() np.random.seed(42) tf.random.set_seed(42) model = keras.models.Sequential([ keras.layers.Flatten(input_shape=[28, 28]), keras.layers.Dense(100, activation="relu"), keras.layers.Dense(10, activation="softmax"), ]) n_epochs = 5 batch_size = 32 n_steps = len(X_train) // batch_size optimizer = keras.optimizers.Nadam(lr=0.01) loss_fn = keras.losses.sparse_categorical_crossentropy mean_loss = keras.metrics.Mean() metrics = [keras.metrics.SparseCategoricalAccuracy()] with trange(1, n_epochs + 1, desc="All epochs") as epochs: for epoch in epochs: with trange(1, n_steps + 1, desc="Epoch {}/{}".format(epoch, n_epochs)) as steps: for step in steps: X_batch, y_batch = random_batch(X_train, y_train) with tf.GradientTape() as tape: y_pred = model(X_batch) main_loss = tf.reduce_mean(loss_fn(y_batch, y_pred)) loss = tf.add_n([main_loss] + model.losses) gradients = tape.gradient(loss, model.trainable_variables) optimizer.apply_gradients(zip(gradients, model.trainable_variables)) for variable in model.variables: if variable.constraint is not None: variable.assign(variable.constraint(variable)) status = OrderedDict() mean_loss(loss) status["loss"] = mean_loss.result().numpy() for metric in metrics: metric(y_batch, y_pred) status[metric.name] = metric.result().numpy() steps.set_postfix(status) y_pred = model(X_valid) status["val_loss"] = np.mean(loss_fn(y_valid, y_pred)) status["val_accuracy"] = np.mean(keras.metrics.sparse_categorical_accuracy( tf.constant(y_valid, dtype=np.float32), y_pred)) steps.set_postfix(status) for metric in [mean_loss] + metrics: metric.reset_states() # ### b. 
# _Exercise: Try using a different optimizer with a different learning rate for the upper layers and the lower layers._ keras.backend.clear_session() np.random.seed(42) tf.random.set_seed(42) lower_layers = keras.models.Sequential([ keras.layers.Flatten(input_shape=[28, 28]), keras.layers.Dense(100, activation="relu"), ]) upper_layers = keras.models.Sequential([ keras.layers.Dense(10, activation="softmax"), ]) model = keras.models.Sequential([ lower_layers, upper_layers ]) lower_optimizer = keras.optimizers.SGD(lr=1e-4) upper_optimizer = keras.optimizers.Nadam(lr=1e-3) n_epochs = 5 batch_size = 32 n_steps = len(X_train) // batch_size loss_fn = keras.losses.sparse_categorical_crossentropy mean_loss = keras.metrics.Mean() metrics = [keras.metrics.SparseCategoricalAccuracy()] with trange(1, n_epochs + 1, desc="All epochs") as epochs: for epoch in epochs: with trange(1, n_steps + 1, desc="Epoch {}/{}".format(epoch, n_epochs)) as steps: for step in steps: X_batch, y_batch = random_batch(X_train, y_train) with tf.GradientTape(persistent=True) as tape: y_pred = model(X_batch) main_loss = tf.reduce_mean(loss_fn(y_batch, y_pred)) loss = tf.add_n([main_loss] + model.losses) for layers, optimizer in ((lower_layers, lower_optimizer), (upper_layers, upper_optimizer)): gradients = tape.gradient(loss, layers.trainable_variables) optimizer.apply_gradients(zip(gradients, layers.trainable_variables)) del tape for variable in model.variables: if variable.constraint is not None: variable.assign(variable.constraint(variable)) status = OrderedDict() mean_loss(loss) status["loss"] = mean_loss.result().numpy() for metric in metrics: metric(y_batch, y_pred) status[metric.name] = metric.result().numpy() steps.set_postfix(status) y_pred = model(X_valid) status["val_loss"] = np.mean(loss_fn(y_valid, y_pred)) status["val_accuracy"] = np.mean(keras.metrics.sparse_categorical_accuracy( tf.constant(y_valid, dtype=np.float32), y_pred)) steps.set_postfix(status) for metric in [mean_loss] + metrics: 
metric.reset_states()
12_custom_models_and_training_with_tensorflow.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python [conda env:batch_effects] # language: python # name: conda-env-batch_effects-py # --- # # Main # The goal of experiment 0 is to validate the approach of simulating batch effects data and using the SVCCA similarity metric to determine the affect each batch effect has on the representation of the data. # # Experiment 0.2 uses a subset of the full 5K dimensional dataset to look for trends in the similarity versus number of batch effects. import os import ast # + # Load config file config_file = os.path.join( os.path.abspath(os.path.join(os.getcwd(),"../..")), "data", "metadata", "config_exp_0.2.txt") d = {} float_params = ["learning_rate", "kappa", "epsilon_std"] str_params = ["analysis_name", "NN_architecture"] lst_params = ["num_batches"] with open(config_file) as f: for line in f: (name, val) = line.split() if name in float_params: d[name] = float(val) elif name in str_params: d[name] = str(val) elif name in lst_params: d[name] = ast.literal_eval(val) else: d[name] = int(val) # - # Print params print("Parameters:") for name, val in d.items(): print("{} = {}".format(name, val)) # + # Training print("Training VAE using params...") # %run ./1_train_vae.ipynb # + # Simulate data print("Simulating data...") # %run ./2_simulate_data_truncated.ipynb # + # Add batch effects to simulated data print("Adding batch effects to simulated data...") # %run ./3_add_batch_effects.ipynb # + # Calculate similarity between data with different batch effects print("Calculating similarity between representations...") # %run ./4_similarity_analysis.ipynb
archive/scripts/experiment_1/run_experiment_1.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3.8.10 64-bit
#     name: python3
# ---

# SimSwap-style face-swap debug notebook: loads the swap model, a face
# detector/aligner, a face-segmentation net and a face super-resolution GAN,
# builds the source-identity embedding, then processes (only) the first frame
# of the target video while plotting every intermediate mask/blend variant.
# Requires CUDA and the project checkpoints; not runnable standalone.

# +
from os.path import basename, isfile, join, splitext

import cv2
import numpy as np
import torch
import torch.nn.functional as F
from PIL import Image
from torchvision import transforms

from insightface_func.face_detect_crop_single import Face_detect_crop
from models.models import create_model
from options.test_options import TestOptions

import os
import shutil
from os.path import basename, exists, isfile, join, splitext

import cv2
import numpy as np
import torch
from tqdm import tqdm

from util.videoswap import lower_resolution, extract_audio, get_frames_n, _totensor

import warnings
warnings.filterwarnings('ignore')

# Face segmentation net (https://github.com/kampta/face-seg), loaded on GPU.
from face_seg.nets.MobileNetV2_unet import MobileNetV2_unet

seg_model = MobileNetV2_unet(None).to('cuda')
state_dict = torch.load('face_seg/checkpoints/model.pt', map_location='cpu')
seg_model.load_state_dict(state_dict)
seg_model.eval();

model, app = None, None

# ArcFace preprocessing for the identity encoder.
transformer_Arcface = transforms.Compose([
    transforms.ToTensor(),
    transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
])

opt = TestOptions()
opt.initialize()
opt.parser.add_argument('-f') # dummy arg to avoid bug
opt = opt.parse()
opt.Arc_path = './weights/arcface_checkpoint.tar'
opt.isTrain = False

torch.nn.Module.dump_patches = True
global model
model = create_model(opt)
model.eval()

# Face detector + aligner (insightface "antelope" models).
global app
app = Face_detect_crop(name='antelope', root='./insightface_func/models')
app.prepare(ctx_id=0, det_thresh=0.6, det_size=(256, 256))

source = '../reference_videos/gen_0.jpg'
target = '../reference_videos/stocks/man_2.mp4'
# source = 'IMG_1237.JPG'
# target = 'IMG_1222.MOV'
result_dir='./output'
crop_size=224

assert isfile(source), f'Can\'t find source at {source}'
assert isfile(target), f'Can\'t find target at {target}'

output_filename = f'infer-{splitext(basename(source))[0]}-{splitext(basename(target))[0]}.mp4'
output_path = join(result_dir, output_filename)

assert model is not None
assert app is not None

# Build the source identity latent: detect/align the source face, ArcFace-
# normalize it, embed with netArc, then L2-normalize the embedding.
img_a_whole = cv2.imread(source)
img_a_align_crop, _ = app.get(img_a_whole, crop_size)
img_a_align_crop_pil = Image.fromarray(cv2.cvtColor(img_a_align_crop[0], cv2.COLOR_BGR2RGB))
img_a = transformer_Arcface(img_a_align_crop_pil)
img_id = img_a.view(-1, img_a.shape[0], img_a.shape[1], img_a.shape[2])
img_id = img_id.cuda()
img_id_downsample = F.interpolate(img_id, scale_factor=0.5)
latend_id = model.netArc(img_id_downsample)
latend_id = latend_id.detach().to('cpu')
latend_id = latend_id / np.linalg.norm(latend_id, axis=1, keepdims=True)
latend_id = latend_id.to('cuda')
# -

# Face super-resolution GAN (https://github.com/ewrfcas/Face-Super-Resolution)
# used to upscale the 224px swapped crop before pasting it back.
# +
import torchvision.transforms as transforms
from fsr.models.SRGAN_model import SRGANModel
import easydict

esrgan_fsr_transform = transforms.Compose([transforms.Resize((128, 128)),
                                           transforms.Normalize(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5])])

# Hyperparameters mirror the upstream repo's test config; only the generator
# (netG) is used at inference time.
args = easydict.EasyDict({
    'gpu_ids': None,
    'batch_size': 32,
    'lr_G': 1e-4,
    'weight_decay_G': 0,
    'beta1_G': 0.9,
    'beta2_G': 0.99,
    'lr_D': 1e-4,
    'weight_decay_D': 0,
    'beta1_D': 0.9,
    'beta2_D': 0.99,
    'lr_scheme': 'MultiStepLR',
    'niter': 100000,
    'warmup_iter': -1,
    'lr_steps': [50000],
    'lr_gamma': 0.5,
    'pixel_criterion': 'l1',
    'pixel_weight': 1e-2,
    'feature_criterion': 'l1',
    'feature_weight': 1,
    'gan_type': 'ragan',
    'gan_weight': 5e-3,
    'D_update_ratio': 1,
    'D_init_iters': 0,
    'print_freq': 100,
    'val_freq': 1000,
    'save_freq': 10000,
    'crop_size': 0.85,
    'lr_size': 128,
    'hr_size': 512,
    # network G
    'which_model_G': 'RRDBNet',
    'G_in_nc': 3,
    'out_nc': 3,
    'G_nf': 64,
    'nb': 16,
    # network D
    'which_model_D': 'discriminator_vgg_128',
    'D_in_nc': 3,
    'D_nf': 64,
    # data dir
    'pretrain_model_G': 'weights/90000_G.pth',
    'pretrain_model_D': None
})

esrgan_fsr_model = SRGANModel(args, is_train=False)
esrgan_fsr_model.load()
esrgan_fsr_model.netG.to('cuda')
esrgan_fsr_model.netG.eval();
# -

# +
import matplotlib.pyplot as plt
from scipy.special import expit
from torchvision.transforms.functional import normalize

def reverse2wholeimage(swaped_imgs, mats, crop_size, oriimg, save_path=''):
    """Paste swapped face crops back into the original frame and save it.

    swaped_imgs: swapped face crops (CHW tensors on GPU); mats: the affine
    matrices used by the aligner (inverted here to map crops back); oriimg:
    the full original frame (BGR); save_path: output JPEG path.

    Debug build: every `###`-marked section below only visualizes candidate
    mask post-processing variants (thresholds, blurs, dilations) and does not
    affect the final output.
    """
    target_image_list = []
    img_mask_list = []
    for swaped_img, mat in zip(swaped_imgs, mats):
        print('swaped_img:'); plt.imshow(swaped_img.cpu().detach().numpy().transpose((1, 2, 0))); plt.show() ###

        # Segment the swapped crop; class 1 appears to be "face" (class 2
        # "hair", per the debug plots below) — TODO confirm with the
        # face-seg repo's label map. https://github.com/kampta/face-seg
        seg_mask_logits = seg_model(swaped_img.unsqueeze(0))
        seg_mask = seg_mask_logits.squeeze().cpu().detach().numpy().transpose((1, 2, 0))
        img_mask = np.argmax(seg_mask, axis=2) == 1
        img_mask = np.array(img_mask * 255, dtype=float)
        # img_mask = np.full((crop_size, crop_size), 255, dtype=float)

        # select and fill the biggest contour (drops stray segmentation blobs)
        contours, _ = cv2.findContours(img_mask.astype(np.uint8), cv2.RETR_CCOMP, cv2.CHAIN_APPROX_SIMPLE)
        img_mask_ = np.zeros_like(img_mask)
        cv2.drawContours(img_mask_, [max(contours, key = cv2.contourArea)], 0, 255, -1)
        img_mask = np.array(img_mask_)

        # Super-resolve the swapped crop before pasting.
        # SR-ESRGAN_fsr https://github.com/ewrfcas/Face-Super-Resolution
        swaped_img = esrgan_fsr_transform(torch.clone(swaped_img))
        swaped_img = esrgan_fsr_model.netG(swaped_img.unsqueeze(0))
        swaped_img = swaped_img.squeeze(0).cpu().detach().numpy().transpose((1, 2, 0))
        swaped_img = np.clip(swaped_img / 2.0 + 0.5, 0, 1)  # [-1,1] -> [0,1]
        # cv2.imwrite(splitext(save_path)[0] + '_' + splitext(save_path)[1], cv2.cvtColor(swaped_img * 255, cv2.COLOR_BGR2RGB))

        # revert transformation: invert the alignment affine; the SR step
        # changed the crop's scale, so rescale the linear part accordingly.
        mat_rev = cv2.invertAffineTransform(mat)
        mat_rev_face = np.array(mat_rev)
        mat_rev_face[:2, :2] = mat_rev_face[:2, :2] / (swaped_img.shape[0] / crop_size)
        orisize = (oriimg.shape[1], oriimg.shape[0])
        target_image = cv2.warpAffine(swaped_img, mat_rev_face, orisize)
        target_image = np.array(target_image, dtype=np.float64)[..., ::-1] * 255  # RGB -> BGR, back to 0..255

        # print('target_image:'); plt.imshow(target_image); plt.show() ###
        # print(target_image.shape, target_image.min(), target_image.max()) ###

        ### debug: face-channel probability at several thresholds
        print('face segmentation:')
        sigm_ = expit(seg_mask[:, :, 1])
        fig, axs = plt.subplots(1, 4, figsize=(30, 30))
        axs.flat[0].imshow(sigm_); axs.flat[0].set_xlabel('as is')
        sigm_[sigm_ < 0.5] = 0; axs.flat[1].imshow(sigm_); axs.flat[1].set_xlabel('>= 0.5')
        sigm_[sigm_ < 0.75] = 0; axs.flat[2].imshow(sigm_); axs.flat[2].set_xlabel('>= 0.75')
        sigm_[sigm_ < 0.9] = 0; axs.flat[3].imshow(sigm_); axs.flat[3].set_xlabel('>= 0.9')
        plt.show()
        ###

        ### debug: hair-channel probability at several thresholds
        print('hair segmentation:')
        sigm_ = expit(seg_mask[:, :, 2])
        fig, axs = plt.subplots(1, 4, figsize=(30, 30))
        axs.flat[0].imshow(sigm_); axs.flat[0].set_xlabel('as is')
        sigm_[sigm_ < 0.5] = 0; axs.flat[1].imshow(sigm_); axs.flat[1].set_xlabel('>= 0.5')
        sigm_[sigm_ < 0.75] = 0; axs.flat[2].imshow(sigm_); axs.flat[2].set_xlabel('>= 0.75')
        sigm_[sigm_ < 0.9] = 0; axs.flat[3].imshow(sigm_); axs.flat[3].set_xlabel('>= 0.9')
        plt.show()
        ###

        # print('img_mask:'); plt.imshow(img_mask); plt.show() ###
        # print(img_mask.shape, img_mask.min(), img_mask.max()) ###

        ### debug: mask after median blur with various kernel sizes
        print('median blurring:')
        fig, axs = plt.subplots(1, 6, figsize=(30, 30))
        axs.flat[0].imshow(img_mask); axs.flat[0].set_xlabel('as is')
        axs.flat[1].imshow(cv2.medianBlur(img_mask.astype(np.uint8), 3)); axs.flat[1].set_xlabel('kernel 3')
        axs.flat[2].imshow(cv2.medianBlur(img_mask.astype(np.uint8), 7)); axs.flat[2].set_xlabel('kernel 7')
        axs.flat[3].imshow(cv2.medianBlur(img_mask.astype(np.uint8), 11)); axs.flat[3].set_xlabel('kernel 11')
        axs.flat[4].imshow(cv2.medianBlur(img_mask.astype(np.uint8), 15)); axs.flat[4].set_xlabel('kernel 15')
        axs.flat[5].imshow(cv2.medianBlur(img_mask.astype(np.uint8), 21)); axs.flat[5].set_xlabel('kernel 21')
        plt.show()
        ###

        ### debug: median blur followed by elliptical dilation
        print('dilating after median blurring:')
        kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (10, 10)) # np.ones((10, 10), np.uint8)
        fig, axs = plt.subplots(1, 6, figsize=(30, 30))
        axs.flat[0].imshow((cv2.dilate(img_mask, kernel, iterations=3) / 255)); axs.flat[0].set_xlabel('as is')
        axs.flat[1].imshow((cv2.dilate(cv2.medianBlur(img_mask.astype(np.uint8), 3), kernel, iterations=3) / 255)); axs.flat[1].set_xlabel('kernel 3')
        axs.flat[2].imshow((cv2.dilate(cv2.medianBlur(img_mask.astype(np.uint8), 7), kernel, iterations=3) / 255)); axs.flat[2].set_xlabel('kernel 7')
        axs.flat[3].imshow((cv2.dilate(cv2.medianBlur(img_mask.astype(np.uint8), 11), kernel, iterations=3) / 255)); axs.flat[3].set_xlabel('kernel 11')
        axs.flat[4].imshow((cv2.dilate(cv2.medianBlur(img_mask.astype(np.uint8), 15), kernel, iterations=3) / 255)); axs.flat[4].set_xlabel('kernel 15')
        axs.flat[5].imshow((cv2.dilate(cv2.medianBlur(img_mask.astype(np.uint8), 21), kernel, iterations=3) / 255)); axs.flat[5].set_xlabel('kernel 21')
        plt.show()
        ###

        ### debug: Gaussian blur + intensity rescale ("smoothing") variants
        from skimage.exposure import rescale_intensity
        print('smoothing edges:')
        fig, axs = plt.subplots(2, 7, figsize=(40, 10))
        axs[0][0].imshow(img_mask); axs[0][0].set_xlabel('as is')
        blur = cv2.GaussianBlur(img_mask, (3, 3), 0, 0)
        axs[0][1].imshow(blur); axs[0][1].set_xlabel('blur 3')
        axs[1][1].imshow(rescale_intensity(blur, in_range=(127.5,255), out_range=(0,255))); axs[1][1].set_xlabel('smooth')
        blur = cv2.GaussianBlur(img_mask, (7, 7), 0, 0)
        axs[0][2].imshow(blur); axs[0][2].set_xlabel('blur 7')
        axs[1][2].imshow(rescale_intensity(blur, in_range=(127.5,255), out_range=(0,255))); axs[1][2].set_xlabel('smooth')
        blur = cv2.GaussianBlur(img_mask, (11, 11), 0, 0)
        axs[0][3].imshow(blur); axs[0][3].set_xlabel('blur 11')
        axs[1][3].imshow(rescale_intensity(blur, in_range=(127.5,255), out_range=(0,255))); axs[1][3].set_xlabel('smooth')
        blur = cv2.GaussianBlur(img_mask, (15, 15), 0, 0)
        axs[0][4].imshow(blur); axs[0][4].set_xlabel('blur 15')
        axs[1][4].imshow(rescale_intensity(blur, in_range=(127.5,255), out_range=(0,255))); axs[1][4].set_xlabel('smooth')
        blur = cv2.GaussianBlur(img_mask, (21, 21), 0, 0)
        axs[0][5].imshow(blur); axs[0][5].set_xlabel('blur 21')
        axs[1][5].imshow(rescale_intensity(blur, in_range=(127.5,255), out_range=(0,255))); axs[1][5].set_xlabel('smooth')
        blur = cv2.GaussianBlur(img_mask, (35, 35), 0, 0)
        axs[0][6].imshow(blur); axs[0][6].set_xlabel('blur 35')
        axs[1][6].imshow(rescale_intensity(blur, in_range=(127.5,255), out_range=(0,255))); axs[1][6].set_xlabel('smooth')
        plt.show()
        ###

        # img_mask = cv2.medianBlur(img_mask.astype(np.uint8), 15)
        # blur = cv2.GaussianBlur(img_mask, (35, 35), 0, 0)
        # img_mask = rescale_intensity(blur, in_range=(127.5,255), out_range=(0,255))

        ### debug: six full blend variants (mask pipeline x resulting frame)
        fig, axs = plt.subplots(2, 6, figsize=(40, 10))
        kernel = np.ones((10, 10), np.uint8)
        img_mask_ = cv2.erode(img_mask, kernel, iterations=1) / 255
        img_mask_ = cv2.warpAffine(img_mask_, mat_rev, orisize)
        img_mask_ = np.reshape(img_mask_, [img_mask_.shape[0], img_mask_.shape[1], 1])
        img_ = img_mask_ * target_image + (1-img_mask_) * np.array(oriimg, dtype=np.float64)
        axs[0][0].imshow(img_mask_); axs[0][0].set_xlabel('mask as is')
        axs[1][0].imshow(cv2.cvtColor(img_.astype(np.uint8), cv2.COLOR_BGR2RGB)[500:1500]); axs[1][0].set_xlabel('result as is')
        kernel = np.ones((10, 10), np.uint8)
        img_mask_ = cv2.dilate(img_mask, kernel, iterations=1) / 255
        img_mask_ = cv2.warpAffine(img_mask_, mat_rev, orisize)
        img_mask_ = np.reshape(img_mask_, [img_mask_.shape[0], img_mask_.shape[1], 1])
        img_ = img_mask_ * target_image + (1-img_mask_) * np.array(oriimg, dtype=np.float64)
        axs[0][1].imshow(img_mask_); axs[0][1].set_xlabel('mask - dilate 10,1')
        axs[1][1].imshow(cv2.cvtColor(img_.astype(np.uint8), cv2.COLOR_BGR2RGB)[500:1500]); axs[1][1].set_xlabel('result - dilate 10,1')
        kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (10, 10))
        img_mask_ = cv2.dilate(img_mask, kernel, iterations=1) / 255
        img_mask_ = cv2.warpAffine(img_mask_, mat_rev, orisize)
        img_mask_ = np.reshape(img_mask_, [img_mask_.shape[0], img_mask_.shape[1], 1])
        img_ = img_mask_ * target_image + (1-img_mask_) * np.array(oriimg, dtype=np.float64)
        axs[0][2].imshow(img_mask_); axs[0][2].set_xlabel('mask - EL + dilate 10,1')
        axs[1][2].imshow(cv2.cvtColor(img_.astype(np.uint8), cv2.COLOR_BGR2RGB)[500:1500]); axs[1][2].set_xlabel('result - EL + dilate 10,1')
        kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (10, 10))
        img_mask_ = cv2.medianBlur(img_mask.astype(np.uint8), 15)
        img_mask_ = cv2.dilate(img_mask_, kernel, iterations=1) / 255
        img_mask_ = cv2.warpAffine(img_mask_, mat_rev, orisize)
        img_mask_ = np.reshape(img_mask_, [img_mask_.shape[0], img_mask_.shape[1], 1])
        img_ = img_mask_ * target_image + (1-img_mask_) * np.array(oriimg, dtype=np.float64)
        axs[0][3].imshow(img_mask_); axs[0][3].set_xlabel('mask - EL + MB + dilate 10,1')
        axs[1][3].imshow(cv2.cvtColor(img_.astype(np.uint8), cv2.COLOR_BGR2RGB)[500:1500]); axs[1][3].set_xlabel('result - EL + MB + dilate 10,1')
        kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (10, 10))
        img_mask_ = cv2.medianBlur(img_mask.astype(np.uint8), 15)
        img_mask_ = cv2.GaussianBlur(img_mask_, (35, 35), 0, 0)
        img_mask_ = rescale_intensity(img_mask_, in_range=(127.5,255), out_range=(0,255))
        img_mask_ = cv2.dilate(img_mask_, kernel, iterations=1) / 255
        print(img_mask_.sum())
        img_mask_ = cv2.warpAffine(img_mask_, mat_rev, orisize)
        img_mask_ = np.reshape(img_mask_, [img_mask_.shape[0], img_mask_.shape[1], 1])
        img_ = img_mask_ * target_image + (1-img_mask_) * np.array(oriimg, dtype=np.float64)
        axs[0][4].imshow(img_mask_); axs[0][4].set_xlabel('mask - EL + MB + SM + dilate 10,1')
        axs[1][4].imshow(cv2.cvtColor(img_.astype(np.uint8), cv2.COLOR_BGR2RGB)[500:1500]); axs[1][4].set_xlabel('result - EL + MB + SM + dilate 10,1')
        kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (10, 10))
        img_mask_ = cv2.medianBlur(img_mask.astype(np.uint8), 15)
        img_mask_ = cv2.dilate(img_mask_, kernel, iterations=1)
        img_mask_ = cv2.GaussianBlur(img_mask_, (35, 35), 0, 0)
        img_mask_ = rescale_intensity(img_mask_, in_range=(127.5,255), out_range=(0,255))
        print(img_mask_.sum() / 255)
        img_mask_ = cv2.warpAffine(img_mask_ / 255, mat_rev, orisize)
        img_mask_ = np.reshape(img_mask_, [img_mask_.shape[0], img_mask_.shape[1], 1])
        img_ = img_mask_ * target_image + (1-img_mask_) * np.array(oriimg, dtype=np.float64)
        axs[0][5].imshow(img_mask_); axs[0][5].set_xlabel('mask - EL + MB + dilate 10,1 + SM')
        axs[1][5].imshow(cv2.cvtColor(img_.astype(np.uint8), cv2.COLOR_BGR2RGB)[500:1500]); axs[1][5].set_xlabel('result - EL + MB + dilate 10,1 + SM')
        plt.show()
        ###

        # The variant actually used: erode the mask, warp it into frame
        # coordinates, and keep it as a [H, W, 1] alpha matte in [0, 1].
        # img_mask = cv2.medianBlur(img_mask.astype(np.uint8), 15)
        # blur = cv2.GaussianBlur(img_mask, (35, 35), 0, 0)
        # img_mask = rescale_intensity(blur, in_range=(127.5,255), out_range=(0,255))
        kernel = np.ones((10, 10), np.uint8) # cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (10, 10)) # np.ones((10, 10), np.uint8)
        img_mask = cv2.erode(img_mask, kernel, iterations=1) / 255
        print('img_mask:'); plt.imshow(img_mask); plt.show() ###
        print(img_mask.shape, img_mask.min(), img_mask.max())
        img_mask = cv2.warpAffine(img_mask, mat_rev, orisize)
        img_mask = np.reshape(img_mask, [img_mask.shape[0], img_mask.shape[1], 1])
        # img_mask[target_image[:, :, 0] == 0] = 0
        # print('img_mask:'); plt.imshow(img_mask); plt.show() ###
        # print(img_mask.min(), img_mask.max())

        img_mask_list.append(img_mask)
        target_image_list.append(target_image)

    # Alpha-blend every swapped face over the original frame.
    img = np.array(oriimg, dtype=np.float64)
    for img_mask, target_image in zip(img_mask_list, target_image_list):
        img = img_mask * target_image + (1-img_mask) * img

    final_img = img.astype(np.uint8)
    print('final_img-RGB:'); plt.imshow(cv2.cvtColor(final_img, cv2.COLOR_BGR2RGB)); plt.show() ###
    cv2.imwrite(save_path, final_img)

# --- per-frame driver ---
video_path = target
temp_results_dir='./temp_results'
swap_model = model
detect_model = app
# NOTE(review): 'id_veсtor' contains a Cyrillic 'с' (U+0441), not an ASCII
# 'c'. It is used consistently below, so the code runs, but retyping the name
# with an ASCII 'c' elsewhere would raise NameError. Kept byte-identical here.
id_veсtor = latend_id

lower_resolution(video_path)

print(f'=> Swapping face in "{video_path}"...')
# Start from a clean temp directory each run.
if exists(temp_results_dir):
    shutil.rmtree(temp_results_dir)
os.makedirs(temp_results_dir)

audio_path = join(temp_results_dir, splitext(basename(video_path))[0] + '.wav')
extract_audio(video_path, audio_path)

frame_count = get_frames_n(video_path)
video = cv2.VideoCapture(video_path)
fps = video.get(cv2.CAP_PROP_FPS)
for i, frame_index in tqdm(enumerate(range(frame_count))):
    if i != 0:
        continue  # debug run: only the first frame is actually processed
    _, frame = video.read()
    detect_results = detect_model.get(frame, crop_size)
    if detect_results is not None:
        frame_align_crop_list = detect_results[0]
        frame_mat_list = detect_results[1]
        swap_result_list = []
        # Swap every detected face crop toward the source identity.
        for frame_align_crop in frame_align_crop_list:
            frame_align_crop_tensor = _totensor(cv2.cvtColor(frame_align_crop,cv2.COLOR_BGR2RGB))[None,...].cuda()
            swap_result = swap_model(None, frame_align_crop_tensor, id_veсtor, None, True)[0]
            swap_result_list.append(swap_result)
        reverse2wholeimage(swap_result_list, frame_mat_list, crop_size, frame,
                           join(temp_results_dir, 'frame_{:0>7d}.jpg'.format(frame_index)))
    else:
        # No face found: write the frame through unchanged.
        frame = frame.astype(np.uint8)
        cv2.imwrite(join(temp_results_dir, 'frame_{:0>7d}.jpg'.format(frame_index)), frame)
# -
exp_5-square_removal.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernel_info: # name: python3 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # %matplotlib inline from matplotlib import style style.use('fivethirtyeight') import matplotlib.pyplot as plt import numpy as np import pandas as pd import datetime as dt # # Reflect Tables into SQLAlchemy ORM # Python SQL toolkit and Object Relational Mapper import sqlalchemy from sqlalchemy.ext.automap import automap_base from sqlalchemy.orm import Session from sqlalchemy import create_engine, func # create engine to hawaii.sqlite engine = create_engine("sqlite:///Resources/hawaii.sqlite") # + # reflect an existing database into a new model Base = automap_base() # reflect the tables Base.prepare(engine, reflect = True) # + # View all of the classes that automap found Base.classes.keys() # + # Save references to each table measurement = Base.classes.measurement station = Base.classes.station # + # Create our session (link) from Python to the DB session = Session(bind=engine) # - # # Exploratory Precipitation Analysis # + # Find the most recent date in the data set. engine.execute('SELECT max(date) FROM measurement').fetchall() # - # + # Design a query to retrieve the last 12 months of precipitation data and plot the results. # Starting from the most recent data point in the database. # Calculate the date one year from the last date in data set. 
# Perform a query to retrieve the data and precipitation scores.
# Derive the 12-month window from the data itself instead of hard-coding the
# dates (the cell above asks us to "calculate the date one year from the last
# date in data set"): find the most recent date, then go back exactly one year.
most_recent_date = session.query(func.max(measurement.date)).scalar()
one_year_ago = (dt.datetime.strptime(most_recent_date, '%Y-%m-%d')
                - dt.timedelta(days=365)).strftime('%Y-%m-%d')
precipitation_data = (
    session.query(measurement.date, measurement.prcp)
    .filter(measurement.date >= one_year_ago)
    .filter(measurement.date <= most_recent_date)
    .all()
)

# Save the query results as a Pandas DataFrame and set the index to the date column
Predf = pd.DataFrame(precipitation_data)

# Sort the dataframe by date (set_index alone does not sort the rows)
Predf = Predf.set_index('date').sort_index()

# Use Pandas Plotting with Matplotlib to plot the data
Predf.plot(rot=45)
plt.savefig('Precipitation graph.png')
plt.show()
# According to the graph, it seems that the precipitation has a linear-ish growth
# -

# Use Pandas to calculate the summary statistics for the precipitation data
Predf.describe()

# # Exploratory Station Analysis

# +
# Design a query to calculate the total number of stations in the dataset
station_total = session.query(station.id).count()
station_total
# So we can see that there are 9 stations in the dataset

# +
# Design a query to find the most active stations (i.e. what stations have the most rows?)
# List the stations and the counts in descending order.
active_stations = (
    session.query(measurement.station, func.count(measurement.station))
    .filter(measurement.station == station.station)   # join measurement to station
    .group_by(measurement.station)
    .order_by(func.count(measurement.id).desc())
    .all()
)
active_stations
# So the first station shown is the most active one

# +
# Using the most active station id from the previous query, calculate the lowest, highest, and average temperature.
# Station id of the most active station found in the previous cell
# (hard-coded copy of active_stations[0][0])
most_actives = 'USC00519281'
# Single-row result: (min, max, avg) of the temperature observations (tobs)
result = session.query(func.min(measurement.tobs), func.max(measurement.tobs), func.avg(measurement.tobs)).filter(measurement.station == most_actives).all()
lowest_temp = result[0][0]
highest_temp = result[0][1]
average_temp = result[0][2]
print(f'The lowest temperature is {lowest_temp}')
print(f'The highest temperature is {highest_temp}')
print(f'The average temperature is {average_temp}')

# +
# Using the most active station id
# Query the last 12 months of temperature observation data for this station and plot the results as a histogram
active_id = active_stations[0][0]
# Same one-year window used for the precipitation analysis above
query_months = session.query(measurement.date, measurement.tobs).filter(measurement.date >= '2016-08-23').filter(measurement.date <= '2017-08-23').filter(measurement.station == active_id).all()
temperature1_df = pd.DataFrame(query_months)
temperature1_df = temperature1_df.set_index('date').sort_index(ascending=True)
temperature1_df.plot.hist(bins=12)
# -

# # Close session

# Close Session (releases the SQLAlchemy connection to hawaii.sqlite)
session.close()
climate_starter.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# + [markdown] colab_type="text" id="6vddxASQcwOx"
# # Functions
#
# Functions are blocks of code that can be used over and over again to perform a specific action.
#
# As we know, in Python, there are print (), len () etc. Many available functions are defined.
#
# We can use it in our own code by providing access to functions defined in libraries, modules and packages. These are called predefined functions, embedded functions (built-in) or library functions. We can use ready-made functions as well as create our own functions. (User-defined)
#
#
# Functions prevent code repetition and our code stays more modular and organized.
#
#
# *****
#
# def "function_name"(parameter1,parameter2,..):
#
#
# > "Do something"
#
# return "return something" (depends on functionality)
#
#
#

# + colab={} colab_type="code" id="xmv4r_jCboXC"
def hello():
    """Print a fixed greeting; takes no parameters, returns None."""
    print("Hello Everyone!!")

# + colab={} colab_type="code" id="9WyymUQqgSBQ"
hello() #calling the func
#the functions don't have any parameters

# + colab={} colab_type="code" id="0mNT-X0DoP7y"
# NOTE: this re-binds the name `hello`, replacing the zero-argument version above
def hello(name):
    """Print a greeting for `name`; returns None."""
    print("Hello " + name)

# + colab={} colab_type="code" id="DxZKzFIuoW08"
hello("Asli")
# -

def func_in_func(name1):
    # delegates to hello(); hello() returns None, so this returns None too
    return hello(name1)

func_in_func("Ugurcan")

# + colab={"base_uri": "https://localhost:8080/", "height": 70} colab_type="code" id="3AoI1i0igZC7" outputId="96d5c07a-fb7e-4965-b107-d7e3419b4e3d"
def func1():
    print("Hello World!!")

func1()
print("Google")
func1()
func1()
func1()
func1()

# + colab={} colab_type="code" id="iiSPTWt3hJHo"
def summ(a,b):
    # local name `summ` shadows the function name inside the body;
    # the result is printed, NOT returned
    summ = a + b
    print(summ)

# + colab={} colab_type="code" id="WNgM1TR4haRG"
summ(6.0,7.5)

# + colab={"base_uri": "https://localhost:8080/", "height": 52} colab_type="code" id="MfnQx9FFhd5H" outputId="ef0339f9-f195-40fb-cad8-a3c231726c26"
# summ() has no return statement, so t is None here
t = summ(8,9)
t

# + colab={} colab_type="code" id="NiymGvuihkZ2"
def func(x,y):
    """Return both the sum and the product of x and y as a tuple."""
    summ = x + y
    multip = x * y
    return (summ,multip)
    #t = summ
    #c = multip

# + colab={"base_uri": "https://localhost:8080/", "height": 35} colab_type="code" id="SbtW4TLdh7Pp" outputId="b2dd6bb7-6769-4d82-96ee-d622de955917"
# tuple unpacking of the two returned values
t,c = func(23,45)
print(t,c)
# -

func(23,45)

# + colab={} colab_type="code" id="Nca_Rpf9iC-x"
print("Sum of the values: " + str(t) + ", Multiplying of the values: " + str(c))

# + colab={} colab_type="code" id="5IS0ARTJiPY1"
#Let's write a function that it will square the entered number, but will be terminated when you enter the number 5 and give us an error message.
def sqr(x):
    if x == 5:
        # early return: the message string is returned, not raised
        return ("Terminated because you entered 5")
    result = x **2
    return (result)

# + colab={"base_uri": "https://localhost:8080/", "height": 35} colab_type="code" id="w0Am5qkekKNH" outputId="3a1be730-2c76-47b4-a44f-87f746392359"
sqr(10)

# + colab={} colab_type="code" id="-A1MrkLDkMju"
sqr(5)

# + colab={} colab_type="code" id="gsu2kbuJnKmx"
d = sqr(5)
print(d)

# + colab={} colab_type="code" id="ewtG6xuRkQsy"
# Let's write a function that tells you whether the entered number is positive, negative or zero.
# NOTE: this re-binds `func`, replacing the sum/product version above
def func(x):
    if x > 0:
        return ("Positive")
    elif x < 0:
        return ("Negative")
    else:
        return ("Zero")

# + colab={} colab_type="code" id="UQ3T_8UTlJ8u"
for i in [-2,5,6,0,-4,-7]:
    print(func(i))

# + colab={} colab_type="code" id="_gih-r9xlZBr"
#factorial calculation
#0! = 1
#1!= 1
#2!= 2 * 1 =2
#6! = 6 * 5* 4 *3 * 2 *1 = 720
def factorial(num):
    """Print num! computed with a while loop (prints, does not return)."""
    # local `factorial` shadows the function name; used as the accumulator
    factorial = 1
    if (num == 0 or num == 1):
        print("Factorial: ", factorial)
    else:
        while (num >= 1):
            factorial = factorial * num
            num -= 1
        print("Factorial: ", factorial)
# 1 * 5 = 5 = factorial
# 5 * 4 = 20
# 20 * 3 = 60
#60 * 2 = 120
# 120 * 1 = 120

# + colab={} colab_type="code" id="T5ykiNt_yeck"
factorial(5)
# -

# same algorithm with a for loop, and it RETURNS the value instead of printing
def faktoriyel(sayi):
    faktoriyel = 1
    for i in range(1,sayi+1):
        faktoriyel *= i
    return faktoriyel

faktoriyel(5)

# + colab={} colab_type="code" id="OQBMccOwyhl9"
#using for loop
def factorial2(num2):
    factorial2 = 1
    if (num2 == 0 or num2 == 1):
        print("Factorial: ", factorial2)
    else:
        for i in range(factorial2, num2+1):
            factorial2 *= i
        print("Factorial: ", factorial2)

# + colab={} colab_type="code" id="3e4oPm5yy3Co"
factorial2(6)

# + colab={} colab_type="code" id="blFFawxml7wW"
# variant that returns rather than prints; note the two branches return
# different shapes (a tuple for 0/1, a bare int otherwise)
def factorial3(nums):
    factorial3 = 1
    if (nums == 0 or nums == 1):
        return ("Factorial: ", factorial3)
    else:
        for i in range(factorial3, nums+1):
            factorial3 *= i
        return (factorial3)

# + colab={} colab_type="code" id="HVWS7rb9mAIf"
x = factorial3(6)
print(x)
# -

x

# + colab={} colab_type="code" id="HIUms-AlzQu9"
# default-argument example: capLetter defaults to False
def hello2(name, capLetter = False):
    if capLetter:
        print("Hello " + name.upper())
    else:
        print("Hello " + name)

# + colab={} colab_type="code" id="DkHm9Vtw0O4M"
hello2("asli")

# + colab={} colab_type="code" id="dYyDrfTn0TOS"
hello2("Asli", capLetter= True)
# -

#lambda function
(lambda x: x + 1)(2)

full_name = lambda first, last: f'Full name: {first.title()} {last.title()}'
full_name('guido', '<NAME>')

# ### * args and ** kwargs
# * args (Non Keyword Arguments)
# * kwargs (Keyword Arguments)

# + colab={} colab_type="code" id="AzJDy8xD1Rbm"
def multp(*args):
    """Print the product of all positional arguments."""
    result = 1
    for i in args:
        result *= i
    print(result)
# *args keeps the data as tuple type.
# + colab={"base_uri": "https://localhost:8080/", "height": 35} colab_type="code" id="crk-VWf912z_" outputId="251acfa4-5dcc-44fc-ff37-89e125cab82e" multp(4,5,6,7,8,9) # + def multp1(*args): result = 2 for i in args: result *= i # result = result * i print(result) # *args keeps the data as tuple type. # - multp1([4,5,6,7]) # + colab={"base_uri": "https://localhost:8080/", "height": 35} colab_type="code" id="sp9bJPmu1-lF" outputId="0aab548b-3a1f-4c19-ddfe-8287e43ee2f6" multp1(2,3,4,5) # - [4,5,6] * 3 # + def func_kwargs(**kwargs): print(kwargs) func_kwargs(name = "Murat", name2 = "Ömer", number=12345, can='Emir', beril='yılmaz') # + colab={} colab_type="code" id="M_gEN02J7Z6P" def salaryCalc(salary): if salary < 0: return("Invalid value") else: if 0 < salary <= 1000: salary = salary + salary * 0.15 elif salary <= 2000: salary = salary + salary * 0.1 elif salary <= 3000: salary = salary + salary * 0.05 else: salary = salary + salary * 0.025 return ("New salary: ", salary) # + colab={} colab_type="code" id="Tp8kE5iH7-_x" salaryCalc(-5) # - salaryCalc(800) # + colab={} colab_type="code" id="I9BT87fl-Kgr" def salaryCalc2(): salary = float(input("Please enter your current salary: ")) if salary < 0: return("Invalid value") else: if 0 < salary <= 1000: salary = salary + salary * 0.15 elif salary <= 2000: salary = salary + salary * 0.1 elif salary <= 3000: salary = salary + salary * 0.05 else: salary = salary + salary * 0.025 return ("New salary: ", salary) # + colab={} colab_type="code" id="bF1ty6Cc8U_X" new_salary = salaryCalc2() print(new_salary) # + [markdown] colab_type="text" id="6sOO_4CbBygM" # ### Let's write a function that returns a random word from a list. 
# # ### Modules # # import numpy # # import tensorflow as tf # # import myModules # # myModules.myFunc() # # from myModules import * # # myFunc() # + colab={} colab_type="code" id="fyf2VtDZ3ALS" words = ["artificial","intelligence","machine","learning","python","programming"] #from random import * import random as rnd def randomWord(words): index = rnd.randint(0, len(words)-1) return words[index] # - len(words) # + colab={"base_uri": "https://localhost:8080/", "height": 35} colab_type="code" id="vdt230OzC_DR" outputId="d86a979d-f371-44b4-9769-3b074686680c" word = randomWord(words) print(word) # + [markdown] colab_type="text" id="MfFOsrA6AfmC" # ### Global & Local Variables # + colab={"base_uri": "https://localhost:8080/", "height": 35} colab_type="code" id="7_UD1zAn_lxX" outputId="943b58cd-497e-4cec-962f-37084451f85a" x = 5 print(x) # + colab={} colab_type="code" id="IisEjgqpAqCi" def display(): x = 4 return(x) # + colab={"base_uri": "https://localhost:8080/", "height": 35} colab_type="code" id="6dhEuYDjAylB" outputId="3d88f156-bdd1-411b-9fcb-93715c02d292" display() # + colab={"base_uri": "https://localhost:8080/", "height": 35} colab_type="code" id="dPUxfagJA7Gn" outputId="ba6bf99f-ba77-4498-b6f5-fe4c2328d920" print(x) # + [markdown] colab_type="text" id="WJreHgrGllCV" # ## Methods # # Functions are called by name, it can take parameters inside and optionally the resulting value can be used outside of the function. # # # Methods are also called by name, in many ways they are like functions, but calling is performed through an object such as a String or list. 
#
#
# object.methodName(parameter)

# + colab={} colab_type="code" id="G56qfrjqDEDP"
s = input("Please enter a name: ")
print(s.upper())

# +
#it does not return any value
list1 = [1,2,3,4,5,6]
list1.remove(4)
list1
# -

list1

list1.index(6)

# + colab={"base_uri": "https://localhost:8080/", "height": 35} colab_type="code" id="7DzoBNeApbzZ" outputId="1574c1d7-156b-4650-f3b6-8a2b22b98eed"
#return the index of the element with the highest value in a given list.
myList = [45,7,23,6,12,78]
maxElement = max(myList)
maxIndex = myList.index(maxElement)
print(maxIndex)
# -

# # Exceptions
#
# * Programmer Errors
# * Program Bugs
# * Exceptions

# +
# error example,SyntaxError.
# NOTE: this cell is INTENTIONALLY broken (Python 2 print statement) to
# demonstrate a SyntaxError; do not "fix" it.
print "Hello World!"

# +
#bug example.
# input() returns strings, so "+" concatenates instead of adding numbers —
# a deliberate logic-bug demonstration
num1 = input("Enter the first integer: ")
num2 = input("Enter the second integer: ")
print(num1, "+", num2, "=", num1 + num2)

# +
#exception example, ValueError.
num3 = int(input("First integer: "))
num4 = int(input("Second integer: "))
print(num3, "/",num4, "=", num3/num4)

# +
# ZeroDivisionError.
num3 = int(input("First integer: "))
num4 = int(input("Second integer: "))
print(num3, "/",num4, "=", num3/num4)
# -

# ## Exception Handling
#
#
# try:
#
#
# > the situations where we can get exceptions
#
# except "Exception Name":
#
#
# > the operations in case of exceptions
#
#
#

# +
x = "<NAME>"
int(x)

# +
try:
    int(x)
except ValueError:
    print("Please enter an integer value!!!")

# +
# handling only ValueError
num3 = input("First integer: ")
num4 = input("Second integer: ")

try:
    num3_int = int(num3)
    num4_int = int(num4)
    print(num3_int, "/",num4_int, "=", num3_int/num4_int)
except ValueError:
    print("Please enter an integer value!!!")

# +
# handling only ZeroDivisionError
num3 = input("First integer: ")
num4 = input("Second integer: ")

try:
    num3_int = int(num3)
    num4_int = int(num4)
    print(num3_int, "/",num4_int, "=", num3_int/num4_int)
except ZeroDivisionError:
    print("Please enter the second input different than 0 value!!!")

ebru = int(input("Enter a integer number"))
ebru

# +
# multiple except clauses, most specific first, with a catch-all at the end
num3 = input("First integer: ")
num4 = input("Second integer: ")

try:
    num3_int = int(num3)
    num4_int = int(num4)
    print(num3_int, "/",num4_int, "=", num3_int/num4_int)
except ValueError:
    print("Please enter an integer value!!!")
except ZeroDivisionError:
    print("Please enter the second input different than 0 value!!!")
except:
    print("Unknown error...")

# +
# a single except clause can catch a tuple of exception types
num3 = input("First integer: ")
num4 = input("Second integer: ")

try:
    num3_int = int(num3)
    num4_int = int(num4)
    print(num3_int, "/",num4_int, "=", num3_int/num4_int)
except (ValueError, ZeroDivisionError):
    print("Error!!!")

# +
#try/except/as
num3 = input("First integer: ")
num4 = input("Second integer: ")

try:
    num3_int = int(num3)
    num4_int = int(num4)
    print(num3_int, "/",num4_int, "=", num3_int/num4_int)
except ValueError as error:
    print("Error!!!")
    print("Error message: ", error)

# +
#exception handling in loop structure
while True:
    num1 = input("First number: (Press q for quit the program): ")
    if num1 == "q":
        break
    num2 = input("Second number: ")
    try:
        num1_int = int(num1)
        num2_int = int(num2)
        print(num1_int, "/", num2_int, "=", num1_int / num2_int)
    except (ValueError, ZeroDivisionError):
        print("Error!")
        print("Please try again!")

# +
"""
exception handling in functions using raise command
"""
def reverse(s):
    """Return `s` reversed; raise ValueError for non-string input."""
    if (type(s) != str):
        raise ValueError("Please enter a String type.")
    else:
        return s[::-1]
# -

reverse("python")

# deliberately triggers the ValueError raised above
reverse(12)
4.Python-Programming.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + [markdown] colab_type="text" id="6oXxxX9LZL-h" # # Dirichlet Process Mixture Models in Pyro # # # ## What are Bayesian nonparametric models? # Bayesian nonparametric models are models where the number of parameters grow freely with the amount of data provided; thus, instead of training several models that vary in complexity and comparing them, one is able to design a model whose complexity grows as more data are observed. The prototypical example of Bayesian nonparametrics in practice is the *Dirichlet Process Mixture Model* (DPMM). A DPMM allows for a practitioner to build a mixture model when the number of distinct clusters in the geometric structure of their data is unknown – in other words, the number of clusters is allowed to grow as more data is observed. This feature makes the DPMM highly useful towards exploratory data analysis, where few facets of the data in question are known; this presentation aims to demonstrate this fact. # # ## The Dirichlet Process (Ferguson, 1973) # Dirichlet processes are a family of probability distributions over discrete probability distributions. Formally, the Dirichlet process (DP) is specified by some base probability distribution $G_0: \Omega \to \mathbb{R}$ and a positive, real, scaling parameter commonly denoted as $\alpha$. A sample $G$ from a Dirichlet process with parameters $G_0: \Omega \to \mathbb{R}$ and $\alpha$ is itself a distribution over $\Omega$. 
For any disjoint partition $\Omega_1, ..., \Omega_k$ of $\Omega$, and any sample $G \sim DP(G_0, \alpha)$, we have: # # $$(G(\Omega_1), ..., G(\Omega_k)) \sim \text{Dir}(\alpha G_0(\Omega_1), ..., \alpha G_0(\Omega_k))$$ # # Essentially, this is taking a discrete partition of our sample space $\Omega$ and subsequently constructing a discrete distribution over it using the base distribution $G_0$. While quite abstract in formulation, the Dirichlet process is very useful as a prior in various graphical models. This fact becomes easier to see in the following scheme. # # ## The Chinese Restaurant Process (Aldous, 1985) # # Imagine a restaurant with infinite tables (indexed by the positive integers) that accepts customers one at a time. The $n$th customer chooses their seat according to the following probabilities: # # * With probability $\frac{n_t}{\alpha + n - 1}$, sit at table $t$, where $n_t$ is the number of people at table $t$ # * With probability $\frac{\alpha}{\alpha + n - 1}$, sit at an empty table # # If we associate to each table $t$ a draw from a base distribution $G_0$ over $\Omega$, and then associate unnormalized probability mass $n_t$ to that draw, the resulting distribution over $\Omega$ is equivalent to a draw from a Dirichlet process $DP(G_0, \alpha)$. # # Furthermore, we can easily extend this to define the generative process of a nonparametric mixture model: every table $t$ that has at least one customer seated is associated with a set of cluster parameters $\theta_t$, which were themselves drawn from some base distribution $G_0$. For each new observation, first assign that observation to a table according to the above probabilities; then, that observation is drawn from the distribution parameterized by the cluster parameters for that table. If the observation was assigned to a new table, draw a new set of cluster parameters from $G_0$, and then draw the observation from the distribution parameterized by those cluster parameters. 
# # While this formulation of a Dirichlet process mixture model is intuitive, it is also very difficult to perform inference on in a probabilistic programming framework. This motivates an alternative formulation of DPMMs, which has empirically been shown to be more conducive to inference (e.g. Blei and Jordan, 2004). # # ## The Stick-Breaking Method (Sethuraman, 1994) # # The generative process for the stick-breaking formulation of DPMMs proceeds as follows: # # * Draw $\beta_i \sim \text{Beta}(1, \alpha)$ for $i \in \mathbb{N}$ # * Draw $\theta_i \sim G_0$ for $i \in \mathbb{N}$ # * Construct the mixture weights $\pi$ by taking $\pi_i(\beta_{1:\infty}) = \beta_i \prod_{j<i} (1-\beta_j)$ # # * For each observation $n \in \{1, ..., N\}$, draw $z_n \sim \pi(\beta_{1:\infty})$, and then draw $x_n \sim f(\theta_{z_n})$ # # Here, the infinite nature of the Dirichlet process mixture model can more easily be seen. Furthermore, all $\beta_i$ are independent, so it is far easier to perform inference in a probabilistic programming framework. 
# # First, we import all the modules we're going to need: # + colab={"base_uri": "https://localhost:8080/", "height": 400} colab_type="code" id="VaAgD92IdE6i" outputId="47dd309c-4689-403e-900b-413381c4fedc" import matplotlib.pyplot as plt import numpy as np import pandas as pd from tqdm import tqdm import torch import torch.nn.functional as F from torch.distributions import constraints import pyro from pyro.distributions import * from pyro.infer import Predictive, SVI, Trace_ELBO from pyro.optim import Adam assert pyro.__version__.startswith('1.5.2') pyro.set_rng_seed(0) # + [markdown] colab_type="text" id="GomHVM-cBrQY" # # # ## **Inference** # # # # # ### Synthetic Mixture of Gaussians # # We begin by demonstrating the capabilities of Dirichlet process mixture models on a synthetic dataset generated by a mixture of four 2D Gaussians: # + colab={"base_uri": "https://localhost:8080/", "height": 281} colab_type="code" id="3_EyZAbwTMrs" outputId="f5c2222c-c3db-4a34-f021-75f38bbcb4c2" data = torch.cat((MultivariateNormal(-8 * torch.ones(2), torch.eye(2)).sample([50]), MultivariateNormal(8 * torch.ones(2), torch.eye(2)).sample([50]), MultivariateNormal(torch.tensor([1.5, 2]), torch.eye(2)).sample([50]), MultivariateNormal(torch.tensor([-0.5, 1]), torch.eye(2)).sample([50]))) plt.scatter(data[:, 0], data[:, 1]) plt.title("Data Samples from Mixture of 4 Gaussians") plt.show() N = data.shape[0] # + [markdown] colab_type="text" id="CglLQke4gEYd" # In this example, the cluster parameters $\theta_i$ are two dimensional vectors describing the means of a multivariate Gaussian with identity covariance. Therefore, the Dirichlet process base distribution $G_0$ is also a multivariate Gaussian (i.e. the conjugate prior), although this choice is not as computationally useful, since we are not performing coordinate-ascent variational inference but rather black-box variational inference using Pyro. 
# # First, let's define the "stick-breaking" function that generates our weights, given our samples of $\beta$: # + colab={} colab_type="code" id="2ngrqFlDQYpV" def mix_weights(beta): beta1m_cumprod = (1 - beta).cumprod(-1) return F.pad(beta, (0, 1), value=1) * F.pad(beta1m_cumprod, (1, 0), value=1) # + [markdown] colab_type="text" id="LAC0bWL6Qcc3" # Next, let's define our model. It may be helpful to refer the definition of the stick-breaking model presented in the first part of this tutorial. # # Note that all $\beta_i$ samples are conditionally independent, so we model them using a `pyro.plate` of size `T-1`; we do the same for all samples of our cluster parameters $\mu_i$. We then construct a Categorical distribution whose parameters are the mixture weights using our sampled $\beta$ values (line 9) below, and sample the cluster assignment $z_n$ for each data point from that Categorical. Finally, we sample our observations from a multivariate Gaussian distribution whose mean is exactly the cluster parameter corresponding to the assignment $z_n$ we drew for the point $x_n$. This can be seen in the Pyro code below: # + colab={} colab_type="code" id="WfnbSIocRlvQ" def model(data): with pyro.plate("beta_plate", T-1): beta = pyro.sample("beta", Beta(1, alpha)) with pyro.plate("mu_plate", T): mu = pyro.sample("mu", MultivariateNormal(torch.zeros(2), 5 * torch.eye(2))) with pyro.plate("data", N): z = pyro.sample("z", Categorical(mix_weights(beta))) pyro.sample("obs", MultivariateNormal(mu[z], torch.eye(2)), obs=data) # + [markdown] colab_type="text" id="1gBQj5RKRn8Z" # Now, it's time to define our guide and perform inference. 
#
# The variational family $q(\beta, \theta, z)$ that we are optimizing over during variational inference is given by:
#
# $$q(\beta, \theta, z) = \prod_{t=1}^{T-1} q_t(\beta_t) \prod_{t=1}^T q_t(\theta_t) \prod_{n=1}^N q_n(z_n)$$
#
# Note that since we are unable to computationally model the infinite clusters posited by the model, we truncate our variational family at $T$ clusters. This does not affect our model; rather, it is a simplification made in the *inference* stage to allow tractability.
#
# The guide is constructed exactly according to the definition of our variational family $q(\beta, \theta, z)$ above. We have $T-1$ conditionally independent Beta distributions for each $\beta$ sampled in our model, $T$ conditionally independent multivariate Gaussians for each cluster parameter $\mu_i$, and $N$ conditionally independent Categorical distributions for each cluster assignment $z_n$.
#
# Our variational parameters (`pyro.param`) are therefore the $T-1$ many positive scalars that parameterize the second parameter of our variational Beta distributions (the first shape parameter is fixed at $1$, as in the model definition), the $T$ many two-dimensional vectors that parameterize our variational multivariate Gaussian distributions (we do not parameterize the covariance matrices of the Gaussians, though this should be done when analyzing a real-world dataset for more flexibility), and the $N$ many $T$-dimensional vectors that parameterize our variational Categorical distributions:

# + colab={} colab_type="code" id="Imw4wcVkT9er"
def guide(data):
    """Variational guide mirroring `model`; lambdas are lazy initializers
    used only on the first call to pyro.param."""
    # kappa: second shape parameter of each variational Beta (first fixed at 1)
    kappa = pyro.param('kappa', lambda: Uniform(0, 2).sample([T-1]), constraint=constraints.positive)
    # tau: variational means of the T cluster-mean Gaussians
    tau = pyro.param('tau', lambda: MultivariateNormal(torch.zeros(2), 3 * torch.eye(2)).sample([T]))
    # phi: per-observation assignment probabilities (rows on the simplex)
    phi = pyro.param('phi', lambda: Dirichlet(1/T * torch.ones(T)).sample([N]), constraint=constraints.simplex)

    with pyro.plate("beta_plate", T-1):
        q_beta = pyro.sample("beta", Beta(torch.ones(T-1), kappa))

    with pyro.plate("mu_plate", T):
        q_mu = pyro.sample("mu", MultivariateNormal(tau, torch.eye(2)))

    with pyro.plate("data", N):
        z = pyro.sample("z", Categorical(phi))

# + [markdown] colab_type="text" id="H0He1id0T_bN"
# When performing inference, we set our 'guess' for the maximum number of clusters in the dataset to $T = 6$. We define the optimization algorithm (`pyro.optim.Adam`) along with the Pyro SVI object and train the model for 1000 iterations.
#
# After performing inference, we construct the Bayes estimators of the means (the expected values of each factor in our variational approximation) and plot them in red on top of the original dataset. Note that we also have we removed any clusters that have less than a certain weight assigned to them according to our learned variational distributions, and then re-normalize the weights so that they sum to one:

# + colab={"base_uri": "https://localhost:8080/", "height": 372} colab_type="code" id="x1Yidukpd9wO" outputId="b0cc290b-3285-4f36-c2a6-7195b6801482"
T = 6
optim = Adam({"lr": 0.05})
svi = SVI(model, guide, optim, loss=Trace_ELBO())
losses = []

def train(num_iterations):
    """Run SVI for num_iterations steps, resetting the param store first.

    Appends per-step ELBO losses to the module-level `losses` list.
    """
    pyro.clear_param_store()
    for j in tqdm(range(num_iterations)):
        loss = svi.step(data)
        losses.append(loss)

def truncate(alpha, centers, weights):
    """Drop clusters whose weight is below 1/(100*alpha), renormalize the rest.

    Returns (kept_centers, renormalized_weights).
    """
    threshold = alpha**-1 / 100.
    true_centers = centers[weights > threshold]
    true_weights = weights[weights > threshold] / torch.sum(weights[weights > threshold])
    return true_centers, true_weights

alpha = 0.1
train(1000)

# We make a point-estimate of our model parameters using the posterior means of tau and phi for the centers and weights
Bayes_Centers_01, Bayes_Weights_01 = truncate(alpha, pyro.param("tau").detach(), torch.mean(pyro.param("phi").detach(), dim=0))

alpha = 1.5
train(1000)

# We make a point-estimate of our model parameters using the posterior means of tau and phi for the centers and weights
Bayes_Centers_15, Bayes_Weights_15 = truncate(alpha, pyro.param("tau").detach(), torch.mean(pyro.param("phi").detach(), dim=0))

# side-by-side comparison of the two alpha settings: data (blue) with the
# surviving cluster centers (red)
plt.figure(figsize=(15, 5))
plt.subplot(1, 2, 1)
plt.scatter(data[:, 0], data[:, 1], color="blue")
plt.scatter(Bayes_Centers_01[:, 0], Bayes_Centers_01[:, 1], color="red")

plt.subplot(1, 2, 2)
plt.scatter(data[:, 0], data[:, 1], color="blue")
plt.scatter(Bayes_Centers_15[:, 0], Bayes_Centers_15[:, 1], color="red")
plt.tight_layout()
plt.show()

# + [markdown] colab_type="text" id="DO3XDmEwC5ra"
# The plots above demonstrate the effects of the scaling hyperparameter $\alpha$. A greater $\alpha$ yields a more heavy-tailed distribution of the weights, whereas smaller $\alpha$ will place more mass on fewer clusters. In particular, the middle cluster looks like it could be generated a single Gaussian (although in fact it was generated by two distinct Gaussians), and thus the setting of $\alpha$ allows the practitioner to further encode their prior beliefs about how many clusters the data contains.

# + [markdown] colab_type="text" id="oWBlcZ1vfiBV"
# ### Dirichlet Mixture Model for Long Term Solar Observations
#
# As mentioned earlier, the Dirichlet process mixture model truly shines when exploring a dataset whose latent geometric structure is completely unknown.
To demonstrate this, we fit a DPMM on sunspot count data taken over the past 300 years (provided by the Royal Observatory of Belgium): # + colab={"base_uri": "https://localhost:8080/", "height": 295} colab_type="code" id="UlhV2Fx_fVGy" outputId="045dbfaa-27a0-4aa6-ee8c-de9f9937aa38" df = pd.read_csv('http://www.sidc.be/silso/DATA/SN_y_tot_V2.0.csv', sep=';', names=['time', 'sunspot.year'], usecols=[0, 1]) data = torch.tensor(df['sunspot.year'].values, dtype=torch.float32).round() N = data.shape[0] plt.hist(df['sunspot.year'].values, bins=40) plt.title("Number of Years vs. Sunspot Counts") plt.xlabel("Sunspot Count") plt.ylabel("Number of Years") plt.show() # + [markdown] colab_type="text" id="1BE18id2_VlV" # For this example, the cluster parameters $\theta_i$ are rate parameters since we are constructing a scale-mixture of Poisson distributions. Again, $G_0$ is chosen to be the conjugate prior, which in this case is a Gamma distribution, though this still does not strictly matter for doing inference through Pyro. 
Below is the implementation of the model: # + colab={"base_uri": "https://localhost:8080/", "height": 298} colab_type="code" id="f7mWbeTz_GLI" outputId="bd64a9bc-5aa2-4767-d71f-bdcb6279526c" def model(data): with pyro.plate("beta_plate", T-1): beta = pyro.sample("beta", Beta(1, alpha)) with pyro.plate("lambda_plate", T): lmbda = pyro.sample("lambda", Gamma(3, 0.05)) with pyro.plate("data", N): z = pyro.sample("z", Categorical(mix_weights(beta))) pyro.sample("obs", Poisson(lmbda[z]), obs=data) def guide(data): kappa = pyro.param('kappa', lambda: Uniform(0, 2).sample([T-1]), constraint=constraints.positive) tau_0 = pyro.param('tau_0', lambda: Uniform(0, 5).sample([T]), constraint=constraints.positive) tau_1 = pyro.param('tau_1', lambda: LogNormal(-1, 1).sample([T]), constraint=constraints.positive) phi = pyro.param('phi', lambda: Dirichlet(1/T * torch.ones(T)).sample([N]), constraint=constraints.simplex) with pyro.plate("beta_plate", T-1): q_beta = pyro.sample("beta", Beta(torch.ones(T-1), kappa)) with pyro.plate("lambda_plate", T): q_lambda = pyro.sample("lambda", Gamma(tau_0, tau_1)) with pyro.plate("data", N): z = pyro.sample("z", Categorical(phi)) T = 20 alpha = 1.1 n_iter = 1500 optim = Adam({"lr": 0.05}) svi = SVI(model, guide, optim, loss=Trace_ELBO()) losses = [] train(n_iter) samples = torch.arange(0, 300).type(torch.float) tau0_optimal = pyro.param("tau_0").detach() tau1_optimal = pyro.param("tau_1").detach() kappa_optimal = pyro.param("kappa").detach() # We make a point-estimate of our latent variables using the posterior means of tau and kappa for the cluster params and weights Bayes_Rates = (tau0_optimal / tau1_optimal) Bayes_Weights = mix_weights(1. / (1. + kappa_optimal)) def mixture_of_poisson(weights, rates, samples): return (weights * Poisson(rates).log_prob(samples.unsqueeze(-1)).exp()).sum(-1) likelihood = mixture_of_poisson(Bayes_Weights, Bayes_Rates, samples) plt.title("Number of Years vs. 
Sunspot Counts") plt.hist(data.numpy(), bins=60, density=True, lw=0, alpha=0.75); plt.plot(samples, likelihood, label="Estimated Mixture Density") plt.legend() plt.show() # + [markdown] colab_type="text" id="gjn4x2JqBfll" # The above plot is the mixture density of the Bayes estimators of the cluster parameters, weighted by their corresponding weights. As in the Gaussian example, we have taken the Bayes estimators of each cluster parameter and their corresponding weights by computing the posterior means of `lambda` and `beta` respectively. # + [markdown] colab_type="text" id="NaHLTiRJUxBR" # ### ELBO Behavior # # Below are plots of the behavior of the loss function (negative Trace_ELBO) over the SVI iterations during inference using Pyro, as well as a plot of the autocorrelations of the ELBO 'time series' versus iteration number. We can see that around 500 iterations, the loss stops decreasing significantly, so we can assume it takes around 500 iterations to achieve convergence. The autocorrelation plot reaches an autocorrelation very close to 0 around a lag of 500, further corroborating this hypothesis. Note that these are heuristics and do not necessarily imply convergence. # + colab={"base_uri": "https://localhost:8080/", "height": 369} colab_type="code" id="XzbxzSkOUveF" outputId="a752e5e8-1fcf-4ff9-d8db-8d3f59a6a32b" elbo_plot = plt.figure(figsize=(15, 5)) elbo_ax = elbo_plot.add_subplot(1, 2, 1) elbo_ax.set_title("ELBO Value vs. Iteration Number for Pyro BBVI on Sunspot Data") elbo_ax.set_ylabel("ELBO") elbo_ax.set_xlabel("Iteration Number") elbo_ax.plot(np.arange(n_iter), losses) autocorr_ax = elbo_plot.add_subplot(1, 2, 2) autocorr_ax.acorr(np.asarray(losses), detrend=lambda x: x - x.mean(), maxlags=750, usevlines=False, marker=',') autocorr_ax.set_xlim(0, 500) autocorr_ax.axhline(0, ls="--", c="k", lw=1) autocorr_ax.set_title("Autocorrelation of ELBO vs. 
Lag for Pyro BBVI on Sunspot Data") autocorr_ax.set_xlabel("Lag") autocorr_ax.set_ylabel("Autocorrelation") elbo_plot.tight_layout() plt.show() # + [markdown] colab_type="text" id="_sn3OS8tOoko" # ## **Criticism** # # # # + [markdown] colab_type="text" id="qvRAgapHZLPA" # ### Long-Term Sunspot Model # # Since we computed the approximate posterior of the DPMM that was fit to the long-term sunspot data, we can utilize some intrinsic metrics, such as the log predictive, posterior dispersion indices, and posterior predictive checks. # # Since the posterior predictive distribution for a Dirichlet process mixture model is itself a scale-mixture distribution that has an analytic approximation [(Blei and Jordan, 2004)](http://www.cs.columbia.edu/~blei/papers/BleiJordan2004.pdf), this makes it particularly amenable to the aforementioned metrics: # # $$p(x_{new} | X_{1:N}, \alpha, G_0) \approx \sum_{t=1}^T \mathbb{E}_q [\pi_t(\beta)] \ \mathbb{E}_q \left[p(x_{new} | \theta_t)\right].$$ # # In particular, to compute the log predictive, we first compute the posterior predictive distribution (defined above) after performing variational inference on our model using a training subsample of our data. The log predictive is then the log value of the predictive density evaluated at each point in the test subsample: # # $$\log p(x_{new} | X) = \log \mathbb{E}_{\beta, \theta | X} \left[ p(x_{new} | \beta, \theta) \right]$$ # # Since both the training samples and the testing samples were taken from the same dataset, we would expect the model to assign high probability to the test samples, despite not having seen them during inference. This gives a metric by which to select values of $T$, $\alpha$, and $G_0$, our hyperparameters: we would want to choose the values that maximize this value. # # We perform this process below with varying values of $\alpha$ to see what the optimal setting is. 
# + colab={"base_uri": "https://localhost:8080/", "height": 526} colab_type="code" id="3V2-DC9nobPi" outputId="473035e6-036f-4e34-9ea4-6c2dc5a4bec0" # Hold out 10% of our original data to test upon df_test = df.sample(frac=0.1) data = torch.tensor(df.drop(df_test.index)['sunspot.year'].values, dtype=torch.float).round() data_test = torch.tensor(df_test['sunspot.year'].values, dtype=torch.float).round() N = data.shape[0] N_test = data_test.shape[0] alphas = [0.05, 0.1, 0.5, 0.75, 0.9, 1., 1.25, 1.5, 2, 2.5, 3] log_predictives = [] for val in alphas: alpha = val T = 20 svi = SVI(model, guide, optim, loss=Trace_ELBO()) train(500) S = 100 # number of Monte Carlo samples to use in posterior predictive computations # Using pyro's built in posterior predictive class: posterior = Predictive(guide, num_samples=S, return_sites=["beta", "lambda"])(data) post_pred_weights = mix_weights(posterior["beta"]) post_pred_clusters = posterior["lambda"] # log_prob shape = N_test x S log_prob = (post_pred_weights.log() + Poisson(post_pred_clusters).log_prob(data.reshape(-1, 1, 1))).logsumexp(-1) mean_log_prob = log_prob.logsumexp(-1) - np.log(S) log_posterior_predictive = mean_log_prob.sum(-1) log_predictives.append(log_posterior_predictive) plt.figure(figsize=(10, 5)) plt.plot(alphas, log_predictives) plt.title("Value of the Log Predictive at Varying Alpha") plt.show() # + [markdown] colab_type="text" id="PGw4eD363PWP" # From the above plot, we would surmise that we want to set $\alpha > 1$, though the signal is not quite clear. A more comprehensive model criticism process would involve performing a grid search across all hyperparameters in order to find the one that maximizes the log predictive. # # + [markdown] colab_type="text" id="8A1gt4sn8Upo" # ## References # # # # 1. <NAME>. *A Bayesian Analysis of Some Nonparametric Problems*. The Annals of Statistics, Vol. 1, No. 2 (1973). # 2. <NAME>. *Exchangeability and Related Topics*. Ecole diete de Probabilities Saint Flour (1985). # 3. 
<NAME>. *A Constructive Definition of Dirichlet Priors*. Statistica Sinica, 4:639-650 (1994).
# 4. <NAME> and <NAME>. *Variational Inference for Dirichlet Process Mixtures*. Bayesian Analysis, Vol. 1, No. 1 (2004).
# 5. Pedregosa, et al. *Scikit-Learn: Machine Learning in Python*. JMLR 12, pp. 2825-2830 (2011).
# 6. <NAME>. *Pattern Recognition and Machine Learning*. Springer Ltd (2006).
# 7. *Sunspot Index and Long-Term Solar Observations*. WDC-SILSO, Royal Observatory of Belgium, Brussels (2018).
# 9. <NAME>. *Understanding predictive information criteria for Bayesian models*. Statistics and Computing, Springer Link, 2014.
#
#
#
tutorial/source/dirichlet_process_mixture.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# Program to find sub-list in a given list

lst = [1, 1, 5]


def s_list(l):
    """Print whether the module-level list ``lst`` occurs as an ordered
    (not necessarily contiguous) subsequence of ``l``.

    Prints "It's a Match!" when every element of ``lst`` is found in ``l``
    in order, otherwise prints "It's Gone!".
    """
    # b: next index of l to search from; c: count of lst items matched so far
    b = 0
    c = 0
    for item in lst:
        for i in range(b, len(l)):
            if item == l[i]:
                b = i + 1
                c = c + 1
                break
    if c == len(lst):
        print("It's a Match!")
    else:
        print("It's Gone!")


abc = [[1, 5, 6, 4, 1, 2, 3, 5], [1, 5, 6, 5, 1, 2, 3, 6]]
for j in abc:
    s_list(j)

# # Question 2
# Prime number using filter function


def prime(num):
    """Return True if ``num`` is a prime number, else False.

    Bug fix: the original put ``else: return True`` inside the loop, so it
    returned True as soon as ``num`` was not divisible by 2 -- every odd
    composite (9, 15, 21, ...) was reported prime -- while 2 and 3 fell out
    of the empty loop and returned None, so ``filter`` dropped them.
    Trial division up to sqrt(num) is both correct and faster.
    """
    if num < 2:
        return False
    for i in range(2, int(num ** 0.5) + 1):
        if num % i == 0:
            return False
    return True


lst_prime = filter(prime, range(2500))
print(list(lst_prime))

# # Question 3
# Capitalizing the letters in a sentence using lambda function
# Guarded so that importing this module does not block on stdin;
# inside a notebook __name__ is '__main__', so cell behavior is unchanged.
if __name__ == '__main__':
    n = input()
    n = list(map(str, n.split()))
    s = map(lambda x: x.capitalize(), n)
    print(*s)
Day-5 Assignment.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + #undersample from imblearn.under_sampling import NearMiss from imblearn.under_sampling import CondensedNearestNeighbour from imblearn.under_sampling import TomekLinks from imblearn.under_sampling import EditedNearestNeighbours from imblearn.under_sampling import OneSidedSelection from imblearn.under_sampling import NeighbourhoodCleaningRule #oversample from imblearn.over_sampling import SMOTE from imblearn.over_sampling import BorderlineSMOTE from imblearn.over_sampling import SVMSMOTE from imblearn.over_sampling import ADASYN #over+under from imblearn.combine import SMOTETomek from imblearn.combine import SMOTEENN #models from sklearn.linear_model import SGDClassifier from sklearn.ensemble import RandomForestClassifier from sklearn.svm import SVC from sklearn.tree import DecisionTreeClassifier import lightgbm as lgb from xgboost import XGBClassifier from sklearn.linear_model import LogisticRegression from sklearn.discriminant_analysis import LinearDiscriminantAnalysis from sklearn.discriminant_analysis import QuadraticDiscriminantAnalysis from sklearn.naive_bayes import GaussianNB from sklearn.naive_bayes import MultinomialNB from sklearn.gaussian_process import GaussianProcessClassifier #utils import seaborn as sns import itertools import matplotlib.pyplot as plt import numpy as np import pandas as pd from sklearn.model_selection import train_test_split import time from tqdm import tqdm import warnings warnings.filterwarnings("ignore") #metrics from sklearn import metrics from sklearn.metrics import confusion_matrix import seaborn as sns from sklearn.metrics import fbeta_score from sklearn.metrics import balanced_accuracy_score from sklearn.metrics import roc_auc_score from imblearn.metrics import geometric_mean_score from sklearn.metrics 
import f1_score # - # ## Read and understand data data = pd.read_csv('creditcard.csv') print(data.shape) data.head() # #### Load the data and reduce the memory usage of each dataframe def reduce_mem_usage(df): """ iterate through all the columns of a dataframe and modify the data type to reduce memory usage. """ start_mem = df.memory_usage().sum() / 1024**2 print('Memory usage of dataframe is {:.2f} MB'.format(start_mem)) for col in df.columns: col_type = df[col].dtype if col_type != object: c_min = df[col].min() c_max = df[col].max() if str(col_type)[:3] == 'int': if c_min > np.iinfo(np.int8).min and c_max < np.iinfo(np.int8).max: df[col] = df[col].astype(np.int8) elif c_min > np.iinfo(np.int16).min and c_max < np.iinfo(np.int16).max: df[col] = df[col].astype(np.int16) elif c_min > np.iinfo(np.int32).min and c_max < np.iinfo(np.int32).max: df[col] = df[col].astype(np.int32) elif c_min > np.iinfo(np.int64).min and c_max < np.iinfo(np.int64).max: df[col] = df[col].astype(np.int64) else: if c_min > np.finfo(np.float16).min and c_max < np.finfo(np.float16).max: df[col] = df[col].astype(np.float16) elif c_min > np.finfo(np.float32).min and c_max < np.finfo(np.float32).max: df[col] = df[col].astype(np.float32) else: df[col] = df[col].astype(np.float64) else: df[col] = df[col].astype('category') end_mem = df.memory_usage().sum() / 1024**2 print('Memory usage after optimization is: {:.2f} MB'.format(end_mem)) print('Decreased by {:.1f}%'.format(100 * (start_mem - end_mem) / start_mem)) return df data = reduce_mem_usage(data) data.dtypes object_types = data.select_dtypes(include=['object','category']) object_types.columns # ## Statistical Summary data.describe() # ### Summarizing the number of unique values in each column # print(data.nunique()) # ### Columns with single value # Columns that have a single value are probably useless for modeling. These # columns are referred to zero-variance predictors, so we dont need to use them anymore. 
values = data.nunique() single_value_cols = [values.index[i] for i,v in enumerate(values) if v == 1] single_value_cols # ### Checking duplicate rows # # Duplicate row is a row where each value in each column for that row appears in identically the same column values in another row. They are could be useless to the modeling process. # # duplicated() function will report whether a given row is duplicated or not. All # rows are marked as either False to indicate that it is not a duplicate or True to indicate that # it is a duplicate. dups = data.duplicated() print(dups.any()) data['Class'].value_counts() data.drop_duplicates(inplace=True) print(data.shape) data['Class'].value_counts() # # Explotary Data Analysis f,ax=plt.subplots(1,2,figsize=(18,8)) data['Class'].value_counts().plot.pie(explode=[0,0.1],autopct='%1.1f%%',ax=ax[0],shadow=True) ax[0].set_title('Class') ax[0].set_ylabel('') sns.countplot('Class',data=data,ax=ax[1]) ax[1].set_title('Class') plt.show() # The graphs show that our dataset is highly imbalanced. # # First let us understand the different types of features. # # Distribution of anomaly and normal data def plot_histogram(df, bins, column, log_scale=False): bins = 100 anomalies = df[df.Class == 1] normal = df[df.Class == 0] fig, (ax1, ax2) = plt.subplots(2, 1, sharex=True) fig.suptitle(f'Counts of {column} by Class') ax1.hist(anomalies[column], bins = bins, color="red") ax1.set_title('Anomaly') ax2.hist(normal[column], bins = bins, color="orange") ax2.set_title('Normal') plt.xlabel(f'{column}') plt.ylabel('Count') if log_scale: plt.yscale('log') plt.xlim((np.min(df[column]), np.max(df[column]))) plt.show() # A histogram of data for the column V12 in df. As we # can see, there is a very clear deviation seen with the anomalous values # compared to the normal values. 
Both plots share the same x-axis # scale, so while the counts might be very low compared to the normal # values, they are still spread out far more than the normal values for # the same range of V12 column values. for f in range(1, 29): print(f'V{f} Counts') plot_histogram(data,100, f'V{f}') # #### From this, you can definitely notice a right skew as well as the massive outliers present in the normal data. # # #### Other than a few defined spikes that stand out from where the normal points would have been, most of the fraudulent data in this context seems to blend in with the normal data. # ### Distrubiton of all numeric features numeric = data.select_dtypes(include = 'number') for col in list(numeric)[1:]: plt.figure(figsize=(12,5)) plt.title(f"Distribution of {col}") ax = sns.distplot(data[col]) plt.show() # As we see, we have huge amount of outliers which have to be handled before modeling. # # Data Splitting # + def load_data(data_path): data = pd.read_csv(data_path) data = reduce_mem_usage(data) X = data.iloc[:,:-1] y = data.iloc[:,-1] X_train,X_test,Y_train,Y_test = train_test_split(X,y,test_size=0.3, shuffle = True) return X_train,X_test,Y_train,Y_test X_train,X_test,Y_train,Y_test = load_data('creditcard.csv') # - # # Evaluation # Firstly, because most of the standard # metrics that are widely used assume a balanced class distribution, and because typically not # all classes, and therefore, not all prediction errors, are equal for imbalanced classification. So, accuracy is inappropriate for # imbalanced classification problems. # # The main reason is that the overwhelming number of # examples from the majority class (or classes) will overwhelm the number of examples in the # minority class, meaning that even unskillful models can achieve accuracy scores of 90 percent, # or 99 percent, depending on how severe the class imbalance happens to be. 
# # As an evaluation metric I am using __balanced_accuracy_score, roc_auc_score, geometric_mean_score.__


def evaluate_model(ytrue, ypred):
    """Score predictions with imbalance-aware metrics.

    Returns a dict with keys 'balanced_accuracy_score', 'roc_auc_score' and
    'geometric_mean_score' mapping to the corresponding score of ``ypred``
    against the ground truth ``ytrue``.
    """
    scorers = (
        ('balanced_accuracy_score', balanced_accuracy_score),
        ('roc_auc_score', roc_auc_score),
        ('geometric_mean_score', geometric_mean_score),
    )
    return {name: scorer(ytrue, ypred) for name, scorer in scorers}


# # Trying different approaches
#
# In
# this project, I have discovered metrics that we can use for imbalanced classification. Besides this, I have used different approaches:
#
#
# ## 1. Data Sampling Methods
#
# Data sampling provides a collection of techniques that transform a training dataset in
# order to balance or better balance the class distribution. Although often described in terms of two-class classification problems, class imbalance also
# affects those datasets with more than two classes that may have multiple minority classes
# or multiple majority classes. __Oversampling methods duplicate examples in the minority class or synthesize new examples
# from the examples in the minority class. Meanwhile, Undersampling methods delete or select a subset of examples from the majority class.__
#
# ### 1.1 SMOTE (Synthetic Minority Oversampling Technique)
#
# SMOTE first selects a minority class instance a at random and finds its k nearest
# minority class neighbors. The synthetic instance is then created by choosing one of
# the k nearest neighbors b at random and connecting a and b to form a line segment
# in the feature space. The synthetic instances are generated as a convex combination
# of the two chosen instances a and b.
#
# ### 1.2 ADASYN (Adaptive Synthetic Sampling)
#
# It is a modification of SMOTE that is based on the idea of adaptively generating minority data samples
# according to their distributions: more synthetic data is generated for minority class
# samples that are harder to learn compared to those minority samples that are easier
# to learn.
# # # ### 1.3 Borderline-SMOTE # # A popular extension to SMOTE involves selecting those instances of the minority class that are # misclassified, such as with a k-nearest neighbor classification model. We can then oversample # just those difficult instances, providing more resolution only where it may be required. # # # ### 1.4 SVM-SMOTE # # This method is an alternative to Borderline-SMOTE where a SVM algorithm # is used instead of a KNN to identify misclassified examples on the decision boundary. In the SVMSMOTE(), borderline area is approximated by the support vectors obtained after training # a standard SVMs classifier on the original training set. New instances will be # randomly created along the lines joining each minority class support vector with a # number of its nearest neighbors using the interpolation # # # ### 1.5 Near Miss Undersampling # # In this method, we have three versions of # the technique, named NearMiss-1, NearMiss-2, and NearMiss-3. # # Here, distance is determined in feature space # using Euclidean distance or similar. # # ˆ NearMiss-1: Majority class examples with minimum average distance to three closest # minority class examples. # # ˆ NearMiss-2: Majority class examples with minimum average distance to three furthest # minority class examples. # # ˆ NearMiss-3: Majority class examples with minimum distance to each minority class # example. 
# # names = [SMOTE(),NearMiss(),BorderlineSMOTE(),SVMSMOTE(),ADASYN()] names def sampling_method(method_name,xtrain,ytrain): resample = method_name model = LogisticRegression() X_train_res,Y_train_res=resample.fit_resample(xtrain,ytrain) start_time = time.time() print(X_train_res.shape,Y_train_res.shape) model.fit(X_train_res,Y_train_res) pred = model.predict(X_test) metrics_dict = evaluate_model(Y_test, pred) metrics_dict['model_name'] = str(model) metrics_dict['resample_method'] = str(resample) metrics_dict['train_time']= time.time() - start_time return metrics_dict resample_results = [] for i in tqdm(names): result = sampling_method(i,X_train,Y_train) resample_results.append(result) # # Probalistic models # # Probabilistic models are # those models that are fit on the data under a probabilistic framework and often perform well # in general for imbalanced classification dataset. I will evaluate a suite of models that are known to be effective at predicting probabilities. # # Specifically, these are models that are fit under a probabilistic framework and explicitly predict a # calibrated probability for each example. A such, this makes them well-suited to this dataset, even # with the class imbalance. 
We will evaluate the following six probabilistic models implemented # with the scikit-learn library: # # #### Logistic Regression (LR) # #### Linear Discriminant Analysis (LDA) # #### Quadratic Discriminant Analysis (QDA) # #### Gaussian Naive Bayes (GNB) # #### Gaussian Process (GPC) prob_models = [LinearDiscriminantAnalysis(), LogisticRegression(solver='lbfgs'), QuadraticDiscriminantAnalysis(), GaussianNB(), GaussianProcessClassifier()] def prob_method(model_name,xtrain,ytrain): model = model_name start_time = time.time() print(xtrain.shape,ytrain.shape) model.fit(xtrain, ytrain) pred = model.predict(X_test) metrics_dict = evaluate_model(Y_test, pred) metrics_dict['model_name'] = str(model) metrics_dict['resample_method'] = 'Probalistic' metrics_dict['train_time']= time.time() - start_time return metrics_dict # + prob_results = [] for i in tqdm(prob_models): result = prob_method(i, X_train, Y_train) prob_results.append(result) # - # # Cost sensitive models # # Some machine learning algorithms can be adapted to pay more attention to one class than # another when fitting the model. These are referred to as cost-sensitive machine learning # models and they can be used for imbalanced classification by specifying a cost that is inversely # proportional to the class distribution. # # Cost-sensitive learning is a subfield of machine learning that takes the costs of prediction # errors (and potentially other costs) into account when training a machine learning model. It is # a field of study that is closely related to the field of imbalanced learning that is concerned with # classification on datasets with a skewed class distribution. 
cost_sens_models = [LogisticRegression(solver='lbfgs', class_weight='balanced'), DecisionTreeClassifier(class_weight='balanced')] def cost_method(model_name,xtrain,ytrain): model = model_name start_time = time.time() print(xtrain.shape,ytrain.shape) model.fit(xtrain, ytrain) pred = model.predict(X_test) metrics_dict = evaluate_model(Y_test, pred) metrics_dict['model_name'] = str(model) metrics_dict['resample_method'] = 'Cost Sensitive' metrics_dict['train_time']= time.time() - start_time return metrics_dict # + cost_results = [] for i in tqdm(cost_sens_models): result = cost_method(i, X_train, Y_train) cost_results.append(result) # - results = resample_results + prob_results + cost_results final = pd.DataFrame(results) final final.sort_values(by = 'balanced_accuracy_score', ascending=False) # # best model is: LogisticRegression() + ADASYN() # We can add new models or combinations as well. Because, a few approaches were applied.
imbalanced_data_handling.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python [conda env:fisi2028] # language: python # name: conda-env-fisi2028-py # --- import numpy as np import matplotlib.pyplot as plt Uakata = np.loadtxt('difusion.txt') # + n = 100 x = np.linspace(0,6,19) plt.plot(x,Uakata[n]) t = n*(10/100)# T=10; Nt = 100 plt.grid() plt.title("U(x) para el tiempo t = %s" % t ) plt.xlabel("distribución lineal [0,L]") plt.ylabel("Concentración del material [unidades :)]") # -
tareaMagC++/Grafica.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- import matplotlib.pyplot as plt import pandas as pd # + #file = "../code/data/as-skitter.lpairs.space.csv" #file = "../code/data/soc-slashdot.ghpairs.space.csv" file = "../code/cmake-build-release/soc-slashdot.ghpairs.space.csv" #file = "../code/data/soc-slashdot.lpairs.space.csv" df = pd.read_csv(file) # add new columns for lastLayer w/o inter for prefix in ['nodes','edges','dfsNodes','dfsEdges']: for dir in ['S','T']: newName = f"{prefix}InLastLayer{dir}\inter" lastLayer = f"{prefix}InLastLayer{dir}" intersection = prefix+"InInter" if prefix[0:3]=='dfs' and dir=='T': df[newName] = [0]*len(df) else: df[newName] = df[lastLayer] - df[intersection] # add flow column df['flow'] = df['flowBefore'].shift(-1, fill_value=0) - df['flowBefore'] df['flow'] = df['flow'].clip(lower=0) print(f'read {file} with {len(df)} rows') # + colors = { 'BeforeLastLayerS': '#56B4E9', 'InLastLayerS\inter': '0.75', 'InInter': '#D55E00', 'InLastLayerT\inter': '0.75', 'BeforeLastLayerT': '#009E73', } allEdges = [ 'edgesBeforeLastLayerS', 'edgesInLastLayerS\inter', 'edgesInInter', 'edgesInLastLayerT\inter', 'edgesBeforeLastLayerT' ] bfsEdges = ['edgesBeforeLastLayerS', 'edgesBeforeLastLayerT'] dfsEdges = [ 'dfsEdgesBeforeLastLayerS', #'dfsEdgesInLastLayerS\inter', 'dfsEdgesInInter', 'dfsEdgesInLastLayerT\inter', 'dfsEdgesBeforeLastLayerT' ] num_pairs = len(df[df.DIRECTION=='UNI'].groupby(['S','T'])) # + fig, axs = plt.subplots(1,2,figsize=(16, 2)) for dir, group in df.groupby('DIRECTION'): ax = axs[1] if dir=='BI' else axs[0] data= group.sum() ax.invert_yaxis() ax.xaxis.set_visible(False) ax.set_xlim(0, data[allEdges].sum()) # total space start = {} offset = 0 for cat in allEdges: start[cat] = offset ax.barh('ALL', data[cat], left=start[cat], label=cat[5:], 
color=colors[cat[5:]]) offset += data[cat] # bfs search space for cat in bfsEdges: ax.barh('BFS', data[cat], left=start[cat], color=colors[cat[5:]]) # dfs search space for cat in dfsEdges: ax.barh('DFS', data[cat], left=start['e'+cat[4:]], color=colors[cat[8:]]) ax2 = ax.twinx() ax2.set_yticks(ax.get_yticks()) ax2.set_ylim(ax.get_ylim()) ax2.set_yticklabels(f'{data[cats].sum()//num_pairs}' for cats in [allEdges, bfsEdges, dfsEdges]) ax.set_title(f'{dir}-directional Search') axs[0].legend(loc='lower right') plt.savefig('eval_space_total.pdf', bbox_inches='tight') plt.show() # + # aggregate totals numPairs = len(df.groupby(['S','T'])) totals = {'allNodes': 70068*numPairs, 'allEdges': 358647*2*numPairs} for dir, dir_data in df.groupby('DIRECTION'): runs = dir_data.groupby(['S','T']) agg = runs.sum().sum() totals[dir+'bfsEdges'] = agg[bfsEdges].sum() totals[dir+'dfsEdges'] = agg[dfsEdges].sum() numBFS = len(dir_data) numDFS = len(dir_data)-len(runs) print('Direction ', dir) print('pairs ', len(runs)) print('avg dist ', runs['distOfSink'].first().mean()) print('avg SDEG ', runs.SDEG.first().mean()) print('avg TDEG ', runs.TDEG.first().mean()) print('avg rounds ', runs.ROUND.max().add(1).mean()) print('avg flow/R ', dir_data.flow.sum() / runs.ROUND.max().sum()) print('avg BFS ', totals[dir+'bfsEdges']//numBFS) print('avg DFS ', totals[dir+'dfsEdges']//numDFS) print() pairs = df[df.DIRECTION=='BI'].groupby(['S','T']).max() print('trivial ', len(pairs[pairs[['SDEG','TDEG']].min(axis=1) == pairs.flowBefore])) print() print('UNI / ALL') for trav in ['bfs', 'dfs']: print(f'UNI-{trav} looks at', '%.5f' % (totals[f'UNI{trav}Edges'] / totals[f'allEdges']), f'times all edges') print('UNI / BI') for trav in ['bfs', 'dfs']: print(f'UNI-{trav} looks at', '%.2f' % (totals[f'UNI{trav}Edges'] / totals[f'BI{trav}Edges']), f'times BI-{trav} edges') # -
plots/paper_space.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 2
#     language: python
#     name: python2
# ---

# %matplotlib inline

# # Gromov-Wasserstein Barycenter example
#
# This example is designed to show how to use the Gromov-Wasserstein distance
# computation in POT.
#

# +
# Author: <NAME> <<EMAIL>>
#         <NAME> <<EMAIL>>
#
# License: MIT License

import numpy as np
import scipy as sp
import scipy.spatial  # makes sp.spatial.distance available below
import scipy.ndimage as spi
import matplotlib.pylab as pl
from sklearn import manifold
from sklearn.decomposition import PCA

import ot
# -

# Smacof MDS
# ----------
#
# This function allows to find an embedding of points given a dissimilarity matrix
# that will be given by the output of the algorithm
#

def smacof_mds(C, dim, max_iter=3000, eps=1e-9):
    """
    Returns an interpolated point cloud following the dissimilarity matrix C
    using SMACOF multidimensional scaling (MDS) in specific dimensionned
    target space

    Parameters
    ----------
    C : ndarray, shape (ns, ns)
        dissimilarity matrix
    dim : int
        dimension of the targeted space
    max_iter : int
        Maximum number of iterations of the SMACOF algorithm for a single run
    eps : float
        relative tolerance w.r.t stress to declare converge

    Returns
    -------
    npos : ndarray, shape (R, dim)
        Embedded coordinates of the interpolated point cloud
        (defined with one isometry)
    """
    rng = np.random.RandomState(seed=3)

    # Bug fix: the eps parameter was previously ignored -- both MDS
    # instances hardcoded eps=1e-9 regardless of the caller's value.
    mds = manifold.MDS(
        dim,
        max_iter=max_iter,
        eps=eps,
        dissimilarity='precomputed',
        n_init=1)
    pos = mds.fit(C).embedding_

    nmds = manifold.MDS(
        2,
        max_iter=max_iter,
        eps=eps,
        dissimilarity="precomputed",
        random_state=rng,
        n_init=1)
    npos = nmds.fit_transform(C, init=pos)

    return npos


# Data preparation
# ----------------
#
# The four distributions are constructed from 4 simple images
#

# +
def im2mat(I):
    """Converts an image to matrix (one pixel per line)"""
    return I.reshape((I.shape[0] * I.shape[1], I.shape[2]))


# NOTE(review): scipy.ndimage.imread was removed in SciPy >= 1.2; on modern
# SciPy these calls need imageio.imread (or matplotlib.pyplot.imread) instead.
square = spi.imread('../data/square.png').astype(np.float64)[:, :, 2] / 256
cross = spi.imread('../data/cross.png').astype(np.float64)[:, :, 2] / 256
triangle = spi.imread('../data/triangle.png').astype(np.float64)[:, :, 2] / 256
star = spi.imread('../data/star.png').astype(np.float64)[:, :, 2] / 256

shapes = [square, cross, triangle, star]

S = 4
xs = [[] for i in range(S)]

# Keep only the dark pixels of each 8x8 image as 2-D points (y flipped so the
# shapes are upright).
for nb in range(4):
    for i in range(8):
        for j in range(8):
            if shapes[nb][i, j] < 0.95:
                xs[nb].append([j, 8 - i])

xs = np.array([np.array(xs[0]), np.array(xs[1]),
               np.array(xs[2]), np.array(xs[3])])
# -

# Barycenter computation
# ----------------------
#

# +
ns = [len(xs[s]) for s in range(S)]
n_samples = 30

"""Compute all distances matrices for the four shapes"""
Cs = [sp.spatial.distance.cdist(xs[s], xs[s]) for s in range(S)]
Cs = [cs / cs.max() for cs in Cs]

ps = [ot.unif(ns[s]) for s in range(S)]
p = ot.unif(n_samples)

# interpolation weights 1/3-2/3 and 2/3-1/3 between each pair of shapes
lambdast = [[float(i) / 3, float(3 - i) / 3] for i in [1, 2]]

Ct01 = [0 for i in range(2)]
for i in range(2):
    Ct01[i] = ot.gromov.gromov_barycenters(n_samples, [Cs[0], Cs[1]],
                                           [ps[0], ps[1]], p, lambdast[i],
                                           'square_loss', 5e-4,
                                           max_iter=100, tol=1e-3)

Ct02 = [0 for i in range(2)]
for i in range(2):
    Ct02[i] = ot.gromov.gromov_barycenters(n_samples, [Cs[0], Cs[2]],
                                           [ps[0], ps[2]], p, lambdast[i],
                                           'square_loss', 5e-4,
                                           max_iter=100, tol=1e-3)

Ct13 = [0 for i in range(2)]
for i in range(2):
    Ct13[i] = ot.gromov.gromov_barycenters(n_samples, [Cs[1], Cs[3]],
                                           [ps[1], ps[3]], p, lambdast[i],
                                           'square_loss', 5e-4,
                                           max_iter=100, tol=1e-3)

Ct23 = [0 for i in range(2)]
for i in range(2):
    Ct23[i] = ot.gromov.gromov_barycenters(n_samples, [Cs[2], Cs[3]],
                                           [ps[2], ps[3]], p, lambdast[i],
                                           'square_loss', 5e-4,
                                           max_iter=100, tol=1e-3)
# -

# Visualization
# -------------
#
# The PCA helps in getting consistency between the rotations
#

# +
clf = PCA(n_components=2)
npos = [0, 0, 0, 0]
npos = [smacof_mds(Cs[s], 2) for s in range(S)]

npost01 = [0, 0]
npost01 = [smacof_mds(Ct01[s], 2) for s in range(2)]
npost01 = [clf.fit_transform(npost01[s]) for s in range(2)] npost02 = [0, 0] npost02 = [smacof_mds(Ct02[s], 2) for s in range(2)] npost02 = [clf.fit_transform(npost02[s]) for s in range(2)] npost13 = [0, 0] npost13 = [smacof_mds(Ct13[s], 2) for s in range(2)] npost13 = [clf.fit_transform(npost13[s]) for s in range(2)] npost23 = [0, 0] npost23 = [smacof_mds(Ct23[s], 2) for s in range(2)] npost23 = [clf.fit_transform(npost23[s]) for s in range(2)] fig = pl.figure(figsize=(10, 10)) ax1 = pl.subplot2grid((4, 4), (0, 0)) pl.xlim((-1, 1)) pl.ylim((-1, 1)) ax1.scatter(npos[0][:, 0], npos[0][:, 1], color='r') ax2 = pl.subplot2grid((4, 4), (0, 1)) pl.xlim((-1, 1)) pl.ylim((-1, 1)) ax2.scatter(npost01[1][:, 0], npost01[1][:, 1], color='b') ax3 = pl.subplot2grid((4, 4), (0, 2)) pl.xlim((-1, 1)) pl.ylim((-1, 1)) ax3.scatter(npost01[0][:, 0], npost01[0][:, 1], color='b') ax4 = pl.subplot2grid((4, 4), (0, 3)) pl.xlim((-1, 1)) pl.ylim((-1, 1)) ax4.scatter(npos[1][:, 0], npos[1][:, 1], color='r') ax5 = pl.subplot2grid((4, 4), (1, 0)) pl.xlim((-1, 1)) pl.ylim((-1, 1)) ax5.scatter(npost02[1][:, 0], npost02[1][:, 1], color='b') ax6 = pl.subplot2grid((4, 4), (1, 3)) pl.xlim((-1, 1)) pl.ylim((-1, 1)) ax6.scatter(npost13[1][:, 0], npost13[1][:, 1], color='b') ax7 = pl.subplot2grid((4, 4), (2, 0)) pl.xlim((-1, 1)) pl.ylim((-1, 1)) ax7.scatter(npost02[0][:, 0], npost02[0][:, 1], color='b') ax8 = pl.subplot2grid((4, 4), (2, 3)) pl.xlim((-1, 1)) pl.ylim((-1, 1)) ax8.scatter(npost13[0][:, 0], npost13[0][:, 1], color='b') ax9 = pl.subplot2grid((4, 4), (3, 0)) pl.xlim((-1, 1)) pl.ylim((-1, 1)) ax9.scatter(npos[2][:, 0], npos[2][:, 1], color='r') ax10 = pl.subplot2grid((4, 4), (3, 1)) pl.xlim((-1, 1)) pl.ylim((-1, 1)) ax10.scatter(npost23[1][:, 0], npost23[1][:, 1], color='b') ax11 = pl.subplot2grid((4, 4), (3, 2)) pl.xlim((-1, 1)) pl.ylim((-1, 1)) ax11.scatter(npost23[0][:, 0], npost23[0][:, 1], color='b') ax12 = pl.subplot2grid((4, 4), (3, 3)) pl.xlim((-1, 1)) pl.ylim((-1, 1)) 
ax12.scatter(npos[3][:, 0], npos[3][:, 1], color='r')
notebooks/plot_gromov_barycenter.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

import numpy as np
import matplotlib.pyplot as plt
import pandas as pd

plt.style.use('ggplot')

# ## Within Project
# ### data

# +
df = pd.read_csv("./within_proj_data.csv")

x = [i for i in range(16)]
xticks = ["Appcelerator Studio", "Aptana Studio", "Bamboo", "Clover",
          "Data Management", "DuraCloud", "JIRA Software", "Mesos",
          "Moodle", "Mule", "Mule Studio", "Spring XD",
          "Talend Data Quality", "Talend ESB", "Titanium", "Usergrid"]

# BUG FIX: this column was previously bound to `sp_maen` (typo) while every
# later cell referenced `sp_mean`, raising a NameError at the first plot.
sp_mean = df["SP_mean"].tolist()
sp_std = df["SP_std"].tolist()
gpt2sp = df["GPT2SP"].tolist()
deepse = df["DeepSE"].tolist()
lstm_rf = df["LSTM+RF"].tolist()
lstm_svm = df["LSTM+SVM"].tolist()
doc2vec_rf = df["Doc2Vec+RF"].tolist()
bow_rf = df["BOW+RF"].tolist()
lstm_atlm = df["LSTM+ATLM"].tolist()
lstm_lr = df["LSTM+LR"].tolist()
# -

# ### Plot - Simple

# +
plt.plot(x, gpt2sp, label='MAE of GPT2SP')
plt.plot(x, deepse, label='MAE of DeepSE')
plt.plot(x, sp_mean, label='SP Mean')
plt.plot(x, sp_std, label='SP STD')
plt.xticks(np.arange(min(x), max(x) + 1, 1.0))
ax = plt.gca()
ax.legend(fontsize='15')
ax.set_xticklabels(xticks, rotation=90, ha='left')
fig = plt.gcf()
fig.set_size_inches(10, 10)
fig.savefig('test2png.png', dpi=100)
# -

# ### Plot - Complete

# +
plt.plot(x, gpt2sp)
plt.plot(x, deepse)
plt.plot(x, sp_mean)
plt.plot(x, sp_std)
plt.plot(x, lstm_rf)
plt.plot(x, lstm_svm)
plt.plot(x, doc2vec_rf)
plt.plot(x, bow_rf)
plt.plot(x, lstm_atlm)
plt.plot(x, lstm_lr)
plt.xticks(np.arange(min(x), max(x) + 1, 1.0))
ax = plt.gca()
ax.set_xticklabels(xticks, rotation=90, ha='left')
fig = plt.gcf()
fig.set_size_inches(10, 10)
#fig.savefig('test2png.png', dpi=100)

# +
from numpy.random import randn
from scipy.stats import pearsonr

# Pearson correlation of each model's per-project MAE against the per-project
# story-point mean and standard deviation.  The original cell repeated the
# pearsonr/print pair 16 times; a table-driven loop emits identical output.
_models = [('GPT2SP', gpt2sp), ('DeepSE', deepse), ('LSTM+RF', lstm_rf),
           ('LSTM+SVM', lstm_svm), ('DOC2VEC+RF', doc2vec_rf),
           ('BOW+RF', bow_rf), ('LSTM+ATLM', lstm_atlm), ('LSTM+LR', lstm_lr)]

for name, preds in _models:
    corr, _ = pearsonr(preds, sp_mean)
    print('Pearsons correlation %s vs SP_mean: %.3f' % (name, corr))

print("---")

for name, preds in _models:
    corr, _ = pearsonr(preds, sp_std)
    print('Pearsons correlation %s vs SP_std: %.3f' % (name, corr))
# -
data_model_analysis/mae_analysis.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # Create All Run Files

# ### Prepare everything

# !pip install nose lightgbm trectools seaborn

# !bash -c 'cd ../../.. && make test-python'

# +
import sys

from trectools import TrecQrel, TrecRun, TrecEval

sys.path.append('../python')
from util import write_run_file


def used_anchor_text(run):
    """Heuristic from the run's file name: did training use anchor text?"""
    if 'marco-v1-no-anchor-no-orcas' in run.filename:
        return False
    names = ['all-50-features', 'marco-v1-all-121-features', 'marco-v1-no-orcas']
    return any(i in run.filename for i in names)


def used_orcas_text(run):
    """Heuristic from the run's file name: did training use ORCAS text?"""
    if 'marco-v1-no-anchor-no-orcas' in run.filename:
        return False
    names = ['marco-v1-all-121-features', 'marco-v1-no-anchor']
    return any(i in run.filename for i in names)


def eval_run(run, qrels):
    """Evaluate one TrecRun against qrels; returns a dict of MRR/nDCG metrics."""
    return {
        'run': run.get_runid().replace('train-with-', ''),
        'used-anchor-text': used_anchor_text(run),
        'used-orcas-text': used_orcas_text(run),
        'MRR': TrecEval(run, qrels).get_reciprocal_rank(),
        'MRR (Unjudged Removed)': TrecEval(run, qrels).get_reciprocal_rank(removeUnjudged=True),
        'ndcg@5': TrecEval(run, qrels).get_ndcg(depth=5),
        'ndcg@5 (Unjudged Removed)': TrecEval(run, qrels).get_ndcg(depth=5, removeUnjudged=True),
        'ndcg@10': TrecEval(run, qrels).get_ndcg(depth=10),
        'ndcg@10 (Unjudged Removed)': TrecEval(run, qrels).get_ndcg(depth=10, removeUnjudged=True),
    }
# -

# # Create Rankings for All 50 Features

DIR = '/mnt/ceph/storage/data-in-progress/data-research/web-search/TREC-21/lightgbm/all-50-features/'
# MODELS=!ls $DIR |grep 'train-with'
FEATURE_FILES = ['2021_trec_dl_test.rerank', 'docv2_dev1.rerank', 'docv2_dev2.rerank']

MODELS

# +
from tqdm import tqdm

# Re-rank every feature file with every trained model in DIR.
for model in tqdm(MODELS):
    for feature_file in FEATURE_FILES:
        out_file = DIR + model + '/' + (feature_file.replace('.rerank', '').replace('_', '-')) + '-run-file.txt'
        write_run_file(
            model_file=DIR + model + '/LightGBM_model.txt',
            feature_file=DIR + feature_file,
            system_name=model,
            out_file=out_file,
        )
# -

# # Create Rankings for 41 Features Without Anchor Text

DIR = '/mnt/ceph/storage/data-in-progress/data-research/web-search/TREC-21/lightgbm/2-no-anchor/'
# MODELS=!ls $DIR |grep 'train-with'
FEATURE_FILES = ['2021_trec_dl_test.rerank', 'docv2_dev1.rerank', 'docv2_dev2.rerank']

MODELS

# +
from tqdm import tqdm

for model in tqdm(MODELS):
    for feature_file in FEATURE_FILES:
        out_file = DIR + model + '/' + (feature_file.replace('.rerank', '').replace('_', '-')) + '-run-file.txt'
        write_run_file(
            model_file=DIR + model + '/LightGBM_model.txt',
            feature_file=DIR + feature_file,
            system_name=model,
            out_file=out_file,
        )
# -

# # Evaluation of Runs on DEV1

# # Evaluation of Runs on DEV2

# +
from trectools import TrecQrel, TrecRun, TrecEval

qrels = TrecQrel("/mnt/ceph/storage/data-in-progress/data-research/web-search/TREC-21/resources/docv2_dev2_qrels.tsv")

# Anserini BM25 run is the baseline; LightGBM runs are appended below.
run_files = [TrecRun('/mnt/ceph/storage/data-in-progress/data-research/web-search/TREC-21/lightgbm/anserini-docv2-dev2-run-file.txt')]

DIR = '/mnt/ceph/storage/data-in-progress/data-research/web-search/TREC-21/lightgbm/'
DIRS = ['all-50-features/', '2-no-anchor/']

for dir_name in DIRS:
    dir_name = DIR + dir_name
    # models = !ls $dir_name |grep 'train-with'
    for model in tqdm(models):
        run_files += [TrecRun(dir_name + model + '/docv2-dev2-run-file.txt')]
# -

# Expect from anserini docs an MRR of 0.1659
# BUG FIX: eval_run requires the qrels argument; it was previously called as
# eval_run(run_files[0]), raising a TypeError.
eval_run(run_files[0], qrels)

# +
import pandas as pd

df = []
for run in tqdm(run_files):
    # BUG FIX: qrels was missing here as well.
    df += [eval_run(run, qrels)]
df = pd.DataFrame(df)
df.sort_values('MRR', ascending=False)
# -

df.sort_values('ndcg@5', ascending=False)

# +
import seaborn as sns
import matplotlib.pyplot as plt

sns.catplot(data=df, x='run', y='ndcg@5', hue='used-anchor-text', kind='bar')
plt.xticks(rotation=45)
None
# -

# # Double Check that all files are parseable with trec_eval

for f in run_files:
    print('\n\n\n' + f.filename)
    # !trec_eval -m recip_rank $qrels.filename $f.filename

# !trec_eval -m recip_rank $qrels.filename /mnt/ceph/storage/data-in-progress/data-research/web-search/TREC-21/lightgbm/all-50-features/train-with-100-trees-no-val/docv2-dev2-run-file.txt

# !trec_eval -m recip_rank $qrels.filename /mnt/ceph/storage/data-in-progress/data-research/web-search/TREC-21/lightgbm/all-50-features/train-with-1000-trees-no-val/docv2-dev2-run-file.txt

# # Submitted Files
#
# - test topics
# - test reranking for one topc

# !md5sum /mnt/ceph/storage/data-in-progress/data-research/web-search/TREC-21/lightgbm/all-50-features/train-with-5000-trees-var-02/2021-trec-dl-test-run-file.txt

# !md5sum /mnt/ceph/storage/data-in-progress/data-research/web-search/TREC-21/lightgbm/2-no-anchor/train-with-5000-trees-var-02/2021-trec-dl-test-run-file.txt

# !md5sum /mnt/ceph/storage/data-in-progress/data-research/web-search/TREC-21/lightgbm/all-50-features/train-with-1000-trees/2021-trec-dl-test-run-file.txt

# !ls

for i in range(3):
    t = TrecRun('../../../runs-submitted/webis-dl-' + str(i + 1) + '.txt')
    print('\n\n\n' + t.get_runid())
    print(len(t.topics()))
    print(t.topics())

# # Create rankings on MS-Marco V1

DIR = '/mnt/ceph/storage/data-in-progress/data-research/web-search/TREC-21/lightgbm/marco-v1-all-121-features/'
# MODELS=!ls $DIR |grep 'train-with'
FEATURE_FILES = ['2021_trec_dl_test.rerank', 'docv2_dev1.rerank']

MODELS

# +
from tqdm import tqdm

DIRS = ['marco-v1-all-121-features/', 'marco-v1-no-anchor/',
        'marco-v1-no-orcas/', 'marco-v1-no-anchor-no-orcas/']

for model in MODELS:
    for tmp_dir in DIRS:
        DIR = '/mnt/ceph/storage/data-in-progress/data-research/web-search/TREC-21/lightgbm/' + tmp_dir
        for feature_file in tqdm(FEATURE_FILES):
            out_file = DIR + model + '/' + (feature_file.replace('.rerank', '').replace('_', '-')) + '-run-file.txt'
            write_run_file(
                model_file=DIR + model + '/LightGBM_model.txt',
                feature_file=DIR + feature_file,
                system_name=model,
                out_file=out_file,
            )
# -

# +
from trectools import TrecQrel, TrecRun, TrecEval

qrels = TrecQrel("/mnt/ceph/storage/data-in-progress/data-research/web-search/TREC-21/resources-trec-dl-20/msmarco-docdev-qrels.tsv")

DIR = '/mnt/ceph/storage/data-in-progress/data-research/web-search/TREC-21/lightgbm/'
# BUG FIX: the last entry was 'v1-no-anchor-no-orcas' (missing the 'marco-'
# prefix and the trailing '/'), which concatenated into a nonexistent path.
DIRS = ['marco-v1-all-121-features/', 'marco-v1-no-anchor/',
        'marco-v1-no-orcas/', 'marco-v1-no-anchor-no-orcas/']
# TODO: ADD Baseline
run_files = []
for dir_name in DIRS:
    dir_name = DIR + dir_name
    # models = !ls $dir_name |grep 'train-with'
    for model in tqdm(models):
        run_files += [TrecRun(dir_name + model + '/docv2-dev1-run-file.txt')]
# -

# +
import pandas as pd

df = []
for run in tqdm(run_files):
    df += [eval_run(run, qrels)]
df = pd.DataFrame(df)
df.sort_values('MRR', ascending=False)
# -

# +
# https://github.com/castorini/anserini/blob/master/docs/regressions-msmarco-doc.md
# BM25                              0.2310 (MAP)
# BM25 (tuned)                      0.2788 (MAP)
# BM25 (tuned) with docTTTTTquery   0.3270 (MRR)
# TODO Add: https://github.com/castorini/anserini/blob/master/docs/regressions-dl20-doc.md
# -

# +
import seaborn as sns
import matplotlib.pyplot as plt

sns.catplot(data=df[df['used-orcas-text'] == False], x='run', y='ndcg@5',
            hue='used-anchor-text', kind='bar')
plt.xticks(rotation=45)
None
# -

# +
from trectools import TrecQrel, TrecRun, TrecEval

qrels = TrecQrel("/mnt/ceph/storage/data-in-progress/data-research/web-search/TREC-21/resources-trec-dl-20/qrels.dl20-doc.txt")

DIR = '/mnt/ceph/storage/data-in-progress/data-research/web-search/TREC-21/lightgbm/'
DIRS = ['marco-v1-all-121-features/', 'marco-v1-no-anchor/',
        'marco-v1-no-orcas/', 'marco-v1-no-anchor-no-orcas/']

run_files = [TrecRun('/mnt/ceph/storage/data-in-progress/data-research/web-search/TREC-21/resources-trec-dl-20/msmarco-doctest2020-top100')]
for dir_name in DIRS:
    dir_name = DIR + dir_name
    # models = !ls $dir_name |grep 'train-with'
    for model in tqdm(models):
        run_files += [TrecRun(dir_name + model + '/2021-trec-dl-test-run-file.txt')]
# -

# +
import pandas as pd

df = []
for run in tqdm(run_files):
    # The runs cover all test topics; restrict each to the judged DL'20
    # topics before evaluating so unjudged topics don't drag down the means.
    tmp_run = TrecRun()
    tmp_run.filename = run.filename
    tmp_run.run_data = run.run_data.copy()
    tmp_run.run_data = tmp_run.run_data[tmp_run.run_data['query'].astype(int).isin(qrels.topics())].reset_index()

    df += [eval_run(tmp_run, qrels)]
df = pd.DataFrame(df)
df.sort_values('MRR', ascending=False)
# -

# +
import seaborn as sns
import matplotlib.pyplot as plt

sns.catplot(data=df[df['used-orcas-text'] == False], x='run', y='MRR',
            hue='used-anchor-text', kind='bar')
plt.xticks(rotation=45)
None
src/trec-dl-21-ltr/src/main/ipynb/create-run-files.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# +
from object_detection.utils import label_map_util
from object_detection.utils import visualization_utils as vis_util

import numpy as np
import os
import six.moves.urllib as urllib
import sys
import tarfile
import tensorflow as tf
import zipfile

from collections import defaultdict
from io import StringIO
from matplotlib import pyplot as plt
from PIL import Image

from object_detection.utils import ops as utils_ops

# Import everything needed to edit/save/watch video clips
from moviepy.editor import VideoFileClip
from IPython.display import HTML

# NOTE(review): lexicographic string comparison of versions is fragile
# (e.g. '1.10.0' < '1.4.0'); kept for compatibility with the original.
if tf.__version__ < '1.4.0':
    raise ImportError('Please upgrade your tensorflow installation to v1.4.* or later!')

# %matplotlib inline

# +
# What model to download.
MODEL_NAME = 'ssd_mobilenet_v1_coco_2017_11_17'
MODEL_FILE = MODEL_NAME + '.tar.gz'
DOWNLOAD_BASE = 'http://download.tensorflow.org/models/object_detection/'

# Path to frozen detection graph. This is the actual model that is used for the object detection.
PATH_TO_CKPT = MODEL_NAME + '/frozen_inference_graph.pb'

# List of the strings that is used to add correct label for each box.
PATH_TO_OBJ_DET = os.path.join('/', 'usr', 'local', 'lib', 'python3.5', 'dist-packages',
                               'tensorflow', 'models', 'research', 'object_detection')
PATH_TO_LABELS = os.path.join(PATH_TO_OBJ_DET, 'data', 'mscoco_label_map.pbtxt')

NUM_CLASSES = 90
# -

# Download the pretrained model archive and extract only the frozen graph.
opener = urllib.request.URLopener()
opener.retrieve(DOWNLOAD_BASE + MODEL_FILE, MODEL_FILE)
tar_file = tarfile.open(MODEL_FILE)
for file in tar_file.getmembers():
    file_name = os.path.basename(file.name)
    if 'frozen_inference_graph.pb' in file_name:
        tar_file.extract(file, os.getcwd())

# Load the frozen TensorFlow graph.
detection_graph = tf.Graph()
with detection_graph.as_default():
    od_graph_def = tf.GraphDef()
    with tf.gfile.GFile(PATH_TO_CKPT, 'rb') as fid:
        serialized_graph = fid.read()
        od_graph_def.ParseFromString(serialized_graph)
        tf.import_graph_def(od_graph_def, name='')

label_map = label_map_util.load_labelmap(PATH_TO_LABELS)
categories = label_map_util.convert_label_map_to_categories(
    label_map, max_num_classes=NUM_CLASSES, use_display_name=True)
category_index = label_map_util.create_category_index(categories)


def detect_objects(image_np, sess, detection_graph):
    """Run the detector on one RGB frame and draw boxes/labels in place.

    Returns the annotated frame (same array, modified by the visualizer).
    """
    # Expand dimensions since the model expects images to have shape: [1, None, None, 3]
    image_np_expanded = np.expand_dims(image_np, axis=0)
    image_tensor = detection_graph.get_tensor_by_name('image_tensor:0')

    # Each box represents a part of the image where a particular object was detected.
    boxes = detection_graph.get_tensor_by_name('detection_boxes:0')

    # Each score represents the level of confidence for each of the objects.
    # Score is shown on the result image, together with the class label.
    scores = detection_graph.get_tensor_by_name('detection_scores:0')
    classes = detection_graph.get_tensor_by_name('detection_classes:0')
    num_detections = detection_graph.get_tensor_by_name('num_detections:0')

    # Actual detection.
    (boxes, scores, classes, num_detections) = sess.run(
        [boxes, scores, classes, num_detections],
        feed_dict={image_tensor: image_np_expanded})

    # Visualization of the results of a detection.
    vis_util.visualize_boxes_and_labels_on_image_array(
        image_np,
        np.squeeze(boxes),
        np.squeeze(classes).astype(np.int32),
        np.squeeze(scores),
        category_index,
        min_score_thresh=.4,
        use_normalized_coordinates=True,
        line_thickness=3)
    return image_np


# BUG FIX: the original process_image opened a brand-new tf.Session for every
# single video frame, which is extremely slow.  Create one session up front
# and reuse it for all frames.
sess = tf.Session(graph=detection_graph)


def process_image(image):
    # NOTE: The output you return should be a color image (3 channel) for processing video below
    # you should return the final output (image with lines are drawn on lanes)
    return detect_objects(image, sess, detection_graph)


white_output = 'export/video_out.mp4'
clip1 = VideoFileClip("data/video.mp4").subclip(0, 10)
white_clip = clip1.fl_image(process_image)  # NOTE: this function expects color images!!
# %time white_clip.write_videofile(white_output, audio=False)
notebook/objectDetection/.ipynb_checkpoints/Video-checkpoint.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     name: python3
# ---

# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/Jackil1993/GPT3_SCM/blob/main/OOP_queue.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>

# + id="dHTx3fGBtB14"
import matplotlib.pyplot as plt
import seaborn as sns
import numpy as np
from numpy import random


# + id="Biba9Yk9trCR"
class Simulator():
    """Discrete-event simulation of a single-server queue.

    Inter-arrival times are exponential(beta); service times are
    triangular(a, b, c).  The simulation advances event by event
    (next arrival vs. next departure) and records the queue length
    over time.
    """

    def __init__(self, beta=2., a=0.8, b=1.3, c=4., verbose=False, pl=False):
        # Distribution parameters: exponential mean and triangular (left, mode, right).
        self.beta = beta
        self.a = a
        self.b = b
        self.c = c
        # Simulation state.
        self.clock = 0.0
        self.queue = []                      # placeholder tokens, one per waiting customer
        self.next_arr = self.interarrival()  # time of the next arrival event
        self.next_dep = float('inf')         # no departure scheduled while queue is empty
        # Trace of (time, queue length) samples, one per event.
        self.timings = []
        self.size = []
        self.verbose = verbose
        self.pl = pl

    def interarrival(self):
        """Draw one inter-arrival time."""
        return random.exponential(self.beta)

    def service(self):
        """Draw one service time."""
        return random.triangular(self.a, self.b, self.c)

    def arrival(self):
        """Process an arrival: advance clock, enqueue, schedule next events."""
        self.clock = self.next_arr
        self.next_arr = self.clock + self.interarrival()
        if not self.queue:
            # Server was idle, so the new customer starts service immediately.
            self.next_dep = self.clock + self.service()
        self.queue.append('o')
        if self.verbose:
            print('{0:.2f}: New customer has arrived. {1} customers are in the queue'.format(self.clock, len(self.queue)))

    def departure(self):
        """Process a departure: advance clock, dequeue, schedule next departure."""
        self.clock = self.next_dep
        # Entries are indistinguishable placeholders, so popping from either
        # end yields identical dynamics; only the queue length matters.
        self.queue.pop()
        if not self.queue:
            self.next_dep = float('inf')
        else:
            self.next_dep = self.clock + self.service()
        if self.verbose:
            print('{0:.2f}: A customer has been departured. {1} customers are in the queue'.format(self.clock, len(self.queue)))

    def advance_time(self):
        """Execute the earliest pending event and record the queue length."""
        if self.next_arr <= self.next_dep:
            self.arrival()
        else:
            self.departure()
        self.timings.append(self.clock)
        self.size.append(len(self.queue))

    def simulate(self, until=500.):
        """Run the simulation up to time `until` (default 500, as before).

        The horizon was previously hard-coded; the default preserves the
        original behavior for existing callers.
        """
        while self.clock < until:
            self.advance_time()
        if self.pl:
            plt.plot(self.timings, self.size, color='green')
            plt.xlabel('Time (days)')
            plt.ylabel('Q size')
            plt.grid(True)
            plt.show()


# + colab={"base_uri": "https://localhost:8080/", "height": 279} id="oVztbDERtbE8" outputId="f70acb44-a3ac-4c9f-b24d-f81163748016"
simulation = Simulator(pl=True)
simulation.simulate()

# + id="Jo5NoyC7t8Lk"
# Replicate the simulation to estimate the distribution of the mean queue size.
stats = []
for i in range(1000):
    simulation1 = Simulator()
    simulation1.simulate()
    stats.append(np.mean(simulation1.size))

# + colab={"base_uri": "https://localhost:8080/", "height": 283} id="VcVjD41sw-zI" outputId="9eb15209-dae3-4f62-f197-a7bc60f30292"
sns.histplot(stats, kde=True)
OOP_queue.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
# ---

# # Generate EAP for Allen morphologies - morphology 2
#
# Note: this notebook can be run only once. To generate a new simulation you
# have to restart the kernel (Kernel->Restart)

import neuron
from math import sin, cos
import numpy as np
import LFPy
import MEAutility as mu
import matplotlib.pyplot as plt
import neuroplotlib as npl
from pathlib import Path
import sys
from pprint import pprint
import os

from axon_velocity.models import insert_biophysics, insert_simple_biophysics, \
    get_default_biophysics_params, planarize_swc, save_cell, create_mea_probe, center_cell_xy
from axon_velocity import plot_amplitude_map, plot_peak_latency_map

# %matplotlib notebook

save_fig = False
save_results = True

# +
try:
    import neuron
except:
    print('NEURON is not installed.')

# NEURON mod mechanisms must be compiled before use (nrnivmodl).
mechanism_folder = Path('..') / 'mechanisms'

if not neuron.load_mechanisms(str(mechanism_folder)):
    print('Compile mod files in the mechanisms/ folder: from the mechanisms/ folder, run nrnivmodl')
# -

# simple biophysics: dendrite - pas / soma/axon HH
# "complex" biophysics: dendrite - pas / soma - na + kv1 / axon - nax + kv1
simple_biophysics = False

params_dict = get_default_biophysics_params()
pprint(params_dict)

# At this stage, one can also change the axial conductance (e.g. `sec.ra`),
# which likely affects the conduction velocity.
# The `planar` variable decides whether the z-axis is compressed (similar to a
# cell culture - `planar=True`) or the original morphology is used (`planar=False`).
planar = True
z_offset = 5  # distance between cell plane and mea plane
zspan = 0

# +
morphology_dir = Path('..') / 'neuromorpho' / 'allen_cell_types'
morph_id = '563818992'
original_morphology_path = [m for m in morphology_dir.iterdir()
                            if not m.name.startswith('.') and morph_id in str(m)][0]

if planar:
    morphology_path = planarize_swc(original_morphology_path, span_um=zspan)
else:
    morphology_path = original_morphology_path
# -

ax = npl.plot_neuron(morphology=str(morphology_path), plane='xy', color_axon='g')

if save_fig:
    fig = ax.get_figure()
    fig_folder = Path('..') / 'figures'
    # exist_ok avoids the race between the previous is_dir() check and makedirs.
    os.makedirs(fig_folder, exist_ok=True)
    fig.savefig(fig_folder / 'allen2.pdf')

cell = LFPy.Cell(str(morphology_path), v_init=params_dict['v_init'],
                 celsius=params_dict['celsius'],
                 Ra=params_dict['ra'], cm=params_dict['cm'], pt3d=True)

# center in the xy plane
center_cell_xy(cell)

ax = npl.plot_neuron(cell, plane='xy', color_axon='g')

# ### Insert cell biophysics
#
# Here we make the cell active by inserting biophysical mechanisms.

if simple_biophysics:
    insert_simple_biophysics(cell)
else:
    insert_biophysics(cell, params_dict)

# ### Stimulating the cell
#
# We can now add some stimulation. The stimulation can be a current clamp
# `iclamp` or synaptic inputs `syn`. The `stim_point` is where the cell will be
# stimulated (the closest cell segment to the `stim_point` is used).

# +
stim = 'syn'  # or syn

stim_idx = cell.somaidx
syn_input_times = np.arange(2, 6)

syn_params = {'idx': stim_idx,
              'e': 0,                   # reversal potential
              'syntype': 'ExpSyn',      # synapse type
              'tau': 2,                 # syn. time constant ms
              'weight': 0.05,           # syn. weight
              'record_current': True}   # syn. current record

clamp_params = {'idx': stim_idx,
                'pptype': 'IClamp',     # IClamp point process
                'dur': 300,             # dur in ms
                'amp': 2,               # amp in nA
                'delay': 5}             # delay in ms

#%%
if stim == 'syn':
    synapse = LFPy.Synapse(cell, **syn_params)
    synapse.set_spike_times(np.array(syn_input_times))
else:
    clamp = LFPy.StimIntElectrode(cell=cell, **clamp_params)

# +
# Place the MEA plane just outside the morphology (non-planar case picks the
# closer of the two z extremes; planar cells just use the fixed offset).
if not planar:
    shift_pos = np.min(cell.z) - z_offset
    shift_neg = np.max(cell.z) + z_offset
    if np.abs(shift_pos) > np.abs(shift_neg):
        shift = shift_neg
    else:
        shift = shift_pos
else:
    shift = z_offset

print(f"z-position of MEA: {shift}")
# -

# ### Define extracellular electrodes
#
# Let's now define the extracellular electrodes using the
# [MEAutility](https://meautility.readthedocs.io/en/latest/) package.

# +
mea_dim = 100       # n rows x n cols
mea_pitch = 17.5    # rows and cols pitch
elec_size = 5

hdmea = create_mea_probe(pitch=mea_pitch, dim=mea_dim,
                         elec_size=elec_size, z_offset=z_offset)

electrode = LFPy.RecExtElectrode(cell, probe=hdmea, n=10)
# -

# ### Run the simulation
#
# By passing the `electrode` argument `LFPy` also computes extracellular
# potentials. The `rec_vmem` argument allows to measure the membrane potential
# at all segments.

cell.simulate(probes=[electrode], rec_vmem=True)

eap = electrode.data * 1000  # mV --> uV

# ### Plot membrane potentials

soma_idx = cell.somaidx[0]
dend_idx = cell.get_closest_idx(-40, -120, 0)
axon_idx = cell.get_closest_idx(300, 60, 0)

plt.figure()
plt.plot(cell.tvec, cell.vmem[soma_idx], label='soma')
plt.plot(cell.tvec, cell.vmem[dend_idx], label='dend')
plt.plot(cell.tvec, cell.vmem[axon_idx], label='axon')
plt.legend()

# +
# cutout single template around the global minimum (the spike trough)
fs = 1 / cell.dt
ms_before = 2
ms_after = 10

_, min_idx = np.unravel_index(np.argmin(eap), eap.shape)
# -

eap_cut = eap[:, min_idx - int(ms_before * fs): min_idx + int(ms_after * fs)]

ax = mu.plot_mea_recording(eap_cut, hdmea, colors='gray')
npl.plot_neuron(cell, ax=ax, plane='xy', color='k', color_axon='g')

# ### Plot amplitude and peak latency map

plot_amplitude_map(eap_cut, hdmea.positions, log=True)

plot_peak_latency_map(eap_cut, hdmea.positions)

# ### Save templates and locations

template = eap_cut
locations = hdmea.positions[:, :-1]  # save only x-y positions

# save templates and locations
if save_results:
    data_folder = Path('..') / 'simulated_data' / 'allen'
    if planar:
        save_path = data_folder / f'allen2_planar_{zspan}um'
    else:
        save_path = data_folder / 'allen2_original'
    # exist_ok avoids the check-then-create race of the original code.
    os.makedirs(save_path, exist_ok=True)

    np.save(save_path / 'template.npy', template)
    np.save(save_path / 'locations.npy', locations)
    save_cell(cell, cell_name='allen2', save_folder=save_path)
simulations/simulation_notebooks/simulate_cell_3.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# ### This script is designed to take an input of location of a beacon in X
# and Y coordinates and bin size of histogram. It knows already the size of
# the arena and will produce a histogram/matrix of numbers for occupancy/area
# per each bin.

import matplotlib.pyplot as plt
import numpy as np
import matplotlib.patches as patches
from PIL import Image
import matplotlib.image as mpimg
import pandas as pd
import math

figures = 'C:/Users/Fabian/Desktop/Analysis/Round3_FS03_FS06/Figures/'
processed = 'C:/Users/Fabian/Desktop/Analysis/Round3_FS03_FS06/processed/'

# ### 1. input arena size

cut = 0  # keeping the cut where rectangle of arena ends
X_cut_min = -.59
Y_cut_max = 1.61
X_cut_max = .12
Y_cut_min = .00
print("area %s M*2" % ((X_cut_max - X_cut_min) * (Y_cut_max - Y_cut_min)))

# +
xcut_offset = 0
ycut_offset = 0

fig, ax1 = plt.subplots(1, 1, sharex=True, dpi=300, sharey=True)
fig.suptitle("Normalization visual")

# Arena outline (red) plus concentric distance bands around the beacon,
# clipped to the arena rectangle.
ax1.plot([(X_cut_min + cut) - xcut_offset, (X_cut_max - cut) - xcut_offset],
         [(Y_cut_max - cut) + ycut_offset, (Y_cut_max - cut) + ycut_offset], 'r-')
ax1.plot([(X_cut_min + cut) - xcut_offset, (X_cut_min + cut) - xcut_offset],
         [(Y_cut_min + cut) + ycut_offset, (Y_cut_max - cut) + ycut_offset], 'r-')
ax1.plot([(X_cut_max - cut) - xcut_offset, (X_cut_max - cut) - xcut_offset],
         [(Y_cut_min + cut) + ycut_offset, (Y_cut_max - cut) + ycut_offset], 'r-')
ax1.plot([(X_cut_max - cut) - xcut_offset, (X_cut_min + cut) - xcut_offset],
         [(Y_cut_min + cut) + ycut_offset, (Y_cut_min + cut) + ycut_offset], 'r-')
ax1.plot(-.45, .4, "go")

rectangle = patches.Rectangle((-.59, 0), .71, 1.61, color="green")
ax1.add_patch(rectangle)

color = iter(plt.cm.rainbow(np.linspace(0, 1, 10)))
for i in reversed(range(10)):
    c = next(color)
    patch = patches.Circle((-.45, .4), radius=.15 * i, color=c)
    ax1.add_patch(patch)
    patch.set_clip_path(rectangle)

ax1.axis("equal")
plt.show()
# -

# ## 2. Now define the area of circles mathematically
# ## a. get area of each circle without subtracting rectangle

from math import pi

r = float(input("Input the radius of the circle : "))
print("The area of the circle with radius " + str(r) + " is: " + str(pi * r ** 2))

# Area of each annulus (ring) between consecutive radii.
hist = []
prev = 0
for i in range(10):
    res = pi * (.15 * i) ** 2
    hist.append(res - prev)
    prev = res
print(hist)
plt.bar(range(10), hist)

# ### b. subtract the area from the rectangle area
# ## Tried mathematical approach on paper but there were at least 4 other
# combinations for which it would have to be calculated. So it is possible but
# would take too long to write the code, hence decided to use the pictures that
# can be generated and estimated the pixels using different grayscale values

# +
xcut_offset = 0
ycut_offset = 0


def visualization(center=(-.45, .4), X_cut_min=-.59, Y_cut_max=1.61,
                  X_cut_max=.12, Y_cut_min=.00):
    """Makes a visual representation of a banded rectangle with circles.
    To be exported and then can be counted."""
    fig, ax1 = plt.subplots(1, 1, sharex=True, dpi=400,)
    fig.suptitle("Normalization visual")
    ax1.plot(center[0], center[1], "go")
    rectangle = patches.Rectangle((X_cut_min, Y_cut_min),
                                  (abs(X_cut_min) + abs(X_cut_max)), Y_cut_max,
                                  color="green")
    ax1.add_patch(rectangle)
    color = iter(plt.cm.rainbow(np.linspace(0, 1, 10)))
    for i in reversed(range(10)):
        c = next(color)
        patch = patches.Circle((center[0], center[1]), radius=.15 * i, color=c)
        ax1.add_patch(patch)
        patch.set_clip_path(rectangle)
    ax1.axis("equal")
    return plt.show()


visualization()
# -

# +
xcut_offset = 0
ycut_offset = 0


def visualization_grey(center=(-.45, .4), dpi=500, X_cut_min=-.59,
                       Y_cut_max=1.61, X_cut_max=.12, Y_cut_min=.00):
    """Grayscale version: 20 concentric bands with distinct gray levels so the
    exported PNG's pixel histogram counts the area of each band."""
    fig, ax1 = plt.subplots(1, 1, sharex=True, dpi=dpi,)
    fig.patch.set_visible(False)
    rectangle = patches.Rectangle((X_cut_min, Y_cut_min),
                                  (abs(X_cut_min) + abs(X_cut_max)), Y_cut_max,
                                  color="black")
    ax1.add_patch(rectangle)
    color = iter(plt.cm.binary(np.linspace(.01, .99, 20)))
    for i in reversed(range(20)):
        c = next(color)
        patch = patches.Circle((center[0], center[1]), radius=.075 * i, color=c)
        ax1.add_patch(patch)
        patch.set_clip_path(rectangle)
    ax1.axis("equal")
    ax1.axis("off")
    mng = plt.get_current_fig_manager()
    mng.full_screen_toggle()
    fig.savefig(figures + 'norm_graph.png', dpi=dpi, transparent=True)
    return plt.show()


visualization_grey()
# -

# Count pixels per gray level: each histogram bin corresponds to one band.
img = mpimg.imread(figures + 'norm_graph.png')
counts, bins, bars = plt.hist(img.ravel(), bins=18, range=(0.01, .99),)
plt.hist(img.ravel(), bins=18, range=(0.01, .99), fc='k', ec='w')
plt.show()

imgplot = plt.imshow(img)

counts

img.ravel()

# Normalize the per-band pixel counts into area fractions.
norm = []
for count in counts:
    norm.append(count / int(sum(counts)))
norm

# ### Now take the counts and multiply the distributions correctly - so make a
# histogram for each time beacon changes

FS04 = pd.read_excel(processed + 'FS04_rears_new.xlsx', index_col=0)
FS04.head()


# +
def get_rear_distance_from_beacon(df_rears_corrected):
    """Euclidean distance between rear position (cols 1, 2) and beacon
    position (cols 4, 5) for each row.

    NOTE(review): positional column indexing — confirm the column order of
    the rears spreadsheet before reuse.
    """
    dist = []
    for row in df_rears_corrected.iterrows():
        dist.append(math.hypot(row[1][1] - row[1][4], row[1][2] - row[1][5]))
    return dist


plt.hist(get_rear_distance_from_beacon(FS04))


# +
def make_simple_graphs(animal_ID, rearing):
    """Plot rearing-distance histograms (visible/invisible beacon), raw and
    area-normalized, and save the figure."""
    binwidth = .075
    # BUG FIX: was `plt.tight_layout` (bare attribute access, a no-op);
    # the call parentheses were missing.
    plt.tight_layout()
    bins = np.arange(0, 1.425, binwidth)
    print(bins)
    bins[1] = 0.075  # no-op (arange already yields 0.075); kept for safety

    fig, ax = plt.subplots(4, dpi=800, sharex=False)
    fig.suptitle(animal_ID + ' rearing distance from beacons ephys', y=1)

    def _color_patches(patch_list):
        """First bin green (at-beacon), the rest blue."""
        for i in range(0, 1):
            patch_list[i].set_facecolor('g')
        for i in range(1, len(patch_list)):
            patch_list[i].set_facecolor('blue')

    N, bins, patches = ax[0].hist(
        get_rear_distance_from_beacon(rearing.loc[rearing['Visibility'] == 1]),
        bins=bins, ec='w')
    print(len(norm))
    print(len(bins))
    print(N * norm)
    ax[0].set_title('Visible attempt')
    _color_patches(patches)
    fig.tight_layout(pad=1.5)

    N1, bins, patches = ax[1].hist(
        get_rear_distance_from_beacon(rearing.loc[rearing['Visibility'] == 0]),
        bins=bins, ec='w')
    print(N1 * norm)
    ax[1].set_title('Invisible attempt')
    _color_patches(patches)
    fig.tight_layout(pad=1.5)

    # BUG FIX: was `ax[2].bar(N*norm, bins)`, which passed the normalized
    # counts as x positions and the bin EDGES as bar heights.  Plot the
    # normalized counts over the bin edges instead.
    # NOTE(review): confirm this matches the intended normalization display.
    patches = ax[2].bar(bins[:-1], N * norm, width=binwidth, align='edge', ec='w')
    ax[2].set_title('Visible attempt Normalized')
    _color_patches(patches)
    fig.tight_layout(pad=1.5)

    # BUG FIX: was `ax[3].hist(N1*norm, bins=bins)`, which histogrammed the
    # already-binned counts (and clobbered `bins`).  Use a bar plot like ax[2].
    patches = ax[3].bar(bins[:-1], N1 * norm, width=binwidth, align='edge', ec='w')
    ax[3].set_title('invisible attempt Normalized')
    _color_patches(patches)
    fig.tight_layout(pad=1.5)

    plt.savefig('%srat_rearing_distance_from_beacons_norm%s.png' % (figures, animal_ID), dpi=100)


make_simple_graphs('FS04', FS04)
# -
Code/.ipynb_checkpoints/20210811_FS_THE_WORKS_Normalize_occupancy_bins-checkpoint.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# + [markdown] id="ojm_6E9f9Kcf"
# # CNN 315
# Rather than partition train/validation sets, use different sets.
# Cut train set down to random LIMIT number of transcripts.
# Use all of the validation set.
# Use fit(shuffle) on the train set.
#
# TO DO:
# Save five best models to same directory to make it easy to delete.
#

# + colab={"base_uri": "https://localhost:8080/"} id="hh6XplUvC0j0" outputId="c5914db0-d2df-44df-d081-9ea1d2aff6a2"
# Data files: short sequences are used for training, long for validation.
TRAIN_NC_FILENAME='ncRNA.gc36.short.fasta'
TRAIN_PC_FILENAME='pcRNA.gc36.short.fasta'
VALID_NC_FILENAME='ncRNA.gc36.long.fasta'
VALID_PC_FILENAME='pcRNA.gc36.long.fasta'
#NC_FILENAME='ncRNA.tiny50.fasta'
#PC_FILENAME='pcRNA.tiny50.fasta'
#NC_FILENAME='ncRNA.gc34.processed.fasta'
#PC_FILENAME='pcRNA.gc34.processed.fasta'
MODEL_FILE='JUNK1'  # change this if you want to keep models
DATAPATH=''
# Detect Google Colab: if the drive mounts we are in Colab, otherwise local.
try:
    from google.colab import drive
    IN_COLAB = True
    PATH='/content/drive/'
    drive.mount(PATH)
    DATAPATH=PATH+'My Drive/data/'  # must end in "/"
except:
    IN_COLAB = False
    DATAPATH='data/'  # must end in "/"
TRAIN_NC_FILENAME = DATAPATH+TRAIN_NC_FILENAME
TRAIN_PC_FILENAME = DATAPATH+TRAIN_PC_FILENAME
VALID_NC_FILENAME = DATAPATH+VALID_NC_FILENAME
VALID_PC_FILENAME = DATAPATH+VALID_PC_FILENAME
MODEL_FILE=DATAPATH+MODEL_FILE

# Hyperparameters for the CNN and the data pipeline.
EPOCHS=200
SPLITS=5
K=1
VOCABULARY_SIZE=4**K+1  # e.g. K=3 => 64 DNA K-mers + 'NNN'
EMBED_DIMEN=2
FILTERS=32
KERNEL=3
NEURONS=32
DROP=0.25
ACT="tanh"
MINLEN=201
MAXLEN=2000
LIMIT=5588  # This is the size of our smallest set (protein coding short)

# + colab={"base_uri": "https://localhost:8080/"} id="e9TY3HK9ZklE" outputId="52d69a0b-c395-4802-9cd4-d0710165ef63"
# Load our own tools
# TO DO: don't go to GitHub if the file is already local.
GITHUB = True
if GITHUB:
    # #!pip install requests   # Uncomment this if necessary. Seems to be pre-installed.
    import requests
    r = requests.get('https://raw.githubusercontent.com/ShepherdCode/ShepherdML/master/Strings/tools_fasta.py')
    with open('tools_fasta.py', 'w') as f:
        f.write(r.text)
    # TO DO: delete the file after import
import tools_fasta as tools
tools.yahoo()  # If this prints "Yahoo!" then the import was successful.
TOOLS_CHANGED = False  # set to True to re-run with a new version of tools
if TOOLS_CHANGED:
    from importlib import reload
    tools=reload(tools)
    print(dir(tools))  # run this to see EVERYTHING in the tools module

# + id="VQY7aTj29Kch"
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from sklearn.model_selection import ShuffleSplit
from sklearn.model_selection import cross_val_score
from sklearn.model_selection import RepeatedKFold
from sklearn.model_selection import StratifiedKFold
import tensorflow as tf
from tensorflow import keras
import time
dt='float32'
tf.keras.backend.set_floatx(dt)

# + [markdown] id="j7jcg6Wl9Kc2"
# Build model

# + id="qLFNO1Xa9Kc3"
def compile_model(model):
    """Compile the given Keras model in place with binary cross-entropy loss,
    the default Adam optimizer, and accuracy as the metric; return the model."""
    ## learn rate = initial_learning_rate * decay_rate ^ (step / decay_steps)
    #adam_default_learn_rate = 0.001
    #schedule = tf.keras.optimizers.schedules.ExponentialDecay(
    #    initial_learning_rate = adam_default_learn_rate*10,
    #    decay_steps=10000, decay_rate=0.99, staircase=True)
    #alrd = tf.keras.optimizers.Adam(learning_rate=schedule)
    #model.compile(loss=bc, optimizer=alrd, metrics=["accuracy"])
    print("COMPILE...")
    bc=tf.keras.losses.BinaryCrossentropy(from_logits=False)
    model.compile(loss=bc, optimizer="adam", metrics=["accuracy"])
    print("...COMPILED")
    return model

def build_model():
    """Build and compile the CNN: two conv/pool stages, a flatten, two dense
    layers with dropout, and a single sigmoid output for binary classification.
    Hyperparameters come from the module-level constants above."""
    SHAPE=(MAXLEN,5)  # MAXLEN bases = time steps, 5 features = one hots
    clayer1 = keras.layers.Conv1D(FILTERS,KERNEL,activation=ACT,padding="same", input_shape=SHAPE)
    clayer2 = keras.layers.Conv1D(FILTERS,KERNEL,activation=ACT,padding="same")
    clayer3 = keras.layers.MaxPooling1D(2)
    clayer4 = keras.layers.Conv1D(FILTERS,KERNEL,activation=ACT,padding="same")
    clayer5 = keras.layers.Conv1D(FILTERS,KERNEL,activation=ACT,padding="same")
    clayer6 = keras.layers.MaxPooling1D(2)
    clayer7 = keras.layers.Flatten()
    dlayer1 = keras.layers.Dense(NEURONS, activation=ACT,dtype=dt, input_shape=[1000])
    dlayer2 = keras.layers.Dropout(DROP)
    dlayer3 = keras.layers.Dense(NEURONS, activation=ACT,dtype=dt)
    dlayer4 = keras.layers.Dropout(DROP)
    output_layer = keras.layers.Dense(1, activation="sigmoid", dtype=dt)
    cnn = keras.models.Sequential()
    cnn.add(clayer1)
    cnn.add(clayer2)
    cnn.add(clayer3)
    cnn.add(clayer4)
    cnn.add(clayer5)
    cnn.add(clayer6)
    cnn.add(clayer7)
    cnn.add(dlayer1)
    cnn.add(dlayer2)
    cnn.add(dlayer3)
    cnn.add(dlayer4)
    cnn.add(output_layer)
    mlpc = compile_model(cnn)
    return mlpc

# + [markdown] id="LdIS2utq9Kc9"
# Cross validation

# + id="BVo4tbB_9Kc-"
def do_cross_validation(X_train,y_train,given_model,X_valid,y_valid):
    """Train SPLITS fresh clones of given_model on (X_train, y_train), each
    evaluated against the same fixed validation set, and print the per-fold
    and summary accuracies. Best weights per fold are checkpointed to
    MODEL_FILE.cv.<fold>.best (monitoring val_accuracy); early stopping
    monitors val_loss."""
    cv_scores = []
    fold=0
    # When not using ShuffleSplit, make sure to randomize train data.
    #splitter = ShuffleSplit(n_splits=SPLITS, test_size=0.1, random_state=37863)
    #for train_index,valid_index in splitter.split(X):
    while fold < SPLITS:
        fold += 1
        # Avoid continually improving the same model.
        model = compile_model(keras.models.clone_model(given_model))
        bestname=MODEL_FILE+".cv."+str(fold)+".best"
        es = keras.callbacks.EarlyStopping(monitor='val_loss', patience=10, verbose=1)
        mc = keras.callbacks.ModelCheckpoint( filepath=bestname, save_best_only=True, monitor='val_accuracy', mode='max')
        mycallbacks = [es,mc]
        print("FIT")
        start_time=time.time()
        history=model.fit(X_train, y_train,  # batch_size=10, default=32 works nicely
                epochs=EPOCHS, verbose=1,  # verbose=1 for ascii art, verbose=0 for none
                callbacks=mycallbacks,
                shuffle=True,
                validation_data=(X_valid,y_valid) )
        end_time=time.time()
        elapsed_time=(end_time-start_time)
        print("Fold %d, %d epochs, %d sec"%(fold,EPOCHS,elapsed_time))
        # Plot the learning curves for this fold.
        pd.DataFrame(history.history).plot(figsize=(8,5))
        plt.grid(True)
        plt.gca().set_ylim(0,1)
        plt.show()
        # Reload the checkpointed best weights and score them on validation.
        best_model=keras.models.load_model(bestname)
        scores = best_model.evaluate(X_valid, y_valid, verbose=0)
        print("%s: %.2f%%" % (best_model.metrics_names[1], scores[1]*100))
        cv_scores.append(scores[1] * 100)
    print()
    print("%d-way Cross Validation max %.2f%%, mean %.2f%% (+/- %.2f%%)" % (fold, np.amax(cv_scores), np.mean(cv_scores), np.std(cv_scores)))

# + [markdown] id="qd3Wj_vI9KdP"
# ## Train on RNA lengths 200-1Kb

# + colab={"base_uri": "https://localhost:8080/"} id="G1HuSs8ZbeL4" outputId="3096bff3-4f56-491d-8d84-a1c07185e533"
print ("Compile the model")
model=build_model()
print ("Summarize the model")
print(model.summary())  # Print this only once
#model.save(MODEL_FILE+'.model')

# + colab={"base_uri": "https://localhost:8080/"} id="f8fNo6sn9KdH" outputId="9183e7e0-16d0-4046-809a-6a567ae8df83"
def load_data(nc_filename,pc_filename,limit=None):
    """Load non-coding (label 0) and protein-coding (label 1) FASTA files and
    return (X, y) where X is a one-hot encoded numpy array of sequences padded
    or truncated to MAXLEN, and y is the label vector. If limit is given, each
    class is randomly down-sampled toward that size (also shuffles order)."""
    nc_seq=tools.load_fasta(nc_filename,0)
    pc_seq=tools.load_fasta(pc_filename,1)
    if limit is not None:
        # Choose a random subset of size = limit.
        # Side effect: randomizes the data order.
        nfrac=1.0*limit/len(nc_seq)
        pfrac=1.0*limit/len(pc_seq)
        if nfrac<1.0:
            nc_seq=nc_seq.sample(frac=nfrac)
        if pfrac<1.0:
            pc_seq=pc_seq.sample(frac=pfrac)
    train_set=pd.concat((nc_seq,pc_seq),axis=0)
    # May not need to slice by length if input files already sliced
    subset=tools.make_slice(train_set,MINLEN,MAXLEN)
    # randset=subset.sample(frac=1)  # unneccessary with fit(shuffle)
    (X1,y1)=tools.separate_X_and_y(subset)
    # X1 is pandas df of ("list" of one sequence)
    X2=X1.to_numpy()  # numpy ndarray of ("list" of one sequence)
    X3=[elem[0] for elem in X2]  # numpy dnarray of str
    X4=tools.uniform_length(X3,MAXLEN)
    X5= [tools.one_hot(x) for x in X4]
    X6=np.asarray(X5)
    y6=y1.to_numpy()
    return X6,y6

print("Load train data from files.")
X_train,y_train = load_data(TRAIN_NC_FILENAME,TRAIN_PC_FILENAME,LIMIT)
print("X_train",X_train.shape)
print("Load valid data from files.")
X_valid,y_valid = load_data(VALID_NC_FILENAME,VALID_PC_FILENAME)
print("X_valid",X_valid.shape)

# + colab={"base_uri": "https://localhost:8080/", "height": 1000} id="mQ8eW5Rg9KdQ" outputId="32861342-2253-486c-cdf8-76961ebf9a2c"
print ("Cross valiation")
do_cross_validation(X_train,y_train,model,X_valid,y_valid)
print ("Done")

# + id="p4fh2GI8beMQ"
Nasa2021/CNN_315.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     name: python3
# ---

# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/satojkovic/runnable_tutorials/blob/main/keras_gradcam.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>

# + id="7NhOsU3ebelp"
import numpy as np
import tensorflow as tf
from tensorflow import keras

# + id="jbla7UJIby3l"
from IPython.display import Image, display
import matplotlib.pyplot as plt
import matplotlib.cm as cm

# + id="ifKKV3u6b9xT"
# Use Xception pretrained on ImageNet; 299x299 is its expected input size.
model_builder = keras.applications.xception.Xception
img_size = (299, 299)
preprocess_input = keras.applications.xception.preprocess_input
decode_predictions = keras.applications.xception.decode_predictions

# + id="Gc2Uck6lcNuE"
model = model_builder(weights='imagenet')

# + id="a6C_7ltIcTBb"
model.summary()

# + id="Als2GRdHcmYM"
# Grad-CAM is computed against this last convolutional activation layer.
last_conv_layer_name = 'block14_sepconv2_act'
img_path = keras.utils.get_file(
    'african_elephant.jpg', 'https://i.imgur.com/Bvro0YD.png'
)

# + id="_hqTueP1daRs"
display(Image(img_path))


# + id="uGHT372OdgLB"
def get_img_array(img_path, size):
    """Load the image at img_path and return it as a float32 array of shape
    (1, *size, 3), ready for a Keras model."""
    # `img` is a PIL image of size 299x299
    img = keras.preprocessing.image.load_img(img_path, target_size=size)
    # `array` is a float32 numpy array of shape 299x299x3
    array = keras.preprocessing.image.img_to_array(img)
    # Add batch dimension
    array = np.expand_dims(array, axis=0)
    return array


# + id="8goD5UMbeVT6"
def make_gradcam_heatmap(img_array, model, last_conv_layer_name, pred_index=None):
    """Return a Grad-CAM heatmap (2-D numpy array normalized to [0, 1]) for the
    class pred_index (top predicted class when None), computed from the
    activations of last_conv_layer_name. Also prints intermediate shapes for
    debugging."""
    # outputs=[the activation of the last conv layer, the output predictions]
    grad_model = tf.keras.models.Model(
        inputs=[model.inputs],
        outputs=[model.get_layer(last_conv_layer_name).output, model.output]
    )
    # Compute the gradient of the top predicted class for our input image
    # with respect to the activations of the last conv layer
    with tf.GradientTape() as tape:
        last_conv_layer_output, preds = grad_model(img_array)
        if pred_index is None:
            pred_index = tf.argmax(preds[0])
        class_channel = preds[:, pred_index]
    print('last_conv_layer_output:', last_conv_layer_output.get_shape().as_list())
    print('preds:', preds.get_shape().as_list())
    tf.print('class_chanel value:', class_channel)
    # This is the gradient of the output neuron
    # with regard to the output feature map of the last conv layer
    grads = tape.gradient(class_channel, last_conv_layer_output)
    print('grads:', tf.shape(grads))
    # This is a vector where each entry is the mean intensity of the gradient
    # over a specific feature map channel
    pooled_grads = tf.reduce_mean(grads, axis=(0, 1, 2))
    print('pooled_grads:', tf.shape(pooled_grads))
    # We multiply each channel in the feature map array
    # by "how important this channel is" with regard to top predicted class
    # then sum all the channels to obtain the heatmap class activation
    last_conv_layer_output = last_conv_layer_output[0]
    heatmap = last_conv_layer_output @ pooled_grads[..., tf.newaxis]
    print('heatmap:', tf.shape(heatmap))
    heatmap = tf.squeeze(heatmap)
    print('heatmap(squeeze)', tf.shape(heatmap))
    # For visualization purpose, we will also normalize the heatmap between [0, 1]
    heatmap = tf.maximum(heatmap, 0) / tf.math.reduce_max(heatmap)
    return heatmap.numpy()


# + id="ZX40usagxWIg"
from tensorflow.python.ops.gen_nn_ops import top_k  # NOTE(review): unused private TF import -- candidate for removal

img_array = preprocess_input(get_img_array(img_path, size=img_size))
model = model_builder(weights='imagenet')
# Remove last layer's softmax
model.layers[-1].activation = None
preds = model.predict(img_array)
print('Predicted:', decode_predictions(preds, top=1)[0])
heatmap = make_gradcam_heatmap(img_array, model, last_conv_layer_name)
plt.matshow(heatmap)
plt.show()


# + id="jo4-Yq851IZE"
def save_and_display_gradcam(img_path, heatmap, cam_path='cam.jpg', alpha=0.4):
    """Colorize heatmap with the jet colormap, resize it to the image at
    img_path, blend the two with weight alpha, save to cam_path and display."""
    img = keras.preprocessing.image.load_img(img_path)
    img = keras.preprocessing.image.img_to_array(img)

    # Rescale heatmap to 0-255 so it can index the 256-entry colormap table.
    heatmap = np.uint8(255 * heatmap)

    jet = cm.get_cmap('jet')  # NOTE(review): deprecated in newer Matplotlib; matplotlib.colormaps['jet'] is the replacement

    # Use RGB values of the colormap
    jet_colors = jet(np.arange(256))[:, :3]
    jet_heatmap = jet_colors[heatmap]

    # Create an image with colorized heatmap
    jet_heatmap = keras.preprocessing.image.array_to_img(jet_heatmap)
    jet_heatmap = jet_heatmap.resize((img.shape[1], img.shape[0]))
    jet_heatmap = keras.preprocessing.image.img_to_array(jet_heatmap)

    # Superimpose heatmap on the original image
    superimposed_img = jet_heatmap * alpha + img
    superimposed_img = keras.preprocessing.image.array_to_img(superimposed_img)

    # Save the superimposed image
    superimposed_img.save(cam_path)

    # Display
    display(Image(cam_path))


# + id="Fe0KS3w25XRq"
save_and_display_gradcam(img_path, heatmap)

# + id="il0kHESq5ck6"
img_path = keras.utils.get_file(
    "cat_and_dog.jpg",
    "https://storage.googleapis.com/petbacker/images/blog/2017/dog-and-cat-cover.jpg",
)
display(Image(img_path))

# Prepare image
img_array = preprocess_input(get_img_array(img_path, size=img_size))

# Print what the two top predicted classes are
preds = model.predict(img_array)
print("Predicted:", decode_predictions(preds, top=2)[0])

# + id="PSNB7hp38rdh"
heatmap = make_gradcam_heatmap(img_array, model, last_conv_layer_name, pred_index=260)
save_and_display_gradcam(img_path, heatmap)

# + id="0hJC300I8zrH"
heatmap = make_gradcam_heatmap(img_array, model, last_conv_layer_name, pred_index=285)
save_and_display_gradcam(img_path, heatmap)

# + id="gm_cZQyP81_F"
tensorflow/keras_gradcam.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3 (ipykernel)
#     language: python
#     name: python3
# ---

# ![](https://images.unsplash.com/photo-1602084551218-a28205125639?ixlib=rb-1.2.1&ixid=MnwxMjA3fDB8MHxwaG90by1wYWdlfHx8fGVufDB8fHx8&auto=format&fit=crop&w=2070&q=80)

# <div class = 'alert alert-block alert-info'
#      style = 'background-color:#4c1c84;
#               color:#eeebf1;
#               border-width:5px;
#               border-color:#4c1c84;
#               font-family:Comic Sans MS;
#               border-radius: 50px 50px'>
# <p style = 'font-size:24px'>Exp 035</p>
# <a href = "#Config"
#    style = "color:#eeebf1;
#             font-size:14px">1.Config</a><br>
# <a href = "#Settings"
#    style = "color:#eeebf1;
#             font-size:14px">2.Settings</a><br>
# <a href = "#Data-Load"
#    style = "color:#eeebf1;
#             font-size:14px">3.Data Load</a><br>
# <a href = "#Pytorch-Settings"
#    style = "color:#eeebf1;
#             font-size:14px">4.Pytorch Settings</a><br>
# <a href = "#Training"
#    style = "color:#eeebf1;
#             font-size:14px">5.Training</a><br>
# </div>
#
# <p style = 'font-size:24px;
#             color:#4c1c84'>
# What was done in this experiment
# </p>
# <li style = "color:#4c1c84;
#              font-size:14px">Data used: Jigsaw-Unbiased</li>
# <li style = "color:#4c1c84;
#              font-size:14px">Model used: unitary/toxic-bert</li>
# <li style = "color:#4c1c84;
#              font-size:14px">Visualization of attention</li>
# <br>
# <h1 style = "font-size:45px; font-family:Comic Sans MS ; font-weight : normal; background-color: #4c1c84 ; color : #eeebf1; text-align: center; border-radius: 100px 100px;">
# Config
# </h1>
# <br>

# Make vendored utility packages importable.
import sys
sys.path.append("../src/utils/iterative-stratification/")
sys.path.append("../src/utils/detoxify")
sys.path.append("../src/utils/coral-pytorch/")
sys.path.append("../src/utils/pyspellchecker")

# +
import warnings
warnings.simplefilter('ignore')

import os
import gc
gc.enable()
import sys
import glob
import copy
import math
import time
import random
import string
import psutil
import pathlib
from pathlib import Path
from contextlib import contextmanager
from collections import defaultdict
from box import Box
from typing import Optional
from pprint import pprint

import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import japanize_matplotlib

from tqdm.auto import tqdm as tqdmp
from tqdm.autonotebook import tqdm as tqdm
tqdmp.pandas()

## Model
from sklearn.metrics import mean_squared_error
from sklearn.model_selection import StratifiedKFold, KFold
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.utils.data import Dataset, DataLoader
from transformers import AutoTokenizer, AutoModel, AdamW, AutoModelForSequenceClassification
from transformers import RobertaModel, RobertaForSequenceClassification
from transformers import RobertaTokenizer
from transformers import LukeTokenizer, LukeModel, LukeConfig
from transformers import get_linear_schedule_with_warmup, get_cosine_schedule_with_warmup
from transformers import BertTokenizer, BertForSequenceClassification, BertForMaskedLM
from transformers import RobertaTokenizer, RobertaForSequenceClassification
from transformers import XLMRobertaTokenizer, XLMRobertaForSequenceClassification
from transformers import DebertaTokenizer, DebertaModel

# Pytorch Lightning
import pytorch_lightning as pl
from pytorch_lightning.utilities.seed import seed_everything
from pytorch_lightning import callbacks
from pytorch_lightning.callbacks.progress import ProgressBarBase
from pytorch_lightning import LightningDataModule, LightningDataModule
from pytorch_lightning import Trainer
from pytorch_lightning.callbacks import ModelCheckpoint, EarlyStopping, LearningRateMonitor
from pytorch_lightning.loggers import WandbLogger
from pytorch_lightning.loggers.csv_logs import CSVLogger
from pytorch_lightning.callbacks import RichProgressBar

from sklearn.linear_model import Ridge
from sklearn.svm import SVC, SVR
from sklearn.feature_extraction.text import TfidfVectorizer
from scipy.stats import rankdata

from cuml.svm import SVR as cuml_SVR
from cuml.linear_model import Ridge as cuml_Ridge
import cudf

from detoxify import Detoxify
from iterstrat.ml_stratifiers import MultilabelStratifiedKFold
from ast import literal_eval
from nltk.tokenize import TweetTokenizer
import spacy
from scipy.stats import sem
from copy import deepcopy
from spellchecker import SpellChecker
from typing import Text, Set, List

# +
import torch

# Experiment configuration; wrapped in Box below for attribute access.
config = {
    "exp_comment":"Jigsaw-Classification をHateBERTで学習",
    "seed": 42,
    "root": "/content/drive/MyDrive/kaggle/Jigsaw/raw",
    "n_fold": 5,
    "epoch": 5,
    "max_length": 256,
    "environment": "AWS",
    "project": "Jigsaw",
    "entity": "dataskywalker",
    "exp_name": "035_exp",
    "margin": 0.5,
    "train_fold": [0, 1, 2, 3, 4],
    "trainer": {
        "gpus": 1,
        "accumulate_grad_batches": 8,
        "progress_bar_refresh_rate": 1,
        "fast_dev_run": True,
        "num_sanity_val_steps": 0,
    },
    "train_loader": {
        "batch_size": 8,
        "shuffle": True,
        "num_workers": 1,
        "pin_memory": True,
        "drop_last": True,
    },
    "valid_loader": {
        "batch_size": 2,
        "shuffle": False,
        "num_workers": 1,
        "pin_memory": True,
        "drop_last": False,
    },
    "test_loader": {
        "batch_size": 2,
        "shuffle": False,
        "num_workers": 1,
        "pin_memory": True,
        "drop_last": False,
    },
    "backbone": {
        "name": "GroNLP/hateBERT",
        "output_dim": 1,
    },
    "optimizer": {
        "name": "torch.optim.AdamW",
        "params": {
            "lr": 1e-6,
        },
    },
    "scheduler": {
        "name": "torch.optim.lr_scheduler.CosineAnnealingWarmRestarts",
        "params": {
            "T_0": 20,
            "eta_min": 0,
        },
    },
    "loss": "nn.MSELoss",
}

config = Box(config)
config.tokenizer = AutoTokenizer.from_pretrained(config.backbone.name)
config.model = BertForMaskedLM.from_pretrained(config.backbone.name)
# pprint(config)

# +
# Cache the tokenizer and pretrained masked-LM weights locally.
config.tokenizer.save_pretrained(f"../data/processed/{config.backbone.name}")
pretrain_model = BertForMaskedLM.from_pretrained(config.backbone.name)
pretrain_model.save_pretrained(f"../data/processed/{config.backbone.name}")

# +
# I move back and forth between AWS, the Kaggle environment, and Google Colab,
# so the per-environment paths are consolidated here.
import os
import sys
from pathlib import Path

if config.environment == 'AWS':
    INPUT_DIR = Path('/mnt/work/data/kaggle/Jigsaw/')
    MODEL_DIR = Path(f'../models/{config.exp_name}/')
    OUTPUT_DIR = Path(f'../data/interim/{config.exp_name}/')
    UTIL_DIR = Path('/mnt/work/shimizu/kaggle/PetFinder/src/utils')
    os.makedirs(MODEL_DIR, exist_ok=True)
    os.makedirs(OUTPUT_DIR, exist_ok=True)
    print(f"Your environment is 'AWS'.\nINPUT_DIR is {INPUT_DIR}\nMODEL_DIR is {MODEL_DIR}\nOUTPUT_DIR is {OUTPUT_DIR}\nUTIL_DIR is {UTIL_DIR}")
elif config.environment == 'Kaggle':
    INPUT_DIR = Path('../input/*****')
    MODEL_DIR = Path('./')
    OUTPUT_DIR = Path('./')
    print(f"Your environment is 'Kaggle'.\nINPUT_DIR is {INPUT_DIR}\nMODEL_DIR is {MODEL_DIR}\nOUTPUT_DIR is {OUTPUT_DIR}")
elif config.environment == 'Colab':
    INPUT_DIR = Path('/content/drive/MyDrive/kaggle/Jigsaw/raw')
    BASE_DIR = Path("/content/drive/MyDrive/kaggle/Jigsaw/interim")
    MODEL_DIR = BASE_DIR / f'{config.exp_name}'
    OUTPUT_DIR = BASE_DIR / f'{config.exp_name}/'
    os.makedirs(MODEL_DIR, exist_ok=True)
    os.makedirs(OUTPUT_DIR, exist_ok=True)
    if not os.path.exists(INPUT_DIR):
        print('Please Mount your Google Drive.')
    else:
        print(f"Your environment is 'Colab'.\nINPUT_DIR is {INPUT_DIR}\nMODEL_DIR is {MODEL_DIR}\nOUTPUT_DIR is {OUTPUT_DIR}")
else:
    print("Please choose 'AWS' or 'Kaggle' or 'Colab'.\nINPUT_DIR is not found.")
# -

# Fix random seeds for reproducibility.
seed_everything(config.seed)


## Elapsed time and memory measurement
@contextmanager
def timer(name:str, slack:bool=False):
    """Context manager that prints elapsed wall time and resident-memory delta
    for the wrapped block, labelled with `name`."""
    t0 = time.time()
    p = psutil.Process(os.getpid())
    m0 = p.memory_info()[0] / 2. ** 30  # RSS in GiB
    print(f'<< {name} >> Start')
    yield
    m1 = p.memory_info()[0] / 2. ** 30
    delta = m1 - m0
    sign = '+' if delta >= 0 else '-'
    delta = math.fabs(delta)
    print(f"<< {name} >> {m1:.1f}GB({sign}{delta:.1f}GB):{time.time() - t0:.1f}sec", file=sys.stderr)

# + [markdown] id="zWE2XhHeTFos"
# <br>
# <h1 style = "font-size:45px; font-family:Comic Sans MS ; font-weight : normal; background-color: #4c1c84 ; color : #eeebf1; text-align: center; border-radius: 100px 100px;">
# Data Load
# </h1>
# <br>

# + colab={"base_uri": "https://localhost:8080/"} id="3DFxNX0CTD9t" outputId="240b449b-9f09-4519-d155-b4f865053621"
## Data Check
for dirnames, _, filenames in os.walk(INPUT_DIR):
    for filename in filenames:
        print(f'{dirnames}/(unknown)')

# +
val_df = pd.read_csv("/mnt/work/data/kaggle/Jigsaw/validation_data.csv")
test_df = pd.read_csv("/mnt/work/data/kaggle/Jigsaw/comments_to_score.csv")
display(val_df.head())
display(test_df.head())
# -

# <br>
# <h2 style = "font-size:45px;
#              font-family:Comic Sans MS ;
#              font-weight : normal;
#              background-color: #eeebf1 ;
#              color : #4c1c84;
#              text-align: center;
#              border-radius: 100px 100px;">
# Jigsaw Classification
# </h2>
# <br>

train_df = pd.read_csv("../data/external/jigsaw-unbiased/train.csv")
display(train_df.head(10))
display(train_df.shape)

# Flag comments containing a colon, then replace colons with commas everywhere.
train_df["is_colon"] = train_df["comment_text"].progress_apply(lambda x:1 if ":" in x else 0)


def preprocess_text(txt:str) -> str:
    """Return txt with every ':' replaced by ','."""
    new_texts = txt
    new_texts = new_texts.replace(":", ",")
    return new_texts


train_df["text"] = train_df["comment_text"].progress_apply(preprocess_text)
test_df["text"] = test_df["text"].progress_apply(preprocess_text)
val_df["less_toxic"] = val_df["less_toxic"].progress_apply(preprocess_text)
val_df["more_toxic"] = val_df["more_toxic"].progress_apply(preprocess_text)

# +
import re

spell = SpellChecker(distance=1)


def misspelt_words_fn(dataframe: pd.DataFrame, col="text") -> Set[Text]:
    """Collect the set of words in dataframe[col] unknown to the spell checker."""
    misspelt_words = set()
    for tweet in dataframe[col].str.casefold():
        [misspelt_words.add(word) for word in spell.unknown(tweet.split())]
    return misspelt_words


WORD = re.compile(r'\w+')


def reTokenize(tweet: Text) -> List[Text]:
    """Lower-case tweet and split it into word tokens."""
    return WORD.findall(tweet.casefold())


PATTERN = re.compile(r"(.)\1{2,}")


def reduce_lengthening(text: Text) -> Text:
    """Collapse runs of 3+ identical characters down to 2 (e.g. 'soooo' -> 'soo')."""
    return PATTERN.sub(r"\1\1", text)


def spell_correction(text: Text) -> Text:
    """Correct tokens found in the module-level `misspelt_words` set and rejoin.

    NOTE(review): relies on the global `misspelt_words` being rebuilt for each
    column before use -- see the application cell below.
    """
    return ' '.join([spell.correction(word) if word in misspelt_words else word for word in reTokenize(reduce_lengthening(text))])


# +
misspelt_words = misspelt_words_fn(train_df, "text")
train_df["text"] = train_df["text"].progress_apply(spell_correction)
misspelt_words = misspelt_words_fn(test_df, "text")
test_df["text"] = test_df["text"].progress_apply(spell_correction)
misspelt_words = misspelt_words_fn(val_df, "less_toxic")
val_df["less_toxic"] = val_df["less_toxic"].progress_apply(spell_correction)
misspelt_words = misspelt_words_fn(val_df, "more_toxic")
val_df["more_toxic"] = val_df["more_toxic"].progress_apply(spell_correction)

# +
target_cols = [
    "target",
    "severe_toxic",
    "obscene",
    "threat",
    "insult",
    "identity_hate"
]

plt.figure(figsize=(12, 5))
sns.histplot(train_df["target"], color="#4c1c84")
plt.grid()
plt.show()


# -

def sample_df(df:pd.DataFrame, frac=0.2):
    '''
    Extract the toxic and non-toxic rows from train_df.
    `frac` controls what fraction of the non-toxic rows is kept.
    '''
    print(f"Before: {df.shape}")
    label_cols = [
        "target",
        "severe_toxicity",
        "identity_attack",
        "insult",
        "threat",
        "sexual_explicit"
    ]
    # y > 0 means at least one toxicity label is non-zero.
    df["y"] = df[label_cols].sum(axis=1)
    df["y"] = df["y"]/df["y"].max()
    toxic_df = df[df["y"]>0].reset_index(drop=True)
    nontoxic_df = df[df["y"]==0].reset_index(drop=True)
    nontoxic_df = nontoxic_df.sample(frac=frac, random_state=config.seed)
    df = pd.concat([toxic_df, nontoxic_df], axis=0).sample(frac=1).reset_index(drop=True)
    print(f"After: {df.shape}")
    return df


with timer("sampling df"):
    train_df = sample_df(train_df, frac=0.01)
display(train_df.head())

# NOTE(review): redefines target_cols with the Jigsaw-Unbiased column names,
# shadowing the earlier list above.
target_cols = [
    "target",
    "severe_toxicity",
    "identity_attack",
    "insult",
    "threat",
    "sexual_explicit"
]

# <br>
# <h1 style = "font-size:45px; font-family:Comic Sans MS ; font-weight : normal; background-color: #4c1c84 ; color : #eeebf1; text-align: center; border-radius: 100px 100px;">
# Pytorch Dataset
# </h1>
# <br>

class JigsawDataset:
    """Dataset yielding tokenized comment text.

    mode="train": returns input ids/mask/token-type ids plus the multi-label
    float target vector. mode="valid": returns tokenized more_toxic/less_toxic
    pairs with a constant target of 1 (for margin ranking). Any other mode:
    returns tokenized text only.
    """
    def __init__(self, df, tokenizer, max_length, mode, target_cols):
        self.df = df
        self.max_len = max_length
        self.tokenizer = tokenizer
        self.mode = mode
        self.target_cols = target_cols
        if self.mode == "train":
            self.text = df["text"].values
            self.target = df[target_cols].values
        elif self.mode == "valid":
            self.more_toxic = df["more_toxic"].values
            self.less_toxic = df["less_toxic"].values
        else:
            self.text = df["text"].values

    def __len__(self):
        return len(self.df)

    def __getitem__(self, index):
        if self.mode == "train":
            text = self.text[index]
            target = self.target[index]
            inputs_text = self.tokenizer.encode_plus(
                text,
                truncation=True,
                return_attention_mask=True,
                return_token_type_ids=True,
                max_length = self.max_len,
                padding="max_length",
            )
            text_ids = inputs_text["input_ids"]
            text_mask = inputs_text["attention_mask"]
            text_token_type_ids = inputs_text["token_type_ids"]
            return {
                'text_ids': torch.tensor(text_ids, dtype=torch.long),
                'text_mask': torch.tensor(text_mask, dtype=torch.long),
                'text_token_type_ids': torch.tensor(text_token_type_ids, dtype=torch.long),
                'target': torch.tensor(target, dtype=torch.float)
            }
        elif self.mode == "valid":
            more_toxic = self.more_toxic[index]
            less_toxic = self.less_toxic[index]
            inputs_more_toxic = self.tokenizer.encode_plus(
                more_toxic,
                truncation=True,
                return_attention_mask=True,
                return_token_type_ids=True,
                max_length = self.max_len,
                padding="max_length",
            )
            inputs_less_toxic = self.tokenizer.encode_plus(
                less_toxic,
                truncation=True,
                return_attention_mask=True,
                return_token_type_ids=True,
                max_length = self.max_len,
                padding="max_length",
            )
            # Fixed target of 1: "more_toxic should rank above less_toxic".
            target = 1
            more_toxic_ids = inputs_more_toxic["input_ids"]
            more_toxic_mask = inputs_more_toxic["attention_mask"]
            more_token_type_ids = inputs_more_toxic["token_type_ids"]
            less_toxic_ids = inputs_less_toxic["input_ids"]
            less_toxic_mask = inputs_less_toxic["attention_mask"]
            less_token_type_ids = inputs_less_toxic["token_type_ids"]
            return {
                'more_toxic_ids': torch.tensor(more_toxic_ids, dtype=torch.long),
                'more_toxic_mask': torch.tensor(more_toxic_mask, dtype=torch.long),
                'more_token_type_ids': torch.tensor(more_token_type_ids, dtype=torch.long),
                'less_toxic_ids': torch.tensor(less_toxic_ids, dtype=torch.long),
                'less_toxic_mask': torch.tensor(less_toxic_mask, dtype=torch.long),
                'less_token_type_ids': torch.tensor(less_token_type_ids, dtype=torch.long),
                'target': torch.tensor(target, dtype=torch.float)
            }
        else:
            text = self.text[index]
            inputs_text = self.tokenizer.encode_plus(
                text,
                truncation=True,
                return_attention_mask=True,
                return_token_type_ids=True,
                max_length = self.max_len,
                padding="max_length",
            )
            text_ids = inputs_text["input_ids"]
            text_mask = inputs_text["attention_mask"]
            text_token_type_ids = inputs_text["token_type_ids"]
            return {
                'text_ids': torch.tensor(text_ids, dtype=torch.long),
                'text_mask': torch.tensor(text_mask, dtype=torch.long),
                'text_token_type_ids': torch.tensor(text_token_type_ids, dtype=torch.long),
            }

# <br>
# <h2 style = "font-size:45px;
#              font-family:Comic Sans MS ;
#              font-weight : normal;
#              background-color: #eeebf1 ;
#              color : #4c1c84;
#              text-align: center;
#              border-radius: 100px 100px;">
# DataModule
# </h2>
# <br>

class JigsawDataModule(LightningDataModule):
    """Lightning data module wrapping JigsawDataset for train/valid/test splits."""
    def __init__(self, train_df, valid_df, test_df, cfg):
        super().__init__()
        self._train_df = train_df
        self._valid_df = valid_df
        self._test_df = test_df
        self._cfg = cfg

    def train_dataloader(self):
        dataset = JigsawDataset(
            df=self._train_df,
            tokenizer=self._cfg.tokenizer,
            max_length=self._cfg.max_length,
            mode="train",
            target_cols=target_cols
        )
        return DataLoader(dataset, **self._cfg.train_loader)

    def val_dataloader(self):
        dataset = JigsawDataset(
            df=self._valid_df,
            tokenizer=self._cfg.tokenizer,
            max_length=self._cfg.max_length,
            mode="valid",
            target_cols=target_cols
        )
        return DataLoader(dataset, **self._cfg.valid_loader)

    def test_dataloader(self):
        dataset = 
JigsawDataset( df=self._test_df, tokenizer = self._cfg.tokenizer, max_length=self._cfg.max_length, mode="test", target_cols=target_cols ) return DataLoader(dataset, **self._cfg.test_loader) # + ## DataCheck seed_everything(config.seed) sample_dataloader = JigsawDataModule(train_df, val_df, test_df, config).train_dataloader() for data in sample_dataloader: break # - print(data["text_ids"].size()) print(data["text_mask"].size()) print(data["text_token_type_ids"].size()) print(data["target"].size()) print(data["target"]) output = config.model( data["text_ids"], data["text_mask"], data["text_token_type_ids"], output_hidden_states=True, output_attentions=True, ) print(output["hidden_states"][-1].size(), output["attentions"][-1].size()) print(output["hidden_states"][-1][:, 0, :].size(), output["attentions"][-1].size()) # <br> # <h2 style = "font-size:45px; # font-family:Comic Sans MS ; # font-weight : normal; # background-color: #eeebf1 ; # color : #4c1c84; # text-align: center; # border-radius: 100px 100px;"> # LigitningModule # </h2> # <br> class JigsawModel(pl.LightningModule): def __init__(self, cfg, fold_num): super().__init__() self.cfg = cfg self.__build_model() self.criterion = eval(self.cfg.loss)() self.save_hyperparameters(cfg) self.fold_num = fold_num def __build_model(self): self.base_model = BertForMaskedLM.from_pretrained( self.cfg.backbone.name ) print(f"Use Model: {self.cfg.backbone.name}") self.norm = nn.LayerNorm(768) self.drop = nn.Dropout(p=0.3) self.head = nn.Linear(768, self.cfg.backbone.output_dim) def forward(self, ids, mask, token_type_ids): output = self.base_model( input_ids=ids, attention_mask=mask, token_type_ids=token_type_ids, output_hidden_states=True, output_attentions=True ) feature = self.norm(output["hidden_states"][-1][:, 0, :]) out = self.drop(feature) out = self.head(out) return { "logits":out, "feature":feature, "attention":output["attentions"], "mask":mask, } def training_step(self, batch, batch_idx): text_ids = batch["text_ids"] 
text_mask = batch['text_mask'] text_token_type_ids = batch['text_token_type_ids'] targets = batch['target'] outputs = self.forward(text_ids, text_mask, text_token_type_ids) loss = torch.sqrt(self.criterion(outputs["logits"], targets)) return { "loss":loss, "targets":targets, } def training_epoch_end(self, training_step_outputs): loss_list = [] for out in training_step_outputs: loss_list.extend([out["loss"].cpu().detach().tolist()]) meanloss = sum(loss_list)/len(loss_list) logs = {f"train_loss/fold{self.fold_num+1}": meanloss,} self.log_dict( logs, on_step=False, on_epoch=True, prog_bar=True, logger=True ) def validation_step(self, batch, batch_idx): more_toxic_ids = batch['more_toxic_ids'] more_toxic_mask = batch['more_toxic_mask'] more_text_token_type_ids = batch['more_token_type_ids'] less_toxic_ids = batch['less_toxic_ids'] less_toxic_mask = batch['less_toxic_mask'] less_text_token_type_ids = batch['less_token_type_ids'] targets = batch['target'] more_outputs = self.forward( more_toxic_ids, more_toxic_mask, more_text_token_type_ids ) less_outputs = self.forward( less_toxic_ids, less_toxic_mask, less_text_token_type_ids ) more_outputs = torch.sum(more_outputs["logits"], 1) less_outputs = torch.sum(less_outputs["logits"], 1) outputs = more_outputs - less_outputs logits = outputs.clone() logits[logits > 0] = 1 loss = self.criterion(logits, targets) return { "loss":loss, "pred":outputs, "targets":targets, } def validation_epoch_end(self, validation_step_outputs): loss_list = [] pred_list = [] target_list = [] for out in validation_step_outputs: loss_list.extend([out["loss"].cpu().detach().tolist()]) pred_list.append(out["pred"].detach().cpu().numpy()) target_list.append(out["targets"].detach().cpu().numpy()) meanloss = sum(loss_list)/len(loss_list) pred_list = np.concatenate(pred_list) pred_count = sum(x>0 for x in pred_list)/len(pred_list) logs = { f"valid_loss/fold{self.fold_num+1}":meanloss, f"valid_acc/fold{self.fold_num+1}":pred_count, } self.log_dict( logs, 
on_step=False, on_epoch=True, prog_bar=True, logger=True ) def configure_optimizers(self): optimizer = eval(self.cfg.optimizer.name)( self.parameters(), **self.cfg.optimizer.params ) self.scheduler = eval(self.cfg.scheduler.name)( optimizer, **self.cfg.scheduler.params ) scheduler = {"scheduler": self.scheduler, "interval": "step",} return [optimizer], [scheduler] # <br> # <h2 style = "font-size:45px; # font-family:Comic Sans MS ; # font-weight : normal; # background-color: #eeebf1 ; # color : #4c1c84; # text-align: center; # border-radius: 100px 100px;"> # Training # </h2> # <br> # + skf = KFold( n_splits=config.n_fold, shuffle=True, random_state=config.seed ) for fold, (_, val_idx) in enumerate(skf.split(X=train_df, y=train_df["severe_toxicity"])): train_df.loc[val_idx, "kfold"] = int(fold) train_df["kfold"] = train_df["kfold"].astype(int) train_df.head() # + ## Debug config.trainer.fast_dev_run = True config.backbone.output_dim = len(target_cols) for fold in config.train_fold: print("★"*25, f" Fold{fold+1} ", "★"*25) df_train = train_df[train_df.kfold != fold].reset_index(drop=True) datamodule = JigsawDataModule(df_train, val_df, test_df, config) sample_dataloader = JigsawDataModule(df_train, val_df, test_df, config).train_dataloader() config.scheduler.params.T_0 = config.epoch * len(sample_dataloader) model = JigsawModel(config, fold) lr_monitor = callbacks.LearningRateMonitor() loss_checkpoint = callbacks.ModelCheckpoint( filename=f"best_acc_fold{fold+1}", monitor=f"valid_acc/fold{fold+1}", save_top_k=1, mode="max", save_last=False, dirpath=MODEL_DIR, save_weights_only=True, ) wandb_logger = WandbLogger( project=config.project, entity=config.entity, name = f"{config.exp_name}", tags = ['Hate-BERT', "Jigsaw-Unbiased"] ) lr_monitor = LearningRateMonitor(logging_interval='step') trainer = pl.Trainer( max_epochs=config.epoch, callbacks=[loss_checkpoint, lr_monitor, RichProgressBar()], # deterministic=True, logger=[wandb_logger], **config.trainer ) 
trainer.fit(model, datamodule=datamodule) # + ## Training config.trainer.fast_dev_run = False config.backbone.output_dim = len(target_cols) for fold in config.train_fold: print("★"*25, f" Fold{fold+1} ", "★"*25) df_train = train_df[train_df.kfold != fold].reset_index(drop=True) datamodule = JigsawDataModule(df_train, val_df, test_df, config) sample_dataloader = JigsawDataModule(df_train, val_df, test_df, config).train_dataloader() config.scheduler.params.T_0 = config.epoch * len(sample_dataloader) model = JigsawModel(config, fold) lr_monitor = callbacks.LearningRateMonitor() loss_checkpoint = callbacks.ModelCheckpoint( filename=f"best_acc_fold{fold+1}", monitor=f"valid_acc/fold{fold+1}", save_top_k=1, mode="max", save_last=False, dirpath=MODEL_DIR, save_weights_only=True, ) wandb_logger = WandbLogger( project=config.project, entity=config.entity, name = f"{config.exp_name}", tags = ['Hate-BERT', "Jigsaw-Unbiased"] ) lr_monitor = LearningRateMonitor(logging_interval='step') trainer = pl.Trainer( max_epochs=config.epoch, callbacks=[loss_checkpoint, lr_monitor, RichProgressBar()], # deterministic=True, logger=[wandb_logger], **config.trainer ) trainer.fit(model, datamodule=datamodule) # + device = torch.device('cuda' if torch.cuda.is_available() else 'cpu') config.backbone.output_dim = len(target_cols) print(f"Device == {device}") MORE = np.zeros((len(val_df), config.backbone.output_dim)) LESS = np.zeros((len(val_df), config.backbone.output_dim)) PRED = np.zeros((len(test_df), config.backbone.output_dim)) attention_array = np.zeros((len(val_df), 256)) # attention格納 mask_array = np.zeros((len(val_df), 256)) # mask情報格納,後でattentionと掛け合わせる for fold in config.train_fold: pred_list = [] print("★"*25, f" Fold{fold+1} ", "★"*25) valid_dataloader = JigsawDataModule(train_df, val_df, test_df, config).val_dataloader() model = JigsawModel(config, fold) loss_checkpoint = callbacks.ModelCheckpoint( filename=f"best_acc_fold{fold+1}", monitor=f"valid_acc/fold{fold+1}", save_top_k=1, 
mode="max", save_last=False, dirpath="../input/toxicroberta/", ) model = model.load_from_checkpoint(MODEL_DIR/f"best_acc_fold{fold+1}.ckpt", cfg=config, fold_num=fold) model.to(device) model.eval() more_list = [] less_list = [] for step, data in tqdm(enumerate(valid_dataloader), total=len(valid_dataloader)): more_toxic_ids = data['more_toxic_ids'].to(device) more_toxic_mask = data['more_toxic_mask'].to(device) more_text_token_type_ids = data['more_token_type_ids'].to(device) less_toxic_ids = data['less_toxic_ids'].to(device) less_toxic_mask = data['less_toxic_mask'].to(device) less_text_token_type_ids = data['less_token_type_ids'].to(device) more_outputs = model( more_toxic_ids, more_toxic_mask, more_text_token_type_ids, ) less_outputs = model( less_toxic_ids, less_toxic_mask, less_text_token_type_ids ) more_list.append(more_outputs["logits"].detach().cpu().numpy()) less_list.append(less_outputs["logits"].detach().cpu().numpy()) MORE += np.concatenate(more_list)/len(config.train_fold) LESS += np.concatenate(less_list)/len(config.train_fold) # PRED += pred_list/len(config.train_fold) # - plt.figure(figsize=(12, 5)) plt.scatter(LESS, MORE) plt.xlabel("less-toxic") plt.ylabel("more-toxic") plt.grid() plt.show() val_df["less_attack"] = LESS.sum(axis=1) val_df["more_attack"] = MORE.sum(axis=1) val_df["diff_attack"] = val_df["more_attack"] - val_df["less_attack"] attack_score = val_df[val_df["diff_attack"]>0]["diff_attack"].count()/len(val_df) print(f"HATE-BERT Jigsaw-Classification Score: {attack_score:.6f}") # <br> # <h2 style = "font-size:45px; # font-family:Comic Sans MS ; # font-weight : normal; # background-color: #eeebf1 ; # color : #4c1c84; # text-align: center; # border-radius: 100px 100px;"> # Attention Visualize # </h2> # <br> # + text_df = pd.DataFrame() text_df["text"] = list(set(val_df["less_toxic"].unique().tolist() + val_df["more_toxic"].unique().tolist())) display(text_df.head()) display(text_df.shape) # + device = torch.device('cuda' if 
torch.cuda.is_available() else 'cpu') config.backbone.output_dim = len(target_cols) print(f"Device == {device}") attention_array = np.zeros((len(text_df), config.max_length)) # attention格納 mask_array = np.zeros((len(text_df), config.max_length)) # mask情報格納,後でattentionと掛け合わせる feature_array = np.zeros((len(text_df), 768)) PRED = np.zeros((len(text_df), config.backbone.output_dim)) for fold in config.train_fold: pred_list = [] print("★"*25, f" Fold{fold+1} ", "★"*25) test_dataloader = JigsawDataModule(train_df, val_df, text_df, config).test_dataloader() model = JigsawModel(config, fold) loss_checkpoint = callbacks.ModelCheckpoint( filename=f"best_acc_fold{fold+1}", monitor=f"valid_acc/fold{fold+1}", save_top_k=1, mode="max", save_last=False, dirpath="../input/toxicroberta/", ) model = model.load_from_checkpoint(MODEL_DIR/f"best_acc_fold{fold+1}.ckpt", cfg=config, fold_num=fold) model.to(device) model.eval() attention_list = [] feature_list = [] mask_list = [] pred_list = [] for step, data in tqdm(enumerate(test_dataloader), total=len(test_dataloader)): text_ids = data["text_ids"].to(device) text_mask = data["text_mask"].to(device) text_token_type_ids = data["text_token_type_ids"].to(device) mask_list.append(text_mask.detach().cpu().numpy()) outputs = model( text_ids, text_mask, text_token_type_ids, ) ## Last LayerのCLS Tokenに対するAttention last_attention = outputs["attention"][-1].detach().cpu().numpy() total_attention = np.zeros((last_attention.shape[0], config.max_length)) for batch in range(last_attention.shape[0]): for n_head in range(12): total_attention[batch, :] += last_attention[batch, n_head, 0, :] attention_list.append(total_attention) pred_list.append(outputs["logits"].detach().cpu().numpy()) feature_list.append(outputs["feature"].detach().cpu().numpy()) attention_array += np.concatenate(attention_list)/config.n_fold mask_array += np.concatenate(mask_list)/config.n_fold feature_array += np.concatenate(feature_list)/config.n_fold PRED += 
np.concatenate(pred_list)/len(config.train_fold) # - text_df["target"] = PRED[:, 0] text_df.to_pickle(OUTPUT_DIR/"text_df.pkl") np.save(OUTPUT_DIR/'toxic-attention.npy', attention_array) np.save(OUTPUT_DIR/'toxic-mask.npy', mask_array) np.save(OUTPUT_DIR/'toxic-feature.npy', feature_array) plt.figure(figsize=(12, 5)) sns.histplot(text_df["target"], color="#4c1c84") plt.grid() plt.show() # <br> # <h2 style = "font-size:45px; # font-family:Comic Sans MS ; # font-weight : normal; # background-color: #eeebf1 ; # color : #4c1c84; # text-align: center; # border-radius: 100px 100px;"> # Attention Load # </h2> # <br> text_df = pd.read_pickle(OUTPUT_DIR/"text_df.pkl") attention_array = np.load(OUTPUT_DIR/'toxic-attention.npy') mask_array = np.load(OUTPUT_DIR/'toxic-mask.npy') # + from IPython.display import display, HTML def highlight_r(word, attn): html_color = '#%02X%02X%02X' % (255, int(255*(1 - attn)), int(255*(1 - attn))) return '<span style="background-color: {}">{}</span>'.format(html_color, word) # + num = 12 ids = config.tokenizer(text_df.loc[num, "text"])["input_ids"] tokens = config.tokenizer.convert_ids_to_tokens(ids) attention = attention_array[num, :][np.nonzero(mask_array[num, :])] html_outputs = [] for word, attn in zip(tokens, attention): html_outputs.append(highlight_r(word, attn)) print(f"Offensive Score is {PRED[num, 0]}") display(HTML(' '.join(html_outputs))) display(text_df.loc[num, "text"]) # - text_df.sort_values("target", ascending=False).head(20) # + high_score_list = text_df.sort_values("target", ascending=False).head(20).index.tolist() for num in high_score_list: ids = config.tokenizer(text_df.loc[num, "text"])["input_ids"] tokens = config.tokenizer.convert_ids_to_tokens(ids) attention = attention_array[num, :][np.nonzero(mask_array[num, :])] html_outputs = [] for word, attn in zip(tokens, attention): html_outputs.append(highlight_r(word, attn)) print(f"Offensive Score is {PRED[num, 0]}") display(HTML(' '.join(html_outputs))) 
display(text_df.loc[num, "text"]) # -
notebooks/035_exp.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     name: python3
# ---

# + [markdown] id="1GhdrwnPhxpM" colab_type="text"
# # Regression on Gradient Boosting: CPU vs GPU
# This is a basic tutorial which shows how to run regression on gradient boosting on CPU and GPU on Google Colaboratory. It will give you an opportunity to see the speedup that you get from GPU training. The speedup is large even on the Tesla K80 that is available in Colaboratory. On newer generations of GPU the speedup will be much bigger.
#
# We will use the CatBoost gradient boosting library, which is known for its good GPU performance.
#
#
# + [markdown] id="cSWEX-NYh7-U" colab_type="text"
#
#
#
# # !Set GPU as hardware accelerator!
# First of all, you need to select GPU as the hardware accelerator. There are two simple steps to do so:
# #### Step 1. Navigate to the 'Runtime' menu and select 'Change runtime type'
# #### Step 2. Choose GPU as the hardware accelerator.
# That's all!
# + [markdown] id="s4EMTHRqiDE1" colab_type="text"
# ## Importing CatBoost
#
# The next big thing is to import CatBoost inside the environment. Colaboratory has built-in libraries installed, and most other libraries can be installed quickly with a simple `!pip install` command.
# Please take notice that you need to re-import the library every time you start a new session of Colab.
# + id="jeslyTF6peOo" colab_type="code" outputId="8a751506-2512-4d23-95da-0c87cf483d36" colab={"base_uri": "https://localhost:8080/", "height": 142}
# !pip install catboost

# + [markdown] id="_1rchBYFm4Je" colab_type="text"
# ## Including libraries
# The pieces we need:
#
# `CatBoostRegressor` - the regression model,
#
# `timeit` - wall-clock timing,
#
# `make_regression` - synthetic dataset generation

# + id="4BzUM4I8qBgR" colab_type="code" colab={}
import timeit

from catboost import CatBoostRegressor
from sklearn.datasets import make_regression

# + [markdown] id="GPx1En1liM3e" colab_type="text"
# ## Generating dataset
# GPU training pays off on large datasets -- you will get a good speedup starting
# from around 10k objects. For that reason we generate a large dataset (40,000
# documents and 2,000 features) for this tutorial.
#
# We use the `datasets.make_regression` module from the `sklearn` library because
# it is the easiest way to load a large dataset into Google Colab for our tests.
# The dataset is a linear regression with Gaussian noise.
#
# The code below does this.

# + id="iY4R_dRnqIcs" colab_type="code" colab={}
num_rows = 40000
num_columns = 2000

X_train, y_train = make_regression(
    n_samples=num_rows,
    n_features=num_columns,
    bias=100,
    noise=1.0,
    random_state=0,
)
# NOTE: train and eval data are deliberately the same object -- this tutorial
# measures fit time only, not generalization.
X_test, y_test = X_train, y_train

# + [markdown] id="BudTE2Pmik6i" colab_type="text"
# ## Training on CPU
# Now we will train the model on CPU and measure execution time.
# We will use 100 iterations for our CPU training since otherwise it will take a long time.
# It will take around 8 minutes.
# + id="G5r097DivJ0S" colab_type="code" outputId="0614ae36-bf37-41c9-8173-cf65fe38897a" colab={"base_uri": "https://localhost:8080/", "height": 303}
def _fit_catboost(task_type=None):
    """Fit a 100-iteration CatBoostRegressor on the generated dataset.

    Shared helper for the CPU and GPU timing runs, so the two cells below
    differ only in the ``task_type`` they pass.

    task_type: None for CatBoost's default (CPU) training, or 'GPU' to
        train on the GPU.

    Training progress is printed every 10 iterations (verbose=10); uses the
    module-level X_train/y_train/X_test/y_test generated above.
    """
    params = {'iterations': 100, 'learning_rate': 0.03}
    if task_type is not None:
        params['task_type'] = task_type
    model = CatBoostRegressor(**params)
    model.fit(X_train, y_train, eval_set=(X_test, y_test), verbose=10)


def train_on_cpu():
    """Time-measured CPU training run (default task_type)."""
    _fit_catboost()


cpu_time = timeit.timeit('train_on_cpu()', setup="from __main__ import train_on_cpu", number=1)
print('Time to fit model on CPU: {} sec'.format(int(cpu_time)))

# + [markdown] id="YFJTWgfZiygP" colab_type="text"
# ## Training on GPU
# The previous code execution has been done on CPU. It's time to use GPU!
# We need to use the '*task_type='GPU'*' parameter value to run GPU training. Now the execution time won't be so big :)
# BTW if Colaboratory shows you a warning 'GPU memory usage is close to the limit', just press 'Ignore'.

# + id="uhS9Wb59kClq" colab_type="code" outputId="84cd57a2-e1db-4720-9f86-1466781ec9fc" colab={"base_uri": "https://localhost:8080/", "height": 267}
def train_on_gpu():
    """Time-measured GPU training run; identical model/data to the CPU run."""
    _fit_catboost(task_type='GPU')


gpu_time = timeit.timeit('train_on_gpu()', setup="from __main__ import train_on_gpu", number=1)
print('Time to fit model on GPU: {} sec'.format(int(gpu_time)))

# + id="wiilD1uZlSmp" colab_type="code" outputId="5cc6cf87-bd8e-42df-c7a2-51aeeceb3f98" colab={"base_uri": "https://localhost:8080/", "height": 35}
print('GPU speedup over CPU: ' + '%.2f' % (cpu_time/gpu_time) + 'x')

# + [markdown] id="Gw7qmALMi_yi" colab_type="text"
# As you can see GPU is much faster than CPU on large datasets. It takes just 1 - 2 mins vs 7 - 8 mins to fit the model.
# This is a good reason to use GPU instead of CPU!
#
# Thank you for attention!
catboost/tutorials/tools/google_colaboratory_cpu_vs_gpu_regression_tutorial.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: 'Python 3.7.9 64-bit (''xgboost'': conda)'
#     metadata:
#       interpreter:
#         hash: 85b62efde3748decc6f5e82aab2832b38da977e986235fba8ae17ce8224e114f
#     name: python3
# ---

# + [markdown] tags=[] cell_id="00000-1006db30-f513-4800-85b5-e3d78b140c11" output_cleared=false deepnote_cell_type="markdown"
# ## YouTube Trending Project
# * ### Machine Learning Models
#
# ### Table of Contents:
# * 1.Exploratory Data Analysis
# * 2.Data Cleaning
# * 3.Modeling
# * 3.1 Predicting Likes
# * 3.1.1 Pre-processing Data
# * 3.1.1.1 Train-Test Split (80:20)
# * 3.1.1.2 Initializing Pre-processing Pipeline
# * 3.1.2 Hyperparameter Tuning (Optuna)
# * 3.1.3 Regressors
# * 3.1.3.1 Linear Regression
# * 3.1.3.2 Random Forest
# * 3.1.3.3 XGBoost
# * 3.1.4 Random Forest
# * 3.1.4.1 Feature Importance
# * 3.1.5 Likes Evaluation
# * 3.2 Predicting Views
# * 3.2.1 Pre-processing Data
# * 3.2.1.1 Train-Test Split (80:20)
# * 3.2.1.2 Initializing Pre-processing Pipeline
# * 3.2.2 Hyperparameter Tuning (Optuna)
# * 3.2.3 Regressors
# * 3.2.3.1 Linear Regression
# * 3.2.3.2 Random Forest
# * 3.2.3.3 XGBoost
# * 3.2.4 Random Forest
# * 3.2.4.1 Feature Importance
# * 3.2.5 Views Evaluation

# + [markdown] tags=[] cell_id="00001-7f44dcc6-0e32-4a48-b9e1-bf26ca1d4584" output_cleared=false deepnote_cell_type="markdown"
# ### 3.
Machine Learning Models # ##### Loading Data and Libraries # + tags=[] cell_id="00000-b7d0e551-34c7-4c00-9354-ff8e296ff92a" output_cleared=false source_hash="6ac18499" execution_millis=1686 deepnote_to_be_reexecuted=false execution_start=1611792608394 deepnote_cell_type="code" import helpers import pandas as pd import numpy as np import seaborn as sns import matplotlib.pyplot as plt # Encoding and Data Split from sklearn.compose import ColumnTransformer from sklearn.preprocessing import OneHotEncoder from sklearn.pipeline import Pipeline from sklearn.model_selection import train_test_split # Modeling from sklearn import metrics import xgboost as xgb from sklearn.linear_model import LinearRegression from sklearn.tree import DecisionTreeRegressor from sklearn.ensemble import RandomForestRegressor # Tuning import optuna from sklearn.compose import ColumnTransformer from sklearn.preprocessing import StandardScaler from sklearn.model_selection import cross_validate from sklearn.model_selection import cross_val_score from sklearn.metrics import accuracy_score # Reading the stitched data df = helpers.load_df("Data/Curated_US_Data.csv") df.head() # + [markdown] tags=[] cell_id="00003-f6a8a643-31d2-40a5-a660-7518fa19cf61" output_cleared=false deepnote_cell_type="markdown" # ### 3.1 Predicting Likes # #### 3.1.1 Preprocessing Data # ##### 3.1.1.1 Train-Test Split (80:20) # Splitting the data into train and test sets in a 80:20 ratio # + tags=[] cell_id="00001-659399c8-6ad7-41c2-8785-18cbce1fb9bc" output_cleared=true source_hash="6ae828b9" execution_millis=3 deepnote_to_be_reexecuted=false execution_start=1611792610088 deepnote_cell_type="code" X = df.drop(columns=['likes_log']) y = df['likes_log'] # + tags=[] cell_id="00005-51f8dcd0-90c8-4259-84fd-a0ef8d5dec97" deepnote_to_be_reexecuted=false source_hash="4b9c9c7e" execution_millis=8 output_cleared=true execution_start=1611792610093 deepnote_cell_type="code" X_train, X_test, y_train, y_test = train_test_split(X, y, test_size 
= 0.2) # + [markdown] tags=[] cell_id="00006-c7f05310-5f50-4912-9e61-582298b20c6b" deepnote_cell_type="markdown" # ##### 3.1.1.2 Initializing Preprocessing Pipeline # Scaling numercal data and encoding categorical data # + tags=[] cell_id="00005-0ef86902-c720-4926-84f4-93d2da4739c7" deepnote_to_be_reexecuted=false source_hash="37eaf276" execution_millis=1 output_cleared=false execution_start=1611792610127 deepnote_cell_type="code" numeric_features = X.select_dtypes(include=['int64', 'float64']).drop(['durationHr','durationMin','durationSec', 'categoryId'],axis=1).columns categorical_features = list(X.select_dtypes(include=['object']).columns) + ['durationHr','durationMin','durationSec', 'categoryId'] preprocessor = ColumnTransformer( transformers=[ ('numerical', StandardScaler(), numeric_features), ('categorical', OneHotEncoder(handle_unknown = "ignore"), categorical_features)]) y # + tags=[] cell_id="00008-f97b92f2-90a0-49fa-a322-939cb49dc37b" deepnote_to_be_reexecuted=false source_hash="c10f6ff8" execution_millis=0 output_cleared=false execution_start=1611792610128 deepnote_cell_type="code" print('Numeric Features:', numeric_features) print('Categorical Features:', categorical_features) # + [markdown] tags=[] cell_id="00009-bd06f2bc-62e8-48fa-bd64-b8bcb5a104fa" deepnote_cell_type="markdown" # #### 3.1.2 Hyperparameter Tuning (Optuna) # Using bayesian hyperparameter optimization to find optimal parameters # + tags=[] cell_id="00010-51411f16-ba3e-4d1a-8d3f-0ee45be6e1b0" deepnote_to_be_reexecuted=false source_hash="98cfaaec" execution_millis=726545 output_cleared=false execution_start=1611792610129 deepnote_cell_type="code" # Define Objective Function to be Maximized def rfObjective(trial): # Define Random Forest Parameters rfParams={ 'n_estimators' : trial.suggest_int('n_estimators', 100, 500), 'max_depth' : trial.suggest_int('max_depth', 1, 50), 'min_samples_leaf' : trial.suggest_int('min_samples_leaf', 1,15), 'min_samples_split' : 
trial.suggest_int('min_samples_split', 2,15) } # Establish Random Forest Regressor Pipeline rfPipe = Pipeline(steps=[('preprocessor', preprocessor), ('regressor', RandomForestRegressor( **rfParams ))]) for step in range(100): rfPipe.fit(X_train,y_train) # Report Intermediate Objective Value intermediate_value = rfPipe.score(X_test, y_test) trial.report(intermediate_value, step) # Handle pruning based on the intermediate value. if trial.should_prune(): raise optuna.exceptions.TrialPruned() return intermediate_value # Running the Study rfStudy = optuna.create_study(direction='maximize') rfStudy.optimize(rfObjective, n_trials=100) # Calculating the pruned and completed trials pruned_trials = [t for t in rfStudy.trials if t.state == optuna.trial.TrialState.PRUNED] complete_trials = [t for t in rfStudy.trials if t.state == optuna.trial.TrialState.COMPLETE] rfTrial = rfStudy.best_trial print('Number of finished trials: ', len(rfStudy.trials)) print('Number of pruned trials: ', len(pruned_trials)) print('Number of complete trials: ', len(complete_trials)) print('Accuracy: {}'.format(rfTrial.value)) print("Best hyperparameters: {}".format(rfTrial.params)) # + tags=[] cell_id="00012-f41b2cea-de6f-4896-bd28-990affcec1a7" deepnote_to_be_reexecuted=false source_hash="30de3e87" execution_millis=10578877 output_cleared=false execution_start=1611793336686 deepnote_cell_type="code" # Define Objective Function to be Maximized def xgbObjective(trial): # Define XGBoost Parameters xgbParams = { 'n_estimators' : trial.suggest_int('n_estimators', 100,500), 'max_depth' : trial.suggest_int('max_depth', 1, 20), 'eta' : trial.suggest_uniform('eta', 0.01, 1), # learning_rate 'subsample': trial.suggest_uniform('subsample', 0.1, 1), 'colsample_bytree': trial.suggest_uniform('colsample_bytree', 0.1, 1), 'gamma': trial.suggest_int('gamma', 0, 10), # min_split_loss 'min_child_weight' : trial.suggest_uniform('min_child_weight', 0.1, 1.0) } # Establish XGBoost Regressor Pipeline xgbPipe = 
Pipeline(steps=[('preprocessor', preprocessor), ('regressor', xgb.XGBRegressor( **xgbParams ))]) for step in range(100): xgbPipe.fit(X_train,y_train) # Report Intermediate Objective Value intermediate_value = xgbPipe.score(X_test, y_test) trial.report(intermediate_value, step) # Handle pruning based on the intermediate value. if trial.should_prune(): raise optuna.exceptions.TrialPruned() return intermediate_value # Running the Study xgbStudy = optuna.create_study(direction='maximize') xgbStudy.optimize(xgbObjective, n_trials=100) # Calculating the pruned and completed trials pruned_trials = [t for t in xgbStudy.trials if t.state == optuna.trial.TrialState.PRUNED] complete_trials = [t for t in xgbStudy.trials if t.state == optuna.trial.TrialState.COMPLETE] xgbTrial = xgbStudy.best_trial print('Number of finished trials: ', len(xgbStudy.trials)) print('Number of pruned trials: ', len(pruned_trials)) print('Number of complete trials: ', len(complete_trials)) print('Accuracy: {}'.format(xgbTrial.value)) print("Best hyperparameters: {}".format(xgbTrial.params)) # + [markdown] tags=[] cell_id="00012-d973f97c-b3b7-4299-b290-29c79cecfae5" deepnote_cell_type="markdown" # #### 3.1.3 Regressors # * ##### 3.1.3.1 Linear Regression # * ##### 3.1.3.2 Random Forest # * ##### 3.1.3.3 XGBoost # # + tags=[] cell_id="00010-e2d9fba0-c2a3-4320-99da-95defe94e71e" deepnote_to_be_reexecuted=false execution_millis=12101 source_hash="f857934f" output_cleared=false execution_start=1611803915577 deepnote_cell_type="code" regressors = [ LinearRegression(), RandomForestRegressor(**rfTrial.params), xgb.XGBRegressor(**xgbTrial.params), ] for regressor in regressors: pipe = Pipeline(steps=[('preprocessor', preprocessor), ('regressor', regressor)]) pipe.fit(X_train, y_train) print(regressor) y_pred = pipe.predict(X_test) d1 = {'True Labels': y_test, 'Predicted Labels': y_pred} print("Model Score: %.3f" % pipe.score(X_test, y_test)) mae = metrics.mean_absolute_error(y_test, pipe.predict(X_test)) mse 
= metrics.mean_squared_error(y_test,pipe.predict(X_test)) rmse = np.sqrt(metrics.mean_squared_error(y_test, pipe.predict(X_test))) print("mae: ", mae) print("mse: ", mse) print("rmse: ", rmse, "\n") data = pd.DataFrame(data = d1) # Residual Plot 1 fig1, ax1 = plt.subplots(1,2, figsize=(18,6)) g1 = sns.residplot(x= y_test, y= y_pred, ax=ax1[0], data = data, color="salmon") g1.set_xlabel('Predicted Likes', fontsize=16) g1.set_ylabel('Residual', fontsize=16) g1.set_title("Plot of Residuals", fontsize=18) # Residual Plot 2 g2 = sns.histplot(x=y_pred, ax=ax1[1], data = data, element="step", color="red", kde=True) g2.set_xlabel('Residuals', fontsize=16) g2.set_ylabel('Frequency', fontsize=16) g2.set_title("Plot of Residuals", fontsize=18) # Linear Plot # lm1 = sns.lmplot(x="True Labels", y="Predicted Labels", data = data, size = 10) # fig1 = lm1.fig # fig1.suptitle("Sklearn ", fontsize=18) # sns.set(font_scale = 1.5) # + [markdown] tags=[] cell_id="00013-2b5574b2-0a32-4c48-ac66-8520450b9c31" output_cleared=false deepnote_cell_type="markdown" # #### 3.1.4 Random Forest Regressor # + tags=[] cell_id="00027-27bdeddd-4846-4937-99f9-d0f5692051f4" deepnote_to_be_reexecuted=false source_hash="f7589ae5" execution_millis=19343 output_cleared=false execution_start=1611803927687 deepnote_cell_type="code" reg = RandomForestRegressor(**{'n_estimators': 456, 'max_depth': 16, 'min_samples_leaf': 1, 'min_samples_split': 3}, oob_score=True) pipe = Pipeline(steps=[('preprocessor', preprocessor), ('regressor', reg)]) pipe.fit(X_train, y_train) print(reg) print("Model Train Score: %.3f" % pipe.score(X_train, y_train)) print("Model OOB Score: %.3f" % reg.oob_score_) print("Model Test Score: %.3f" % pipe.score(X_test, y_test)) # + [markdown] tags=[] cell_id="00013-200eef92-822a-422d-9092-71cb9548bc45" deepnote_cell_type="markdown" # ##### 3.1.4.1 Feature Importance # + tags=[] cell_id="00028-4a3595b1-2a5c-400f-a51a-398d1edd2460" deepnote_to_be_reexecuted=false source_hash="102cf22f" 
# (tail of the previous cell's jupytext metadata marker fell on this line in the
# original file; the cells below are restored with plain `# +` markers.)

# +
# Feature importances of the fitted random-forest likes model, largest first.
# NOTE(review): `reg.feature_importances_` is indexed by the *transformed*
# feature space (after one-hot encoding), while `X.columns` are the raw input
# columns, so zip() silently truncates/misaligns the pairs — verify against
# the preprocessor's output feature names before trusting this table.
pd.DataFrame(zip(X.columns, reg.feature_importances_),
             columns=['feature', 'importance']).sort_values(by='importance', ascending=False)
# -

# + [markdown]
# #### 3.1.5 Likes Evaluation

# +
# Evaluate the Optuna-tuned XGBoost pipeline on the held-out test set.
# FIX: the original built `eval = Pipeline(...)` (also shadowing the builtin
# `eval`) but then fitted and predicted with the previously fitted `pipe`, so
# the XGBoost pipeline was never actually evaluated.  Build, fit and predict
# with the same pipeline.
eval_pipe = Pipeline(steps=[('preprocessor', preprocessor),
                            ('regressor', xgb.XGBRegressor(**xgbTrial.params))])
eval_pipe.fit(X_train, y_train)
y_pred = eval_pipe.predict(X_test)
mae = metrics.mean_absolute_error(y_test, y_pred)
mse = metrics.mean_squared_error(y_test, y_pred)
rmse = np.sqrt(mse)
r2 = metrics.r2_score(y_test, y_pred)
print("mae: ", mae)
print("mse: ", mse)
print("rmse: ", rmse)
print("r2: ", r2)
# -

# +
# Put true/predicted labels side by side and undo the log transform
# (targets are natural-log likes).
df = pd.DataFrame(data=list(zip(list(y_test), list(y_pred))), columns=['actual', 'predicted'])
# Unlog Values
for col in df.columns:
    df[col] = df[col].apply(lambda x: np.e**x)
df
# -

# + [markdown]
# ### 3.2 Predicting Views
# #### 3.2.1 Preprocessing Data
# ##### 3.2.1.1 Train-Test Split (80:20)
# Splitting the data into train and test sets in a 80:20 ratio

# +
df = helpers.load_df("Data/Curated_US_Data.csv")
X = df.drop(columns=['views_log'])
y = df['views_log']
# -

# +
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2)
# -

# + [markdown]
# ##### 3.2.1.2 Initializing Preprocessing Pipeline
# Scaling numerical data and encoding categorical data

# +
# Duration parts and categoryId are discrete codes, so they are one-hot
# encoded together with the object columns instead of being scaled.
numeric_features = X.select_dtypes(include=['int64', 'float64']).drop(
    ['durationHr', 'durationMin', 'durationSec', 'categoryId'], axis=1).columns
categorical_features = (list(X.select_dtypes(include=['object']).columns)
                        + ['durationHr', 'durationMin', 'durationSec', 'categoryId'])
preprocessor = ColumnTransformer(
    transformers=[
        ('numerical', StandardScaler(), numeric_features),
        ('categorical', OneHotEncoder(handle_unknown="ignore"), categorical_features)])
y  # notebook display of the target series (kept from the original cell)
# -

# +
print('Numeric Features:', numeric_features)
print('Categorical Features:', categorical_features)
# -

# + [markdown]
# #### 3.2.2 Hyperparameter Tuning (Optuna)
# Using bayesian hyperparameter optimization to find optimal parameters

# +
# Define Objective Function to be Maximized
def rfObjective(trial):
    """Optuna objective: test-set R^2 of a random-forest pipeline."""
    # Define Random Forest Parameters (search space)
    rfParams = {
        'n_estimators': trial.suggest_int('n_estimators', 100, 500),
        'max_depth': trial.suggest_int('max_depth', 1, 50),
        'min_samples_leaf': trial.suggest_int('min_samples_leaf', 1, 15),
        'min_samples_split': trial.suggest_int('min_samples_split', 2, 15),
    }
    # Establish Random Forest Regressor Pipeline
    rfPipe = Pipeline(steps=[('preprocessor', preprocessor),
                             ('regressor', RandomForestRegressor(**rfParams))])
    # NOTE(review): each "step" refits the same pipeline on the same data, so
    # the intermediate values are essentially identical; the loop only exists
    # to give Optuna's pruner something to observe.  Consider a single fit.
    for step in range(100):
        rfPipe.fit(X_train, y_train)
        # Report Intermediate Objective Value
        intermediate_value = rfPipe.score(X_test, y_test)
        trial.report(intermediate_value, step)
        # Handle pruning based on the intermediate value.
        if trial.should_prune():
            raise optuna.exceptions.TrialPruned()
    return intermediate_value


# Running the Study
rfStudy = optuna.create_study(direction='maximize')
rfStudy.optimize(rfObjective, n_trials=100)

# Calculating the pruned and completed trials
pruned_trials = [t for t in rfStudy.trials if t.state == optuna.trial.TrialState.PRUNED]
complete_trials = [t for t in rfStudy.trials if t.state == optuna.trial.TrialState.COMPLETE]
rfTrial = rfStudy.best_trial
print('Number of finished trials: ', len(rfStudy.trials))
print('Number of pruned trials: ', len(pruned_trials))
print('Number of complete trials: ', len(complete_trials))
print('Accuracy: {}'.format(rfTrial.value))
print("Best hyperparameters: {}".format(rfTrial.params))
# -

# +
# Define Objective Function to be Maximized
def xgbObjective(trial):
    """Optuna objective: test-set R^2 of an XGBoost pipeline."""
    # Define XGBoost Parameters (search space)
    xgbParams = {
        'n_estimators': trial.suggest_int('n_estimators', 100, 500),
        'max_depth': trial.suggest_int('max_depth', 1, 20),
        'eta': trial.suggest_uniform('eta', 0.01, 1),  # learning_rate
        'subsample': trial.suggest_uniform('subsample', 0.1, 1),
        'colsample_bytree': trial.suggest_uniform('colsample_bytree', 0.1, 1),
        'gamma': trial.suggest_int('gamma', 0, 10),  # min_split_loss
        'min_child_weight': trial.suggest_uniform('min_child_weight', 0.1, 1.0),
    }
    # Establish XGBoost Regressor Pipeline
    xgbPipe = Pipeline(steps=[('preprocessor', preprocessor),
                              ('regressor', xgb.XGBRegressor(**xgbParams))])
    for step in range(100):
        xgbPipe.fit(X_train, y_train)
        # Report Intermediate Objective Value
        intermediate_value = xgbPipe.score(X_test, y_test)
        trial.report(intermediate_value, step)
        # Handle pruning based on the intermediate value.
        if trial.should_prune():
            raise optuna.exceptions.TrialPruned()
    return intermediate_value


# Running the Study
xgbStudy = optuna.create_study(direction='maximize')
xgbStudy.optimize(xgbObjective, n_trials=100)

# Calculating the pruned and completed trials
pruned_trials = [t for t in xgbStudy.trials if t.state == optuna.trial.TrialState.PRUNED]
complete_trials = [t for t in xgbStudy.trials if t.state == optuna.trial.TrialState.COMPLETE]
xgbTrial = xgbStudy.best_trial
print('Number of finished trials: ', len(xgbStudy.trials))
print('Number of pruned trials: ', len(pruned_trials))
print('Number of complete trials: ', len(complete_trials))
print('Accuracy: {}'.format(xgbTrial.value))
print("Best hyperparameters: {}".format(xgbTrial.params))
# -

# + [markdown]
# #### 3.2.3 Regressors
# * ##### 3.2.3.1 Linear Regression
# * ##### 3.2.3.2 Random Forest
# * ##### 3.2.3.3 XGBoost

# +
# Compare a plain linear baseline with the two tuned ensemble models.
regressors = [
    LinearRegression(),
    RandomForestRegressor(**rfTrial.params),
    xgb.XGBRegressor(**xgbTrial.params),
]
for regressor in regressors:
    pipe = Pipeline(steps=[('preprocessor', preprocessor), ('regressor', regressor)])
    pipe.fit(X_train, y_train)
    print(regressor)
    y_pred = pipe.predict(X_test)
    d1 = {'True Labels': y_test, 'Predicted Labels': y_pred}
    print("Model Score: %.3f" % pipe.score(X_test, y_test))
    # FIX: reuse the predictions already computed instead of calling
    # pipe.predict(X_test) three more times (identical, deterministic output).
    mae = metrics.mean_absolute_error(y_test, y_pred)
    mse = metrics.mean_squared_error(y_test, y_pred)
    rmse = np.sqrt(mse)
    print("mae: ", mae)
    print("mse: ", mse)
    print("rmse: ", rmse, "\n")
    data = pd.DataFrame(data=d1)

    # Residual Plot 1: residuals against predictions
    fig1, ax1 = plt.subplots(1, 2, figsize=(18, 6))
    g1 = sns.residplot(x=y_test, y=y_pred, ax=ax1[0], data=data, color="salmon")
    g1.set_xlabel('Predicted Views', fontsize=16)
    g1.set_ylabel('Residual', fontsize=16)
    g1.set_title("Plot of Residuals", fontsize=18)

    # Residual Plot 2: distribution of the predictions
    g2 = sns.histplot(x=y_pred, ax=ax1[1], data=data, element="step", color="red", kde=True)
    g2.set_xlabel('Residuals', fontsize=16)
    g2.set_ylabel('Frequency', fontsize=16)
    g2.set_title("Plot of Residuals", fontsize=18)

    # Linear Plot
    # lm1 = sns.lmplot(x="True Labels", y="Predicted Labels", data = data, size = 10)
    # fig1 = lm1.fig
    # fig1.suptitle("Sklearn ", fontsize=18)
    # sns.set(font_scale = 1.5)
# -

# + [markdown]
# #### 3.2.4 Random Forest Regressor

# +
# Refit the best random forest with out-of-bag scoring enabled.
reg = RandomForestRegressor(**rfTrial.params, oob_score=True)
pipe = Pipeline(steps=[('preprocessor', preprocessor), ('regressor', reg)])
pipe.fit(X_train, y_train)
print(reg)
print("Model Train Score: %.3f" % pipe.score(X_train, y_train))
print("Model OOB Score: %.3f" % reg.oob_score_)
print("Model Test Score: %.3f" % pipe.score(X_test, y_test))
# -

# + [markdown]
# ##### 3.2.4.1 Feature Importance

# +
# NOTE(review): same raw-vs-transformed column mismatch as in the likes table.
pd.DataFrame(zip(X.columns, reg.feature_importances_),
             columns=['feature', 'importance']).sort_values(by='importance', ascending=False)
# -

# + [markdown]
# #### 3.2.5 Views Evaluation

# +
# Same fix as the likes evaluation: fit and score the XGBoost pipeline itself
# (the original fitted `pipe` while the `eval` pipeline sat unused).
eval_pipe = Pipeline(steps=[('preprocessor', preprocessor),
                            ('regressor', xgb.XGBRegressor(**xgbTrial.params))])
eval_pipe.fit(X_train, y_train)
y_pred = eval_pipe.predict(X_test)
mae = metrics.mean_absolute_error(y_test, y_pred)
mse = metrics.mean_squared_error(y_test, y_pred)
rmse = np.sqrt(mse)
r2 = metrics.r2_score(y_test, y_pred)
print("mae: ", mae)
print("mse: ", mse)
print("rmse: ", rmse)
print("r2: ", r2)
# -

# +
df = pd.DataFrame(data=list(zip(list(y_test), list(y_pred))), columns=['actual', 'predicted'])
# Unlog Values
for col in df.columns:
    df[col] = df[col].apply(lambda x: np.e**x)
df
yt-model.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 2 # language: python # name: python2 # --- # + [markdown] toc="true" # # Table of Contents # <p><div class="lev1"><a href="#Relative-Probabilities-1"><span class="toc-item-num">1&nbsp;&nbsp;</span>Relative Probabilities 1</a></div><div class="lev1"><a href="#Relative-Probabilities-2"><span class="toc-item-num">2&nbsp;&nbsp;</span>Relative Probabilities 2</a></div><div class="lev1"><a href="#Same-Coin"><span class="toc-item-num">3&nbsp;&nbsp;</span>Same Coin</a></div><div class="lev1"><a href="#Many-Flips"><span class="toc-item-num">4&nbsp;&nbsp;</span>Many Flips</a></div><div class="lev1"><a href="#Is-it-Fair-1"><span class="toc-item-num">5&nbsp;&nbsp;</span>Is it Fair 1</a></div><div class="lev1"><a href="#Is-it-Fair-2"><span class="toc-item-num">6&nbsp;&nbsp;</span>Is it Fair 2</a></div> # - # # Relative Probabilities 1 # Let's suppose we have a fair coin and we flipped it four times. I'd like you to tell me what is the probability that there is exactly one head divided by the probability that only the very first flip is a head. # **Answer** # - The correct answer is 4. # - To see this, note that for the first flip to be only a head, the following is the only sequence possible. # # ``` # H T T T # T H T T # T T H T # T T T H # ``` # heads = 0.5 # tails = 0.5 # ``` # (0.5 * 0.5 * 0.5 * 0.5) * 4 / (0.5 * 0.5 * 0.5 * 0.5) = 4 # ``` # # <img src="images/Screen Shot 2016-05-14 at 10.20.54 PM.png"/> # # *Screenshot taken from [Udacity](https://classroom.udacity.com/courses/st101/lessons/48452978/concepts/487550500923)* # # <!--TEASER_END--> # # Relative Probabilities 2 # Now, I'd like to tell me the ratio of the probability of exactly one head to the probability of # the first flip being heads regardless of the other flips. # **Answer** # - And the answer is 0.5 or 1/2. 
# - To see this, recall that there were four possibilities for the numerator--all equally likely, and if the first flip is heads and we have no other restrictions, the last three flips can be anything. # - So each one has two possibilities and ```2*2*2=8 and 4/8=0.5``` # <img src="images/Screen Shot 2016-05-14 at 10.25.27 PM.png"/> # # *Screenshot taken from [Udacity](https://classroom.udacity.com/courses/st101/lessons/48452978/concepts/487043430923)* # # <!--TEASER_END--> # # Same Coin # - Let's assume there are two coins equally distributed, same probability of having each. # - The first type of coins are fair. The second type of coins have a probability of heads of 0.9--90%. Let's assume you and I both flip a coin. When I flipped, I get the following sequence: HHT # - When you flipped, you get the following sequence: THH # - So please write in this box the probability that we flipped coins of the same type. # **Answer** # - https://discussions.udacity.com/t/explaining-solution-to-problem-set-2-question-3/76329 # # **Problem's given:** # # Probability we randomly choose the fair coin (coin 1): **`P(1)=0.5`** # # # Probability we randomly choose the loaded coin (coin 2): **`P(2)=0.5`** # # # Probability that the coin shows heads GIVEN that it's the fair coin (coin 1): **`P(H|1)=0.5`** # # # Probability that the coin shows heads GIVEN that it's the loaded coin (coin 2): **`P(H|2)=0.9`** # # # My flips: **HHT** # # # Your flips: **THH** # **Problem's aim:** # # The problem asks for the probability that both coins flipped were of the same kind. # # # Someone could argue: "Well that's easy. It's the probability of both people randomly selecting the fair coin (coin 1) plus the probability of both of them randomly selecting the loaded coin (coin 2). 
That is: $%P(1 \cap 1) + P(2 \cap 2)=P(1) \cdot P(1)+P(2) \cdot P(2)=0.5 \cdot 0.5+0.5 \cdot 0.5=0.5$%, since $%P(1 \cap 1)=P(1) \cdot P(1)$% is the joint probability of two independent events, the first person selecting coin 1 and the second person also selecting coin 1. This looks logical; half of the time the two persons select the same coin". The truth table verifies this: # # # -----1st person --------------------------- 2ndperson------- # # <code> coin1 coin1 # coin1 coin2 # coin2 coin1 # coin2 coin2 # </code> # But, wait. There is a catch: the flips were not taken into account! # # # Reading more carefully the problem asks for the probability that both coins flipped were of the same kind, GIVEN the flips observed, i.e. $%P(1|HHT)$% and $%P(2|HHT)$% # # # Note: The sequence of the flips does not change the probability of the flips, i.e. both flips are of equal probability. # # Solution: # # # # Given that we selected a fair coin (coin1), the probability for the flips is $$P(HHT|1)=0.5\cdot 0.5\cdot 0.5=0.125$$ Hence the probability $$P(HHT \cap 1)=P(HHT|1) \cdot P(1)=0.125 \cdot 0.5=0.0625$$ # # # Likewise, given that we selected the loaded coin (coin2), the probability for the flips is $$P(HHT|2)=0.9\cdot 0.9\cdot (1-0.9)=0.081$$ # # $$P(HHT \cap 2)=P(HHT|2) \cdot P(2)=0.081 \cdot 0.5=0.0405$$ # # # Thus $$P(HHT)=P(HHT \cap 1)+P(HHT \cap 2)=0.0625+0.0405=0.103$$ # # # So $$P(1|HHT)= \large \frac {P(HHT \cap 1)} {P(HHT)} \normalsize = \frac {0.0625}{0.103} =0.606796117$$ # # # and $$P(2|HHT)= \large \frac {P(HHT \cap 2)} {P(HHT)} \normalsize = \frac {0.0405}{0.103} =0.393203883$$ # # # Finally, the probability of both persons selecting the same kind of coins is $$P(1|HHT) \cdot P(1|HHT)+P(2|HHT) \cdot P(2|HHT)=0.606796117^2+0.393203883^2=0.522810821$$ # <img src="images/Screen Shot 2016-05-14 at 11.45.19 PM.png"/> # # *Screenshot taken from [Udacity](https://classroom.udacity.com/courses/st101/lessons/48452978/concepts/487441130923)* # # <!--TEASER_END--> # # 
Many Flips # Now, I'd like to ask you about what happens as the number of flips or events, in general, becomes a very large number. Check all that apply. # - The probability of every individual sequence becomes small # - The probability of every number of heads becomes small. So for example, does the probability of having one head or hundred heads or million heads, any specific number does that becomes small. # - Does the probability of every given proportion, for example, one head out of 100 flips, 10 heads out of 100 flips or 1500 heads out of a billion flips becomes small. # - Does every given range of proportions have a smaller and smaller probability. # - Are there some ranges of proportions for which the probability becomes small as we have many flips. # - Check all that apply. Also, assume that the probability of heads is neither 0 or 1. That is, we can get both heads and tails from this coin. # **Answer** # - https://discussions.udacity.com/t/help-me-visualize-many-flips-statements/76150/3 # <code># flips # cases Pr. per case possible cases # 2 2^2 = 4 1/4 = 0.25 HH, HT, TH, TT # 3 2^3 = 8 1/8 = 0.125 HHH, HHT, HTH, HTT, THH, THT, TTH, TTT # 4 2^4 = 16 1/16 = 0.0625 HHHH, HHHT, HHTH, HHTT, HTHH, HTHT, ... # </code> # 1 - The probability of every sequence becomes small. # # The probability of any one sequence is # # $$P(\mbox{any one sequence}) = \frac{1}{\mbox{num permutations}} = \frac{1}{2^{\mbox{flips}} }$$ # # # so more flips, smaller probability # # # 2 - The probability of every number of heads becomes small. # # This could mean 2 things. 
One, the total number of heads: # # <code>P(2 heads) for 2 flips = P(HH) = 1/4 = 0.25 # P(2 heads) for 3 flips = P(HHT)+P(HTH)+P(THH) = 1/8 * 3 = 3/8 = 0.375 # P(2 heads) for 4 flips = P(HHTT)+P(HTHT)+P(HTTH)+ # P(THTH)+P(THHT)+P(TTHH) = 1/16 * 6 = 0.375 # P(2 heads) for 5 flips = 10 * 1/32 = 0.3125 # </code> # which gets smaller with the number of flips or, two, the number of heads in a sequence, like exactly 2 heads followed by tails, in which case we have: # # <code>P(2 heads plus tails) for 2 flips = P(HH) = 1/4 # P(2 heads plus tails) for 3 flips = P(HHT) = 1/8 # P(2 heads plus tails) for 4 flips = P(HHTT) = 1/16 # </code> # which also gets smaller with the number of flips # # # 3 - The probability of every proportion of heads becomes small. # # For example, I want half the flips (i.e., a proportion) to be heads # # <code>P(1/2 of 2 flips are heads) = P(HT) + P(TH) # = 1/4 + 1/4 = 1/2 # P(1/2 of 4 flips are heads) = P(HHTT) + P(HTHT) + P(THTH) + # P(THHT)+ P(TTHH) + P(HTTH) # = 6*(1/2^4) = 3/8 # </code> # 4- The probability of every range of proportions becomes small. ok.. there were several comments about my original explanation of points 4 and 5 being unclear and.. sure enough.. they were. I was not happy with the explanations either so I figured out a way to explain them in a different way without getting too much into combinatorics. # # # First let's figure out the basics. If I flip a coin n times I generate one of $%2^n$% sequences. How many of those possible sequences have k heads? One way to find the answer is to use Pascal's triangle, in which an entry in the triangle is the sum of the two entries immediately above it: # # <img src="images/Screen Shot 2016-05-14 at 11.53.21 PM.png"/> # # Image from [mathisfun.com](mathisfun.com) # # # The sum of the numbers in any of the rows of the triangle gives the powers of 2: 2, 4, 8, 16, 32, 64, 128.. etc. 
which correspond to the number of sequences generated by flipping a coin 1, 2, 3, 4, 5, 6, and 7 times, respectively. In other words, if you flip a coin n times, you have any of $%2^n$% sequences which is also the sum of the numbers of the n-th row of Pascal's triangle (when the row at the top is the 0-th row). # # # The entries in Pascal's triangle can be interpreted as the number of times that a coin will land in, say, heads. For example, if you flip a coin 4 times, you will get one of $%2^4 = 16$% possible sequences. Looking at the 4th row of Pascal's triangle (the one whose entries add up to 16) we have the following correspondance: # # <code>1 = # of sequences with 0 heads = TTTT # 4 = # of sequences with 1 heads = TTTH, TTHT, THTT, HTTT # 6 = # of sequences with 2 heads = TTHH, THTH, HTTH, THHT, HTHT, HHTT # 4 = # of sequences with 3 heads = HHHT, HHTH, HTHH, HHHT # 1 = # of sequences with 4 heads = HHHH # </code> # Likewise, if we flip the coin 7 times we will have $%2^7 = 128$% cases and, looking at Pascal's triangle, # # we find that 21 of these sequences have 2 heads, 35 of them have 3 heads, 6 of them have 7 heads, and so on. # # # Another way to find any of the entries of Pascal's triangle is using the binomial coefficient: # # # $$\binom{n}{k} = \frac{n!}{k!(n-k)!}$$ # # # or $%C(n, k)$%, for short, where $%n!$% is the factorial of n, i.e., # # # $$ n! = n(n-1)(n-2)\cdots 2\cdot 1 $$ # # # So, for example, if we want to find how many of the $%2^7=128$% sequences generated with 7 flips have 2 heads we have # # <code>C(7,2) = 7! / (2! (7-2)!) # = 7! / (2! 5!) # = 5040 / (2 * 120) # = 21 # </code> # which agrees with the entry in Pascal's triangle. # # # Now back to the question. We are going to consider all of sequences that have less than 50% heads (i.e., the proportion is a given percentage( e.g., 25% heads) while the range of proportions is from 0 to 50%). We'll consider only the odd number of flips that gives us a nice boundary for the 50% bound. 
#
# <code>Flips   # sequences   # seqs with [0-50)% heads   P
# 1       2^1 = 2     1 (i.e., H)       1*(1/2) = 0.5
# 3       2^3 = 8     1+3 = 4           4*(1/8) = 0.5
# 5       2^5 = 32    1+5+10 = 16       16*(1/32) = 0.5
# 7       2^7 = 128   1+7+21+35 = 64    64*(1/128) = 0.5
# etc.
# </code>
# Thus, the probability of the sequences having less than 50% heads does not get smaller but instead is constant and, hence, point 4 is False.
#
#
# 5- The probability of some ranges of proportions becomes small. Consider the probability of having sequences in which the proportion of heads is anywhere between 0 and 33%. Here we will use a larger Pascal's triangle that includes the entries for 9 and 12 flips:
#
#
#
# <img src="images/Screen Shot 2016-05-14 at 11.54.00 PM.png"/>
# from [43things.com](43things.com)
#
#
# So now, we consider sequences of n flips where n is a multiple of 3, which gives us a nice boundary for the 33%, i.e., a third of the flips or less have to be heads:
#
# <code>Flips   # sequences    # seqs with [0-33]% heads        P
# 3       2^3 = 8      1+3 = 4 (TTT, HTT, THT, TTH)   4/8 = 0.5
# 6       2^6 = 64     1+6+15 = 22                    22/64 = 0.343
# 9       2^9 = 512    1+9+36+84 = 130                130/512 = 0.254
# 12      2^12 = 4096  1+12+66+220+495 = 794          794/4096 = 0.1938
# etc.
# </code>
# where, for example, for 12 flips we have 1 sequence with 0 heads, 12 with 1 head, 66 with 2 heads, 220 with 3 heads and 495 with 4 heads, i.e., up to 33% of the possible 12 flips.
#
#
# A way to visualize this is the following. Look at the numbers that correspond to the 33%: 3 in the 3rd row, 15 in the 6th, 84 in the 9th and 495 in the 12th. Now, notice that all these numbers lie along a line that passes through the apex of the Pascal triangle. You could imagine continuing this line downwards, as you add more and more flips. For all these rows, all the entries to the left of this line correspond to sequences that have between 0 and 33% heads. However, notice that the values of these entries are progressively smaller with respect to those that accumulate in the middle of the rows.
# Thus, the larger the number of flips, the smaller the percentage of sequences that will have between 0-33% heads.
#
#
# By the same token, the entries to the right of the line, which correspond to the 33-100% heads case, have a number of heads that increases with the number of flips; this is expected as the 0-33% case is the complement of the 33-100% case, i.e., if one becomes larger, the other one should become smaller.
#
#
# You can 'swing' this line to the right, about the apex, until it is vertical. Now the triangle is split in equal parts and the entries on the left and right have the same values. Hence, at this point, the number of sequences that have 0-50% heads is exactly the same as those that have 50-100% heads. This was the case used in point 4 to show that not every range of proportions becomes smaller.
#
#
# In summary, the probability of the sequences that have 33% or less heads (i.e., a range of proportions) grows smaller with the number of flips and point 5 is True.
# <img src="images/Screen Shot 2016-05-14 at 11.59.17 PM.png"/>
#
# *Screenshot taken from [Udacity](https://classroom.udacity.com/courses/st101/lessons/48452978/concepts/487245240923)*
#
# <!--TEASER_END-->

# # Is it Fair 1
# - Let's assume we have a fair coin and a loaded coin and the loaded coin has a probability of heads of 0.9.
# - Let's assume that we know that the probability of actually having a loaded coin is zero.
# - In which of these cases is the probability of being fair given the flips <0.5? Check all that apply. Note that `4 H 0 T` means four heads and zero tails.

# **Answer**

# - In none of these cases is the probability of a fair coin given the data less than 0.5.
# - We know this simply because the probability of having a loaded coin at all is zero.
# - So if that doesn't make intuitive sense we can apply Bayes' Rule: since P of loaded is zero, P of fair is one, and so P of flips given fair is always going to be equal to P of flips because that's the only way you can get the flips.
# - The alternative is zero and so this whole expression will always be one. You could also do this with the reverse in P of loaded and you'd see it would always come out to be zero because this term will be zero. # <img src="images/Screen Shot 2016-05-15 at 12.04.33 AM.png"/> # # *Screenshot taken from [Udacity](https://classroom.udacity.com/courses/st101/lessons/48452978/concepts/487236350923)* # # <!--TEASER_END--> # # Is it Fair 2 # Now let me ask you if the probability of having a loaded coin is 0.1, what happens to the probability of fair given flips in each of these cases. Check all that have a probability of a fair coin of less than 0.5. # **Answer** #
intro_to_statistics/Problem Set 2 - Probability.ipynb
# -*- coding: utf-8 -*-
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .jl
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Julia nodeps 0.6.2
#     language: julia
#     name: julia-nodeps-0.6
# ---

# load packages
using RData, LowRankApprox, DataFrames, Gadfly, Distributions

# load data and label
data = load("../data/1kgdata.rds");
label = Array(readtable("../data/label.1kg.txt", separator = ' ', header = false))[:];

# +
# we subsample SNPs by taking 1,21,41,...th in order.
X = copy(data["X"]);
n,p = size(X);

# build a one-hot indicator matrix W from the population labels
unique_label = unique(label);
W = zeros(n,length(unique_label));
for i = 1:length(unique_label)
    ind = find(label .== unique_label[i]);
    W[ind,i] = 1;
end

# +
# let's simply draw histogram of MAFs
set_default_plot_size(15cm,10cm)
Gadfly.plot(x = mean(X,1)/2, Geom.histogram)

# can we believe that the MAFs in ancestral population also looks like this?
# I think the answer is fairly positive.
# -

# We first make a function to solve a binomial version of ASH. We assume that
#
# $$
# \log p(x|\mu,\tau) = \tau x \log \mu + \tau (2-x) \log (1-\mu) + a(x,\tau)
# $$
#
# and the distribution has mean $2\mu$ and variance $2\mu(1-\mu)/\tau$.
#
# Suppose we draw independent samples $x_1,\cdots,x_n$ from $\mu_1,\cdots,\mu_n$ and $\tau_1,\cdots,\tau_n$.
#
# Now let
#
# $$
# \log p(\mu|\nu,t) = t \nu \log \mu + t (2-\nu) \log (1-\mu) + h(\nu,t)
# $$
#
# This is a Beta$(t \nu + 1, t (2-\nu) + 1)$ distribution.
#
# Next, since MAFs can be approximated by a scale mixture of Beta$(2\tau+1,1)$ and Beta$(1,2\tau+1)$ distributions, i.e. $\nu = 0,2$. That is,
#
# $$
# \log p(\mu|\phi,t) = \sum_{k=1}^m \left[\phi_k p(\mu|0,t_k) + \phi_{k+m} p(\mu|1,t_k)\right]
# $$
#
# can be used to model MAFs in ancestral population.
# +
# Visualise the prior component densities p(μ|t,𝜈) on a fine grid over (0,1)
# for a ladder of concentration values t, once for 𝜈 = 0 and once for 𝜈 = 1.
t = [0.001;1;10;100;1e8];
n = 1000
x = linspace(0.01,1-0.01,n+1)

# densities for 𝜈 = 0 (mass piled near μ = 0 as t grows)
nu = 0;
y = pdf.(Beta.(t*nu+1,t*(2-nu)+1),x')';
df = DataFrame(x = repeat(x,outer = length(t)),
               y = y[:],
               t = repeat(["t = 0";"t = 1";"t = 10";"t = 100";"t = ∞"], inner = n+1))
F1 = Gadfly.plot(df, x = :x, y = :y, color = :t, Geom.line,
                 Coord.Cartesian(ymax = 15),
                 Guide.title("Shape of densities p(μ|t,𝜈) when 𝜈 = 0"),
                 Guide.ylabel("p(μ|t,𝜈)"),
                 Guide.xlabel("μ"))

# densities for 𝜈 = 1 (symmetric around μ = 1/2)
nu = 1;
y = pdf.(Beta.(t*nu+1,t*(2-nu)+1),x')';
df2 = DataFrame(x = repeat(x,outer = length(t)),
                y = y[:],
                t = repeat(["t = 0";"t = 1";"t = 10";"t = 100";"t = ∞"], inner = n+1))
F2 = Gadfly.plot(df2, x = :x, y = :y, color = :t, Geom.line,
                 Coord.Cartesian(ymax = 15),
                 Guide.title("Shape of densities p(μ|t,𝜈) when 𝜈 = 1"),
                 Guide.ylabel("p(μ|t,𝜈)"),
                 Guide.xlabel("μ"))

# arrange both panels side by side and save to SVG
set_default_plot_size(25cm,10cm)
F = Gadfly.hstack(F1,F2)
F |> SVG("Binomial_ASH_fig1.svg");
# -

# ![](Binomial_ASH_fig1.svg?sanitize=true)

# Note that this fits into the generalized ASH framework that I proposed.
#
# Next, we will make a function to estimate $\mu$ given $x$ and $\tau$ using the likelihood and the prior above, i.e.
#
# $$
# \log p(x|\mu,\tau) = \tau x \log \mu + \tau (2-x) \log (1-\mu) + a(x,\tau)
# $$
#
# $$
# \log p(\mu|\phi,t) = \sum_{k=1}^m \left[\phi_k p(\mu|0,t_k) + \phi_{k+m} p(\mu|1,t_k)\right]
# $$
#
# We use a multiplicative grid $t_k = 10^{-5 + k}$ for $k = 1,2,\cdots,9$.
#
# A marginal is
#
# $$
# p(x|\phi,t) = \int p(x|\mu,\tau) p(\mu|\phi,t) d\mu = \sum_{k=1}^m \left[ \phi_{k} \int p(x|\mu,\tau) p(\mu|0,t_k) d\mu + \phi_{k+m} \int p(x|\mu,\tau) p(\mu|1,t_k) d\mu \right]
# $$
#
# Suppose $x_1,\cdots,x_n$ are independently drawn from marginal with parameters $\mu_1,\cdots,\mu_n$ and $\tau_1,\cdots,\tau_n$.
#
# First,
#
# $$
# H(\nu,t)^{-1} = \exp[-h(\nu,t)] = B(t\nu+1,t(2-\nu) + 1) = \frac{\Gamma(t\nu+1)\Gamma(t(2-\nu)+1)}{\Gamma(2t+2)}
# $$
#
# A marginal normalizing constant is $H(\tau x + t\nu + 1, \tau (2-x) + t (2-\nu) + 1)^{-1}$ since
#
# $$
# \log p(x_j|\mu_j,\tau_j) p(\mu_j|\nu,t_k) = (\tau_j + t_k) \frac{\tau_j x_j + t_k \nu}{\tau_j + t_k} \log \mu_j + (\tau_j + t_k) \left( 2 - \frac{\tau_j x_j + t_k \nu}{\tau_j + t_k} \right) \log (1-\mu_j) + \text{const}
# $$
#
# Therefore, the problem of maximizing a marginal likelihood $p(x|\phi,t)$ over $\phi$ reduces to the mixsqp problem
#
# $$
# \textrm{arg min}_{\phi \in \mathcal{S}} -\frac1n \sum_{j=1}^n \log \sum_{k=1}^{2m} L_{jk} \phi_k
# $$
#
# where $\mathcal{S}$ is a probability simplex of appropriate dimension and
#
# $$
# L_{jk} = B(1,2t+1)^{-1} B(\tau x + 1, \tau (2-x) + 2t + 1),\quad k = 1,\cdots, m \qquad \\
# L_{jk} = B(t+1,t+1)^{-1} B(\tau x + t + 1, \tau (2-x) + t + 1),\quad k = m+1,\cdots,2m
# $$
#
# Note that we have ignored the normalizing constant $A(x_j,\tau_j)$, which is the same across the $j$-th row. It has nothing to do with the objective function in terms of minimization.
# +
# simulation study
srand(10)  # fix the RNG seed for reproducibility (Julia 0.6 API)

# sample μ from a mixture of Betas (spike-like components near 0 plus symmetric components)
n_div_8 = 500;
μ = [rand.(Beta.(1,50),n_div_8);rand.(Beta.(1,100),2 * n_div_8); rand.(Beta.(1,1),3 * n_div_8);
     rand.(Beta.(15,15),n_div_8);rand.(Beta.(30,30),n_div_8);]
μ[μ .> 0.5] = 1 - μ[μ .> 0.5]; # major allele frequency to minor allele frequency
n = length(μ);

# sample inbreeding coefficient from Betas
f = [rand.(Beta.(1,5),n_div_8*4);rand.(Beta.(1,10),n_div_8*4)];

# sample x from overdispersed Binomial(2,μ) with inbreeding coefficient f
# we sample a n by r matrix by x_{jl} ~ Binomial(2,μ_j) for l = 1,...,r
# (a uniform draw is bucketed by the cumulative genotype probabilities; the
#  second assignment overwrites the highest bucket with genotype 2)
r = 100;
temp = rand(n,r);
x = zeros(n,r)
x[temp .< (1-f).*μ.^2 + f.*μ + 2*(1-f).*μ.*(1-μ)] = 1;
x[temp .< (1-f).*μ.^2 + f.*μ] = 2;

# define mafs and if maf_j > 0.5 then replace it by 1 - maf_j
maf = mean(x,2)/2;
maf[maf .> 0.5] = 1 - maf[maf .> 0.5];

# plot the result
set_default_plot_size(35cm,10cm)
F1 = Gadfly.plot(x = μ, Geom.histogram, Guide.title("histogram of sampled μ"), Guide.xlabel("μ"));
F2 = Gadfly.plot(x = f, Geom.histogram, Guide.title("histogram of sampled f"), Guide.xlabel("f"));
F3 = Gadfly.plot(x = maf, Geom.histogram, Guide.title("histogram of sampled MAF"), Guide.xlabel("MAF"),
                 Coord.Cartesian(xmax = 0.5));
F = hstack(F1,F2,F3);
F |> SVG("Binomial_ASH_fig2.svg");
# -

# ![](Binomial_ASH_fig2.svg?sanitize=true)

# +
# I think we can make a general ash if we specify H or h = log H
# actually we only need to specify h(prior) - h(posterior) or H(prior)/H(posterior)
# h_diff returns the n-by-2m matrix of per-observation, per-component log-likelihood
# terms (up to a per-row constant, which the mixsqp objective is invariant to)
function h_diff(x,τ,ν,t)
    # elementwise log Beta function
    function B(a,b)
        return lgamma.(a) + lgamma.(b) .- lgamma.(a + b)
    end
    return -B(ν .* t + 1, (2 - ν) .* t + 1)' .+ B(x .* τ .+ (ν .* t)' + 1, (2-x) .* τ .+ ((2-ν) .* t)' + 1)
end

# mixsqp
include("../src/mixsqp.jl")

# ash function: empirical-Bayes shrinkage for binomial-type likelihoods.
#   x, τ   — observations and their precisions
#   h_diff — builds the log-likelihood matrix (see above)
#   ν, t   — component grid (here: spike components ν = 0 and symmetric ν = 1)
# Returns a Dict with the fitted weights (:p), kept grid (:ν, :t), likelihood
# matrices, component posterior probabilities/means, and posterior means (:pm).
function ash_binomial(x::Array, τ::Array, h_diff::Function;
                      n::Int = length(x),
                      ν::Array = [zeros(7);ones(5)],
                      t::Array = [5.0.^(-3:3);5.0.^(-2:2)])
    # construct a likelihood matrix; subtracting the per-row max before exp is
    # purely for numerical stability (it cancels in the mixture weights)
    loglik = h_diff(x,τ,ν,t)
    lik = exp.(loglik .- maximum(loglik,2));

    # solve mixsqp for the mixture weights
    out = mixsqp(lik, verbose = false)["x"];
    ind = find(out .> 0);  # keep only components with nonzero weight

    # the posterior is a mixture of Beta distributions
    # Beta(x .* τ .+ (ν .* t)' + 1, (2-x) .* τ .+ ((2-ν) .* t)' + 1)

    # component posterior probability
    L = lik[:,ind];
    cpp = L .* out[ind]';
    cpp = cpp ./ sum(cpp,2);

    # component posterior mean
    cpm = (x .* τ .+ (ν[ind] .* t[ind])' + 1) ./ (2 * τ .+ 2 * t[ind]' + 2)

    # posterior mean
    pm = sum(cpp .* cpm, 2)

    return Dict([ (:p, out[ind]), (:L, L), (:ν, ν[ind]), (:t, t[ind]), (:lik, lik),
                  (:cpp, cpp), (:cpm, cpm), (:pm, pm) ])
end

# +
# per-SNP precision: r draws each, scaled down by (f+1)
# NOTE(review): presumably an overdispersion correction for inbreeding — confirm derivation.
τ = r./(f+1);
out = ash_binomial(2 * maf,τ,h_diff)

# evaluate the fitted and the true mixture priors on a fine grid
μ_hat = linspace(0.0005,1-0.0005,1000)
y = pdf.(Beta.(out[:ν] .* out[:t] + 1, (2-out[:ν]) .* out[:t] + 1), μ_hat')';
y = sum(y .* out[:p]',2);
y2 = [pdf.(Beta.(1,50), μ_hat'); pdf.(Beta.(1,100), μ_hat'); pdf.(Beta.(1,1), μ_hat');
      pdf.(Beta.(15,15), μ_hat'); pdf.(Beta.(30,30), μ_hat')];
y2 = sum(y2 .* [1;2;3;1;1]/8,1)';  # true mixture weights match the simulation above

# fold the distribution at 0.5
μ_hat = μ_hat[1:500];
y = y[1:500] + y[1001 - (1:500)]
y2 = y2[1:500] + y2[1001 - (1:500)]

# plot the result
set_default_plot_size(25cm,10cm)
F1 = Gadfly.plot(x = μ_hat, y = y, Geom.bar, Guide.title("estimated mixture prior"),
                 Guide.ylabel("prior"), Guide.xlabel("μ_hat"), Coord.Cartesian(ymax = 40))
F2 = Gadfly.plot(x = μ_hat, y = y2, Geom.bar, Guide.title("true mixture prior"),
                 Guide.xlabel("μ"), Coord.Cartesian(ymax = 40));
F3 = Gadfly.plot(x = μ, y = out[:pm], Geom.point, Guide.title("true μ vs. hat μ"),
                 Guide.xlabel("true μ"), Guide.ylabel("posterior mean"), Coord.Cartesian(ymax = 0.5));
F = hstack(F1,F2,F3)
F |> SVG("Binomial_ASH_fig3.svg");
# -

# ![](Binomial_ASH_fig3.svg?sanitize=true)
docs/Binomial_ASH.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3.6 # language: python # name: python36 # --- # # Environment Setup # ### Complete NLTK installation # # NLTK is a rich toolkit with modular components, many of which are not installed by default. To install all the components, run the following cell. # # *Note that this can take upto 10-15 mins* import nltk nltk.download('all')
Hands-on lab/notebooks/00 Init.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# + [markdown] colab_type="text" id="ALSfsSFjeV1o"
# # Training the Boston Housing Dataset using PySyft and SocketWorker
#
# This tutorial is a 3 notebook tutorial. The partners notebooks are the notebooks entitled `SocketWorker Server Alice.ipynb` and `SocketWorker Server Bob.ipynb`. They are in the same folder as this notebook. You should execute this notebook **AFTER** you have executed the others.
#
# This tutorial is an example of training a neural network in a federated fashion on the Boston Housing dataset using socketworkers, python instances Alice and Bob running in the two other tabs you have opened in your browser.
#
# Before starting with this notebook, we recommend looking at `toy/Federated Learning Example.ipynb` which provides a basic example.
#
# Performance: achieves ~20 MSE in 10 epochs in 25s _(Perf. measured on [colab.research.google.com/17upxC...](https://colab.research.google.com/drive/17upxCYJmJ6Zoxv0KjiJ1ZbchlJybsfhs))_
#
# _This notebook doesn't intend to provide a good prediction model and rather focuses on computation overhead due to federated learning._
#
# The base example without federated learning can be found here: [colab.research.google.com/drive/1ne4ra...](https://colab.research.google.com/drive/1ne4rap-8nD6-jABV94fkPBHvtPj-RrKY#scrollTo=i_gUp-uFfwGL)
#
#
# + [markdown] colab_type="text" id="nFvJUEHOfA-r"
# # Setting Up
# + colab={"base_uri": "https://localhost:8080/", "height": 69} colab_type="code" id="ZgomH7s4R5cT" outputId="83725aac-8149-4291-8bc1-8e9c299e0428"
from __future__ import print_function
import argparse
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torchvision import datasets, transforms
from torch.autograd import Variable
from torch.utils.data import TensorDataset, DataLoader
# from keras.datasets import boston_housing

print(torch.__version__)

# Training settings — parsed with an empty argv so the declared defaults apply
# when running inside a notebook.
parser = argparse.ArgumentParser(description='PyTorch Example')
parser.add_argument('--batch-size', type=int, default=8, metavar='N',
                    help='input batch size for training (default: 8)')
parser.add_argument('--test-batch-size', type=int, default=8, metavar='N',
                    help='input batch size for testing (default: 8)')
parser.add_argument('--epochs', type=int, default=10, metavar='N',
                    help='number of epochs to train (default: 10)')
parser.add_argument('--lr', type=float, default=0.001, metavar='LR',
                    help='learning rate (default: 0.001)')
parser.add_argument('--momentum', type=float, default=0.0, metavar='M',
                    help='SGD momentum (default: 0.0)')
parser.add_argument('--seed', type=int, default=1, metavar='S',
                    help='random seed (default: 1)')
parser.add_argument('--log-interval', type=int, default=10, metavar='N',
                    help='how many batches to wait before logging training status')
args = parser.parse_args([])

torch.manual_seed(args.seed)  # reproducible weight init / shuffling
kwargs = {} # - # ### Loading the dataset import pickle f = open('data/boston_housing.pickle','rb') ((X, y), (X_test, y_test)) = pickle.load(f) f.close() # + colab={"base_uri": "https://localhost:8080/", "height": 52} colab_type="code" id="caoZ_dM0wzdf" outputId="1a138f2e-f184-4b94-a93b-486617bde21a" X = torch.from_numpy(X).type(torch.FloatTensor) y = torch.from_numpy(y).type(torch.FloatTensor) X_test = torch.from_numpy(X_test).type(torch.FloatTensor) y_test = torch.from_numpy(y_test).type(torch.FloatTensor) # preprocessing mean = X.mean(0, keepdim=True) dev = X.std(0, keepdim=True) mean[:, 3] = 0. # the feature at column 3 is binary, dev[:, 3] = 1. # so I'd rather not standardize it X = (X - mean) / dev X_test = (X_test - mean) / dev train = TensorDataset(X, y) test = TensorDataset(X_test, y_test) train_loader = DataLoader(train, batch_size=args.batch_size, shuffle=True, **kwargs) test_loader = DataLoader(test, batch_size=args.test_batch_size, shuffle=True, **kwargs) # + [markdown] colab_type="text" id="Az9PH1BrfK46" # # Neural Network Structure # + colab={} colab_type="code" id="R5TyXfcOXp1w" class Net(nn.Module): def __init__(self): super(Net, self).__init__() self.fc1 = nn.Linear(13, 32) self.fc2 = nn.Linear(32, 24) self.fc3 = nn.Linear(24, 1) def forward(self, x): x = x.view(-1, 13) x = F.relu(self.fc1(x)) x = F.relu(self.fc2(x)) x = self.fc3(x) return x model = Net() optimizer = optim.SGD(model.parameters(), lr=args.lr, momentum=args.momentum) # + [markdown] colab_type="text" id="8r78FvSffTp0" # # Hooking into Pytorch # + colab={} colab_type="code" id="ai5bqlEWSFgf" import syft import syft as sy from syft.core import utils import torch import torch.nn.functional as F import json import random from syft.core.frameworks.torch import utils as torch_utils from torch.autograd import Variable local_worker = sy.SocketWorker(id="local", port=2008, hook=None, is_client_worker=False) hook = sy.TorchHook(local_worker=local_worker, verbose=False) me = hook.local_worker 
me.hook = hook

# connect to the two remote workers started by the companion server notebooks
bob = sy.SocketWorker(id="bob", port=2006, hook=hook, is_pointer=True, is_client_worker=False)
alice = sy.SocketWorker(id="alice", port=2007, hook=hook, is_pointer=True, is_client_worker=False)
compute_nodes = [bob, alice]
me.add_workers([bob, alice])
#bob.add_workers([me, alice])
#alice.add_workers([me, bob])

# + [markdown] colab_type="text" id="apSGFY_fv_7z"
# **Send data to the worker** <br>
# Usually they would already have it, this is just for demo
# + colab={} colab_type="code" id="UByFBzz2wARz"
# Distribute the training batches round-robin across the compute nodes.
# After .send(), `data`/`target` are pointers to tensors living on that worker.
train_distributed_dataset = []

for batch_idx, (data,target) in enumerate(train_loader):
    print(batch_idx)
    data = Variable(data)
    target = Variable(target.float())
    data.send(compute_nodes[batch_idx % len(compute_nodes)])
    target.send(compute_nodes[batch_idx % len(compute_nodes)])
    train_distributed_dataset.append((data, target))

# + [markdown] colab_type="text" id="9vhghBr1fpPE"
# # Training Function
# + colab={} colab_type="code" id="Cgw89i1HSU5X"
def train(epoch):
    """Run one federated epoch.

    For each distributed batch: ship the model to the worker that holds the
    batch, compute the MSE loss and gradients remotely, pull the model back,
    and apply the optimizer step locally.
    """
    model.train()
    for batch_idx, (data,target) in enumerate(train_distributed_dataset):
        worker = data.location  # the remote worker holding this batch
        model.send(worker)

        optimizer.zero_grad()
        # update the model
        pred = model(data)
        loss = F.mse_loss(pred, target.float())
        loss.backward()
        model.get()  # retrieve the model (with gradients) from the worker
        optimizer.step()

        if batch_idx % args.log_interval == 0:
            loss.get()  # pull the remote loss value back only when logging
            print('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}'.format(
                epoch, batch_idx * args.batch_size, len(train_loader) * args.batch_size,
                100. * batch_idx / len(train_loader), loss.data[0]))

# + [markdown] colab_type="text" id="i_gUp-uFfwGL"
# # Testing Function
# + colab={} colab_type="code" id="TNED3GD6Y3Va"
def test():
    """Evaluate the model locally on the held-out test set.

    Prints the average MSE over all test examples (losses are summed per
    batch with size_average=False, then divided by the dataset size).
    """
    model.eval()
    test_loss = 0
    for data, target in test_loader:
        data, target = Variable(data, volatile=True), Variable(target)
        output = model(data)
        test_loss += F.mse_loss(output, target.float(), size_average=False).data[0] # sum up batch loss
        # (removed an unused `pred = output.data.max(...)` left over from a
        # classification example — this is a regression task)

    test_loss /= len(test_loader.dataset)
    print('\nTest set: Average loss: {:.4f}\n'.format(test_loss))

# + [markdown] colab_type="text" id="EmvTEpIbfzoC"
# # Training The Dataset
# + colab={"base_uri": "https://localhost:8080/", "height": 1094} colab_type="code" id="9JWkpGtoY48Y" outputId="4c6fbc4d-29f4-42bb-cd43-e7a76d8ea04e"
# %%time
for epoch in range(1, args.epochs + 1):
    train(epoch)

# + [markdown] colab_type="text" id="V5nJSaJ4f9tk"
# # Calculating Performance
# + colab={"base_uri": "https://localhost:8080/", "height": 69} colab_type="code" id="Il8R8NWBd-Xb" outputId="f01f338d-629e-48cd-faa9-e696d1e8b919"
test()

# + colab={} colab_type="code" id="ylB9_i-CVZM6"
examples/SocketWorker Boston Housing Client.ipynb