code
stringlengths
38
801k
repo_path
stringlengths
6
263
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# +
# import libraries
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split, cross_validate
from sklearn.naive_bayes import GaussianNB
from sklearn.metrics import confusion_matrix, classification_report
from sklearn.feature_selection import SelectKBest, f_classif, mutual_info_classif
from plot_confusion_matrix import plot_confusion_matrix

import warnings
warnings.filterwarnings('ignore')

# +
# Downloaded dataset for credit card fraud from Kaggle
# -

data = pd.read_csv('data.csv')

# Show first 5 rows of the csv file
data.head()

# In the Class column, 1 indicates that the transaction is fraudulent,
# and 0 means that the transaction is genuine
data.describe().round(decimals=2)

# Column names
print('Columns : ', list(data))
print('Number of Columns : ', len(list(data)))

# In the dataset, each row is labelled as genuine (Class == 0) or fraud (Class == 1).
# Each filter below yields a dataframe whose length is the count of that class.
n_genuine = len(data[data['Class'] == 0])
n_fraud = len(data[data['Class'] == 1])

# +
# Plot the number of genuine and fraud transactions as a pie chart to get a
# visual representation of the proportions of fraud and genuine transactions
print("Number of Genuine Transactions : ", n_genuine)
print("Number of Fraudulent Transactions : ", n_fraud)

plt.pie([n_genuine, n_fraud], labels=['Genuine', 'Fraud'], radius=1)
plt.show()
# The pie chart shows that the data is highly imbalanced.
# -

# ## Selecting the best features

# X includes all rows of all columns excluding the last one (the features)
# Y includes all rows of the last column (the Class label)
X, Y = data.iloc[:, :-1], data.iloc[:, -1]

X.head()
Y.head()

# Using the f_classif score to select the k best features.
# k is specified as a parameter to the SelectKBest constructor;
# fit() computes the scores and selects the k best features.
k = 10
k_best = SelectKBest(f_classif, k=k)
k_best.fit(X, Y)

# ### Discard all the "bad" features

# +
# get_support() returns a boolean mask saying whether the i-th feature is
# among the k best or not. drop() then discards the "bad" features.
mask = k_best.get_support()
not_mask = np.logical_not(mask)
all_features = np.array(list(X))
best_features = all_features[mask]
bad_features = all_features[not_mask]
print("Best Features : ", best_features)
print("Bad Features : ", bad_features)
# -

X = X.drop(bad_features, axis=1)
X.head()

# ### Plot distribution plots of best features and bad features to observe the distribution of the data

# +
# If the distribution plots of genuine and fraudulent data lie on top of each other,
# it means that using this column we will not be able to distinguish between genuine
# and fraudulent transactions.
# However, if the plots have different shapes, then using this column we can
# distinguish between the two.
def plot_fraud_genuine(features, data):
    """Plot overlaid distribution plots of fraud vs. genuine values for each feature.

    Parameters
    ----------
    features : iterable of str
        Column names of `data` to plot (at most 25, one per 5x5 subplot cell).
    data : pandas.DataFrame
        Transactions with a 'Class' column (0 = genuine, 1 = fraud).
    """
    plt_index = 0
    plt.figure(figsize=(10, 10))
    plt.subplots_adjust(top=0.99, bottom=0.01, hspace=1.5, wspace=0.4)
    for feature in features:
        plt_index += 1
        feature_data = pd.concat([data[feature], data['Class']], axis=1)
        fraud = feature_data[data['Class'] == 1]
        genuine = feature_data[data['Class'] == 0]
        # BUG FIX: was `if len(genuine > 10000):`, which takes the length of an
        # elementwise comparison (always the full length, so truthy whenever
        # non-empty). The intent is to subsample only large genuine sets.
        if len(genuine) > 10000:
            genuine = genuine[::100]
        plt.subplot(5, 5, plt_index)
        sns.distplot(fraud[feature], label='fraud')
        sns.distplot(genuine[feature], label='genuine')
        plt.title(feature)
        # plt.legend()
# -

plot_fraud_genuine(best_features, data)
# The plots do not have majority overlapping portions

plot_fraud_genuine(bad_features, data)
# The plots have majority portions overlapping

# Split the dataset into training and testing data
X_train, X_test, Y_train, Y_test = train_test_split(X, Y, test_size=0.2)

# ## Cross Validation
# The data is divided into k equal parts. Each chunk in turn is used as
# testing data and the rest of the chunks are used as the training data.
#
# Since the data is highly imbalanced, cross validation is used.
# The model is trained on the training dataset using cross validation.
#
# ## Why we use cross validation?
# We have a very small number of fraud transactions.
#
# In order to make the model more general.
nb = GaussianNB()
cv_results = cross_validate(nb, X_train, Y_train, cv=10, scoring='recall',
                            return_train_score=True, return_estimator=True)

print('Training scores from each fold : ', cv_results['train_score'])

# Keep the estimator from the fold with the best training recall
max_score_index = np.argmax(cv_results['train_score'])
best_estimator = cv_results['estimator'][max_score_index]

# ## Evaluate and visualize using Confusion Matrix.
def display_results(estimator, X, Y):
    """Print a classification report and plot the confusion matrix for
    `estimator`'s predictions on (X, Y), labelling classes Genuine/Fraud."""
    predictions = estimator.predict(X)
    print(classification_report(Y, predictions))
    conf_mat = confusion_matrix(Y, predictions)
    plot_confusion_matrix(conf_mat, classes=['Genuine', 'Fraud'],
                          title="Fraud Detection")

display_results(best_estimator, X_test, Y_test)
display_results(best_estimator, X_train, Y_train)
Python/Credit_Card_Fraud_detection/Credit_Card_Fraud_Detection.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # 14 - Difference-in-Difference # # ## Three Billboards in the South of Brazil # # I remember when I worked with marketing and a great way to do it was with internet advertisement. Not because it is very efficient (although it is), but because it is very easy to know if it's effective or not. With online marketing, you have a way of knowing which customers saw the ad and you can track them with cookies to see if they ended up on your landing page or clicked some download button. You can also use machine learning to find prospects that are very similar to your customers and present the ad only to them. In this sense, online marketing is very precise: you target only those you want to and you can see if they respond as you would like them to. # # But not everyone is susceptible to online marketing. Sometimes you have to resort to less precise techniques, like a TV campaign or placing a billboard down the street. Usually, diversity of marketing channels is something marketing departments look for. But if online marketing is a professional fishing rod to catch that specific type of tuna, billboard and TV are giant nets you throw at a fish shoal and hope to catch at least some big ones. Another problem with billboard and TV ads is that it is much harder to know how effective they are. Sure, you could measure the purchase volume, or whatever you want to drive, before and after placing a billboard somewhere. If there is an increase, there is some evidence that the marketing is effective. But how would you know if this increase is not just some natural trend in the awareness of your product? In other words, how would you know the counterfactual \\(Y_0\\) of what would have happened if you didn't set up the billboards in the first place? 
# # ![img](./data/img/diff-in-diff/secrets.png) # # One technique to answer these types of questions is Difference-in-Difference, or diff-in-diff for close friends. Diff-in-diff is commonly used to assess the effect of macro interventions, like the effect of immigration on unemployment, the effect of gun law changes in crime rates or simply the difference in user engagement due to a marketing campaign. In all these cases, you have a period before and after the intervention and you wish to untangle the impact of the intervention from a general trend. As a motivating example, let's look at a question similar to the one I had to answer. # # In order to figure out how good billboards were as a marketing channel, we placed 3 billboards in the city of Porto Alegre, the capital of the state of Rio Grande do Sul. We wanted to see if that boosted deposits into our savings account. As a note for those not very familiar with Brazilian geography, Rio Grande do Sul is part of the south of the country, one of the most developed regions. # # Having this in mind, we decided to also look at data from another capital from the south, Florianopolis, the capital city of the state of Santa Catarina. The idea is that we could use Florianopolis as a control sample to estimate the counterfactual \\(Y_0\\) when compared to Porto Alegre (by the way, this was not the true experiment, which is confidential, but the idea is very similar). We placed the billboard in Porto Alegre for the entire month of June. The data we have looks like this: # + import warnings warnings.filterwarnings('ignore') import pandas as pd import numpy as np from matplotlib import style from matplotlib import pyplot as plt import seaborn as sns import statsmodels.formula.api as smf # %matplotlib inline style.use("fivethirtyeight") # - data = pd.read_csv("data/billboard_impact.csv") data.head() # Remember that deposits are our outcome variable, the one we wish to increase with the billboards. 
POA is a dummy indicator for the city of Porto Alegre. When it is zero, it means the samples are from Florianopolis. Jul is a dummy for the month of July, or for the post intervention period. When it is zero it refers to samples from May, the pre-intervention period. # # ## DID Estimator # # To avoid confusion between Time and Treatment, from now on, I'll use D to denote treatment and T to denote time. Let \\(Y_D(T)\\) be the potential outcome for treatment D on period T. In an ideal world where we have the ability to observe the counterfactual, we would estimate the treatment effect of an intervention the following way: # # $ # \hat{ATET} = E[Y_1(1) - Y_0(1)|D=1] # $ # # In words, the causal effect is the outcome in the period post intervention in case of a treatment minus the outcome in also in the period after the intervention, but in the case of no treatment. Of course, we can't measure this because \\(Y_0(1)\\) is counterfactual. # # One way around this is a before and after comparison. # # $ # \hat{ATET} = E[Y(1)|D=1] - E[Y(0)|D=1] # $ # # In our example, we would compare the average deposits from POA before and after the billboard was placed. # + poa_before = data.query("poa==1 & jul==0")["deposits"].mean() poa_after = data.query("poa==1 & jul==1")["deposits"].mean() poa_after - poa_before # - # This estimator is telling us that we should expect deposits to increase R$ 41,04 after the intervention. But can we trust this? # # Notice that \\(E[Y(0)|D=1]=E[Y_0(0)|D=1]\\), that is, the observed outcome for the treated unit **before the intervention** is equal to the counterfactual outcome for the treated unit also before the intervention. Since we are using, this to estimate the counterfactual **after the intervention** \\(E[Y_0(1)|D=1]\\), this estimation above assumes that \\(E[Y_0(1)|D=1] = E[Y_0(0)|D=1]\\). # # It is saying that in the case of no intervention, the outcome in the latter period would be the same as the outcome from the starting period. 
This would obviously be false if your outcome variable follows any kind of trend. For example, if deposits are going up in POA, \\(E[Y_0(1)|D=1] > E[Y_0(0)|D=1]\\), i.e. the outcome of the latter period would be greater than that of the starting period even in the absence of the intervention. With a similar argument, if the trend in Y is going down, \\(E[Y_0(1)|D=1] < E[Y_0(0)|D=1]\\). This is to show that this before and after thing is not a great estimator. # # Another idea is to compare the treated group with an untreated group that didn't get the intervention: # # $ # \hat{ATET} = E[Y(1)|D=1] - E[Y(1)|D=0] # $ # # In our example, it would be to compare the deposits from POA to that of Florianopolis in the post intervention period. fl_after = data.query("poa==0 & jul==1")["deposits"].mean() poa_after - fl_after # This estimator is telling us that the campaign is detrimental and that customers will decrease deposits by R$ 119.10. # # Notice that \\(E[Y(1)|D=0]=E[Y_0(1)|D=0]\\). And since we are using \\(E[Y(1)|D=0]\\) to estimate the counterfactual for the treated after the intervention, we are assuming we can replace the missing counterfactual like this: \\(E[Y_0(1)|D=0] = E[Y_0(1)|D=1]\\). But notice that this would only be true if both groups have a very similar baseline level. For instance, if Florianopolis has way more deposits than Porto Alegre, this would not be true because \\(E[Y_0(1)|D=0] > E[Y_0(1)|D=1]\\). On the other hand, if the level of deposits are lower in Florianopolis, we would have \\(E[Y_0(1)|D=0] < E[Y_0(1)|D=1]\\). # # Again, this is not a great idea. To solve this, we can use both space and time comparison. This is the idea of the difference in difference approach. 
It works by replacing the missing counterfactual the following way: # # $ # E[Y_0(1)|D=1] = E[Y_1(0)|D=1] + (E[Y_0(1)|D=0] - E[Y_0(0)|D=0]) # $ # # What this does is take the treated unit **before the intervention** and adds a trend component to it, which is estimated using the control \\(E[Y_0(1)|T=0] - E[Y_0(0)|T=0]\\). In words, it is saying that the treated **after the intervention**, had it not been treated, would look like the **treated before the treatment** plus a growth factor that is the same as the growth of the control. # # It is important to notice that this assumes that the trends in the treatment and control are the same: # # $ # E[Y_0(1) − Y_0(0)|D=1] = E[Y_0(1) − Y_0(0)|D=0] # $ # # where the left hand side is the counterfactual trend. Now, we can replace the estimated counterfactual in the treatment effect definition \\(E[Y_1(1)|D=1] - E[Y_0(1)|D=1]\\) # # $ # \hat{ATET} = E[Y(1)|D=1] - (E[Y(0)|D=1] + (E[Y(1)|D=0] - E[Y(0)|D=0]) # $ # # If we rearrange the terms, we get the classical Diff-in-Diff estimator. # # $ # \hat{ATET} = (E[Y(1)|D=1] - E[Y(1)|D=0]) - (E[Y(0)|D=1] - E[Y(0)|D=0]) # $ # # It gets that name because it gets the difference between the difference between treatment and control after and before the treatment. # # Here is what that looks in code. # + fl_before = data.query("poa==0 & jul==0")["deposits"].mean() diff_in_diff = (poa_after-poa_before)-(fl_after-fl_before) diff_in_diff # - # Diff-in-Diff is telling us that we should expect deposits to increase by R$ 6.52 per customer. Notice that the assumption that diff-in-diff makes is much more plausible than the other 2 estimators. It just assumes that the growth pattern between the 2 cities are the same. But it doesn't require them to have the same base level nor does it require the trend to be zero. 
# # To visualize what diff-in-diff is doing, we can project the growth trend from the untreated into the treated to see the counterfactual, that is, the number of deposits we should expect if there were no intervention. # + plt.figure(figsize=(10,5)) plt.plot(["May", "Jul"], [fl_before, fl_after], label="FL", lw=2) plt.plot(["May", "Jul"], [poa_before, poa_after], label="POA", lw=2) plt.plot(["May", "Jul"], [poa_before, poa_before+(fl_after-fl_before)], label="Counterfactual", lw=2, color="C2", ls="-.") plt.legend(); # - # See that small difference between the red and the yellow dashed lines? If you really focus you can see the small treatment effect on Porto Alegre. # # ![img](./data/img/diff-in-diff/cant-read.png) # # # Now, what you might be asking yourself is "how much can I trust this estimator? It is my right to have standard errors reported to me!". Which makes sense, since estimators without them look silly. To do so, we will use a neat trick that uses regression. Specifically, we will estimate the following linear model # # $ # Y_i = \beta_0 + \beta_1 POA_i + \beta_2 Jul_i + \beta_3 POA_i*Jul_i + e_i # $ # # Notice that \\(\beta_0\\) is the baseline of the control. In our case, is the level of deposits in Florianopolis in the month of May. If we turn on the treated city dummy, we get \\(\beta_1\\). So \\(\beta_0 + \beta_1\\) is the baseline of Porto Alegre in May, before the intervention, and \\(\beta_1\\) is the increase of Porto Alegre baseline on top of Florianopolis. If we turn the POA dummy off and turn the July Dummy on, we get \\(\beta_0 + \beta_2\\), which is the level of Florianópolis in July, after the intervention period. \\(\beta_2\\) is then the trend of the control, since we add it on top of the baseline to get the level of the control at the period post intervention. 
As a recap, \\(\beta_1\\) is the increment we get by going from the treated to the control, \\(\beta_2\\) is the increment we get by going from the period before to the period after the intervention. Finally, if we turn both dummies on, we get \\(\beta_3\\). \\(\beta_0 + \beta_1 + \beta_2 + \beta_3\\) is the level in Porto Alegre after the intervention. So \\(\beta_3\\) is the incremental impact when you go from May to July and from Florianopolis to POA. In other words, it is the Difference in Difference estimator. # # If you don't believe me, check for yourself. You should get the exact same number we got above. And also notice how we get our much wanted standard errors. smf.ols('deposits ~ poa*jul', data=data).fit().summary().tables[1] # ## Non Parallel Trends # # One obvious problem with Diff-in-Diff is failure to satisfy the parallel trend assumption. If the growth trend from the treated is different from the trend of the control, diff-in-diff will be biased. This is a common problem with non-random data, where the decision to treat a region is based on its potential to respond well to the treatment, or when the treatment is targeted at regions that are not performing very well. Take our marketing example. We decided to test billboards in Porto Alegre not in order to check the effect of billboards in general. The reason is simply because sales perform poorly there. Perhaps online marketing is not working there. In this case, it could be that the growth we would see in Porto Alegre without a billboard would be lower than the growth we observe in other cities. This would cause us to underestimate the effect of the billboard there. # # One way to check if this is happening is to plot the trend using past periods. For example, let's suppose POA had a small decreasing trend but Florianopolis was on a steep ascent. In this case, showing periods from before would reveal those trends and we would know Diff-in-Diff is not a reliable estimator. 
# + plt.figure(figsize=(10,5)) x = ["Jan", "Mar", "May", "Jul"] plt.plot(x, [120, 150, fl_before, fl_after], label="FL", lw=2) plt.plot(x, [60, 50, poa_before, poa_after], label="POA", lw=2) plt.plot(["May", "Jul"], [poa_before, poa_before+(fl_after-fl_before)], label="Counterfactual", lw=2, color="C2", ls="-.") plt.legend(); # - # We will see how to solve this problem with synthetic control. It will use multiple cities to create a synthetic city that closely follows the trend of the city of interest. But for now, remember that you always need to check if you have parallel trends when applying diff-in-diff. # # ![img](./data/img/diff-in-diff/non-parallel.png) # # One final issue that it's worth mentioning is that you won't be able to place confidence intervals around your Diff-in-Diff estimator if you only have aggregated thata. Say for instance you don't have data on what each of our customers from Florianópolis or Porto Alegre did. Instead, you only have the average deposits before and after the intervention for both cities. In this case, you will still be able to estimate the causal effect by Diff-in-Diff, but you won't know the variance of it. That's because all the variability in your data got squashed out in aggregation. # # ## Key Ideas # # We've explored a technique widely applied when we are estimating causal effects at more macro entities (schools, cities, states, countries...). Difference in Difference takes a treated unit before and after the treatment and compares the trend in the outcome to that of a control unit. Here, we've seen how this could be applied at estimating the effect of a city specific marketing campaign. # # Finally, we looked at how Diff-in-Diff fails if the trend between the treated and control unit is not the same. We also saw how diff-in-diff will be problematic if we only have aggregated data. # # ## References # # I like to think of this entire book as a tribute to <NAME>, <NAME> and <NAME> for their amazing Econometrics class. 
Most of the ideas here are taken from their classes at the American Economic Association. Watching them is what is keeping me sane during this tough year of 2020. # * [Cross-Section Econometrics](https://www.aeaweb.org/conference/cont-ed/2017-webcasts) # * [Mastering Mostly Harmless Econometrics](https://www.aeaweb.org/conference/cont-ed/2020-webcasts) # # I'll also like to reference the amazing books from Angrist. They have shown me that Econometrics, or 'Metrics as they call it, is not only extremely useful but also profoundly fun. # # * [Mostly Harmless Econometrics](https://www.mostlyharmlesseconometrics.com/) # * [Mastering 'Metrics](https://www.masteringmetrics.com/) # # Other important reference is <NAME> and <NAME>' book. It has been my trustworthy companion in the most thorny causal questions I had to answer. # # * [Causal Inference Book](https://www.hsph.harvard.edu/miguel-hernan/causal-inference-book/) # # Finally, I'd also like to compliment <NAME> and his brilliant work mingling Causal Inference and Rap quotes: # # * [Causal Inference: The Mixtape](https://www.scunning.com/mixtape.html) # # ![img](./data/img/poetry.png) # # ## Contribute # # Causal Inference for the Brave and True is an open-source material on causal inference, the statistics of science. It uses only free software, based in Python. Its goal is to be accessible monetarily and intellectually. # If you found this book valuable and you want to support it, please go to [Patreon](https://www.patreon.com/causal_inference_for_the_brave_and_true). If you are not ready to contribute financially, you can also help by fixing typos, suggesting edits or giving feedback on passages you didn't understand. Just go to the book's repository and [open an issue](https://github.com/matheusfacure/python-causality-handbook/issues). 
Finally, if you liked this content, please share it with others who might find it useful and give it a [star on GitHub](https://github.com/matheusfacure/python-causality-handbook/stargazers).
causal-inference-for-the-brave-and-true/14-Difference-in-Difference.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # Implementation of Quick Sort
#
# A quick sort first selects a value, which is called the pivot value. Although
# there are many different ways to choose the pivot value, we will simply use
# the first item in the list. The role of the pivot value is to assist with
# splitting the list. The actual position where the pivot value belongs in the
# final sorted list, commonly called the split point, will be used to divide
# the list for subsequent calls to the quick sort.
#
# # Resources for Review
#
# Check out the resources below for a review of Quick sort!
#
# * [Wikipedia](https://en.wikipedia.org/wiki/Quicksort)
# * [Visual Algo](http://visualgo.net/sorting.html)
# * [Sorting Algorithms Animation with Pseudocode](http://www.sorting-algorithms.com/quick-sort)

# +
def quick_sort(arr):
    """Sort `arr` in place in ascending order using quick sort."""
    quick_sort_help(arr, 0, len(arr) - 1)


def quick_sort_help(arr, first, last):
    """Recursively quick-sort the slice arr[first..last] (inclusive) in place."""
    if first < last:
        splitpoint = partition(arr, first, last)
        quick_sort_help(arr, first, splitpoint - 1)
        quick_sort_help(arr, splitpoint + 1, last)


def partition(arr, first, last):
    """Partition arr[first..last] around the pivot arr[first].

    Moves values <= pivot to the pivot's left and values >= pivot to its
    right, then returns the pivot's final index (the split point).
    """
    pivotvalue = arr[first]
    leftmark = first + 1
    rightmark = last

    done = False
    while not done:
        # Advance left past values that belong on the pivot's left
        while leftmark <= rightmark and arr[leftmark] <= pivotvalue:
            leftmark += 1
        # Retreat right past values that belong on the pivot's right
        while arr[rightmark] >= pivotvalue and rightmark >= leftmark:
            rightmark -= 1
        if rightmark < leftmark:
            done = True
        else:
            arr[leftmark], arr[rightmark] = arr[rightmark], arr[leftmark]

    # Place the pivot at its final position
    arr[first], arr[rightmark] = arr[rightmark], arr[first]
    return rightmark
# -

arr = [2, 5, 4, 6, 7, 3, 1, 4, 12, 11]
quick_sort(arr)
arr

# # Good Job!
code/algorithms/course_udemy_1/Sorting and Searching/Implementation of Quick Sort.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Shrek the Third DataFrame # Recall from the [Analyzing White Space code](https://github.com/Data-Science-for-Linguists-2019/Animated-Movie-Gendered-Dialogue/blob/master/code/DreamWorks_code/Analyzing_White_Space.ipynb) that this movie only has three types of white space: 10, 11, or 26 spaces. This means it doesn't fit well into out streamline. Let's look at this case by itself. shrek3 = open(r'C:\Users\cassi\Desktop\Data_Science\Animated-Movie-Gendered-Dialogue\private\imsdb_raw_nov_2015\Animation\shrekthethird.txt') shrek3_script = shrek3.read() shrek3.close() import re import pandas as pd shrek3_script[:500] shrek3_script[246:300] shrek3_script = shrek3_script[246:] def white_space_count(script_name): white_space = re.findall(" {3,}", script_name) len_w_s = [len(x) for x in white_space] print(len_w_s[:100]) #print(len_w_s.index(25)) print(set(len_w_s)) for num in set(len_w_s): print(num, "white spaces appear", len_w_s.count(num), "times") white_space_count(shrek3_script) # Hmmm, one random grouping of 25 white spaces.... 
shrek3_script[:2000] shrek3_script[4000:6000] # + #10 after scene header #10 between scene header descriptions #26 before prince charming #11 between all his lines #10 after his last line and new scene description begins #11 before those pesky final screening script lines # + #removing those final screening script lines # - titles = re.findall(r"\n\n {1,}Shrek the Third - Final Screening Script [0-9]+\.", shrek3_script) len(titles) shrek3_script = re.sub(r"\n\n {1,}Shrek the Third - Final Screening Script [0-9]+\.", '', shrek3_script) white_space_count(shrek3_script) 1668+1+1552+1028 # Removing parentheticals off the bat def no_parentheses(script): new_script = re.sub(r" *\([^\)]*\)", '', script) return new_script par = re.findall(r" *\([^\)]*\)", shrek3_script) len(par) shrek3_script_2 = no_parentheses(shrek3_script) len(shrek3_script) len(shrek3_script_2) white_space_count(shrek3_script_2) 1602+1+1541+927 4249 - 4071 # I feel like this doesn't add up, but okay #since some items in par actually have multiple lone white spaces, but the new white space count isn't as low as that number # + #par # - # ### Attempting to Find actual lines id_lines = re.findall(r"\n\n {25,}(\b[A-Z]['A-Z ]{1,})+\n\n", shrek3_script_2) len(id_lines) sorted(set(id_lines)) # %pprint # Wow! This captured almost everything on the first try! The scene headers in this script aren't capitalized except for INT. or EXT., which contain punctuation (not included in my regular expression!). There is just one problem -- when Artie has an all capitalized line that extends over line breaks (which means the middle of it has no punctuation and is caught by my regular expression. 
This can be fixed by lowering that line (which will be done eventually anyway) new_scene = re.findall(r"\n\n {10,}(\b[A-Z]['A-Z ]{1,})+\n\n", shrek3_script_2) len(new_scene) sorted(set(new_scene)) new_scene_2 = re.findall(r"\n\n {10,}(\b[A-Z]['A-Z ]{1,})+\n\n", shrek3_script_2) len(new_scene_2) sorted(set(new_scene_2)) shrek3_script_3 = re.sub('BY A MONSTER TRYING TO RELATE TO', 'by a monster trying to relate to', shrek3_script_2) new_line = re.findall(r"\n\n {10,}(\b[A-Z]['A-Z ]{1,})+\n\n", shrek3_script_3) len(sorted(set(new_line))) shrek3_script_marked = re.sub(r"\n\n {10,}(\b[A-Z]['A-Z ]{1,})+\n\n", r"_NEWLINE_\1_", shrek3_script_3) shrek3_script_marked[:1000] cuts = re.findall(r"\n\n {10,}(\b[A-Z]['A-Z ]{1,})+:", shrek3_script_marked) len(cuts) cuts shrek_3_script_marked = re.sub(r"\n\n {10,}(\b[A-Z]['A-Z ]{1,})+:", '', shrek3_script_marked) # + #Now let's split it! # - script_lines = shrek_3_script_marked.split("_NEWLINE_") len(script_lines) script_lines = script_lines[1:] script_lines[:10] testing = script_lines[:50] testing_list = [] line_test = [] for line in testing: testing_list.extend(re.findall(r"\n\n {10}\w", line)) marker = re.sub(r"\n\n {10}\w", '_ENDLINE_', line) line_test.append(marker) keep_lines = [] for line in line_test: real_line = line.split('_ENDLINE_') keep_lines.append(real_line[0]) testing_list for line in line_test: print(line) keep_lines testing # + ## Seems to have worked! Let's generalize it to whole script! # - line_id = [] for line in script_lines: marker = re.sub(r"\n\n {10}\w", '_ENDLINE_', line) line_id.append(marker) real_script_lines = [] for line in line_id: real_line = line.split('_ENDLINE_') real_script_lines.append(real_line[0]) len(real_script_lines) #should be 871! real_script_lines[:10] real_script_lines[-10:] len(" ") real_script_lines ##removing white space ## Remember, all the white space here is 11 spaces long! 
white_space = [] for line in real_script_lines: white_space.extend(re.findall(r"\n\n {11}", line)) len(white_space) # ### Splitting Speaker/Text and creating a dataframe speaker_text = [] for line in real_script_lines: line_no_space = re.sub(r"\n\n {11}", ' ', line) line_tup = line_no_space.split('_') line_tup[0] = line_tup[0].lower().strip() line_tup[1] = line_tup[1].lower().strip() speaker_text.append(tuple(line_tup)) len(speaker_text) speaker_text[:10] speaker_text[-10:] #We don't need "The End" speaker_text = speaker_text[:-1] speaker_text[-10:] # ### Data Frame Time! shrek_the_third = pd.DataFrame(speaker_text, columns=["Speaker", "Text"]) shrek_the_third.head() shrek_the_third.to_pickle(r'..\..\..\Animated-Movie-Gendered-Dialogue\private\shrek3_lines.pkl')
code/DreamWorks_code/Shrek3_DataFrame.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: 'Python 3.7.0 64-bit (''pytorch'': conda)'
#     name: python3
# ---

# # Chapter 3 All about tensors
# > A summary of chap 3 of Deep learning with PyTorch.
#
# - toc: true
# - badges: true
# - comments: true
# - categories: [jupyter]
# - image: images/chart-preview.png

# ![](my_icons/ch31.JPG)

# A plain Python list of floats, indexed and mutated for comparison with
# torch tensors below.
a = [1.0, 2.0, 1.0]
a[0]
a[2] = 3.0
a

import torch # <1>

a = torch.ones(3) # <2>
a
a[1]
float(a[1])
a[2] = 2.0
a

# Six coordinates stored flat, then the same data built directly from a list.
points = torch.zeros(6) # <1>
points[0] = 4.0 # <2>
points[1] = 1.0
points[2] = 5.0
points[3] = 3.0
points[4] = 2.0
points[5] = 1.0
points = torch.tensor([4.0, 1.0, 5.0, 3.0, 2.0, 1.0])
points
float(points[0]), float(points[1])

# The same three 2-D points as a 3x2 tensor.
points = torch.tensor([[4.0, 1.0], [5.0, 3.0], [2.0, 1.0]])
points
points.shape
points = torch.zeros(3, 2)
points
points = torch.tensor([[4.0, 1.0], [5.0, 3.0], [2.0, 1.0]])
points
points[0, 1]
points[0]

# .storage() exposes the flat 1-D buffer underneath the 2-D tensor.
points = torch.tensor([[4.0, 1.0], [5.0, 3.0], [2.0, 1.0]])
points.storage()
points_storage = points.storage()
points_storage[0]
points.storage()[1]

# Writing through the storage mutates the tensor that shares it.
points = torch.tensor([[4.0, 1.0], [5.0, 3.0], [2.0, 1.0]])
points_storage = points.storage()
points_storage[0] = 2.0
points

# Size, storage offset and stride describe how a (sub)tensor maps onto storage.
points = torch.tensor([[4.0, 1.0], [5.0, 3.0], [2.0, 1.0]])
second_point = points[1]
second_point.storage_offset()
second_point.size()
second_point.shape
points.stride()
second_point = points[1]
second_point.size()
second_point.storage_offset()
second_point.stride()

# A subtensor is a view: writing to it changes the original tensor...
points = torch.tensor([[4.0, 1.0], [5.0, 3.0], [2.0, 1.0]])
second_point = points[1]
second_point[0] = 10.0
points

# ...unless it is cloned first.
points = torch.tensor([[4.0, 1.0], [5.0, 3.0], [2.0, 1.0]])
second_point = points[1].clone()
second_point[0] = 10.0
points

# Transposition returns a view on the same storage (same id), with swapped strides.
points = torch.tensor([[4.0, 1.0], [5.0, 3.0], [2.0, 1.0]])
points
points_t = points.t()
points_t
id(points.storage()) == id(points_t.storage())
points.stride()
points_t.stride()

# Transposing two arbitrary dimensions of a rank-3 tensor.
some_t = torch.ones(3, 4, 5)
transpose_t = some_t.transpose(0, 2)
some_t.shape
transpose_t.shape
some_t.stride()
transpose_t.stride()

# Contiguity: the transposed view is not contiguous; .contiguous() makes a
# compact copy with fresh strides and storage layout.
points.is_contiguous()
points_t.is_contiguous()
points = torch.tensor([[4.0, 1.0], [5.0, 3.0], [2.0, 1.0]])
points_t = points.t()
points_t
points_t.storage()
points_t.stride()
points_t_cont = points_t.contiguous()
points_t_cont
points_t_cont.stride()
points_t_cont.storage()

# Numeric types: several equivalent ways to select a dtype.
double_points = torch.ones(10, 2, dtype=torch.double)
short_points = torch.tensor([[1, 2], [3, 4]], dtype=torch.short)
short_points.dtype
double_points = torch.zeros(10, 2).double()
short_points = torch.ones(10, 2).short()
double_points = torch.zeros(10, 2).to(torch.double)
short_points = torch.ones(10, 2).to(dtype=torch.short)
points_64 = torch.rand(5, dtype=torch.double) # <1>
points_short = points_64.to(torch.short)
points_64 * points_short # works from PyTorch 1.3 onwards

# reset points back to original value
points = torch.tensor([[4.0, 1.0], [5.0, 3.0], [2.0, 1.0]])

# Tensor indexing mirrors Python list slicing, plus range-per-dimension
# and None for inserting a new axis.
some_list = list(range(6))
some_list[:] # <1>
some_list[1:4] # <2>
some_list[1:] # <3>
some_list[:4] # <4>
some_list[:-1] # <5>
some_list[1:4:2] # <6>
points[1:] # <1>
points[1:, :] # <2>
points[1:, 0] # <3>
points[None] # <4>

# NumPy interop: round-trip through .numpy() and from_numpy.
points = torch.ones(3, 4)
points_np = points.numpy()
points_np
points = torch.from_numpy(points_np)

# Serializing tensors with torch.save / torch.load (path or open file object).
torch.save(points, '../data/p1ch3/ourpoints.t')
with open('../data/p1ch3/ourpoints.t','wb') as f:
    torch.save(points, f)
points = torch.load('../data/p1ch3/ourpoints.t')
with open('../data/p1ch3/ourpoints.t','rb') as f:
    points = torch.load(f)

# +
# Writing the points out to an HDF5 dataset named 'coords'.
import h5py

f = h5py.File('../data/p1ch3/ourpoints.hdf5', 'w')
dset = f.create_dataset('coords', data=points.numpy())
f.close()
# -

# Reading the last two points back from the HDF5 file.
f = h5py.File('../data/p1ch3/ourpoints.hdf5', 'r')
dset = f['coords']
last_points = dset[-2:]
last_points = torch.from_numpy(dset[-2:])
f.close()

# Moving tensors to/from the GPU (these cells require a CUDA device).
points_gpu = torch.tensor([[4.0, 1.0], [5.0, 3.0], [2.0, 1.0]], device='cuda')
points_gpu = points.to(device='cuda')
points_gpu = points.to(device='cuda:0')
points = 2 * points # <1>
points_gpu = 2 * points.to(device='cuda') # <2>
points_gpu = points_gpu + 4
points_cpu = points_gpu.to(device='cpu')
points_gpu = points.cuda() # <1>
points_gpu = points.cuda(0)
points_cpu = points_gpu.cpu()

# +
# Function form vs. method form of transpose.
a = torch.ones(3, 2)
a_t = torch.transpose(a, 0, 1)
a.shape, a_t.shape
# +
a = torch.ones(3, 2)
a_t = a.transpose(0, 1)
a.shape, a_t.shape
# -

# In-place operations carry a trailing underscore.
a = torch.ones(3, 2)
a.zero_()
a

# Named tensors: label dimensions instead of tracking positions by hand.
_ = torch.tensor([0.2126, 0.7152, 0.0722], names=['c'])
img_t = torch.randn(3, 5, 5) # shape [channels, rows, columns]
weights = torch.tensor([0.2126, 0.7152, 0.0722])
batch_t = torch.randn(2, 3, 5, 5) # shape [batch, channels, rows, columns]
img_gray_naive = img_t.mean(-3)
batch_gray_naive = batch_t.mean(-3)
img_gray_naive.shape, batch_gray_naive.shape

# Broadcasting the per-channel weights over rows and columns via unsqueeze.
unsqueezed_weights = weights.unsqueeze(-1).unsqueeze_(-1)
img_weights = (img_t * unsqueezed_weights)
batch_weights = (batch_t * unsqueezed_weights)
img_gray_weighted = img_weights.sum(-3)
batch_gray_weighted = batch_weights.sum(-3)
batch_weights.shape, batch_t.shape, unsqueezed_weights.shape

# The same weighted reduction expressed with einsum.
img_gray_weighted_fancy = torch.einsum('...chw,c->...hw', img_t, weights)
batch_gray_weighted_fancy = torch.einsum('...chw,c->...hw', batch_t, weights)
batch_gray_weighted_fancy.shape

weights_named = torch.tensor([0.2126, 0.7152, 0.0722], names=['channels'])
weights_named
img_named = img_t.refine_names(..., 'channels', 'rows', 'columns')
batch_named = batch_t.refine_names(..., 'channels', 'rows', 'columns')
print("img named:", img_named.shape, img_named.names)
print("batch named:", batch_named.shape, batch_named.names)

# align_as broadcasts the weights to match the named dims of the image.
weights_aligned = weights_named.align_as(img_named)
weights_aligned.shape, weights_aligned.names
gray_named = (img_named * weights_aligned).sum('channels')
gray_named.shape, gray_named.names

# Combining tensors whose dimension names do not line up raises an error.
try:
    gray_named = (img_named[..., :3] * weights_named).sum('channels')
except Exception as e:
    print(e)

# rename(None) drops the names, returning to a plain positional tensor.
gray_plain = gray_named.rename(None)
gray_plain.shape, gray_plain.names
_notebooks/2021-09-07-chapter3-tensors.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + [markdown] section="__no_section__" # # Import section # + section="__no_section__" import re import sys import os import shutil # + section="__no_section__" from os import path from concurrent import futures # + [markdown] section="__no_section__" # # Section 1 # # ## Iterator exhaustion # + section="__no_section__" # iterator exhaustion example x = (a for a in range(15)) if 3 in x: print(list(x)) # expected answer: [4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14] # + [markdown] section="__no_section__" # ## Late binding example # + section="__no_section__" # late binding on lambdas list_of_functions = [lambda x: x + n for n in range(10)] [f(5) for f in list_of_functions] # [19, 19, 19, 19, 19, 19, 19, 19, 19, 19]
resources/example2.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Estimating the biomass of marine protists # Our estimate of the total biomass of marine protists relies on estimates of global biomass for many plankton groups. We included estimates of all plankton groups that are dominated by protists. The main groups with a significant biomass contribution were picoeukaryotes, microzooplankton (defined not to include copepod biomass), diatoms, *Phaeocystis* and Rhizaria. The estimates for all plankton groups except Rhizaria are based on [Buitenhuis et al.](http://search.proquest.com/openview/0e8e5672fa28111df473268e13f2f757/1?pq-origsite=gscholar&cbl=105729), which used data from the MAREDAT database. The protist group Rhizaria is underrepresented in the MAREDAT database, and thus our estimate for the total biomass of Rhizaria is based on *in situ* imaging work by [Biard et al.](http://dx.doi.org/10.1038/nature17652). # # For the estimates based on MAREDAT data, Buitenhuis et al. estimates the total biomass of a specific plankton group by using a characteristic biomass concentration for each depth (either a median or average of the values in the database) and applying it across the entire volume of ocean at that depth. Buitenhuis et al. supplies two types of estimates for the global biomass of each plankton group: a “minimum” estimate which uses the median concentration of biomass from the database, and a “maximum” estimate which uses the average biomass concentration. Because the distributions of values in the database are usually highly skewed by asymmetrically high values, the median and mean are loosely associated by the authors of the MAREDAT study with a minimum and maximum estimate. 
The estimate based on the average value is more susceptible to biases in oversampling singular locations such as blooms of plankton species, or of coastal areas in which biomass concentrations are especially high, which might lead to an overestimate. On the other hand, the estimate based on the median biomass concentration might underestimate global biomass as it will reduce the effect of biologically relevant high biomass concentrations. Therefore, here and in all estimates based on MAREDAT data, we take the geometric mean of the “minimum” and “maximum” estimates (actually median and mean values of the distribution) as our best estimate, which will increase our robustness to the effects discussed above. # # We now discuss the estimates for each of the groups of protists. # # ## Picoeukaryotes # We estimate the total biomass of picoeukaryotes by first estimating the total biomass of picophytoplankton, and then calculating the fraction of eukaryotes out of the total biomass of picophytoplankton. Buitenhuis et al. reports a "minimum" estimate of 0.28 Gt C and a "maximum" estimate of 0.64 Gt C for the biomass of picophytoplankton. We calculate the geometric mean of those estimates: import pandas as pd from scipy.stats import gmean # Calculate the geometric mean of the "minimum" and "maximum" estimates from Buitenhuis et al. # for picophytoplankton picophyto_biomsss = gmean([0.28e15,0.64e15]) # To estimate the fraction of eukaryotes out of the total biomass of picophytoplankton, we rely on [Buitenhuis et al.](https://ueaeprints.uea.ac.uk/40778/) which estimates that they represent 49-69% of the global biomass of picophytoplankton. We use the geometric mean of this range as our best estimate of the fraction eukaryotes out of the total biomass of picophytoplankton. euk_frac = gmean([0.49,0.69]) auto_picoeuk_biomass = picophyto_biomsss*euk_frac auto_picoeuk_biomass/2e15 # Picoeukaryotes contain both protists and plant species (like chlorophytes). 
It seems that, from the available literature, the biomass distribution between them is not strongly favored towards one class ([Li et al.](http://dx.doi.org/10.1016/0198-0149(92)90085-8)). We thus estimate the protist fraction at about 50% of the biomass of picoeukaryotes: auto_pico_protists_fraction = 0.5 auto_pico_protists_biomass = auto_picoeuk_biomass*auto_pico_protists_fraction # Protists in the picoplankton to nanoplankton size range (0.8-5 µm in diameter) include not only autotrophic, but also heterotrophic organisms. As we could not find a reliable resource for estimating the biomass of heterotrophic pico-nanoplankton we use a recent global 18S ribosomal DNA sequencing effort that was part of the Tara Oceans campaign ([de Vargas et al.](http://dx.doi.org/10.1126/science.1261605)). # # We extracted data from Fig. 5A in de Vargas et al., which quantifies the ratio between autotropic and heterotrophic picoplankton and nanoplankton: pd.options.display.float_format = '{:,.1f}'.format # Load data from de Vargas on the ratio between autotrophic and heterotrophic protists pico_nano_data = pd.read_excel('marine_protists_data.xlsx',skiprows=1) pico_nano_data.head() # We calculate the geometric mean of the fraction of phototrophic and heterotrophic protists out of the total amount of 18S rDNA sequences. We use the ratio between these geometric means as our best estimate for the ratio between photosynthetic and heterotrophic protists. 
hetero_photo_ratio = gmean(pico_nano_data['Heterotrophic protist'])/gmean(pico_nano_data['Phototrophic protists']) print('Our best estimate of the ratio between heterotrophic and phototrophic protists in pico-nanoplankton is ≈%.f-fold' %hetero_photo_ratio) # We add the contribution of heterotrophic pico-nanoprotists to our estimate: pico_protists_biomass = (1+hetero_photo_ratio)*auto_pico_protists_biomass # Relying on 18S sequence abundance as a proxy for biomass is not a well established practice, and has various biases, but for lack of any other alternative we could find to perform the estimate, we chose to use it. Yet, we note that this plays a minor role in our analysis and in any case will not affect any of the major conclusions of our study. # # ## Microzooplankton # The estimate of microzooplankton in Buitenhuis et al. does not include copepod biomass by definition, and thus is suitable in order to estimate the total biomass of microzooplankton protists. Buitenhuis et al. reports a "minimum" estimate of 0.48 Gt C and a "maximum" estimate of 0.73 Gt C for the biomass of microzooplankton. We calculate the geometric mean of those estimates: # Calculate the geometric mean of the "minimum" and "maximum" estimates from Buitenhuis et al. # for microzooplankton microzoo_biomsss = gmean([0.48e15,0.73e15]) # ## Diatoms # For diatoms, Buitenhuis et al. reports a "minimum" estimate of 0.1 Gt C and a "maximum" estimate of 0.94 Gt C for the biomass of diatoms. We calculate the geometric mean of those estimates: # Calculate the geometric mean of the "minimum" and "maximum" estimates from Buitenhuis et al. # for diatoms diatom_biomsss = gmean([0.1e15,0.94e15]) # ## Phaeocystis # For Phaeocystis, Buitenhuis et al. reports a "minimum" estimate of 0.11 Gt C and a "maximum" estimate of 0.71 Gt C for the biomass of Phaeocystis. We calculate the geometric mean of those estimates: # Calculate the geometric mean of the "minimum" and "maximum" estimates from Buitenhuis et al. 
# for Phaeocystis phaeocystis_biomsss = gmean([0.11e15,0.71e15]) # As stated in Buitenhuis et al., the data from the MAREDAT initiative doesn’t contain the biomass of nanophytoplankton (phytoplankton between 2 and 20 µm) and autotrophic dinoflagellates. Nevertheless, this omission might be compensated by overestimation of Phaeocystis biomass because of sampling bias, so overall the sum of all the different phytoplankton fits well with total chlorophyll measurements from the WOA 2005. # # ## Rhizaria # For rhizaria, our estimate relies on data from Biard et al. Biard et al. divided the data into three depth layers (0-100 m, 100-200 m, and 200-500 m), and multiplied median biomass concentrations at each depth layer across the entire volume of water at that layer to generate a global estimate. The biomass of Rhizaria in the top 500 meters of the ocean is estimated at ≈0.2 Gt C. rhizaria_biomass = 0.2e15 # To estimate the total biomass of marine protists, we sum up all of our estimates of the biomass of the different groups of protists: # + best_estimate = rhizaria_biomass + phaeocystis_biomsss + diatom_biomsss + microzoo_biomsss + pico_protists_biomass print('Our best estimate for the total biomass of marine protists is ≈%.1f Gt C' %(best_estimate/1e15)) # - # The estimates based on the MAREDAT database include measurements only for the top 200 meters of the water column. For rhizaria, our estimate includes the top 500 meters of the water column. For more details on possible contributions from deeper ocean layers, see the marine protists section in the Supplementary information. # # # Uncertainty analysis # We discuss the uncertainty of estimates based on the MAREDAT database in a dedicated section in the Supplementary Information. We crudely project an uncertainty of about an order of magnitude.
protists/marine_protists/marine_protists.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # name: python3 # --- # + [markdown] id="view-in-github" colab_type="text" # <a href="https://colab.research.google.com/github/dantecomedia/Video-Classifcation-using-Deep-Learning/blob/master/Burglary.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a> # + id="QD0LCB7_a_WE" colab_type="code" colab={} # + id="z8z3mqiUaJP5" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 110} outputId="12755eca-cbdc-47a8-b272-e50d6dffd846" import pandas as pd import os from sklearn.model_selection import train_test_split from sklearn.datasets import load_files import random import shutil # !pip install scikit-video import numpy as np from skvideo.io import FFmpegReader, ffprobe from skvideo.utils import rgb2gray from PIL import Image from keras.preprocessing import image from tqdm import tqdm import matplotlib.pyplot as plt from keras.utils import to_categorical import matplotlib.pyplot as plt from __future__ import absolute_import from keras import backend as K from keras import initializers from keras import regularizers from keras import constraints from keras import layers from keras.engine import InputSpec from keras.utils import conv_utils from keras.legacy.interfaces import conv3d_args_preprocessor, generate_legacy_interface from keras.layers import Conv3D from keras.backend.tensorflow_backend import _preprocess_padding, _preprocess_conv3d_input import tensorflow as tf from keras.models import Sequential from keras.layers import Conv3D, MaxPooling3D, GlobalAveragePooling3D from keras.layers.core import Dense import tensorflow.keras.backend as K from keras.callbacks import ModelCheckpoint from keras.layers.core import Dropout from sklearn.metrics import classification_report # + id="lmiHlGUEaUMY" 
colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 1000} outputId="56f96682-b209-4606-fd4f-b5dcf22736a1" # !cp "/content/gdrive/My Drive/Security Dataset/Trimmed Dataset-20200612T201236Z-001.zip" "/content/" # !unzip "/content/Trimmed Dataset-20200612T201236Z-001.zip" # + id="_cwSemwCuDus" colab_type="code" colab={} shutil.rmtree("/content/curated_dataset") # + id="MgA-bCKOaazW" colab_type="code" colab={} # !mkdir curated_dataset # !cp -r "/content/Trimmed Dataset/Burglary" "/content/curated_dataset" # !cp -r "/content/Trimmed Dataset/Normal" "/content/curated_dataset" # + id="AS-ASXfTjbbE" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 54} outputId="5493494e-d8a9-4e34-f53b-e1bcced0c930" import os path="/content/curated_dataset/" count=0 for i in os.listdir(path): count=0 for j in os.listdir(path+str(i)): count=count+1 print(i+":",count) # + id="ntnhsB4OkXfJ" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 35} outputId="136f7388-c5f3-4ddd-e834-defe0d63f25d" 1198-1009 # + id="L6jl0MdykT1x" colab_type="code" colab={} count=0 for i in os.listdir("/content/curated_dataset/Normal"): a=random.choice(os.listdir("/content/curated_dataset/Normal")) os.remove("/content/curated_dataset/Normal/"+str(a)) count=count+1 if count==189: break # + id="lrVgqCRDkbtd" colab_type="code" colab={} raw_data=load_files(os.getcwd()+r'/curated_dataset/', shuffle=True) files=raw_data['filenames'] targets=raw_data['target'] train_files, test_files, train_targets, test_targets=train_test_split(files, targets, test_size=1/3, random_state=191) # + id="llO7YgSakfSi" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 108} outputId="600b9f38-ce16-4e48-96c7-4fa08b2d2f60" valid_files = train_files[930:] valid_targets = train_targets[930:] # Remaining data will be used for training the model train_files = train_files[:930] train_targets = train_targets[:930] # Generic details about the data print('Total 
number of videos:', len(files)) print('\nNumber of videos in training data:', train_files.shape[0]) print('Number of videos in validation data:', valid_files.shape[0]) print('Number of videos in test data:', test_files.shape[0]) # + id="zYq38QocuWvM" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 35} outputId="7af04d1d-951d-4e4c-bf49-600ae0379eec" 210-80 # + id="tp-R-xnvkize" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 108} outputId="e01d1060-eb88-4e6b-9e7b-7a8b3928246d" print('The categorical labels are converted into integers.\nFollowing is the mapping - \n') for label in zip(range(14), raw_data['target_names']): print(label) # + id="brM3zgT-knNB" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 108} outputId="e58306f9-77a9-4696-928a-3878db18478c" for pair in zip(train_files[:5], train_targets[:5]): print(pair) # + id="lX-jMpNekqWW" colab_type="code" colab={} class Videos(object): def __init__(self, target_size=None, to_gray=True, max_frames=None, extract_frames='middle', required_fps=None, normalize_pixels=None): """ Initializing the config variables Parameters: target_size (tuple): (New_Width, New_Height), Default 'None' A tuple denoting the target width and height of each frame in each of the video to_gray (boolean): Default 'True' If True, then each frame will be converted to gray scale. Otherwise, not. max_frames (int): Default 'None' The maximum number of frames to return for each video. Extra frames are removed based on the value of 'extract_frames'. extract_frames (str): {'first', 'middle', 'last'}, Default 'middle' 'first': Extract the first 'N' frames 'last': Extract the last 'N' frames 'middle': Extract 'N' frames from the middle Remove ((total_frames - max_frames) // 2) frames from the beginning as well as the end required_fps (int): Default 'None' Capture 'N' frame(s) per second from the video. Only the first 'N' frame(s) for each second in the video are captured. 
normalize_pixels (tuple/str): Default 'None' If 'None', the pixels will not be normalized. If a tuple - (New_min, New_max) is passed, Min-max Normalization will be used. If the value is 'z-score', then Z-score Normalization will be used. For each pixel p, z_score = (p - mean) / std """ self.target_size = target_size self.to_gray = to_gray self.max_frames = max_frames self.extract_frames = extract_frames self.required_fps = required_fps self.normalize_pixels = normalize_pixels self.fps = None def read_videos(self, paths): """ Parameters: paths (list): Required A list of paths of the videos to be read Returns: Numpy.ndarray A 5-d tensor with shape (<No. of Videos>, <No. of frames>, <height>, <width>, <channels>) """ list_of_videos = [ self._read_video(path) for path in tqdm(paths) ] tensor = np.vstack(list_of_videos) if self.normalize_pixels != None: # Pixels are normalized for each video individually if (type(self.normalize_pixels) == tuple) and (len(self.normalize_pixels) == 2): base = self.normalize_pixels[0] r = self.normalize_pixels[1] - base min_ = np.min(tensor, axis=(1, 2, 3), keepdims=True) max_ = np.max(tensor, axis=(1, 2, 3), keepdims=True) return ((tensor.astype('float32') - min_) / (max_ - min_)) * r + base elif self.normalize_pixels == 'z-score': mean = np.mean(tensor, axis=(1, 2, 3), keepdims=True) std = np.std(tensor, axis=(1, 2, 3), keepdims=True) return (tensor.astype('float32') - mean) / std else: raise ValueError('Invalid value of \'normalize_pixels\'') return tensor def get_frame_count(self, paths): """ Can be used to determine the value of `max_frames` Parameters: paths (list): Required A list of paths of the videos to be read Returns: dict (python dictionary) For each video, the total number of frames in that video is stored in the dictionary. 
""" frame_count = {} for path in paths: cap = FFmpegReader(filename=path) frame_count[path] = cap.inputframenum cap.close() return frame_count def _read_video(self, path): """ Parameters: path (str): Required Path of the video to be read Returns: Numpy.ndarray A 5-d tensor with shape (1, <No. of frames>, <height>, <width>, <channels>) """ cap = FFmpegReader(filename=path) list_of_frames = [] self.fps = int(cap.inputfps) # Frame Rate for index, frame in enumerate(cap.nextFrame()): capture_frame = True if self.required_fps != None: is_valid = range(self.required_fps) capture_frame = (index % self.fps) in is_valid if capture_frame: if self.target_size is not None: temp_image = image.array_to_img(frame) frame = image.img_to_array( temp_image.resize( self.target_size, Image.ANTIALIAS)).astype('uint8') # Shape of each frame -> (<height>, <width>, 3) list_of_frames.append(frame) temp_video = np.stack(list_of_frames) cap.close() if self.to_gray: temp_video = rgb2gray(temp_video) if self.max_frames is not None: temp_video = self._process_video(video=temp_video) return np.expand_dims(temp_video, axis=0) def _process_video(self, video): """ Parameters: video (Numpy.ndarray): Shape = (<No. of frames>, <height>, <width>, <channels>) Video whose frames are to be extracted Returns: Numpy.ndarray A tensor (processed video) with shape (<`max_frames`>, <height>, <width>, <channels>) """ total_frames = video.shape[0] if self.max_frames <= total_frames: if self.extract_frames == 'first': video = video[:self.max_frames] elif self.extract_frames == 'last': video = video[(total_frames - self.max_frames):] elif self.extract_frames == 'middle': # No. 
of frames to remove from the front front = ((total_frames - self.max_frames) // 2) + 1 video = video[front:(front + self.max_frames)] else: raise ValueError('Invalid value of \'extract_frames\'') else: raise IndexError( 'Required number of frames is greater than the total number of frames in the video') return video # + id="Yh8H4hSvkucy" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 342} outputId="6e6c7b26-5c2c-4837-96bd-d00aa9fc885c" # The path of a sample video in the training data sample_files = train_files[:1] # An object of the class 'Videos' reader = Videos(target_size=None, to_gray=False) # Loading the sample videos, in their original format sample = reader.read_videos(sample_files) print('\nShape of the sample data:', sample.shape) # Displaying a frame from the sample video plt.imshow(sample[0][29]) # + id="YXjnBQERkxNj" colab_type="code" colab={} # An object of the class `Videos` to load the data in the required format reader = Videos(target_size=(128, 128), to_gray=True, max_frames=29, extract_frames='middle', normalize_pixels=(0, 1)) # + id="63_ZXr_bk1UN" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 72} outputId="88e68564-6efd-4b0b-971a-8dd1c87b386a" X_train = reader.read_videos(train_files) y_train = to_categorical(train_targets, num_classes=2) print('Shape of training data:', X_train.shape) print('Shape of training labels:', y_train.shape) # + id="T1FUgPNwk3m9" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 72} outputId="8da4f384-2378-47dc-fea5-5dbcaf47a094" X_valid = reader.read_videos(valid_files) y_valid = to_categorical(valid_targets, num_classes=2) print('Shape of validation data:', X_valid.shape) print('Shape of validation labels:', y_valid.shape) # + id="Xandxp6Mk6ZE" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 72} outputId="a7b2e3d4-66f6-40af-8d7b-06d140174099" X_test = reader.read_videos(test_files) y_test = 
to_categorical(test_targets, num_classes=2) print('Shape of testing data:', X_test.shape) print('Shape of testing labels:', y_test.shape) # + id="SrcUyhxMk9DN" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 286} outputId="a2af109f-f4a9-4593-c068-029c662281b3" # Displaying the first frame of the first processed video from the training data plt.imshow(np.squeeze(X_train[0][0], axis=2), cmap='gray') # + id="Xs3db-MBk_3g" colab_type="code" colab={} def depthwise_conv3d_args_preprocessor(args, kwargs): converted = [] if 'init' in kwargs: init = kwargs.pop('init') kwargs['depthwise_initializer'] = init converted.append(('init', 'depthwise_initializer')) args, kwargs, _converted = conv3d_args_preprocessor(args, kwargs) return args, kwargs, converted + _converted legacy_depthwise_conv3d_support = generate_legacy_interface( allowed_positional_args=['filters', 'kernel_size'], conversions=[('nb_filter', 'filters'), ('subsample', 'strides'), ('border_mode', 'padding'), ('dim_ordering', 'data_format'), ('b_regularizer', 'bias_regularizer'), ('b_constraint', 'bias_constraint'), ('bias', 'use_bias')], value_conversions={'dim_ordering': {'tf': 'channels_last', 'th': 'channels_first', 'default': None}}, preprocessor=depthwise_conv3d_args_preprocessor) class DepthwiseConv3D(Conv3D): """Depthwise 3D convolution. Depth-wise part of separable convolutions consist in performing just the first step/operation (which acts on each input channel separately). It does not perform the pointwise convolution (second step). The `depth_multiplier` argument controls how many output channels are generated per input channel in the depthwise step. # Arguments kernel_size: An integer or tuple/list of 3 integers, specifying the depth, width and height of the 3D convolution window. Can be a single integer to specify the same value for all spatial dimensions. 
strides: An integer or tuple/list of 3 integers, specifying the strides of the convolution along the depth, width and height. Can be a single integer to specify the same value for all spatial dimensions. padding: one of `"valid"` or `"same"` (case-insensitive). depth_multiplier: The number of depthwise convolution output channels for each input channel. The total number of depthwise convolution output channels will be equal to `filterss_in * depth_multiplier`. groups: The depth size of the convolution (as a variant of the original Depthwise conv) data_format: A string, one of `channels_last` (default) or `channels_first`. The ordering of the dimensions in the inputs. `channels_last` corresponds to inputs with shape `(batch, height, width, channels)` while `channels_first` corresponds to inputs with shape `(batch, channels, height, width)`. It defaults to the `image_data_format` value found in your Keras config file at `~/.keras/keras.json`. If you never set it, then it will be "channels_last". activation: Activation function to use (see [activations](../activations.md)). If you don't specify anything, no activation is applied (ie. "linear" activation: `a(x) = x`). use_bias: Boolean, whether the layer uses a bias vector. depthwise_initializer: Initializer for the depthwise kernel matrix (see [initializers](../initializers.md)). bias_initializer: Initializer for the bias vector (see [initializers](../initializers.md)). depthwise_regularizer: Regularizer function applied to the depthwise kernel matrix (see [regularizer](../regularizers.md)). bias_regularizer: Regularizer function applied to the bias vector (see [regularizer](../regularizers.md)). dialation_rate: List of ints. Defines the dilation factor for each dimension in the input. Defaults to (1,1,1) activity_regularizer: Regularizer function applied to the output of the layer (its "activation"). (see [regularizer](../regularizers.md)). 
depthwise_constraint: Constraint function applied to the depthwise kernel matrix (see [constraints](../constraints.md)). bias_constraint: Constraint function applied to the bias vector (see [constraints](../constraints.md)). # Input shape 5D tensor with shape: `(batch, depth, channels, rows, cols)` if data_format='channels_first' or 5D tensor with shape: `(batch, depth, rows, cols, channels)` if data_format='channels_last'. # Output shape 5D tensor with shape: `(batch, filters * depth, new_depth, new_rows, new_cols)` if data_format='channels_first' or 4D tensor with shape: `(batch, new_depth, new_rows, new_cols, filters * depth)` if data_format='channels_last'. `rows` and `cols` values might have changed due to padding. """ #@legacy_depthwise_conv3d_support def __init__(self, kernel_size, strides=(1, 1, 1), padding='valid', depth_multiplier=1, groups=None, data_format=None, activation=None, use_bias=True, depthwise_initializer='glorot_uniform', bias_initializer='zeros', dilation_rate = (1, 1, 1), depthwise_regularizer=None, bias_regularizer=None, activity_regularizer=None, depthwise_constraint=None, bias_constraint=None, **kwargs): super(DepthwiseConv3D, self).__init__( filters=None, kernel_size=kernel_size, strides=strides, padding=padding, data_format=data_format, activation=activation, use_bias=use_bias, bias_regularizer=bias_regularizer, dilation_rate=dilation_rate, activity_regularizer=activity_regularizer, bias_constraint=bias_constraint, **kwargs) self.depth_multiplier = depth_multiplier self.groups = groups self.depthwise_initializer = initializers.get(depthwise_initializer) self.depthwise_regularizer = regularizers.get(depthwise_regularizer) self.depthwise_constraint = constraints.get(depthwise_constraint) self.bias_initializer = initializers.get(bias_initializer) self.dilation_rate = dilation_rate self._padding = _preprocess_padding(self.padding) self._strides = (1,) + self.strides + (1,) self._data_format = "NDHWC" self.input_dim = None def build(self, 
input_shape): if len(input_shape) < 5: raise ValueError('Inputs to `DepthwiseConv3D` should have rank 5. ' 'Received input shape:', str(input_shape)) if self.data_format == 'channels_first': channel_axis = 1 else: channel_axis = -1 if input_shape[channel_axis] is None: raise ValueError('The channel dimension of the inputs to ' '`DepthwiseConv3D` ' 'should be defined. Found `None`.') self.input_dim = int(input_shape[channel_axis]) if self.groups is None: self.groups = self.input_dim if self.groups > self.input_dim: raise ValueError('The number of groups cannot exceed the number of channels') if self.input_dim % self.groups != 0: raise ValueError('Warning! The channels dimension is not divisible by the group size chosen') depthwise_kernel_shape = (self.kernel_size[0], self.kernel_size[1], self.kernel_size[2], self.input_dim, self.depth_multiplier) self.depthwise_kernel = self.add_weight( shape=depthwise_kernel_shape, initializer=self.depthwise_initializer, name='depthwise_kernel', regularizer=self.depthwise_regularizer, constraint=self.depthwise_constraint) if self.use_bias: self.bias = self.add_weight(shape=(self.groups * self.depth_multiplier,), initializer=self.bias_initializer, name='bias', regularizer=self.bias_regularizer, constraint=self.bias_constraint) else: self.bias = None # Set input spec. 
self.input_spec = InputSpec(ndim=5, axes={channel_axis: self.input_dim}) self.built = True def call(self, inputs, training=None): inputs = _preprocess_conv3d_input(inputs, self.data_format) if self.data_format == 'channels_last': dilation = (1,) + self.dilation_rate + (1,) else: dilation = self.dilation_rate + (1,) + (1,) if self._data_format == 'NCDHW': outputs = tf.concat( [tf.nn.conv3d(inputs[0][:, i:i+self.input_dim//self.groups, :, :, :], self.depthwise_kernel[:, :, :, i:i+self.input_dim//self.groups, :], strides=self._strides, padding=self._padding, dilations=dilation, data_format=self._data_format) for i in range(0, self.input_dim, self.input_dim//self.groups)], axis=1) else: outputs = tf.concat( [tf.nn.conv3d(inputs[0][:, :, :, :, i:i+self.input_dim//self.groups], self.depthwise_kernel[:, :, :, i:i+self.input_dim//self.groups, :], strides=self._strides, padding=self._padding, dilations=dilation, data_format=self._data_format) for i in range(0, self.input_dim, self.input_dim//self.groups)], axis=-1) if self.bias is not None: outputs = K.bias_add( outputs, self.bias, data_format=self.data_format) if self.activation is not None: return self.activation(outputs) return outputs def compute_output_shape(self, input_shape): if self.data_format == 'channels_first': depth = input_shape[2] rows = input_shape[3] cols = input_shape[4] out_filters = self.groups * self.depth_multiplier elif self.data_format == 'channels_last': depth = input_shape[1] rows = input_shape[2] cols = input_shape[3] out_filters = self.groups * self.depth_multiplier depth = conv_utils.conv_output_length(depth, self.kernel_size[0], self.padding, self.strides[0]) rows = conv_utils.conv_output_length(rows, self.kernel_size[1], self.padding, self.strides[1]) cols = conv_utils.conv_output_length(cols, self.kernel_size[2], self.padding, self.strides[2]) if self.data_format == 'channels_first': return (input_shape[0], out_filters, depth, rows, cols) elif self.data_format == 'channels_last': return 
(input_shape[0], depth, rows, cols, out_filters) def get_config(self): config = super(DepthwiseConv3D, self).get_config() config.pop('filters') config.pop('kernel_initializer') config.pop('kernel_regularizer') config.pop('kernel_constraint') config['depth_multiplier'] = self.depth_multiplier config['depthwise_initializer'] = initializers.serialize(self.depthwise_initializer) config['depthwise_regularizer'] = regularizers.serialize(self.depthwise_regularizer) config['depthwise_constraint'] = constraints.serialize(self.depthwise_constraint) return config DepthwiseConvolution3D = DepthwiseConv3D # + id="CKc2GlHAlIQP" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 490} outputId="c8084616-89cd-4a42-c8ef-39077be243d6" model = Sequential() # Adding Alternate convolutional and pooling layers model.add(Conv3D(filters=16, kernel_size=(10, 3, 3), strides=(1, 1, 1), padding='same', activation='relu', input_shape=X_train.shape[1:])) model.add(MaxPooling3D(pool_size=2, strides=(1, 2, 2), padding='same')) model.add(Conv3D(filters=64, kernel_size=(5, 3, 3), strides=(3, 1, 1), padding='valid', activation='relu')) model.add(MaxPooling3D(pool_size=2, strides=(1, 2, 2), padding='same')) model.add(Conv3D(filters=256, kernel_size=(5, 3, 3), strides=(3, 1, 1), padding='valid', activation='relu')) model.add(MaxPooling3D(pool_size=2, strides=(1, 2, 2), padding='same')) # A global average pooling layer to get a 1-d vector # The vector will have a depth (same as number of elements in the vector) of 256 model.add(GlobalAveragePooling3D()) # The Global average pooling layer is followed by a fully-connected neural network, with one hidden and one output layer # Hidden Layer model.add(Dense(32, activation='relu')) # Output layer model.add(Dense(2, activation='softmax')) model.summary() # + id="CmHtvGzOlN-L" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 1000} outputId="79018fbd-1383-4759-fbf2-7f32d2f6cfbc" from keras.callbacks import 
ModelCheckpoint # Compiling the model model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy']) # Saving the model that performed the best on the validation set checkpoint = ModelCheckpoint(filepath='Model_Burglary_1.weights.best.hdf5', save_best_only=True, verbose=1) # Training the model for 80 epochs history = model.fit(X_train, y_train, batch_size=16, epochs=80, validation_data=(X_valid, y_valid), verbose=2, callbacks=[checkpoint]) # + id="OBWtaEVYlpdg" colab_type="code" colab={} model.load_weights('/content/Model_Burglary_1.weights.best.hdf5') # Testing the model on the Test data (loss, accuracy) = model.evaluate(X_test, y_test, batch_size=16, verbose=0) print('Accuracy on test data: {:.2f}%'.format(accuracy * 100)) # + id="LhFapgyQpBSN" colab_type="code" colab={} # Making the plot larger plt.figure(figsize=(12, 8)) loss = history.history['loss'] # Loss on the training data val_loss = history.history['val_loss'] # Loss on the validation data epochs = range(1, 81) plt.plot(epochs, loss, 'ro-', label='Training Loss') plt.plot(epochs, val_loss, 'go-', label = 'Validation Loss') plt.legend() # + id="JgeTMpRcpE8W" colab_type="code" colab={} # Making the plot larger plt.figure(figsize=(12, 8)) acc = history.history['accuracy'] # Loss on the training data val_acc = history.history['val_accuracy'] # Loss on the validation data epochs = range(1, 81) plt.plot(epochs, acc, 'ro-', label='Training Accuracy') plt.plot(epochs, val_acc, 'go-', label = 'Validation Accuracy') plt.legend() # + id="H3DJdm4NpIMa" colab_type="code" colab={} y_pred=model.predict(X_test) y_pred=y_pred>0.90 target_names = ['class Normal', 'class Burglary'] print(classification_report(y_test, y_pred, target_names=target_names)) # + id="0n99nJ57pM8k" colab_type="code" colab={} model_d = Sequential() # Adding Alternate convolutional and pooling layers model_d.add(DepthwiseConv3D( kernel_size=(10, 3, 3),depth_multiplier=2, strides=(1, 1, 1), padding='same', activation='relu', 
input_shape=X_train.shape[1:])) model_d.add(MaxPooling3D(pool_size=2, strides=(1, 2, 2), padding='same')) model_d.add(DepthwiseConv3D( depth_multiplier=4,kernel_size=(5, 3, 3), strides=(3, 1, 1), padding='valid', activation='relu')) model_d.add(MaxPooling3D(pool_size=2, strides=(1, 2, 2), padding='same')) model_d.add(DepthwiseConv3D(depth_multiplier=8, kernel_size=(5, 3, 3), strides=(3, 1, 1), padding='valid', activation='relu')) model_d.add(MaxPooling3D(pool_size=2, strides=(1, 2, 2), padding='same')) # A global average pooling layer to get a 1-d vector # The vector will have a depth (same as number of elements in the vector) of 256 model_d.add(GlobalAveragePooling3D()) # The Global average pooling layer is followed by a fully-connected neural network, with one hidden and one output layer # Hidden Layer model_d.add(Dense(32, activation='relu')) # Output layer model_d.add(Dense(2, activation='softmax')) model_d.summary() # + id="u9akiEFzpXLY" colab_type="code" colab={} from keras.callbacks import ModelCheckpoint # Compiling the model model_d.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy']) # Saving the model that performed the best on the validation set checkpoint = ModelCheckpoint(filepath='Model_depthwise_1_Burglary.weights.best.hdf5', save_best_only=True, verbose=1) # Training the model for 40 epochs history = model_d.fit(X_train, y_train, batch_size=16, epochs=200, validation_data=(X_valid, y_valid), verbose=2, callbacks=[checkpoint]) # + id="adau7K4cpcbU" colab_type="code" colab={} model_d.load_weights('/content/Model_depthwise_1_Burglary.weights.best.hdf5') # Testing the model on the Test data (loss, accuracy) = model_d.evaluate(X_test, y_test, batch_size=16, verbose=0) print('Accuracy on test data: {:.2f}%'.format(accuracy * 100)) # Making the plot larger plt.figure(figsize=(12, 8)) loss = history.history['loss'] # Loss on the training data val_loss = history.history['val_loss'] # Loss on the validation data epochs = range(1, 
201) plt.plot(epochs, loss, 'ro-', label='Training Loss') plt.plot(epochs, val_loss, 'go-', label = 'Validation Loss') plt.legend() # Making the plot larger plt.figure(figsize=(12, 8)) acc = history.history['accuracy'] # Loss on the training data val_acc = history.history['val_accuracy'] # Loss on the validation data epochs = range(1, 201) plt.plot(epochs, acc, 'ro-', label='Training Accuracy') plt.plot(epochs, val_acc, 'go-', label = 'Validation Accuracy') plt.legend() y_pred=model_d.predict(X_test) y_pred=y_pred>0.80 target_names = ['class Normal', 'class Burglary'] print(classification_report(y_test, y_pred, target_names=target_names)) # + id="qBYe1hDSrdbh" colab_type="code" colab={} # !mkdir "/content/gdrive/My Drive/Security Dataset/Burglary" # !cp -r "/content/curated_dataset" "/content/gdrive/My Drive/Security Dataset/Burglary" # + id="VOOrx1G4sIEo" colab_type="code" colab={} # !mkdir "/content/gdrive/My Drive/Security Dataset/Burglary/weights" # !cp -r "/content/Model_depthwise_1_Burglary.weights.best.hdf5" "/content/gdrive/My Drive/Security Dataset/Burglary/weights" # #!cp -r "/content/Model_depthwise_2_arson.weights.best.hdf5" "/content/gdrive/My Drive/Security Dataset/Assault/weights" # !cp -r "/content/Model_Burglary_1.weights.best.hdf5" "/content/gdrive/My Drive/Security Dataset/Burglary/weights" # #!cp -r "/content/Model_arson_2_larger_parameters.weights.best.hdf5" "/content/gdrive/My Drive/Security Dataset/Assault/weights" # + id="h8u-UhZqsux1" colab_type="code" colab={}
Burglary.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: 'Python 3.9.1 64-bit (''3.9.1'': pyenv)' # name: python391jvsc74a57bd06e942df4d85f9e057d39df3ee298da62417367022ee9076991545a9f83591799 # --- # # Review # # 1. What happens to the standard deviation of a sampling distribution when you increase the sample size? # # 2. The code below describes the probability distribution `P_2(x)` for some random variable X. # # a. What is the sample space? # # b. What is the expected value of X? # # + import numpy as np X = np.arange(-50, 50) Y = np.abs(np.sin(X/7)) Y = Y / np.sum(Y) ''' from matplotlib import pyplot as plt plt.plot(X,Y) ''' def P_2(x): for i in range(len(X)): if X[i] == x: return Y[i] return 0 # - # 3. A water sommelier is given random samples of tasty water from different brands. After conducting multiple tastings, they conclude that the sampling distribution of sample means of water pH level has a mean of 7.5 and a standard deviation of 0.1. # # a. Give a **point estimate** of the average pH level of tasty water. # # b. Give an **interval estimate** of the average pH level of tasty water. # # 4. The code below contains a list of pollen levels for 30 days in April, where 0 is low pollen and 12 is high pollen. Use the bootstrap method to find the 95% confidence interval for the mean pollen count in April. pollen = [8.4, 8.7, 9.9, 9.9, 8.4, 9.8, 9.7, 9.1, 10.3, 8.2, 8.6, 8.8, 10.7, 8.9, 9.2, 2.7, 10.6, 10.8, 8.9, 8.5, 8.5, 9.8, 10.1, 10.0, 10.1, 9.1, 9.2, 6.8, 10.5, 10.8] # 5. What is the difference between a null hypothesis and an alternative hypothesis? Give an example of each. # # Significance Testing # How do we talk about the **significance** of our data? For example, can we say for sure that one population parameter is greater than the other? We'll walk through the steps of a **significance test** with some lovely penguins. 
# # ## Step One: Assumptions # This time we are just regular scientists on Earth who have collected a sample of roughly 350 penguins. We work for a nutrition company, and we want to know how much a penguin's species affects its body mass. The *most important assumption* that significance testing relies on is that the sample was *randomly* collected! # 6. Give an example of another assumption that we are making about the sample, and how that might negatively affect the validity of our experiment. # ## Step Two: Hypotheses # What statement are we trying to make about penguins? In this scenario, we are interested in whether Gentoo penguins are heavier than Adelie penguins. # # Our null hypothesis (denoted by $H_0$): # Gentoo penguins are *not* heavier than Adelie penguins. # # Our alternative hypothesis (denoted by $H_a$): # Gentoo penguins *are* heavier than Adelie penguins. # 7. What would the null and alternative hypotheses be if we were interested in whether Adelie penguins had shorter bills than Chinstrap penguins? # ## Step Three: Getting a P-Value # We have to show strong evidence against the null hypothesis. In this example, our null hypothesis states that Gentoo penguins aren't heavier than Adelie penguins. # # First, we have to figure out what the sampling distribution of Adelie penguin body mass looks like. Since we are not able to gather more information about penguins, we can bootstrap our sample to emulate a sampling distribution. # # 8. Create a sampling distripution of sample body mass means by using bootstrap samples of Adelie penguins. Use `seaborn` or `altair` to visualize it. # *Hint: First fiter out Adelie penguins, then count how many Adelie penguins there are in order to figure out the size of the bootstrap samples.* # + # #!pip3 install palmerpenguins from palmerpenguins import load_penguins import pandas as pd penguins = load_penguins() penguins[penguins['species'] == "Adelie"] # - # 9. 
We will now calculate our **test statistic**, which is a point estimate. What is the average body mass of a Gentoo penguin? # We now have everything we need to calculate our **p-value**, which stands for **probability value**. Using our answers to (8) and (9), we are going to examine our sampling distribution, which assumes that the null hypothesis is true, and figure out how likely it is to get our test statistic. # 10. What is the probability of a body mass *greater than or equal to* your answer to (9) given the sampling distribution from (8)? This is the **p-value**! # *Hint: Just count the means in your bootstrap list!* # # 11. In this example, we used the mean of our sample of Gentoo penguins as our test statistic. Give an example of another test statistic that we could have used, and how that might change our results. # 12. Repeat questions 9 and 10 for Chinstrap penguins. What is the **p-value** for Chinstrap penguins? # ## Step Four: Drawing a Conclusion # Once we have a p-value, we can *interpret* it in order to draw a conclusion about our hypotheses. A common method is to create a threshold, and if the p-value falls below that threshold, then we can reject the null hypothesis. # # 13. A conventional (but arbitrary) threshold is 5%. Using this threshold, do we reject the null hypothesis for Gentoo penguins? What about Chinstrap penguins? # # 14. Would it be easier or harder to reject the null hypothesis if we lowered the threshold? Explain. # ## Limitations # A very common mistake is to calculate a p-value that falls below the arbitrarily decided threshold, and then use that result to make definitive statements about a population. # # **P-value tests can only *reject* null hypotheses, not *accept* alternative ones!** # 15. What is another limitation of p-value tests? 
(i.e. sample vs. population, definition of p-value, etc.)
#
# ## Using Confidence Intervals Instead

# Statistical inference can be done completely without p-values, despite the fact that p-values are heavily emphasized in research!

# 16. Calculate the 99% confidence interval for Gentoo body mass using the bootstrap method. Does the mean Adelie penguin body mass fall within this interval? How can we interpret this result?

# # Practice

# 17. A manufacturer claims that the tote bags they produce are made of 85% recycled material. A suspicious customer thinks that the actual percentage is significantly less. This customer gets a sample of 50 tote bags and analyzes the recycled material percentage of each bag. What are the null and alternative hypotheses in this scenario?
#
# 18. Proponents of a 4-day work week suggest that students who go to school for 4 days instead of 5 will still perform just as well academically. To put their claim to the test, a team of researchers conducts a study on high schools throughout the country. What are the null and alternative hypotheses in this scenario?
#
# 19. A stage magician claims to have psychic abilities. They say that they can correctly guess the suit (diamond, club, heart, or spade) of any card without looking. To prove their claim, the magician has a random skeptic go through 4 decks of cards.
#
# a. What are the null and alternative hypotheses in this scenario?
#
# b. Suppose that the magician correctly guessed 124/200 cards. What can you conclude?
art of data/topics/inference/archive/significance_test.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

import glob

import numpy as np
import tensorflow as tf

# Spectrogram / model geometry.
KERNEL_SIZE = 15        # Conv1D kernel width (time steps)
STRIDE = 4              # Conv1D stride (time steps)
FRAME_RATE = 48000      # audio sample rate in Hz
NFFT = 512              # FFT window size used when the spectrograms were built
# Number of spectrogram time steps per example. Cast to int: a float here
# would break tensor shapes downstream (48000 * 0.0195 = 936).
TX = int(FRAME_RATE * 0.0195)
# Number of frequency bins per time step (one-sided spectrum).
FX = int(NFFT / 2) + 1
# Output sequence length after the valid-padded Conv1D with KERNEL_SIZE/STRIDE.
TY = round((TX - KERNEL_SIZE + STRIDE) / STRIDE)
# One sigmoid output per time step (trigger word present / absent).
NUM_CLASSES = 1


def _extract_feature(record, feature):
    """Pull the float list stored under `feature` out of one serialized tf.train.Example."""
    example = tf.train.Example.FromString(record.numpy())
    return example.features.feature[feature].float_list.value


# +
# Load tf record dataset
def parser(record):
    """Decode one TFRecord into (spectrogram X, label sequence Y) tensors.

    Fix: the reshape targets previously referenced undefined names
    (`Tx`, `n_freq`, `Ty`, `num_classes`) and raised NameError; they now
    use the module-level constants of the same meaning.
    """
    X = tf.reshape(
        tf.py_function(
            lambda r: _extract_feature(r, "X"), (record,), tf.float32
        ),
        [TX, FX]
    )
    Y = tf.reshape(
        tf.py_function(
            lambda r: _extract_feature(r, "Y"), (record,), tf.float32
        ),
        [TY, NUM_CLASSES]
    )
    return X, Y


def dataset_input_fn(filenames, batch_size, num_epochs):
    """Build a shuffled, batched, repeating tf.data pipeline over TFRecord files.

    Pass num_epochs=None to repeat indefinitely (Keras then relies on
    steps_per_epoch to delimit an epoch).
    """
    dataset = tf.data.TFRecordDataset(filenames)
    dataset = dataset.map(parser)
    dataset = dataset.shuffle(buffer_size=10000)
    dataset = dataset.batch(batch_size)
    dataset = dataset.repeat(num_epochs)
    return dataset
# -

from tensorflow.keras.callbacks import ModelCheckpoint
from tensorflow.keras.models import Model, load_model, Sequential
from tensorflow.keras.layers import Dense, Activation, Dropout, Input, Masking, TimeDistributed, LSTM, Conv1D
from tensorflow.keras.layers import GRU, Bidirectional, BatchNormalization, Reshape
from tensorflow.keras.optimizers import Adam
from tensorflow.keras.utils import to_categorical


def seq_model(input_shape, n_classes):
    """
    Function creating the model's graph in Keras.

    Argument:
    input_shape -- shape of the model's input data (using Keras conventions),
                   here (time steps, frequency bins)

    Returns:
    model -- Keras model instance emitting one sigmoid per output time step
    """
    X_input = Input(shape=input_shape)

    # Step 1: CONV layer — downsamples time from TX to TY.
    X = Conv1D(196, kernel_size=KERNEL_SIZE, strides=STRIDE)(X_input)
    X = BatchNormalization()(X)
    X = Activation('relu')(X)
    X = Dropout(0.8)(X)

    # Step 2: First GRU layer (128 units, full sequence returned).
    X = GRU(units=128, return_sequences=True)(X)
    X = Dropout(0.8)(X)
    X = BatchNormalization()(X)

    # Step 3: Second GRU layer.
    X = GRU(units=128, return_sequences=True)(X)
    X = Dropout(0.8)(X)
    X = BatchNormalization()(X)
    X = Dropout(0.8)(X)

    # Step 4: Time-distributed dense layer — per-time-step sigmoid scores.
    X = TimeDistributed(Dense(n_classes, activation="sigmoid"))(X)

    model = Model(inputs=X_input, outputs=X)
    return model


keras_model = seq_model((TX, FX), NUM_CLASSES)
keras_model.summary()

# `learning_rate` replaces the deprecated `lr` keyword of tf.keras Adam.
opt = Adam(learning_rate=0.0001, beta_1=0.9, beta_2=0.999, decay=0.01)

keras_model.compile(loss='binary_crossentropy', optimizer=opt, metrics=['accuracy'])

# +
# NOTE(review): `tfrecord_path` was previously undefined (NameError at run
# time); the otherwise-unused `glob` import suggests it was meant to be
# globbed from disk — TODO confirm the TFRecord location.
tfrecord_path = sorted(glob.glob('data/*.tfrecord'))

training_set = dataset_input_fn(tfrecord_path, 16, None)

# Under TF2 a tf.data.Dataset is passed to fit() directly;
# Dataset.make_one_shot_iterator() no longer exists.
history = keras_model.fit(
    training_set,
    steps_per_epoch=10,
    epochs=5,
    verbose=1
)
# -
notebooks/multrigger-word-modeling.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: tensorflow_2_00
#     language: python
#     name: tensorflow_2_00
# ---

import numpy as np
import tensorflow as tf

# Load the CIFAR-10 dataset.
(x_train, y_train), (x_test, y_test) = tf.keras.datasets.cifar10.load_data()

# Normalize pixel values to [0, 1].
x_train = x_train/255.0
x_test = x_test/255.0

# Build the CNN. The trailing comments give each layer's output shape.
model = tf.keras.models.Sequential([
    tf.keras.layers.Conv2D(
        24, 3, activation='relu', padding='same', input_shape=(32, 32, 3)),            # (32, 32, 24)
    tf.keras.layers.Conv2D(filters=32, kernel_size=3, activation='relu'),              # (30, 30, 32)
    tf.keras.layers.MaxPooling2D(pool_size=2, strides=2, padding='valid'),             # (15, 15, 32)
    tf.keras.layers.Dropout(0.25),
    tf.keras.layers.Conv2D(filters=64, kernel_size=3, activation='relu',
                           padding='same'),                                            # (15, 15, 64)
    # NOTE(review): the original shape comment here claimed 64 channels while
    # filters=32 — confirm whether filters=64 was intended; the comments below
    # now match what the code actually does.
    tf.keras.layers.Conv2D(filters=32, kernel_size=3, activation='relu'),              # (13, 13, 32)
    tf.keras.layers.MaxPooling2D(pool_size=2, strides=2, padding='valid'),             # (6, 6, 32)
    tf.keras.layers.Dropout(0.25),
    tf.keras.layers.Flatten(),
    tf.keras.layers.Dense(512, activation='relu'),
    tf.keras.layers.Dropout(0.5),
    tf.keras.layers.Dense(10, activation='softmax')
])

# Print the model architecture.
model.summary()

# Checkpoint callback: save the full model (not just weights) after each epoch.
cp_callback = tf.keras.callbacks.ModelCheckpoint(
    '02_cnn_cifar10.hdf5', verbose=1, save_weights_only=False)

# Compile the model. sparse_categorical_crossentropy because the labels are
# integer class ids, not one-hot vectors.
model.compile(
    optimizer='adam',
    loss='sparse_categorical_crossentropy',
    metrics=['accuracy']
)

# Train.
model.fit(
    x_train,
    y_train,
    epochs=15,
    validation_data=(x_test, y_test),
    callbacks=[cp_callback]
)

# Evaluate on the test set.
val_loss, val_acc = model.evaluate(x_test, y_test, batch_size=128)

# Reload the saved model. Renamed from `load_model`, which shadowed
# tf.keras.models.load_model itself.
loaded_model = tf.keras.models.load_model("02_cnn_cifar10.hdf5")

# +
# Load a single test image.
# Import only display_png from IPython: the original `Image` import from
# IPython.display was immediately shadowed by PIL's `Image` below.
from IPython.display import display_png
from PIL import Image

img = Image.open('image/automobile10.png')
img = img.resize((32, 32))
display_png(img)
# -

# Shape the input image into a batch of one and normalize it the same way as
# the training data.
x = np.asarray(img)
x = x.reshape(-1, 32, 32, 3)
x = x.astype('float32')
x /= 255

# Run inference.
predict_result = loaded_model.predict(x)

# Show the raw probabilities and the predicted class id.
print(predict_result)
print(np.squeeze(predict_result))
print(np.argmax(np.squeeze(predict_result)))
02_cnn_cifar10/02_cnn_cifar10.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# %matplotlib inline
from pyvista import set_plot_theme
set_plot_theme('document')

# Extract Edges
# =============
#
# Extracts edges from a surface.
#

# sphinx_gallery_thumbnail_number = 2
import pyvista as pv
from pyvista import examples

# From vtk documentation, the edges of a mesh are one of the following:
#
# 1. boundary (used by one polygon) or a line cell
# 2. non-manifold (used by three or more polygons)
# 3. feature edges (edges used by two triangles and whose dihedral
#    angle \> feature\_angle)
# 4. manifold edges (edges used by exactly two polygons).
#
# This filter will extract those edges given a feature angle and return a
# dataset with lines that represent the edges of the original mesh. To
# demonstrate, we will first extract the edges around Queen Nefertiti's
# eyes:
#

# +
# Load Queen Nefertiti mesh
mesh = examples.download_nefertiti()

# Extract the edges above a 12 degree feature angle
edges = mesh.extract_feature_edges(12)

# Render the edge lines on top of the original mesh
p = pv.Plotter()
p.add_mesh(mesh, color=True)
p.add_mesh(edges, color="red", line_width=5)
# Define a camera position that will zoom to her eye
p.camera_position = [(96.0, -197.0, 45.0), (7.0, -109.0, 22.0), (0, 0, 1)]
p.show()
# -

# We can do this analysis for any `pyvista.PolyData`{.interpreted-text
# role="class"} object. Let's try the cow mesh example:
#

# +
mesh = examples.download_cow()

# A larger feature angle (20 degrees) keeps only the sharper creases.
edges = mesh.extract_feature_edges(20)

p = pv.Plotter()
p.add_mesh(mesh, color=True)
p.add_mesh(edges, color="red", line_width=5)
p.camera_position = [(9.5, 3.0, 5.5), (2.5, 1, 0), (0, 1, 0)]
p.show()
# -

# We can leverage the `pyvista.PolyData.n_open_edges`{.interpreted-text
# role="any"} property and
# `pyvista.PolyDataFilters.extract_feature_edges`{.interpreted-text
# role="func"} filter to count and extract the open edges on a
# `pyvista.PolyData`{.interpreted-text role="class"} mesh.

# Download a sample surface mesh with visible open edges
mesh = examples.download_bunny()

# We can get a count of the open edges with:
#
mesh.n_open_edges

# And we can extract those edges with the `boundary_edges` option of
# `pyvista.PolyDataFilters.extract_feature_edges`{.interpreted-text
# role="func"}:
#

# +
# Keep only boundary (open) edges; suppress feature and manifold edges.
edges = mesh.extract_feature_edges(boundary_edges=True, feature_edges=False, manifold_edges=False)

p = pv.Plotter()
p.add_mesh(mesh, color=True)
p.add_mesh(edges, color="red", line_width=5)
p.camera_position = [(-0.2, -0.13, 0.12), (-0.015, 0.10, -0.0), (0.28, 0.26, 0.9)]
p.show()
examples/01-filter/extract-edges.ipynb
# -*- coding: utf-8 -*- # --- # jupyter: # jupytext: # formats: ipynb,py:percent # text_representation: # extension: .py # format_name: percent # format_version: '1.3' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # %% [markdown] colab_type="text" id="7yuytuIllsv1" # # # Assignment 2: Transformer Summarizer # # Welcome to the second assignment of course 4. In this assignment you will explore summarization using the transformer model. Yes, you will implement the transformer decoder from scratch, but we will slowly walk you through it. There are many hints in this notebook so feel free to use them as needed. # # <img src = "transformerNews.png"> # # # %% [markdown] colab_type="text" id="4-3lxSnXRWPx" # ## Outline # # - [Introduction](#0) # - [Part 1: Importing the dataset](#1) # - [1.1 Encode & Decode helper functions](#1.1) # - [1.2 Defining parameters](#1.2) # - [1.3 Exploring the data](#1.3) # - [Part 2: Summarization with transformer](#2) # - [2.1 Dot product attention](#2.1) # - [Exercise 01](#ex01) # - [2.2 Causal Attention](#2.2) # - [Exercise 02](#ex02) # - [2.3 Transformer decoder block](#2.3) # - [Exercise 03](#ex03) # - [2.4 Transformer Language model](#2.4) # - [Exercise 04](#ex04) # - [Part 3: Training](#3) # - [3.1 Training the model](#3.1) # - [Exercise 05](#ex05) # - [Part 4: Evaluation](#4) # - [4.1 Loading in a trained model](#4.1) # - [Part 5: Testing with your own input](#5) # - [Exercise 6](#ex06) # - [5.1 Greedy decoding](#5.1) # - [Exercise 07](#ex07) # %% [markdown] colab_type="text" id="H4NlfEQhRWPy" # <a name='0'></a> # ### Introduction # # Summarization is an important task in natural language processing and could be useful for a consumer enterprise. For example, bots can be used to scrape articles, summarize them, and then you can use sentiment analysis to identify the sentiment about certain stocks. 
Anyways who wants to read an article or a long email today, when you can build a transformer to summarize text for you. Let's get started, by completing this assignment you will learn to: # # - Use built-in functions to preprocess your data # - Implement DotProductAttention # - Implement Causal Attention # - Understand how attention works # - Build the transformer model # - Evaluate your model # - Summarize an article # # As you can tell, this model is slightly different than the ones you have already implemented. This is heavily based on attention and does not rely on sequences, which allows for parallel computing. # %% colab={"base_uri": "https://localhost:8080/", "height": 34} colab_type="code" id="CChWzW-rEHVb" outputId="a0b3e98b-7fc6-492d-c8ad-3a263b54f670" import sys import os import numpy as np import textwrap wrapper = textwrap.TextWrapper(width=70) import trax from trax import layers as tl from trax.fastmath import numpy as jnp # to print the entire np array np.set_printoptions(threshold=sys.maxsize) # %% [markdown] colab_type="text" id="kEL2rvaHRWP4" # <a name='1'></a> # ## Part 1: Importing the dataset # %% [markdown] # Trax makes it easy to work with Tensorflow's datasets: # %% colab={} colab_type="code" id="VInmKSkhEhle" # This will download the dataset if no data_dir is specified. # Downloading and processing can take bit of time, # so we have the data already in 'data/' for you # Importing CNN/DailyMail articles dataset train_stream_fn = trax.data.TFDS('cnn_dailymail', data_dir='data/', keys=('article', 'highlights'), train=True) # This should be much faster as the data is downloaded already. eval_stream_fn = trax.data.TFDS('cnn_dailymail', data_dir='data/', keys=('article', 'highlights'), train=False) # %% [markdown] # <a name='1.1'></a> # ## 1.1 Tokenize & Detokenize helper functions # # Just like in the previous assignment, the cell above loads in the encoder for you. 
Given any data set, you have to be able to map words to their indices, and indices to their words. The inputs and outputs to your [Trax](https://github.com/google/trax) models are usually tensors of numbers where each number corresponds to a word. If you were to process your data manually, you would have to make use of the following:
#
# - <span style='color:blue'> word2Ind: </span> a dictionary mapping the word to its index.
# - <span style='color:blue'> ind2Word:</span> a dictionary mapping the index to its word.
# - <span style='color:blue'> word2Count:</span> a dictionary mapping the word to the number of times it appears.
# - <span style='color:blue'> num_words:</span> total number of words that have appeared.
#
# Since you have already implemented these in previous assignments of the specialization, we will provide you with helper functions that will do this for you. Run the cell below to get the following functions:
#
# - <span style='color:blue'> tokenize: </span> converts a text sentence to its corresponding token list (i.e. list of indices). Also converts words to subwords.
# - <span style='color:blue'> detokenize: </span> converts a token list to its corresponding sentence (i.e. string).

# %% colab={} colab_type="code" id="djTiSLcaNFGa"
def tokenize(input_str, EOS=1):
    """Tokenize `input_str` into a list of subword token ids, appending EOS.

    (Doc fix: the previous docstring claimed this returns a "features dict";
    it returns a plain list of integer token ids terminated by the EOS id.)
    """
    # Use the trax.data.tokenize method. It takes streams and returns streams,
    # we get around it by making a 1-element stream with `iter`.
    inputs = next(trax.data.tokenize(iter([input_str]),
                                     vocab_dir='vocab_dir/',
                                     vocab_file='summarize32k.subword.subwords'))

    # Mark the end of the sentence with EOS
    return list(inputs) + [EOS]


def detokenize(integers):
    """List of token ids to str, line-wrapped for display with `wrapper`."""
    s = trax.data.detokenize(integers,
                             vocab_dir='vocab_dir/',
                             vocab_file='summarize32k.subword.subwords')

    return wrapper.fill(s)

# %% [markdown] colab_type="text" id="7WvhaFbCRWQS"
# <a name='1.2'></a>
#
# ## 1.2 Preprocessing for Language Models: Concatenate It!
# # This week you will use a language model -- Transformer Decoder -- to solve # an input-output problem. As you know, language models only predict the next # word, they have no notion of inputs. To create a single input suitable for # a language model, we concatenate inputs with targets putting a separator # in between. We also need to create a mask -- with 0s at inputs and 1s at targets -- so that the model is not penalized for mis-predicting the article and only focuses on the summary. See the preprocess function below for how this is done. # %% colab={} colab_type="code" id="c4rgPxYSRWQS" # Special tokens SEP = 0 # Padding or separator token EOS = 1 # End of sentence token # Concatenate tokenized inputs and targets using 0 as separator. def preprocess(stream): for (article, summary) in stream: joint = np.array(list(article) + [EOS, SEP] + list(summary) + [EOS]) mask = [0] * (len(list(article)) + 2) + [1] * (len(list(summary)) + 1) # Accounting for EOS and SEP yield joint, joint, np.array(mask) # You can combine a few data preprocessing steps into a pipeline like this. input_pipeline = trax.data.Serial( # Tokenizes trax.data.Tokenize(vocab_dir='vocab_dir/', vocab_file='summarize32k.subword.subwords'), # Uses function defined above preprocess, # Filters out examples longer than 2048 trax.data.FilterByLength(2048) ) # Apply preprocessing to data streams. train_stream = input_pipeline(train_stream_fn()) eval_stream = input_pipeline(eval_stream_fn()) train_input, train_target, train_mask = next(train_stream) assert sum((train_input - train_target)**2) == 0 # They are the same in Language Model (LM). 
# %% colab={"base_uri": "https://localhost:8080/", "height": 238} colab_type="code" id="uKFoGsUKSa_I" outputId="bc4d6634-d716-4311-d49c-1956bca2bc2d" # prints mask, 0s on article, 1s on summary print(f'Single example mask:\n\n {train_mask}') # %% colab={"base_uri": "https://localhost:8080/", "height": 459} colab_type="code" id="S4uHyCkbSuUo" outputId="52845be8-f2fc-4803-bf7a-ed9725fe2bac" # prints: [Example][<EOS>][<pad>][Example Summary][<EOS>] print(f'Single example:\n\n {detokenize(train_input)}') # %% [markdown] colab_type="text" id="T4sDS1WIVaYG" # <a name='1.3'></a> # # ## 1.3 Batching with bucketing # # As in the previous week, we use bucketing to create batches of data. # %% colab={} colab_type="code" id="oqj1NsbERWQX" # Bucketing to create batched generators. # Buckets are defined in terms of boundaries and batch sizes. # Batch_sizes[i] determines the batch size for items with length < boundaries[i] # So below, we'll take a batch of 16 sentences of length < 128 , 8 of length < 256, # 4 of length < 512. And so on. boundaries = [128, 256, 512, 1024] batch_sizes = [16, 8, 4, 2, 1] # Create the streams. train_batch_stream = trax.data.BucketByLength( boundaries, batch_sizes)(train_stream) eval_batch_stream = trax.data.BucketByLength( boundaries, batch_sizes)(eval_stream) # %% colab={} colab_type="code" id="P6M5OA8QRWQb" # Every execution will result in generation of a different article # Try running this cell multiple times to see how the length of the examples affects the batch size input_batch, _, mask_batch = next(train_batch_stream) # Shape of the input_batch input_batch.shape # %% colab={"base_uri": "https://localhost:8080/", "height": 1000} colab_type="code" id="SjNOlljxTGuQ" outputId="9227c68c-6369-4ce8-8137-506c594f6ad2" # print corresponding integer values print(input_batch[0]) # %% [markdown] colab_type="text" id="GD-72TENV2Jk" # Things to notice: # - First we see the corresponding values of the words. 
# - The first 1, which represents the `<EOS>` tag of the article. # - Followed by a 0, which represents a `<pad>` tag. # - After the first 0 (`<pad>` tag) the corresponding values are of the words that are used for the summary of the article. # - The second 1 represents the `<EOS>` tag for the summary. # - All the trailing 0s represent `<pad>` tags which are appended to maintain consistent length (If you don't see them then it would mean it is already of max length) # # %% colab={"base_uri": "https://localhost:8080/", "height": 1000} colab_type="code" id="Bu05ZwbWTE6P" outputId="3d455bd7-e343-4c25-a467-572d2abd837f" # print the article and its summary print('Article:\n\n', detokenize(input_batch[0])) # %% [markdown] colab_type="text" id="aNFVhgHoncGm" # You can see that the data has the following structure: # - <span style='color:blue'> [Article] </span> -> `<EOS>` -> `<pad>` -> <span style='color:blue'> [Article Summary] </span> -> `<EOS>` -> (possibly) multiple `<pad>` # # The loss is taken only on the summary using cross_entropy as loss function. # %% [markdown] colab_type="text" id="Un8NHIRoj-1W" # <a name='2'></a> # # Part 2: Summarization with transformer # # Now that we have given you the data generator and have handled the preprocessing for you, it is time for you to build your own model. We saved you some time because we know you have already preprocessed data before in this specialization, so we would rather you spend your time doing the next steps. # # You will be implementing the attention from scratch and then using it in your transformer model. Concretely, you will understand how attention works, how you use it to connect the encoder and the decoder. # # <img src="transformer_decoder_zoomin.png"> # # <a name='2.1'></a> # ## 2.1 Dot product attention # # Now you will implement dot product attention which takes in a query, key, value, and a mask. It returns the output. 
# # <img src ="dotproduct.png">
#
#
# Here are some helper functions that will help you create tensors and display useful information:
# - `create_tensor` creates a `jax numpy array` from a list of lists.
# - `display_tensor` prints out the shape and the actual tensor.

# %%
def create_tensor(t):
    """Build a jax numpy array from (nested) Python lists."""
    tensor = jnp.array(t)
    return tensor


def display_tensor(t, name):
    """Show a labelled tensor: first its shape, then its values."""
    print(f'{name} shape: {t.shape}\n')
    print(f'{t}\n')

# %% [markdown]
# Before implementing it yourself, you can play around with a toy example of `dot product attention` without the softmax operation. Technically it would not be `dot product attention` without the softmax but this is done to avoid giving away too much of the answer and the idea is to display these tensors to give you a sense of how they look like.
#
# The formula for attention is this one:
#
# $$
# \text { Attention }(Q, K, V)=\operatorname{softmax}\left(\frac{Q K^{T}}{\sqrt{d_{k}}}+{M}\right) V\tag{1}\
# $$
#
# $d_{k}$ stands for the dimension of queries and keys.
#
# The `query`, `key`, `value` and `mask` vectors are provided for this example.
#
# Notice that the masking is done using very negative values that will yield a similar effect to using $-\infty $.
# %% colab={"base_uri": "https://localhost:8080/", "height": 221} colab_type="code" id="_0x0HJXwRWQk" outputId="d6d78a8e-e3cc-47af-9584-2bdcdfcca0cd"
# Toy query/key/value/mask tensors (no batch dimension yet).
q = create_tensor([[1, 0, 0], [0, 1, 0]])
display_tensor(q, 'query')
k = create_tensor([[1, 2, 3], [4, 5, 6]])
display_tensor(k, 'key')
v = create_tensor([[0, 1, 0], [1, 0, 1]])
display_tensor(v, 'value')
# -1e9 approximates -inf: after softmax that position gets ~0 weight.
m = create_tensor([[0, 0], [-1e9, 0]])
display_tensor(m, 'mask')

# %% [markdown]
# **Expected Output:**
# ```CPP
# query shape: (2, 3)
#
# [[1 0 0]
#  [0 1 0]]
#
# key shape: (2, 3)
#
# [[1 2 3]
#  [4 5 6]]
#
# value shape: (2, 3)
#
# [[0 1 0]
#  [1 0 1]]
#
# mask shape: (2, 2)
#
# [[ 0.e+00  0.e+00]
#  [-1.e+09  0.e+00]]
#
# ```

# %% colab={"base_uri": "https://localhost:8080/", "height": 102} colab_type="code" id="kVR9u4faRWQo" outputId="f01ea4ca-4152-4b54-b76a-e4b5917ae2b7"
# Scaled dot product QK^T / sqrt(d_k) with d_k = 3.
q_dot_k = q @ k.T / jnp.sqrt(3)
display_tensor(q_dot_k, 'query dot key')

# %% [markdown]
# **Expected Output:**
# ```CPP
# query dot key shape: (2, 2)
#
# [[0.57735026 2.309401  ]
#  [1.1547005  2.8867514 ]]
# ```

# %%
# Additive masking: masked positions become very negative.
masked = q_dot_k + m
display_tensor(masked, 'masked query dot key')

# %% [markdown]
# **Expected Output:**
# ```CPP
# masked query dot key shape: (2, 2)
#
# [[ 5.7735026e-01  2.3094010e+00]
#  [-1.0000000e+09  2.8867514e+00]]
# ```

# %%
# Note: no softmax here, so this is not yet real attention output.
display_tensor(masked @ v, 'masked query dot key dot value')

# %% [markdown]
# **Expected Output:**
# ```CPP
# masked query dot key dot value shape: (2, 3)
#
# [[ 2.3094010e+00  5.7735026e-01  2.3094010e+00]
#  [ 2.8867514e+00 -1.0000000e+09  2.8867514e+00]]
# ```

# %% [markdown]
# In order to use the previous dummy tensors to test some of the graded functions, a batch dimension should be added to them so they mimic the shape of real-life examples. The mask is also replaced by a version of it that resembles the one that is used by trax:

# %%
# Add a leading batch dimension of size 1 to each toy tensor.
q_with_batch = q[None,:]
display_tensor(q_with_batch, 'query with batch dim')
k_with_batch = k[None,:]
display_tensor(k_with_batch, 'key with batch dim')
v_with_batch = v[None,:]
display_tensor(v_with_batch, 'value with batch dim')
# Boolean mask: False marks positions to be masked out.
m_bool = create_tensor([[True, True], [False, True]])
display_tensor(m_bool, 'boolean mask')

# %% [markdown]
# **Expected Output:**
# ```CPP
# query with batch dim shape: (1, 2, 3)
#
# [[[1 0 0]
#   [0 1 0]]]
#
# key with batch dim shape: (1, 2, 3)
#
# [[[1 2 3]
#   [4 5 6]]]
#
# value with batch dim shape: (1, 2, 3)
#
# [[[0 1 0]
#   [1 0 1]]]
#
# boolean mask shape: (2, 2)
#
# [[ True  True]
#  [False  True]]
# ```

# %% [markdown]
# <a name='ex01'></a>
# ### Exercise 01
#
# **Instructions:** Implement the dot product attention. Concretely, implement the following equation
#
# $$
# \text { Attention }(Q, K, V)=\operatorname{softmax}\left(\frac{Q K^{T}}{\sqrt{d_{k}}}+{M}\right) V\tag{1}\
# $$
#
# $Q$ - query,
# $K$ - key,
# $V$ - values,
# $M$ - mask,
# ${d_k}$ - depth/dimension of the queries and keys (used for scaling down)
#
# You can implement this formula either by `trax` numpy (trax.math.numpy) or regular `numpy` but it is recommended to use `jnp`.
#
# Something to take into consideration is that within trax, the masks are tensors of `True/False` values not 0's and $-\infty$ as in the previous example. Within the graded function don't think of applying the mask by summing up matrices, instead use `jnp.where()` and treat the **mask as a tensor of boolean values with `False` for values that need to be masked and True for the ones that don't.**
#
# Also take into account that the real tensors are far more complex than the toy ones you just played with. Because of this avoid using shortened operations such as `@` for dot product or `.T` for transposing. Use `jnp.matmul()` and `jnp.swapaxes()` instead.
# # This is the self-attention block for the transformer decoder. Good luck!

# %% colab={} colab_type="code" id="kSauPt0NUl_o"
# UNQ_C1
# GRADED FUNCTION: DotProductAttention
def DotProductAttention(query, key, value, mask):
    """Dot product self-attention.

    Args:
        query (jax.interpreters.xla.DeviceArray): array of query representations with shape (L_q by d)
        key (jax.interpreters.xla.DeviceArray): array of key representations with shape (L_k by d)
        value (jax.interpreters.xla.DeviceArray): array of value representations with shape (L_k by d) where L_v = L_k
        mask (jax.interpreters.xla.DeviceArray): attention-mask, gates attention with shape (L_q by L_k);
            boolean — False marks positions that must not be attended to.

    Returns:
        jax.interpreters.xla.DeviceArray: Self-attention array for q, k, v arrays. (L_q by d)
    """

    assert query.shape[-1] == key.shape[-1] == value.shape[-1], "Embedding dimensions of q, k, v aren't all the same"

    ### START CODE HERE (REPLACE INSTANCES OF 'None' with your code) ###

    # Save depth/dimension of the query embedding for scaling down the dot product
    depth = query.shape[-1]

    # Calculate scaled query key dot product according to formula above
    dots = jnp.matmul(query, jnp.swapaxes(key, -1, -2)) / jnp.sqrt(depth)

    # Apply the mask: masked-out (False) positions get a very negative score,
    # which softmax turns into ~0 probability.
    if mask is not None: # The 'None' in this line does not need to be replaced
        dots = jnp.where(mask, dots, jnp.full_like(dots, -1e9))

    # Softmax formula implementation
    # Use trax.fastmath.logsumexp of dots to avoid underflow by division by large numbers
    # Hint: Last axis should be used and keepdims should be True
    # Note: softmax = e^(dots - logsumexp(dots)) = E^dots / sumexp(dots)
    logsumexp = trax.fastmath.logsumexp(dots, axis=-1, keepdims=True)

    # Take exponential of dots minus logsumexp to get softmax
    # Use jnp.exp()
    dots = jnp.exp(dots - logsumexp)

    # Multiply dots by value to get self-attention
    # Use jnp.matmul()
    attention = jnp.matmul(dots, value)

    ## END CODE HERE ###

    return attention

# %% colab={"base_uri": "https://localhost:8080/", "height": 51} colab_type="code" id="8o0K7VWKRWQw" outputId="1c51af3a-5f11-480f-b33b-419072d8298c"
DotProductAttention(q_with_batch, k_with_batch, v_with_batch, m_bool)

# %% [markdown]
# **Expected Output:**
# ```CPP
# DeviceArray([[[0.8496746 , 0.15032545, 0.8496746 ],
#               [1.        , 0.        , 1.        ]]], dtype=float32)
# ```

# %% [markdown] colab_type="text" id="2y2PSiLVRWQ2"
# <a name='2.2'></a>
#
# ## 2.2 Causal Attention
#
# Now you are going to implement causal attention: multi-headed attention with a mask to attend only to words that occurred before.
#
# <img src = "causal.png">
#
# In the image above, a word can see everything that is before it, but not what is after it. To implement causal attention, you will have to transform vectors and do many reshapes. You will need to implement the functions below.
#
# <a name='ex02'></a>
# ### Exercise 02
#
# Implement the following functions that will be needed for Causal Attention:
#
# - <span style='color:blue'> compute_attention_heads </span>: Gets an input $x$ of dimension (batch_size, seqlen, n_heads $\times$ d_head) and splits the last (depth) dimension and stacks it to the zeroth dimension to allow matrix multiplication (batch_size $\times$ n_heads, seqlen, d_head).
# - <span style='color:blue'> dot_product_self_attention </span>: Creates a mask matrix with `False` values above the diagonal and `True` values below and calls DotProductAttention which implements dot product self attention.
# - <span style='color:blue'> compute_attention_output </span>: Undoes compute_attention_heads by splitting first (vertical) dimension and stacking in the last (depth) dimension (batch_size, seqlen, n_heads $\times$ d_head). These operations concatenate (stack/merge) the heads.
#
# Next there are some toy tensors which may serve to give you an idea of the data shapes and opperations involved in Causal Attention. They are also useful to test out your functions!
# %% colab={"base_uri": "https://localhost:8080/", "height": 442} colab_type="code" id="VRH67YcrRWQ3" outputId="847a9416-877a-4246-c738-0eacdf46de59"
tensor2d = create_tensor(q)
display_tensor(tensor2d, 'query matrix (2D tensor)')

tensor4d2b = create_tensor([[q, q], [q, q]])
display_tensor(tensor4d2b, 'batch of two (multi-head) collections of query matrices (4D tensor)')

tensor3dc = create_tensor([jnp.concatenate([q, q], axis = -1)])
display_tensor(tensor3dc, 'one batch of concatenated heads of query matrices (3d tensor)')

tensor3dc3b = create_tensor([jnp.concatenate([q, q], axis = -1), jnp.concatenate([q, q], axis = -1), jnp.concatenate([q, q], axis = -1)])
display_tensor(tensor3dc3b, 'three batches of concatenated heads of query matrices (3d tensor)')

# %% [markdown]
# It is important to know that the following 3 functions would normally be defined within the `CausalAttention` function further below.
#
# However this makes these functions harder to test. Because of this, these functions are shown individually using a `closure` (when necessary) that simulates them being inside of the `CausalAttention` function. This is done because they rely on some variables that can be accessed from within `CausalAttention`.
#
# ### Support Functions
#
# <span style='color:blue'> compute_attention_heads </span>: Gets an input $x$ of dimension (batch_size, seqlen, n_heads $\times$ d_head) and splits the last (depth) dimension and stacks it to the zeroth dimension to allow matrix multiplication (batch_size $\times$ n_heads, seqlen, d_head).
#
# **For the closures you only have to fill the inner function.**

# %%
# UNQ_C2
# GRADED FUNCTION: compute_attention_heads_closure
def compute_attention_heads_closure(n_heads, d_head):
    """ Function that simulates environment inside CausalAttention function.

    Args:
        d_head (int): dimensionality of heads.
        n_heads (int): number of attention heads.

    Returns:
        function: compute_attention_heads function
    """

    def compute_attention_heads(x):
        """ Compute the attention heads.

        Args:
            x (jax.interpreters.xla.DeviceArray): tensor with shape (batch_size, seqlen, n_heads X d_head).

        Returns:
            jax.interpreters.xla.DeviceArray: reshaped tensor with shape (batch_size X n_heads, seqlen, d_head).
        """

        ### START CODE HERE (REPLACE INSTANCES OF 'None' with your code) ###

        # Size of the x's batch dimension
        batch_size = x.shape[0]
        # Length of the sequence
        # Should be size of x's first dimension without counting the batch dim
        seqlen = x.shape[1]

        # Reshape x using jnp.reshape()
        # batch_size, seqlen, n_heads*d_head -> batch_size, seqlen, n_heads, d_head
        x = jnp.reshape(x, (batch_size, seqlen, n_heads, d_head))
        # Transpose x using jnp.transpose()
        # batch_size, seqlen, n_heads, d_head -> batch_size, n_heads, seqlen, d_head
        # Note that the values within the tuple are the indexes of the dimensions of x and you must rearrange them
        x = jnp.transpose(x, (0, 2, 1, 3))
        # Reshape x using jnp.reshape()
        # batch_size, n_heads, seqlen, d_head -> batch_size*n_heads, seqlen, d_head
        x = jnp.reshape(x, (-1, seqlen, d_head))

        ### END CODE HERE ###

        return x

    return compute_attention_heads

# %%
display_tensor(tensor3dc3b, "input tensor")
result_cah = compute_attention_heads_closure(2,3)(tensor3dc3b)
display_tensor(result_cah, "output tensor")

# %% [markdown]
# **Expected Output:**
# ```CPP
# input tensor shape: (3, 2, 6)
#
# [[[1 0 0 1 0 0]
#   [0 1 0 0 1 0]]
#
#  [[1 0 0 1 0 0]
#   [0 1 0 0 1 0]]
#
#  [[1 0 0 1 0 0]
#   [0 1 0 0 1 0]]]
#
# output tensor shape: (6, 2, 3)
#
# [[[1 0 0]
#   [0 1 0]]
#
#  [[1 0 0]
#   [0 1 0]]
#
#  [[1 0 0]
#   [0 1 0]]
#
#  [[1 0 0]
#   [0 1 0]]
#
#  [[1 0 0]
#   [0 1 0]]
#
#  [[1 0 0]
#   [0 1 0]]]
# ```

# %% [markdown]
# <span style='color:blue'> dot_product_self_attention </span>: Creates a mask matrix with `False` values above the diagonal and `True` values below and calls DotProductAttention which implements dot product self attention.
# %%
# UNQ_C3
# GRADED FUNCTION: dot_product_self_attention
def dot_product_self_attention(q, k, v):
    """ Masked dot product self attention.

    Args:
        q (jax.interpreters.xla.DeviceArray): queries.
        k (jax.interpreters.xla.DeviceArray): keys.
        v (jax.interpreters.xla.DeviceArray): values.

    Returns:
        jax.interpreters.xla.DeviceArray: masked dot product self attention tensor.
    """

    ### START CODE HERE (REPLACE INSTANCES OF 'None' with your code) ###

    # Hint: mask size should be equal to L_q. Remember that q has shape (batch_size, L_q, d)
    mask_size = q.shape[-2]

    # Creates a matrix with ones below the diagonal and 0s above. It should have shape (1, mask_size, mask_size)
    # Notice that 1's and 0's get casted to True/False by setting dtype to jnp.bool_
    # Use jnp.tril() - Lower triangle of an array and jnp.ones()
    # The leading 1 lets the mask broadcast over the batch dimension.
    mask = jnp.tril(jnp.ones((1, mask_size, mask_size), dtype=jnp.bool_), k=0)

    ### END CODE HERE ###

    return DotProductAttention(q, k, v, mask)

# %%
dot_product_self_attention(q_with_batch, k_with_batch, v_with_batch)

# %% [markdown]
# **Expected Output:**
# ```CPP
# DeviceArray([[[0.        , 1.        , 0.        ],
#               [0.8496746 , 0.15032543, 0.8496746 ]]], dtype=float32)
# ```

# %% [markdown]
# <span style='color:blue'> compute_attention_output </span>: Undoes compute_attention_heads by splitting first (vertical) dimension and stacking in the last (depth) dimension (batch_size, seqlen, n_heads $\times$ d_head). These operations concatenate (stack/merge) the heads.

# %%
# UNQ_C4
# GRADED FUNCTION: compute_attention_output_closure
def compute_attention_output_closure(n_heads, d_head):
    """ Function that simulates environment inside CausalAttention function.

    Args:
        d_head (int): dimensionality of heads.
        n_heads (int): number of attention heads.

    Returns:
        function: compute_attention_output function
    """

    def compute_attention_output(x):
        """ Compute the attention output.

        Args:
            x (jax.interpreters.xla.DeviceArray): tensor with shape (batch_size X n_heads, seqlen, d_head).

        Returns:
            jax.interpreters.xla.DeviceArray: reshaped tensor with shape (batch_size, seqlen, n_heads X d_head).
        """

        ### START CODE HERE (REPLACE INSTANCES OF 'None' with your code) ###

        # Length of the sequence
        # Should be size of x's first dimension without counting the batch dim
        seqlen = x.shape[1]

        # Reshape x using jnp.reshape() to shape (batch_size, n_heads, seqlen, d_head)
        x = jnp.reshape(x, (-1, n_heads, seqlen, d_head))
        # Transpose x using jnp.transpose() to shape (batch_size, seqlen, n_heads, d_head)
        x = jnp.transpose(x, (0, 2, 1, 3))

        ### END CODE HERE ###

        # Reshape to allow to concatenate the heads
        return jnp.reshape(x, (-1, seqlen, n_heads * d_head))

    return compute_attention_output

# %%
display_tensor(result_cah, "input tensor")
result_cao = compute_attention_output_closure(2,3)(result_cah)
display_tensor(result_cao, "output tensor")

# %% [markdown]
# **Expected Output:**
# ```CPP
# input tensor shape: (6, 2, 3)
#
# [[[1 0 0]
#   [0 1 0]]
#
#  [[1 0 0]
#   [0 1 0]]
#
#  [[1 0 0]
#   [0 1 0]]
#
#  [[1 0 0]
#   [0 1 0]]
#
#  [[1 0 0]
#   [0 1 0]]
#
#  [[1 0 0]
#   [0 1 0]]]
#
# output tensor shape: (3, 2, 6)
#
# [[[1 0 0 1 0 0]
#   [0 1 0 0 1 0]]
#
#  [[1 0 0 1 0 0]
#   [0 1 0 0 1 0]]
#
#  [[1 0 0 1 0 0]
#   [0 1 0 0 1 0]]]
# ```

# %% [markdown]
# ### Causal Attention Function
#
# Now it is time for you to put everything together within the `CausalAttention` or Masked multi-head attention function:

# %% [markdown]
# <img src = "masked-attention.png">
#
# **Instructions:** Implement the causal attention.
# Your model returns the causal attention through a $tl.Serial$ with the following:
#
# - <span style='color:blue'> [tl.Branch](https://trax-ml.readthedocs.io/en/latest/trax.layers.html#trax.layers.combinators.Branch) </span>: consisting of 3 [tl.Dense(d_feature), ComputeAttentionHeads] to account for the queries, keys, and values.
# - <span style='color:blue'> [tl.Fn](https://trax-ml.readthedocs.io/en/latest/trax.layers.html#trax.layers.base.Fn)</span>: Takes in dot_product_self_attention function and uses it to compute the dot product using $Q$, $K$, $V$.
# - <span style='color:blue'> [tl.Fn](https://trax-ml.readthedocs.io/en/latest/trax.layers.html#trax.layers.base.Fn)</span>: Takes in compute_attention_output_closure to allow for parallel computing.
# - <span style='color:blue'> [tl.Dense](https://trax-ml.readthedocs.io/en/latest/trax.layers.html#trax.layers.core.Dense)</span>: Final Dense layer, with dimension `d_feature`.
#
# Remember that in order for trax to properly handle the functions you just defined, they need to be added as layers using the [`tl.Fn()`](https://trax-ml.readthedocs.io/en/latest/trax.layers.html#trax.layers.base.Fn) function.

# %% colab={} colab_type="code" id="B9Adn6DtRWRG"
# UNQ_C5
# GRADED FUNCTION: CausalAttention
def CausalAttention(d_feature,
                    n_heads,
                    compute_attention_heads_closure=compute_attention_heads_closure,
                    dot_product_self_attention=dot_product_self_attention,
                    compute_attention_output_closure=compute_attention_output_closure,
                    mode='train'):
    """Transformer-style multi-headed causal attention.

    Args:
        d_feature (int): dimensionality of feature embedding.
        n_heads (int): number of attention heads.
        compute_attention_heads_closure (function): Closure around compute_attention heads.
        dot_product_self_attention (function): dot_product_self_attention function.
        compute_attention_output_closure (function): Closure around compute_attention_output.
        mode (str): 'train' or 'eval'.

    Returns:
        trax.layers.combinators.Serial: Multi-headed self-attention model.
    """

    # d_feature must split evenly across heads.
    assert d_feature % n_heads == 0
    d_head = d_feature // n_heads

    ### START CODE HERE (REPLACE INSTANCES OF 'None' with your code) ###

    # HINT: The second argument to tl.Fn() is an uncalled function (without the parentheses)
    # Since you are dealing with closures you might need to call the outer
    # function with the correct parameters to get the actual uncalled function.
    # NOTE: tl.Fn wraps a stateless function, so reusing the same layer object
    # in all three branches below is safe here.
    ComputeAttentionHeads = tl.Fn('AttnHeads', compute_attention_heads_closure(n_heads, d_head), n_out=1)

    return tl.Serial(
        tl.Branch( # creates three towers for one input, takes activations and creates queries keys and values
            [tl.Dense(d_feature), ComputeAttentionHeads], # queries
            [tl.Dense(d_feature), ComputeAttentionHeads], # keys
            [tl.Dense(d_feature), ComputeAttentionHeads], # values
        ),

        tl.Fn('DotProductAttn', dot_product_self_attention, n_out=1), # takes QKV
        # HINT: The second argument to tl.Fn() is an uncalled function
        # Since you are dealing with closures you might need to call the outer
        # function with the correct parameters to get the actual uncalled function.
        tl.Fn('AttnOutput', compute_attention_output_closure(n_heads, d_head), n_out=1), # to allow for parallel
        tl.Dense(d_feature) # Final dense layer
    )

    ### END CODE HERE ###

# %%
# Take a look at the causal attention model
print(CausalAttention(d_feature=512, n_heads=8))

# %% [markdown]
# **Expected Output:**
# ```CPP
# Serial[
#   Branch_out3[
#     [Dense_512, AttnHeads]
#     [Dense_512, AttnHeads]
#     [Dense_512, AttnHeads]
#   ]
#   DotProductAttn_in3
#   AttnOutput
#   Dense_512
# ]
# ```

# %% [markdown] colab_type="text" id="W6zwtPjqRWRJ"
# <a name='2.3'></a>
#
# ## 2.3 Transformer decoder block
#
# Now that you have implemented the causal part of the transformer, you will implement the transformer decoder block. Concretely you will be implementing this image now.
# # <img src = "transformer_decoder_1.png" style = "height:300px">
#
# To implement this function, you will have to call the `CausalAttention` or Masked multi-head attention function you implemented above. You will have to add a feedforward block which consists of:
#
# - <span style='color:blue'> [tl.LayerNorm](https://trax-ml.readthedocs.io/en/latest/trax.layers.html#trax.layers.normalization.LayerNorm) </span>: used to layer normalize
# - <span style='color:blue'> [tl.Dense](https://trax-ml.readthedocs.io/en/latest/trax.layers.html#trax.layers.core.Dense) </span>: the dense layer
# - <span style='color:blue'> [ff_activation](https://trax-ml.readthedocs.io/en/latest/trax.layers.html#trax.layers.activation_fns.Relu) </span>: feed forward activation (we use ReLU) here.
# - <span style='color:blue'> [tl.Dropout](https://trax-ml.readthedocs.io/en/latest/trax.layers.html#trax.layers.core.Dropout) </span>: dropout layer
# - <span style='color:blue'> [tl.Dense](https://trax-ml.readthedocs.io/en/latest/trax.layers.html#trax.layers.core.Dense) </span>: dense layer
# - <span style='color:blue'> [tl.Dropout](https://trax-ml.readthedocs.io/en/latest/trax.layers.html#trax.layers.core.Dropout) </span>: dropout layer
#
# Finally once you implement the feedforward, you can go ahead and implement the entire block using:
#
# - <span style='color:blue'> [tl.Residual](https://trax-ml.readthedocs.io/en/latest/trax.layers.html#trax.layers.combinators.Residual) </span>: takes in the tl.LayerNorm(), causal attention block, tl.Dropout.
#
# - <span style='color:blue'> [tl.Residual](https://trax-ml.readthedocs.io/en/latest/trax.layers.html#trax.layers.combinators.Residual) </span>: takes in the feedforward block you will implement.
#
# <a name='ex03'></a>
# ### Exercise 03
# **Instructions:** Implement the transformer decoder block. Good luck!
# %% colab={} colab_type="code" id="gKOxnRbp1K5U" # UNQ_C6 # GRADED FUNCTION: DecoderBlock def DecoderBlock(d_model, d_ff, n_heads, dropout, mode, ff_activation): """Returns a list of layers that implements a Transformer decoder block. The input is an activation tensor. Args: d_model (int): depth of embedding. d_ff (int): depth of feed-forward layer. n_heads (int): number of attention heads. dropout (float): dropout rate (how much to drop out). mode (str): 'train' or 'eval'. ff_activation (function): the non-linearity in feed-forward layer. Returns: list: list of trax.layers.combinators.Serial that maps an activation tensor to an activation tensor. """ ### START CODE HERE (REPLACE INSTANCES OF 'None' with your code) ### # Create masked multi-head attention block using CausalAttention function causal_attention = CausalAttention( d_model, n_heads=n_heads, mode=mode ) # Create feed-forward block (list) with two dense layers with dropout and input normalized feed_forward = [ # Normalize layer inputs tl.LayerNorm(), # Add first feed forward (dense) layer (don't forget to set the correct value for n_units) tl.Dense(d_ff), # Add activation function passed in as a parameter (you need to call it!) 
ff_activation(), # Generally ReLU # Add dropout with rate and mode specified (i.e., don't use dropout during evaluation) tl.Dropout(rate=dropout, mode=mode), # Add second feed forward layer (don't forget to set the correct value for n_units) tl.Dense(d_model), # Add dropout with rate and mode specified (i.e., don't use dropout during evaluation) tl.Dropout(rate=dropout,mode=mode) ] # Add list of two Residual blocks: the attention with normalization and dropout and feed-forward blocks return [ tl.Residual( # Normalize layer input tl.LayerNorm(), # Add causal attention block previously defined (without parentheses) causal_attention, # Add dropout with rate and mode specified tl.Dropout(rate=dropout, mode=mode) ), tl.Residual( # Add feed forward block (without parentheses) feed_forward ), ] ### END CODE HERE ### # %% # Take a look at the decoder block print(DecoderBlock(d_model=512, d_ff=2048, n_heads=8, dropout=0.1, mode='train', ff_activation=tl.Relu)) # %% [markdown] # **Expected Output:** # ```CPP # [Serial[ # Branch_out2[ # None # Serial[ # LayerNorm # Serial[ # Branch_out3[ # [Dense_512, AttnHeads] # [Dense_512, AttnHeads] # [Dense_512, AttnHeads] # ] # DotProductAttn_in3 # AttnOutput # Dense_512 # ] # Dropout # ] # ] # Add_in2 # ], Serial[ # Branch_out2[ # None # Serial[ # LayerNorm # Dense_2048 # Relu # Dropout # Dense_512 # Dropout # ] # ] # Add_in2 # ]] # ``` # %% [markdown] colab_type="text" id="SoFv-nfLRWRN" # <a name='2.4'></a> # ## 2.4 Transformer Language Model # # You will now bring it all together. In this part you will use all the subcomponents you previously built to make the final model. Concretely, here is the image you will be implementing. # <img src = "transformer_decoder.png" style = "height:400px"> # # # <a name='ex04'></a> # ### Exercise 04 # **Instructions:** Previously you coded the decoder block. Now you will code the transformer language model. Here is what you will need. 
# # - <span style="color:blue"> positional_encoder </span>- a list containing the following layers:
#     - <span style="color:blue"> [tl.Embedding](https://trax-ml.readthedocs.io/en/latest/trax.layers.html#trax.layers.core.Embedding)
#     - <span style="color:blue"> [tl.Dropout](https://trax-ml.readthedocs.io/en/latest/trax.layers.html#trax.layers.core.Dropout)
#     - <span style="color:blue"> [tl.PositionalEncoding](https://trax-ml.readthedocs.io/en/latest/trax.layers.html#trax.layers.attention.PositionalEncoding)
#
# - A list of `n_layers` <span style="color:blue"> decoder blocks</span>.
# - <span style="color:blue"> [tl.Serial](https://trax-ml.readthedocs.io/en/latest/trax.layers.html#trax.layers.combinators.Serial): </span> takes in the following layers or lists of layers:
#     - <span style="color:blue"> [tl.ShiftRight](https://trax-ml.readthedocs.io/en/latest/trax.layers.html#trax.layers.attention.ShiftRight): </span>: shift the tensor to the right by padding on axis 1.
#     - <span style="color:blue"> positional_encoder </span>: encodes the text positions.
#     - <span style="color:blue"> decoder_blocks </span>: the ones you created.
#     - <span style="color:blue"> [tl.LayerNorm](https://trax-ml.readthedocs.io/en/latest/trax.layers.html#trax.layers.normalization.LayerNorm) </span>: a layer norm.
#     - <span style="color:blue"> [tl.Dense](https://trax-ml.readthedocs.io/en/latest/trax.layers.html#trax.layers.core.Dense) </span>: takes in the vocab_size.
#     - <span style="color:blue"> [tl.LogSoftmax](https://trax-ml.readthedocs.io/en/latest/trax.layers.html#trax.layers.core.LogSoftmax) </span>: to predict.
#
# Go go go!! You can do it :)

# %% colab={} colab_type="code" id="0yi4LJO1RWRS"
# UNQ_C7
# GRADED FUNCTION: TransformerLM
def TransformerLM(vocab_size=33300,
                  d_model=512,
                  d_ff=2048,
                  n_layers=6,
                  n_heads=8,
                  dropout=0.1,
                  max_len=4096,
                  mode='train',
                  ff_activation=tl.Relu):
    """Returns a Transformer language model.

    The input to the model is a tensor of tokens. (This model uses only the
    decoder part of the overall Transformer.)

    Args:
        vocab_size (int): vocab size.
        d_model (int): depth of embedding.
        d_ff (int): depth of feed-forward layer.
        n_layers (int): number of decoder layers.
        n_heads (int): number of attention heads.
        dropout (float): dropout rate (how much to drop out).
        max_len (int): maximum symbol length for positional encoding.
        mode (str): 'train', 'eval' or 'predict', predict mode is for fast inference.
        ff_activation (function): the non-linearity in feed-forward layer.

    Returns:
        trax.layers.combinators.Serial: A Transformer language model as a layer that maps
        from a tensor of tokens to activations over a vocab set.
    """

    ### START CODE HERE (REPLACE INSTANCES OF 'None' with your code) ###

    # Embedding inputs and positional encoder
    positional_encoder = [
        # Add embedding layer of dimension (vocab_size, d_model)
        tl.Embedding(vocab_size, d_model),
        # Use dropout with rate and mode specified
        tl.Dropout(rate=dropout, mode=mode),
        # Add positional encoding layer with maximum input length and mode specified
        tl.PositionalEncoding(max_len=max_len, mode=mode)]

    # Create stack (list) of decoder blocks with n_layers with necessary parameters
    decoder_blocks = [
        DecoderBlock(d_model, d_ff, n_heads, dropout, mode, ff_activation) for _ in range(n_layers)]

    # Create the complete model as written in the figure
    return tl.Serial(
        # Use teacher forcing (feed output of previous step to current step)
        tl.ShiftRight(mode=mode), # Specify the mode!
        # Add positional encoder
        positional_encoder,
        # Add decoder blocks
        decoder_blocks,
        # Normalize layer
        tl.LayerNorm(),
        # Add dense layer of vocab_size (since need to select a word to translate to)
        # (a.k.a., logits layer. Note: activation already set by ff_activation)
        tl.Dense(vocab_size),
        # Get probabilities with Logsoftmax
        tl.LogSoftmax()
    )

    ### END CODE HERE ###

# %%
# Take a look at the Transformer
print(TransformerLM(n_layers=1))

# %% [markdown]
# **Expected Output:**
# ```CPP
# Serial[
#   ShiftRight(1)
#   Embedding_33300_512
#   Dropout
#   PositionalEncoding
#   Serial[
#     Branch_out2[
#       None
#       Serial[
#         LayerNorm
#         Serial[
#           Branch_out3[
#             [Dense_512, AttnHeads]
#             [Dense_512, AttnHeads]
#             [Dense_512, AttnHeads]
#           ]
#           DotProductAttn_in3
#           AttnOutput
#           Dense_512
#         ]
#         Dropout
#       ]
#     ]
#     Add_in2
#   ]
#   Serial[
#     Branch_out2[
#       None
#       Serial[
#         LayerNorm
#         Dense_2048
#         Relu
#         Dropout
#         Dense_512
#         Dropout
#       ]
#     ]
#     Add_in2
#   ]
#   LayerNorm
#   Dense_33300
#   LogSoftmax
# ]
# ```

# %% [markdown] colab_type="text" id="dRRKnoAdvmJ7"
# <a name='3'></a>
# # Part 3: Training
#
# Now you are going to train your model. As usual, you have to define the cost function, the optimizer, and decide whether you will be training it on a `gpu` or `cpu`. In this case, you will train your model on a cpu for a few steps and we will load in a pre-trained model that you can use to predict with your own words.

# %% [markdown] colab_type="text" id="l1lkVebQRWRV"
# <a name='3.1'></a>
# ### 3.1 Training the model
#
# You will now write a function that takes in your model and trains it. To train your model you have to decide how many times you want to iterate over the entire data set. Each iteration is defined as an `epoch`. For each epoch, you have to go over all the data, using your training iterator.
#
# <a name='ex05'></a>
# ### Exercise 05
# **Instructions:** Implement the `train_model` program below to train the neural network above.
Here is a list of things you should do: # # - Create the train task by calling [`trax.supervised.training.TrainTask`](https://trax-ml.readthedocs.io/en/latest/trax.supervised.html#trax.supervised.training.TrainTask) and pass in the following: # - <span style='color:blue'> labeled_data </span> = train_gen # - <span style='color:blue'> loss_fn </span> = [tl.CrossEntropyLoss()](https://trax-ml.readthedocs.io/en/latest/trax.layers.html#trax.layers.metrics.CrossEntropyLoss) # - <span style='color:blue'> optimizer </span> = [trax.optimizers.Adam(0.01)](https://trax-ml.readthedocs.io/en/latest/trax.optimizers.html#trax.optimizers.adam.Adam) # - <span style='color:blue'> lr_schedule </span> = [lr_schedule](https://trax-ml.readthedocs.io/en/latest/trax.supervised.html#trax.supervised.lr_schedules.warmup_and_rsqrt_decay) # # # - Create the eval task by calling [`trax.supervised.training.EvalTask`](https://trax-ml.readthedocs.io/en/latest/trax.supervised.html#trax.supervised.training.EvalTask) and pass in the following: # - <span style='color:blue'> labeled_data </span> = eval_gen # - <span style='color:blue'> metrics </span> = tl.CrossEntropyLoss() and [tl.Accuracy()](https://trax-ml.readthedocs.io/en/latest/trax.layers.html#trax.layers.metrics.Accuracy) # # # - Create the training loop by calling [`trax.supervised.Training.Loop`](https://trax-ml.readthedocs.io/en/latest/trax.supervised.html#trax.supervised.training.Loop) and pass in the following: # - <span style='color:blue'> TransformerLM </span> # - <span style='color:blue'> train_task </span> # - <span style='color:blue'> eval_task </span> = [eval_task] # - <span style='color:blue'> output_dir</span> = output_dir # # You will be using a cross entropy loss, with Adam optimizer. Please read the [Trax](https://trax-ml.readthedocs.io/en/latest/index.html) documentation to get a full understanding. 
# # The training loop that this function returns can be runned using the `run()` method by passing in the desired number of steps. # %% colab={} colab_type="code" id="gM2gpu4xvjtX" from trax.supervised import training # UNQ_C8 # GRADED FUNCTION: train_model def training_loop(TransformerLM, train_gen, eval_gen, output_dir = "~/model"): ''' Input: TransformerLM (trax.layers.combinators.Serial): The model you are building. train_gen (generator): Training stream of data. eval_gen (generator): Evaluation stream of data. output_dir (str): folder to save your file. Returns: trax.supervised.training.Loop: Training loop. ''' output_dir = os.path.expanduser(output_dir) # trainer is an object lr_schedule = trax.lr.warmup_and_rsqrt_decay(n_warmup_steps=1000, max_value=0.01) ### START CODE HERE (REPLACE INSTANCES OF 'None' with your code) ### train_task = training.TrainTask( labeled_data=train_gen, # The training generator loss_layer=tl.CrossEntropyLoss(), # Loss function optimizer=trax.optimizers.Adam(0.01), # Optimizer (Don't forget to set LR to 0.01) lr_schedule=lr_schedule, n_steps_per_checkpoint=10 ) eval_task = training.EvalTask( labeled_data=eval_gen, # The evaluation generator metrics=[tl.CrossEntropyLoss(), tl.Accuracy()] # CrossEntropyLoss and Accuracy ) ### END CODE HERE ### loop = training.Loop(TransformerLM(d_model=4, d_ff=16, n_layers=1, n_heads=2, mode='train'), train_task, eval_tasks=[eval_task], output_dir=output_dir) return loop # %% [markdown] # Notice that the model will be trained for only 10 steps. # # Even with this constraint the model with the original default arguments took a very long time to finish. Because of this some parameters are changed when defining the model that is fed into the training loop in the function above. 
# %% colab={"base_uri": "https://localhost:8080/", "height": 187} colab_type="code" id="BFRBTwSqRWRZ" outputId="aff859e5-8f4a-4d3b-f1d3-98e137581a77" # Should take around 1.5 minutes # !rm -f ~/model/model.pkl.gz loop = training_loop(TransformerLM, train_batch_stream, eval_batch_stream) loop.run(10) # %% [markdown] colab_type="text" id="XKrEBjmskeWa" # <a name='4'></a> # # Part 4: Evaluation # # <a name='4.1'></a> # ### 4.1 Loading in a trained model # # In this part you will evaluate by loading in an almost exact version of the model you coded, but we trained it for you to save you time. Please run the cell below to load in the model. # # As you may have already noticed the model that you trained and the pretrained model share the same overall architecture but they have different values for some of the parameters: # # # `Original (pretrained) model: ` # # TransformerLM(vocab_size=33300, d_model=512, d_ff=2048, n_layers=6, n_heads=8, # dropout=0.1, max_len=4096, ff_activation=tl.Relu) # # `Your model:` # # TransformerLM(d_model=4, d_ff=16, n_layers=1, n_heads=2) # # **Only the parameters shown for your model were changed. The others stayed the same.** # %% colab={"base_uri": "https://localhost:8080/", "height": 85} colab_type="code" id="zWoSzR5tkoAx" outputId="2b9f1cca-4778-4509-bd9e-bd1738625a4e" # Get the model architecture model = TransformerLM(mode='eval') # Load the pre-trained weights model.init_from_file('model.pkl.gz', weights_only=True) # %% [markdown] colab_type="text" id="ilM9C8P3RWRf" # <a name='5'></a> # # Part 5: Testing with your own input # # You will now test your input. You are going to implement greedy decoding. This consists of two functions. The first one allows you to identify the next symbol. It gets the argmax of the output of your model and then returns that index. 
# # <a name='ex06'></a> # ### Exercise 06 # **Instructions:** Implement the next symbol function that takes in the cur_output_tokens and the trained model to return the index of the next word. # %% colab={} colab_type="code" id="rD_bXRCpRWRg" # UNQ_C9 def next_symbol(cur_output_tokens, model): """Returns the next symbol for a given sentence. Args: cur_output_tokens (list): tokenized sentence with EOS and PAD tokens at the end. model (trax.layers.combinators.Serial): The transformer model. Returns: int: tokenized symbol. """ ### START CODE HERE (REPLACE INSTANCES OF 'None' with your code) ### # current output tokens length token_length = len(cur_output_tokens) # calculate the minimum power of 2 big enough to store token_length # HINT: use np.ceil() and np.log2() # add 1 to token_length so np.log2() doesn't receive 0 when token_length is 0 padded_length = 2**int(np.ceil(np.log2(token_length + 1))) # Fill cur_output_tokens with 0's until it reaches padded_length padded = cur_output_tokens + [0] * (padded_length - token_length) padded_with_batch = np.array(padded)[None, :] # Don't replace this 'None'! This is a way of setting the batch dim # model expects a tuple containing two padded tensors (with batch) output, _ = model((padded_with_batch, padded_with_batch)) # HINT: output has shape (1, padded_length, vocab_size) # To get log_probs you need to index output with 0 in the first dim # token_length in the second dim and all of the entries for the last dim. log_probs = output[0, token_length, :] ### END CODE HERE ### return int(np.argmax(log_probs)) # %% # Test it out! sentence_test_nxt_symbl = "I want to fly in the sky." detokenize([next_symbol(tokenize(sentence_test_nxt_symbl)+[0], model)]) # %% [markdown] # **Expected Output:** # ```CPP # 'The' # ``` # %% [markdown] colab_type="text" id="2AwrQFglRWRj" # <a name='5.1'></a> # ### 5.1 Greedy decoding # # Now you will implement the greedy_decode algorithm that will call the `next_symbol` function. 
# It takes in the input_sentence, the trained model and returns the decoded sentence.
#
# <a name='ex07'></a>
# ### Exercise 07
#
# **Instructions**: Implement the greedy_decode algorithm.

# %% colab={} colab_type="code" id="6HwIdimiN0k2"
# UNQ_C10
# Decoding functions.
def greedy_decode(input_sentence, model):
    """Greedy decode function.

    Args:
        input_sentence (string): a sentence or article.
        model (trax.layers.combinators.Serial): Transformer model.

    Returns:
        string: summary of the input.
    """
    ### START CODE HERE (REPLACE INSTANCES OF 'None' with your code) ###

    # Use tokenize()
    cur_output_tokens = tokenize(input_sentence) + [0]
    generated_output = []
    cur_output = 0
    EOS = 1  # end-of-sequence token id; generation stops once it is produced

    while cur_output != EOS:
        # Get next symbol
        cur_output = next_symbol(cur_output_tokens, model)
        # Append next symbol to original sentence
        cur_output_tokens.append(cur_output)
        # Append next symbol to generated sentence
        generated_output.append(cur_output)
        # Show the partial summary as it grows (one printed line per token).
        print(detokenize(generated_output))

    ### END CODE HERE ###

    return detokenize(generated_output)

# %% colab={"base_uri": "https://localhost:8080/", "height": 306} colab_type="code" id="9kHuIDGW1sOr" outputId="2525ca2c-4625-47c0-8456-f75598581993"
# Test it out on a sentence!
test_sentence = "It was a sunny day when I went to the market to buy some flowers. But I only found roses, not tulips."
print(wrapper.fill(test_sentence), '\n')
print(greedy_decode(test_sentence, model))

# %% [markdown] colab_type="text" id="CA-279WI2D3G"
# **Expected Output:**
# ```CPP
# :
# : I
# : I just
# : I just found
# : I just found ros
# : I just found roses
# : I just found roses,
# : I just found roses, not
# : I just found roses, not tu
# : I just found roses, not tulips
# : I just found roses, not tulips
# : I just found roses, not tulips.
# : I just found roses, not tulips.<EOS> # : I just found roses, not tulips.<EOS> # ``` # %% colab={"base_uri": "https://localhost:8080/", "height": 1000} colab_type="code" id="DYgX-mzjyUia" outputId="b901e164-48b3-4124-d21a-fe7443d15b79" # Test it out with a whole article! article = "It’s the posing craze sweeping the U.S. after being brought to fame by skier <NAME>, soccer star <NAME>, baseball player <NAME> - and even Republican politician <NAME>. But now four students at Riverhead High School on Long Island, New York, have been suspended for dropping to a knee and taking up a prayer pose to mimic Denver Broncos quarterback Tim Tebow. <NAME>, <NAME>, <NAME> and <NAME> were all suspended for one day because the ‘Tebowing’ craze was blocking the hallway and presenting a safety hazard to students. Scroll down for video. Banned: <NAME>, <NAME>, <NAME> and <NAME> (all pictured left) were all suspended for one day by Riverhead High School on Long Island, New York, for their tribute to Broncos quarterback Tim Tebow. Issue: Four of the pupils were suspended for one day because they allegedly did not heed to warnings that the 'Tebowing' craze at the school was blocking the hallway and presenting a safety hazard to students." print(wrapper.fill(article), '\n') print(greedy_decode(article, model)) # %% [markdown] # **Expected Output:** # ```CPP # Jordan # <NAME> # <NAME> # <NAME> # <NAME>, # <NAME>, Wayne # <NAME>, <NAME> # <NAME>, <NAME> # <NAME>, <NAME> # <NAME>, <NAME>, # . # . # . # # Final summary: # # <NAME>, <NAME>, <NAME> and <NAME> were # suspended for one day. 
Four students were suspended for one day # because they allegedly did not heed to warnings that the 'Tebowing' # craze was blocking the hallway and presenting a safety hazard to # students.<EOS> # ``` # %% [markdown] # **Congratulations on finishing this week's assignment!** You did a lot of work and now you should have a better understanding of the encoder part of Transformers and how Transformers can be used for text summarization. # # **Keep it up!**
4. Natural Language Processing with Attention Models/Week 2 Summarization with Transformer Models/Assignment 2 Transformer Summarizer.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # Planar Point Patterns in PySAL
#
# **Author: <NAME> <<EMAIL>> and <NAME> <<EMAIL>>**
#
# ## Introduction
# This notebook introduces the basic PointPattern class in PySAL and covers the following:
#
# * [What is a point pattern?](#What-is-a-point-pattern?)
# * [Creating Point Patterns](#Creating-Point-Patterns)
# * [Attributes of Point Patterns](#Attributes-of-PySAL-Point-Patterns)
# * [Intensity Estimates](#Intensity-Estimates)
# * [Next steps](#Next-steps)

# ## What is a point pattern?
#
# We introduce basic terminology here and point the interested reader to more [detailed references](#References) on the underlying theory of the statistical analysis of point patterns.
#
# ### Points and Event Points
#
# To start we consider a series of *point locations*, $(s_1, s_2, \ldots, s_n)$ in a study region $\Re$. We limit our focus here to a two-dimensional space so that $s_j = (x_j, y_j)$ is the spatial coordinate pair for point location $j$.
#
# We will be interested in two different types of points.
#
# #### Event Points
#
# *Event Points* are locations where something of interest has occurred. The term *event* is very general here and could be used to represent a wide variety of phenomena.
Some examples include: # # * [locations of individual plants of a certain species](http://link.springer.com/chapter/10.1007/978-3-642-01976-0_7#page-1) # * [archeological sites](https://www.google.com/url?sa=t&rct=j&q=&esrc=s&source=web&cd=1&cad=rja&uact=8&ved=0ahUKEwjA46Si2oTKAhUU1GMKHZUBCBEQFgghMAA&url=http%3A%2F%2Fdiscovery.ucl.ac.uk%2F11345%2F&usg=AFQjCNG5dKBcsVJQZ9M20U5AOMTt3P6AWQ&sig2=Nt8ViSs8Q2G_-q1BSnNvKg&bvm=bv.110151844,d.cGc) # * [addresses of disease cases](https://www.google.com/url?sa=t&rct=j&q=&esrc=s&source=web&cd=1&cad=rja&uact=8&ved=0ahUKEwiy7NSE2oTKAhUOyWMKHb7cDA4QFgghMAA&url=http%3A%2F%2Fwww.jstor.org%2Fstable%2F622936&usg=AFQjCNExfettAsU3i-Hs7twmB6_iVkghUA&sig2=tPROSM6wMtbZT0qlg_N6Hw&bvm=bv.110151844,d.cGc) # * [locations of crimes](https://www.google.com/url?sa=t&rct=j&q=&esrc=s&source=web&cd=2&cad=rja&uact=8&ved=0ahUKEwiogfbl2YTKAhVT42MKHfTFCdUQFggqMAE&url=https%3A%2F%2Fgeodacenter.asu.edu%2Fsystem%2Ffiles%2Fpoints.pdf&usg=AFQjCNFase8ykAPuopayUDHQRvgj8S4Vsw&sig2=Ezzx45MLZIFaepvcOjV-aw) # * the [distribution of neurons](http://www.ncbi.nlm.nih.gov/pmc/articles/PMC2889688/) # # among [many others](https://en.wikipedia.org/wiki/Point_process). # # It is important to recognize that in the statistical analysis of point patterns the interest extends beyond the observed point pattern at hand. # The observed patterns are viewed as realizations from some underlying spatial stochastic process. # # # #### Arbitrary Points # # The second type of point we consider are those locations where the phenomena of interest has not been observed. These go by various names such as "empty space" or "regular" points, and at first glance might seem less interesting to a spatial analayst. However, these types of points play a central role in a class of point pattern methods that we explore below. # # # ### Point Pattern Analysis # # The analysis of event points focuses on a number of different characteristics of the collective spatial pattern that is observed. 
# Often the pattern is judged against the hypothesis of complete spatial randomness (CSR). That is, one assumes that the point events arise independently of one another and with constant probability across $\Re$, loosely speaking.
#
# Of course, many of the empirical point patterns we encounter do not appear to be generated from such a simple stochastic process. The departures from CSR can be due to two types of effects.
#
# #### First order effects
#
# For a point process, the first-order properties pertain to the intensity of the process across space. Whether and how the intensity of the point pattern varies within our study region are questions that assume center stage. Such variation in the intensity of the pattern of, say, addresses of individuals with a certain type of non-infectious disease may reflect the underlying population density. In other words, although the point pattern of disease cases may display variation in intensity in our study region, and thus violate the constant probability of an event condition, that spatial drift in the pattern intensity could be driven by an underlying covariate.
#
# #### Second order effects
#
# The second channel by which departures from CSR can arise is through interaction and dependence between events in space. The canonical example being contagious diseases whereby the presence of an infected individual increases the probability of subsequent additional cases nearby.
#
# When a pattern departs from expectation under CSR, this is suggestive that the underlying process may have some spatial structure that merits further investigation. Thus methods for detection of deviations from CSR and testing for alternative processes have given rise to a large literature in point pattern statistics.
# # ### Methods of Point Pattern Analysis in PySAL # # The points module in PySAL implements basic methods of point pattern analysis organized into the following groups: # # * Point Processing # * Centrography and Visualization # * Quadrat Based Methods # * Distance Based Methods # # In the remainder of this notebook we shall focus on point processing. import libpysal as ps import numpy as np from pointpats import PointPattern # ## Creating Point Patterns # ### From lists # # We can build a point pattern by using Python lists of coordinate pairs $(s_0, s_1,\ldots, s_m)$ as follows: points = [[66.22, 32.54], [22.52, 22.39], [31.01, 81.21], [9.47, 31.02], [30.78, 60.10], [75.21, 58.93], [79.26, 7.68], [8.23, 39.93], [98.73, 77.17], [89.78, 42.53], [65.19, 92.08], [54.46, 8.48]] p1 = PointPattern(points) p1.mbb # Thus $s_0 = (66.22, 32.54), \ s_{11}=(54.46, 8.48)$. p1.summary() type(p1.points) np.asarray(p1.points) p1.mbb # ### From numpy arrays points = np.asarray(points) points p1_np = PointPattern(points) p1_np.summary() # ### From shapefiles # # This example uses 200 randomly distributed points within the counties of Virginia. Coordinates are for UTM zone 17 N. f = ps.examples.get_path('vautm17n_points.shp') fo = ps.io.open(f) pp_va = PointPattern(np.asarray([pnt for pnt in fo])) fo.close() pp_va.summary() # ## Attributes of PySAL Point Patterns pp_va.summary() pp_va.points pp_va.head() pp_va.tail() # ### Intensity Estimates # The intensity of a point process at point $s_i$ can be defined as: # # $$\lambda(s_j) = \lim \limits_{|\mathbf{A}s_j| \to 0} \left \{ \frac{E(Y(\mathbf{A}s_j)}{|\mathbf{A}s_j|} \right \} $$ # # where $\mathbf{A}s_j$ is a small region surrounding location $s_j$ with area $|\mathbf{A}s_j|$, and $E(Y(\mathbf{A}s_j)$ is the expected number of event points in $\mathbf{A}s_j$. # # The intensity is the mean number of event points per unit of area at point $s_j$. 
# # # # Recall that one of the implications of CSR is that the intensity of the point process is constant in our study area $\Re$. In other words $\lambda(s_j) = \lambda(s_{j+1}) = \ldots = \lambda(s_n) = \lambda \ \forall s_j \in \Re$. Thus, if the area of $\Re$ = $|\Re|$ the expected number of event points in the study region is: $E(Y(\Re)) = \lambda |\Re|.$ # In PySAL, the intensity is estimated by using a geometric object to encode the study region. We refer to this as the window, $W$. The reason for distinguishing between $\Re$ and $W$ is that the latter permits alternative definitions of the bounding object. # **Intensity estimates are based on the following:** # $$\hat{\lambda} = \frac{n}{|W|}$$ # # where $n$ is the number of points in the *window* $W$, and $|W|$ is the area of $W$. # **Intensity based on minimum bounding box:** # $$\hat{\lambda}_{mbb} = \frac{n}{|W_{mbb}|}$$ # # where $W_{mbb}$ is the minimum bounding box for the point pattern. pp_va.lambda_mbb # **Intensity based on convex hull:** # $$\hat{\lambda}_{hull} = \frac{n}{|W_{hull}|}$$ # # where $W_{hull}$ is the convex hull for the point pattern. pp_va.lambda_hull # ## Next steps # # There is more to learn about point patterns in PySAL. # # The [centrographic notebook](centrography.ipynb) illustrates a number of spatial descriptive statistics and visualization of point patterns. # # Clearly the window chosen will impact the intensity estimate. For more on **windows** see the [window notebook](window.ipynb). # # To test if your point pattern departs from complete spatial randomness see the [distance statistics notebook](distance_statistics.ipynb) and [quadrat statistics notebook](Quadrat_statistics.ipynb). # # # To simulate different types of point processes in various windows see [process notebook](process.ipynb). # # If you have point pattern data with additional attributes associated with each point see how to handle this in the [marks notebook](marks.ipynb). # #
notebooks/pointpattern.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3 (ipykernel)
#     language: python
#     name: python3
# ---

import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
import sklearn.linear_model

# Load the Cars93 data set; 'Price' is the regression target throughout.
data = pd.read_csv("Cars93.csv")
Y = np.array(data['Price'])
columns = ['MPG.city']
X = np.array(data[columns])

plt.scatter(X, Y)

# Simple (one-variable) linear regression of Price on MPG.city.
regresion = sklearn.linear_model.LinearRegression()
regresion.fit(X, Y)

regresion.coef_, regresion.intercept_

# $Y = \beta_0 + \beta_1 * X_1$

y_predict = regresion.predict(X)

plt.scatter(X, y_predict)
plt.scatter(X, Y)

# R^2 of the fit, evaluated on the same data it was trained on.
regresion.score(X, Y)


def fit_una_variable(nombre):
    # Fit a one-variable linear regression of Price on column `nombre`,
    # then plot predictions vs. observations with the R^2 in the title.
    data = pd.read_csv("Cars93.csv")
    Y = np.array(data['Price'])
    columns = [nombre]
    X = np.array(data[columns])
    regresion = sklearn.linear_model.LinearRegression()
    regresion.fit(X, Y)
    y_predict = regresion.predict(X)
    score = regresion.score(X, Y)
    plt.scatter(X, y_predict)
    plt.scatter(X, Y)
    plt.title("Variable = {} , Score = {:.2f}".format(nombre, score))


fit_una_variable('MPG.city')

fit_una_variable('Horsepower')

fit_una_variable('Weight')

# Two-predictor regression (Horsepower, Weight).
data = pd.read_csv("Cars93.csv")
Y = np.array(data['Price'])
columns = ['Horsepower', 'Weight']
X = np.array(data[columns])
regresion = sklearn.linear_model.LinearRegression()
regresion.fit(X, Y)
y_predict = regresion.predict(X)
score = regresion.score(X, Y)
score

plt.scatter(X[:,0], y_predict)
plt.scatter(X[:,0], Y)
#plt.title("Variable = {} , Score = {:.2f}".format(nombre, score))

# Three-predictor regression.
data = pd.read_csv("Cars93.csv")
Y = np.array(data['Price'])
columns = ['Horsepower', 'Weight', 'MPG.city']
X = np.array(data[columns])
regresion = sklearn.linear_model.LinearRegression()
regresion.fit(X, Y)
y_predict = regresion.predict(X)
score = regresion.score(X, Y)
print(score)
plt.scatter(X[:,0], y_predict)
plt.scatter(X[:,0], Y)
#plt.title("Variable = {} , Score = {:.2f}".format(nombre, score))

# Regression with all the numeric predictors; score is still on training data.
data = pd.read_csv("Cars93.csv")
Y = np.array(data['Price'])
columns = ['MPG.city', 'MPG.highway', 'EngineSize', 'Horsepower', 'RPM',
           'Rev.per.mile', 'Fuel.tank.capacity', 'Length', 'Width',
           'Turn.circle', 'Weight']
X = np.array(data[columns])
regresion = sklearn.linear_model.LinearRegression()
regresion.fit(X, Y)
y_predict = regresion.predict(X)
score = regresion.score(X, Y)
print(score)
plt.scatter(X[:,2], y_predict)
plt.scatter(X[:,2], Y)

import sklearn.model_selection

# Hold out 30% of the rows as a test set.
X_train, X_test, Y_train, Y_test = sklearn.model_selection.train_test_split(X, Y, test_size=0.3)

np.shape(X_train), np.shape(X_test)

# +
data = pd.read_csv("Cars93.csv")
Y = np.array(data['Price'])
columns = ['MPG.city', 'MPG.highway', 'EngineSize', 'Horsepower', 'RPM',
           'Rev.per.mile', 'Fuel.tank.capacity', 'Length', 'Width',
           'Turn.circle', 'Weight']
X = np.array(data[columns])
X_train, X_test, Y_train, Y_test = sklearn.model_selection.train_test_split(X, Y, test_size=0.3)
regresion = sklearn.linear_model.LinearRegression()
regresion.fit(X_train, Y_train)
# Score on the held-out test set — a fairer estimate than the training score.
score = regresion.score(X_test, Y_test)
print(score)
y_predict = regresion.predict(X)
plt.scatter(X[:,2], y_predict)
plt.scatter(X[:,2], Y)
# -

regresion.coef_
IntroDataScience/streaming/02_regresion_lineal/regresion_lineal.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # name: python3 # --- # + [markdown] id="view-in-github" colab_type="text" # <a href="https://colab.research.google.com/github/African-Quant/FOREX_RelativeStrengthOscillator/blob/main/Oanda_RelativeStrength_EG.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a> # + colab={"base_uri": "https://localhost:8080/"} cellView="form" id="wUDVru-VWnPy" outputId="d9c837df-72f5-4b31-fd73-fba5895ee0c1" #@title Installation # !pip install git+https://github.com/yhilpisch/tpqoa.git --upgrade --quiet # !pip install pykalman --quiet # !pip install --upgrade mplfinance --quiet # + cellView="form" id="hjeaeByhXE0e" #@title Imports import tpqoa import numpy as np import pandas as pd from pykalman import KalmanFilter # %matplotlib inline from pylab import mpl, plt plt.style.use('seaborn') mpl.rcParams['savefig.dpi'] = 300 mpl.rcParams['font.family'] = 'serif' from datetime import date, timedelta import warnings warnings.filterwarnings("ignore") # + id="8IlXbX4_XdeD" cellView="form" #@title Oanda API path = '/content/drive/MyDrive/Oanda_Algo/pyalgo.cfg' api = tpqoa.tpqoa(path) # + id="G5RU1cM6XmNK" cellView="form" #@title Symbols/Currency Pairs def symbolsList(): symbols = [] syms = api.get_instruments() for x in syms: symbols.append(x[1]) return symbols symbols = symbolsList() pairs = ['AUD_CAD', 'AUD_CHF', 'AUD_JPY', 'AUD_NZD', 'AUD_USD', 'CAD_CHF', 'CAD_JPY', 'CHF_JPY', 'EUR_AUD', 'EUR_CAD', 'EUR_CHF', 'EUR_GBP', 'EUR_JPY', 'EUR_NZD', 'EUR_USD', 'GBP_AUD', 'GBP_CAD', 'GBP_CHF', 'GBP_JPY', 'GBP_NZD', 'GBP_USD', 'NZD_CAD', 'NZD_CHF', 'NZD_JPY', 'NZD_USD', 'USD_CAD', 'USD_CHF', 'USD_JPY',] # + cellView="form" id="mUpY_WkbYZ-n" #@title getData(instr, gran = 'D', td=1000) def getData(instr, gran = 'D', td=1000): start = f"{date.today() - 
timedelta(td)}" end = f"{date.today() - timedelta(1)}" granularity = gran price = 'M' # price: string one of 'A' (ask), 'B' (bid) or 'M' (middle) data = api.get_history(instr, start, end, granularity, price) data.drop(['complete'], axis=1, inplace=True) data.reset_index(inplace=True) data.rename(columns = {'time':'Date','o':'Open','c': 'Close', 'h':'High', 'l': 'Low'}, inplace = True) data.set_index('Date', inplace=True) return data # + id="cMJzJoCNYbgb" cellView="form" #@title Indexes def USD_Index(): '''Creating a USD Index from a basket of instruments denominated in dollars ''' USD = ['EUR_USD', 'GBP_USD', 'USD_CAD', 'USD_CHF', 'USD_JPY', 'AUD_USD', 'NZD_USD'] df = pd.DataFrame() for i in USD: data = getData(i).ffill(axis='rows') # setting the Dollar as the base if '_USD' == i[-4:]: data[f'{i}'] = (data['Close'])**(-1) else: data[f'{i}'] = data['Close'] df = pd.concat([df, data.loc[:,f'{i}']], axis=1) df['US_index'] = 1 for i in range(len(USD)): df['US_index'] *= df[USD[i]] return ((df['US_index'])**(1/(len(USD)))).to_frame() def EURO_Index(): '''Creating a EUR Index from a basket of instruments denominated in EUROs ''' EUR = ['EUR_USD', 'EUR_GBP', 'EUR_JPY', 'EUR_CHF', 'EUR_CAD', 'EUR_AUD', 'EUR_NZD'] df = pd.DataFrame() for i in EUR: data = getData(i).ffill(axis='rows') data[f'{i}'] = data['Close'] df = pd.concat([df, data.loc[:,f'{i}']], axis=1) df['EUR_index'] = 1 for i in range(len(EUR)): df['EUR_index'] *= df[EUR[i]] return ((df['EUR_index'])**(1/(len(EUR)))).to_frame() def GBP_Index(): '''Creating a GBP Index from a basket of instruments denominated in Pound Sterling ''' GBP = ['GBP_USD', 'EUR_GBP', 'GBP_JPY', 'GBP_CHF', 'GBP_CAD', 'GBP_AUD', 'GBP_NZD'] df = pd.DataFrame() for i in GBP: data = getData(i).ffill(axis='rows') # setting the Dollar as the base if '_GBP' == i[-4:]: data[f'{i}'] = (data['Close'])**(-1) else: data[f'{i}'] = data['Close'] df = pd.concat([df, data.loc[:,f'{i}']], axis=1) df['GBP_index'] = 1 for i in range(len(GBP)): df['GBP_index'] 
*= df[GBP[i]] return ((df['GBP_index'])**(1/(len(GBP)))).to_frame() def CHF_Index(): '''Creating a CHF Index from a basket of instruments denominated in Swiss Francs ''' CHF = ['CHF_JPY', 'EUR_CHF', 'GBP_CHF', 'USD_CHF', 'CAD_CHF', 'AUD_CHF', 'NZD_CHF'] df = pd.DataFrame() for i in CHF: data = getData(i).ffill(axis='rows') # setting the Dollar as the base if '_CHF' == i[-4:]: data[f'{i}'] = (data['Close'])**(-1) else: data[f'{i}'] = data['Close'] df = pd.concat([df, data.loc[:,f'{i}']], axis=1) df['CHF_index'] = 1 for i in range(len(CHF)): df['CHF_index'] *= df[CHF[i]] return ((df['CHF_index'])**(1/(len(CHF)))).to_frame() def CAD_Index(): '''Creating a CAD Index from a basket of instruments denominated in Canadian Dollars ''' CAD = ['CAD_JPY', 'EUR_CAD', 'GBP_CAD', 'USD_CAD', 'CAD_CHF', 'AUD_CAD', 'NZD_CAD'] df = pd.DataFrame() for i in CAD: data = getData(i).ffill(axis='rows') # setting the Dollar as the base if '_CAD' == i[-4:]: data[f'{i}'] = (data['Close'])**(-1) else: data[f'{i}'] = data['Close'] df = pd.concat([df, data.loc[:,f'{i}']], axis=1) df['CAD_index'] = 1 for i in range(len(CAD)): df['CAD_index'] *= df[CAD[i]] return ((df['CAD_index'])**(1/(len(CAD)))).to_frame() def JPY_Index(): '''Creating a JPY Index from a basket of instruments denominated in Swiss Francs ''' JPY = ['CAD_JPY', 'EUR_JPY', 'GBP_JPY', 'USD_JPY', 'CHF_JPY', 'AUD_JPY', 'NZD_JPY'] df = pd.DataFrame() for i in JPY: data = getData(i).ffill(axis='rows') # setting the Japanese Yen as the base data[f'{i}'] = (data['Close'])**(-1) df = pd.concat([df, data.loc[:,f'{i}']], axis=1) df['JPY_index'] = 1 for i in range(len(JPY)): df['JPY_index'] *= df[JPY[i]] return ((df['JPY_index'])**(1/(len(JPY)))).to_frame() def AUD_Index(): '''Creating a AUD Index from a basket of instruments denominated in Australian Dollar ''' AUD = ['AUD_JPY', 'EUR_AUD', 'GBP_AUD', 'AUD_USD', 'AUD_CAD', 'AUD_CHF', 'AUD_NZD'] df = pd.DataFrame() for i in AUD: data = getData(i).ffill(axis='rows') # setting the Aussie Dollar 
as the base if '_AUD' == i[-4:]: data[f'{i}'] = (data['Close'])**(-1) else: data[f'{i}'] = data['Close'] df = pd.concat([df, data.loc[:,f'{i}']], axis=1) df['AUD_index'] = 1 for i in range(len(AUD)): df['AUD_index'] *= df[AUD[i]] return ((df['AUD_index'])**(1/(len(AUD)))).to_frame() def NZD_Index(): '''Creating a NZD Index from a basket of instruments denominated in New Zealand Dollar ''' NZD = ['NZD_JPY', 'EUR_NZD', 'GBP_NZD', 'NZD_USD', 'NZD_CAD', 'NZD_CHF', 'AUD_NZD'] df = pd.DataFrame() for i in NZD: data = getData(i).ffill(axis='rows') # setting the Dollar as the base if '_NZD' == i[-4:]: data[f'{i}'] = (data['Close'])**(-1) else: data[f'{i}'] = data['Close'] df = pd.concat([df, data.loc[:,f'{i}']], axis=1) df['NZD_index'] = 1 for i in range(len(NZD)): df['NZD_index'] *= df[ NZD[i]] return ((df['NZD_index'])**(1/(len(NZD)))).to_frame() def eSuperRCS(df): """ This code computes the super smoother introduced by <NAME> """ spr = df.to_frame().copy() # HighPass filter cyclic components whose periods are shorter than 48 bars alpha1 = (np.cos(0.707*2*np.pi/48) + np.sin(0.707*2*np.pi/48) - 1)/np.cos(0.707*2*np.pi/48) hiPass = pd.DataFrame(None, index=spr.index, columns=['filtered']) for i in range(len(spr)): if i < 3: hiPass.iloc[i, 0] = spr.iat[i, 0] else: hiPass.iloc[i, 0] = ((1 - alpha1/2)*(1 - alpha1/2)*(spr.iat[i, 0] - 2*spr.iat[i-1, 0] + spr.iat[i-2, 0] + 2*(1 - alpha1)*hiPass.iat[i-1, 0] - (1 - alpha1)**2 *hiPass.iat[i-2, 0])) # SuperSmoother a1 = np.exp(-1.414*(np.pi) / 10) b1 = 2*a1*np.cos(1.414*(np.pi) / 10) c2 = b1 c3 = -a1*a1 c1 = 1 - c2 - c3 Filt = pd.DataFrame(None, index=spr.index, columns=['filtered']) for i in range(len(spr)): if i < 3: Filt.iloc[i, 0] = hiPass.iat[i, 0] else: Filt.iloc[i, 0] = c1*(hiPass.iat[i, 0] + hiPass.iat[i - 1, 0]/ 2 + c2*Filt.iat[i-1, 0] + c3*Filt.iat[i-2, 0]) Filt['eSuperRCS'] = RSI(Filt['filtered']) return Filt['eSuperRCS'] def RSI(series, period=25): delta = series.diff() up = delta.clip(lower=0) dn = 
-1*delta.clip(upper=0) ema_up = up.ewm(com=period-1, adjust=False).mean() ewm_dn = dn.ewm(com=period-1, adjust=False).mean() rs = (ema_up/ewm_dn) return 100 - 100 / (1 + rs) def will_pr(data, lb=14): df = data[['High', 'Low', 'Close']].copy() df['max_hi'] = data['High'].rolling(window=lb).max() df['min_lo'] = data['Low'].rolling(window=lb).min() df['will_pr'] = 0 for i in range(len(df)): try: df.iloc[i, 5] = ((df.iat[i, 3] - df.iat[i, 2])/(df.iat[i, 3] - df.iat[i, 4])) * (-100) except ValueError: pass return df['will_pr'] # + id="DqtAFQUEjlEg" eg = getData('EUR_GBP') eur = EURO_Index() gbp = GBP_Index() df = pd.concat((eg, eur, gbp), axis=1).ffill(axis='rows') # + id="qcwUEFKVjuX2" colab={"base_uri": "https://localhost:8080/", "height": 825} outputId="ce4c4427-0f07-4071-f91d-d7576d24bae2" df # + id="hQAPAmYfju_m" tickers = ['EUR_index', 'GBP_index'] # + id="5lE_6W4TkDbz" colab={"base_uri": "https://localhost:8080/", "height": 366} outputId="147884e3-a8d6-489e-8c7c-69bd213306bf" cumm_rtn = (1 + df[tickers].pct_change()).cumprod() cumm_rtn.plot(); plt.ylabel('Cumulative Return'); plt.xlabel('Time'); plt.title('Cummulative Plot of EUR_index & GBP_index'); # + id="0nUAi8hekGcz" import statsmodels.api as sm obs_mat = sm.add_constant(df[tickers[0]].values, prepend=False)[:, np.newaxis] # y is 1-dimensional, (alpha, beta) is 2-dimensional kf = KalmanFilter(n_dim_obs=1, n_dim_state=2, initial_state_mean=np.ones(2), initial_state_covariance=np.ones((2, 2)), transition_matrices=np.eye(2), observation_matrices=obs_mat, observation_covariance=10**2, transition_covariance=0.01**2 * np.eye(2)) # + id="dMyTnry_kS-f" state_means, state_covs = kf.filter(df[tickers[1]]) # + id="siSqwP3IkciC" colab={"base_uri": "https://localhost:8080/", "height": 54} outputId="df62f7fa-c5e5-4535-c567-d7762bab0f10" beta_kf = pd.DataFrame({'Slope': state_means[:, 0], 'Intercept': state_means[:, 1]}, index=df.index) spread_kf = df[tickers[0]] - df[tickers[1]] * beta_kf['Slope'] - beta_kf['Intercept'] 
spread_kf.plot();  # (review) dropped the no-op `spread_kf = spread_kf` self-assignment that preceded this

# + id="CG4UupEVlwfi"
len(df)

# + id="x0AHmIoJkwZ_"
df['spread'] = spread_kf
df['EUR/GBP'] = df['EUR_index']/df['GBP_index']
df['eSuperRCS'] = eSuperRCS(df['spread'])
# keep only the most recent 700 bars for plotting
df = df.iloc[-700:]

# + id="mHIwTqDQk6cY"
fig = plt.figure(figsize=(10, 7))
ax1, ax2 = fig.subplots(nrows=2, ncols=1)
ax1.plot(df.index, df['Close'], color='cyan')
ax2.plot(df.index, df['EUR/GBP'].values, color='maroon')
ax1.set_title('EUR_GBP')
ax2.set_title('EUR/GBP')
plt.show()

# + id="9IJteDBFlUvX"
def viewPlot(data, win=150):
    '''Plot the last `win` bars: High/Low price range as vertical bars (top
    panel) and the eSuperRCS spread oscillator with 45/50/55 guide lines
    (bottom panel). Returns the value of plt.show() (None).
    '''
    fig = plt.figure(figsize=(17, 10))
    ax1, ax2 = fig.subplots(nrows=2, ncols=1)
    df1 = data.iloc[-win:, ]
    # High and Low prices are plotted as vertical bars
    # NOTE(review): iat[i, 1]/iat[i, 2] assume columns 1 and 2 are High and Low — confirm
    for i in range(len(df1)):
        ax1.vlines(x=df1.index[i], ymin=df1.iat[i, 2], ymax=df1.iat[i, 1],
                   color='magenta', linewidth=2)
    ax2.plot(df1.index, df1['eSuperRCS'].values, color='maroon')
    ax2.axhline(55, color='green')
    ax2.axhline(45, color='green')
    ax2.axhline(50, color='orange')
    ax1.set_title('EUR_GBP')
    ax2.set_title('spread oscillator')
    return plt.show()

# + id="fVK-Kou8m81i"
viewPlot(df, win=150)

# + id="2hkoPilbtgP3"
Oanda_RelativeStrength_EG.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python [conda env:tensorflow_p36] # language: python # name: conda-env-tensorflow_p36-py # --- from IPython.core.display import display, HTML display(HTML("<style>.container { width:100% !important; }</style>")) # + from __future__ import print_function from util_functions import process_files import keras from keras.models import Sequential from keras.layers import Dense, Dropout, Flatten from keras.layers import Conv2D, MaxPooling2D from keras import backend as K import numpy as np from numpy import linalg as LA # + batch_size = 32 num_classes = 2 epochs = 30 # load the data x_train, y_train, f_train = process_files(dataset='training', features=['mfccs', 'mfcc_deltas'], shape='stacked') x_test, y_test, f_test = process_files(dataset='test', features=['mfccs', 'mfcc_deltas'], shape='stacked') #normalise x_train = x_train - x_train.mean() x_test = x_test - x_test.mean() x_train = x_train/LA.norm(x_train) x_test = x_test/LA.norm(x_test) # input image dimensions input_d = x_train.shape[1] #Depth input_h = x_train.shape[2] #Height input_w = x_train.shape[3] #Width #Reshaping to feed to network x_train = x_train.reshape(x_train.shape[0], input_h*input_w*input_d) x_test = x_test.reshape(x_test.shape[0], input_h*input_w*input_d) y_train = y_train.reshape(x_train.shape[0], 1) y_test = y_test.reshape(x_test.shape[0], 1) input_dim = input_h*input_w*input_d #Making them floats for TF x_train = x_train.astype('float32') x_test = x_test.astype('float32') # convert class vectors to binary class matrices y_train = keras.utils.to_categorical(y_train, num_classes=num_classes) y_test = keras.utils.to_categorical(y_test, num_classes=num_classes) # iterate over different hyperparameters to finetune them learning_rates = [0.1, 0.01, 0.001, 0.0001] dropout_rates = [0, 0.1, 0.2, 0.3, 0.4, 0.5] for lr in learning_rates: 
for dr in dropout_rates: print('learning rate: ' + str(lr) + ', dropout: ' + str(dr) + '\n') # give unique name to the model model_name = 'lr_' + str(lr).replace('.', '_') + '_dr_' + str(dr).replace('.', '_') model = Sequential() model.add(Dense(64, activation='relu', input_shape=(input_dim,))) model.add(Dropout(dr)) model.add(Dense(128, activation='relu')) model.add(Dropout(dr)) model.add(Dense(256, activation='relu')) model.add(Dropout(dr)) model.add(Dense(2, activation='softmax')) model.compile(loss=keras.losses.categorical_crossentropy, optimizer=keras.optimizers.Nadam(lr=lr), metrics=['accuracy']) model.fit(x_train, y_train, batch_size=batch_size, epochs=epochs, verbose=2, validation_split=0.1) score = model.evaluate(x_test, y_test, verbose=0) print('Test loss:', score[0]) print('Test accuracy:', score[1]) print('----------------------------------------------------\n') # -
keras_model_mfcc/1. Initial classifiers for 30 epochs.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: 'Python 3.8.0 64-bit (''tcc'': virtualenv)'
#     name: python3
# ---

# +
import sys

import numpy as np
import pandas as pd
import datetime as dt
import seaborn as sns
import matplotlib.pyplot as plt

sys.path.append('../../src')

from paths import FXD_DB_DIR, OFFSTD_DB_DIR
from utils.functions import clean_folder, load_datasets_from_csv
from utils.formatter import remove_offset
from plot import show_serie, show_hist

# %load_ext autoreload
# %autoreload 2
# -

# start from a clean output folder
clean_folder(OFFSTD_DB_DIR, "*.csv")

datasets = load_datasets_from_csv(FXD_DB_DIR).copy()

# keep an untouched copy of the air-conditioner power for the comparison plot
air_old = datasets['air']['Pwr'].copy()
air = datasets['air']
air_pwr = air['Pwr']

# power bands (watts): 20–90 W = TR (standby) only, >90 W = TR mixed with Air
tr = (air_pwr > 20) & (air_pwr <= 90)
tr_air = air_pwr > 90
u_tr = air_pwr[tr].mean()

# ### Where only TR signal is present, set to 0 by subtract itself
air_pwr[tr] -= air_pwr[tr]

# ### Where TR signal is mixed with Air signal, remove the mean value of TR signal
air_pwr[tr_air] -= u_tr

# ## Comparison: before vs. after offset removal (first day, 1440 minutes)
fig, ax = plt.subplots(figsize=(10, 4), sharex=True)
ax.plot(air_old[:1440])
ax.plot(air_pwr[:1440], c='g', ls='--')

# what was removed is attributed to the TR appliance as its own channel
tr_signal = (air_old - air_pwr)
datasets['air']['TR'] = tr_signal

# ## Fridge
fridge_old = datasets['fridge'].copy()
fridge = datasets['fridge']
fg_pwr = fridge['Pwr']
lp = (fg_pwr > 5) & (fg_pwr < 30)
# NOTE(review): u_lp is computed but never subtracted anywhere (unlike u_tr
# above) — confirm whether the mixed-band correction was intentionally skipped.
u_lp = fg_pwr[lp].mean()
fg_pwr[lp] -= fg_pwr[lp]

fig, ax = plt.subplots(figsize=(18, 4), sharex=True)
plt.plot(fridge_old['Pwr'][:1440])
plt.plot(fg_pwr[:1440], c='g', ls='--')

# ## Fan
fan_old = datasets['fan'].copy()
fan = datasets['fan']
fan_pwr = fan['Pwr']
lp_f = (fan_pwr > 3) & (fan_pwr < 20)
# NOTE(review): u_lpf unused — same remark as u_lp above.
u_lpf = fan_pwr[lp_f].mean()
fan_pwr[lp_f] -= fan_pwr[lp_f]

fig, ax = plt.subplots(figsize=(18, 4), sharex=True)
plt.plot(fan_old['Pwr'][:5440])
plt.plot(fan_pwr[:5440], c='g', ls='--')

# ## Computer (Raspberry offset band)
pc_old = datasets['computer'].copy()
pc = datasets['computer']
pc_pwr = pc['Pwr']
rasp = (pc_pwr > 2) & (pc_pwr < 15)
# NOTE(review): u_rasp unused — same remark as u_lp above.
u_rasp = pc_pwr[rasp].mean()
pc_pwr[rasp] -= pc_pwr[rasp]

fig, ax = plt.subplots(figsize=(18, 4), sharex=True)
plt.plot(pc_old['Pwr'][:5440])
plt.plot(pc_pwr[:5440], c='g', ls='--')

# write every treated dataset back out
for key in datasets:
    print(key)
    datasets[key].name = str(key)
    datasets[key].to_csv(OFFSTD_DB_DIR + "/{}.csv".format(key), float_format='%.4f')
data_processing/TREATER/2.offset_remover.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: ml-project
#     language: python
#     name: ml-project
# ---

import sys

import numpy as np
import matplotlib.pyplot as plt
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.utils.data import TensorDataset, DataLoader
import torch.optim as optim
from sklearn.model_selection import train_test_split
from torch.autograd import Variable
from tqdm import trange

sys.path.append('/home/raffaele/Documents/ml-project/src/')

# ## Import Dataset
data = np.genfromtxt('/home/raffaele/Documents/ml-project/cup/ML-CUP20-TR.csv',
                     delimiter=',', dtype=np.float32)
X = data[:, 1:-2]   # drop the leading id column and the two target columns
y = data[:, -2:]    # the two regression targets
print(X.shape)
print(y.shape)

# ### Split train set and Validation Set
Xtrain, Xval, ytrain, yval = train_test_split(X, y, test_size=0.10, random_state=42)
print(Xtrain.shape)
print(ytrain.shape)
print(Xval.shape)
print(yval.shape)

# full-batch gradient descent: one batch == the whole training set
BATCH_SIZE = len(Xtrain)

train_dataset = TensorDataset(torch.Tensor(Xtrain), torch.Tensor(ytrain))
train_loader = DataLoader(train_dataset, batch_size=BATCH_SIZE, shuffle=True)

# ## Define Models
class Net(nn.Module):
    """Fully connected 10 -> 100 -> 50 -> 2 regressor with ReLU activations."""

    def __init__(self):
        super(Net, self).__init__()
        self.input_layer = nn.Linear(10, 100)
        self.hidden1 = nn.Linear(100, 50)
        self.output = nn.Linear(50, 2)

    def forward(self, x):
        x = torch.relu(self.input_layer(x))
        x = torch.relu(self.hidden1(x))
        x = self.output(x)
        return x

net = Net()
print(net)

# +
def train(net, optimizer, epochs=100, val_split=None):
    """Train `net` with the MEE loss, evaluating on `val_split` every epoch.

    Parameters
    ----------
    net : nn.Module to optimise.
    optimizer : torch.optim.Optimizer bound to net's parameters.
    epochs : number of passes over `train_loader`.
    val_split : required tuple (Xval, yval) used for per-epoch validation.

    Returns a history dict with "loss", "acc", "val_loss", "val_acc" lists.
    """
    loss_list = []
    acc_list = []
    val_loss_list = []
    val_acc_list = []
    history = {"loss": loss_list, "acc": acc_list,
               "val_loss": val_loss_list, "val_acc": val_acc_list}

    # FIX(review): the original crashed with TypeError when val_split was left
    # at its default (len(None)) and with NameError later because test_loader
    # was only bound inside the `if` — fail fast with a clear message instead.
    if val_split is None or len(val_split) != 2:
        raise ValueError("train() requires val_split=(Xval, yval)")

    test_dataset = TensorDataset(torch.Tensor(val_split[0]), torch.Tensor(val_split[1]))
    test_loader = DataLoader(test_dataset, batch_size=BATCH_SIZE, shuffle=True)

    for epoch in (t := trange(epochs)):
        for inputs, targets in train_loader:
            optimizer.zero_grad()
            out = net(inputs)
            loss = MEE(out, targets)
            loss.backward()
            optimizer.step()

        acc, _ = evaluate(net, train_loader, verbose=False)
        val_acc, val_loss = evaluate(net, test_loader, verbose=False, criterion=True)

        # FIX(review): store plain floats rather than live tensors so the
        # downstream plot_loss()/table_info() calls don't receive grad tensors.
        loss_list.append(loss.item())
        val_loss_list.append(float(val_loss))
        acc_list.append(acc)
        val_acc_list.append(val_acc)
        t.set_description('epoch %d/%d loss=%.5f acc=%.2f val_loss=%.5f val_acc=%.2f'
                          % (epoch+1, epochs, loss.item(), acc, val_loss, val_acc))
    return history
# -

def evaluate(net, test_loader, verbose=True, criterion=False):
    """Fraction of predictions *exactly* equal to their target, plus the MEE
    loss of the last batch when criterion=True.

    NOTE(review): exact float equality on regression outputs is almost always
    0 — kept as-is for parity with how the rest of the notebook reads "acc".
    """
    correct = 0
    total = 0
    loss = 0
    with torch.no_grad():
        for data in test_loader:
            X, y = data
            output = net(X)
            if criterion:
                loss = MEE(output, y)
            for idx, i in enumerate(output):
                pred = output[idx]
                if (pred == y[idx]).all():
                    correct += 1
                total += 1
    if verbose:
        print("Accuracy: ", round(correct/total, 2))
    return round(correct/total, 2), loss

# ### Initialize the weights
# +
def init_weights(m):
    """Xavier-uniform initialisation for every Linear layer; zero biases."""
    if type(m) == nn.Linear:
        torch.nn.init.xavier_uniform_(m.weight)
        m.bias.data.fill_(0.0)

net.apply(init_weights)
# -

def MEE(y_real, y_pred):
    """Mean Euclidean Error: mean of the row-wise L2 distances."""
    return torch.mean(torch.linalg.norm(y_real - y_pred, axis=1))

optimizer = optim.SGD(net.parameters(), lr=0.006, momentum=0.8, weight_decay=0.0001)

history = train(net, epochs=1000, optimizer=optimizer, val_split=(Xval, yval))

import sys
sys.path.append('/home/raffaele/Documents/ml-project/src/')
from torch_utility import *

plot_loss(history)

data = [
    history['loss'][-1],
    history['acc'][-1],
    history['val_loss'][-1],
    history['val_acc'][-1],
]
table_info(data)

# scatter the network's validation predictions against the true targets
to_predict = torch.tensor(list(Xval), dtype=torch.float, requires_grad=False)
out = net(to_predict)
out = out.detach().numpy()
x = out[:, 0]
y = out[:, 1]
plt.scatter(x, y)
x_real = yval[:, 0]
y_real = yval[:, 1]
plt.scatter(x_real, y_real)

# +
from sklearn.metrics import euclidean_distances

def mean_euclidean_error(y_true, y_pred):
    """NumPy counterpart of MEE, for evaluating predictions outside torch."""
    assert y_true.shape == y_pred.shape
    return np.mean(np.linalg.norm(y_true - y_pred, axis=1))
# -

mean_euclidean_error(out, yval)

# NOTE(review): the original notebook repeated the identical
# mean_euclidean_error cell here (removed as an exact duplicate) and then
# shadowed the MEE *function* above with an nn.Module of the same name —
# re-running cells out of order changes which MEE the train() loop sees.

class MEE(torch.nn.Module):
    """MEE as a loss Module, in the form skorch's `criterion=` expects."""

    def __init__(self):
        super(MEE, self).__init__()

    def forward(self, y_true, y_pred):
        return torch.div(torch.sum(torch.pairwise_distance(y_true, y_pred)), len(y_true))

class Net(nn.Module):
    """Single-hidden-layer variant used for the skorch run: 10 -> num_units -> 2."""

    def __init__(self, num_units):
        super(Net, self).__init__()
        self.input_layer = nn.Linear(10, num_units)
        self.output = nn.Linear(num_units, 2)

    def forward(self, x):
        x = torch.sigmoid(self.input_layer(x))
        x = self.output(x)
        return x

from skorch import NeuralNetRegressor
from skorch.callbacks import EarlyStopping

test_net = Net(100,)
nett = NeuralNetRegressor(test_net,
                          max_epochs=1000,
                          lr=0.01,
                          batch_size=64,
                          optimizer=optim.SGD,
                          optimizer__momentum=0.8,
                          optimizer__weight_decay=0.0001,
                          optimizer__nesterov=True,
                          criterion=MEE,
                          callbacks=[EarlyStopping(patience=100)])

# Training
nett.fit(Xtrain, ytrain)

# +
train_loss = nett.history[:, 'train_loss']
valid_loss = nett.history[:, 'valid_loss']
plt.plot(train_loss, '-', label='training')
plt.plot(valid_loss, '--', label='validation')
plt.ylim(2, 4)
plt.xlim(50, 1000)
plt.legend()
plt.show()
# -

prova = nett.predict(Xval)
x1 = prova[:, 0]
y1 = prova[:, 1]
plt.scatter(x1, y1)
x_real = yval[:, 0]
y_real = yval[:, 1]
plt.scatter(x_real, y_real)

mean_euclidean_error(prova, yval)
src/pytorch/Cup.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3 (ipykernel)
#     language: python
#     name: python3
# ---

# %load_ext autoreload
# %autoreload 2

import molsysmt as msm

# # Get sequence alignment

# Load two PDB entries and convert them to molsysmt's native MolSys form.
molecular_system_1 = msm.convert('pdb_id:181l', to_form='molsysmt.MolSys')
molecular_system_2 = msm.convert('pdb_id:1l17', to_form='molsysmt.MolSys')

# Number of residues in each protein (sanity check before aligning).
msm.get(molecular_system_1, selection='molecule_type=="protein"', n_groups=True)

msm.get(molecular_system_2, selection='molecule_type=="protein"', n_groups=True)

# The alignment cells below are intentionally disabled in this demo —
# kept verbatim so they can be re-enabled when the API call is available.

# +
#msm.topology.get_sequence_alignment(molecular_system_1, selection='molecule_type=="protein"',
#                                    reference_molecular_system=molecular_system_2, reference_selection='molecule_type=="protein"',
#                                    prettyprint=True)

# +
#seq, seq_ref = msm.topology.get_sequence_alignment(molecular_system_1, selection='molecule_type=="protein"',
#                                                   reference_molecular_system=molecular_system_2, reference_selection='molecule_type=="protein"')

# +
#seq

# +
#seq_ref
docs/contents/user/topology/get_sequence_alignment.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 2 # language: python # name: python2 # --- # Example with magics # ========== # The usual matplotlib inline as an example before, between and after a python commands # %matplotlib inline import matplotlib as mpl # %matplotlib inline a = 10 # %matplotlib inline # A single magic # %cd # Something with %test inside a markdown text print('Something %magic like inside a python code') print('%magic okay')
tests/sphinx_supp_py2/raw_examples/example_magics.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # name: python3 # --- # + [markdown] id="view-in-github" colab_type="text" # <a href="https://colab.research.google.com/github/VuBrian22/Torrent-to-Google-Drive/blob/main/Torrent2GoogleDrive.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a> # + [markdown] id="rSKVePS7Foif" # # **Torrent to Google Drive (Updated with New Features) - <NAME>** # **Credit: r12habh (https://github.com/r12habh)** # + [markdown] id="t3NGw1GuyNhe" # ## **Optional: Run an Internet Speed Test (Hosted by Speedtest by Ookla)** # + id="dGMZnQ4swLKk" pip install speedtest-cli && speedtest-cli # + [markdown] id="avwfXH0q96Cz" # # **STEP 1 : MOUNT YOUR GOOGLE DRIVE WHERE YOU WANT TO STORE THE DATA** # + cellView="form" id="U2avD1zRIlWA" outputId="52af49be-4a69-4e40-aafd-54cdb689924b" colab={"base_uri": "https://localhost:8080/", "height": 34} #@markdown <br><center><img src='https://upload.wikimedia.org/wikipedia/commons/thumb/d/da/Google_Drive_logo.png/600px-Google_Drive_logo.png' height="50" alt="Gdrive logo"/></center> #@markdown <center><h3>Mount Gdrive to /content/drive</h3></center><br> MODE = "MOUNT" #@param ["MOUNT", "UNMOUNT"] #Mount your Gdrive! 
from google.colab import drive drive.mount._DEBUG = False if MODE == "MOUNT": drive.mount('/content/drive', force_remount=True) elif MODE == "UNMOUNT": try: drive.flush_and_unmount() except ValueError: pass get_ipython().system_raw("rm -rf /root/.config/Google/DriveFS") # + [markdown] id="4bAO4BwL-LNG" # # **STEP 2 : INSTALL THE PYTHON LIBRARIES** # + id="m6hF0emftx4h" cellView="form" #@title #@markdown <h3>⬅️ Click Here to START server</h3> # !apt install python3-libtorrent import libtorrent as lt ses = lt.session() ses.listen_on(6881, 6891) downloads = [] from IPython.display import HTML, clear_output clear_output() print("Server Started Successfully") # + [markdown] id="S7H4e7_9Ya5w" # # *Following steps for Google Drive Upload (Personal Drive)* # <br><center><img src='https://drive.google.com/uc?id=1iqeDl-Jdv6FyeswW4Jt7bDN-e6VdHMb2' height="100" /></center> # # + [markdown] id="x9FayJLM-cMc" # # **STEP 3 : UPLOAD .TORRENT FILE OR PASTE MAGNET LINK [for GDrive]** # **This supports simultaneous torrent uploads :D** # + id="0et2A6N3udA0" cellView="form" outputId="3de798b3-a843-4e0f-ae83-b2f2c8b890b1" colab={"resources": {"http://localhost:8080/nbextensions/google.colab/files.js": {"data": 
"Ly8gQ29weXJpZ2h0IDIwMTcgR29vZ2xlIExMQwovLwovLyBMaWNlbnNlZCB1bmRlciB0aGUgQXBhY2hlIExpY2Vuc2UsIFZlcnNpb24gMi4wICh0aGUgIkxpY2Vuc2UiKTsKLy8geW91IG1heSBub3QgdXNlIHRoaXMgZmlsZSBleGNlcHQgaW4gY29tcGxpYW5jZSB3aXRoIHRoZSBMaWNlbnNlLgovLyBZb3UgbWF5IG9idGFpbiBhIGNvcHkgb2YgdGhlIExpY2Vuc2UgYXQKLy8KLy8gICAgICBodHRwOi8vd3d3LmFwYWNoZS5vcmcvbGljZW5zZXMvTElDRU5TRS0yLjAKLy8KLy8gVW5sZXNzIHJlcXVpcmVkIGJ5IGFwcGxpY2FibGUgbGF3IG9yIGFncmVlZCB0byBpbiB3cml0aW5nLCBzb2Z0d2FyZQovLyBkaXN0cmlidXRlZCB1bmRlciB0aGUgTGljZW5zZSBpcyBkaXN0cmlidXRlZCBvbiBhbiAiQVMgSVMiIEJBU0lTLAovLyBXSVRIT1VUIFdBUlJBTlRJRVMgT1IgQ09ORElUSU9OUyBPRiBBTlkgS0lORCwgZWl0aGVyIGV4cHJlc3Mgb3IgaW1wbGllZC4KLy8gU2VlIHRoZSBMaWNlbnNlIGZvciB0aGUgc3BlY2lmaWMgbGFuZ3VhZ2UgZ292ZXJuaW5nIHBlcm1pc3Npb25zIGFuZAovLyBsaW1pdGF0aW9ucyB1bmRlciB0aGUgTGljZW5zZS4KCi8qKgogKiBAZmlsZW92ZXJ2aWV3IEhlbHBlcnMgZm9yIGdvb2dsZS5jb2xhYiBQeXRob24gbW9kdWxlLgogKi8KKGZ1bmN0aW9uKHNjb3BlKSB7CmZ1bmN0aW9uIHNwYW4odGV4dCwgc3R5bGVBdHRyaWJ1dGVzID0ge30pIHsKICBjb25zdCBlbGVtZW50ID0gZG9jdW1lbnQuY3JlYXRlRWxlbWVudCgnc3BhbicpOwogIGVsZW1lbnQudGV4dENvbnRlbnQgPSB0ZXh0OwogIGZvciAoY29uc3Qga2V5IG9mIE9iamVjdC5rZXlzKHN0eWxlQXR0cmlidXRlcykpIHsKICAgIGVsZW1lbnQuc3R5bGVba2V5XSA9IHN0eWxlQXR0cmlidXRlc1trZXldOwogIH0KICByZXR1cm4gZWxlbWVudDsKfQoKLy8gTWF4IG51bWJlciBvZiBieXRlcyB3aGljaCB3aWxsIGJlIHVwbG9hZGVkIGF0IGEgdGltZS4KY29uc3QgTUFYX1BBWUxPQURfU0laRSA9IDEwMCAqIDEwMjQ7CgpmdW5jdGlvbiBfdXBsb2FkRmlsZXMoaW5wdXRJZCwgb3V0cHV0SWQpIHsKICBjb25zdCBzdGVwcyA9IHVwbG9hZEZpbGVzU3RlcChpbnB1dElkLCBvdXRwdXRJZCk7CiAgY29uc3Qgb3V0cHV0RWxlbWVudCA9IGRvY3VtZW50LmdldEVsZW1lbnRCeUlkKG91dHB1dElkKTsKICAvLyBDYWNoZSBzdGVwcyBvbiB0aGUgb3V0cHV0RWxlbWVudCB0byBtYWtlIGl0IGF2YWlsYWJsZSBmb3IgdGhlIG5leHQgY2FsbAogIC8vIHRvIHVwbG9hZEZpbGVzQ29udGludWUgZnJvbSBQeXRob24uCiAgb3V0cHV0RWxlbWVudC5zdGVwcyA9IHN0ZXBzOwoKICByZXR1cm4gX3VwbG9hZEZpbGVzQ29udGludWUob3V0cHV0SWQpOwp9CgovLyBUaGlzIGlzIHJvdWdobHkgYW4gYXN5bmMgZ2VuZXJhdG9yIChub3Qgc3VwcG9ydGVkIGluIHRoZSBicm93c2VyIHlldCksCi8vIHdoZXJlIHRoZXJlIGFyZSBtdWx0aXBsZSBhc3luY2hyb25vdXMgc3RlcHMgYW5kIHRoZSBQeXRob24
gc2lkZSBpcyBnb2luZwovLyB0byBwb2xsIGZvciBjb21wbGV0aW9uIG9mIGVhY2ggc3RlcC4KLy8gVGhpcyB1c2VzIGEgUHJvbWlzZSB0byBibG9jayB0aGUgcHl0aG9uIHNpZGUgb24gY29tcGxldGlvbiBvZiBlYWNoIHN0ZXAsCi8vIHRoZW4gcGFzc2VzIHRoZSByZXN1bHQgb2YgdGhlIHByZXZpb3VzIHN0ZXAgYXMgdGhlIGlucHV0IHRvIHRoZSBuZXh0IHN0ZXAuCmZ1bmN0aW9uIF91cGxvYWRGaWxlc0NvbnRpbnVlKG91dHB1dElkKSB7CiAgY29uc3Qgb3V0cHV0RWxlbWVudCA9IGRvY3VtZW50LmdldEVsZW1lbnRCeUlkKG91dHB1dElkKTsKICBjb25zdCBzdGVwcyA9IG91dHB1dEVsZW1lbnQuc3RlcHM7CgogIGNvbnN0IG5leHQgPSBzdGVwcy5uZXh0KG91dHB1dEVsZW1lbnQubGFzdFByb21pc2VWYWx1ZSk7CiAgcmV0dXJuIFByb21pc2UucmVzb2x2ZShuZXh0LnZhbHVlLnByb21pc2UpLnRoZW4oKHZhbHVlKSA9PiB7CiAgICAvLyBDYWNoZSB0aGUgbGFzdCBwcm9taXNlIHZhbHVlIHRvIG1ha2UgaXQgYXZhaWxhYmxlIHRvIHRoZSBuZXh0CiAgICAvLyBzdGVwIG9mIHRoZSBnZW5lcmF0b3IuCiAgICBvdXRwdXRFbGVtZW50Lmxhc3RQcm9taXNlVmFsdWUgPSB2YWx1ZTsKICAgIHJldHVybiBuZXh0LnZhbHVlLnJlc3BvbnNlOwogIH0pOwp9CgovKioKICogR2VuZXJhdG9yIGZ1bmN0aW9uIHdoaWNoIGlzIGNhbGxlZCBiZXR3ZWVuIGVhY2ggYXN5bmMgc3RlcCBvZiB0aGUgdXBsb2FkCiAqIHByb2Nlc3MuCiAqIEBwYXJhbSB7c3RyaW5nfSBpbnB1dElkIEVsZW1lbnQgSUQgb2YgdGhlIGlucHV0IGZpbGUgcGlja2VyIGVsZW1lbnQuCiAqIEBwYXJhbSB7c3RyaW5nfSBvdXRwdXRJZCBFbGVtZW50IElEIG9mIHRoZSBvdXRwdXQgZGlzcGxheS4KICogQHJldHVybiB7IUl0ZXJhYmxlPCFPYmplY3Q+fSBJdGVyYWJsZSBvZiBuZXh0IHN0ZXBzLgogKi8KZnVuY3Rpb24qIHVwbG9hZEZpbGVzU3RlcChpbnB1dElkLCBvdXRwdXRJZCkgewogIGNvbnN0IGlucHV0RWxlbWVudCA9IGRvY3VtZW50LmdldEVsZW1lbnRCeUlkKGlucHV0SWQpOwogIGlucHV0RWxlbWVudC5kaXNhYmxlZCA9IGZhbHNlOwoKICBjb25zdCBvdXRwdXRFbGVtZW50ID0gZG9jdW1lbnQuZ2V0RWxlbWVudEJ5SWQob3V0cHV0SWQpOwogIG91dHB1dEVsZW1lbnQuaW5uZXJIVE1MID0gJyc7CgogIGNvbnN0IHBpY2tlZFByb21pc2UgPSBuZXcgUHJvbWlzZSgocmVzb2x2ZSkgPT4gewogICAgaW5wdXRFbGVtZW50LmFkZEV2ZW50TGlzdGVuZXIoJ2NoYW5nZScsIChlKSA9PiB7CiAgICAgIHJlc29sdmUoZS50YXJnZXQuZmlsZXMpOwogICAgfSk7CiAgfSk7CgogIGNvbnN0IGNhbmNlbCA9IGRvY3VtZW50LmNyZWF0ZUVsZW1lbnQoJ2J1dHRvbicpOwogIGlucHV0RWxlbWVudC5wYXJlbnRFbGVtZW50LmFwcGVuZENoaWxkKGNhbmNlbCk7CiAgY2FuY2VsLnRleHRDb250ZW50ID0gJ0NhbmNlbCB1cGxvYWQnOwogIGNvbnN0IGNhbmNlbFByb21pc2UgPSBuZXcgUHJvbWlzZSgocmV
zb2x2ZSkgPT4gewogICAgY2FuY2VsLm9uY2xpY2sgPSAoKSA9PiB7CiAgICAgIHJlc29sdmUobnVsbCk7CiAgICB9OwogIH0pOwoKICAvLyBXYWl0IGZvciB0aGUgdXNlciB0byBwaWNrIHRoZSBmaWxlcy4KICBjb25zdCBmaWxlcyA9IHlpZWxkIHsKICAgIHByb21pc2U6IFByb21pc2UucmFjZShbcGlja2VkUHJvbWlzZSwgY2FuY2VsUHJvbWlzZV0pLAogICAgcmVzcG9uc2U6IHsKICAgICAgYWN0aW9uOiAnc3RhcnRpbmcnLAogICAgfQogIH07CgogIGNhbmNlbC5yZW1vdmUoKTsKCiAgLy8gRGlzYWJsZSB0aGUgaW5wdXQgZWxlbWVudCBzaW5jZSBmdXJ0aGVyIHBpY2tzIGFyZSBub3QgYWxsb3dlZC4KICBpbnB1dEVsZW1lbnQuZGlzYWJsZWQgPSB0cnVlOwoKICBpZiAoIWZpbGVzKSB7CiAgICByZXR1cm4gewogICAgICByZXNwb25zZTogewogICAgICAgIGFjdGlvbjogJ2NvbXBsZXRlJywKICAgICAgfQogICAgfTsKICB9CgogIGZvciAoY29uc3QgZmlsZSBvZiBmaWxlcykgewogICAgY29uc3QgbGkgPSBkb2N1bWVudC5jcmVhdGVFbGVtZW50KCdsaScpOwogICAgbGkuYXBwZW5kKHNwYW4oZmlsZS5uYW1lLCB7Zm9udFdlaWdodDogJ2JvbGQnfSkpOwogICAgbGkuYXBwZW5kKHNwYW4oCiAgICAgICAgYCgke2ZpbGUudHlwZSB8fCAnbi9hJ30pIC0gJHtmaWxlLnNpemV9IGJ5dGVzLCBgICsKICAgICAgICBgbGFzdCBtb2RpZmllZDogJHsKICAgICAgICAgICAgZmlsZS5sYXN0TW9kaWZpZWREYXRlID8gZmlsZS5sYXN0TW9kaWZpZWREYXRlLnRvTG9jYWxlRGF0ZVN0cmluZygpIDoKICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgJ24vYSd9IC0gYCkpOwogICAgY29uc3QgcGVyY2VudCA9IHNwYW4oJzAlIGRvbmUnKTsKICAgIGxpLmFwcGVuZENoaWxkKHBlcmNlbnQpOwoKICAgIG91dHB1dEVsZW1lbnQuYXBwZW5kQ2hpbGQobGkpOwoKICAgIGNvbnN0IGZpbGVEYXRhUHJvbWlzZSA9IG5ldyBQcm9taXNlKChyZXNvbHZlKSA9PiB7CiAgICAgIGNvbnN0IHJlYWRlciA9IG5ldyBGaWxlUmVhZGVyKCk7CiAgICAgIHJlYWRlci5vbmxvYWQgPSAoZSkgPT4gewogICAgICAgIHJlc29sdmUoZS50YXJnZXQucmVzdWx0KTsKICAgICAgfTsKICAgICAgcmVhZGVyLnJlYWRBc0FycmF5QnVmZmVyKGZpbGUpOwogICAgfSk7CiAgICAvLyBXYWl0IGZvciB0aGUgZGF0YSB0byBiZSByZWFkeS4KICAgIGxldCBmaWxlRGF0YSA9IHlpZWxkIHsKICAgICAgcHJvbWlzZTogZmlsZURhdGFQcm9taXNlLAogICAgICByZXNwb25zZTogewogICAgICAgIGFjdGlvbjogJ2NvbnRpbnVlJywKICAgICAgfQogICAgfTsKCiAgICAvLyBVc2UgYSBjaHVua2VkIHNlbmRpbmcgdG8gYXZvaWQgbWVzc2FnZSBzaXplIGxpbWl0cy4gU2VlIGIvNjIxMTU2NjAuCiAgICBsZXQgcG9zaXRpb24gPSAwOwogICAgd2hpbGUgKHBvc2l0aW9uIDwgZmlsZURhdGEuYnl0ZUxlbmd0aCkgewogICAgICBjb25zdCBsZW5ndGggPSBNYXRoLm1pbihmaWxlRGF0YS5ieXRlTGV
uZ3RoIC0gcG9zaXRpb24sIE1BWF9QQVlMT0FEX1NJWkUpOwogICAgICBjb25zdCBjaHVuayA9IG5ldyBVaW50OEFycmF5KGZpbGVEYXRhLCBwb3NpdGlvbiwgbGVuZ3RoKTsKICAgICAgcG9zaXRpb24gKz0gbGVuZ3RoOwoKICAgICAgY29uc3QgYmFzZTY0ID0gYnRvYShTdHJpbmcuZnJvbUNoYXJDb2RlLmFwcGx5KG51bGwsIGNodW5rKSk7CiAgICAgIHlpZWxkIHsKICAgICAgICByZXNwb25zZTogewogICAgICAgICAgYWN0aW9uOiAnYXBwZW5kJywKICAgICAgICAgIGZpbGU6IGZpbGUubmFtZSwKICAgICAgICAgIGRhdGE6IGJhc2U2NCwKICAgICAgICB9LAogICAgICB9OwogICAgICBwZXJjZW50LnRleHRDb250ZW50ID0KICAgICAgICAgIGAke01hdGgucm91bmQoKHBvc2l0aW9uIC8gZmlsZURhdGEuYnl0ZUxlbmd0aCkgKiAxMDApfSUgZG9uZWA7CiAgICB9CiAgfQoKICAvLyBBbGwgZG9uZS4KICB5aWVsZCB7CiAgICByZXNwb25zZTogewogICAgICBhY3Rpb246ICdjb21wbGV0ZScsCiAgICB9CiAgfTsKfQoKc2NvcGUuZ29vZ2xlID0gc2NvcGUuZ29vZ2xlIHx8IHt9OwpzY29wZS5nb29nbGUuY29sYWIgPSBzY29wZS5nb29nbGUuY29sYWIgfHwge307CnNjb3BlLmdvb2dsZS5jb2xhYi5fZmlsZXMgPSB7CiAgX3VwbG9hZEZpbGVzLAogIF91cGxvYWRGaWxlc0NvbnRpbnVlLAp9Owp9KShzZWxmKTsK", "ok": true, "headers": [["content-type", "application/javascript"]], "status": 200, "status_text": ""}}, "base_uri": "https://localhost:8080/", "height": 72} #@title #@markdown <h3>⬅️ Click Here to </h3> #@markdown <h3>Upload torrent file</h3> #@markdown <h5>You can run this cell to add more files as many times as you want</h5> #@markdown <h5>No parallel downloading using .torrent files, use magnet for that</h5><br> from google.colab import files source = files.upload() params = { "save_path": "/content/drive/My Drive/Torrent", "ti": lt.torrent_info(list(source.keys())[0]), } downloads.append(ses.add_torrent(params)) # + id="Cwi1GMlxy3te" cellView="form" outputId="17b6c7d1-ced0-4671-9a7d-ca57d7147591" colab={"base_uri": "https://localhost:8080/", "height": 71} #@title #@markdown <h3>⬅️ Click Here to </h3> #@markdown <h3>Add From Magnet Link</h3> #@markdown <h5>You can run this cell to add more files as many times as you want</h5><br> params = {"save_path": "/content/drive/My Drive/Torrent"} while True: magnet_link = input("Enter Magnet Link Or Type Exit: ") if 
magnet_link.lower() == "exit": break downloads.append( lt.add_magnet_uri(ses, magnet_link, params) ) # + [markdown] id="BrkGxzX9bfy2" # # *Following steps for Team Drive Upload (Shared Drive)* # <br><center><img src='https://drive.google.com/uc?id=1AC4hpO-pE2FyHzTQBrwq27kGbZDOeVuO' height="100" /></center> # # + [markdown] id="7S10BUccJnQz" # # **STEP 3 : UPLOAD .TORRENT FILE OR PASTE MAGNET LINK [for Team Drive]** # + cellView="both" id="qp5Va6shTTVX" #@title #@markdown <h3>⬅️ Click Here to </h3> #@markdown <h3>Upload torrent file</h3> #@markdown <h5>You can run this cell to add more files as many times as you want</h5> #@markdown <h5>Doesn't support parallel downloding, use magnet method for that</h5><br> #@markdown <h3>Enter the path of TD as shown in the video :</h3> path = "/content/drive/Shared drives/Google_Drive/Torrent" #@param {type:"string"} check=[] from google.colab import files source = files.upload() params = { "save_path": path, "ti": lt.torrent_info(list(source.keys())[0]), } downloads.append(ses.add_torrent(params)) clear_output() print("Started Successfully") # + cellView="both" id="DmbhGmxLTXmo" #@title #@markdown <h3>⬅️ Click Here to </h3> #@markdown <h3>Add From Magnet Link</h3> #@markdown <h5>You can run this cell to add more files as many times as you want</h5><br> #@markdown <h3>Enter the path of TD as shown in the video :</h3> path = "/content/drive/Shared drives/Google_Drive/Torrent" #@param {type:"string"} check=[] params = {"save_path": path} while True: magnet_link = input("Enter Magnet Link Or Type Exit: ") if magnet_link.lower() == "exit": break downloads.append( lt.add_magnet_uri(ses, magnet_link, params) ) clear_output() print("Started Successfully") # + [markdown] id="hjImxBzAISUX" # # **STEP 4 : START CLOUD UPLOAD** # + id="DBNoYYoSuDBT" cellView="form" #@title #@markdown <h3>⬅️ Click Here to Start Download</h3> import time from IPython.display import display import ipywidgets as widgets state_str = [ "queued", "checking", 
"downloading metadata", "downloading", "finished", "seeding", "allocating", "checking fastresume", ] layout = widgets.Layout(width="auto") style = {"description_width": "initial"} download_bars = [ widgets.FloatSlider( step=0.01, disabled=True, layout=layout, style=style ) for _ in downloads ] display(*download_bars) while downloads: next_shift = 0 for index, download in enumerate(downloads[:]): bar = download_bars[index + next_shift] if not download.is_seed(): s = download.status() bar.description = " ".join( [ download.name(), str(s.download_rate / 1000), "kB/s", state_str[s.state], ] ) bar.value = s.progress * 100 else: next_shift -= 1 ses.remove_torrent(download) downloads.remove(download) bar.close() # Seems to be not working in Colab (see https://github.com/googlecolab/colabtools/issues/726#issue-486731758) download_bars.remove(bar) print(download.name(), "complete") time.sleep(1)
Torrent2GoogleDrive.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# + id="BqkGR-LB_-Bf"
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
# %matplotlib inline
import seaborn as sns

sns.set_theme(style="darkgrid")

import scipy as sp
from sklearn.ensemble import RandomForestClassifier
from sklearn.impute import SimpleImputer
from datetime import datetime
# -

# Project-level helpers (defines raw_root, etc.).
exec(open("../../header.py").read())

# + id="vXpomyt6FMz2"
# Read in the cervical-cancer risk-factor data; '?' marks missing values.
data = pd.read_csv(raw_root('risk_factors_cervical_cancer.csv')).\
    replace('?', np.nan)

# + id="V73VBEsDITTj"
# Impute missing values (SimpleImputer defaults to the column mean).
my_imputer = SimpleImputer()
data_final = pd.DataFrame(my_imputer.fit_transform(data))
data_final.columns = data.columns

# + id="cWk2pkalG5Rm"
# All candidate target variables; only Biopsy is modelled below.
targets = ['Hinselmann', 'Schiller', 'Citology', 'Biopsy']

# + id="tux7W3G4GlBM"
# Split into covariates and target.
X = data_final.loc[:, ~data_final.columns.isin(targets)]
y = data_final['Biopsy']

# + id="xB4klszcFiEB"
# Fit a random forest on the full data set (ICE plots explain this model).
rf = RandomForestClassifier(n_estimators=500)
rf.fit(X, y)
# -

X.head()

print(f"Out of {len(y)} y-values, there are {np.sum(y == 1)} 1s and {np.sum(y == 0)} 0s")


# + id="bMxOrVBnjXCv"
def uniform_sample(df, feature, frac_to_plot, seednum=None, trace=False):
    '''
    #' Uniformly sample across quantiles of x_j so that no part of the
    #' distribution of x is left out of the plot.
    #' @param df : Covariate matrix.
    #' @param feature : Target covariate to stratify on.
    #' @param frac_to_plot : Fraction of the data set to keep.
    #' @param seednum : Random seed for reproducibility.
    #' @param trace : If True, print diagnostic information.
    #' @return Uniformly sampled dataframe with ~N * frac_to_plot observations.
    #' @examples
    #' uniform_sample(X, 'Age', .33, 420)
    '''
    df = df.copy()

    # Total number of rows to sample.
    N = int(df.shape[0] * frac_to_plot)
    if trace:
        print(f"Sampling {N} observations")

    # Per-quartile sample sizes; the remainder is spread over the first quartiles.
    quantile = [N // 4 + (1 if x < N % 4 else 0) for x in range(4)]
    if trace:
        print(f"quantile {quantile}")

    # Quartile bin edges and labels.
    bins, labels = [0, .25, .5, .75, 1.], ['q1', 'q2', 'q3', 'q4']

    # Tag every row with the quartile of x_j it falls in.
    df['quantile'] = pd.qcut(df[feature], q=bins, labels=labels)
    if trace:
        print(df['quantile'][:3])

    # Sample the target count from each quartile, then drop the helper column.
    # BUG FIX: seednum is now actually passed through (it was accepted but ignored).
    out = pd.concat([df[df['quantile'].eq(label)].sample(quantile[i], random_state=seednum)
                     for i, label in enumerate(labels)]).\
        drop(columns=['quantile'])
    return out
# -

uniform_sample(X, 'Age', 0.01, trace=True)


# + id="RkxNpAU8ojAL"
def ice_plot(plot_data, feature, plot_num=100, y_var="Predicted Probability"):
    '''
    #' Plots ICE curves from pre-computed ICE data.
    #' @param plot_data : Long-format ICE data (one row per grid point per observation).
    #' @param feature : Target covariate to plot.
    #' @param plot_num : Number of individual curves to draw.
    #' @param y_var : Column holding the predictions.
    #' @return (fig, ax) of the ICE plot.
    #' @examples
    #' ice_plot(ice_data, 'Age')
    '''
    # Never request more curves than there are observations (np.random.choice
    # with replace=False raises otherwise).
    unique_obs = plot_data.obs.unique()
    ob_sample = np.random.choice(unique_obs, size=min(plot_num, len(unique_obs)),
                                 replace=False)
    # obs == -1 is reserved for the mean (PDP) line.
    ob_sample = np.append(ob_sample, [-1])

    mean_line = plot_data\
        .groupby(feature)\
        .agg(y=(y_var, 'mean'))\
        .reset_index()\
        .rename({'y': y_var}, axis=1)\
        .assign(obs=-1, mean_line=1)

    # BUG FIX: DataFrame.append was removed in pandas 2.0 -> use pd.concat.
    plot_sub_data = pd.concat(
        [plot_data.loc[lambda x: x.obs.isin(ob_sample)].assign(mean_line=0),
         mean_line],
        ignore_index=True)

    fig, ax = plt.subplots()

    # Plot each sampled observation's curve, plus the mean line.
    for ob in ob_sample:
        d = plot_sub_data.loc[lambda x: x.obs == ob]
        if max(d.mean_line) == 0:
            # Individual curve: faint black, no legend entry.
            alpha = 0.1
            color = "black"
            label = ""
        elif max(d.mean_line) == 1:
            # Mean (PDP) curve: opaque red.
            # BUG FIX: alpha must lie in [0, 1]; the previous value of 5
            # raises ValueError in matplotlib.
            alpha = 1.0
            color = "red"
            label = "Mean line"
            print("Plot mean line")
        ax.plot(feature, y_var, label=label, alpha=alpha, data=d, color=color)

    ax.set_title('{} ICE Plot'.format(feature), fontsize=18)
    ax.set_xlabel(feature, fontsize=18)
    ax.set_ylabel('Predicted Probability', fontsize=16)
    ax.legend()
    return (fig, ax)


# + id="R_OSdXKHrvLk"
def ice(X, clf, feature, model_type, num_per_ob=30, frac_to_plot=1, seednum=None):
    '''
    #' Generates ICE data for one feature and plots it.
    #' @param X : Covariate matrix.
    #' @param clf : Fitted ML classifier/regressor.
    #' @param feature : Target covariate to plot.
    #' @param model_type : "binary" uses predict_proba; anything else uses predict.
    #' @param num_per_ob : Number of grid points per observation.
    #' @param frac_to_plot : Fraction of the data set to plot.
    #' @param seednum : Random seed for reproducibility.
    #' @return ICE data (dataframe), one row per (observation, grid point).
    #' @examples
    #' ice(X, rf, 'Age', 'binary', frac_to_plot=.33, seednum=420)
    '''
    # Uniformly sample observations across quantiles of the feature.
    X = uniform_sample(X, feature, frac_to_plot, seednum)

    # Evenly spaced grid over the observed range of the feature.
    feature_min = np.min(X[feature])
    feature_max = np.max(X[feature])
    feature_range = np.linspace(feature_min, feature_max, num=num_per_ob)

    frames = []
    start = datetime.now()
    for i in X.index:
        # Replicate the observation once per grid point, then sweep the feature.
        temp_df = X.loc[np.repeat(i, num_per_ob)].copy().reset_index(drop=True)
        temp_df[feature] = feature_range

        # Positive-class probability for binary models, raw prediction otherwise.
        if model_type == "binary":
            preds = clf.predict_proba(temp_df)[:, 1]
        else:
            preds = clf.predict(temp_df)

        temp_df['y_pred'] = preds
        temp_df['obs'] = i
        frames.append(temp_df)

    # BUG FIX: appending to a DataFrame inside the loop was O(n^2) and
    # DataFrame.append was removed in pandas 2.0 -> concatenate once.
    df = pd.concat(frames, ignore_index=True)
    end = datetime.now()
    print(f"Time to create dataframe: {end - start}")

    ice_plot(df, feature, plot_num=300, y_var="y_pred")
    # Return the ICE data (previously None) so callers such as `a = ice(...)`
    # can reuse it; backward compatible since the old value was never used.
    return df
# -

seednum = 420
a = ice(X=X, clf=rf, feature='Age', model_type='binary', frac_to_plot=.50, seednum=seednum)
notebooks/01-replication/02-ice-plot-from-scratch.ipynb
# -*- coding: utf-8 -*- # --- # jupyter: # jupytext: # text_representation: # extension: .jl # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Julia 1.0.3 # language: julia # name: julia-1.0 # --- # # Exercises: HPC # ### π in parallel # # Let's compute the value of π=3.1415926.... in parallel using Monte Carlo simulations. # # Below is a function that does this by throwing darts inside a box of $[-1, 1]^2$. It follows from the definition of π that the area fo a circle is $A = \pi r^2$, where $r$ is the radius of a circle. We develop a Monte Carlo algorithm to compute π by randomly throwing darts (i.e., generating uniformly distributed random numbers inside a unit box) and counting the fraction of the points that land inside the circle. # Let's try to visualize the setup first. # + using Plots pyplot() plt = plot( xlims=(-2,2), ylims=(-2,2), aspect_ratio=1) for n in 1:500 x = rand()*2 - 1 y = rand()*2 - 1 r = sqrt(x^2 + y^2) if r < 1 #inside scatter!([x], [y], color="red", label="") else #outside scatter!([x], [y], color="blue",label="") end end plt #display plot # - # Can you see the circle? Now, let's do some actual computing. function compute_pi(N::Int) n_landed_in_circle = 0 # counts number of points that have radial coordinate < 1, i.e. in circle for i = 1:N x = rand() * 2 - 1 # uniformly distributed number on x-axis y = rand() * 2 - 1 # uniformly distributed number on y-axis r = sqrt(x*x + y*y) # radius squared, in radial coordinates if r < 1 n_landed_in_circle += 1 end end return n_landed_in_circle / N * 4.0 end @time compute_pi(10^9) #my very own π! # #### Actual exercise # Your mission? Parallellize the `compute_pi` function! # # Hint: see the `for` loop? Remember `@distributed`? Remember that when using `@distributed` the result of each iteration is taken as the value of the last expression inside the loop. Therefore if your loops ends in # ```julia # for # #blaablaa # #... 
# if something # 1 # else # 0 # end # end # ``` # it will result in returning either `1` or `0`. # # You can also try adapting this to use `pmap` if you like. # Remember to add workers before we run our sweet new function with this command: using Distributed addprocs(4) nprocs() # Finally, after you are done, see the full story at [Parallel Monte Carlo in Julia](http://corysimon.github.io/articles/parallel-monte-carlo-in-julia/) # ### Advanced: Distributed Arrays # # Install `DistributedArrays` as # ```julia # Pkg.add("DistributedArrays") # ``` # # then try them out as # ```julia # addprocs(4) #adding 4 workers to share the load # @everywhere using DistributedArrays #loading DAs for every worker # A = fill(1.1, (100, 100) ) #create array # DA = distribute(A) #distribute it to workers # sum(DA) # ``` # ### Advanced: MPI # # Install `MPI` package by running: # ```julia # Pkg.update() # Pkg.add("MPI") # ``` # In case of problems, see the [readme](https://github.com/JuliaParallel/MPI.jl) # # Because of how MPI works, we need to explicitly write our code into a file. Create `01-hello.jl` and `01-hello-impl.jl` as follows: # # `01-hello.jl` should look like this: # ```julia # # import MPI # include("01-hello-impl.jl") # # function main() # MPI.Init() # # do_hello() # # MPI.Finalize() # end # # main() # ``` # and the actual implementation file `01-hello-impl.jl` like this # ```julia # # function do_hello() # comm = MPI.COMM_WORLD # println("Hello world, I am $(MPI.Comm_rank(comm)) of $(MPI.Comm_size(comm))") # MPI.Barrier(comm) # end # ``` # # # You can execute your code the normal way as # ```bash # mpirun -np 3 julia 01-hello.jl # ``` # # See the MPI.jl [examples](https://github.com/JuliaParallel/MPI.jl/tree/master/examples) for more.
exercises/07_Exercises-performance.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- def testing_adjacent_elements(current_list, prev_list, n_adjacent_start): len_list = len(current_list) start_pos = n_adjacent_start for i in range(start_pos, len_list): if i == start_pos: if prev_list[i] == current_list[0] or prev_list[i] == current_list[1]: # if current_list[i] == prev_list[i] or current_list[i] == prev_list[i+1]: return False elif i == (len_list - 1): if current_list[i] == prev_list[i-1] or current_list[i] == prev_list[i]: return False else: if current_list[i] == prev_list[i-1] or current_list[i] == prev_list[i] or current_list[i] == prev_list[i+1]: return False return True def testing_adjacent_elements(current_list, prev_list, prev_list_start, n_rows): len_list = len(current_list) current_list_end = len_list - prev_list_start for i, j in zip(range(prev_list_start, len_list), range(current_list_end)): if i == prev_list_start: if prev_list[i] == current_list[j] or prev_list[i] == current_list[j+1]: return False elif i == (len_list - 1) and current_list_end == n_rows: if prev_list[i] == current_list[j-1] or prev_list[i] == current_list[j]: return False else: if prev_list[i] == current_list[j-1] or prev_list[i] == current_list[j] or prev_list[i] == current_list[j+1]: return False return True # + import random import math import numpy as np n_reps = 3 n_plots = 10 n_cols = 2 n_rows = int(math.ceil(float(n_plots)/n_cols)) n_adjacent_start = n_rows*(n_cols - 1) # For cases like n_cols = 4 and n_plots = 9 (better n_cols = 3) if n_plots == n_adjacent: n_cols -= 1 n_adjacent_start = n_rows*(n_cols - 1) print("Cols: " + str(n_cols)) print("Rows: " + str(n_rows)) print(n_adjacent_start) prev_list = [] current_list = range(n_plots) for i in range(n_reps): current_list = random.sample(current_list, len(current_list)) if len(prev_list)!=0: 
while(testing_adjacent_elements(current_list, prev_list, n_adjacent_start, n_rows) == False): current_list = random.sample(current_list, len(current_list)) prev_list = current_list print(current_list) print(np.asarray(current_list).reshape(n_rows, n_cols)) # - A = [[1, 4, 5, 12], [-5, 8, 9, 0], [-6, 7, 11, 19]] print A for i, j in zip(range(5, 10), range(4)): print i,j range(5) import numpy as np array = np.arange(29).reshape(4,5) print array
.ipynb_checkpoints/RandomPlots-checkpoint.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # Exploring the KMeans algorithm
#
# Clustering is an unsupervised method: when you have a large amount of
# unlabelled data, the most convenient approach is to group it with a
# clustering algorithm and assign labels from the groups.
#
# > **What is unsupervised learning?**
# > Unsupervised learning is the counterpart of supervised learning. In
# > supervised learning a teacher shows you many labelled examples (say,
# > photos of cats) and you learn to recognise new ones; in unsupervised
# > learning you look at many unlabelled examples and discover on your own
# > that some of them share features (four legs, whiskers, fur) and probably
# > belong to the same group.
#
# KMeans is a simple, effective unsupervised clustering algorithm. Typical
# applications include:
#
# * image segmentation
# * grouping gene fragments
# * tagging and grouping articles
# * grouping species populations
# * anomaly detection
#
# Here we implement KMeans for a simple scenario: a plane scattered with
# points that look unstructured (at least to the computer), which we want to
# group by spatial density.

# +
import numpy as np
import matplotlib.pyplot as plt

# Generate a cloud of points around four known centers.
real_center = [(1, 1), (1, 2), (2, 2), (2, 1)]
point_number = 50

points_x = []
points_y = []

for center in real_center:
    offset_x, offset_y = np.random.randn(point_number) * 0.3, np.random.randn(point_number) * 0.25
    x_val, y_val = center[0] + offset_x, center[1] + offset_y

    points_x.append(x_val)
    points_y.append(y_val)

points_x = np.concatenate(points_x)
points_y = np.concatenate(points_y)

# Plot the sample points.
plt.scatter(points_x, points_y, color='green', marker='+')
# Plot the true centers.
center_x, center_y = zip(*real_center)
plt.scatter(center_x, center_y, color='red', marker='^')
plt.xlim(0, 3)
plt.ylim(0, 3)
plt.show()
print(len(points_x))
print(len(points_y))
#print(points_x)
#print(points_y)
# -

# We generated randomly scattered points around the four centers
# (1, 1), (1, 2), (2, 2), (2, 1). If our clustering works, the centers it
# finds should be close to these. The KMeans steps in plain language:
#
# - Step 1 - Randomly choose K points as cluster centers; this means we will
#   split the data into K groups.
# - Step 2 - For every point P, compute the distance from P to each cluster
#   center and put P into the set of the nearest center. This yields K sets.
# - Step 3 - For each set, compute its mean position and make that the new
#   cluster center.
# - Step 4 - Repeat steps 2 and 3 until the cluster centers stop moving.

# +
# Step 1: randomly choose K points as the initial cluster centers.
K = 4
p_list = np.stack([points_x, points_y], axis=1)
# np.stack(..., axis=1) pairs the x and y coordinates into (N, 2) points.
#print(p_list)
index = np.random.choice(len(p_list), size=K)
print(index)
centeroid = p_list[index]  # NOTE: name is a misspelling of "centroid"
print(centeroid)

# Plotting below.
for p in centeroid:
    plt.scatter(p[0], p[1], marker='^')
plt.xlim(0, 3)
plt.ylim(0, 3)
plt.show()

# +
# Step 2: assign every point P to the set of its nearest cluster center.
points_set = {key: [] for key in range(K)}
for p in p_list:
    nearest_index = np.argmin(np.sum((centeroid - p) ** 2, axis=1) ** 0.5)  # index of the smallest distance
    points_set[nearest_index].append(p)

# Plotting below.
for k_index, p_set in points_set.items():
    p_xs = [p[0] for p in p_set]
    p_ys = [p[1] for p in p_set]
    plt.scatter(p_xs, p_ys, color='C{}'.format(k_index))  # 'C{}' cycles matplotlib's default colors

for ix, p in enumerate(centeroid):
    plt.scatter(p[0], p[1], color='C{}'.format(ix), marker='^', edgecolor='black', s=128)

plt.xlim(0, 3)
plt.ylim(0, 3)
plt.show()
# -

# Step 3: recompute each cluster center as the mean of its point set.
for k_index, p_set in points_set.items():
    p_xs = [p[0] for p in p_set]
    p_ys = [p[1] for p in p_set]
    centeroid[k_index, 0] = sum(p_xs) / len(p_set)
    centeroid[k_index, 1] = sum(p_ys) / len(p_set)

# +
# Step 4: repeat the assignment/update steps.
for i in range(10):
    points_set = {key: [] for key in range(K)}
    for p in p_list:
        nearest_index = np.argmin(np.sum((centeroid - p) ** 2, axis=1) ** 0.5)
        points_set[nearest_index].append(p)

    for k_index, p_set in points_set.items():
        p_xs = [p[0] for p in p_set]
        p_ys = [p[1] for p in p_set]
        centeroid[k_index, 0] = sum(p_xs) / len(p_set)
        centeroid[k_index, 1] = sum(p_ys) / len(p_set)

    for k_index, p_set in points_set.items():
        p_xs = [p[0] for p in p_set]
        p_ys = [p[1] for p in p_set]
        plt.scatter(p_xs, p_ys, color='C{}'.format(k_index))

    for ix, p in enumerate(centeroid):
        plt.scatter(p[0], p[1], color='C{}'.format(ix), marker='^', edgecolor='black', s=128)

    plt.xlim(0, 3)
    plt.ylim(0, 3)
    plt.annotate('{} episode'.format(i + 1), xy=(2, 2.5), fontsize=14)
    plt.show()

print(centeroid)
# -

# # Finding the value of K
#
# The walkthrough above covered the KMeans procedure, but one question
# remains: how to choose K. Here we generated the data ourselves, so K was
# obvious, but in practice K is rarely known up front.
#
# A common approach is to compute the mean distance of each point to its own
# cluster center. In theory this mean distance shrinks as K grows, but when
# we plot it against K we find an elbow point: before the elbow the mean
# distance drops quickly with K, after it the decrease slows down. Below we
# run the clustering with sklearn's KMeans and plot that curve.

# +
from sklearn.cluster import KMeans

loss = []

for i in range(1, 10):
    kmeans = KMeans(n_clusters=i, max_iter=100).fit(p_list)
    loss.append(kmeans.inertia_ / point_number / K)

plt.plot(range(1, 10), loss)
plt.show()
# -

# We can see the elbow at K = 4: before it the distance falls rapidly, after
# it the rate of decrease clearly slows, so 4 is the best number of clusters.
DataModeling/K-means/kmeans.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     name: python3
# ---

# + id="q0UKXn5mqg2v"
# !pip install --upgrade tables
# !pip install eli5

# + id="TwU7C7QEpvug"
import pandas as pd
import numpy as np

from sklearn.dummy import DummyRegressor
from sklearn.tree import DecisionTreeRegressor
from sklearn.metrics import mean_absolute_error as mae
from sklearn.model_selection import cross_val_score

import eli5
from eli5.sklearn import PermutationImportance

# + id="ieK7wd50qOpw"
# cd 'drive/My Drive/Colab Notebooks/DW.Matrix/Matrix_Two/DW_Matrix_Car'

# + [markdown] id="1lJrH8WsrAOm"
# ## Loading the data

# + id="WkE1xJ3crLhF"
df = pd.read_hdf('data/car.h5')
df.shape

# + id="bJb68-m7rsN2"
df.columns

# + [markdown] id="WIVsNdt8sQLZ"
# ## Dummy Model

# + id="dpMVpE3TsSGy"
df.select_dtypes(np.number).columns

# + id="aP_5JlD2sVru"
# Baseline: predict the mean price regardless of features.
feats = ['car_id']
X = df[feats].values
Y = df.price_value.values

model = DummyRegressor()
model.fit(X, Y)
Y_pred = model.predict(X)

mae(Y, Y_pred)

# + id="7IJeEP6-tJws"
# All price-related columns.
[x for x in df.columns if 'price' in x]

# + id="wtlq9x3Nt1kL"
df.price_currency.value_counts()

# + id="Km58T7jut_7t"
df.price_currency.value_counts(normalize=True) * 100

# + id="q-Ffl7s9uUiA"
# Keep only PLN-priced listings; EUR rows are a small minority.
df = df[df['price_currency'] != 'EUR']
df.shape

# + [markdown] id="Rva-aIFOvV0o"
# ## Features

# + id="RyjNd1xAvWpu"
df.head()

# + id="4X9K5LfsvaiQ"
# Factorize every non-list column into an integer-coded "__cat" feature.
SUFFIX_CAT = '__cat'
for feat in df.columns:
    # BUG FIX: use positional access. After dropping EUR rows above, the
    # label 0 may no longer exist in the index, so df[feat][0] could raise
    # a KeyError; .iloc[0] always reads the first remaining row.
    if isinstance(df[feat].iloc[0], list):
        continue

    factorized_values = df[feat].factorize()[0]
    if SUFFIX_CAT in feat:
        df[feat] = factorized_values
    else:
        df[feat + SUFFIX_CAT] = factorized_values

# + id="30ml-iNBvl2D"
# Candidate features: all factorized columns except price-related ones.
cat_feats = [x for x in df.columns if SUFFIX_CAT in x]
cat_feats = [x for x in cat_feats if 'price' not in x]
len(cat_feats)

# + id="1qHFDyLUz82m"
# Decision tree on all categorical features, scored with 3-fold CV MAE.
X = df[cat_feats].values
Y = df.price_value.values

model = DecisionTreeRegressor(max_depth=5)

scores = cross_val_score(model, X, Y, cv=3, scoring='neg_mean_absolute_error')
np.mean(scores)

# + id="nWLpkw5s2Jxg"
# Permutation importance to rank the features.
m = DecisionTreeRegressor(max_depth=5)
m.fit(X, Y)

imp = PermutationImportance(m, random_state=0).fit(X, Y)
eli5.show_weights(imp, feature_names=cat_feats)

# + id="rNWUKJYM3jXD"
# Re-score using only the most important features.
proper_list = ['param_moc__cat', 'feature_kamera-cofania__cat', 'param_pojemność-skokowa__cat',
               'feature_bluetooth__cat', 'feature_łopatki-zmiany-biegów__cat', 'feature_światła-led__cat']

X = df[proper_list].values
Y = df.price_value.values

model = DecisionTreeRegressor(max_depth=5)

scores = cross_val_score(model, X, Y, cv=3, scoring='neg_mean_absolute_error')
np.mean(scores)

# + id="dlxAdlW-6HyF"
# Extend the feature set and re-score.
proper_list += ['param_typ__cat', 'param_marka-pojazdu__cat', 'feature_czujniki-parkowania-przednie__cat']

X = df[proper_list].values
Y = df.price_value.values

model = DecisionTreeRegressor(max_depth=5)

scores = cross_val_score(model, X, Y, cv=3, scoring='neg_mean_absolute_error')
np.mean(scores)

# + id="6Lcuwgeh72Ws"
# One more feature, one more scoring pass.
proper_list += ['feature_tuner-tv__cat']

X = df[proper_list].values
Y = df.price_value.values

model = DecisionTreeRegressor(max_depth=5)

scores = cross_val_score(model, X, Y, cv=3, scoring='neg_mean_absolute_error')
np.mean(scores)

# + id="4hq7eOyO78N5"
Day3_Simple_Model.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # Programming languages are much simpler than human languages. Nonetheless, # there are some rules of grammar to learn in any language, and that is where we # will begin. In this text, we will use the [Python](https://www.python.org/) # programming language. Learning the grammar rules is essential, and the same # rules used in the most basic programs are also central to more sophisticated # programs. # Programs contain *expressions*. # # You can think of an expression as a recipe that returns a value. For example, # this is an expression: 3 * 4 # Notice that the Notebook displays the value. This the *value* that the # expression returns. # # An expression can describe to the computer how to combine pieces of data. In # our example, we have a multiplication expression that consists of a `*` symbol # between two numerical expressions. Expressions, such as `3 * 4`, are # *evaluated* by the computer. The value (the result of *evaluation*) of the # last expression in each cell, `12` in this case, is displayed below the cell. # The grammar rules of a programming language are rigid. In Python, the `*` # symbol cannot appear twice in a row. The computer will not try to interpret an # expression that differs from its prescribed expression structures. Instead, it # will show a `SyntaxError` error. The *Syntax* of a language is its set of # grammar rules, and a `SyntaxError` indicates that an expression structure # doesn't match any of the rules of the language. # # For example, consider this code, and the resulting error: # + tags=["raises-exception"] 3 * * 4 # - # Small changes to an expression can change its meaning entirely. Below, the # space between the `*`'s has been removed. 
Because `**` appears between two # numerical expressions, the expression is a well-formed *exponentiation* # expression (the first number raised to the power of the second: 3 times 3 # times 3 times 3). The symbols `*` and `**` are called *operators*, and the # values they combine are called *operands*. 3 ** 4 # **Common Operators.** Data science often involves combining numerical values, # and the set of operators in a programming language are designed to so that # expressions can be used to express any sort of arithmetic. In Python, the # following operators are essential. # # | Expression Type | Operator | Example | Value | # |-----------------|----------|------------|-----------| # | Addition | `+` | `2 + 3` | `5` | # | Subtraction | `-` | `2 - 3` | `-1` | # | Multiplication | `*` | `2 * 3` | `6` | # | Division | `/` | `7 / 3` | `2.66667` | # | Remainder | `%` | `7 % 3` | `1` | # | Exponentiation | `**` | `2 ** 0.5` | `1.41421` | # Python expressions obey the same familiar rules of *precedence* as in algebra: # multiplication and division occur before addition and subtraction. Parentheses # can be used to group together smaller expressions within a larger expression. # # Multiplication has precedence (we do `2 * 3` before adding to 1). 1 + 2 * 3 # We can use parentheses to group expressions that should be evaluated first. # Here we force `1 + 2` *before* the multiplication by 3. (1 + 2) * 3 # In the expression above, the `1 + 2` is a sub-expression within the whole # expression `(1 + 2) * 3`. When Python sees this line, it finds the expression # `(1 + 2)` and evaluates that to get `3`. Then it evaluates `3 * 3` to get # `9`. # # As you learn to program, you will learn to think about what Python is doing, # and the logical steps it follows. Over time, you will find it easier to # predict what it will do, and understand why. 
# # ### Example ### # # Here, from the Washington Post in the early 1980s, is a graph that attempts to # compare the earnings of doctors with the earnings of other professionals over # a few decades. Do we really need to see two heads (one with a stethoscope) on # each bar? [<NAME>](https://en.wikipedia.org/wiki/Edward_Tufte), # Professor at Yale and one of the world's experts on visualizing quantitative # information, coined the term "chartjunk" for such unnecessary embellishments. # This graph is also an example of the "low data-to-ink ratio" that Tufte # deplores. # # ![Washington Post graph]({{ site.baseurl }}/images/bad_post_graph.png) # Most importantly, the horizontal axis of the graph is is not drawn to scale. # This has a significant effect on the shape of the bar graphs. When drawn to # scale and shorn of decoration, the graphs reveal trends that are quite # different from the apparently linear growth in the original. The elegant graph # below is due to <NAME>aka, one of the originators of the statistical system # R. # # ![Ross Ihaka's version of Post graph]({{ site.baseurl }}/images/ihaka_fixed_post_graph.png) # # In the period 1939 to 1963, the doctors' incomes went up from \$3,262 to # \$25,050. So during that period the average increase in income per year was # about \$900. (25050 - 3262)/(1963 - 1939) # In Ross Ihaka's graph you can see that in this period, the doctors' incomes # rise roughly linearly at a fairly steady rate. That rate is about \$900, as we # have just calculated. # # But in the period 1963 to 1976, the rate is more than three times as high: (62799 - 25050)/(1976 - 1963) # That is why the graph rises much more steeply after 1963. # This chapter introduces many types of expressions. Learning to program # involves trying out everything you learn in combination, investigating the # behavior of the computer. What happens if you divide by zero? What happens if # you divide twice in a row? 
You don't always need to ask an expert (or the # Internet); many of these details can be discovered by trying them out # yourself. # # {% data8page Expressions %}
notebooks/02/Expressions.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# ## Single Subject viewer: version v1.3, April 21 - May 18, 2021
# ## fistula data, thresholded, binarized and segmented

# modifications: viewing for accuracy study
# comparison with 5 subjects grayscale, 2 with power Doppler data

# Data: all .npy format <br>
# (1) usn_cropped, <br>
# (2) binary_usn, <br>
# (3) fistulogram_cropped <br>
# (4) fistula_binary <br>

# ## only crop, threshold, and segmentation; no filtering

# Original August 20, 2020 - Sept 15, 2020 <br>
# Revised February, March, April - June 2021

# Author: William (<NAME> <br>
# For investigational use only and prepared for release under open source 3-clause BSD license <br>

# Libraries for data analysis, plotting, and interaction.
import os
import matplotlib.pyplot as plt
import numpy as np
import ipywidgets as widgets
from ipywidgets import interact, VBox, HBox

# Generate the file_list from the specified directory.
file_directory = './accuracy_test_np_data/'  # select appropriate directory path
substring = '.npy'  # select appropriate file type

file_list = []
all_files = os.listdir(file_directory)
for name in all_files:
    if substring in name:
        file_list.append(name)
file_list.sort()  # os.listdir() returns entries in arbitrary order

print('.npy file list \n')
for i in file_list:
    print(i)

# Manually set .npy files for selection and use: examples below.
file_directory = './accuracy_test_np_data/'  # select appropriate directory path

# subject 5 grayscale: example
usn_file_npy = 'sono_data_subject_5_grayscale.npy'
usn_binary_segment = 'usn_binary_subject_5_grayscale.npy'
fistulogram_cropped = 'fistulogram_subject_5.npy'
fistula_binary = 'fistulogram_binary_subject_5.npy'

# subject 5 Doppler: replace ultrasound images
# usn_file_npy = 'sono_data_subject_5_Doppler.npy'
# usn_binary_segment = 'usn_binary_subject_5_Doppler.npy'

# Load data.
pixel_data = np.load(file_directory + usn_file_npy)
vessel_segment = np.load(file_directory + usn_binary_segment)
fistulogram_data = np.load(file_directory + fistulogram_cropped)
fistulogram_segment = np.load(file_directory + fistula_binary)

shape = pixel_data.shape


# Ultrasound viewer: one slider over the frame index.
def m_plot_frame(frame_number):
    """Show one ultrasound frame (first channel) in grayscale."""
    plt.imshow(pixel_data[frame_number, :, :, 0], cmap='gray')
    return


# NOTE(review): this Label is created but never displayed in the layout.
widgets.Label(value='file: ' + usn_file_npy)
description_wider = {'description_width': 'initial'}
interact(m_plot_frame,
         frame_number=widgets.IntSlider(value=0,
                                        min=0, max=shape[0] - 1, step=1,
                                        description='frame range',
                                        style=description_wider),
         );


# +
# CAUTION
# Thresholding [SKIP this if already starting with a binary segment].
def threshold_frame(frame_number, threshold):
    """Display a frame with the display range clipped to (low, high).

    BUG FIX: use the `threshold` tuple that interact passes in, instead of
    reaching back into threshold_frame.widget.children[1].value, which relied
    on interact() having already attached `.widget` and on the child order.
    """
    low, high = threshold
    plt.imshow(pixel_data[frame_number, :, :, 0],
               vmin=low, vmax=high,
               cmap='gray')
    return


description_wider = {'description_width': 'initial'}
interact(threshold_frame,
         frame_number=widgets.IntSlider(min=0, max=shape[0] - 1, step=1, value=0,
                                        style=description_wider),
         threshold=widgets.IntRangeSlider(value=[20, 50],
                                          min=0, max=255, step=1,
                                          description='thresh_range',
                                          continuous_update=False,
                                          orientation='horizontal',
                                          readout_format='d'),
         );
# -

# # Now use "vessel_segment" data

# +
# Examine the vessel segment interactively.
def plot_vessel_segment(frame_number):
    """Show one frame of the binary vessel-segment volume."""
    frame = vessel_segment[frame_number, :, :]
    plt.imshow(frame, cmap='gray')
    plt.title('vessel segment data', loc='center')
    plt.xlabel('pixels')
    plt.ylabel('pixels')


description_wider = {'description_width': 'initial'}
interact(plot_vessel_segment,
         frame_number=widgets.IntSlider(min=0, max=len(vessel_segment) - 1,
                                        step=1, value=0, style=description_wider));
# -

# This one without morphologic, binary, median filtering.
z_max, y_max, x_max = vessel_segment.shape


# +
# Tomogram (sono-tomogram) t_plot_frame.
# Examine slices (note: we choose / focus on U-D (up - down)).
# ASPECT RATIOS (select approximate, based on sweep speed of transducer):
# aspect ratio range 0.12 to 0.5 for this study.
def t_plot_frame(top_view_slice):
    """Show one top-view slice of the vessel-segment volume, transposed."""
    frame = vessel_segment[:, top_view_slice, :]
    frame = frame.T
    plt.imshow(frame, cmap='gray', aspect=0.4)  # adjust aspect ratio
    plt.title('ultrasound "sono-tomography" data', loc='center')
    plt.xlabel('pixels')
    plt.ylabel('pixels')


description_wider = {'description_width': 'initial'}
interact(t_plot_frame,
         top_view_slice=widgets.IntSlider(min=0, max=y_max - 1, step=1, value=0,
                                          style=description_wider));


# +
# SONO ANGIOGRAM
# Select slices to sum together from the sono-tomogram.
# ASPECT RATIOS (approximate): range 0.12 to 0.5 for this study.
def frame_sum(top, bottom):
    """Sum the segment volume over slices [top, bottom) along axis 1."""
    sono_a = np.sum(vessel_segment[:, top:bottom, :], axis=1)
    return sono_a


def sono_frame(slice_range):
    """Display the sum of the selected slice range as a sono-angiogram."""
    top = slice_range[0]
    bottom = slice_range[1]
    frame = frame_sum(top, bottom)
    frame = frame.T
    plt.imshow(frame, cmap='gray', aspect=0.4)  # adjust aspect ratio
    plt.title('ultrasound sono-angiogram data', loc='center')
    plt.xlabel('pixels')
    plt.ylabel('pixels')


description_wider = {'description_width': 'initial'}
interact(sono_frame,
         slice_range=widgets.IntRangeSlider(value=[0, y_max - 1],
                                            min=0, max=y_max - 1, step=1,
                                            description='slice_sum range:',
                                            readout=True,
                                            continuous_update=False,
                                            style=description_wider));
# -

# # Fistulogram viewer

# +
def plot_frame(frame_number):
    """Show one frame of the cropped fistulogram volume."""
    frame = fistulogram_data[frame_number, :, :]
    plt.imshow(frame, cmap='gray')
    plt.title('fistulogram data: view frames', loc='center')
    plt.xlabel('pixels')
    plt.ylabel('pixels')


description_wider = {'description_width': 'initial'}
interact(plot_frame,
         frame_number=widgets.IntSlider(min=0, max=len(fistulogram_data) - 1,
                                        step=1, value=0, style=description_wider));


# +
# Fistulogram - binary viewer.
# Examine the fistula data, binary and segmented.
def plot_fistula_segment(frame_number):
    """Show one frame of the binary fistulogram segment."""
    frame = fistulogram_segment[frame_number, :, :]
    plt.imshow(frame, cmap='gray')
    plt.title('vessel segment data', loc='center')
    plt.xlabel('pixels')
    plt.ylabel('pixels')


description_wider = {'description_width': 'initial'}
interact(plot_fistula_segment,
         frame_number=widgets.IntSlider(min=0, max=len(fistulogram_segment) - 1,
                                        step=1, value=0, style=description_wider));
# -

# # FINI
3D_viewer_v1.3_fistulogram_binary_single_subject_analyze_published.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Introduzione alla programmazione Python by <NAME> # # ![python logo](img/Python_logo-large.png) # # Salve a tutti e benvenuti nella sezione di programmazione di **Starting Finance Club Torino**, in questa sezione vedremo come imparare a utilizzare python 3 per fare machine learning sia nella finanza che in altri aspetti, ma prima incominciamo con l'introdurre cosa sia python e quali sono le sue caratteristiche principali detto questo iniziamo subito! # # # # Storia di Python # # # Python è un linguaggio di programmazione dinamico ad alto livello creato da __[<NAME> Rosssum](https://en.wikipedia.org/wiki/Guido_van_Rossum")__ nel 1991 con l'idea di avere le seguenti caratteristiche: # - facile comprensione # - programmazione ad oggetti # - flessibile # # Nel corso del tempo alcune funzionalità sono state aggiunte e migliorate ed è ora uno dei linguaggi più utilizzati al mondo con applicazioni in diversi ambiti. In questa sezione noi ci focalizzeremo all'utilizzo di python 3, rilasciato dal 2008 e al momento la versione più diffusa di esso. # Ora ci focalizzeremo sulla comprensione delle sue proprietà e quali sono i concetti dietro ad essi. # # # Python è un linguaggio dinamico e ad alto livello # # Con questo ci si intende a riferire al fatto che python è un linguaggio che **non richiede l'uso di istruzioni programmate a livello macchina** come 0 e 1, infatti python si occupa di convertire lui stesso le informazioni fornite scritte nella sua lingua che è definita in maniera molto simile alle conversazioni che si farebbero in inglese dovendo specificare poco o nulla alla macchina! E' questo il motivo principale per cui questo linguaggio è facile da imparare scrivere e capire oltre che flessibile! 
# # # Python è un linguaggio programmato ad oggetti # # Questo concetto verrà poi analizzato in maniera più profonda in seguito, per il momento basta pensare ad questo concetto come un paradigma di programmazione in cui sono presenti degli **oggetti in grado di contenere dati o codice** che possono interagire tra di loro e permettendo ad ognuno di essi di compiere funzionalità proprie di esso, ma specificate da noi. # # # Python è un linguaggio interpretato # # A differenza di altri linguaggi che sono compilate ovvero viene prima letto per intero il file contente il codice e in seguito viene eseguito, **Python legge ed esegue riga per riga il codice**, questo rallenta la velocità della macchina nell'eseguire le istruzioni, ma permette di capire esattamente dove il codice sia andato storto. # # Ora che abbiamo saputo delle sue caratteristiche principali è il momento di capire come installare Python usando Anaconda. # # # Installazione Python usando Anaconda # # Il primo passo è andare sul sito di __[Anaconda](https://www.anaconda.com/products/individual)__ scorrere verso il basso fino a che non trovare gli Anaconda Installers: # # ![Anaconda Installer](img\Anacondainstaller.png) # Dopodichè scaricate la versione secondo il sistema operativo che avete ed eseguite il file scaricato, usando le impostazioni di default aspettate che l'installazione sia completata(questo potrebbe richiedere diversi minuti) dopo avere finito l'installazione lanciate l'installazione, per i diversi sistemi operativi potete riferirvi a questa __[guida](https://docs.anaconda.com/anaconda/user-guide/getting-started/)__, qualora aveste problemi di installazione potete riferivi a questo __[link](https://docs.anaconda.com/anaconda/install/)__. 
# # Il nostro primo programma in Python con Jupyter e Spider # # Ora che anaconda è stato installato è il momento di scrivere il nostro primo codice che sarà quello di far stampare alla macchina Ciao Mondo!, in primo passo avviamo anaconda navigator e dovrebbe uscire una schermata del genere: # ![navigator](img\navigator.png) # Ora noi useremo principalmente __[Jupyter Notebook](https://jupyter.org/)__, ma potete anche usare un altro software per programmare come __[Spider](https://www.spyder-ide.org/)__ o __[Vscode](https://code.visualstudio.com/)__ a voi la scelta ora mostrerò come usarli. # # Partendo da **Jupyter** cliccate su **Launch** dell'icona Jupyter notebook, dovrebbe aprirvi una pagina del vostro browser di questo tipo: # # ![jupyter](img\jupyter.png) # # Andate nella cartella in cui volete creare un notebook o create una facendo New $\rightarrow$ folder e chiamate la cartella come volete, creiamo ora un nuovo notebook di python 3 facendo New $\rightarrow$ python 3: # # ![file guide](img\fileguide.png) # # Ora dovrebbe aprirsi una nuova pagina di questo tipo: # # ![new notebook](img\newnotebook.png) # # Scrivete dentro la cella: print("C<NAME>!") #print dice alla macchina di stampare il contenuto all'interno delle parentesi #in questo caso il contenuto è una stringa e può essere denotato con "messaggio" o 'messaggio' #vedremo questo in maniera più profonda dopo # Schiacciate Shift + Enter o cliccate Run per ottenere scritto sotto il messaggio racchiuso dentro print, nella stessa maniera potete avviare Spider, scrivere print("Ciao Mondo!") e otterrete nella console di destra il risultato desiderato! # # ![spider](img\spider.png) # # *** # COMPLIMENTI AVETE ORA SCRITTO ED ESEGUITO IL VOSTRO PRIMO CODICE IN PYTHON! # Nelle lezioni successive vedremo cosa sono le variabili, i tipi di dati, le funzioni e molto altro!
.ipynb_checkpoints/Introduzione a python-checkpoint.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# +
# BUGFIX/idiom: `import numpy as np` was buried mid-notebook between two
# plotting cells; all imports now live in this top cell so any cell can be
# re-run independently after a kernel restart.
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt

df = pd.read_csv('cleaned_and_wrangled.csv')

# +
### My baselines:
df['Product'].value_counts(normalize=True)

### I want to predict what factors cause people to purchase accessories
# -

print(df.shape)

df.head(10)

plt.scatter(df['Product'], df['Product Revenue'], alpha=.01);

# log-scale the session counts to spread out the heavy right tail
plt.scatter(np.log(df['Count of Sessions']), df['Product Revenue'], alpha=.01);

plt.scatter(np.log(df['Count of Sessions']), df['Product'], alpha=.01);

# +
test_frame = df[df['Product'] == 'Boat A']
test_frame['Product Revenue'].value_counts()

#### There are a few weird outliers, and quite a few where the person bought two boats. I need to find a way to split this into 2 separate transactions.
# +
test_frame = df[df['Product'] == 'Accessory']
test_frame['Product Revenue'].value_counts(ascending=False) > 1000

# +
### TODO
### Need to turn dates into datetime objects
## Need to turn prices into numbers not strings
## Double check my wrangling and cleaning functions to make sure that the buckets are accurate (consistent with the new metrics I just imported this evening)
### Could get twice as much data if I could figure out what's up with the 'Product Revenue' metric on Google Analytics
### Add in more features by exporting more csv's from Google Analytics

# +
#type(test_frame['Product Revenue'][0])
#test_frame['Product Revenue'].strip('$')
#test_frame
# -
EDA #3.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 (ipykernel) # language: python # name: python3 # --- # # Data Analysis and Machine Learning Applications for Physicists # *Material for a* [*University of Illinois*](http://illinois.edu) *course offered by the* [*Physics Department*](https://physics.illinois.edu). *This content is maintained on* [*GitHub*](https://github.com/illinois-mla) *and is distributed under a* [*BSD3 license*](https://opensource.org/licenses/BSD-3-Clause). # # [Table of contents](Contents.ipynb) # %matplotlib inline import matplotlib.pyplot as plt import seaborn as sns; sns.set() import numpy as np import pandas as pd import warnings warnings.filterwarnings('ignore') # ## Load Data from mls import locate_data a_data = pd.read_hdf(locate_data('cluster_a_data.hf5')) b_data = pd.read_hdf(locate_data('cluster_b_data.hf5')) c_data = pd.read_hdf(locate_data('cluster_c_data.hf5')) d_data = pd.read_hdf(locate_data('cluster_d_data.hf5')) cluster_3d = pd.read_hdf(locate_data('cluster_3d_data.hf5')) cosmo_data = pd.read_hdf(locate_data('cosmo_data.hf5')) # ## SciKit Learn # This will be our first time using the [SciKit Learn package](http://scikit-learn.org/stable/). We don't include it in our standard preamble since it contains many modules (sub-packages). Instead, we import each module as we need it. The ones we need now are: from sklearn import preprocessing, cluster # ## Find Structure in Data # The type of structure we can look for is "clusters" of "nearby" samples, but the definition of these terms requires some care. # # ### Distance between samples # # In the simplest case, all features $x_{ij}$ have the same (possibly dimensionless) units, and the natural distance between samples (rows) $j$ and $k$ is: # # $$ \Large # d(j, k) = \sum_{\text{features}\,i} (x_{ji} - x_{ki})^2 \; . 
# $$ # # However, what if some columns have different units? For example, what is the distance between: # # $$ \Large # \left( 1.2, 0.4~\text{cm}, 5.2~\text{kPa}\right) # $$ # # and # # $$ \Large # \left( 0.7, 0.5~\text{cm}, 4.9~\text{kPa}\right) # $$ # # ? # # ML algorithms are generally unit-agnostic, so will happily combine features with different units but that may not be what you really want. # ### Whitening transformation # # One reasonable solution is to normalize each feature with the [whitening transformation](https://en.wikipedia.org/wiki/Whitening_transformation): # # $$ \Large # x \rightarrow (x - \mu) / \sigma # $$ # # where $\mu$, $\sigma$ are the mean and standard deviation of the original feature values. # # It is called "whitening" because it transforms the input vector into a [white noise vector](https://en.wikipedia.org/wiki/White_noise) # # ![WN1](img/Clustering/440px-White_noise.png) # # ![WN2](img/Clustering/White-noise-mv255-240x180.png) # # Generally speaking, a whitening transformation is a linear transformation that transforms a vector of random variables with a known covariance matrix into a set of new variables whose covariance is the identity matrix, meaning that they are uncorrelated and each have unit variance. # # Suppose that $X$ is a column vector of random data with a non-singular covariance matrix $M$. Then the transformation # # $$ \Large # Y = WX # $$ # # with a whitening matrix $W$ satisfying the condition # # $$ \Large # W^T W = M^{-1} # $$ # # yields the whitened random vector $Y$ with unit diagonal convariance. There are an infinite nunber of possible whitening matrices that satisfy the condition above. One common choice is via the Principle Component Analyis (PCA) method which utilizes the eigen-system of $M$ to whiten $X$. We will come back to this in a bit when we talk more about dimensionality reduction. # ### Why whiten the inputs? 
# # In general, learning algorithms benefit from standardization of the data set to minimize differences in the mean and variance of the input features. If some outliers are present in the set, robust scalers or transformers are more appropriate. We will learn more about this when we talk about the PCA method, but you can read [Importance of Feature Scaling](http://scikit-learn.org/stable/auto_examples/preprocessing/plot_scaling_importance.html#sphx-glr-auto-examples-preprocessing-plot-scaling-importance-py) if you want to read more now. # The [sklearn.preprocessing module](http://scikit-learn.org/stable/modules/preprocessing.html) automates this process with: cosmo_data.describe() cosmo_normed = cosmo_data.copy() cosmo_normed[cosmo_data.columns] = preprocessing.scale(cosmo_data) cosmo_normed.describe() # However, this may discard useful information contained in the relative normalization between features. To normalize only certain columns use, for example: cosmo_normed = cosmo_data.copy() for colname in 'ln10^{10}A_s', 'H0': cosmo_normed[colname] = preprocessing.scale(cosmo_data[colname]) # ## What is a "cluster"? # In the simplest case (a), clusters are well separated by a line (in 2D, or hyperplane in more dimensions) and can be unambiguously identified by looking only at the distance between pairs of samples. # # In practice, clusters might overlap leading to ambiguities (b), or the clusters we expect to find might require considering groups of more than two samples at a time (c), or might have a non-linear separation boundary (d). # # ![Cluster Types](img/Clustering/cluster_types.png) # ## Examples of Clustering in Physics # # The ability to cluster data from physics instruments is critical to being able to extract important features from the data on the road to making inference about nature. # # ### Physics at the LHC # # There are many examples of data clustering in the study of particle collisions at the LHC. 
One important exampe is the clustering of data from calorimeter detectors to find the remants of quark or gluon production -- called "jets". A jet is a narrow cone of hadrons and other particles produced by the hadronization of a quark or gluon in a particle physics or heavy ion experiment. Particles carrying a color charge, such as quarks, cannot exist in free form because of QCD confinement which only allows for colorless states. # # ![EventDisplay](img/Clustering/ATLAS_event_display_vp1_run266904_evt25855182_2015-06-03T13-41-48_b.png) # # ![TwoJets](img/Clustering/2jets.jpg) # # ### Astronomy # # Astrophysical objects have a variety of distinct objects and emissions that reflect the richness and beauty of our Universe. So, its not surprising that clustering algorthims to identify and ultimately understand astrophysical objects play a central role in Astronomy. # # As an example, astronomers use properties of Gamma Ray Bursts (GRBs) such as their location in the sky, arrival time, duration, fluence, spectral hardness to find subtypes/classes of events: # # ![](img/Clustering/GRBs.png) # # ![](img/Clustering/times.png) # # ## K-means Clustering # The [K-means algorithm](https://en.wikipedia.org/wiki/K-means_clustering) is fast and robust, but assumes that your data consists of roughly round clusters of the same size (where the meanings of "round" and "size" depend on how your data is scaled). 
# # Most sklearn algorithms use a similar calling pattern: # ``` # result = module.ClassName(..args..).fit(data) # ``` # For the [KMeans algorithm](http://scikit-learn.org/stable/modules/generated/sklearn.cluster.KMeans.html#sklearn.cluster.KMeans): a_fit = cluster.KMeans(n_clusters=2).fit(a_data) # We will use the following function to display 2D cluster results (don't worry about the [details](https://seaborn.pydata.org/tutorial/color_palettes.html) unless you are interested): def display(data, fit): n_clusters = len(np.unique(fit.labels_)) # Pick good colors to distinguish the different clusters. import matplotlib.colors cmap = matplotlib.colors.ListedColormap( sns.color_palette("husl", n_clusters).as_hex()) plt.scatter(data.iloc[:, 0], data.iloc[:, 1], s=10, c=fit.labels_, cmap=cmap) # Use standard axes to match the plot above. plt.xlim(-9, +9) plt.ylim(-5, +5) plt.gca().set_aspect(1.) display(a_data, a_fit) # + [markdown] solution2="shown" solution2_first=true # **EXERCISE:** Use KMeans to fit the three other (b,c,d) 2D datasets with `n_clusters=2` and generate similar plots. Which fits give the expected results? # + solution2="shown" b_fit = cluster.KMeans(n_clusters=2).fit(b_data) display(b_data, b_fit) # + solution2="shown" c_fit = cluster.KMeans(n_clusters=2).fit(c_data) display(c_data, c_fit) # + solution2="shown" d_fit = cluster.KMeans(n_clusters=2).fit(d_data) display(d_data, d_fit) # + [markdown] solution2="shown" # The fit results look reasonable for (b), although the sharp dividing line between the two clusters looks artificial. # # The fit results for (c) and (d) do not match what we expect because KMeans only considers one pair at a time, so cannot identify larger scale patterns that are obvious by eye. # + # Add your solution here... 
# - # ### Hyperparameters # Algorithms have many parameters that influence their results for a given dataset, but these fall into two categories: # - Parameters whose values are determined by the data during the fitting process. # - Parameters which must be externally set. # # We refer the second group as "hyperparameters" and set their values during the "model selection" process, which we will discuss later. # + [markdown] solution2="shown" solution2_first=true # **DISCUSS:** Are all of the arguments of the [KMeans constructor](http://scikit-learn.org/stable/modules/generated/sklearn.cluster.KMeans.html#sklearn.cluster.KMeans) hyperparameters? # + [markdown] solution2="shown" # In principle, yes, but in practice some of these arguments will have no (or minimal) impact on the algorithm result under normal conditions. The arguments that are most clearly hyperparameters are: # - `n_clusters`, `algorithm`, `tol` # # The arguments that are most clearly not hyperparameters are: # - `verbose`, `precompute_distances`, `n_jobs` # # The remaining arugments are in the gray area. In general, it is prudent to experiment with your actual data to identify which arguments affect your results significantly. # + [markdown] solution2="hidden" solution2_first=true # **EXERCISE:** Fit dataset (b) with the `n_clusters` hyperparameter set to 3 and display the results. Comparing with the 2-cluster fit above, by eye, what do think is the "true" number of clusters? How might you decide between 2 and 3 more objectively? # + solution2="hidden" b_fit_3 = cluster.KMeans(n_clusters=3).fit(b_data) display(b_data, b_fit_3) # + [markdown] solution2="hidden" # The plot above makes a convincing case (to me, at least) that there are three clusters. However, the "truth" in this case is two clusters. # # This illustrates the dangers of superimposing a fit result on your data: it inevitably "draws your eye" and makes the fit more credible. 
Look out for examples of this when reading papers or listening to talks! # + # Add your solution here... # - # ### Clustering in many dimensions # An algorithm to find clusters in 2D data is just automating what you could already by eye. However, most clustering algorithms also work well with higher dimensional data, where the clusters might not be visible in any single 2D projection. fit_3d = cluster.KMeans(n_clusters=3).fit(cluster_3d) cluster_3d['label'] = fit_3d.labels_ sns.pairplot(cluster_3d, vars=('x0', 'x1', 'x2'), hue='label'); # These clusters look quite arbitrary in each of the 2D scatter plots. However, they are actually very well separated, as we can see if we rotate the axes: R = np.array( [[ 0.5 , -0.5 , -0.707], [ 0.707, 0.707, 0. ], [ 0.5 , -0.5 , 0.707]]) rotated_3d = cluster_3d.copy() rotated_3d[['x0', 'x1', 'x2']] = cluster_3d[['x0', 'x1', 'x2']].dot(R) sns.pairplot(rotated_3d, vars=('x0', 'x1', 'x2'), hue='label'); # This example is contrived, but the lesson is that clustering algorithms can discover higher-dimensional structure that you might miss with visualization. # ## General comments on ML Algorithms # Now that we have introduced our first ML algorithm, this is a good time for some general comments. # Most ML algorithms have some common features: # - They seek to maximize (or minimize) some goal function $f(\theta, D)$ of the (fixed) data $D$, for some (unknown) parameters $\theta$. # - The goal function embodies some model (perhaps only implicitly) of what the data is expected to look like. # # Questions to ask about the goal function: # - Is there a single global optimum by construction? (i.e., is $\pm f$ a [convex function](https://en.wikipedia.org/wiki/Convex_function)?) # - If not, might there be multiple local optima? # # Questions to ask about how the algorithm optimizes its goal function: # - Is it exact or approximate? # - If it is approximate, is it also iterative? If so, what are the convergence criteria? 
# - How does the running time scale with the number of samples and number of features? # The goal function of the KMeans algorithm is: # # $$ \Large # \sum_{i=1}^n\, \sum_{c_j = i}\, \left| x_j - \mu_i\right|^2 # $$ # # where $c_j = 1$ if sample $j$ is assigned to cluster $i$ or otherwise $c_j = 0$, and # # $$ \Large # \mu_i = \sum_{c_j = i}\, x_j # $$ # # is the mean of samples assigned to cluster $i$. The outer sum is over the number of clusters $n$ and $j$ indexes samples. If we consider sample $x_j$ to be a vector, then its elements are the feature values. # + [markdown] solution2="hidden" solution2_first=true # **DISCUSS:** What are the parameters of the KMeans goal function? How many parameters are there? # + [markdown] solution2="hidden" # The parameters are the binary values $c_j$ and there is one per sample (row). Note that the number of parameters is independent of the number of features (columns) in the data. # # The number of clusters $n$ is a hyperparameter since it is externally set and not adjusted by the algorithm in response to the data. # # The means $\mu_i$ are not independent parameters since their values are fixed by the $c_j$ (given the data). # - # ### Supervised vs Unsupervised # ML algorithms come in two flavors, depending on whether they require some training data where you already know the answer ("supervised") or not ("unsupervised"). Clustering algorithms are unsupervised. # # An advantage of unsupervised ML is that it works with any input data, and can discover patterns that you might not already know about (as in the 3D example above). Even when you have training data available, an unsupervised algorithm can still be useful. # # The disadvantage of unsupervised learning is that we cannot formulate objective measures for how well an algorithm is performing, so the results are always somewhat subjective. 
# ## Expectation Maximization # The KMeans algorithm uses an iterative solution based on the [Expectation-Maximization (EM)](https://en.wikipedia.org/wiki/Expectation-maximization_algorithm) principle. This is a powerful approach used by many algorithms, which we will revist several times during the course. # ## Other Clustering Methods # We have focused on KMeans as a prototypical clustering algorithm, but there are [many others to chose from](http://scikit-learn.org/stable/auto_examples/cluster/plot_cluster_comparison.html). # # We will finish this section with some brief experimentation with two alternatives that use more global information than KMeans, so are better suited to examples (c) and (d) above: # - Spectral clustering: [sklearn](http://scikit-learn.org/stable/modules/generated/sklearn.cluster.SpectralClustering.html), [wikipedia](https://en.wikipedia.org/wiki/Spectral_clustering). # - Density-based spatial clustering of applications with noise (DBSCAN): [sklearn](http://scikit-learn.org/stable/modules/clustering.html#spectral-clustering), [wikipedia](https://en.wikipedia.org/wiki/DBSCAN). # + [markdown] solution2="hidden" solution2_first=true # **EXERCISE:** Use `cluster.SpectralClustering` to fit `c_data` and `d_data` and display the results. Adjust the default hyperparameters, if necessary, to obtain the expected results. # + solution2="hidden" c_fit = cluster.SpectralClustering(n_clusters=2).fit(c_data) display(c_data, c_fit) # + solution2="hidden" d_fit = cluster.SpectralClustering(n_clusters=2, gamma=2.0).fit(d_data) display(d_data, d_fit) # + # Add your solution here... # + [markdown] solution2="hidden" solution2_first=true # **EXERCISE:** Use `cluster.DBSCAN` to fit `c_data` and `d_data` and display the results. Adjust the default hyperparameters, if necessary, to obtain the expected results. 
# + solution2="hidden" c_fit = cluster.DBSCAN(eps=1.5).fit(c_data) display(c_data, c_fit) # + solution2="hidden" d_fit = cluster.DBSCAN().fit(d_data) display(d_data, d_fit) # + # Add your solution here...
notebooks/Clustering.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # Dataset preparation
#
# This short script takes the original Kaggle dataset
# [House Sales in King County](https://www.kaggle.com/harlfoxem/housesalesprediction)
# and splits it into 3 files:
# - historic: simulates the historic sales database (houses already sold, for
#   which we have every feature and the price they sold at),
# - to_stream: serves as the data source for a pseudo-random generator of
#   "sold houses" (every feature plus the just-sold price, but at the moment
#   they enter the stream they have not yet been fed to the model),
# - unknown_target: simulates a database of houses currently for sale (we have
#   the features, but they have not sold yet so their target is unknown).
#
# Each dataset holds, respectively, 40%, 50% and 10% of the original.
#
# We made a small initial discard of several columns to simplify the exercise.

import pandas as pd
import numpy as np

from ejercicios.houses import SEED, HISTORIC, TO_STREAM, UNKNOWN

# fix the RNG so the three output files are reproducible across runs
np.random.seed(SEED)

# +
ds = pd.read_csv('data/kc_house_data.csv')

# columns discarded up front to keep the exercise simple; a single
# declarative list replaces the original chain of features.remove() calls
# (same surviving columns, same order)
dropped_columns = ['date', 'condition', 'grade', 'zipcode', 'lat', 'long',
                   'sqft_basement', 'sqft_above', 'sqft_living15',
                   'sqft_lot15', 'yr_renovated']
features = [column for column in ds.columns if column not in dropped_columns]

ds = ds[features]
# -

# ~50/50 split between the static pool and the stream source
mask = np.random.rand(len(ds)) < 0.5
static = ds[mask]
to_stream = ds[~mask]

# +
# the static pool splits ~80/20 into historic sales and unsold houses
mask = np.random.rand(len(static)) < 0.8
historic = static[mask]
unknown_target = static[~mask]

# the unsold houses must not expose the target column
features.remove('price')
unknown_target = unknown_target[features]
# -

historic.to_csv(HISTORIC, index=False)
to_stream.to_csv(TO_STREAM, index=False)
unknown_target.to_csv(UNKNOWN, index=False)
python/ejercicios/houses/build_datasets.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# ! wget -N http://archive.ics.uci.edu/ml/machine-learning-databases/abalone/abalone.data

import numpy as np
import pandas as pd
from matplotlib import pyplot as plt

# BUGFIX: these two were originally imported only later, under
# "## Classification", yet are used first in the "## Analysis" section —
# a fresh top-to-bottom run raised NameError. Hoisted above first use.
from sklearn.preprocessing import StandardScaler
from sklearn.model_selection import train_test_split

# %matplotlib inline

data = pd.read_csv('abalone.data',
                   names=['Sex', 'Length', 'Diameter', 'Height',
                          'Whole weight', 'Shucked weight',
                          'Viscera weight', 'Shell weight', 'Rings'])
data.head()

# Now let's convert categorical feature 'Sex' to numerical via one-hot encoding

data = pd.get_dummies(data)
data.head()

# ## Analysis

data.describe()

data.corr()

# standardize the features; 'Rings' (age proxy) is the target
X = data.drop(columns=['Rings'])
X = StandardScaler().fit_transform(X)
y = data['Rings']
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=.33,
                                                    random_state=17)

# ## Classification

from sklearn.neighbors import KNeighborsClassifier
from sklearn.neural_network import MLPClassifier
from sklearn.svm import SVC
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import RandomForestClassifier, AdaBoostClassifier


def score(model):
    """Fit *model* on the train split and print train/test scores.

    model.score() is accuracy for classifiers and R^2 for regressors,
    so this helper works for both halves of the notebook.
    """
    model.fit(X_train, y_train)
    print('Train score: {}'.format(model.score(X_train, y_train)))
    print('Test score: {}'.format(model.score(X_test, y_test)))


# ## K-Neighbors

score(KNeighborsClassifier(29))

# ## SVM + linear kernel

score(SVC(kernel='linear'))

# ## Decision tree

score(DecisionTreeClassifier(max_depth=4))

# ## Random forest

score(RandomForestClassifier(max_depth=4, n_estimators=10, max_features=2))

# ## Multi-layer perceptron

score(MLPClassifier(alpha=1))

# ## AdaBoost

score(AdaBoostClassifier())

# ## Regression

from sklearn.linear_model import LinearRegression
from sklearn.svm import SVR

# ## Linear regression

score(LinearRegression())

# ## SVM + RBF kernel

score(SVR(kernel='rbf', C=1e3, gamma=0.1))

# ## SVM + polynomial kernel

score(SVR(kernel='poly', C=1e3, degree=2))
ml/lab.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# Demo notebook: build a 1-D scattering ("cascade") transform out of blusky
# components wrapped as Keras layers, then run one window of a sample
# time series through the resulting model.

# + deletable=true editable=true
import re
import keras.backend as keras_backend
from keras.layers import Conv1D, Lambda, Add
import numpy as np
import matplotlib.pylab as plt
from traits.api import Enum, HasStrictTraits, Int, Instance, List, Tuple

from blusky.transforms.apply_father_wavelet_1d import ApplyFatherWavlet1D
from blusky.transforms.cascade_tree import CascadeTree
from blusky.transforms.cascade_1d import Cascade1D
from blusky.transforms.default_decimation import (NoDecimation,
                                                  DefaultDecimation)
from blusky.transforms.i_decimation_method import IDecimationMethod
from blusky.wavelets.i_wavelet_1d import IWavelet1D
from blusky.utils.pad_1d import pad_to_log2, Pad1D

# + deletable=true editable=true
# Load the example time series shipped with the blusky datasets package;
# keep only the second CSV column as the signal.
from os import path
import blusky.datasets as datasets
ts_path = path.join( path.dirname(datasets.__file__), "timeseries.csv" )
my_data = np.genfromtxt(ts_path, delimiter=',', skip_header=1)
ts = my_data[:,1]

# + deletable=true editable=true
len(ts)

# + deletable=true editable=true
plt.figure()
plt.plot(ts)
plt.show()

# + deletable=true editable=true
from blusky.wavelets.wavelet_factories_1d import (vanilla_gabor_1d,
                                                  vanilla_morlet_1d)
from keras.layers import Input
from keras import Model

# Transform parameters: window length N, scattering order, and number of
# scales J chosen so the largest wavelet fits the window (log2(N) - 2).
N = 128
order = 1
sample_rate = 0.001
J = int(np.log2(N) - 2)

# One Morlet wavelet per scale j; a Gabor wavelet at the coarsest scale
# presumably serves as the low-pass "father" wavelet — confirm in blusky docs.
wavelets = [vanilla_morlet_1d(sample_rate, j=i) for i in range(0,J)]
father_wavelet = vanilla_gabor_1d(sample_rate, j=J-1)

deci = DefaultDecimation(oversampling=1)
inp = Input(shape=(N,1))

# pad
pad_1d = Pad1D(wavelets, decimation=deci)
padded = pad_1d.pad(inp)

#
cascade_tree = CascadeTree(padded, order=order)
cascade = Cascade1D(decimation=deci)
convs = cascade.transform(cascade_tree, wavelets=wavelets)

# Create layers to remove padding
cascade_tree = CascadeTree(padded, order=order)
cascade_tree.generate(wavelets, pad_1d._unpad_same)
unpad = cascade_tree.get_convolutions()

# Remove the padding
unpadded_convs = [i[1](i[0]) for i in zip(convs, unpad)]

# Smooth each cascade output with the father wavelet to obtain the
# scattering coefficients, then wrap the whole graph in a Keras model.
appl = ApplyFatherWavlet1D(wavelet=father_wavelet, J=J-1, img_size=(N,),
                           sample_rate=sample_rate)
sca_transf = appl.convolve(unpadded_convs)

model = Model(inputs=inp, outputs=sca_transf)
#model.summary()

# NOTE(review): assumes len(ts) == N + 124 so ts[124:] exactly fills one
# window of length N — confirm against the length printed below.
data = np.zeros((1,N,1))
data[0,:,0] = ts[124:]
result = model.predict(data)
print(np.squeeze(np.array(result)))

# + deletable=true editable=true
len(ts[124:])

# + deletable=true editable=true
notebooks/Hack - Cascade 1D.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # name: python3 # --- # # Policy Gradient # # # ## Learning Policies Directly # # ### Constraints on the Policy Parameterization # # $$ # \begin{gather*} # \pi(a|s,\mathbf{\theta}) \geq 0 & \forall a \in \mathcal{A},s \in \mathcal{S}\\ # \sum_{a\in\mathcal{A}}{\pi(a|s,\mathbf{\theta})=1} & \forall s \in \mathcal{S} # \end{gather*} # $$ # # ### The Softmax Policy Parameterization # # $$ # \pi(a|s,\mathbf{\theta})\doteq\frac{\underbrace{e^{h(s,a,\mathbf{\theta})}}{\text{Action Preference}}}{\sum_{b\in\mathcal{A}}{e^{h(s,b,\mathbf{\theta})}}} # $$ # # ## Advantages of Policy Parameterization # # ### Parameterized stochastic policies are useful because # # - They can autonomously *decrease exploration* over time # - They can avoid failures due to deterministic policies with *limited function approximation* # - Sometimes the policy is less complicated than the value function # ## The Objective for Learning Policies # # ### The Average Reward Objective # # $$ # r(\pi)=\underbrace{\sum_{s}\mu(s)\underbrace{\sum_{a}\pi(a|s,\mathbf{\theta})\underbrace{\sum_{s',r}p(s',r|s,a)r}_{\mathbb{E}[R_t|S_t=s,A_t=a]}}_{\mathbb{E}_\pi[R_t|S_t=s]}}_{\mathbb{E}_\pi[R_t]} # $$ # # ### Optimizing The Average Reward Objective # # - Policy-Gradient Method # # $$ # \nabla r(\pi)=\nabla\sum_{s}\mu(s)\sum_{a}\pi(a|s,\mathbf{\theta})\sum_{s',r}p(s',r|s,a)r # $$ # # ### The Challenge of Policy Gradient Methods # # - We can use the average reward as an objective for policy optimization # # $$ # \nabla_\mathbf{\theta} r(\pi)=\nabla_\mathbf{\theta}\sum_{s}\underbrace{\mu(s)}_{\text{Depends on }\mathbf{\theta}}\sum_{a}\pi(a|s,\mathbf{\theta})\sum_{s',r}p(s',r|s,a)r # $$ # # $$ # \begin{align*} # \nabla_\mathbf{w}\overline{VE} &= \nabla_\mathbf{w}\sum_{s}\mu(s)[v_\pi(s)-\hat{v}(s,\mathbf{w})]^2 \\ # &= 
# \sum_{s}\mu(s)\nabla_\mathbf{w}[v_\pi(s)-\hat{v}(s,\mathbf{w})]^2
# \end{align*}
# $$
#
#
# ## Example: Short corridor with switched actions

# +
#######################################################################
# Copyright (C)                                                       #
# 2018 <NAME> (<EMAIL>)                                               #
# 2018 <NAME>(<EMAIL>)                                                #
# Permission given to modify the code as long as you keep this        #
# declaration at the top                                              #
#######################################################################
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
from tqdm import tqdm

# %matplotlib inline


def true_value(p):
    """
    True value of the first state

    Args:
        p (float): probability of the action 'right'.
    Returns:
        True value of the first state.
        The expression is obtained by manually solving the easy linear system
        of Bellman equations using known dynamics.
    """
    return (2 * p - 4) / (p * (1 - p))


class ShortCorridor:
    """
    Short corridor environment, see Example 13.1
    """
    def __init__(self):
        self.reset()

    def reset(self):
        # every episode starts in the leftmost state (state 0)
        self.state = 0

    def step(self, go_right):
        """
        Args:
            go_right (bool): chosen action
        Returns:
            tuple of (reward, episode terminated?)
        """
        if self.state == 0 or self.state == 2:
            if go_right:
                self.state += 1
            else:
                # moving left from state 0 keeps the agent in state 0
                self.state = max(0, self.state - 1)
        else:
            # state 1 has switched actions: "right" moves left and vice versa
            if go_right:
                self.state -= 1
            else:
                self.state += 1

        if self.state == 3:
            # terminal state
            return 0, True
        else:
            # -1 reward per step encourages reaching the terminal state quickly
            return -1, False


def softmax(x):
    # subtract the max before exponentiating for numerical stability
    t = np.exp(x - np.max(x))
    return t / np.sum(t)


class ReinforceAgent:
    """
    ReinforceAgent that follows algorithm
    'REINFORCE Monte-Carlo Policy-Gradient Control (episodic)'
    """
    def __init__(self, alpha, gamma):
        # set values such that initial conditions correspond to left-epsilon greedy
        self.theta = np.array([-1.47, 1.47])
        self.alpha = alpha
        self.gamma = gamma
        # first column - left, second - right
        # (one-hot action features x_h(s, a); the state is not featurized)
        self.x = np.array([[0, 1],
                           [1, 0]])
        self.rewards = []
        self.actions = []

    def get_pi(self):
        # softmax over action preferences h = theta^T x
        h = np.dot(self.theta, self.x)
        t = np.exp(h - np.max(h))
        pmf = t / np.sum(t)
        # never become deterministic,
        # guarantees episode finish
        imin = np.argmin(pmf)
        epsilon = 0.05

        if pmf[imin] < epsilon:
            pmf[:] = 1 - epsilon
            pmf[imin] = epsilon

        return pmf

    def get_p_right(self):
        # probability of choosing 'right' under the current policy
        return self.get_pi()[1]

    def choose_action(self, reward):
        # reward is None only on the very first step of an episode
        if reward is not None:
            self.rewards.append(reward)

        pmf = self.get_pi()
        go_right = np.random.uniform() <= pmf[1]
        self.actions.append(go_right)

        return go_right

    def episode_end(self, last_reward):
        self.rewards.append(last_reward)

        # learn theta
        # G[i] is the discounted return from step i, computed backwards
        G = np.zeros(len(self.rewards))
        G[-1] = self.rewards[-1]

        for i in range(2, len(G) + 1):
            G[-i] = self.gamma * G[-i + 1] + self.rewards[-i]

        gamma_pow = 1

        for i in range(len(G)):
            j = 1 if self.actions[i] else 0
            pmf = self.get_pi()
            # grad ln pi for a softmax policy: x_h(s,a) - sum_b pi(b|s) x_h(s,b)
            grad_ln_pi = self.x[:, j] - np.dot(self.x, pmf)
            update = self.alpha * gamma_pow * G[i] * grad_ln_pi

            self.theta += update
            gamma_pow *= self.gamma

        # clear the episode buffers for the next episode
        self.rewards = []
        self.actions = []


class ReinforceBaselineAgent(ReinforceAgent):
    """
    REINFORCE with a learned scalar baseline w (state value estimate);
    only episode_end differs from the plain ReinforceAgent.
    """
    def __init__(self, alpha, gamma, alpha_w):
        super(ReinforceBaselineAgent, self).__init__(alpha, gamma)
        self.alpha_w = alpha_w
        self.w = 0

    def episode_end(self, last_reward):
        self.rewards.append(last_reward)

        # learn theta
        G = np.zeros(len(self.rewards))
        G[-1] = self.rewards[-1]

        for i in range(2, len(G) + 1):
            G[-i] = self.gamma * G[-i + 1] + self.rewards[-i]

        gamma_pow = 1

        for i in range(len(G)):
            # update the baseline towards the observed return
            self.w += self.alpha_w * gamma_pow * (G[i] - self.w)

            j = 1 if self.actions[i] else 0
            pmf = self.get_pi()
            grad_ln_pi = self.x[:, j] - np.dot(self.x, pmf)
            # same update as REINFORCE but with the baseline subtracted from G
            update = self.alpha * gamma_pow * (G[i] - self.w) * grad_ln_pi

            self.theta += update
            gamma_pow *= self.gamma

        self.rewards = []
        self.actions = []


def trial(num_episodes, agent_generator):
    """
    Run num_episodes episodes of the agent produced by agent_generator
    on ShortCorridor; returns the array of per-episode total rewards.
    """
    env = ShortCorridor()
    agent = agent_generator()

    rewards = np.zeros(num_episodes)
    for episode_idx in range(num_episodes):
        rewards_sum = 0
        reward = None
        env.reset()

        while True:
            go_right = agent.choose_action(reward)
            reward, episode_end = env.step(go_right)
            rewards_sum += reward

            if episode_end:
                agent.episode_end(reward)
                break

        rewards[episode_idx] = rewards_sum

    return rewards


# +
epsilon = 0.05
fig, ax = plt.subplots(1, 1)

# Plot a graph
p = np.linspace(0.01, 0.99, 100)
y = true_value(p)
ax.plot(p, y, color='red')

# Find a maximum point, can also be done analytically by taking a derivative
imax = np.argmax(y)
pmax = p[imax]
ymax = y[imax]
ax.plot(pmax, ymax, color='green', marker="*",
        label="optimal point: f({0:.2f}) = {1:.2f}".format(pmax, ymax))

# Plot points of two epsilon-greedy policies
ax.plot(epsilon, true_value(epsilon), color='magenta', marker="o",
        label="epsilon-greedy left")
ax.plot(1 - epsilon, true_value(1 - epsilon), color='blue', marker="o",
        label="epsilon-greedy right")

ax.set_ylabel("Value of the first state")
ax.set_xlabel("Probability of the action 'right'")
ax.set_title("Short corridor with switched actions")
# NOTE(review): ymin/ymax kwargs are deprecated in newer matplotlib
# (bottom/top) — confirm the pinned matplotlib version before changing.
ax.set_ylim(ymin=-105.0, ymax=5)
ax.legend()
# -

# ## The Policy Gradient Theorem
#
# ### The Gradient of the Objective
#
# $$
# \begin{align*}
# \nabla r(\pi) &= \nabla\sum_{s}\mu(s)\sum_{a}\pi(a|s,\mathbf{\theta})\sum_{s',r}p(s',r|s,a)r \\
# &= \sum_{s}\nabla\mu(s)\sum_{a}\pi(a|s,\mathbf{\theta})\sum_{s',r}p(s',r|s,a)r +
\sum_{s}\mu(s)\nabla\sum_{a}\pi(a|s,\theta)\sum_{s',r}p(s',r|s,a)r # \end{align*} # $$ # # ### The Policy Gradient Theorem # # - The *policy gradient theorem* gives an expression for the gradient of the average reward # # $$ # \nabla r(\pi) = \sum_{s}\mu(s)\sum_{a}\nabla \pi(a|s,\theta)q_\pi(s,a) # $$ # # # ## Estimating the Policy Gradient # # ### Getting Stochastic Samples of the Gradient # # $$ # \begin{gather*} # \nabla r(\pi)=\sum_{s}\mu(s)\sum_{a}\nabla \pi(a|s,\mathbf{\theta})q_\pi(s,a)\\ # \mathbf{\theta}_{t+1}\doteq\mathbf{\theta}_{t}+\alpha\sum_{a}\nabla\pi(a|S_t,\mathbf{\theta}_t)q_\pi(S_t,a)\\ # \\ # S_0,A_0,R_1,S_1,A_1,\ldots,S_t,A_t,R_{t+1},\ldots # \end{gather*} # $$ # # ### Unbiasedness of the Stochastic Samples # # $$ # \begin{align*} # \nabla r(\pi) &=\sum_{s}\mu(s)\sum_{a}\nabla\pi(a|s,\mathbf{\theta})q_\pi(s,a)\\ # &=\mathbb{E}_\mu[\sum_{a}\nabla\pi(a|S,\mathbf{\theta})q_\pi(S,a)] # \end{align*} # $$ # # ### Getting Stochastic Samples with One Action # # $$ # \begin{align*} # &\sum_{a}\nabla\pi(a|S,\mathbf{\theta})q_\pi(S,a)\\ # &=\sum_{a}\pi(a|S,\mathbf{\theta})\frac{1}{\pi(a|S,\mathbf{\theta})}\nabla\pi(a|S,\mathbf{\theta})q_\pi(S,a)\\ # &=\mathbb{E}_\pi[\frac{\nabla\pi(A|S,\mathbf{\theta})}{\pi(A|S,\mathbf{\theta})}q_\pi(S,A)] # \end{align*} # $$ # # ### Stochastic Gradient Ascent for Policy Parameters # # $$ # \begin{align*} # \mathbf{\theta}_{t+1}&\doteq\mathbf{\theta}_{t}+\alpha\frac{\nabla\pi(A_t|S_t,\mathbf{\theta}_t)}{\pi(A_t|S_t,\mathbf{\theta}_t)}q_\pi(S_t,A_t)\\ # &=\mathbf{\theta}_{t}+\alpha\nabla\ln\pi(A_t|S_t,\mathbf{\theta}_t)q_\pi(S_t,A_t)&(\because \nabla\ln\left(f(x)\right)=\frac{\nabla f(x)}{f(x)}) # \end{align*} # $$ # # ### Computing the Update # # $$ # \mathbf{\theta}_{t+1}=\mathbf{\theta}_{t}+\alpha\underbrace{\nabla\ln\pi(A_t|S_t,\mathbf{\theta}_t)}_{\text{gradient of the policy (computable)}}\underbrace{q_\pi(S_t,A_t)}_{\text{estimate of the differntial valus (computable)}} # $$ # # ## Actor-Critic Algorithm # # - It is 
# useful to learn a value function to estimate the gradient for the policy parameters
# - The *actor-critic algorithm* implements this idea, with a critic that learns a value function for the actor
#
# ### Approximating the Action Value in the Policy Update
#
# $$
# \begin{align*}
# \mathbf{\theta}_{t+1} &= \mathbf{\theta}_{t}+\alpha\nabla\ln\pi(A_t|S_t,\mathbf{\theta}_t)q_\pi(S_t,A_t)\\
# &\doteq \mathbf{\theta}_{t}+\alpha\nabla\ln\pi(A_t|S_t,\theta_t)[R_{t+1}-\bar{R}+\hat{v}(S_{t+1}, \mathbf{w})]
# \end{align*}
# $$
#
# ### Subtracting the Current State's Value Estimate
#
# $$
# \mathbf{\theta}_{t+1} \doteq \mathbf{\theta}_{t}+\alpha\nabla\ln\pi(A_t|S_t,\mathbf{\theta}_t)[\underbrace{R_{t+1}-\bar{R}+\hat{v}(S_{t+1}, \mathbf{w})-\underbrace{\hat{v}(S_t,\mathbf{w})}_{\text{Does not affect the Expected Update}}}_{\text{TD Error }\delta}]
# $$
#
# ### Adding a Baseline
#
# $$
# \begin{align*}
# &\mathbf{E}_\pi[\nabla\ln\pi(A_t|S_t,\theta_t)[R_{t+1}-\bar{R}+\hat{v}(S_{t+1}, \mathbf{w})-\underbrace{\hat{v}(S_t,\mathbf{w})}_{\text{Reduces the update variance}}]|S_t=s]\\
# =&\mathbf{E}_\pi[\nabla\ln\pi(A_t|S_t,\theta_t)[R_{t+1}-\bar{R}+\hat{v}(S_{t+1}, \mathbf{w})]|S_t=s]\\
# &-\underbrace{\mathbf{E}_\pi[\nabla\ln\pi(A_t|S_t,\theta_t)\hat{v}(S_t,\mathbf{w})|S_t=s]}_{0}
# \end{align*}
# $$
#
# ## Actor-Critic with Softmax Policies
#
# ### Recap: Actor-Critic
#
# $$
# \begin{align*}
# \mathbf{w} &\leftarrow \mathbf{w} + \alpha^\mathbf{w}\delta\nabla\hat{v}(S,\mathbf{w})\\
# \mathbf{\theta} &\leftarrow \mathbf{\theta} + \alpha^\mathbf{\theta}\delta\nabla\ln\pi(A|S,\mathbf{\theta})
# \end{align*}
# $$
#
# ### Policy Update with a Softmax Policy
#
# $$
# \begin{gather*}
# \mathbf{\theta} \leftarrow \mathbf{\theta} + \alpha^\mathbf{\theta}\delta\nabla\ln\pi(A|S,\mathbf{\theta})\\
# \pi(a|s,\mathbf{\theta})\doteq\frac{e^{h(s,a,\mathbf{\theta})}}{\sum_{b\in\mathcal{A}}e^{h(s,b,\mathbf{\theta})}}
# \end{gather*}
# $$
#
# ### Features of the Action Preference Function
#
# $$
# \begin{gather*}
# \hat{v}(s,\mathbf{w})\doteq\mathbf{w}^T\mathbf{x}(s)\\
# h(s,a,\mathbf{\theta})\doteq\mathbf{\theta}^T\mathbf{x}_h(s,a)
# \end{gather*}
# $$
#
# ### Actor-Critic with a Softmax Policy
#
# $$
# \begin{align*}
# &\mathbf{w} \leftarrow \mathbf{w} + \alpha^\mathbf{w}\delta\mathbf{x}(S)\\
# &\mathbf{\theta} \leftarrow \mathbf{\theta} + \alpha^\mathbf{\theta}\delta\nabla\ln\pi(A|S,\mathbf{\theta})\\
# &\nabla\ln\pi(a|s,\mathbf{\theta})=\mathbf{x}_h(s,a)-\sum_{b}\pi(b|s,\mathbf{\theta})\mathbf{x}_h(s,b)
# \end{align*}
# $$

# ## Demonstration with Actor-Critic
#
# ###
#

# ## Gaussian Policies for Continuous Actions
#
# ### Gaussian Distribution
#
# $$
# p(x)\doteq\frac{1}{\sigma\sqrt{2\pi}}\exp(-\frac{(x-\mu)^2}{2\sigma^2})
# $$
#
# ### Gaussian Policy
#
# $$
# \begin{align*}
# &\pi(a|s,\mathbf{\theta})\doteq\frac{1}{\sigma(s,\mathbf{\theta})\sqrt{2\pi}}\exp\left(-\frac{(a-\mu(s,\mathbf{\theta}))^2}{2\sigma(s,\mathbf{\theta})^2}\right)\\
# &\mu(s,\mathbf{\theta})\doteq\mathbf{\theta}_{\mu}^{T}\mathbf{x}(s)\\
# &\sigma(s,\mathbf{\theta})\doteq\exp(\mathbf{\theta}_\sigma^T\mathbf{x}(s))\\
# &\mathbf{\theta}\doteq\left[
# \begin{array}{c}
# \mathbf{\theta}_\mu\\
# \mathbf{\theta}_\sigma\\
# \end{array}
# \right]
# \end{align*}
# $$
#
# ### Gradient of the Log of the Gaussian Policy
#
# $$
# \begin{align*}
# &\nabla\ln\pi(a|s,\mathbf{\theta}_\mu)=\frac{1}{\sigma(s,\mathbf{\theta})^2}(a-\mu(s,\mathbf{\theta}))\mathbf{x}(s)\\
# &\nabla\ln\pi(a|s,\mathbf{\theta}_\sigma)=\left(\frac{(a-\mu(s,\mathbf{\theta}))^2}{\sigma(s,\mathbf{\theta})^2}-1\right)\mathbf{x}(s)
# \end{align*}
# $$
12_Policy-Gradient.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Exclusion # # # %load_ext autoreload # %autoreload 2 import xml.etree.ElementTree import os,glob,sys import numpy as np import pandas as pd import seaborn as sns # + sys.path.append('C:\\Users\\Vinny\\work\\vimms') sys.path.append('C:\\Users\\Vinny\\work\\pymzmine') sys.path.append('/Users/simon/git/vimms') sys.path.append('/Users/simon/git/pymzm') sys.path.append('/home/simon/git/vimms') sys.path.append('/home/simon/git/pymzm') # - from vimms.PythonMzmine import * from vimms.MassSpec import IndependentMassSpectrometer from vimms.Controller import TopNController,WeightedDEWController from vimms.PythonMzmine import * from vimms.Roi import make_roi, RoiToChemicalCreator from vimms.BOMAS import * from vimms.Common import * from vimms.Environment import * from pathlib import Path from vimms.PlotsForPaper import get_frag_events from vimms.DataGenerator import extract_hmdb_metabolite, get_data_source, get_spectral_feature_database set_log_level_warning() # # Test on Simulator (with same settings as MS) base_dir = os.path.abspath('/Users/simon/University of Glasgow/Vinny Davies - CLDS Metabolomics Project/Trained Models') base_dir = os.path.abspath('/Users/simon/University of Glasgow/Vinny Davies - CLDS Metabolomics Project/TopN_vs_ROI/QCB/simulator_input') ps_frag_QCB = load_obj(Path(base_dir, 'peak_sampler_mz_rt_int_beerqcb_fragmentation.p')) QCB_MZML2CHEMS_DICT = {'min_ms1_intensity': 0, 'mz_tol': 5, 'mz_units':'ppm', 'min_length':1, 'min_intensity':0, 'start_rt':0, 'stop_rt':1560} seed_mzml_file = '/Users/simon/University of Glasgow/<NAME> - CLDS Metabolomics Project/Experimental_Results/20200715_TopN_vs_ROI/from_controllers/QCB/TopN_QCB.mzML' # TopN_QCB_dataset = mzml2chems(seed_mzml_file, ps_frag_QCB, QCB_MZML2CHEMS_DICT, n_peaks=None) # + # 
save_obj(TopN_QCB_dataset, '/Users/simon/University of Glasgow/Vinny Davies - CLDS Metabolomics Project/Experimental_Results/20200715_TopN_vs_ROI/from_controllers/QCB/TopN_QCB.p') # - # TopN_QCB_dataset = load_obj('/Users/simon/OneDrive - University of Glasgow/CLDS Metabolomics Project/TopNvsTopNroi/QCB/SimulatorTests/TopN_QCB_dataset.mzml') # TopN_QCB_dataset = load_obj(Path(base_dir,'TopN_QCB_dataset.mzml')) TopN_QCB_dataset = load_obj(Path(base_dir,'/Users/simon/University of Glasgow/Vinny Davies - CLDS Metabolomics Project/Experimental_Results/20200715_TopN_vs_ROI/from_controllers/QCB/Fullscan_QCB.p')) # ## Set some parameters that are constant for all simulations # + min_rt = 0 max_rt = 26*60 # entire run min_ms1_intensity = 5000 mz_tol = 10 rt_tol = 15 N = 10 # these are derived from real data (see bottom of notebook) # roi_time_dict = {1: 0.71,2:0.20} topn_time_dict = {1: 0.60,2:0.20} ionisation_mode = POSITIVE isolation_width = 1 test_path = '/Users/simon/git/vimms/experimental/simon_res/' # min_roi_intensity = 500 # min_roi_length = 3 # still in scans, as to work in seconds, need to pass parameter. But doesn't matter when parameter below is equal to 1! 
# min_roi_length_for_fragmentation = 1 # - from vimms.MassSpec import IndependentMassSpectrometer # ### Top N # + controller = TopNController(ionisation_mode, N, isolation_width, mz_tol, rt_tol, min_ms1_intensity) mass_spec = IndependentMassSpectrometer(ionisation_mode, TopN_QCB_dataset, ps_frag_QCB, add_noise=True, scan_duration_dict = topn_time_dict) env = Environment(mass_spec, controller, min_rt, max_rt, progress_bar=True) env.run() # test_path = '/Users/simon/git/vimms/experimental/simon_res/' env.write_mzML(test_path, 'topN.mzml') # - # ## Excluded t0_vals = [1,3,5,10,15,30,60] t0_vals = [1,3,10,15,30,60] rt_tol_vals = [15,30,60,120,240,300,360] rt_tol_vals = [15,60,120,240,360,3600] for t0 in t0_vals: for r in rt_tol_vals: if t0 > r: # impossible combination continue print(t0,r) out_name = 'exlude_log_{}_{}.mzml'.format(t0,r) if os.path.isfile(os.path.join(test_path,out_name)): print("Already done") continue # ROI can also be fragmented again if intensity falls to <1% of the max *since* last fragmentation from vimms.Controller import TopN_SmartRoiController controller = WeightedDEWController(ionisation_mode, N, isolation_width, mz_tol, r,min_ms1_intensity, exclusion_t_0 = t0, log_intensity = True) mass_spec = IndependentMassSpectrometer(ionisation_mode, TopN_QCB_dataset, ps_frag_QCB, add_noise=True, scan_duration_dict=topn_time_dict) env = Environment(mass_spec, controller, min_rt, max_rt, progress_bar=True) env.run() env.write_mzML(test_path, out_name) # ## ROI-based scoring # # - Make the ROIs in the data # - Count how many have fragmentations in them from vimms.Roi import make_roi topn_file = os.path.join(test_path, 'topN.mzml') roi_file = os.path.join(test_path,'ROI.mzml') smart_file = os.path.join(test_path,'SMART.mzml') good_topn,junk_topn = make_roi(topn_file,mz_tol = mz_tol,mz_units = 'ppm',min_length = min_roi_length,min_intensity = min_roi_intensity) good_roi,junk_roi = make_roi(roi_file,mz_tol = mz_tol,mz_units = 'ppm',min_length = 
min_roi_length,min_intensity = min_roi_intensity) good_smart,junk_smart = make_roi(smart_file,mz_tol = mz_tol,mz_units = 'ppm',min_length = min_roi_length,min_intensity = min_roi_intensity) sys.path.append('/Users/simon/git/pymzm') from ms2_matching import MZMLFile topn_mz_file = MZMLFile(topn_file) roi_mz_file = MZMLFile(roi_file) smart_mz_file = MZMLFile(smart_file) # + def summarise(mz_file_object): n_scans = len(mz_file_object.scans) n_ms1_scans = len(list(filter(lambda x: x.ms_level == 1,mz_file_object.scans))) n_ms2_scans = len(list(filter(lambda x: x.ms_level == 2,mz_file_object.scans))) print("Total scans = {}, MS1 = {}, MS2 = {}".format(n_scans,n_ms1_scans,n_ms2_scans)) print("TopN:") summarise(topn_mz_file) print("ROI:") summarise(roi_mz_file) print("SMART:") summarise(smart_mz_file) # + # This method is very slow.... :-) def match_scans_to_rois(mz_file_object,roi_list): roi2scan = {roi:[] for roi in roi_list} scan2roi = {scan:[] for scan in filter(lambda x: x.ms_level == 2,mz_file_object.scans)} for scan in mz_file_object.scans: if scan.ms_level == 2: pmz = scan.precursor_mz scan_rt_in_seconds = 60*scan.previous_ms1.rt_in_minutes in_mz_range = list(filter(lambda x: min(x.mz_list) <= pmz and max(x.mz_list) >= pmz,roi_list)) in_rt_range = list(filter(lambda x: x.rt_list[0]<= scan_rt_in_seconds and x.rt_list[-1] >= scan_rt_in_seconds,in_mz_range)) for roi in in_rt_range: roi2scan[roi].append(scan) scan2roi[scan].append(roi) return roi2scan,scan2roi # - roi_roi2scan,roi_scan2roi = match_scans_to_rois(roi_mz_file,good_roi) topn_roi2scan,topn_scan2roi = match_scans_to_rois(topn_mz_file,good_topn) smart_roi2scan,smart_scan2roi = match_scans_to_rois(smart_mz_file,good_smart) # compute proportion of ROIs that have scans for each def prop_roi_with_scans(roi2scan): with_scan = 0 without_scan = 0 for r,scans in roi2scan.items(): if len(scans) == 0: without_scan += 1 else: with_scan += 1 return with_scan,without_scan,len(roi2scan) print("ROI: 
",prop_roi_with_scans(roi_roi2scan)) print("topN: ",prop_roi_with_scans(topn_roi2scan)) print("SMART: ",prop_roi_with_scans(smart_roi2scan)) # # Test with the picked peaks # # - This code uses a method called `load_picked_boxes` and one called `map_boxes_to_scans` from my pymzm repository (in `ms2_matching.py`) # - Load picked boxes loads the peak *boxes* from the output of mzmine (i.e. min/max mz, min/max rt) # - Map boxes to scans takes an MZMLFile object (also from the pymzm repo) and maps the ms2 scans to the boxes (and vice versa). Note that this method was originally designed to map scans to any box they overlap with (i.e. any box where the precursor plus or minus isolation intersected the box). This can be *switched off* by setting the isolation with to zero. Now, it will only find things where the precursor mz is in the box (and RT is ok, obviously) # - We can then simply see how many of the boxes are associated with scans and this tells us how many picked peaks have scans # + # test with picked peaks # from ms2_matching import MZMLFile # Big differences between the following two files! # perhaps an alignment thing... 
# peak_csv_file = os.path.join(qcb_dir,'from_controller_TopN_QCB_pp.csv') # peak_csv_file = os.path.join(qcb_dir,'QCB_22May19_1_pp.csv') peak_csv_file = os.path.join(test_path,'QCB','qcb_pp_wide.csv') peak_csv_file = '/Users/simon/University of Glasgow/Vinny Davies - CLDS Metabolomics Project/Experimental_Results/20200715_TopN_vs_ROI/from_controllers/QCB/TopN_QCB_box.csv' from ms2_matching import load_picked_boxes,map_boxes_to_scans boxes = load_picked_boxes(peak_csv_file) # + # dp_values = [0.1,0.5,1,5,20,50] # dp_values = [0.1,0.5] counts = np.zeros((len(t0_vals),len(rt_tol_vals))) for i,t0 in enumerate(t0_vals): for j,r in enumerate(rt_tol_vals): print(t0,r) if t0 > r: # impossible combination counts[i,j] = np.nan # fix for colour on boxplot continue fname = 'exlude_{}_{}.mzml'.format(t0,r) mz_file = MZMLFile(os.path.join(test_path,fname)) scans2boxes,boxes2scans = map_boxes_to_scans(mz_file,boxes,half_isolation_window = 0) counts[i,j] = len(boxes2scans) print(counts) # - plt.imshow(counts,aspect='auto') plt.yticks(range(len(t0_vals)),t0_vals) plt.xticks(range(len(rt_tol_vals)),rt_tol_vals) plt.colorbar() log_counts_dict = {} # + counts = np.zeros((len(t0_vals),len(rt_tol_vals))) for i,t0 in enumerate(t0_vals): if not t0 in log_counts_dict: log_counts_dict[t0] = {} for j,r in enumerate(rt_tol_vals): print(t0,r) if t0 > r: # impossible combination counts[i,j] = np.nan # fix for colour on boxplot log_counts_dict[t0][r] = np.nan continue if not r in log_counts_dict[t0]: fname = 'exlude_log_{}_{}.mzml'.format(t0,r) mz_file = MZMLFile(os.path.join(test_path,fname)) scans2boxes,boxes2scans = map_boxes_to_scans(mz_file,boxes,half_isolation_window = 0) counts[i,j] = len(boxes2scans) log_counts_dict[t0][r] = counts[i,j] else: counts[i][j] = log_counts_dict[t0][r] print(counts) # - plt.imshow(counts,aspect='auto') plt.yticks(range(len(t0_vals)),t0_vals) plt.xticks(range(len(rt_tol_vals)),rt_tol_vals) plt.colorbar() print(len(boxes)) # + # optimal pars: t0 = 15, rt_tol = 
120.0 # - # smart_scans2boxes,smart_boxes2scans = map_boxes_to_scans(smart_mz_file,boxes,half_isolation_window = 0) # roi_scans2boxes,roi_boxes2scans = map_boxes_to_scans(roi_mz_file,boxes,half_isolation_window = 0) topn_mz_file = MZMLFile(os.path.join(test_path,'topN.mzml')) topn_scans2boxes,topn_boxes2scans = map_boxes_to_scans(topn_mz_file,boxes,half_isolation_window = 0) # note 0 in previous call is to only find the scans that have a precursor *in* the box, not those that overlap print("topN: ",len(topn_boxes2scans)) print("Boxes: ",len(boxes)) print("ROI: ",len(roi_boxes2scans)) print("topN: ",len(topn_boxes2scans)) print("SMART: ",len(smart_boxes2scans)) # are they picking the same ones roi_box_set = set(roi_boxes2scans.keys()) topn_box_set = set(topn_boxes2scans.keys()) smart_box_set = set(smart_boxes2scans.keys()) print(len(topn_box_set.intersection(smart_box_set))) # ## QCA experiments # + base_dir = os.path.abspath('/Users/simon/University of Glasgow/Vinny Davies - CLDS Metabolomics Project/Trained Models') base_dir_qcb = os.path.abspath('/home/simon/git/vimms/experimental/simon_res/QCB') base_dir_qca = os.path.abspath('/home/simon/git/vimms/experimental/simon_res/QCA') ps_frag_QCB = load_obj(Path(base_dir_qcb, 'peak_sampler_mz_rt_int_beerqcb_fragmentation.p')) # - QCB_MZML2CHEMS_DICT = {'min_ms1_intensity': 0, 'mz_tol': 5, 'mz_units':'ppm', 'min_length':1, 'min_intensity':0, 'start_rt':0, 'stop_rt':1560} TopN_QCA_dataset = load_obj('/home/simon/git/vimms/experimental/simon_res/QCA/TopN_QCA_dataset.mzml') # + min_rt = 0 max_rt = 26*60 # entire run min_ms1_intensity = 5000 mz_tol = 10 rt_tol = 15 N = 10 # these are derived from real data (see bottom of notebook) roi_time_dict = {1: 0.71,2:0.20} topn_time_dict = {1: 0.60,2:0.20} ionisation_mode = POSITIVE isolation_width = 1 test_path = '/home/simon/git/vimms/experimental/simon_res/' min_roi_intensity = 500 min_roi_length = 3 # still in scans, as to work in seconds, need to pass parameter. 
But doesn't matter when parameter below is equal to 1! min_roi_length_for_fragmentation = 1 # - output_folder = os.path.join(test_path,'output_files','QCA') # ## TopN # + controller = TopNController(ionisation_mode, N, isolation_width, mz_tol, rt_tol, min_ms1_intensity) mass_spec = IndependentMassSpectrometer(ionisation_mode, TopN_QCA_dataset, ps_frag_QCB, add_noise=True, scan_duration_dict = topn_time_dict) env = Environment(mass_spec, controller, min_rt, max_rt, progress_bar=True) env.run() # test_path = '/Users/simon/git/vimms/experimental/simon_res/' env.write_mzML(output_folder, 'topN.mzml') # + # SMART ROI reset_length_seconds = 1e6 # set so reset never happens iif_values = [2,3,5,10,1e3,1e6] # dp_values = [1,5,20,50] # dp_values = [0.1,0.5] # dp_values = [0] dp_values = [0,0.1,0.5,1,5] # iif_values = [1e6] # - for iif in iif_values: for dp in dp_values: print(iif,dp) intensity_increase_factor = iif # fragment ROI again if intensity increases 10 fold drop_perc = dp/100 # ROI can also be fragmented again if intensity falls to <1% of the max *since* last fragmentation from vimms.Controller import TopN_SmartRoiController controller = TopN_SmartRoiController(ionisation_mode, isolation_width, mz_tol, min_ms1_intensity, min_roi_intensity, min_roi_length, N = N, rt_tol = rt_tol, min_roi_length_for_fragmentation = min_roi_length_for_fragmentation, reset_length_seconds = reset_length_seconds, intensity_increase_factor = intensity_increase_factor, drop_perc = drop_perc) mass_spec = IndependentMassSpectrometer(ionisation_mode, TopN_QCA_dataset, ps_frag_QCB, add_noise=True, scan_duration_dict=roi_time_dict) env = Environment(mass_spec, controller, min_rt, max_rt, progress_bar=True) env.run() out_name = 'SMART3_{}_{}.mzml'.format(iif,dp) env.write_mzML(output_folder, out_name) # test with picked peaks from ms2_matching import MZMLFile,load_picked_boxes,map_boxes_to_scans peak_csv_file = os.path.join(test_path,'QCA','qca_pp_wide.csv') boxes = 
load_picked_boxes(peak_csv_file) topn_mz_file = MZMLFile(os.path.join(output_folder,'topN.mzml')) topn_scans2boxes,topn_boxes2scans = map_boxes_to_scans(topn_mz_file,boxes,half_isolation_window = 0) # note 0 in previous call is to only find the scans that have a precursor *in* the box, not those that overlap print("topN: ",len(topn_boxes2scans)) counts = np.zeros((len(iif_values),len(dp_values))) for i,iif in enumerate(iif_values): for j,dp in enumerate(dp_values): print(iif,dp) fname = 'SMART3_{}_{}.mzml'.format(iif,dp) mz_file = MZMLFile(os.path.join(test_path,'output_files','QCA',fname)) scans2boxes,boxes2scans = map_boxes_to_scans(mz_file,boxes,half_isolation_window = 0) counts[i,j] = len(boxes2scans) print(counts) plt.imshow(counts,aspect='auto') plt.yticks(range(len(iif_values)),iif_values) plt.xticks(range(len(dp_values)),dp_values) plt.colorbar() # ## Extract some timings from real data # # - Cells below extract the scan durations from the real data and compute the mean -- these are then used above in the simulations mzml_QCB_TopN real_QCB_TopN = MZMLFile(mzml_QCB_TopN) time_dict = {(1,1):[],(1,2):[],(2,1):[],(2,2):[]} for i,s in enumerate(real_QCB_TopN.scans[:-1]): current = s.ms_level next_ = real_QCB_TopN.scans[i+1].ms_level tup = (current,next_) time_dict[tup].append(60*real_QCB_TopN.scans[i+1].rt_in_minutes - 60*s.rt_in_minutes) for k,v in time_dict.items(): if len(v) > 0: print(k,sum(v)/len(v)) # + mzml_QCB_TopN_Roi real_QCB_TopN_Roi = MZMLFile(mzml_QCB_TopN_Roi) # - time_dict = {(1,1):[],(1,2):[],(2,1):[],(2,2):[]} for i,s in enumerate(real_QCB_TopN_Roi.scans[:-1]): current = s.ms_level next_ = real_QCB_TopN_Roi.scans[i+1].ms_level tup = (current,next_) time_dict[tup].append(60*real_QCB_TopN_Roi.scans[i+1].rt_in_minutes - 60*s.rt_in_minutes) if tup == (1,1): print(i) for k,v in time_dict.items(): if len(v) > 0: print(k,sum(v)/len(v)) print(time_dict[(1,1)]) print(real_QCB_TopN_Roi.scans[6534].rt_in_minutes) 
print(real_QCB_TopN_Roi.scans[6535].rt_in_minutes) roi_time_dict = {1: 0.71,2:0.20} topn_time_dict = {1: 0.60,2:0.20} # ## Simon peak picking xml_file = '/Users/simon/git/vimms/batch_files/QCB_mzmine_batch_ms2.xml' output_dir = '/Users/simon/git/vimms/experimental/simon_res/' pick_peaks([smart_file],xml_template=xml_file, output_dir=output_dir) pick_peaks([topn_file],xml_template=xml_file, output_dir=output_dir) pick_peaks([roi_file],xml_template=xml_file, output_dir=output_dir) from alignment import JoinAligner tt = JoinAligner() tt.add_file('/Users/simon/University of Glasgow/Vinny Davies - CLDS Metabolomics Project/TopNvsTopNroi/QCB/QCB_22May19_1_pp.csv') print(len(tt.peaksets)) tt.add_file('/Users/simon/git/vimms/experimental/simon_res/topN_pp.csv') print(len(tt.peaksets)) tt_mat = tt.to_matrix() plt.imshow(tt_mat,aspect='auto') s = (tt_mat > 0).sum(axis=1) count = (s==2).sum() print(count) ss = JoinAligner() ss.add_file('/Users/simon/University of Glasgow/Vinny Davies - CLDS Metabolomics Project/TopNvsTopNroi/QCB/QCB_22May19_1_pp.csv') ss.add_file('/Users/simon/git/vimms/experimental/simon_res/SMART_pp.csv') ss_mat = ss.to_matrix() plt.imshow(ss_mat,aspect='auto') s = (ss_mat > 0).sum(axis=1) count = (s==2).sum() print(count) print(topn_file)
experimental/ExcludedSimulations.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + [markdown] slideshow={"slide_type": "slide"} # ## MatrixTable # # If you've gotten this far, you're probably thinking: # # - "Can't I do all of this in `pandas` or `R`?" # - "What does this have to do with biology?" # # The two crucial features that Hail adds are _scalability_ and the _domain-specific primitives_ needed to work easily with biological data. Fear not! You've learned most of the basic concepts of Hail and now are ready for the bit that makes it possible to represent and compute on genetic matrices: the [MatrixTable](https://hail.is/docs/devel/hail.MatrixTable.html). # + [markdown] slideshow={"slide_type": "slide"} # In the last example, the ratings table had a compound key: `movie_id` and `user_id`. The ratings were secretly a movie-by-user matrix! # # However, since this matrix is very sparse, it is reasonably represented in a so-called "coordinate form" `Table`, where each row of the table is an entry of the sparse matrix. For large and dense matrices (like sequencing data), the per-row overhead of coordinate reresentations is untenable. That's why we built `MatrixTable`, a 2-dimensional generalization of `Table`. # + [markdown] slideshow={"slide_type": "slide"} # ## MatrixTable Anatomy # # Recall that `Table` has two kinds of fields: # # - global fields # - row fields # # `MatrixTable` has four kinds of fields: # # - global fields # - row fields # - column fields # - entry fields # + [markdown] slideshow={"slide_type": "subslide"} # Row fields are fields that are stored once per row. These can contain information about the rows, or summary data calculated per row. # # Column fields are stored once per column. These can contain information about the columns, or summary data calculated per column. 
# # Entry fields are the piece that makes this structure a matrix -- there is an entry for each (row, column) pair. # + [markdown] slideshow={"slide_type": "slide"} # ## Importing and Reading # # Like tables, matrix tables can be [imported](https://hail.is/docs/devel/methods/impex.html) from a variety of formats: VCF, (B)GEN, PLINK, TSV, etc. Matrix tables can also be *read* from a "native" matrix table format. Let's read a sample of prepared 1KG data. # + slideshow={"slide_type": "fragment"} import hail as hl import matplotlib.pyplot as plt import seaborn seaborn.set() # %matplotlib inline hl.utils.get_1kg('data/') # + slideshow={"slide_type": "slide"} mt = hl.read_matrix_table('data/1kg.mt') mt.describe() # + [markdown] slideshow={"slide_type": "slide"} # There are a few things to note: # # - There is a single column field `s`. This is the sample ID from the VCF. Note, it is the column key. # - There is a compound row key: `locus` and `alleles`. # - `locus` has type `locus<GRCh37>` # - `alleles` has type `array<str>` # - GT has type `call`. That's a genotype call! # - There is another key type: the partition key. This key relates to how the rows are grouped into partitions for processing. Don't worry about this for now. # + [markdown] slideshow={"slide_type": "slide"} # Whereas table expressions could be indexed by nothing or indexed by rows, matrix table expression have four options: nothing, indexed by row, indexed by column, or indexed by row and column (the entries). Let's see some examples. # + slideshow={"slide_type": "fragment"} mt.s.describe() # + slideshow={"slide_type": "fragment"} mt.GT.describe() # + [markdown] slideshow={"slide_type": "slide"} # ## MatrixTable operations # We belabored the operations on `Table`s because they all have natural analogs (sometimes several) on `MatrixTable`s. 
For example: # # - `count` => `count_{rows, cols}` (and `count` which returns both) # - `filter` => `filter_{rows, cols, entries}` # - `annotate` => `annotate_{rows, cols, entries}` (and globals for both) # - `select` => `select_{rows, cols, entries}` (and globals for both) # - `transmute` => `transmute_{rows, cols, entries}` (and globals for both) # - `group_by` => `group_{rows, cols}_by` # - `explode` => `expode_{rows, cols}` # - `aggregate` => `aggregate_{rows, cols, entries}` # + [markdown] slideshow={"slide_type": "slide"} # Some operations are unique to `MatrixTable`: # # - The row fields can be accessed as a `Table` with [rows](https://hail.is/docs/devel/hail.MatrixTable.html#hail.MatrixTable.rows) # - The column fields can be accessed as a `Table` with [cols](https://hail.is/docs/devel/hail.MatrixTable.html#hail.MatrixTable.cols). # - The entire field space of a `MatrixTable` can be accessed as a coordinate-form `Table` with [entries](https://hail.is/docs/devel/hail.MatrixTable.html#hail.MatrixTable.entries). Be careful with this! While it's fast to aggregate or query, trying to write this `Table` to disk could produce files _thousands of times larger_ than the corresponding `MatrixTable`. # # Let's explore `mt` using these tools. Let's get the size of the dataset. # + slideshow={"slide_type": "fragment"} mt.count() # (rows, cols) # + [markdown] slideshow={"slide_type": "slide"} # Let's look at the first few row keys (variants) and column keys (sample IDs). # + slideshow={"slide_type": "fragment"} mt.rows().select().show() # + slideshow={"slide_type": "slide"} mt.s.show() # + [markdown] slideshow={"slide_type": "slide"} # Let's investigate the genotypes and the call rate. Let's look at the first few genotypes: # + slideshow={"slide_type": "fragment"} mt.GT.show() # + [markdown] slideshow={"slide_type": "slide"} # All homozygous reference, which is not surprising. 
Let's look at the distribution of genotype calls: # + slideshow={"slide_type": "fragment"} mt.aggregate_entries(hl.agg.counter(mt.GT.n_alt_alleles())) # + [markdown] slideshow={"slide_type": "slide"} # Let's compute the overall call rate directly, and then plot the distribution of call rate per variant. # + slideshow={"slide_type": "subslide"} mt.aggregate_entries(hl.agg.fraction(hl.is_defined(mt.GT))) # + [markdown] slideshow={"slide_type": "slide"} # Here's a nice trick: you can use an aggregator inside `annotate_rows` and it will aggregate over columns, that is, summarize the values in the row using the aggregator. Let's compute and plot call rate per variant. # + slideshow={"slide_type": "slide"} mt2 = mt.annotate_rows(call_rate = hl.agg.fraction(hl.is_defined(mt.GT))) mt2.describe() # + slideshow={"slide_type": "slide"} hist_start = 0 hist_end = 1.0 n_bins = 100 bin_size = (hist_end - hist_start) / n_bins hist = mt2.aggregate_rows(hl.agg.hist(mt2.call_rate, hist_start, hist_end, n_bins)) plt.xlim(0.0, 1.0) plt.bar(hist.bin_edges[:-1], hist.bin_freq, width=bin_size, label='call_rate') plt.xlabel('Call Rate') plt.ylabel('Count') plt.title('Variant Call Rate Histogram') plt.legend(loc=2) plt.show() # + [markdown] slideshow={"slide_type": "slide"} # ## Exercise: GQ vs DP # # In this exercise, you'll use Hail to investigate a strange property of sequencing datasets. # # The `DP` field is the sequencing depth (the number of reads). # # Let's first plot a histogram of `DP`: # + slideshow={"slide_type": "slide"} hist = mt.aggregate_entries(hl.agg.hist(mt.DP, 0, 40, 40)) plt.xlim(0, 40) plt.xlabel('DP') plt.ylabel('Count') plt.title('DP Histogram') plt.bar(hist.bin_edges[:-1], hist.bin_freq, width=1, label='DP') plt.legend() plt.show() # + [markdown] slideshow={"slide_type": "slide"} # Now, let's do the same thing for GQ. # # The `GQ` field is the phred-scaled "genotype quality". The formula to convert to a linear-scale confidence (0 to 1) is `10 ** -(mt.GQ / 10)`. 
GQ is truncated to lie between 0 and 99. # # + slideshow={"slide_type": "fragment"} hist = mt.aggregate_entries(hl.agg.hist(mt.GQ, 0, 100, 100)) plt.xlim(0, 100) plt.xlabel('GQ') plt.ylabel('Count') plt.title('GQ Histogram') plt.bar(hist.bin_edges[:-1], hist.bin_freq, width=1, label='GQ') plt.legend() plt.show() # - # Whoa! That's a strange distribution! There's a big spike at 100. The rest of the values have roughly the same shape as the DP distribution, but form a [Dimetrodon](https://en.wikipedia.org/wiki/Dimetrodon). Use Hail to figure out what's going on!
python/hail/docs/tutorials/09-matrixtable.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # name: python3 # --- # + [markdown] id="view-in-github" colab_type="text" # <a href="https://colab.research.google.com/github/saicodes/awesome-collection-of-google-colab-notebooks/blob/master/Linear_Regression_from_scratch.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a> # + [markdown] id="H5u40ptcxGjJ" colab_type="text" # Linear Regression # ======== # Linear regression is the simplest of all machine learning models. It tries to approximate the pattern of the given data into a straight line. # # Let us optimize a linear model using the Gradient Descent algorithm. # # # <img src="https://cdn-images-1.medium.com/max/800/1*KwdVLH5e_P9h8hEzeIPnTg.png" width = 300px> # + [markdown] id="yn9O8t8WxfoG" colab_type="text" # Let us import all the neccesary libraries. Matplotlib helps us woth plotting graphs for visualization. Numpy is the common matrix math library used in python. # # + [markdown] id="QUG0WGVyPD-i" colab_type="text" # # + id="chN4F4dwj_WG" colab_type="code" colab={} import matplotlib.pyplot as plt import numpy as np # + id="Bb9Dr5zXPFg2" colab_type="code" colab={} # + [markdown] id="LvVjQ4msyhkD" colab_type="text" # Now we generate artificial data. Do not worry about this part. 
# # Let us assume that we are given the dataset: # # x_sample = Distance of Uber Ride # y_sample = Corresponding Cost # # # + id="63GaP_GYim1C" colab_type="code" colab={} # Linear Regression # Generating artificial dataset x_sample = np.linspace(1,15,100) noise1 = np.random.uniform(-0.3,0.3, size=(100)) noise2 = np.random.uniform(-20,20, size=(100)) y_sample = 4*x_sample + 200 y_sample = y_sample + noise2 x_sample = x_sample + noise1 # + id="4HuCdnWzVGmy" colab_type="code" colab={} # + [markdown] id="O53mvKg_y6OA" colab_type="text" # Now that we got the data, lets try to visualize it using matplotlib's plot function. # + id="HUd3bSt7kKcz" colab_type="code" outputId="6060293c-1340-4309-a790-b1d4d5f8b5b6" colab={"base_uri": "https://localhost:8080/", "height": 378} plt.xlabel("Distance to Destination") plt.ylabel("Uber cost at various time stamps") plt.plot(x_sample,y_sample, ".r") # + [markdown] id="NaRdJYzJzHpy" colab_type="text" # We want to approximate this data into a linear function. # # Let us assume : # # y = mx + c # # as the function. # # Here m is the slope of the line, and c is the y intercept. # # + id="pzQqb52sk3rd" colab_type="code" colab={} def linear_function(m,c,x): return m*x+c # + [markdown] id="ACqR6XxkzpJM" colab_type="text" # Here comes the important part. # # IMPORTANT # ======== # # for all those who planned to scroll without paying attention! # # # We define our algorithm's performance here. We model our loss function as the mean squared error between the predicted values and the dataset # # <img src="https://cdn-images-1.medium.com/max/800/1*AQKoBlrYPA6kjvW8XomKUQ.png" width= 300px> # # (read the b as c or whatever!) # + id="_puaQ3NFm96I" colab_type="code" colab={} def error(m,c,x_sample, y_sample): return np.sum((linear_function(m,c,x_sample) - y_sample)**2)/len(x_sample) # + [markdown] id="621Fy_Iz0pa9" colab_type="text" # The Error is a function of both m and c. We would like to get that error minimized. # # # How? 
# # Method 1: Take the partial derivatives with respect to m and c, and equate
# them to zero. Take 2nd-order derivatives and verify that it is a minimum.
# Drawback of this method: it is not scalable to large datasets.
#
# Method 2: Take the derivatives and move in the direction where the error
# decreases the most. Repeat this many times until the error stops changing
# appreciably (and you become the happiest person in the world).
#
# <img src ="https://cdn-images-1.medium.com/max/800/1*3YJx2rdqMW5ccRJZFH9v6w.png" width=300 >
#
#

# + id="zAdUK8HEm_Fj" colab_type="code" colab={}
def derrivative_of_error_m(m, c, x_sample, y_sample):
    """Partial derivative of the MSE loss with respect to the slope m.

    NOTE(review): the name keeps its historical spelling ("derrivative")
    because other cells call it by this name.
    """
    residuals = y_sample - linear_function(m, c, x_sample)
    return -2 * np.sum(residuals * x_sample) / len(x_sample)


def derrivative_of_error_c(m, c, x_sample, y_sample):
    """Partial derivative of the MSE loss with respect to the intercept c."""
    residuals = y_sample - linear_function(m, c, x_sample)
    return -2 * np.sum(residuals) / len(x_sample)


# + [markdown] id="sB5bMYe-2PAG" colab_type="text"
# We create an optimizer object (if you are not familiar with classes, the same
# thing can be done without them). Inside, a while loop runs until the error
# drops below the given threshold; an iteration cap makes the loop break even
# if we are not converging.
#
# Notice the Learning Rate parameter: it is crucial for convergence. A low
# learning rate can make optimization take a very long time, while a high one
# can make the parameters oscillate near the minimum or even diverge.
# # <img src="https://www.jeremyjordan.me/content/images/2018/02/Screen-Shot-2018-02-24-at-11.47.09-AM.png" width = 500px>
#
#
#

# + id="LmgLD7b_ne05" colab_type="code" colab={}
class GradientDescent():
    """Batch gradient descent for the linear model y = m*x + c.

    Relies on the module-level helpers ``error``, ``derrivative_of_error_m``
    and ``derrivative_of_error_c`` defined in the cells above.
    """

    def __init__(self, m, c, learning_rate):
        # BUG FIX: the original ignored the ``m`` and ``c`` arguments and
        # always started from (0, 0); honour the caller-supplied values.
        self.m = m
        self.c = c
        # Start "infinitely" wrong so the first loop iteration always runs,
        # regardless of the caller's threshold (was a magic 10000, which
        # would silently skip optimization for thresholds >= 10000).
        self.error = float('inf')
        self.learning_rate = learning_rate
        # History buffers for plotting the optimization afterwards.
        self.error_log = []
        self.m_log = []
        self.c_log = []
        self.dm_log = []
        self.dc_log = []

    def optimize(self, x_sample, y_sample, threshold=10):
        """Run gradient steps until MSE < ``threshold`` (capped at 10000 iterations)."""
        num_iter = 0
        while self.error > threshold and num_iter < 10000:
            num_iter = num_iter + 1
            self.error = error(self.m, self.c, x_sample, y_sample)
            dm = self.learning_rate * (derrivative_of_error_m(self.m, self.c, x_sample, y_sample))
            dc = self.learning_rate * (derrivative_of_error_c(self.m, self.c, x_sample, y_sample))
            self.m = self.m - dm
            self.c = self.c - dc
            # logging
            self.m_log.append(self.m)
            self.c_log.append(self.c)
            self.dm_log.append(dm)
            self.dc_log.append(dc)
            self.error_log.append(self.error)
            print("Current Loss:\t %f" % self.error)
            print("Current Value:\n\tm:\t%f\n\tc:\t%f" % (self.m, self.c))
            print("_" * 100)


# + [markdown] id="SCjecXg833EN" colab_type="text"
# We now initialise the variables, define the "hyperparameters" and run the
# algorithm. Hyperparameters are the important knobs like learning rate,
# number of neurons in a neural network, etc.

# + id="FDjxm0-bodAi" colab_type="code" outputId="7b91b203-fdf1-491c-c157-fb29ebab0557" colab={"base_uri": "https://localhost:8080/", "height": 54247}
initial_m = 0
initial_c = 0
learning_rate = 0.01

alg = GradientDescent(initial_m, initial_c, learning_rate)
alg.optimize(x_sample, y_sample, 200)

# + [markdown] id="vncZjpjk4d07" colab_type="text"
# Congratulations!
#
# You have now trained your first machine learning model from scratch! Without
# any machine learning libraries!
#
# Disclaimer: For those who have not done this before only.
#
# Now that we have got the m and c values of our line, let us plot it!
# + id="URRUWiGyrKKv" colab_type="code" outputId="a7188603-d7cc-4429-e3ba-99f9caf8c7b5" colab={"base_uri": "https://localhost:8080/", "height": 378} plt.xlabel("Distance to Destination") plt.ylabel("Uber cost at various time stamps") plt.plot(x_sample,y_sample, ".r") x = np.linspace(1,15,100) y = linear_function(alg.m,alg.c,x) plt.plot(x,y,"-b") # + [markdown] id="T_QFjoiT5KDB" colab_type="text" # Try plotting other logs and try to understand their behaviour. # + id="Cu19217lvon7" colab_type="code" outputId="113ac5a3-793a-45bd-ad4e-92db9d60b490" colab={"base_uri": "https://localhost:8080/", "height": 378} plt.xlabel("Num Iterations") plt.ylabel("Error") plt.plot(alg.error_log, 'r-') # + id="hlJfUVE4wF72" colab_type="code" outputId="54b5b981-1335-4ddb-ea65-d8e138c04812" colab={"base_uri": "https://localhost:8080/", "height": 382} plt.xlabel("Num Iterations") plt.ylabel("m Value") plt.plot(alg.m_log, 'b-') # + id="AKk7vTdbwbLt" colab_type="code" outputId="43b90c56-cf1d-4632-c07a-2ee6c34fff9c" colab={"base_uri": "https://localhost:8080/", "height": 378} plt.xlabel("Num Iterations") plt.ylabel("c Value") plt.plot(alg.c_log, 'c-') # + id="qB7YTjdAwxkL" colab_type="code" outputId="25afff8f-f31a-47ed-a34a-9bba0013fd13" colab={"base_uri": "https://localhost:8080/", "height": 378} plt.xlabel("Num Iterations") plt.ylabel("dm Value") plt.plot(alg.dm_log, 'k-') # + id="MpVtL8ysw4AI" colab_type="code" outputId="2d3fd2d2-add6-4417-a776-42f00d34b3f4" colab={"base_uri": "https://localhost:8080/", "height": 378} plt.xlabel("Num Iterations") plt.ylabel("dc Value") plt.plot(alg.dc_log, 'm-') # + id="Sq5v_f1WxAnS" colab_type="code" colab={}
Linear_Regression_from_scratch.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3.10.0 64-bit
#     language: python
#     name: python3
# ---

# # Sentiment Analysis - CP322
# ## <NAME> (190954880) | <NAME> (190723380) | <NAME> ()

# +
# import libraries
import nltk
import pandas as pd
import sklearn
import re
from nltk.corpus import stopwords
from nltk.stem.wordnet import WordNetLemmatizer
from nltk.stem.porter import PorterStemmer
from nltk.tokenize import word_tokenize

# visualization
import matplotlib.pyplot as plt
from nltk.probability import FreqDist

# download required NLTK data (duplicate 'wordnet' download removed)
nltk.download('punkt')
nltk.download('averaged_perceptron_tagger')
nltk.download('stopwords')
nltk.download('wordnet')
nltk.download('omw-1.4')
nltk.download('treebank')
nltk.download('tagsets')
nltk.download('vader_lexicon')
print('Finished downloading')

# +
# import data
df = pd.read_csv('reviews.csv')

# remove all reviews with no positive feedback to remove potential spam and unhelpful reviews
df = df[df['Positive Feedback Count'] > 0]

# remove all null and unnecessary features for reviews
reviews = df.drop(labels=['Clothing ID', 'Title'], axis=1)
reviews.dropna(inplace=True)
reviews.head()
reviews.shape
# -

# remove all null and unnecessary features for titles
titles = df.drop(labels=['Clothing ID', 'Review Text'], axis=1)
titles.dropna(inplace=True)
titles.head()

# +
# make stopwords and prepare for stemming
stop_words = set(stopwords.words("english"))
stem = PorterStemmer()


def _tokenize_filter_stem(texts):
    """Tokenize each text, drop stopwords / punctuation-or-digit-only tokens,
    and Porter-stem the remainder.

    Returns a pair ``(filtered, flattened)`` where ``filtered`` holds one
    space-joined string of stems per input text and ``flattened`` is the flat
    list of all stems (used for frequency plots).
    """
    filtered = []
    flattened = []
    for text in texts:
        stems = []
        for token in word_tokenize(text):
            # remove stop words and tokens made only of digits/underscores/symbols
            if token not in stop_words and not re.match(r'^[_\W0-9]+$', token):
                stemmed = stem.stem(token)
                stems.append(stemmed)
                flattened.append(stemmed)
        filtered.append(' '.join(stems))
    return filtered, flattened


# Tokenize, filter and stem all reviews (was a duplicated inline loop)
filteredCorpusReview, flattenedCorpusReview = _tokenize_filter_stem(reviews['Review Text'])
# -

# Tokenize, filter and stem all titles
filteredCorpusTitle, flattenedCorpusTitle = _tokenize_filter_stem(titles['Title'])

# ### Visualization

# +
# Visualize the frequency of words in reviews
fdistReview = FreqDist(flattenedCorpusReview)
print(fdistReview)

# Plot the frequency of review words
fdistReview.plot(30, cumulative=False)
plt.show()

# +
# Visualize the frequency of words in titles
fdistTitle = FreqDist(flattenedCorpusTitle)
print(fdistTitle)

# Plot the frequency of title words
fdistTitle.plot(30, cumulative=False)
plt.show()
print(len(filteredCorpusTitle))
# -

from sklearn.feature_extraction.text import CountVectorizer

# tokenizer to remove unwanted elements from our data like symbols and numbers
cvr = CountVectorizer(lowercase=True, ngram_range=(1, 1))
trainingReview = cvr.fit_transform(filteredCorpusReview).toarray()

cvt = CountVectorizer(lowercase=True, ngram_range=(1, 1))
trainingTitle = cvt.fit_transform(filteredCorpusTitle).toarray()

# Create a dataframe to visualize the bag of words model (reviews)
dfr = pd.DataFrame(data=trainingReview, columns=cvr.get_feature_names_out())
dfr

# Create a dataframe to visualize the bag of words model (titles)
dft = pd.DataFrame(data=trainingTitle, columns=cvt.get_feature_names_out())
dft

# ### Trial Run With User Generated Ratings

from sklearn.model_selection import train_test_split

X_train, X_test, y_train, y_test = train_test_split(trainingReview, reviews['Rating'], test_size=0.3)

from sklearn.naive_bayes import MultinomialNB
# Import scikit-learn metrics module for accuracy calculation
from sklearn import metrics

# Model Generation Using Multinomial Naive Bayes (bag-of-words features)
clf = MultinomialNB().fit(X_train, y_train)
predicted = clf.predict(X_test)
print("MultinomialNB Accuracy:", metrics.accuracy_score(y_test, predicted))

from sklearn.feature_extraction.text import TfidfVectorizer

tf = TfidfVectorizer()
text_tf = tf.fit_transform(filteredCorpusReview).toarray()

X_train, X_test, y_train, y_test = train_test_split(text_tf, reviews['Rating'], test_size=0.3)

# Model Generation Using Multinomial Naive Bayes (TF-IDF features)
clf = MultinomialNB().fit(X_train, y_train)
predicted = clf.predict(X_test)
print("MultinomialNB Accuracy:", metrics.accuracy_score(y_test, predicted))


# Helps map sentiment values to rating values.
# NOTE(review): this shadows the builtin ``map``; kept for backward
# compatibility with existing callers, but consider renaming.
def map(value, minFrom, maxFrom, minTo, maxTo):
    """Linearly rescale ``value`` from [minFrom, maxFrom] to [minTo, maxTo]."""
    return (value - minFrom) * (maxTo - minTo) / (maxFrom - minFrom) + minTo


from nltk.corpus import subjectivity
from nltk.sentiment import SentimentAnalyzer
from nltk.sentiment.util import *
from nltk.sentiment.vader import SentimentIntensityAnalyzer

# +
# VADER polarity scores on the stemmed/filtered review text
sid = SentimentIntensityAnalyzer()
for review in filteredCorpusReview:
    print(review)
    ss = sid.polarity_scores(review)
    for k in sorted(ss):
        print('{0}: {1}, '.format(k, ss[k]), end='')
    print()
# -

# VADER polarity scores on the raw review text for comparison
# (the stray trailing token "rpususa" from the original, which would raise
# NameError when run, has been removed)
for review in reviews["Review Text"]:
    print(review)
    ss = sid.polarity_scores(review)
    for k in sorted(ss):
        print('{0}: {1}, '.format(k, ss[k]), end='')
    print()
Project_helpme.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3.5
#     language: python
#     name: python3
# ---

# ### Preparing the Dataset

# #### Importing the libraries

# BUG FIX: ``os`` was used below (os.environ) but never imported,
# which made every cell referencing DSX_PROJECT_DIR raise NameError.
import os

import numpy as np
import pandas as pd

# #### Importing the dataset

dataset = pd.read_csv(os.environ['DSX_PROJECT_DIR'] + '/datasets/Wine.csv')
X = dataset.iloc[:, 0:13].values  # Explanatory variables
y = dataset.iloc[:, 13].values  # Target variable

dataset.head(2)

X[0:2]

y[0:2]

# ### Feature Engineering

# #### Feature Scaling

from sklearn.preprocessing import StandardScaler
sc = StandardScaler()
X = sc.fit_transform(X)

X[0:2]

# #### Applying PCA

from sklearn.decomposition import PCA
pca = PCA(n_components=2)
X = pca.fit_transform(X)

X[0:2]

# Fraction of total variance captured by each principal component
explained_variance = pca.explained_variance_ratio_

explained_variance * 100

# ### Exporting the results

features = pd.DataFrame(X)
features.to_csv(os.environ['DSX_PROJECT_DIR'] + '/datasets/features.csv')
features.head(2)

target = pd.DataFrame(y)
target.to_csv(os.environ['DSX_PROJECT_DIR'] + '/datasets/target.csv')
target[0:2]
notebooks/pca-features.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + # %load_ext autoreload # %autoreload 2 # %matplotlib inline # - # ## The forward and backward passes # [Jump_to lesson 8 video](https://course.fast.ai/videos/?lesson=8&t=4960) # + #export from exp.nb_01 import * def get_data(): path = datasets.download_data(MNIST_URL, ext='.gz') with gzip.open(path, 'rb') as f: ((x_train, y_train), (x_valid, y_valid), _) = pickle.load(f, encoding='latin-1') return map(tensor, (x_train,y_train,x_valid,y_valid)) def normalize(x, m, s): return (x-m)/s # - x_train,y_train,x_valid,y_valid = get_data() train_mean,train_std = x_train.mean(),x_train.std() train_mean,train_std x_train = normalize(x_train, train_mean, train_std) # NB: Use training, not validation mean for validation set x_valid = normalize(x_valid, train_mean, train_std) train_mean,train_std = x_train.mean(),x_train.std() train_mean,train_std #export def test_near_zero(a,tol=1e-3): assert a.abs()<tol, f"Near zero: {a}" test_near_zero(x_train.mean()) test_near_zero(1-x_train.std()) n,m = x_train.shape c = y_train.max()+1 n,m,c # ## Foundations version # ### Basic architecture # [Jump_to lesson 8 video](https://course.fast.ai/videos/?lesson=8&t=5128) # num hidden nh = 50 # [Tinker practice](https://course.fast.ai/videos/?lesson=8&t=5255) # standard xavier init w1 = torch.randn(m,nh)/math.sqrt(m) b1 = torch.zeros(nh) w2 = torch.randn(nh,1)/math.sqrt(nh) b2 = torch.zeros(1) test_near_zero(w1.mean()) test_near_zero(w1.std()-1/math.sqrt(m)) # This should be ~ (0,1) (mean,std)... x_valid.mean(),x_valid.std() def lin(x, w, b): return x@w + b t = lin(x_valid, w1, b1) #...so should this, because we used xavier init, which is designed to do this t.mean(),t.std() def relu(x): return x.clamp_min(0.) 
t = relu(lin(x_valid, w1, b1)) #...actually it really should be this! t.mean(),t.std() # From pytorch docs: `a: the negative slope of the rectifier used after this layer (0 for ReLU by default)` # # $$\text{std} = \sqrt{\frac{2}{(1 + a^2) \times \text{fan_in}}}$$ # # This was introduced in the paper that described the Imagenet-winning approach from *He et al*: [Delving Deep into Rectifiers](https://arxiv.org/abs/1502.01852), which was also the first paper that claimed "super-human performance" on Imagenet (and, most importantly, it introduced resnets!) # [Jump_to lesson 8 video](https://course.fast.ai/videos/?lesson=8&t=5128) # kaiming init / he init for relu w1 = torch.randn(m,nh)*math.sqrt(2/m) w1.mean(),w1.std() t = relu(lin(x_valid, w1, b1)) t.mean(),t.std() #export from torch.nn import init w1 = torch.zeros(m,nh) init.kaiming_normal_(w1, mode='fan_out') t = relu(lin(x_valid, w1, b1)) # + # init.kaiming_normal_?? # - w1.mean(),w1.std() t.mean(),t.std() w1.shape import torch.nn torch.nn.Linear(m,nh).weight.shape # + # torch.nn.Linear.forward?? # + # torch.nn.functional.linear?? # + # torch.nn.Conv2d?? # + # torch.nn.modules.conv._ConvNd.reset_parameters?? # - # what if...? def relu(x): return x.clamp_min(0.) - 0.5 # kaiming init / he init for relu w1 = torch.randn(m,nh)*math.sqrt(2./m ) t1 = relu(lin(x_valid, w1, b1)) t1.mean(),t1.std() def model(xb): l1 = lin(xb, w1, b1) l2 = relu(l1) l3 = lin(l2, w2, b2) return l3 # %timeit -n 10 _=model(x_valid) assert model(x_valid).shape==torch.Size([x_valid.shape[0],1]) # ### Loss function: MSE # [Jump_to lesson 8 video](https://course.fast.ai/videos/?lesson=8&t=6372) model(x_valid).shape # We need `squeeze()` to get rid of that trailing (,1), in order to use `mse`. (Of course, `mse` is not a suitable loss function for multi-class classification; we'll use a better loss function soon. We'll use `mse` for now to keep things simple.) 
#export def mse(output, targ): return (output.squeeze(-1) - targ).pow(2).mean() y_train,y_valid = y_train.float(),y_valid.float() preds = model(x_train) preds.shape mse(preds, y_train) # ### Gradients and backward pass # [Jump_to lesson 8 video](https://course.fast.ai/videos/?lesson=8&t=6493) def mse_grad(inp, targ): # grad of loss with respect to output of previous layer inp.g = 2. * (inp.squeeze() - targ).unsqueeze(-1) / inp.shape[0] def relu_grad(inp, out): # grad of relu with respect to input activations inp.g = (inp>0).float() * out.g def lin_grad(inp, out, w, b): # grad of matmul with respect to input inp.g = out.g @ w.t() w.g = (inp.unsqueeze(-1) * out.g.unsqueeze(1)).sum(0) b.g = out.g.sum(0) def forward_and_backward(inp, targ): # forward pass: l1 = inp @ w1 + b1 l2 = relu(l1) out = l2 @ w2 + b2 # we don't actually need the loss in backward! loss = mse(out, targ) # backward pass: mse_grad(out, targ) lin_grad(l2, out, w2, b2) relu_grad(l1, l2) lin_grad(inp, l1, w1, b1) forward_and_backward(x_train, y_train) # Save for testing against later w1g = w1.g.clone() w2g = w2.g.clone() b1g = b1.g.clone() b2g = b2.g.clone() ig = x_train.g.clone() # We cheat a little bit and use PyTorch autograd to check our results. xt2 = x_train.clone().requires_grad_(True) w12 = w1.clone().requires_grad_(True) w22 = w2.clone().requires_grad_(True) b12 = b1.clone().requires_grad_(True) b22 = b2.clone().requires_grad_(True) def forward(inp, targ): # forward pass: l1 = inp @ w12 + b12 l2 = relu(l1) out = l2 @ w22 + b22 # we don't actually need the loss in backward! 
return mse(out, targ) loss = forward(xt2, y_train) loss.backward() test_near(w22.grad, w2g) test_near(b22.grad, b2g) test_near(w12.grad, w1g) test_near(b12.grad, b1g) test_near(xt2.grad, ig ) # ## Refactor model # ### Layers as classes # [Jump_to lesson 8 video](https://course.fast.ai/videos/?lesson=8&t=7112) class Relu(): def __call__(self, inp): self.inp = inp self.out = inp.clamp_min(0.)-0.5 return self.out def backward(self): self.inp.g = (self.inp>0).float() * self.out.g class Lin(): def __init__(self, w, b): self.w,self.b = w,b def __call__(self, inp): self.inp = inp self.out = inp@self.w + self.b return self.out def backward(self): self.inp.g = self.out.g @ self.w.t() # Creating a giant outer product, just to sum it, is inefficient! self.w.g = (self.inp.unsqueeze(-1) * self.out.g.unsqueeze(1)).sum(0) self.b.g = self.out.g.sum(0) class Mse(): def __call__(self, inp, targ): self.inp = inp self.targ = targ self.out = (inp.squeeze() - targ).pow(2).mean() return self.out def backward(self): self.inp.g = 2. 
* (self.inp.squeeze() - self.targ).unsqueeze(-1) / self.targ.shape[0] class Model(): def __init__(self, w1, b1, w2, b2): self.layers = [Lin(w1,b1), Relu(), Lin(w2,b2)] self.loss = Mse() def __call__(self, x, targ): for l in self.layers: x = l(x) return self.loss(x, targ) def backward(self): self.loss.backward() for l in reversed(self.layers): l.backward() w1.g,b1.g,w2.g,b2.g = [None]*4 model = Model(w1, b1, w2, b2) # %time loss = model(x_train, y_train) # %time model.backward() test_near(w2g, w2.g) test_near(b2g, b2.g) test_near(w1g, w1.g) test_near(b1g, b1.g) test_near(ig, x_train.g) # ### Module.forward() class Module(): def __call__(self, *args): self.args = args self.out = self.forward(*args) return self.out def forward(self): raise Exception('not implemented') def backward(self): self.bwd(self.out, *self.args) class Relu(Module): def forward(self, inp): return inp.clamp_min(0.)-0.5 def bwd(self, out, inp): inp.g = (inp>0).float() * out.g class Lin(Module): def __init__(self, w, b): self.w,self.b = w,b def forward(self, inp): return inp@self.w + self.b def bwd(self, out, inp): inp.g = out.g @ self.w.t() self.w.g = torch.einsum("bi,bj->ij", inp, out.g) self.b.g = out.g.sum(0) class Mse(Module): def forward (self, inp, targ): return (inp.squeeze() - targ).pow(2).mean() def bwd(self, out, inp, targ): inp.g = 2*(inp.squeeze()-targ).unsqueeze(-1) / targ.shape[0] class Model(): def __init__(self): self.layers = [Lin(w1,b1), Relu(), Lin(w2,b2)] self.loss = Mse() def __call__(self, x, targ): for l in self.layers: x = l(x) return self.loss(x, targ) def backward(self): self.loss.backward() for l in reversed(self.layers): l.backward() w1.g,b1.g,w2.g,b2.g = [None]*4 model = Model() # %time loss = model(x_train, y_train) # %time model.backward() test_near(w2g, w2.g) test_near(b2g, b2.g) test_near(w1g, w1.g) test_near(b1g, b1.g) test_near(ig, x_train.g) # ### Without einsum # [Jump_to lesson 8 video](https://course.fast.ai/videos/?lesson=8&t=7484) class Lin(Module): def 
__init__(self, w, b): self.w,self.b = w,b def forward(self, inp): return inp@self.w + self.b def bwd(self, out, inp): inp.g = out.g @ self.w.t() self.w.g = inp.t() @ out.g self.b.g = out.g.sum(0) w1.g,b1.g,w2.g,b2.g = [None]*4 model = Model() # %time loss = model(x_train, y_train) # %time model.backward() test_near(w2g, w2.g) test_near(b2g, b2.g) test_near(w1g, w1.g) test_near(b1g, b1.g) test_near(ig, x_train.g) # ### nn.Linear and nn.Module #export from torch import nn class Model(nn.Module): def __init__(self, n_in, nh, n_out): super().__init__() self.layers = [nn.Linear(n_in,nh), nn.ReLU(), nn.Linear(nh,n_out)] self.loss = mse def __call__(self, x, targ): for l in self.layers: x = l(x) return self.loss(x.squeeze(), targ) model = Model(m, nh, 1) # %time loss = model(x_train, y_train) # %time loss.backward() # ## Export # !./notebook2script.py 02_fully_connected.ipynb
nbs/dl2/02_fully_connected.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# +
import numpy as np
import pandas as pd
import os
import matplotlib.pyplot as plt

from E3Dpy import e3d_creator

# %load_ext autoreload
# %autoreload 2
# -

# ## Define model parameters

# +
# Build the 'M1' model: set grid/medium parameters, load the firn velocity
# profile, and place a line of receivers.
mod_m1 = e3d_creator.e3d_model('M1')
mod_m1.assign_model_parameters(10, 2, 0.05, 10)
mod_m1.import_velocity('../Data/Antarctica_firn_vel_model.txt')
mod_m1.position_receivers(3, 7, dx=0.5)
# -

# Moment-tensor source (src_type=4); the commented call below is an
# alternative source type kept for quick experimentation.
mod_m1.define_source(5, 0.5, src_type=4,
                     Mxx=-0.6710, Myy=0.0669, Mzz=0.6040,
                     Mxy=0.2416, Mxz=0.4762, Myz=-0.5523)
# mod_m1.define_source(5,0.5,src_type=6)

# ## Plot model

mod_m1.plot_model()

mod_m1.plot_velocity()

# ## Export e3d parameter file

mod_m1.create_e3d_file()
Examples/Creator_example.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # ## Demonstration of optimization of GTM hyperparameters with k3nerror # # <NAME> # + import matplotlib.figure as figure import matplotlib.pyplot as plt import numpy as np from sklearn.datasets import load_iris from gtm import GTM from k3nerror import k3nerror # - # settings candidates_of_shape_of_map = np.arange(30, 31, dtype=int) candidates_of_shape_of_rbf_centers = np.arange(2, 22, 2, dtype=int) candidates_of_variance_of_rbfs = 2 ** np.arange(-5, 4, 2, dtype=float) candidates_of_lambda_in_em_algorithm = 2 ** np.arange(-4, 0, dtype=float) candidates_of_lambda_in_em_algorithm = np.append(0, candidates_of_lambda_in_em_algorithm) number_of_iterations = 300 display_flag = 0 k_in_k3nerror = 10 # load an iris dataset iris = load_iris() # input_dataset = pd.DataFrame(iris.data, columns=iris.feature_names) input_dataset = iris.data color = iris.target # autoscaling input_dataset = (input_dataset - input_dataset.mean(axis=0)) / input_dataset.std(axis=0, ddof=1) # grid search parameters_and_k3nerror = [] all_calculation_numbers = len(candidates_of_shape_of_map) * len(candidates_of_shape_of_rbf_centers) * len( candidates_of_variance_of_rbfs) * len(candidates_of_lambda_in_em_algorithm) calculation_number = 0 for shape_of_map_grid in candidates_of_shape_of_map: for shape_of_rbf_centers_grid in candidates_of_shape_of_rbf_centers: for variance_of_rbfs_grid in candidates_of_variance_of_rbfs: for lambda_in_em_algorithm_grid in candidates_of_lambda_in_em_algorithm: calculation_number += 1 print([calculation_number, all_calculation_numbers]) # construct GTM model model = GTM([shape_of_map_grid, shape_of_map_grid], [shape_of_rbf_centers_grid, shape_of_rbf_centers_grid], variance_of_rbfs_grid, lambda_in_em_algorithm_grid, number_of_iterations, display_flag) 
model.fit(input_dataset) if model.success_flag: # calculate of responsibilities responsibilities = model.responsibility(input_dataset) # calculate the mean of responsibilities means = responsibilities.dot(model.map_grids) # calculate k3n-error k3nerror_of_gtm = k3nerror(input_dataset, means, k_in_k3nerror) else: k3nerror_of_gtm = 10 ** 100 parameters_and_k3nerror.append( [shape_of_map_grid, shape_of_rbf_centers_grid, variance_of_rbfs_grid, lambda_in_em_algorithm_grid, k3nerror_of_gtm]) # optimized GTM parameters_and_k3nerror = np.array(parameters_and_k3nerror) optimized_hyperparameter_number = \ np.where(parameters_and_k3nerror[:, 4] == np.min(parameters_and_k3nerror[:, 4]))[0][0] shape_of_map = [parameters_and_k3nerror[optimized_hyperparameter_number, 0], parameters_and_k3nerror[optimized_hyperparameter_number, 0]] shape_of_rbf_centers = [parameters_and_k3nerror[optimized_hyperparameter_number, 1], parameters_and_k3nerror[optimized_hyperparameter_number, 1]] variance_of_rbfs = parameters_and_k3nerror[optimized_hyperparameter_number, 2] lambda_in_em_algorithm = parameters_and_k3nerror[optimized_hyperparameter_number, 3] # construct GTM model model = GTM(shape_of_map, shape_of_rbf_centers, variance_of_rbfs, lambda_in_em_algorithm, number_of_iterations, display_flag) model.fit(input_dataset) # calculate of responsibilities responsibilities = model.responsibility(input_dataset) # plot the mean of responsibilities means = responsibilities.dot(model.map_grids) plt.figure(figsize=figure.figaspect(1)) plt.scatter(means[:, 0], means[:, 1], c=color) plt.ylim(-1.1, 1.1) plt.xlim(-1.1, 1.1) plt.xlabel("z1 (mean)") plt.ylabel("z2 (mean)") plt.show() print("Optimized hyperparameters") print("Optimal map size: {0}, {1}".format(shape_of_map[0], shape_of_map[1])) print("Optimal shape of RBF centers: {0}, {1}".format(shape_of_rbf_centers[0], shape_of_rbf_centers[1])) print("Optimal variance of RBFs: {0}".format(variance_of_rbfs)) print("Optimal lambda in EM algorithm: 
{0}".format(lambda_in_em_algorithm))
Python/demo_opt_gtm_with_k3nerror.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # 基于 GraphSage 的无监督学习 # # # GraphScope提供了处理学习任务的功能。本次教程,我们将会展示GraphScope如何使用GraphSage算法训练一个无监督学习模型。 # # 本次教程的学习任务是链接预测,通过计算在图中顶点之间存在边的概率来预测链接。 # # 在这一任务中,我们使用GraphScope内置的GraphSage算法在[PPI](https://humgenomics.biomedcentral.com/articles/10.1186/1479-7364-3-3-291)数据集上训练一个模型,这一训练模型可以用来预测蛋白质结构之间的链接。这一任务可以被看作在一个异构链接网络上的无监督训练任务。 # # 在这一任务中,GraphSage算法会将图中的结构信息和属性信息压缩为每个节点上的低维嵌入向量,这些嵌入和表征可以进一步用来预测节点间的链接。 # # 这一教程将会分为以下几个步骤: # - 建立会话和载图 # - 启动GraphScope的学习引擎,并将图关联到引擎上 # - 使用内置的GCN模型定义训练过程,并定义相关的超参 # - 开始训练 # # 首先,我们要新建一个会话,并载入数据 # + import os import graphscope k8s_volumes = { "data": { "type": "hostPath", "field": { "path": "/testingdata", "type": "Directory" }, "mounts": { "mountPath": "/home/jovyan/datasets", "readOnly": True } } } # 建立会话 graphscope.set_option(show_log=True) sess = graphscope.session(k8s_volumes=k8s_volumes) # 加载PPI图数据 graph = sess.load_from( edges={ "link": [ ( "/home/jovyan/datasets/ppi/edge.csv", [], ("src_id", "protein"), ("dst_id", "protein"), ), ] }, vertices={ "protein": ( "/home/jovyan/datasets/ppi/node.csv", ), }, ) # - # ## Launch learning engine # # 然后,我们需要定义一个特征列表用于图的训练。训练特征集合必须从点的属性集合中选取。在这个例子中,我们选择了属性集合中所有以"feat-"为前缀的属性作为训练特征集,这一特征集也是PPI数据中点的特征集。 # # 借助定义的特征列表,接下来,我们使用会话的`learning`方法来开启一个学习引擎。(`learning`方法的文档可参考[Session](https://graphscope.io/docs/reference/session.html)) # # 在这个例子中,我们在`learning`方法中,指定在数据中`protein`类型的顶点和`link`类型边上进行模型训练。 # # 利用`gen_labels`参数,我们将`protein`点数据集作为训练集。 # + # define the features for learning paper_features = [] for i in range(50): paper_features.append("feat-" + str(i)) # launch a learning engine. 
lg = sess.learning(graph, nodes=[("protein", paper_features)], edges=[("protein", "link", "protein")], gen_labels=[ ("train", "protein", 100, (0, 100)), ]) # - # # # 这里我们使用内置的GraphSage模型定义训练过程。你可以在[Graph Learning Model](https://graphscope.io/docs/learning_engine.html#data-model)获取更多内置学习模型的信息。 # # # 在本次示例中,我们使用tensorflow作为NN后端训练器。 # + import numpy as np from graphscope.learning.examples import GraphSage from graphscope.learning.graphlearn.python.model.tf.trainer import LocalTFTrainer from graphscope.learning.graphlearn.python.model.tf.optimizer import get_tf_optimizer # unsupervised GraphSage. def train(config, graph): def model_fn(): return GraphSage( graph, config["class_num"], config["features_num"], config["batch_size"], categorical_attrs_desc=config["categorical_attrs_desc"], hidden_dim=config["hidden_dim"], in_drop_rate=config["in_drop_rate"], neighs_num=config["neighs_num"], hops_num=config["hops_num"], node_type=config["node_type"], edge_type=config["edge_type"], full_graph_mode=config["full_graph_mode"], unsupervised=config['unsupervised'], ) trainer = LocalTFTrainer( model_fn, epoch=config["epoch"], optimizer=get_tf_optimizer( config["learning_algo"], config["learning_rate"], config["weight_decay"] ), ) trainer.train() embs = trainer.get_node_embedding() np.save(config['emb_save_dir'], embs) # define hyperparameters config = { "class_num": 128, # output dimension "features_num": 50, "batch_size": 512, "categorical_attrs_desc": "", "hidden_dim": 128, "in_drop_rate": 0.5, "hops_num": 2, "neighs_num": [5, 5], "full_graph_mode": False, "agg_type": "gcn", # mean, sum "learning_algo": "adam", "learning_rate": 0.01, "weight_decay": 0.0005, 'unsupervised': True, "epoch": 1, 'emb_save_dir': './id_emb', "node_type": "protein", "edge_type": "link", } # - # ## Run training process # # # 在定义完训练过程和超参后,现在我们可以使用学习引擎和定义的超参开始训练过程。 train(config, lg) # # 训练完毕后,需要关掉会话 sess.close()
tutorials/zh/6_unsupervised_learning_with_graphsage.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + tags=["hide-input"] import matplotlib.pyplot as plt import numpy as np import pandas as pd from makeitpop import makeitpop, update_derivatives, cmaps import seaborn as sns sns.set(font_scale=2, style='white') # Get the perceptual derivatives we use for popping so we can visualize derivatives, derivatives_scaled = update_derivatives(cmaps) # - # # Introducing _makeitpop_, a tool to perceptually warp your data!" # # :::{note} # It should go without saying, but **you should never do the stuff that you're about to read about here**. Data is meant to speak for itself, and our visualizations should accurately reflect the data above all else.* # ::: # # When I was in graduate school, I tended to get on my soapbox and tell everybody # why they should [stop using Jet](http://jakevdp.github.io/blog/2014/10/16/how-bad-is-your-colormap/) # and adopt a "perceptually-flat" colormap like [viridis, magma, or inferno](https://bids.github.io/colormap/). # # Surprisingly (ok, maybe not so surprisingly) I got a lot of pushback from people. Folks would say _"But I like **jet**, it really highlights my data, it makes the images 'pop' more effectively than viridis!"_. # # Unfortunately it turns out that when a colormap "makes your data pop", it really just means "[warps your perception of the visualized data so that you see non-linearities when there are none](https://bids.github.io/colormap/)". AKA, a colormap like Jet actually _mis-represents_ the data. # # But what does this really mean? It's difficult to talk and think about coor - especially when it comes to relating color with objective relationships # between data. Rather than talking about colormaps in the abstract, what if we could *visualize* the warping that is performed by colormaps like Jet? 
# # In this post I'll show that this is possible! Introducing **`makeitpop`**. # # # ## What does _makeitpop_ do? # # Makeitpop lets you apply the same perceptual warping that would _normally_ be accomplished # with a colormap like Jet, but applies this warping to _the data itself_! This lets us # get the same effect with a nice linear colormap like `viridis`! # # For example, let's take a look at the [image demo in matplotlib](https://matplotlib.org/gallery/images_contours_and_fields/image_demo.html#sphx-glr-gallery-images-contours-and-fields-image-demo-py). In it, we create two blobs that are meant to be visualized as an image. We'll visualize this with our old friend viridis. # + # Create a mesh grid with two gaussian blobs delta = 0.025 x = y = np.arange(-3.0, 3.0, delta) X, Y = np.meshgrid(x, y) Z1 = np.exp(-X**2 - Y**2) Z2 = np.exp(-(X - 1)**2 - (Y - 1)**2) Z = (Z1 - Z2) * 2 # Visualize it fig, ax = plt.subplots(figsize=(5, 5)) ax.imshow(Z, cmap=plt.cm.viridis, origin='lower', extent=[-3, 3, -3, 3], vmax=abs(Z).max(), vmin=-abs(Z).max()) ax.set(title="Original data\nlinear colormap") plt.tight_layout() # - # Hmmm, not too bad...but it's a bit _boring_, no? Why can't we make it snazzier? I know, let's use Jet! # + tags=["hide-input"] # Visualize our data fig, ax = plt.subplots(figsize=(5, 5)) ax.imshow(Z, cmap=plt.cm.jet, origin='lower', extent=[-3, 3, -3, 3], vmax=abs(Z).max(), vmin=-abs(Z).max()) ax.set(title="Original data\nnon-linear colormap") plt.tight_layout() # - # Oooh now that's what I'm talking about. You can clearly see two peaks of significant results # at the center of each circle. Truly this is fit for publishing in _Nature_. # # But...as you all know, this data only _looks_ better because we've used a colormap that distorts # our perception of the underlying data. # # Let's illustrate this by **making it pop**! # + tags=["hide-input"] # Pop the data! 
Z_popped = makeitpop(Z, 'jet', scaling_factor=20) # Visualize the warped data fig, ax = plt.subplots(figsize=(5, 5)) ax.imshow(Z_popped, cmap=plt.cm.viridis, origin='lower', extent=[-3, 3, -3, 3], vmax=abs(Z).max(), vmin=-abs(Z).max()) ax.set(title="Warped data\nlinear colormap") plt.tight_layout() # - # Excellent! We're using a nice, perceptually-flat colormap like viridis, but we've attained # an effect similar to the one that *Jet* would have created! # # Now let's visualize all three next to each other so that we can see the total effect: # + tags=["hide-input"] fig, axs = plt.subplots(1, 3, figsize=(15, 5), sharey=True) kws_img = dict(extent=[-3, 3, -3, 3], origin='lower', vmax=abs(Z).max(), vmin=-abs(Z).max()) # Raw data with a perceptually-flat colormap axs[0].imshow(Z, cmap=plt.cm.viridis, **kws_img) axs[0].set(title="Original data\nlinear colormap") # Raw data with a perceptually-distorted colormap axs[1].imshow(Z, cmap=plt.cm.jet, **kws_img) axs[1].set(title="Original data\nnon-linear colormap") # Distorted data with a perceptually-flat colormap axs[2].imshow(Z_popped, cmap=plt.cm.viridis, **kws_img) axs[2].set(title="Warped data\nlinear colormap") plt.tight_layout() # - # ## Let's see it in the real world # # Thus far I've been using toy examples to illustrate how makeitpop works. Let's see how # things look on an actual dataset collected in the wild. # # For this, we'll use the excellent [nilearn](https://nilearn.github.io/) package. This has a # few datasets we can download to demonstrate our point. 
First we'll load the data and prep it: # + from nilearn import datasets from nilearn import plotting import nibabel as nb # Load a sample dataset tmap_filenames = datasets.fetch_localizer_button_task()['tmaps'] tmap_filename = tmap_filenames[0] # Threshold our data for viz brain = nb.load(tmap_filename) brain_data = brain.get_fdata() mask = np.logical_or(brain_data < -.01, brain_data > .01) # - # Next, we'll create a "popped" version of the data, where we apply the non-linear warping # properties of *Jet* to our data, so that we can visualize the same effect in linear space. # Create a copy of the data, then pop it # We'll use a scaling factor to highlight the effect brain_popped = brain_data.copy() brain_popped[mask] = makeitpop(brain_popped[mask], colormap='jet', scaling_factor=75) brain_popped = nb.Nifti1Image(brain_popped, brain.affine) # Now, I'll plot the results for each. # # * First, we'll see the raw data on a linear colormap. This is the way we'd display the data to show the true underlying relationships between datapoints. # * Next, we'll show the same data plotted with *Jet*. See how many more significant voxels there are! (/s) # * Finally, we'll plot the "popped" data using a linear colormap (viridis). This accurately represents the underlying data, but the data _itself_ has been distorted! # + tags=["hide-input"] for i_brain, name in [(brain, 'original'), (brain, 'original (jet)'), (brain_popped, 'popped brain')]: cmap = 'jet' if name == 'original (jet)' else 'viridis' plotting.plot_stat_map(i_brain, cmap=cmap, vmax=7, display_mode='x', title=name, cut_coords=np.linspace(20, 50, 5, dtype=int)) # - # As you can see, different kinds of results show up when your perception of the data is affected by the colormap. # ## How does this work? # # OK, so what is the black voodoo magic that makes it possible to "make your data pop"? # # It all comes down to your visual system. 
I won't go into a ton of detail # because <NAME> and <NAME> [already gave a great talk about this](https://www.youtube.com/watch?v=xAoljeRJ3lU), however here is a lay-person's take: # # When we use color to represent data, we are mapping a range of data values onto # a range of color values. Usually this means defining a min / max for our data, then mapping data # values linearly from 0 to 1, and finally mapping those values onto RGB values in a colormap. # # Implicit in this process is the idea that stepping across our space in the _data_ equates to # an equal step in our _perception_ of the color that is then chosen. We want a one-to-one mapping between the two. # # Unfortunately, this isn't how our visual system works. # # In reality, our brains do all kinds of strange things when interpreting color. They are biased to detect changes between particular kinds of colors, and biased to miss the transition between others. # # *Jet* uses a range of colors that highlight this fact. It transitions through colors such that _linear_ changes in our data are perceived as _nonlinear_ changes when we look at the visualization. That's what makes the data "pop". # # ## Perceptual "delta" curves # # You can determine the extent to which a colormap "warps" your perception of the data by calculating the "perceptual deltas" as you move across the values of a colormap (e.g. as you move from 0 to 1, and their corresponding colors). # # These deltas essentially mean "how much is the _next_ color in the colormap perceived as different from the _current_ color?" If your colormap is perceptually flat, the delta will be the *same* no matter where you are on the range from 0 to 1. 
# # Let's see what the deltas look like for *Jet*: # + tags=["hide-input"] def plot_colormap_deltas(deltas, cmap, ax=None): if ax is None: fig, ax = plt.subplots(figsize=(10, 5)) xrange = np.arange(len(derivatives)) sc = ax.scatter(xrange, deltas, c=xrange, vmin=xrange.min(), vmax=xrange.max(), cmap=plt.cm.get_cmap(cmap), s=20) ax.plot(xrange, deltas, c='k', alpha=.1) return ax ax = plot_colormap_deltas(derivatives['jet'].values, 'jet') ylim = ax.get_ylim() # So we can compare with other colormaps ax.set(title="Perceptual deltas with Jet") # - # Oops. # # As you can see, Jet does **not** have a flat line for perceptual deltas. Each "jump" you see above is a moment where Jet is actually _mis-representing_ differences in the data. For shame, Jet. # # Now let's see what this looks like for viridis: # + tags=["hide-input"] ax = plot_colormap_deltas(derivatives['viridis'].values, 'viridis') ax.set(ylim=ylim, title="Perceptual deltas with viridis"); # - # Ahhh, sweet, sweet linear representation of data. # # In case you're curious, here are the "perceptual deltas" for several colormaps. # In this case, I've centered them and scaled each by the variance of the largest colormap, # so that they are easier to compare. # + tags=["hide-input"] fig, ax = plt.subplots(figsize=(10, 5)) for cmap, deltas in derivatives_scaled.items(): if cmap == 'linear': continue ax = plot_colormap_deltas(deltas.values, cmap, ax=ax) ax.set(title="Scaled perceptual deltas for a bunch of colormaps"); # - # ## We can even warp 1-dimensional data! # # Let's see how this principle affects our perception with a different kind of visual # encoding. Now that we know these perceptual warping functions, we can get all the # data-warping properties of jet, but in one dimension! # # Here's a line. # + tags=["hide-input"] fig, ax = plt.subplots(figsize=(5, 5)) x = np.linspace(0, 1, 100) ax.plot(x, x, 'k-', lw=8, alpha=.4, label='True Data') ax.set_title('Totally boring line.\nNothing to see here.'); # - # Ew. 
Boring. # # Now, let's make it pop! We'll loop through a few colormaps, applying its color # warping function to the y-axis of our line as we step through it. # + tags=["hide-input"] names = ['jet', 'viridis', 'rainbow', 'spring', 'hsv'] fig, ax = plt.subplots(figsize=(10, 10)) x = np.linspace(0, 1, 1000) ax.plot(x, x, 'k-', lw=12, alpha=.4, label='True Data') for nm in names: ax.plot(x, makeitpop(x, colormap=nm, scaling_factor=40), label=nm, lw=4) ax.legend(loc=(1.05, .6)) ax.set_title('Making data "pop" is fun!') # - # As you can see, data looks much more interesting when it's been non-linearly warped! # It looks particularly striking when you see it on a 1-D plot. This is effectively # what colormaps such as Jet are doing in 2 dimensions! We're simply bringing the fun # back to 1-D space. # # Let's see how it looks on some scatterplots. We'll plot the raw data in the background in grey, # and the "popped" data in front in color. Notice how some colormaps distort the y-values more # than others. # + tags=["hide-input"] names = ['viridis', 'jet', 'rainbow', 'spring', 'hsv'] fig, axs = plt.subplots(1, len(names), figsize=(20, 4), sharex=True, sharey=True) x = np.linspace(0, 1, 100) y = x + np.random.randn(len(x)) * .2 for name, ax in zip(names, axs): ax.scatter(x, y, c='k', s=80, alpha=.2) ax.scatter(x, makeitpop(y, name, 40), c=y, s=40, alpha=.8, cmap=plt.get_cmap(name)) ax.set(title=name) # - # ## So what should we do? # # The reason that I wrote this blog post (and made this silly package) is to illustrate what we're really # doing when we use a colormap like *Jet*, and to highlight the importance of using a perceptually-flat colormap. Sure, we want to choose the visualization that best-makes our point, # but a colormap like Jet is _actively mis-representing your data_. You'd _never_ consider changing the raw # data values so that an effect popped out, and you'd _never_ alter the y-values of a scatterplot so that something shows up. 
Well, this is perceptually what you're doing when you visualize 2-D data with Jet. # # Here are a few things to keep in mind moving forward: # # * Don't use Jet # * If you review a paper or are an editor for a journal, consider asking authors to use a perceptually flat colormap (this is usually just a matter of changing `cmap='viridis'`!) # * Be aware of the effects that color has on the point you're trying to make. Perceptual warping is # just one of many potential issues with choosing the right color. # # ## Wrapping up # # I hope that this post has been a fun and slightly informative take on the nuances of colormaps, and the unintended effects that they might have. # # So, `tl;dr`: # # * Jet (and many other colormaps) mis-represent your perception of the data # * Perceptually flat colormaps like Viridis, Magma, Inferno, or Parula minimize this effect # * You can calculate the _extent_ to which this mis-representation happens as you move along the colormap # * We can then use this function to _distort_ data so that the data itself contains this mis-representation # * But doing so would be super unethical, so in the end **you should stop using jet and use a perceptually-flat colormap like viridis.** # # # # If you'd like to check out the `makeitpop` package, see the [GitHub repo here](https://github.com/choldgraf/makeitpop). In addition, all of the examples in this post are runnable # on Binder! You can launch an interactive session with this code by clicking on the Binder button # at the top of this page! # ## Addendum: Ok but how does `makeitpop` _actually_ work? # # In this section I'll describe the (admittedly hacky) way that I've written `makeitpop`. # As I mentioned before, [all the `makeitpop` code is on GitHub](https://github.com/choldgraf/makeitpop) and # Pull Requests are more than welcome to improve the process (I'm looking at you, "histogram matching" people!) # # Here's what `makeitpop` does: # # 1. Collects a list of the "perceptual deltas". 
These are calculated from the equations given in [`viscm`](https://github.com/matplotlib/viscm), which was released as a part of the original work that created `viridis`. # 2. Centers each colormap's deltas at 0. # 3. Scales each colormap's deltas by the _largest variance_ across all colormaps. This is to make sure that warping the data is done with the relative differences of each colormap in mind. # 4. When `makeitpop` is called, the function then: # 1. Scales the input data linearly between 0 and 1 # 2. Calculates the point-by-point derivative for linearly spaced points between 0 and 1 (the derivative is the same for all points here). # 3. Multiplies each derivative by the scaled perceptual deltas function, plus an extra scaling factor that accentuates the effect. # 4. Adds each value to the scaled input data. # 5. Un-scales the altered input data so that it has the same min/max as before. # # There are plenty of ways you could do this more effectively (for example, by matching empirical CDFs and # using linear interpolation to map the delta function of one colormap onto the delta function for another # colormap). If you'd like to contribute or suggest something, feel free to do so! However, I'm just creating # this package to highlight an idea, and think this approach gets close enough with relatively little complexity. # # # Some extras # Here's a viz that will let you visualize how different colormaps distort data. We'll show a # gradient of linearly-spaced values, both using a warping colormap such as "jet" and a linear # colormap like "vidiris". Then, we'll "pop" the data and re-visualize with viridis. 
# + tags=["hide-input"] # Create a gradient of datapoints data = np.linspace(0, 1, 256) data = np.tile(data, [100, 1]) # Pop the data cmap_warping = 'jet' scaling_factor = 50 data_popped = makeitpop(data, cmap_warping, scaling_factor) # Visualize fig, axs = plt.subplots(6, 1, figsize=(10, 15), sharex=True) axs[0].pcolormesh(derivatives_scaled.index, range(len(data)), data, vmin=0, vmax=1, cmap=cmap_warping) axs[0].set(title="Raw data with colormap {}".format(cmap_warping)) axs[1].plot(derivatives_scaled.index, derivatives_scaled[cmap_warping].values) axs[1].set(xlim=[0, 1], ylim=[-1.1, 1.1], title="First row of image") axs[2].pcolormesh(derivatives_scaled.index, range(len(data)), data, vmin=0, vmax=1, cmap='viridis') axs[2].set(title="Raw data with colormap {}".format('viridis')) axs[3].plot(derivatives_scaled.index, data[0]) axs[3].set_xlim([0, 1]) axs[4].pcolormesh(derivatives_scaled.index, range(len(data)), data_popped, vmin=0, vmax=1, cmap='viridis') axs[4].set(title="Data warped with colormap {}".format(cmap_warping)) axs[5].plot(derivatives_scaled.index, data_popped[0]) axs[5].set_xlim([0, 1]) plt.tight_layout()
posts/2018/2018-06-04-makeitpop.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + import numpy as np import sys import cv2 from matplotlib import pyplot as plt import math from math import sin,cos,radians #Move to parent folder sys.path.insert(0, '../../croprows-cli/src/') # - def label(image, text): #Labels the given image with the given text return cv2.putText(image, text, (0, 50), cv2.FONT_HERSHEY_SIMPLEX, 2, 255) def contrast_stretch(im): #Performs a simple contrast stretch of the given image, from 5-95%. in_min = np.percentile(im, 5) in_max = np.percentile(im, 95) out_min = 0.0 out_max = 255.0 out = im - in_min out *= ((out_min - out_max) / (in_min - in_max)) out += in_min return out def disp_multiple(im1=None, im2=None, im3=None, im4=None): """ Combines four images for display. """ height, width = im1.shape combined = np.zeros((2 * height, 2 * width, 3), dtype=np.uint8) combined[0:height, 0:width, :] = cv2.cvtColor(im1, cv2.COLOR_GRAY2RGB) combined[height:, 0:width, :] = cv2.cvtColor(im2, cv2.COLOR_GRAY2RGB) combined[0:height, width:, :] = cv2.cvtColor(im3, cv2.COLOR_GRAY2RGB) combined[height:, width:, :] = cv2.cvtColor(im4, cv2.COLOR_GRAY2RGB) return combined img = cv2.imread("../orthomosaics/results/testfile3/mosaic_5_5_1077px_25_tiles/mosaic-cl_3-rw_2.jpg") plt.figure(figsize=(7, 7)) plt.imshow(img) plt.show() # + # Get the individual colour components of the image b, g, r = cv2.split(img) plt.figure(figsize=(7, 7)) plt.imshow(r) plt.show() plt.figure(figsize=(7, 7)) plt.imshow(g) plt.show() plt.figure(figsize=(7, 7)) plt.imshow(b) plt.show() # + # Calculate the NDVI # Bottom of fraction bottom = (r.astype(float) + b.astype(float)) bottom[bottom == 0] = 0.01 # Make sure we don't divide by zero! 
ndvi = (r.astype(float) - b) / bottom ndvi = contrast_stretch(ndvi) ndvi = ndvi.astype(np.uint8) # Do the labelling label(b, 'Blue') label(g, 'Green') label(r, 'NIR') label(ndvi, 'NDVI') # Combine ready for display combined = disp_multiple(b, g, r, ndvi) # - plt.figure(figsize=(7, 7)) plt.imshow(combined) plt.show() # + img_yuv = cv2.cvtColor(img, cv2.COLOR_BGR2YUV) # # equalize the histogram of the Y channel img_yuv[:,:,0] = cv2.equalizeHist(img_yuv[:,:,0]) # # convert the YUV image back to RGB format img_output = cv2.cvtColor(img_yuv, cv2.COLOR_YUV2BGR) plt.figure(figsize=(7, 7)) plt.imshow(img_output) plt.show() # + gamma=0.4 invGamma = 1.0 / gamma table = np.array([((i / 255.0) ** invGamma) * 255 for i in np.arange(0, 256)]).astype("uint8") lut = cv2.LUT(img, table) plt.figure(figsize=(7, 7)) plt.imshow(lut) plt.show() # + def hisEqulColor(img): ycrcb=cv2.cvtColor(img,cv2.COLOR_BGR2YCR_CB) channels=cv2.split(ycrcb) print(len(channels)) cv2.equalizeHist(channels[0],channels[0]) cv2.merge(channels,ycrcb) cv2.cvtColor(ycrcb,cv2.COLOR_YCR_CB2BGR,img) return img hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV) plt.figure(figsize=(7, 7)) plt.imshow(hsv) plt.show() his = hisEqulColor(img) plt.figure(figsize=(7, 7)) plt.imshow(his) plt.show() his2 = hisEqulColor(lut) plt.figure(figsize=(7, 7)) plt.imshow(his2) plt.show() # -
test/notebooks/Unit_Image_Equalize.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + import pandas as pd company_df = pd.read_csv("full company data.csv", parse_dates=["Date"], index_col="Date") competitor_df = pd.read_csv("full competitor data.csv", parse_dates=["Date"], index_col="Date") market_df = pd.read_csv("market data.csv", parse_dates=["Date"], index_col="Date") company_df # - company_df.mean().round(2) company_df.describe().round(2) company_df["Market Share pc"] = company_df["Revenue"] / market_df["Revenue"] * 100 company_df company_df["Market Share pc"] company_df["Percent Change"] = (company_df["Market Share pc"] - company_df["Market Share pc"][0]) / company_df["Market Share pc"][0] *100 company_df #check if there is correlation between the add spent and revenue company_df["Ad Spent"].corr(company_df["Percent Change"]) # + competitor_df["Market Share pc"] = competitor_df["Revenue"] / market_df["Revenue"] * 100 competitor_df # - competitor_df["Percent Change"] = (competitor_df["Market Share pc"] - competitor_df["Market Share pc"][0]) / competitor_df["Market Share pc"][0] *100 competitor_df competitor_df["Ad Spent"].corr(competitor_df["Percent Change"]) competitor_df["Profit Margins"].corr(competitor_df["Market Share pc"]) company_df["Profit Margins"].corr(company_df["Market Share pc"]) company_df["Profit Margins"].corr(company_df["Revenue"]) company_df["Products launched"].corr(company_df["Revenue"]) market_df market_df["Ad Spent"].corr(market_df["Revenue"]) market_df["Products launched"].corr(market_df["Revenue"]) competitor_df.to_csv("fully complete competitor data") company_df.to_csv("fully complete company data")
Section 3 Code/.ipynb_checkpoints/Applying mathematical operations 3-checkpoint.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: kamjiPython3 # language: python # name: kamjipython3 # --- import torch import numpy as np import matplotlib.pyplot as plt from dcgan_main import * params = torch.load('CelebA_DCGAN_results/generator_param.pkl') G=generator() G.load_state_dict(params) z=torch.randn((1, 100)).view(-1, 100, 1, 1) img1=G(z) img1=(img1.cpu().detach().numpy().squeeze().transpose(1,2,0)+1)/2.0 plt.imshow(img1) z_saved=z.clone() # + z1=torch.randn((1, 100)).view(-1, 100, 1, 1) z2=torch.randn((1, 100)).view(-1, 100, 1, 1) #linear interpolation fig=plt.figure(figsize=(8, 8)) columns = 6 rows = 6 for i in range(1, columns*rows +1): rate=i/(columns*rows) temp=z1*rate+z2*(1-rate) #z1과 z2 사이를 등분해서 이동 img=G(temp) img=(img.cpu().detach().numpy().squeeze().transpose(1,2,0) +1) /2.0 fig.add_subplot(rows, columns, i) plt.imshow(img) plt.show()
generate.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Merging East River Tree Stem Geolocation Points # **Author:** '<NAME>' <br> # **Creation Date:** '09/21/2020' <br> # **Revision Date:** '12/22/2020' <br> # # --- # # ## Contents # # 1 - [Front matter](#front)<br> # 2 - [Libraries](#libraries)<br> # 3 - [Import reference table](#import)<br> # 4 - [Exploratory analysis](#eda)<br> # 5 - [Rename and move](#rename)<br> # 6 - [Prepare for append](#prep)<br> # 7 - [Append](#append)<br> # # --- # # ## Front matter<a id='front'></a> # # This notebook contains markdown and code for post-processing point shapefiles generated from Trimble Geo7X GPS acquisitions in the East River domain. The script appends the `Site` name and `subdirectory` to each shapefile name, then selects all projected point shapefiles, groups them by `Site` name, and merges points from the same site. The result is a set of shapefiles containing tree geolocation points, one set for each site in the watershed where stem geolocations were acquired from 2018–2020. Most output files contain some extraneous points marking corners and errata, which are cleaned out in '00_EastRiver_Clean_Tree_GPSPoints.ipynb'. # # The script was developed in `Python 3.8.2` on a Macbook Pro 2014 running OSX 10.14.6. 
# # ## Libraries<a id='libraries'></a> # + tags=[] import os import pandas as pd import geopandas as gpd import numpy as np import math import re from matplotlib import pyplot as plt from os.path import join, getsize # %matplotlib inline import ipywidgets as widgets from ipywidgets import interact, interactive, fixed, interact_manual # - # Define the working directory and list contents os.getcwd() directory = os.sep.join(['/Users', 'hmworsham', 'Desktop', 'RMBL', 'Projects', 'Watershed_Spatial_Dataset']) source_dir = os.sep.join([directory, 'Source']) out_dir = os.sep.join([directory, 'Output']) gps_dir = os.sep.join([source_dir, 'RMBL_GPS_Data_All']) os.listdir(gps_dir)[0:10] # ## Import reference table<a id='reference'></a> # First we import a CSV describing filenames and associated sites. Then we slice to create a simple list of filenames and the site at which the data inside those files were acquired. # gps_index = pd.read_csv(os.sep.join([source_dir, 'EastRiver_GPS_Data_Index.csv'])) gps_index.loc[:, 'Filename':'Site'].head(10) # ## Exploratory analysis<a id='eda'></a> # Some simple exploratory analysis reveals see how many unique files are associated with each site. gps_index.groupby('Site').count()['Filename'] # + tags=[] # Scratch to set up the syntax for the function below that will relate filenames in the directory to filenames and site associations in the index dataframe gps_index_sites = gps_index.loc[:,'Filename':'Site'] gps_index_sites.loc[gps_index_sites['Filename'] == 'WORSHAMM071610A'].iloc[0, 1] # - # List the filenames in all subdirectories of `directory` by walking the subdirectories and string-splitting on the last `/` in the path to isolate filenames. 
# + tags=[]
# Walk the GPS data tree and preview the subdirectory names that will be
# used as lookup keys into the reference table.
for subdir, dirs, files in os.walk(gps_dir):
    for filename in files:
        subdir_name = subdir.rsplit('/', 1)[-1]
        print(subdir_name)
# -

# ## Rename and move<a id='rename'></a>
# The loop below finds the name of the `subdirectory` that each shapefile lives in
# and finds the `Site` with which that subdirectory is associated. It renames
# each shapefile by appending the `subdirectory` name and `Site` name to the
# filename, then moves all files into a single directory.

gps_dir

# + tags=[]
# Loop-invariant slice of the reference table (Filename -> Site); hoisted out
# of the walk so it is not rebuilt for every file.
gps_index_sites = gps_index.loc[:, 'Filename':'Site']
for subdir, dirs, files in os.walk(gps_dir):
    for filename in files:
        subdir_name = subdir.rsplit('/', 1)[-1]
        # Look up the site for this subdirectory; strip the ndarray repr
        # brackets/quotes to get a bare string.
        index_sitename = str(gps_index_sites.loc[gps_index_sites['Filename'] == subdir_name, 'Site'].values).strip("[]").strip("'")
        newname = subdir_name + '_' + index_sitename + '_' + filename
        oldpath = subdir + os.sep + filename
        newpath = subdir + os.sep + newname
        print(oldpath)
        print(newname)
        print(newpath)
        os.rename(oldpath, newpath)
        # Report files that are not Line/Area/Icon artifacts.
        # BUGFIX: the original printed `filepath`, which is never defined
        # anywhere in the notebook (NameError); the renamed path is the
        # value we want to report.
        if not re.search('Line.+', filename) and not re.search('Area.+', filename) and not re.search('Icon.+', filename):
            print(newpath)
# -

# ## Prepare for append<a id='prep'></a>

# list all files
renamed_dir = '/Users/hmworsham/Desktop/RMBL/Projects/Watershed_Spatial_Dataset/Scratch/RMBL_GPS_Data_RENAMED'
allfiles = os.listdir(renamed_dir)
allfiles[0:10]
#allfiles

# + tags=[]
# generate a list of unique site names represented in the dataset
sitelist = gps_index['Site'].unique().tolist()
sitelist

# + tags=[]
# filter only shapefiles containing point data types in the correct projection
# the target filenames will contain the tag "Project" AND either of the strings "Student" or "Point"
# filenames without "Student" and filenames containing "Area" and "Line" strings will be filtered out
point_str = 'Point_'
stud_str = 'Student'
proj_str = '_Project'
sf_allpoint = [i for i in allfiles if ((point_str in i or stud_str in i) and proj_str in i)]
print(len(allfiles))
print(len(sf_allpoint))
sf_allpoint # + tags=[] # manipulate the gps_index dataframe notcorners = gps_index[~gps_index['Contents'].str.contains('corner')] # filter out names of subdirs containing corners notcorners = notcorners['Filename'].to_list() print(notcorners[:10]) print(len(sf_allpoint)) print(len(notcorners)) # - # find files representing trees only, with .shp extension trees_allfiles = [i for i in sf_allpoint if any(ii in i for ii in notcorners)] trees_sf = [t for t in trees_allfiles if t.endswith('.shp')] # + tags=[] print(len(trees_sf)) print(trees_sf[:10]) # - # ## Append<a id='append'></a> # # 1. group files according to site name by finding common value from sitelist in `matches` string # 2. for each site, select the first shapefile and assign it as base object # 3. append all other shapefiles to the base object with `gpd.append()` # 4. project crs to wgs84 utm zone 13 # 4. export the gpdf as a shapefile named: sitelist[i] + '_' + 'TreeStem_pts_WGS84UTM13.shp' # + tags=[] # add full path to all filenames trees_sf_paths = [renamed_dir + os.sep + i for i in trees_sf] trees_sf_paths[:5] # - # group files by plot using list comprehension trees_sf_grouped = [[s for s in trees_sf_paths if key in s] for key in set(sitelist)] trees_sf_grouped = [i for i in trees_sf_grouped if len(i) != 0] # filter out a few artifact empty lists # + tags=[] # output list of grouped tree shapefiles in directory trees_sf_grouped # - # aggregate, import, and append alltrees_gpdf = [] for thing in trees_sf_grouped: site_gpdf = [] for i in thing: gpdf = gpd.read_file(i) site_gpdf.append(gpdf) alltrees = site_gpdf[0].append(site_gpdf[1:]) alltrees.to_crs(epsg = 32613, inplace = True) alltrees = alltrees.loc[alltrees.geom_type == 'Point'] site = [s for s in sitelist if s in thing[0]][0] alltrees['Site'] = site alltrees.to_file('/Users/hmworsham/Desktop/RMBL/Projects/Watershed_Spatial_Dataset/Scratch/RMBL_GPS_Data_MERGEDBYPLOT/' + site + '.shp') alltrees_gpdf.append(alltrees)
Forest_Inventory_Dataset/Tree_Geolocations/00_EastRiver_Merge_Tree_GPSPoints.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- import pandas as pd import numpy as np import matplotlib.pyplot as plt import sympy sympy.init_printing(use_latex='mathjax') # %matplotlib inline # ## 적분 # - 적분(integral)은 미분과 반대되는 개념이다. # - 부정적분(indefinite integral) # - 정적분(definite integral) # #### 부정적분(indefinite integral) # - 부정적분은 정확하게 미분과 반대되는 개념, 즉 반-미분(anti-derivative)이다. 함수 f(x)가 어떤 함수를 미분하여 나온 결과인 도함수라고 가정하고 이 도함수 f(x)에 대한 미분되기 전의 원래의 함수를 찾는 과정(integration), 또는 그 결과 (integral)을 말한다. # - 부정적분으로 찾은 원래의 함수를 표기할 때는 도함수를 대문자화하여 표기할 때도 있지만, 다음처럼 Integral기호로 나타내는 것이 일반적이다. 여기에서 도함수가 f(x)이므로 미분하기 전의 함수를 F(x)또는 Sf(x)dx로 쓴다. dx는 x라는 변수로 적분했다는 것을 나타내는 기호로 편미분에 대응하는 적분을 표기할 때 필요하다. # - dF(x)/dx = f(x) <-> F(x) = Sf(x)dx + C # C는 상수값 # + # 연습문제1 # 부정적분을 구하라 # 1. S3x^2dx # 2. S(3x^2-6x+1)dx # 3. S(2+6x+4exp(x) + 5/x)dx # 4. S((2x)/x^2-1)dx # - # #### 편미분의 도함수 # - 편미분을 한 도함수에서 원래의 함수를 찾을 수도 있다. 다음 식은 f(x,y)가 원래의 함수를 어떻게 미분했는지에 따라 원래의 함수를 표기하는 방법이 달라진다는 것을 나타낸다. # - 만약 f(x,y)가 함수 F1(x,y)를 x로 편미분한 함수였다면 이 함수를 찾는 식은 다음과 같다. # - roundF1(x,y)/round(x) = f(x,y) <-> F1(x,y) = Sf(x,y)dx + C(y) # - 주의할 점은 상수항 C(y)가 y의 함수일 수 있다는 점이다. C(y)는 x없이 y만으로 이루어진 함수를 뜻한다. y만의 함수는 x로 편미분하면 0이 되기 때문이다. 물론 반드시 y의 함수이어야 하는 것은 아니고 단순한 숫자 상수일 수도 있다. # - 마찬가지로 만약 f(x, y)가 함수 F2(x,y)를 y로 편미분한 함수였다면 이 함수를 찾는 식은 다음과 같다 # - round(f2(x,y)/round(y) = f(x,y) <-> F2(x,y) = Sf(x,y)dy + C(x) # + # 연습문제2 # 다음 부정적분을 구하라 # 1. S(1+xy)dx # 2. S(xyexp(x^2 + y^2)dx) # - # #### 다차 도함수와 다중 적분 # - 미분을 여러번 한 결과로 나온 다차 도함수로부터 원래의 함수를 찾아내려면 여러번 적분을 하는 다중 적분(multiple integration)이 필요함. # - 예를 들어 f(x,y)가 함수 F3(x,y)를 x로 한번 편미분한 후, y로 다시 편미분하여 나온 이차 도함수였다면 이 함수를 찾는 식은 다음과 같다. 
# - d^2 F3(x,y) / dx dy = f(x,y)  <->  F3(x,y) = SS f(x,y) dx dy
# +
# Exercise 3
# Find the following indefinite integral:
# SS x*y*exp(x^2 + y^2) dx dy
# -

# ## Indefinite integration with SymPy
# #### The following example computes an indefinite integral with SymPy's
# `integrate` command. The constant of integration is not returned.
import sympy
sympy.init_printing(use_latex='mathjax')

x = sympy.symbols('x')
f = x * sympy.exp(x) + sympy.exp(x)
f
sympy.integrate(f)

x, y = sympy.symbols('x y')
f = 2 * x + y
sympy.integrate(f, x)

# +
# Exercise 4
# Use SymPy to verify the answers to the exercises worked so far.
# +
import sympy
sympy.init_printing(use_latex='mathjax')

# Exercise 1.1: S 3x^2 dx
x, y = sympy.symbols('x y')
f = 3* x ** 2
sympy.integrate(f, x)
# -

# Exercise 1.2: S (3x^2 - 6x + 1) dx
f = 3*x**2 - 6*x + 1
sympy.integrate(f, x)

# Exercise 1.3: S (2 + 6x + 4exp(x) + 5/x) dx
f = 2+6*x + 4*sympy.exp(x) + 5/x
sympy.integrate(f, x)

# Exercise 1.4: S (2x)/(x^2 - 1) dx
# BUGFIX: the original wrote 2*x / x ** 2 - 1, which parses as (2/x) - 1 and
# integrates to 2*log(x) - x, not the stated integrand 2x/(x^2-1) whose
# integral is log(x^2 - 1).
f = 2*x / (x ** 2 - 1)
sympy.integrate(f, x)

# Exercise 2.1: S (1 + xy) dx
f = 1 + x*y
sympy.integrate(f, x)

# Exercise 2.2: S xy*exp(x^2 + y^2) dx
f = x*y*sympy.exp(x**2 + y**2)
sympy.integrate(f, x)

# +
# Exercise 3: SS xy*exp(x^2 + y^2) dx dy (double indefinite integral)
import sympy
sympy.init_printing(use_latex='mathjax')
f = x*y*sympy.exp(x**2 + y**2)
sympy.integrate(f, x, y)
# -

# ## Definite integral
# - The definite integral is the area enclosed between the curve f(x) and the
#   x-axis over an interval [a, b] of the independent variable x:
# - S[a,b] f(x) dx

# +
# Definite integral: shade the area under f(x) = x^3 - 3x^2 + x + 6 on [0, 2].
# +
from matplotlib.patches import Polygon


def f(x):
    return x**3 - 3*x ** 2 + x + 6


a, b = 0, 2
x = np.linspace(a - 0.5, b + 0.5, 50)
y = f(x)

ax = plt.subplot(111)
plt.plot(x, y, 'r', linewidth=2)
# FIX: Matplotlib >= 3.0 removed the `ymin` keyword; `bottom` is the
# supported spelling for the same behavior.
plt.ylim(bottom=0)

# Shade the area under the curve between a and b.
ix = np.linspace(a,b)
iy = f(ix)
verts = [(a,0)] + list(zip(ix,iy)) + [(b,0)]
poly = Polygon(verts, facecolor = '0.9', edgecolor='0.5')
ax.add_patch(poly)

plt.text(0.5 * (a+b), 0.2 * (f(a) + f(b)), r"$\int_a^b f(x)dx$",
         horizontalalignment='center', fontsize=20)

plt.figtext(0.9, 0.05, '$x$')
plt.figtext(0.1, 0.9, '$y$')
ax.spines['right'].set_visible(False)
ax.spines['top'].set_visible(False)
ax.xaxis.set_ticks_position('bottom')
ax.set_xticks((a,b))
ax.set_xticklabels(('$a$', '$b$'))
ax.set_yticks([])
plt.show()
# -

# - Although the definite integral looks unrelated to differentiation at first
#   glance, its value can be computed from the antiderivative F(x). This is the
#   **Fundamental Theorem of Calculus**.
# $\int_a^b f(x)dx$
# - $\int_a^b f(x)dx$ = F(b) - F(a)
# - A definite integral can be computed either by finding the antiderivative
#   with SymPy and applying the Fundamental Theorem of Calculus, or by
#   numerical integration, which approximates the area by summing many small
#   slices.
# - As an example, consider the definite integral
# $\int_0^2 (x^3 - 3x^2+x+6)dx$
#   (the original markdown wrote the bounds reversed as \int_2^0).

# Needed for the numerical integration cells below: the original notebook
# used `sp.integrate.quad` without ever importing SciPy, which raises a
# NameError. Importing the submodule registers it on the `sp` package object.
import scipy as sp
import scipy.integrate

# #### Antiderivative (indefinite integral)
x, y = sympy.symbols('x y')
f = x**3 - 3*x**2+x+6
F = sympy.integrate(f)
F

# #### Definite integral via the Fundamental Theorem: F(2) - F(0)
(F.subs(x, 2) - F.subs(x, 0)).evalf()

# ## Numerical integration
# - Numerical integration computes a definite integral by splitting the
#   function into very small intervals and summing the actual areas. SciPy's
#   integrate subpackage provides quad (single), dblquad (double) and
#   tplquad (triple) integration.


# +
def f(x):
    return x ** 3 - 3 * x ** 2 + x + 6


sp.integrate.quad(f, 0, 2)  # numerical integration
# -

# - The second number in the result is an upper bound on the error. The
#   numerical result agrees with the value obtained analytically.

# - Exercise 5
# - Compute the following definite integral both analytically (via the
#   antiderivative) and numerically:
# - $\int_0^1 (3x^2-6x+1)dx$

# +
# Analytic method (antiderivative)
import sympy
sympy.init_printing(use_latex='mathjax')

x = sympy.symbols('x')
f = 3*x**2 - 6*x + 1
F = sympy.integrate(f, x)
F

# Having found F(x), substitute the bounds: F(1) - F(0).
(F.subs(x, 1) - F.subs(x, 0)).evalf()

# +
# Numerical method
import sympy
sympy.init_printing(use_latex='mathjax')


def f(x):
    return 3*x**2 - 6*x + 1


sp.integrate.quad(f, 0, 1)  # integrand as a plain Python function
# -

# - Exercise 5.2
# - Compute both analytically and numerically:
# - $\int_1^{10} (2+6x+4e^x+5/x)dx$
#

# +
# Analytic method
# +
# - $\int_a^b f(x,y)dx$ # - 예를 들어 다음과 같은 함수를 생각하자. # - f(x, y) = 4x^2 + 4xy + y^2 # - 여기에서 변수 x만 진짜 입력 변수로 보고 y는 단순히 정해지지 않은 상수로 보면, 이 함수는 다음과 같은 1차원 함수이다. # - 2차원 함수 f(x,y)를 y를 고정시켜 절단한 단면 함수로 볼 수 있다. # - y가 고정되어 있다는 것을 강조하기 위해 다음처럼 표기하기도 한다. # - y가 변수가 아니라는 점을 강조하기 위해 함수 표기에서 쉼표가 아니라 세미콜런이 씌여졌다는 점에 주의하라. # - f(x;y) = 4x^2 + (4y)*x + (y^2) # #### 연습문제 6 # 다음 정적분의 값을 구하라 # S1,-1S1,-1 (1 + xy) dxdy # + import sympy sympy.init_printing(use_latex = 'mathjax') x, y = sympy.symbols('x y') def f(x,y): return 1 + x*y sp.integrate.dblquad(f, -1, 1, lambda x: -1, lambda x: 1) # -
2018_05_26_Integral_review.ipynb
// --- // jupyter: // jupytext: // text_representation: // extension: .scala // format_name: light // format_version: '1.5' // jupytext_version: 1.14.4 // kernelspec: // display_name: Apache Toree - Scala // language: scala // name: apache_toree_scala // --- // <p><font size=-1 color=gray> // &copy; Copyright 2018 IBM Corp. All Rights Reserved. // <p> // Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file // except in compliance with the License. You may obtain a copy of the License at // http://www.apache.org/licenses/LICENSE-2.0 // Unless required by applicable law or agreed to in writing, software distributed under the // License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either // express or implied. See the License for the specific language governing permissions and // limitations under the License. // </font></p> // # Ingest Clickstream Events // // This notebook uses the [Scala](https://www.scala-lang.org/) programming language // to interact with IBM Db2 Event Stream. It demonstrates how to: // // * Connect to Event Store // * Drop and create a database // * Define a table schema // * Drop and create a table // * Load a CSV file into a DataFrame // * Batch insert from a DataFrame into a table // // ## Connect to IBM Db2 Event Store // // ### Determine the IP address of your host // // Obtain the IP address of the host that you want to connect to by running the appropriate command for your operating system: // // * On Mac, run: `ifconfig` // * On Windows, run: `ipconfig` // * On Linux, run: `hostname -i` // // Edit the `HOST = "XXX.XXX.XXX.XXX"` value in the next cell to provide the IP address. 
// +
// Set your host IP address (the Db2 Event Store host determined above).
val Host = "XXX.XXX.XXX.XXX"

// Port will be 1100 for version 1.1.2 or later (5555 for version 1.1.1)
val Port = "1100"
// -

// ## Import Scala packages

// +
import sys.process._
import scala.concurrent.{Await, Future}
import scala.concurrent.duration.Duration
import collection.JavaConverters._
import org.apache.spark.sql.Row
import org.apache.spark.sql.types._
import org.apache.spark.sql.SparkSession
import org.apache.spark.sql.DataFrameReader
import spark.implicits._
import com.ibm.event.catalog.TableSchema
import com.ibm.event.oltp.EventContext
import com.ibm.event.example.DataGenerator
import com.ibm.event.common.ConfigurationReader
import com.ibm.event.oltp.InsertResult
// -

// ## Connect to Event Store
// Point the Event Store client at the host:port endpoint configured above.
ConfigurationReader.setConnectionEndpoints(Host + ":" + Port)

// ## Create a database
//
// Only one database can be active in Event Store. If you already have a database, you don't need to create one.
// To create a database in Event Store, you can use the createDatabase function. If you want to drop an existing
// database to create a new one, use the dropDatabase function first.

// +
// See the comments and run this cell if you need to DROP and/or CREATE the database.
// EventContext.dropDatabase("TESTDB") // Uncomment this if you want to drop an existing TESTDB
val context = EventContext.createDatabase("TESTDB") // Comment this out to re-use an existing TESTDB
// Open the database; openDatabase returns Some(error) on failure, None on success.
val error = context.openDatabase()
error.map(e => sys.error(e.toString))
// -

// ## Create a table
//
// ### Define the schema

// +
// Column layout for one clickstream event row; all columns are non-nullable.
val clickdataSchema = StructType(Array(
    StructField("eventId", LongType, false),
    StructField("eventType", StringType, false),
    StructField("timestamp", StringType, false),
    StructField("ipaddress", StringType, false),
    StructField("sessionId", StringType, false),
    StructField("userId", StringType, false),
    StructField("pageUrl", StringType, false),
    StructField("browser", StringType, false)))

// Define Table schema for clickstream data.
// The two Array("eventId") arguments declare the sharding key and the
// primary key columns respectively (per the TableSchema constructor).
val clickStreamSchema = TableSchema(
    "ClickStreamTable",
    clickdataSchema,
    Array("eventId"),
    Array("eventId"))
// -

// ### Create the Table
// If you want to drop the existing table to create a new one, use the dropTable function first.

// +
// Create the table - skip if table is already created
// var res = context.dropTable(clickStreamSchema.tableName) // Uncomment to drop existing table
var res = context.createTable(clickStreamSchema)
// createTable returns Some(error) on failure, None on success.
if (res.isDefined) {
    println(s"Error while creating table ${clickStreamSchema.tableName}\n: ${res.get}")
} else {
    println(s"Table ${clickStreamSchema.tableName} successfully created.")
}
// -

// Handle to the created (or pre-existing) table, used for inserts below.
val clickstreamTable = context.getTable("ClickStreamTable")

// ## Load data from the CSV file to a DataFrame
// Use the `add data assets` in the UI to make the file available to the notebook.
// Then read the file from the assets directory into a DataFrame.
// Initialize spark session
val spark: SparkSession = SparkSession.builder().getOrCreate()

// Read the CSV with the explicit schema defined earlier (inferSchema off so
// column types match the Event Store table exactly).
val clickStreamDF = spark.read.option("header", "true").option("inferSchema", false).schema(clickdataSchema).csv("assets/clickstream_data.csv")
clickStreamDF.show(5)

// ## Load data from the DataFrame to the table
// Use the batchInsert function to load the data.

// Iteratively Insert rows in batch; toLocalIterator streams partitions to
// the driver one at a time instead of collecting the whole DataFrame.
val iter = clickStreamDF.toLocalIterator()
val error = context.batchInsert(clickstreamTable, iter.asScala)
// batchInsert returns Some(error) on failure, None on success.
if (error.isDefined) {
    System.err.println(error)
}
println("Ingest completed successfully")
notebooks/ingest_clickstream_events.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: conda_python3 # language: python # name: conda_python3 # --- # + [markdown] nbpresent={"id": "782a07bf-08de-4030-88e1-6731c4ac956e"} # ## Train a model with Mushroom data using XGBoost algorithm # ### Model is trained with XGBoost installed in notebook instance # ### In the later examples, we will train using SageMaker's XGBoost algorithm # + nbpresent={"id": "6c6a8672-d428-410a-82fa-7f587c9ef2ae"} # Install xgboost in notebook instance. #### Command to install xgboost # !pip install xgboost==0.90 # + nbpresent={"id": "652b58d4-3b75-405f-9f11-24d0cd1f9656"} import sys import numpy as np import pandas as pd import matplotlib.pyplot as plt import itertools import xgboost as xgb from sklearn.metrics import classification_report, confusion_matrix # + nbpresent={"id": "a3946273-d086-4564-b0f1-6adc225191c3"} column_list_file = 'mushroom_train_column_list.txt' train_file = 'mushroom_train.csv' validation_file = 'mushroom_validation.csv' # + nbpresent={"id": "7c803d6c-74cc-40d2-ab48-747ff4346c22"} columns = '' with open(column_list_file,'r') as f: columns = f.read().split(',') # + nbpresent={"id": "630dde8d-44b9-415d-8876-4e873407d0fc"} columns # + nbpresent={"id": "d6ff2283-cb13-468f-b0cc-0aefeab7b57f"} # Specify the column names as the file does not have column header df_train = pd.read_csv(train_file,names=columns) df_validation = pd.read_csv(validation_file,names=columns) # + nbpresent={"id": "a195ae30-1962-4427-859b-73a013dc10d6"} df_train.head() # + nbpresent={"id": "e30e8aeb-1ca2-4851-bc2d-1bdee29ab1cf"} df_validation.head() # + nbpresent={"id": "3b240613-803d-4fa9-93cf-53ef68df7b93"} X_train = df_train.iloc[:,1:] # Features: 1st column onwards y_train = df_train.iloc[:,0].ravel() # Target: 0th column X_validation = df_validation.iloc[:,1:] y_validation = df_validation.iloc[:,0].ravel() # + 
nbpresent={"id": "9edc89e7-45d3-4350-9eb4-3e0938c3c55e"} # Launch a classifier # XGBoost Training Parameter Reference: # https://github.com/dmlc/xgboost/blob/master/doc/parameter.md #classifier = xgb.XGBClassifier (objective='binary:logistic',n_estimators=50) classifier = xgb.XGBClassifier (objective='binary:logistic') # + nbpresent={"id": "348296fb-8c9b-4598-ad2e-d1fe8e10f76a"} classifier # + nbpresent={"id": "9839d7ce-e791-4d93-bc5f-28604ffde022"} classifier.fit(X_train, y_train, eval_set = [(X_train, y_train), (X_validation, y_validation)], eval_metric=['logloss']) # + nbpresent={"id": "e08f22c1-4346-4e2d-96a2-9974ed5c59ff"} eval_result = classifier.evals_result() # + nbpresent={"id": "092776c3-a611-4f40-91e2-664b3b99d05e"} training_rounds = range(len(eval_result['validation_0']['logloss'])) # + nbpresent={"id": "2e9af3f7-fb85-4c52-83d5-ff9cae457294"} print(training_rounds) # + nbpresent={"id": "5e71239a-e321-43ba-ac2c-993b57b3be3a"} plt.scatter(x=training_rounds,y=eval_result['validation_0']['logloss'],label='Training Error') plt.scatter(x=training_rounds,y=eval_result['validation_1']['logloss'],label='Validation Error') plt.grid(True) plt.xlabel('Iterations') plt.ylabel('LogLoss') plt.title('Training Vs Validation Error') plt.legend() plt.show() # + nbpresent={"id": "f144f315-6d38-429e-8c17-06c17a446198"} xgb.plot_importance(classifier) plt.show() # + nbpresent={"id": "3312675d-307c-4eff-b835-34f0e7f57924"} df = pd.read_csv(validation_file,names=columns) # + nbpresent={"id": "afad019f-88df-4893-bb3d-b7f2b7db214b"} df.head() # + nbpresent={"id": "9b5cb70d-6069-4511-810e-fd17e72667dd"} X_test = df.iloc[:,1:] # + nbpresent={"id": "f611c852-50e3-4a1a-9134-c1c6e82ad780"} result = classifier.predict(X_test) # - result[:5] # + nbpresent={"id": "2c573c2b-4143-4e01-b107-e6b871ce0249"} df['predicted_class'] = result # + nbpresent={"id": "5ad0fa04-6896-46b5-bc23-40d61480d7ca"} df.head() # - df.class_edible.value_counts() df.predicted_class.value_counts() # ## Binary 
# Classifier Metrics

# +
# Reference: https://scikit-learn.org/stable/modules/model_evaluation.html
# Explicitly stating labels. Pass=1, Fail=0
# With labels=[1, 0], the confusion matrix rows/cols are ordered
# [positive, negative], so cm[0, 0]=TP, cm[1, 1]=TN, cm[1, 0]=FP, cm[0, 1]=FN.
def true_positive(y_true, y_pred):
    return confusion_matrix(y_true, y_pred,labels=[1,0])[0, 0]


def true_negative(y_true, y_pred):
    return confusion_matrix(y_true,y_pred,labels=[1,0])[1, 1]


def false_positive(y_true, y_pred):
    return confusion_matrix(y_true, y_pred,labels=[1,0])[1, 0]


def false_negative(y_true, y_pred):
    return confusion_matrix(y_true, y_pred,labels=[1,0])[0, 1]


# +
# Compute Binary Classifier Metrics
# Returns a dictionary {"MetricName":Value,...}
def binary_classifier_metrics(y_true, y_pred):
    """Compute binary classification metrics for labels where positive=1.

    Returns a dict with raw counts (TruePositive, TrueNegative,
    FalsePositive, FalseNegative, Positive, Negative) and derived rates
    (Recall, TrueNegativeRate, Precision, Accuracy, FalsePositiveRate,
    FalseNegativeRate, F1). Rates whose denominator would be zero are
    reported as 0.
    """
    metrics = {}

    # References:
    # https://docs.aws.amazon.com/machine-learning/latest/dg/binary-classification.html
    # https://en.wikipedia.org/wiki/Confusion_matrix

    # Definition:
    # true positive = tp = how many samples were correctly classified as positive (count)
    # true negative = tn = how many samples were correctly classified as negative (count)
    # false positive = fp = how many negative samples were mis-classified as positive (count)
    # false_negative = fn = how many positive samples were mis-classified as negative (count)
    # positive = number of positive samples (count) = true positive + false negative
    # negative = number of negative samples (count) = true negative + false positive
    tp = true_positive(y_true, y_pred)
    tn = true_negative(y_true, y_pred)
    fp = false_positive(y_true, y_pred)
    fn = false_negative(y_true, y_pred)

    positive = tp + fn
    negative = tn + fp

    metrics['TruePositive'] = tp
    metrics['TrueNegative'] = tn
    metrics['FalsePositive'] = fp
    metrics['FalseNegative'] = fn

    metrics['Positive'] = positive
    metrics['Negative'] = negative

    # True Positive Rate (TPR, Recall) = true positive/positive
    # How many positives were correctly classified? (fraction)
    # Recall value closer to 1 is better. closer to 0 is worse
    if tp == 0:
        recall = 0
    else:
        recall = tp/positive
    metrics['Recall'] = recall

    # True Negative Rate = True Negative/negative
    # How many negatives were correctly classified? (fraction)
    # True Negative Rate value closer to 1 is better. closer to 0 is worse
    if tn == 0:
        tnr = 0
    else:
        tnr = tn/(negative)
    metrics['TrueNegativeRate'] = tnr

    # Precision = True Positive/(True Positive + False Positive)
    # How many positives classified by the algorithm are really positives? (fraction)
    # Precision value closer to 1 is better. closer to 0 is worse
    if tp == 0:
        precision = 0
    else:
        precision = tp/(tp + fp)
    metrics['Precision'] = precision

    # Accuracy = (True Positive + True Negative)/(total positive + total negative)
    # How many positives and negatives were correctly classified? (fraction)
    # Accuracy value closer to 1 is better. closer to 0 is worse
    # BUGFIX: guard against an empty label set (denominator 0).
    if positive + negative == 0:
        accuracy = 0
    else:
        accuracy = (tp + tn)/(positive + negative)
    metrics['Accuracy'] = accuracy

    # False Positive Rate (FPR, False Alarm) = False Positive/(total negative)
    # How many negatives were mis-classified as positives (fraction)
    # False Positive Rate value closer to 0 is better. closer to 1 is worse
    if fp == 0:
        fpr = 0
    else:
        fpr = fp/(negative)
    metrics['FalsePositiveRate'] = fpr

    # False Negative Rate (FNR, Misses) = False Negative/(total Positive)
    # How many positives were mis-classified as negative (fraction)
    # False Negative Rate value closer to 0 is better. closer to 1 is worse
    # BUGFIX: the original divided unconditionally (fn/positive), which
    # raises ZeroDivisionError whenever the dataset has no positive labels;
    # every other rate above is guarded, so guard this one the same way.
    if fn == 0:
        fnr = 0
    else:
        fnr = fn/(positive)
    metrics['FalseNegativeRate'] = fnr

    # F1 Score = harmonic mean of Precision and Recall
    # F1 Score closer to 1 is better. Closer to 0 is worse.
    if precision == 0 or recall == 0:
        f1 = 0
    else:
        f1 = 2*precision*recall/(precision+recall)
    metrics['F1'] = f1

    return metrics


# -
# Reference:
# https://scikit-learn.org/stable/auto_examples/model_selection/plot_confusion_matrix.html
def plot_confusion_matrix(cm, classes,
                          normalize=False,
                          title='Confusion matrix',
                          cmap=plt.cm.Blues):
    """
    This function prints and plots the confusion matrix.
    Normalization can be applied by setting `normalize=True`.
    """
    if normalize:
        # Convert counts to per-true-class fractions (rows sum to 1).
        cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]
        #print("Normalized confusion matrix")
    #else:
    #    print('Confusion matrix, without normalization')

    #print(cm)

    plt.imshow(cm, interpolation='nearest', cmap=cmap)
    plt.title(title)
    plt.colorbar()
    tick_marks = np.arange(len(classes))
    plt.xticks(tick_marks, classes, rotation=45)
    plt.yticks(tick_marks, classes)

    fmt = '.2f' if normalize else 'd'
    thresh = cm.max() / 2.
    # Annotate each cell, flipping text color for contrast on dark cells.
    for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):
        plt.text(j, i, format(cm[i, j], fmt),
                 horizontalalignment="center",
                 color="white" if cm[i, j] > thresh else "black")

    plt.ylabel('True label')
    plt.xlabel('Predicted label')
    plt.tight_layout()


# Compute confusion matrix
#{0:'edible',1:'poisonous'})
cnf_matrix = confusion_matrix(df['class_edible'], df['predicted_class'],labels=[1,0])

# Plot confusion matrix
plt.figure()
plot_confusion_matrix(cnf_matrix, classes=['Poisonous','Edible'],
                      title='Confusion Matrix')

# Plot confusion matrix
plt.figure()
plot_confusion_matrix(cnf_matrix, classes=['Poisonous','Edible'],
                      title='Confusion Matrix',normalize=True)

metrics = [binary_classifier_metrics(df['class_edible'], df['predicted_class'])]
df_metrics=pd.DataFrame.from_dict(metrics)
df_metrics.index = ['Model']
df_metrics

# +
print('Counts')
print(df_metrics[['TruePositive', 'FalseNegative',
                  'FalsePositive', 'TrueNegative',]].round(2))
print()
print('Fractions')
print(df_metrics[['Recall', 'FalseNegativeRate',
                  'FalsePositiveRate', 'TrueNegativeRate',]].round(2))
print()
print(df_metrics[['Precision', 'Accuracy', 'F1']].round(2))
# -

print(classification_report(
    df['class_edible'],
    df['predicted_class'],
    labels=[1,0],
    target_names=['Poisonous','Edible']))
xgboost/MushroomClassification/mushroom_xgboost_localmode.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: conda_python3 # language: python # name: conda_python3 # --- # # Document Embedding with Amazon SageMaker Object2Vec # 1. [Introduction](#Introduction) # 2. [Background](#Background) # 1. [Embedding documents using Object2Vec](#Embedding-documents-using-Object2Vec) # 3. [Download and preprocess Wikipedia data](#Download-and-preprocess-Wikipedia-data) # 1. [Install and load dependencies](#Install-and-load-dependencies) # 2. [Build vocabulary and tokenize datasets](#Build-vocabulary-and-tokenize-datasets) # 3. [Upload preprocessed data to S3](#Upload-preprocessed-data-to-S3) # 4. [Define SageMaker session, Object2Vec image, S3 input and output paths](#Define-SageMaker-session,-Object2Vec-image,-S3-input-and-output-paths) # 5. [Train and deploy doc2vec](#Train-and-deploy-doc2vec) # 1. [Learning performance boost with new features](#Learning-performance-boost-with-new-features) # 2. [Training speedup with sparse gradient update](#Training-speedup-with-sparse-gradient-update) # 6. [Apply learned embeddings to document retrieval task](#Apply-learned-embeddings-to-document-retrieval-task) # 1. [Comparison with the StarSpace algorithm](#Comparison-with-the-StarSpace-algorithm) # ## Introduction # In this notebook, we introduce four new features to Object2Vec, a general-purpose neural embedding algorithm: negative sampling, sparse gradient update, weight-sharing, and comparator operator customization. The new features together broaden the applicability of Object2Vec, improve its training speed and accuracy, and provide users with greater flexibility. See [Introduction to the Amazon SageMaker Object2Vec](https://aws.amazon.com/blogs/machine-learning/introduction-to-amazon-sagemaker-object2vec/) if you aren’t already familiar with Object2Vec. 
# # We demonstrate how these new features extend the applicability of Object2Vec to a new Document Embedding use-case: A customer has a large collection of documents. Instead of storing these documents in its raw format or as sparse bag-of-words vectors, to achieve training efficiency in the various downstream tasks, she would like to instead embed all documents in a common low-dimensional space, so that the semantic distance between these documents are preserved. # ## Background # Object2Vec is a highly customizable multi-purpose algorithm that can learn embeddings of pairs of objects. The embeddings are learned such that it preserves their pairwise similarities in the original space. # # - Similarity is user-defined: users need to provide the algorithm with pairs of objects that they define as similar (1) or dissimilar (0); alternatively, the users can define similarity in a continuous sense (provide a real-valued similarity score). # # - The learned embeddings can be used to efficiently compute nearest neighbors of objects, as well as to visualize natural clusters of related objects in the embedding space. In addition, the embeddings can also be used as features of the corresponding objects in downstream supervised tasks such as classification or regression. # ### Embedding documents using Object2Vec # We demonstrate how, with the new features, Object2Vec can be used to embed a large collection of documents into vectors in the same latent space. # # Similar to the widely used Word2Vec algorithm for word embedding, a natural approach to document embedding is to preprocess documents as (sentence, context) pairs, where the sentence and its matching context come from the same document. The matching context is the entire document with the given sentence removed. The idea is to embed both sentence and context into a low dimensional space such that their mutual similarity is maximized, since they belong to the same document and therefore should be semantically related. 
The learned encoder for the context can then be used to encode new documents into the same embedding space. In order to train the encoders for sentences and documents, we also need negative (sentence, context) pairs so that the model can learn to discriminate between semantically similar and dissimilar pairs. It is easy to generate such negatives by pairing sentences with documents that they do not belong to. Since there are many more negative pairs than positives in naturally occurring data, we typically resort to random sampling techniques to achieve a balance between positive and negative pairs in the training data. The figure below shows pictorially how the positive pairs and negative pairs are generated from unlabeled data for the purpose of learning embeddings for documents (and sentences). # <img src="doc_embedding_illustration.png" width="800"> # We show how Object2Vec with the new *negative sampling feature* can be applied to the document embedding use-case. In addition, we show how the other new features, namely, *weight-sharing*, *customization of comparator operator*, and *sparse gradient update*, together enhance the algorithm's performance and user-experience in and beyond this use-case. Sections [Learning performance boost with new features](#Learning-performance-boost-with-new-features) and [Training speedup with sparse gradient update](#Training-speedup-with-sparse-gradient-update) in this notebook provide a detailed introduction to the new features. # ## Download and preprocess Wikipedia data # Please be aware of the following requirements about the acknowledgment, copyright and availability, cited from the [data source description page](https://github.com/facebookresearch/StarSpace/blob/master/LICENSE.md). 
# # > Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. # + language="bash" # # DATANAME="wikipedia" # DATADIR="/tmp/wiki" # # mkdir -p "${DATADIR}" # # if [ ! -f "${DATADIR}/${DATANAME}_train250k.txt" ] # then # echo "Downloading wikipedia data" # wget --quiet -c "https://dl.fbaipublicfiles.com/starspace/wikipedia_train250k.tgz" -O "${DATADIR}/${DATANAME}_train.tar.gz" # tar -xzvf "${DATADIR}/${DATANAME}_train.tar.gz" -C "${DATADIR}" # wget --quiet -c "https://dl.fbaipublicfiles.com/starspace/wikipedia_devtst.tgz" -O "${DATADIR}/${DATANAME}_test.tar.gz" # tar -xzvf "${DATADIR}/${DATANAME}_test.tar.gz" -C "${DATADIR}" # fi # # - datadir = '/tmp/wiki' # !ls /tmp/wiki # ### Install and load dependencies # !pip install jsonlines # + # note: please run on python 3 kernel import os import random import math import scipy import numpy as np import re import string import json, jsonlines from collections import defaultdict from collections import Counter from itertools import chain, islice from nltk.tokenize import TreebankWordTokenizer from sklearn.preprocessing import normalize ## sagemaker api import sagemaker, boto3 from sagemaker.session import s3_input from sagemaker.predictor import json_serializer, json_deserializer # - # ### Build vocabulary and tokenize datasets # + BOS_SYMBOL = "<s>" EOS_SYMBOL = "</s>" UNK_SYMBOL = "<unk>" PAD_SYMBOL = "<pad>" PAD_ID = 0 TOKEN_SEPARATOR = " " VOCAB_SYMBOLS = [PAD_SYMBOL, UNK_SYMBOL, BOS_SYMBOL, EOS_SYMBOL] ##### utility 
functions for preprocessing def get_article_iter_from_file(fname): with open(fname) as f: for article in f: yield article def get_article_iter_from_channel(channel, datadir='/tmp/wiki'): if channel == 'train': fname = os.path.join(datadir, 'wikipedia_train250k.txt') return get_article_iter_from_file(fname) else: iterlist = [] suffix_list = ['train250k.txt', 'test10k.txt', 'dev10k.txt', 'test_basedocs.txt'] for suffix in suffix_list: fname = os.path.join(datadir, 'wikipedia_'+suffix) iterlist.append(get_article_iter_from_file(fname)) return chain.from_iterable(iterlist) def readlines_from_article(article): return article.strip().split('\t') def sentence_to_integers(sentence, word_dict, trim_size=None): """ Converts a string of tokens to a list of integers """ if not trim_size: return [word_dict[token] if token in word_dict else 0 for token in get_tokens_from_sentence(sentence)] else: integer_list = [] for token in get_tokens_from_sentence(sentence): if len(integer_list) < trim_size: if token in word_dict: integer_list.append(word_dict[token]) else: integer_list.append(0) else: break return integer_list def get_tokens_from_sentence(sent): """ Yields tokens from input string. :param line: Input string. :return: Iterator over tokens. """ for token in sent.split(): if len(token) > 0: yield normalize_token(token) def get_tokens_from_article(article): iterlist = [] for sent in readlines_from_article(article): iterlist.append(get_tokens_from_sentence(sent)) return chain.from_iterable(iterlist) def normalize_token(token): token = token.lower() if all(s.isdigit() or s in string.punctuation for s in token): tok = list(token) for i in range(len(tok)): if tok[i].isdigit(): tok[i] = '0' token = "".join(tok) return token # + # function to build vocabulary def build_vocab(channel, num_words=50000, min_count=1, use_reserved_symbols=True, sort=True): """ Creates a vocabulary mapping from words to ids. 
Increasing integer ids are assigned by word frequency, using lexical sorting as a tie breaker. The only exception to this are special symbols such as the padding symbol (PAD). :param num_words: Maximum number of words in the vocabulary. :param min_count: Minimum occurrences of words to be included in the vocabulary. :return: word-to-id mapping. """ vocab_symbols_set = set(VOCAB_SYMBOLS) raw_vocab = Counter() for article in get_article_iter_from_channel(channel): article_wise_vocab_list = list() for token in get_tokens_from_article(article): if token not in vocab_symbols_set: article_wise_vocab_list.append(token) raw_vocab.update(article_wise_vocab_list) print("Initial vocabulary: {} types".format(len(raw_vocab))) # For words with the same count, they will be ordered reverse alphabetically. # Not an issue since we only care for consistency pruned_vocab = sorted(((c, w) for w, c in raw_vocab.items() if c >= min_count), reverse=True) print("Pruned vocabulary: {} types (min frequency {})".format(len(pruned_vocab), min_count)) # truncate the vocabulary to fit size num_words (only includes the most frequent ones) vocab = islice((w for c, w in pruned_vocab), num_words) if sort: # sort the vocabulary alphabetically vocab = sorted(vocab) if use_reserved_symbols: vocab = chain(VOCAB_SYMBOLS, vocab) word_to_id = {word: idx for idx, word in enumerate(vocab)} print("Final vocabulary: {} types".format(len(word_to_id))) if use_reserved_symbols: # Important: pad symbol becomes index 0 assert word_to_id[PAD_SYMBOL] == PAD_ID return word_to_id # + # build vocab dictionary def build_vocabulary_file(vocab_fname, channel, num_words=50000, min_count=1, use_reserved_symbols=True, sort=True, force=False): if not os.path.exists(vocab_fname) or force: w_dict = build_vocab(channel, num_words=num_words, min_count=min_count, use_reserved_symbols=True, sort=True) with open(vocab_fname, "w") as write_file: json.dump(w_dict, write_file) channel = 'train' min_count = 5 vocab_fname = 
os.path.join(datadir, 'wiki-vocab-{}250k-mincount-{}.json'.format(channel, min_count)) build_vocabulary_file(vocab_fname, channel, num_words=500000, min_count=min_count, force=True) # + print("Loading vocab file {} ...".format(vocab_fname)) with open(vocab_fname) as f: w_dict = json.load(f) print("The vocabulary size is {}".format(len(w_dict.keys()))) # + # Functions to build training data # Tokenize wiki articles to (sentence, document) pairs def generate_sent_article_pairs_from_single_article(article, word_dict): sent_list = readlines_from_article(article) art_len = len(sent_list) idx = random.randint(0, art_len-1) wrapper_text_idx = list(range(idx)) + list(range((idx+1) % art_len, art_len)) wrapper_text_list = sent_list[:idx] + sent_list[(idx+1) % art_len : art_len] wrapper_tokens = [] for sent1 in wrapper_text_list: wrapper_tokens += sentence_to_integers(sent1, word_dict) sent_tokens = sentence_to_integers(sent_list[idx], word_dict) yield {'in0':sent_tokens, 'in1':wrapper_tokens, 'label':1} def generate_sent_article_pairs_from_single_file(fname, word_dict): with open(fname) as reader: iter_list = [] for article in reader: iter_list.append(generate_sent_article_pairs_from_single_article(article, word_dict)) return chain.from_iterable(iter_list) # + # Build training data # Generate integer positive labeled data train_prefix = 'train250k' fname = "wikipedia_{}.txt".format(train_prefix) outfname = os.path.join(datadir, '{}_tokenized.jsonl'.format(train_prefix)) counter = 0 with jsonlines.open(outfname, 'w') as writer: for sample in generate_sent_article_pairs_from_single_file(os.path.join(datadir, fname), w_dict): writer.write(sample) counter += 1 print("Finished generating {} data of size {}".format(train_prefix, counter)) # - # Shuffle training data # !shuf {outfname} > {train_prefix}_tokenized_shuf.jsonl # + ## Function to generate dev/test data (with both positive and negative labels) def generate_pos_neg_samples_from_single_article(word_dict, article_idx, 
article_buffer, negative_sampling_rate=1): sample_list = [] # generate positive samples sent_list = readlines_from_article(article_buffer[article_idx]) art_len = len(sent_list) idx = random.randint(0, art_len-1) wrapper_text_idx = list(range(idx)) + list(range((idx+1) % art_len, art_len)) wrapper_text_list = sent_list[:idx] + sent_list[(idx+1) % art_len : art_len] wrapper_tokens = [] for sent1 in wrapper_text_list: wrapper_tokens += sentence_to_integers(sent1, word_dict) sent_tokens = sentence_to_integers(sent_list[idx], word_dict) sample_list.append({'in0':sent_tokens, 'in1':wrapper_tokens, 'label':1}) # generate negative sample buff_len = len(article_buffer) sampled_inds = np.random.choice(list(range(article_idx)) + list(range((article_idx+1) % buff_len, buff_len)), size=negative_sampling_rate) for n_idx in sampled_inds: other_article = article_buffer[n_idx] context_list = readlines_from_article(other_article) context_tokens = [] for sent2 in context_list: context_tokens += sentence_to_integers(sent2, word_dict) sample_list.append({'in0': sent_tokens, 'in1':context_tokens, 'label':0}) return sample_list # - # Build dev and test data for data in ['dev10k', 'test10k']: fname = os.path.join(datadir,'wikipedia_{}.txt'.format(data)) test_nsr = 5 outfname = '{}_tokenized-nsr{}.jsonl'.format(data, test_nsr) article_buffer = list(get_article_iter_from_file(fname)) sample_buffer = [] for article_idx in range(len(article_buffer)): sample_buffer += generate_pos_neg_samples_from_single_article(w_dict, article_idx, article_buffer, negative_sampling_rate=test_nsr) with jsonlines.open(outfname, 'w') as writer: writer.write_all(sample_buffer) # ### Upload preprocessed data to S3 # + tags=["parameters"] TRAIN_DATA="train250k_tokenized_shuf.jsonl" DEV_DATA="dev10k_tokenized-nsr{}.jsonl".format(test_nsr) TEST_DATA="test10k_tokenized-nsr{}.jsonl".format(test_nsr) # NOTE: define your s3 bucket and key here bucket = '<YOUR S3 BUCKET>' S3_KEY = 'object2vec-doc2vec' # + magic_args="-s 
\"$TRAIN_DATA\" \"$DEV_DATA\" \"$TEST_DATA\" \"$bucket\" \"$S3_KEY\"" language="bash" # # aws s3 cp "$1" s3://$4/$5/input/train/ # aws s3 cp "$2" s3://$4/$5/input/validation/ # aws s3 cp "$3" s3://$4/$5/input/test/ # - # ## Define Sagemaker session, Object2Vec image, S3 input and output paths # + from sagemaker import get_execution_role from sagemaker.amazon.amazon_estimator import get_image_uri region = boto3.Session().region_name print("Your notebook is running on region '{}'".format(region)) sess = sagemaker.Session() role = get_execution_role() print("Your IAM role: '{}'".format(role)) container = get_image_uri(region, 'object2vec') print("The image uri used is '{}'".format(container)) print("Using s3 buceket: {} and key prefix: {}".format(bucket, S3_KEY)) # + ## define input channels s3_input_path = os.path.join('s3://', bucket, S3_KEY, 'input') s3_train = s3_input(os.path.join(s3_input_path, 'train', TRAIN_DATA), distribution='ShardedByS3Key', content_type='application/jsonlines') s3_valid = s3_input(os.path.join(s3_input_path, 'validation', DEV_DATA), distribution='ShardedByS3Key', content_type='application/jsonlines') s3_test = s3_input(os.path.join(s3_input_path, 'test', TEST_DATA), distribution='ShardedByS3Key', content_type='application/jsonlines') # - ## define output path output_path = os.path.join('s3://', bucket, S3_KEY, 'models') # ## Train and deploy doc2vec # We combine four new features into our training of Object2Vec: # # - Negative sampling: With the new `negative_sampling_rate` hyperparameter, users of Object2Vec only need to provide positively labeled data pairs, and the algorithm automatically samples for negative data internally during training. 
# # - Weight-sharing of embedding layer: The new `tied_token_embedding_weight` hyperparameter gives user the flexibility to share the embedding weights for both encoders, and it improves the performance of the algorithm in this use-case # # - The new `comparator_list` hyperparameter gives users the flexibility to mix-and-match different operators so that they can tune the algorithm towards optimal performance for their applications. # ### Learning performance boost with new features # _Table 1_ below shows the effect of these features on these two metrics evaluated on a test set obtained from the same data creation process. # # We see that when negative sampling and weight-sharing of embedding layer is on, and when we use a customized comparator operator (Hadamard product), the model has improved test performance. When all these features are combined together (last row of Table 1), the algorithm has the best performance as measured by accuracy and cross-entropy. # # # ### Table 1 # # |negative_sampling_rate|weight-sharing|comparator operator| Test accuracy | Test cross-entropy| # | :------------- | :----------: | :-----------: | :----------: | ----------: | # | off | off | default | 0.167 | 23 | # | 3 | off | default | 0.92 | 0.21 | # | 5 | off | default | 0.92 | 0.19 | # | off | on | default | 0.167 | 23 | # | 3 | on | default | 0.93 | 0.18 | # | 5 | on | default | 0.936 | 0.17 | # | off | on | customized | 0.17 | 23 | # | 3 | on | customized | 0.93 | 0.18 | # | 5 | on | customized | 0.94 | 0.17 | # # # # # - The new `token_embedding_storage_type` hyperparameter flags the use of sparse gradient update, which takes advantage of the sparse input format of Object2Vec. We tested and summarized the training speedup with different GPU and `max_seq_len` configurations in the table below. In a word, we see 2-20 times speed up on different machine and algorithm configurations. 
# ### Training speedup with sparse gradient update # _Table 2_ below shows the training speeds up with sparse gradient update feature turned on, as a function of number of GPUs used for training. # # ### Table 2 # # |num_gpus|Throughput (samples/sec) with dense storage|Throughput with sparse storage|max_seq_len (in0/in1)|Speedup X-times | # | :------------- | :----------: | :-----------:| :----------: | ----------: | # | 1 | 5k | 14k | 50 | 2.8 | # | 2 | 2.7k | 23k | 50 | 8.5 | # | 3 | 2k | 23~26k | 50 | 10 | # | 4 | 2k | 23k | 50 | 10 | # | 8 | 1.1k | 19k~20k | 50 | 20 | # | 1 | 1.1k | 2k | 500 | 2 | # | 2 | 1.5k | 3.6k | 500 | 2.4 | # | 4 | 1.6k | 6k | 500 | 3.75 | # | 6 | 1.3k | 6.7k | 500 | 5.15 | # | 8 | 1.1k | 5.6k | 500 | 5 | # + # Define training hyperparameters hyperparameters = { "_kvstore": "device", "_num_gpus": 'auto', "_num_kv_servers": "auto", "bucket_width": 0, "dropout": 0.4, "early_stopping_patience": 2, "early_stopping_tolerance": 0.01, "enc0_layers": "auto", "enc0_max_seq_len": 50, "enc0_network": "pooled_embedding", "enc0_pretrained_embedding_file": "", "enc0_token_embedding_dim": 300, "enc0_vocab_size": 267522, "enc1_network": "enc0", "enc_dim": 300, "epochs": 20, "learning_rate": 0.01, "mini_batch_size": 512, "mlp_activation": "relu", "mlp_dim": 512, "mlp_layers": 2, "num_classes": 2, "optimizer": "adam", "output_layer": "softmax", "weight_decay": 0 } hyperparameters['negative_sampling_rate'] = 3 hyperparameters['tied_token_embedding_weight'] = "true" hyperparameters['comparator_list'] = "hadamard" hyperparameters['token_embedding_storage_type'] = 'row_sparse' # get estimator doc2vec = sagemaker.estimator.Estimator(container, role, train_instance_count=1, train_instance_type='ml.p2.xlarge', output_path=output_path, sagemaker_session=sess) # + # set hyperparameters doc2vec.set_hyperparameters(**hyperparameters) # fit estimator with data doc2vec.fit({'train': s3_train, 'validation':s3_valid, 'test':s3_test}) # + # deploy model doc2vec_model = 
doc2vec.create_model( serializer=json_serializer, deserializer=json_deserializer, content_type='application/json') predictor = doc2vec_model.deploy(initial_instance_count=1, instance_type='ml.m4.xlarge') # - # ## Apply learned embeddings to document retrieval task # # After training the model, we can use the encoders in Object2Vec to map new articles and sentences into a shared embedding space. Then we evaluate the quality of these embeddings with a downstream document retrieval task. # # In the retrieval task, given a sentence query, the trained algorithm needs to find its best matching document (the ground-truth document is the one that contains it) from a pool of documents, where the pool contains 10,000 other non ground-truth documents. def generate_tokenized_articles_from_single_file(fname, word_dict): for article in get_article_iter_from_file(fname): integer_article = [] for sent in readlines_from_article(article): integer_article += sentence_to_integers(sent, word_dict) yield integer_article # + def read_jsonline(fname): """ Reads jsonline files and returns iterator """ with jsonlines.open(fname) as reader: for line in reader: yield line def send_payload(predictor, payload): return predictor.predict(payload) def write_to_jsonlines(data, fname): with jsonlines.open(fname, 'a') as writer: data = data['predictions'] writer.write_all(data) def eval_and_write(predictor, fname, to_fname, batch_size): if os.path.exists(to_fname): print("Removing exisiting embedding file {}".format(to_fname)) os.remove(to_fname) print("Getting embedding of data in {} and store to {}...".format(fname, to_fname)) test_data_content = list(read_jsonline(fname)) n_test = len(test_data_content) n_batches = math.ceil(n_test / float(batch_size)) start = 0 for idx in range(n_batches): if idx % 10 == 0: print("Inference on the {}-th batch".format(idx+1)) end = (start + batch_size) if (start + batch_size) <= n_test else n_test payload = {'instances': test_data_content[start:end]} data = 
send_payload(predictor, payload)
        write_to_jsonlines(data, to_fname)
        start = end


def get_embeddings(predictor, test_data_content, batch_size):
    """Run batched inference through `predictor` and return all embedding
    predictions as an in-memory list (unlike eval_and_write, which streams
    results to a file)."""
    n_test = len(test_data_content)
    n_batches = math.ceil(n_test / float(batch_size))
    start = 0
    embeddings = []
    for idx in range(n_batches):
        if idx % 10 == 0:
            print("Inference the {}-th batch".format(idx+1))
        # clamp the final batch to the end of the data
        end = (start + batch_size) if (start + batch_size) <= n_test else n_test
        payload = {'instances': test_data_content[start:end]}
        data = send_payload(predictor, payload)
        embeddings += data['predictions']
        start = end
    return embeddings
# -

# File-name setup for the document-retrieval evaluation below.
basedocs_fpath = os.path.join(datadir, 'wikipedia_test_basedocs.txt')
test_fpath = '{}_tokenized-nsr{}.jsonl'.format('test10k', test_nsr)
eval_basedocs = 'test_basedocs_tokenized_in0.jsonl'
basedocs_emb = 'test_basedocs_embeddings.jsonl'
sent_doc_emb = 'test10k_embeddings_pairs.jsonl'

# NOTE(review): the imports and two assignments below duplicate lines just
# above -- harmless, apparently left over from re-running notebook cells.
import jsonlines
import numpy as np
basedocs_emb = 'test_basedocs_embeddings.jsonl'
sent_doc_emb = 'test10k_embeddings_pairs.jsonl'

# +
batch_size = 100

# tokenize basedocs
with jsonlines.open(eval_basedocs, 'w') as writer:
    for data in generate_tokenized_articles_from_single_file(basedocs_fpath, w_dict):
        writer.write({'in0': data})

# get basedocs embedding
eval_and_write(predictor, eval_basedocs, basedocs_emb, batch_size)

# get embeddings for sentence and ground-truth article pairs
# (keep only positively-labeled pairs: label == 1 marks the document the sentence came from)
sentences = []
gt_articles = []
for data in read_jsonline(test_fpath):
    if data['label'] == 1:
        sentences.append({'in0': data['in0']})
        gt_articles.append({'in0': data['in1']})

sent_emb = get_embeddings(predictor, sentences, batch_size)
doc_emb = get_embeddings(predictor, gt_articles, batch_size)

with jsonlines.open(sent_doc_emb, 'w') as writer:
    for (sent, doc) in zip(sent_emb, doc_emb):
        writer.write({'sent': sent['embeddings'], 'doc': doc['embeddings']})
# -

# free the large objects before evaluation to keep notebook memory down
del w_dict
del sent_emb, doc_emb

# The blocks below evaluate the performance of Object2Vec model on the document retrieval task.
# # We use two metrics hits@k and mean rank to evaluate the retrieval performance. Note that the ground-truth documents in the pool have the query sentence removed from them -- else the task would have been trivial. # # * hits@k: It calculates the fraction of queries where its best-matching (ground-truth) document is contained in top k retrieved documents by the algorithm. # * mean rank: It is the average rank of the best-matching documents, as determined by the algorithm, over all queries. # + # Construct normalized basedocs, sentences, and ground-truth docs embedding matrix basedocs = [] with jsonlines.open(basedocs_emb) as reader: for line in reader: basedocs.append(np.array(line['embeddings'])) sent_embs = [] gt_doc_embs = [] with jsonlines.open(sent_doc_emb) as reader2: for line2 in reader2: sent_embs.append(line2['sent']) gt_doc_embs.append(line2['doc']) basedocs_emb_mat = normalize(np.array(basedocs).T, axis=0) sent_emb_mat = normalize(np.array(sent_embs), axis=1) gt_emb_mat = normalize(np.array(gt_doc_embs).T, axis=0) # - def get_chunk_query_rank(sent_emb_mat, basedocs_emb_mat, gt_emb_mat, largest_k): # this is a memory-consuming step if chunk is large dot_with_basedocs = np.matmul(sent_emb_mat, basedocs_emb_mat) dot_with_gt = np.diag(np.matmul(sent_emb_mat, gt_emb_mat)) final_ranking_scores = np.insert(dot_with_basedocs, 0, dot_with_gt, axis=1) query_rankings = list() largest_k_list = list() for row in final_ranking_scores: ranking_ind = np.argsort(row) # sorts row in increasing order of similarity score num_scores = len(ranking_ind) query_rankings.append(num_scores-list(ranking_ind).index(0)) largest_k_list.append(np.array(ranking_ind[-largest_k:]).astype(int)) return query_rankings, largest_k_list # `Note: We evaluate the learned embeddings on chunks of test sentences-document pairs to save run-time memory; this is to make sure that our code works on the smallest notebook instance *ml.t2.medium*. 
If you have a larger notebook instance, you can increase the chunk_size to speed up evaluation. For instances larger than ml.t2.xlarge, you can set chunk_size = num_test_samples` # + chunk_size = 1000 num_test_samples = len(sent_embs) assert num_test_samples%chunk_size == 0, "Chunk_size must be divisible by {}".format(num_test_samples) num_chunks = int(num_test_samples / chunk_size) k_list = [1, 5, 10, 20, 50] largest_k = max(k_list) query_all_rankings = list() all_largest_k_list = list() for i in range(0, num_chunks*chunk_size, chunk_size): print("Evaluating on the {}-th chunk".format(i)) j = i+chunk_size sent_emb_submat = sent_emb_mat[i:j, :] gt_emb_submat = gt_emb_mat[:, i:j] query_rankings, largest_k_list = get_chunk_query_rank(sent_emb_submat, basedocs_emb_mat, gt_emb_submat, largest_k) query_all_rankings += query_rankings all_largest_k_list.append(np.array(largest_k_list).astype(int)) all_largest_k_mat = np.concatenate(all_largest_k_list, axis=0).astype(int) print("Summary:") print("Mean query ranks is {}".format(np.mean(query_all_rankings))) print("Percentiles of query ranks is 50%:{}, 80%:{}, 90%:{}, 99%:{}".format(*np.percentile(query_all_rankings, [50, 80, 90, 99]))) for k in k_list: top_k_mat = all_largest_k_mat[:, -k:] unique, counts = np.unique(top_k_mat, return_counts=True) print("The hits at {} score is {}/{}".format(k, counts[0], len(top_k_mat))) # - # ### Comparison with the StarSpace algorithm # # We compare the performance of Object2Vec with the StarSpace (https://github.com/facebookresearch/StarSpace) algorithm on the document retrieval evaluation task, using a set of 250 thousand Wikipedia documents. The experimental results displayed in the table below, show that Object2Vec significantly outperforms StarSpace on all metrics although both models use the same kind of encoders for sentences and documents. 
# # # | Algorithm | hits@1 | hits@10 | hits@20 | mean rank | # | :------------- | :----------: | :-----------:| :----------: | ----------: | # | StarSpace | 21.98% | 42.77% | 50.55% | 303.34 | # | Object2Vec | 26.40% | 47.42% | 53.83% | 248.67 | # predictor.delete_endpoint()
introduction_to_applying_machine_learning/object2vec_document_embedding/object2vec_document_embedding.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # Gradient descent algorithm for Scenario 2
#
#
# In this part, we implement a gradient descent algorithm to optimize the objective loss function in Scenario 2:
#
#
# $$\min F := \min \frac{1}{2(n-1000)} \sum_{i=1000}^n \left( fbpredic(i) + a \cdot tby(i) + b \cdot ffr(i) + c \cdot fta(i) - asp(i) \right)^2$$
#
# Gradient descent:
#
# $$ \beta_k = \beta_{k-1} - \delta \cdot \nabla F, $$
# where the step size $\delta$ controls how far each iteration moves; the update steps along the negative gradient since we are minimizing $F$.
#
#
# ### Detailed plan
#
# First, split the data as train and test with 80% and 20% respectively (note: the code below actually uses roughly a 90%/10% split). For the training part we need the prophet()-predicted price, and there are a couple of issues. One is that prophet() cannot predict too far into the future. The other is that we cannot call prophet() too many times, since each call takes a lot of time. So we will use a sliding-window strategy:
#
# 1. Split the train data as train_1 and train_2, where train_1 is used as a sliding window to fit prophet() and produce predictions on train_2. Train_2 is then used to train the model we proposed above.
#
# 2. After we have full-size (size of train_2) predictions from prophet(), we use gradient descent to fit the above model, extracting the coefficients of the features to make predictions on the testing data.
# # + import pandas as pd import numpy as np from sklearn.preprocessing import PolynomialFeatures from sklearn.linear_model import LinearRegression from sklearn.preprocessing import FunctionTransformer from numpy import meshgrid ## For plotting import matplotlib.pyplot as plt from matplotlib import style import datetime as dt import seaborn as sns sns.set_style("whitegrid") # - df= pd.read_csv('df7.csv', parse_dates=['Date']) df = df.rename(columns = {"Date":"ds","Close":"y"}) df # len(df) df.columns # + from datetime import datetime p = 0.9 # Train around 90% of dataset cutoff = int((p*len(df)//100)*100) df_train = df[:cutoff].copy() df_test = df.drop(df_train.index).copy() print(df_train, df_test) # - # Use prophet() to make predictions, we will split training as train_1 and train_2 with ratio 40% vs 60%, # train_1 will be used to fit prophet(), then predict on train_2. Getting the predictions, feed the data into # the Scenario 2 model, train again to get the parameters a,b,c,.... # + #prophet part from fbprophet import Prophet start = 1000 # 1000 # the number of initial data for training pred_size =100 # predicted periods num_winds = int((df_train.shape[0]-start)/pred_size) #(4000-3000)/100 =30 pro_pred = [] # use accumulated data to predict the next pred_size data for i in range(num_winds): tmp_train = df_train.iloc[: start+ i*pred_size].copy() fbp = Prophet(daily_seasonality=True) # fit close price using fbprophet model fbp.fit(tmp_train[['ds','y']]) # predict pred_size futures and get the forecast price fut = fbp.make_future_dataframe(periods = pred_size,) tmp_forecast = fbp.predict(fut) # only require the forcast on test data of temporary training data pred = tmp_forecast[start+ i*pred_size:].yhat pro_pred.append(pred) # - pro_pred flat_pro_pred = [item for l1 in pro_pred for item in l1] df.columns df= pd.read_csv('df7.csv', parse_dates=['Date']) df = df.rename(columns = {"Date":"ds","Close":"y"}) # + df['tby_sqsq'] = df['tby']**2 # df['eps_sqrt'] = 
np.sqrt(df['eps']) df['une_div_vix'] =df['une'] * df['div'] * df['vix'] df = df.drop(columns=['tby','ffr', 'div', 'une','vix']) # - df.columns possible_features = ['fta', 'eps', 'tby_sqsq', 'une_div_vix'] df_train = df[:cutoff].copy() df_test = df[cutoff:].copy() from sklearn.linear_model import LinearRegression reg = LinearRegression(fit_intercept=False, normalize=True, copy_X = True) reg.fit(df_train[start:cutoff][possible_features], df_train[start:cutoff]['y'] - flat_pro_pred) # + coef = [] for i in range(len(possible_features)): coef.append(np.round(reg.coef_[i],5)) print(coef) # + # Forecast the Test Data from fbprophet import Prophet test_time = int((1-p)* len(df)) fbp = Prophet(daily_seasonality=True) fbp.fit(df_train[['ds','y']]) fut = fbp.make_future_dataframe(periods = test_time,) forecast = fbp.predict(fut) pred_test = forecast[cutoff:cutoff+test_time].yhat pred_test = pred_test.ravel() # - len(pred_test) pp_test = pred_test.copy() # predicted price on testing data pp_train = flat_pro_pred.copy() # predicted price on training data for i in range(len(possible_features)): pp_test += coef[i] * df_test[df_test.columns[i+2]][:test_time].ravel() pp_train += coef[i] * df_train[df_train.columns[i+2]][start:].ravel() from sklearn.metrics import mean_squared_error as MSE # MSE for test data # Actual close price: df_test[:test_time].y # Predicted price by prophet: pred_test # Predicted price by tuning mse1 = MSE(df_test[:test_time].y,pred_test) # mse2 = MSE(df_test[:test_time].y, pp_test) print(mse1,mse2) # MSE for train data mse3 = MSE(df_train[start:].y, flat_pro_pred) mse4 = MSE(df_train[start:].y, pp_train) print(mse3,mse4) # + train_pred_yhat = [np.nan for i in range(start)] + flat_pro_pred train_pp_train = [np.nan for i in range(start)] + pp_train.tolist() # - train_date = df_train[['ds']].to_numpy().ravel() train_date fc_train = pd.DataFrame(data={'ds':train_date,'fbsp':train_pred_yhat, 'imsp': train_pp_train}) fc_train m = len(forecast) -cutoff 
# Assemble the test-period forecasts into one frame:
#   fbsp = raw Facebook Prophet forecast,
#   imsp = Prophet forecast adjusted by the fitted linear feature correction
#          (the "improved" model).
test_pred_yhat = forecast.loc[cutoff:].yhat.copy().to_numpy().ravel()
test_date = df_test[['ds']][:m].to_numpy().ravel()
fc_test = pd.DataFrame(data={'ds':test_date, 'fbsp':test_pred_yhat, 'imsp': pp_test.tolist() })
# bare expression: displays the frame when run as a notebook cell
fc_test

# +
# Plot training data, in-sample fits, and out-of-sample forecasts on one axis
# so the FB-only and improved models can be compared visually.
plt.figure(figsize=(18,10))

# plot the training data
plt.plot(df_train.ds,df_train.y,'b', label = "Training Data")
plt.plot(df_train.ds, fc_train.imsp,'g-', label = "Improved Fitted Values")

# plot the fit
plt.plot(df_train.ds, fc_train.fbsp,'r-', label = "FB Fitted Values")

# # plot the forecast
plt.plot(df_test[:m].ds, fc_test.fbsp,'r--', label = "FB Forecast")
plt.plot(df_test[:m].ds, fc_test.imsp,'g--', label = "Improved Forecast")
plt.plot(df_test[:m].ds,df_test[:m].y,'b--', label = "Test Data")

plt.legend(fontsize=14)
plt.xlabel("Date", fontsize=16)
plt.ylabel("SP&500 Close Price", fontsize=16)

plt.show()
# -
scratch work/Yuqing-Data-Merge/.ipynb_checkpoints/Scenario2-v9-checkpoint.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3 (master thesis)
#     language: python
#     name: masterthesis
# ---

# # Data on spread of salary

# ![image.png](attachment:image.png)

import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sbn
import pickle
from scipy.interpolate import interp1d

# Column layout of the raw semicolon-separated quartile exports (no header row);
# shared by the men's and women's files.
QUARTILE_COLUMNS = ['gender', 'personal_group', 'payment', 'sectors', 'year',
                    'age', 'mean', 'lower_quartile', 'median', 'upper_quartile']

df_men_raw = pd.read_csv('..//data//LONS_50_men_quartiles.csv', encoding='latin-1', sep=';', header=None)
df_men_raw.columns = QUARTILE_COLUMNS

df_women_raw = pd.read_csv('..//data//LONS_50_women_quartiles.csv', encoding='latin-1', sep=';', header=None)
df_women_raw.columns = QUARTILE_COLUMNS

# .copy() so the derived-column assignments below write into independent frames
# instead of views of the raw data (avoids pandas' SettingWithCopyWarning and
# the risk of silently lost assignments).
df_men = df_men_raw[['age', 'mean', 'lower_quartile', 'median', 'upper_quartile']].copy()
df_women = df_women_raw[['age', 'mean', 'lower_quartile', 'median', 'upper_quartile']].copy()

# +
def get_low_age(x):
    """Return the lower bound of an age-bracket label of the form '<low>-<high> ...' as an int."""
    return int(x.split('-')[0])


def get_high_age(x):
    """Return the upper bound of an age-bracket label of the form '<low>-<high> ...' as an int."""
    return int(x.split('-')[1].split()[0])


# Derive the bracket bounds and a mid-bracket age to plot/interpolate against.
df_men['low_age'] = df_men['age'].apply(get_low_age)
df_men['high_age'] = df_men['age'].apply(get_high_age)
df_men['mean_age'] = (df_men['low_age'] + df_men['high_age']) / 2

df_women['low_age'] = df_women['age'].apply(get_low_age)
df_women['high_age'] = df_women['age'].apply(get_high_age)
df_women['mean_age'] = (df_women['low_age'] + df_women['high_age']) / 2

# +
# Men's wage profile: inter-quartile band plus mean and median against mid-bracket age.
f, (ax1, ax2) = plt.subplots(1, 2, figsize=(14,4), sharey=True)

ax1.fill_between(df_men['mean_age'], df_men['lower_quartile'], df_men['upper_quartile'],
                 alpha=0.3, label='upper/lower quartiles')
ax1.plot(df_men['mean_age'], df_men['mean'], label='mean')
ax1.plot(df_men['mean_age'], df_men['median'], ls='--', color='black', label='median')
ax1.legend()
ax1.set_title('Men Wage Profiles')
# Women's wage profile on the second axis created in the previous cell.
ax2.fill_between(df_women['mean_age'], df_women['lower_quartile'], df_women['upper_quartile'], alpha=0.3, label='upper/lower quartiles')
ax2.plot(df_women['mean_age'], df_women['mean'], label='mean')
ax2.plot(df_women['mean_age'], df_women['median'], ls='--', color='black', label='median')
ax2.legend()
ax2.set_title('Women Wage Profiles')
# -

# Persist the cleaned quartile tables for use by other notebooks.
df_men.to_pickle('..//data//wage_quartiles_empirical_men.pkl')
df_women.to_pickle('..//data//wage_quartiles_empirical_women.pkl')

# bare expressions: notebook-style inspection of summary numbers
df_women['median'].mean()
df_men['mean'].mean()
df_women['mean'].mean()
df_men

# # Creating wage path men interp1d

# Quadratic interpolation of mean wage vs. mid-bracket age; extrapolates
# outside the observed age range. max(120, ...) floors the interpolated wage
# at 120 -- presumably a wage floor in DKK; TODO confirm the constant's meaning.
men_wage, men_age = df_men['mean'], df_men['mean_age']
g = interp1d(men_age, men_wage, kind='quadratic', bounds_error=False, fill_value='extrapolate')
# NOTE(review): `f` is rebound to a file handle in the final cell below; its
# last use precedes that, but it's easy to trip over when re-running cells.
f = lambda x: max(120, g(x))
men_wage_hat = [f(i) for i in range(18,70)]

# +
# Overlay the interpolated path on the empirical points and save the figure.
C=0.6
_f, ax = plt.subplots(1,1, figsize=(C*14,C*8))

ax.plot(range(18,70), men_wage_hat, label='Interpolated men wage')
ax.scatter(men_age, men_wage, marker='+', s=200, color='black', label='Empirical men wage')
ax.set_ylabel('wage DKK')
ax.set_xlabel('age')
ax.legend()

_f.savefig('..//figures//interpolated_men_wage')
# -

# so I can subset by index (list position 0..89 maps directly to age)
men_wage_hat_long = [f(i) for i in range(0,90)]
plt.plot(men_wage_hat_long)

with open('..//data//men_wage_path.pkl', 'wb') as f:
    pickle.dump(men_wage_hat_long, f)
src/cleaning_salary_distribution_data.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 (ipykernel) # language: python # name: python3 # --- # # `cmip6_preprocessing` demo for AWS webinar # # I am running this from the Pangeo deployment on AWS (more infos [here](https://pangeo.io/cloud.html#)). import matplotlib.pyplot as plt plt.rcParams['figure.figsize'] = 12, 6 # %config InlineBackend.figure_format = 'retina' # ## Request Dask Cluster for parallel processing of the data # # This notebook server does not have enough cores to efficiently work with the data, so lets get a dask cluster set up first: # + from dask_gateway import GatewayCluster from distributed import Client cluster = GatewayCluster() cluster.scale(30) client = Client(cluster) client # - # ## Load and clean the data # # For this example we use a catalog of CMIP6 zarr files, maintained by the Pangeo Project, and hosted publicly on S3. For more info on the pangeo CMIP6 data click [here](https://pangeo-data.github.io/pangeo-cmip6-cloud/). 
# # This example uses the custom intake-esm catalog provided, but all functions shown here can be applied to an xarray dataset directly import intake url = "https://cmip6-pds.s3.amazonaws.com/pangeo-cmip6.json" col = intake.open_esm_datastore(url) # + # This function is the 'all-in-one' cleaning component of cmip6_preprocessing from cmip6_preprocessing.preprocessing import combined_preprocessing selected_models = ["IPSL-CM6A-LR", "ACCESS-ESM1-5", 'GFDL-ESM4', 'CESM2', 'MPI-ESM1-2-LR'] query = dict( experiment_id=["historical", "ssp585"], source_id=selected_models, ) kwargs = dict( zarr_kwargs={"consolidated": True, "use_cftime": True}, preprocess=combined_preprocessing, # This is the only modification needed aggregate=False, storage_options={'anon':True}, ) # load two dataset dictionaries: One for the surface temperature and another # for the horizontal grid area dset_dict = col.search( variable_id="tos", member_id=["r4i1p1f1", "r3i1p1f1", "r5i1p1f1","r2i1p1f1","r1i1p1f1"], table_id="Omon", **query ).to_dataset_dict(**kwargs) metric_dict = col.search( variable_id="areacello", **query ).to_dataset_dict(**kwargs) # - # ## Postprocessing - Combining datasets for final analysis # # Now we will add the metrics (horizontal cell area) and concatenate the members of each model into # + from cmip6_preprocessing.postprocessing import match_metrics, concat_members dset_dict_w_metrics = match_metrics(dset_dict, metric_dict, ['areacello']) dset_dict_combined = concat_members(dset_dict_w_metrics) # - dset_dict_combined['CESM2.gn.historical.Omon'] # + import matplotlib.pyplot as plt color_dict = {k:f"C{ki}" for ki, k in enumerate(selected_models)} plt.figure(figsize=[8,4]) for ni, (name, ds) in enumerate(dset_dict_combined.items()): ds = ds.sel(time=slice('1850','2100')) # Weighted average of surface ocean temperatures sst = ds.tos.weighted(ds.areacello.fillna(0)).mean(['x','y']) # average over 2 years sst = sst.coarsen(time=12).mean() ### Plotting ### color = 
color_dict[ds.source_id] # plot single members sst.plot(hue='member_id', color=color, label=name, add_legend=False, alpha=0.25) # plot member average sst.mean('member_id').plot(linewidth=2, color=color,add_legend=False) plt.ylabel('Global Average Sea Surface Temperature') # -
AWS_webinar_demo.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + #slicing import numpy as np arr = np.arange(10) s = arr[0:9:2] #s = slice (0,9,2) print(s) #array slicing import numpy as np arr = np.array([[1,2,4], [3,5,4], [8,6,7]]) print (arr[1:]) print("our array is") print(arr) print('\n') print("when used elipsis") print(arr[1,...]) #elipsis print('\n') # + import numpy as np a = np.array([[11,22],[33,44 ],[55,66]]) x, y = a[:, :-1], a[:, -1] print("x") print(x) print("y") print(y) # -
slicing_and_indexing.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # ! pip install -e "d:/code/sdk-cli-v2/src/azure-ml" subscription_id = '5f08d643-1910-4a38-a7c7-84a39d4f42e0' resource_group = 'sdk_vnext_cli' workspace = 'sdk_vnext_cli' # %env AZUREML_DEV_URL_MFE=http://localhost:65535/mferp/managementfrontend # + from azure.ml import MLClient # ml_client = MLClient(subscription_id, resource_group) ml_client = MLClient(subscription_id, resource_group, default_workspace_name=workspace) # - ml_client.endpoints.create(file="../yaml/batch_endpoint.yaml") ml_client.endpoints.get(name="myBatchEndpoint", type="batch").properties.__dict__
sdk2-src/samples/curated_sdk/batch_endpoint.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # TFF/Anthos Hello World # # Before running the notebook make sure that you have obtained access credentials to GCP by executing # # ``` # gcloud auth login # ``` # # from the JupyterLab terminal # + import collections import time import numpy as np import grpc import sys import tensorflow as tf import tensorflow_federated as tff import nest_asyncio nest_asyncio.apply() # - # ### Define federated computation # # Note that we need the explicit `tf.function` to make `tf.print` work. # + @tff.tf_computation(tf.int64) @tf.function def hello(n): tf.print("Hello: ", n) return n @tff.federated_computation(tff.FederatedType(tf.int64, tff.CLIENTS)) def compute_federated_sum(federated_n): return tff.federated_sum(tff.federated_map(hello, federated_n)) # - # ### Run computation locally tff.backends.native.set_local_execution_context() compute_federated_sum([1, 2, 1]) # ### Run computation on the executor in the `server` cluster # + port = 8000 ip_address = '10.108.9.176' # Cluster IP channels = [grpc.insecure_channel(f'{ip_address}:{port}')] tff.backends.native.set_remote_execution_context(channels, rpc_mode='REQUEST_REPLY') # - # The first call may hang. Interrupt and execute it again. compute_federated_sum([1]) # #### Double check that the computation executed on the remote executor in the server cluster # # Get cluster credentials. 
# + server_cluster_name = 'tff-server' zone = 'us-west1-a' # !gcloud container clusters get-credentials {server_cluster_name} --zone {zone} # - # List pods # + namespace = 'tff' # pods = !kubectl get pods -n {namespace} --no-headers -o custom-columns=":metadata.name" remote_executor_pod = [pod for pod in pods if pod.startswith('remote')][0] remote_executor_pod # - # Retrieve logs # logs = !kubectl logs {remote_executor_pod} logs[-3:-1] # ### Run computation on the client executors # + port = 8000 ip_addresses = ['172.16.58.3', '192.168.127.12'] # LoadBalancer IPs channels = [grpc.insecure_channel(f'{ip_address}:{port}') for ip_address in ip_addresses] tff.backends.native.set_remote_execution_context(channels, rpc_mode='REQUEST_REPLY') # - # The first call may hang. Interrupt and execute it again. compute_federated_sum([1, 2]) # #### Double check that the computation executed on the remote executor on one of the client cluster # # Get cluster credentials. # + client_cluster_name = 'tff-client-1' zone = 'us-west1-a' # !gcloud container clusters get-credentials {client_cluster_name} --zone {zone} # - # List pods # + namespace = 'tff' # pods = !kubectl get pods -n {namespace} --no-headers -o custom-columns=":metadata.name" remote_executor_pod = [pod for pod in pods if pod.startswith('remote')][0] remote_executor_pod # - # Retrieve logs # logs = !kubectl logs {remote_executor_pod} logs[-3:-1]
notebooks/hello-world.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 2 # language: python # name: python2 # --- # + # Define data directories # # %cd C:\Users\Natallia\PycharmProjects\spam-classifier import os SPAM = "spam" NON_SPAM = "non_spam" EMAILS_DIR = os.path.join("training_emails") EMAILS_SPAM_DIR = os.path.join(EMAILS_DIR, SPAM) EMAILS_NON_SPAM_DIR = os.path.join(EMAILS_DIR, NON_SPAM) # + # Helper to fetch training emails # Skip if the emails are already downloaded import email.policy import tarfile from six.moves import urllib HARD_HAM="hard_ham" EASY_HAM="easy_ham" CORPUS_VERSION="20030228_" DOWNLOAD_ROOT = "http://spamassassin.apache.org/old/publiccorpus/" SPAM_URL = DOWNLOAD_ROOT + CORPUS_VERSION + "spam.tar.bz2" NON_SPAM_URL = DOWNLOAD_ROOT + CORPUS_VERSION + EASY_HAM + ".tar.bz2" def fetch_spam_data(data_path=EMAILS_DIR): if not os.path.isdir(data_path): os.makedirs(data_path) for filename, url in (("ham.tar.bz2", NON_SPAM_URL), ("spam.tar.bz2", SPAM_URL)): path = os.path.join(data_path, filename) if not os.path.isfile(path): urllib.request.urlretrieve(url, path) tar_bz2_file = tarfile.open(path) tar_bz2_file.extractall(data_path) tar_bz2_file.close() os.remove(os.path.join(data_path,"ham.tar.bz2")) os.remove(os.path.join(data_path,"spam.tar.bz2")) os.rename(os.path.join(data_path,EASY_HAM), os.path.join(EMAILS_DIR,"non_spam")) fetch_spam_data() # + # load email test set import email.parser import email.policy import os import re # remove index files # TODO test for dir in [EMAILS_SPAM_DIR, EMAILS_NON_SPAM_DIR]: index_file = os.path.join(dir, "cmds") if os.path.isfile(index_file): os.remove(index_file) spam_fileNames = [name for name in os.listdir(EMAILS_SPAM_DIR)] non_spam_fileNames = [name for name in os.listdir(EMAILS_NON_SPAM_DIR)] # Load emails (text) from the predefined folders def load_email(is_spam, filename, file_path = EMAILS_DIR): 
directory = SPAM if is_spam else NON_SPAM with open(os.path.join(file_path, directory, filename), "rb") as f: return email.parser.BytesParser(policy=email.policy.default).parse(f) spam_emails = [load_email(is_spam=True, filename=name) for name in spam_fileNames] non_spam_emails = [load_email(is_spam=False, filename=name) for name in non_spam_fileNames] # + # extract text from test emails from html import unescape import numpy as np from sklearn.model_selection import train_test_split def html_to_plain_text(html): text = re.sub('<head.*?>.*?</head>', '', html, flags=re.M | re.S | re.I) text = re.sub('<a\\s.*?>', ' HYPERLINK ', text, flags=re.M | re.S | re.I) text = re.sub('<.*?>', '', text, flags=re.M | re.S) text = re.sub(r'(\s*\n)+', '\n', text, flags=re.M | re.S) return unescape(text) def email_to_text(email): html = None for part in email.walk(): c_type = part.get_content_type() if c_type not in ("text/plain", "text/html"): continue try: content = part.get_content() except: # in case of encoding issues content = str(part.get_payload()) if c_type == "text/plain": return content else: html = content if html: return html_to_plain_text(html) spam_data = [email_to_text(x) for x in spam_emails] non_spam_data = [email_to_text(x) for x in non_spam_emails] # leave first 500 files if the entire data set cannot be manipulated in memory (train_test_split raises memory error) # non_spam_data = non_spam_data[:500] X = np.array(non_spam_data + spam_data) y = np.array([0] * len(non_spam_data) + [1] * len(spam_data)) # 0-ham & 1-spam X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42) # + # save test email text in a folder with expected structure DATA_DIR = os.path.join("training_data") DATA_SPAM_DIR = os.path.join(DATA_DIR, SPAM) DATA_NON_SPAM_DIR = os.path.join(DATA_DIR, NON_SPAM) for directory in [DATA_DIR, DATA_SPAM_DIR, DATA_NON_SPAM_DIR]: if not os.path.exists(directory): os.makedirs(directory) # save preprocessed email text def 
save_email_text(is_spam, filename, email, spam_path = DATA_DIR): directory = SPAM if is_spam else NON_SPAM print(email, file=open(os.path.join(spam_path, directory, filename), 'w', encoding='utf-8')) assert len(spam_fileNames) == len(spam_data) assert len(non_spam_fileNames) == len(non_spam_data) for i, text in enumerate(spam_data): save_email_text(is_spam=True, filename=spam_fileNames[i], email=text) for i, text in enumerate(non_spam_data, start=0): save_email_text(is_spam=False, filename=non_spam_fileNames[i], email=text) # -
prepare_data.ipynb
# -*- coding: utf-8 -*- # --- # jupyter: # jupytext: # text_representation: # extension: .jl # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Julia 1.5.2 # language: julia # name: julia-1.5 # --- # # Hyperelasticity Example # dependencies using LFAToolkit using LinearAlgebra using Pkg Pkg.activate("./") Pkg.instantiate() using Plots # + code_folding=[20, 49] # setup mesh = Mesh3D(1.0, 1.0, 1.0) finep = 2 coarsep = 1 numbercomponents = 3 dimension = 3 finebasis = TensorH1LagrangeBasis(finep + 1, finep + 1, numbercomponents, dimension) coarsebasis = TensorH1LagrangeBasis(coarsep + 1, finep + 1, numbercomponents, dimension) ctofbasis = TensorH1LagrangeBasis(coarsep + 1, finep + 1, numbercomponents, dimension, lagrangequadrature = true) # constants E = 1E6 # Young's modulus ν = 0.3 # Poisson's ratio K = E/(3*(1 - 2*ν)) # bulk modulus λ = E*ν/((1 + ν)*(1 - 2*ν)) # Lamé parameters μ = E/(2*(1 + ν)) # state gradu = [1; 2; 3]*ones(1, 3); function neohookeanweakform(deltadu::Array{Float64}, w::Array{Float64}) # dP = dF S + F dS # deformation gradient F = gradu + I J = det(F) # Green-Lagrange strain tensor E = (gradu*gradu' + gradu'*gradu)/2 # right Cauchy-Green tensor C = 2*E + I C_inv = C^-1 # second Piola-Kirchhoff S = λ*log(J)*C_inv + 2*μ*C_inv*E # delta du deltadu = deltadu' # dF dF = deltadu + I # deltaE deltaE = (deltadu*deltadu' + deltadu'*deltadu)/2 # dS dS = λ*sum(C_inv.*deltaE)*C_inv + 2*(μ - λ*log(J))*C_inv*deltaE*C_inv # dP dP = (dF*S + F*dS) * w[1] return [dP'] end # linearized Neo-Hookean operators function makeoperator(basis::TensorBasis) inputs = [ OperatorField(basis, [EvaluationMode.gradient], "gradent of deformation"), OperatorField(basis, [EvaluationMode.quadratureweights], "quadrature weights"), ] outputs = [ OperatorField( basis, [EvaluationMode.gradient], "test function gradient of deformation", ), ] return Operator(neohookeanweakform, mesh, inputs, outputs) end fineoperator = 
makeoperator(finebasis) coarseoperator = makeoperator(coarsebasis) # Chebyshev smoother chebyshev = Chebyshev(fineoperator) # p-multigrid preconditioner multigrid = PMultigrid(fineoperator, coarseoperator, chebyshev, [ctofbasis, ctofbasis, ctofbasis])
examples/jupyter/demo311_hyperelasticity.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + [markdown] colab_type="text" # # MixUp augmentation for image classification # # **Author:** [<NAME>](https://twitter.com/RisingSayak)<br> # **Date created:** 2021/03/06<br> # **Last modified:** 2021/03/06<br> # **Description:** Data augmentation using the mixup technique for image classification. # + [markdown] colab_type="text" # ## Introduction # + [markdown] colab_type="text" # _mixup_ is a *domain-agnostic* data augmentation technique proposed in [mixup: Beyond Empirical Risk Minimization](https://arxiv.org/abs/1710.09412) # by Zhang et al. It's implemented with the following formulas: # # ![](https://i.ibb.co/DRyHYww/image.png) # # (Note that the lambda values are values with the [0, 1] range and are sampled from the # [Beta distribution](https://en.wikipedia.org/wiki/Beta_distribution).) # # The technique is quite systematically named - we are literally mixing up the features and # their corresponding labels. Implementation-wise it's simple. Neural networks are prone # to [memorizing corrupt labels](https://arxiv.org/abs/1611.03530). mixup relaxes this by # combining different features with one another (same happens for the labels too) so that # a network does not get overconfident about the relationship between the features and # their labels. # # mixup is specifically useful when we are not sure about selecting a set of augmentation # transforms for a given dataset, medical imaging datasets, for example. mixup can be # extended to a variety of data modalities such as computer vision, naturallanguage # processing, speech, and so on. # # This example requires TensorFlow 2.4 or higher. 
# + [markdown] colab_type="text" # ## Setup # + colab_type="code" import numpy as np import tensorflow as tf import matplotlib.pyplot as plt from tensorflow.keras import layers # + [markdown] colab_type="text" # ## Prepare the dataset # # In this example, we will be using the [FashionMNIST](https://research.zalando.com/welcome/mission/research-projects/fashion-mnist/) dataset. But this same recipe can # be used for other classification datasets as well. # + colab_type="code" (x_train, y_train), (x_test, y_test) = tf.keras.datasets.fashion_mnist.load_data() x_train = x_train.astype("float32") / 255.0 x_train = np.reshape(x_train, (-1, 28, 28, 1)) y_train = tf.one_hot(y_train, 10) x_test = x_test.astype("float32") / 255.0 x_test = np.reshape(x_test, (-1, 28, 28, 1)) y_test = tf.one_hot(y_test, 10) # + [markdown] colab_type="text" # ## Define hyperparameters # + colab_type="code" AUTO = tf.data.AUTOTUNE BATCH_SIZE = 64 EPOCHS = 10 # + [markdown] colab_type="text" # ## Convert the data into TensorFlow `Dataset` objects # + colab_type="code" # Put aside a few samples to create our validation set val_samples = 2000 x_val, y_val = x_train[:val_samples], y_train[:val_samples] new_x_train, new_y_train = x_train[val_samples:], y_train[val_samples:] train_ds_one = ( tf.data.Dataset.from_tensor_slices((new_x_train, new_y_train)) .shuffle(BATCH_SIZE * 100) .batch(BATCH_SIZE) ) train_ds_two = ( tf.data.Dataset.from_tensor_slices((new_x_train, new_y_train)) .shuffle(BATCH_SIZE * 100) .batch(BATCH_SIZE) ) # Because we will be mixing up the images and their corresponding labels, we will be # combining two shuffled datasets from the same training data. 
train_ds = tf.data.Dataset.zip((train_ds_one, train_ds_two))
val_ds = tf.data.Dataset.from_tensor_slices((x_val, y_val)).batch(BATCH_SIZE)
test_ds = tf.data.Dataset.from_tensor_slices((x_test, y_test)).batch(BATCH_SIZE)

# + [markdown] colab_type="text"
# ## Define the mixup technique function
#
# To perform the mixup routine, we create new virtual datasets using the training data from
# the same dataset, and apply a lambda value within the [0, 1] range sampled from a [Beta distribution](https://en.wikipedia.org/wiki/Beta_distribution)
# — such that, for example, `new_x = lambda * x1 + (1 - lambda) * x2` (where
# `x1` and `x2` are images) and the same equation is applied to the labels as well.

# + colab_type="code"
def sample_beta_distribution(size, concentration_0=0.2, concentration_1=0.2):
    """Draw `size` samples from a Beta(concentration_1, concentration_0)
    distribution via the ratio-of-gammas identity G1 / (G1 + G2).

    The defaults are the literal 0.2 used throughout this example; the
    previous defaults referenced `alpha`, a name that is undefined at
    module scope and raised a NameError as soon as this cell executed.
    """
    gamma_1_sample = tf.random.gamma(shape=[size], alpha=concentration_1)
    gamma_2_sample = tf.random.gamma(shape=[size], alpha=concentration_0)
    return gamma_1_sample / (gamma_1_sample + gamma_2_sample)


def mix_up(ds_one, ds_two, alpha=0.2):
    """Blend two (images, labels) batches: lam*b1 + (1-lam)*b2.

    `ds_one` / `ds_two` are same-shaped batches; `alpha` is the Beta
    concentration controlling how aggressively the pair is mixed.
    """
    # Unpack two datasets
    images_one, labels_one = ds_one
    images_two, labels_two = ds_two
    batch_size = tf.shape(images_one)[0]

    # Sample one lambda per example and reshape so it broadcasts over the
    # image tensor (N, 1, 1, 1) and the label tensor (N, 1).
    l = sample_beta_distribution(batch_size, alpha, alpha)
    x_l = tf.reshape(l, (batch_size, 1, 1, 1))
    y_l = tf.reshape(l, (batch_size, 1))

    # Perform mixup on both images and labels by combining a pair of images/labels
    # (one from each dataset) into one image/label
    images = images_one * x_l + images_two * (1 - x_l)
    labels = labels_one * y_l + labels_two * (1 - y_l)
    return (images, labels)


# + [markdown] colab_type="text"
# **Note** that here , we are combining two images to create a single one. Theoretically,
# we can combine as many we want but that comes at an increased computation cost. In
# certain cases, it may not help improve the performance as well.
# + [markdown] colab_type="text" # ## Visualize the new augmented dataset # + colab_type="code" # First create the new dataset using our `mix_up` utility train_ds_mu = train_ds.map( lambda ds_one, ds_two: mix_up(ds_one, ds_two, alpha=0.2), num_parallel_calls=AUTO ) # Let's preview 9 samples from the dataset sample_images, sample_labels = next(iter(train_ds_mu)) plt.figure(figsize=(10, 10)) for i, (image, label) in enumerate(zip(sample_images[:9], sample_labels[:9])): ax = plt.subplot(3, 3, i + 1) plt.imshow(image.numpy().squeeze()) print(label.numpy().tolist()) plt.axis("off") # + [markdown] colab_type="text" # ## Model building # + colab_type="code" def get_training_model(): model = tf.keras.Sequential( [ layers.Conv2D(16, (5, 5), activation="relu", input_shape=(28, 28, 1)), layers.MaxPooling2D(pool_size=(2, 2)), layers.Conv2D(32, (5, 5), activation="relu"), layers.MaxPooling2D(pool_size=(2, 2)), layers.Dropout(0.2), layers.GlobalAvgPool2D(), layers.Dense(128, activation="relu"), layers.Dense(10, activation="softmax"), ] ) return model # + [markdown] colab_type="text" # For the sake of reproducibility, we serialize the initial random weights of our shallow # network. # + colab_type="code" initial_model = get_training_model() initial_model.save_weights("initial_weights.h5") # + [markdown] colab_type="text" # ## 1. Train the model with the mixed up dataset # + colab_type="code" model = get_training_model() model.load_weights("initial_weights.h5") model.compile(loss="categorical_crossentropy", optimizer="adam", metrics=["accuracy"]) model.fit(train_ds_mu, validation_data=val_ds, epochs=EPOCHS) _, test_acc = model.evaluate(test_ds) print("Test accuracy: {:.2f}%".format(test_acc * 100)) # + [markdown] colab_type="text" # ## 2. 
Train the model *without* the mixed up dataset # + colab_type="code" model = get_training_model() model.load_weights("initial_weights.h5") model.compile(loss="categorical_crossentropy", optimizer="adam", metrics=["accuracy"]) # Notice that we are NOT using the mixed up dataset here model.fit(train_ds_one, validation_data=val_ds, epochs=EPOCHS) _, test_acc = model.evaluate(test_ds) print("Test accuracy: {:.2f}%".format(test_acc * 100)) # + [markdown] colab_type="text" # Readers are encouraged to try out mixup on different datasets from different domains and # experiment with the lambda parameter. You are strongly advised to check out the # [original paper](https://arxiv.org/abs/1710.09412) as well - the authors present several ablation studies on mixup # showing how it can improve generalization, as well as show their results of combining # more than two images to create a single one. # + [markdown] colab_type="text" # ## Notes # # * With mixup, you can create synthetic examples — especially when you lack a large # dataset - without incurring high computational costs. # * [Label smoothing](https://www.pyimagesearch.com/2019/12/30/label-smoothing-with-keras-tensorflow-and-deep-learning/) and mixup usually do not work well together because label smoothing # already modifies the hard labels by some factor. # * mixup does not work well when you are using [Supervised Contrastive # Learning](https://arxiv.org/abs/2004.11362) (SCL) since SCL expects the true labels # during its pre-training phase. # * A few other benefits of mixup include (as described in the [paper](https://arxiv.org/abs/1710.09412)) robustness to # adversarial examples and stabilized GAN (Generative Adversarial Networks) training. # * There are a number of data augmentation techniques that extend mixup such as # [CutMix](https://arxiv.org/abs/1905.04899) and [AugMix](https://arxiv.org/abs/1912.02781).
examples/vision/ipynb/mixup.ipynb
# -*- coding: utf-8 -*- # --- # jupyter: # jupytext: # text_representation: # extension: .r # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: R # language: R # name: ir # --- # # International CO2 Emissiions # # By Utplakshi-19BCE1597 and <NAME>-19BCE1593 # # ## Attaching all libraries needed` library(ggplot2) library(dplyr) library(ggrepel) library(ggthemes) library(RColorBrewer) library(skimr) library(psych) library(janitor) # ## We will be using two datasets namesly greenhouse_gas_inventory dataset and CO2Emission_LifeExp.csv. We will only be using the first dataset for data exploration. The second datset will be used for data exploration, analysis and visualization # ### Loading and opening the dataset, The dataset df contains 8406 entries. Classifying value fo co2 emmissions for each country from # ### 1990 to 2014. df <- read.csv(file = 'E:/Datasets/greenhouse_gas_inventory_data_data.csv', sep = ',') head(df) # ### Loading another dataset callled df1, This dataset contains Country with CO2 emmission and their population and life expectancy. # ### We will use this dataset to determine whether high population contributes to high polution df1<- read.csv(file = 'E:/Datasets/CO2Emission_LifeExp.csv', sep = ',') head(df1) # # Data Cleaning # ### Cleaning the values under 'Category' into shorter values: Data Cleaning # ### Before Data Cleaning: head(df$category) # ### Checking for unique categories in df. We can see the names of these categories are long so we need to shorten them. unique(df['category']) skim(df) # ### To shorten the values of the column we will be using transmute funtion. It will shorten the values like carbon_dioxide_co2_emissions_without_land_use_land_use_change_and_forestry_lulucf_in_kilotonne_co2_equivalent to'CO2' and so on... 
#Data Cleaning: dataset <- df %>% transmute(category=recode(category, carbon_dioxide_co2_emissions_without_land_use_land_use_change_and_forestry_lulucf_in_kilotonne_co2_equivalent='CO2', greenhouse_gas_ghgs_emissions_including_indirect_co2_without_lulucf_in_kilotonne_co2_equivalent='GHG-indirect-CO2', greenhouse_gas_ghgs_emissions_without_land_use_land_use_change_and_forestry_lulucf_in_kilotonne_co2_equivalent='GHG', hydrofluorocarbons_hfcs_emissions_in_kilotonne_co2_equivalent='HFC', methane_ch4_emissions_without_land_use_land_use_change_and_forestry_lulucf_in_kilotonne_co2_equivalent='CH4', nitrogen_trifluoride_nf3_emissions_in_kilotonne_co2_equivalent='HF3', nitrous_oxide_n2o_emissions_without_land_use_land_use_change_and_forestry_lulucf_in_kilotonne_co2_equivalent='N2Os', perfluorocarbons_pfcs_emissions_in_kilotonne_co2_equivalent='PFCs', sulphur_hexafluoride_sf6_emissions_in_kilotonne_co2_equivalent='SF6', unspecified_mix_of_hydrofluorocarbons_hfcs_and_perfluorocarbons_pfcs_emissions_in_kilotonne_co2_equivalent='HFC-PFC-mix')) # # For second dataset # ### Renaming the Column "country_or_area" to "country" and removing UN because it is not a country dataset <- df%>% rename(country=country_or_area)%>%filter(country!='European Union') str(df1) skim(df1) # ### To check if values should be dropped or not nrowsdf1=nrow(df1) nrowsdf1 ncompletedf1=sum(complete.cases(df1)) ncompletedf1 ncompletedf1/nrowsdf1 # ### Since the ratio of completed cases and number of rows is 1 we can interpret that there are no missing values in the data # ### The dataframe df1 is complete without any missing values therefore no more data cleaning is required. # ### Understanding Dataframe df: summary(df) max(df$value) print(table(df$country, df$category)) subset(df,year==2014&value>40000) # # Implementing machine learning algorithms. # # 1.KNN # # 2.PCA # # 3.Linear Regression # # Lets divides the data sets into training dataset and test datasets. 
set.seed(222) df2 <- sample(2, nrow(df1), replace = TRUE, prob = c(0.8, 0.2)) training <- df1[df2==1,] testing <- df1[df2==2,] # ## KNN Machine Learning Algorithm in R pairs.panels(training[,c(3:7)], gap = 0, bg = c("red", "yellow", "blue")[training$Species], pch=21) pc <- prcomp(training[,c(3:7)], center = TRUE, scale. = TRUE) attributes(pc) pc$scale print(pc) # ## Orthogonality of PCs pairs.panels(pc$x, gap=0, bg = c("red", "yellow", "blue")[training$Species], pch=21) # ## Making a linear regression model plot(testing$LifeExpectancy, testing$YearlyChange) plot(testing$CO2Emissions~testing$LifeExpectancy) emission.reg<-lm(CO2Emissions~LifeExpectancy, data=testing) abline(emission.reg,col='red') plot(testing$CO2Emissions~testing$Population) population.reg<-lm(CO2Emissions~Population,data=testing) abline(emission.reg,col='blue') summary(emission.reg) summary(population.reg) emission.reg population.reg # ## Making a correlation matrix and Interpreting results through it. # ### Installing package that we need for this visualization install.packages("corrplot") # ### Attaching the package and making a correlational matrix. # # ### In Correlation matrix -1 indicates a perfectly negative linear correlation between two variables. # ### 0 indicates no linear correlation between two variables. # ### 1 indicates a perfectly positive linear correlation between two variables. library(corrplot) M <- cor(df1[3:7]) corrplot.mixed(M) corrplot(M, method = 'shade', order = 'AOE', diag = FALSE) # ### From the above graph we can see that their is perfectly negative realtionship with yearly change mostly and the componets having a negative relationship are Life Expectancy, PerCapita and Co2 emissions. Where as we can see some components have constant relationship i.e 0 here 0 indicates no linear correlation/constant realtionship between two variables. 
We can see that population and CO2 Emissions have highly positive realtionship from which we can interpret that When the population Increases the Co2 Emissions also increases # # Using the seriation package we will be attempting Bar-Joseph seration algorithm for fast optimal leaf ordering. That refines output from heirachichal clustering analysis. library(seriation) # Remove the column `country` and `code` (column 1 and 2) s<-df1[3:7] set.seed(123) # Reorder the objects randomly s <- s[sample(seq_len(nrow(s))),] head(s, 2) # Compute dissimilarity matrix dist_result <- dist(s) # Seriate objects, reorder rows based on their similarity object_order <- seriate(dist_result) # Extract object orders head(get_order(object_order), 15) # Visualize the effect of seriation on dissimilarity matrix pimage(dist_result, main = "Random order") pimage(dist_result, order = object_order, main = "Reordered") pimage(scale(s), main = "Random") # Heatmap of the reordered data pimage(scale(s), order = c(object_order, NA), main = "Reordered") install.packages('gplots') # + library(gplots) # Standardize the data df_scaled <- scale(s, center = FALSE) # Produce a heat map with optimally reordered dendrograms # Hierarchical clustering is used to produce dendrograms hmap(df_scaled, margin = c(7, 4), cexCol = 1, labRow = FALSE) # - # Specify seriation method # seriation on the dissimilarity matrices for rows and columns is performed hmap(df_scaled, method = "MDS") # ## Bertin’s permutation matrix # # ## The idea is to reveal a more homogeneous structure in a data matrix by simultaneously rearranging rows and columns. The rearranged matrix is displayed and cases and variables can be grouped manually to gain a better understanding of the data. 
# + # Replace original values by their ranks g <- head(apply(df1[3:7], 2, rank), 30) # Perform seriation on row and columns row_order <- seriate(dist(g, "minkowski", p = 1), method ="TSP") col_order <- seriate(dist(t(g), "minkowski", p = 1), method ="TSP") orders <- c(row_order, col_order) # Visualization: matrix of bars # Original matrix bertinplot(g) # - # Rearranged matrix bertinplot(g, orders)
International CO2 Emissions.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python [conda env:carnd-term1]
#     language: python
#     name: conda-env-carnd-term1-py
# ---

# +
# import libraries here
import pickle
import os
from urllib.request import urlretrieve
from zipfile import ZipFile
import random
import csv
import cv2
import numpy as np
from keras.models import Sequential
from keras.layers import Flatten, Dense, Lambda, Cropping2D, Activation, Dropout
from keras.layers.convolutional import Convolution2D
from keras.layers.pooling import MaxPooling2D

print('All libraries imported successfully')


# +
# downloading data
def download(url, file):
    """Download `url` to the local path `file`, skipping if it already exists."""
    if not os.path.isfile(file):
        print('Downloading ' + file + '...')
        urlretrieve(url, file)
        print('Download Finished')


# Download the training/test dataset FIRST, then unzip it.  (The original
# cell extracted before downloading, which crashed with FileNotFoundError
# on a fresh checkout where data.zip does not exist yet.)
download('https://d17h27t6h515a5.cloudfront.net/topher/2016/December/584f6edd_data/data.zip', 'data.zip')

# Unzip the downloaded file to get the pickled data.  The context manager
# closes the archive handle, and the name `archive` avoids shadowing the
# built-in `zip`.
with ZipFile('data.zip') as archive:
    archive.extractall()

# Wait until you see that all files have been downloaded.
print('All files downloaded.') # + # reading measurement file lines = [] with open('./data/driving_log.csv') as csv_file: reader = csv.reader(csv_file) for line in reader: lines.append(line) print('mesurement file read successful') # + # reading images and measurements from measurement file images = [] measurements = [] for line in lines[1:]: # reading center image r = random.uniform(0,1) if r <= 0.3: source_path = line[1] file_name = source_path.split('/')[-1] current_path = './data/IMG/' + file_name image = cv2.imread(current_path) measurement = float(line[3]) + 0.25 images.append(image) measurements.append(measurement) if r > 0.2 and r < 0.8: source_path = line[0] file_name = source_path.split('/')[-1] current_path = './data/IMG/' + file_name image = cv2.imread(current_path) measurement = float(line[3]) images.append(image) measurements.append(measurement) if r >= 0.7: source_path = line[2] file_name = source_path.split('/')[-1] current_path = './data/IMG/' + file_name image = cv2.imread(current_path) measurement = float(line[3]) - 0.25 images.append(image) measurements.append(measurement) X_train = np.array(images) y_train = np.array(measurements) print('data reading completed.') # + # generating model model = Sequential() # Pre-processing data model.add(Lambda(lambda x: x/255.0 - 0.5, input_shape=(160,320,3))) model.add(Cropping2D(cropping=((70,25),(0,0)))) # Layer 1: convolution layer, input 65 x 320 x 3, output 63 x 318 x 32 #model.add(Convolution2D(24, 5, 5, subsample=(2,2),activation="relu")) model.add(Convolution2D(24, 5, 5, subsample=(2,2), activation="relu")) model.add(Convolution2D(24, 5, 5, activation="relu")) # Layer 2: convolution layer model.add(Convolution2D(36, 5, 5, subsample=(2,2), activation="relu")) # Layer 3: convolution layer model.add(Convolution2D(48, 3, 3, subsample=(2,2), activation="relu")) model.add(Convolution2D(64, 3, 3, activation="relu")) model.add(Convolution2D(80, 3, 3, activation="relu")) model.add(Flatten()) 
# Fully-connected head: funnel the flattened convolutional features down
# to a single steering-angle prediction.  Dropout layers were tried and
# left commented out.
model.add(Dense(100))
model.add(Activation("relu"))
#model.add(Dropout(0.3))
model.add(Dense(50))
model.add(Activation("relu"))
#model.add(Dropout(0.35))
model.add(Dense(10))
model.add(Activation("relu"))
#model.add(Dropout(0.2))
# Single linear output: the predicted steering angle (regression, so no
# activation on the last layer).
model.add(Dense(1))

model.summary()

# +
# Mean-squared-error loss with the Adam optimizer -- standard for a
# continuous regression target such as a steering angle.
model.compile(loss='mse',optimizer='adam')
# 20% of the samples held out for validation; `nb_epoch` is the Keras 1.x
# spelling of `epochs`, consistent with the Convolution2D API used above.
model.fit(X_train, y_train, validation_split=0.2, shuffle=True, nb_epoch=5)

model.save('model1.h5')
# -
model.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # name: python3 # --- # + [markdown] id="view-in-github" colab_type="text" # <a href="https://colab.research.google.com/github/lucasmoratof/customers_review_project/blob/master/NLP_Customer_Review_ML_Complete.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a> # + id="gR-woI-YoNGa" colab_type="code" colab={} import pandas as pd import nltk from sklearn.model_selection import train_test_split from sklearn.feature_extraction.text import CountVectorizer from sklearn.linear_model import LogisticRegression # + id="dAB-SITcoXso" colab_type="code" colab={} reviews = pd.read_csv('https://raw.githubusercontent.com/lucasmoratof/customers_review_project/master/reviews_for_nlp.csv', usecols=['review_comment_message', 'is_good_review']) # + id="L4vAgfRdocYO" colab_type="code" outputId="3ee5b1c0-1f9b-4d19-bed2-3faa1e3c2aca" colab={"base_uri": "https://localhost:8080/", "height": 196} reviews.head() # + [markdown] id="6eBnik4J3hsv" colab_type="text" # I will try some techniques to count the number of characters and words in each review. 
# + id="1DvIagii3SeY" colab_type="code" colab={}
# Character count: length of each review comment, stored as a new feature.
reviews['char_count'] = reviews['review_comment_message'].apply(len)

# + id="lGazaA8d4DFZ" colab_type="code" outputId="6d60667d-fc1a-4e81-dc0d-4d5cffb1f866" colab={"base_uri": "https://localhost:8080/", "height": 117}
reviews['char_count'].head()

# + id="Jj-B5HtM4Fzz" colab_type="code" outputId="fd7bbcf3-d679-4047-d4ce-dee61f4b8be7" colab={"base_uri": "https://localhost:8080/", "height": 33}
# Average number of characters per review.
reviews['char_count'].mean()

# + id="IpsOXx714bP_" colab_type="code" outputId="6ca99d44-082c-4cb7-8783-e729ca2a242c" colab={"base_uri": "https://localhost:8080/", "height": 33}
def count_words(string):
    """Return the number of whitespace-separated words in `string`."""
    return len(string.split())

# Apply the function to create a new feature.
reviews['word_count'] = reviews['review_comment_message'].apply(count_words)

# Average number of words per review.
print(reviews['word_count'].mean())

# + [markdown] id="8P_0660z_lKf" colab_type="text"
# Some text preprocessing techniques:
#
# - Convert words into lowercase
# - Removing leading and trailing whitespaces
# - Removing punctuation
# - Removing stopwords
# - Expanding contractions
# - Removing special characters (numbers, emojis, etc)
#
# **Tokenization** is the process of converting words into a numerical format, called token. We can also convert sentences and punctuation into tokens.
#
# **Lemmatization** is the process of converting a word into its lowercase base form.
# + id="Tsc03Yd4AuxK" colab_type="code" outputId="caf81e12-8ba4-4ad2-eaeb-e68c4bf821eb" colab={"base_uri": "https://localhost:8080/", "height": 50}
# If you need to download the model (works on google colab)
import spacy.cli
spacy.cli.download("pt_core_news_sm")

# + id="4AE-Ieu8_97N" colab_type="code" colab={}
# Load the Portuguese model
import spacy
nlp = spacy.load("pt_core_news_sm")

# + id="a4WTPiJfA3KP" colab_type="code" colab={}
doc= nlp(reviews['review_comment_message'][2])
# IMPORTANT, when you pass the strings through nlp(), it performs Lemmatization by default

# + id="mdRq_iuACtDG" colab_type="code" colab={}
# Token surface forms and their lemmas for one sample review.
tokens = [token.text for token in doc]
lemmas= [token.lemma_ for token in doc]

# + id="etQEoE9hFfOu" colab_type="code" outputId="cf9c7a37-96f7-43c5-9516-fd6b0db613b9" colab={"base_uri": "https://localhost:8080/", "height": 70}
print(tokens, "\n", lemmas)

# + id="lExguy8BFjD3" colab_type="code" colab={}
# Stopwords (Portuguese list shipped with spaCy)
stopwords = spacy.lang.pt.stop_words.STOP_WORDS

# + id="CrJp0jwNG_Dj" colab_type="code" outputId="07028041-da87-41c6-bed7-37f9f7ad9047" colab={"base_uri": "https://localhost:8080/", "height": 33}
no_stops= [lemma for lemma in lemmas if lemma.isalpha() and lemma not in stopwords]
print(' '.join(no_stops))

# + id="0rr0qCFGHVvx" colab_type="code" colab={}
# Creating a function that combines tokenization and lemmatization
def preprocessing(text):
    """Lemmatize `text` and drop stopwords and non-alphabetic tokens.

    Returns the cleaned lemmas joined into a single space-separated string.
    """
    doc = nlp(text)  # creates the document
    # FIX: the original comprehension read `[token.lemma_ for doc in doc]`,
    # which iterated `doc` over itself and looked up a stale `token` from the
    # enclosing scope -- every element was the same wrong lemma.  Iterate the
    # tokens of the document instead.
    lemmas = [token.lemma_ for token in doc]  # extracts the lemmas
    # Remove stopwords (remember that we are using the Portuguese list).
    clean_lemmas = [lemma for lemma in lemmas if lemma.isalpha() and lemma not in stopwords]
    return ' '.join(clean_lemmas)

# + [markdown] id="9L6VC_C1hlO7" colab_type="text"
# Part of Speech - POS
# It determines the meaning of each word, like proper noun, verb, etc.
# + id="0Ai4YBpniKat" colab_type="code" outputId="edb1bb62-8bb7-4796-a8ed-55a009fcc5bc" colab={"base_uri": "https://localhost:8080/", "height": 53}
# load the model
nlp= spacy.load('pt_core_news_sm')
# create the doc
doc= nlp(reviews['review_comment_message'][2])
# generate tokens and pos tags
pos= [(token.text, token.pos_) for token in doc]
print(pos)

# + [markdown] id="z-c5T_KXjHMh" colab_type="text"
# Below I will create two functions, to count the number of proper nouns and nouns, then, I will apply these functions on the data separating good reviews and bad reviews. Finally, I will calculate the mean of PROPN and NOUNS on both groups and compare.

# + id="97mLDuWnl9Li" colab_type="code" colab={}
# PROPN
def proper_nouns(text, model=nlp):
    """Count proper-noun (PROPN) tokens in `text` using the given spaCy model."""
    # Create doc object
    doc = model(text)
    # Generate list of POS tags
    pos = [token.pos_ for token in doc]
    return pos.count('PROPN')

# NOUN
def nouns(text, model=nlp):
    """Count common-noun (NOUN) tokens in `text` using the given spaCy model."""
    # FIX: the original body called the global `nlp(text)`, silently ignoring
    # the `model` parameter; use the parameter, consistent with `proper_nouns`.
    doc = model(text)
    pos = [token.pos_ for token in doc]
    return pos.count('NOUN')

# + id="s_CnyVvYmnnG" colab_type="code" colab={}
# Create two columns, with the number of nouns and proper nouns
reviews['num_propn'] = reviews['review_comment_message'].apply(proper_nouns)
reviews['num_noun'] = reviews['review_comment_message'].apply(nouns)

# + id="_yhLoWcUm7_Y" colab_type="code" outputId="cc1d8b34-c024-4ee6-9cae-48f21020b46a" colab={"base_uri": "https://localhost:8080/", "height": 50}
# computing the mean of proper nouns
good_propn= reviews[reviews['is_good_review']== 1]['num_propn'].mean()
bad_propn= reviews[reviews['is_good_review']== 0]['num_propn'].mean()
# computing the mean of nouns
good_noun= reviews[reviews['is_good_review']== 1]['num_noun'].mean()
bad_noun= reviews[reviews['is_good_review']== 0]['num_noun'].mean()
# print results to compare
print("Mean number of proper nouns for good and bad reviews are %.2f and %.2f respectively"%(good_propn, bad_propn))
print("Mean number of nouns for good and bad reviews are %.2f and %.2f respectively"%(good_noun, bad_noun))

# +
[markdown] id="AX3gXTlJpfgy" colab_type="text" # Named Entity Recognition # # It classifies named entities into predefined categories, like person, organization, country, etc. # # Uses: # - Efficient search algorithms # - Question answering # - News article classification # - Customer service # + id="qGBKCEYcwD-X" colab_type="code" outputId="6023b772-9485-4192-cda1-f4dc6d05b42c" colab={"base_uri": "https://localhost:8080/", "height": 33} # Let's practice NER nlp= spacy.load('pt_core_news_sm') text= reviews['review_comment_message'][11] doc= nlp(text) # print all named entities: for ent in doc.ents: print(ent.text, ent.label_) # + [markdown] id="LEFxF-z41bvu" colab_type="text" # To find person's names, we can use the following function: # + id="Ez6Jv9BVAiHr" colab_type="code" colab={} def find_persons(text, model=nlp): doc= model(text) persons= [ent.text for ent in doc.ents if ent.label_== 'PERSON'] return persons # + [markdown] id="quSVEyXZBQA0" colab_type="text" # Vectorization # # The process of converting text into vectors, so it can be used in ML # # Bag of Words is a model that do vectorization. It's important to perform text preprocessing as it leads to smaller vocabularies, and reducing the number of dimensions helps improve performance. # # CountVectorizer, from scikit-learn, is the tool used to perform bag of words. # It needs some arguuments to pre-processing text. 
# + id="XH_3jbeFDYhm" colab_type="code" outputId="14a423a2-7e8d-4ea7-d151-c272b906b9bf" colab={"base_uri": "https://localhost:8080/", "height": 50} from sklearn.feature_extraction.text import CountVectorizer from sklearn.model_selection import train_test_split # Create CountVectorizer object, specifying the arguments to preprocess text stop_words_port= spacy.lang.pt.stop_words.STOP_WORDS vectorizer= CountVectorizer(stop_words=stop_words_port) # Split into training and test sets X_train, X_test, y_train, y_test= train_test_split(reviews['review_comment_message'], reviews['is_good_review'], test_size=0.25, random_state=24) # Generate training Bow vectors X_train_bow= vectorizer.fit_transform(X_train) # Generate test Bow vector X_test_bow= vectorizer.transform(X_test) print(X_train_bow.shape) print(X_test_bow.shape) # + [markdown] id="Nc1LdpyTvteF" colab_type="text" # We will try the Naive Bayes classifier to this problem. # + id="i-m9DPtGv5vH" colab_type="code" outputId="c867ba4c-a4e7-4b6a-9cd3-2774d59fafd3" colab={"base_uri": "https://localhost:8080/", "height": 33} # Import multinomialNB from sklearn.naive_bayes import MultinomialNB # create MultinomialNB object clf= MultinomialNB() # Train clf clf.fit(X_train_bow, y_train) # Compute accuracy on test set accuracy= clf.score(X_test_bow, y_test) print("The accuracy of the classifier is %.3f" % accuracy) # + id="bOXKQQpMSPu2" colab_type="code" outputId="a30b3fee-85bb-4132-a7f4-171399f3f76d" colab={"base_uri": "https://localhost:8080/", "height": 33} # Predict the sentiment of a negative review review= "detestei o produto, nao gostei do vendedor, estou insatisfeito" prediction= clf.predict(vectorizer.transform([review]))[0] print("The sentiment predicted by the classifier is %i" % prediction) # + [markdown] id="L2EV8OM8SexR" colab_type="text" # On the example above, the model correct classified a bad review. 
# + [markdown] id="-JAAja-2m6pd" colab_type="text" # Techniques to give context to a review # # n-grams # # It is a contiguous sequence of n-elements, or words, in a given document. A bag of words is n-gram model where n= 1. # # Example: "I love you". If n=1, we have: # - "I" # - "Love" # - "You" # # If we change n to 2, we would have: # - "I love" # - "love you" # # It helps the model to undestand the relationship between the words. # # + id="yfYH18VYWanO" colab_type="code" outputId="b5702e82-b91f-4ad3-a7ab-4fc8a44d127b" colab={"base_uri": "https://localhost:8080/", "height": 84} # To avoid the curse of dimensionality, don't use more than n=3 # We are going to compare how much it increases when we increase the n-gram vectorizer_ng1 = CountVectorizer(ngram_range=(1, 1)) ng1 = vectorizer_ng1.fit_transform(X_train) vectorizer_ng2 = CountVectorizer(ngram_range=(1, 2)) ng2 = vectorizer_ng2.fit_transform(X_train) vectorizer_ng3 = CountVectorizer(ngram_range=(1, 3)) ng3 = vectorizer_ng3.fit_transform(X_train) print("number of features by n-grams is:\n ng1= %i \n ng2= %i \n ng3= %i" % (ng1.shape[1], ng2.shape[1], ng3.shape[1])) # + [markdown] id="0lWRvrBNY2SS" colab_type="text" # We can see that with n=1 we have 13k features, while with n=3 it increases to 295k. 
# + id="9Zlv15guW-sE" colab_type="code" outputId="001c62fb-3ad1-4d16-fe72-5eb227449ae1" colab={"base_uri": "https://localhost:8080/", "height": 33} # We will try the same model again, now with n-gram= 2 vectorizer_ng= CountVectorizer(stop_words=stop_words_port, ngram_range=(1,3)) X_train_bow_ng= vectorizer_ng.fit_transform(X_train) X_test_bow_ng= vectorizer_ng.transform(X_test) clf.fit(X_train_bow_ng, y_train) accuracy_ng= clf.score(X_test_bow_ng, y_test) print("The accuracy of the classifier is %.3f" % accuracy_ng) # + [markdown] id="e9wH0cslXw9q" colab_type="text" # Term Frenquency - Inverse Document Frequency - **TF-IDF** # # The idea is, more frequent the word is accross all documents, plus the number of times it occurs, more weight it should have. # # # + id="EwCg--uDXzi7" colab_type="code" outputId="71ca57ff-3e3f-41cc-afdb-7498b6df464a" colab={"base_uri": "https://localhost:8080/", "height": 33} # instead using CountVectorizer(), we will use TfadVectorizer() from scikit-learn from sklearn.feature_extraction.text import TfidfVectorizer vectorizer= TfidfVectorizer() tfidf_matrix= vectorizer.fit_transform(X_train) print(tfidf_matrix.shape) # + [markdown] id="RtasV4TMDjNN" colab_type="text" # # Cosine similarity # # It is the cosine distance between two vectors # + id="p-aIN0hUF0-G" colab_type="code" outputId="8b95a575-0e6c-4765-e586-59cb32d8b75a" colab={"base_uri": "https://localhost:8080/", "height": 150} from sklearn.metrics.pairwise import cosine_similarity import time # record time start= time.time() # Compute cosine similarity matrix cosine_sim= cosine_similarity(tfidf_matrix, tfidf_matrix) # print the cosine similarity matrix print(cosine_sim) # Print time taken print("Time taken: %s seconds" %(time.time() - start)) # + id="1LnAsRGzLVgG" colab_type="code" outputId="bf6da3fd-d891-432f-ee42-301434689e9d" colab={"base_uri": "https://localhost:8080/", "height": 150} # we can use linear_kernal to calculate cosine similarity. 
It takes less time to process and it produces the same result. from sklearn.metrics.pairwise import linear_kernel import time # record time start= time.time() # Compute cosine similarity matrix cosine_sim= linear_kernel(tfidf_matrix, tfidf_matrix) # print the cosine similarity matrix print(cosine_sim) # Print time taken print("Time taken: %s seconds" %(time.time() - start)) # + [markdown] id="g3SqmC8xai_j" colab_type="text" # Word embeddings # To find similarity between words or sentences. # + id="AST7VrbIdLtN" colab_type="code" outputId="f85cf3ad-ebfa-48f6-f449-4073e4e8b268" colab={"base_uri": "https://localhost:8080/", "height": 117} reviews['review_comment_message'].head() # + id="Ynh9fl8xdVRD" colab_type="code" outputId="dd2c2629-84d6-44ad-ede7-6bcdcc42d55d" colab={"base_uri": "https://localhost:8080/", "height": 187} # let's check how similar are the reviews # first, creat a Doc review_1_doc= nlp(reviews['review_comment_message'][1]) review_2_doc= nlp(reviews['review_comment_message'][2]) review_3_doc= nlp(reviews['review_comment_message'][3]) # Now, use the function similarity print(review_1_doc.similarity(review_2_doc)) print(review_2_doc.similarity(review_3_doc)) print(review_3_doc.similarity(review_1_doc)) # + id="5A7VuI6YeFHS" colab_type="code" outputId="e5420ca7-bd9f-484e-d42c-bbde98598ca2" colab={"base_uri": "https://localhost:8080/", "height": 50} # trying Multinomial Naive Bayes with Tfidf vectorization from sklearn.feature_extraction.text import TfidfVectorizer from sklearn.model_selection import train_test_split from sklearn.metrics import classification_report, confusion_matrix from sklearn.naive_bayes import MultinomialNB import time # Create CountVectorizer object, specifying the arguments to preprocess text stop_words_port= spacy.lang.pt.stop_words.STOP_WORDS vectorizer= TfidfVectorizer(stop_words=stop_words_port) # Split into training and test sets X_train, X_test, y_train, y_test= train_test_split(reviews['review_comment_message'], 
reviews['is_good_review'], test_size=0.25, random_state=24) start= time.time() # Generate training Bow vectors X_train_vec= vectorizer.fit_transform(X_train) # Generate test Bow vector X_test_vec= vectorizer.transform(X_test) # create MultinomialNB object clf= MultinomialNB() # Train clf clf.fit(X_train_vec, y_train) # Compute accuracy on test set accuracy= clf.score(X_test_vec, y_test) print("The accuracy of the classifier is %.3f" % accuracy) print("Time taken: %s seconds" %(time.time() - start)) # + id="WSGA_y7Wk9KK" colab_type="code" outputId="d38749fd-afed-43e7-837e-93583a7dd13e" colab={"base_uri": "https://localhost:8080/", "height": 167} import sklearn.metrics as metrics from sklearn.metrics import classification_report, confusion_matrix clf_y_pred = clf.predict(X_test_vec) print(metrics.classification_report(y_test, clf_y_pred)) # + id="6s-HRKptmtvP" colab_type="code" colab={}
NLP_Customer_Review_ML_Complete.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + #importing the required module and functions from ... #the package for a quick formation evaluation import petroeval as pet from petroeval import evaluate_reservoir from petroeval import log_plot, three_plots from petroeval import FormationEvaluation from petroeval import visualizations from petroeval.visualizations import summary # + #Reading/Importing a single lasio file las = pet.read_lasio('BOGI 02.las') # + #Reading/Importing multiple lasio files las1 = 'ATAGA 5.LAS' #file path las2 = 'ATAGA 10.las' #file path dual_las = pet.read_lasios(las1, las2) dual_las # + #converting las file to dataframe df = las.df() df.head() # + #create the reservoir section object with the FormationEvaluation class #Well log attributes/columns titles are passed as arguments reservoir1 = FormationEvaluation(df, gr='GR', nphi='NPHI', dens='DENS', res='RES', top=2, bottom=20000, cutoff=72) #The show_table method is used to create the table and the formation evaluation parameters table = reservoir1.show_table(baseline_default=True) print(reservoir1) # + #To check missing values before table.isna().sum() # + #fill missing values using mean values of the columns, specify value if mean shouldn't be applied df1 = reservoir1.fill_missing(use_mean=False, value=55) df1.isna().sum() # - pet.visualizations.summary(table) # + #Printng out a summary of the petrophysical estimates #baseline_default argument is set to False, so specified shale baseline cutoff is used print(reservoir1.parameters(baseline_default=False)) # - pet.log_plot(df) pet.three_plots(df, x1='GR', x2='NPHI', x3='DENS')
petroeval/Tutorial.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + deletable=true editable=true import math import requests import numpy as np pricelist_url = "https://cloudpricingcalculator.appspot.com/static/data/pricelist.json" # + deletable=true editable=true r = requests.get(pricelist_url) pricelist = r.json() print("Pricing as of {}".format(pricelist['updated'])) # + deletable=true editable=true class HostedCourse(dict): def __init__(self, data, pricelist, **kw): dict.__init__(self) self.update(data) self.pricelist = pricelist self.update(kw['update']) def monthly_cost_all_pd(self): pd_total_size = self['num_users'] * self['pd_size_gb'] pd_rate_gb_month = self.pricelist['gcp_price_list'][self['pd_type']][self['node_region']] return pd_rate_gb_month * pd_total_size def monthly_cost_node(self): node_rate = self.pricelist['gcp_price_list'][self['node_type']][self['node_region']] return node_rate * self['sustained_use_factor'] * self['node_monthly_uptime_h'] def pods_per_node(self): mem_per_node = int(self.pricelist['gcp_price_list'][self['node_type']]['memory']) return math.floor((mem_per_node-1)/self['mem_per_pod_gb']) def derived_node_count(self): return math.ceil(self['num_active_pods'] / self.pods_per_node()) def monthly_cost_all_nodes(self): return self.derived_node_count() * self.monthly_cost_node() def monthly_cost_total(self): return self.monthly_cost_all_nodes() + self.monthly_cost_all_pd() def monthly_cost_per_student(self): return self.monthly_cost_total() / self['num_users'] # + deletable=true editable=true # defaults dsep = { ## GKE 'node_region': 'us', # persistent disk 'pd_type': 'CP-COMPUTEENGINE-STORAGE-PD-SSD', # nodes # CP-COMPUTEENGINE-VMIMAGE-N1-HIGHMEM-4: 4 cores, 26GB # CP-COMPUTEENGINE-VMIMAGE-N1-STANDARD-4: 4 cores, 15GB 'node_type': 'CP-COMPUTEENGINE-VMIMAGE-N1-HIGHMEM-4', 
'num_users': 1, 'num_active_pods': 1, 'pd_size_gb': 10, 'node_monthly_uptime_h': 30*24, 'mem_per_pod_gb': 2, # https://cloud.google.com/compute/#pricing # "Sustained Use Discounts # Earn up to a 30% net discount for instances that run for an entire month. # Compute Engine automatically discounts instances running more than 25% of # the days in a month1." # This is theoretically represented by data['gcp_price_list']['sustained_use_tiers'] # which suggests discounts of up to 60% which is at odds with the statement above. # We'll assume a sustained discount of not the max of 30%, but of 15%. # FIXME: rejigger as a function of node_monthly_uptime_h 'sustained_use_factor': 0.85, } # + deletable=true editable=true courses = { 'data8_2g': HostedCourse(dsep, pricelist, update={ 'num_users': 900, 'num_active_pods': 221, # 80th percentile; FIXME: acquire via datadogs? }), 'prob140_2g': HostedCourse(dsep, pricelist, update={ 'num_users': 60, 'num_active_pods': 21, }), 'stat28_2g': HostedCourse(dsep, pricelist, update={ 'num_users': 60, 'num_active_pods': 9, }), 'data8_1g': HostedCourse(dsep, pricelist, update={ 'num_users': 900, 'num_active_pods': 221, # 80th percentile; FIXME: acquire via datadogs? 
'mem_per_pod_gb': 1, }), 'prob140_1g': HostedCourse(dsep, pricelist, update={ 'num_users': 60, 'num_active_pods': 21, 'mem_per_pod_gb': 1, }), 'stat28_1g': HostedCourse(dsep, pricelist, update={ 'num_users': 60, 'num_active_pods': 9, 'mem_per_pod_gb': 1, }), } # + deletable=true editable=true def show_node_info(nt): '''Display a node type's hourly rate and it's memory and core counts.''' print("rate: {:.3f}, mem_gb: {}, cores: {}".format( pricelist['gcp_price_list'][nt]['us'], pricelist['gcp_price_list'][nt]['memory'], pricelist['gcp_price_list'][nt]['cores'] )) # + deletable=true editable=true show_node_info('CP-COMPUTEENGINE-VMIMAGE-N1-HIGHMEM-4') show_node_info('CP-COMPUTEENGINE-VMIMAGE-N1-STANDARD-4') # + deletable=true editable=true for k in sorted(courses.keys()): course = courses[k] print("{:>12}\ttotal: ${:8.2f}\tper user: ${:6.2f}".format( k, course.monthly_cost_total(), course.monthly_cost_per_student() ))
docs/cost-estimation/gce_budgeting.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: 'Python 3.7.1 64-bit (''base'': conda)' # language: python # name: python37164bitbasecondabc65611516f5429da066830f275de3ab # --- # # %% # # Top Topics # # * model: (50t,600,10) # * scale: 125 # + import pandas as pd import numpy as np from helpers import potus import glob import re import os import matplotlib.pyplot as plt import seaborn as sns # - import imp imp.reload(potus) # ## (50t,600,10) # # Load results for scales 125 and 25. # + exp = 'potus_50t_600_10' scales = [25] result = potus.gridExpResult(exp, scales) print(len(result.df)) print(len(result.settings)) display(result.settings) display(result.df.groupby('Nw').speech.count()) # - # # Print Top Topics topics.get_top_topics_from_df(result.model, result.df.iloc[0].probs, ntopics=5, nterms=4) # + import numpy as np from helpers.topics import get_topic ntopics = 5 result_model = result.model iloc = 123 df_weigths_matrix = pd.DataFrame() for idx, r in result.df.iterrows(): speech_topic_probs = r.probs speech_id = r.speech_id topics_ordered = np.flip(speech_topic_probs.argsort()) dft =pd.Series(topics_ordered, name='topic').reset_index().rename(columns={'index': 'weight'}) dft.weight = np.maximum(0,ntopics-dft.weight) dft['speech_id'] = speech_id df_weigths_matrix = df_weigths_matrix.append(dft) # + top_topics_by_weight = df_weigths_matrix.groupby('topic').sum('weight').sort_values('weight', ascending=False) display(top_topics_by_weight) sum(top_topics_by_weight.weight) # -
Insights_potus_top_topics.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # now the first step in preprocessing, # in order to organise our data accordingly # isolate or region of interest # and expose the visuals in the preprocessing part of the pipeline # NB: this was computed on a machine with Dlib, hence the error here, # but the preprocessed dataset has been made available via Preprocessor's 'loadDataSet()' function call import Preprocessor as p p.preprocess() # + import FeatureExtraction as ft import os from sklearn.externals import joblib import numpy as np def loadDataSet(): lbp = ft.LBP(8, 1) faceEthnicity = ['White', 'Black', 'Asian', 'Indian', 'Other'] input_dir = 'data/UTKFace.tar/UTKFace/' # output_dir = 'data/output/' preprocess_dir = 'data/preprocess/' #facial_features2 = ['face_region_of_interest', 'mouth_region_of_interest', 'nose_region_of_interest', # 'left_eye_region_of_interest', 'right_eye_region_of_interest'] labels = os.listdir(preprocess_dir) #print(labels) ys = [faceEthnicity.index(label) for label in labels] # y=np.array(y) #print(ys) X = {'face': None, 'mouth': None, 'nose': None, 'left eye': None, 'right eye': None} X_face = [] X_nose = [] X_mouth = [] X_left_eye = [] X_right_eye = [] y = {'face': None, 'mouth': None, 'nose': None, 'left eye': None, 'right eye': None} y_face = [] y_nose = [] y_mouth = [] y_left_eye = [] y_right_eye = [] for i, label in enumerate(labels): path_to_label = preprocess_dir + label #print(path_to_label) individuals = os.listdir(path_to_label) #print(individuals) for individual in individuals: path_to_individual = path_to_label+'/'+individual+'/' #print(path_to_individual) #print('-----------') facial_features = os.listdir(path_to_individual) for f in facial_features: if f.split('__')[1][:-4] == 'face_region_of_interest': path_to_facial_feature = path_to_individual 
+ f face_lbph = ft.getFeatureVector(path_to_facial_feature, lbp) X_face.append(face_lbph) y_face.append(ys[i]) elif f.split('__')[1][:-4] == 'mouth_region_of_interest': path_to_facial_feature = path_to_individual + f mouth_lbph = ft.getFeatureVector(path_to_facial_feature, lbp) X_mouth.append(mouth_lbph) y_mouth.append(ys[i]) elif f.split('__')[1][:-4] == 'nose_region_of_interest': path_to_facial_feature = path_to_individual + f nose_lbph = ft.getFeatureVector(path_to_facial_feature, lbp) X_nose.append(nose_lbph) y_nose.append(ys[i]) elif f.split('__')[1][:-4] == 'left_eye_region_of_interest': path_to_facial_feature = path_to_individual + f left_eye_lbph = ft.getFeatureVector(path_to_facial_feature, lbp) X_left_eye.append(left_eye_lbph) y_left_eye.append(ys[i]) elif f.split('__')[1][:-4] == 'right_eye_region_of_interest': path_to_facial_feature = path_to_individual + f right_eye_lbph = ft.getFeatureVector(path_to_facial_feature, lbp) X_right_eye.append(right_eye_lbph) y_right_eye.append(ys[i]) X_face = np.vstack(tuple(X_face)) #print('X_face shape now', X_face.shape) y_face = np.array(y_face) #print('y_face shape', y_face.shape) X['face'] = X_face y['face'] = y_face X_mouth = np.vstack(tuple(X_mouth)) #print('X_mouth shape now', X_mouth.shape) y_mouth = np.array(y_mouth) #print('y_mouth shape', y_mouth.shape) X['mouth'] = X_mouth y['mouth'] = y_mouth X_nose = np.vstack(tuple(X_nose)) #print('X_nose shape now', X_nose.shape) y_nose = np.array(y_nose) #print('y_nose shape', y_nose.shape) X['nose'] = X_nose y['nose'] = y_nose X_left_eye = np.vstack(tuple(X_left_eye)) #print('X_left_eye shape now', X_left_eye.shape) y_left_eye = np.array(y_left_eye) #print('y_left_eye shape', y_left_eye.shape) X['left eye'] = X_left_eye y['left eye'] = y_left_eye X_right_eye = np.vstack(tuple(X_right_eye)) #print('X_right_eye shape now', X_right_eye.shape) y_right_eye = np.array(y_right_eye) #print('y_right_eye shape', y_right_eye.shape) X['right eye'] = X_right_eye y['right eye'] = 
y_right_eye return X, y def serializeDataSet(): X, y = loadDataSet() #joblib.dump(X, 'X.pkl') #joblib.dump(y, 'y.pkl') print('Done loading, now pickling X and y') joblib.dump({'X':X, 'y':y}, 'X_And_y.pkl') def deserializeDataSet(): #return joblib.load('X.pkl'), joblib.load('y.pkl') return joblib.load('X_And_y.pkl') def save_classifier(clf, filename): joblib.dump(clf, filename) def load_classifier(filename): return joblib.load(filename) # - serializeDataSet() dataSet = deserializeDataSet() # + # seperate labels from features X = dataSet['X'] y = dataSet['y'] # get the lbp histograms facial feature X_face = X['face'] X_nose = X['nose'] X_mouth = X['mouth'] X_left_eye = X['left eye'] X_right_eye = X['right eye'] # get the lbp histogram labels y_face = y['face'] y_nose = y['nose'] y_mouth = y['mouth'] y_left_eye = y['left eye'] y_right_eye = y['right eye'] # + # this will help us determine the appropriate train-test splits import scikitplot as skplt import matplotlib.pyplot as plt from sklearn.ensemble import RandomForestClassifier rf = RandomForestClassifier(n_estimators=2700) skplt.estimators.plot_learning_curve(rf, X_face, y_face) plt.show() # + # Given the learning curve plot produced above, it seems an 75-25 split would be sufficient # the curve presents the model as plateauing on a near perfect score for training # and the model improves with more training examples. # we will assume that this curve is an indication that the other classifiers to be trained will also behave # similarly due to the size of the data set and not overfit. 
from sklearn.model_selection import train_test_split

# Hold out 25% of the face histograms for evaluation; split size chosen from
# the learning curve above.  NOTE(review): no random_state, so the split is
# not reproducible across runs — confirm that is intended.
X_face_train, X_face_test, y_face_train, y_face_test = train_test_split(X_face, y_face, test_size=0.25)

# +
# now, classifiers will be trained and pickled
# loading all necessary classifiers from sklearn first
from sklearn.ensemble import RandomForestClassifier
from sklearn.svm import SVC
from sklearn.tree import DecisionTreeClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.cluster import MiniBatchKMeans
from sklearn.naive_bayes import GaussianNB

# now begin the training and saving of models in each cell
print('Begin Training random forest classifier')
rf = RandomForestClassifier(n_estimators=1000)
rf.fit(X_face_train, y_face_train)
print('Done training random forest classifier')
print('----------SAVE RF MODEL-----------')
save_classifier(rf, 'rf.pkl')
print('----------RF MODEL SAVED-----------')
# -

print('Begin training support vector machine')
# probability=True enables predict_proba, needed for the ROC / PR curves below
sv = SVC(probability=True)
sv.fit(X_face_train, y_face_train)
print('Done training support vector machine')
print('----------SAVE SV MODEL-----------')
save_classifier(sv, 'sv.pkl')
print('----------SV MODEL SAVED-----------')

print('Begin training gaussian naive bayes classifier')
gnb = GaussianNB()
gnb.fit(X_face_train, y_face_train)
print('Done training gaussian naive bayes classifier')
print('----------SAVE GNB MODEL-----------')
save_classifier(gnb, 'gnb.pkl')
print('----------GNB MODEL SAVED-----------')

print('Begin training decision tree classifier')
dt = DecisionTreeClassifier()
dt.fit(X_face_train, y_face_train)
print('Done training decision tree classifier')
print('----------SAVE DT MODEL-----------')
save_classifier(dt, 'dt.pkl')
print('----------DT MODEL SAVED-----------')

print('Begin training k nearest neighbors classifier')
knn = KNeighborsClassifier()# using the default n_neighbors=5 since there are 5 classes
knn.fit(X_face_train, y_face_train)
print('Done training k nearest neighbors classifier')
print('----------SAVE KNN MODEL-----------')
save_classifier(knn, 'knn.pkl')
print('----------KNN MODEL SAVED-----------')

print('Begin training minibatch k means clustering')
# unsupervised: fit on features only, one cluster per class
mbkm = MiniBatchKMeans(n_clusters=5, batch_size=2700)
mbkm.fit(X_face_train)
print('Done training minibatch k means clustering')
print('----------SAVE MBKM MODEL-----------')
save_classifier(mbkm, 'mbkm.pkl')
print('----------MBKM MODEL SAVED-----------')

# loading all the models in order to obtain classification metrics
rf = load_classifier('rf.pkl')
sv = load_classifier('sv.pkl')
gnb = load_classifier('gnb.pkl')
dt = load_classifier('dt.pkl')
knn = load_classifier('knn.pkl')
mbkm = load_classifier('mbkm.pkl')

# +
# obtain label predictions and probabilities for plotting and generating metrics
y_pred_RF = rf.predict(X_face_test)
y_prob_RF = rf.predict_proba(X_face_test)

y_pred_SV = sv.predict(X_face_test)
y_prob_SV = sv.predict_proba(X_face_test)

y_pred_GNB = gnb.predict(X_face_test)
y_prob_GNB = gnb.predict_proba(X_face_test)

y_pred_DT = dt.predict(X_face_test)
y_prob_DT = dt.predict_proba(X_face_test)

y_pred_KNN = knn.predict(X_face_test)
y_prob_KNN = knn.predict_proba(X_face_test)

y_pred_MBKM = mbkm.predict(X_face_test)
# y_prob_MBKM = mbkm.predict_proba(X_face_test) # cannot obtain label probabilities from clustering model
# -

# Produce Random Forest Classifier Confusion Matrix
skplt.metrics.plot_confusion_matrix(y_face_test, y_pred_RF, normalize=True)
plt.show()

# Produce Support Vector Classifier Confusion Matrix
skplt.metrics.plot_confusion_matrix(y_face_test, y_pred_SV, normalize=True)
plt.show()

# Produce Gaussian Naive Bayes Classifier Confusion Matrix
skplt.metrics.plot_confusion_matrix(y_face_test, y_pred_GNB, normalize=True)
plt.show()

# Produce Decision Tree Classifier Confusion Matrix
skplt.metrics.plot_confusion_matrix(y_face_test, y_pred_DT, normalize=True)
plt.show()

# Produce K Nearest Neighbors Classifier Confusion Matrix
skplt.metrics.plot_confusion_matrix(y_face_test, y_pred_KNN, normalize=True)
plt.show()

# Produce Mini Batch K Means Clustering Confusion Matrix
# NOTE(review): cluster ids are arbitrary, so this matrix is only meaningful
# if clusters happen to align with label ids — confirm interpretation.
skplt.metrics.plot_confusion_matrix(y_face_test, y_pred_MBKM, normalize=True)
plt.show()

# Produce Random Forest ROC Curves
skplt.metrics.plot_roc(y_face_test, y_prob_RF)
plt.show()

# Produce Support Vector Classifier ROC Curves
skplt.metrics.plot_roc(y_face_test, y_prob_SV)
plt.show()

# Produce Gaussian Naive Bayes Classifier ROC Curves
skplt.metrics.plot_roc(y_face_test, y_prob_GNB)
plt.show()

# Produce Decision Tree Classifier ROC Curves
skplt.metrics.plot_roc(y_face_test, y_prob_DT)
plt.show()

# Produce K Nearest Neighbors Classifier ROC Curves
skplt.metrics.plot_roc(y_face_test, y_prob_KNN)
plt.show()

# Produce Random Forest Classifier Precision-Recall Curves
skplt.metrics.plot_precision_recall(y_face_test, y_prob_RF)
plt.show()

# Produce Support Vector Classifier Precision-Recall Curves
skplt.metrics.plot_precision_recall(y_face_test, y_prob_SV)
plt.show()

# Produce Gaussian Naive Bayes Classifier Precision-Recall Curves
skplt.metrics.plot_precision_recall(y_face_test, y_prob_GNB)
plt.show()

# Produce Decision Tree Classifier Precision-Recall Curves
skplt.metrics.plot_precision_recall(y_face_test, y_prob_DT)
plt.show()

# Produce K Nearest Neighbors Classifier Precision-Recall Curves
skplt.metrics.plot_precision_recall(y_face_test, y_prob_KNN)
plt.show()

# Produce Mini Batch K Means Silhouette Analysis
skplt.metrics.plot_silhouette(X_face_test, y_pred_MBKM)
plt.show()
# --- end of file: TrainModel.ipynb ---
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# ## map2loop: The Northern Territory
#
# <img src='../graphics/loop_sm.png'>
#
# This notebook allows you to select a rectangular sub-area in the Northern Territory, Australia, and using <a href="https://nt.gov.au/industry/mining-and-petroleum/geoscience-data-maps/access-geoscience-information/northern-territory-geological-survey-publications-and-maps" target="_blank">NT</a> geology polygons, bedding orientation data and fault polylines, calculates the topological relationships between the different features. All the data is Open Access and is served on a Loop WFS server.
#
# This all gets fed into successive topological and geometric transforms that end up feeding into a modelling engine to make a 3D model.
#
#
# <font color='red'>The leaflet version here dangerously allows you to select your own area from the map; this is just for showing off, as in many parts of NT (the area we focus on here) there isn't enough data in the map alone to make a decent 3D model</font>
#
# The outputs from this notebook can be fed into gempy, LoopStructural, Geomodeller and, for the faults, noddy. Having some data doesn't mean that you will get a reasonable model, for three reasons:
#
# 1) The map is itself a model, and it may be wrong, or internally inconsistent
#
# 2) map2loop may have stuffed up
#
# 3) The modelling algorithm may be too simple to accept all the data, or to build a geologically reasonable model with even good data.
# # Codes here were developed as part of the <a href="https://loop3d.org/">Loop</a> | <a href="https://minexcrc.com.au/"> MinEx CRC</a> | <a href="https://www.darecentre.org.au/">DARE</a> partnerships with export codes drawing heavily on the expertise of <NAME> (<a href="https://github.com/cgre-aachen/gempy">gempy</a>), <NAME> (Loop Structural, coming soon!), <NAME> (<a href="https://www.intrepid-geophysics.com/product/geomodeller/Geomodeller">Geomodeller</a>) and <NAME> & <NAME> (<a href="https://github.com/cgre-aachen/pynoddy">pynoddy</a>).
#
# ### Instructions:
# 1) Select a pre-defined example area or draw your own rectangular area from map below, in an area with some bedding data (red dots). *I advise you start off with the examples, then near where the map opens up below, the further you stray, the harder it gets! Also don't draw too large a rectangle (50km x 50km), yes I know it needs a scale bar*
#
# 2) Select a target modelling engine (loopstructural, gempy, geomodeller, noddy)
#
# 3) Run all code below that and post an issue at <a href="https://github.com/Loop3D/map2loop/issues">github</a> when it crashes

# #### If not already available, the notebook will first download a 6 MB data package to set things up; this may take a few minutes, depending on your network speed. Once downloaded, this will not need to happen again (unless you delete the NT directory)

# +
# One-time download of the NT data package (skipped if ../NT/data exists).
from zipfile import ZipFile
import os
from urllib.request import urlopen
from io import BytesIO

if(not os.path.isdir('../NT/data')):
    with urlopen('http://geo.loop-gis.org/files/NTdata.zip') as zipresp:
        with ZipFile(BytesIO(zipresp.read())) as zfile:
            zfile.extractall('../NT/')

# +
import ipywidgets as widgets
import os

# load last saved map area and model engine (if they exist)
if(not os.path.isdir('../scratch/')):
    os.mkdir('../scratch/')
if(not os.path.isdir('../NT/tmp')):
    os.mkdir('../NT/tmp')
if(os.path.isfile('../NT/last_choices.txt')):
    # first line = map area, second line = engine, both newline-terminated
    f=open('../NT/last_choices.txt','r')
    contents =f.readlines()
    f.close()
    default_map=contents[0].replace("\n","")
    default_engine=contents[1].replace("\n","")
else:
    default_map='Turner_Syncline'
    default_engine='loopstructural'

options=['Draw Your Own','Last Area Drawn']
if(not default_map in options):
    default_map= options[0]

map_choice=widgets.Dropdown(
    options=options,
    value=default_map,
    description='Map area:',
    disabled=False,
)
display(map_choice)
# -

test_data_name=map_choice.value
print(test_data_name)
if(not test_data_name =='Draw Your Own' and not test_data_name =='Last Area Drawn'):
    # pre-defined example area: switch into its directory and load its config
    test_data_path='../'+test_data_name+'/'
    os.chdir(test_data_path)
    print(os.getcwd())
    # %run -i "m2l_config.py"

# +
import folium
import pandas as pd
import json
import random
from shapely.geometry import Polygon
from ipyleaflet import Map, basemaps, GeoJSON, LayersControl, DrawControl,WMSLayer, GeoData
from ipywidgets import Label
from ipywidgets import Label
import ipywidgets as widgets
import geopandas as gpd

# NOTE(review): indentation of this cell is reconstructed from a collapsed
# source line — confirm branch grouping against the original notebook.
if(not test_data_name =='Draw Your Own'):
    if(test_data_name=='Last Area Drawn'):
        # reuse the bounding box saved by the previous session
        last_coords=pd.read_csv('../NT/last_area.csv')
        display(last_coords)
        minx=last_coords.iloc[0]['minx']
        miny=last_coords.iloc[0]['miny']
        maxx=last_coords.iloc[0]['maxx']
        maxy=last_coords.iloc[0]['maxy']
        roi_poly=gpd.read_file('../scratch/NT/roi_poly.shp')
    elif(not test_data_name =='Draw Your Own'):
        # example area: project its bbox (in dst_crs) back to lat/long
        y_point_list = [miny, miny, maxy, maxy, maxy]
        x_point_list = [minx, maxx, maxx, minx, minx]
        bbox_geom = Polygon(zip(x_point_list, y_point_list))
        polygon = gpd.GeoDataFrame(index=[0], crs=dst_crs, geometry=[bbox_geom])
        polygon_ll=polygon.to_crs(src_crs)
        minx=polygon_ll.total_bounds[0]
        maxx=polygon_ll.total_bounds[2]
        miny=polygon_ll.total_bounds[1]
        maxy=polygon_ll.total_bounds[3]
        minlong=minx
        maxlong=maxx
        minlat=miny
        maxlat=maxy
        #print("x",polygon_ll.total_bounds[0])
        st_bbox=[minlong,minlat,maxlong,maxlat]
        lat_point_list = [minlat, minlat, maxlat, maxlat,maxlat]
        lon_point_list = [minlong, maxlong, maxlong, minlong, minlong]
        bbox_geom = Polygon(zip(lon_point_list, lat_point_list))
        rect = gpd.GeoDataFrame(index=[0], crs=src_crs, geometry=[bbox_geom])
    src_crs = "epsg:4326" # coordinate reference system for imported dtms (geodetic lat/long WGS84)
    dst_crs = "epsg:28352" # coordinate reference system for imported dtms (geodetic lat/long WGS84)
    bbox2=str(minx)+","+str(miny)+","+str(maxx)+","+str(maxy)
    y_point_list = [miny, miny, maxy, maxy, maxy]
    x_point_list = [minx, maxx, maxx, minx, minx]
    bbox_geom = Polygon(zip(x_point_list, y_point_list))
    polygon = gpd.GeoDataFrame(index=[0], crs=dst_crs, geometry=[bbox_geom])
    polygon_ll=polygon.to_crs(src_crs)
    minlong=polygon_ll.total_bounds[0]
    maxlong=polygon_ll.total_bounds[2]
    minlat=polygon_ll.total_bounds[1]
    maxlat=polygon_ll.total_bounds[3]
    minlong=minx
    maxlong=maxx
    minlat=miny
    maxlat=maxy
    lat_point_list = [minlat, minlat, maxlat, maxlat,maxlat]
    lon_point_list = [minlong, maxlong, maxlong, minlong, minlong]
    bbox_geom = Polygon(zip(lon_point_list, lat_point_list))
    rect = gpd.GeoDataFrame(index=[0], crs=src_crs, geometry=[bbox_geom])
    # purple overlay showing the selected area on the leaflet map
    example_rect = GeoData(geo_dataframe = rect,
                           style={'color': 'purple', 'opacity':3, 'weight':1.9, 'dashArray':'2', 'fillOpacity':0.6},
                           name = 'Example')
    if(test_data_name=='Last Area Drawn'):
        roi_poly=gpd.read_file('../NT/tmp/roi_poly.shp')
        example_rect = GeoData(geo_dataframe = roi_poly,
                               style={'color': 'purple', 'opacity':3, 'weight':1.9, 'dashArray':'2', 'fillOpacity':0.6},
                               name = 'Example')
    center=(minlat+((maxlat-minlat)/2),minlong+((maxlong-minlong)/2))
else:
    # no prior area: centre the map on a default NT location
    center=(-13.58,131.2)

# WMS overlays served from the Loop geoserver
wms_warox = WMSLayer(
    url='http://geo.loop-gis.org/geoserver/loop/wms?',
    layers='loop:pco_structures',
    format='image/png',
    transparent=True,
    attribution='Outcrop data from NTGS',
    name='outcrops'
)

wms_geol = WMSLayer(
    url='http://geo.loop-gis.org/geoserver/NTGS/wms?',
    layers='NTGS:pco_lithinterp_500k',
    format='image/png',
    transparent=True,
    opacity=0.4,
    attribution='Geology data from NTGS',
    name='geology'
)

m =Map(basemap=basemaps.OpenTopoMap, center=center, zoom=11,scroll_wheel_zoom=True)

label = Label()
display(label)

def handle_interaction(**kwargs):
    """Echo the cursor's lat/long into the label while the mouse moves."""
    if kwargs.get('type') == 'mousemove':
        label.value = str(kwargs.get('coordinates'))

m.on_interaction(handle_interaction)

m.add_layer(wms_geol)
m.add_layer(wms_warox)
if(not test_data_name =='Draw Your Own'):
    m.add_layer(example_rect)
m.add_control(LayersControl())

# rectangle-draw tool whose last_draw is read below
dc = DrawControl(rectangle={'shapeOptions': {'color': '#0000FF'}})
m.add_control(dc)
m
# -

# ## If you don't see a map above, please uncomment the next two lines of code
#
# So they should look like this:
#
# # !conda install -c conda-forge ipyleaflet -y
# # !jupyter nbextension enable --py --sys-prefix ipyleaflet

# #!conda install -c conda-forge ipyleaflet -y
# #!jupyter nbextension enable --py --sys-prefix ipyleaflet

if(test_data_name=='Draw Your Own' or test_data_name=='Last Area Drawn'):
    if(test_data_name=='Draw Your Own'):
        # Parse the drawn rectangle out of the DrawControl's GeoJSON repr.
        new_poly=GeoJSON(data=dc.last_draw)
        old_poly=str(new_poly)
        if("'geometry': None" in old_poly):
            raise NameError('map2loop error: No rectangle selected')
        old_poly=old_poly.rsplit("'coordinates': ", 1)[1]
        old_poly=old_poly.replace('[[[','').replace('[','').replace(']]]}})','').replace('],','').replace(',','').split(" ")
        longs=old_poly[0::2]
        lats=old_poly[1::2]
        minlong=float(min(longs))
        maxlong=float(max(longs))
        minlat=float(max(lats)) #ignores sign
        maxlat=float(min(lats)) #ignores sign
        bounds=(minlong,maxlong,minlat,maxlat)
    src_crs = "epsg:4326" # coordinate reference system for imported dtms (geodetic lat/long WGS84)
    dst_crs = "epsg:28352" # coordinate system for example data
    if(test_data_name=='Draw Your Own'):
        # flatten nested coordinate lists, build the ROI polygon and save it
        flat_list=[item for sublist in new_poly.data['geometry']['coordinates'] for item in sublist]
        flat_list=[item for sublist in flat_list for item in sublist]
        longs=flat_list[0::2]
        lats=flat_list[1::2]
        #display(lats,longs)
        poly_geom = Polygon(zip(longs, lats))
        roi_poly = gpd.GeoDataFrame(index=[0], crs=src_crs, geometry=[poly_geom])
        roi_poly.to_file('../NT/tmp/roi_poly.shp')
        roi_poly=roi_poly.to_crs(dst_crs)
    # model bbox in lat/long, then projected to the metric CRS
    lat_point_list = [minlat, minlat, maxlat, maxlat,maxlat]
    lon_point_list = [minlong, maxlong, maxlong, minlong, minlong]
    bbox_geom = Polygon(zip(lon_point_list, lat_point_list))
    mbbox = gpd.GeoDataFrame(index=[0], crs=src_crs, geometry=[bbox_geom])
    print(src_crs,mbbox.total_bounds)
    mbbox=mbbox.to_crs(dst_crs)
    print(dst_crs,mbbox.total_bounds)
    # persist the chosen area for the 'Last Area Drawn' option next session
    f=open('../NT/last_area.csv','w')
    ostr='minx,miny,maxx,maxy\n'
    f.write(ostr)
    ostr=str(minlong)+','+str(minlat)+','+str(maxlong)+','+str(maxlat)+'\n'
    f.write(ostr)
    f.close()

# ## Choose model engine
# 1. Run cell below
# 2. Select modelling engine from drop down menu
# 3. Click in next cell and run to end using menu: *Cell->Run all below*

# +
engine_choice=widgets.Dropdown(
    options=['geomodeller', 'gempy','loopstructural','noddy','null'],
    value=default_engine,
    description='Modeller:',
    disabled=False,
)
display(engine_choice)
# -

workflow={'model_engine':engine_choice.value}
print(workflow['model_engine'])

# save last map and engine choices
f=open('../NT/last_choices.txt','w')
ostr=str(map_choice.value)+'\n'+str(engine_choice.value)+'\n'
f.write(ostr)
f.close()

# + pycharm={"is_executing": false}
import geopandas as gpd
import pandas as pd
import numpy as np
import os
import sys
import stat
import functools
import operator
import matplotlib
import networkx as nx
import rasterio
from matplotlib import pyplot
import matplotlib.pyplot as plt
from shapely.geometry import Polygon
from map2loop import m2l_utils
from map2loop import m2l_topology
from map2loop import m2l_geometry
from map2loop import m2l_interpolation
from map2loop import m2l_export
from map2loop import m2l_map_checker
import time
import shutil
# %matplotlib inline

t0 = time.time()
# -

# ## Load config file

# + pycharm={"is_executing": false}
if(test_data_name=='Draw Your Own' or test_data_name=='Last Area Drawn' ):
    # custom area: use the NT data directory and the remote-server config
    test_data_path='../NT/'
    minx=mbbox.total_bounds[0]
    maxx=mbbox.total_bounds[2]
    miny=mbbox.total_bounds[1]
    maxy=mbbox.total_bounds[3]
    bbox2=str(minx)+","+str(miny)+","+str(maxx)+","+str(maxy)
    os.chdir('../NT/')
    # #%run -i "m2l_config.py"
    # %run -i "m2l_config_remote.py"
    print(os.getcwd())
else:
    test_data_path='../'+test_data_name+'/'

lat_point_list = [miny, miny, maxy, maxy, maxy]
lon_point_list = [minx, maxx, maxx, minx, minx]
bbox_geom = Polygon(zip(lon_point_list, lat_point_list))
polygon = gpd.GeoDataFrame(index=[0], crs=dst_crs, geometry=[bbox_geom])
bbox=(minx,miny,maxx,maxy)
#dc.clear()

# + pycharm={"is_executing": false}
# opportunity to second guess config file or add extra parameters not yet in config file...
# Decimation factors: keep every Nth vertex/point (0 = keep everything).
fold_decimate=2
fault_decimate=0
contact_decimate=5
orientation_decimate=0
use_interpolations=True #use interpolated dips/contacts as additional constraints
use_fat=True #use fold axial trace orientation hints
pluton_form='saucers'
fault_dip=90
min_fault_length=1000
compute_etc=False
#spacing > 0 gives absolute spacing, spacing <0 gives # of grid points in x direction
spacing=-200
#spacing=500 #grid spacing in metres of interpolation points
Australia=True
drift_prefix=['None']

#################################
# There are many alternative datasets that
# can be extracted from the input data,
# and many choices of possible input data
#
# These flags define what the actual workflow
# will be for this experiment, based partly
# on which (if any) modelling engine is used
#
#############################################
workflow={'model_engine':engine_choice.value}

# Per-engine feature flags controlling which derived products are computed.
if(workflow['model_engine']=='geomodeller'):
    workflow.update({'seismic_section':False,
                     'cover_map':False,
                     'near_fault_interpolations':True,
                     'fold_axial_traces':False,
                     'stereonets':True,
                     'formation_thickness':True,
                     'polarity':False,
                     'strat_offset':True,
                     'contact_dips':True})
elif(workflow['model_engine']=='loopstructural'):
    workflow.update({'seismic_section':False,
                     'cover_map':False,
                     'near_fault_interpolations':True,
                     'fold_axial_traces':True,
                     'stereonets':True,
                     'formation_thickness':True,
                     'polarity':False,
                     'strat_offset':True,
                     'contact_dips':True})
elif(workflow['model_engine']=='gempy'):
    workflow.update({'seismic_section':False,
                     'cover_map':False,
                     'near_fault_interpolations':False,
                     'fold_axial_traces':True,
                     'stereonets':False,
                     'formation_thickness':False,
                     'polarity':False,
                     'strat_offset':False,
                     'contact_dips':False})
elif(workflow['model_engine']=='noddy'):
    workflow.update({'seismic_section':False,
                     'cover_map':False,
                     'near_fault_interpolations':False,
                     'fold_axial_traces':False,
                     'stereonets':False,
                     'formation_thickness':False,
                     'polarity':False,
                     'strat_offset':False,
                     'contact_dips':False})
else:
    workflow.update({'seismic_section':False,
                     'cover_map':False,
                     'near_fault_interpolations':False,
                     'fold_axial_traces':False,
                     'stereonets':True,
                     'formation_thickness':True,
                     'polarity':False,
                     'strat_offset':True,
                     'contact_dips':False})

# no cover info so no need load cover layers
if(not workflow['cover_map']):
    dtb=0
    dtb_null=0
else:
    dtb_grid=data_path+'young_cover_grid.tif' #obviously hard-wired for the moment
    dtb_null='-2147483648' #obviously hard-wired for the moment
    cover_map_path=data_path+'Young_Cover_FDS_MGA_clean.shp' #obviously hard-wired for the moment
    dtb_clip=output_path+'young_cover_grid_clip.tif' #obviously hard-wired for the moment
    cover_dip=10 # dip of cover away from contact
    cover_spacing=5000 # of contact grid in metres
# -

# ## First we test to see if we have access to the online data we need
#
# + pycharm={"is_executing": false}
loopwfs=m2l_utils.have_access("geo.loop-gis.org")
ga=m2l_utils.have_access("services.ga.gov.au")
if(not local_paths and not loopwfs):
    raise NameError('map2loop error: No access to remote map server')
if(not (loopwfs & ga)):
    # fall back to local copies when either server is unreachable
    local_paths=True
    net=False
    print('using local paths')
else:
    net=True
# -

# ## Check Map for valid input files
#
# ### Checks for:
# - Files exist
# - Required fields
# - No NaN/blanks in required fields
# - Sufficient orientation data
# - LineString/PolyLines for faults (i.e. not MultiLineStrings/MultiPolylines), if found splits into unique ID polylines
# - Commas in unit code (maybe should check in groups and alt groups??
#
# ### Should also check for:
# - Significantly overlapping polygons
# - Faults that should be joined (c.f. FracG code)
# - Orientations near contacts that make no sense (i.e. strike is at high angle to contact tangent)
#
#

structure_file,geology_file,fault_file,mindep_file,fold_file,c_l=m2l_map_checker.check_map(structure_file,geology_file,fault_file,mindep_file,fold_file,tmp_path,bbox,c_l,dst_crs,local_paths,drift_prefix,roi_poly)

# ## Display stereonets of bedding by formations and group to see how we can combine them later

# +
# Accounts for shapefile truncation of field names to 10 characters. Needed when original was a MapInfo TAB file.
for code in c_l:
    if(not code =='bedding' and not code =='otype' and not code =='sill' and not code =='intrusive' and not code =='volcanic' and not code =='minf' and not code =='fault' and not code =='fdipnull' and not code =='fdipest_vals' and not code =='syn'):
        c_l[code]=c_l[code][:10]
display(c_l)

# +
print(geology_file)
geology = gpd.read_file(geology_file,bbox=bbox)
#geology[c_l['g']].fillna(geology[c_l['g2']], inplace=True)
#geology[c_l['g']].fillna(geology[c_l['c']], inplace=True)
orientations = gpd.read_file(structure_file,bbox=bbox)
display(orientations[c_l['dd']].dtype)
if(len(orientations)<2):
    raise NameError('Not enough orientations to complete calculations (need at least 2)')
group_girdle=m2l_utils.plot_bedding_stereonets(orientations,geology,c_l)
# -

# ## Test code to automatically create super_groups and use_group3

# maximum angular misfit (degrees) allowed when merging groups
misorientation=30
super_groups,use_gcode3=m2l_topology.super_groups_and_groups(group_girdle,tmp_path,misorientation)

# ## Decide on super groups and groups
# Supergroups define what shall be interpolated as single system
# use_gcode defines which groups we choose to calculate model from
#
# <font color='red'>The following outputs are the default settings that put all groups as one super_group for orientation interpolation purposes and use_gcode3 defines which groups will actually be modelled. If after looking at the stereonets you want to change these defaults, just copy paste the two following lines into the next cell below and edit them as you wish.</font>

# +
print("super_groups=",super_groups)
print("use_gcode3=",use_gcode3)

# +

# -

# ## Selection of derived data according modelling requirements

# +
print('only processing',use_gcode3)
# per-engine list of derived orientation/contact products to generate
inputs=('')
if(workflow['model_engine'] =='geomodeller'):
    inputs=('invented_orientations','intrusive_orientations','fat_orientations','near_fault_orientations','fault_tip_contacts','contact_orientations')
elif(workflow['model_engine']=='loopstructural'):
    inputs=('invented_orientations','contact_orientations','fat_orientations')
elif(workflow['model_engine']=='gempy'):
    inputs=('invented_orientations','interpolated_orientations','fat_orientations','contact_orientations')
elif(workflow['model_engine']=='noddy'):
    inputs=('')
# -

# ## Plot geology polygons and bounding box

# + pycharm={"is_executing": false}
print(geology_file)
geology_ll = gpd.read_file(geology_file,bbox=bbox)
# backfill missing group codes from the alternate group, then the unit code
geology_ll[c_l['g']].fillna(geology_ll[c_l['g2']], inplace=True)
geology_ll[c_l['g']].fillna(geology_ll[c_l['c']], inplace=True)
display(geology_ll.head())
base=geology_ll.plot(column=c_l['c'],figsize=(10,10),edgecolor='#000000',linewidth=0.2)
polygon.plot(ax=base, color='none',edgecolor='black')
# -

# ## Save geology to file as WKT

# + pycharm={"is_executing": false}
hint_flag=False # use GSWA strat database to provide relative age hints

def unique_list(list1):
    """Return the distinct elements of list1.

    NOTE(review): set-based, so result order is arbitrary — downstream
    column order depends on it; confirm that is acceptable.
    """
    # insert the list to the set
    list_set = set(list1)
    # convert the set to the list
    return(list(list_set))

list1=['geometry', c_l['o'],c_l['c'],c_l['g'],c_l['u'],c_l['min'],c_l['max'],c_l['ds'],c_l['r1'],c_l['r2']]
list2=unique_list(list1)
sub_geol = geology_ll[list2]
m2l_topology.save_geol_wkt(sub_geol,geology_file_csv, c_l,hint_flag)
# -

# ## Save mineral deposits to file as WKT
# This is not needed by map2loop to build 3D models, but is used by map2model to calculate mineral deposit/topology analyses.
# + pycharm={"is_executing": false}
# Mineral deposits: subset the fields map2model needs and save as WKT.
mindep = gpd.read_file(mindep_file,bbox=bbox)
sub_mindep = mindep[['geometry', c_l['msc'],c_l['msn'],c_l['mst'],c_l['mtc'],c_l['mscm'],c_l['mcom']]]
m2l_topology.save_mindep_wkt(sub_mindep,mindep_file_csv, c_l)
base=sub_mindep.plot()
polygon.plot(ax=base, color='none',edgecolor='black')
# -

# ## Read and save orientations data point data as WKT

# + pycharm={"is_executing": false}
orientations = gpd.read_file(structure_file,bbox=bbox)
sub_pts = orientations[['geometry', c_l['gi'],c_l['d'],c_l['dd']]]
m2l_topology.save_structure_wkt(sub_pts,structure_file_csv,c_l)
base=sub_pts.plot()
polygon.plot(ax=base, color='none',edgecolor='black')
# -

# ## Plot faults and bounding box

# + pycharm={"is_executing": false}
if(os.path.exists(fault_file)):
    lines_ll=gpd.read_file(fault_file,bbox=bbox)
    sub_lines = lines_ll[['geometry', c_l['o'],c_l['f']]]
    base=sub_lines.plot()
    polygon.plot(ax=base, color='none',edgecolor='black')
else:
    # no fault layer available: downstream save still gets an empty list
    sub_lines=[]
# -

# ## Save faults to file as WKT

# + pycharm={"is_executing": false}
m2l_topology.save_faults_wkt(sub_lines,fault_file_csv,c_l)
# -

# ## Create map2model input file

# + pycharm={"is_executing": false}
m2l_topology.save_Parfile(m2m_cpp_path,c_l,graph_path,geology_file_csv,fault_file_csv,structure_file_csv,mindep_file_csv,minx,maxx,miny,maxy,500.0,'Fe,Cu,Au,NONE',2)
# -

# ## Calculate topology

# + pycharm={"is_executing": false}
# Run the external map2model C++ executable on the Parfile written above.
import subprocess
import platform
os.chdir(m2m_cpp_path)
print(os.getcwd())
# #%system map2model.exe Parfile
if(platform.system()=='Windows'):
    subprocess.run(["map2model.exe", "Parfile"])
else:
    subprocess.run(["./map2model", "Parfile"])
# -

# ## Simple network graph of the geology with legend
# If in Australia we can use data derived from the ASUD database to refine stratigraphic relationships

if(Australia):
    asud_strat_file='../source_data/ASUD.csv'
    m2l_topology.use_asud(strat_graph_file, asud_strat_file,graph_path)
    strat_graph_file=graph_path+'ASUD_strat.gml'

# + pycharm={"is_executing": false}
G=nx.read_gml(strat_graph_file,label='id')
selected_nodes = [n for n,v in G.nodes(data=True) if n >=0]
nx.draw_networkx(G, pos=nx.kamada_kawai_layout(G), arrows=True, nodelist=selected_nodes)
nlist=list(G.nodes.data('LabelGraphics'))
nlist.sort()
for no in nlist:
    if(no[0]>=0):
        # strip the GML label decoration down to the bare unit name
        elem=str(no[1]).replace("{'text':","").replace(", 'fontSize': 14}","")
        elem=elem.replace(", 'anchor': 'n', 'fontStyle': 'bold'","")
        print(no[0]," ",elem)
# -

# ## Process topography, stratigraphy, fold axial traces and faults
#
# ### Takes GML file produced by topology code, combines with geology polygons, structure points and dtm to create 3D model in gempy.<br><br>
#
# Limitations: no dykes, no sills. Sills require us to assign a unique surface to each instance of a sill (sill between units A and B needs to be different from sill of same age and strat codes as one found between E and F). Dykes via cokriging are really hard without just cookie cutting them in (but that is not our problem!). We are not checking for onlap relationships, which can perhaps be seen by having lots of units from one series adjacent to the youngest surface of the older series. Could also think about interpreting these as faults to introduce conceptual uncertainty. All mistakes belong to <NAME>, topology code that feeds this system by <NAME>.<br><br>
#
# Geology layer needs to have some unique strat code or text, some group code or text to function<br>
# Structure layer needs dip/dip direction<br>
#
#

# + pycharm={"is_executing": false}
os.chdir('../map2loop')
print(os.getcwd())
#from IPython.core.display import display, HTML
#display(HTML("<style>.container { width:80% !important; }</style>"))
sys.path.insert(0,"../..")
print(os.getcwd())
# -

# ## Next we define an area of interest and some other basic stuff

# + pycharm={"is_executing": false}
print(os.getcwd())
bbox2=str(minx)+","+str(miny)+","+str(maxx)+","+str(maxy)
lat_point_list = [miny, miny, maxy, maxy, maxy]
lon_point_list = [minx, maxx, maxx, minx, minx]
bbox_geom = Polygon(zip(lon_point_list, lat_point_list))
polygon = gpd.GeoDataFrame(index=[0], crs=dst_crs, geometry=[bbox_geom])
bbox=(minx,miny,maxx,maxy)
step_out=0.045 #add (in degrees) so edge pixel from dtm reprojection are not found
# -

# ## Download and reproject the appropriate SRTM data
# mj: Getting this from GA, but could also get from Hawaii

# + pycharm={"is_executing": false}
polygon_ll=polygon.to_crs(src_crs)
minlong=polygon_ll.total_bounds[0]-step_out
maxlong=polygon_ll.total_bounds[2]+step_out
minlat=polygon_ll.total_bounds[1]-step_out
maxlat=polygon_ll.total_bounds[3]+step_out
print(minlong,maxlong,minlat,maxlat)

# Retry the DTM download up to 100 times, 10 s apart.
# NOTE(review): bare `except:` swallows all exceptions (including
# KeyboardInterrupt) — consider narrowing to the expected network errors.
downloaded = False
i=0
print('Attempt: 0 ',end='')
while downloaded == False:
    try:
        m2l_utils.get_dtm(dtm_file, minlong,maxlong,minlat,maxlat)
        downloaded=True
    except:
        time.sleep(10)
        i=i+1
        print(' ',i,end='')
    if(i==100):
        raise NameError('map2loop error: Could not access DTM server after 100 attempts')
print()
geom_rp=m2l_utils.reproject_dtm(dtm_file,dtm_reproj_file,src_crs,dst_crs)
dtm = rasterio.open(dtm_reproj_file)
pyplot.imshow(dtm.read(1), cmap='terrain',vmin=0,vmax=1000)
pyplot.show()
# -

# ## Load stratigraphy graph and create list of series (aka groups)
# mj: The choice of what constitutes basic unit and what a group of units is hard-wired at the moment, but could be altered to any pair. Not even sure we need two levels but it seemed like a good idea at the time. Note that this needs the arcgis plugin version of the topology code (for now) as it separates the different sub graphs. Text outputs list alternate topologies for series and surfaces, which if confirmed by comparing max-min ages will be a nice source of uncertainty.

# + pycharm={"is_executing": false}
groups,glabels,G = m2l_topology.get_series(strat_graph_file,'id')
print(groups,glabels)

# + pycharm={"is_executing": false}
m2l_topology.save_units(G,tmp_path,glabels,Australia,asud_strat_file)
# -

# ## Load geology & structure data
# Currently loading from local files, but could load geology from WFS server at GSWA EXCEPT that the WFS online map has less fields that the zipped shapefiles. Go figure. We don't use fault layer at the moment (except for Vitaliy's topology code) but same logic applies in terms of where to get it from. Already have fault/strat relationships and once we have fault/fault relationships will start to include faults in models.

# + pycharm={"is_executing": false}
# Extract point data from structure & geology layers for modelling
## First we read in the structure and map from shapefiles, or wherever...
bbox=(minx,miny,maxx,maxy)
geology = gpd.read_file(geology_file,bbox=bbox)
geology[c_l['g']].fillna(geology[c_l['g2']], inplace=True)
geology[c_l['g']].fillna(geology[c_l['c']], inplace=True)
structure = gpd.read_file(structure_file,bbox=bbox)
structure.crs=dst_crs
print(fault_file)
if(os.path.exists(fault_file)):
    faults_clip = gpd.read_file(fault_file,bbox=bbox)
    faults_clip.crs=dst_crs
list1=['geometry',c_l['d'],c_l['dd'],c_l['sf'],c_l['bo']]
list2=unique_list(list1)
sub_pts = structure[list2]
base=geology.plot(column=c_l['c'],figsize=(10,10),edgecolor='#000000',linewidth=0.2)
sub_pts.plot(ax=base,edgecolor='black')
faults_clip.plot(ax=base, column=c_l['f'],edgecolor='black')
# -

# ## Join geology and structures, save fold axial traces

# + pycharm={"is_executing": false}
geol_clip = m2l_utils.explode(geology)
geol_clip.crs = dst_crs
pd.set_option('display.max_columns', None)
pd.set_option('display.max_rows',None)
# tag each structure point with the geology polygon it falls inside
# NOTE(review): `op=` is deprecated in newer geopandas in favour of
# `predicate=` — confirm the pinned geopandas version.
structure_code = gpd.sjoin(sub_pts, geol_clip, how="left", op="within")
y_point_list = [miny, miny, maxy, maxy, miny]
x_point_list = [minx, maxx, maxx, minx, minx]
bbox_geom = Polygon(zip(x_point_list, y_point_list))
polygo = gpd.GeoDataFrame(index=[0], crs=dst_crs, geometry=[bbox_geom])
# keep only bedding measurements
is_bed=structure_code[c_l['sf']].str.contains(c_l['bedding'], regex=False)
structure_clip = structure_code[is_bed]
structure_clip.crs = dst_crs
if(c_l['otype']=='strike'):
    # convert strike to dip direction (strike + 90) and retarget c_l
    structure_clip['azimuth2'] = structure_clip.apply(lambda row: row[c_l['dd']]+90.0, axis = 1)
    c_l['dd']='azimuth2'
    c_l['otype']='dip direction'
structure_clip=structure_clip[~structure_clip[c_l['o']].isnull()]
structure_clip.to_file(tmp_path+'structure_clip.shp')
# -

# ## Create possible stratigraphy sets per group
# mj: <font color='red'>Uses first of each possible set of topologies per unit and per group, which is arbitrary.</font> On the other hand we are not checking relative ages again to see if this helps reduce ambiguity, which I think it would.

# + pycharm={"is_executing": false}
m2l_topology.save_group(G,tmp_path,glabels,geol_clip,c_l)
# -

# ## Calculate cover depth grid and contacts
# Grid is assumed to be of depth below surface
#
# Also need to cater for case when we only have grid, no shapefile, so need to add a fake horizontal orientation in the middle of the map at average depth.

if(workflow['cover_map']):
    dtm = rasterio.open(dtm_reproj_file)
    dtb_raw = rasterio.open(dtb_grid)
    cover=gpd.read_file(cover_map_path)
    # clip the depth-to-basement grid to the cover polygons
    with fiona.open(cover_map_path, "r") as shapefile:
        shapes = [feature["geometry"] for feature in shapefile]
    with rasterio.open(dtb_grid) as src:
        out_image, out_transform = rasterio.mask.mask(src, shapes, crop=True)
        out_meta = src.meta.copy()
    out_meta.update({"driver": "GTiff",
                     "height": out_image.shape[1],
                     "width": out_image.shape[2],
                     "transform": out_transform})
    with rasterio.open(dtb_clip, "w", **out_meta) as dest:
        dest.write(out_image)
    dtb = rasterio.open(dtb_clip)
    m2l_geometry.process_cover(output_path,dtm,dtb,dtb_null,cover,workflow['cover_map'],cover_dip,bbox,dst_crs,cover_spacing,contact_decimate=3,use_vector=True,use_grid=True)

# ## Export orientation data in csv format
# mj: Orientation data needs calculated height as file does not provide it, taken from SRTM data already downloaded. To calculate polarity <font color='red'>(WHICH WE DON'T DO YET)</font> we can calculate the dot product of the dip direction of a bedding plane and the vector to that points nearest basal contact node, if abs(acos(dot product))>90 then right way up?
#
#
#
# Added code to not save intrusion orientation data as they won't have associated surfaces if sill...

# + pycharm={"is_executing": false}
m2l_geometry.save_orientations(structure_clip,output_path,c_l,orientation_decimate,dtm,dtb,dtb_null,workflow['cover_map'])

m2l_utils.plot_points(output_path+'orientations.csv',geol_clip, 'formation','X','Y',False,'alpha')
# -

# ## Find those series that don't have any orientation or contact point data then create arbitrary point for series with no orientation data
# Not sure if gempy needs this but geomodeller does. Currently just gives a point dipping 45 degrees to North, but could use dip direction normal to basal surface)

# + pycharm={"is_executing": false}
m2l_geometry.create_orientations( tmp_path, output_path, dtm,dtb,dtb_null,workflow['cover_map'],geol_clip,structure_clip,c_l)
# -

# ## Export contact information subset of each polygon to gempy format
# mj: Orientation data needs calculated height as file does not provide it, taken from SRTM data already downloaded. Need to reduce number of points whilst retaining useful info (Ranee's job!)'
# To calculate which are the basal units contact for a polygon find the polygons which are older than the selected polygon, in the example below the central polygon has relative age 23 so its basal contact is with the polygons whose ages are 26 & 28. If there are no older units for a polygon it has no basal content. We keep every nth node based on the decimate term (simple count along polyline). gempy seems to need at least two points per surface, so we always take the first two points.
# #
# # <img src='../graphics/base.png'>

# + pycharm={"is_executing": false}
# Extract basal-contact points for every polygon (full and decimated
# dictionaries); intrusion_mode controls how intrusive bodies are handled.
ls_dict,ls_dict_decimate=m2l_geometry.save_basal_contacts(tmp_path,dtm,dtb,dtb_null,workflow['cover_map'],geol_clip,contact_decimate,c_l,intrusion_mode)
# -

# ## Remove all basal contacts that are defined by faults and save to shapefile (no decimation)

# + pycharm={"is_executing": false}
m2l_geometry.save_basal_no_faults(tmp_path+'basal_contacts.shp',tmp_path+'faults_clip.shp',ls_dict,10,c_l,dst_crs)
# -

# ## Remove faults from decimated basal contacts and save as csv file

# + pycharm={"is_executing": false}
contacts=gpd.read_file(tmp_path+'basal_contacts.shp')
m2l_geometry.save_basal_contacts_csv(contacts,output_path,dtm,dtb,dtb_null,workflow['cover_map'],contact_decimate,c_l)
m2l_utils.plot_points(output_path+'contacts4.csv',geol_clip, 'formation','X','Y',False,'alpha')
# -

# ## New interpolation test
# Interpolates a regular grid of orientations from a shapefile of arbitrarily-located points and saves out csv files of l,m & n direction cosines and dip / dip direction data
#
# Can choose between various RBF and IDW options
#
# The purpose of these interpolations and associated code is to help in four cases:
# - Providing estimated dips and contacts in fault-bounded domains where no structural data are available
# - Needed to estimate true thickness of formations
# - Estimating fault offset
# - Possibly useful for populating parts of maps where little structural data is available

# +
basal_contacts=tmp_path+'basal_contacts.shp'
orientation_interp,contact_interp,combo_interp=m2l_interpolation.interpolation_grids(geology_file,structure_file,basal_contacts,bbox,spacing,dst_crs,scheme,super_groups,c_l)
# -


def _write_interp_csv(path, header, rows, ncols):
    """Write the first `ncols` fields of each interpolated row to a csv file.

    `rows` is an iterable of indexable records; fields are written verbatim
    via str(), matching the previous '{},{},...'.format(...) output.
    """
    with open(path, 'w') as f:
        f.write(header)
        for row in rows:
            f.write(','.join(str(v) for v in row[:ncols]) + '\n')


# Persist the three interpolation products (orientations, contacts, combined).
_write_interp_csv(tmp_path+'interpolated_orientations.csv', 'X,Y,l,m,n,dip,dip_dir\n', orientation_interp, 7)
_write_interp_csv(tmp_path+'interpolated_contacts.csv', 'X,Y,l,m,angle\n', contact_interp, 5)
_write_interp_csv(tmp_path+'interpolated_combined.csv', 'X,Y,l,m,n,dip,dip_dir\n', combo_interp, 7)

# +
# A negative `spacing` means "divide the bbox width into that many cells"
# rather than an absolute cell size.
if(spacing<0):
    spacing=-(bbox[2]-bbox[0])/spacing

x=int((bbox[2]-bbox[0])/spacing)+1
y=int((bbox[3]-bbox[1])/spacing)+1
print(x,y)
# -999 marks grid cells with no interpolated value.
dip_grid=np.full((y,x),-999.0)
dip_dir_grid=np.full((y,x),-999.0)
# BUG FIX: contact_grid was previously re-assigned as dip_dir_grid*-999
# (i.e. 998001 everywhere, since dip_dir_grid was already -999) instead of
# being initialised to -999; untouched cells carried a bogus fill value.
contact_grid=np.full((y,x),-999.0)
# Rasterise the scattered interpolation points onto the regular grids.
for row in combo_interp:
    r=int((row[1]-bbox[1])/spacing)
    c=int((row[0]-bbox[0])/spacing)
    dip_grid[r,c]=float(row[5])
    dip_dir_grid[r,c]=float(row[6])
for row in contact_interp:
    r=int((row[1]-bbox[1])/spacing)
    c=int((row[0]-bbox[0])/spacing)
    contact_grid[r,c]=float(row[4])

print('interpolated dips')
plt.imshow(dip_grid, cmap="hsv",origin='lower',vmin=-90,vmax=90)
plt.show()

# +
print('interpolated dip directions')
plt.imshow(dip_dir_grid, cmap="hsv",origin='lower',vmin=0,vmax=360)
plt.show()

# +
print('interpolated contacts')
plt.imshow(contact_grid, cmap="hsv",origin='lower',vmin=-360,vmax=360)
plt.show()
# -

# ## Process fault geometry
# Save Faults as decimated points and representative orientation
# Then, for each fault string:
# - incrementally advance along polyline at each inter-node (no point in doing more?)
# - find local stratigraphy 10m to left and right of fault # # Once full fault has been traversed: # - Find list of contacts left # - Find equivalent contacts on right # - use interpolated orientations to estimate minimum true offset assuming vertical displacement and store # - if no equivalent found, flag as domain fault and find min strat offset for contact, use cumulative minimum thickness estimate and store with flag (not implemented) # - estimate median & sd of minimum fault offset and store with flag (not implemented) # # Local Orientations # Since much of the code is the same, we benefit by calculating local orientation data either side of fault so that geomodeller/gempy have satisfied fault compartment orientation data## Save fault as contact info and and orientation info make vertical (for the moment) # + pycharm={"is_executing": false} m2l_geometry.save_faults(tmp_path+'faults_clip.shp',output_path,dtm,dtb,dtb_null,workflow['cover_map'],c_l,fault_decimate,min_fault_length,fault_dip) # + pycharm={"is_executing": false, "name": "#%%\n"} import warnings warnings.filterwarnings('ignore') if(os.path.exists(fault_file)): faults=pd.read_csv(output_path+'faults.csv') faults_len=len(faults) if(faults_len>0): m2l_interpolation.process_fault_throw_and_near_faults_from_grid(tmp_path,output_path,dtm_reproj_file,dtb,dtb_null,workflow['cover_map'],c_l,dst_crs,bbox, scheme,dip_grid,dip_dir_grid,x,y,spacing) # - # ## Process plutons # # For each instruve but not sill polygon, find older neighbours and store decimated contact points. 
Also store dipping contact orientations (user defined, just because) with four possible sub-surface configurations: # # <b>saucers: \\_+++_/ <br> # batholiths: +++/__ __ _\\+++ <br> # domes: /‾+++‾\\ <br> # pendants: +++\\_ _/+++ <br> # </b> # # Saves out orientations and contact points, as well as updated group level stratigraphic column.<br> # # + pycharm={"is_executing": false} bbox=(minx,miny,maxx,maxy) pluton_dip=str(pluton_dip) dist_buffer=10 m2l_geometry.process_plutons(tmp_path,output_path,geol_clip,local_paths,dtm,dtb,dtb_null,workflow['cover_map'],pluton_form,pluton_dip,contact_decimate,c_l) # - # ## Extract faults and basal contacts of groups from seismic section # + pycharm={"is_executing": false} if(workflow['seismic_section']): seismic_line_file=data_path+'seismic_line_10GA-CP1_rev.shp' #input geology file (if local) seismic_line = gpd.read_file(seismic_line_file) #import map seismic_line.plot(figsize=(10,10),edgecolor='#000000',linewidth=0.2) #display map display(seismic_line) seismic_bbox_file=data_path+'seismic_bbox.shp' #input geology file (if local) seismic_bbox = gpd.read_file(seismic_bbox_file) #import map seismic_bbox.set_index('POSITION',inplace=True) seismic_interp_file=data_path+'seismic_interp.shp' #input geology file (if local) seismic_interp = gpd.read_file(seismic_interp_file) #import map seismic_interp.plot(column='FEATURE',figsize=(10,10),edgecolor='#000000',linewidth=0.5) #display map display(seismic_interp) surface_cut=2000 m2l_geometry.extract_section(tmp_path,output_path,seismic_line,seismic_bbox,seismic_interp,dtm,dtb,dtb_null,workflow['cover_map'],surface_cut) contacts=pd.read_csv(output_path+'contacts4.csv',",") seismic_contacts=pd.read_csv(output_path+'seismic_base.csv',",") all_contacts=pd.concat([contacts,seismic_contacts],sort=False) all_contacts.to_csv (output_path+'contacts4.csv', index = None, header=True) faults=pd.read_csv(output_path+'faults.csv',",") seismic_faults=pd.read_csv(output_path+'seismic_faults.csv',",") 
all_faults=pd.concat([faults,seismic_faults],sort=False) all_faults.to_csv (output_path+'faults.csv', index = None, header=True) # - # ## Propagate dips along contacts # # + pycharm={"is_executing": false} if(workflow['contact_dips']): orientations=pd.read_csv(output_path+'orientations.csv',",") contact_dip=70 contact_orientation_decimate=5 m2l_geometry.save_basal_contacts_orientations_csv(contacts,orientations,geol_clip,tmp_path,output_path,dtm,dtb, dtb_null,workflow['cover_map'],contact_orientation_decimate,c_l,contact_dip,dip_grid,spacing,bbox) # - # ## Estimate formation thickness and normalised formation thickness # + pycharm={"is_executing": false} if(workflow['formation_thickness']): geology_file=tmp_path+'basal_contacts.shp' contact_decimate=5 null_scheme='null' m2l_interpolation.save_contact_vectors(geology_file,tmp_path,dtm,dtb,dtb_null,workflow['cover_map'],bbox,c_l,null_scheme,contact_decimate) buffer =5000 max_thickness_allowed=10000 m2l_geometry.calc_thickness_with_grid(tmp_path,output_path,buffer,max_thickness_allowed, c_l,bbox,dip_grid,dip_dir_grid,x,y,spacing) m2l_geometry.calc_min_thickness_with_grid(tmp_path,output_path,buffer,max_thickness_allowed, c_l,bbox,dip_grid,dip_dir_grid,x,y,spacing) m2l_geometry.normalise_thickness(output_path) m2l_utils.plot_points(output_path+'formation_thicknesses_norm.csv',geol_clip,'norm_th','x','y',True,'numeric') # - # ## Creates fold axial trace points # + pycharm={"is_executing": false} if(workflow['fold_axial_traces'] and os.path.exists(fold_file)): folds_clip = gpd.read_file(fold_file) if(len(folds_clip)>0): m2l_geometry.save_fold_axial_traces(fold_file,output_path,dtm,dtb,dtb_null,workflow['cover_map'],c_l,fold_decimate) #Save fold axial trace near-hinge orientations fat_step=750 # how much to step out normal to fold axial trace close_dip=70 #dip to assign to all new orientations (-999= use local interpolated dip) 
m2l_geometry.save_fold_axial_traces_orientations(fold_file,output_path,tmp_path,dtm,dtb,dtb_null,workflow['cover_map'],c_l,dst_crs, fold_decimate,fat_step,close_dip,scheme,bbox,spacing,dip_grid,dip_dir_grid) # - # ## Preprocess data to ensure it meets modelling requirements # + pycharm={"is_executing": false} m2l_geometry.tidy_data(output_path,tmp_path,clut_path,use_gcode3,use_interpolations,use_fat,pluton_form,inputs,workflow,c_l) model_top=round(np.amax(dtm.read(1)),-2) dtm.close() if(workflow['cover_map']): dtb.close() # - # ## Calculate polarity of original bedding orientation data (not used yet in final calc) # + pycharm={"is_executing": false} if(workflow['polarity']): m2l_geometry.save_orientations_with_polarity(output_path+'orientations.csv',output_path,c_l,tmp_path+'basal_contacts.shp',tmp_path+'all_sorts.csv',) m2l_utils.plot_points(output_path+'orientations_polarity.csv',geol_clip,'polarity','X','Y',True,'alpha') # - # ## Calculate minimum fault offset from stratigraphy and stratigraphic fault offset # + pycharm={"is_executing": false} if(workflow['strat_offset'] and os.path.exists(fault_file)): fault_test=pd.read_csv(output_path+'fault_dimensions.csv',',') if(len(fault_test)>0): m2l_geometry.fault_strat_offset(output_path,c_l,dst_crs,output_path+'formation_summary_thicknesses.csv', tmp_path+'all_sorts.csv',tmp_path+'faults_clip.shp',tmp_path+'geol_clip.shp',output_path+'fault_dimensions.csv') m2l_utils.plot_points(output_path+'fault_strat_offset3.csv',geol_clip,'min_offset','X','Y',True,'numeric') m2l_utils.plot_points(output_path+'fault_strat_offset3.csv',geol_clip,'strat_offset','X','Y',True,'numeric') # - # ## Analyse fault-fault topology # + pycharm={"is_executing": false} if(os.path.exists(fault_file)): m2l_topology.parse_fault_relationships(graph_path,tmp_path,output_path) # - # # loop2geomodeller test # # # + pycharm={"is_executing": false} if(workflow['model_engine']=='geomodeller'): from datetime import datetime import shutil 
m2l_topology.check_near_fault_contacts(tmp_path+'faults_clip.shp',tmp_path+'all_sorts_clean.csv', output_path+'fault_dimensions.csv',output_path+'group-fault-relationships.csv', output_path+'contacts_clean.csv',c_l,dst_crs) nowtime=datetime.now().isoformat(timespec='minutes') model_name='leaflet'+'_'+nowtime.replace(":","-").replace("T","-") os.mkdir(test_data_path+'/'+model_name) save_faults=True compute_etc=True t1 = time.time() m2l_export.loop2geomodeller(model_name,test_data_path,tmp_path,output_path,'../dtm/dtm_rp.tif',bbox, model_top,model_base,save_faults,compute_etc,workflow) t2 = time.time() os.chdir(test_data_path+'/'+model_name) # %system geomodellerbatch.exe -batch m2l.taskfile t3 = time.time() # #%system geomodellerbatch.exe -batch m2l_compute.taskfile t4 = time.time() print("m2l",(t1-t0)/60.0,"export process",(t2-t1)/60.0,"batch process",(t3-t2)/60.0,"batch calculate",(t4-t3)/60.0,"minutes") #shutil.copy('../tmp','.') #shutil.copy('../output','.') #shutil.copy('../graph','.') #shutil.copy('../dtm','.') os.chdir('..') # - # # loopstructural test # + pycharm={"is_executing": false} if(workflow['model_engine']=='loopstructural'): #model_base=-8200 import random from datetime import datetime os.environ['SURFE'] = 'C:/Users/00073294/Dropbox/1_Jupyter_notebooks/surfe/Release/' from LoopStructural import GeologicalModel import surfepy #from LoopStructural import GeologicalModel import lavavu from LoopStructural.visualisation import LavaVuModelViewer from LoopStructural import GeologicalModel import logging logging.getLogger().setLevel(logging.ERROR) import sys #sys.path.append('C:/Users/00073294/Dropbox/1_Jupyter_notebooks/surfe/Release/') #os.environ['SURFE'] = 'C:/Users/00073294/Dropbox/1_Jupyter_notebooks/surfe/Release/' #import surfepy nowtime=datetime.now().isoformat(timespec='minutes') model_name='leaflet'+'_'+nowtime.replace(":","-").replace("T","-") os.mkdir(vtk_path+model_name) filename=vtk_path+model_name+'/'+'surface_name_{}.vtk' 
f=open(tmp_path+'bbox.csv','w') f.write('minx,miny,maxx,maxy,lower,upper\n') ostr='{},{},{},{},{},{}\n'.format(minx,miny,maxx,maxy,model_base,model_top) f.write(ostr) f.close() t1 = time.time() fault_params = {'interpolatortype':'FDI', 'nelements':3e4, 'data_region':.1, 'solver':'pyamg', # overprints:overprints, 'cpw':10, 'npw':10} foliation_params = {'interpolatortype':'PLI' , # 'interpolatortype':'PLI', 'FDI', 'surfe' 'nelements':5e5, # how many tetras/voxels 'buffer':0.8, # how much to extend interpolation around box 'solver':'pyamg', 'damp':True, 'cpw':5} if(not os.path.exists(fault_file)): f=open(output_path + '/fault_displacements3.csv','w') f.write('X,Y,fname,apparent_displacement,vertical_displacement,downthrow_dir\n') f.close() f=open(output_path + '/fault_orientations.csv','w') f.write('X,Y,Z,DipDirection,dip,DipPolarity,formation\n') f.close() f=open(output_path + '/faults.csv','w') f.write('X,Y,Z,formation\n') f.close() f=open(output_path + '/fault-fault-relationships.csv','w') f.write('fault_id\n') f.close() f=open(output_path + '/group-fault-relationships.csv','w') f.write('group\n') f.close() model, m2l_data = GeologicalModel.from_map2loop_directory(test_data_path, skip_faults=True, fault_params=fault_params, foliation_params=foliation_params) else: model, m2l_data = GeologicalModel.from_map2loop_directory(test_data_path, skip_faults=False, fault_params=fault_params, foliation_params=foliation_params) view = LavaVuModelViewer(model,vertical_exaggeration=1) view.nsteps = np.array([200,200,200]) #view.set_zscale(2) view.add_model(cmap='tab20') for sg in model.feature_name_index: if( 'super' in sg): view.add_data(model.features[model.feature_name_index[sg]]) view.nsteps=np.array([50,50,50]) view.add_model_surfaces() #view.add_model_surfaces(filename=filename) view.interactive() for sg in model.feature_name_index: if( 'super' in sg): view.add_data(model.features[model.feature_name_index[sg]]) t2 = time.time() 
print("m2l",(t1-t0)/60.0,"LoopStructural",(t2-t1)/60.0,"Total",(t2-t0)/60.0,"minutes") # - # ## Overlay map over model if(workflow['model_engine']=='loopstructural'): cmap='' dtm = rasterio.open(dtm_reproj_file) if(os.path.exists(fault_file)): f_clip=faults_clip[faults_clip[c_l['f']].str.contains(c_l['fault'])] m2l_export.display_LS_map(model,dtm,geol_clip,f_clip,dst_crs,False,cmap,use_topo=True,use_faults=True) else: f_clip=[] m2l_export.display_LS_map(model,dtm,geol_clip,f_clip,dst_crs,False,cmap,use_topo=True,use_faults=False) dtm.close() # ## Save out voxel model if(workflow['model_engine']=='loopstructural'): voxel_size=500 sizex=int((maxx-minx)/voxel_size) sizey=int((maxy-miny)/voxel_size) sizez=int((model_top-model_base)/voxel_size) print('voxel_size=',voxel_size,', saved in Z,Y,X order, X=',sizex,', Y=',sizey,', Z=',sizez) print('lower south west corner: west=',minx,', south=',miny,', lower=',model_base) voxels=model.evaluate_model(model.regular_grid(nsteps=(sizey,sizex,sizez),shuffle=False)) np.savetxt(tmp_path+'voxels.raw', voxels, fmt='%d',delimiter='\t',newline='') print('voxels saved as',tmp_path+'voxels.raw') # # gempy test # + pycharm={"is_executing": false} if(workflow['model_engine']=='gempy'): t1 = time.time() import importlib importlib.reload(m2l_export) import gempy as gp vtk = False if vtk: vtkexp = gp._plot.export_to_vtk(geo_model, path=vtk_path, name=test_data_name + '.vtk', voxels=False, block=None, surfaces=True) else: m2l_export.loop2gempy(test_data_name,tmp_path,vtk_path,output_path+'orientations_clean.csv', output_path+'contacts_clean.csv',tmp_path+'groups_clean.csv', bbox,model_base, model_top,vtk,dtm_reproj_file) # - # # noddy test # + pycharm={"is_executing": false} if(workflow['model_engine']=='noddy'): import pynoddy.history import networkx as nx #Read a csv file with the vertices of the faults #see notes in the bottom of the notebook for instructions on how to generate such vertices files t1 = time.time() scale=1.5 # scales model 
to fit predefined volume (complete hack) # load fault coordinates faultsxy=pd.read_csv(output_path+'faults.csv') #load fault graph, remove cyclic loops and find (non-unique) age-ordered list G=nx.read_gml(tmp_path+"fault_network.gml") cycles=list(nx.simple_cycles(G)) for c in cycles: G.remove_edge(c[0], c[1]) faults=nx.topological_sort(G) # write out Noe format format file file=open(tmp_path+'faults_for_noe.csv','w') file.write('id,DipDirecti,X,Y\n') for f in faults: fxy=faultsxy[faultsxy["formation"]==f.replace("\n","")] #display(f.replace("\n","")) for ind,xy in fxy.iterrows(): ostr=f.replace('\n','')+',West,'+str(xy['X']/scale)+','+str(xy['Y']/scale)+'\n' file.write(ostr) file.close() csvfile = tmp_path+'faults_for_noe.csv' CsvFaultData = pd.read_csv(csvfile) #how much does the fault slip relative to the fault length SlipParam = 0.1 #the xyz origin of the model you will be generating xy_origin=[minx/scale,miny/scale, 1200-4000] #Get information about each parameter in Noddy format #The output from the function is a dictionary with lists of the fault parameters noddyFormattedFaultData = pynoddy.history.setUpFaultRepresentation(CsvFaultData, xy_origin=xy_origin, SlipParam=SlipParam) #Create a dictionary with the stratigraphy information StratDict = {} StratDict['Heights'] = [2000, 2500, 3000, 3700] StratDict['Names'] = ['Intrusive', 'Felsic', 'Mafic','Sed'] StratDict['Density'] = [2.65, 2.5, 2.4, 2.3] StratDict['MagSus'] = [0.0015, 0.0012, 0.0018, 0.001] #Now make the history file filename = output_path+'faultmodel.his' noddyFormattedFaultData = pynoddy.history.createPyNoddyHistoryFile(noddyFormattedFaultData, StratDict, filename=filename) # + pycharm={"is_executing": false} if(workflow['model_engine']=='noddy'): import vtkplotter as vtkP import itkwidgets import k3d import pynoddy.output import pynoddy.history modelfile = output_path+'faultmodel.his' # Determine the path to the noddy executable noddy_path = '../../pynoddy-new/noddyapp/noddy_win64.exe' # Where you 
would like to place all your output files outputfolder = tmp_path # choose what software to use for visualizing the model #you can also choose to change to itkwidgets, k3d, False (popup), or panel #you might need to install packages depending on what you choose vtkP.settings.embedWindow('k3d') # create a plot in vtkplotter plot = vtkP.Plotter(axes=1, bg='white', interactive=1) # call the plotting function points = pynoddy.output.CalculatePlotStructure(modelfile, plot, noddy_path, outputfolder=outputfolder, LithologyOpacity=0.2, outputOption=0) plot.show(viewup='z') t2 = time.time() print("m2l",(t1-t0)/60.0,"noddy",(t2-t1)/60.0,"Total",(t2-t0)/60.0,"minutes") # -
notebooks/4. NT leaflet example.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Welcome to my first Binder # This notebook is my first Binder. # ## Setup imports from boto.s3.connection import S3Connection import requests from io import BytesIO from PIL import Image import pandas as pd from netCDF4 import Dataset # ## Load an image (from S3) # To use `boto` to load our data, we start by making an `S3Connection`. Because the data are public, we set `anon` to `True`. (NOTE: currently passing empty strings as credentials to prevent a weird bug where `boto` tries and fails to pull credentials automatically, this may be specific to running in GCE.) conn = S3Connection('', '', anon=True) # Get the bucket bucket = conn.get_bucket('binder-sample-data') # Get the key to the image file key = bucket.get_key('sample-image.png') # Load the contents into a buffer and load into an image raw = key.get_contents_as_string() buf = BytesIO(raw) im = Image.open(buf) # Show our image using Jupyter's rich display system! im # ## Load a table (from S3) # We're going to do something very similar, but for a table stored as CSV # First get the bucket and key as before bucket = conn.get_bucket('binder-sample-data') key = bucket.get_key('sample-table.csv') # Now load the file into a byte buffer blob = BytesIO(key.get_contents_as_string()) # And read it into pandas data = pd.read_csv(blob) # And display with the rich display system data.head(10)
index.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + [markdown] id="Y9eJv789s5EL" # # **1/ Introduction:** # # This dataset is about bank marketing in Portugues: # https://www.kaggle.com/henriqueyamahata/bank-marketing # + [markdown] id="ItA5fU1zuoRj" # **#Input variables:** # # 1. Bank client data: # - age (numeric) # - job : type of job (categorical: "admin.","blue-collar","entrepreneur","housemaid","management","retired","self-employed","services","student","technician","unemployed","unknown") # - marital : marital status (categorical: "divorced","married","single","unknown"; note: "divorced" means divorced or widowed) # - education (categorical: "basic.4y","basic.6y","basic.9y","high.school","illiterate","professional.course","university.degree","unknown") # - default: has credit in default? (categorical: "no","yes","unknown") # - housing: has housing loan? (categorical: "no","yes","unknown") # - loan: has personal loan? (categorical: "no","yes","unknown") # # 2. Related with the last contact of the current campaign: # - contact: contact communication type (categorical: "cellular","telephone") # - month: last contact month of year (categorical: "jan", "feb", "mar", ..., "nov", "dec") # - day_of_week: last contact day of the week (categorical: "mon","tue","wed","thu","fri") # - duration: last contact duration, in seconds (numeric). Important note: this attribute highly affects the output target (e.g., if duration=0 then y="no"). Yet, the duration is not known before a call is performed. Also, after the end of the call y is obviously known. Thus, this input should only be included for benchmark purposes and should be discarded if the intention is to have a realistic predictive model. 
# # **#Other attributes:**
# - campaign: number of contacts performed during this campaign and for this client (numeric, includes last contact)
# - pdays: number of days that passed by after the client was last contacted from a previous campaign (numeric; 999 means client was not previously contacted)
# - previous: number of contacts performed before this campaign and for this client (numeric)
# - poutcome: outcome of the previous marketing campaign (categorical: "failure","nonexistent","success")
#
# 3. Social and economic context attributes
# - emp.var.rate: employment variation rate - quarterly indicator (numeric)
# - cons.price.idx: consumer price index - monthly indicator (numeric)
# - cons.conf.idx: consumer confidence index - monthly indicator (numeric)
# - euribor3m: euribor 3 month rate - daily indicator (numeric)
# - nr.employed: number of employees - quarterly indicator (numeric)
#
# **#Output variable (desired target):**
# y - has the client subscribed a term deposit? (binary: "yes","no")
# -

# **#Import Data:**

# + id="b6N0UTV_oUaS" outputId="91266bb6-57da-4f90-8e5d-e2d3a8258e90"
# Importing required libraries.
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
# %matplotlib inline
import seaborn as sns
# -

# # **2/ Data Observation:**

# + id="LoABQcECqE3m"
# The Kaggle export is semicolon-separated.
path = '../input/bank-marketing/bank-additional-full.csv'
bank_mkt_df = pd.read_csv(path, sep=';')
bank_mkt_df.head()

# + id="6OKUS2nSqGI3" outputId="a951140a-ad04-4744-ac75-7e442f6b980a"
bank_mkt_df.info()

# + id="7CPwJ10IqGDb" outputId="f9d68548-231e-4b80-8b36-d732a6b3161d"
# Target class balance, plus a groupby reused below to compare the
# "yes"/"no" sub-populations.
# BUG FIX: the original called .groupby on an undefined name `df`;
# the dataframe is `bank_mkt_df`.
print(bank_mkt_df.y.value_counts())
dfgrouped = bank_mkt_df.groupby('y')
# -

# Split the columns into categorical and numerical feature lists.
# BUG FIX: the boolean masks referenced the undefined name `df`.
categorical = bank_mkt_df.dtypes[bank_mkt_df.dtypes == 'object'].index.tolist()
numerical = bank_mkt_df.dtypes[bank_mkt_df.dtypes != 'object'].index.tolist()
print('numerical: '+str(len(numerical))+': '+str(numerical))
print('categorical: '+str(len(categorical))+': '+ str(categorical))

# + id="rlAVTtwWqF2f" outputId="35c75295-f2b8-4e81-ccf0-cff90a6805af"
# Show each target group ("no"/"yes") for a first visual inspection.
# (Loop variables renamed: the original `type` shadowed the builtin.)
for label, subset in dfgrouped:
    display(label)
    display(subset)
# -

# # **3/ Exploratory Data Analysis:**

# - Numeric variables:

# +
import matplotlib.pyplot as plt
plt.style.use('seaborn-whitegrid')

# Histograms of every numeric column.
# BUG FIX: was `df.hist(...)` on an undefined name; use `bank_mkt_df`.
bank_mkt_df.hist(bins=20, figsize=(14,10), color='#366644')
plt.show()
# -

# - Categorical variables

# One countplot per categorical feature, laid out on a 15x3 grid.
plt.figure(figsize=(22,80), facecolor='white')
plotnumber =1
for i in categorical:
    ax = plt.subplot(15,3,plotnumber)
    sns.countplot(y=i, data=bank_mkt_df)
    plt.xlabel(i)
    plt.title(i)
    plotnumber+=1
plt.show()

# #Data overview:
#
# - Most clients are aged 25-60, with the bulk concentrated between 28-40.
# - Most are being called for the first time or have had < 2 contacts (new customers) and no recent interaction.
# - The top 5 most common jobs in the dataset are admin, blue-collar, technician, services & management.
# - The majority are married (>60%), the rest single (30%), with a small share divorced (10%).
# - Almost none have credit in default, but most carry a housing loan; a few have small personal loans.
# - The dominant contact channel is the cellular phone.
# - Contacts cluster in summer, especially May. There is little difference between days of the week.
# #Correlation between the numeric variables:

def show_correlation_matrix(df, vmin_val = -1, vmax_val = 1):
    """Plot a heatmap of the correlation matrix of `df`'s numeric columns.

    BUG FIX: the original computed the correlation from an undefined global
    `numeric_df` instead of the `df` parameter, and silently ignored the
    `vmin_val`/`vmax_val` parameters; both are honoured now.
    """
    f, ax = plt.subplots(figsize=(16, 10))
    corr_numeric = df.corr()
    sns.heatmap(corr_numeric, cbar=True, cmap="RdBu_r", vmin=vmin_val, vmax=vmax_val, ax=ax)
    plt.title("Correlation Matrix", fontsize=16)
    plt.show()

# - From the correlation matrix we can see that the socio-economic indicators relate inversely to y: when these indices are negative the success rate rises, so they can be used to reduce risk or raise the campaign's success rate. Specifically:
# > cons.conf.idx reflects how confident and optimistic customers are about the market; a positive value means strong consumption, but in this dataset cons.conf.idx is negative, i.e. customers are pessimistic about the market, so they cut spending and lean towards saving via term deposits.

# - Relationship between the categorical variables and the success rate:

# +
def feature_perc(feature, groupby= 'yes'):
    """Percentage of each category of `feature` whose outcome equals `groupby`.

    Relies on the module-level `dfgrouped` / `bank_mkt_df` defined earlier.
    BUG FIX: the denominator used the undefined name `df`.
    """
    count = dfgrouped.get_group(groupby)[feature].value_counts()
    total_count = bank_mkt_df[feature].value_counts()[count.index]
    return (count/total_count)*100

def plot_barh(array,incrementer, bias,ax = None, text_color ='green', palette_style = 'whitegrid',palette_color = 'RdBu'):
    """Horizontal bar plot of a percentage Series, each bar annotated '12.3%'."""
    sns.set_style(palette_style)
    sns.set_palette(palette_color)
    sns.barplot(x= array, y= array.index, ax=ax)
    plt.yticks(np.arange(len(array)))
    # Tick every `incrementer` percent, padded by `bias` past the maximum.
    plt.xticks( range(0, round(max(array)) +bias, incrementer ))
    for index, value in enumerate(array.values):
        plt.text(value +.5, index, s= '{:.1f}%'.format(value), color = text_color)
    return plt

# + id="UDvh1xXldiQk" outputId="b829d52a-989e-4835-8b33-9fab2c705acf"
# Success rate per category for every categorical feature except the target
# itself (the last element). BUG FIX: was `df.dtypes` on an undefined name.
object_feature_list = list(bank_mkt_df.dtypes[bank_mkt_df.dtypes == 'object'].index)
plt.style.use('seaborn-whitegrid')
plt.figure(figsize=(12,6), facecolor='white')
for feature in object_feature_list[:-1]:
    feature_perct = feature_perc(feature)
    plt.title('Tỉ lệ thành công theo {}'.format(feature))
    plot_barh(feature_perct.sort_values(ascending= False),10,20, text_color = 'green')
    plt.show()
# -

# # 6/ Findings:
#
# 1. Potential customer persona: aged roughly 25-50, mainly students, retirees, the unemployed, admins and managers. Singles are more likely to subscribe; no credit in default, though they may be paying off a mortgage or hold a small loan. The main channel to reach this group is the cellular phone.
# 2. New customers -- never contacted, or contacted only 1-2 times -- convert at a higher rate.
# 3. The success rate peaks in March (possibly right after the Tet holiday), followed by December, September, October and April.
# 4. Days of the week differ little, but Thursday looks slightly better, then Wednesday and Tuesday -- mid-week, customers may be less busy and have time to listen to the pitch.
# 5. Key metrics: **Age, Job Title, Education, Marital, Default, Loan, Housing, Campaign, Month, Contact**. `previous` has little influence on y; the socio-economic indices are informative but hard to collect, so treat them as optional.

# # **7/ Data Modeling:**

from sklearn.linear_model import LogisticRegression
from sklearn.ensemble import RandomForestClassifier
from sklearn.svm import SVC
from sklearn.preprocessing import StandardScaler
from sklearn.model_selection import train_test_split
from sklearn.metrics import classification_report, confusion_matrix,accuracy_score
bank-telemarketing-campaign.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # --- # + # #!/usr/bin/env python3 # -*- coding: utf-8 -*- # @Date : Nov-15-20 13:58 # @Author : <NAME> (<EMAIL>) # @Link : http://example.org
feature.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # OpenCV Overlay: Filter2D and Dilate # <img src="attachment:image.png" width="200" align="right"> # This notebook takes the design from notebook [1\_\_Intro_to_OpenCV_on_Jupyter_notebooks.ipynb](http://192.168.3.1:9090/notebooks/computer_vision/1__Intro_to_OpenCV_on_Jupyter_notebooks.ipynb) and shows the power of FPGA acceleration through the use of PYNQ overlays. If you are unfamiliar with the OpenCV function filter2D and dilate, please go through [1\_\_Intro_to_OpenCV_on_Jupyter_notebooks.ipynb](http://192.168.3.1:9090/notebooks/computer_vision/1__Intro_to_OpenCV_on_Jupyter_notebooks.ipynb) first. This example consists of a 2D filter and a dilate function and does the following. # 1. Program overlay # 2. Sets up USB camera # 3. Run software only filter2D # 4. Run SW filter2D + dilate and measure performance # 5. Run SW filter2D + dilate and measure performance # 6. Plot performance # 7. Setup widgets # 8. Run HW filter2D + dilate on input frames from USB camera in real-time # ## Program overlay # Here we program the overlay on the FPGA, load the associated overlay library and load the PYNQ xlnk memory manager library. This process takes a few seconds to do. Overlays are generally composed of an FPGA bitstream and a shared library to access the accelerators via function calls. We use the pynq.Overlay class to load the overlay and rename the associated shared library to xv2. It is also important to set the xlnk allocator library to point the same shared library so allocated memory maps are consistent. The pynq.Xlnk class contains hooks to the SDx memory allocation functions which is important when allocating certain kinds of memory for our accelerators (e.g. 
continguous non-cacheable buffers) # + import cv2 #NOTE: This needs to be loaded first # Load filter2D + dilate overlay from pynq import Overlay bs = Overlay("/usr/local/lib/python3.6/dist-packages/pynq_cv/overlays/xv2Filter2DDilate.bit") bs.download() import pynq_cv.overlays.xv2Filter2DDilate as xv2 # Load xlnk memory mangager from pynq import Xlnk Xlnk.set_allocator_library('/usr/local/lib/python3.6/dist-packages/pynq_cv/overlays/xv2Filter2DDilate.so') mem_manager = Xlnk() # - # ## Setup and configure USB camera <img src="attachment:image.png" width="80" align="right"> # We use OpenCV (cv2) for capturing frames from a USB camera and processing those image frames. Here, we start by setting up the interface to the USB camera and configuring its resolution (1080p). A successful camera setup returns a 'True'. If something is is outputted, shutdown the notebook and restart it again. # + import cv2 camera = cv2.VideoCapture(0) width = 1920 height = 1080 camera.set(cv2.CAP_PROP_FRAME_WIDTH,width) camera.set(cv2.CAP_PROP_FRAME_HEIGHT,height) # - # We add another helper function which sets up an IPython-based imshow call which encodes OpenCV image data to jpeg format before displaying it within the notebook itself. Other methods of displaying image data would perform similar conversions as well. # + import IPython def imshow(img): returnValue, buffer = cv2.imencode('.jpg', img) IPython.display.display(IPython.display.Image(data=buffer.tobytes())) # - # ## Read input frame from USB camera # Read input frame and convert image to gray scale with OpenCV function cvtColor. 
# # **NOTE**: We do a few extra reads up front to flush out the frame buffers in case camera was previously used # + # Flush webcam buffers (needed when rerunning notebook) for _ in range(5): ret, frame_in = camera.read() # Read in a frame ret, frame_in = camera.read() if ret: frame_in_gray = cv2.cvtColor(frame_in,cv2.COLOR_RGB2GRAY) else: print("Error reading frame from camera.") # - # Show input frame in notebook. imshow(frame_in_gray) # ## Run SW Filter2D # Here, we call the OpenCV 2D filter function on the input frame using kernel coefficients for a Laplacian high-pass filter (which gives a kind edge detection). Note that many of the vision processing functions used will operate on gray scale only images. The matrix returned by filter2D is an 2D array (1920x1080) of 8-bit values denoting the brightness of each pixel. # * [OpenCV Filter2D](https://docs.opencv.org/3.2.0/d4/d86/group__imgproc__filter.html#ga27c049795ce870216ddfb366086b5a04) # + import numpy as np #Sobel Hor filter kernelF = np.array([[1.0,2.0,1.0],[0.0,0.0,0.0],[-1.0,-2.0,-1.0]],np.float32) frameF = np.ones((height,width),np.uint8) cv2.filter2D(frame_in_gray, -1, kernelF, frameF, borderType=cv2.BORDER_CONSTANT) # software 2D filter imshow(frameF) # - # ## Run SW Filter2D + SW Dilate and measure performance # We first run our vision processing pipeline in software only mode. By doing a 2D filter followed by a dilate function, we effectively brighten all the white edge lines of our image. In addition, we will iterate our small image processing pipeline (filter2d, dilate) over a few frames and measure the performance of each function as well as the overall performance. We also enable profiling with the %%prun command. 
# + # %%prun -s cumulative -q -l 10 -T prunSW import numpy as np import time kernelF = np.array([[1.0,2.0,1.0],[0.0,0.0,0.0],[-1.0,-2.0,-1.0]],np.float32) #Sobel Hor filter kernelD = np.ones((3,3),np.uint8) frameF = np.ones((height,width),np.uint8) frameD = np.ones((height,width),np.uint8) num_frames = 20 start = time.time() for _ in range(num_frames): cv2.filter2D(frame_in_gray, -1, kernelF, frameF) cv2.dilate(frameF, kernelD, frameD, iterations=1) time_sw_total = time.time() - start print("Frames per second: " + str(num_frames / time_sw_total)) imshow(frameD) # - # ## Process SW profile results print(open('prunSW','r').read()) # res = !cat prunSW | grep filter2D | awk '{{print $$2}}' tottime_sw_filter2d = float(res[0]) # res = !cat prunSW | grep dilate | awk '{{print $$2}}' tottime_sw_dilate = float(res[0]) # ## Run HW Filter2D + HW Dilate and measure performance # Now we take advantage of the library of accelerators in our overlay and accelerate the same two OpenCV functions in hardware. From a function signature point of view, it's about as simple as replacing the OpenCV library call (cv2) with a python-extended Xilinx OpenCV function call (xv2) provided to us by the overlay. The other concept necessary for hardware acceleration is making sure we use continguous memory for our frames. This is done through the use of cma_array calls as opposed to the numpy calls. Some data copying will be necessary when moving data between numpy arrays and cma_arrays if the data. 
# * [xFOpenCV Filter2D](https://github.com/Xilinx/xfopencv/blob/master/include/imgproc/xf_custom_convolution.hpp) ([ug1233](https://www.xilinx.com/support/documentation/sw_manuals/xilinx2018_2/ug1233-xilinx-opencv-user-guide.pdf)) # + # %%prun -s cumulative -q -l 10 -T prunHW import numpy as np import time #laplacian filter, high-pass kernelF = np.array([[1.0,2.0,1.0],[0.0,0.0,0.0],[-1.0,-2.0,-1.0]],np.float32) #Sobel Hor filter kernelVoid = np.zeros(0) xFin = mem_manager.cma_array((height,width),np.uint8) xFbuf = mem_manager.cma_array((height,width),np.uint8) xFout = mem_manager.cma_array((height,width),np.uint8) num_frames = 20 xFin[:] = frame_in_gray[:] start = time.time() for _ in range(num_frames): xv2.filter2D(xFin, -1, kernelF, xFbuf, borderType=cv2.BORDER_CONSTANT) xv2.dilate(xFbuf, kernelVoid, xFout, borderType=cv2.BORDER_CONSTANT) time_hw_total = time.time() - start print("Frames per second: " + str(num_frames / time_hw_total)) frame_out = np.ones((height,width),np.uint8) frame_out[:] = xFout[:] imshow(frame_out) # - # ## Process HW profile results print(open('prunHW','r').read()) # res = !cat prunHW | grep filter2D | awk '{{print $$2}}' tottime_hw_filter2d = float(res[0]) # res = !cat prunHW | grep dilate | awk '{{print $$2}}' tottime_hw_dilate = float(res[0]) # ## Plot performance # In addition to having easy access to OpenCV functions, we can access functions from pyPlot for plotting results in graphs and charts. Here, we take the recorded time data and plot out the processing times in a bar chart along with computed FPS of each function. Pay particular attention to the actual performance of each function and note the effect when placing two functions back-to-back in this example. 
# + # %matplotlib inline from matplotlib import pyplot as plt TIME_SW = [t*1000/num_frames for (t) in (time_sw_total, tottime_sw_dilate, tottime_sw_filter2d)] FPS_SW = [1000/t for (t) in (TIME_SW)] TIME_HW = [t*1000/num_frames for (t) in (time_hw_total, tottime_hw_dilate, tottime_hw_filter2d)] FPS_HW = [1000/t for (t) in (TIME_HW)] LABELS = ['Total','Dilate','Filter2D'] f, ((ax1, ax2),(ax3, ax4)) = plt.subplots(2, 2, sharex='col', sharey='row', figsize=(15,4)) x_pos = np.arange(len(LABELS)) plt.yticks(x_pos, LABELS) ax1.barh(x_pos, TIME_SW, height=0.6, color='g', zorder=3) ax1.invert_yaxis() ax1.set_xlabel("Execution Time per frame [ms]") ax1.set_ylabel("Kernel (SW)") ax1.grid(zorder=0) ax2.barh(x_pos, FPS_SW, height=0.6, color='b', zorder=3) ax2.invert_yaxis() ax2.set_xlabel("Frames per second") ax2.grid(zorder=0) ax3.barh(x_pos, TIME_HW, height=0.6, color='g', zorder=3) ax3.invert_yaxis() ax3.set_xlabel("Execution Time per frame [ms]") ax3.set_ylabel("Kernel (HW)") ax3.grid(zorder=0) ax4.barh(x_pos, FPS_HW, height=0.6, color='b', zorder=3) ax4.invert_yaxis() ax4.set_xlabel("Frames per second") ax4.grid(zorder=0) plt.show() # - # ## Setup control widgets # # Here, we define some kernel configurations that will be used to change the functionality of the 2D filter on the fly. A pulldown menu will appear below this cell to be used to change the filter2D kernel used subsequent cells. 
# +
from ipywidgets import interact, interactive, fixed, interact_manual, IntSlider, FloatSlider
import ipywidgets as widgets

# Current 3x3 convolution kernel; replaced in place by the pulldown callback.
kernel_g = np.array([[1.0,2.0,1.0],[0.0,0.0,0.0],[-1.0,-2.0,-1.0]],np.float32) #Sobel Hor

def setKernelAndFilter3x3(kernelName):
    """Pulldown callback: swap the global filter2D kernel for the named
    3x3 preset (unknown names fall back to a 3x3 box blur)."""
    global kernel_g
    kernel_g = {
        'laplacian high-pass': np.array([[0.0, 1.0, 0],[1.0, -4, 1.0],[0, 1.0, 0.0]],np.float32),
        'gaussian high-pass': np.array([[-0.0625,-0.125,-0.0625],[-0.125,0.75,-0.125],[-0.0625,-0.125,-0.0625]],np.float32),
        'gaussian blur': np.array([[0.0625,0.125,0.0625],[0.125,0.25,0.125],[0.0625,0.125,0.0625]],np.float32),
        'Sobel Ver': np.array([[1.0,0.0,-1.0],[2.0,0.0,-2.0],[1.0,0.0,-1.0]],np.float32),
        'Sobel Hor': np.array([[1.0,2.0,1.0],[0.0,0.0,0.0],[-1.0,-2.0,-1.0]],np.float32)
    }.get(kernelName, np.ones((3,3),np.float32)/9.0)

interact(setKernelAndFilter3x3, kernelName=['Sobel Hor','Sobel Ver','laplacian high-pass','gaussian high-pass','gaussian blur']);

# + [markdown] slideshow={"slide_type": "subslide"}
# ## Run HW filter2D + dilate on input frames from USB camera in real-time
# Now we will run the 2D filter + dilate on input frames from the USB camera and display the results in real time. We will also respond to feedback from our pulldown menu which changes the 2D filter coefficients. While this interaction does not capture the performance acceleration of the HW implementation, it does allow us to receive real-time feedback of the accelerated hardware functions.
#
# NOTE: To allow for live interaction of the pull-down menu selection with the processing loop, the loop will be run as a separate thread for ~20 seconds. As soon as the kernel coefficient is changed, the image will instead appear under the widget cell along with the eventual reported FPS calculations. The normal feedback of when a cell is complete will therefore not function (* will disappear immediately). The indication when the loop is complete will therefore be when the FPS calculation is reported. Adjusting the loop time requires changing the num_frames variable.

# +
def loop_hw2_app():
    """Stream up to num_frames camera frames through the HW filter2D +
    dilate pipeline, display each result, then report the achieved FPS."""
    global kernel_g
    # Empty kernel selects the accelerator's default dilate structuring
    # element. Defined locally so this cell no longer depends on the
    # `kernelVoid` global of the earlier benchmarking cell (the unused
    # `kernelD` of the original was dropped).
    kernelVoid = np.zeros(0)
    frame_out = np.ones((height,width),np.uint8)
    # Physically contiguous (CMA) buffers required by the HW accelerators.
    xFin = mem_manager.cma_array((height,width),np.uint8)
    xFbuf = mem_manager.cma_array((height,width),np.uint8)
    xFout = mem_manager.cma_array((height,width),np.uint8)

    num_frames = 60
    frames_done = 0
    start = time.time()
    for _ in range(num_frames):
        # Capture frame-by-frame
        ret, frame_in = camera.read()
        if (not ret):
            # Release the Video Device if ret is false
            camera.release()
            # Message to be displayed after releasing the device
            print("Released Video Resource")
            break
        frames_done += 1
        frame_in_gray = cv2.cvtColor(frame_in,cv2.COLOR_RGB2GRAY)
        xFin[:] = frame_in_gray[:]
        xv2.filter2D(xFin, -1, kernel_g, xFbuf, borderType=cv2.BORDER_CONSTANT)
        xv2.dilate(xFbuf, kernelVoid, xFout, borderType=cv2.BORDER_CONSTANT)
        frame_out[:] = xFout[:]
        imshow(frame_out)
        IPython.display.clear_output(wait=True)
    end = time.time()
    # Bug fix: report FPS from the frames actually processed (the original
    # divided by num_frames even when the loop exited early on a read error).
    print("Frames per second: " + str(frames_done / (end - start)))

from threading import Thread
t = Thread(target=loop_hw2_app, )
t.start()
# -

# ## Release USB camera resource
#
# **NOTE**: This is needed to close the camera between subsequent runs. If the camera is unable to read a frame, be sure to call camera.release() and then try opening the VideoCapture again.

camera.release()

# <font color=red size=4>IMPORTANT NOTE</font>: Be sure to run the cell below, shutting down the notebook, before starting a new one. The notebook interface shows "No Kernel", the cell below will incorrectly show a running status [ * ]. You can ignore this and safely close the tab of the notebook.

# + language="javascript"
# Jupyter.notebook.session.delete();
# -
boards/ZCU104/notebooks/2__Overlays-filter2d_and_dilate.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 2 # language: python # name: python2 # --- # # Embeddings model # + # ps use conda Kernel = (embeddings_py2) or language_model # %matplotlib inline import os import random import numpy as np import json import matplotlib.pyplot as plt #import cPickle as pickle import pickle from matplotlib.pyplot import imshow from PIL import Image, ImageDraw from sklearn.manifold import TSNE from tqdm import tqdm from IPython.display import clear_output import keras # - # First, we will load our image paths and feature vectors from the previous notebook into memory. We can print their contents to get an idea of what they look like: # + # ORIGINAL DATASET # ebmeddings_from_celeb_12k_noflips_160px.p # embeddings_from_celeb_12kwithflips_160px.p # embeddings_from_celeb_30k_noflip_160px.p # embeddings_from_celeb_30k_twoflips_160px.p #path = '/home/kangeunsu/facenet/ebmeddings_from_celeb_12k_noflips_160px.p' path = '/home/kangeunsu/facenet/embeddings_from_celeb_30k_noflip_160px.p' directory = "/home/kangeunsu/progressive_growing_of_gans/celeba-hq-1024x1024_test_extract/" # load features full_features = pickle.load(open(path, 'rb'), encoding='latin1') full_features = full_features[0] print("full_features : ",len(full_features), len(full_features[0])) import fnmatch # load images files = sorted(os.listdir(directory)) frame_files = fnmatch.filter(files, '*.png') full_paths = [directory+file for file in frame_files] images_mine = full_paths images_ORIG_loaded = images_mine pca_features_ORIG = full_features for i, f in list(zip(images_ORIG_loaded, pca_features_ORIG))[0:5]: print("image: %s, features: %0.2f,%0.2f,%0.2f,%0.2f... 
"%(i, f[0], f[1], f[2], f[3])) # + # GENERATED DATASET # embeddings_from_genCeleb_2ksample_noflips.p # embeddings_from_genCeleb_20ksample_noflips.p #1.3G Apr 20 14:07 embeddings_from_100k_generated_images_no_flips.p path = '/home/kangeunsu/facenet/embeddings_from_100k_generated_images_no_flips.p' #directory = "/home/kangeunsu/CelebAHQ_generated_images/200-celebahq-1024x1024/100k_generated/" directory = "/home/kangeunsu/CelebAHQ_generated_images/200-celebahq-1024x1024/100k_resized_generated/" # not the source images = but downscaled ones, which can serve as tiles # load features full_features = pickle.load(open(path, 'rb'), encoding='latin1') full_features = full_features[0] print("full_features : ",len(full_features), len(full_features[0])) import fnmatch # load images files = sorted(os.listdir(directory)) frame_files = fnmatch.filter(files, '*.png') full_paths = [directory+file for file in frame_files] images_mine = full_paths images_GEN_loaded = images_mine pca_features_GEN = full_features for i, f in list(zip(images_GEN_loaded, pca_features_GEN))[0:5]: print("image: %s, features: %0.2f,%0.2f,%0.2f,%0.2f... "%(i, f[0], f[1], f[2], f[3])) # + num_images_to_plot_ORIG = 30000 num_images_to_plot_GEN = 30000 # should be fair representation print(len(images_ORIG_loaded), len(images_GEN_loaded), len(pca_features_ORIG), len(pca_features_GEN)) y_original = np.ones(len(pca_features_ORIG)) y_generated = np.zeros(len(pca_features_GEN)) print(len(y_original), len(y_generated)) # - # In our dataset that we've loaded, there are 9144 images. Although in principle, t-SNE works with any number of images, it's difficult to place that many tiles in a single image. So instead, we will take a random subset of 1000 images and plot those on a t-SNE instead. This step is optional. 
# + # Shuffle images_ORIG_shuffled = images_ORIG_loaded images_GEN_shuffled = images_GEN_loaded print("Total images: ", len(images_ORIG_loaded), "orig and ", len(images_GEN_loaded), "gen") pca_features_ORIG_shuffled = pca_features_ORIG pca_features_GEN_shuffled = pca_features_GEN print("Total features: ", len(pca_features_ORIG), "orig and ", len(pca_features_GEN), "gen") y_original_shuffled = y_original y_generated_shuffled = y_generated if len(images_ORIG_loaded) >= num_images_to_plot_ORIG: sort_order = sorted(random.sample(range(len(images_ORIG_shuffled)), num_images_to_plot_ORIG)) images_ORIG_shuffled = [images_ORIG_shuffled[i] for i in sort_order] pca_features_ORIG_shuffled = [pca_features_ORIG_shuffled[i] for i in sort_order] y_original_shuffled = [y_original_shuffled[i] for i in sort_order] print("shuffled originals") if len(images_GEN_loaded) >= num_images_to_plot_GEN: sort_order = sorted(random.sample(range(len(images_GEN_shuffled)), num_images_to_plot_GEN)) images_GEN_shuffled = [images_GEN_shuffled[i] for i in sort_order] pca_features_GEN_shuffled = [pca_features_GEN_shuffled[i] for i in sort_order] y_generated_shuffled = [y_generated_shuffled[i] for i in sort_order] print("shuffled generated") print("After shuffling:") print("Shuffled Original: images", len(images_ORIG_shuffled), "features", len(pca_features_ORIG_shuffled),"x",len(pca_features_ORIG_shuffled[0])) print("Shuffled Generated: images", len(images_GEN_shuffled),"features", len(pca_features_GEN_shuffled),"x",len(pca_features_GEN_shuffled[0])) images = np.concatenate((images_ORIG_shuffled, images_GEN_shuffled), axis=0) pca_features = np.concatenate((pca_features_ORIG_shuffled, pca_features_GEN_shuffled), axis=0) y = np.concatenate((y_original_shuffled, y_generated_shuffled), axis=0) print("Combined: images", len(images), "features", len(pca_features),"x", len(pca_features[0])) print("Y: ", len(y)) # + # Then run PCA (optionally) from sklearn.decomposition import PCA pca = PCA(n_components=300) 
pca.fit(pca_features)
pca_features = pca.transform(pca_features)
print("pca_features : ", len(pca_features), len(pca_features[0]))
# -

# # process model

# +
# data processing
# X = features of both
# Y = 0 if real, 1 if generated
data = pca_features
labels = y

# +
import pandas
from keras.models import Sequential
from keras.layers import Dense
from keras.wrappers.scikit_learn import KerasClassifier
from sklearn.model_selection import cross_val_score
from sklearn.preprocessing import LabelEncoder
from sklearn.model_selection import StratifiedKFold
from sklearn.preprocessing import StandardScaler
from sklearn.pipeline import Pipeline
import numpy as np
from sklearn.model_selection import train_test_split
# -

# Hold out a validation split for the two-layer model further below.
# (Bug fix: this call was commented out in the original, which left
# data_train / labels_train undefined when the second model is fitted.)
data_train, data_test, labels_train, labels_test = train_test_split(
    data, labels, test_size=0.33)  # random_state=42

# +
from keras.layers import Input, Dense, Dropout
from keras.models import Model

# Simple MLP: 300-d PCA features -> 256 -> 2 -> 1 sigmoid (real vs generated).
inputs = Input(shape=(300,))
x = Dense(256, activation='relu')(inputs)
x = Dense(2, activation='relu')(x)
predictions = Dense(1, activation='sigmoid')(x)

# This creates a model that includes
# the Input layer and three Dense layers
model = Model(inputs=inputs, outputs=predictions)

# adam rmsprop
model.compile(optimizer='adam',
              loss='binary_crossentropy',
              metrics=['accuracy'])
# or https://machinelearningmastery.com/binary-classification-tutorial-with-the-keras-deep-learning-library/

# +
#history = model.fit(data_train, labels_train, epochs=100, verbose=1, validation_data=(data_test, labels_test)) # starts training
history = model.fit(data, labels, epochs=100, verbose=1)  # starts training
# -

#print(history.history)

# Bug fix: keep a handle on the figure; the original called fig.savefig()
# without ever defining `fig` (NameError).
fig = plt.figure()
plt.title("simple model with pca300 dim data->256->2->1")
plt.plot(history.history['loss'], label="loss")
plt.plot(history.history['acc'], label="acc")
#plt.plot(history.history['val_loss'], label="val_loss")
#plt.plot(history.history['val_acc'], label="val_acc")
plt.legend()
plot_save_path = "/home/kangeunsu/ArtML/Embeddings/model_training/300features_Dense256-2_1out.pdf"
fig.savefig(plot_save_path, bbox_inches='tight')

for layer in model.layers:
    print(layer.output)

# +
from keras.layers import Input, Dense, Dropout
from keras.models import Model

# Same classifier but with two 256-unit hidden layers, trained with a
# held-out validation set.
inputs = Input(shape=(300,))
x = Dense(256, activation='relu')(inputs)
x = Dense(256, activation='relu')(x)
predictions = Dense(1, activation='sigmoid')(x)

# This creates a model that includes
# the Input layer and three Dense layers
model = Model(inputs=inputs, outputs=predictions)

# adam rmsprop
model.compile(optimizer='adam', loss='binary_crossentropy', metrics=['accuracy'])

history = model.fit(data_train, labels_train, epochs=100, verbose=1,
                    validation_data=(data_test, labels_test))  # starts training
# -

plt.figure()
plt.title("two layer model")
plt.plot(history.history['loss'], label="loss")
plt.plot(history.history['acc'], label="acc")
plt.plot(history.history['val_loss'], label="val_loss")
plt.plot(history.history['val_acc'], label="val_acc")
plt.legend()
Embeddings_to_process_features/facenet_embeddings_model.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + [markdown] slideshow={"slide_type": "slide"} # ## `for` sententzia # # Programazio lengoai gehienetan oinarrizko `for` kontrol egitura errepikakor bat egon ohi da. Python-en kasuan, `for` sententzia ez da horren *oinarrizkoa*: # # ``` python # for aldagaia in iteragarria : # kode-blokea1 # else : # kode-blokea2 # ``` # # <div class="alert alert-info"> # Objektu iteragarri batek objektu sekuentzia bat adierazten du. # </div> # + [markdown] slideshow={"slide_type": "slide"} # ``` python # for aldagaia in iteragarria : # kode-blokea1 # else : # kode-blokea2 # ``` # # Kontrol egituraren konportamoldea: # # 1. Aldagaiari iteragarriaren objektuak banan-banan esleituko zaizkio, eta esleipen bakoitzaren ondoren lehenengo kode blokea exekutatuko da. # 1. Objektu iteragarria *agortzen* denean, Bigarren kode blokea exetutatuko da, kontrol egitura amaituz # + [markdown] slideshow={"slide_type": "slide"} # Orain arte ikusitako objektuetatik, `str` motakoak iteragarriak dira. Karaktere kate bat *zeharkatzean* (iteratzean), bere karaktereak lortuko ditugu: # + slideshow={"slide_type": "fragment"} for k in "kaixo denoi!": print("-->",k,"<--",type(k)) # - ## 1728 * idatzi("kaixo") for _ in " "*1728: print("kaixo") # + [markdown] slideshow={"slide_type": "slide"} # ## `range()` funtzioa # # Zenbaki oso zekuentziak sortzen dituen funtzioa da. Funtzioak iteragarria izango den objektu bat bueltatzen du. 
Objektua zeharkatzean, sekuentziako balioak lortzen ditugu: # + slideshow={"slide_type": "-"} print(type(range(10))) print(range(10)) print(list(range(10))) for i in range(5) : # 0-tik habiatuz, lehenengo 5 zenbaki osoak print(i,type(i)) # + [markdown] slideshow={"slide_type": "slide"} # `range()` funtzioan argumentu gehiago erabil ditzakegu: # + slideshow={"slide_type": "-"} help(range) # + [markdown] slideshow={"slide_type": "slide"} # `range()` funtzioarekin 1, 2 edo 3 argumentu erabil daitezke: # * `range(g)` &rarr; $\{0,1,2,...,g-1\}$ # * `range(a,g)` &rarr; $\{a,a+1,a+2,...,g-1\}$ # * `range(a,g,h)` &rarr; $\{a,a+h,a+2h,\dots,k\} , k \lt g \le k+h$ # * `a`: *abiapuntua* (defektuz, 0) # * `g`: *geldiunea* # * `h`: *hurratsa* (defektuz, 1) # # <div class="alert alert-info"> # <code>stop</code>, <code>start</code> eta <code>step</code> argumentuek zenbaki osoak behar dute izan. # </div> # + [markdown] slideshow={"slide_type": "slide"} # `range()` funtzioa oso erabilgarria da `for` kontrol egiturarekin sententzia multzo bat nahi adina aldiz errepikatzeko: # + slideshow={"slide_type": "slide"} list(range(3,10,2)) # + slideshow={"slide_type": "fragment"} n = int(input("zenbat aldiz:")) for i in range(n): print("Ez dut berriro behar baina gehiago idatziko") # + [markdown] slideshow={"slide_type": "slide"} # `for` eta `range()` erabiliz, oso erraza litzateke # # $\sum_{i=1}^{100}{i} = 1 + 2 + 3 + \dots + 100$ # # batukariaren emaitza kalkulatzea: # + slideshow={"slide_type": "fragment"} batura = 0 for i in range(1,1_000_000+1): batura = batura + i #batura += i print(batura) print(1_000_000 * 1_000_001 // 2) # + [markdown] slideshow={"slide_type": "slide"} # `range()` funtzioak bueltatzen duen objektuak zekuentzia bat *errepresentatzen* du. Sekuentziako balioak zeharkatu ahala sortzen dira, ez aurrez. 
# + slideshow={"slide_type": "-"} r = range(10000000000000000000000000000000000000000000000000000) print(r) a = 0 # + [markdown] slideshow={"slide_type": "-"} # <div class="alert alert-info"> # Aurreko sekuentzia honetan sortuko liratekeen zenbaki oso guztiak gordetzeko, munduko disko zurrun guztiak gutxi lirateke! # </div> # + [markdown] slideshow={"slide_type": "-"} # <table border="0" width="100%" style="margin: 0px;"> # <tr> # <td style="text-align:left"><a href="while sententzia.ipynb">&lt; &lt; while sententzia &lt; &lt;</a></td> # <td style="text-align:right"><a href="break eta continue sententziak.ipynb">&gt; &gt; break eta continue sententziak &gt; &gt;</a></td> # </tr> # </table>
Gardenkiak/Programazioa/for sententzia.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # Regularized logistic regression

# +
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import plot_utils
from logistic_regressor import LogisticRegressor
import warnings
warnings.filterwarnings("ignore", category=FutureWarning)

# This is a bit of magic to make matplotlib figures appear inline in the notebook
# rather than in a new window.
# %matplotlib inline
plt.rcParams['figure.figsize'] = (15.0, 10.0)  # set default size of plots
plt.rcParams['image.interpolation'] = 'nearest'
plt.rcParams['image.cmap'] = 'gray'

# Some more magic so that the notebook will reload external python modules;
# see http://stackoverflow.com/questions/1907993/autoreload-of-modules-in-ipython
# %load_ext autoreload
# %autoreload 2

# +
##================ Part 0: Reading data and plotting ==================#
import pandas as pd
import numpy as np

data = pd.read_csv('ex2data2.txt')
X = np.vstack([data.x1, data.x2]).T
y = data.y

print('Plotting data with green circle indicating (y=1) examples and red circle indicating (y=0) examples ...')
plot_utils.plot_twoclass_data(X, y, 'Chip Test 1', 'Chip Test 2', ['y=0', 'y=1'])
plt.show()
# -

# Sanity check of the regularized loss/gradient on the raw (unexpanded)
# data with an intercept column of ones.
XX = np.vstack([np.ones((X.shape[0],)), X.T]).T
from logistic_regressor import RegLogisticRegressor
log_reg1 = RegLogisticRegressor()
theta = np.ones((XX.shape[1],))
loss = log_reg1.loss(theta, XX, y, 100)
grad = log_reg1.grad_loss(theta, XX, y, 100)

# +
#================ Part 1: Compute cost and gradient ==================#
# open logistic_regressor.py and implement the regularized loss function
# and gradient

# map the features in ex2data2.txt into a pth order polynomial
from sklearn.preprocessing import PolynomialFeatures

# Map X onto polynomial features and normalize
# (the redundant `import sklearn` + fully-qualified call was simplified)
p = 6
poly = PolynomialFeatures(degree=p, include_bias=False)
X_poly = poly.fit_transform(X)

# set up the data matrix (expanded basis functions) with the column of ones as intercept
XX = np.vstack([np.ones((X_poly.shape[0],)), X_poly.T]).T

# set up a regularized logistic regression model
from logistic_regressor import RegLogisticRegressor
reg_lr1 = RegLogisticRegressor()

# run fmin on the loss function and gradient
reg = 100
theta_opt = reg_lr1.train(XX, y, reg=reg, num_iters=1000, norm=False)

# print the theta found and the final loss
print('Theta found by fmin_bfgs: %s' % theta_opt)
print("Final loss = %.4f" % reg_lr1.loss(theta_opt, XX, y, 0.0))

# plot the decision boundary
plot_utils.plot_decision_boundary_poly(X, y, theta_opt, reg, p, 'Chip Test 1', 'Chip Test 2', ['y = 0', 'y = 1'])
plt.show()

# +
# compute accuracy on training set
# implement the predict method in logistic_regressor.py
reg_lr1.theta = theta_opt
predy = reg_lr1.predict(XX)

# accuracy = fraction of training examples whose prediction matches the label
accuracy = float(np.sum(y == predy)) / y.shape[0]
print("Accuracy on the training set = %.4f" % accuracy)
# -

# # Comparing learned model with sklearn's logistic ridge regression

# +
# Compare with model learned by sklearn's logistic regression with reg = 1/C
# the regularization parameter set below can be varied (on a logarithmic scale)
reg = 10

# L2 regularization with sklearn LogisticRegression
from sklearn import linear_model
sk_logreg_l2 = linear_model.LogisticRegression(C=1.0/reg, solver='lbfgs', fit_intercept=False)
sk_logreg_l2.fit(XX, y)
print("Theta found by sklearn with L2 reg: %s" % sk_logreg_l2.coef_)
print("Loss with sklearn theta: %.4f" % reg_lr1.loss(sk_logreg_l2.coef_[0], XX, y, 0.0))

# Bug fix: these plots show the chip-test data, but the axis/legend labels
# were copied from the exam-scores exercise; label them correctly.
plot_utils.plot_decision_boundary_sklearn_poly(X, y, sk_logreg_l2, reg, p, 'Chip Test 1', 'Chip Test 2', ['y = 0', 'y = 1'])
plt.show()

plot_utils.plot_regularization_path(XX, y, reg, 'l2')
plt.show()
# -

# # L1 regularized logistic regression

# +
# L1 regularization with sklearn LogisticRegression
sk_logreg_l1 = linear_model.LogisticRegression(C=1.0/reg, solver='liblinear', fit_intercept=False, penalty='l1')
sk_logreg_l1.fit(XX, y)
print("Theta found by sklearn with L1 reg: %s" % sk_logreg_l1.coef_)
print("Loss with sklearn theta: %.4f" % reg_lr1.loss(sk_logreg_l1.coef_[0], XX, y, 0.0))

# Bug fix: correct dataset labels here as well (see L2 cell above for the
# same correction).
plot_utils.plot_decision_boundary_sklearn_poly(X, y, sk_logreg_l1, reg, p, 'Chip Test 1', 'Chip Test 2', ['y = 0', 'y = 1'])
plt.show()

# plot regularization paths for L1 regression
# Exploration of L1 regularization
# plot_utils.plot_regularization_path(XX,y,reg,'l1')
plt.show()
# -
hw2/logreg/logreg_reg.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

from google.cloud import bigquery
from google.cloud.exceptions import NotFound

# +
# Fetch the BigQuery dataset for the diabetic-readmission project, creating
# it (in the US multi-region) if it does not exist yet.
READMISSIONS_DATASET = 'diabetic_readmissions'

client = bigquery.Client()
dataset_ref = client.dataset(READMISSIONS_DATASET)

try:
    dataset = client.get_dataset(dataset_ref)
except NotFound:
    # Dataset does not exist yet -- create it.
    dataset = bigquery.Dataset(dataset_ref)
    dataset.location = 'US'
    dataset = client.create_dataset(dataset)

print("Dataset ID: ", dataset.dataset_id)
print("Dataset Project: ", dataset.project)

# +
# Load the renamed test-set CSV from Cloud Storage into a new table,
# using an explicit schema matching the CSV's column order and types.
DEVELOPMENT_DATASET = 'gs://jk-readmissions/test_set_renamed.csv'

# NOTE(review): 'disaeses_of_the_genitourinary_system' looks like a typo for
# 'diseases_...', but it presumably mirrors the renamed CSV header -- confirm
# against the source file before changing it, or the load would mislabel
# the column.
schema = [
    bigquery.SchemaField('age', 'STRING'),
    bigquery.SchemaField('time_in_hospital', 'INTEGER'),
    bigquery.SchemaField('num_lab_procedures', 'INTEGER'),
    bigquery.SchemaField('num_procedures', 'INTEGER'),
    bigquery.SchemaField('num_medications', 'INTEGER'),
    bigquery.SchemaField('number_outpatient', 'INTEGER'),
    bigquery.SchemaField('number_emergency', 'INTEGER'),
    bigquery.SchemaField('number_inpatient', 'INTEGER'),
    bigquery.SchemaField('number_diagnoses', 'INTEGER'),
    bigquery.SchemaField('max_glu_serum', 'STRING'),
    bigquery.SchemaField('A1Cresult', 'STRING'),
    bigquery.SchemaField('metformin', 'STRING'),
    bigquery.SchemaField('repaglinide', 'STRING'),
    bigquery.SchemaField('glimepiride', 'STRING'),
    bigquery.SchemaField('glipizide', 'STRING'),
    bigquery.SchemaField('glyburide', 'STRING'),
    bigquery.SchemaField('pioglitazone', 'STRING'),
    bigquery.SchemaField('rosiglitazone', 'STRING'),
    bigquery.SchemaField('insulin', 'STRING'),
    bigquery.SchemaField('change', 'STRING'),
    bigquery.SchemaField('diabetes_med', 'STRING'),
    bigquery.SchemaField('readmitted', 'STRING'),
    bigquery.SchemaField('num_visits', 'INTEGER'),
    bigquery.SchemaField('diseases_of_the_blood', 'INTEGER'),
    bigquery.SchemaField('diseases_of_the_circulatory_system', 'INTEGER'),
    bigquery.SchemaField('diseases_of_the_digestive_system', 'INTEGER'),
    bigquery.SchemaField('disaeses_of_the_genitourinary_system', 'INTEGER'),
    bigquery.SchemaField('diseases_of_the_musculoskeletal_system', 'INTEGER'),
    bigquery.SchemaField('diseases_of_the_nervous_system', 'INTEGER'),
    bigquery.SchemaField('diseases_of_the_respiratory_system', 'INTEGER'),
    bigquery.SchemaField('diseases_of_the_skin', 'INTEGER'),
    bigquery.SchemaField('endocrine_and_other_diseases', 'INTEGER'),
    bigquery.SchemaField('injury_and_poisoning', 'INTEGER'),
    bigquery.SchemaField('mental_disorders', 'INTEGER'),
    bigquery.SchemaField('neoplasms', 'INTEGER'),
    bigquery.SchemaField('other_conditions', 'INTEGER'),
    bigquery.SchemaField('persons_encountering_health_services', 'INTEGER'),
    bigquery.SchemaField('persons_with_condition', 'INTEGER'),
    bigquery.SchemaField('symptoms_signs_ill_defined_conditions', 'INTEGER'),
    bigquery.SchemaField('diagnosis', 'INTEGER'),
    bigquery.SchemaField('african_american', 'INTEGER'),
    bigquery.SchemaField('asian', 'INTEGER'),
    bigquery.SchemaField('hispanic', 'INTEGER'),
    bigquery.SchemaField('other_race', 'INTEGER'),
    bigquery.SchemaField('female', 'INTEGER')
]

job_config = bigquery.LoadJobConfig()
job_config.schema = schema
# First CSV row is the header, not data.
job_config.skip_leading_rows = 1
job_config.source_format = bigquery.SourceFormat.CSV

load_job = client.load_table_from_uri(
    DEVELOPMENT_DATASET,
    dataset_ref.table("new_patient_records"),
    job_config=job_config
)
# Block until the load job finishes (raises if the job failed).
load_job.result()
# -
readmissions/notebooks/sandbox.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# ---
#
# _You are currently looking at **version 1.3** of this notebook. To download notebooks and datafiles, as well as get help on Jupyter notebooks in the Coursera platform, visit the [Jupyter Notebook FAQ](https://www.coursera.org/learn/python-machine-learning/resources/bANLa) course resource._
#
# ---

# # Assignment 1 - Introduction to Machine Learning

# For this assignment, you will be using the Breast Cancer Wisconsin (Diagnostic) Database to create a classifier that can help diagnose patients. First, read through the description of the dataset (below).

# +
import numpy as np
import pandas as pd
from sklearn.datasets import load_breast_cancer

# `cancer` is a module-level Bunch used by all the answer_* functions below.
cancer = load_breast_cancer()

#print(cancer.DESCR) # Print the data set description
# -

# The object returned by `load_breast_cancer()` is a scikit-learn Bunch object, which is similar to a dictionary.

#cancer.keys()

# ### Question 0 (Example)
#
# How many features does the breast cancer dataset have?
#
# *This function should return an integer.*

# +
# You should write your whole answer within the function provided. The autograder will call
# this function and compare the return value against the correct solution value

def answer_zero():
    """Return the number of features in the breast cancer dataset (an int)."""
    # This function returns the number of features of the breast cancer dataset, which is an integer.
    # The assignment question description will tell you the general format the autograder is expecting
    return len(cancer['feature_names'])

# You can examine what your function returns by calling it in the cell. If you have questions
# about the assignment formats, check out the discussion forums for any FAQs
answer_zero()
# -

# ### Question 1
#
# Scikit-learn works with lists, numpy arrays, scipy-sparse matrices, and pandas DataFrames, so converting the dataset to a DataFrame is not necessary for training this model. Using a DataFrame does however help make many things easier such as munging data, so let's practice creating a classifier with a pandas DataFrame.
#
#
#
# Convert the sklearn.dataset `cancer` to a DataFrame.
#
# *This function should return a `(569, 31)` DataFrame with *
#
# *columns = *
#
# ['mean radius', 'mean texture', 'mean perimeter', 'mean area',
# 'mean smoothness', 'mean compactness', 'mean concavity',
# 'mean concave points', 'mean symmetry', 'mean fractal dimension',
# 'radius error', 'texture error', 'perimeter error', 'area error',
# 'smoothness error', 'compactness error', 'concavity error',
# 'concave points error', 'symmetry error', 'fractal dimension error',
# 'worst radius', 'worst texture', 'worst perimeter', 'worst area',
# 'worst smoothness', 'worst compactness', 'worst concavity',
# 'worst concave points', 'worst symmetry', 'worst fractal dimension',
# 'target']
#
# *and index = *
#
# RangeIndex(start=0, stop=569, step=1)

# +
def answer_one():
    """Return the cancer dataset as a (569, 31) DataFrame.

    The 30 feature columns come from ``cancer.data``; a 31st ``target``
    column (0 = malignant, 1 = benign) is appended.
    """
    # Your code here
    import pandas as pd
    # Explicit index reproduces RangeIndex(0, 569) as the spec requires.
    df = pd.DataFrame(data = cancer.data, columns=cancer.feature_names, index = np.arange(0, 569,1))
    df["target"] = cancer.target
    return df

answer_one()
# -

# ### Question 2
# What is the class distribution? (i.e. how many instances of `malignant` (encoded 0) and how many `benign` (encoded 1)?)
#
# *This function should return a Series named `target` of length 2 with integer values and index =* `['malignant', 'benign']`

# +
def answer_two():
    """Return the class distribution as a Series named ``target``.

    Index is ``['malignant', 'benign']`` (in that order); values are the
    integer counts of each class.
    """
    cancerdf = answer_one()
    # Map the numeric labels to their names, then count occurrences.
    target = cancerdf["target"].map({0:cancer.target_names[0],1:cancer.target_names[1]}).value_counts()
    # BUGFIX: reindex() returns a NEW Series (it is not in-place). The
    # original code discarded the result, so the Series came back ordered
    # by count (benign first) instead of ['malignant', 'benign'].
    target = target.reindex(['malignant', 'benign'])
    # The spec asks for a Series *named* 'target'.
    target.name = 'target'
    return target
# -

# ### Question 3
# Split the DataFrame into `X` (the data) and `y` (the labels).
#
# *This function should return a tuple of length 2:* `(X, y)`*, where*
# * `X`*, a pandas DataFrame, has shape* `(569, 30)`
# * `y`*, a pandas Series, has shape* `(569,)`.

# +
def answer_three():
    """Split the DataFrame into features ``X`` (569, 30) and labels ``y`` (569,)."""
    cancerdf = answer_one()
    # All columns except the last ('target') are features.
    X = cancerdf.iloc[:, :-1]
    y = cancerdf["target"]
    return (X, y)
# -

# ### Question 4
# Using `train_test_split`, split `X` and `y` into training and test sets `(X_train, X_test, y_train, and y_test)`.
#
# **Set the random number generator state to 0 using `random_state=0` to make sure your results match the autograder!**
#
# *This function should return a tuple of length 4:* `(X_train, X_test, y_train, y_test)`*, where*
# * `X_train` *has shape* `(426, 30)`
# * `X_test` *has shape* `(143, 30)`
# * `y_train` *has shape* `(426,)`
# * `y_test` *has shape* `(143,)`

# +
from sklearn.model_selection import train_test_split

def answer_four():
    """Return the (X_train, X_test, y_train, y_test) split expected by the grader."""
    X, y = answer_three()
    # test_size=143 yields the exact 426/143 split the spec describes;
    # random_state=0 is mandated by the assignment instructions.
    X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=143, random_state=0)
    return (X_train, X_test, y_train, y_test)
# -

# ### Question 5
# Using KNeighborsClassifier, fit a k-nearest neighbors (knn) classifier with `X_train`, `y_train` and using one nearest neighbor (`n_neighbors = 1`).
#
# *This function should return a * `sklearn.neighbors.classification.KNeighborsClassifier`.
# +
from sklearn.neighbors import KNeighborsClassifier

def answer_five():
    """Fit and return a 1-nearest-neighbor classifier on the training split."""
    X_train, X_test, y_train, y_test = answer_four()
    # n_neighbors=1 per the assignment instructions.
    myKNN = KNeighborsClassifier(n_neighbors = 1)
    myKNN.fit(X_train, y_train)
    return myKNN  # Return your answer
# -

# ### Question 6
# Using your knn classifier, predict the class label using the mean value for each feature.
#
# Hint: You can use `cancerdf.mean()[:-1].values.reshape(1, -1)` which gets the mean value for each feature, ignores the target column, and reshapes the data from 1 dimension to 2 (necessary for the predict method of KNeighborsClassifier).
#
# *This function should return a numpy array either `array([ 0.])` or `array([ 1.])`*

def answer_six():
    """Predict the class of the per-feature mean observation.

    Returns a length-1 numpy array (0. = malignant, 1. = benign).
    """
    cancerdf = answer_one()
    # Mean of each feature column (drop 'target'), reshaped to a single
    # 2-D sample as predict() requires.
    means = cancerdf.mean()[:-1].values.reshape(1, -1)
    myKNN = answer_five()
    # Leftover debug print() calls removed — the autograder only checks
    # the returned array, and stray stdout noise clutters the notebook.
    prediction = myKNN.predict(means)
    return prediction  # Return your answer

# ### Question 7
# Using your knn classifier, predict the class labels for the test set `X_test`.
#
# *This function should return a numpy array with shape `(143,)` and values either `0.0` or `1.0`.*

def answer_seven():
    """Predict class labels for the held-out test set; shape (143,)."""
    X_train, X_test, y_train, y_test = answer_four()
    knn = answer_five()
    predictions = knn.predict(X_test)
    return predictions  # Return your answer

# ### Question 8
# Find the score (mean accuracy) of your knn classifier using `X_test` and `y_test`.
#
# *This function should return a float between 0 and 1*

def answer_eight():
    """Return the mean accuracy of the 1-NN classifier on the test split."""
    X_train, X_test, y_train, y_test = answer_four()
    knn = answer_five()
    score = knn.score(X_test, y_test)
    return score  # Return your answer

# ### Optional plot
#
# Try using the plotting function below to visualize the different prediction scores between training and test sets, as well as malignant and benign cells.
# + def accuracy_plot(): import matplotlib.pyplot as plt # %matplotlib notebook X_train, X_test, y_train, y_test = answer_four() print(y_train) # Find the training and testing accuracies by target value (i.e. malignant, benign) mal_train_X = X_train[y_train==0] mal_train_y = y_train[y_train==0] ben_train_X = X_train[y_train==1] ben_train_y = y_train[y_train==1] mal_test_X = X_test[y_test==0] mal_test_y = y_test[y_test==0] ben_test_X = X_test[y_test==1] ben_test_y = y_test[y_test==1] knn = answer_five() scores = [knn.score(mal_train_X, mal_train_y), knn.score(ben_train_X, ben_train_y), knn.score(mal_test_X, mal_test_y), knn.score(ben_test_X, ben_test_y)] plt.figure() # Plot the scores as a bar chart bars = plt.bar(np.arange(4), scores, color=['#4c72b0','#4c72b0','#55a868','#55a868']) # directly label the score onto the bars for bar in bars: height = bar.get_height() plt.gca().text(bar.get_x() + bar.get_width()/2, height*.90, '{0:.{1}f}'.format(height, 2), ha='center', color='w', fontsize=11) # remove all the ticks (both axes), and tick labels on the Y axis plt.tick_params(top='off', bottom='off', left='off', right='off', labelleft='off', labelbottom='on') # remove the frame of the chart for spine in plt.gca().spines.values(): spine.set_visible(False) plt.xticks([0,1,2,3], ['Malignant\nTraining', 'Benign\nTraining', 'Malignant\nTest', 'Benign\nTest'], alpha=0.8); plt.title('Training and Test Accuracies for Malignant and Benign Cells', alpha=0.8) # - # Uncomment the plotting function to see the visualization. # # **Comment out** the plotting function when submitting your notebook for grading. # + #accuracy_plot() # -
Michigan_AppliedDataScienceWithPython/AppliedMachineLearning/Assignment+1.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 2 # language: python # name: python2 # --- import tensorflow as tf import tensorflow_hub as hub from riptide.data.UCF101.dataset import train_dataset, test_dataset #tf.logging.set_verbosity(tf.logging.ERROR) def model_fn(features, labels, mode): i3d = hub.Module("https://tfhub.dev/deepmind/i3d-kinetics-400/1") logits = i3d(features) predictions = { "classes": tf.argmax(input=logits, axis=1), "probabilities": tf.nn.softmax(logits, name="softmax_tensor") } if mode == tf.estimator.ModeKeys.PREDICT: return tf.estimator.EstimatorSpec(mode=mode, predictions=predictions) # otherwise need to compute some more loss = tf.losses.sparse_softmax_cross_entropy(labels=labels, logits=logits) # configure train op if we need to if mode == tf.estimator.ModeKeys.TRAIN: optimizer = tf.train.GradientDescentOptimizer(learning_rate=0.001) #train_op = optimizer.minimize(loss=loss, global_step=tf.train.get_global_step()) train_op = tf.no_op() return tf.estimator.EstimatorSpec(mode=mode, loss=loss, train_op=train_op) # otherwise we must be doing eval eval_metric_ops = { "accuracy": tf.metrics.accuracy(labels=labels, predictions=predictions["classes"]) } return tf.estimator.EstimatorSpec(mode=mode, loss=loss, eval_metric_ops=eval_metric_ops) # + def train_input_fn(): ds = train_dataset() ds = ds.cache().shuffle(buffer_size=5000).batch(1) ds = ds.prefetch(tf.contrib.data.AUTOTUNE) ds = ds.repeat() return ds def eval_input_fn(): ds = test_dataset() ds = ds.batch(1) return ds # - testimator = tf.estimator.Estimator(model_fn=model_fn, model_dir="/data/jwfromm/models/test") testimator.evaluate(input_fn=eval_input_fn)
notebooks/UCF101Estimator.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# + [markdown]
# ### Import

# +
# Project-local helpers: dataset loading/cutting, feature extraction, and
# the genre label list shared across the notebook.
from music_classifier_utils import Cutter, Loader, MusicClassifier, MusicFeaturesExtractor, genres
from musicnet_utils import features
import musicnet_utils

# +
# This Python 3 environment comes with many helpful analytics libraries installed
# It is defined by the kaggle/python Docker image: https://github.com/kaggle/docker-python
# For example, here's several helpful packages to load

import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)

# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory

import os
import IPython
import librosa
from sklearn.model_selection import train_test_split, cross_val_score
from sklearn.ensemble import RandomForestClassifier
from sklearn import metrics
from matplotlib import pyplot as plt
from warnings import filterwarnings, resetwarnings
from multiprocessing import Pool
import joblib
from copy import deepcopy
from xgboost import XGBClassifier, XGBRFClassifier
from sklearn.decomposition import PCA
from sklearn.metrics import accuracy_score, log_loss
import seaborn as sns
import torch
from sklearn.preprocessing import LabelEncoder
from sklearn.manifold import TSNE
from sklearn.decomposition import TruncatedSVD
from sklearn.preprocessing import StandardScaler, RobustScaler, MinMaxScaler
from tqdm.notebook import tqdm

# Fixed seed used everywhere in the notebook for reproducibility.
SEED = int(1e9+7e7+17)

def seed_init():
    """Seed numpy and torch (CPU + CUDA) and force deterministic cuDNN."""
    np.random.seed(SEED)
    torch.manual_seed(SEED)
    torch.cuda.manual_seed(SEED)
    torch.backends.cudnn.deterministic = True

seed_init()

# for dirname, _, filenames in os.walk('/kaggle/input'):
#     for filename in filenames:
#         print(os.path.join(dirname, filename))

# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session

# +
# Silence librosa's soundfile fallback warning and other known-noisy
# warning categories for the duration of the notebook.
filterwarnings("ignore", message="PySoundFile failed. Trying audioread instead.")
filterwarnings("ignore", category=DeprecationWarning)
filterwarnings("ignore", category=np.VisibleDeprecationWarning)
filterwarnings("ignore", category=UserWarning)
n_jobs=os.cpu_count()
# filterwarnings("ignore")

# + [markdown]
# ### hist, plot

# +
def plot(*args, title='', xlabel='', ylabel='', **kwargs):
    """Wide (18x5) matplotlib line plot with optional title/axis labels."""
    plt.figure(figsize=(18,5))
    plt.plot(*args, **kwargs)
    plt.title(title)
    plt.xlabel(xlabel)
    plt.ylabel(ylabel)
    plt.show()

def hist(*args, **kwargs):
    """Wide (18x5) matplotlib histogram; args forwarded to plt.hist."""
    plt.figure(figsize=(18,5))
    plt.hist(*args, **kwargs)
    plt.show()

# +
# Select GPU when available; used by the torch models below.
device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
print("Device:",torch.cuda.get_device_name(device.index) if device.type=="cuda" else device.type)

# + [markdown]
# ### global parameters

# +
# Default feature-vector length and number of genre classes.
n_input = 57
n_output = 10

# + [markdown]
# ### Activator, Kernel

# + jupyter={"source_hidden": true} papermill={"duration": 0.047609, "end_time":
"2021-11-17T20:57:24.832823", "exception": false, "start_time": "2021-11-17T20:57:24.785214", "status": "completed"} tags=[] class Activator(torch.nn.Module): def __init__(self): super().__init__() def forward(self, x): return torch.pow(torch.add(x,1),3) class Kernel(torch.nn.Module): def __init__(self, n_input:int, n_output:int, gamma:float=1): super().__init__() self.n_input, self.n_output = n_input, n_output tmp=torch.nn.Linear(self.n_input, self.n_output) self.weight = torch.nn.Parameter(data=tmp.weight) self.bias = torch.nn.Parameter(data=tmp.bias) # self.weight = torch.nn.Parameter(data=torch.randn((self.n_output, self.n_input), dtype=torch.float, device=device)) # self.bias = torch.nn.Parameter(data=torch.randn((self.n_output), dtype=torch.float, device=device)) # self.gamma = torch.nn.Parameter(data=torch.tensor([1], dtype=torch.float)) self.gamma=gamma def forward(self, input: torch.Tensor) -> torch.Tensor: # x = torch.nn.functional.linear(input, self.weight, self.bias) # return x # return torch.pow(torch.add(x,1),3) n,m,k = input.shape[0], self.n_input, self.n_output ones = torch.ones((n, k, m), dtype=torch.float, device=device) distances = torch.sum( torch.pow( (torch.matmul (ones, torch.diag_embed(input)) - self.weight.expand(n,k,m)), 2), dim=2) return torch.exp(-self.gamma*distances)-self.bias # + [markdown] papermill={"duration": 0.03331, "end_time": "2021-11-17T20:57:24.899360", "exception": false, "start_time": "2021-11-17T20:57:24.866050", "status": "completed"} tags=[] # ### ClassifierNet # + papermill={"duration": 0.068382, "end_time": "2021-11-17T20:57:25.001726", "exception": false, "start_time": "2021-11-17T20:57:24.933344", "status": "completed"} tags=[] class ClassifierNet(torch.nn.Module): def __init__(self, n_input:int=n_input, n_output:int=n_output, n_hidden=[256,128,64], dropout=0.1, act='elu')->None: super(ClassifierNet, self).__init__() self.encoder = LabelEncoder() self.encoder.fit(genres) params = {'n_input':n_input, 
'n_output':n_output, 'n_hidden':n_hidden, 'dropout':dropout, 'act':act} self.model = self.sequential(**params) params['n_output'] = 1 self.models = torch.nn.ModuleList([self.sequential(**params) for i in range(10)]) def sequential(self, n_input:int=n_input, n_output:int=n_output, n_hidden=[256,128,64], dropout=0.0, act='elu'): act = torch.nn.ELU if act == 'elu' else torch.nn.ReLU seq = torch.nn.Sequential( torch.nn.Linear(n_input, n_hidden[0]), # Kernel(n_input, n_hidden[0]), torch.nn.BatchNorm1d(n_hidden[0]), torch.nn.Dropout(p=dropout), act(), torch.nn.Linear(n_hidden[0], n_hidden[1]), # Kernel(n_hidden[0], n_hidden[1]), torch.nn.BatchNorm1d(n_hidden[1]), torch.nn.Dropout(p=dropout), act(), torch.nn.Linear(n_hidden[1], n_hidden[2]), # Kernel(n_hidden[1], n_hidden[2]), torch.nn.BatchNorm1d(n_hidden[2]), torch.nn.Dropout(p=dropout), act(), torch.nn.Linear(n_hidden[2], n_output), # Kernel(n_hidden[2], n_output), # torch.nn.BatchNorm1d(n_output), # act(), ) return seq def get_weights(self, weights, n_input, lb, ub): return torch.nn.parameter.Parameter((weights/np.sqrt(1/n_input)+1)/2*(ub-lb)+lb, requires_grad=True) def forward(self, x, x_frequencies=None): if len(x.shape)==1: x = x.reshape(1,-1) x = self.model(x) # x = torch.cat([self.models[i](x) for i in range(10)], dim=1) return x def predict_proba(self, x): if isinstance(x, pd.DataFrame): x = torch.tensor(x.values, dtype=torch.float, device=device) if isinstance(x, np.ndarray): x = torch.tensor(x, dtype=torch.float, device=device) sm = torch.nn.Softmax() if not self.training: x = torch.utils.data.TensorDataset(x) generator = torch.utils.data.DataLoader(x, batch_size=100, shuffle=False, drop_last=False) x = torch.cat([self.forward(X_batch).data for (X_batch,) in generator], dim=0) return sm(x) return sm(self.forward(x)) def predict_index(self, x): return torch.tensor(self.predict_proba(x).argmax(axis=1), device=device) def predict(self, x): return self.encoder.inverse_transform(self.predict_index(x).cpu()) def 
fit(self, X_train, y_train, X_test, y_test, train_minutes=1, verbose=1, verbose_interval=200, opt_params=None, sched_params=None, epochs=100, batch_size=None): device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu') train_loss_history = [] test_loss_history = [] test_acc_history = [] self.best_loss = None self.best_params = None y_train = self.encoder.transform(y_train) y_test = self.encoder.transform(y_test) order_train = [i for i in range(X_train.shape[0])] order_test = [i for i in range(X_test.shape[0])] if batch_size is None: batch_size = X_train.shape[0]//40 # batch_size = 128 self.to(device) loss = torch.nn.CrossEntropyLoss() X_train = X_train.to(device) y_train = torch.tensor(y_train).to(device) X_test = X_test.to(device) y_test = torch.tensor(y_test).to(device) train_data = torch.utils.data.TensorDataset(X_train, y_train) test_data = torch.utils.data.TensorDataset(X_test, y_test) # train_speed = 17/200*60 #epochs per minute train_speed = 2104/26.8*60 # epochs = int(train_minutes*train_speed) if opt_params is None: opt_params = dict(lr=0.1, betas=(0.9, 0.999), eps=1e-08, weight_decay=0, amsgrad=False) if sched_params is None: sched_params = dict(milestones=[20,70], gamma=0.1) optimizer = torch.optim.Adam(self.parameters(), **opt_params) scheduler = torch.optim.lr_scheduler.MultiStepLR(optimizer, **sched_params) for epoch in tqdm(range(epochs)): generator = torch.utils.data.DataLoader(train_data, batch_size=batch_size, shuffle=True, drop_last=True) self.train() torch.cuda.empty_cache() for X_batch, y_batch in generator: optimizer.zero_grad() preds = net.forward(X_batch) loss_value = loss(preds, y_batch) train_loss_history.append(loss_value.data.cpu()) loss_value.backward() optimizer.step() scheduler.step() self.eval() generator = torch.utils.data.DataLoader(test_data, batch_size=min(X_test.shape[0],batch_size), shuffle=True, drop_last=True) test_results = [] test_results_acc = [] for X_batch, y_batch in generator: with torch.no_grad(): 
test_preds = self.forward(X_batch) test_results.append(loss(test_preds, y_batch).data.cpu()) test_results_acc.append(accuracy_score(y_batch.cpu(), test_preds.argmax(axis=1).cpu())) test_loss_history.append(sum(test_results)/len(test_results)) test_acc_history.append(sum(test_results_acc)/len(test_results_acc)) if(verbose==1 and (epoch+1)%verbose_interval==0): print(train_loss_history[-1].item(), test_loss_history[-1].item()) if(self.best_loss is None or test_loss_history[-1] < self.best_loss): self.best_loss = test_loss_history[-1] self.best_params = {'net_state_dict': self.state_dict(), 'optimizer_state_dict': optimizer.state_dict(), 'scheduler_state_dict': scheduler.state_dict(), 'loss': self.best_loss} return (train_loss_history, test_loss_history, test_acc_history) # + [markdown] papermill={"duration": 0.032176, "end_time": "2021-11-17T20:57:25.066643", "exception": false, "start_time": "2021-11-17T20:57:25.034467", "status": "completed"} tags=[] # ### Features generation # + jupyter={"source_hidden": true} papermill={"duration": 0.038812, "end_time": "2021-11-17T20:57:25.138441", "exception": false, "start_time": "2021-11-17T20:57:25.099629", "status": "completed"} tags=[] # # %%time # extractor = MusicFeaturesExtractor() # cutter = Cutter() # loader = Loader() # dataset = loader.load_tracks('../input/gtzan-dataset-music-genre-classification/Data/genres_original') # dataset, labels = cutter.cut_dataset(dataset, [5,10,15], n_jobs=n_jobs, default_labels=musicnet_utils.default_labels) # print(dataset.shape, len(labels)) # X = extractor.extract(dataset) # Y = np.array(labels.copy()) # output_features = pd.concat([X,pd.DataFrame(Y)], axis=1) # output_features.columns = features + ["label"] # output_features.to_csv('./features_5_10_15.csv', index=False) # + jupyter={"source_hidden": true} papermill={"duration": 1.417783, "end_time": "2021-11-17T20:57:26.589346", "exception": false, "start_time": "2021-11-17T20:57:25.171563", "status": "completed"} tags=[] rfc = 
joblib.load('../input/musicclassifier/MusicClassifier.rfc')

# +
# Package the classifier for reuse and pickle it with joblib.
package = MusicClassifier()
# Ensure the pickled object resolves to the utils module on load.
MusicClassifier.__module__="music_classifier_utils"
joblib.dump(package, './MusicClassifier.spgn', compress=True)

# +
# %%time
extractor = MusicFeaturesExtractor()
cutter = Cutter()
loader = Loader()
# Instantiate the packaged classifier with a worker count.
net = package(os.cpu_count())
# net.predict_proba_display(X_path = "../input/musicpack2")

# + [markdown]
# ### X_train_, y_train_, X_test_, y_test_

# +
# %%time
# (The commented-out code below regenerated the GTZAN train/test feature
# split — load tracks, cut each into multiple segment lengths, extract
# features — and dumped it to 'gtzan_train_test'. Kept disabled; the
# precomputed dump is loaded instead.)
# extractor = MusicFeaturesExtractor()
# cutter = Cutter()
# loader = Loader()
# data = loader.load_tracks('../input/gtzan-dataset-music-genre-classification/Data/genres_original')
# default_labels = musicnet_utils.default_labels
# data, labels = cutter.cut_dataset(data, [-1], n_jobs=n_jobs, default_labels=default_labels)
# print(data.shape, len(labels))
# X_train, X_test, y_train, y_test = train_test_split(data, labels, test_size=0.33, random_state=SEED, shuffle=True)
# X_train, y_train = cutter.cut_dataset(X_train, [-1, -1/2, -1/4, -1/8, -1/10, -1/15], default_labels=y_train)
# X_train = extractor.extract_batch(X_train)
# X_test, y_test = cutter.cut_dataset(X_test, [-1, -1/2, -1/4, -1/8, -1/10, -1/15], default_labels=y_test)
# X_test = extractor.extract_batch(X_test)
# print(X_train.shape, len(y_train), X_test.shape, len(y_test))
# joblib.dump((X_train, y_train, X_test, y_test), './gtzan_train_test', compress=True)

X_train_, y_train_, X_test_, y_test_ = joblib.load('../input/gtzan-features/gtzan_train_test')

# + [markdown]
# ### X_valid_, y_valid_, names_valid_

# +
# (A commented-out cell here originally regenerated the musicpack4
# validation/test feature CSVs from raw tracks; disabled in favor of the
# precomputed CSVs loaded below.)

# Validation set: shuffle deterministically, then split off features,
# labels, and source-file names.
dataset = pd.read_csv('../input/gtzan-features/musicpack4_names_features_valid.csv')
dataset = dataset.sample(frac=1, random_state=SEED).reset_index(drop=True)
X_valid_ = dataset.drop(columns=['name', 'label'])
y_valid_ = dataset.loc[:,'label']
names_valid_ = dataset.loc[:,'name']

# dataset = pd.read_csv('../input/gtzan-features/musicpack4_names_features_test.csv')
# dataset = dataset.sample(frac=1, random_state=SEED).reset_index(drop=True)
# X_test = dataset.drop(columns=['name', 'label'])
# y_test = dataset.loc[:,'label']
# names_test = dataset.loc[:,'name']

# + [markdown]
# ### Scaling

# +
# (A commented-out RFECV feature-selection pass over an XGBClassifier lived
# here, together with the boolean support mask it produced; disabled —
# all features are kept.)

# Fit the scaler on the training split only, then apply to all splits.
scaler = RobustScaler()
scaler.fit(X_train_)
# scaler.fit(np.concatenate((X_train_, X_test_), axis=0))
X_train_ = scaler.transform(X_train_)
X_test_, X_valid_ = scaler.transform(X_test_), scaler.transform(X_valid_)

# + [markdown]
# ### Net parameters selection

# +
# (A commented-out hyperparameter sweep lived here: for several TruncatedSVD
# component counts it trained a ClassifierNet with a staged learning-rate
# schedule, recorded test/validation accuracy, and plotted loss curves and a
# per-genre confusion heatmap. Disabled.)

# + [markdown]
# ### Load & Scale X_train__, X_test__, X_valid, names_valid__

# +
# Alternative feature set: the GTZAN-provided 3-second feature CSV.
df1 = pd.read_csv('../input/gtzan-dataset-music-genre-classification/Data/features_3_sec.csv')
df1 = df1.drop(labels='filename',axis=1)
y = df1.iloc[:, -1]

dataset = pd.read_csv('../input/gtzan-features/musicpack4_names_features_valid.csv').sample(frac=1, random_state=SEED).reset_index(drop=True)
X_valid__ = dataset.drop(columns=['name', 'label'])
y_valid__ = dataset.loc[:,'label']
names_valid__ = dataset.loc[:,'name']

# NOTE(review): no random_state here, so this split is not reproducible
# across runs (unlike the SEED-ed sampling above) — confirm intentional.
X_train__, X_test__, y_train__, y_test__ = train_test_split(np.array(df1.iloc[:, 1:-1], dtype = float), y, test_size=0.33)

scaler2 = RobustScaler()
scaler2.fit(X_train__)
# scaler2.fit(np.concatenate((X_train__, X_test__), axis=0))
X_train__ = scaler2.transform(X_train__)
X_test__, X_valid__ = scaler2.transform(X_test__), scaler2.transform(X_valid__)
y_train__, y_test__ = list(y_train__), list(y_test__)

# + [markdown]
# ### X, names, y

# +
# Held-out musicpack4 test set, scaled with the GTZAN-trained scaler.
df2 = pd.read_csv('../input/gtzan-features/musicpack4_names_features_test.csv')
X,names,y = df2.drop(columns=['name','label']), df2['name'], df2['label']
X = scaler.transform(X)

# + [markdown]
# ### Train

# +
# %%time
########## Net
seed_init()
scores = []
accuracies = []
classifierNetParams = {'n_hidden':[128, 64,16], 'dropout':0, 'act':'elu'}
# classifierNetParams = {'n_hidden':[256,128,64], 'dropout':0.0, 'act':'elu'}
# classifierNetParams = {'n_hidden':[4096, 1024, 256], 'dropout':0.0, 'act':'elu'}
net = ClassifierNet(**classifierNetParams)
X_train = torch.tensor(X_train_, dtype=torch.float)
y_train = y_train_
X_test = torch.tensor(X_test_, dtype=torch.float)
y_test = y_test_
adam_params = dict(lr=1e-2, betas=(0.9, 0.999), eps=1e-08, weight_decay=0, amsgrad=False)
sched_params = dict(milestones=[2*20,9*20,20*20], gamma=0.1) train_loss, test_loss, test_acc = net.fit(X_train, y_train, X_test, y_test, train_minutes=1, verbose=1, epochs=100, verbose_interval=300, opt_params = adam_params, sched_params=sched_params, batch_size=669) print("Loss: {0}".format(min(test_loss))) print("Accuracy: {0}".format(accuracy_score(y_test, net.predict(X_test.to(device))))) plot(train_loss, title='Train loss', xlabel='Batch number', ylabel='Loss') plot(test_loss, title='Test loss', xlabel='Epoch number', ylabel='Loss') plot(test_acc, title='Test accuracy', xlabel='Epoch number', ylabel='Accuracy') X_valid=X_valid_ y_valid=y_valid_ results = net.predict(X_valid) print(accuracy_score(y_valid, results)) scores = pd.DataFrame(data=np.zeros((10,10)), index=genres, columns=genres) for valid,res in zip(y_valid, results): scores[valid][res] += 1 sns.heatmap(scores) plt.show() results = net.predict(X) print("Accuracy test2: {0}".format(accuracy_score(y, results))) scores = pd.DataFrame(data=np.zeros((10,10)), index=genres, columns=genres) for valid,res in zip(y, results): scores[valid][res] += 1 sns.heatmap(scores) plt.show() # + [markdown] papermill={"duration": 0.040765, "end_time": "2021-11-17T20:59:24.150093", "exception": false, "start_time": "2021-11-17T20:59:24.109328", "status": "completed"} tags=[] # ### Test best net # + papermill={"duration": 2.18675, "end_time": "2021-11-17T20:59:26.377201", "exception": false, "start_time": "2021-11-17T20:59:24.190451", "status": "completed"} tags=[] seed_init() X_valid=X_valid_ y_valid=y_valid_ # net_best = net net_best = ClassifierNet(**classifierNetParams).to(device) net_best.load_state_dict(net.best_params['net_state_dict']) # print("Loss: {0}".format(net.best_loss.item())) results=net_best.predict(X_test.to(device)) print("Accuracy test: {0}".format(accuracy_score(y_test, results))) scores = pd.DataFrame(data=np.zeros((10,10)), index=genres, columns=genres) for test,res in zip(y_test, results): 
scores[test][res] += 1 sns.heatmap(scores) plt.show() results = net_best.predict(X_valid) print("Accuracy valid: {0}".format(accuracy_score(y_valid, results))) scores = pd.DataFrame(data=np.zeros((10,10)), index=genres, columns=genres) for valid,res in zip(y_valid, results): scores[valid][res] += 1 sns.heatmap(scores) plt.show() # print(net.best_params) results = net_best.predict(X) print("Accuracy test2: {0}".format(accuracy_score(y, results))) scores = pd.DataFrame(data=np.zeros((10,10)), index=genres, columns=genres) for valid,res in zip(y, results): scores[valid][res] += 1 sns.heatmap(scores) plt.show() # + papermill={"duration": 0.065464, "end_time": "2021-11-17T20:59:26.486085", "exception": false, "start_time": "2021-11-17T20:59:26.420621", "status": "completed"} tags=[] net.best_params['loss'] # + [markdown] papermill={"duration": 0.042972, "end_time": "2021-11-17T20:59:26.572119", "exception": false, "start_time": "2021-11-17T20:59:26.529147", "status": "completed"} tags=[] # ### Parameters seceltion # + jupyter={"source_hidden": true} papermill={"duration": 0.050417, "end_time": "2021-11-17T20:59:26.665878", "exception": false, "start_time": "2021-11-17T20:59:26.615461", "status": "completed"} tags=[] # scores = [] # accuracies = [] # valids = [] # params = [] # param_all_pairs = [[nh1,nh2,nh3,dr,lr,ml1,ml2,ml3,gm,ep,bs,act] for nh1,nh2,nh3,lr,ml1,ml2,ml3,gm,ep,bs,act in # [(128,64,16,1e-2,20,50,90,0.5,200,669,'elu')] for dr in [i/100 for i in range(101)]] # cnt=1 # for (nh1,nh2,nh3,dr,lr,ml1,ml2,ml3,gm,ep,bs,act) in param_all_pairs: # seed_init() # torch.cuda.empty_cache() # classifierNetParams = {'n_hidden':[nh1,nh2,nh3], 'dropout':dr, 'act':act} # net = ClassifierNet(**classifierNetParams) # adam_params = dict(lr=lr, betas=(0.9, 0.999), eps=1e-08, weight_decay=0, amsgrad=False) # sched_params = dict(milestones=[ml1,ml2,ml3], gamma=gm) # train_loss, test_loss, test_acc = net.fit(X_train, y_train, X_test, y_test, train_minutes=1, verbose=0, epochs=ep, # 
verbose_interval=300, opt_params = adam_params, sched_params=sched_params, batch_size=bs) # print("#" + str(cnt)) # cnt+=1 # params.append("Parameters: n_hidden=[{0},{1},{2}], dropout={3}, lr={4}, milestones=[{5},{6},{7}], gamma={8}, epochs={9}, batch_size={10}, activation={11}" # .format(nh1,nh2,nh3,dr,lr,ml1,ml2,ml3,gm,ep,bs,act)) # print(params[-1]) # ### Best # scores.append(min(test_loss)) # print("Best loss: {0}".format(scores[-1])) # net_best = ClassifierNet(**classifierNetParams).to(device) # net_best.load_state_dict(net.best_params['net_state_dict']) # accuracies.append(accuracy_score(y_test, net_best.predict(X_test.to(device)))) # print("Test accuracy: {0}".format(accuracies[-1])) # results = net_best.predict(X_valid) # valids.append(accuracy_score(y_valid, results)) # print("Validation accuracy: {0}".format(valids[-1])) # ### Last # # scores.append(test_loss[-1]) # # print("Loss: {0}".format(scores[-1])) # # results = net.predict(X_test) # # accuracies.append(accuracy_score(y_test, results)) # # print("Test accuracy: {0}".format(accuracies[-1])) # # results = net.predict(X_valid) # # valids.append(accuracy_score(y_valid, results)) # # print("Validation accuracy: {0}".format(valids[-1])) # + jupyter={"source_hidden": true} papermill={"duration": 0.04885, "end_time": "2021-11-17T20:59:26.758404", "exception": false, "start_time": "2021-11-17T20:59:26.709554", "status": "completed"} tags=[] # torch.save(net.best_params, './net_0.8315_42.66_elu.net') # + jupyter={"source_hidden": true} papermill={"duration": 0.048714, "end_time": "2021-11-17T20:59:26.849692", "exception": false, "start_time": "2021-11-17T20:59:26.800978", "status": "completed"} tags=[] # gen = net.model.modules() # arr = gen.__next__() # print(arr) # hist(arr[8].weight.cpu().detach().reshape(-1).numpy(), bins=300) # # assert False # + [markdown] papermill={"duration": 0.042707, "end_time": "2021-11-17T20:59:26.935300", "exception": false, "start_time": "2021-11-17T20:59:26.892593", "status": 
"completed"} tags=[] # ### RFC # + _cell_guid="ece7fe63-2fa3-48c0-adf8-dadd7cd256b5" _uuid="28e60441-265c-430c-8b67-0ad1e0a1ae14" jupyter={"source_hidden": true} papermill={"duration": 0.048761, "end_time": "2021-11-17T20:59:27.027730", "exception": false, "start_time": "2021-11-17T20:59:26.978969", "status": "completed"} tags=[] # # %%time # dataset = pd.read_csv('../input/gtzan-features/features(scaled)_3_5_10_15_30_rebuilt.csv') # dataset = dataset.sample(frac=1, random_state=SEED).reset_index(drop=True) # X = dataset.drop(columns=['label']) # Y = dataset.loc[:,'label'] # print(X.shape, Y.shape) # scores = [] # scores_mx, scores_mn = [], [] # nums = [i for i in range(60, 420, 20)] # for num in nums: # rfc = RandomForestClassifier(n_estimators=num, random_state=SEED) # cv_scores = cross_val_score(rfc, X, Y, cv=5, n_jobs=4) # scores.append(cv_scores.mean()) # scores_mn.append(cv_scores.min()) # scores_mx.append(cv_scores.max()) # print(str(num) + " " + str(cv_scores.min()) + " " + str(cv_scores.mean()) + " " + str(cv_scores.max())) # plt.plot(nums, scores) # plt.plot(nums, scores_mn) # plt.plot(nums, scores_mx) # ### D 0.968 0.97 0.972 # ### S 0.9425 0.949 0.953 # ### N 0.946 0.947 0.9495 # + _cell_guid="ece7fe63-2fa3-48c0-adf8-dadd7cd256b5" _uuid="28e60441-265c-430c-8b67-0ad1e0a1ae14" jupyter={"source_hidden": true} papermill={"duration": 0.048583, "end_time": "2021-11-17T20:59:27.119157", "exception": false, "start_time": "2021-11-17T20:59:27.070574", "status": "completed"} tags=[] # # %%time # ########## XGBRFC # # dataset = pd.read_csv('../input/gtzan-features/features(scaled)_3_5_10_15_30_rebuilt.csv') # # dataset = dataset.sample(frac=1, random_state=SEED).reset_index(drop=True) # # X = dataset.drop(columns=['label']) # # Y = dataset.loc[:,'label'] # # # pca=PCA(n_components=num, svd_solver='auto', random_state=SEED) # # # X = pca.fit_transform(X) # # print(X.shape, Y.shape) # scores = [] # accuracies = [] # # scores_mx, scores_mn = [], [] # nums = [i for i 
in range(5,205,5)] # for num in nums: # xgbrfc = XGBRFClassifier(n_estimators=num, random_state=SEED, max_depth=21, eta=0.3, n_jobs=n_jobs, # subsample=0.7, reg_alpha=0, reg_lambda=num, booster='gbtree', predictor='auto', # tree_method='gpu_hist', eval_metric='mlogloss') # xgbrfc.fit(X_train, y_train) # preds = xgbrfc.predict_proba(X_test) # accuracies.append(accuracy_score(y_test, [genres[x] for x in preds.argmax(axis=1)])) # scores.append(log_loss(y_test, preds)) # print(str(num) + " " + str(scores[-1]) + " " + str(accuracies[-1])) # plot(nums, scores) # plot(nums,accuracies) # #56 - 0.87468671679198 0.877768673776926 0.8806106174527227 # + [markdown] papermill={"duration": 0.042673, "end_time": "2021-11-17T20:59:27.204297", "exception": false, "start_time": "2021-11-17T20:59:27.161624", "status": "completed"} tags=[] # ### XGBClassifier # + papermill={"duration": 4.283412, "end_time": "2021-11-17T20:59:31.530689", "exception": false, "start_time": "2021-11-17T20:59:27.247277", "status": "completed"} tags=[] # %%time ########## XGBC # dataset = pd.read_csv('../input/gtzan-features/features(scaled)_3_5_10_15_30_rebuilt.csv') # dataset = dataset.sample(frac=1, random_state=SEED).reset_index(drop=True) # X = dataset.drop(columns=['label']) # Y = dataset.loc[:,'label'] # print(X.shape, Y.shape) scores = [] accuracies = [] nums = [i for i in range(5,6)] for num in nums: seed_init() xgbc = XGBClassifier(n_estimators=50, random_state=SEED, max_depth=num, learning_rate=0.3, n_jobs=n_jobs, subsample=0.7, reg_alpha=0, reg_lambda=0, booster='gbtree', predictor='auto', tree_method='gpu_hist', eval_metric='mlogloss') xgbc.fit(X_train_, y_train) preds = xgbc.predict_proba(X_test.cpu().numpy()) accuracies.append(accuracy_score(y_test, [genres[x] for x in preds.argmax(axis=1)])) scores.append(log_loss(y_test, preds)) print(str(num) + " " + str(scores[-1]) + " " + str(accuracies[-1])) # plot(nums, scores) # plot(nums,accuracies) #lr=0.31+-0.05 # + jupyter={"source_hidden": true} 
papermill={"duration": 0.04928, "end_time": "2021-11-17T20:59:31.623893", "exception": false, "start_time": "2021-11-17T20:59:31.574613", "status": "completed"} tags=[] # # %%time # scores = [] # # nums = [i for i in range(60,420,20)] # for num in [1]: # dataset = pd.read_csv('../input/gtzan-features/features(scaled)_3_5_10_15_30_rebuilt.csv')[:-999] # dataset = dataset.sample(frac=1, random_state=SEED).reset_index(drop=True) # X = dataset.drop(columns=['label']) # Y = dataset.loc[:,'label'] # # classifier = XGBRFClassifier(n_estimators=56, random_state=SEED, max_depth=21, eta=0.3, n_jobs=n_jobs, # # subsample=0.7, reg_alpha=0, reg_lambda=0, booster='gbtree', predictor='auto', # # tree_method='gpu_hist', eval_metric='mlogloss') # classifier = XGBClassifier(n_estimators=180, random_state=SEED, max_depth=6, learning_rate=0.3, n_jobs=n_jobs, # subsample=0.7, reg_alpha=0, reg_lambda=0, booster='gbtree', predictor='auto', # tree_method='gpu_hist', eval_metric='mlogloss') # classifier.fit(X,Y) # dataset = pd.read_csv('../input/gtzan-features/features(scaled)_3_5_10_15_30_rebuilt.csv')[-999:] # dataset = dataset.sample(frac=1, random_state=SEED*2).reset_index(drop=True) # X = dataset.drop(columns=['label']) # Y = dataset.loc[:,'label'] # results = classifier.predict(X) # accuracy = accuracy_score(Y, results) # scores.append(accuracy) # print(str(num) + " " + str(scores[-1])) # plot(nums, scores) # ### xgbrfc - 0.97997997997998 # ### xgbc - 0.995995995995996 # + [markdown] papermill={"duration": 0.054787, "end_time": "2021-11-17T20:59:31.721379", "exception": false, "start_time": "2021-11-17T20:59:31.666592", "status": "completed"} tags=[] # ### Predict test dataset # + papermill={"duration": 35.612873, "end_time": "2021-11-17T21:00:07.380441", "exception": false, "start_time": "2021-11-17T20:59:31.767568", "status": "completed"} tags=[] # dataset = pd.read_csv('../input/gtzan-features/features(scaled)_3_5_10_15_30_rebuilt.csv') # dataset = dataset.sample(frac=1, 
random_state=SEED).reset_index(drop=True) # X_train = dataset.drop(columns=['label']) # Y_train = dataset.loc[:,'label'] seed_init() classifier = XGBClassifier(n_estimators=200, random_state=SEED, max_depth=12, learning_rate=0.1, n_jobs=n_jobs, subsample=0.7, reg_alpha=0, reg_lambda=0, booster='gbtree', predictor='auto', tree_method='gpu_hist', eval_metric='mlogloss') classifier.fit(X_train_,y_train_) results = classifier.predict(X) accuracy = accuracy_score(y, results) print(accuracy) scores = pd.DataFrame(data=np.zeros((10,10)), index=genres, columns=genres) for valid,res in zip(y, results): # print(valid,res) scores[valid][res] += 1 sns.heatmap(scores) plt.show() # + [markdown] papermill={"duration": 0.043955, "end_time": "2021-11-17T21:00:07.468690", "exception": false, "start_time": "2021-11-17T21:00:07.424735", "status": "completed"} tags=[] # ### Predict test dataset with the trained net # + papermill={"duration": 2.535878, "end_time": "2021-11-17T21:00:10.048374", "exception": false, "start_time": "2021-11-17T21:00:07.512496", "status": "completed"} tags=[] seed_init() # X_valid=X_valid_ # y_valid=y_valid_ # net_best = net net_best = ClassifierNet(**classifierNetParams).to(device) net_best.load_state_dict(net.best_params['net_state_dict']) results = net_best.predict_proba(X).cpu().data.numpy() print("Accuracy: {0}".format(accuracy_score(y, [genres[el] for el in np.argmax(results,axis=1)]))) # for i in range(X_valid.shape[0]): # results_dataframe = pd.DataFrame([results[i]], columns=genres) # print(str(i+1) + "\t" + genres[np.argmax(results[i])] + "\t" + names[i]) # display(results_dataframe) results = [genres[el] for el in np.argmax(results,axis=1)] scores = pd.DataFrame(data=np.zeros((10,10)), index=genres, columns=genres) for valid,res in zip(y, results): # print(valid,res) scores[valid][res] += 1 sns.heatmap(scores) plt.show() # + jupyter={"source_hidden": true} papermill={"duration": 0.057446, "end_time": "2021-11-17T21:00:10.158362", "exception": 
false, "start_time": "2021-11-17T21:00:10.100916", "status": "completed"} tags=[] # # %%time # results = classifier.predict_proba(X_valid) # for i in range(X_valid.shape[0]): # results_dataframe = pd.DataFrame([results[i]], columns=genres) # print(str(i+1) + "\t" + genres[np.argmax(results[i])] + "\t" + names_valid[i]) # display(results_dataframe) # + jupyter={"source_hidden": true} papermill={"duration": 0.057712, "end_time": "2021-11-17T21:00:10.266663", "exception": false, "start_time": "2021-11-17T21:00:10.208951", "status": "completed"} tags=[] # # %%time # extractor = MusicFeaturesExtractor() # cutter = Cutter() # loader = Loader() # data_test, names = loader.load_tracks('../input/musicpack1', get_names=True) # data_test = extractor.extract(data_test) # + jupyter={"source_hidden": true} papermill={"duration": 0.05622, "end_time": "2021-11-17T21:00:10.373909", "exception": false, "start_time": "2021-11-17T21:00:10.317689", "status": "completed"} tags=[] # # %%time # results = classifier.predict_proba(data_test) # for i in range(data_test.shape[0]): # results_dataframe = pd.DataFrame([results[i]], columns=genres) # print(str(i+1) + "\t" + genres[np.argmax(results[i])] + "\t" + names[i]) # display(results_dataframe)
music-classifier.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + from datetime import datetime, date import math from IPython.display import Markdown as md import numpy as np from matplotlib import pyplot as plt from scipy.optimize import curve_fit from astropy import units as u from astropy.coordinates import (SkyCoord, EarthLocation, AltAz, HADec) import astropy.coordinates as coord from astropy.time import Time plt.rcParams['figure.figsize'] = [16,8] # %config IPCompleter.greedy=True # - # ## Sideral Time # + longitude=10.9836 year = datetime.utcnow().year month = datetime.utcnow().month day = datetime.utcnow().day hour = datetime.utcnow().hour minute = datetime.utcnow().minute second = datetime.utcnow().second UT = hour + minute / 60 + second / 3600 deltaJulianDays = julianDay(year, month, day) - julianDay(2000, 1, 1) - 0.5 deltaJulian = deltaJulianDays + hour/24 + minute/60/24 + second/3600/24 sideralTime = ((100.46 + 0.985647 * deltaJulian + longitude + 15*UT) % 360) / 15 sideralHour = math.floor(sideralTime) sideralMinute = math.floor((sideralTime - sideralHour) * 60) sideralSecond = ((sideralTime - sideralHour) * 60 - sideralMinute) * 60 print(f'UTC {hour:02d}:{minute:02d}:{math.floor(second):02d} => {sideralTime} => J2000 {sideralHour:02d}:{sideralMinute:02d}:{sideralSecond:.2f}') # + sin = math.sin cos = math.cos """Calculates the Julian Day from a given gregorian date""" def julianDay (year, month, day): if month in [1, 2]: year -= 1; month += 12; A = math.floor(year / 100) B = 2 - A + math.floor(A / 4); return math.floor(365.25 * (year + 4716)) + math.floor(30.6001 * (month + 1)) + day + B - 1524.5 # http://www2.arnes.si/~gljsentvid10/sidereal.htm def localSiderealTime(year, month, day, hour, minute, second, longitude): deltaJulian = julianDay(year, month, day) - julianDay(2000, 1, 1) - 0.5 + 
# NOTE(review): julianDay/localSiderealTime are stated here in full so this
# block is self-contained (the cell above contains the same definitions).
# Trigonometric calls use math.sin/math.cos directly instead of the
# module-level `sin`/`cos` aliases, so these helpers also work when the
# alias assignments were not executed.


def julianDay(year, month, day):
    """Return the Julian Day number for a Gregorian date at 00:00 UT.

    Standard Meeus-style algorithm: January and February are treated as
    months 13/14 of the previous year; B is the Gregorian leap-year
    correction.
    """
    if month in [1, 2]:
        year -= 1
        month += 12
    A = math.floor(year / 100)
    B = 2 - A + math.floor(A / 4)
    return math.floor(365.25 * (year + 4716)) + math.floor(30.6001 * (month + 1)) + day + B - 1524.5


# http://www2.arnes.si/~gljsentvid10/sidereal.htm
def localSiderealTime(year, month, day, hour, minute, second, longitude):
    """Return the local mean sidereal time in hours [0, 24).

    `longitude` is in degrees, east positive.  Uses the GMST polynomial
    from the link above, referenced to the J2000.0 epoch.
    """
    deltaJulian = julianDay(year, month, day) - julianDay(2000, 1, 1) - 0.5 + hour/24 + minute/60/24 + second/3600/24
    julianCenturies = deltaJulian / 36525.
    # Fold into [0, 360) degrees, then convert to hours.
    return ((280.46061837 + 360.98564736629 * deltaJulian + 0.000388 * julianCenturies**2 + longitude) % 360) / 15


def printSiderealTime(siderealTime):
    """Print a sidereal time given in fractional hours as HH:MM:SS."""
    siderealHour = math.floor(siderealTime)
    siderealMinute = math.floor((siderealTime - siderealHour) * 60)
    siderealSecond = math.floor(((siderealTime - siderealHour) * 60 - siderealMinute) * 60)
    print(f'Sidereal Time (J2000) {siderealHour:02d}:{siderealMinute:02d}:{siderealSecond:02d}')


def rad(deg):
    """Convert degrees to radians.  (Parameter name shadows deg() locally.)"""
    return deg * math.pi / 180.


def deg(rad):
    """Convert radians to degrees.  (Parameter name shadows rad() locally.)"""
    return rad * 180. / math.pi


# http://jonvoisey.net/blog/2018/07/data-converting-alt-az-to-ra-dec-example/
def horizontalToEqatorial(azimuth, altitude, latitude, localSiderealTimeDegrees):
    """Convert horizontal (azimuth/altitude) to equatorial (RA/Dec).

    All inputs and outputs are in degrees.  NOTE(review): the single
    acos() branch means the returned RA is not normalised into [0, 360)
    for every azimuth; results match the linked worked example.  (The
    original "Eqatorial" spelling is kept because other cells call it.)
    """
    az = rad(azimuth)
    alt = rad(altitude)
    lat = rad(latitude)
    dec = math.asin(math.sin(lat) * math.sin(alt) + math.cos(lat) * math.cos(alt) * math.cos(az))
    ra = localSiderealTimeDegrees - math.acos((math.sin(alt) - math.sin(lat) * math.sin(dec)) / (math.cos(lat) * math.cos(dec))) * 180 / math.pi
    # returns all values in degrees
    return (ra, deg(dec))


# http://www.stargazing.net/kepler/altaz.html
def equatorialToHorizontal(ra, declination, latitude, localSiderealTimeDegrees):
    """Convert equatorial (RA/Dec) to horizontal (azimuth/altitude).

    All inputs and outputs are in degrees; the sin(hourAngle) test picks
    the correct azimuth quadrant.
    """
    hourAngle = localSiderealTimeDegrees - ra
    hourAngle = rad(hourAngle) if hourAngle >= 0 else rad(hourAngle + 360)
    dec = rad(declination)
    lat = rad(latitude)
    altitude = math.asin(math.sin(dec) * math.sin(lat) + math.cos(dec) * math.cos(lat) * math.cos(hourAngle))
    A = math.acos((math.sin(dec) - math.sin(altitude) * math.sin(lat)) / (math.cos(altitude) * math.cos(lat)))
    azimuth = A if math.sin(hourAngle) < 0 else 2 * math.pi - A
    # returns all values in degrees
    return (deg(azimuth), deg(altitude))


def Hd2ae(ra, declination, latitude, localSiderealTimeDegrees):
    """Equatorial (RA/Dec) to horizontal (azimuth/elevation), degrees.

    Derived from iauHd2ae from www.iausofa.org: builds the target's unit
    vector in the horizon frame and reads azimuth/elevation off with
    atan2, which avoids the quadrant handling of the acos formulation.
    """
    hourAngle = localSiderealTimeDegrees - ra
    ha = rad(hourAngle) if hourAngle >= 0 else rad(hourAngle + 360)
    dec = rad(declination)
    phi = rad(latitude)
    sh, ch = math.sin(ha), math.cos(ha)
    sd, cd = math.sin(dec), math.cos(dec)
    sp, cp = math.sin(phi), math.cos(phi)
    # Unit-vector components in the horizon frame.
    x = - ch * cd * sp + sd * cp
    y = - sh * cd
    z = ch * cd * cp + sd * sp
    r = math.sqrt(x * x + y * y)
    a = math.atan2(y, x) if r != 0 else 0
    az = a + 2 * math.pi if a < 0 else a  # fold azimuth into [0, 2*pi)
    el = math.atan2(z, r)
    return (deg(az), deg(el))


def Ae2hd(azimuth, altitude, latitude, localSiderealTimeDegrees):
    """Horizontal (azimuth/altitude) to equatorial (RA/Dec), degrees.

    Derived from iauAe2hd from www.iausofa.org; inverse of Hd2ae.
    NOTE(review): the returned RA is not folded into [0, 360).
    """
    az = rad(azimuth)
    el = rad(altitude)
    phi = rad(latitude)
    sa, ca = math.sin(az), math.cos(az)
    se, ce = math.sin(el), math.cos(el)
    sp, cp = math.sin(phi), math.cos(phi)
    x = - ca * ce * sp + se * cp
    y = - sa * ce
    z = ca * ce * cp + se * sp
    r = math.sqrt(x * x + y * y)
    ha = math.atan2(y, x) if r != 0 else 0
    dec = math.atan2(z, r)
    ra = localSiderealTimeDegrees - deg(ha)
    return (ra, deg(dec))
# -

# # Sample Calculations and Testcases

# ## Julian Day

res = julianDay(2000, 1, 1)  # 00:00 UTC
assert(math.isclose(res, 2451544.5, abs_tol=0.01))

res = julianDay(2021, 12, 23)  # 00:00 UTC
assert(math.isclose(res, 2459571.5, abs_tol=0.01))

res = julianDay(2025, 7, 13)  # 00:00 UTC
assert(math.isclose(res, 2460869.5, abs_tol=0.01))

# ## Sidereal Time

# Test case: expected result: 304.80762° from http://www.stargazing.net/kepler/altaz.html
res = localSiderealTime(1998, 8, 10, 23, 10, 0, -1.9166667) * 360 / 24  # convert to degrees
assert(math.isclose(res, 304.80762, rel_tol=0.0001))

# Test case: expected result: 174.77457° from http://www2.arnes.si/~gljsentvid10/sidereal.htm
res = localSiderealTime(1994, 6, 16, 18, 0, 0, 0) * 360 / 24  # convert to degrees
assert(math.isclose(res, 174.77457, rel_tol=0.0001))

# Test case: expected result: LST=06:39:00
res = localSiderealTime(2021, 12, 23, 8, 30, 34, -120) * 360 / 24  # convert to degrees
assert(math.isclose(res, 99.75, rel_tol=0.0001))

# Test case: expected result: LST=02:22:54
res = localSiderealTime(2025, 7, 13, 6, 13, 22, 11) * 360 / 24  # convert to degrees
assert(math.isclose(res, 35.7267, rel_tol=0.0001))

# ## Coordinate Transform

# Testcase #1: expected result (RA=297.92, DEC=8.93)
from http://jonvoisey.net/blog/2018/07/data-converting-alt-az-to-ra-dec-example/ res = horizontalToEqatorial(azimuth=180, altitude=60.34, latitude=38.59, localSiderealTimeDegrees=297.93) assert(math.isclose(res[0], 297.92, rel_tol=0.0001)) assert(math.isclose(res[1], 8.93, rel_tol=0.0001)) # Testcase #2: expected result (RA=250.425, DEC=36.4667) from http://www.stargazing.net/kepler/altaz.html res = horizontalToEqatorial(azimuth=269.14634, altitude=49.169122, latitude=52.5, localSiderealTimeDegrees=304.80762) assert(math.isclose(res[0], 250.425, rel_tol=0.0001)) assert(math.isclose(res[1], 36.4667, rel_tol=0.0001)) # Testcase #3: expected result: (AZ=269.14634, ALT=49.169122) res = equatorialToHorizontal(ra=250.425, declination=36.467, latitude=52.5, localSiderealTimeDegrees=304.808) assert(math.isclose(res[0], 269.14634, rel_tol=0.0001)) assert(math.isclose(res[1], 49.169122, rel_tol=0.0001)) # Testcase #4: Betelgeuse (RA=05h55m10.30536s = 5.91953h = 88.7929°, DEC = +07°24′25.4304″ = 7.4071°) res = equatorialToHorizontal(ra=88.7929, declination=7.4071, latitude=48, localSiderealTimeDegrees=localSiderealTime(2021, 12, 23, 19, 14, 28, 11) * 360 / 24) res #assert(math.isclose(res[0], 269.14634, rel_tol=0.0001)) #assert(math.isclose(res[1], 49.169122, rel_tol=0.0001)) res = horizontalToEqatorial(azimuth=110.8093, altitude=27.2852, latitude=48, localSiderealTimeDegrees=localSiderealTime(2021, 12, 23, 19, 14, 28, 11) * 360 / 24) (res[0], res[1]) #assert(math.isclose(res[0], 250.425, rel_tol=0.0001)) #assert(math.isclose(res[1], 36.4667, rel_tol=0.0001)) Ae2hd(azimuth=110.8093, altitude=27.2852, latitude=48, localSiderealTimeDegrees=localSiderealTime(2021, 12, 23, 19, 14, 28, 11) * 360 / 24) localSiderealTime(2021, 12, 23, 19, 14, 28, 11) * 360 / 24 - 20.20605*360/24 + 360 (localSiderealTime(2021, 12, 23, 19, 14, 28, 11) - 20.2)%24 (89.0752) / 360 * 24 localSiderealTime(2021, 12, 23, 19, 14, 28, 11) # # Astropy Comparison # + observing_location = 
EarthLocation.from_geodetic(lon=11*u.deg, lat=48*u.deg) observing_date = Time('2021-12-23 19:14:28') altaz = AltAz(location=observing_location, obstime=observing_date) betelgeuse = SkyCoord(ra=88.7929 * u.deg, dec=7.4071 * u.deg) astropy = (betelgeuse.transform_to(altaz).az.deg, betelgeuse.transform_to(altaz).alt.deg) simple = equatorialToHorizontal(ra=88.7929, declination=7.4071, latitude=48, localSiderealTimeDegrees=localSiderealTime(2021, 12, 23, 19, 14, 28, 11) * 360 / 24) tuple(map(lambda i, j: i - j, astropy, simple)) # + observing_location = EarthLocation.from_geodetic(lon=-1.9166667*u.deg, lat=52.5*u.deg) observing_date = Time('1998-08-10 23:10:00') hadec = HADec(location=observing_location, obstime=observing_date) altaz = AltAz(location=observing_location, obstime=observing_date) m13 = SkyCoord(ra=16.695/24*360*u.deg, dec=36.466667*u.deg) astropy = (m13.transform_to(altaz).az.deg, m13.transform_to(altaz).alt.deg) simple = equatorialToHorizontal(ra=250.425, declination=36.467, latitude=52.5, localSiderealTimeDegrees=304.808) tuple(map(lambda i, j: i - j, astropy, simple)) # - # ## SOFA Library Comparison iau = Ae2hd(azimuth=269.14634, altitude=49.169122, latitude=52.5, localSiderealTimeDegrees=304.80762) simple = horizontalToEqatorial(azimuth=269.14634, altitude=49.169122, latitude=52.5, localSiderealTimeDegrees=304.80762) tuple(map(lambda i, j: i - j, iau, simple)) iau = Hd2ae(ra=250.425, declination=36.467, latitude=52.5, localSiderealTimeDegrees=304.808) simple = equatorialToHorizontal(ra=250.425, declination=36.467, latitude=52.5, localSiderealTimeDegrees=304.808) tuple(map(lambda i, j: i - j, iau, simple)) -5.2%2 # # Sensor Quality import numpy as np elevation_IMUPLUS = np.array([0.062, 0.75, 0.688, 0.625, 0.438, 0.25, 0.375, 0.188, 0.125, 0.125, 0.5, 0.5, 0.812, 0.75, -0.438, 0]) print(f"IMUPLUS n={np.size(elevation_IMUPLUS)}, mean={np.mean(elevation_IMUPLUS)}, variance={np.var(elevation_IMUPLUS)}, stddev={np.std(elevation_IMUPLUS)}, 
90%-percentile={np.percentile(elevation_IMUPLUS-np.mean(elevation_IMUPLUS), 90)}, min={np.min(elevation_IMUPLUS)}, max={np.max(elevation_IMUPLUS)}") elevation_M4G = np.array([0.312, 0.25, 0.125, 0.062, 0.062, -0.25, -0.125, -0.5, -0.688, -0.75, -0.75, -0.5, -1.188, -0.938, -0.938, -0.938, -0.938, -1.062, -1, -1.312, -1.312]) print(f"M4G n={np.size(elevation_M4G)}, mean={np.mean(elevation_M4G)}, variance={np.var(elevation_M4G)}, stddev={np.std(elevation_M4G)}, min={np.min(elevation_M4G)}, max={np.max(elevation_M4G)}") elevation_NDOF = np.array([0.25, 0.188, -0.375, 0.188, 0, -1, -0.438, 0.5, -0.188, -0.062, -0.312, 0.062, 0.188]) print(f"NDOF n={np.size(elevation_NDOF)}, mean={np.mean(elevation_NDOF)}, variance={np.var(elevation_NDOF)}, stddev={np.std(elevation_NDOF)}, min={np.min(elevation_NDOF)}, max={np.max(elevation_NDOF)}") # => Elevation is best obtained via IMUPLUS mode. azimuth_IMUPLUS = np.array([359.938, 359.75, 359.438, 358.938, 0.625, 0.312, 0.188, 359.188, 358.25, 359.062, 359.812, 359.688, 359.812, 359.5, 359.5, 359.188, 359.062, 359.188, 359.688, 359.625, 358.875, 359.188, 359.375]) azimuth_IMUPLUS = (azimuth_IMUPLUS + 180) % 360 print(f"IMUPLUS n={np.size(azimuth_IMUPLUS)}, mean={np.mean(azimuth_IMUPLUS)}, variance={np.var(azimuth_IMUPLUS)}, stddev={np.std(azimuth_IMUPLUS)}, 90%-percentile={np.percentile(azimuth_IMUPLUS-np.mean(azimuth_IMUPLUS), 90)}, min={np.min(azimuth_IMUPLUS)}, max={np.max(azimuth_IMUPLUS)}") # IMUPLUS-Method results in 90% of all measurements being within 0.39° (alt) and 0.65° (az), resulting in a total deviation of 0.75°. Precision is good enough to find objects with the 25mm Ocular (1° FOV@1200mm).
docs/calculations.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 2
#     language: python
#     name: python2
# ---

import matplotlib.pyplot as plt
import pandas as pd
# FIX: `mpl_toolkits.axes_grid.axislines` was deprecated and removed in
# Matplotlib 3.6; `mpl_toolkits.axisartist` provides the same Subplot class
# with the `ax.axis["right"]` spine API used below.
from mpl_toolkits.axisartist.axislines import Subplot

# All experiment result files live next to the Java sources that produced them.
CSV_DIR = "C:\\cygwin\\home\\Kieran\\FYP\\src\\main\\java\\"


def plot_fitness(csv_name, title, columns, labels=None):
    """Plot fitness-per-generation curves for one GA experiment.

    Parameters
    ----------
    csv_name : str
        CSV file name (relative to CSV_DIR).  Must contain a 'Gen' column
        plus one column per experiment configuration.
    title : str
        Figure title.
    columns : list of str
        CSV column names to plot, in plotting order.
    labels : list of str, optional
        Legend labels, parallel to `columns`.  Defaults to `columns`.
    """
    if labels is None:
        labels = columns

    data = pd.read_csv(CSV_DIR + csv_name)
    # The original notebook displayed the dtypes of every file; keep that
    # behaviour via an explicit print.
    print(data.dtypes)

    x = data['Gen']
    fig = plt.figure()
    ax = Subplot(fig, 111)
    fig.add_subplot(ax)
    # Hide the right/top spines for a cleaner look.
    ax.axis["right"].set_visible(False)
    ax.axis["top"].set_visible(False)
    fig.suptitle(title)
    fig.set_facecolor('white')
    plt.grid(True)
    plt.xlabel('Generations')
    plt.ylabel('Fitness')
    for column in columns:
        plt.plot(x, data[column], linewidth=1.0, label=column)
    plt.legend(labels)
    plt.show()


# Shared column/label sets for the rate-style experiments (0%..100%).
PERCENT_COLUMNS = ['0', '0.25', '0.5', '0.75', '1']
PERCENT_LABELS = ['0%', '25%', '50%', '75%', '100%']

plot_fitness('popSize.csv', 'Population Size',
             ['100', '250', '1000', '2500', '5000'])
plot_fitness('CrossoverPoints.csv', 'Crossover Percentage',
             PERCENT_COLUMNS, PERCENT_LABELS)
plot_fitness('CopyRates.csv', 'Copy and Crossover Rates with Elitist Model',
             PERCENT_COLUMNS, PERCENT_LABELS)
plot_fitness('CopyRateswithoutElite.csv',
             'Copy and Crossover Rates without Elitist Model',
             PERCENT_COLUMNS, PERCENT_LABELS)
plot_fitness('CompetingSize.csv', 'Competing Size without Penalisation',
             ['5', '10', '15'])
plot_fitness('CompetingSizePenalisation.csv', 'Competing Size with Penalisation',
             ['5', '10', '15'])
plot_fitness('MutateRates.csv', 'Mutate Rates',
             ['0', '0.01', '0.05', '0.1', '0.25'])
plot_fitness('UserRates.csv', 'Different User Sizes',
             ['100', '200', '300'])
plot_fitness('UserProportion.csv', 'Effect of Driver Proportion',
             PERCENT_COLUMNS, PERCENT_LABELS)
plot_fitness('Flexibility.csv', 'Flexibility',
             ['5 & 10', '10 & 20', '20 & 40', '40 & 60'])
graphs/Graph.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: thesis
#     language: python
#     name: thesis
# ---

# + [markdown] id="A5Aig8imNspl"
# # S2orc (exploration, clustering & visualization)
# ---
# ---
# For presenting some results we need to analyze (and rapidly compare) some
# of the methods we used until now in order to discriminate between papers'
# `field_of_study` based on their `title` and `abstract`.
# This notebook is an extension of some previous work done by Master's
# students from the University of Florence (cite here).

# + [markdown] colab={"base_uri": "https://localhost:8080/"} id="aKxjDuDNNhHf" outputId="a7a55043-bedb-4a12-dedc-f7f71ea17e86"
# ## Dataset
#
# From each scientific paper we took the `title` and the `abstract`, as well
# as a property identifying the field to which the article pertains.
# The dataset (only 1000 elements) has been selected randomly from a
# full version of 80M papers from different fields.
# The fields of study (called `mag_field_of_study` in the dataset) are the
# following:
#
# | Field of study | All papers | Full text |
# |----------------|------------|-----------|
# | Medicine       | 12.8M      | 1.8M      |
# | Biology        | 9.6M       | 1.6M      |
# | Chemistry      | 8.7M       | 484k      |
# | n/a            | 7.7M       | 583k      |
# | Engineering    | 6.3M       | 228k      |
# | Comp Sci       | 6.0M       | 580k      |
# | Physics        | 4.9M       | 838k      |
# | Mat Sci        | 4.6M       | 213k      |
# | Math           | 3.9M       | 669k      |
# | Psychology     | 3.4M       | 316k      |
# | Economics      | 2.3M       | 198k      |
# | Poli Sci       | 1.8M       | 69k       |
# | Business       | 1.8M       | 94k       |
# | Geology        | 1.8M       | 115k      |
# | Sociology      | 1.6M       | 93k       |
# | Geography      | 1.4M       | 58k       |
# | Env Sci        | 766k       | 52k       |
# | Art            | 700k       | 16k       |
# | History        | 690k       | 22k       |
# | Philosophy     | 384k       | 15k       |
#

# + [markdown] id="7fCI7WRyDxJB"
# Note for reproducibility: `data` is a `DatasetDict` object composed of a
# `Dataset` object for every key (in `train`, `test`, `valid`):
#
# ```python
# {
#     "train": Dataset,
#     "test" : Dataset,
#     "valid": Dataset
# }
# ```
# -

# %load_ext autoreload
# %autoreload 2

# + colab={"base_uri": "https://localhost:8080/"} id="JBaf8ygZPEha" outputId="1582b3e1-e705-46de-cd94-6078bd05a22c"
# Project directories.
MAIN_PATH = '/home/vivoli/Thesis'
DATA_PATH = '/home/vivoli/Thesis/data'
OUT_PATH = '/home/vivoli/Thesis/outputs/'
ARGS_PATH = '/home/vivoli/Thesis/'

# +
# Imports
from thesis.utils.general import load_dataset_wrapper
from thesis.utils.parsers.args_parser import parse_args

# Canonical split names and the aliases each may appear under in a raw dataset.
DICTIONARY_FIELD_NAMES = dict(
    train=['train'],
    test=['test', 'debug', 'dev'],
    validation=['validation', 'valid'],
)
# -

# ## Getting the dataset
# ---
# In order to get the dataset we need to create a dictionary with the
# DatasetArguments (params) and use our "library" called `thesis`.
# +
# ------------------
# Creating Arguments
# ------------------

# create arguments dictionary
# The downstream parser expects every value to be string-encoded.
args = dict(
    # DatasetArguments
    model_name_or_path="allenai/scibert_scivocab_uncased",
    dataset_name="s2orc",        # "keyphrase",
    dataset_config_name="full",  # "inspec",

    # TrainingArguments
    seed='1234',
    output_dir="/home/vivoli/Thesis/output",
    num_train_epochs='1',
    per_device_train_batch_size="8",  # 16 and 32 end with "RuntimeError: CUDA out of memory."
    per_device_eval_batch_size="8",   # 16 and 32 end with "RuntimeError: CUDA out of memory."
    max_seq_length='512',

    # S2orcArguments & KeyPhArguments
    dataset_path="/home/vivoli/Thesis/data",
    data="abstract",
    target="title",
    classes="mag_field_of_study",  # "keywords",

    # S2orcArguments
    idxs='0',
    zipped='True',
    mag_field_of_study="Computer Science",
    keep_none_papers='False',
    keep_unused_columns='False',

    # RunArguments
    run_name="scibert-s2orc",
    run_number='0',
    run_iteration='0',

    # LoggingArguments
    verbose='True',
    debug_log='True',
    time='False',
    callbacks="WandbCallback,CometCallback,TensorBoardCallback",
)

# save dictionary to file so the parser below can read it back
import json
import os

ARGS_FILE = 'arguments.json'
with open(os.path.join(ARGS_PATH, ARGS_FILE), 'w') as fp:
    json.dump(args, fp)

print(args)

# +
# ------------------
# Parsing the Arguments
# ------------------
dataset_args, training_args, model_args, run_args, log_args, embedding_args = parse_args(
    ['params_path', os.path.join(ARGS_PATH, ARGS_FILE)])

# +
# ------------------
# Getting the datasets
# ------------------
# Getting the load_dataset wrapper that manages huggingface datasets and the custom ones
custom_load_dataset = load_dataset_wrapper()
# Loading the raw data based on input (and default) values of arguments
raw_datasets = custom_load_dataset(dataset_args, training_args, model_args,
                                   run_args, log_args, embedding_args)

# The Datasets in the raw form can have different split key names (depending
# on the configuration).  We need all datasets to contain the 'train', 'test'
# and 'validation' keys; if not, we rename the keys according to
# DICTIONARY_FIELD_NAMES.


def format_key_names(raw_datasets):
    """Rename the splits of a DatasetDict to the canonical
    'train'/'test'/'validation' names, using the alias lists declared in
    DICTIONARY_FIELD_NAMES.  Unknown split names are left untouched."""

    def names_dict_generator(names_tuple):
        # Invert {canonical: [aliases]} into a flat {alias: canonical} map.
        names_map = dict()
        for key, values in names_tuple.items():
            for value in values:
                names_map[value] = key
        return names_map

    names_map = names_dict_generator(DICTIONARY_FIELD_NAMES)
    # BUG FIX: iterate over a snapshot of the keys -- popping while iterating
    # the live keys view raises "dictionary changed size during iteration".
    for split_name in list(raw_datasets.keys()):
        new_split_name = names_map.get(split_name)
        # BUG FIX: guard against unknown split names; the original would have
        # moved them under the key `None`.
        if new_split_name is not None and split_name != new_split_name:
            raw_datasets[new_split_name] = raw_datasets.pop(split_name)
    return raw_datasets


# BUG FIX: the original called `logger.info(...)` but no logger was ever
# defined (NameError); a plain print matches the rest of the notebook.
print("Formatting DatasetDict keys")
datasets = format_key_names(raw_datasets)

# + colab={"base_uri": "https://localhost:8080/"} id="pO3vdi0LQRUH" outputId="b2a34317-2a31-4c45-9f81-ff576ecde3eb"

# + colab={"base_uri": "https://localhost:8080/"} id="86d93F19RB2c" outputId="59213c14-6d9e-498f-a41b-bccade43352c"
# NOTE(review): `data` is not defined anywhere in this notebook -- it
# presumably refers to one split of `datasets` loaded above, whose items have
# 'keywords' and 'filename' fields.  TODO: confirm and bind it explicitly.
keywords = []
keywords_info = {}
for item in data:
    temp = item['keywords']
    for keyword in temp:
        # Strip leftover characters from the keyword preprocessing.  (The
        # hyphen was forgotten in the original preprocessing.  The comma is
        # not a separator here: some keywords contain one, e.g.
        # "segmentation, features and descriptions".)
        keyword = keyword.replace("-", "").replace(",", "").replace("/", "")
        if keyword not in keywords:
            keywords.append(keyword)
            keywords_info[keyword] = {'count': 0, 'appears_in': []}
        keywords_info[keyword]['count'] += 1
        keywords_info[keyword]['appears_in'].append(item['filename'])
print(keywords_info)

# + colab={"base_uri": "https://localhost:8080/", "height": 1000} id="8c8qaR0NXgtL" outputId="7227b8f5-cabe-4421-856d-2570d64d08c6"
# Plot the count distribution over all keywords.
import numpy as np
import matplotlib.pyplot as plt
plt.rcdefaults()

counts = [keywords_info[kw]['count'] for kw in keywords]

plt.figure(figsize=(10, 25))
y_pos = np.arange(len(keywords))
plt.barh(y_pos, counts, alpha=0.5)
plt.yticks(y_pos, keywords)
plt.xlabel('Count')
plt.title('Count distribution for each keyword')
plt.grid()
plt.show()

# + colab={"base_uri": "https://localhost:8080/", "height": 1000} id="kPCMbCLxhXkN" outputId="62e3c704-c289-4510-c068-cea06ad60572"
# Same chart, ordered by count.
ordered_kws = [x for _, x in sorted(zip(counts, keywords))]
ordered_cts = sorted(counts)

plt.figure(figsize=(5, 22))
y_pos = np.arange(len(keywords))
plt.barh(y_pos, ordered_cts, alpha=0.5)
plt.yticks(y_pos, ordered_kws)
plt.xlabel('Count')
plt.title('Count distribution for each keyword')
# plt.grid()
plt.show()

# + [markdown] id="Adl8P9s8jjwF"
# Given the chart of frequency of the keywords (NOT normalized), a threshold
# can be set to only consider the most relevant keywords.

# + [markdown] id="JRCHp4_FmIZr"
# # Definition of groups for the "ground truth" (?) "baseline" (?)
#

# + [markdown] id="HVg8adeq-cyR"
# Note: the following is arguable; in fact, the most frequent keywords are
# also the blandest and, maybe, less significant for a categorization.
# + colab={"base_uri": "https://localhost:8080/"} id="pZMC-jWJ7v3n" outputId="ecea0176-095c-47aa-8a8e-37c6baab4d40"
len(keywords)

# + colab={"base_uri": "https://localhost:8080/"} id="oOtdlvO-oj9F" outputId="4c7ef29e-d323-409c-abb6-ab15370e09ba"
len(keywords) * .2  # the first 29 words make up to 20% of all the keywords

# + colab={"base_uri": "https://localhost:8080/"} id="qbN2usEFpL4k" outputId="5dd76dc5-ff38-4fa5-ddf1-0f1b6a655d26"
# Fraction of all keyword occurrences covered by the 43 most frequent keywords.
sum(ordered_cts[len(ordered_cts) - 43:]) / sum(ordered_cts)

# + colab={"base_uri": "https://localhost:8080/"} id="YoZzuU27rxKK" outputId="5bc720e2-5a76-4aed-cdd6-9f7d9617f633"
# Fraction covered by the 29 most frequent keywords.
sum(ordered_cts[len(ordered_cts) - 29:]) / sum(ordered_cts)

# + [markdown] id="B4jDp1mTrf-l"
# 43 keywords make up roughly 80% of the total keywords count.  However, for
# practical reasons, 29 keywords (20%) will be used, also considering how many
# documents have multiple keywords associated to them.
# This will have consequences in the choice of the number of clusters, i.e.
# while HDBSCAN neither needs nor wants the number of clusters to be
# specified, with k-means it is mandatory by the nature of the algorithm.

# + id="xKKiE1TG8ryS"
MOST_IMPORTANT_KW_THRESHOLD = 29

# + colab={"base_uri": "https://localhost:8080/", "height": 321} id="e-NegFO3p5ke" outputId="240dc74b-669c-4344-a789-191c4a628e0c"
# The most important keywords and their counts: the tail of the
# count-ordered lists built above.
mi_keywords = ordered_kws[len(ordered_kws) - MOST_IMPORTANT_KW_THRESHOLD:]
mi_keywords_counts = ordered_cts[len(ordered_kws) - MOST_IMPORTANT_KW_THRESHOLD:]

plt.rc('font', size=8)
plt.figure(figsize=(10, 3))
y_pos = np.arange(len(mi_keywords))
plt.barh(y_pos, mi_keywords_counts, alpha=0.5)
plt.yticks(y_pos, mi_keywords)
plt.xlabel('Count')
plt.title('Count distribution for each of the most important keywords')
plt.grid()
plt.show()

# + colab={"base_uri": "https://localhost:8080/"} id="7Lyn9-RT8jha" outputId="f4699294-9aca-4ace-f326-b8d96620b7b7"
# Keep the {count, appears_in} info only for the important keywords.
# (not very elegant, but okay)
mi_associations = {keyword: keywords_info[keyword] for keyword in mi_keywords}
print(mi_associations['deeplearning'])

# + [markdown] id="BnVI-BfEk3T-"
# # Clustering
# From here on, clustering will be considered using SBERT embeddings.
# The variable called 'clustering_on' is used to choose whether the
# embeddings are made on the abstracts or on the titles.  In both cases,
# clustering is made through HDBSCAN and k-means (see the two subsections).
# WARNING: do not execute cells in random order.  Some variable names are
# used both for the HDBSCAN clustering and for k-means; the suggestion is to
# execute HDBSCAN first and k-means second.  Otherwise, it is important to
# execute the definition of the functions used for both cases.
# + [markdown] id="x6IJsf1c_ZRO"
# ## Clustering with HDBSCAN

# + colab={"base_uri": "https://localhost:8080/"} id="mWet-np_BZlc" outputId="8abf47e2-7aa7-4f4f-92b6-b586cedf3105"
# !pip install torch==1.7.1+cu101 torchvision==0.8.2+cu101 torchaudio==0.7.2 -f https://download.pytorch.org/whl/torch_stable.html
# !pip install hdbscan
# !pip install sentence-transformers
# !pip install umap-learn

# + colab={"base_uri": "https://localhost:8080/", "height": 66, "referenced_widgets": ["86d788ed61724813bb8b6c69eefb7c57", "141b9a41a5074395bf9de0aba4402e20", "118d8e84375d4da7ad465401f40afa31", "963a04b8e07e4a8daf78fae4f290ceb7", "748bf5d937b64b79a8c3363417a6b14d", "c6be1091a10c44c08aa7c232fadcdba3", "d1c1852504b74066910d8af0af79c0ee", "db76ce2ff1744a3686bbdbea36805b42"]} id="fq9ABby2CAhH" outputId="e6346d11-5d8e-481d-b039-d70e475e3fb0"
from sentence_transformers import SentenceTransformer

# model_in = SentenceTransformer('distilbert-base-nli-mean-tokens')  # this model is kinda bad
model_in = SentenceTransformer('stsb-roberta-large')
# model = SentenceTransformer('paraphrase-distilroberta-base-v1')


# + id="XU2qJ-uP_c2S"
# Since the same exact thing has to be done for both abstracts and titles,
# the selection + encoding step is a function.
def elaborate(subject=None, model=None):
    '''
    Select the non-broken texts for `subject` and embed them with SBERT.

    :param str subject: either 'abstract' or 'title'; what the clustering
        has to be made on
    :param SentenceTransformer model: instantiated SentenceTransformer (SBERT)
    :returns: (selected texts, their embeddings)
    '''
    # The filter below is important: with abstracts, HDBSCAN kept clustering
    # all the extraction errors into the same cluster (which makes sense),
    # so their removal is necessary.
    textual_data = []
    for item in data:
        if not (item[subject] == 'UNABLE TO DECODE ABSTRACT'
                or item[subject] == 'Unable to open the pdf'
                or item[subject] == ""):
            textual_data.append(item[subject])
    print(textual_data)
    return textual_data, model.encode(textual_data, show_progress_bar=True)


# + id="_UN4A3GyLRrs"
def constructDictionaryOfClusters(labels, original_data):
    """Group `original_data` by cluster label, printing every cluster.

    Returns a list of clusters indexed by label.  Items labelled -1 (the
    HDBSCAN "outsiders") are skipped.
    NOTE(review): an earlier comment claimed the last list element holds the
    outliers; that is not what this code does -- label -1 items are dropped.
    """
    print(labels)
    associative = []
    # Prepare one empty bucket per label (labels.max() is the highest label).
    for i in range(labels.max() + 1):
        associative.append([])
    for i in range(len(original_data)):
        if labels[i] != -1:  # with HDBSCAN, label -1 marks the "outsiders"
            associative[labels[i]].append(original_data[i])
    for item in associative:
        print(len(item))
        print(item)
    print(len(associative))
    return associative


# + id="iEhNJ5RR3Mpu"
clustering_on = "abstract"

# + colab={"base_uri": "https://localhost:8080/", "height": 103, "referenced_widgets": ["4ae4025047e34855a941c71c9e274141", "59ea2938700548a6b05e741942be2d60", "5178786c44d646c6aa6ace829845e879", "<KEY>", "<KEY>", "523f95fa48d041e184366495c60e3440", "<KEY>", "317698dade564644acb8b530ea2b50ab"]} id="7bB2uow7FKGy" outputId="29cc1bdd-4172-4c3d-dca1-c40d11e8a505"
textual_data, embeddings = elaborate(clustering_on, model_in)
# BUG FIX: the original re-encoded here via an undefined name
# (`embeddings = model.encode(...)` -> NameError; the loaded model is
# `model_in`).  `elaborate` already returned the embeddings, so the
# redundant call is removed.

# + colab={"base_uri": "https://localhost:8080/"} id="vnsegBIYJS1v" outputId="3a02d7eb-39d3-4366-8df6-ada6b459b6d0"
print(embeddings.shape)

# + id="97lajAFUFoiL"
# From here on, once the embeddings are calculated (either for the abstracts
# or the titles), the clustering is considered: first HDBSCAN, then k-means.
import umap
import hdbscan

# It is *better* to reduce dimensionality first: SBERT returns embeddings of
# dimension 700+ or even 1000+ (depending on the chosen model), and
# clustering algorithms seem not to perform well in such high dimensions.
# UMAP is a popular algorithm for dimensionality reduction.
umap_embeddings = umap.UMAP(n_neighbors=15,
                            n_components=5,
                            metric='cosine').fit_transform(embeddings)

cluster = hdbscan.HDBSCAN(min_cluster_size=5,
                          metric='euclidean',
                          cluster_selection_method='eom').fit(umap_embeddings)

# + colab={"base_uri": "https://localhost:8080/", "height": 631} id="MnRok7WMm3Bj" outputId="fb4d9f3d-3729-4cbe-8685-4645c4ac2b8c"
import pandas as pd

# Project the (already reduced) embeddings to 2-D for visualization.
umap_data = umap.UMAP(n_neighbors=15, n_components=2, min_dist=0.0,
                      metric='cosine').fit_transform(umap_embeddings)
result = pd.DataFrame(umap_data, columns=['x', 'y'])
result['labels'] = cluster.labels_

# Visualize clusters
fig, ax = plt.subplots(figsize=(10, 7))
outliers = result.loc[result.labels == -1, :]
clustered = result.loc[result.labels != -1, :]
plt.scatter(outliers.x, outliers.y, color='#BDBDBD', s=0.05)
plt.scatter(clustered.x, clustered.y, c=clustered.labels, s=0.05, cmap='hsv_r')
plt.colorbar()
plt.title("Visualization of one instance of clustering through HDBSCAN")

# + colab={"base_uri": "https://localhost:8080/", "height": 52} id="yb-lOFSaI93L" outputId="24dad4ff-e655-435f-c4fc-055271de2842"
'''cluster = hdbscan.HDBSCAN(min_cluster_size=3,
                          metric='manhattan',
                          cluster_selection_method='eom').fit(embeddings)
'''

# + colab={"base_uri": "https://localhost:8080/"} id="f4Sz6YKoIk01" outputId="1c665bb5-12bb-4fc1-e502-e73830e0bf0d"
# Number of clusters.  labels_.max() is the highest label, so the number of
# clusters is max() + 1.
print(cluster.labels_.max())

# + colab={"base_uri": "https://localhost:8080/"} id="NYvFY1ehLcsy" outputId="d089d1c5-60bf-47a9-c873-5d0073ba58f6"
clusters = constructDictionaryOfClusters(cluster.labels_, textual_data)

# + [markdown] id="SFIhzkvpuO0H"
#

# + colab={"base_uri": "https://localhost:8080/"} id="CMwc8L7huo96" outputId="9586b21b-6fd1-4355-fd50-efe3c05e9efa"
print(len(clusters))

# + id="C2mRbIeTuwAu"
# Sort clusters by size, descending.  (really not necessary, to be honest)
# NOTE(review): `[:-1]` drops the LAST cluster; with
# constructDictionaryOfClusters above the last element is a regular cluster
# (label -1 items were already skipped), so a cluster is silently discarded
# here -- confirm this is intended.
clusters_desc = clusters.copy()[:-1]
clusters_desc.sort(key=len, reverse=True)

# + colab={"base_uri": "https://localhost:8080/"} id="7uDetROTyH43" outputId="6d60d344-3435-4e98-d13d-4aaedc56dcb0"
print(len(clusters))
print(len(clusters_desc))

# + [markdown] id="1EiqiuhXzI6J"
# Now I build a sort of matching matrix.  Maybe it would be appropriate to do
# intersection over union for each element of the matrix?

# + id="6dpK3_eP2kKi"
# transform clusters of titles/abstracts into clusters of ids
# (dead code from a previous iteration; superseded by
# transform_clusters_into_id_clusters below)
'''clusters_of_ids = []
for item in clusters_desc:
    temp = []
    for text in item:
        tp = None
        for dt in data:
            if dt[clustering_on] == text:
                tp = dt['filename']
                break
        temp.append(tp)
    clusters_of_ids.append(temp)
#print(clusters_of_ids)
'''


# + id="KJ1S8QjUQeSI"
def transform_clusters_into_id_clusters(temp_clust_desc):
    """Map each cluster of texts to a cluster of document ids ('filename'),
    by looking each text up in the global `data` on the `clustering_on`
    field.  Texts with no match map to None."""
    # BUG FIX: the original iterated the global `clusters_desc` and ignored
    # its `temp_clust_desc` parameter; it also built an unused `tmp_ids`
    # list, and `tp` could be referenced unbound when `data` is empty.
    id_clusters = []
    for item in temp_clust_desc:
        temp = []
        for text in item:
            tp = None
            for dt in data:
                if dt[clustering_on] == text:
                    tp = dt['filename']
                    break
            temp.append(tp)
        id_clusters.append(temp)
    return id_clusters


# + id="2OQXCWhGQugu"
clusters_of_ids = transform_clusters_into_id_clusters(clusters_desc)

# + colab={"base_uri": "https://localhost:8080/"} id="pM0OoWyB5aaW" outputId="2bcd3f7a-b97f-4caf-97bc-86d001de2dee"
print(len(clusters_of_ids[0]))
print(len(clusters_of_ids[-1]))

# + id="c_btYP6Bp4S5"
# (should have been stored this way from the beginning; redundant transformation)
new_data = {}
for item in data:
    new_data[item['filename']] = {'title': item['title'], 'abstract': item['abstract']}

# + id="FCcFJETZqlsE"
# new_data

# + id="I2yQ0hLzzW4d"
mi_keywords_desc = mi_keywords.copy()
mi_keywords_desc.reverse()

# Jaccard similarity (intersection over union) between each cluster's
# document-id set and the document set of each important keyword.
matching_matrix = []
for c_item in clusters_of_ids:        # for each cluster
    row = []
    for kwd in mi_keywords_desc:      # for each of the important keywords
        # keyword kwd appears in doc1, doc2, ...
        appears_in = mi_associations[kwd]['appears_in']
        # the following can be replaced with whatever metric
        union = len(set(c_item).union(set(appears_in)))
        intersection = len(set(c_item).intersection(set(appears_in)))
        row.append(intersection / union)
    matching_matrix.append(row)

import numpy as np

np_matching_matrix = np.array(matching_matrix)
# print(np_matching_matrix)

# + colab={"base_uri": "https://localhost:8080/"} id="9CSsFK1VScDf" outputId="c2846b8e-24f3-46ae-b652-3080c6f46d5f"
mi_keywords_desc

# + colab={"base_uri": "https://localhost:8080/", "height": 934} id="R3jOabK19L0X" outputId="2e312f74-b1bd-49c6-cde6-f84043c24943"
import seaborn as sn

# TODO add the labels to the chart ?
plt.figure(figsize=(11, 8))
sn.heatmap(np_matching_matrix, annot=False, xticklabels=mi_keywords_desc)

# + [markdown] id="NfFGqTSBINy3"
# ## Clustering with K-means

# + [markdown] id="Sd3hjYskRTsA"
# The reason for doing two types of clustering is that, while HDBSCAN leaves
# out the outsiders, k-means forces each element into a cluster.
# + id="E-hzH6NjIMDA" from sklearn.cluster import KMeans clustering_model = KMeans(n_clusters=29) clustering_model.fit(umap_embeddings) cluster_assignment = clustering_model.labels_ # number of clusters print(clustering_model.labels_.max()) clusters = constructDictionaryOfClusters(clustering_model.labels_, textual_data) # + colab={"base_uri": "https://localhost:8080/", "height": 631} id="fL560IfWp0-m" outputId="8310ff8d-f0a8-4692-aa1e-cc565e69d320" import pandas as pd #umap_data = umap.UMAP(n_neighbors=15, n_components=2, min_dist=0.0, metric='cosine').fit_transform(umap_embeddings) result = pd.DataFrame(umap_data, columns=['x', 'y']) result['labels'] = clustering_model.labels_ # Visualize clusters fig, ax = plt.subplots(figsize=(10, 7)) outliers = result.loc[result.labels == -1, :] clustered = result.loc[result.labels != -1, :] plt.scatter(outliers.x, outliers.y, color='#BDBDBD', s=0.05) plt.scatter(clustered.x, clustered.y, c=clustered.labels, s=0.05, cmap='hsv_r') plt.colorbar() plt.title("Visualization of one instance of clustering through k-means") # + id="VYb8NyKaOxYw" #really not necessary to be honest, clusters_desc = clusters.copy()[:-1] clusters_desc.sort(key = len, reverse = True) # + [markdown] id="Rc-lL49_Pir1" # NOTE: from this point on, it's the same code as with HDBSCAN # + colab={"base_uri": "https://localhost:8080/"} id="0ql69rISPmNk" outputId="3eab138c-b664-46fb-a7b2-bd3a5e5cae66" clusters_of_ids = transform_clusters_into_id_clusters(clusters_desc) print(len(clusters_of_ids[0])) print(len(clusters_of_ids[-1])) # + id="1zLXS2ztRSu0" #should have stored this way since the beginning, useless transformation.. 
new_data = {} for item in data: new_data[item['filename']] = {'title': item['title'], 'abstract': item['abstract']} # + id="LqZ-M9F-RoSy" mi_keywords_desc = mi_keywords.copy() mi_keywords_desc.reverse() matching_matrix = [] #for each cluster for c_item in clusters_of_ids: #for each keyword of the previously defined keywords row = [] for kwd in mi_keywords_desc: # keyword k_item appears in doc1, doc2, ... # c_item is the first set, the second set should be k_item['appears_in'] appears_in = mi_associations[kwd]['appears_in'] #the following can be replaced with whatever metric union = len(set(c_item).union(set(appears_in))) intersection = len(set(c_item).intersection(set(appears_in))) row.append(intersection/union) matching_matrix.append(row) import numpy as np np_matching_matrix = np.array(matching_matrix) #print(np_matching_matrix) # + colab={"base_uri": "https://localhost:8080/", "height": 934} id="A-n4U9oARpao" outputId="6c54de09-d4f2-4356-cbee-dff2096a95a7" import seaborn as sn #TODO add the labels to the chart ? plt.figure(figsize=(11,8)) sn.heatmap(np_matching_matrix, annot=False, xticklabels=mi_keywords_desc) # + [markdown] id="MqcWzWPo-aHt" # # A further step: automatic keyword assignment # Given the clusters, it's possibile tu use c- # TF-IDF to infer the topic, this *could* allow for automatic labeling of a set of documents # + id="sjTgMijdAsy6" #smarter way of doing things.. 
import pandas as pd

#for each cluster, create pandas dataframe
docs_df = pd.DataFrame(textual_data, columns=["Doc"])
# BUGFIX: the fitted model above is named `clustering_model`; `cluster` was undefined.
docs_df['Topic'] = clustering_model.labels_
docs_df['Doc_ID'] = range(len(docs_df))
# One row per topic, with all of the topic's documents joined into a single string.
docs_per_topic = docs_df.groupby(['Topic'], as_index = False).agg({'Doc': ' '.join})

# + id="5vUgRaKhqsgP"
from sklearn.feature_extraction.text import CountVectorizer

#note: c-tf-idf is simply tf-idf but the measurements are made on one entire cluster
def c_tf_idf(documents, m, ngram_range=(1, 1)):
    """
    Class-based TF-IDF: treat each cluster's concatenated documents as one
    "class document".

    documents   -- iterable of strings, one concatenated document per topic
    m           -- total number of original documents (used in the IDF term)
    ngram_range -- ngram range forwarded to CountVectorizer

    Returns (tf_idf, count): the (n_terms x n_topics) score matrix and the
    fitted CountVectorizer.
    """
    count = CountVectorizer(ngram_range=ngram_range, stop_words="english").fit(documents)
    t = count.transform(documents).toarray()
    w = t.sum(axis=1)                                 # words per class document
    tf = np.divide(t.T, w)                            # term frequency within each class
    sum_t = t.sum(axis=0)                             # total count of each term
    idf = np.log(np.divide(m, sum_t)).reshape(-1, 1)  # rarity across all m documents
    tf_idf = np.multiply(tf, idf)
    return tf_idf, count

# + id="xBw6NZ0nrLZr"
def extract_top_n_words_per_topic(tf_idf, count, docs_per_topic, n=20):
    """Return {topic_label: [(word, score), ...]} with the n highest-scoring words per topic."""
    # NOTE(review): get_feature_names() was removed in newer scikit-learn in favour of
    # get_feature_names_out() — keep as-is unless the pinned sklearn version is >= 1.2.
    words = count.get_feature_names()
    labels = list(docs_per_topic.Topic)
    tf_idf_transposed = tf_idf.T
    indices = tf_idf_transposed.argsort()[:, -n:]     # indices of the n largest scores per topic
    top_n_words = {label: [(words[j], tf_idf_transposed[i][j]) for j in indices[i]][::-1]
                   for i, label in enumerate(labels)}
    return top_n_words

def extract_topic_sizes(df):
    """Return a DataFrame (Topic, Size) with the number of documents per topic, largest first."""
    topic_sizes = (df.groupby(['Topic'])
                     .Doc
                     .count()
                     .reset_index()
                     .rename({"Topic": "Topic", "Doc": "Size"}, axis='columns')
                     .sort_values("Size", ascending=False))
    return topic_sizes

# + colab={"base_uri": "https://localhost:8080/", "height": 359} id="YA9yCjyzrR29" outputId="4a8232b2-c47c-4a28-ca1f-66e9c07ef7a4"
tf_idf, count = c_tf_idf(docs_per_topic.Doc.values, m=len(data))
top_n_words = extract_top_n_words_per_topic(tf_idf, count, docs_per_topic, n=20)
topic_sizes = extract_topic_sizes(docs_df)
topic_sizes.head(10)
# note: topics labelled '-1' are the ones hdbscan did not cluster
# + colab={"base_uri": "https://localhost:8080/"} id="uDDMT_hyrzEw" outputId="0a3f21d0-a2d3-496e-c817-9c386b695be0"
top_n_words[0][:100]
notebooks/99. Journal Analysis project - Francesco.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # %matplotlib inline import numpy as np import matplotlib.pyplot as plt # Write your imports here # # Calculus Exercise # ## Numerical Computation. Derivatives, integrals. Calculus in many dimensions # ### Problem 1. Derivative and Slope at a Point # We'll warm up by visualizing how the derivative of a function relates to the slope of the function at a given point. # # We can either calculate the derivative analytically (by hand) or use the numerical definition. So, let's see what a function and its derivative look like. # # Write a Python function which plots a math function and its derivative. # + def calculate_derivative_at_point(function, point, precision = 1e-7): """ Calculates a numerical approximation to the derivative of the specified function at the given point """ return (function(point + precision) - function(point)) / precision def plot_derivative(function, derivative = None, min_x = -10, max_x = 10): """ Plots the function and its derivative. The `derivative` parameter is optional and can be provided as a separate function. If it's not provided, the derivative will be calculated automatically """ # We're using vectorized functions to make our code simpler: this only hides the for-loop, # it doesn't provide any performance gain vectorized_function = np.vectorize(function) x = np.linspace(min_x, max_x, 1000) y = vectorized_function(x) # TODO: Generate the function values `y` dy = [] if derivative is None: dy = np.vectorize(calculate_derivative_at_point)(function, x) else: dy = np.vectorize(derivative)(x) # TODO: Plot the function and its derivative. # Don't forget to add axis labels. # Feel free to make the plot as pretty as you wish - you may add titles, # tick marks, legends, etc. 
plt.plot(x, y) plt.plot(x, dy) plt.show() # - # Let's now test with out favourite function: $y = x^2$ whose derivative is $y' = 2x$. If you've worked correctly, both of the following plots should be the same. plot_derivative(lambda x: x ** 2, lambda x: 2 * x) # The derivative is calculated by hand plot_derivative(lambda x: x ** 2) # The derivative is not pre-calculated, should be calculated inside the function # Let's try one more: $y = \sin(x)$, $y'= \cos(x)$. plot_derivative(np.sin) # These plots may look nice but they don't reflect the idea of **slope at a point** very well. Now that we're sure our functions are working, let's actually calculate the derivative at **one point** and see that it is, indeed, equal to the slope of the function at that point. # # #### How to plot the tangent line? # We need to find the line equation first. # # We're given the derivative, which is equal to the slope of the line. In the line equation $y = ax + b$, the slope is $a$. We now have to find $b$. We're given a point $P(x_P; y_P)$ through which the line passes. Substitute $x_P$ and $y_P$: # $$ y_P = ax_P + b $$ # # $$ b = y_P - ax_P $$ # # Now that we have $a$ and $b$, we can plot the line given by $y = ax + b$. The parameter $b$ is sometimes called "y-intercept" (or "intercept"). # # Now we can copy the code from the previous function. This time, however, we won't plot the entire range, only one value of the derivative. We'll also show the point where we're calculating. def plot_derivative_at_point(function, point, derivative = None, min_x = -10, max_x = 10): """ Plots the function in the range [x_min; x_max]. 
Computes the tangent line to the function at the given point and also plots it """ vectorized_function = np.vectorize(function) x = np.linspace(min_x, max_x, 1000) y = vectorized_function(x) slope = 0 # Slope of the tangent line if derivative is None: slope = calculate_derivative_at_point(function, point) else: slope = derivative(point) intercept = function(point) - slope * point tangent_line_x = np.linspace(point - 2, point + 2, 10) tangent_line_y = slope * tangent_line_x + intercept plt.plot(x, y) plt.plot(tangent_line_x, tangent_line_y) plt.show() plot_derivative_at_point(lambda x: x ** 2, 2) # Looks like it! Let's zoom in to confirm: plot_derivative_at_point(lambda x: x ** 2, 2, min_x = 0, max_x = 4) # Let's also plot several tangents to the same function. Note that this will create many graphs by default. You can plot them all at once if you wish. for x in np.arange(-8, 10, 2): plot_derivative_at_point(lambda x: x ** 2, x) for x in np.arange(-8, 10, 2): plot_derivative_at_point(np.sin, x) # Now we have a visual proof that the derivative of a function at a given point is equal to the slope of the tangent line to the function. # ### Problem 2. Limits. The Number $e$ as a Limit # We know what limits are, what they mean and how they relate to our tasks. Let's explore a special limit. This one arises from economics but we'll see it has applications throughout science because of its unique properties. # # Imagine you're saving money in a bank. Every period, you accumulate [interest](https://en.wikipedia.org/wiki/Compound_interest) on your money. Let's say the bank is very generous and gives you **100% interest every year**. # # How much money will you have after one year? Let's say you got $\$1$. After one year, you'll get your interest from the bank and you'll have $\$2$. Your money doubled, which was expected. # # How about this offer: **50% interest every 6 months**? Will this be the same, better, or worse? # You start with $\$1$. 
After 6 months, you'll accumulate $50%$ interest to get $\$1,50$. After 6 more months, you'll get $50\%.1,50 = 0,75$, so your total is $2,25$. You got $\$0,25$ more! # # Let's try to exploit the scheme and get rich. # # **$100/12\%$ every month** # * January: $1 + 1/12$ # * February: $(1 + 1/12) * (1 + 1/12)$ # * March: $(1 + 1/12) * (1 + 1/12) * (1 + 1/12)$ # * ... # # We can see a pattern. Every period, we multiply our money by $1 + 1/12$. So, the final sum will be $$\$1.\left(1+\frac{1}{12}\right)^{12} = \$2,61$$ # # We did even better. This is always true. The more periods, the more money we accumulate. The more money we have, the more interest we accumulate. And that completes the vicious circle known as money saving :). # # Let's try other options: # # **$100/52\%$ every week** # # $\$1.\left(1+\frac{1}{52}\right)^{52} = \$2,69$ # # **$100/31556926\%$ every second** # # $\$1.\left(1+\frac{1}{31556926}\right)^{31556926} = \$2,718$ # # Well, there's a slight problem to our world domination plans. Even though we accumulate more and more money, we get *diminishing returns*. For 52 periods we got $2,69$, and for more than 3 million periods we only got like $\$0,02$ more. This pattern will continue. # # Now we can ask ourselves, what is the maximum profit we can accumulate for a year? To do this, we can ask # $$ \lim_{n \ \rightarrow \infty}\left(1+\frac{1}{n}\right)^n = ? $$ # # It turns out this is a constant. It is approximately equal to $2,71828182\dots$. Since it's very useful, it's got a name: $e$, or Euler's number (sometimes called Napier's number). The limit above is **the definition of $e$**. # # Why is it so useful? Let's go back to the original problem. In compound interest, the extra amount after every period is proportional to the amount at the start of the period. In other words, **the rate of change of a value is proportional to the value**. This pops out everywhere in nature and business. 
Some examples include radioactive decay (more atoms $\Rightarrow$ more decays), cooling down a cup of tea (the rate of cooling down depends on the temperature difference between the cup and the room), animal population models (more animals $\Rightarrow$ more babies), infection models, and so on. # # To quickly verify the value of $e$, calculate the limit as we defined it above. def calculate_limit_at_infinity(function): """ Calculates a numerical approximation of the limit of the specified function as its parameter goes to infinity """ n = 10 ** np.arange(0, 10) return zip(n, function(n)) limits = calculate_limit_at_infinity(lambda x: (1 + 1 / x) ** x) for limit in limits: print(limit) # ### Problem 3. Derivatives of Exponential Functions # Use the function you defined in the first problem to plot the derivative of $y = 2^x$. plot_derivative(lambda x: 2 ** x, min_x = 0, max_x = 10) # The function and its derivative look closely related, only the derivative seems to grow a bit slower than the function. Let's confirm that by looking at a broader range: plot_derivative(lambda x: 2 ** x, min_x = 0, max_x = 20) # The same pattern will continue if we try to plot any exponential function, e.g. $y = 3^x$, $y = 4^x$ and so on, if the base of the exponent is greater than 1. If we want to plot, say, $y = 0,5^x$, we'll get a slightly different result. Note that the functions look more or less the same, only their signs are flipped. plot_derivative(lambda x: 0.5 ** x) # Very interesting things happen if we plot $y = e^x$: plot_derivative(np.exp) # The plots overlap. You can see that this is true if you plot the function and its derivative with different line widths. This means that # # $$ (e^x)' = e^x $$ # # Also: # $$ (e^x)'' = e^x $$ # $$ (e^x)''' = e^x $$ # ... and so on. This is the only function whose rate of change (derivative) is equal to the function itself. This property makes it even more interesting for science and math. 
#
# Also, do you remember that $e^{i\varphi} = \cos(\varphi) + i\sin(\varphi)$? This constant never ceases to amaze.

# ### Problem 4. Integrals and Area. Changing Variables in Integration
# We know that the definition of an integral is the area "between" the function we're integrating, and the x-axis. This gives us a method to calculate integrals. Most integrals can't be solved analytically but have numerical solutions. One such integral is
# $$\int\sin(x^2)dx$$
#
# Note that we can only solve **definite integrals** numerically.
#
# The simplest way to calculate the integral is to apply the definition, like in the case of the derivative. This is called [the trapezoid method](http://www.mathwords.com/t/trapezoid_rule.htm) because the area is approximated as a series of trapezoids.
#
# Write a function which does exactly that. Use `numpy` and vectorization as much as possible.

def calculate_integral(function, x_min, x_max, num_points = 5000):
    """
    Calculates a numerical approximation of the definite integral of the provided
    function between the points x_min and x_max, using the trapezoid rule.

    The interval [x_min; x_max] is split into `num_points` equal sub-intervals;
    the trapezoid over each sub-interval has area width * (left + right) / 2.
    """
    # num_points sub-intervals require num_points + 1 sample points
    x = np.linspace(x_min, x_max, num_points + 1)
    # np.vectorize lets this accept plain scalar lambdas as well as numpy-aware ones
    y = np.vectorize(function)(x)
    width = (x_max - x_min) / num_points
    # Sum of all trapezoid areas, fully vectorized
    return width * np.sum(y[:-1] + y[1:]) / 2

print(calculate_integral(lambda x: x ** 2, 0, 1)) # Should be close to 0.333
print(calculate_integral(lambda x: np.sin(x ** 2), 0, 5)) # Should be close to 0.528

# Let's apply our insight to finding the area of a circle. We know the equation of a circle is not a function (it's more like two functions). We can, however be clever. One way is to integrate both of the functions and add them together. Another way is to integrate one and double the area.
#
# **Note:** We're trying to find the total area of the circle, there is **no negative area** in this particular case.
#
# Another, even more clever way is to look at a quarter of the circle.
This is convenient because we may look at the quadrant where $x > 0$ and $y > 0$. So, we'll need to find the area between: # 1. $x \ge 0$ # 2. $y \ge 0$ # 3. The circle $x^2 + y^2 \le R^2$ (let's fix the radius to be 1) # # $\Rightarrow y = \sqrt{R^2 - x^2} = \sqrt{1 - x^2}$ # # After all of this, we'll need to multiply the result by 4. # # $$ S = 4 \int_0^1\sqrt{1 - x^2}dx $$ circle_piece_area = calculate_integral(lambda x: np.sqrt(1 - x ** 2), 0, 1) total_area = 4 * circle_piece_area print(total_area) # And we got something similar to $\pi$, which is the real answer. # # #### * Optional: Integration in polar coordinates # We can, however, do better. We know that a circle looks much simpler in polar coordinates. Let's now change our viewpoint. # # In polar coordinates $(r, \varphi)$, the equation of a circle is # # $$ r = R $$ # # which is our case simplifies to $r = 1$. Note there's no dependence on $\theta$: the radius is the same regardless of the angle. The boundaries for $\theta$ are every possible angle from $0$ to $2\pi$ radians. For $r$, we have $r \ge 0$ and $r \le R$. This translates to the integral: # # $$ S = \int_{\theta=0}^{2\pi}\int_{r=0}^R r dr d\theta $$ # # **Note:** We usually don't write the variables at the bottom of the integral symbol. I've done this just for clarity. # # I won't go into details but since there's no dependence on $\theta$, we can simply "separate the integrals" and multiply # them. # # $$ S = \int_{0}^{2\pi}d\theta .\int_{r=0}^R r dr $$ # # The first one is: # $$ I_1 = \int_0^R r dr = \left.\frac{r^2}{2}\right|_{0}^{R} = \frac{R^2}{2} - \frac{0^2}{2} = \frac{R^2}{2} $$ # # And the second one is: # $$ I_2 = \int_0^{2\pi}1d\theta = \left.\theta\right|_0^{2\pi} = 2\pi - 0 = 2\pi $$ # # $$ \Rightarrow S = I_1I_2 = 2\pi\frac{R^2}{2} = \pi R^2$$ # # $$ S = \pi R^2$$ # ### ** Problem 5. Taylor Series. Computing Function Value Approximations # Some functions can be really hard to compute, or even impossible. 
However, there are ways to approximate functions with other functions, which are simpler. The idea is the same as what we already used: we approximated the function with its tangent line to find the derivative at a given point. Research what Taylor series are and how they're used in approximating functions. Derive and implement the Taylor expansions of several functions. # * What is "function approximation"? # * Why can't we compute all functions numerically? # * What are some easy ways to approximate a function near a given point? # * Polynomial functions are very useful approximators. Why? Think about their continuity and derivatives # * What information does the first derivative of a function give us? # * How does that information generalize to higher-order derivatives (second, third, fourth, etc.)? # * What are Taylor series? Why does it work? # * Derive the formula # * What are Maclaurin series? What's the difference? # * Derive some Taylor series expansions. Some common functions are $\sin(x)$ and $\cos(x)$ # * To work with infinite sums, we have to "cut" them somewhere. Why are we allowed to do that? Think about the different terms: one Taylor expansion term doesn't affect the others # * How are those expansions used in calculators / numerical libraries? # * Write the code that computes the approximations # * Test the code # * How many terms do we need? # * Visualize how the approximation is getting better as we include more terms in the Taylor expansion # # ### ** 6. Calculus in Medical Data. Electrocardiography (EKG) # EKG data is basically a 1-dimensional function of time. It records electrical impulses which depend on the state of the heart. How can we analyze EKG data? # * What is EKG data? What is the physical process and what is its interpretation? # * Provide several EKG plots # * What are the main parts of an EKG? # * How does the concept of local extrema relate to EKG? # * How does the concept of "convex" / "concave" graphs relate to EKG? 
#
# You can take various paths with EKG data. Some companies, Samsung for example, use a heart rate sensor to measure stress and blood oxygen levels. An algorithm takes data for several seconds or minutes and analyzes it in several ways.
# * Optional: Research how you can use EKG or pulse sensor data to measure oxygen in the blood. Have a look [here](https://www.quora.com/How-do-optical-heart-rate-sensors-work) for example
# * Optional: How can you use the pulse sensor data to measure stress levels? There are many approaches to this one.
# * Optional: Many diseases can be diagnosed from EKG data. Can you train an algorithm to recognize a certain disease? This is related to *time series* analysis

# ### ** 7. Physics Engine Implementation
# Use your knowledge of vectors, algebra and calculus to create a physics engine. You can use this for many things. One example is computer games. Another example is computer simulation.
#
# Don't forget to provide details about your calculations and implementations.
# * Implement vectors and operations
# * Implement visualization
# * Implement Newtonian mechanics with forces as vectors acting on material points
# * Implement another physical process: e.g. collision detection. This should take into account momentum and the law of conservation of momentum
# * You can also implement other physical phenomena (e.g. heat transfer, reflection of light, etc.), as you need.
Calculus/Calculus Exercise.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + _cell_guid="47c31b0b-53a9-ee29-2e3f-ab39e6cc2d0e" # This Python 3 environment comes with many helpful analytics libraries installed # It is defined by the kaggle/python docker image: https://github.com/kaggle/docker-python # For example, here's several helpful packages to load in import numpy as np # linear algebra import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) # Input data files are available in the "../input/" directory. # For example, running this (by clicking run or pressing Shift+Enter) will list the files in the input directory from subprocess import check_output print(check_output(["ls", "../input"]).decode("utf8")) # Any results you write to the current directory are saved as output.
downloaded_kernels/university_rankings/kernel_155.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

clean_up=True # removes gams-related files in work-folder if true
# %run StdPackages.ipynb
os.chdir(directory['py'])
import Production,ShockFunction,ReadData
os.chdir(directory['curr'])

data_folder = os.getcwd()+'\\Data'
gams_folder = os.getcwd()+'\\gamsmodels\\A1'

# # A very simple input-displacing model
# We consider the simple case with:
# * Two technologies, using a combination of two fuels and capital in a leontief-nest.
# * Technology $1$ produces two goods $(u1,u2)$. Technology $2$ produces one good $(u3)$. This nest is CET (normalized).
# * $u1$ is used to produce a component $C1$, goods $(u2,u3)$ are combined as component $C2$. This is MNL (normalized).
# * Components $(C1,C2)$ are combined into one good $E$. This is CES.

# ## 1: Trees

# *Data file:*

data_file = 'TreeData.xlsx'

# *Main tree:*

nt = nesting_tree.nesting_tree(name='A1')

# *Add Trees:*

nt.add_tree(data_folder+'\\'+data_file,tree_name='T_inp',**{'sheet':'T'})
nt.add_tree(data_folder+'\\'+data_file,tree_name='T_out',**{'sheet':'U', 'type_io':'output','type_f':'CET_norm'})
nt.add_tree(data_folder+'\\'+data_file,tree_name='C',**{'sheet':'C', 'type_f':'MNL'})
nt.add_tree(data_folder+'\\'+data_file,tree_name='E',**{'sheet':'E', 'type_f': 'CES_norm'})
nt.run_all()

# *Read in data on variables as well:*

# Merge the variable sheets into the tree's database; a plain loop is clearer
# than a side-effect-only list comprehension.
for sheet in ('T','U','C','E'):
    DataBase.GPM_database.merge_dbs(nt.database,excel2py.xl2PM.pm_from_workbook(data_folder+'\\'+data_file,{sheet:'vars'}),'first')

# ## 2: Production module

gm = Production.pr_static(nt=nt,work_folder=directory['work'],**{'data_folder':gams_folder,'name':'A1'})
# BUGFIX: this lookup previously ran *before* gm was defined (NameError).
gm.get('map_all')
gm.write_and_run(kwargs_init={'check_variables':True})
db = gm.model_instances['baseline'].out_db
gm.model_instances['baseline'].modelstat,gm.model_instances['baseline'].solvestat
# Bar-plot the baseline solution: demanded quantities (qD) and output prices (PbT).
for symbol in ('qD', 'PbT'):
    db.get(symbol).plot.bar(figsize=(4, 3))
examples/Abatement/A2.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3.6 # language: python # name: python3 # --- # # Python Data Visualization Project # ## Importing necessary Modules # + import pandas as pd import matplotlib as mpt import matplotlib.pyplot as plt import numpy as np import seaborn as sns # %matplotlib inline # - # ## Question 1 # ### Read the data from the csv file filename = 'https://cocl.us/datascience_survey_data' df = pd.read_csv(filename) df.rename(columns={'Unnamed: 0':' '}, inplace =True) df.set_index(' ',inplace=True) df.head(7) # ## Question 2 # ### Bar chart # + #Sort the values df.sort_values(['Very interested'],ascending=False,axis = 0, inplace =True) #Percentage Values dummydf = df * (100/2233) dummydf = dummydf.round(2) dummydf.head() #Chart features barchart = dummydf.plot(kind='bar', figsize=(20,8),width=0.8,color=['#5cb85c','#5bc0de','#d9534f'],fontsize=14) barchart.set_title('Percentage of Respondents\' Interest in Data Science Areas',fontsize=16) barchart.legend(fontsize=14) barchart.axes.yaxis.set_visible(False) barchart.grid(False) barchart.set_facecolor('white') barchart.legend(facecolor='white', framealpha=1) for i in barchart.patches: barchart.text(i.get_x(),i.get_y()+(i.get_height()+1),str(i.get_height())+'%',fontsize=14) # - # ## Question 3 # ### Map plot data frame #Load the variable filename = 'https://cocl.us/sanfran_crime_dataset' df = pd.read_csv(filename) # + #Rescheduling of Variables temp = df.PdDistrict.value_counts() combine = pd.DataFrame(data=temp.values, index=temp.index, columns=['Count']) combine = combine.reindex(['CENTRAL','NORTHERN','PARK','SOUTHERN','MISSION','TENDERLOIN','RICHMOND','TARAVAL','INGLESIDE','BAYVIEW']) combine = combine.reset_index() combine.rename({'index':'Neighborhood'},axis='columns',inplace=True) combine.head(10) # - # ## Question 4 # ### map plot San Fransisco # + #load the 
json file # !wget --quiet https://cocl.us/sanfran_geojson -O sanfrancisco.json df_sanfran = r'sanfrancisco.json' # - #install and import folium maps # !pip install folium import folium # + #center map to sanfrancisco latitude = 37.7749 longitude = -122.4194 #start map zoom sanfran = folium.Map(location=[latitude,longitude],zoom_start=12) #Generate map based on data sanfran.choropleth(geo_data=df_sanfran, data=combine, columns=['Neighborhood', 'Count'], key_on='feature.properties.DISTRICT', fill_color='YlOrRd', fill_opacity=0.7, line_opacity=0.2, legend_name='Crime Rate in San Francisco') #show map sanfran # -
07_Data_Visualization_with_Python/Final Assignment/Data Visualization Python.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # ### N-gram language models or how to write scientific papers (4 pts) # # We shall train our language model on a corpora of [ArXiv](http://arxiv.org/) articles and see if we can generate a new one! # # ![img](https://media.npr.org/assets/img/2013/12/10/istock-18586699-monkey-computer_brick-16e5064d3378a14e0e4c2da08857efe03c04695e-s800-c85.jpg) # # _data by neelshah18 from [here](https://www.kaggle.com/neelshah18/arxivdataset/)_ # # _Disclaimer: this has nothing to do with actual science. But it's fun, so who cares?!_ import numpy as np import pandas as pd import matplotlib.pyplot as plt # %matplotlib inline # Alternative manual download link: https://yadi.sk/d/_nGyU2IajjR9-w # !wget "https://www.dropbox.com/s/99az9n1b57qkd9j/arxivData.json.tar.gz?dl=1" -O arxivData.json.tar.gz # !tar -xvzf arxivData.json.tar.gz data = pd.read_json("./arxivData.json") data.sample(n=5) # + # assemble lines: concatenate title and description lines = data.apply(lambda row: row['title'] + ' ; ' + row['summary'], axis=1).tolist() sorted(lines, key=len)[:3] # - # ### Tokenization # # You know the dril. The data is messy. Go clean the data. Use WordPunctTokenizer or something. # # + # Task: convert lines (in-place) into strings of space-separated tokens. import & use WordPunctTokenizer <YOUR CODE> lines = <...> # - assert sorted(lines, key=len)[0] == \ 'differential contrastive divergence ; this paper has been retracted .' assert sorted(lines, key=len)[2] == \ 'p = np ; we claim to resolve the p =? np problem via a formal argument for p = np .' # ### N-Gram Language Model # # A language model is a probabilistic model that estimates text probability: the joint probability of all tokens $w_t$ in text $X$: $P(X) = P(w_1, \dots, w_T)$. 
# # It can do so by following the chain rule: # $$P(w_1, \dots, w_T) = P(w_1)P(w_2 \mid w_1)\dots P(w_T \mid w_1, \dots, w_{T-1}).$$ # # The problem with such approach is that the final term $P(w_T \mid w_1, \dots, w_{T-1})$ depends on $n-1$ previous words. This probability is impractical to estimate for long texts, e.g. $T = 1000$. # # One popular approximation is to assume that next word only depends on a finite amount of previous words: # # $$P(w_t \mid w_1, \dots, w_{t - 1}) = P(w_t \mid w_{t - n + 1}, \dots, w_{t - 1})$$ # # Such model is called __n-gram language model__ where n is a parameter. For example, in 3-gram language model, each word only depends on 2 previous words. # # $$ # P(w_1, \dots, w_n) = \prod_t P(w_t \mid w_{t - n + 1}, \dots, w_{t - 1}). # $$ # # You can also sometimes see such approximation under the name of _n-th order markov assumption_. # The first stage to building such a model is counting all word occurences given N-1 previous words # + from tqdm import tqdm from collections import defaultdict, Counter # special tokens: # - unk represents absent tokens, # - eos is a special token after the end of sequence UNK, EOS = "_UNK_", "_EOS_" def count_ngrams(lines, n): """ Count how many times each word occured after (n - 1) previous words :param lines: an iterable of strings with space-separated tokens :returns: a dictionary { tuple(prefix_tokens): {next_token_1: count_1, next_token_2: count_2}} When building counts, please consider the following two edge cases - if prefix is shorter than (n - 1) tokens, it should be padded with UNK. For n=3, empty prefix: "" -> (UNK, UNK) short prefix: "the" -> (UNK, the) long prefix: "the new approach" -> (new, approach) - you should add a special token, EOS, at the end of each sequence "... with deep neural networks ." -> (..., with, deep, neural, networks, ., EOS) count the probability of this token just like all others. 
""" counts = defaultdict(Counter) # counts[(word1, word2)][word3] = how many times word3 occured after (word1, word2) <YOUR CODE> return counts # - # let's test it dummy_lines = sorted(lines, key=len)[:100] dummy_counts = count_ngrams(dummy_lines, n=3) assert set(map(len, dummy_counts.keys())) == {2}, "please only count {n-1}-grams" assert len(dummy_counts[('_UNK_', '_UNK_')]) == 78 assert dummy_counts['_UNK_', 'a']['note'] == 3 assert dummy_counts['p', '=']['np'] == 2 assert dummy_counts['author', '.']['_EOS_'] == 1 # Once we can count N-grams, we can build a probabilistic language model. # The simplest way to compute probabilities is in proporiton to counts: # # $$ P(w_t | prefix) = { Count(prefix, w_t) \over \sum_{\hat w} Count(prefix, \hat w) } $$ class NGramLanguageModel: def __init__(self, lines, n): """ Train a simple count-based language model: compute probabilities P(w_t | prefix) given ngram counts :param n: computes probability of next token given (n - 1) previous words :param lines: an iterable of strings with space-separated tokens """ assert n >= 1 self.n = n counts = count_ngrams(lines, self.n) # compute token proabilities given counts self.probs = defaultdict(Counter) # probs[(word1, word2)][word3] = P(word3 | word1, word2) # populate self.probs with actual probabilities <YOUR CODE> def get_possible_next_tokens(self, prefix): """ :param prefix: string with space-separated prefix tokens :returns: a dictionary {token : it's probability} for all tokens with positive probabilities """ prefix = prefix.split() prefix = prefix[max(0, len(prefix) - self.n + 1):] prefix = [ UNK ] * (self.n - 1 - len(prefix)) + prefix return self.probs[tuple(prefix)] def get_next_token_prob(self, prefix, next_token): """ :param prefix: string with space-separated prefix tokens :param next_token: the next token to predict probability for :returns: P(next_token|prefix) a single number, 0 <= P <= 1 """ return self.get_possible_next_tokens(prefix).get(next_token, 0) # Let's test 
it! # + dummy_lm = NGramLanguageModel(dummy_lines, n=3) p_initial = dummy_lm.get_possible_next_tokens('') # '' -> ['_UNK_', '_UNK_'] assert np.allclose(p_initial['learning'], 0.02) assert np.allclose(p_initial['a'], 0.13) assert np.allclose(p_initial.get('meow', 0), 0) assert np.allclose(sum(p_initial.values()), 1) p_a = dummy_lm.get_possible_next_tokens('a') # '' -> ['_UNK_', 'a'] assert np.allclose(p_a['machine'], 0.15384615) assert np.allclose(p_a['note'], 0.23076923) assert np.allclose(p_a.get('the', 0), 0) assert np.allclose(sum(p_a.values()), 1) assert np.allclose(dummy_lm.get_possible_next_tokens('a note')['on'], 1) assert dummy_lm.get_possible_next_tokens('a machine') == \ dummy_lm.get_possible_next_tokens("there have always been ghosts in a machine"), \ "your 3-gram model should only depend on 2 previous words" # - # Now that you've got a working n-gram language model, let's see what sequences it can generate. But first, let's train it on the whole dataset. lm = NGramLanguageModel(lines, n=3) # The process of generating sequences is... well, it's sequential. You maintain a list of tokens and iteratively add next token by sampling with probabilities. # # $ X = [] $ # # __forever:__ # * $w_{next} \sim P(w_{next} | X)$ # * $X = concat(X, w_{next})$ # # # Instead of sampling with probabilities, one can also try always taking most likely token, sampling among top-K most likely tokens or sampling with temperature. In the latter case (temperature), one samples from # # $$w_{next} \sim {P(w_{next} | X) ^ {1 / \tau} \over \sum_{\hat w} P(\hat w | X) ^ {1 / \tau}}$$ # # Where $\tau > 0$ is model temperature. If $\tau << 1$, more likely tokens will be sampled with even higher probability while less likely tokens will vanish. def get_next_token(lm, prefix, temperature=1.0): """ return next token after prefix; :param temperature: samples proportionally to lm probabilities ^ (1 / temperature) if temperature == 0, always takes most likely token. Break ties arbitrarily. 
""" <YOUR CODE> # + from collections import Counter test_freqs = Counter([get_next_token(lm, 'there have') for _ in range(10000)]) assert 250 < test_freqs['not'] < 450 assert 8500 < test_freqs['been'] < 9500 assert 1 < test_freqs['lately'] < 200 test_freqs = Counter([get_next_token(lm, 'deep', temperature=1.0) for _ in range(10000)]) assert 1500 < test_freqs['learning'] < 3000 test_freqs = Counter([get_next_token(lm, 'deep', temperature=0.5) for _ in range(10000)]) assert 8000 < test_freqs['learning'] < 9000 test_freqs = Counter([get_next_token(lm, 'deep', temperature=0.0) for _ in range(10000)]) assert test_freqs['learning'] == 10000 print("Looks nice!") # - # Let's have fun with this model # + prefix = 'artificial' # <- your ideas :) for i in range(100): prefix += ' ' + get_next_token(lm, prefix) if prefix.endswith(EOS) or len(lm.get_possible_next_tokens(prefix)) == 0: break print(prefix) # + prefix = 'bridging the' # <- more of your ideas for i in range(100): prefix += ' ' + get_next_token(lm, prefix, temperature=0.5) if prefix.endswith(EOS) or len(lm.get_possible_next_tokens(prefix)) == 0: break print(prefix) # - # ### Evaluating language models: perplexity # # Perplexity is a measure of how well does your model approximate true probability distribution behind data. __Smaller perplexity = better model__. # # To compute perplexity on one sentence, use: # $$ # {\mathbb{P}}(w_1 \dots w_N) = P(w_1, \dots, w_N)^{-\frac1N} = \left( \prod_t P(w_t \mid w_{t - n}, \dots, w_{t - 1})\right)^{-\frac1N}, # $$ # # # On the corpora level, perplexity is a product of probabilities of all tokens in all sentences to the power of 1, divided by __total length of all sentences__ in corpora. # # This number can quickly get too small for float32/float64 precision, so we recommend you to first compute log-perplexity (from log-probabilities) and then take the exponent. 
def perplexity(lm, lines, min_logprob=np.log(10 ** -50.)): """ :param lines: a list of strings with space-separated tokens :param min_logprob: if log(P(w | ...)) is smaller than min_logprop, set it equal to min_logrob :returns: corpora-level perplexity - a single scalar number from the formula above Note: do not forget to compute P(w_first | empty) and P(eos | full_sequence) PLEASE USE lm.get_next_token_prob and NOT lm.get_possible_next_tokens """ <YOUR CODE> return <...> # + lm1 = NGramLanguageModel(dummy_lines, n=1) lm3 = NGramLanguageModel(dummy_lines, n=3) lm10 = NGramLanguageModel(dummy_lines, n=10) ppx1 = perplexity(lm1, dummy_lines) ppx3 = perplexity(lm3, dummy_lines) ppx10 = perplexity(lm10, dummy_lines) ppx_missing = perplexity(lm3, ['the jabberwock , with eyes of flame , ']) # thanks, <NAME> print("Perplexities: ppx1=%.3f ppx3=%.3f ppx10=%.3f" % (ppx1, ppx3, ppx10)) assert all(0 < ppx < 500 for ppx in (ppx1, ppx3, ppx10)), "perplexity should be nonnegative and reasonably small" assert ppx1 > ppx3 > ppx10, "higher N models should overfit and " assert np.isfinite(ppx_missing) and ppx_missing > 10 ** 6, "missing words should have large but finite perplexity. " \ " Make sure you use min_logprob right" assert np.allclose([ppx1, ppx3, ppx10], (318.2132342216302, 1.5199996213739575, 1.1838145037901249)) # - # Now let's measure the actual perplexity: we'll split the data into train and test and score model on test data only. # + from sklearn.model_selection import train_test_split train_lines, test_lines = train_test_split(lines, test_size=0.25, random_state=42) for n in (1, 2, 3): lm = NGramLanguageModel(n=n, lines=train_lines) ppx = perplexity(lm, test_lines) print("N = %i, Perplexity = %.5f" % (n, ppx)) # + # whoops, it just blew up :) # - # ### LM Smoothing # # The problem with our simple language model is that whenever it encounters an n-gram it has never seen before, it assigns it with the probabilitiy of 0. Every time this happens, perplexity explodes. 
# # To battle this issue, there's a technique called __smoothing__. The core idea is to modify counts in a way that prevents probabilities from getting too low. The simplest algorithm here is Additive smoothing (aka [Lapace smoothing](https://en.wikipedia.org/wiki/Additive_smoothing)): # # $$ P(w_t | prefix) = { Count(prefix, w_t) + \delta \over \sum_{\hat w} (Count(prefix, \hat w) + \delta) } $$ # # If counts for a given prefix are low, additive smoothing will adjust probabilities to a more uniform distribution. Not that the summation in the denominator goes over _all words in the vocabulary_. # # Here's an example code we've implemented for you: class LaplaceLanguageModel(NGramLanguageModel): """ this code is an example, no need to change anything """ def __init__(self, lines, n, delta=1.0): self.n = n counts = count_ngrams(lines, self.n) self.vocab = set(token for token_counts in counts.values() for token in token_counts) self.probs = defaultdict(Counter) for prefix in counts: token_counts = counts[prefix] total_count = sum(token_counts.values()) + delta * len(self.vocab) self.probs[prefix] = {token: (token_counts[token] + delta) / total_count for token in token_counts} def get_possible_next_tokens(self, prefix): token_probs = super().get_possible_next_tokens(prefix) missing_prob_total = 1.0 - sum(token_probs.values()) missing_prob = missing_prob_total / max(1, len(self.vocab) - len(token_probs)) return {token: token_probs.get(token, missing_prob) for token in self.vocab} def get_next_token_prob(self, prefix, next_token): token_probs = super().get_possible_next_tokens(prefix) if next_token in token_probs: return token_probs[next_token] else: missing_prob_total = 1.0 - sum(token_probs.values()) missing_prob_total = max(0, missing_prob_total) # prevent rounding errors return missing_prob_total / max(1, len(self.vocab) - len(token_probs)) #test that it's a valid probability model for n in (1, 2, 3): dummy_lm = LaplaceLanguageModel(dummy_lines, n=n) assert 
np.allclose(sum([dummy_lm.get_next_token_prob('a', w_i) for w_i in dummy_lm.vocab]), 1), "I told you not to break anything! :)" for n in (1, 2, 3): lm = LaplaceLanguageModel(train_lines, n=n, delta=0.1) ppx = perplexity(lm, test_lines) print("N = %i, Perplexity = %.5f" % (n, ppx)) # + # optional: try to sample tokens from such a model # - # ### Kneser-Ney smoothing # # Additive smoothing is simple, reasonably good but definitely not a State of The Art algorithm. # # # Your final task in this notebook is to implement [Kneser-Ney](https://en.wikipedia.org/wiki/Kneser%E2%80%93Ney_smoothing) smoothing. # # It can be computed recurrently, for n>1: # # $$P_{kn}(w_t | prefix_{n-1}) = { \max(0, Count(prefix_{n-1}, w_t) - \delta) \over \sum_{\hat w} Count(prefix_{n-1}, \hat w)} + \lambda_{prefix_{n-1}} \cdot P_{kn}(w_t | prefix_{n-2})$$ # # where # - $prefix_{n-1}$ is a tuple of {n-1} previous tokens # - $lambda_{prefix_{n-1}}$ is a normalization constant chosen so that probabilities add up to 1 # - Unigram $P_{kn}(w_t | prefix_{n-2})$ corresponds to Kneser Ney smoothing for {N-1}-gram language model. # - Unigram $P_{kn}(w_t)$ is a special case: how likely it is to see x_t in an unfamiliar context # # See lecture slides or wiki for more detailed formulae. # # __Your task__ is to # - implement KneserNeyLanguageModel # - test it on 1-3 gram language models # - find optimal (within one order of magnitude) smoothing delta for 3-gram language model with Kneser-Ney smoothing class KneserNeyLanguageModel(NGramLanguageModel): """ A template for Kneser-Ney language model. Default delta may be suboptimal. 
""" def __init__(self, lines, n, delta=5.0): self.n = n <YOUR CODE> def get_possible_next_tokens(self, prefix): < YOUR CODE > def get_next_token_prob(self, prefix, next_token): <YOUR CODE> #test that it's a valid probability model for n in (1, 2, 3): dummy_lm = KneserNeyLanguageModel(dummy_lines, n=n) assert np.allclose(sum([dummy_lm.get_next_token_prob('a', w_i) for w_i in dummy_lm.vocab]), 1), "I told you not to break anything! :)" for n in (1, 2, 3): lm = KneserNeyLanguageModel(train_lines, n=n, smoothing=<...>) ppx = perplexity(lm, test_lines) print("N = %i, Perplexity = %.5f" % (n, ppx))
week03_lm/seminar.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # 22 - Model Deployment # # by [<NAME>](albahnsen.com/) # # version 0.1, May 2016 # # ## Part of the class [Practical Machine Learning](https://github.com/albahnsen/PracticalMachineLearningClass) # # # # This notebook is licensed under a [Creative Commons Attribution-ShareAlike 3.0 Unported License] # ## Agenda: # # 1. Creating and saving a model # 2. Running the model in batch # 3. Exposing the model as an API # ## Part 1: Phishing Detection # # Phishing, by definition, is the act of defrauding an online user in order to obtain personal information by posing as a trustworthy institution or entity. Users usually have a hard time differentiating between legitimate and malicious sites because they are made to look exactly the same. Therefore, there is a need to create better tools to combat attackers. import pandas as pd import zipfile with zipfile.ZipFile('phishing.csv.zip', 'r') as z: f = z.open('phishing.csv') data = pd.read_csv(f, index_col=False) data.head() data.phishing.value_counts() # ### Creating features data.url[data.phishing==1].sample(50, random_state=1).tolist() # Contain any of the following: # * https # * login # * .php # * .html # * @ # * sign # * ? keywords = ['https', 'login', '.php', '.html', '@', 'sign'] for keyword in keywords: data['keyword_' + keyword] = data.url.str.contains(keyword).astype(int) # * Lenght of the url # * Lenght of domain # * is IP? 
# * Number of .com

data['lenght'] = data.url.str.len() - 2

# domain = third '/'-separated component of the URL ('http://host/...' -> 'host')
domain = data.url.str.split('/', expand=True).iloc[:, 2]
data['lenght_domain'] = domain.str.len()

domain.head(12)

# A host counts as an IP address if it is all digits once the literal dots are
# removed. regex=False makes '.' a literal dot (with regex semantics '.' would
# match — and strip — every character, making this feature always 0).
# Must stay in sync with the serving-time feature code in Part 3.
data['isIP'] = (domain.str.replace('.', '', regex=False) * 1).str.isnumeric().astype(int)

data['count_com'] = data.url.str.count('com')

data.sample(15, random_state=4)

# ### Create Model

X = data.drop(['url', 'phishing'], axis=1)
y = data.phishing

from sklearn.ensemble import RandomForestClassifier
# sklearn.cross_validation was removed in scikit-learn 0.20;
# cross_val_score now lives in sklearn.model_selection.
from sklearn.model_selection import cross_val_score

clf = RandomForestClassifier(n_jobs=-1, n_estimators=100)

cross_val_score(clf, X, y, cv=10)

clf.fit(X, y)

# ### Save model

# sklearn.externals.joblib was removed in scikit-learn 0.23;
# use the standalone joblib package instead.
import joblib
joblib.dump(clf, '22_clf_rf.pkl', compress=3)

# ## Part 2: Model in batch
#
# See 22_model_deployment.py

from m22_model_deployment import predict_proba

predict_proba('http://www.vipturismolondres.com/com.br/?atendimento=Cliente&/LgSgkszm64/B8aNzHa8Aj.php')

# ## Part 3: API
#
# Flask is considered more Pythonic than Django because Flask web application code is in most cases more explicit. Flask is easy to get started with as a beginner because there is little boilerplate code for getting a simple app up and running.
# First we need to install some libraries
#
# ```
# pip install flask-restplus
# ```

# Load Flask
from flask import Flask
# The flask.ext.* namespace was removed in Flask 1.0;
# flask-restplus is imported directly as flask_restplus.
from flask_restplus import Api, Resource, fields
# sklearn.externals.joblib was removed in scikit-learn 0.23; models saved with
# it load fine with the standalone joblib package.
import joblib
import pandas as pd

# Create api

# +
app = Flask(__name__)
api = Api(
    app,
    version='1.0',
    title='Phishing Prediction API',
    description='Phishing Prediction API')

ns = api.namespace('predict', description='Phishing Classifier')

# Single required query-string argument: the URL to score.
parser = api.parser()
parser.add_argument(
    'URL',
    type=str,
    required=True,
    help='URL to be analyzed',
    location='args')

resource_fields = api.model('Resource', {
    'result': fields.String,
})
# -

# Load model and create function that predicts an URL

# +
clf = joblib.load('22_clf_rf.pkl')


@ns.route('/')
class PhishingApi(Resource):
    """GET /predict/?URL=... -> {'result': P(phishing)} for the given URL."""

    @api.doc(parser=parser)
    @api.marshal_with(resource_fields)
    def get(self):
        args = parser.parse_args()
        result = self.predict_proba(args)
        return result, 200

    def predict_proba(self, args):
        # Rebuild the exact feature set used at training time (Part 1).
        url = args['URL']
        url_ = pd.DataFrame([url], columns=['url'])

        # Create features
        keywords = ['https', 'login', '.php', '.html', '@', 'sign']
        for keyword in keywords:
            url_['keyword_' + keyword] = url_.url.str.contains(keyword).astype(int)

        url_['lenght'] = url_.url.str.len() - 2
        domain = url_.url.str.split('/', expand=True).iloc[:, 2]
        url_['lenght_domain'] = domain.str.len()
        # regex=False: remove literal dots only — must match the training-time
        # feature code in Part 1.
        url_['isIP'] = (domain.str.replace('.', '', regex=False) * 1).str.isnumeric().astype(int)
        url_['count_com'] = url_.url.str.count('com')

        # Make prediction
        p1 = clf.predict_proba(url_.drop('url', axis=1))[0,1]
        print('url=', url,'| p1=', p1)
        return {
            "result": p1
        }
# -

# Run API
# NOTE(review): debug=True combined with host='0.0.0.0' exposes the Werkzeug
# debugger to the whole network; disable debug outside local development.
app.run(debug=True, use_reloader=False, host='0.0.0.0', port=5000)

# Check using
#
# * http://localhost:5000/predict/?URL=http://consultoriojuridico.co/pp/www.paypal.com/
#
notebooks/22-ModelDeployment.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# Demo of the `os` module: inspecting the working directory and walking a tree.

# import module
import os

# Get the current working directory
os.getcwd()

# List the items in CWD
os.listdir()

# change CWD
os.chdir('I&O')

os.getcwd()

# check is a file
# Raw strings keep the backslashes literal; the original non-raw strings relied
# on sequences like '\A' happening to be invalid escapes, which emit a
# DeprecationWarning and will eventually be a SyntaxError.
path = r"E:\AI using python\Python programs\OpenCV\demo.mp4"
os.path.isfile(path)

# similarly you can check is directory or not
os.path.isdir(path)

# traverse all directories recursively
path = r"E:\AI using python\Python programs"
for root, dirs, files in os.walk(path):
    print(root)   # print root directory
    print(dirs)   # prints all directories in root directory
    print(files)  # prints all files
    print("***************************")
python/OS module.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 (ipykernel) # language: python # name: python3 # --- # + [markdown] tags=[] # # Network Interpretability of Lung X-rays # # In this tutorial, we demonstrate visualizing network interpretability through a classification task. We will make use of [MONAI](https://monai.io/), a PyTorch-based deep learning framework for medical imaging. Specifically, we'll adapt of one of the existing [tutorials](https://github.com/Project-MONAI/tutorials) and show how the [xaitk-saliency](https://github.com/XAITK/xaitk-saliency) package can complement the current interpretability functionality in `MONAI`. # # The data are a set of X-rays collated from a variety of sources. The labels used are: # - normal (the absence of the following classes) # - pneumonia # - covid # # Using the `MONAI` package, we will demo the use of GradCam and occlusion sensitivity to interpret the trained network's classification choices. Using the `xaitk-saliency` package, we will demo the use of occlusion sensitivity with both sliding window and randomized input sampling ([RISE](https://cs-people.bu.edu/vpetsiuk/rise/)) perturbation. 
# # ### Table of Contents # * [Set Up Environment](#Set-Up-Environment-covid) # * [Download Data](#Download-Data-covid) # * [Load Images](#Load-Images-covid) # * [Training](#Training-covid) # * [Interpretability Using MONAI](#Interpretability-Using-MONAI) # * [Interpretability Using xaitk-saliency](#Interpretability-Using-xaitk-saliency) # # <br> # # To run this notebook in Colab, use the link below: # # [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/XAITK/xaitk-saliency/blob/master/examples/covid_classification.ipynb) # - # # Set Up Environment <a name="Set-Up-Environment-covid"></a> # + tags=[] # !pip install -qU pip # !pip install -q "monai==0.6.0" # !pip install -q "xaitk-saliency" # + import os from glob import glob from enum import Enum import numpy as np import torch import random import matplotlib.pyplot as plt from sklearn.metrics import ( classification_report, confusion_matrix, ConfusionMatrixDisplay ) import monai from monai.data import decollate_batch from monai.networks.utils import eval_mode from monai.networks.nets import DenseNet121 from monai.transforms import ( Activations, AsDiscrete, Compose, LoadImage, Lambda, AddChannel, ScaleIntensity, EnsureType, RandRotate, RandFlip, Rand2DElastic, RandZoom, Resize, ) from monai.apps import download_and_extract monai.config.print_config() monai.utils.set_determinism() device = torch.device("cuda" if torch.cuda.is_available() else "cpu") # - # # Download Data <a name="Download-Data-covid"></a> # # The data is currently hosted on [Kaggle](https://www.kaggle.com/ericspod/project-monai-2020-bootcamp-challenge-dataset) and [Zenodo](https://zenodo.org/record/4621066). Here, we'll download the data from Zenodo. For simplicity, we'll only use the images in the training folder. 
# + root_dir = 'monai-example' train_url = "https://zenodo.org/record/4621066/files/training_data.zip?download=1" train_md5 = "3e8d3e6ca43903ead0666eb6ec8849d8" train_zip = os.path.join(root_dir, "covid_train.zip") train_dir = os.path.join(root_dir, "covid") download_and_extract(train_url, train_zip, train_dir, train_md5) # - # # Load Images <a name="Load-Images-covid"></a> # + crop_size = (320, 320) # set size of images for network class Diagnosis(Enum): normal = 0 pneumonia = 1 covid = 2 num_class = len(Diagnosis) def get_label(path): fname = os.path.basename(path) if fname[:6] == "normal": return Diagnosis.normal.value elif fname[:9] == "pneumonia": return Diagnosis.pneumonia.value elif fname[:5] == "covid": return Diagnosis.covid.value else: raise RuntimeError(f"Unknown label: {path}") class CovidImageDataset(torch.utils.data.Dataset): def __init__(self, files, transforms, even_balance=True): self.image_files = files self.labels = list(map(get_label, self.image_files)) self.transforms = transforms # For even balance, find out which diagnosis has the fewest images # and then get that many of each diagnosis if even_balance: # fewest images of any diagnosis num_to_keep = min(self.labels.count(i.value) for i in Diagnosis) print(f"num to keep per class: {num_to_keep}") self.image_files = [] for d in Diagnosis: files_for_diagnosis = \ [file for file in files if get_label(file) == d.value] self.image_files += files_for_diagnosis[:num_to_keep] random.shuffle(self.image_files) self.labels = list(map(get_label, self.image_files)) def __len__(self): return len(self.image_files) def __getitem__(self, index): return self.transforms(self.image_files[index]), self.labels[index] train_transforms = Compose([ LoadImage(image_only=True), Lambda(lambda im: im if im.ndim == 2 else im[..., 0]), AddChannel(), Resize(spatial_size=crop_size, mode="area"), ScaleIntensity(), RandRotate(range_x=15, prob=0.5, keep_size=True), RandFlip(spatial_axis=0, prob=0.5), Rand2DElastic((0.3, 0.3), 
(1.0, 2.0)), RandZoom(min_zoom=0.9, max_zoom=1.1, prob=0.5), EnsureType(), ]) val_transforms = Compose([ LoadImage(image_only=True), Lambda(lambda im: im if im.ndim == 2 else im[..., 0]), AddChannel(), Resize(spatial_size=crop_size, mode="area"), ScaleIntensity(), EnsureType(), ]) y_pred_trans = Compose([EnsureType(), Activations(softmax=True)]) y_trans = Compose([EnsureType(), AsDiscrete(to_onehot=True, n_classes=num_class)]) all_files = glob(os.path.join(train_dir, "*.png")) random.shuffle(all_files) train_frac = 0.9 num_training_files = round(train_frac * len(all_files)) train_files = all_files[:num_training_files] val_files = all_files[num_training_files:] batch_size = 10 train_ds = CovidImageDataset(train_files, train_transforms, False) train_loader = torch.utils.data.DataLoader( train_ds, batch_size=batch_size, shuffle=True, num_workers=10) val_ds = CovidImageDataset(val_files, val_transforms, False) val_loader = torch.utils.data.DataLoader( val_ds, batch_size=batch_size, shuffle=True, num_workers=10) # + # Use JPEG format for inline visualizations here. 
# %config InlineBackend.figure_format = "jpeg" # Display examples fig, axes = plt.subplots(1, 3, figsize=(20, 12), facecolor='white') for true_label in Diagnosis: fnames = [v for v in val_files if true_label.name in os.path.basename(v)] random.shuffle(fnames) fname = fnames[0] im = val_transforms(fname) ax = axes[true_label.value] im_show = ax.imshow(im[0], cmap='gray') ax.set_title(os.path.basename(fname), fontsize=25) ax.axis('off') # - # # Training <a name="Training-covid"></a> def create_new_net(): return DenseNet121( spatial_dims=2, in_channels=1, out_channels=num_class ).to(device) # + # %matplotlib notebook max_epochs = 30 val_interval = 1 lr = 1e-5 epoch_loss_values = [] auc = [] acc = [] best_acc = -1 net = create_new_net() loss = torch.nn.CrossEntropyLoss() opt = torch.optim.Adam(net.parameters(), lr) auc_metric = monai.metrics.ROCAUCMetric() # Plotting stuff fig, ax = plt.subplots(1, 1, facecolor='white') ax.set_xlabel('Epoch') ax.set_ylabel('Metrics') plt.ion() fig.show() fig.canvas.draw() for epoch in range(max_epochs): net.train() epoch_loss = 0 for batch_data in train_loader: inputs, labels = batch_data[0].to(device), batch_data[1].to(device) opt.zero_grad() outputs = net(inputs) lossval = loss(outputs, labels) lossval.backward() opt.step() epoch_loss += lossval.item() epoch_loss /= len(train_loader) epoch_loss_values.append(epoch_loss) if (epoch + 1) % val_interval == 0: with eval_mode(net): y_pred = torch.tensor([], dtype=torch.float32, device=device) y = torch.tensor([], dtype=torch.long, device=device) for val_data in val_loader: val_images, val_labels = val_data[0].to( device), val_data[1].to(device), outputs = net(val_images) y_pred = torch.cat([y_pred, outputs], dim=0) y = torch.cat([y, val_labels], dim=0) y_onehot = [y_trans(i) for i in decollate_batch(y)] y_pred_act = [y_pred_trans(i) for i in decollate_batch(y_pred)] auc_metric(y_pred_act, y_onehot) del y_pred_act, y_onehot auc_result = auc_metric.aggregate() auc_metric.reset() 
auc.append(auc_result) acc_value = torch.eq(y_pred.argmax(dim=1), y) acc_metric = acc_value.sum().item() / len(acc_value) acc.append(acc_metric) if acc_metric > best_acc: best_acc = acc_metric torch.save(net.state_dict(), os.path.join(root_dir, "best_acc_covid_tutorial.pth")) ax.clear() train_epochs = np.linspace(1, epoch + 1, epoch + 1) ax.plot(train_epochs, epoch_loss_values, label='Avg. loss') val_epochs = np.linspace( 1, epoch + 1, np.floor( (epoch + 1) / val_interval).astype(np.int32)) ax.plot(val_epochs, acc, label='ACC') ax.plot(val_epochs, auc, label='AUC') ax.set_xlabel('Epoch') ax.set_ylabel('Metrics') ax.legend() fig.canvas.draw() # + # %matplotlib inline # Load best model net.load_state_dict(torch.load(os.path.join(root_dir, "best_acc_covid_tutorial.pth"))) net.to(device) net.eval() with eval_mode(net): y_pred = torch.tensor([], dtype=torch.float32, device=device) y = torch.tensor([], dtype=torch.long, device=device) for val_data in val_loader: val_images, val_labels = val_data[0].to( device), val_data[1].to(device), outputs = net(val_images) y_pred = torch.cat([y_pred, outputs.argmax(dim=1)], dim=0) y = torch.cat([y, val_labels], dim=0) print(classification_report( y.cpu().numpy(), y_pred.cpu().numpy(), target_names=[d.name for d in Diagnosis])) cm = confusion_matrix( y.cpu().numpy(), y_pred.cpu().numpy(), normalize='true', ) disp = ConfusionMatrixDisplay( confusion_matrix=cm, display_labels=[d.name for d in Diagnosis], ) disp.plot(ax=plt.subplots(1, 1, facecolor='white')[1]) # - # # Interpretability Using MONAI # # Use GradCAM and occlusion sensitivity for network interpretability. # # The occlusion sensitivity returns two images: the sensitivity image and the most probable class. # # * Sensitivity image -- how the probability of an inferred class changes as the corresponding part of the image is occluded. # * Big decreases in the probability imply that that region was important in inferring the given class. 
# * The output is the same as the input, with an extra dimension of size N appended. Here, N is the number of inferred classes. To then see the sensitivity image of the class we're interested in (maybe the true class, maybe the predcited class, maybe anything else), we simply do ``im[...,i]``. # * Most probable class -- if that part of the image is covered up, does the predicted class change, and if so, to what? # # In this example, the network has been sufficiently trained that the predicted class doesn't change as parts of the image are occluded (so we don't show the most probable class maps). However, this feature might be useful when the results are less than satisfactory. # + # for name, _ in net.named_modules(): print(name) target_layer = "class_layers.relu" gradcam = monai.visualize.GradCAM(nn_module=net, target_layers=target_layer) occ_sens = monai.visualize.OcclusionSensitivity( nn_module=net, mask_size=10, n_batch=batch_size, stride=10) # - # We can now visualize the computed saliency maps for each image. # + # Display examples subplot_shape = [3, num_class] fig, axes = plt.subplots(*subplot_shape, figsize=(20, 12), facecolor='white') for true_label in Diagnosis: fnames = [v for v in val_files if true_label.name in os.path.basename(v)] random.shuffle(fnames) # Find a correctly predicted example for fname in fnames: img = val_transforms(fname)[None].to(device) y_pred = net(img) pred_label = Diagnosis(y_pred.argmax(1).item()) if pred_label == true_label: break im_title = f"{os.path.basename(fname)}\npredicted as {pred_label.name}" for d in Diagnosis: im_title += f"\n{d.name}: {y_pred[0,d.value]:.3}" res_cam = gradcam(x=img, class_idx=true_label.value) occ_map, occ_most_prob = occ_sens(x=img) occ_map = occ_map[..., true_label.value] # the rest is for visualisations for row, (im, title) in enumerate(zip( [img, res_cam, occ_map], [im_title, "CAM", "Occ. 
sens."], )): cmap = 'gray' if row == 0 else 'jet' col = true_label.value ax = axes[row, col] if isinstance(im, torch.Tensor): im = im.detach().cpu() im_show = ax.imshow(im[0][0], cmap=cmap) ax.set_title(title, fontsize=25) ax.axis('off') fig.colorbar(im_show, ax=ax) # - # --- # # Interpretability Using ``xaitk-saliency`` # # Using the ``xaitk-saliency`` package, we can also compute occlusion-based saliency using either a sliding window approach (similar to the method provided by `MONAI`) or the [RISE](https://arxiv.org/abs/1806.07421) saliency algorithm. Here, we will demo the ability to switch out the saliency algorithm. # + from smqtk_classifier import ClassifyImage from xaitk_saliency.impls.gen_image_classifier_blackbox_sal.rise import RISEStack from xaitk_saliency.impls.gen_image_classifier_blackbox_sal.slidingwindow import SlidingWindowStack gen_slidingwindow = SlidingWindowStack((50, 50), (20, 20), threads=4) gen_rise = RISEStack(1000, 8, 0.5, seed=0, threads=4, debiased=False) # - # We will wrap the COVID model in smqtk-classifier's `ClassifyImage` interface for standardized classifier operation with our API. # + class COVIDModel(ClassifyImage): """ Blackbox model based on smqtk-classifier's ClassifyImage. """ def get_labels(self): return list(Diagnosis.__members__) @torch.no_grad() def classify_images(self, image_iter): # Input may either be an NDaray, or some arbitrary iterable of NDarray images. for img in image_iter: img = val_transforms(img)[None].to(device) y_pred = net(img) # Converting feature extractor output to probabilities. class_conf = torch.nn.functional.softmax(y_pred, dim=1).cpu().detach().numpy().squeeze() # Only return the confidences for the focus classes yield dict(zip(self.get_labels(), class_conf)) def get_config(self): # Required by a parent class. 
return {} blackbox_classifier = COVIDModel() # - # Redefine val_transforms here to be able to take in perturbed images if needed val_transforms = Compose([ Lambda(lambda s: LoadImage(image_only=True)(s) if isinstance(s, str) else np.array(s)), Lambda(lambda im: im if im.ndim == 2 else im[..., 0]), AddChannel(), Resize(spatial_size=crop_size, mode="area"), ScaleIntensity(), EnsureType(), ]) # Similarly, we'll visualize the saliency maps for each image. # + # Display examples subplot_shape = [3, num_class] fig, axes = plt.subplots(*subplot_shape, figsize=(20, 12), facecolor='white') for true_label in Diagnosis: fnames = [v for v in val_files if true_label.name in os.path.basename(v)] random.shuffle(fnames) # Find a correctly predicted example for fname in fnames: img = val_transforms(fname)[None].to(device) y_pred = net(img) pred_label = Diagnosis(y_pred.argmax(1).item()) if pred_label == true_label: break im_title = f"{os.path.basename(fname)}\npredicted as {pred_label.name}" for d in Diagnosis: im_title += f"\n{d.name}: {y_pred[0,d.value]:.3}" # Generate saliency maps ref_image = img.cpu().numpy().squeeze() sw_sal_map = gen_slidingwindow(ref_image, blackbox_classifier)[true_label.value] rise_sal_map = gen_rise(ref_image, blackbox_classifier)[true_label.value] # the rest is for visualisations for row, (im, title) in enumerate(zip( [img[0][0], sw_sal_map, rise_sal_map], [im_title, "Sliding Window", "RISE"], )): cmap = 'gray' if row == 0 else 'jet' col = true_label.value ax = axes[row, col] if isinstance(im, torch.Tensor): im = im.detach().cpu() im_show = ax.imshow(im, cmap=cmap) ax.set_title(title, fontsize=25) ax.axis('off') fig.colorbar(im_show, ax=ax)
examples/covid_classification.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # name: python3 # --- # + id="m73cebXjdUDB" import pandas as pd, numpy as np, os, warnings, seaborn as sns, matplotlib.pyplot as plt, matplotlib from datetime import datetime warnings.simplefilter(action='ignore', category=FutureWarning) pd.options.mode.chained_assignment = None get_ipython().run_line_magic('matplotlib', 'inline') plt.style.use('seaborn') sns.set_color_codes('colorblind') matplotlib.rcParams.update({'font.size': 14}) matplotlib.rcParams.update({'xtick.labelsize':16}) matplotlib.rcParams.update({'ytick.labelsize':16}) matplotlib.rcParams.update({'axes.labelsize':16}) matplotlib.rcParams.update({'axes.titlesize':20}) matplotlib.rcParams.update({'legend.fontsize': 16}) sns.set_style('white') # + id="fSAtRRFrdpPU" url = '/content/Winters-Attribution-PS3.csv' df = pd.read_csv(url) # + colab={"base_uri": "https://localhost:8080/", "height": 203} id="E3uPjjabeJm-" outputId="0ba38616-e2ad-4f1a-b5ca-f90f2d08de4f" df.head(5) # + colab={"base_uri": "https://localhost:8080/"} id="jAQomq-yZdWt" outputId="fd9fa413-f48f-4c84-a239-2a1393c73501" df.shape # + id="WKgnVPd1e8CM" df = df[['Orderid', 'Orderdatetime', 'Saleamount', 'Newcustomer', 'Position', 'Positiondatetime', 'Groupname', 'Networkname', 'Networkid', 'Brand', 'Positionname', 'DaysToConvert']] # + colab={"base_uri": "https://localhost:8080/"} id="5Y7lUJl-eXys" outputId="1a164b80-a639-48ba-db29-9925da2e6a2c" print('Time range: ', df['Orderdatetime'].min(), 'to', df['Orderdatetime'].max()) print('Number of touchpoints:', len(df)) print('Number of orders:', len(df['Orderid'].unique())) print('Number of touchpoints per order:', np.round(len(df)/len(df['Orderid'].unique()), 2)) # + colab={"base_uri": "https://localhost:8080/"} id="UH9_Hzw6euSE" outputId="e7453c7d-c826-4e34-dfb9-8cea1bec0ff9" df.Positionname.unique() # + [markdown] 
id="j6i4sWY1faNP" # **Q1. (30 pts) Compare first-touch vs. last-touch attribution models** # # **a) (10 pts) What is the number of orders attributed to each channel using a last-touch model? What about the number of orders attributed to each channel using a first-touch model? What is the corresponding share of credit from the two attribution models?** # + colab={"base_uri": "https://localhost:8080/", "height": 327} id="jhv7lSeWfedS" outputId="35727647-2541-40db-8d5c-3bd460ee810f" # calculating the count per channel by last touch num_order = len(df['Orderid'].unique()) T_last_count = pd.DataFrame(df.loc[df['Positionname']=='CONVERTER', 'Groupname'].value_counts()).reset_index().sort_values('index') np.round(T_last_count,3) # + colab={"base_uri": "https://localhost:8080/", "height": 327} id="ufccavwngq_8" outputId="439c6411-0611-4ab8-8bde-88432457a348" # calculating the percentage per channel by last touch num_order = len(df['Orderid'].unique()) T_last_percent = pd.DataFrame((df.loc[df['Positionname']=='CONVERTER', 'Groupname'].value_counts()/num_order)*100).reset_index().sort_values('index') np.round(T_last_percent,3) # + colab={"base_uri": "https://localhost:8080/", "height": 450} id="2AofZwTchCLP" outputId="a93a420b-81b3-4d16-c620-27fa2fa7d200" # calculating the count per channel by first touch num_order = len(df['Orderid'].unique()) T_first_count = pd.DataFrame(df.loc[df['Positionname']=='ORIGINATOR', 'Groupname'].value_counts()).reset_index().sort_values('index') np.round(T_first_count,3) # + colab={"base_uri": "https://localhost:8080/", "height": 450} id="9_6M7eAff4Ws" outputId="7dd17685-b8fb-44c7-f41f-b6a0636be6e1" # calculating the percentage per channel by first touch num_order = len(df['Orderid'].unique()) T_first_percent = pd.DataFrame(df.loc[df['Positionname']=='ORIGINATOR', 'Groupname'].value_counts()/num_order *100).reset_index().sort_values('index') np.round(T_first_percent,3) # + id="RnhXlBwxf7D7" # merging all 4 tables merge_1 = 
np.round(pd.merge(T_last_count, T_first_count, how = 'outer', on = 'index').fillna(0).rename(
    columns={"Groupname_x": "Converter", "Groupname_y": "Originator"}), 3)
merge_2= np.round(pd.merge(T_last_percent, T_first_percent, how = 'outer', on = 'index').fillna(0).rename(
    columns={"Groupname_x": "Converter %", "Groupname_y": "Originator %"}), 5)
first_last = np.round(pd.merge(merge_1, merge_2, how = 'outer', on = 'index').fillna(0), 3)

# + [markdown] id="iJSioY9ZmAiX"
# The table below displays the number of orders and corresponding share of credit from the two attribution (first and last touch) models.

# + colab={"base_uri": "https://localhost:8080/", "height": 482} id="h4a7PHj3ipUw" outputId="149110c4-c065-4453-8fa5-8d3f84b980fa"
# Append a TOTAL row summing every numeric column.
new_row = {'index': 'TOTAL', 'Converter': first_last.Converter.sum(), 'Originator': first_last.Originator.sum(),
           'Converter %': first_last['Converter %'].sum(), 'Originator %': first_last['Originator %'].sum()}
# DataFrame.append was deprecated in pandas 1.4 and removed in 2.0;
# build a one-row frame and concatenate instead.
first_last = pd.concat([first_last, pd.DataFrame([new_row])], ignore_index=True)
first_last

# + [markdown] id="wj1VW-almOS0"
# **b) (10 pts) In a single bar chart, plot the share of credit (in percentage) for the first- and last touch attribution models by marketing channel.**

# + colab={"base_uri": "https://localhost:8080/", "height": 859} id="MDzpRnvNkWBr" outputId="256ec20d-6b50-4b01-8b13-567c7fd5a41d"
# Drop the appended TOTAL row before plotting (keeps the channel rows only;
# assumes 13 channels — TODO confirm against the merged table above).
first_last[:13].plot(x='index', y=['Converter %', 'Originator %'], kind='bar', figsize=(15, 10))

# + [markdown] id="5OigNpuRoXWD"
# **c) (10 pts) Compare results from the two attribution models. What would be the
# consequence to Winters if it allocated its marketing budget entirely based on the last-touch attribution model?**

# + [markdown] id="CqeRu8fsowku"
# The CPM channel has the highest share of credit when using both first and last touch attribution models (51.01% and 37.98% respectively).
# The Buzz Affiliates channel holds the second highest share of credit when using the last touch attribution model (29.87%), but has a significantly lower proportion of credit share for the first touch attribution model (13.83%). When customers search for Google brands, the first touch attribution model shows that this accounts for 29.13% of the total share of credit. The CJ channel has the third highest share of credit (13.77%) across the last touch attribution model. We note that all other channels have less than 8% of the total share of credit across both the first and last touch attribution models. This implies that CPM and Buzz Affiliates have the highest share of credit across both models, with the addition of the CJ channel's share of credit only for the last touch attribution model.
#
# If the marketing budget is allocated entirely based on the last touch attribution model, then Winters will not be able to account for the consumer interactions with all other touchpoints. There might be an interaction effect between all these different marketing touchpoints, which a single-touch model (either first or last touch) will fail to take into account.

# + [markdown] id="xwMEW2azragc"
# **Q2. (20 pts) Compare new customers and old customers**
#
# **a) (5 pts) What is the average number of days that it takes for a new customer to convert (from the first touchpoint)?
What is the average number of days that it takes for an old customer to convert?** # + id="szlllrDbmjQT" num_order_new = len(df[(df['Positionname']=='CONVERTER')&(df['Newcustomer'] == 'Y')]) num_order_old = len(df[(df['Positionname']=='CONVERTER')&(df['Newcustomer'] == 'N')]) # + colab={"base_uri": "https://localhost:8080/"} id="zmaTkU37s5JI" outputId="521c6a7a-af29-4ad7-b114-cc28c687f976" new_firsttouchpoint = df[(df['Newcustomer'] == 'Y') & (df['Positionname'] == 'ORIGINATOR')] new_firsttouchpoint.DaysToConvert.mean() # + [markdown] id="mlMIuedIvIC8" # On average, it takes 5.5 days for a new customer to convert from the first touch point. # + colab={"base_uri": "https://localhost:8080/"} id="u2I4GRmJvIOZ" outputId="44fad093-ddb7-4573-8be1-f9e2a7aacf99" old_firsttouchpoint = df[(df['Newcustomer'] == 'N') & (df['Positionname'] == 'ORIGINATOR')] old_firsttouchpoint.DaysToConvert.mean() # + [markdown] id="nPH-jNuxvW2-" # It takes 32 days on average for an old customer to convert from the first touch point. # + [markdown] id="Oo0S8ieYvk97" # **b) (5 pts) What is the average number of touchpoints by new versus old customer’s orders? Hint: Use the Touches variable if available. If not, create the ‘Touches’ variable for the number of touchpoints per order. 
R users can use the add_count() function.** # + colab={"base_uri": "https://localhost:8080/", "height": 449} id="nbTWj00-vl9D" outputId="41f3538e-4f9d-49c6-cb4b-0349ef744b40" new = df[df['Newcustomer'] == 'Y'] temp = new.groupby('Orderid')['Position'].count() temp_new = pd.DataFrame(temp) temp_new['Touches'] = temp_new['Position'] temp_new = temp_new['Touches'] pd.DataFrame(temp_new) # + colab={"base_uri": "https://localhost:8080/"} id="Kdfadz-_1pHK" outputId="164d8310-a235-4bf4-bd49-d8ed62e8f734" temp_new.sum()/temp_new.shape[0] # + colab={"base_uri": "https://localhost:8080/", "height": 449} id="e3EAilXJ2DsE" outputId="39450691-ebec-49ec-b113-9910e41e8b03" old = df[df['Newcustomer'] == 'N'] temp = old.groupby('Orderid')['Position'].count() temp_old = pd.DataFrame(temp) temp_old['Touches'] = temp_old['Position'] temp_old = temp_old['Touches'] pd.DataFrame(temp_old) # + colab={"base_uri": "https://localhost:8080/"} id="oKKQLbo32E7b" outputId="68503bec-a228-4dea-d684-aabfd3c98795" temp_old.sum()/temp_old.shape[0] # + [markdown] id="ztISmUvn21cT" # New customers on average have 4.32 touchpoints while old customers on average have 5.24 touchpoints. # + [markdown] id="-CU3NPFt4B04" # **c) (5 pts) What is the average order sales amount by new versus old customer’s orders?** # + colab={"base_uri": "https://localhost:8080/"} id="tDKA2HPosg4t" outputId="58f6a668-0de0-4a86-d08e-1dc6915323ec" print(new.Saleamount.mean()) print(old.Saleamount.mean()) # + [markdown] id="AFnHijQt4TyP" # The average order sales for new customers' orders is 264.26 USD while the average order sales for old customers' orders is 205.50 USD. # + [markdown] id="xSk8BD0x4jG0" # **d) (5 pts) Summarize how new and old customers differ along these three variables.** # + [markdown] id="NsDZD7gW4o2q" # The three variables used for the summary include the average number of days to convert, average number of touchpoints and average order sales for customers' orders. 
# # Old customers take approximately 27 days more to convert than new customers and they have a marginally higher number of touchpoints on average (old customers have 5.34 touchpoints on average while new customers have 4.32 touchpoints on average). However, the average order sales for new customers' is higher than that of old customers. This indicates that there might be some new customers who might be making bulk purchases, while old customers prefer to visit the site regularly to make smaller purchases. Old customers might prefer browsing on the website longer before making a purchase, which in turn leads to having more touchpoints when browsing longer on the site. # + colab={"base_uri": "https://localhost:8080/", "height": 482} id="gLxEoe3Ys3cd" outputId="98cd4df3-e46b-418d-fa82-4bb4a8e947c7" T_first_new_percent = pd.DataFrame(df.loc[(df['Positionname']=='ORIGINATOR')&(df['Newcustomer'] == 'Y'), 'Groupname'].value_counts()/num_order_new*100).reset_index().sort_values('index') T_first_new_count = pd.DataFrame(df.loc[(df['Positionname']=='ORIGINATOR')&(df['Newcustomer'] == 'Y'), 'Groupname'].value_counts()).reset_index().sort_values('index') T_first_old_percent = pd.DataFrame(df.loc[(df['Positionname']=='ORIGINATOR')&(df['Newcustomer'] == 'N'), 'Groupname'].value_counts()/num_order_old*100).reset_index().sort_values('index') T_first_old_count = pd.DataFrame(df.loc[(df['Positionname']=='ORIGINATOR')&(df['Newcustomer'] == 'N'), 'Groupname'].value_counts()).reset_index().sort_values('index') # merging all 4 tables merge_1 = np.round(pd.merge(T_first_new_count, T_first_old_count, how = 'outer', on = 'index').fillna(0).rename( columns={"Groupname_x": "New Customers", "Groupname_y": "Old Customers"}), 3) merge_2= np.round(pd.merge(T_first_new_percent, T_first_old_percent, how = 'outer', on = 'index').fillna(0).rename( columns={"Groupname_x": "New Customer Share of Credit", "Groupname_y": "Old Customer Share of Credit"}), 5) first_last = np.round(pd.merge(merge_1, 
merge_2, how = 'outer', on = 'index').fillna(0), 3) new_row = {'index': 'TOTAL', 'New Customers': first_last['New Customers'].sum(), 'Old Customers': first_last['Old Customers'].sum(), 'New Customer Share of Credit': first_last['New Customer Share of Credit'].sum(), 'Old Customer Share of Credit': first_last['Old Customer Share of Credit'].sum()} first_last = first_last.append(new_row, ignore_index=True) first_last # + [markdown] id="Nbq2jdIWKp-_" # **Q3. (20 pts) Consider the revenue per marketing channel using first-touch attribution.** # **a) (10 pts) Create a table (as in Q1) containing the average sales per order as well as the total revenue by originator channel.** # + colab={"base_uri": "https://localhost:8080/"} id="2NDpyqwyLfVz" outputId="9b495b10-90ee-4c5e-f548-8c6b763a70ab" originator = df[df['Positionname'] == 'ORIGINATOR'] originator.groupby('Positionname')['Saleamount'].mean() # + colab={"base_uri": "https://localhost:8080/", "height": 482} id="2Q-uM5TuMGB8" outputId="4740adc7-a14f-4eb0-f654-aee27e47f7da" df1=pd.DataFrame(df.loc[df['Positionname']=='ORIGINATOR']) originator1 = pd.DataFrame(df1.groupby('Groupname')['Saleamount'].mean()) originator1 # + colab={"base_uri": "https://localhost:8080/", "height": 482} id="pdtEvtBHOoE0" outputId="e200c15c-ed23-45f7-999e-398b4ba697ee" df1=pd.DataFrame(df.loc[df['Positionname']=='ORIGINATOR']) originator2 = pd.DataFrame(df1.groupby('Groupname')['Saleamount'].sum()) originator2 # + colab={"base_uri": "https://localhost:8080/", "height": 482} id="yT5S2N6sRRAy" outputId="3c0fd37a-56dc-4384-9ebe-a38d33041ffe" # merging the 2 tables merge_1 = np.round(pd.merge(originator1, originator2, how = 'outer', on = 'Groupname').fillna(0).rename( columns={"Saleamount_x": "Average Sales by Channel", "Saleamount_y": "Total Sales by Channel"}), 3) merge_1 # + [markdown] id="XRnU8SuISfgd" # **b) (5 pts) What is the total incremental gross revenue accruing to Winters by originator channel? Express your answer in a table. 
Assume that Winters has a gross margin of 40%. Also assume 5% of sales from branded search are incremental, and 10% sales are incremental for the remaining channels.** # + [markdown] id="4JB7b54eWD2s" # **c) (5 pts) You just found out that Winters search ad team spent $4,200 on branded search advertising during the time period in the data. What is your advice to the search team based on the calculation above?** # + [markdown] id="kF3tATtOWHSL" # **Q4. (25 pts) Linear/uniform attribution # The linear attribution model divides the attribution share between touches equally. For example, an order with one CPM, one CJ, and one TV touchpoint will have place one third attribution share on each touch. This can be accomplished by using the Touches variable (see Q2) to define a new variable: # LinearAttributionShare = 1 / Touches** # + colab={"base_uri": "https://localhost:8080/", "height": 173} id="yNHDnxwKRp9T" outputId="a430427c-7a53-468e-f379-a38bc0ea6c7f" temp = df.groupby('Orderid')['Position'].count() uniform_df = pd.DataFrame(temp) uniform_df['Touches'] = uniform_df['Position'] uniform_df = uniform_df['Touches'] uniform = pd.DataFrame(uniform_df) uniform.head(3) # + colab={"base_uri": "https://localhost:8080/", "height": 141} id="AMZB58O2Yl-M" outputId="9943be2f-a6fa-4ba0-e4be-a0b9ab75af2d" df_uniform = np.round(pd.merge(df, uniform, how = 'outer', on = 'Orderid').fillna(0), 3) df_uniform.head(3) # + colab={"base_uri": "https://localhost:8080/", "height": 141} id="u4cFmu9FY4mZ" outputId="ae778cff-fc91-4789-ff15-ff472577b1d8" df_uniform['LinearAttributionShare'] = 1/(df_uniform.Touches) df_uniform.head(3) # + [markdown] id="IHcaujtfeYdN" # **a) (10 pts) For each channel, what is the sum of the linear attribution shares? What is the corresponding share of credit (in percentage) according to the linear attribution model? 
Express your answer in a table like in Q1.** # + colab={"base_uri": "https://localhost:8080/", "height": 482} id="TGscWGkVaacE" outputId="76227bf0-f23c-4663-de2a-4f82d09f161c" linear = pd.DataFrame(df_uniform.groupby('Groupname')['LinearAttributionShare'].sum()) linear['Linear Atrribution-Share of Credit %'] = linear['LinearAttributionShare']/sum(linear['LinearAttributionShare'])*100 linear # + [markdown] id="7xy07ltjefnU" # **b) (10 pts) In a single bar chart, plot the share of credit (in percentage) for all three attribution models: first-touch, last-touch and linear.** # + colab={"base_uri": "https://localhost:8080/", "height": 450} id="26zI20JBb93i" outputId="22f605b7-64e4-43c0-ae85-56ecc3df228a" first_last['Channels'] = first_last['index'] linear['Channels'] = linear.index merge_3= np.round(pd.merge(first_last, linear, how = 'outer', on = 'Channels').fillna(0), 5) merge_3['First Touch-Share of Credit %'] = merge_3['Originator %'] merge_3['Last Touch-Share of Credit %'] = merge_3['Converter %'] merge_3 = merge_3[['index', 'First Touch-Share of Credit %', 'Last Touch-Share of Credit %', 'Linear Atrribution-Share of Credit %']] merge_3 = merge_3[:13] merge_3 # + colab={"base_uri": "https://localhost:8080/", "height": 859} id="VzgOHo8jfLx1" outputId="34173729-a5df-4d84-a1cc-047b24b4db51" merge_3[:13].plot(x='index', y=['First Touch-Share of Credit %', 'Last Touch-Share of Credit %', 'Linear Atrribution-Share of Credit %'], kind='bar', figsize=(15, 10)) # + [markdown] id="MNLGYAyejNpe" # **c) (5 pts) Compare the linear model to the first-touch and last-touch models.** # + [markdown] id="uiplx46cjRWv" # CPM holds the highest share of credit across all 3 models (~50% for last touch and linear models and 37.98% for the first touch model). The Buzz Affiliate channel nholds the second highest share of credit across both the linear and last touch models. Noticeably, both search channels (Google and MSN) only display share of credit across linear and first touch models. 
# The linear model is the only attribution model that has share of credit across all 13 channels.
#
# The linear model has some disadvantages as well. While it assigns equal credits to each touchpoint, we lose the ability to examine the true impact of each touchpoint on the sale.

# + [markdown] id="qvSVqkr3xbQm"
# **Q5. (30 pts) Examine the role of the intermediate (Roster and Assist) touch points.**
#
# **a) (10 pts) Focusing on the top channels, what is the proportion of each channel’s touchpoints by position name: 1) Originator, 2) Roster, 3) Assist, and 4) Converter. Show your result using a table like the following (with the exact top channels listed)**

# + colab={"base_uri": "https://localhost:8080/"} id="9iSRiosJ2mzB" outputId="ab17dd04-2e35-47b8-884b-ee6ae33280b6"
# Total number of touchpoints per channel — used to identify the top channels
# filtered below.
df.groupby('Groupname')['Position'].count()

# + colab={"base_uri": "https://localhost:8080/", "height": 265} id="OqVcTw-gjEZo" outputId="e2751a3d-2b5e-4348-ac00-a5120259821e"
# Keep only the top channels; isin() is equivalent to the chained '|' masks but
# far more readable.
top_channels = ['BUZZ AFFILIATE', 'CJ', 'CPM', 'SEARCH GOOGLE BRAND',
                'SEARCH GOOGLE NON-BRAND', 'SEARCH MSN BRAND', 'TV']
filter_df = df[df['Groupname'].isin(top_channels)]

# Count touchpoints per (channel, position) pair and pivot to one row per
# channel with a column per position type.
channel = pd.DataFrame(filter_df.groupby(['Groupname', 'Positionname'])['Position'].count())
channel.reset_index(inplace=True)
channel_pivot = channel.pivot(index='Groupname', columns='Positionname', values='Position')
channel_pivot = channel_pivot.fillna(0)

# Convert raw counts to the share (in %) of each channel's own touchpoints.
channel_pivot['Total_old'] = channel_pivot.ASSIST + channel_pivot.CONVERTER + channel_pivot.ORIGINATOR + channel_pivot.ROSTER
for position_col in ['ASSIST', 'CONVERTER', 'ORIGINATOR', 'ROSTER']:
    channel_pivot[position_col] = channel_pivot[position_col] / channel_pivot['Total_old'] * 100

# Sanity-check column: the four shares should sum to 100% per row.
channel_pivot['TOTAL'] = channel_pivot.ASSIST + channel_pivot.CONVERTER + channel_pivot.ORIGINATOR + channel_pivot.ROSTER
channel_pivot.drop(columns=['Total_old'], inplace=True)
channel_pivot.reset_index(inplace=True)
channel_pivot

# + [markdown] id="2zi7Q899553y"
# **b) (10 pts) In a single bar chart, plot the share in percentage (y-axis) of touchpoint types by marketing channels (x-axis).**

# + colab={"base_uri": "https://localhost:8080/", "height": 859} id="uWDT8L7Q5xdV" outputId="380f13a6-4b54-419a-d1b7-665ea0a9d19e"
# Bug fix: the original cell called
#     matplotlib.pyplot.title(label, fontdict=None, loc='center', pad=None, **kwargs)
# which is the documentation signature pasted verbatim — `label` and `kwargs`
# are undefined names, so the cell raised a NameError.  Use the Axes object
# returned by DataFrame.plot to set a real title instead.
ax = channel_pivot.plot(x='Groupname', y=['ASSIST', 'CONVERTER', 'ORIGINATOR', 'ROSTER'],
                        kind='bar', figsize=(15, 10))
ax.set_title('Share of touchpoint types by marketing channel')

# + [markdown] id="7LYc7A9l-7Tc"
# **c) (10 pts) Summarize the touch-point type results. Which channels seem to have relatively more or less of its touchpoints as rosters and assist?**
#
# All channels include 'rosters' and 'assists' as touchpoints, as opposed to the 'converter', where both searches (MSN and Google) don't have any proportion of the 'converter' touchpoint.
#
# **Roster**
#
# * The CPM, Buzz Affiliate, Search Google Brand and TV channels have a higher proportion of 'Rosters' as their touchpoints (49.18%, 30.63%, 27.91% and 28.07% respectively).
# * The CJ, Search Google Non-Brand and Search MSN Brand channels have lower proportion of 'Rosters' as their touchpoints (23.95%, 14.81% and 23.98% respectively).
#
# **Assist**
# * The CPM, Buzz Affiliate and CJ channels have a higher proportion of 'Assists' as their touchpoints (18.08%, 18.83% and 16.48% respectively).
# * The Search Google Non-Brand, Search MSN Brand and TV channels have a lower proportion of 'Assists' as their touchpoints (6.48%, 7.60% and 12.28% respectively).
# # # **Compared with linear attribution, which of these channels would receive too much or too little credit under first- and lasttouch attribution?** # # * The graph in 4b) displays that the channels with the highest share of credit based on the linear attribution model include CPM, Buzz Affiliates and Search Google Brand (50.88%, 20.84% and 12.35% respectively). # * In the above graph, we notice that the channels with the highest proportion of 'Originator' (first touch attribution) include Search Google Brand, Search Google Non Brand and Search MSN Brand, out of which the only overlap between this and the share of credit in the linear attribution model is with the Search Google Brand channel. The Search Google Brand channel has the second highest proportion of 'Originators' as its touchpoint, and the linear attribution model displays a slightly high share of credit for this channel (12.35%). # * In the above graph, we notice that the channels with the highest proportion of 'Convertor' (last touch attribution) include CJ, Buzz Affiliate, Search Google Non-Brand and TV, out of which the only overlap between this and the share of credit in the linear attribution model is with the Buzz Affiliates. The Buzz Affiliates channel has the third highest proportion of 'Convertors' as its touchpoint, and the linear attribution model displays a high share of credit for this channel (20.84%). # * The CPM channel has relatively low proportions of 'Originator' and 'Convertor' as touchpoints, as compared to the linear attribution model which displays the highest share of credit for this channel (50.88%).
Assignment_3.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: pySpark (Spark 1.6.0) # language: python # name: pyspark1.6 # --- # + #SKIP_COMPARE_OUTPUT import pixiedust pixiedust.installPackage("com.databricks:spark-csv_2.10:1.5.0") pixiedust.installPackage("org.apache.commons:commons-csv:0") # - pixiedust.sampleData() #SKIP_COMPARE_OUTPUT dd = pixiedust.sampleData(1) dd.count() # + pixiedust={"displayParams": {"aggregation": "SUM", "charttype": "subplots", "handlerId": "barChart", "keyFields": "name", "mpld3": "true", "rowCount": "25", "showLegend": "true", "valueFields": "mpg,engine"}} #SKIP_COMPARE_OUTPUT display(dd, no_gen_tests='true') # - display(dd,cell_id='174EF8FEFACF47F9811183C2C0EE3DC3',showLegend='true',rowCount='25',mpld3='true',aggregation='SUM',valueFields='mpg,engine',charttype='subplots',keyFields='name',handlerId='barChart',rendererId='matplotlib',nostore_figureOnly='true',nostore_cw='1098',nostore_bokeh='false',prefix='850663c0') display(dd,cell_id='174EF8FEFACF47F9811183C2C0EE3DC3',showLegend='true',rowCount='25',mpld3='true',aggregation='SUM',valueFields='mpg,engine',charttype='stacked',keyFields='name',handlerId='barChart',rendererId='matplotlib',nostore_figureOnly='true',nostore_cw='1098',nostore_bokeh='false',prefix='7e232629') display(dd,cell_id='174EF8FEFACF47F9811183C2C0EE3DC3',showLegend='true',rowCount='25',mpld3='true',aggregation='SUM',valueFields='mpg,engine',charttype='grouped',keyFields='name',handlerId='barChart',rendererId='matplotlib',nostore_figureOnly='true',nostore_cw='1098',nostore_bokeh='false',prefix='3cdb283f') display(dd,cell_id='174EF8FEFACF47F9811183C2C0EE3DC3',showLegend='true',rowCount='25',mpld3='true',aggregation='SUM',valueFields='mpg,engine',charttype='grouped',keyFields='name',handlerId='dataframe',nostore_cw='1098',nostore_bokeh='false',prefix='a7e4ae58')
tests/TestDisplay-SampleDataGroupedStackedSub.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # name: python3 # --- # + id="VK4LSq3HhSGG" colab_type="code" outputId="8fd08a4e-7716-4bbd-9ccd-42bcc72f731f" executionInfo={"status": "ok", "timestamp": 1556896647063, "user_tz": -120, "elapsed": 3247, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/-Jn7z57ibgqA/AAAAAAAAAAI/AAAAAAAAAl4/Px3MbmkWgsU/s64/photo.jpg", "userId": "04365449340744413916"}} colab={"base_uri": "https://localhost:8080/", "height": 34} # ! git clone https://github.com/5amessi/license_plates.git # + id="Hq9S8YUjhS1T" colab_type="code" outputId="c6f2b6df-9451-4460-9628-04b8b176cfcf" executionInfo={"status": "ok", "timestamp": 1556896650035, "user_tz": -120, "elapsed": 6207, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/-Jn7z57ibgqA/AAAAAAAAAAI/AAAAAAAAAl4/Px3MbmkWgsU/s64/photo.jpg", "userId": "04365449340744413916"}} colab={"base_uri": "https://localhost:8080/", "height": 34} # ! 
git clone https://github.com/SeyedHamidreza/car_plate_dataset.git # + id="daT8pqk5hS4C" colab_type="code" colab={} import zipfile zip_ref = zipfile.ZipFile("/content/car_plate_dataset/IRCP_dataset_1024X768.zip", 'r') zip_ref.extractall("") zip_ref.close() # + id="Tah4F3UHhS6x" colab_type="code" outputId="7602a263-af09-4c5a-de85-5bbc25791050" executionInfo={"status": "ok", "timestamp": 1556896661389, "user_tz": -120, "elapsed": 17537, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/-Jn7z57ibgqA/AAAAAAAAAAI/AAAAAAAAAl4/Px3MbmkWgsU/s64/photo.jpg", "userId": "04365449340744413916"}} colab={"base_uri": "https://localhost:8080/", "height": 153} import scipy import cv2 from glob import glob import numpy as np import matplotlib.pyplot as plt import keras from keras.models import * from keras.layers import * class DataLoader(): def __init__(self, dataset_name, img_res=(480,240),out_res=(480,240)): self.dataset_name = dataset_name self.img_res = img_res self.out_res = out_res def load_data(self,png = False): if png == True: path = glob('/content/%s/*.png' % (self.dataset_name)) else: path = glob('/content/%s/*' % (self.dataset_name)) imgs_hr = [] imgs_lr = [] for idx , i in enumerate(path): if idx >= 1000: break img = cv2.imread(i) w, h = self.img_res low_w, low_h = int(w / 4), int(h / 4) img_hr = cv2.resize(img, self.out_res) img_lr = cv2.resize(img, (low_w, low_h)) img_lr = cv2.resize(img_lr, self.img_res) flr=np.fliplr(img_lr) fhr=np.fliplr(img_hr) imgs_hr.append(img_hr) imgs_hr.append(fhr) imgs_lr.append(img_lr) imgs_lr.append(flr) imgs_hr = np.array(imgs_hr) / 127.5 - 1. imgs_lr = np.array(imgs_lr) / 127.5 - 1. 
return imgs_hr, imgs_lr dl = DataLoader("license_plates") hr , lr = dl.load_data() print(np.shape(hr)) print(np.shape(lr)) dl = DataLoader("IRCP_dataset_1024X768") hr2 , lr2 = dl.load_data() print(np.shape(hr2)) print(np.shape(lr2)) hr = np.concatenate((hr, hr2)) lr = np.concatenate((lr, lr2)) print(np.shape(hr)) print(np.shape(lr)) t1 = (hr[0] + 1) * 127.5 t1 = np.array(t1, dtype=np.uint8) cv2.imwrite('hr.jpg',t1) t1 = (lr[0] + 1) * 127.5 t1 = np.array(t1, dtype=np.uint8) cv2.imwrite('lr.jpg',t1) # + id="QqQiatkPhS9h" colab_type="code" colab={} def load_image_test(img): imgs_lr = [] w, h = 480,240 img_lr = cv2.resize(img, (w,h)) imgs_lr.append(img_lr) imgs_lr = np.array(imgs_lr) / 127.5 - 1. return imgs_lr # + id="XeY1DtgiiiTE" colab_type="code" colab={} def pred(count = 0,idx = 0): test = cv2.imread("/content/test.jpg") test = load_image_test(test) result = model.predict([test]) result = (result + 1) * 127.5 result = np.array(result, dtype=np.uint8) cv2.imwrite("testout%d.jpg"%(count),result[0]) result = model.predict([[lr[idx]]]) result = (result + 1) * 127.5 result = np.array(result, dtype=np.uint8) cv2.imwrite("output%d.jpg"%(count),result[0]) result = (lr[idx] + 1) * 127.5 result = np.array(result, dtype=np.uint8) cv2.imwrite("input%d.jpg"%(count),result) model.save("model%d"%(count)) # + id="wH70IAqahTAK" colab_type="code" colab={} from keras.engine.saving import load_model from keras.layers import Input, Dense, Reshape, Flatten, Dropout, Concatenate from keras.layers import BatchNormalization, Activation, ZeroPadding2D, Add from keras.layers.advanced_activations import PReLU, LeakyReLU from keras.layers.convolutional import UpSampling2D, Conv2D from keras.applications import VGG19 from keras.models import Sequential, Model from keras.optimizers import Adam import datetime import matplotlib.pyplot as plt import sys import numpy as np import os def Cnn(): n_residual_blocks = 16 def residual_block(layer_input, filters): d = Conv2D(filters, kernel_size=3, 
strides=1, padding='same')(layer_input) d = BatchNormalization(momentum=0.5)(d) d = Activation('relu')(d) d = Conv2D(filters, kernel_size=3, strides=1, padding='same')(d) d = BatchNormalization(momentum=0.5)(d) d = Add()([d, layer_input]) return d img_lr = Input(shape=(240,480,3)) c1 = Conv2D(64, kernel_size=9, strides=1, padding='same')(img_lr) c1 = Activation('relu')(c1) r = residual_block(c1, 64) for _ in range(n_residual_blocks - 1): r = residual_block(r, 64) c2 = Conv2D(64, kernel_size=3, strides=1, padding='same')(r) c2 = BatchNormalization(momentum=0.5)(c2) c2 = Add()([c2, c1]) gen_hr = Conv2D(3, kernel_size=9, strides=1, padding='same', activation='tanh')(c2) return Model(img_lr, gen_hr) # + id="Q3wt0HqRiGAi" colab_type="code" outputId="51a06f4a-23b9-4eff-e8b8-30d8f97af6ee" executionInfo={"status": "ok", "timestamp": 1556919992617, "user_tz": -120, "elapsed": 36295, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/-Jn7z57ibgqA/AAAAAAAAAAI/AAAAAAAAAl4/Px3MbmkWgsU/s64/photo.jpg", "userId": "04365449340744413916"}} colab={"base_uri": "https://localhost:8080/", "height": 3539} model = Cnn() optimizer = keras.optimizers.Adam(0.0001) model.compile(loss='mse', optimizer=optimizer) model.fit(x=lr,y=hr,batch_size=4,epochs=100,verbose=1) pred(0) # + id="hAHKsf-k_02Z" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 731} outputId="2d861c59-b3a8-46c4-ff13-1a61e1c1ea2f" model.fit(x=lr,y=hr,batch_size=4,epochs=100,verbose=1) pred(1) # + id="CqP1zNy_P4nw" colab_type="code" colab={} model.fit(x=lr,y=hr,batch_size=4,epochs=500,verbose=1) pred(2) # + id="jaRbOs_IjSfR" colab_type="code" colab={} model.fit(x=lr,y=hr,batch_size=4,epochs=1000,verbose=1) pred(3) # + id="x3OpRClqjSmq" colab_type="code" colab={} model.fit(x=lr,y=hr,batch_size=4,epochs=1000,verbose=1) pred(4) # + id="sQy7l_KojSkX" colab_type="code" colab={} model.fit(x=lr,y=hr,batch_size=4,epochs=1000,verbose=1) pred(5) # + id="91yABePyjSiO" colab_type="code" 
colab={} model.fit(x=lr,y=hr,batch_size=4,epochs=1000,verbose=1) pred(6)
RESNET.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python [conda env:root] * # language: python # name: conda-root-py # --- # + import astropy.coordinates as coord import astropy.table as at from astropy.io import fits import astropy.units as u import matplotlib as mpl import matplotlib.pyplot as plt # %matplotlib inline import numpy as np from pyia import GaiaData # gala import gala.coordinates as gc import gala.dynamics as gd import gala.potential as gp from gala.units import galactic # - dr16 = fits.getdata('/Users/apricewhelan/data/APOGEE_beta/allStarLite-r13-l33-58932beta-GaiaDR2-xmatch.fits') row = at.Table(dr16[dr16['APOGEE_ID'].astype(str) == '2M00093859-5202193']) row['LOGG', 'TEFF'] row['VHELIO_AVG', 'radial_velocity'] row['GAIA_PMRA', 'pmra'] row['GAIA_PMDEC', 'pmdec'] g = GaiaData(row[0]) # c = g.get_skycoord() c = coord.SkyCoord(ra=2.410821*u.deg, dec=-52.038704*u.deg, distance=4.7905*u.kpc, pm_ra_cosdec=6.52841742*u.mas/u.yr, pm_dec=-4.1192706*u.mas/u.yr, radial_velocity=-0.712103*u.km/u.s) galcen = c.transform_to(coord.Galactocentric()) w0 = gd.PhaseSpacePosition(galcen.data) pot = gp.MilkyWayPotential() orbit = pot.integrate_orbit(w0, dt=1, n_steps=6000) # + fig = orbit.plot() for ax in fig.axes: ax.set_xlim(-30, 30) ax.set_ylim(-30, 30) # - orbit.pericenter(), orbit.apocenter() orbit.eccentricity()
apogee-jhelum/notebooks/Aidan-Ally-orbit.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + # #!python -m pip install --upgrade pip # - #show result directly # #%pylab inline # %matplotlib inline # + import numpy as np import matplotlib as plt from matplotlib import style style.use('ggplot') import pandas as pd from sklearn.metrics import accuracy_score from sklearn.cluster import KMeans #from sklearn import preprocessing,cross_validation churn = pd.read_csv('../datasets/Churn.csv') # - # # Data Exploration churn.head(3) churn.isna().any() # + #churn.dtypes # - #explore numerical data churn.describe().transpose() # + # manage if there is negative #churn1 = churn.drop(churn[churn.revenue < 0].index) #churn2 = churn1.drop(churn1[churn1.eqpdays < 0].index) # - #explore non numerical data churn['Churn'] # ## تحليل التوزيع >> Analysis of Distribution #bins _> x-axis cosider count # y-axis consider values churn['gender'].hist(bins=50) churn['MonthlyCharges'].hist(bins=50) # + #combine between more attricute/column to study dataset better #temp1= pd.crosstab(churn['total eve calls'],churn['churn']) #temp1.plot(kind='bar',stacked=True,color=['red','blue'],grid=False) # - r1_churn_Mchurge= pd.crosstab(churn["gender"],churn["Churn"]) r1_churn_Mchurge.plot(kind='bar',color=["red","black"]) #another way import matplotlib.pyplot as plt fig =plt.figure(figsize=[8,4]) ax1=fig.add_subplot(121) ax1.set_ylabel('total night calls') ax1.set_xlabel('churn') r1_churn_Mchurge.plot(kind='bar') # # Data cleaning and preparation # ### prepare non numerical data # # #### missing values in the dataset : # # ##### 1- fill--> non/standard types ( NAN , NA , na , nan , " " ) # ##### 2- convert--> unexpected data type (string--> yes,no,true...... to int--> 0,1,2,3......) 
# # + #churn.convert_objects(convert_numeric=True) churn.fillna(0, inplace=True) def handle_non_numerical_data(churn): columns=churn.columns.values for column in columns: text_digit_vals={'yes':1,'no':0} def convert_to_int(val): return text_digit_vals[val] if churn[column].dtypes != np.int64 and churn[column].dtypes != np.float64: column_contents=churn[column].values.tolist() unique_elements=set(column_contents) x=0 for unique in unique_elements: if unique not in text_digit_vals: text_digit_vals[unique]=x x+=1 churn[column]=list(map(convert_to_int,churn[column])) return churn churn= handle_non_numerical_data(churn) # - churn.head(5) # #### put in dataframe and distribute to X and y # import seaborn as sns sns.barplot(x=churn["gender"],y=churn["Churn"],hue=churn["Churn"]) # + X = pd.DataFrame(churn, columns = [ #"state", #"state","phone number","international plan"and "voice mail plan", #problem : data type is string\object "international plan", "voice mail plan" , "account length" , "area code", "number vmail messages", "total day minutes" , "total day calls", "total day charge" , "total eve minutes" , "total eve calls" , "total eve charge" , "total night minutes", "total night calls", "total night charge", "total intl minutes" , "total intl calls", "total intl charge", "customer service calls", ]) y = pd.DataFrame(churn, columns = ["churn"]) # - # #### prepare non numerical data after putting in dataframe # # from sklearn.preprocessing import LabelEncoder # le= LabelEncoder() # churn['account length']=le.fit_transform(churn['account length']) # churn['area code']=le.fit_transform(churn['area code']) # churn['number vmail messages']=le.fit_transform(churn['number vmail messages']) # churn['total day minutes']=le.fit_transform(churn['total day minutes']) # churn['total day calls']=le.fit_transform(churn['total day calls']) # churn['total day charge']=le.fit_transform(churn['total day charge']) # churn['total eve minutes']=le.fit_transform(churn['total eve minutes']) 
# churn['total eve calls']=le.fit_transform(churn['total eve calls'])
# churn['total eve charge']=le.fit_transform(churn['total eve charge'])
# churn['total night minutes']=le.fit_transform(churn['total night minutes'])
# churn['total night calls']=le.fit_transform(churn['total night calls'])
# churn['total night charge']=le.fit_transform(churn['total night charge'])
# churn['total intl minutes']=le.fit_transform(churn['total intl minutes'])
# churn['total intl charge']=le.fit_transform(churn['total intl charge'])
# churn['customer service calls']=le.fit_transform(churn['customer service calls'])
#
# churn['churn']=le.fit_transform(churn['churn'])
#

y.dtypes

# +
#X.head(5)

# +
#X.dtypes
# -

# #a way to split dataset columns into x , y simply
# #X =churn.iloc[:,:].values
# #print(X)
# #y =churn.iloc[:,20:].values
# #print(y)

# Shuffle rows to remove any ordering in the raw data before splitting.
from sklearn.utils import shuffle
X, y = shuffle(X, y)

# # Building a Predictive Model

from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(
    X, y, stratify=y, test_size=0.3, random_state=0)
len(y_test)

# # prepare a data frame for model analysis

# ## decision tree model
#

from sklearn import tree
from sklearn.tree import DecisionTreeClassifier

clf = DecisionTreeClassifier()
clf = clf.fit(X_train, y_train)
prediction = clf.predict(X_test)
#print(prediction)

# +
from sklearn.preprocessing import StandardScaler

# FIX: fit the scaler on the training data only and reuse it for the test
# set.  The original fit a fresh scaler on X_test, which leaks test-set
# statistics into the evaluation.
scaler = StandardScaler()
X_train_scaled = scaler.fit_transform(X_train)
X_test_scaled = scaler.transform(X_test)

clf = DecisionTreeClassifier()
clf.fit(X_train_scaled, y_train.values.ravel())
prediction = clf.predict(X_test_scaled)
# -

# #### accuracy
#

from sklearn.metrics import accuracy_score
print('accuracy =', 100 * accuracy_score(y_test, prediction))

#print(prediction)
len(prediction)

from sklearn.metrics import confusion_matrix
confusion_matrix(y_test, prediction)

from sklearn.metrics import mean_squared_error
mean_squared_error(y_test, prediction)

from sklearn.metrics import classification_report
print(classification_report(y_test, prediction))

# # plot decision tree

# from numpy import loadtxt
# from xgboost import XGBClassifier
# from xgboost import plot_tree
# import matplotlib.pyplot as plt
# # fit model no training data
#
# model = XGBClassifier()
# model.fit(X_train, y_train.values.ravel())
# # plot single tree
# plot_tree(model)
# plt.show()

# ## random forest model
#

# +
#from sklearn.feature_selection import RFECV

# +
#m=RFECV(RandomForestClassifier(),scoring='roc_auc')
#m.fit(X,y.values.ravel())

# +
#m.score(X,y)

# +
from sklearn.ensemble import RandomForestClassifier

clf_rf = RandomForestClassifier(n_estimators=10)
# .values.ravel() flattens the (n, 1) target frame into the 1-D array
# sklearn expects.
clf_rf = clf_rf.fit(X_train, y_train.values.ravel())
# FIX: predict with the random forest -- the original called clf.predict,
# which reused the decision tree from the previous section.
prediction = clf_rf.predict(X_test)
#print(prediction)

# +
# FIX: same scaler for train and test (no refit on test data).
scaler = StandardScaler()
X_train_scaled = scaler.fit_transform(X_train)
X_test_scaled = scaler.transform(X_test)

clf_rf = RandomForestClassifier(n_estimators=15)
clf_rf.fit(X_train_scaled, y_train.values.ravel())
prediction = clf_rf.predict(X_test_scaled)
# -

# #### accuracy
#

print('accuracy =', 100 * accuracy_score(y_test, prediction))

mean_squared_error(y_test, prediction)

# #### Confusion matrix

confusion_matrix(y_test, prediction)

print(classification_report(y_test, prediction))

# FIX: this section analyses the random forest, so read the importances
# from clf_rf; the original read clf.feature_importances_ (the tree's).
feature_imp = pd.Series(clf_rf.feature_importances_, index=X.columns).sort_values(ascending=False)
feature_imp

import matplotlib.pyplot as plt
import seaborn as sns
# #%matplotlib inline
# Creating a bar plot
sns.barplot(x=feature_imp, y=feature_imp.index)
# Add labels to your graph
plt.xlabel('Feature Importance Score')
plt.ylabel('Features')
plt.title("Visualizing Important Features")
#plt.legend()
plt.show()

# +
# Rebuild the design matrix without the least important features.
X = pd.DataFrame(churn, columns=[
    #"state",
    #"state","phone number","international plan"and "voice mail plan",
    #problem : data type is string\object
    "international plan",
    "voice mail plan",
    "account length",
    #"area code",
    #"number vmail messages",
    "total day minutes",
    #"total day calls",
    "total day charge",
    "total eve minutes",
    "total eve calls",
    #"total eve charge" ,
    "total night minutes",
    #"total night calls",
    "total night charge",
    #"total intl minutes" ,
    "total intl calls",
    "total intl charge",
    "customer service calls",
])
# NOTE(review): confirm the target column name ("churn" vs "Churn").
y = pd.DataFrame(churn, columns=["churn"])

# FIX: re-split after changing the feature set.  The original kept the old
# X_train/X_test, so the reduced feature set was never actually used below.
X_train, X_test, y_train, y_test = train_test_split(
    X, y, stratify=y, test_size=0.3, random_state=0)
# -

# +
clf_rf = RandomForestClassifier(n_estimators=10)
clf_rf = clf_rf.fit(X_train, y_train.values.ravel())
# FIX: was clf.predict (the decision tree) in the original.
prediction = clf_rf.predict(X_test)
#print(prediction)

# +
scaler = StandardScaler()
X_train_scaled = scaler.fit_transform(X_train)
X_test_scaled = scaler.transform(X_test)

clf_rf = RandomForestClassifier(n_estimators=15)
clf_rf.fit(X_train_scaled, y_train.values.ravel())
prediction = clf_rf.predict(X_test_scaled)
# -

print('accuracy =', 100 * accuracy_score(y_test, prediction))

from sklearn.neural_network import MLPClassifier
mlp = MLPClassifier(random_state=42)
mlp.fit(X_train, y_train.values.ravel())
prediction = mlp.predict(X_test)

# #### accuracy
#

print('accuracy =', 100 * accuracy_score(y_test, prediction))

# #### improve model

# +
scaler = StandardScaler()
X_train_scaled = scaler.fit_transform(X_train)
X_test_scaled = scaler.transform(X_test)

mlp = MLPClassifier(max_iter=1000, alpha=1, random_state=42)
mlp.fit(X_train_scaled, y_train.values.ravel())
prediction = mlp.predict(X_test_scaled)
# -

# #### accuracy
#

# +
print('accuracy =', 100 * accuracy_score(y_test, prediction))
# -

print(classification_report(y_test, prediction))

# # Evaluating Model

# ### cross validation (select best model)

# #### kfold

from sklearn.model_selection import KFold
kf = KFold(n_splits=10)
kf

for train_index, test_index in kf.split(X, y):
    print(train_index, test_index)


def get_score(model, X_train, X_test, y_train, y_test):
    """Fit *model* on the training split and return its test-set score."""
    model.fit(X_train, y_train)
    return model.score(X_test, y_test)


# +
print(get_score(mlp, X_train, X_test, y_train, y_test))
print(get_score(clf_rf, X_train, X_test, y_train, y_test))
print(get_score(clf, X_train, X_test, y_train, y_test))
# -

# #### stratifiedkfold

# +
# FIX: svm was used below without ever being imported (NameError).
from sklearn import svm
from sklearn.model_selection import cross_val_score

clf = svm.SVC(kernel='linear', C=1)
# FIX: y is a (n, 1) DataFrame; .values.ravel() gives the 1-D array
# cross_val_score expects (the original y.ravel() is not a DataFrame method).
scores = cross_val_score(clf, X, y.values.ravel(), cv=2)
# -
Projects/churn project.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # Clustering algorithms

# This notebook briefly demonstrates k-means clustering and DBSCAN on a toy
# data set.

# ### Creating the data

# +
from sklearn.datasets import make_blobs
import numpy as np
import matplotlib.pyplot as plt

n_samples = 2000

# Five Gaussian blobs: centre coordinates and per-blob standard deviation.
blob_centers = np.array([
    [0.2, 2.3],
    [-1.5, 2.3],
    [-2.8, 1.8],
    [-2.8, 2.8],
    [-2.8, 1.3],
])
blob_std = np.array([0.4, 0.3, 0.1, 0.1, 0.1])

X, y = make_blobs(n_samples, centers=blob_centers,
                  cluster_std=blob_std, random_state=7)
# -

# ### Plotting


def plot_clusters(X, y=None):
    """Scatter-plot the 2-D points in X, optionally coloured by label y."""
    plt.scatter(X[:, 0], X[:, 1], c=y, s=1)
    plt.xlabel("$x_1$", fontsize=14)
    plt.ylabel("$x_2$", fontsize=14, rotation=0)


# The data points without any cluster assignment:
plt.figure(figsize=(8, 4))
plot_clusters(X)
plt.show()

# The data points with a random assignment to four clusters:
plt.figure(figsize=(8, 4))
plot_clusters(X, y=np.random.randint(4, size=n_samples))
plt.show()

# ### K-Means

# Cluster the data with k-means for $k=3$ and compute the silhouette score:

# +
from sklearn.cluster import KMeans
from sklearn.metrics import silhouette_score

kmeans = KMeans(n_clusters=3, random_state=0)
kmeans.fit(X)
y_kmeans = kmeans.predict(X)
print(y_kmeans)

s_score = silhouette_score(X, y_kmeans)
print(f"Silhouette-Score for k=3 is {s_score}")
# -

# ### DBSCAN Clustering

# Now cluster the same data with the DBSCAN algorithm:

from sklearn.cluster import DBSCAN
dbscan = DBSCAN(eps=0.1, min_samples=5)
dbscan.fit(X)
plot_clusters(X, dbscan.labels_)

# ### Exercise:
# Run k-means clustering on the given data set for k = {2,3,4,5,6,7,8,9,10}.
# Use the silhouette score to find the three best values of k and plot the
# clustering for each of them.
# Which clustering is the best?

# +
from sklearn.cluster import KMeans
from sklearn.metrics import silhouette_score

# Run k-means for every candidate k once, remembering both the silhouette
# score and the labels so the best clusterings don't have to be recomputed.
scores = []
labels_per_k = {}
for k in range(2, 11):
    kmeans = KMeans(n_clusters=k, random_state=0)
    kmeans.fit(X)
    y_kmeans = kmeans.predict(X)
    labels_per_k[k] = y_kmeans
    s_score = silhouette_score(X, y_kmeans)
    scores.append(s_score)
    print(f"Score for {k} is {s_score}")
# -

plt.figure(figsize=(8, 4))
plt.plot(range(2, 11), scores)
plt.show()

# FIX: derive the three best k from the silhouette scores just computed,
# instead of hard-coding k = 4, 5, 6 -- that is what the exercise asks for
# and it stays correct if the data changes.  Each clustering gets its own
# figure so the plots don't overlay when run as a script.
best_ks = sorted(range(2, 11), key=lambda k: scores[k - 2], reverse=True)[:3]
for k in sorted(best_ks):
    plt.figure(figsize=(8, 4))
    plot_clusters(X, labels_per_k[k])
    plt.show()
solutions/7_Clustering-Solution.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # Training Neural Networks
#
# The network we built in the previous part isn't so smart, it doesn't know anything about our handwritten digits. Neural networks with non-linear activations work like universal function approximators. There is some function that maps your input to the output. For example, images of handwritten digits to class probabilities. The power of neural networks is that we can train them to approximate this function, and basically any function given enough data and compute time.
#
# <img src="assets/function_approx.png" width=500px>
#
# At first the network is naive, it doesn't know the function mapping the inputs to the outputs. We train the network by showing it examples of real data, then adjusting the network parameters such that it approximates this function.
#
# To find these parameters, we need to know how poorly the network is predicting the real outputs. For this we calculate a **loss function** (also called the cost), a measure of our prediction error. For example, the mean squared loss is often used in regression and binary classification problems
#
# $$
# \ell = \frac{1}{2n}\sum_i^n{\left(y_i - \hat{y}_i\right)^2}
# $$
#
# where $n$ is the number of training examples, $y_i$ are the true labels, and $\hat{y}_i$ are the predicted labels.
#
# By minimizing this loss with respect to the network parameters, we can find configurations where the loss is at a minimum and the network is able to predict the correct labels with high accuracy. We find this minimum using a process called **gradient descent**. The gradient is the slope of the loss function and points in the direction of fastest change. To get to the minimum in the least amount of time, we then want to follow the gradient (downwards). You can think of this like descending a mountain by following the steepest slope to the base.
#
# <img src='assets/gradient_descent.png' width=350px>

# ## Backpropagation
#
# For single layer networks, gradient descent is simple to implement. However, it's more complicated for deeper, multilayer neural networks like the one we've built. Complicated enough that it took about 30 years before researchers figured out how to train multilayer networks, although it's straightforward once you learn about it.
#
# This is done through **backpropagation** which is really just an application of the chain rule from calculus. It's easiest to understand if we convert a two layer network into a graph representation.
#
# <img src='assets/w1_backprop_graph.png' width=400px>
#
# In the forward pass through the network, our data and operations go from right to left here. To train the weights with gradient descent, we propagate the gradient of the cost backwards through the network. Mathematically, this is really just calculating the gradient of the loss with respect to the weights using the chain rule.
#
# $$
# \frac{\partial \ell}{\partial w_1} = \frac{\partial l_1}{\partial w_1} \frac{\partial s}{\partial l_1} \frac{\partial l_2}{\partial s} \frac{\partial \ell}{\partial l_2}
# $$
#
# We update our weights using this gradient with some learning rate $\alpha$.
#
# $$
# w^\prime = w - \alpha \frac{\partial \ell}{\partial w}
# $$
#
# The learning rate is set such that the weight update steps are small enough that the iterative method settles in a minimum.
#
# The first thing we need to do for training is define our loss function. In PyTorch, you'll usually see this as `criterion`. Here we're using softmax output, so we want to use `criterion = nn.CrossEntropyLoss()` as our loss. Later when training, you use `loss = criterion(output, targets)` to calculate the actual loss.
#
# We also need to define the optimizer we're using, SGD or Adam, or something along those lines. Here I'll just use SGD with `torch.optim.SGD`, passing in the network parameters and the learning rate.

# ## Autograd
#
# Torch provides a module, `autograd`, for automatically calculating the gradient of tensors. It does this by keeping track of operations performed on tensors. To make sure PyTorch keeps track of operations on a tensor and calculates the gradients, you need to set `requires_grad` on a tensor. You can do this at creation with the `requires_grad` keyword, or at any time with `x.requires_grad_(True)`.
#
# You can turn off gradients for a block of code with the `torch.no_grad()` content:
# ```python
# x = torch.zeros(1, requires_grad=True)
# >>> with torch.no_grad():
# ...     y = x * 2
# >>> y.requires_grad
# False
# ```
#
# Also, you can turn on or off gradients altogether with `torch.set_grad_enabled(True|False)`.
#
# The gradients are computed with respect to some variable `z` with `z.backward()`. This does a backward pass through the operations that created `z`.

# +
# %matplotlib inline
# %config InlineBackend.figure_format = 'retina'

from collections import OrderedDict

import numpy as np
import time

import torch
from torch import nn
from torch import optim
import torch.nn.functional as F

import helper
# -

# A small 2x2 tensor with gradient tracking enabled.
x = torch.randn(2,2, requires_grad=True)
print(x)

y = x**2
print(y)

# Below we can see the operation that created `y`, a power operation `PowBackward0`.

## grad_fn shows the function that generated this variable
print(y.grad_fn)

# The autograd module keeps track of these operations and knows how to calculate the gradient for each one. In this way, it's able to calculate the gradients for a chain of operations, with respect to any one tensor. Let's reduce the tensor `y` to a scalar value, the mean.

z = y.mean()
print(z)

# You can check the gradients for `x` and `y` but they are empty currently.

print(x.grad)

# To calculate the gradients, you need to run the `.backward` method on a Variable, `z` for example. This will calculate the gradient for `z` with respect to `x`
#
# $$
# \frac{\partial z}{\partial x} = \frac{\partial}{\partial x}\left[\frac{1}{n}\sum_i^n x_i^2\right] = \frac{x}{2}
# $$
#
# (Here $n = 4$ elements, so the general gradient $2x/n$ reduces to $x/2$.)

z.backward()
print(x.grad)
# Printing x/2 alongside x.grad confirms the analytic result above.
print(x/2)

# These gradient calculations are particularly useful for neural networks. For training we need the gradients of the weights with respect to the cost. With PyTorch, we run data forward through the network to calculate the cost, then, go backwards to calculate the gradients with respect to the cost. Once we have the gradients we can make a gradient descent step.

# ## Get the data and define the network
#
# The same as we saw in part 3, we'll load the MNIST dataset and define our network.

# +
from torchvision import datasets, transforms

# Define a transform to normalize the data.
# Normalize((0.5,), (0.5,)) maps pixel values from [0, 1] to roughly [-1, 1].
transform = transforms.Compose([transforms.ToTensor(),
                                transforms.Normalize((0.5,), (0.5,)),
                               ])

# Download and load the training data
trainset = datasets.MNIST('MNIST_data/', download=True, train=True, transform=transform)
trainloader = torch.utils.data.DataLoader(trainset, batch_size=64, shuffle=True)
# -

# I'll build a network with `nn.Sequential` here. Only difference from the last part is I'm not actually using softmax on the output, but instead just using the raw output from the last layer. This is because the output from softmax is a probability distribution. Often, the output will have values really close to zero or really close to one. Due to [inaccuracies with representing numbers as floating points](https://docs.python.org/3/tutorial/floatingpoint.html), computations with a softmax output can lose accuracy and become unstable. To get around this, we'll use the raw output, called the **logits**, to calculate the loss.
# +
# Hyperparameters for our network: 28x28 images flattened to 784 inputs,
# two hidden layers, and one output per digit class.
input_size = 784
hidden_sizes = [128, 64]
output_size = 10

# Build a feed-forward network.  Naming each layer through an OrderedDict
# lets us refer to them later as model.fc1, model.logits, etc.
layers = OrderedDict()
layers['fc1'] = nn.Linear(input_size, hidden_sizes[0])
layers['relu1'] = nn.ReLU()
layers['fc2'] = nn.Linear(hidden_sizes[0], hidden_sizes[1])
layers['relu2'] = nn.ReLU()
layers['logits'] = nn.Linear(hidden_sizes[1], output_size)
model = nn.Sequential(layers)
# -

# ## Training the network!
#
# The first thing we need to do for training is define our loss function. In PyTorch, you'll usually see this as `criterion`. Here we're using softmax output, so we want to use `criterion = nn.CrossEntropyLoss()` as our loss. Later when training, you use `loss = criterion(output, targets)` to calculate the actual loss.
#
# We also need to define the optimizer we're using, SGD or Adam, or something along those lines. Here I'll just use SGD with `torch.optim.SGD`, passing in the network parameters and the learning rate.

# Cross-entropy on the raw logits, optimised with plain SGD.
criterion = nn.CrossEntropyLoss()
optimizer = optim.SGD(model.parameters(), lr=0.01)

# First, let's consider just one learning step before looping through all the data. The general process with PyTorch:
#
# * Make a forward pass through the network to get the logits
# * Use the logits to calculate the loss
# * Perform a backward pass through the network with `loss.backward()` to calculate the gradients
# * Take a step with the optimizer to update the weights
#
# Below I'll go through one training step and print out the weights and gradients so you can see how it changes.
# +
print('Initial weights - ', model.fc1.weight)

images, labels = next(iter(trainloader))
# FIX: flatten with view() and the actual batch size.  The original
# images.resize_(64, 784) hard-coded the batch size; on a partial batch
# resize_ would silently expose uninitialized memory.
images = images.view(images.shape[0], -1)

# Clear the gradients, do this because gradients are accumulated
optimizer.zero_grad()

# Forward pass, then backward pass, then update weights.
# FIX: call the module directly (model(images)) rather than model.forward();
# Module.__call__ is the supported entry point and runs registered hooks.
output = model(images)
loss = criterion(output, labels)
loss.backward()
print('Gradient -', model.fc1.weight.grad)

optimizer.step()
# -

print('Updated weights - ', model.fc1.weight)

# ### Training for real
#
# Now we'll put this algorithm into a loop so we can go through all the images. This is fairly straightforward. We'll loop through the mini-batches in our dataset, pass the data through the network to calculate the losses, get the gradients, then run the optimizer.

optimizer = optim.SGD(model.parameters(), lr=0.003)

epochs = 3
print_every = 40
steps = 0
for e in range(epochs):
    running_loss = 0
    for images, labels in iter(trainloader):
        steps += 1
        # Flatten MNIST images into a 784 long vector (view instead of
        # the in-place resize_, which is deprecated for this use).
        images = images.view(images.shape[0], -1)

        optimizer.zero_grad()

        # Forward and backward passes
        output = model(images)
        loss = criterion(output, labels)
        loss.backward()
        optimizer.step()

        running_loss += loss.item()

        if steps % print_every == 0:
            print("Epoch: {}/{}... ".format(e+1, epochs),
                  "Loss: {:.4f}".format(running_loss/print_every))
            running_loss = 0

# With the network trained, we can check out it's predictions.

# +
images, labels = next(iter(trainloader))

img = images[0].view(1, 784)
# Turn off gradients to speed up this part
with torch.no_grad():
    logits = model(img)

# Output of the network are logits, need to take softmax for probabilities
ps = F.softmax(logits, dim=1)
helper.view_classify(img.view(1, 28, 28), ps)
# -

# Now our network is brilliant. It can accurately predict the digits in our images. Next up you'll write the code for training a neural network on a more complex dataset.
Part 3 - Training Neural Networks.ipynb